summaryrefslogtreecommitdiff
path: root/src
diff options
context:
space:
mode:
authorvlefevre <vlefevre@280ebfd0-de03-0410-8827-d642c229c3f4>2010-08-17 09:10:13 +0000
committervlefevre <vlefevre@280ebfd0-de03-0410-8827-d642c229c3f4>2010-08-17 09:10:13 +0000
commitc9583bdfe064e1069828e518533f7bc29a8fdddb (patch)
tree2400842d4095628b8486fbeabaf7bc7b8af4ed02 /src
parent50ac5b5985174201c7fa6e20496cd2b096107001 (diff)
downloadmpfr-c9583bdfe064e1069828e518533f7bc29a8fdddb.tar.gz
Source reorganization. In short:
* Added directories and moved related files into them: - src for the MPFR source files (to build the library). - doc for documentation files (except INSTALL, README...). - tools for various tools (scripts) and mbench. - tune for tuneup-related source files. - other for other source files (not distributed in tarballs). Existing directories: - tests for the source files of the test suite (make check). - examples for examples. - m4 for m4 files. * Renamed configure.in to configure.ac. * Added/updated Makefile.am files where needed. * Updated acinclude.m4 and configure.ac (AC_CONFIG_FILES line). * Updated the documentation (INSTALL, README, doc/README.dev and doc/mpfr.texi). * Updated NEWS and TODO. * Updated the scripts now in tools. The following script was used: #!/usr/bin/env zsh svn mkdir doc other src tools tune svn mv ${${(M)$(sed -n '/libmpfr_la_SOURCES/,/[^\]$/p' \ Makefile.am):#*.[ch]}:#get_patches.c} mparam_h.in \ round_raw_generic.c jyn_asympt.c src svn mv mbench check_inits_clears coverage get_patches.sh mpfrlint \ nightly-test update-patchv update-version tools svn mv bidimensional_sample.c speed.c tuneup.c tune svn mv *.{c,h} other svn mv FAQ.html README.dev algorithm* faq.xsl fdl.texi mpfr.texi \ update-faq doc svn mv configure.in configure.ac svn cp Makefile.am src/Makefile.am svn rm replace_all [Modifying some files, see above] svn add doc/Makefile.am svn add tune/Makefile.am git-svn-id: svn://scm.gforge.inria.fr/svn/mpfr/trunk@7087 280ebfd0-de03-0410-8827-d642c229c3f4
Diffstat (limited to 'src')
-rw-r--r--src/Makefile.am87
-rw-r--r--src/abort_prec_max.c32
-rw-r--r--src/acos.c142
-rw-r--r--src/acosh.c156
-rw-r--r--src/add.c107
-rw-r--r--src/add1.c535
-rw-r--r--src/add1sp.c384
-rw-r--r--src/add_d.c49
-rw-r--r--src/add_ui.c53
-rw-r--r--src/agm.c177
-rw-r--r--src/ai.c631
-rw-r--r--src/asin.c121
-rw-r--r--src/asinh.c117
-rw-r--r--src/atan.c435
-rw-r--r--src/atan2.c262
-rw-r--r--src/atanh.c127
-rw-r--r--src/bernoulli.c80
-rw-r--r--src/buildopt.c44
-rw-r--r--src/cache.c145
-rw-r--r--src/cbrt.c148
-rw-r--r--src/check.c80
-rw-r--r--src/clear.c31
-rw-r--r--src/clears.c61
-rw-r--r--src/cmp.c104
-rw-r--r--src/cmp2.c243
-rw-r--r--src/cmp_abs.c94
-rw-r--r--src/cmp_d.c38
-rw-r--r--src/cmp_ld.c38
-rw-r--r--src/cmp_si.c101
-rw-r--r--src/cmp_ui.c101
-rw-r--r--src/comparisons.c78
-rw-r--r--src/const_catalan.c152
-rw-r--r--src/const_euler.c221
-rw-r--r--src/const_log2.c192
-rw-r--r--src/const_pi.c120
-rw-r--r--src/constant.c28
-rw-r--r--src/copysign.c38
-rw-r--r--src/cos.c296
-rw-r--r--src/cosh.c126
-rw-r--r--src/cot.c96
-rw-r--r--src/coth.c93
-rw-r--r--src/csc.c76
-rw-r--r--src/csch.c79
-rw-r--r--src/d_div.c49
-rw-r--r--src/d_sub.c49
-rw-r--r--src/digamma.c372
-rw-r--r--src/dim.c48
-rw-r--r--src/div.c676
-rw-r--r--src/div_2exp.c33
-rw-r--r--src/div_2si.c57
-rw-r--r--src/div_2ui.c71
-rw-r--r--src/div_d.c49
-rw-r--r--src/div_ui.c267
-rw-r--r--src/dump.c30
-rw-r--r--src/eint.c316
-rw-r--r--src/eq.c141
-rw-r--r--src/erf.c261
-rw-r--r--src/erfc.c263
-rw-r--r--src/exceptions.c336
-rw-r--r--src/exp.c162
-rw-r--r--src/exp10.c29
-rw-r--r--src/exp2.c146
-rw-r--r--src/exp3.c333
-rw-r--r--src/exp_2.c419
-rw-r--r--src/expm1.c174
-rw-r--r--src/extract.c55
-rw-r--r--src/factorial.c113
-rw-r--r--src/fits_intmax.c120
-rw-r--r--src/fits_s.h86
-rw-r--r--src/fits_sint.c28
-rw-r--r--src/fits_slong.c28
-rw-r--r--src/fits_sshort.c28
-rw-r--r--src/fits_u.h67
-rw-r--r--src/fits_uint.c27
-rw-r--r--src/fits_uintmax.c90
-rw-r--r--src/fits_ulong.c27
-rw-r--r--src/fits_ushort.c27
-rw-r--r--src/fma.c294
-rw-r--r--src/fms.c296
-rw-r--r--src/frac.c144
-rw-r--r--src/free_cache.c52
-rw-r--r--src/gamma.c402
-rw-r--r--src/gammaonethird.c191
-rw-r--r--src/gen_inverse.h106
-rw-r--r--src/get_d.c183
-rw-r--r--src/get_d64.c397
-rw-r--r--src/get_exp.c31
-rw-r--r--src/get_f.c148
-rw-r--r--src/get_flt.c123
-rw-r--r--src/get_ld.c215
-rw-r--r--src/get_si.c69
-rw-r--r--src/get_sj.c136
-rw-r--r--src/get_str.c2554
-rw-r--r--src/get_ui.c65
-rw-r--r--src/get_uj.c95
-rw-r--r--src/get_z.c61
-rw-r--r--src/get_z_exp.c79
-rw-r--r--src/gmp_op.c345
-rw-r--r--src/hypot.c187
-rw-r--r--src/ieee_floats.h76
-rw-r--r--src/init.c29
-rw-r--r--src/init2.c69
-rw-r--r--src/inits.c62
-rw-r--r--src/inits2.c66
-rw-r--r--src/inp_str.c87
-rw-r--r--src/int_ceil_log2.c42
-rw-r--r--src/isinf.c29
-rw-r--r--src/isinteger.c59
-rw-r--r--src/isnan.c29
-rw-r--r--src/isnum.c29
-rw-r--r--src/isqrt.c84
-rw-r--r--src/isregular.c29
-rw-r--r--src/iszero.c29
-rw-r--r--src/jn.c243
-rw-r--r--src/jyn_asympt.c269
-rw-r--r--src/li2.c631
-rw-r--r--src/lngamma.c637
-rw-r--r--src/log.c174
-rw-r--r--src/log10.c144
-rw-r--r--src/log1p.c152
-rw-r--r--src/log2.c136
-rw-r--r--src/logging.c165
-rw-r--r--src/min_prec.c61
-rw-r--r--src/minmax.c92
-rw-r--r--src/modf.c98
-rw-r--r--src/mp_clz_tab.c38
-rw-r--r--src/mparam_h.in1431
-rw-r--r--src/mpf2mpfr.h175
-rw-r--r--src/mpfr-gmp.c386
-rw-r--r--src/mpfr-gmp.h314
-rw-r--r--src/mpfr-impl.h1758
-rw-r--r--src/mpfr-longlong.h1938
-rw-r--r--src/mpfr-thread.h48
-rw-r--r--src/mpfr.h912
-rw-r--r--src/mpn_exp.c175
-rw-r--r--src/mul.c511
-rw-r--r--src/mul_2exp.c33
-rw-r--r--src/mul_2si.c56
-rw-r--r--src/mul_2ui.c63
-rw-r--r--src/mul_d.c49
-rw-r--r--src/mul_ui.c133
-rw-r--r--src/mulders.c115
-rw-r--r--src/neg.c39
-rw-r--r--src/next.c150
-rw-r--r--src/out_str.c98
-rw-r--r--src/pow.c675
-rw-r--r--src/pow_si.c250
-rw-r--r--src/pow_ui.c161
-rw-r--r--src/pow_z.c365
-rw-r--r--src/powerof2.c46
-rw-r--r--src/print_raw.c129
-rw-r--r--src/print_rnd_mode.c46
-rw-r--r--src/printf.c215
-rw-r--r--src/rec_sqrt.c535
-rw-r--r--src/reldiff.c73
-rw-r--r--src/rem1.c231
-rw-r--r--src/rint.c437
-rw-r--r--src/root.c199
-rw-r--r--src/round_near_x.c233
-rw-r--r--src/round_p.c123
-rw-r--r--src/round_prec.c240
-rw-r--r--src/round_raw_generic.c259
-rw-r--r--src/scale2.c91
-rw-r--r--src/sec.c34
-rw-r--r--src/sech.c40
-rw-r--r--src/set.c81
-rw-r--r--src/set_d.c255
-rw-r--r--src/set_d64.c224
-rw-r--r--src/set_dfl_prec.c41
-rw-r--r--src/set_exp.c37
-rw-r--r--src/set_f.c99
-rw-r--r--src/set_flt.c34
-rw-r--r--src/set_inf.c33
-rw-r--r--src/set_ld.c321
-rw-r--r--src/set_nan.c31
-rw-r--r--src/set_prc_raw.c31
-rw-r--r--src/set_prec.c55
-rw-r--r--src/set_q.c133
-rw-r--r--src/set_rnd.c40
-rw-r--r--src/set_si.c30
-rw-r--r--src/set_si_2exp.c73
-rw-r--r--src/set_sj.c65
-rw-r--r--src/set_str.c42
-rw-r--r--src/set_str_raw.c55
-rw-r--r--src/set_ui.c30
-rw-r--r--src/set_ui_2exp.c72
-rw-r--r--src/set_uj.c136
-rw-r--r--src/set_z.c30
-rw-r--r--src/set_z_exp.c180
-rw-r--r--src/set_zero.c31
-rw-r--r--src/setmax.c41
-rw-r--r--src/setmin.c38
-rw-r--r--src/setsign.c30
-rw-r--r--src/sgn.c40
-rw-r--r--src/si_op.c57
-rw-r--r--src/signbit.c30
-rw-r--r--src/sin.c180
-rw-r--r--src/sin_cos.c662
-rw-r--r--src/sinh.c182
-rw-r--r--src/sinh_cosh.c157
-rw-r--r--src/sqr.c107
-rw-r--r--src/sqrt.c256
-rw-r--r--src/sqrt_ui.c54
-rw-r--r--src/stack_interface.c104
-rw-r--r--src/strtofr.c825
-rw-r--r--src/sub.c111
-rw-r--r--src/sub1.c538
-rw-r--r--src/sub1sp.c809
-rw-r--r--src/sub_d.c49
-rw-r--r--src/sub_ui.c54
-rw-r--r--src/subnormal.c146
-rw-r--r--src/sum.c315
-rw-r--r--src/swap.c54
-rw-r--r--src/tan.c87
-rw-r--r--src/tanh.c151
-rw-r--r--src/uceil_exp2.c65
-rw-r--r--src/uceil_log2.c63
-rw-r--r--src/ufloor_log2.c53
-rw-r--r--src/ui_div.c96
-rw-r--r--src/ui_pow.c41
-rw-r--r--src/ui_pow_ui.c95
-rw-r--r--src/ui_sub.c63
-rw-r--r--src/urandom.c143
-rw-r--r--src/urandomb.c98
-rw-r--r--src/vasprintf.c2204
-rw-r--r--src/version.c29
-rw-r--r--src/volatile.c36
-rw-r--r--src/yn.c420
-rw-r--r--src/zeta.c463
-rw-r--r--src/zeta_ui.c229
230 files changed, 45127 insertions, 0 deletions
diff --git a/src/Makefile.am b/src/Makefile.am
new file mode 100644
index 000000000..d94bf5d62
--- /dev/null
+++ b/src/Makefile.am
@@ -0,0 +1,87 @@
+# Copyright 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+# This Makefile.am is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+
+EXTRA_DIST = round_raw_generic.c jyn_asympt.c
+
+include_HEADERS = mpfr.h mpf2mpfr.h
+
+BUILT_SOURCES = mparam.h
+
+lib_LTLIBRARIES = libmpfr.la
+
+libmpfr_la_SOURCES = mpfr.h mpf2mpfr.h mpfr-gmp.h mpfr-impl.h \
+mpfr-longlong.h mpfr-thread.h exceptions.c extract.c uceil_exp2.c \
+uceil_log2.c ufloor_log2.c add.c add1.c add_ui.c agm.c clear.c cmp.c \
+cmp_abs.c cmp_si.c cmp_ui.c comparisons.c div_2exp.c div_2si.c \
+div_2ui.c div.c div_ui.c dump.c eq.c exp10.c exp2.c exp3.c exp.c \
+frac.c get_d.c get_exp.c get_str.c init.c inp_str.c isinteger.c \
+isinf.c isnan.c isnum.c const_log2.c log.c modf.c mul_2exp.c mul_2si.c \
+mul_2ui.c mul.c mul_ui.c neg.c next.c out_str.c printf.c vasprintf.c \
+const_pi.c pow.c pow_si.c pow_ui.c print_raw.c print_rnd_mode.c \
+reldiff.c round_prec.c set.c setmax.c setmin.c set_d.c set_dfl_prec.c \
+set_exp.c set_rnd.c set_f.c set_prc_raw.c set_prec.c set_q.c set_si.c \
+set_str.c set_str_raw.c set_ui.c set_z.c sqrt.c sqrt_ui.c sub.c sub1.c \
+sub_ui.c rint.c ui_div.c ui_sub.c urandom.c urandomb.c get_z_exp.c \
+swap.c factorial.c cosh.c sinh.c tanh.c sinh_cosh.c acosh.c asinh.c \
+atanh.c atan.c cmp2.c exp_2.c asin.c const_euler.c cos.c sin.c tan.c \
+fma.c fms.c hypot.c log1p.c expm1.c log2.c log10.c ui_pow.c \
+ui_pow_ui.c minmax.c dim.c signbit.c copysign.c setsign.c gmp_op.c \
+init2.c acos.c sin_cos.c set_nan.c set_inf.c set_zero.c powerof2.c \
+gamma.c set_ld.c get_ld.c cbrt.c volatile.c fits_s.h fits_sshort.c \
+fits_sint.c fits_slong.c fits_u.h fits_ushort.c fits_uint.c \
+fits_ulong.c fits_uintmax.c fits_intmax.c get_si.c get_ui.c zeta.c \
+cmp_d.c erf.c inits.c inits2.c clears.c sgn.c check.c sub1sp.c \
+version.c mpn_exp.c mpfr-gmp.c mp_clz_tab.c sum.c add1sp.c \
+free_cache.c si_op.c cmp_ld.c set_ui_2exp.c set_si_2exp.c set_uj.c \
+set_sj.c get_sj.c get_uj.c get_z.c iszero.c cache.c sqr.c \
+int_ceil_log2.c isqrt.c strtofr.c pow_z.c logging.c mulders.c get_f.c \
+round_p.c erfc.c atan2.c subnormal.c const_catalan.c root.c \
+gen_inverse.h sec.c csc.c cot.c eint.c sech.c csch.c coth.c \
+round_near_x.c constant.c abort_prec_max.c stack_interface.c lngamma.c \
+zeta_ui.c set_d64.c get_d64.c jn.c yn.c rem1.c get_patches.c add_d.c \
+sub_d.c d_sub.c mul_d.c div_d.c d_div.c li2.c rec_sqrt.c min_prec.c \
+buildopt.c digamma.c bernoulli.c isregular.c set_flt.c get_flt.c \
+scale2.c set_z_exp.c ai.c gammaonethird.c ieee_floats.h
+
+libmpfr_la_LIBADD = @LIBOBJS@
+
+# Libtool -version-info CURRENT[:REVISION[:AGE]] for libmpfr.la
+#
+# 1. No interfaces changed, only implementations (good):
+# ==> Increment REVISION.
+# 2. Interfaces added, none removed (good):
+# ==> Increment CURRENT, increment AGE, set REVISION to 0.
+# 3. Interfaces removed or changed (BAD, breaks upward compatibility):
+# ==> Increment CURRENT, set AGE and REVISION to 0.
+#
+# MPFR -version-info
+# 2.1.x -
+# 2.2.x 1:x:0
+# 2.3.x 2:x:1
+# 2.4.x 3:x:2
+# 3.0.x 4:x:0
+# 3.1.x 4:x:1
+libmpfr_la_LDFLAGS = -version-info 4:0:1
+
+# Important note: If for some reason, srcdir is read-only at build time
+# (and you use objdir != srcdir), then you need to rebuild get_patches.c
+# (with "make get_patches.c") just after patching the MPFR source. This
+# should not be a problem in practice, in particular because "make dist"
+# automatically rebuilds get_patches.c before generating the archives.
+$(srcdir)/get_patches.c: $(top_srcdir)/PATCHES $(top_srcdir)/tools/get_patches.sh
+ (cd $(top_srcdir) && ./tools/get_patches.sh) > $@ || rm -f $@
+
+# Do not add get_patches.c to CLEANFILES so that this file doesn't
+# need to be (re)built as long as no patches are applied. Anyway the
+# update of this file should be regarded as part of the patch process,
+# and "make clean" shouldn't remove it, just like it doesn't remove
+# what has been changed by "patch".
+#CLEANFILES = get_patches.c
diff --git a/src/abort_prec_max.c b/src/abort_prec_max.c
new file mode 100644
index 000000000..8cebae87e
--- /dev/null
+++ b/src/abort_prec_max.c
@@ -0,0 +1,32 @@
+/* mpfr_abort_prec_max -- Abort due to maximal precision overflow.
+
+Copyright 2005, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#include <stdlib.h>
+
+#include "mpfr-impl.h"
+
+/* Called when a precision beyond the maximal supported precision is
+   requested: print a diagnostic on stderr and terminate the process.
+   This function does not return. */
+/* NOTE(review): fprintf is used but <stdio.h> is not included directly
+   here — presumably it is pulled in via mpfr-impl.h; confirm. */
+void mpfr_abort_prec_max (void)
+{
+ fprintf (stderr, "MPFR: Maximal precision overflow\n");
+ abort ();
+}
+
diff --git a/src/acos.c b/src/acos.c
new file mode 100644
index 000000000..cf019d439
--- /dev/null
+++ b/src/acos.c
@@ -0,0 +1,142 @@
+/* mpfr_acos -- arc-cosine of a floating-point number
+
+Copyright 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library, and was contributed by Mathieu Dutour.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#define MPFR_NEED_LONGLONG_H
+#include "mpfr-impl.h"
+
+/* Set acos to acos(x) rounded in direction rnd_mode and return the
+   usual MPFR ternary value.  The general case is computed in a Ziv
+   loop via acos(x) = Pi/2 - atan(x/sqrt(1-x^2)), increasing the
+   working precision until correct rounding can be guaranteed.
+   Domain: NaN/Inf and |x| > 1 give NaN; acos(0) = Pi/2, acos(1) = 0,
+   acos(-1) = Pi. */
+int
+mpfr_acos (mpfr_ptr acos, mpfr_srcptr x, mpfr_rnd_t rnd_mode)
+{
+ mpfr_t xp, arcc, tmp;
+ mpfr_exp_t supplement;
+ mpfr_prec_t prec;
+ int sign, compared, inexact;
+ MPFR_SAVE_EXPO_DECL (expo);
+ MPFR_ZIV_DECL (loop);
+
+ MPFR_LOG_FUNC (("x[%#R]=%R rnd=%d", x, x, rnd_mode),
+ ("acos[%#R]=%R inexact=%d", acos, acos, inexact));
+
+ /* Singular cases */
+ if (MPFR_UNLIKELY (MPFR_IS_SINGULAR (x)))
+ {
+ if (MPFR_IS_NAN (x) || MPFR_IS_INF (x))
+ {
+ MPFR_SET_NAN (acos);
+ MPFR_RET_NAN;
+ }
+ else /* necessarily x=0 */
+ {
+ MPFR_ASSERTD(MPFR_IS_ZERO(x));
+ /* acos(0)=Pi/2 */
+ inexact = mpfr_const_pi (acos, rnd_mode);
+ mpfr_div_2ui (acos, acos, 1, rnd_mode); /* exact */
+ MPFR_RET (inexact);
+ }
+ }
+
+ /* Set x_p=|x| */
+ sign = MPFR_SIGN (x);
+ /* xp has the same precision as x, so the absolute value is exact */
+ mpfr_init2 (xp, MPFR_PREC (x));
+ mpfr_abs (xp, x, MPFR_RNDN); /* Exact */
+
+ compared = mpfr_cmp_ui (xp, 1);
+
+ /* |x| >= 1: either out of domain (NaN) or an exactly known value */
+ if (MPFR_UNLIKELY (compared >= 0))
+ {
+ mpfr_clear (xp);
+ if (compared > 0) /* acos(x) = NaN for x > 1 */
+ {
+ MPFR_SET_NAN(acos);
+ MPFR_RET_NAN;
+ }
+ else
+ {
+ if (MPFR_IS_POS_SIGN (sign)) /* acos(+1) = 0 */
+ return mpfr_set_ui (acos, 0, rnd_mode);
+ else /* acos(-1) = Pi */
+ return mpfr_const_pi (acos, rnd_mode);
+ }
+ }
+
+ MPFR_SAVE_EXPO_MARK (expo);
+
+ /* Compute the supplement */
+ /* xp <- 1 - |x| (rounded down); its exponent measures how close |x|
+    is to 1.  "supplement" is the corresponding number of extra guard
+    bits added to the working precision: the 1-x^2 computed in the
+    loop below loses about that many bits to cancellation when |x| is
+    near 1.  The same quantity is subtracted back in MPFR_CAN_ROUND. */
+ mpfr_ui_sub (xp, 1, xp, MPFR_RNDD);
+ if (MPFR_IS_POS_SIGN (sign))
+ supplement = 2 - 2 * MPFR_GET_EXP (xp);
+ else
+ supplement = 2 - MPFR_GET_EXP (xp);
+ mpfr_clear (xp);
+
+ prec = MPFR_PREC (acos);
+ prec += MPFR_INT_CEIL_LOG2(prec) + 10 + supplement;
+
+ /* VL: The following change concerning prec comes from r3145
+ "Optimize mpfr_acos by choosing a better initial precision."
+ but it doesn't seem to be correct and leads to problems (assertion
+ failure or very important inefficiency) with tiny arguments.
+ Therefore, I've disabled it. */
+ /* If x ~ 2^-N, acos(x) ~ PI/2 - x - x^3/6
+ If Prec < 2*N, we can't round since x^3/6 won't be counted. */
+#if 0
+ if (MPFR_PREC (acos) >= MPFR_PREC (x) && MPFR_GET_EXP (x) < 0)
+ {
+ mpfr_uexp_t pmin = (mpfr_uexp_t) (-2 * MPFR_GET_EXP (x)) + 5;
+ MPFR_ASSERTN (pmin <= MPFR_PREC_MAX);
+ if (prec < pmin)
+ prec = pmin;
+ }
+#endif
+
+ mpfr_init2 (tmp, prec);
+ mpfr_init2 (arcc, prec);
+
+ /* Ziv loop: recompute at increasing precision until the result can
+    be correctly rounded to the target precision */
+ MPFR_ZIV_INIT (loop, prec);
+ for (;;)
+ {
+ /* acos(x) = Pi/2 - asin(x) = Pi/2 - atan(x/sqrt(1-x^2)) */
+ mpfr_sqr (tmp, x, MPFR_RNDN);
+ mpfr_ui_sub (tmp, 1, tmp, MPFR_RNDN);
+ mpfr_sqrt (tmp, tmp, MPFR_RNDN);
+ mpfr_div (tmp, x, tmp, MPFR_RNDN);
+ mpfr_atan (arcc, tmp, MPFR_RNDN);
+ mpfr_const_pi (tmp, MPFR_RNDN);
+ mpfr_div_2ui (tmp, tmp, 1, MPFR_RNDN);
+ mpfr_sub (arcc, tmp, arcc, MPFR_RNDN);
+
+ /* only prec - supplement bits of arcc are trusted, because of the
+    cancellation accounted for above */
+ if (MPFR_LIKELY (MPFR_CAN_ROUND (arcc, prec - supplement,
+ MPFR_PREC (acos), rnd_mode)))
+ break;
+ MPFR_ZIV_NEXT (loop, prec);
+ mpfr_set_prec (tmp, prec);
+ mpfr_set_prec (arcc, prec);
+ }
+ MPFR_ZIV_FREE (loop);
+
+ inexact = mpfr_set (acos, arcc, rnd_mode);
+ mpfr_clear (tmp);
+ mpfr_clear (arcc);
+
+ MPFR_SAVE_EXPO_FREE (expo);
+ /* restore the exponent range and check the result fits in it */
+ return mpfr_check_range (acos, inexact, rnd_mode);
+}
diff --git a/src/acosh.c b/src/acosh.c
new file mode 100644
index 000000000..142b0398e
--- /dev/null
+++ b/src/acosh.c
@@ -0,0 +1,156 @@
+/* mpfr_acosh -- inverse hyperbolic cosine
+
+Copyright 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#define MPFR_NEED_LONGLONG_H
+#include "mpfr-impl.h"
+
+/* The computation of acosh is done by
+   acosh(x) = ln(x + sqrt(x^2-1)) */
+
+/* Set y to acosh(x) = ln(x + sqrt(x^2-1)) rounded in direction
+   rnd_mode and return the usual MPFR ternary value.  Defined for
+   x >= 1: acosh(1) = +0, acosh(+Inf) = +Inf, anything below 1
+   (including NaN, zero and -Inf) gives NaN. */
+int
+mpfr_acosh (mpfr_ptr y, mpfr_srcptr x , mpfr_rnd_t rnd_mode)
+{
+ MPFR_SAVE_EXPO_DECL (expo);
+ int inexact;
+ int comp;
+
+ MPFR_LOG_FUNC (("x[%#R]=%R rnd=%d", x, x, rnd_mode),
+ ("y[%#R]=%R inexact=%d", y, y, inexact));
+
+ /* Deal with special cases */
+ if (MPFR_UNLIKELY (MPFR_IS_SINGULAR (x)))
+ {
+ /* x is NaN, +/-Inf or zero here */
+ if (MPFR_IS_INF (x) && MPFR_IS_POS (x))
+ {
+ MPFR_SET_INF (y);
+ MPFR_SET_POS (y);
+ MPFR_RET (0);
+ }
+ else /* Nan, or zero or -Inf */
+ {
+ MPFR_SET_NAN (y);
+ MPFR_RET_NAN;
+ }
+ }
+ /* out-of-domain and exact boundary checks against 1 */
+ comp = mpfr_cmp_ui (x, 1);
+ if (MPFR_UNLIKELY (comp < 0))
+ {
+ MPFR_SET_NAN (y);
+ MPFR_RET_NAN;
+ }
+ else if (MPFR_UNLIKELY (comp == 0))
+ {
+ MPFR_SET_ZERO (y); /* acosh(1) = 0 */
+ MPFR_SET_POS (y);
+ MPFR_RET (0);
+ }
+ MPFR_SAVE_EXPO_MARK (expo);
+
+ /* General case */
+ {
+ /* Declaration of the intermediary variables */
+ mpfr_t t;
+ /* Declaration of the size variables */
+ mpfr_prec_t Ny = MPFR_PREC(y); /* Precision of output variable */
+ mpfr_prec_t Nt; /* Precision of the intermediary variable */
+ mpfr_exp_t err, exp_te, d; /* Precision of error */
+ MPFR_ZIV_DECL (loop);
+
+ /* compute the precision of intermediary variable */
+ /* the optimal number of bits : see algorithms.tex */
+ Nt = Ny + 4 + MPFR_INT_CEIL_LOG2 (Ny);
+
+ /* initialization of intermediary variables */
+ mpfr_init2 (t, Nt);
+
+ /* First computation of acosh */
+ MPFR_ZIV_INIT (loop, Nt);
+ for (;;)
+ {
+ MPFR_BLOCK_DECL (flags);
+
+ /* compute acosh */
+ MPFR_BLOCK (flags, mpfr_mul (t, x, x, MPFR_RNDD)); /* x^2 */
+ if (MPFR_OVERFLOW (flags))
+ {
+ /* x^2 overflowed: fall back to ln(x) + ln(2) ~ ln(2x) */
+ mpfr_t ln2;
+ mpfr_prec_t pln2;
+
+ /* As x is very large and the precision is not too large, we
+ assume that we obtain the same result by evaluating ln(2x).
+ We need to compute ln(x) + ln(2) as 2x can overflow. TODO:
+ write a proof and add an MPFR_ASSERTN. */
+ mpfr_log (t, x, MPFR_RNDN); /* err(log) < 1/2 ulp(t) */
+ pln2 = Nt - MPFR_PREC_MIN < MPFR_GET_EXP (t) ?
+ MPFR_PREC_MIN : Nt - MPFR_GET_EXP (t);
+ mpfr_init2 (ln2, pln2);
+ mpfr_const_log2 (ln2, MPFR_RNDN); /* err(ln2) < 1/2 ulp(t) */
+ mpfr_add (t, t, ln2, MPFR_RNDN); /* err <= 3/2 ulp(t) */
+ mpfr_clear (ln2);
+ err = 1;
+ }
+ else
+ {
+ exp_te = MPFR_GET_EXP (t);
+ mpfr_sub_ui (t, t, 1, MPFR_RNDD); /* x^2-1 */
+ if (MPFR_UNLIKELY (MPFR_IS_ZERO (t)))
+ {
+ /* This means that x is very close to 1: x = 1 + t with
+ t < 2^(-Nt). We have: acosh(x) = sqrt(2t) (1 - eps(t))
+ with 0 < eps(t) < t / 12. */
+ mpfr_sub_ui (t, x, 1, MPFR_RNDD); /* t = x - 1 */
+ mpfr_mul_2ui (t, t, 1, MPFR_RNDN); /* 2t */
+ mpfr_sqrt (t, t, MPFR_RNDN); /* sqrt(2t) */
+ err = 1;
+ }
+ else
+ {
+ /* d = number of bits cancelled in x^2 - 1 */
+ d = exp_te - MPFR_GET_EXP (t);
+ mpfr_sqrt (t, t, MPFR_RNDN); /* sqrt(x^2-1) */
+ mpfr_add (t, t, x, MPFR_RNDN); /* sqrt(x^2-1)+x */
+ mpfr_log (t, t, MPFR_RNDN); /* ln(sqrt(x^2-1)+x) */
+
+ /* error estimate -- see algorithms.tex */
+ err = 3 + MAX (1, d) - MPFR_GET_EXP (t);
+ /* error is bounded by 1/2 + 2^err <= 2^(max(0,1+err)) */
+ err = MAX (0, 1 + err);
+ }
+ }
+
+ /* only Nt - err bits of t are trusted */
+ if (MPFR_LIKELY (MPFR_CAN_ROUND (t, Nt - err, Ny, rnd_mode)))
+ break;
+
+ /* increase the working precision and try again (Ziv loop) */
+ MPFR_ZIV_NEXT (loop, Nt);
+ mpfr_set_prec (t, Nt);
+ }
+ MPFR_ZIV_FREE (loop);
+
+ inexact = mpfr_set (y, t, rnd_mode);
+
+ mpfr_clear (t);
+ }
+
+ MPFR_SAVE_EXPO_FREE (expo);
+ /* restore the exponent range and check the result fits in it */
+ return mpfr_check_range (y, inexact, rnd_mode);
+}
diff --git a/src/add.c b/src/add.c
new file mode 100644
index 000000000..08b56de9a
--- /dev/null
+++ b/src/add.c
@@ -0,0 +1,107 @@
+/* mpfr_add -- add two floating-point numbers
+
+Copyright 1999, 2000, 2001, 2002, 2003, 2004, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#include "mpfr-impl.h"
+
+/* Set a to b + c rounded in direction rnd_mode and return the usual
+   MPFR ternary value.  All singular operands (NaN, infinities,
+   signed zeros) are handled here; the regular case is dispatched to
+   the internal kernels mpfr_add1/mpfr_add1sp when the signs agree,
+   or mpfr_sub1/mpfr_sub1sp when they differ (a true subtraction). */
+int
+mpfr_add (mpfr_ptr a, mpfr_srcptr b, mpfr_srcptr c, mpfr_rnd_t rnd_mode)
+{
+ MPFR_LOG_FUNC (("b[%#R]=%R c[%#R]=%R rnd=%d", b, b, c, c, rnd_mode),
+ ("a[%#R]=%R", a, a));
+
+ if (MPFR_ARE_SINGULAR(b,c))
+ {
+ /* NaN propagates unconditionally */
+ if (MPFR_IS_NAN(b) || MPFR_IS_NAN(c))
+ {
+ MPFR_SET_NAN(a);
+ MPFR_RET_NAN;
+ }
+ /* neither b nor c is NaN here */
+ else if (MPFR_IS_INF(b))
+ {
+ /* Inf + finite, or Inf + Inf of the same sign, is that Inf;
+    (+Inf) + (-Inf) is NaN */
+ if (!MPFR_IS_INF(c) || MPFR_SIGN(b) == MPFR_SIGN(c))
+ {
+ MPFR_SET_INF(a);
+ MPFR_SET_SAME_SIGN(a, b);
+ MPFR_RET(0); /* exact */
+ }
+ else
+ {
+ MPFR_SET_NAN(a);
+ MPFR_RET_NAN;
+ }
+ }
+ else if (MPFR_IS_INF(c))
+ {
+ MPFR_SET_INF(a);
+ MPFR_SET_SAME_SIGN(a, c);
+ MPFR_RET(0); /* exact */
+ }
+ /* now either b or c is zero */
+ else if (MPFR_IS_ZERO(b))
+ {
+ if (MPFR_IS_ZERO(c))
+ {
+ /* for round away, we take the same convention for 0 + 0
+ as for round to zero or to nearest: it always gives +0,
+ except (-0) + (-0) = -0. */
+ MPFR_SET_SIGN(a,
+ (rnd_mode != MPFR_RNDD ?
+ ((MPFR_IS_NEG(b) && MPFR_IS_NEG(c)) ? -1 : 1) :
+ ((MPFR_IS_POS(b) && MPFR_IS_POS(c)) ? 1 : -1)));
+ MPFR_SET_ZERO(a);
+ MPFR_RET(0); /* 0 + 0 is exact */
+ }
+ /* 0 + c reduces to rounding c into a */
+ return mpfr_set (a, c, rnd_mode);
+ }
+ else
+ {
+ MPFR_ASSERTD(MPFR_IS_ZERO(c));
+ return mpfr_set (a, b, rnd_mode);
+ }
+ }
+
+ MPFR_ASSERTD(MPFR_IS_PURE_FP(b) && MPFR_IS_PURE_FP(c));
+
+ if (MPFR_UNLIKELY(MPFR_SIGN(b) != MPFR_SIGN(c)))
+ { /* signs differ, it's a subtraction */
+ /* the "1sp" kernel is the fast path for the case where all
+    three precisions are equal */
+ if (MPFR_LIKELY(MPFR_PREC(a) == MPFR_PREC(b)
+ && MPFR_PREC(b) == MPFR_PREC(c)))
+ return mpfr_sub1sp(a,b,c,rnd_mode);
+ else
+ return mpfr_sub1(a, b, c, rnd_mode);
+ }
+ else
+ { /* signs are equal, it's an addition */
+ /* pass the operand with the larger exponent first */
+ if (MPFR_LIKELY(MPFR_PREC(a) == MPFR_PREC(b)
+ && MPFR_PREC(b) == MPFR_PREC(c)))
+ if (MPFR_GET_EXP(b) < MPFR_GET_EXP(c))
+ return mpfr_add1sp(a, c, b, rnd_mode);
+ else
+ return mpfr_add1sp(a, b, c, rnd_mode);
+ else
+ if (MPFR_GET_EXP(b) < MPFR_GET_EXP(c))
+ return mpfr_add1(a, c, b, rnd_mode);
+ else
+ return mpfr_add1(a, b, c, rnd_mode);
+ }
+}
diff --git a/src/add1.c b/src/add1.c
new file mode 100644
index 000000000..009109212
--- /dev/null
+++ b/src/add1.c
@@ -0,0 +1,535 @@
+/* mpfr_add1 -- internal function to perform a "real" addition
+
+Copyright 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#include "mpfr-impl.h"
+
/* mpfr_add1 -- compute sign(b) * (|b| + |c|), assuming b and c have same sign,
   and are not NaN, Inf, nor zero (both are pure FP numbers, as checked by
   the assertion below).  The caller (mpfr_add) guarantees
   EXP(b) >= EXP(c), hence diff_exp below is non-negative.

   The precisions of a, b and c may all differ.  Returns the usual MPFR
   ternary value: 0 when the stored result is exact, a positive
   (resp. negative) value when it is larger (resp. smaller) than the
   exact sum. */
int
mpfr_add1 (mpfr_ptr a, mpfr_srcptr b, mpfr_srcptr c, mpfr_rnd_t rnd_mode)
{
  mp_limb_t *ap, *bp, *cp;     /* mantissa pointers of a, b, c */
  mpfr_prec_t aq, bq, cq, aq2;
  mp_size_t an, bn, cn;        /* limb counts of a, b, c */
  mpfr_exp_t difw, exp;
  int sh, rb, fb, inex;
  mpfr_uexp_t diff_exp;        /* EXP(b) - EXP(c), non-negative */
  MPFR_TMP_DECL(marker);

  MPFR_ASSERTD(MPFR_IS_PURE_FP(b) && MPFR_IS_PURE_FP(c));

  MPFR_TMP_MARK(marker);

  aq = MPFR_PREC(a);
  bq = MPFR_PREC(b);
  cq = MPFR_PREC(c);

  an = (aq-1)/GMP_NUMB_BITS+1; /* number of limbs of a */
  aq2 = (mpfr_prec_t) an * GMP_NUMB_BITS;
  sh = aq2 - aq;               /* non-significant bits in low limb */

  bn = (bq-1)/GMP_NUMB_BITS+1; /* number of limbs of b */
  cn = (cq-1)/GMP_NUMB_BITS+1; /* number of limbs of c */

  ap = MPFR_MANT(a);
  bp = MPFR_MANT(b);
  cp = MPFR_MANT(c);

  /* a's mantissa is about to be overwritten: if a aliases b (and
     possibly c), work on a temporary copy of the aliased mantissa. */
  if (MPFR_UNLIKELY(ap == bp))
    {
      bp = (mp_ptr) MPFR_TMP_ALLOC (bn * BYTES_PER_MP_LIMB);
      MPN_COPY (bp, ap, bn);
      if (ap == cp)
        { cp = bp; }
    }
  else if (MPFR_UNLIKELY(ap == cp))
    {
      cp = (mp_ptr) MPFR_TMP_ALLOC (cn * BYTES_PER_MP_LIMB);
      MPN_COPY(cp, ap, cn);
    }

  exp = MPFR_GET_EXP (b);
  MPFR_SET_SAME_SIGN(a, b);
  MPFR_UPDATE2_RND_MODE(rnd_mode, MPFR_SIGN(b));
  /* now rnd_mode is either MPFR_RNDN, MPFR_RNDZ or MPFR_RNDA */
  diff_exp = (mpfr_uexp_t) exp - MPFR_GET_EXP(c);

  /*
   * 1. Compute the significant part A', the non-significant bits of A
   * are taken into account.
   *
   * 2. Perform the rounding. At each iteration, we remember:
   *     _ r = rounding bit
   *     _ f = following bits (same value)
   * where the result has the form: [number A]rfff...fff + a remaining
   * value in the interval [0,2) ulp. We consider the most significant
   * bits of the remaining value to update the result; a possible carry
   * is immediately taken into account and A is updated accordingly. As
   * soon as the bits f don't have the same value, A can be rounded.
   * Variables:
   *     _ rb = rounding bit (0 or 1).
   *     _ fb = following bits (0 or 1), then sticky bit.
   * If fb == 0, the only thing that can change is the sticky bit.
   */

  rb = fb = -1; /* means: not initialized */

  if (MPFR_UNLIKELY(aq2 <= diff_exp))
    { /* c does not overlap with a' */
      if (MPFR_UNLIKELY(an > bn))
        { /* a has more limbs than b */
          /* copy b to the most significant limbs of a */
          MPN_COPY(ap + (an - bn), bp, bn);
          /* zero the least significant limbs of a */
          MPN_ZERO(ap, an - bn);
        }
      else /* an <= bn */
        {
          /* copy the most significant limbs of b to a */
          MPN_COPY(ap, bp + (bn - an), an);
        }
    }
  else /* aq2 > diff_exp */
    { /* c overlaps with a' */
      mp_limb_t *a2p;
      mp_limb_t cc;
      mpfr_prec_t dif;
      mp_size_t difn, k;
      int shift;

      /* copy c (shifted) into a */

      dif = aq2 - diff_exp;
      /* dif is the number of bits of c which overlap with a' */

      difn = (dif-1)/GMP_NUMB_BITS + 1;
      /* only the highest difn limbs from c have to be considered */
      if (MPFR_UNLIKELY(difn > cn))
        {
          /* c doesn't have enough limbs; take into account the virtual
             zero limbs now by zeroing the least significant limbs of a' */
          MPFR_ASSERTD(difn - cn <= an);
          MPN_ZERO(ap, difn - cn);
          difn = cn;
        }
      k = diff_exp / GMP_NUMB_BITS;

      /* zero the most significant k limbs of a */
      a2p = ap + (an - k);
      MPN_ZERO(a2p, k);

      shift = diff_exp % GMP_NUMB_BITS;

      if (MPFR_LIKELY(shift))
        {
          MPFR_ASSERTD(a2p - difn >= ap);
          cc = mpn_rshift(a2p - difn, cp + (cn - difn), difn, shift);
          if (MPFR_UNLIKELY(a2p - difn > ap))
            *(a2p - difn - 1) = cc;
        }
      else
        MPN_COPY(a2p - difn, cp + (cn - difn), difn);

      /* add b to a */
      cc = MPFR_UNLIKELY(an > bn)
        ? mpn_add_n(ap + (an - bn), ap + (an - bn), bp, bn)
        : mpn_add_n(ap, ap, bp + (bn - an), an);

      if (MPFR_UNLIKELY(cc)) /* carry out of the addition: shift a right
                                by one bit and bump the exponent */
        {
          if (MPFR_UNLIKELY(exp == __gmpfr_emax))
            {
              inex = mpfr_overflow (a, rnd_mode, MPFR_SIGN(a));
              goto end_of_add;
            }
          exp++;
          rb = (ap[0] >> sh) & 1; /* LSB(a) --> rounding bit after the shift */
          if (MPFR_LIKELY(sh))
            {
              mp_limb_t mask, bb;

              mask = MPFR_LIMB_MASK (sh);
              bb = ap[0] & mask;
              ap[0] &= (~mask) << 1;
              if (bb == 0)
                fb = 0;
              else if (bb == mask)
                fb = 1;
            }
          mpn_rshift(ap, ap, an, 1);
          ap[an-1] += MPFR_LIMB_HIGHBIT;
          if (sh && fb < 0)
            goto rounding;
        } /* cc */
    } /* aq2 > diff_exp */

  /* non-significant bits of a */
  if (MPFR_LIKELY(rb < 0 && sh))
    {
      mp_limb_t mask, bb;

      mask = MPFR_LIMB_MASK (sh);
      bb = ap[0] & mask;
      ap[0] &= ~mask;
      rb = bb >> (sh - 1);
      if (MPFR_LIKELY(sh > 1))
        {
          mask >>= 1;
          bb &= mask;
          if (bb == 0)
            fb = 0;
          else if (bb == mask)
            fb = 1;
          else
            goto rounding;
        }
    }

  /* determine rounding and sticky bits (and possible carry) */

  difw = (mpfr_exp_t) an - (mpfr_exp_t) (diff_exp / GMP_NUMB_BITS);
  /* difw is the number of limbs from b (regarded as having an infinite
     precision) that have already been combined with c; -n if the next
     n limbs from b won't be combined with c. */

  if (MPFR_UNLIKELY(bn > an))
    { /* there are still limbs from b that haven't been taken into account */
      mp_size_t bk;

      if (fb == 0 && difw <= 0)
        {
          fb = 1; /* c hasn't been taken into account ==> sticky bit != 0 */
          goto rounding;
        }

      bk = bn - an; /* index of lowest considered limb from b, > 0 */
      while (difw < 0)
        { /* ulp(next limb from b) > msb(c) */
          mp_limb_t bb;

          bb = bp[--bk];

          MPFR_ASSERTD(fb != 0);
          if (fb > 0)
            {
              if (bb != MP_LIMB_T_MAX)
                {
                  fb = 1; /* c hasn't been taken into account
                             ==> sticky bit != 0 */
                  goto rounding;
                }
            }
          else /* fb not initialized yet */
            {
              if (rb < 0) /* rb not initialized yet */
                {
                  rb = bb >> (GMP_NUMB_BITS - 1);
                  bb |= MPFR_LIMB_HIGHBIT;
                }
              fb = 1;
              if (bb != MP_LIMB_T_MAX)
                goto rounding;
            }

          if (bk == 0)
            { /* b has entirely been read */
              fb = 1; /* c hasn't been taken into account
                         ==> sticky bit != 0 */
              goto rounding;
            }

          difw++;
        } /* while */
      MPFR_ASSERTD(bk > 0 && difw >= 0);

      if (difw <= cn)
        {
          mp_size_t ck;
          mp_limb_t cprev;
          int difs;

          ck = cn - difw;
          difs = diff_exp % GMP_NUMB_BITS;

          if (difs == 0 && ck == 0)
            goto c_read;

          cprev = ck == cn ? 0 : cp[ck];

          if (fb < 0)
            {
              mp_limb_t bb, cc;

              if (difs)
                {
                  cc = cprev << (GMP_NUMB_BITS - difs);
                  if (--ck >= 0)
                    {
                      cprev = cp[ck];
                      cc += cprev >> difs;
                    }
                }
              else
                cc = cp[--ck];

              bb = bp[--bk] + cc;

              if (bb < cc /* carry */
                  && (rb < 0 || (rb ^= 1) == 0)
                  && mpn_add_1(ap, ap, an, MPFR_LIMB_ONE << sh))
                {
                  if (exp == __gmpfr_emax)
                    {
                      inex = mpfr_overflow (a, rnd_mode, MPFR_SIGN(a));
                      goto end_of_add;
                    }
                  exp++;
                  ap[an-1] = MPFR_LIMB_HIGHBIT;
                  rb = 0;
                }

              if (rb < 0) /* rb not initialized yet */
                {
                  rb = bb >> (GMP_NUMB_BITS - 1);
                  bb <<= 1;
                  bb |= bb >> (GMP_NUMB_BITS - 1);
                }

              fb = bb != 0;
              if (fb && bb != MP_LIMB_T_MAX)
                goto rounding;
            } /* fb < 0 */

          while (bk > 0)
            {
              mp_limb_t bb, cc;

              if (difs)
                {
                  if (ck < 0)
                    goto c_read;
                  cc = cprev << (GMP_NUMB_BITS - difs);
                  if (--ck >= 0)
                    {
                      cprev = cp[ck];
                      cc += cprev >> difs;
                    }
                }
              else
                {
                  if (ck == 0)
                    goto c_read;
                  cc = cp[--ck];
                }

              bb = bp[--bk] + cc;
              if (bb < cc) /* carry */
                {
                  fb ^= 1;
                  if (fb)
                    goto rounding;
                  rb ^= 1;
                  if (rb == 0 && mpn_add_1(ap, ap, an, MPFR_LIMB_ONE << sh))
                    {
                      if (MPFR_UNLIKELY(exp == __gmpfr_emax))
                        {
                          inex = mpfr_overflow (a, rnd_mode, MPFR_SIGN(a));
                          goto end_of_add;
                        }
                      exp++;
                      ap[an-1] = MPFR_LIMB_HIGHBIT;
                    }
                } /* bb < cc */

              if (!fb && bb != 0)
                {
                  fb = 1;
                  goto rounding;
                }
              if (fb && bb != MP_LIMB_T_MAX)
                goto rounding;
            } /* while */

          /* b has entirely been read */

          if (fb || ck < 0)
            goto rounding;
          if (difs && cprev << (GMP_NUMB_BITS - difs))
            {
              fb = 1;
              goto rounding;
            }
          while (ck)
            {
              if (cp[--ck])
                {
                  fb = 1;
                  goto rounding;
                }
            } /* while */
        } /* difw <= cn */
      else
        { /* c has entirely been read */
        c_read:
          if (fb < 0) /* fb not initialized yet */
            {
              mp_limb_t bb;

              MPFR_ASSERTD(bk > 0);
              bb = bp[--bk];
              if (rb < 0) /* rb not initialized yet */
                {
                  rb = bb >> (GMP_NUMB_BITS - 1);
                  bb &= ~MPFR_LIMB_HIGHBIT;
                }
              fb = bb != 0;
            } /* fb < 0 */
          if (fb)
            goto rounding;
          while (bk)
            {
              if (bp[--bk])
                {
                  fb = 1;
                  goto rounding;
                }
            } /* while */
        } /* difw > cn */
    } /* bn > an */
  else if (fb != 1) /* if fb == 1, the sticky bit is 1 (no possible carry) */
    { /* b has entirely been read */
      if (difw > cn)
        { /* c has entirely been read */
          if (rb < 0)
            rb = 0;
          fb = 0;
        }
      else if (diff_exp > aq2)
        { /* b is followed by at least a zero bit, then by c */
          if (rb < 0)
            rb = 0;
          fb = 1;
        }
      else
        {
          mp_size_t ck;
          int difs;

          MPFR_ASSERTD(difw >= 0 && cn >= difw);
          ck = cn - difw;
          difs = diff_exp % GMP_NUMB_BITS;

          if (difs == 0 && ck == 0)
            { /* c has entirely been read */
              if (rb < 0)
                rb = 0;
              fb = 0;
            }
          else
            {
              mp_limb_t cc;

              cc = difs ? (MPFR_ASSERTD(ck < cn),
                           cp[ck] << (GMP_NUMB_BITS - difs)) : cp[--ck];
              if (rb < 0)
                {
                  rb = cc >> (GMP_NUMB_BITS - 1);
                  cc &= ~MPFR_LIMB_HIGHBIT;
                }
              while (cc == 0)
                {
                  if (ck == 0)
                    {
                      fb = 0;
                      goto rounding;
                    }
                  cc = cp[--ck];
                } /* while */
              fb = 1;
            }
        }
    } /* fb != 1 */

 rounding:
  /* rnd_mode should be one of MPFR_RNDN, MPFR_RNDZ or MPFR_RNDA
     (directed modes were reduced above by MPFR_UPDATE2_RND_MODE) */
  if (MPFR_LIKELY(rnd_mode == MPFR_RNDN))
    {
      if (fb == 0)
        {
          if (rb == 0)
            {
              inex = 0;
              goto set_exponent;
            }
          /* round to even */
          if (ap[0] & (MPFR_LIMB_ONE << sh))
            goto rndn_away;
          else
            goto rndn_zero;
        }
      if (rb == 0)
        {
        rndn_zero:
          inex = MPFR_IS_NEG(a) ? 1 : -1;
          goto set_exponent;
        }
      else
        {
        rndn_away:
          inex = MPFR_IS_POS(a) ? 1 : -1;
          goto add_one_ulp;
        }
    }
  else if (rnd_mode == MPFR_RNDZ)
    {
      inex = rb || fb ? (MPFR_IS_NEG(a) ? 1 : -1) : 0;
      goto set_exponent;
    }
  else
    {
      MPFR_ASSERTN (rnd_mode == MPFR_RNDA);
      inex = rb || fb ? (MPFR_IS_POS(a) ? 1 : -1) : 0;
      if (inex)
        goto add_one_ulp;
      else
        goto set_exponent;
    }

 add_one_ulp: /* add one unit in last place to a */
  if (MPFR_UNLIKELY(mpn_add_1 (ap, ap, an, MPFR_LIMB_ONE << sh)))
    {
      if (MPFR_UNLIKELY(exp == __gmpfr_emax))
        {
          inex = mpfr_overflow (a, rnd_mode, MPFR_SIGN(a));
          goto end_of_add;
        }
      exp++;
      ap[an-1] = MPFR_LIMB_HIGHBIT;
    }

 set_exponent:
  MPFR_SET_EXP (a, exp);

 end_of_add:
  MPFR_TMP_FREE(marker);
  MPFR_RET (inex);
}
diff --git a/src/add1sp.c b/src/add1sp.c
new file mode 100644
index 000000000..49e691c90
--- /dev/null
+++ b/src/add1sp.c
@@ -0,0 +1,384 @@
+/* mpfr_add1sp -- internal function to perform a "real" addition
+ All the op must have the same precision
+
+Copyright 2004, 2005, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#define MPFR_NEED_LONGLONG_H
+#include "mpfr-impl.h"
+
+/* Check if we have to check the result of mpfr_add1sp with mpfr_add1 */
+#ifdef WANT_ASSERT
+# if WANT_ASSERT >= 2
+
+int mpfr_add1sp2 (mpfr_ptr, mpfr_srcptr, mpfr_srcptr, mpfr_rnd_t);
+int mpfr_add1sp (mpfr_ptr a, mpfr_srcptr b, mpfr_srcptr c, mpfr_rnd_t rnd_mode)
+{
+ mpfr_t tmpa, tmpb, tmpc;
+ int inexb, inexc, inexact, inexact2;
+
+ mpfr_init2 (tmpa, MPFR_PREC (a));
+ mpfr_init2 (tmpb, MPFR_PREC (b));
+ mpfr_init2 (tmpc, MPFR_PREC (c));
+
+ inexb = mpfr_set (tmpb, b, MPFR_RNDN);
+ MPFR_ASSERTN (inexb == 0);
+
+ inexc = mpfr_set (tmpc, c, MPFR_RNDN);
+ MPFR_ASSERTN (inexc == 0);
+
+ inexact2 = mpfr_add1 (tmpa, tmpb, tmpc, rnd_mode);
+ inexact = mpfr_add1sp2 (a, b, c, rnd_mode);
+
+ if (mpfr_cmp (tmpa, a) || inexact != inexact2)
+ {
+ fprintf (stderr, "add1 & add1sp return different values for %s\n"
+ "Prec_a = %lu, Prec_b = %lu, Prec_c = %lu\nB = ",
+ mpfr_print_rnd_mode (rnd_mode),
+ MPFR_PREC (a), MPFR_PREC (b), MPFR_PREC (c));
+ mpfr_fprint_binary (stderr, tmpb);
+ fprintf (stderr, "\nC = ");
+ mpfr_fprint_binary (stderr, tmpc);
+ fprintf (stderr, "\n\nadd1 : ");
+ mpfr_fprint_binary (stderr, tmpa);
+ fprintf (stderr, "\nadd1sp: ");
+ mpfr_fprint_binary (stderr, a);
+ fprintf (stderr, "\nInexact sp = %d | Inexact = %d\n",
+ inexact, inexact2);
+ MPFR_ASSERTN (0);
+ }
+ mpfr_clears (tmpa, tmpb, tmpc, (mpfr_ptr) 0);
+ return inexact;
+}
+# define mpfr_add1sp mpfr_add1sp2
+# endif
+#endif
+
/* Debugging support: when this file is compiled with -DDEBUG, DEBUG(x)
   expands to the statement x; otherwise it expands to nothing, so the
   debug traces below cost nothing in a normal build. */
#ifdef DEBUG
# undef DEBUG
# define DEBUG(x) (x)
#else
# define DEBUG(x) /**/
#endif
+
/* mpfr_add1sp -- compute sign(b) * (|b| + |c|), where a, b and c all have
   the SAME precision and b, c are pure FP numbers with
   EXP(b) >= EXP(c) (checked by the assertions below).
   Returns 0 iff result is exact,
   a negative value when the result is less than the exact value,
   a positive value otherwise. */
int
mpfr_add1sp (mpfr_ptr a, mpfr_srcptr b, mpfr_srcptr c, mpfr_rnd_t rnd_mode)
{
  mpfr_uexp_t d;          /* exponent difference EXP(b) - EXP(c) >= 0 */
  mpfr_prec_t p;          /* common precision of a, b and c */
  unsigned int sh;        /* non-significant bits in the low limb */
  mp_size_t n;            /* number of limbs */
  mp_limb_t *ap, *cp;
  mpfr_exp_t bx;          /* exponent of the result (before rounding) */
  mp_limb_t limb;
  int inexact;
  MPFR_TMP_DECL(marker);

  MPFR_TMP_MARK(marker);

  MPFR_ASSERTD(MPFR_PREC(a) == MPFR_PREC(b) && MPFR_PREC(b) == MPFR_PREC(c));
  MPFR_ASSERTD(MPFR_IS_PURE_FP(b) && MPFR_IS_PURE_FP(c));
  MPFR_ASSERTD(MPFR_GET_EXP(b) >= MPFR_GET_EXP(c));

  /* Read prec and num of limbs */
  p = MPFR_PREC(b);
  n = (p+GMP_NUMB_BITS-1)/GMP_NUMB_BITS;
  MPFR_UNSIGNED_MINUS_MODULO(sh, p);
  bx = MPFR_GET_EXP(b);
  d = (mpfr_uexp_t) (bx - MPFR_GET_EXP(c));

  DEBUG (printf ("New add1sp with diff=%lu\n", (unsigned long) d));

  if (MPFR_UNLIKELY(d == 0))
    {
      /* d==0: same exponent, so the sum always carries out one bit */
      DEBUG( mpfr_print_mant_binary("C= ", MPFR_MANT(c), p) );
      DEBUG( mpfr_print_mant_binary("B= ", MPFR_MANT(b), p) );
      bx++;                                       /* exp + 1 */
      ap = MPFR_MANT(a);
      limb = mpn_add_n(ap, MPFR_MANT(b), MPFR_MANT(c), n);
      DEBUG( mpfr_print_mant_binary("A= ", ap, p) );
      MPFR_ASSERTD(limb != 0);                    /* There must be a carry */
      limb = ap[0];                               /* Get LSB (In fact, LSW) */
      mpn_rshift(ap, ap, n, 1);                   /* Shift mantissa A */
      ap[n-1] |= MPFR_LIMB_HIGHBIT;               /* Set MSB */
      ap[0] &= ~MPFR_LIMB_MASK(sh);               /* Clear LSB bit */
      if (MPFR_LIKELY((limb&(MPFR_LIMB_ONE<<sh)) == 0)) /* Check exact case */
        { inexact = 0; goto set_exponent; }
      /* Zero: Truncate
         Nearest: Even Rule => truncate or add 1
         Away: Add 1 */
      if (MPFR_LIKELY(rnd_mode==MPFR_RNDN))
        {
          if (MPFR_LIKELY((ap[0]&(MPFR_LIMB_ONE<<sh))==0))
            { inexact = -1; goto set_exponent; }
          else
            goto add_one_ulp;
        }
      MPFR_UPDATE_RND_MODE(rnd_mode, MPFR_IS_NEG(b));
      if (rnd_mode==MPFR_RNDZ)
        { inexact = -1; goto set_exponent; }
      else
        goto add_one_ulp;
    }
  else if (MPFR_UNLIKELY (d >= p))
    {
      if (MPFR_LIKELY (d > p))
        {
          /* d > p : c is entirely below the last bit of b: Copy B in A */
          /* Away: Add 1
             Nearest: Trunc
             Zero: Trunc */
          if (MPFR_LIKELY (rnd_mode==MPFR_RNDN
                           || MPFR_IS_LIKE_RNDZ (rnd_mode, MPFR_IS_NEG (b))))
            {
            copy_set_exponent:
              ap = MPFR_MANT (a);
              MPN_COPY (ap, MPFR_MANT(b), n);
              inexact = -1;
              goto set_exponent;
            }
          else
            {
            copy_add_one_ulp:
              ap = MPFR_MANT(a);
              MPN_COPY (ap, MPFR_MANT(b), n);
              goto add_one_ulp;
            }
        }
      else
        {
          /* d==p : Copy B in A */
          /* Away: Add 1
             Nearest: Even Rule if C is a power of 2, else Add 1
             Zero: Trunc */
          if (MPFR_LIKELY(rnd_mode==MPFR_RNDN))
            {
              /* Check if C was a power of 2 (then the sum sits exactly
                 halfway and the even rule applies) */
              cp = MPFR_MANT(c);
              if (MPFR_UNLIKELY(cp[n-1] == MPFR_LIMB_HIGHBIT))
                {
                  mp_size_t k = n-1;
                  do {
                    k--;
                  } while (k>=0 && cp[k]==0);
                  if (MPFR_UNLIKELY(k<0))
                    /* Power of 2: Even rule */
                    if ((MPFR_MANT (b)[0]&(MPFR_LIMB_ONE<<sh))==0)
                      goto copy_set_exponent;
                }
              /* Not a Power of 2 */
              goto copy_add_one_ulp;
            }
          else if (MPFR_IS_LIKE_RNDZ (rnd_mode, MPFR_IS_NEG (b)))
            goto copy_set_exponent;
          else
            goto copy_add_one_ulp;
        }
    }
  else
    {
      mp_limb_t mask;
      mp_limb_t bcp, bcp1; /* Cp and C'p+1: the rounding bit and the
                              sticky bit of the shifted c */

      /* General case: 1 <= d < p */
      cp = (mp_limb_t*) MPFR_TMP_ALLOC(n * BYTES_PER_MP_LIMB);

      /* Shift c in temporary allocated place */
      {
        mpfr_uexp_t dm;
        mp_size_t m;

        dm = d % GMP_NUMB_BITS;
        m = d / GMP_NUMB_BITS;
        if (MPFR_UNLIKELY(dm == 0))
          {
            /* dm = 0 and m > 0: Just copy */
            MPFR_ASSERTD(m!=0);
            MPN_COPY(cp, MPFR_MANT(c)+m, n-m);
            MPN_ZERO(cp+n-m, m);
          }
        else if (MPFR_LIKELY(m == 0))
          {
            /* dm >=1 and m == 0: just shift */
            MPFR_ASSERTD(dm >= 1);
            mpn_rshift(cp, MPFR_MANT(c), n, dm);
          }
        else
          {
            /* dm > 0 and m > 0: shift and zero */
            mpn_rshift(cp, MPFR_MANT(c)+m, n-m, dm);
            MPN_ZERO(cp+n-m, m);
          }
      }

      DEBUG( mpfr_print_mant_binary("Before", MPFR_MANT(c), p) );
      DEBUG( mpfr_print_mant_binary("B=     ", MPFR_MANT(b), p) );
      DEBUG( mpfr_print_mant_binary("After ", cp, p) );

      /* Compute bcp=Cp and bcp1=C'p+1 */
      if (MPFR_LIKELY (sh > 0))
        {
          /* Try to compute them from C' rather than C */
          bcp = (cp[0] & (MPFR_LIMB_ONE<<(sh-1))) ;
          if (MPFR_LIKELY(cp[0]&MPFR_LIMB_MASK(sh-1)))
            bcp1 = 1;
          else
            {
              /* We can't compute C'p+1 from C'. Compute it from C */
              /* Start from bit x=p-d+sh in mantissa C
                 (+sh since we have already looked sh bits in C'!) */
              mpfr_prec_t x = p-d+sh-1;
              if (MPFR_LIKELY(x>p))
                /* We have already looked at all the bits of c,
                   so C'p+1 = 0 */
                bcp1 = 0;
              else
                {
                  mp_limb_t *tp = MPFR_MANT(c);
                  mp_size_t kx = n-1 - (x / GMP_NUMB_BITS);
                  mpfr_prec_t sx = GMP_NUMB_BITS-1-(x%GMP_NUMB_BITS);
                  DEBUG (printf ("(First) x=%lu Kx=%ld Sx=%lu\n",
                                 (unsigned long) x, (long) kx,
                                 (unsigned long) sx));
                  /* Looks at the last bits of limb kx (if sx=0 does
                     nothing) */
                  if (tp[kx] & MPFR_LIMB_MASK(sx))
                    bcp1 = 1;
                  else
                    {
                      /*kx += (sx==0);*/
                      /*If sx==0, tp[kx] hasn't been checked*/
                      do {
                        kx--;
                      } while (kx>=0 && tp[kx]==0);
                      bcp1 = (kx >= 0);
                    }
                }
            }
        }
      else /* sh == 0 */
        {
          /* Compute Cp and C'p+1 from C with sh=0 */
          mp_limb_t *tp = MPFR_MANT(c);
          /* Start from bit x=p-d in mantissa C */
          mpfr_prec_t x = p-d;
          mp_size_t kx = n-1 - (x / GMP_NUMB_BITS);
          mpfr_prec_t sx = GMP_NUMB_BITS-1-(x%GMP_NUMB_BITS);
          MPFR_ASSERTD(p >= d);
          bcp = tp[kx] & (MPFR_LIMB_ONE<<sx);
          /* Looks at the last bits of limb kx (If sx=0, does nothing) */
          if (tp[kx]&MPFR_LIMB_MASK(sx))
            bcp1 = 1;
          else
            {
              do {
                kx--;
              } while (kx>=0 && tp[kx]==0);
              bcp1 = (kx>=0);
            }
        }
      DEBUG (printf("sh=%u Cp=%lu C'p+1=%lu\n", sh,
                    (unsigned long) bcp, (unsigned long) bcp1));

      /* Clean shifted C' */
      mask = ~MPFR_LIMB_MASK(sh);
      cp[0] &= mask;

      /* Add the (shifted) mantissa of c to that of b, result in a */
      ap = MPFR_MANT(a);
      limb = mpn_add_n (ap, MPFR_MANT(b), cp, n);
      DEBUG( mpfr_print_mant_binary("Add=  ", ap, p) );

      /* Check for overflow (carry out of the most significant limb) */
      if (MPFR_UNLIKELY (limb))
        {
          limb = ap[0] & (MPFR_LIMB_ONE<<sh); /* Get LSB */
          mpn_rshift (ap, ap, n, 1);          /* Shift mantissa */
          bx++;                               /* Fix exponent */
          ap[n-1] |= MPFR_LIMB_HIGHBIT;       /* Set MSB */
          ap[0]   &= mask;                    /* Clear LSB bit */
          bcp1    |= bcp;                     /* Recompute C'p+1 */
          bcp     = limb;                     /* Recompute Cp */
          DEBUG (printf ("(Overflow) Cp=%lu C'p+1=%lu\n",
                         (unsigned long) bcp, (unsigned long) bcp1));
          DEBUG (mpfr_print_mant_binary ("Add=  ", ap, p));
        }

      /* Round:
          Zero: Truncate but could be exact.
          Away: Add 1 if Cp or C'p+1 !=0
          Nearest: Truncate but could be exact if Cp==0
                   Add 1 if C'p+1 !=0,
                   Even rule else */
      if (MPFR_LIKELY(rnd_mode == MPFR_RNDN))
        {
          if (MPFR_LIKELY(bcp == 0))
            { inexact = MPFR_LIKELY(bcp1) ? -1 : 0; goto set_exponent; }
          else if (MPFR_UNLIKELY(bcp1==0) && (ap[0]&(MPFR_LIMB_ONE<<sh))==0)
            { inexact = -1; goto set_exponent; }
          else
            goto add_one_ulp;
        }
      MPFR_UPDATE_RND_MODE(rnd_mode, MPFR_IS_NEG(b));
      if (rnd_mode == MPFR_RNDZ)
        {
          inexact = MPFR_LIKELY(bcp || bcp1) ? -1 : 0;
          goto set_exponent;
        }
      else
        {
          if (MPFR_UNLIKELY(bcp==0 && bcp1==0))
            { inexact = 0; goto set_exponent; }
          else
            goto add_one_ulp;
        }
    }
  MPFR_ASSERTN(0); /* not reached: every branch above jumps to a label */

 add_one_ulp:
  /* add one unit in last place to a */
  DEBUG( printf("AddOneUlp\n") );
  if (MPFR_UNLIKELY( mpn_add_1(ap, ap, n, MPFR_LIMB_ONE<<sh) ))
    {
      /* Case 100000x0 = 0x1111x1 + 1*/
      DEBUG( printf("Pow of 2\n") );
      bx++;
      ap[n-1] = MPFR_LIMB_HIGHBIT;
    }
  inexact = 1;

 set_exponent:
  if (MPFR_UNLIKELY(bx > __gmpfr_emax)) /* Check for overflow */
    {
      DEBUG( printf("Overflow\n") );
      MPFR_TMP_FREE(marker);
      MPFR_SET_SAME_SIGN(a,b);
      return mpfr_overflow(a, rnd_mode, MPFR_SIGN(a));
    }
  MPFR_SET_EXP (a, bx);
  MPFR_SET_SAME_SIGN(a,b);

  MPFR_TMP_FREE(marker);
  /* inexact was computed for |b|+|c|; scale it by the sign of a */
  MPFR_RET (inexact * MPFR_INT_SIGN (a));
}
diff --git a/src/add_d.c b/src/add_d.c
new file mode 100644
index 000000000..041ce45c9
--- /dev/null
+++ b/src/add_d.c
@@ -0,0 +1,49 @@
+/* mpfr_add_d -- add a multiple precision floating-point number
+ to a machine double precision float
+
+Copyright 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#include "mpfr-impl.h"
+
+int
+mpfr_add_d (mpfr_ptr a, mpfr_srcptr b, double c, mpfr_rnd_t rnd_mode)
+{
+ int inexact;
+ mpfr_t d;
+ MPFR_SAVE_EXPO_DECL (expo);
+
+ MPFR_LOG_FUNC (("b[%#R]=%R c=%.20g rnd=%d", b, b, c, rnd_mode),
+ ("a[%#R]=%R", a, a));
+
+ MPFR_SAVE_EXPO_MARK (expo);
+
+ mpfr_init2 (d, IEEE_DBL_MANT_DIG);
+ inexact = mpfr_set_d (d, c, rnd_mode);
+ MPFR_ASSERTN (inexact == 0);
+
+ mpfr_clear_flags ();
+ inexact = mpfr_add (a, b, d, rnd_mode);
+ MPFR_SAVE_EXPO_UPDATE_FLAGS (expo, __gmpfr_flags);
+
+ mpfr_clear (d);
+ MPFR_SAVE_EXPO_FREE (expo);
+ return mpfr_check_range (a, inexact, rnd_mode);
+}
diff --git a/src/add_ui.c b/src/add_ui.c
new file mode 100644
index 000000000..e163cb6e1
--- /dev/null
+++ b/src/add_ui.c
@@ -0,0 +1,53 @@
+/* mpfr_add_ui -- add a floating-point number with a machine integer
+
+Copyright 2000, 2001, 2002, 2003, 2004, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#define MPFR_NEED_LONGLONG_H
+#include "mpfr-impl.h"
+
+int
+mpfr_add_ui (mpfr_ptr y, mpfr_srcptr x, unsigned long int u, mpfr_rnd_t rnd_mode)
+{
+ if (MPFR_LIKELY(u != 0) ) /* if u=0, do nothing */
+ {
+ mpfr_t uu;
+ mp_limb_t up[1];
+ unsigned long cnt;
+ int inex;
+ MPFR_SAVE_EXPO_DECL (expo);
+
+ MPFR_TMP_INIT1 (up, uu, GMP_NUMB_BITS);
+ MPFR_ASSERTD (u == (mp_limb_t) u);
+ count_leading_zeros(cnt, (mp_limb_t) u);
+ up[0] = (mp_limb_t) u << cnt;
+
+ /* Optimization note: Exponent save/restore operations may be
+ removed if mpfr_add works even when uu is out-of-range. */
+ MPFR_SAVE_EXPO_MARK (expo);
+ MPFR_SET_EXP (uu, GMP_NUMB_BITS - cnt);
+ inex = mpfr_add(y, x, uu, rnd_mode);
+ MPFR_SAVE_EXPO_FREE (expo);
+ return mpfr_check_range(y, inex, rnd_mode);
+ }
+ else
+ /* (unsigned long) 0 is assumed to be a real 0 (unsigned) */
+ return mpfr_set (y, x, rnd_mode);
+}
diff --git a/src/agm.c b/src/agm.c
new file mode 100644
index 000000000..7ec4a3670
--- /dev/null
+++ b/src/agm.c
@@ -0,0 +1,177 @@
+/* mpfr_agm -- arithmetic-geometric mean of two floating-point numbers
+
+Copyright 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#define MPFR_NEED_LONGLONG_H
+#include "mpfr-impl.h"
+
/* mpfr_agm -- arithmetic-geometric mean of op1 and op2.
   agm(x,y) is between x and y, so we don't need to save exponent range.
   Returns the usual ternary value.  NaN when either operand is NaN, or
   when an operand is negative, -Inf or zero (with the +Inf/+Inf
   exception handled below). */
int
mpfr_agm (mpfr_ptr r, mpfr_srcptr op2, mpfr_srcptr op1, mpfr_rnd_t rnd_mode)
{
  int compare, inexact;
  mp_size_t s;                 /* limb count for working precision p */
  mpfr_prec_t p, q;            /* working / target precisions */
  mp_limb_t *up, *vp, *tmpp;
  mpfr_t u, v, tmp;            /* u = geometric, v = arithmetic iterate */
  unsigned long n; /* number of iterations */
  unsigned long err = 0;
  MPFR_ZIV_DECL (loop);
  MPFR_TMP_DECL(marker);

  MPFR_LOG_FUNC (("op2[%#R]=%R op1[%#R]=%R rnd=%d", op2,op2,op1,op1,rnd_mode),
                 ("r[%#R]=%R inexact=%d", r, r, inexact));

  /* Deal with special values */
  if (MPFR_ARE_SINGULAR (op1, op2))
    {
      /* If a or b is NaN, the result is NaN */
      if (MPFR_IS_NAN(op1) || MPFR_IS_NAN(op2))
        {
          MPFR_SET_NAN(r);
          MPFR_RET_NAN;
        }
      /* now one of a or b is Inf or 0 */
      /* If a and b are both +Inf, the result is +Inf.
         Otherwise if a or b is -Inf or 0, the result is NaN */
      else if (MPFR_IS_INF(op1) || MPFR_IS_INF(op2))
        {
          if (MPFR_IS_STRICTPOS(op1) && MPFR_IS_STRICTPOS(op2))
            {
              MPFR_SET_INF(r);
              MPFR_SET_SAME_SIGN(r, op1);
              MPFR_RET(0); /* exact */
            }
          else
            {
              MPFR_SET_NAN(r);
              MPFR_RET_NAN;
            }
        }
      else /* a and b are neither NaN nor Inf, and one is zero */
        { /* If a or b is 0, the result is +0 since a sqrt is positive */
          MPFR_ASSERTD (MPFR_IS_ZERO (op1) || MPFR_IS_ZERO (op2));
          MPFR_SET_POS (r);
          MPFR_SET_ZERO (r);
          MPFR_RET (0); /* exact */
        }
    }

  /* If a or b is negative (excluding -Infinity), the result is NaN */
  if (MPFR_UNLIKELY(MPFR_IS_NEG(op1) || MPFR_IS_NEG(op2)))
    {
      MPFR_SET_NAN(r);
      MPFR_RET_NAN;
    }

  /* Precision of the following calculus */
  q = MPFR_PREC(r);
  p = q + MPFR_INT_CEIL_LOG2(q) + 15;
  MPFR_ASSERTD (p >= 7); /* see algorithms.tex */
  s = (p - 1) / GMP_NUMB_BITS + 1;

  /* b (op2) and a (op1) are the 2 operands but we want b >= a */
  compare = mpfr_cmp (op1, op2);
  if (MPFR_UNLIKELY( compare == 0 ))
    {
      /* agm(x,x) = x: just round op1 to the target precision.
         NOTE(review): the ternary value of mpfr_set is discarded here
         and 0 (exact) is returned even if PREC(r) < PREC(op1) makes the
         assignment inexact -- confirm whether this is intended. */
      mpfr_set (r, op1, rnd_mode);
      MPFR_RET (0); /* exact */
    }
  else if (compare > 0)
    {
      mpfr_srcptr t = op1;
      op1 = op2;
      op2 = t;
    }
  /* Now b(=op2) >= a (=op1) */

  MPFR_TMP_MARK(marker);

  /* Main loop: Ziv iteration, raising the working precision p until the
     approximation can be correctly rounded to q bits. */
  MPFR_ZIV_INIT (loop, p);
  for (;;)
    {
      mpfr_prec_t eq;          /* number of bits of agreement of u and v */

      /* Init temporary vars */
      MPFR_TMP_INIT (up, u, p, s);
      MPFR_TMP_INIT (vp, v, p, s);
      MPFR_TMP_INIT (tmpp, tmp, p, s);

      /* Calculus of un and vn */
      mpfr_mul (u, op1, op2, MPFR_RNDN); /* Faster since PREC(op) < PREC(u) */
      mpfr_sqrt (u, u, MPFR_RNDN);
      mpfr_add (v, op1, op2, MPFR_RNDN); /* add with !=prec is still good*/
      mpfr_div_2ui (v, v, 1, MPFR_RNDN);
      n = 1;
      while (mpfr_cmp2 (u, v, &eq) != 0 && eq <= p - 2)
        {
          mpfr_add (tmp, u, v, MPFR_RNDN);
          mpfr_div_2ui (tmp, tmp, 1, MPFR_RNDN);
          /* See proof in algorithms.tex: once u and v agree on more
             than p/4 bits, one final corrected step suffices. */
          if (4*eq > p)
            {
              mpfr_t w;
              /* tmp = U(k) */
              mpfr_init2 (w, (p + 1) / 2);
              mpfr_sub (w, v, u, MPFR_RNDN);     /* e = V(k-1)-U(k-1) */
              mpfr_sqr (w, w, MPFR_RNDN);        /* e = e^2 */
              mpfr_div_2ui (w, w, 4, MPFR_RNDN); /* e*= (1/2)^2*1/4 */
              mpfr_div (w, w, tmp, MPFR_RNDN);   /* 1/4*e^2/U(k) */
              mpfr_sub (v, tmp, w, MPFR_RNDN);
              err = MPFR_GET_EXP (tmp) - MPFR_GET_EXP (v); /* 0 or 1 */
              mpfr_clear (w);
              break;
            }
          mpfr_mul (u, u, v, MPFR_RNDN);
          mpfr_sqrt (u, u, MPFR_RNDN);
          mpfr_swap (v, tmp);
          n ++;
        }
      /* the error on v is bounded by (18n+51) ulps, or twice if there
         was an exponent loss in the final subtraction */
      err += MPFR_INT_CEIL_LOG2(18 * n + 51); /* 18n+51 should not overflow
                                                 since n is about log(p) */
      /* we should have n+2 <= 2^(p/4) [see algorithms.tex] */
      if (MPFR_LIKELY (MPFR_INT_CEIL_LOG2(n + 2) <= p / 4 &&
                       MPFR_CAN_ROUND (v, p - err, q, rnd_mode)))
        break; /* Stop the loop */

      /* Next iteration */
      MPFR_ZIV_NEXT (loop, p);
      s = (p - 1) / GMP_NUMB_BITS + 1;
    }
  MPFR_ZIV_FREE (loop);

  /* Setting of the result */
  inexact = mpfr_set (r, v, rnd_mode);

  /* Let's clean */
  MPFR_TMP_FREE(marker);

  return inexact; /* agm(u,v) can be exact for u, v rational only for u=v.
                     Proof (due to Nicolas Brisebarre): it suffices to consider
                     u=1 and v<1. Then 1/AGM(1,v) = 2F1(1/2,1/2,1;1-v^2),
                     and a theorem due to G.V. Chudnovsky states that for x a
                     non-zero algebraic number with |x|<1, then
                     2F1(1/2,1/2,1;x) and 2F1(-1/2,1/2,1;x) are algebraically
                     independent over Q. */
}
diff --git a/src/ai.c b/src/ai.c
new file mode 100644
index 000000000..b9c48d41d
--- /dev/null
+++ b/src/ai.c
@@ -0,0 +1,631 @@
+/* mpfr_ai -- Airy function Ai
+
+Copyright 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#define MPFR_NEED_LONGLONG_H
+#include "mpfr-impl.h"
+
+/* Reminder and notations:
+ -----------------------
+
+ Ai is the solution of:
+ / y'' - x*y = 0
+ { Ai(0) = 1/ ( 9^(1/3)*Gamma(2/3) )
+ \ Ai'(0) = -1/ ( 3^(1/3)*Gamma(1/3) )
+
+ Series development:
+ Ai(x) = sum (a_i*x^i)
+ = sum (t_i)
+
+ Recurrences:
+ a_(i+3) = a_i / ((i+2)*(i+3))
+ t_(i+3) = t_i * x^3 / ((i+2)*(i+3))
+
+ Values:
+ a_0 = Ai(0) ~ 0.355
+ a_1 = Ai'(0) ~ -0.259
+*/
+
+
+/* Airy function Ai evaluated by the most naive algorithm */
+static int
+mpfr_ai1 (mpfr_ptr y, mpfr_srcptr x, mpfr_rnd_t rnd)
+{
+  MPFR_ZIV_DECL (loop);
+  MPFR_SAVE_EXPO_DECL (expo);
+  mpfr_prec_t wprec; /* working precision */
+  mpfr_prec_t prec; /* target precision */
+  mpfr_prec_t err; /* used to estimate the evaluation error */
+  mpfr_prec_t correct_bits; /* estimates the number of correct bits*/
+  unsigned long int k;
+  unsigned long int cond; /* condition number of the series */
+  unsigned long int assumed_exponent; /* used as a lowerbound of |EXP(Ai(x))| */
+  int r;
+  mpfr_t s; /* used to store the partial sum */
+  mpfr_t ti, tip1; /* used to store successive values of t_i */
+  mpfr_t x3; /* used to store x^3 */
+  mpfr_t tmp_sp, tmp2_sp; /* small precision variables */
+  unsigned long int x3u; /* used to store ceil(x^3) */
+  mpfr_t temp1, temp2;
+  int test1, test2;
+
+  /* Logging */
+  MPFR_LOG_FUNC ( ("x[%#R]=%R rnd=%d", x, x, rnd), ("y[%#R]=%R", y, y) );
+
+  /* Special cases */
+  if (MPFR_UNLIKELY (MPFR_IS_SINGULAR (x)))
+    {
+      if (MPFR_IS_NAN (x))
+        {
+          MPFR_SET_NAN (y);
+          MPFR_RET_NAN;
+        }
+      else if (MPFR_IS_INF (x))
+        return mpfr_set_ui (y, 0, rnd);
+    }
+
+  /* FIXME: handle the case x == 0 (and in a consistent way for +0 and -0) */
+
+  /* Save current exponents range */
+  MPFR_SAVE_EXPO_MARK (expo);
+
+  /* FIXME: underflow for large values of |x| ? */
+
+
+  /* Set initial precision */
+  /* If we compute sum(i=0, N-1, t_i), the relative error is bounded by */
+  /* 2*(4N)*2^(1-wprec)*C(|x|)/Ai(x) */
+  /* where C(|x|) = 1 if 0<=x<=1 */
+  /* and C(|x|) = (1/2)*x^(-1/4)*exp(2/3 x^(3/2)) if x >= 1 */
+
+  /* A priori, we do not know N, so we estimate it to ~ prec */
+  /* If 0<=x<=1, we estimate Ai(x) ~ 1/8 */
+  /* if 1<=x, we estimate Ai(x) ~ (1/4)*x^(-1/4)*exp(-2/3 * x^(3/2)) */
+  /* if x<=0, ????? */
+
+  /* We begin with 11 guard bits */
+  prec = MPFR_PREC (y)+11;
+  MPFR_ZIV_INIT (loop, prec);
+
+  /* The working precision is heuristically chosen in order to obtain */
+  /* approximately prec correct bits in the sum. To sum up: the sum */
+  /* is stopped when the *exact* sum gives ~ prec correct bit. And */
+  /* when it is stopped, the accuracy of the computed sum, with respect*/
+  /* to the exact one should be ~prec bits. */
+  mpfr_init2 (tmp_sp, MPFR_SMALL_PRECISION);
+  mpfr_init2 (tmp2_sp, MPFR_SMALL_PRECISION);
+  mpfr_abs (tmp_sp, x, MPFR_RNDU);
+  mpfr_pow_ui (tmp_sp, tmp_sp, 3, MPFR_RNDU);
+  mpfr_sqrt (tmp_sp, tmp_sp, MPFR_RNDU); /* tmp_sp ~ x^3/2 */
+
+  /* 0.96179669392597567 >~ 2/3 * log2(e). See algorithms.tex */
+  mpfr_set_str (tmp2_sp, "0.96179669392597567", 10, MPFR_RNDU);
+  mpfr_mul (tmp2_sp, tmp_sp, tmp2_sp, MPFR_RNDU);
+  /* now tmp2_sp >~ (2/3)*|x|^(3/2)*log2(e): the magnitude, in bits, of
+     the exponential factor appearing in C(|x|) above */
+
+  /* cond represents the number of lost bits in the evaluation of the sum */
+  if ( (MPFR_IS_ZERO (x)) || (MPFR_GET_EXP (x) <= 0) )
+    cond = 0;
+  else
+    cond = mpfr_get_ui (tmp2_sp, MPFR_RNDU) - (MPFR_GET_EXP (x)-1)/4 - 1;
+
+  /* The variable assumed_exponent is used to store the maximal assumed */
+  /* exponent of Ai(x). More precisely, we assume that |Ai(x)| will be */
+  /* greater than 2^{-assumed_exponent}. */
+  if (MPFR_IS_ZERO (x))
+    assumed_exponent = 2;
+  else
+    {
+      if (MPFR_IS_POS (x))
+        {
+          if (MPFR_GET_EXP (x) <= 0)
+            assumed_exponent = 3;
+          else
+            assumed_exponent = (2 + (MPFR_GET_EXP (x)/4 + 1)
+                                + mpfr_get_ui (tmp2_sp, MPFR_RNDU));
+        }
+      /* We do not know Ai (x) yet */
+      /* We cover the case when EXP (Ai (x))>=-10 */
+      else
+        assumed_exponent = 10;
+    }
+
+  wprec = prec + MPFR_INT_CEIL_LOG2 (prec) + 5 + cond + assumed_exponent;
+
+  /* Working variables get a default precision here; the relevant
+     precision wprec is (re)set at the start of each Ziv iteration. */
+  mpfr_init (ti);
+  mpfr_init (tip1);
+  mpfr_init (temp1);
+  mpfr_init (temp2);
+  mpfr_init (x3);
+  mpfr_init (s);
+
+  /* ZIV loop */
+  for (;;)
+    {
+      MPFR_LOG_MSG (("Working precision: %Pu\n", wprec));
+      mpfr_set_prec (ti, wprec);
+      mpfr_set_prec (tip1, wprec);
+      mpfr_set_prec (x3, wprec);
+      mpfr_set_prec (s, wprec);
+
+      mpfr_sqr (x3, x, MPFR_RNDU);
+      mpfr_mul (x3, x3, x, (MPFR_IS_POS (x)?MPFR_RNDU:MPFR_RNDD)); /* x3=x^3 */
+      if (MPFR_IS_NEG (x))
+        MPFR_CHANGE_SIGN (x3);
+      x3u = mpfr_get_ui (x3, MPFR_RNDU); /* x3u >= ceil(x^3) */
+      if (MPFR_IS_NEG (x))
+        MPFR_CHANGE_SIGN (x3);
+
+      mpfr_gamma_one_and_two_third (temp1, temp2, wprec);
+      mpfr_set_ui (ti, 9, MPFR_RNDN);
+      mpfr_cbrt (ti, ti, MPFR_RNDN);
+      mpfr_mul (ti, ti, temp2, MPFR_RNDN);
+      mpfr_ui_div (ti, 1, ti , MPFR_RNDN); /* ti = 1/( Gamma (2/3)*9^(1/3) ) */
+
+      mpfr_set_ui (tip1, 3, MPFR_RNDN);
+      mpfr_cbrt (tip1, tip1, MPFR_RNDN);
+      mpfr_mul (tip1, tip1, temp1, MPFR_RNDN);
+      mpfr_neg (tip1, tip1, MPFR_RNDN);
+      mpfr_div (tip1, x, tip1, MPFR_RNDN); /* tip1 = -x/(Gamma (1/3)*3^(1/3)) */
+
+      /* s = t_0 + t_1 = Ai(0) + Ai'(0)*x (see the values of a_0, a_1 in
+         the header comment of this file) */
+      mpfr_add (s, ti, tip1, MPFR_RNDN);
+
+
+      /* Evaluation of the series */
+      k = 2;
+      for (;;)
+        {
+          mpfr_mul (ti, ti, x3, MPFR_RNDN);
+          mpfr_mul (tip1, tip1, x3, MPFR_RNDN);
+
+          mpfr_div_ui2 (ti, ti, k, (k+1), MPFR_RNDN);
+          mpfr_div_ui2 (tip1, tip1, (k+1), (k+2), MPFR_RNDN);
+
+          k += 3;
+          mpfr_add (s, s, ti, MPFR_RNDN);
+          mpfr_add (s, s, tip1, MPFR_RNDN);
+
+          /* FIXME: if s==0 */
+          /* Stop when the last two computed terms are negligible with
+             respect to the partial sum s, and when x3u <= k*(k+1)/2:
+             since t_{i+3} = t_i * x^3 / ((i+2)*(i+3)) and x3u >= x^3,
+             the latter makes every subsequent term at most half the
+             previous one, so the neglected tail keeps decreasing. */
+          test1 = MPFR_IS_ZERO (ti)
+            || (MPFR_GET_EXP (ti) + (mpfr_exp_t)prec + 3 <= MPFR_GET_EXP (s));
+          test2 = MPFR_IS_ZERO (tip1)
+            || (MPFR_GET_EXP (tip1) + (mpfr_exp_t)prec + 3 <= MPFR_GET_EXP (s));
+
+          if ( test1 && test2 && (x3u <= k*(k+1)/2) )
+            break; /* FIXME: if k*(k+1) overflows */
+        }
+
+      MPFR_LOG_MSG (("Truncation rank: %lu\n", k));
+
+      err = 4 + MPFR_INT_CEIL_LOG2 (k) + cond - MPFR_GET_EXP (s);
+
+      /* err is the number of bits lost due to the evaluation error */
+      /* wprec-(prec+1): number of bits lost due to the approximation error */
+      MPFR_LOG_MSG (("Roundoff error: %Pu\n", err));
+      MPFR_LOG_MSG (("Approxim error: %Pu\n", wprec-prec-1));
+
+      /* correct_bits: number of bits of s left intact by the roundoff
+         error (err bits), capped at the target precision prec */
+      if (wprec < err+1)
+        correct_bits=0;
+      else
+        {
+          if (wprec < err+prec+1)
+            correct_bits = wprec - err - 1;
+          else
+            correct_bits = prec;
+        }
+
+      if (MPFR_LIKELY (MPFR_CAN_ROUND (s, correct_bits, MPFR_PREC (y), rnd)))
+        break;
+
+      if (correct_bits == 0)
+        {
+          assumed_exponent *= 2;
+          MPFR_LOG_MSG (("Not a single bit correct (assumed_exponent=%lu)\n",
+                         assumed_exponent));
+          wprec = prec + 5 + MPFR_INT_CEIL_LOG2 (k) + cond + assumed_exponent;
+        }
+      else
+        {
+          if (correct_bits < prec)
+            { /* The precision was badly chosen */
+              MPFR_LOG_MSG (("Bad assumption on the exponent of Ai(x)", 0));
+              MPFR_LOG_MSG ((" (E=%ld)\n", (long) MPFR_GET_EXP (s)));
+              wprec = prec + err + 1;
+            }
+          else
+            { /* We are really in a bad case of the TMD */
+              MPFR_ZIV_NEXT (loop, prec);
+
+              /* We update wprec */
+              /* We assume that K will not be multiplied by more than 4 */
+              wprec = prec + (MPFR_INT_CEIL_LOG2 (k)+2) + 5 + cond
+                - MPFR_GET_EXP (s);
+            }
+        }
+
+    } /* End of ZIV loop */
+
+  MPFR_ZIV_FREE (loop);
+
+  r = mpfr_set (y, s, rnd);
+
+  mpfr_clear (ti);
+  mpfr_clear (tip1);
+  mpfr_clear (temp1);
+  mpfr_clear (temp2);
+  mpfr_clear (x3);
+  mpfr_clear (s);
+  mpfr_clear (tmp_sp);
+  mpfr_clear (tmp2_sp);
+
+  MPFR_SAVE_EXPO_FREE (expo);
+  return mpfr_check_range (y, r, rnd);
+}
+
+
+/* Airy function Ai evaluated by Smith algorithm */
+static int
+mpfr_ai2 (mpfr_ptr y, mpfr_srcptr x, mpfr_rnd_t rnd)
+{
+  MPFR_ZIV_DECL (loop);
+  MPFR_SAVE_EXPO_DECL (expo);
+  mpfr_prec_t wprec; /* working precision */
+  mpfr_prec_t prec; /* target precision */
+  mpfr_prec_t err; /* used to estimate the evaluation error */
+  mpfr_prec_t correctBits; /* estimates the number of correct bits*/
+  unsigned long int i, j, L, t;
+  unsigned long int cond; /* condition number of the series */
+  unsigned long int assumed_exponent; /* used as a lowerbound of |EXP(Ai(x))| */
+  int r; /* returned ternary value */
+  mpfr_t s; /* used to store the partial sum */
+  mpfr_t u0, u1;
+  mpfr_t *z; /* used to store the (x^3j) */
+  mpfr_t result;
+  mpfr_t tmp_sp, tmp2_sp; /* small precision variables */
+  unsigned long int x3u; /* used to store ceil (x^3) */
+  mpfr_t temp1, temp2;
+  int test0, test1;
+
+  /* Logging */
+  MPFR_LOG_FUNC ( ("x[%#R]=%R rnd=%d", x, x, rnd), ("y[%#R]=%R", y, y) );
+
+  /* Special cases */
+  if (MPFR_UNLIKELY (MPFR_IS_SINGULAR (x)))
+    {
+      if (MPFR_IS_NAN (x))
+        {
+          MPFR_SET_NAN (y);
+          MPFR_RET_NAN;
+        }
+      else if (MPFR_IS_INF (x))
+        return mpfr_set_ui (y, 0, rnd);
+    }
+
+  /* Save current exponents range */
+  MPFR_SAVE_EXPO_MARK (expo);
+
+  /* FIXME: underflow for large values of |x| */
+
+
+  /* Set initial precision */
+  /* See the analysis for the naive evaluation */
+
+  /* We begin with 11 guard bits */
+  prec = MPFR_PREC (y) + 11;
+  MPFR_ZIV_INIT (loop, prec);
+
+  mpfr_init2 (tmp_sp, MPFR_SMALL_PRECISION);
+  mpfr_init2 (tmp2_sp, MPFR_SMALL_PRECISION);
+  mpfr_abs (tmp_sp, x, MPFR_RNDU);
+  mpfr_pow_ui (tmp_sp, tmp_sp, 3, MPFR_RNDU);
+  mpfr_sqrt (tmp_sp, tmp_sp, MPFR_RNDU); /* tmp_sp ~ x^3/2 */
+
+  /* 0.96179669392597567 >~ 2/3 * log2(e). See algorithms.tex */
+  mpfr_set_str (tmp2_sp, "0.96179669392597567", 10, MPFR_RNDU);
+  mpfr_mul (tmp2_sp, tmp_sp, tmp2_sp, MPFR_RNDU);
+
+  /* cond represents the number of lost bits in the evaluation of the sum */
+  if ( (MPFR_IS_ZERO (x)) || (MPFR_GET_EXP (x) <= 0) )
+    cond = 0;
+  else
+    cond = mpfr_get_ui (tmp2_sp, MPFR_RNDU) - (MPFR_GET_EXP (x) - 1)/4 - 1;
+
+  /* This variable is used to store the maximal assumed exponent of */
+  /* Ai (x). More precisely, we assume that |Ai (x)| will be greater than */
+  /* 2^{-assumedExp}. */
+  if (MPFR_IS_ZERO (x))
+    assumed_exponent = 2;
+  else
+    {
+      if (MPFR_IS_POS (x))
+        {
+          if (MPFR_GET_EXP (x) <= 0)
+            assumed_exponent = 3;
+          else
+            assumed_exponent = (2 + (MPFR_GET_EXP (x)/4 + 1)
+                                + mpfr_get_ui (tmp2_sp, MPFR_RNDU));
+        }
+      /* We do not know Ai (x) yet */
+      /* We cover the case when EXP (Ai (x))>=-10 */
+      else
+        assumed_exponent = 10;
+    }
+
+  wprec = prec + MPFR_INT_CEIL_LOG2 (prec) + 6 + cond + assumed_exponent;
+
+  /* We assume that the truncation rank will be ~ prec */
+  L = __gmpfr_isqrt (prec);
+  MPFR_LOG_MSG (("size of blocks L = %lu\n", L));
+
+  z = (mpfr_t *) (*__gmp_allocate_func) ( (L + 1) * sizeof (mpfr_t) );
+  MPFR_ASSERTN (z != NULL);
+  for (j=0; j<=L; j++)
+    mpfr_init (z[j]);
+
+  mpfr_init (s);
+  mpfr_init (u0); mpfr_init (u1);
+  mpfr_init (result);
+  mpfr_init (temp1);
+  mpfr_init (temp2);
+
+  /* ZIV loop */
+  for (;;)
+    {
+      MPFR_LOG_MSG (("working precision: %Pu\n", wprec));
+
+      for (j=0; j<=L; j++)
+        mpfr_set_prec (z[j], wprec);
+      mpfr_set_prec (s, wprec);
+      mpfr_set_prec (u0, wprec); mpfr_set_prec (u1, wprec);
+      mpfr_set_prec (result, wprec);
+
+      mpfr_set_ui (u0, 1, MPFR_RNDN);
+      mpfr_set (u1, x, MPFR_RNDN);
+
+      mpfr_set_ui (z[0], 1, MPFR_RNDU);
+      mpfr_sqr (z[1], u1, MPFR_RNDU);
+      mpfr_mul (z[1], z[1], x, (MPFR_IS_POS (x) ? MPFR_RNDU : MPFR_RNDD) );
+
+      if (MPFR_IS_NEG (x))
+        MPFR_CHANGE_SIGN (z[1]);
+      x3u = mpfr_get_ui (z[1], MPFR_RNDU); /* x3u >= ceil (x^3) */
+      if (MPFR_IS_NEG (x))
+        MPFR_CHANGE_SIGN (z[1]);
+
+      /* z[j] = x^(3j): even indices by squaring z[j/2], odd ones by one
+         multiplication by z[1] = x^3 */
+      for (j=2; j<=L ;j++)
+        {
+          if (j%2 == 0)
+            mpfr_sqr (z[j], z[j/2], MPFR_RNDN);
+          else
+            mpfr_mul (z[j], z[j-1], z[1], MPFR_RNDN);
+        }
+
+      mpfr_gamma_one_and_two_third (temp1, temp2, wprec);
+      mpfr_set_ui (u0, 9, MPFR_RNDN);
+      mpfr_cbrt (u0, u0, MPFR_RNDN);
+      mpfr_mul (u0, u0, temp2, MPFR_RNDN);
+      mpfr_ui_div (u0, 1, u0 , MPFR_RNDN); /* u0 = 1/( Gamma (2/3)*9^(1/3) ) */
+
+      mpfr_set_ui (u1, 3, MPFR_RNDN);
+      mpfr_cbrt (u1, u1, MPFR_RNDN);
+      mpfr_mul (u1, u1, temp1, MPFR_RNDN);
+      mpfr_neg (u1, u1, MPFR_RNDN);
+      mpfr_div (u1, x, u1, MPFR_RNDN); /* u1 = -x/(Gamma (1/3)*3^(1/3)) */
+
+      mpfr_set_ui (result, 0, MPFR_RNDN);
+      t = 0;
+
+      /* Evaluation of the series by Smith's method */
+      for (i=0; ; i++)
+        {
+          t += 3 * L;
+
+          /* k = 0 */
+          t -= 3;
+          mpfr_set (s, z[L-1], MPFR_RNDN);
+          for (j=L-2; ; j--)
+            {
+              t -= 3;
+              mpfr_div_ui2 (s, s, (t+2), (t+3), MPFR_RNDN);
+              mpfr_add (s, s, z[j], MPFR_RNDN);
+              if (j==0)
+                break;
+            }
+          mpfr_mul (s, s, u0, MPFR_RNDN);
+          mpfr_add (result, result, s, MPFR_RNDN);
+
+          mpfr_mul (u0, u0, z[L], MPFR_RNDN);
+          for (j=0; j<=L-1; j++)
+            {
+              mpfr_div_ui2 (u0, u0, (t + 2), (t + 3), MPFR_RNDN);
+              t += 3;
+            }
+
+          t++;
+
+          /* k = 1 */
+          t -= 3;
+          mpfr_set (s, z[L-1], MPFR_RNDN);
+          for (j=L-2; ; j--)
+            {
+              t -= 3;
+              mpfr_div_ui2 (s, s, (t + 2), (t + 3), MPFR_RNDN);
+              mpfr_add (s, s, z[j], MPFR_RNDN);
+              if (j==0)
+                break;
+            }
+          mpfr_mul (s, s, u1, MPFR_RNDN);
+          mpfr_add (result, result, s, MPFR_RNDN);
+
+          mpfr_mul (u1, u1, z[L], MPFR_RNDN);
+          for (j=0; j<=L-1; j++)
+            {
+              mpfr_div_ui2 (u1, u1, (t + 2), (t + 3), MPFR_RNDN);
+              t += 3;
+            }
+
+          t++;
+
+          /* k = 2 */
+          t++;
+
+          /* End of the loop over k */
+          t -= 3;
+
+          /* Stop when both running factors u0 and u1 have become
+             negligible with respect to the accumulated result, and when
+             x3u <= (t+2)*(t+3)/2 so that the neglected tail keeps
+             decreasing (cf. the same criterion in mpfr_ai1). */
+          test0 = MPFR_IS_ZERO (u0) ||
+            MPFR_GET_EXP (u0) + (mpfr_exp_t)prec + 4 <= MPFR_GET_EXP (result);
+          test1 = MPFR_IS_ZERO (u1) ||
+            MPFR_GET_EXP (u1) + (mpfr_exp_t)prec + 4 <= MPFR_GET_EXP (result);
+
+          if ( test0 && test1 && (x3u <= (t + 2) * (t + 3) / 2) )
+            break;
+        }
+
+      MPFR_LOG_MSG (("Truncation rank: %lu\n", t));
+
+      err = (5 + MPFR_INT_CEIL_LOG2 (L+1) + MPFR_INT_CEIL_LOG2 (i+1)
+             + cond - MPFR_GET_EXP (result));
+
+      /* err is the number of bits lost due to the evaluation error */
+      /* wprec-(prec+1): number of bits lost due to the approximation error */
+      MPFR_LOG_MSG (("Roundoff error: %Pu\n", err));
+      MPFR_LOG_MSG (("Approxim error: %Pu\n", wprec - prec - 1));
+
+      /* correctBits: number of bits of result left intact by the
+         roundoff error (err bits), capped at the target precision */
+      if (wprec < err+1)
+        correctBits = 0;
+      else
+        {
+          if (wprec < err+prec+1)
+            correctBits = wprec - err - 1;
+          else
+            correctBits = prec;
+        }
+
+      if (MPFR_LIKELY (MPFR_CAN_ROUND (result, correctBits,
+                                       MPFR_PREC (y), rnd)))
+        break;
+
+      /* Resize the z table according to the observed truncation rank */
+      for (j=0; j<=L; j++)
+        mpfr_clear (z[j]);
+      (*__gmp_free_func) (z, (L + 1) * sizeof (mpfr_t));
+      L = __gmpfr_isqrt (t);
+      MPFR_LOG_MSG (("size of blocks L = %lu\n", L));
+      z = (mpfr_t *) (*__gmp_allocate_func) ( (L + 1) * sizeof (mpfr_t));
+      MPFR_ASSERTN (z != NULL);
+      for (j=0; j<=L; j++)
+        mpfr_init (z[j]);
+
+      if (correctBits == 0)
+        {
+          assumed_exponent *= 2;
+          MPFR_LOG_MSG (("Not a single bit correct (assumed_exponent=%lu)\n",
+                         assumed_exponent));
+          wprec = prec + 6 + MPFR_INT_CEIL_LOG2 (t) + cond + assumed_exponent;
+        }
+      else
+        {
+          if (correctBits < prec)
+            { /* The precision was badly chosen */
+              MPFR_LOG_MSG (("Bad assumption on the exponent of Ai (x)", 0));
+              MPFR_LOG_MSG ((" (E=%ld)\n", (long) (MPFR_GET_EXP (result))));
+              wprec = prec + err + 1;
+            }
+          else
+            { /* We are really in a bad case of the TMD */
+              MPFR_ZIV_NEXT (loop, prec);
+
+              /* We update wprec */
+              /* We assume that t will not be multiplied by more than 4 */
+              wprec = (prec + (MPFR_INT_CEIL_LOG2 (t) + 2) + 6 + cond
+                       - MPFR_GET_EXP (result));
+            }
+        }
+    } /* End of ZIV loop */
+
+  MPFR_ZIV_FREE (loop);
+
+  /* Round the approximation into y while the extended exponent range is
+     still in effect, then restore the caller's range and let
+     mpfr_check_range handle a possible out-of-range result.  This also
+     makes mpfr_ai2 consistent with mpfr_ai1, which already did
+     MPFR_SAVE_EXPO_FREE *after* the final mpfr_set and returned through
+     mpfr_check_range. */
+  r = mpfr_set (y, result, rnd);
+
+  mpfr_clear (tmp_sp);
+  mpfr_clear (tmp2_sp);
+  for (j=0; j<=L; j++)
+    mpfr_clear (z[j]);
+  (*__gmp_free_func) (z, (L + 1) * sizeof (mpfr_t));
+
+  mpfr_clear (s);
+  mpfr_clear (u0); mpfr_clear (u1);
+  mpfr_clear (result);
+  mpfr_clear (temp1);
+  mpfr_clear (temp2);
+
+  MPFR_SAVE_EXPO_FREE (expo);
+  return mpfr_check_range (y, r, rnd);
+}
+
+/* We consider that the boundary between the area where the naive method
+   should preferably be used and the area where Smith's method should
+   preferably be used has the following form:
+   it is a triangle defined by two lines (one for the negative values of x, and
+   one for the positive values of x) crossing at x=0.
+
+   More precisely,
+
+   * If x<0 and MPFR_AI_THRESHOLD1*x + MPFR_AI_THRESHOLD2*prec > MPFR_AI_SCALE,
+   use Smith's algorithm;
+   * If x>0 and MPFR_AI_THRESHOLD3*x + MPFR_AI_THRESHOLD2*prec > MPFR_AI_SCALE,
+   use Smith's algorithm;
+   * otherwise, use the naive method.
+*/
+
+#define MPFR_AI_SCALE 1048576
+
+int
+mpfr_ai (mpfr_ptr y, mpfr_srcptr x, mpfr_rnd_t rnd)
+{
+  mpfr_t lhs, pterm;
+  int smith;
+  MPFR_SAVE_EXPO_DECL (expo);
+
+  /* The exponent range must be large enough for the computation of lhs. */
+  MPFR_SAVE_EXPO_MARK (expo);
+
+  /* Evaluate, in small precision, the left-hand side of the threshold
+     inequality described above: THRESHOLD{1,3}*x + THRESHOLD2*prec,
+     then compare it with MPFR_AI_SCALE to select the algorithm. */
+  mpfr_init2 (lhs, MPFR_SMALL_PRECISION);
+  mpfr_init2 (pterm, MPFR_SMALL_PRECISION);
+
+  /* pterm = MPFR_AI_THRESHOLD2 * PREC(y), with the precision clamped to
+     ULONG_MAX for the unsigned-long multiplication */
+  mpfr_set_si (pterm, MPFR_AI_THRESHOLD2, MPFR_RNDN);
+  mpfr_mul_ui (pterm, pterm, MPFR_PREC (y) > ULONG_MAX ?
+               ULONG_MAX : (unsigned long) MPFR_PREC (y), MPFR_RNDN);
+
+  /* lhs = THRESHOLD1*x (x < 0) or THRESHOLD3*x (x >= 0), plus pterm */
+  mpfr_set (lhs, x, MPFR_RNDN);
+  mpfr_mul_si (lhs, lhs,
+               MPFR_IS_NEG (x) ? MPFR_AI_THRESHOLD1 : MPFR_AI_THRESHOLD3,
+               MPFR_RNDN);
+  mpfr_add (lhs, lhs, pterm, MPFR_RNDN);
+  mpfr_clear (pterm);
+
+  smith = mpfr_cmp_si (lhs, MPFR_AI_SCALE) > 0;
+  mpfr_clear (lhs);
+
+  MPFR_SAVE_EXPO_FREE (expo); /* Ignore all previous exceptions. */
+
+  return smith ? mpfr_ai2 (y, x, rnd) : mpfr_ai1 (y, x, rnd);
+}
diff --git a/src/asin.c b/src/asin.c
new file mode 100644
index 000000000..3ef78f06d
--- /dev/null
+++ b/src/asin.c
@@ -0,0 +1,121 @@
+/* mpfr_asin -- arc-sine of a floating-point number
+
+Copyright 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library, and was contributed by Mathieu Dutour.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#include "mpfr-impl.h"
+
+int
+mpfr_asin (mpfr_ptr asin, mpfr_srcptr x, mpfr_rnd_t rnd_mode)
+{
+  mpfr_t xp;
+  int compared, inexact;
+  mpfr_prec_t prec;
+  mpfr_exp_t xp_exp;
+  MPFR_SAVE_EXPO_DECL (expo);
+  MPFR_ZIV_DECL (loop);
+
+  MPFR_LOG_FUNC (("x[%#R]=%R rnd=%d", x, x, rnd_mode),
+                 ("asin[%#R]=%R inexact=%d", asin, asin, inexact));
+
+  /* Special cases */
+  if (MPFR_UNLIKELY (MPFR_IS_SINGULAR (x)))
+    {
+      if (MPFR_IS_NAN (x) || MPFR_IS_INF (x))
+        {
+          MPFR_SET_NAN (asin);
+          MPFR_RET_NAN;
+        }
+      else /* x = 0 */
+        {
+          MPFR_ASSERTD (MPFR_IS_ZERO (x));
+          MPFR_SET_ZERO (asin);
+          MPFR_SET_SAME_SIGN (asin, x);
+          MPFR_RET (0); /* exact result */
+        }
+    }
+
+  /* asin(x) = x + x^3/6 + ... so the error is < 2^(3*EXP(x)-2) */
+  MPFR_FAST_COMPUTE_IF_SMALL_INPUT (asin, x, -2 * MPFR_GET_EXP (x), 2, 1,
+                                    rnd_mode, {});
+
+  /* Set x_p=|x| (x is a normal number) */
+  mpfr_init2 (xp, MPFR_PREC (x));
+  inexact = mpfr_abs (xp, x, MPFR_RNDN);
+  MPFR_ASSERTD (inexact == 0); /* same precision, so |x| is exact */
+
+  compared = mpfr_cmp_ui (xp, 1);
+
+  if (MPFR_UNLIKELY (compared >= 0))
+    {
+      mpfr_clear (xp);
+      if (compared > 0) /* asin(x) = NaN for |x| > 1 */
+        {
+          MPFR_SET_NAN (asin);
+          MPFR_RET_NAN;
+        }
+      else /* x = 1 or x = -1 */
+        {
+          if (MPFR_IS_POS (x)) /* asin(+1) = Pi/2 */
+            inexact = mpfr_const_pi (asin, rnd_mode);
+          else /* asin(-1) = -Pi/2 */
+            {
+              inexact = -mpfr_const_pi (asin, MPFR_INVERT_RND(rnd_mode));
+              MPFR_CHANGE_SIGN (asin);
+            }
+          mpfr_div_2ui (asin, asin, 1, rnd_mode); /* May underflow */
+          return inexact;
+        }
+    }
+
+  MPFR_SAVE_EXPO_MARK (expo);
+
+  /* Compute exponent of 1 - ABS(x) */
+  mpfr_ui_sub (xp, 1, xp, MPFR_RNDD);
+  MPFR_ASSERTD (MPFR_GET_EXP (xp) <= 0);
+  MPFR_ASSERTD (MPFR_GET_EXP (x) <= 0);
+  xp_exp = 2 - MPFR_GET_EXP (xp);
+  /* xp_exp >= 2 is used as a number of guard bits to absorb the
+     cancellation in 1 - x^2 below: the closer |x| is to 1, the smaller
+     EXP(xp) and hence the larger xp_exp. */
+
+  /* Set up initial prec */
+  prec = MPFR_PREC (asin) + 10 + xp_exp;
+
+  /* use asin(x) = atan(x/sqrt(1-x^2)) */
+  MPFR_ZIV_INIT (loop, prec);
+  for (;;)
+    {
+      mpfr_set_prec (xp, prec);
+      mpfr_sqr (xp, x, MPFR_RNDN);
+      mpfr_ui_sub (xp, 1, xp, MPFR_RNDN);
+      mpfr_sqrt (xp, xp, MPFR_RNDN);
+      mpfr_div (xp, x, xp, MPFR_RNDN);
+      mpfr_atan (xp, xp, MPFR_RNDN);
+      if (MPFR_LIKELY (MPFR_CAN_ROUND (xp, prec - xp_exp,
+                                       MPFR_PREC (asin), rnd_mode)))
+        break;
+      MPFR_ZIV_NEXT (loop, prec);
+    }
+  MPFR_ZIV_FREE (loop);
+  inexact = mpfr_set (asin, xp, rnd_mode);
+
+  mpfr_clear (xp);
+
+  MPFR_SAVE_EXPO_FREE (expo);
+  return mpfr_check_range (asin, inexact, rnd_mode);
+}
diff --git a/src/asinh.c b/src/asinh.c
new file mode 100644
index 000000000..164c3282c
--- /dev/null
+++ b/src/asinh.c
@@ -0,0 +1,117 @@
+/* mpfr_asinh -- inverse hyperbolic sine
+
+Copyright 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#define MPFR_NEED_LONGLONG_H
+#include "mpfr-impl.h"
+
+/* The computation of asinh is done by *
+ * asinh = ln(x + sqrt(x^2 + 1)) */
+
+int
+mpfr_asinh (mpfr_ptr y, mpfr_srcptr x, mpfr_rnd_t rnd_mode)
+{
+  int inexact;
+  int signx, neg;
+  mpfr_prec_t Ny, Nt;
+  mpfr_t t; /* auxiliary variables */
+  mpfr_exp_t err;
+  MPFR_SAVE_EXPO_DECL (expo);
+  MPFR_ZIV_DECL (loop);
+
+  MPFR_LOG_FUNC (("x[%#R]=%R rnd=%d", x, x, rnd_mode),
+                 ("y[%#R]=%R inexact=%d", y, y, inexact));
+
+  if (MPFR_UNLIKELY (MPFR_IS_SINGULAR (x)))
+    {
+      if (MPFR_IS_NAN (x))
+        {
+          MPFR_SET_NAN (y);
+          MPFR_RET_NAN;
+        }
+      else if (MPFR_IS_INF (x))
+        {
+          MPFR_SET_INF (y);
+          MPFR_SET_SAME_SIGN (y, x);
+          MPFR_RET (0);
+        }
+      else /* x is necessarily 0 */
+        {
+          MPFR_ASSERTD (MPFR_IS_ZERO (x));
+          MPFR_SET_ZERO (y); /* asinh(0) = 0 */
+          MPFR_SET_SAME_SIGN (y, x);
+          MPFR_RET (0);
+        }
+    }
+
+  /* asinh(x) = x - x^3/6 + ... so the error is < 2^(3*EXP(x)-2) */
+  MPFR_FAST_COMPUTE_IF_SMALL_INPUT (y, x, -2 * MPFR_GET_EXP (x), 2, 0,
+                                    rnd_mode, {});
+
+  Ny = MPFR_PREC (y); /* Precision of output variable */
+
+  signx = MPFR_SIGN (x);
+  neg = MPFR_IS_NEG (x);
+
+  /* General case */
+
+  /* compute the precision of intermediary variable */
+  /* the optimal number of bits : see algorithms.tex */
+  Nt = Ny + 4 + MPFR_INT_CEIL_LOG2 (Ny);
+
+  MPFR_SAVE_EXPO_MARK (expo);
+
+  /* initialize intermediary variables */
+  mpfr_init2 (t, Nt);
+
+  /* First computation of asinh */
+  MPFR_ZIV_INIT (loop, Nt);
+  for (;;)
+    {
+      /* compute asinh(|x|) = ln(sqrt(x^2+1)+|x|); the sign of x is
+         restored by the final mpfr_set4 below */
+      mpfr_mul (t, x, x, MPFR_RNDD); /* x^2 */
+      mpfr_add_ui (t, t, 1, MPFR_RNDD); /* x^2+1 */
+      mpfr_sqrt (t, t, MPFR_RNDN); /* sqrt(x^2+1) */
+      (neg ? mpfr_sub : mpfr_add) (t, t, x, MPFR_RNDN); /* sqrt(x^2+1)+|x|:
+                                      for x < 0, t - x equals t + |x| */
+      mpfr_log (t, t, MPFR_RNDN); /* ln(sqrt(x^2+1)+|x|) */
+
+      if (MPFR_LIKELY (MPFR_IS_PURE_FP (t)))
+        {
+          /* error estimate -- see algorithms.tex */
+          err = Nt - (MAX (4 - MPFR_GET_EXP (t), 0) + 1);
+          if (MPFR_LIKELY (MPFR_IS_ZERO (t)
+                           || MPFR_CAN_ROUND (t, err, Ny, rnd_mode)))
+            break;
+        }
+
+      /* actualisation of the precision */
+      MPFR_ZIV_NEXT (loop, Nt);
+      mpfr_set_prec (t, Nt);
+    }
+  MPFR_ZIV_FREE (loop);
+
+  /* mpfr_set4 rounds t into y with the sign signx of x */
+  inexact = mpfr_set4 (y, t, rnd_mode, signx);
+
+  mpfr_clear (t);
+
+  MPFR_SAVE_EXPO_FREE (expo);
+  return mpfr_check_range (y, inexact, rnd_mode);
+}
diff --git a/src/atan.c b/src/atan.c
new file mode 100644
index 000000000..53b34e030
--- /dev/null
+++ b/src/atan.c
@@ -0,0 +1,435 @@
+/* mpfr_atan -- arc-tangent of a floating-point number
+
+Copyright 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library, and was contributed by Mathieu Dutour.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#define MPFR_NEED_LONGLONG_H
+#include "mpfr-impl.h"
+
+/* If x = p/2^r, put in y an approximation of atan(x)/x using 2^m terms
+ for the series expansion, with an error of at most 1 ulp.
+ Assumes |x| < 1.
+
+ If X=x^2, we want 1 - X/3 + X^2/5 - ... + (-1)^k*X^k/(2k+1) + ...
+
+ Assume p is non-zero.
+
+ When we sum terms up to x^k/(2k+1), the denominator Q[0] is
+ 3*5*7*...*(2k+1) ~ (2k/e)^k.
+*/
+static void
+mpfr_atan_aux (mpfr_ptr y, mpz_ptr p, long r, int m, mpz_t *tab)
+{
+  mpz_t *S, *Q, *ptoj;
+  unsigned long n, i, k, j, l;
+  mpfr_exp_t diff, expo;
+  int im, done;
+  mpfr_prec_t mult, *accu, *log2_nb_terms;
+  mpfr_prec_t precy = MPFR_PREC(y);
+
+  MPFR_ASSERTD(mpz_cmp_ui (p, 0) != 0);
+
+  /* one allocation holds two arrays of m+1 entries each: accu is the
+     first half, log2_nb_terms the second half */
+  accu = (mpfr_prec_t*) (*__gmp_allocate_func) ((2 * m + 2) * sizeof (mpfr_prec_t));
+  log2_nb_terms = accu + m + 1;
+
+  /* Set Tables */
+  S = tab; /* S */
+  ptoj = S + 1*(m+1); /* p^2^j Precomputed table */
+  Q = S + 2*(m+1); /* Product of Odd integer table */
+
+  /* From p to p^2, and r to 2r */
+  mpz_mul (p, p, p);
+  MPFR_ASSERTD (2 * r > r);
+  r = 2 * r;
+
+  /* Normalize p */
+  n = mpz_scan1 (p, 0);
+  mpz_tdiv_q_2exp (p, p, n); /* exact */
+  MPFR_ASSERTD (r > n);
+  r -= n;
+  /* since |p/2^r| < 1, and p is a non-zero integer, necessarily r > 0 */
+
+  MPFR_ASSERTD (mpz_sgn (p) > 0);
+  MPFR_ASSERTD (m > 0);
+
+  /* check if p=1 (special case) */
+  l = 0;
+  /*
+    We compute by binary splitting, with X = x^2 = p/2^r:
+    P(a,b) = p if a+1=b, P(a,c)*P(c,b) otherwise
+    Q(a,b) = (2a+1)*2^r if a+1=b [except Q(0,1)=1], Q(a,c)*Q(c,b) otherwise
+    S(a,b) = p*(2a+1) if a+1=b, Q(c,b)*S(a,c)+Q(a,c)*P(a,c)*S(c,b) otherwise
+    Then atan(x)/x ~ S(0,i)/Q(0,i) for i so that (p/2^r)^i/i is small enough.
+    The factor 2^(r*(b-a)) in Q(a,b) is implicit, thus we have to take it
+    into account when we compute with Q.
+  */
+  accu[0] = 0; /* accu[k] = Mult[0] + ... + Mult[k], where Mult[j] is the
+                  number of bits of the corresponding term S[j]/Q[j] */
+  if (mpz_cmp_ui (p, 1) != 0)
+    {
+      /* p <> 1: precompute ptoj table */
+      mpz_set (ptoj[0], p);
+      for (im = 1 ; im <= m ; im ++)
+        mpz_mul (ptoj[im], ptoj[im - 1], ptoj[im - 1]);
+      /* main loop */
+      n = 1UL << m;
+      /* the ith term being X^i/(2i+1) with X=p/2^r, we can stop when
+         p^i/2^(r*i) < 2^(-precy), i.e. r*i > precy + log2(p^i) */
+      for (i = k = done = 0; (i < n) && (done == 0); i += 2, k ++)
+        {
+          /* initialize both S[k],Q[k] and S[k+1],Q[k+1] */
+          mpz_set_ui (Q[k+1], 2 * i + 3); /* Q(i+1,i+2) */
+          mpz_mul_ui (S[k+1], p, 2 * i + 1); /* S(i+1,i+2) */
+          mpz_mul_2exp (S[k], Q[k+1], r);
+          mpz_sub (S[k], S[k], S[k+1]); /* S(i,i+2) */
+          mpz_mul_ui (Q[k], Q[k+1], 2 * i + 1); /* Q(i,i+2) */
+          log2_nb_terms[k] = 1; /* S[k]/Q[k] corresponds to 2 terms */
+          for (j = (i + 2) >> 1, l = 1; (j & 1) == 0; l ++, j >>= 1, k --)
+            {
+              /* invariant: S[k-1]/Q[k-1] and S[k]/Q[k] correspond
+                 to 2^l terms each. We combine them into S[k-1]/Q[k-1] */
+              MPFR_ASSERTD (k > 0);
+              mpz_mul (S[k], S[k], Q[k-1]);
+              mpz_mul (S[k], S[k], ptoj[l]);
+              mpz_mul (S[k-1], S[k-1], Q[k]);
+              mpz_mul_2exp (S[k-1], S[k-1], r << l);
+              mpz_add (S[k-1], S[k-1], S[k]);
+              mpz_mul (Q[k-1], Q[k-1], Q[k]);
+              log2_nb_terms[k-1] = l + 1;
+              /* now S[k-1]/Q[k-1] corresponds to 2^(l+1) terms */
+              MPFR_MPZ_SIZEINBASE2(mult, ptoj[l+1]);
+              /* FIXME: precompute bits(ptoj[l+1]) outside the loop? */
+              mult = (r << (l + 1)) - mult - 1;
+              accu[k-1] = (k == 1) ? mult : accu[k-2] + mult;
+              if (accu[k-1] > precy)
+                done = 1;
+            }
+        }
+    }
+  else /* special case p=1: the ith term being X^i/(2i+1) with X=1/2^r,
+          we can stop when r*i > precy i.e. i > precy/r */
+    {
+      n = 1UL << m;
+      for (i = k = 0; (i < n) && (i <= precy / r); i += 2, k ++)
+        {
+          mpz_set_ui (Q[k + 1], 2 * i + 3);
+          mpz_mul_2exp (S[k], Q[k+1], r);
+          mpz_sub_ui (S[k], S[k], 1 + 2 * i);
+          mpz_mul_ui (Q[k], Q[k + 1], 1 + 2 * i);
+          log2_nb_terms[k] = 1; /* S[k]/Q[k] corresponds to 2 terms */
+          for (j = (i + 2) >> 1, l = 1; (j & 1) == 0; l++, j >>= 1, k --)
+            {
+              MPFR_ASSERTD (k > 0);
+              mpz_mul (S[k], S[k], Q[k-1]);
+              mpz_mul (S[k-1], S[k-1], Q[k]);
+              mpz_mul_2exp (S[k-1], S[k-1], r << l);
+              mpz_add (S[k-1], S[k-1], S[k]);
+              mpz_mul (Q[k-1], Q[k-1], Q[k]);
+              log2_nb_terms[k-1] = l + 1;
+            }
+        }
+    }
+
+  /* we need to combine S[0]/Q[0]...S[k-1]/Q[k-1] */
+  l = 0; /* number of terms accumulated in S[k]/Q[k] */
+  while (k > 1)
+    {
+      k --;
+      /* combine S[k-1]/Q[k-1] and S[k]/Q[k] */
+      j = log2_nb_terms[k-1];
+      mpz_mul (S[k], S[k], Q[k-1]);
+      if (mpz_cmp_ui (p, 1) != 0)
+        mpz_mul (S[k], S[k], ptoj[j]);
+      mpz_mul (S[k-1], S[k-1], Q[k]);
+      l += 1 << log2_nb_terms[k];
+      mpz_mul_2exp (S[k-1], S[k-1], r * l);
+      mpz_add (S[k-1], S[k-1], S[k]);
+      mpz_mul (Q[k-1], Q[k-1], Q[k]);
+    }
+  (*__gmp_free_func) (accu, (2 * m + 2) * sizeof (mpfr_prec_t));
+
+  /* truncate S[0] to ~2*precy bits and Q[0] to ~precy bits, recording
+     the discarded powers of two in expo */
+  MPFR_MPZ_SIZEINBASE2 (diff, S[0]);
+  diff -= 2 * precy;
+  expo = diff;
+  if (diff >= 0)
+    mpz_tdiv_q_2exp (S[0], S[0], diff);
+  else
+    mpz_mul_2exp (S[0], S[0], -diff);
+
+  MPFR_MPZ_SIZEINBASE2 (diff, Q[0]);
+  diff -= precy;
+  expo -= diff;
+  if (diff >= 0)
+    mpz_tdiv_q_2exp (Q[0], Q[0], diff);
+  else
+    mpz_mul_2exp (Q[0], Q[0], -diff);
+
+  mpz_tdiv_q (S[0], S[0], Q[0]);
+  mpfr_set_z (y, S[0], MPFR_RNDD);
+  /* final exponent correction: expo compensates the truncations above,
+     and r * (i - 1) accounts for the implicit 2^r factors of Q (see the
+     binary-splitting comment); i is the value left by the loops. */
+  MPFR_SET_EXP (y, MPFR_EXP(y) + expo - r * (i - 1));
+}
+
+/* mpfr_atan -- set atan to the arc-tangent of x, rounded in direction
+   rnd_mode.  Returns the usual MPFR ternary value (zero iff exact).
+   After handling singular/trivial inputs, the argument is reduced to
+   0 < sk <= 1 (using atan(x) = Pi/2 - atan(1/x) for |x| > 1 and,
+   for large precision, the halving atan(x) = 2 atan((sqrt(1+x^2)-1)/x)),
+   then atan(sk) is summed from binary-split pieces via mpfr_atan_aux
+   inside a Ziv rounding loop. */
+int
+mpfr_atan (mpfr_ptr atan, mpfr_srcptr x, mpfr_rnd_t rnd_mode)
+{
+  mpfr_t xp, arctgt, sk, tmp, tmp2;
+  mpz_t ukz;
+  mpz_t *tabz;
+  mpfr_exp_t exptol;
+  mpfr_prec_t prec, realprec, est_lost, lost;
+  unsigned long twopoweri, log2p, red;
+  int comparaison, inexact;
+  int i, n0, oldn0;
+  MPFR_GROUP_DECL (group);
+  MPFR_SAVE_EXPO_DECL (expo);
+  MPFR_ZIV_DECL (loop);
+
+  MPFR_LOG_FUNC (("x[%#R]=%R rnd=%d", x, x, rnd_mode),
+                 ("atan[%#R]=%R inexact=%d", atan, atan, inexact));
+
+  /* Singular cases */
+  if (MPFR_UNLIKELY (MPFR_IS_SINGULAR (x)))
+    {
+      if (MPFR_IS_NAN (x))
+        {
+          MPFR_SET_NAN (atan);
+          MPFR_RET_NAN;
+        }
+      else if (MPFR_IS_INF (x))
+        {
+          MPFR_SAVE_EXPO_MARK (expo);
+          if (MPFR_IS_POS (x)) /* arctan(+inf) = Pi/2 */
+            inexact = mpfr_const_pi (atan, rnd_mode);
+          else /* arctan(-inf) = -Pi/2 */
+            {
+              inexact = -mpfr_const_pi (atan,
+                                        MPFR_INVERT_RND (rnd_mode));
+              MPFR_CHANGE_SIGN (atan);
+            }
+          mpfr_div_2ui (atan, atan, 1, rnd_mode); /* exact (no exceptions) */
+          MPFR_SAVE_EXPO_FREE (expo);
+          return mpfr_check_range (atan, inexact, rnd_mode);
+        }
+      else /* x is necessarily 0 */
+        {
+          MPFR_ASSERTD (MPFR_IS_ZERO (x));
+          MPFR_SET_ZERO (atan);
+          MPFR_SET_SAME_SIGN (atan, x);
+          MPFR_RET (0);
+        }
+    }
+
+  /* atan(x) = x - x^3/3 + x^5/5...
+     so the error is < 2^(3*EXP(x)-1)
+     so `EXP(x)-(3*EXP(x)-1)` = -2*EXP(x)+1 */
+  MPFR_FAST_COMPUTE_IF_SMALL_INPUT (atan, x, -2 * MPFR_GET_EXP (x), 1, 0,
+                                    rnd_mode, {});
+
+  /* Set x_p=|x| */
+  MPFR_TMP_INIT_ABS (xp, x);
+
+  MPFR_SAVE_EXPO_MARK (expo);
+
+  /* Other simple case arctan(-+1)=-+pi/4 */
+  comparaison = mpfr_cmp_ui (xp, 1);
+  if (MPFR_UNLIKELY (comparaison == 0))
+    {
+      int neg = MPFR_IS_NEG (x);
+      inexact = mpfr_const_pi (atan, MPFR_IS_POS (x) ? rnd_mode
+                               : MPFR_INVERT_RND (rnd_mode));
+      if (neg)
+        {
+          inexact = -inexact;
+          MPFR_CHANGE_SIGN (atan);
+        }
+      mpfr_div_2ui (atan, atan, 2, rnd_mode); /* exact (no exceptions) */
+      MPFR_SAVE_EXPO_FREE (expo);
+      return mpfr_check_range (atan, inexact, rnd_mode);
+    }
+
+  realprec = MPFR_PREC (atan) + MPFR_INT_CEIL_LOG2 (MPFR_PREC (atan)) + 4;
+  prec = realprec + GMP_NUMB_BITS;
+
+  /* Initialisation */
+  mpz_init (ukz);
+  MPFR_GROUP_INIT_4 (group, prec, sk, tmp, tmp2, arctgt);
+  oldn0 = 0;
+  tabz = (mpz_t *) 0;
+
+  MPFR_ZIV_INIT (loop, prec);
+  for (;;)
+    {
+      /* First, if |x| < 1, we need to have more prec to be able to round (sup)
+         n0 = ceil(log(prec_requested + 2 + 1+ln(2.4)/ln(2))/log(2)) */
+      mpfr_prec_t sup;
+      sup = MPFR_GET_EXP (xp) < 0 ? 2 - MPFR_GET_EXP (xp) : 1; /* sup >= 1 */
+
+      n0 = MPFR_INT_CEIL_LOG2 ((realprec + sup) + 3);
+      /* since realprec >= 4, n0 >= ceil(log2(8)) >= 3, thus 3*n0 > 2 */
+      prec = (realprec + sup) + 1 + MPFR_INT_CEIL_LOG2 (3*n0-2);
+
+      /* the number of lost bits due to argument reduction is
+         9 - 2 * EXP(sk), which we estimate by 9 + 2*ceil(log2(p))
+         since we manage that sk < 1/p */
+      if (MPFR_PREC (atan) > 100)
+        {
+          log2p = MPFR_INT_CEIL_LOG2(prec) / 2 - 3;
+          est_lost = 9 + 2 * log2p;
+          prec += est_lost;
+        }
+      else
+        log2p = est_lost = 0; /* don't reduce the argument */
+
+      /* Initialisation */
+      MPFR_GROUP_REPREC_4 (group, prec, sk, tmp, tmp2, arctgt);
+      if (MPFR_LIKELY (oldn0 == 0))
+        {
+          oldn0 = 3 * (n0 + 1);
+          tabz = (mpz_t *) (*__gmp_allocate_func) (oldn0 * sizeof (mpz_t));
+          for (i = 0; i < oldn0; i++)
+            mpz_init (tabz[i]);
+        }
+      else if (MPFR_UNLIKELY (oldn0 < 3 * (n0 + 1)))
+        {
+          tabz = (mpz_t *) (*__gmp_reallocate_func)
+            (tabz, oldn0 * sizeof (mpz_t), 3 * (n0 + 1)*sizeof (mpz_t));
+          for (i = oldn0; i < 3 * (n0 + 1); i++)
+            mpz_init (tabz[i]);
+          oldn0 = 3 * (n0 + 1);
+        }
+
+      /* The mpfr_ui_div below mustn't underflow. This is guaranteed by
+         MPFR_SAVE_EXPO_MARK, but let's check that for maintainability. */
+      MPFR_ASSERTD (__gmpfr_emax <= 1 - __gmpfr_emin);
+
+      if (comparaison > 0) /* use atan(xp) = Pi/2 - atan(1/xp) */
+        mpfr_ui_div (sk, 1, xp, MPFR_RNDN);
+      else
+        mpfr_set (sk, xp, MPFR_RNDN);
+
+      /* now 0 < sk <= 1 */
+
+      /* Argument reduction: atan(x) = 2 atan((sqrt(1+x^2)-1)/x).
+         We want |sk| < k/sqrt(p) where p is the target precision. */
+      lost = 0;
+      for (red = 0; MPFR_GET_EXP(sk) > - (mpfr_exp_t) log2p; red ++)
+        {
+          lost = 9 - 2 * MPFR_EXP(sk);
+          mpfr_mul (tmp, sk, sk, MPFR_RNDN);
+          mpfr_add_ui (tmp, tmp, 1, MPFR_RNDN);
+          mpfr_sqrt (tmp, tmp, MPFR_RNDN);
+          mpfr_sub_ui (tmp, tmp, 1, MPFR_RNDN);
+          if (red == 0 && comparaison > 0)
+            /* use xp = 1/sk */
+            mpfr_mul (sk, tmp, xp, MPFR_RNDN);
+          else
+            mpfr_div (sk, tmp, sk, MPFR_RNDN);
+        }
+
+      /* we started from x0 = 1/|x| if |x| > 1, and |x| otherwise, thus
+         we had x0 = min(|x|, 1/|x|) <= 1, and applied 'red' times the
+         argument reduction x -> (sqrt(1+x^2)-1)/x, which keeps 0 < x < 1,
+         thus 0 < sk <= 1, and sk=1 can occur only if red=0 */
+
+      /* If sk=1, then if |x| < 1, we have 1 - 2^(-prec-1) <= |x| < 1,
+         or if |x| > 1, we have 1 - 2^(-prec-1) <= 1/|x| < 1, thus in all
+         cases ||x| - 1| <= 2^(-prec), from which it follows
+         |atan|x| - Pi/4| <= 2^(-prec), given the Taylor expansion
+         atan(1+x) = Pi/4 + x/2 - x^2/4 + ...
+         Since Pi/4 = 0.785..., the error is at most one ulp.
+      */
+      if (MPFR_UNLIKELY(mpfr_cmp_ui (sk, 1) == 0))
+        {
+          mpfr_const_pi (arctgt, MPFR_RNDN); /* 1/2 ulp extra error */
+          mpfr_div_2ui (arctgt, arctgt, 2, MPFR_RNDN); /* exact */
+          realprec = prec - 2;
+          goto can_round;
+        }
+
+      /* Assignation */
+      MPFR_SET_ZERO (arctgt);
+      twopoweri = 1 << 0;
+      MPFR_ASSERTD (n0 >= 4);
+      for (i = 0 ; i < n0; i++)
+        {
+          if (MPFR_UNLIKELY (MPFR_IS_ZERO (sk)))
+            break;
+          /* Calculation of trunc(tmp) --> mpz */
+          mpfr_mul_2ui (tmp, sk, twopoweri, MPFR_RNDN);
+          mpfr_trunc (tmp, tmp);
+          if (!MPFR_IS_ZERO (tmp))
+            {
+              /* tmp = ukz*2^exptol */
+              exptol = mpfr_get_z_2exp (ukz, tmp);
+              /* since the s_k are decreasing (see algorithms.tex),
+                 and s_0 = min(|x|, 1/|x|) < 1, we have sk < 1,
+                 thus exptol < 0 */
+              MPFR_ASSERTD (exptol < 0);
+              mpz_tdiv_q_2exp (ukz, ukz, (unsigned long int) (-exptol));
+              /* since tmp is a non-zero integer, and tmp = ukzold*2^exptol,
+                 we now have ukz = tmp, thus ukz is non-zero */
+              /* Calculation of arctan(Ak) */
+              mpfr_set_z (tmp, ukz, MPFR_RNDN);
+              mpfr_div_2ui (tmp, tmp, twopoweri, MPFR_RNDN);
+              mpfr_atan_aux (tmp2, ukz, twopoweri, n0 - i, tabz);
+              mpfr_mul (tmp2, tmp2, tmp, MPFR_RNDN);
+              /* Addition */
+              mpfr_add (arctgt, arctgt, tmp2, MPFR_RNDN);
+              /* Next iteration */
+              mpfr_sub (tmp2, sk, tmp, MPFR_RNDN);
+              mpfr_mul (sk, sk, tmp, MPFR_RNDN);
+              mpfr_add_ui (sk, sk, 1, MPFR_RNDN);
+              mpfr_div (sk, tmp2, sk, MPFR_RNDN);
+            }
+          twopoweri <<= 1;
+        }
+      /* Add last step (Arctan(sk) ~= sk) */
+      mpfr_add (arctgt, arctgt, sk, MPFR_RNDN);
+
+      /* argument reduction */
+      mpfr_mul_2exp (arctgt, arctgt, red, MPFR_RNDN);
+
+      if (comparaison > 0)
+        { /* atan(x) = Pi/2-atan(1/x) for x > 0 */
+          mpfr_const_pi (tmp, MPFR_RNDN);
+          mpfr_div_2ui (tmp, tmp, 1, MPFR_RNDN);
+          mpfr_sub (arctgt, tmp, arctgt, MPFR_RNDN);
+        }
+      MPFR_SET_POS (arctgt);
+
+    can_round:
+      if (MPFR_LIKELY (MPFR_CAN_ROUND (arctgt, realprec + est_lost - lost,
+                                       MPFR_PREC (atan), rnd_mode)))
+        break;
+      MPFR_ZIV_NEXT (loop, realprec);
+    }
+  MPFR_ZIV_FREE (loop);
+
+  inexact = mpfr_set4 (atan, arctgt, rnd_mode, MPFR_SIGN (x));
+
+  for (i = 0 ; i < oldn0 ; i++)
+    mpz_clear (tabz[i]);
+  mpz_clear (ukz);
+  (*__gmp_free_func) (tabz, oldn0 * sizeof (mpz_t));
+  MPFR_GROUP_CLEAR (group);
+
+  MPFR_SAVE_EXPO_FREE (expo);
+  /* BUGFIX: range-check the destination 'atan' (as mpfr_atan2/mpfr_atanh
+     do), not the temporary 'arctgt': arctgt belongs to 'group', which was
+     just released by MPFR_GROUP_CLEAR above, so passing it here risked a
+     use-after-free if mpfr_check_range had to adjust the value, and the
+     actual destination was never range-checked. */
+  return mpfr_check_range (atan, inexact, rnd_mode);
+}
diff --git a/src/atan2.c b/src/atan2.c
new file mode 100644
index 000000000..786c4a017
--- /dev/null
+++ b/src/atan2.c
@@ -0,0 +1,262 @@
+/* mpfr_atan2 -- two-argument arc-tangent (atan2) of floating-point numbers
+
+Copyright 2005, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library, and was contributed by Mathieu Dutour.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#define MPFR_NEED_LONGLONG_H
+#include "mpfr-impl.h"
+
+/* Set dest to atan2(y, x) = atan(y/x), using the signs of x and y to
+   select the quadrant, rounded in direction rnd_mode.
+   Returns the usual MPFR ternary value. */
+int
+mpfr_atan2 (mpfr_ptr dest, mpfr_srcptr y, mpfr_srcptr x, mpfr_rnd_t rnd_mode)
+{
+  mpfr_t tmp, pi;
+  int inexact;
+  mpfr_prec_t prec;
+  mpfr_exp_t e;
+  MPFR_SAVE_EXPO_DECL (expo);
+  MPFR_ZIV_DECL (loop);
+
+  MPFR_LOG_FUNC (("y[%#R]=%R x[%#R]=%R rnd=%d", y, y, x, x, rnd_mode),
+                 ("atan[%#R]=%R inexact=%d", dest, dest, inexact));
+
+  /* Special cases */
+  if (MPFR_ARE_SINGULAR (x, y))
+    {
+      /* atan2(0, 0) does not raise the "invalid" floating-point
+         exception, nor does atan2(y, 0) raise the "divide-by-zero"
+         floating-point exception.  (Special values per C99 Annex F.9.1.4.)
+         -- atan2(±0, -0) returns ±pi.
+         -- atan2(±0, +0) returns ±0.
+         -- atan2(±0, x) returns ±pi, for x < 0.
+         -- atan2(±0, x) returns ±0, for x > 0.
+         -- atan2(y, ±0) returns -pi/2 for y < 0.
+         -- atan2(y, ±0) returns pi/2 for y > 0.
+         -- atan2(±oo, -oo) returns ±3pi/4.
+         -- atan2(±oo, +oo) returns ±pi/4.
+         -- atan2(±oo, x) returns ±pi/2, for finite x.
+         -- atan2(±y, -oo) returns ±pi, for finite y > 0.
+         -- atan2(±y, +oo) returns ±0, for finite y > 0.
+      */
+      if (MPFR_IS_NAN (x) || MPFR_IS_NAN (y))
+        {
+          MPFR_SET_NAN (dest);
+          MPFR_RET_NAN;
+        }
+      if (MPFR_IS_ZERO (y))
+        {
+          if (MPFR_IS_NEG (x)) /* +/- PI */
+            {
+            set_pi:
+              if (MPFR_IS_NEG (y))
+                {
+                  inexact = mpfr_const_pi (dest, MPFR_INVERT_RND (rnd_mode));
+                  MPFR_CHANGE_SIGN (dest);
+                  return -inexact;
+                }
+              else
+                return mpfr_const_pi (dest, rnd_mode);
+            }
+          else /* +/- 0 */
+            {
+            set_zero:
+              MPFR_SET_ZERO (dest);
+              MPFR_SET_SAME_SIGN (dest, y);
+              return 0;
+            }
+        }
+      if (MPFR_IS_ZERO (x))
+        {
+        set_pi_2:
+          if (MPFR_IS_NEG (y)) /* -PI/2 */
+            {
+              inexact = mpfr_const_pi (dest, MPFR_INVERT_RND(rnd_mode));
+              MPFR_CHANGE_SIGN (dest);
+              mpfr_div_2ui (dest, dest, 1, rnd_mode);
+              return -inexact;
+            }
+          else /* PI/2 */
+            {
+              inexact = mpfr_const_pi (dest, rnd_mode);
+              mpfr_div_2ui (dest, dest, 1, rnd_mode);
+              return inexact;
+            }
+        }
+      if (MPFR_IS_INF (y))
+        {
+          if (!MPFR_IS_INF (x)) /* +/- PI/2 */
+            goto set_pi_2;
+          else if (MPFR_IS_POS (x)) /* +/- PI/4 */
+            {
+              if (MPFR_IS_NEG (y))
+                {
+                  /* compute -pi/4 by rounding +pi in the inverted mode */
+                  rnd_mode = MPFR_INVERT_RND (rnd_mode);
+                  inexact = mpfr_const_pi (dest, rnd_mode);
+                  MPFR_CHANGE_SIGN (dest);
+                  mpfr_div_2ui (dest, dest, 2, rnd_mode);
+                  return -inexact;
+                }
+              else
+                {
+                  inexact = mpfr_const_pi (dest, rnd_mode);
+                  mpfr_div_2ui (dest, dest, 2, rnd_mode);
+                  return inexact;
+                }
+            }
+          else /* +/- 3*PI/4: Ugly since we have to round properly */
+            {
+              mpfr_t tmp2;
+              MPFR_ZIV_DECL (loop2);
+              mpfr_prec_t prec2 = MPFR_PREC (dest) + 10;
+
+              mpfr_init2 (tmp2, prec2);
+              MPFR_ZIV_INIT (loop2, prec2);
+              for (;;)
+                {
+                  mpfr_const_pi (tmp2, MPFR_RNDN);
+                  mpfr_mul_ui (tmp2, tmp2, 3, MPFR_RNDN); /* Error <= 2 */
+                  mpfr_div_2ui (tmp2, tmp2, 2, MPFR_RNDN);
+                  /* stop once 3*pi/4 can be rounded correctly to the
+                     target precision */
+                  if (mpfr_round_p (MPFR_MANT (tmp2), MPFR_LIMB_SIZE (tmp2),
+                                    MPFR_PREC (tmp2) - 2,
+                                    MPFR_PREC (dest) + (rnd_mode == MPFR_RNDN)))
+                    break;
+                  MPFR_ZIV_NEXT (loop2, prec2);
+                  mpfr_set_prec (tmp2, prec2);
+                }
+              MPFR_ZIV_FREE (loop2);
+              if (MPFR_IS_NEG (y))
+                MPFR_CHANGE_SIGN (tmp2);
+              inexact = mpfr_set (dest, tmp2, rnd_mode);
+              mpfr_clear (tmp2);
+              return inexact;
+            }
+        }
+      /* Only remaining singular case: finite non-zero y, infinite x. */
+      MPFR_ASSERTD (MPFR_IS_INF (x));
+      if (MPFR_IS_NEG (x))
+        goto set_pi;
+      else
+        goto set_zero;
+    }
+
+  /* When x=1, atan2(y,x) = atan(y). FIXME: more generally, if x is a power
+     of two, we could call directly atan(y/x) since y/x is exact. */
+  if (mpfr_cmp_ui (x, 1) == 0)
+    return mpfr_atan (dest, y, rnd_mode);
+
+  MPFR_SAVE_EXPO_MARK (expo);
+
+  /* Set up initial prec */
+  prec = MPFR_PREC (dest) + 3 + MPFR_INT_CEIL_LOG2 (MPFR_PREC (dest));
+  mpfr_init2 (tmp, prec);
+
+  MPFR_ZIV_INIT (loop, prec);
+  if (MPFR_IS_POS (x))
+    /* use atan2(y,x) = atan(y/x) */
+    for (;;)
+      {
+        int div_inex;
+        MPFR_BLOCK_DECL (flags);
+
+        MPFR_BLOCK (flags, div_inex = mpfr_div (tmp, y, x, MPFR_RNDN));
+        if (div_inex == 0)
+          {
+            /* Result is exact. */
+            inexact = mpfr_atan (dest, tmp, rnd_mode);
+            goto end;
+          }
+
+        /* Error <= ulp (tmp) except in case of underflow or overflow. */
+
+        /* If the division underflowed, since |atan(z)/z| < 1, we have
+           an underflow. */
+        if (MPFR_UNDERFLOW (flags))
+          {
+            int sign;
+
+            /* In the case MPFR_RNDN with 2^(emin-2) < |y/x| < 2^(emin-1):
+               The smallest significand value S > 1 of |y/x| is:
+               * 1 / (1 - 2^(-px)) if py <= px,
+               * (1 - 2^(-px) + 2^(-py)) / (1 - 2^(-px)) if py >= px.
+               Therefore S - 1 > 2^(-pz), where pz = max(px,py). We have:
+               atan(|y/x|) > atan(z), where z = 2^(emin-2) * (1 + 2^(-pz)).
+               > z - z^3 / 3.
+               > 2^(emin-2) * (1 + 2^(-pz) - 2^(2 emin - 5))
+               Assuming pz <= -2 emin + 5, we can round away from zero
+               (this is what mpfr_underflow always does on MPFR_RNDN).
+               In the case MPFR_RNDN with |y/x| <= 2^(emin-2), we round
+               toward zero, as |atan(z)/z| < 1. */
+            MPFR_ASSERTN (MPFR_PREC_MAX <=
+                          2 * (mpfr_uexp_t) - MPFR_EMIN_MIN + 5);
+            if (rnd_mode == MPFR_RNDN && MPFR_IS_ZERO (tmp))
+              rnd_mode = MPFR_RNDZ;
+            sign = MPFR_SIGN (tmp);
+            mpfr_clear (tmp);
+            MPFR_SAVE_EXPO_FREE (expo);
+            return mpfr_underflow (dest, rnd_mode, sign);
+          }
+
+        mpfr_atan (tmp, tmp, MPFR_RNDN); /* Error <= 2*ulp (tmp) since
+                                            abs(D(arctan)) <= 1 */
+        /* TODO: check that the error bound is correct in case of overflow. */
+        /* FIXME: Error <= ulp(tmp) ? */
+        if (MPFR_LIKELY (MPFR_CAN_ROUND (tmp, prec - 2, MPFR_PREC (dest),
+                                         rnd_mode)))
+          break;
+        MPFR_ZIV_NEXT (loop, prec);
+        mpfr_set_prec (tmp, prec);
+      }
+  else /* x < 0 */
+    /* Use sign(y)*(PI - atan (|y/x|)) */
+    {
+      mpfr_init2 (pi, prec);
+      for (;;)
+        {
+          mpfr_div (tmp, y, x, MPFR_RNDN); /* Error <= ulp (tmp) */
+          /* If tmp is 0, we have |y/x| <= 2^(-emin-2), thus
+             atan|y/x| < 2^(-emin-2). */
+          MPFR_SET_POS (tmp); /* no error */
+          mpfr_atan (tmp, tmp, MPFR_RNDN); /* Error <= 2*ulp (tmp) since
+                                              abs(D(arctan)) <= 1 */
+          mpfr_const_pi (pi, MPFR_RNDN); /* Error <= ulp(pi) /2 */
+          e = MPFR_NOTZERO(tmp) ? MPFR_GET_EXP (tmp) : __gmpfr_emin - 1;
+          mpfr_sub (tmp, pi, tmp, MPFR_RNDN); /* see above */
+          if (MPFR_IS_NEG (y))
+            MPFR_CHANGE_SIGN (tmp);
+          /* Error(tmp) <= (1/2+2^(EXP(pi)-EXP(tmp)-1)+2^(e-EXP(tmp)+1))*ulp
+             <= 2^(MAX (MAX (EXP(PI)-EXP(tmp)-1, e-EXP(tmp)+1),
+                       -1)+2)*ulp(tmp) */
+          e = MAX (MAX (MPFR_GET_EXP (pi)-MPFR_GET_EXP (tmp) - 1,
+                        e - MPFR_GET_EXP (tmp) + 1), -1) + 2;
+          if (MPFR_LIKELY (MPFR_CAN_ROUND (tmp, prec - e, MPFR_PREC (dest),
+                                           rnd_mode)))
+            break;
+          MPFR_ZIV_NEXT (loop, prec);
+          mpfr_set_prec (tmp, prec);
+          mpfr_set_prec (pi, prec);
+        }
+      mpfr_clear (pi);
+    }
+  inexact = mpfr_set (dest, tmp, rnd_mode);
+
+ end:
+  MPFR_ZIV_FREE (loop);
+  mpfr_clear (tmp);
+  MPFR_SAVE_EXPO_FREE (expo);
+  return mpfr_check_range (dest, inexact, rnd_mode);
+}
diff --git a/src/atanh.c b/src/atanh.c
new file mode 100644
index 000000000..a8d1ee9a4
--- /dev/null
+++ b/src/atanh.c
@@ -0,0 +1,127 @@
+/* mpfr_atanh -- Inverse Hyperbolic Tangent
+
+Copyright 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#define MPFR_NEED_LONGLONG_H
+#include "mpfr-impl.h"
+
+ /* The computation of atanh is done by
+    atanh(x) = (1/2) * ln((1+x)/(1-x)), i.e. 1/2*ln(1+x) - 1/2*ln(1-x) */
+
+/* Set y to the inverse hyperbolic tangent of xt, rounded in direction
+   rnd_mode.  Returns the usual MPFR ternary value. */
+int
+mpfr_atanh (mpfr_ptr y, mpfr_srcptr xt , mpfr_rnd_t rnd_mode)
+{
+  int inexact;
+  mpfr_t x, t, te;
+  mpfr_prec_t Nx, Ny, Nt;
+  mpfr_exp_t err;
+  MPFR_ZIV_DECL (loop);
+  MPFR_SAVE_EXPO_DECL (expo);
+
+  MPFR_LOG_FUNC (("x[%#R]=%R rnd=%d", xt, xt, rnd_mode),
+                 ("y[%#R]=%R inexact=%d", y, y, inexact));
+
+  /* Special cases */
+  if (MPFR_UNLIKELY (MPFR_IS_SINGULAR (xt)))
+    {
+      /* atanh(NaN) = NaN, and atanh(+/-Inf) = NaN since tanh gives a result
+         between -1 and 1 */
+      if (MPFR_IS_NAN (xt) || MPFR_IS_INF (xt))
+        {
+          MPFR_SET_NAN (y);
+          MPFR_RET_NAN;
+        }
+      else /* necessarily xt is 0 */
+        {
+          MPFR_ASSERTD (MPFR_IS_ZERO (xt));
+          MPFR_SET_ZERO (y); /* atanh(0) = 0 */
+          MPFR_SET_SAME_SIGN (y,xt);
+          MPFR_RET (0);
+        }
+    }
+
+  /* atanh (x) = NaN as soon as |x| > 1, and arctanh(+/-1) = +/-Inf */
+  if (MPFR_UNLIKELY (MPFR_EXP (xt) > 0))
+    {
+      /* EXP(xt) == 1 and a power of 2 means |xt| is exactly 1 */
+      if (MPFR_EXP (xt) == 1 && mpfr_powerof2_raw (xt))
+        {
+          MPFR_SET_INF (y);
+          MPFR_SET_SAME_SIGN (y, xt);
+          MPFR_RET (0);
+        }
+      MPFR_SET_NAN (y);
+      MPFR_RET_NAN;
+    }
+
+  /* atanh(x) = x + x^3/3 + ... so the error is < 2^(3*EXP(x)-1) */
+  MPFR_FAST_COMPUTE_IF_SMALL_INPUT (y, xt, -2 * MPFR_GET_EXP (xt), 1, 1,
+                                    rnd_mode, {});
+
+  MPFR_SAVE_EXPO_MARK (expo);
+
+  /* Compute initial precision */
+  Nx = MPFR_PREC (xt);
+  MPFR_TMP_INIT_ABS (x, xt);  /* x = |xt|, sign restored at the end */
+  Ny = MPFR_PREC (y);
+  Nt = MAX (Nx, Ny);
+  /* the optimal number of bits : see algorithms.tex */
+  Nt = Nt + MPFR_INT_CEIL_LOG2 (Nt) + 4;
+
+  /* initialize intermediary variables */
+  mpfr_init2 (t, Nt);
+  mpfr_init2 (te, Nt);
+
+  /* First computation of atanh */
+  MPFR_ZIV_INIT (loop, Nt);
+  for (;;)
+    {
+      /* compute atanh */
+      mpfr_ui_sub (te, 1, x, MPFR_RNDU); /* (1-xt)*/
+      mpfr_add_ui (t, x, 1, MPFR_RNDD); /* (xt+1)*/
+      mpfr_div (t, t, te, MPFR_RNDN); /* (1+xt)/(1-xt)*/
+      mpfr_log (t, t, MPFR_RNDN); /* ln((1+xt)/(1-xt))*/
+      mpfr_div_2ui (t, t, 1, MPFR_RNDN); /* (1/2)*ln((1+xt)/(1-xt))*/
+
+      /* error estimate: see algorithms.tex */
+      /* FIXME: this does not correspond to the value in algorithms.tex!!! */
+      /* err=Nt-__gmpfr_ceil_log2(1+5*pow(2,1-MPFR_EXP(t)));*/
+      err = Nt - (MAX (4 - MPFR_GET_EXP (t), 0) + 1);
+
+      if (MPFR_LIKELY (MPFR_IS_ZERO (t)
+                       || MPFR_CAN_ROUND (t, err, Ny, rnd_mode)))
+        break;
+
+      /* increase the working precision and retry (Ziv's strategy) */
+      MPFR_ZIV_NEXT (loop, Nt);
+      mpfr_set_prec (t, Nt);
+      mpfr_set_prec (te, Nt);
+    }
+  MPFR_ZIV_FREE (loop);
+
+  /* copy the result with the sign of the input */
+  inexact = mpfr_set4 (y, t, rnd_mode, MPFR_SIGN (xt));
+
+  mpfr_clear(t);
+  mpfr_clear(te);
+
+  MPFR_SAVE_EXPO_FREE (expo);
+  return mpfr_check_range (y, inexact, rnd_mode);
+}
+
diff --git a/src/bernoulli.c b/src/bernoulli.c
new file mode 100644
index 000000000..ed5c1a5b2
--- /dev/null
+++ b/src/bernoulli.c
@@ -0,0 +1,80 @@
+/* bernoulli -- internal function to compute Bernoulli numbers.
+
+Copyright 2005, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#include "mpfr-impl.h"
+
+/* assuming b[0]...b[2(n-1)] are computed, computes and stores B[2n]*(2n+1)!
+
+ t/(exp(t)-1) = sum(B[j]*t^j/j!, j=0..infinity)
+ thus t = (exp(t)-1) * sum(B[j]*t^j/j!, n=0..infinity).
+ Taking the coefficient of degree n+1 > 1, we get:
+ 0 = sum(1/(n+1-k)!*B[k]/k!, k=0..n)
+ which gives:
+ B[n] = -sum(binomial(n+1,k)*B[k], k=0..n-1)/(n+1).
+
+ Let C[n] = B[n]*(n+1)!.
+ Then C[n] = -sum(binomial(n+1,k)*C[k]*n!/(k+1)!, k=0..n-1),
+ which proves that the C[n] are integers.
+*/
+/* Grow the array b (holding C[0]..C[n-1], with C[k] = B[2k]*(2k+1)!)
+   by one entry C[n], computed with the recurrence described above,
+   and return the (possibly reallocated) array. */
+mpz_t*
+mpfr_bernoulli_internal (mpz_t *b, unsigned long n)
+{
+  if (n == 0)
+    {
+      /* C[0] = B[0] * 1! = 1 */
+      b = (mpz_t *) (*__gmp_allocate_func) (sizeof (mpz_t));
+      mpz_init_set_ui (b[0], 1);
+    }
+  else
+    {
+      mpz_t t;
+      unsigned long k;
+
+      /* make room for the new entry b[n] */
+      b = (mpz_t *) (*__gmp_reallocate_func)
+        (b, n * sizeof (mpz_t), (n + 1) * sizeof (mpz_t));
+      mpz_init (b[n]);
+      /* b[n] = -sum(binomial(2n+1,2k)*C[k]*(2n)!/(2k+1)!, k=0..n-1) */
+      mpz_init_set_ui (t, 2 * n + 1);
+      mpz_mul_ui (t, t, 2 * n - 1);
+      mpz_mul_ui (t, t, 2 * n);
+      mpz_mul_ui (t, t, n);
+      mpz_fdiv_q_ui (t, t, 3); /* exact: t=binomial(2*n+1,2*k)*(2*n)!/(2*k+1)!
+                                  for k=n-1 */
+      mpz_mul (b[n], t, b[n-1]);
+      for (k = n - 1; k-- > 0;)
+        {
+          /* update t from the value for k+1 to the value for k */
+          mpz_mul_ui (t, t, 2 * k + 1);
+          mpz_mul_ui (t, t, 2 * k + 2);
+          mpz_mul_ui (t, t, 2 * k + 2);
+          mpz_mul_ui (t, t, 2 * k + 3);
+          mpz_fdiv_q_ui (t, t, 2 * (n - k) + 1);
+          mpz_fdiv_q_ui (t, t, 2 * (n - k));
+          mpz_addmul (b[n], t, b[k]);
+        }
+      /* take into account C[1] */
+      mpz_mul_ui (t, t, 2 * n + 1);
+      mpz_fdiv_q_2exp (t, t, 1);
+      mpz_sub (b[n], b[n], t);
+      mpz_neg (b[n], b[n]);
+      mpz_clear (t);
+    }
+  return b;
+}
diff --git a/src/buildopt.c b/src/buildopt.c
new file mode 100644
index 000000000..267b30e5b
--- /dev/null
+++ b/src/buildopt.c
@@ -0,0 +1,44 @@
+/* buildopt.c -- functions giving information about options used during the
+ mpfr library compilation
+
+Copyright 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#include "mpfr-impl.h"
+
+/* Return non-zero iff the library was compiled as thread safe
+   (i.e. with TLS support, MPFR_USE_THREAD_SAFE defined). */
+int
+mpfr_buildopt_tls_p (void)
+{
+#ifdef MPFR_USE_THREAD_SAFE
+  return 1;
+#else
+  return 0;
+#endif
+}
+
+/* Return non-zero iff the library was compiled with decimal float
+   support (MPFR_WANT_DECIMAL_FLOATS defined). */
+int
+mpfr_buildopt_decimal_p (void)
+{
+#ifdef MPFR_WANT_DECIMAL_FLOATS
+  return 1;
+#else
+  return 0;
+#endif
+}
diff --git a/src/cache.c b/src/cache.c
new file mode 100644
index 000000000..10a276bbe
--- /dev/null
+++ b/src/cache.c
@@ -0,0 +1,145 @@
+/* mpfr_cache -- cache interface for multiple-precision constants in MPFR.
+
+Copyright 2004, 2005, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#include "mpfr-impl.h"
+
+#if 0 /* this function is not used/documented/tested so far, it could be
+         useful if some user wants to add a new constant to mpfr, and
+         implement a cache mechanism for that constant */
+/* Initialize a cache for the constant computed by func; the cache is
+   marked as empty by giving its value precision 0. */
+void
+mpfr_init_cache (mpfr_cache_t cache, int (*func)(mpfr_ptr, mpfr_rnd_t))
+{
+  MPFR_PREC (cache->x) = 0; /* Invalid prec to detect that the cache is not
+                               valid. Maybe add a flag? */
+  cache->func = func;
+}
+#endif
+
+/* Free the cached value (if any) and mark the cache as empty again
+   (precision 0), so that clearing twice is harmless. */
+void
+mpfr_clear_cache (mpfr_cache_t cache)
+{
+  if (MPFR_PREC (cache->x) != 0)
+    mpfr_clear (cache->x);
+  MPFR_PREC (cache->x) = 0;
+}
+
+/* Set dest to the cached constant, rounded in direction rnd.  If the
+   cached precision is smaller than PREC(dest), the constant is first
+   recomputed (to nearest) at the larger precision and the cache updated.
+   Returns the ternary value of the overall rounding. */
+int
+mpfr_cache (mpfr_ptr dest, mpfr_cache_t cache, mpfr_rnd_t rnd)
+{
+  mpfr_prec_t prec = MPFR_PREC (dest);
+  mpfr_prec_t pold = MPFR_PREC (cache->x);
+  int inexact, sign;
+  MPFR_SAVE_EXPO_DECL (expo);
+
+  MPFR_SAVE_EXPO_MARK (expo);
+
+  if (MPFR_UNLIKELY (prec > pold))
+    {
+      /* No previous result in the cache or the precision of the
+         previous result is not sufficient. */
+
+      if (MPFR_UNLIKELY (pold == 0)) /* No previous result. */
+        mpfr_init2 (cache->x, prec);
+
+      /* Update the cache. */
+      pold = prec;
+      /* no need to keep the previous value */
+      mpfr_set_prec (cache->x, pold);
+      cache->inexact = (*cache->func) (cache->x, MPFR_RNDN);
+    }
+
+  /* now pold >= prec is the precision of cache->x */
+
+  /* First, check if the cache has the exact value (unlikely).
+     Else the exact value is between (assuming x=cache->x > 0):
+     x and x+ulp(x) if cache->inexact < 0,
+     x-ulp(x) and x if cache->inexact > 0,
+     and abs(x-exact) <= ulp(x)/2. */
+
+  /* we assume all cached constants are positive */
+  MPFR_ASSERTN (MPFR_IS_POS (cache->x)); /* TODO... */
+  sign = MPFR_SIGN (cache->x);
+  MPFR_SET_EXP (dest, MPFR_GET_EXP (cache->x));
+  MPFR_SET_SIGN (dest, sign);
+
+  /* round cache->x from precision pold down to precision prec;
+     the first statement argument of MPFR_RNDRAW_GEN handles the
+     halfway case using the sign of cache->inexact to decide the
+     correct direction, the second handles an exponent increase */
+  MPFR_RNDRAW_GEN (inexact, dest,
+                   MPFR_MANT (cache->x), pold, rnd, sign,
+                   if (MPFR_UNLIKELY (cache->inexact == 0))
+                     {
+                       if ((_sp[0] & _ulp) == 0)
+                         {
+                           inexact = -sign;
+                           goto trunc_doit;
+                         }
+                       else
+                         goto addoneulp;
+                     }
+                   else if (cache->inexact < 0)
+                     goto addoneulp;
+                   else /* cache->inexact > 0 */
+                     {
+                       inexact = -sign;
+                       goto trunc_doit;
+                     },
+                   if (MPFR_UNLIKELY (++MPFR_EXP (dest) > __gmpfr_emax))
+                     mpfr_overflow (dest, rnd, sign);
+                   );
+
+  /* If the rounding above was exact (inexact == 0), the ternary value
+     and possibly the result must still reflect the inexactness of the
+     cached value itself, depending on the rounding direction. */
+  if (MPFR_LIKELY (cache->inexact != 0))
+    {
+      switch (rnd)
+        {
+        case MPFR_RNDZ:
+        case MPFR_RNDD:
+          if (MPFR_UNLIKELY (inexact == 0))
+            {
+              inexact = cache->inexact;
+              if (inexact > 0)
+                {
+                  mpfr_nextbelow (dest);
+                  inexact = -inexact;
+                }
+            }
+          break;
+        case MPFR_RNDU:
+        case MPFR_RNDA:
+          if (MPFR_UNLIKELY (inexact == 0))
+            {
+              inexact = cache->inexact;
+              if (inexact < 0)
+                {
+                  mpfr_nextabove (dest);
+                  inexact = -inexact;
+                }
+            }
+          break;
+        default: /* MPFR_RNDN */
+          if (MPFR_UNLIKELY(inexact == 0))
+            inexact = cache->inexact;
+          break;
+        }
+    }
+
+  MPFR_SAVE_EXPO_FREE (expo);
+  return mpfr_check_range (dest, inexact, rnd);
+}
diff --git a/src/cbrt.c b/src/cbrt.c
new file mode 100644
index 000000000..4ecf28a46
--- /dev/null
+++ b/src/cbrt.c
@@ -0,0 +1,148 @@
+/* mpfr_cbrt -- cube root function.
+
+Copyright 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#define MPFR_NEED_LONGLONG_H
+#include "mpfr-impl.h"
+
+ /* The computation of y = x^(1/3) is done as follows:
+
+ Let x = sign * m * 2^(3*e) where m is an integer
+
+ with 2^(3n-3) <= m < 2^(3n) where n = PREC(y)
+
+ and m = s^3 + r where 0 <= r and m < (s+1)^3
+
+ we want that s has n bits i.e. s >= 2^(n-1), or m >= 2^(3n-3)
+ i.e. m must have at least 3n-2 bits
+
+ then x^(1/3) = s * 2^e if r=0
+ x^(1/3) = (s+1) * 2^e if round up
+ x^(1/3) = (s-1) * 2^e if round down
+ x^(1/3) = s * 2^e if nearest and r < 3/2*s^2+3/4*s+1/8
+ (s+1) * 2^e otherwise
+ */
+
+/* Set y to the cube root of x, rounded in direction rnd_mode.
+   Returns the usual MPFR ternary value.  See the comment above for
+   the integer-based algorithm. */
+int
+mpfr_cbrt (mpfr_ptr y, mpfr_srcptr x, mpfr_rnd_t rnd_mode)
+{
+  mpz_t m;
+  mpfr_exp_t e, r, sh;
+  mpfr_prec_t n, size_m, tmp;
+  int inexact, negative;
+  MPFR_SAVE_EXPO_DECL (expo);
+
+  /* special values */
+  if (MPFR_UNLIKELY (MPFR_IS_SINGULAR (x)))
+    {
+      if (MPFR_IS_NAN (x))
+        {
+          MPFR_SET_NAN (y);
+          MPFR_RET_NAN;
+        }
+      else if (MPFR_IS_INF (x))
+        {
+          MPFR_SET_INF (y);
+          MPFR_SET_SAME_SIGN (y, x);
+          MPFR_RET (0);
+        }
+      /* case 0: cbrt(+/- 0) = +/- 0 */
+      else /* x is necessarily 0 */
+        {
+          MPFR_ASSERTD (MPFR_IS_ZERO (x));
+          MPFR_SET_ZERO (y);
+          MPFR_SET_SAME_SIGN (y, x);
+          MPFR_RET (0);
+        }
+    }
+
+  /* General case */
+  MPFR_SAVE_EXPO_MARK (expo);
+  mpz_init (m);
+
+  e = mpfr_get_z_2exp (m, x); /* x = m * 2^e */
+  if ((negative = MPFR_IS_NEG(x)))
+    mpz_neg (m, m); /* work on |m|, the sign is restored at the end */
+  r = e % 3;
+  if (r < 0)
+    r += 3;
+  /* x = (m*2^r) * 2^(e-r) = (m*2^r) * 2^(3*q) */
+
+  MPFR_MPZ_SIZEINBASE2 (size_m, m);
+  /* one extra bit for rounding to nearest */
+  n = MPFR_PREC (y) + (rnd_mode == MPFR_RNDN);
+
+  /* we want 3*n-2 <= size_m + 3*sh + r <= 3*n
+     i.e. 3*sh + size_m + r <= 3*n */
+  sh = (3 * (mpfr_exp_t) n - (mpfr_exp_t) size_m - r) / 3;
+  sh = 3 * sh + r;
+  if (sh >= 0)
+    {
+      mpz_mul_2exp (m, m, sh);
+      e = e - sh;
+    }
+  else if (r > 0)
+    {
+      mpz_mul_2exp (m, m, r);
+      e = e - r;
+    }
+
+  /* invariant: x = m*2^e, with e divisible by 3 */
+
+  /* we reuse the variable m to store the cube root, since it is not needed
+     any more: we just need to know if the root is exact */
+  inexact = mpz_root (m, m, 3) == 0;
+
+  MPFR_MPZ_SIZEINBASE2 (tmp, m);
+  sh = tmp - n;
+  if (sh > 0) /* we have to flush to 0 the last sh bits from m */
+    {
+      /* the discarded bits also make the result inexact if any is set */
+      inexact = inexact || ((mpfr_exp_t) mpz_scan1 (m, 0) < sh);
+      mpz_fdiv_q_2exp (m, m, sh);
+      e += 3 * sh;
+    }
+
+  if (inexact)
+    {
+      /* rounding the absolute value: invert the mode for negative x */
+      if (negative)
+        rnd_mode = MPFR_INVERT_RND (rnd_mode);
+      if (rnd_mode == MPFR_RNDU || rnd_mode == MPFR_RNDA
+          || (rnd_mode == MPFR_RNDN && mpz_tstbit (m, 0)))
+        inexact = 1, mpz_add_ui (m, m, 1);
+      else
+        inexact = -1;
+    }
+
+  /* either inexact is not zero, and the conversion is exact, i.e. inexact
+     is not changed; or inexact=0, and inexact is set only when
+     rnd_mode=MPFR_RNDN and bit (n+1) from m is 1 */
+  inexact += mpfr_set_z (y, m, MPFR_RNDN);
+  MPFR_SET_EXP (y, MPFR_GET_EXP (y) + e / 3);
+
+  if (negative)
+    {
+      MPFR_CHANGE_SIGN (y);
+      inexact = -inexact;
+    }
+
+  mpz_clear (m);
+  MPFR_SAVE_EXPO_FREE (expo);
+  return mpfr_check_range (y, inexact, rnd_mode);
+}
diff --git a/src/check.c b/src/check.c
new file mode 100644
index 000000000..e0137cebe
--- /dev/null
+++ b/src/check.c
@@ -0,0 +1,80 @@
+/* mpfr_check -- Check if a floating-point number has not been corrupted.
+
+Copyright 2003, 2004, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#include "mpfr-impl.h"
+
+/*
+ * Check if x is a valid mpfr_t initializes by mpfr_init
+ * Returns 0 if isn't valid
+ */
+int
+mpfr_check (mpfr_srcptr x)
+{
+ mp_size_t s, i;
+ mp_limb_t tmp;
+ volatile mp_limb_t *xm;
+ int rw;
+
+ /* Check Sign */
+ if (MPFR_SIGN(x) != MPFR_SIGN_POS && MPFR_SIGN(x) != MPFR_SIGN_NEG)
+ return 0;
+ /* Check Precision */
+ if ( (MPFR_PREC(x) < MPFR_PREC_MIN) || (MPFR_PREC(x) > MPFR_PREC_MAX))
+ return 0;
+ /* Check Mantissa */
+ xm = MPFR_MANT(x);
+ if (!xm)
+ return 0;
+ /* Check size of mantissa */
+ s = MPFR_GET_ALLOC_SIZE(x);
+ if (s<=0 || s > MP_SIZE_T_MAX ||
+ MPFR_PREC(x) > ((mpfr_prec_t)s*GMP_NUMB_BITS))
+ return 0;
+ /* Acces all the mp_limb of the mantissa: may do a seg fault */
+ for(i = 0 ; i < s ; i++)
+ tmp = xm[i];
+ /* Check if it isn't singular*/
+ if (MPFR_IS_PURE_FP(x))
+ {
+ /* Check first mp_limb of mantissa (Must start with a 1 bit) */
+ if ( ((xm[MPFR_LIMB_SIZE(x)-1])>>(GMP_NUMB_BITS-1)) == 0)
+ return 0;
+ /* Check last mp_limb of mantissa */
+ rw = (MPFR_PREC(x) % GMP_NUMB_BITS);
+ if (rw != 0)
+ {
+ tmp = MPFR_LIMB_MASK (GMP_NUMB_BITS - rw);
+ if ((xm[0] & tmp) != 0)
+ return 0;
+ }
+ /* Check exponent range */
+ if ((MPFR_EXP (x) < __gmpfr_emin) || (MPFR_EXP (x) > __gmpfr_emax))
+ return 0;
+ }
+ else
+ {
+ /* Singular value is zero, inf or nan */
+ MPFR_ASSERTD(MPFR_IS_ZERO(x) || MPFR_IS_NAN(x) || MPFR_IS_INF(x));
+ }
+ return 1;
+}
+
diff --git a/src/clear.c b/src/clear.c
new file mode 100644
index 000000000..267fd2329
--- /dev/null
+++ b/src/clear.c
@@ -0,0 +1,31 @@
+/* mpfr_clear -- free the memory space allocated for a floating-point number
+
+Copyright 1999, 2000, 2001, 2004, 2005, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#include "mpfr-impl.h"
+
+void
+mpfr_clear (mpfr_ptr m)
+{
+ (*__gmp_free_func) (MPFR_GET_REAL_PTR (m),
+ MPFR_MALLOC_SIZE (MPFR_GET_ALLOC_SIZE (m)));
+ MPFR_MANT (m) = (mp_limb_t *) 0;
+}
diff --git a/src/clears.c b/src/clears.c
new file mode 100644
index 000000000..c3a061029
--- /dev/null
+++ b/src/clears.c
@@ -0,0 +1,61 @@
+/* mpfr_clears -- free the memory space allocated for several
+ floating-point numbers
+
+Copyright 2003, 2004, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#ifdef HAVE_CONFIG_H
+#undef HAVE_STDARG
+#include "config.h" /* for a build within gmp */
+#endif
+
+#if HAVE_STDARG
+# include <stdarg.h>
+#else
+# include <varargs.h>
+#endif
+
+#include "mpfr-impl.h"
+
+void
+#if HAVE_STDARG
+mpfr_clears (mpfr_ptr x, ...)
+#else
+mpfr_clears (va_alist)
+ va_dcl
+#endif
+{
+ va_list arg;
+
+#if HAVE_STDARG
+ va_start (arg, x);
+#else
+ mpfr_ptr x;
+ va_start(arg);
+ x = va_arg (arg, mpfr_ptr);
+#endif
+
+ while (x != 0)
+ {
+ mpfr_clear (x);
+ x = (mpfr_ptr) va_arg (arg, mpfr_ptr);
+ }
+ va_end (arg);
+}
diff --git a/src/cmp.c b/src/cmp.c
new file mode 100644
index 000000000..667a12439
--- /dev/null
+++ b/src/cmp.c
@@ -0,0 +1,104 @@
+/* mpfr_cmp -- compare two floating-point numbers
+
+Copyright 1999, 2001, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#include "mpfr-impl.h"
+
+/* returns 0 iff b = sign(s) * c
+ a positive value iff b > sign(s) * c
+ a negative value iff b < sign(s) * c
+ returns 0 and sets erange flag if b and/or c is NaN.
+*/
+
+int
+mpfr_cmp3 (mpfr_srcptr b, mpfr_srcptr c, int s)
+{
+ mpfr_exp_t be, ce;
+ mp_size_t bn, cn;
+ mp_limb_t *bp, *cp;
+
+ s = MPFR_MULT_SIGN( s , MPFR_SIGN(c) );
+
+ if (MPFR_ARE_SINGULAR(b, c))
+ {
+ if (MPFR_IS_NAN (b) || MPFR_IS_NAN (c))
+ {
+ MPFR_SET_ERANGE ();
+ return 0;
+ }
+ else if (MPFR_IS_INF(b))
+ {
+ if (MPFR_IS_INF(c) && s == MPFR_SIGN(b) )
+ return 0;
+ else
+ return MPFR_SIGN(b);
+ }
+ else if (MPFR_IS_INF(c))
+ return -s;
+ else if (MPFR_IS_ZERO(b))
+ return MPFR_IS_ZERO(c) ? 0 : -s;
+ else /* necessarily c=0 */
+ return MPFR_SIGN(b);
+ }
+ /* b and c are real numbers */
+ if (s != MPFR_SIGN(b))
+ return MPFR_SIGN(b);
+
+ /* now signs are equal */
+
+ be = MPFR_GET_EXP (b);
+ ce = MPFR_GET_EXP (c);
+ if (be > ce)
+ return s;
+ if (be < ce)
+ return -s;
+
+ /* both signs and exponents are equal */
+
+ bn = (MPFR_PREC(b)-1)/GMP_NUMB_BITS;
+ cn = (MPFR_PREC(c)-1)/GMP_NUMB_BITS;
+
+ bp = MPFR_MANT(b);
+ cp = MPFR_MANT(c);
+
+ for ( ; bn >= 0 && cn >= 0; bn--, cn--)
+ {
+ if (bp[bn] > cp[cn])
+ return s;
+ if (bp[bn] < cp[cn])
+ return -s;
+ }
+ for ( ; bn >= 0; bn--)
+ if (bp[bn])
+ return s;
+ for ( ; cn >= 0; cn--)
+ if (cp[cn])
+ return -s;
+
+ return 0;
+}
+
+#undef mpfr_cmp
+int
+mpfr_cmp (mpfr_srcptr b, mpfr_srcptr c)
+{
+ return mpfr_cmp3 (b, c, 1);
+}
diff --git a/src/cmp2.c b/src/cmp2.c
new file mode 100644
index 000000000..14d28a389
--- /dev/null
+++ b/src/cmp2.c
@@ -0,0 +1,243 @@
+/* mpfr_cmp2 -- exponent shift when subtracting two numbers.
+
+Copyright 1999, 2000, 2001, 2002, 2003, 2004, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+
+#define MPFR_NEED_LONGLONG_H
+#include "mpfr-impl.h"
+
+/* If |b| != |c|, puts the number of canceled bits when one subtracts |c|
+ from |b| in *cancel. Returns the sign of the difference.
+
+ Assumes neither of b or c is NaN, +/- infinity, or +/- 0.
+
+ In other terms, if |b| != |c|, mpfr_cmp2 (b, c) returns
+ EXP(max(|b|,|c|)) - EXP(|b| - |c|).
+*/
+
+int
+mpfr_cmp2 (mpfr_srcptr b, mpfr_srcptr c, mpfr_prec_t *cancel)
+{
+ mp_limb_t *bp, *cp, bb, cc = 0, lastc = 0, dif, high_dif = 0;
+ mp_size_t bn, cn;
+ mpfr_uexp_t diff_exp;
+ mpfr_prec_t res = 0;
+ int sign;
+
+ /* b=c should not happen, since cmp2 is called only from agm
+ (with different variables), and from sub1 (if same b=c, then
+ sub1sp would be called instead */
+ MPFR_ASSERTD (b != c);
+
+ /* the cases b=0 or c=0 are also treated apart in agm and sub
+ (which calls sub1) */
+ MPFR_ASSERTD (MPFR_IS_PURE_FP(b));
+ MPFR_ASSERTD (MPFR_IS_PURE_FP(c));
+
+ if (MPFR_GET_EXP (b) >= MPFR_GET_EXP (c))
+ {
+ sign = 1;
+ diff_exp = (mpfr_uexp_t) MPFR_GET_EXP (b) - MPFR_GET_EXP (c);
+
+ bp = MPFR_MANT(b);
+ cp = MPFR_MANT(c);
+
+ bn = (MPFR_PREC(b) - 1) / GMP_NUMB_BITS;
+ cn = (MPFR_PREC(c) - 1) / GMP_NUMB_BITS; /* # of limbs of c minus 1 */
+
+ if (MPFR_UNLIKELY( diff_exp == 0 ))
+ {
+ while (bn >= 0 && cn >= 0 && bp[bn] == cp[cn])
+ {
+ bn--;
+ cn--;
+ res += GMP_NUMB_BITS;
+ }
+
+ if (MPFR_UNLIKELY (bn < 0))
+ {
+ if (MPFR_LIKELY (cn < 0)) /* b = c */
+ return 0;
+
+ bp = cp;
+ bn = cn;
+ cn = -1;
+ sign = -1;
+ }
+
+ if (MPFR_UNLIKELY (cn < 0))
+ /* c discards exactly the upper part of b */
+ {
+ unsigned int z;
+
+ MPFR_ASSERTD (bn >= 0);
+
+ while (bp[bn] == 0)
+ {
+ if (--bn < 0) /* b = c */
+ return 0;
+ res += GMP_NUMB_BITS;
+ }
+
+ count_leading_zeros(z, bp[bn]); /* bp[bn] <> 0 */
+ *cancel = res + z;
+ return sign;
+ }
+
+ MPFR_ASSERTD (bn >= 0);
+ MPFR_ASSERTD (cn >= 0);
+ MPFR_ASSERTD (bp[bn] != cp[cn]);
+ if (bp[bn] < cp[cn])
+ {
+ mp_limb_t *tp;
+ mp_size_t tn;
+
+ tp = bp; bp = cp; cp = tp;
+ tn = bn; bn = cn; cn = tn;
+ sign = -1;
+ }
+ }
+ } /* MPFR_EXP(b) >= MPFR_EXP(c) */
+ else /* MPFR_EXP(b) < MPFR_EXP(c) */
+ {
+ sign = -1;
+ diff_exp = (mpfr_uexp_t) MPFR_GET_EXP (c) - MPFR_GET_EXP (b);
+
+ bp = MPFR_MANT(c);
+ cp = MPFR_MANT(b);
+
+ bn = (MPFR_PREC(c) - 1) / GMP_NUMB_BITS;
+ cn = (MPFR_PREC(b) - 1) / GMP_NUMB_BITS;
+ }
+
+ /* now we have removed the identical upper limbs of b and c
+ (can happen only when diff_exp = 0), and after the possible
+ swap, we have |b| > |c|: bp[bn] > cc, bn >= 0, cn >= 0,
+ diff_exp = EXP(b) - EXP(c).
+ */
+
+ if (MPFR_LIKELY (diff_exp < GMP_NUMB_BITS))
+ {
+ cc = cp[cn] >> diff_exp;
+ /* warning: a shift by GMP_NUMB_BITS may give wrong results */
+ if (diff_exp)
+ lastc = cp[cn] << (GMP_NUMB_BITS - diff_exp);
+ cn--;
+ }
+ else
+ diff_exp -= GMP_NUMB_BITS; /* cc = 0 */
+
+ dif = bp[bn--] - cc; /* necessarily dif >= 1 */
+ MPFR_ASSERTD(dif >= 1);
+
+ /* now high_dif = 0, dif >= 1, lastc is the neglected part of cp[cn+1] */
+
+ while (MPFR_UNLIKELY ((cn >= 0 || lastc != 0)
+ && (high_dif == 0) && (dif == 1)))
+ { /* dif=1 implies diff_exp = 0 or 1 */
+ bb = (bn >= 0) ? bp[bn] : 0;
+ cc = lastc;
+ if (cn >= 0)
+ {
+ if (diff_exp == 0)
+ {
+ cc += cp[cn];
+ }
+ else /* diff_exp = 1 */
+ {
+ cc += cp[cn] >> 1;
+ lastc = cp[cn] << (GMP_NUMB_BITS - 1);
+ }
+ }
+ else
+ lastc = 0;
+ high_dif = 1 - mpn_sub_n (&dif, &bb, &cc, 1);
+ bn--;
+ cn--;
+ res += GMP_NUMB_BITS;
+ }
+
+ /* (cn<0 and lastc=0) or (high_dif,dif)<>(0,1) */
+
+ if (MPFR_UNLIKELY (high_dif != 0)) /* high_dif == 1 */
+ {
+ res--;
+ if (dif != 0)
+ {
+ *cancel = res;
+ return sign;
+ }
+ }
+ else /* high_dif == 0 */
+ {
+ unsigned int z;
+
+ count_leading_zeros(z, dif); /* dif > 1 here */
+ res += z;
+ if (MPFR_LIKELY(dif != (MPFR_LIMB_ONE << (GMP_NUMB_BITS - z - 1))))
+ { /* dif is not a power of two */
+ *cancel = res;
+ return sign;
+ }
+ }
+
+ /* now result is res + (low(b) < low(c)) */
+ while (MPFR_UNLIKELY (bn >= 0 && (cn >= 0 || lastc != 0)))
+ {
+ if (diff_exp >= GMP_NUMB_BITS)
+ diff_exp -= GMP_NUMB_BITS;
+ else
+ {
+ cc = lastc;
+ if (cn >= 0)
+ {
+ cc += cp[cn] >> diff_exp;
+ if (diff_exp != 0)
+ lastc = cp[cn] << (GMP_NUMB_BITS - diff_exp);
+ }
+ else
+ lastc = 0;
+ cn--;
+ }
+ if (bp[bn] != cc)
+ {
+ *cancel = res + (bp[bn] < cc);
+ return sign;
+ }
+ bn--;
+ }
+
+ if (bn < 0)
+ {
+ if (lastc != 0)
+ res++;
+ else
+ {
+ while (cn >= 0 && cp[cn] == 0)
+ cn--;
+ if (cn >= 0)
+ res++;
+ }
+ }
+
+ *cancel = res;
+ return sign;
+}
diff --git a/src/cmp_abs.c b/src/cmp_abs.c
new file mode 100644
index 000000000..3f126d49e
--- /dev/null
+++ b/src/cmp_abs.c
@@ -0,0 +1,94 @@
+/* mpfr_cmpabs -- compare the absolute values of two FP numbers
+
+Copyright 1999, 2001, 2002, 2003, 2004, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#include "mpfr-impl.h"
+
+/* Return a positive value if abs(b) > abs(c), 0 if abs(b) = abs(c), and
+ a negative value if abs(b) < abs(c). Neither b nor c may be NaN. */
+
+int
+mpfr_cmpabs (mpfr_srcptr b, mpfr_srcptr c)
+{
+ mpfr_exp_t be, ce;
+ mp_size_t bn, cn;
+ mp_limb_t *bp, *cp;
+
+ if (MPFR_ARE_SINGULAR (b, c))
+ {
+ if (MPFR_IS_NAN (b) || MPFR_IS_NAN (c))
+ {
+ MPFR_SET_ERANGE ();
+ return 0;
+ }
+ else if (MPFR_IS_INF (b))
+ return ! MPFR_IS_INF (c);
+ else if (MPFR_IS_INF (c))
+ return -1;
+ else if (MPFR_IS_ZERO (c))
+ return ! MPFR_IS_ZERO (b);
+ else /* b == 0 */
+ return -1;
+ }
+
+ MPFR_ASSERTD (MPFR_IS_PURE_FP (b));
+ MPFR_ASSERTD (MPFR_IS_PURE_FP (c));
+
+ /* Now that we know that b and c are pure FP numbers (i.e. they have
+ a meaningful exponent), we use MPFR_EXP instead of MPFR_GET_EXP to
+ allow exponents outside the current exponent range. For instance,
+ this is useful for mpfr_pow, which compares values to __gmpfr_one.
+ This is for internal use only! For compatibility with other MPFR
+ versions, the user must still provide values that are representable
+ in the current exponent range. */
+ be = MPFR_EXP (b);
+ ce = MPFR_EXP (c);
+ if (be > ce)
+ return 1;
+ if (be < ce)
+ return -1;
+
+ /* exponents are equal */
+
+ bn = MPFR_LIMB_SIZE(b)-1;
+ cn = MPFR_LIMB_SIZE(c)-1;
+
+ bp = MPFR_MANT(b);
+ cp = MPFR_MANT(c);
+
+ for ( ; bn >= 0 && cn >= 0; bn--, cn--)
+ {
+ if (bp[bn] > cp[cn])
+ return 1;
+ if (bp[bn] < cp[cn])
+ return -1;
+ }
+
+ for ( ; bn >= 0; bn--)
+ if (bp[bn])
+ return 1;
+
+ for ( ; cn >= 0; cn--)
+ if (cp[cn])
+ return -1;
+
+ return 0;
+}
diff --git a/src/cmp_d.c b/src/cmp_d.c
new file mode 100644
index 000000000..4026f38bd
--- /dev/null
+++ b/src/cmp_d.c
@@ -0,0 +1,38 @@
+/* mpfr_cmp_d -- compare a floating-point number with a double
+
+Copyright 2003, 2004, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#include "mpfr-impl.h"
+
+int
+mpfr_cmp_d (mpfr_srcptr b, double d)
+{
+ mpfr_t tmp;
+ int res;
+
+ mpfr_init2 (tmp, IEEE_DBL_MANT_DIG);
+ res = mpfr_set_d (tmp, d, MPFR_RNDN);
+ MPFR_ASSERTD (res == 0);
+ res = mpfr_cmp (b, tmp);
+ mpfr_clear (tmp);
+
+ return res;
+}
diff --git a/src/cmp_ld.c b/src/cmp_ld.c
new file mode 100644
index 000000000..02770fd7d
--- /dev/null
+++ b/src/cmp_ld.c
@@ -0,0 +1,38 @@
+/* mpfr_cmp_ld -- compare a floating-point number with a long double
+
+Copyright 2004, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#include "mpfr-impl.h"
+
+int
+mpfr_cmp_ld (mpfr_srcptr b, long double d)
+{
+ mpfr_t tmp;
+ int res;
+
+ mpfr_init2 (tmp, MPFR_LDBL_MANT_DIG);
+ res = mpfr_set_ld (tmp, d, MPFR_RNDN);
+ MPFR_ASSERTD (res == 0);
+ res = mpfr_cmp (b, tmp);
+ mpfr_clear (tmp);
+
+ return res;
+}
diff --git a/src/cmp_si.c b/src/cmp_si.c
new file mode 100644
index 000000000..4fbf3abfa
--- /dev/null
+++ b/src/cmp_si.c
@@ -0,0 +1,101 @@
+/* mpfr_cmp_si_2exp -- compare a floating-point number with a signed
+machine integer multiplied by a power of 2
+
+Copyright 1999, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#define MPFR_NEED_LONGLONG_H
+#include "mpfr-impl.h"
+
+/* returns a positive value if b > i*2^f,
+ a negative value if b < i*2^f,
+ zero if b = i*2^f.
+ b must not be NaN.
+*/
+
+int
+mpfr_cmp_si_2exp (mpfr_srcptr b, long int i, mpfr_exp_t f)
+{
+ int si;
+
+ si = i < 0 ? -1 : 1; /* sign of i */
+ if (MPFR_UNLIKELY (MPFR_IS_SINGULAR (b)))
+ {
+ if (MPFR_IS_INF(b))
+ return MPFR_INT_SIGN(b);
+ else if (MPFR_IS_ZERO(b))
+ return i != 0 ? -si : 0;
+ /* NAN */
+ MPFR_SET_ERANGE ();
+ return 0;
+ }
+ else if (MPFR_SIGN(b) != si || i == 0)
+ return MPFR_INT_SIGN (b);
+ else /* b and i are of same sign si */
+ {
+ mpfr_exp_t e;
+ unsigned long ai;
+ int k;
+ mp_size_t bn;
+ mp_limb_t c, *bp;
+
+ ai = SAFE_ABS(unsigned long, i);
+
+ /* ai must be representable in a mp_limb_t */
+ MPFR_ASSERTN(ai == (mp_limb_t) ai);
+
+ e = MPFR_GET_EXP (b); /* 2^(e-1) <= b < 2^e */
+ if (e <= f)
+ return -si;
+ if (f < MPFR_EMAX_MAX - GMP_NUMB_BITS &&
+ e > f + GMP_NUMB_BITS)
+ return si;
+
+ /* now f < e <= f + GMP_NUMB_BITS */
+ c = (mp_limb_t) ai;
+ count_leading_zeros(k, c);
+ if ((int) (e - f) > GMP_NUMB_BITS - k)
+ return si;
+ if ((int) (e - f) < GMP_NUMB_BITS - k)
+ return -si;
+
+ /* now b and i*2^f have the same exponent */
+ c <<= k;
+ bn = (MPFR_PREC(b) - 1) / GMP_NUMB_BITS;
+ bp = MPFR_MANT(b);
+ if (bp[bn] > c)
+ return si;
+ if (bp[bn] < c)
+ return -si;
+
+ /* most significant limbs agree, check remaining limbs from b */
+ while (bn > 0)
+ if (bp[--bn])
+ return si;
+ return 0;
+ }
+}
+
+#undef mpfr_cmp_si
+int
+mpfr_cmp_si (mpfr_srcptr b, long int i)
+{
+ return mpfr_cmp_si_2exp (b, i, 0);
+}
diff --git a/src/cmp_ui.c b/src/cmp_ui.c
new file mode 100644
index 000000000..c414273b3
--- /dev/null
+++ b/src/cmp_ui.c
@@ -0,0 +1,101 @@
+/* mpfr_cmp_ui_2exp -- compare a floating-point number with an unsigned
+machine integer multiplied by a power of 2
+
+Copyright 1999, 2001, 2002, 2003, 2004, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#define MPFR_NEED_LONGLONG_H
+#include "mpfr-impl.h"
+
+/* returns a positive value if b > i*2^f,
+ a negative value if b < i*2^f,
+ zero if b = i*2^f.
+ b must not be NaN
+*/
+
+int
+mpfr_cmp_ui_2exp (mpfr_srcptr b, unsigned long int i, mpfr_exp_t f)
+{
+ if (MPFR_UNLIKELY( MPFR_IS_SINGULAR(b) ))
+ {
+ if (MPFR_IS_NAN (b))
+ {
+ MPFR_SET_ERANGE ();
+ return 0;
+ }
+ else if (MPFR_IS_INF(b))
+ return MPFR_INT_SIGN (b);
+ else /* since b cannot be NaN, b=0 here */
+ return i != 0 ? -1 : 0;
+ }
+
+ if (MPFR_IS_NEG (b))
+ return -1;
+ /* now b > 0 */
+ else if (MPFR_UNLIKELY(i == 0))
+ return 1;
+ else /* b > 0, i > 0 */
+ {
+ mpfr_exp_t e;
+ int k;
+ mp_size_t bn;
+ mp_limb_t c, *bp;
+
+ /* i must be representable in a mp_limb_t */
+ MPFR_ASSERTN(i == (mp_limb_t) i);
+
+ e = MPFR_GET_EXP (b); /* 2^(e-1) <= b < 2^e */
+ if (e <= f)
+ return -1;
+ if (f < MPFR_EMAX_MAX - GMP_NUMB_BITS &&
+ e > f + GMP_NUMB_BITS)
+ return 1;
+
+ /* now f < e <= f + GMP_NUMB_BITS */
+ c = (mp_limb_t) i;
+ count_leading_zeros(k, c);
+ if ((int) (e - f) > GMP_NUMB_BITS - k)
+ return 1;
+ if ((int) (e - f) < GMP_NUMB_BITS - k)
+ return -1;
+
+ /* now b and i*2^f have the same exponent */
+ c <<= k;
+ bn = (MPFR_PREC(b) - 1) / GMP_NUMB_BITS;
+ bp = MPFR_MANT(b);
+ if (bp[bn] > c)
+ return 1;
+ if (bp[bn] < c)
+ return -1;
+
+ /* most significant limbs agree, check remaining limbs from b */
+ while (bn > 0)
+ if (bp[--bn] != 0)
+ return 1;
+ return 0;
+ }
+}
+
+#undef mpfr_cmp_ui
+int
+mpfr_cmp_ui (mpfr_srcptr b, unsigned long int i)
+{
+ return mpfr_cmp_ui_2exp (b, i, 0);
+}
diff --git a/src/comparisons.c b/src/comparisons.c
new file mode 100644
index 000000000..53fd88c2b
--- /dev/null
+++ b/src/comparisons.c
@@ -0,0 +1,78 @@
+/* comparison predicates
+
+Copyright 2002, 2003, 2004, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#include "mpfr-impl.h"
+
+/* Note: these functions currently use mpfr_cmp; they could have their
+ own code to be faster. */
+
+/* = < > unordered
+ * mpfr_greater_p 0 0 1 0
+ * mpfr_greaterequal_p 1 0 1 0
+ * mpfr_less_p 0 1 0 0
+ * mpfr_lessequal_p 1 1 0 0
+ * mpfr_lessgreater_p 0 1 1 0
+ * mpfr_equal_p 1 0 0 0
+ * mpfr_unordered_p 0 0 0 1
+ */
+
+int
+mpfr_greater_p (mpfr_srcptr x, mpfr_srcptr y)
+{
+ return MPFR_IS_NAN(x) || MPFR_IS_NAN(y) ? 0 : (mpfr_cmp (x, y) > 0);
+}
+
+int
+mpfr_greaterequal_p (mpfr_srcptr x, mpfr_srcptr y)
+{
+ return MPFR_IS_NAN(x) || MPFR_IS_NAN(y) ? 0 : (mpfr_cmp (x, y) >= 0);
+}
+
+int
+mpfr_less_p (mpfr_srcptr x, mpfr_srcptr y)
+{
+ return MPFR_IS_NAN(x) || MPFR_IS_NAN(y) ? 0 : (mpfr_cmp (x, y) < 0);
+}
+
+int
+mpfr_lessequal_p (mpfr_srcptr x, mpfr_srcptr y)
+{
+ return MPFR_IS_NAN(x) || MPFR_IS_NAN(y) ? 0 : (mpfr_cmp (x, y) <= 0);
+}
+
+int
+mpfr_lessgreater_p (mpfr_srcptr x, mpfr_srcptr y)
+{
+ return MPFR_IS_NAN(x) || MPFR_IS_NAN(y) ? 0 : (mpfr_cmp (x, y) != 0);
+}
+
+int
+mpfr_equal_p (mpfr_srcptr x, mpfr_srcptr y)
+{
+ return MPFR_IS_NAN(x) || MPFR_IS_NAN(y) ? 0 : (mpfr_cmp (x, y) == 0);
+}
+
+int
+mpfr_unordered_p (mpfr_srcptr x, mpfr_srcptr y)
+{
+ return MPFR_IS_NAN(x) || MPFR_IS_NAN(y);
+}
diff --git a/src/const_catalan.c b/src/const_catalan.c
new file mode 100644
index 000000000..7905f4653
--- /dev/null
+++ b/src/const_catalan.c
@@ -0,0 +1,152 @@
+/* mpfr_const_catalan -- compute Catalan's constant.
+
+Copyright 2005, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#define MPFR_NEED_LONGLONG_H
+#include "mpfr-impl.h"
+
+/* Declare the cache */
+MPFR_DECL_INIT_CACHE(__gmpfr_cache_const_catalan, mpfr_const_catalan_internal);
+
+/* Set User Interface */
+#undef mpfr_const_catalan
+int
+mpfr_const_catalan (mpfr_ptr x, mpfr_rnd_t rnd_mode) {
+ return mpfr_cache (x, __gmpfr_cache_const_catalan, rnd_mode);
+}
+
+/* return T, Q such that T/Q = sum(k!^2/(2k)!/(2k+1)^2, k=n1..n2-1) */
+static void
+S (mpz_t T, mpz_t P, mpz_t Q, unsigned long n1, unsigned long n2)
+{
+ if (n2 == n1 + 1)
+ {
+ if (n1 == 0)
+ {
+ mpz_set_ui (P, 1);
+ mpz_set_ui (Q, 1);
+ }
+ else
+ {
+ mpz_set_ui (P, 2 * n1 - 1);
+ mpz_mul_ui (P, P, n1);
+ mpz_ui_pow_ui (Q, 2 * n1 + 1, 2);
+ mpz_mul_2exp (Q, Q, 1);
+ }
+ mpz_set (T, P);
+ }
+ else
+ {
+ unsigned long m = (n1 + n2) / 2;
+ mpz_t T2, P2, Q2;
+ S (T, P, Q, n1, m);
+ mpz_init (T2);
+ mpz_init (P2);
+ mpz_init (Q2);
+ S (T2, P2, Q2, m, n2);
+ mpz_mul (T, T, Q2);
+ mpz_mul (T2, T2, P);
+ mpz_add (T, T, T2);
+ mpz_mul (P, P, P2);
+ mpz_mul (Q, Q, Q2);
+ mpz_clear (T2);
+ mpz_clear (P2);
+ mpz_clear (Q2);
+ }
+}
+
+/* Don't need to save/restore exponent range: the cache does it.
+ Catalan's constant is G = sum((-1)^k/(2*k+1)^2, k=0..infinity).
+ We compute it using formula (31) of Victor Adamchik's page
+ "33 representations for Catalan's constant"
+ http://www-2.cs.cmu.edu/~adamchik/articles/catalan/catalan.htm
+
+ G = Pi/8*log(2+sqrt(3)) + 3/8*sum(k!^2/(2k)!/(2k+1)^2,k=0..infinity)
+*/
+int
+mpfr_const_catalan_internal (mpfr_ptr g, mpfr_rnd_t rnd_mode)
+{
+ mpfr_t x, y, z;
+ mpz_t T, P, Q;
+ mpfr_prec_t pg, p;
+ int inex;
+ MPFR_ZIV_DECL (loop);
+ MPFR_GROUP_DECL (group);
+
+ MPFR_LOG_FUNC (("rnd_mode=%d", rnd_mode), ("g[%#R]=%R inex=%d", g, g, inex));
+
+ /* Here are the WC (max prec = 100.000.000)
+ Once we have found a chain of 11, we only look for bigger chain.
+ Found 3 '1' at 0
+ Found 5 '1' at 9
+ Found 6 '0' at 34
+ Found 9 '1' at 176
+ Found 11 '1' at 705
+ Found 12 '0' at 913
+ Found 14 '1' at 12762
+ Found 15 '1' at 152561
+ Found 16 '0' at 171725
+ Found 18 '0' at 525355
+ Found 20 '0' at 529245
+ Found 21 '1' at 6390133
+ Found 22 '0' at 7806417
+ Found 25 '1' at 11936239
+ Found 27 '1' at 51752950
+ */
+ pg = MPFR_PREC (g);
+ p = pg + MPFR_INT_CEIL_LOG2 (pg) + 7;
+
+ MPFR_GROUP_INIT_3 (group, p, x, y, z);
+ mpz_init (T);
+ mpz_init (P);
+ mpz_init (Q);
+
+ MPFR_ZIV_INIT (loop, p);
+ for (;;) {
+ mpfr_sqrt_ui (x, 3, MPFR_RNDU);
+ mpfr_add_ui (x, x, 2, MPFR_RNDU);
+ mpfr_log (x, x, MPFR_RNDU);
+ mpfr_const_pi (y, MPFR_RNDU);
+ mpfr_mul (x, x, y, MPFR_RNDN);
+ S (T, P, Q, 0, (p - 1) / 2);
+ mpz_mul_ui (T, T, 3);
+ mpfr_set_z (y, T, MPFR_RNDU);
+ mpfr_set_z (z, Q, MPFR_RNDD);
+ mpfr_div (y, y, z, MPFR_RNDN);
+ mpfr_add (x, x, y, MPFR_RNDN);
+ mpfr_div_2ui (x, x, 3, MPFR_RNDN);
+
+ if (MPFR_LIKELY (MPFR_CAN_ROUND (x, p - 5, pg, rnd_mode)))
+ break;
+
+ MPFR_ZIV_NEXT (loop, p);
+ MPFR_GROUP_REPREC_3 (group, p, x, y, z);
+ }
+ MPFR_ZIV_FREE (loop);
+ inex = mpfr_set (g, x, rnd_mode);
+
+ MPFR_GROUP_CLEAR (group);
+ mpz_clear (T);
+ mpz_clear (P);
+ mpz_clear (Q);
+
+ return inex;
+}
diff --git a/src/const_euler.c b/src/const_euler.c
new file mode 100644
index 000000000..e5128682d
--- /dev/null
+++ b/src/const_euler.c
@@ -0,0 +1,221 @@
+/* mpfr_const_euler -- Euler's constant
+
+Copyright 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#define MPFR_NEED_LONGLONG_H
+#include "mpfr-impl.h"
+
+/* Declare the cache */
+MPFR_DECL_INIT_CACHE(__gmpfr_cache_const_euler, mpfr_const_euler_internal);
+
+/* Set User Interface */
+#undef mpfr_const_euler
+/* Public entry point for Euler's constant gamma: delegates to the cache,
+   which calls mpfr_const_euler_internal on a miss and then rounds the
+   cached value to the precision of x in direction rnd_mode. */
+int
+mpfr_const_euler (mpfr_ptr x, mpfr_rnd_t rnd_mode) {
+ return mpfr_cache (x, __gmpfr_cache_const_euler, rnd_mode);
+}
+
+
+static void mpfr_const_euler_S2 (mpfr_ptr, unsigned long);
+static void mpfr_const_euler_R (mpfr_ptr, unsigned long);
+
+/* Compute Euler's constant gamma at the precision of x, as
+   S(n) - R(n) - log(n) for a suitably large n, inside a Ziv loop at
+   working precision m.  S is computed by mpfr_const_euler_S2 (binary
+   splitting), R by mpfr_const_euler_R.  Returns the ternary inexact
+   value (always nonzero: gamma is believed irrational). */
+int
+mpfr_const_euler_internal (mpfr_t x, mpfr_rnd_t rnd)
+{
+ mpfr_prec_t prec = MPFR_PREC(x), m, log2m;
+ mpfr_t y, z;
+ unsigned long n;
+ int inexact;
+ MPFR_ZIV_DECL (loop);
+
+ log2m = MPFR_INT_CEIL_LOG2 (prec);
+ m = prec + 2 * log2m + 23; /* initial working precision */
+
+ mpfr_init2 (y, m);
+ mpfr_init2 (z, m);
+
+ MPFR_ZIV_INIT (loop, m);
+ for (;;)
+ {
+ mpfr_exp_t exp_S, err;
+ /* since prec >= 1, we have m >= 24 here, which ensures n >= 9 below */
+ n = 1 + (unsigned long) ((double) m * LOG2 / 2.0);
+ MPFR_ASSERTD (n >= 9);
+ mpfr_const_euler_S2 (y, n); /* error <= 3 ulps */
+ exp_S = MPFR_EXP(y);
+ mpfr_set_ui (z, n, MPFR_RNDN);
+ mpfr_log (z, z, MPFR_RNDD); /* error <= 1 ulp */
+ mpfr_sub (y, y, z, MPFR_RNDN); /* S'(n) - log(n) */
+ /* the error is less than 1/2 + 3*2^(exp_S-EXP(y)) + 2^(EXP(z)-EXP(y))
+ <= 1/2 + 2^(exp_S+2-EXP(y)) + 2^(EXP(z)-EXP(y))
+ <= 1/2 + 2^(1+MAX(exp_S+2,EXP(z))-EXP(y)) */
+ err = 1 + MAX(exp_S + 2, MPFR_EXP(z)) - MPFR_EXP(y);
+ err = (err >= -1) ? err + 1 : 0; /* error <= 2^err ulp(y) */
+ exp_S = MPFR_EXP(y);
+ mpfr_const_euler_R (z, n); /* err <= ulp(1/2) = 2^(-m) */
+ mpfr_sub (y, y, z, MPFR_RNDN);
+ /* err <= 1/2 ulp(y) + 2^(-m) + 2^(err + exp_S - EXP(y)) ulp(y).
+ Since the result is between 0.5 and 1, ulp(y) = 2^(-m).
+ So we get 3/2*ulp(y) + 2^(err + exp_S - EXP(y)) ulp(y).
+ 3/2 + 2^e <= 2^(e+1) for e>=1, and <= 2^2 otherwise */
+ err = err + exp_S - MPFR_EXP(y);
+ err = (err >= 1) ? err + 1 : 2;
+ if (MPFR_LIKELY (MPFR_CAN_ROUND (y, m - err, prec, rnd)))
+ break;
+ MPFR_ZIV_NEXT (loop, m);
+ mpfr_set_prec (y, m);
+ mpfr_set_prec (z, m);
+ }
+ MPFR_ZIV_FREE (loop);
+
+ inexact = mpfr_set (x, y, rnd);
+
+ mpfr_clear (y);
+ mpfr_clear (z);
+
+ return inexact; /* always inexact */
+}
+
+/* Binary-splitting kernel for mpfr_const_euler_S2: computes, for the
+   terms a..b-1 of S(n), integers P, Q, T such that the partial sum is
+   T/Q and the product of term ratios is P/Q (see the comment before
+   mpfr_const_euler_S2: f(k)/f(k-1) = -n*(k-1)/k^2).  P is only needed
+   by the caller when need_P is nonzero.  Recurses on [a,c) and [c,b)
+   with c the midpoint, then combines:  T <- T*Q2 + T2*P,  Q <- Q*Q2,
+   P <- P*P2.  Common factors of 2 are stripped to keep operands small. */
+static void
+mpfr_const_euler_S2_aux (mpz_t P, mpz_t Q, mpz_t T, unsigned long n,
+ unsigned long a, unsigned long b, int need_P)
+{
+ if (a + 1 == b)
+ {
+ /* single term: P = n * (1 - a) (just n when a <= 1), Q = a^2 */
+ mpz_set_ui (P, n);
+ if (a > 1)
+ mpz_mul_si (P, P, 1 - (long) a);
+ mpz_set (T, P);
+ mpz_set_ui (Q, a);
+ mpz_mul_ui (Q, Q, a);
+ }
+ else
+ {
+ unsigned long c = (a + b) / 2;
+ mpz_t P2, Q2, T2;
+ mpfr_const_euler_S2_aux (P, Q, T, n, a, c, 1);
+ mpz_init (P2);
+ mpz_init (Q2);
+ mpz_init (T2);
+ mpfr_const_euler_S2_aux (P2, Q2, T2, n, c, b, 1);
+ mpz_mul (T, T, Q2);
+ mpz_mul (T2, T2, P);
+ mpz_add (T, T, T2);
+ if (need_P)
+ mpz_mul (P, P, P2);
+ mpz_mul (Q, Q, Q2);
+ mpz_clear (P2);
+ mpz_clear (Q2);
+ mpz_clear (T2);
+ /* divide by 2 if possible: remove the common power of two of
+ P, Q and T (v2 = min of their 2-adic valuations) */
+ {
+ unsigned long v2;
+ v2 = mpz_scan1 (P, 0);
+ c = mpz_scan1 (Q, 0);
+ if (c < v2)
+ v2 = c;
+ c = mpz_scan1 (T, 0);
+ if (c < v2)
+ v2 = c;
+ if (v2)
+ {
+ mpz_tdiv_q_2exp (P, P, v2);
+ mpz_tdiv_q_2exp (Q, Q, v2);
+ mpz_tdiv_q_2exp (T, T, v2);
+ }
+ }
+ }
+}
+
+/* computes S(n) = sum(n^k*(-1)^(k-1)/k!/k, k=1..ceil(4.319136566 * n))
+ using binary splitting.
+ We have S(n) = sum(f(k), k=1..N) with N=ceil(4.319136566 * n)
+ and f(k) = n^k*(-1)^(k-1)/k!/k,
+ thus f(k)/f(k-1) = -n*(k-1)/k^2
+*/
+static void
+mpfr_const_euler_S2 (mpfr_t x, unsigned long n)
+{
+ mpz_t P, Q, T;
+ /* NOTE(review): ALPHA is defined elsewhere in this file; presumably
+ 4.319136566 as in the comment above -- confirm */
+ unsigned long N = (unsigned long) (ALPHA * (double) n + 1.0);
+ mpz_init (P);
+ mpz_init (Q);
+ mpz_init (T);
+ mpfr_const_euler_S2_aux (P, Q, T, n, 1, N + 1, 0);
+ /* the sum is the exact rational T/Q; round it once into x */
+ mpfr_set_z (x, T, MPFR_RNDN);
+ mpfr_div_z (x, x, Q, MPFR_RNDN);
+ mpz_clear (P);
+ mpz_clear (Q);
+ mpz_clear (T);
+}
+
+/* computes R(n) = exp(-n)/n * sum(k!/(-n)^k, k=0..n-2)
+ with error at most 4*ulp(x). Assumes n>=2.
+ Since x <= exp(-n)/n <= 1/8, then 4*ulp(x) <= ulp(1).
+ The sum is evaluated in fixed point: integers scaled by 2^m. */
+static void
+mpfr_const_euler_R (mpfr_t x, unsigned long n)
+{
+ unsigned long k, m;
+ mpz_t a, s;
+ mpfr_t y;
+
+ MPFR_ASSERTN (n >= 2); /* ensures sum(k!/(-n)^k, k=0..n-2) >= 2/3 */
+
+ /* as we multiply the sum by exp(-n), we need only PREC(x) - n/LOG2 bits */
+ m = MPFR_PREC(x) - (unsigned long) ((double) n / LOG2);
+
+ mpz_init_set_ui (a, 1);
+ mpz_mul_2exp (a, a, m); /* a = 2^m, the k=0 term scaled by 2^m */
+ mpz_init_set (s, a);
+
+ for (k = 1; k <= n; k++)
+ {
+ /* a <- a * k / n, i.e. the next term k!/n^k (scaled); the sign
+ alternation of (-n)^k is handled by the sub/add below */
+ mpz_mul_ui (a, a, k);
+ mpz_fdiv_q_ui (a, a, n);
+ /* the error e(k) on a is e(k) <= 1 + k/n*e(k-1) with e(0)=0,
+ i.e. e(k) <= k */
+ if (k % 2)
+ mpz_sub (s, s, a);
+ else
+ mpz_add (s, s, a);
+ }
+ /* the error on s is at most 1+2+...+n = n*(n+1)/2 */
+ mpz_fdiv_q_ui (s, s, n); /* err <= 1 + (n+1)/2 */
+ MPFR_ASSERTN (MPFR_PREC(x) >= mpz_sizeinbase(s, 2));
+ mpfr_set_z (x, s, MPFR_RNDD); /* exact */
+ mpfr_div_2ui (x, x, m, MPFR_RNDD); /* undo the 2^m scaling */
+ /* now x = 1/n * sum(k!/(-n)^k, k=0..n-2) <= 1/n */
+ /* err(x) <= (n+1)/2^m <= (n+1)*exp(n)/2^PREC(x) */
+
+ mpfr_init2 (y, m);
+ mpfr_set_si (y, -(long)n, MPFR_RNDD); /* assumed exact */
+ mpfr_exp (y, y, MPFR_RNDD); /* err <= ulp(y) <= exp(-n)*2^(1-m) */
+ mpfr_mul (x, x, y, MPFR_RNDD); /* multiply by exp(-n) */
+ /* err <= ulp(x) + (n + 1 + 2/n) / 2^prec(x)
+ <= ulp(x) + (n + 1 + 2/n) ulp(x)/x since x*2^(-prec(x)) < ulp(x)
+ <= ulp(x) + (n + 1 + 2/n) 3/(2n) ulp(x) since x >= 2/3*n for n >= 2
+ <= 4 * ulp(x) for n >= 2 */
+ mpfr_clear (y);
+
+ mpz_clear (a);
+ mpz_clear (s);
+}
diff --git a/src/const_log2.c b/src/const_log2.c
new file mode 100644
index 000000000..e97172b43
--- /dev/null
+++ b/src/const_log2.c
@@ -0,0 +1,192 @@
+/* mpfr_const_log2 -- compute natural logarithm of 2
+
+Copyright 1999, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#define MPFR_NEED_LONGLONG_H
+#include "mpfr-impl.h"
+
+/* Declare the cache */
+MPFR_DECL_INIT_CACHE(__gmpfr_cache_const_log2, mpfr_const_log2_internal);
+
+/* Set User interface */
+#undef mpfr_const_log2
+/* Public entry point for log(2): delegates to the cache, which calls
+   mpfr_const_log2_internal on a miss and rounds the cached value to
+   the precision of x in direction rnd_mode. */
+int
+mpfr_const_log2 (mpfr_ptr x, mpfr_rnd_t rnd_mode) {
+ return mpfr_cache (x, __gmpfr_cache_const_log2, rnd_mode);
+}
+
+/* Auxiliary function: Compute the terms from n1 to n2 (excluded)
+ 3/4*sum((-1)^n*n!^2/2^n/(2*n+1)!, n = n1..n2-1).
+ Numerator is T[0], denominator is Q[0],
+ Compute P[0] only when need_P is non-zero.
+ Need 1+ceil(log(n2-n1)/log(2)) cells in T[],P[],Q[].
+ (Binary splitting: the recursion combines the two halves of the
+ range with T <- T*Q' + T'*P, Q <- Q*Q', P <- P*P', and removes
+ common trailing zero bits to keep the integers small.)
+*/
+static void
+S (mpz_t *T, mpz_t *P, mpz_t *Q, unsigned long n1, unsigned long n2, int need_P)
+{
+ if (n2 == n1 + 1)
+ {
+ /* base case: a single term; P = 3 for n=0, P = -n otherwise,
+ and Q = 4*(2n+1) */
+ if (n1 == 0)
+ mpz_set_ui (P[0], 3);
+ else
+ {
+ mpz_set_ui (P[0], n1);
+ mpz_neg (P[0], P[0]);
+ }
+ if (n1 <= (ULONG_MAX / 4 - 1) / 2)
+ mpz_set_ui (Q[0], 4 * (2 * n1 + 1));
+ else /* to avoid overflow in 4 * (2 * n1 + 1) */
+ {
+ mpz_set_ui (Q[0], n1);
+ mpz_mul_2exp (Q[0], Q[0], 1);
+ mpz_add_ui (Q[0], Q[0], 1);
+ mpz_mul_2exp (Q[0], Q[0], 2);
+ }
+ mpz_set (T[0], P[0]);
+ }
+ else
+ {
+ /* midpoint, written to avoid overflow in n1 + n2 */
+ unsigned long m = (n1 / 2) + (n2 / 2) + (n1 & 1UL & n2);
+ unsigned long v, w;
+
+ S (T, P, Q, n1, m, 1);
+ S (T + 1, P + 1, Q + 1, m, n2, need_P);
+ mpz_mul (T[0], T[0], Q[1]);
+ mpz_mul (T[1], T[1], P[0]);
+ mpz_add (T[0], T[0], T[1]);
+ if (need_P)
+ mpz_mul (P[0], P[0], P[1]);
+ mpz_mul (Q[0], Q[0], Q[1]);
+
+ /* remove common trailing zeroes if any */
+ v = mpz_scan1 (T[0], 0);
+ if (v > 0)
+ {
+ w = mpz_scan1 (Q[0], 0);
+ if (w < v)
+ v = w;
+ if (need_P)
+ {
+ w = mpz_scan1 (P[0], 0);
+ if (w < v)
+ v = w;
+ }
+ /* now v = min(val(T), val(Q), val(P)) */
+ if (v > 0)
+ {
+ mpz_fdiv_q_2exp (T[0], T[0], v);
+ mpz_fdiv_q_2exp (Q[0], Q[0], v);
+ if (need_P)
+ mpz_fdiv_q_2exp (P[0], P[0], v);
+ }
+ }
+ }
+}
+
+/* Don't need to save / restore exponent range: the cache does it */
+/* Compute log(2) at the precision of x via the binary-splitting sum S()
+   above.  For n below the empirically-verified thresholds, the first
+   working precision w is known to give correct rounding (ok = 1) and
+   the Ziv loop exits after one pass without a can_round test. */
+int
+mpfr_const_log2_internal (mpfr_ptr x, mpfr_rnd_t rnd_mode)
+{
+ unsigned long n = MPFR_PREC (x);
+ mpfr_prec_t w; /* working precision */
+ unsigned long N;
+ mpz_t *T, *P, *Q;
+ mpfr_t t, q;
+ int inexact;
+ int ok = 1; /* ensures that the 1st try will give correct rounding */
+ unsigned long lgN, i;
+ MPFR_ZIV_DECL (loop);
+
+ MPFR_LOG_FUNC (("rnd_mode=%d", rnd_mode), ("x[%#R]=%R inex=%d",x,x,inexact));
+
+ mpfr_init2 (t, MPFR_PREC_MIN);
+ mpfr_init2 (q, MPFR_PREC_MIN);
+
+ if (n < 1253)
+ w = n + 10; /* ensures correct rounding for the four rounding modes,
+ together with N = w / 3 + 1 (see below). */
+ else if (n < 2571)
+ w = n + 11; /* idem */
+ else if (n < 3983)
+ w = n + 12;
+ else if (n < 4854)
+ w = n + 13;
+ else if (n < 26248)
+ w = n + 14;
+ else
+ {
+ w = n + 15;
+ ok = 0;
+ }
+
+ MPFR_ZIV_INIT (loop, w);
+ for (;;)
+ {
+ N = w / 3 + 1; /* Warning: do not change that (even increasing N!)
+ without checking correct rounding in the above
+ ranges for n. */
+
+ /* the following are needed for error analysis (see algorithms.tex) */
+ MPFR_ASSERTD(w >= 3 && N >= 2);
+
+ /* allocate the three scratch arrays for S() in one block */
+ lgN = MPFR_INT_CEIL_LOG2 (N) + 1;
+ T = (mpz_t *) (*__gmp_allocate_func) (3 * lgN * sizeof (mpz_t));
+ P = T + lgN;
+ Q = T + 2*lgN;
+ for (i = 0; i < lgN; i++)
+ {
+ mpz_init (T[i]);
+ mpz_init (P[i]);
+ mpz_init (Q[i]);
+ }
+
+ S (T, P, Q, 0, N, 0);
+
+ mpfr_set_prec (t, w);
+ mpfr_set_prec (q, w);
+
+ /* log(2) ~ T[0]/Q[0]; three roundings, error covered by w - 2 below */
+ mpfr_set_z (t, T[0], MPFR_RNDN);
+ mpfr_set_z (q, Q[0], MPFR_RNDN);
+ mpfr_div (t, t, q, MPFR_RNDN);
+
+ for (i = 0; i < lgN; i++)
+ {
+ mpz_clear (T[i]);
+ mpz_clear (P[i]);
+ mpz_clear (Q[i]);
+ }
+ (*__gmp_free_func) (T, 3 * lgN * sizeof (mpz_t));
+
+ if (MPFR_LIKELY (ok != 0
+ || mpfr_can_round (t, w - 2, MPFR_RNDN, rnd_mode, n)))
+ break;
+
+ MPFR_ZIV_NEXT (loop, w);
+ }
+ MPFR_ZIV_FREE (loop);
+
+ inexact = mpfr_set (x, t, rnd_mode);
+
+ mpfr_clear (t);
+ mpfr_clear (q);
+
+ return inexact;
+}
diff --git a/src/const_pi.c b/src/const_pi.c
new file mode 100644
index 000000000..1452a2399
--- /dev/null
+++ b/src/const_pi.c
@@ -0,0 +1,120 @@
+/* mpfr_const_pi -- compute Pi
+
+Copyright 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#include "mpfr-impl.h"
+
+/* Declare the cache */
+MPFR_DECL_INIT_CACHE(__gmpfr_cache_const_pi, mpfr_const_pi_internal);
+
+/* Set User Interface */
+#undef mpfr_const_pi
+/* Public entry point for Pi: delegates to the cache, which calls
+   mpfr_const_pi_internal on a miss and rounds the cached value to
+   the precision of x in direction rnd_mode. */
+int
+mpfr_const_pi (mpfr_ptr x, mpfr_rnd_t rnd_mode) {
+ return mpfr_cache (x, __gmpfr_cache_const_pi, rnd_mode);
+}
+
+/* Don't need to save/restore exponent range: the cache does it */
+/* Compute Pi at the precision of x by an AGM-type iteration on
+   (a, A=a^2, B=b^2, D), finishing with Pi ~ B/D; presumably the
+   Brent-Salamin/Borwein scheme analyzed in algorithms.tex -- confirm.
+   Returns the ternary inexact value. */
+int
+mpfr_const_pi_internal (mpfr_ptr x, mpfr_rnd_t rnd_mode)
+{
+ mpfr_t a, A, B, D, S;
+ mpfr_prec_t px, p, cancel, k, kmax;
+ MPFR_ZIV_DECL (loop);
+ int inex;
+
+ MPFR_LOG_FUNC (("rnd_mode=%d", rnd_mode), ("x[%#R]=%R inex=%d", x, x, inex));
+
+ px = MPFR_PREC (x);
+
+ /* we need 9*2^kmax - 4 >= px+2*kmax+8 */
+ for (kmax = 2; ((px + 2 * kmax + 12) / 9) >> kmax; kmax ++);
+
+ p = px + 3 * kmax + 14; /* guarantees no recomputation for px <= 10000 */
+
+ mpfr_init2 (a, p);
+ mpfr_init2 (A, p);
+ mpfr_init2 (B, p);
+ mpfr_init2 (D, p);
+ mpfr_init2 (S, p);
+
+ MPFR_ZIV_INIT (loop, p);
+ for (;;) {
+ mpfr_set_ui (a, 1, MPFR_RNDN); /* a = 1 */
+ mpfr_set_ui (A, 1, MPFR_RNDN); /* A = a^2 = 1 */
+ mpfr_set_ui_2exp (B, 1, -1, MPFR_RNDN); /* B = b^2 = 1/2 */
+ mpfr_set_ui_2exp (D, 1, -2, MPFR_RNDN); /* D = 1/4 */
+
+/* the new iterates overwrite the old ones in place (b shares B, etc.) */
+#define b B
+#define ap a
+#define Ap A
+#define Bp B
+ for (k = 0, cancel = 0; ; k++)
+ {
+ /* invariant: 1/2 <= B <= A <= a < 1 */
+ mpfr_add (S, A, B, MPFR_RNDN); /* 1 <= S <= 2 */
+ mpfr_div_2ui (S, S, 2, MPFR_RNDN); /* exact, 1/4 <= S <= 1/2 */
+ mpfr_sqrt (b, B, MPFR_RNDN); /* 1/2 <= b <= 1 */
+ mpfr_add (ap, a, b, MPFR_RNDN); /* 1 <= ap <= 2 */
+ mpfr_div_2ui (ap, ap, 1, MPFR_RNDN); /* exact, 1/2 <= ap <= 1 */
+ mpfr_mul (Ap, ap, ap, MPFR_RNDN); /* 1/4 <= Ap <= 1 */
+ mpfr_sub (Bp, Ap, S, MPFR_RNDN); /* -1/4 <= Bp <= 3/4 */
+ mpfr_mul_2ui (Bp, Bp, 1, MPFR_RNDN); /* -1/2 <= Bp <= 3/2 */
+ mpfr_sub (S, Ap, Bp, MPFR_RNDN);
+ MPFR_ASSERTN (mpfr_cmp_ui (S, 1) < 0);
+ /* cancel = number of cancelled bits between A_k and B_k */
+ cancel = mpfr_cmp_ui (S, 0) ? (mpfr_uexp_t) -mpfr_get_exp(S) : p;
+ /* MPFR_ASSERTN (cancel >= px || cancel >= 9 * (1 << k) - 4); */
+ mpfr_mul_2ui (S, S, k, MPFR_RNDN);
+ mpfr_sub (D, D, S, MPFR_RNDN);
+ /* stop when |A_k - B_k| <= 2^(k-p) i.e. cancel >= p-k */
+ if (cancel + k >= p)
+ break;
+ }
+#undef b
+#undef ap
+#undef Ap
+#undef Bp
+
+ mpfr_div (A, B, D, MPFR_RNDN); /* Pi approximation */
+
+ /* MPFR_ASSERTN(p >= 2 * k + 8); */
+ if (MPFR_LIKELY (MPFR_CAN_ROUND (A, p - 2 * k - 8, px, rnd_mode)))
+ break;
+
+ p += kmax;
+ MPFR_ZIV_NEXT (loop, p);
+ mpfr_set_prec (a, p);
+ mpfr_set_prec (A, p);
+ mpfr_set_prec (B, p);
+ mpfr_set_prec (D, p);
+ mpfr_set_prec (S, p);
+ }
+ MPFR_ZIV_FREE (loop);
+ inex = mpfr_set (x, A, rnd_mode);
+
+ mpfr_clear (a);
+ mpfr_clear (A);
+ mpfr_clear (B);
+ mpfr_clear (D);
+ mpfr_clear (S);
+
+ return inex;
+}
diff --git a/src/constant.c b/src/constant.c
new file mode 100644
index 000000000..c559a864f
--- /dev/null
+++ b/src/constant.c
@@ -0,0 +1,28 @@
+/* MPFR internal constant FP numbers
+
+Copyright 2005, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#include "mpfr-impl.h"
+
+/* One shared limb holding only the most significant bit set; with
+   exponents 1, 2, 3 this encodes the exact values 1, 2 and 4.
+   NOTE(review): initializer order presumably matches the mpfr struct
+   fields {prec, sign, exp, limb pointer} -- confirm against mpfr.h. */
+static const mp_limb_t __gmpfr_limb1[1] = {MPFR_LIMB_HIGHBIT};
+const mpfr_t __gmpfr_one = {{2, MPFR_SIGN_POS, 1, (mp_limb_t*)__gmpfr_limb1}};
+const mpfr_t __gmpfr_two = {{2, MPFR_SIGN_POS, 2, (mp_limb_t*)__gmpfr_limb1}};
+const mpfr_t __gmpfr_four ={{2, MPFR_SIGN_POS, 3, (mp_limb_t*)__gmpfr_limb1}};
diff --git a/src/copysign.c b/src/copysign.c
new file mode 100644
index 000000000..05cfe088e
--- /dev/null
+++ b/src/copysign.c
@@ -0,0 +1,38 @@
+/* mpfr_copysign -- Produce a value with the magnitude of x and sign bit of y
+
+Copyright 2001, 2002, 2003, 2004, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#include "mpfr-impl.h"
+
+ /*
+ The computation of z with magnitude of x and sign of y:
+ z = (-1)^signbit(y) * abs(x), i.e. with the same sign bit as y,
+ even if z is a NaN.
+ Note: This function implements copysign from the IEEE-754 standard
+ when no rounding occurs (e.g. if PREC(z) >= PREC(x)).
+ */
+
+#undef mpfr_copysign
+/* Set z to x rounded in direction rnd_mode, but carrying the sign bit
+   of y (mpfr_set4 is the sign-parameterized assignment); returns the
+   ternary inexact value. */
+int
+mpfr_copysign (mpfr_ptr z, mpfr_srcptr x, mpfr_srcptr y, mpfr_rnd_t rnd_mode)
+{
+ return mpfr_set4 (z, x, rnd_mode, MPFR_SIGN (y));
+}
diff --git a/src/cos.c b/src/cos.c
new file mode 100644
index 000000000..d7d0bc196
--- /dev/null
+++ b/src/cos.c
@@ -0,0 +1,296 @@
+/* mpfr_cos -- cosine of a floating-point number
+
+Copyright 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#define MPFR_NEED_LONGLONG_H
+#include "mpfr-impl.h"
+
+/* Cosine via mpfr_sincos_fast (sine output suppressed with NULL).
+   The combined inexact code packs the cos ternary value in bits 2-3;
+   decode it back to the usual -1/0/+1 convention. */
+static int
+mpfr_cos_fast (mpfr_ptr y, mpfr_srcptr x, mpfr_rnd_t rnd_mode)
+{
+ int inex;
+
+ inex = mpfr_sincos_fast (NULL, y, x, rnd_mode);
+ inex = inex >> 2; /* 0: exact, 1: rounded up, 2: rounded down */
+ return (inex == 2) ? -1 : inex;
+}
+
+/* f <- 1 - r/2! + r^2/4! + ... + (-1)^l r^l/(2l)! + ...
+ Assumes |r| < 1/2, and f, r have the same precision.
+ Returns e such that the error on f is bounded by 2^e ulps.
+ The series is summed in fixed point: integers scaled by 2^(p+q). */
+static int
+mpfr_cos2_aux (mpfr_ptr f, mpfr_srcptr r)
+{
+ mpz_t x, t, s;
+ mpfr_exp_t ex, l, m;
+ mpfr_prec_t p, q;
+ unsigned long i, maxi, imax;
+
+ MPFR_ASSERTD(mpfr_get_exp (r) <= -1);
+
+ /* compute minimal i such that i*(i+1) does not fit in an unsigned long,
+ assuming that there are no padding bits. */
+ maxi = 1UL << (CHAR_BIT * sizeof(unsigned long) / 2);
+ if (maxi * (maxi / 2) == 0) /* test checked at compile time */
+ {
+ /* can occur only when there are padding bits. */
+ /* maxi * (maxi-1) is representable iff maxi * (maxi / 2) != 0 */
+ do
+ maxi /= 2;
+ while (maxi * (maxi / 2) == 0);
+ }
+
+ mpz_init (x);
+ mpz_init (s);
+ mpz_init (t);
+ ex = mpfr_get_z_2exp (x, r); /* r = x*2^ex */
+
+ /* remove trailing zeroes */
+ l = mpz_scan1 (x, 0);
+ ex += l;
+ mpz_fdiv_q_2exp (x, x, l);
+
+ /* since |r| < 1, r = x*2^ex, and x is an integer, necessarily ex < 0 */
+
+ p = mpfr_get_prec (f); /* same than r */
+ /* bound for number of iterations */
+ imax = p / (-mpfr_get_exp (r));
+ imax += (imax == 0);
+ q = 2 * MPFR_INT_CEIL_LOG2(imax) + 4; /* bound for (3l)^2 */
+
+ mpz_set_ui (s, 1); /* initialize sum with 1 */
+ mpz_mul_2exp (s, s, p + q); /* scale all values by 2^(p+q) */
+ mpz_set (t, s); /* invariant: t is previous term */
+ for (i = 1; (m = mpz_sizeinbase (t, 2)) >= q; i += 2)
+ {
+ /* adjust precision of x to that of t */
+ l = mpz_sizeinbase (x, 2);
+ if (l > m)
+ {
+ l -= m;
+ mpz_fdiv_q_2exp (x, x, l);
+ ex += l;
+ }
+ /* multiply t by r */
+ mpz_mul (t, t, x);
+ mpz_fdiv_q_2exp (t, t, -ex);
+ /* divide t by i*(i+1), in one step when the product fits */
+ if (i < maxi)
+ mpz_fdiv_q_ui (t, t, i * (i + 1));
+ else
+ {
+ mpz_fdiv_q_ui (t, t, i);
+ mpz_fdiv_q_ui (t, t, i + 1);
+ }
+ /* if m is the (current) number of bits of t, we can consider that
+ all operations on t so far had precision >= m, so we can prove
+ by induction that the relative error on t is of the form
+ (1+u)^(3l)-1, where |u| <= 2^(-m), and l=(i+1)/2 is the # of loops.
+ Since |(1+x^2)^(1/x) - 1| <= 4x/3 for |x| <= 1/2,
+ for |u| <= 1/(3l)^2, the absolute error is bounded by
+ 4/3*(3l)*2^(-m)*t <= 4*l since |t| < 2^m.
+ Therefore the error on s is bounded by 2*l*(l+1). */
+ /* add or subtract to s */
+ if (i % 4 == 1)
+ mpz_sub (s, s, t);
+ else
+ mpz_add (s, s, t);
+ }
+
+ mpfr_set_z (f, s, MPFR_RNDN);
+ mpfr_div_2ui (f, f, p + q, MPFR_RNDN); /* undo the 2^(p+q) scaling */
+
+ mpz_clear (x);
+ mpz_clear (s);
+ mpz_clear (t);
+
+ l = (i - 1) / 2; /* number of iterations */
+ return 2 * MPFR_INT_CEIL_LOG2 (l + 1) + 1; /* bound is 2l(l+1) */
+}
+
+/* Cosine of x, rounded in direction rnd_mode; returns the ternary
+   inexact value after mpfr_check_range.  Strategy: handle NaN/Inf/0,
+   short-circuit tiny inputs via the 1 - x^2/2 expansion, delegate huge
+   precisions to mpfr_cos_fast, otherwise reduce |x| >= 4 modulo 2*Pi,
+   evaluate the series on r = x^2/4^K with mpfr_cos2_aux and recover
+   cos(x) by K applications of cos(2y) = 2*cos(y)^2 - 1, inside a Ziv
+   loop. */
+int
+mpfr_cos (mpfr_ptr y, mpfr_srcptr x, mpfr_rnd_t rnd_mode)
+{
+ mpfr_prec_t K0, K, precy, m, k, l;
+ int inexact, reduce = 0;
+ mpfr_t r, s, xr, c;
+ mpfr_exp_t exps, cancel = 0, expx;
+ MPFR_ZIV_DECL (loop);
+ MPFR_SAVE_EXPO_DECL (expo);
+ MPFR_GROUP_DECL (group);
+
+ MPFR_LOG_FUNC (("x[%#R]=%R rnd=%d", x, x, rnd_mode),
+ ("y[%#R]=%R inexact=%d", y, y, inexact));
+
+ if (MPFR_UNLIKELY (MPFR_IS_SINGULAR (x)))
+ {
+ if (MPFR_IS_NAN (x) || MPFR_IS_INF (x))
+ {
+ MPFR_SET_NAN (y);
+ MPFR_RET_NAN;
+ }
+ else
+ {
+ MPFR_ASSERTD (MPFR_IS_ZERO (x));
+ return mpfr_set_ui (y, 1, rnd_mode); /* cos(+/-0) = 1 */
+ }
+ }
+
+ MPFR_SAVE_EXPO_MARK (expo);
+
+ /* cos(x) = 1-x^2/2 + ..., so error < 2^(2*EXP(x)-1) */
+ expx = MPFR_GET_EXP (x);
+ MPFR_SMALL_INPUT_AFTER_SAVE_EXPO (y, __gmpfr_one, -2 * expx,
+ 1, 0, rnd_mode, expo, {});
+
+ /* Compute initial precision */
+ precy = MPFR_PREC (y);
+
+ if (precy >= MPFR_SINCOS_THRESHOLD)
+ {
+ MPFR_SAVE_EXPO_FREE (expo);
+ return mpfr_cos_fast (y, x, rnd_mode);
+ }
+
+ K0 = __gmpfr_isqrt (precy / 3); /* number of halvings of the argument */
+ m = precy + 2 * MPFR_INT_CEIL_LOG2 (precy) + 2 * K0;
+
+ if (expx >= 3)
+ {
+ reduce = 1;
+ /* As expx + m - 1 will silently be converted into mpfr_prec_t
+ in the mpfr_init2 call, the assert below may be useful to
+ avoid undefined behavior. */
+ MPFR_ASSERTN (expx + m - 1 <= MPFR_PREC_MAX);
+ mpfr_init2 (c, expx + m - 1);
+ mpfr_init2 (xr, m);
+ }
+
+ MPFR_GROUP_INIT_2 (group, m, r, s);
+ MPFR_ZIV_INIT (loop, m);
+ for (;;)
+ {
+ /* If |x| >= 4, first reduce x cmod (2*Pi) into xr, using mpfr_remainder:
+ let e = EXP(x) >= 3, and m the target precision:
+ (1) c <- 2*Pi [precision e+m-1, nearest]
+ (2) xr <- remainder (x, c) [precision m, nearest]
+ We have |c - 2*Pi| <= 1/2ulp(c) = 2^(3-e-m)
+ |xr - x - k c| <= 1/2ulp(xr) <= 2^(1-m)
+ |k| <= |x|/(2*Pi) <= 2^(e-2)
+ Thus |xr - x - 2kPi| <= |k| |c - 2Pi| + 2^(1-m) <= 2^(2-m).
+ It follows |cos(xr) - cos(x)| <= 2^(2-m). */
+ if (reduce)
+ {
+ mpfr_const_pi (c, MPFR_RNDN);
+ mpfr_mul_2ui (c, c, 1, MPFR_RNDN); /* 2Pi */
+ mpfr_remainder (xr, x, c, MPFR_RNDN);
+ if (MPFR_IS_ZERO(xr))
+ goto ziv_next; /* cannot conclude at this precision */
+ /* now |xr| <= 4, thus r <= 16 below */
+ mpfr_mul (r, xr, xr, MPFR_RNDU); /* err <= 1 ulp */
+ }
+ else
+ mpfr_mul (r, x, x, MPFR_RNDU); /* err <= 1 ulp */
+
+ /* now |x| < 4 (or xr if reduce = 1), thus |r| <= 16 */
+
+ /* we need |r| < 1/2 for mpfr_cos2_aux, i.e., EXP(r) - 2K <= -1 */
+ K = K0 + 1 + MAX(0, MPFR_EXP(r)) / 2;
+ /* since K0 >= 0, if EXP(r) < 0, then K >= 1, thus EXP(r) - 2K <= -3;
+ otherwise if EXP(r) >= 0, then K >= 1/2 + EXP(r)/2, thus
+ EXP(r) - 2K <= -1 */
+
+ MPFR_SET_EXP (r, MPFR_GET_EXP (r) - 2 * K); /* Can't overflow! */
+
+ /* s <- 1 - r/2! + ... + (-1)^l r^l/(2l)! */
+ l = mpfr_cos2_aux (s, r);
+ /* l is the error bound in ulps on s */
+ MPFR_SET_ONE (r);
+ /* undo the K halvings: cos(2y) = 2*cos(y)^2 - 1 */
+ for (k = 0; k < K; k++)
+ {
+ mpfr_sqr (s, s, MPFR_RNDU); /* err <= 2*olderr */
+ MPFR_SET_EXP (s, MPFR_GET_EXP (s) + 1); /* Can't overflow */
+ mpfr_sub (s, s, r, MPFR_RNDN); /* err <= 4*olderr */
+ if (MPFR_IS_ZERO(s))
+ goto ziv_next;
+ MPFR_ASSERTD (MPFR_GET_EXP (s) <= 1);
+ }
+
+ /* The absolute error on s is bounded by (2l+1/3)*2^(2K-m)
+ 2l+1/3 <= 2l+1.
+ If |x| >= 4, we need to add 2^(2-m) for the argument reduction
+ by 2Pi: if K = 0, this amounts to add 4 to 2l+1/3, i.e., to add
+ 2 to l; if K >= 1, this amounts to add 1 to 2*l+1/3. */
+ l = 2 * l + 1;
+ if (reduce)
+ l += (K == 0) ? 4 : 1;
+ k = MPFR_INT_CEIL_LOG2 (l) + 2*K;
+ /* now the error is bounded by 2^(k-m) = 2^(EXP(s)-err) */
+
+ exps = MPFR_GET_EXP (s);
+ if (MPFR_LIKELY (MPFR_CAN_ROUND (s, exps + m - k, precy, rnd_mode)))
+ break;
+
+ if (MPFR_UNLIKELY (exps == 1))
+ /* s = 1 or -1, and except x=0 which was already checked above,
+ cos(x) cannot be 1 or -1, so we can round if the error is less
+ than 2^(-precy) for directed rounding, or 2^(-precy-1) for rounding
+ to nearest. */
+ {
+ if (m > k && (m - k >= precy + (rnd_mode == MPFR_RNDN)))
+ {
+ /* If round to nearest or away, result is s = 1 or -1,
+ otherwise it is round(nexttoward (s, 0)). However in order to
+ have the inexact flag correctly set below, we set |s| to
+ 1 - 2^(-m) in all cases. */
+ mpfr_nexttozero (s);
+ break;
+ }
+ }
+
+ if (exps < cancel)
+ {
+ /* compensate the cancellation observed so far by enlarging m */
+ m += cancel - exps;
+ cancel = exps;
+ }
+
+ ziv_next:
+ MPFR_ZIV_NEXT (loop, m);
+ MPFR_GROUP_REPREC_2 (group, m, r, s);
+ if (reduce)
+ {
+ mpfr_set_prec (xr, m);
+ mpfr_set_prec (c, expx + m - 1);
+ }
+ }
+ MPFR_ZIV_FREE (loop);
+ inexact = mpfr_set (y, s, rnd_mode);
+ MPFR_GROUP_CLEAR (group);
+ if (reduce)
+ {
+ mpfr_clear (xr);
+ mpfr_clear (c);
+ }
+
+ MPFR_SAVE_EXPO_FREE (expo);
+ return mpfr_check_range (y, inexact, rnd_mode);
+}
diff --git a/src/cosh.c b/src/cosh.c
new file mode 100644
index 000000000..69ae3b5f6
--- /dev/null
+++ b/src/cosh.c
@@ -0,0 +1,126 @@
+/* mpfr_cosh -- hyperbolic cosine
+
+Copyright 2001, 2002, 2004, 2005, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#define MPFR_NEED_LONGLONG_H
+#include "mpfr-impl.h"
+
+/* The computation of cosh is done by *
+ * cosh= 1/2[e^(x)+e^(-x)] */
+
+/* Hyperbolic cosine of xt, rounded in direction rnd_mode; returns the
+   ternary inexact value after mpfr_check_range.  Special cases first,
+   then a tiny-input shortcut, then the general case via the formula
+   cosh(x) = (exp(x) + 1/exp(x)) / 2 on |x|, inside a Ziv loop. */
+int
+mpfr_cosh (mpfr_ptr y, mpfr_srcptr xt , mpfr_rnd_t rnd_mode)
+{
+ mpfr_t x;
+ int inexact;
+ MPFR_SAVE_EXPO_DECL (expo);
+
+ MPFR_LOG_FUNC (("x[%#R]=%R rnd=%d", xt, xt, rnd_mode),
+ ("y[%#R]=%R inexact=%d", y, y, inexact));
+
+ if (MPFR_UNLIKELY(MPFR_IS_SINGULAR(xt)))
+ {
+ if (MPFR_IS_NAN(xt))
+ {
+ MPFR_SET_NAN(y);
+ MPFR_RET_NAN;
+ }
+ else if (MPFR_IS_INF(xt))
+ {
+ /* cosh(+/-Inf) = +Inf, exact */
+ MPFR_SET_INF(y);
+ MPFR_SET_POS(y);
+ MPFR_RET(0);
+ }
+ else
+ {
+ MPFR_ASSERTD(MPFR_IS_ZERO(xt));
+ return mpfr_set_ui (y, 1, rnd_mode); /* cosh(0) = 1 */
+ }
+ }
+
+ MPFR_SAVE_EXPO_MARK (expo);
+
+ /* cosh(x) = 1+x^2/2 + ... <= 1+x^2 for x <= 2.9828...,
+ thus the error < 2^(2*EXP(x)). If x >= 1, then EXP(x) >= 1,
+ thus the following will always fail. */
+ MPFR_FAST_COMPUTE_IF_SMALL_INPUT (y, __gmpfr_one, -2 * MPFR_GET_EXP (xt), 0,
+ 1, rnd_mode, inexact = _inexact; goto end);
+
+ MPFR_TMP_INIT_ABS(x, xt); /* x = |xt|, no copy (cosh is even) */
+ /* General case */
+ {
+ /* Declaration of the intermediary variable */
+ mpfr_t t, te;
+ /* Declaration of the size variable */
+ mpfr_prec_t Ny = MPFR_PREC(y); /* Precision of output variable */
+ mpfr_prec_t Nt; /* Precision of the intermediary variable */
+ long int err; /* Precision of error */
+ MPFR_ZIV_DECL (loop);
+ MPFR_GROUP_DECL (group);
+
+ /* compute the precision of intermediary variable */
+ /* The optimal number of bits : see algorithms.tex */
+ Nt = Ny + 3 + MPFR_INT_CEIL_LOG2 (Ny);
+
+ /* initialise of intermediary variables */
+ MPFR_GROUP_INIT_2 (group, Nt, t, te);
+
+ /* First computation of cosh */
+ MPFR_ZIV_INIT (loop, Nt);
+ for (;;)
+ {
+ MPFR_BLOCK_DECL (flags);
+
+ /* Compute cosh */
+ MPFR_BLOCK (flags, mpfr_exp (te, x, MPFR_RNDD)); /* exp(x) */
+ /* exp can overflow (but not underflow since x>0) */
+ if (MPFR_OVERFLOW (flags))
+ /* cosh(x) > exp(x), so cosh(x) overflows too */
+ {
+ inexact = mpfr_overflow (y, rnd_mode, MPFR_SIGN_POS);
+ MPFR_SAVE_EXPO_UPDATE_FLAGS (expo, MPFR_FLAGS_OVERFLOW);
+ break;
+ }
+ mpfr_ui_div (t, 1, te, MPFR_RNDU); /* 1/exp(x) */
+ mpfr_add (t, te, t, MPFR_RNDU); /* exp(x) + 1/exp(x)*/
+ mpfr_div_2ui (t, t, 1, MPFR_RNDN); /* 1/2(exp(x) + 1/exp(x))*/
+
+ /* Estimation of the error */
+ err = Nt - 3;
+ /* Check if we can round */
+ if (MPFR_LIKELY (MPFR_CAN_ROUND (t, err, Ny, rnd_mode)))
+ {
+ inexact = mpfr_set (y, t, rnd_mode);
+ break;
+ }
+
+ /* Actualisation of the precision */
+ MPFR_ZIV_NEXT (loop, Nt);
+ MPFR_GROUP_REPREC_2 (group, Nt, t, te);
+ }
+ MPFR_ZIV_FREE (loop);
+ MPFR_GROUP_CLEAR (group);
+ }
+
+ end:
+ MPFR_SAVE_EXPO_FREE (expo);
+ return mpfr_check_range (y, inexact, rnd_mode);
+}
diff --git a/src/cot.c b/src/cot.c
new file mode 100644
index 000000000..3dce042bf
--- /dev/null
+++ b/src/cot.c
@@ -0,0 +1,96 @@
+/* mpfr_cot - cotangent function.
+
+Copyright 2005, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+/* the cotangent is defined by cot(x) = 1/tan(x) = cos(x)/sin(x).
+ cot (NaN) = NaN.
+   cot (+Inf) = cot (-Inf) = NaN.
+ cot (+0) = +Inf.
+ cot (-0) = -Inf.
+*/
+
+#define FUNCTION mpfr_cot
+#define INVERSE mpfr_tan
+#define ACTION_NAN(y) do { MPFR_SET_NAN(y); MPFR_RET_NAN; } while (1)
+#define ACTION_INF(y) do { MPFR_SET_NAN(y); MPFR_RET_NAN; } while (1)
+#define ACTION_ZERO(y,x) do { MPFR_SET_SAME_SIGN(y,x); MPFR_SET_INF(y); \
+ MPFR_RET(0); } while (1)
+
+/* (This analysis is adapted from that for mpfr_coth.)
+ Near x=0, cot(x) = 1/x - x/3 + ..., more precisely we have
+ |cot(x) - 1/x| <= 0.36 for |x| <= 1. The error term has
+ the opposite sign as 1/x, thus |cot(x)| <= |1/x|. Then:
+ (i) either x is a power of two, then 1/x is exactly representable, and
+ as long as 1/2*ulp(1/x) > 0.36, we can conclude;
+ (ii) otherwise assume x has <= n bits, and y has <= n+1 bits, then
+ |y - 1/x| >= 2^(-2n) ufp(y), where ufp means unit in first place.
+ Since |cot(x) - 1/x| <= 0.36, if 2^(-2n) ufp(y) >= 0.72, then
+ |y - cot(x)| >= 2^(-2n-1) ufp(y), and rounding 1/x gives the correct
+ result. If x < 2^E, then y > 2^(-E), thus ufp(y) > 2^(-E-1).
+ A sufficient condition is thus EXP(x) + 1 <= -2 MAX(PREC(x),PREC(Y)).
+ The division can be inexact in case of underflow or overflow; but
+ an underflow is not possible as emin = - emax. The overflow is a
+ real overflow possibly except when |x| = 2^emin. */
+#define ACTION_TINY(y,x,r) \
+ if (MPFR_EXP(x) + 1 <= -2 * (mpfr_exp_t) MAX(MPFR_PREC(x), MPFR_PREC(y))) \
+ { \
+ int two2emin; \
+ int signx = MPFR_SIGN(x); \
+ MPFR_ASSERTN (MPFR_EMIN_MIN + MPFR_EMAX_MAX == 0); \
+ if ((two2emin = mpfr_get_exp (x) == __gmpfr_emin + 1 && \
+ mpfr_powerof2_raw (x))) \
+ { \
+ /* Case |x| = 2^emin. 1/x is not representable; so, compute \
+ 1/(2x) instead (exact), and correct the result later. */ \
+ mpfr_set_si_2exp (y, signx, __gmpfr_emax, MPFR_RNDN); \
+ inexact = 0; \
+ } \
+ else \
+ inexact = mpfr_ui_div (y, 1, x, r); \
+ if (inexact == 0) /* x is a power of two */ \
+ { /* result always 1/x, except when rounding to zero */ \
+ if (rnd_mode == MPFR_RNDA) \
+ rnd_mode = (signx > 0) ? MPFR_RNDU : MPFR_RNDD; \
+ if (rnd_mode == MPFR_RNDU || (rnd_mode == MPFR_RNDZ && signx < 0)) \
+ { \
+ if (signx < 0) \
+ mpfr_nextabove (y); /* -2^k + epsilon */ \
+ inexact = 1; \
+ } \
+ else if (rnd_mode == MPFR_RNDD || rnd_mode == MPFR_RNDZ) \
+ { \
+ if (signx > 0) \
+ mpfr_nextbelow (y); /* 2^k - epsilon */ \
+ inexact = -1; \
+ } \
+ else /* round to nearest */ \
+ inexact = signx; \
+ if (two2emin) \
+ mpfr_mul_2ui (y, y, 1, r); /* overflow in MPFR_RNDN */ \
+ } \
+ /* Underflow is not possible with emin = - emax, but we cannot */ \
+ /* add an assert as the underflow flag could have already been */ \
+ /* set before the call to mpfr_cot. */ \
+ MPFR_SAVE_EXPO_UPDATE_FLAGS (expo, __gmpfr_flags); \
+ goto end; \
+ }
+
+#include "gen_inverse.h"
diff --git a/src/coth.c b/src/coth.c
new file mode 100644
index 000000000..edab6f19c
--- /dev/null
+++ b/src/coth.c
@@ -0,0 +1,93 @@
+/* mpfr_coth - Hyperbolic cotangent function.
+
+Copyright 2005, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+/* the hyperbolic cotangent is defined by coth(x) = 1/tanh(x)
+ coth (NaN) = NaN.
+ coth (+Inf) = 1
+ coth (-Inf) = -1
+ coth (+0) = +Inf.
+ coth (-0) = -Inf.
+*/
+
+#define FUNCTION mpfr_coth
+#define INVERSE mpfr_tanh
+#define ACTION_NAN(y) do { MPFR_SET_NAN(y); MPFR_RET_NAN; } while (1)
+#define ACTION_INF(y) return mpfr_set_si (y, MPFR_IS_POS(x) ? 1 : -1, rnd_mode)
+#define ACTION_ZERO(y,x) do { MPFR_SET_SAME_SIGN(y,x); MPFR_SET_INF(y); \
+ MPFR_RET(0); } while (1)
+
+/* We know |coth(x)| > 1, thus if the approximation z is such that
+ 1 <= z <= 1 + 2^(-p) where p is the target precision, then the
+ result is either 1 or nextabove(1) = 1 + 2^(1-p). */
+#define ACTION_SPECIAL \
+ if (MPFR_GET_EXP(z) == 1) /* 1 <= |z| < 2 */ \
+ { \
+ /* the following is exact by Sterbenz theorem */ \
+ mpfr_sub_si (z, z, MPFR_SIGN(z) > 0 ? 1 : -1, MPFR_RNDN); \
+ if (MPFR_IS_ZERO(z) || MPFR_GET_EXP(z) <= - (mpfr_exp_t) precy) \
+ { \
+ mpfr_add_si (z, z, MPFR_SIGN(z) > 0 ? 1 : -1, MPFR_RNDN); \
+ break; \
+ } \
+ }
+
+/* The analysis is adapted from that for mpfr_csc:
+ near x=0, coth(x) = 1/x + x/3 + ..., more precisely we have
+ |coth(x) - 1/x| <= 0.32 for |x| <= 1. Like for csc, the error term has
+ the same sign as 1/x, thus |coth(x)| >= |1/x|. Then:
+ (i) either x is a power of two, then 1/x is exactly representable, and
+ as long as 1/2*ulp(1/x) > 0.32, we can conclude;
+ (ii) otherwise assume x has <= n bits, and y has <= n+1 bits, then
+ |y - 1/x| >= 2^(-2n) ufp(y), where ufp means unit in first place.
+ Since |coth(x) - 1/x| <= 0.32, if 2^(-2n) ufp(y) >= 0.64, then
+ |y - coth(x)| >= 2^(-2n-1) ufp(y), and rounding 1/x gives the correct
+ result. If x < 2^E, then y > 2^(-E), thus ufp(y) > 2^(-E-1).
+ A sufficient condition is thus EXP(x) + 1 <= -2 MAX(PREC(x),PREC(Y)). */
+#define ACTION_TINY(y,x,r) \
+ if (MPFR_EXP(x) + 1 <= -2 * (mpfr_exp_t) MAX(MPFR_PREC(x), MPFR_PREC(y))) \
+ { \
+ int signx = MPFR_SIGN(x); \
+ inexact = mpfr_ui_div (y, 1, x, r); \
+ if (inexact == 0) /* x is a power of two */ \
+ { /* result always 1/x, except when rounding away from zero */ \
+ if (rnd_mode == MPFR_RNDA) \
+ rnd_mode = (signx > 0) ? MPFR_RNDU : MPFR_RNDD; \
+ if (rnd_mode == MPFR_RNDU) \
+ { \
+ if (signx > 0) \
+ mpfr_nextabove (y); /* 2^k + epsilon */ \
+ inexact = 1; \
+ } \
+ else if (rnd_mode == MPFR_RNDD) \
+ { \
+ if (signx < 0) \
+ mpfr_nextbelow (y); /* -2^k - epsilon */ \
+ inexact = -1; \
+ } \
+ else /* round to zero, or nearest */ \
+ inexact = -signx; \
+ } \
+ MPFR_SAVE_EXPO_UPDATE_FLAGS (expo, __gmpfr_flags); \
+ goto end; \
+ }
+
+#include "gen_inverse.h"
diff --git a/src/csc.c b/src/csc.c
new file mode 100644
index 000000000..e89a319bf
--- /dev/null
+++ b/src/csc.c
@@ -0,0 +1,76 @@
+/* mpfr_csc - cosecant function.
+
+Copyright 2005, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+/* the cosecant is defined by csc(x) = 1/sin(x).
+ csc (NaN) = NaN.
+ csc (+Inf) = csc (-Inf) = NaN.
+ csc (+0) = +Inf.
+ csc (-0) = -Inf.
+*/
+
+#define FUNCTION mpfr_csc
+#define INVERSE mpfr_sin
+#define ACTION_NAN(y) do { MPFR_SET_NAN(y); MPFR_RET_NAN; } while (1)
+#define ACTION_INF(y) do { MPFR_SET_NAN(y); MPFR_RET_NAN; } while (1)
+#define ACTION_ZERO(y,x) do { MPFR_SET_SAME_SIGN(y,x); MPFR_SET_INF(y); \
+ MPFR_RET(0); } while (1)
+/* near x=0, we have csc(x) = 1/x + x/6 + ..., more precisely we have
+ |csc(x) - 1/x| <= 0.2 for |x| <= 1. The analysis is similar to that for
+ gamma(x) near x=0 (see gamma.c), except here the error term has the same
+ sign as 1/x, thus |csc(x)| >= |1/x|. Then:
+ (i) either x is a power of two, then 1/x is exactly representable, and
+ as long as 1/2*ulp(1/x) > 0.2, we can conclude;
+ (ii) otherwise assume x has <= n bits, and y has <= n+1 bits, then
+ |y - 1/x| >= 2^(-2n) ufp(y), where ufp means unit in first place.
+ Since |csc(x) - 1/x| <= 0.2, if 2^(-2n) ufp(y) >= 0.4, then
+ |y - csc(x)| >= 2^(-2n-1) ufp(y), and rounding 1/x gives the correct result.
+ If x < 2^E, then y > 2^(-E), thus ufp(y) > 2^(-E-1).
+ A sufficient condition is thus EXP(x) <= -2 MAX(PREC(x),PREC(Y)). */
+#define ACTION_TINY(y,x,r) \
+ if (MPFR_EXP(x) <= -2 * (mpfr_exp_t) MAX(MPFR_PREC(x), MPFR_PREC(y))) \
+ { \
+ int signx = MPFR_SIGN(x); \
+ inexact = mpfr_ui_div (y, 1, x, r); \
+ if (inexact == 0) /* x is a power of two */ \
+ { /* result always 1/x, except when rounding away from zero */ \
+ if (rnd_mode == MPFR_RNDA) \
+ rnd_mode = (signx > 0) ? MPFR_RNDU : MPFR_RNDD; \
+ if (rnd_mode == MPFR_RNDU) \
+ { \
+ if (signx > 0) \
+ mpfr_nextabove (y); /* 2^k + epsilon */ \
+ inexact = 1; \
+ } \
+ else if (rnd_mode == MPFR_RNDD) \
+ { \
+ if (signx < 0) \
+ mpfr_nextbelow (y); /* -2^k - epsilon */ \
+ inexact = -1; \
+ } \
+ else /* round to zero, or nearest */ \
+ inexact = -signx; \
+ } \
+ MPFR_SAVE_EXPO_UPDATE_FLAGS (expo, __gmpfr_flags); \
+ goto end; \
+ }
+
+#include "gen_inverse.h"
diff --git a/src/csch.c b/src/csch.c
new file mode 100644
index 000000000..d15bdd3e4
--- /dev/null
+++ b/src/csch.c
@@ -0,0 +1,79 @@
+/* mpfr_csch - Hyperbolic cosecant function.
+
+Copyright 2005, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+/* the hyperbolic cosecant is defined by csch(x) = 1/sinh(x).
+ csch (NaN) = NaN.
+ csch (+Inf) = +0.
+ csch (-Inf) = -0.
+ csch (+0) = +Inf.
+ csch (-0) = -Inf.
+*/
+
+#define FUNCTION mpfr_csch
+#define INVERSE mpfr_sinh
+#define ACTION_NAN(y) do { MPFR_SET_NAN(y); MPFR_RET_NAN; } while (1)
+#define ACTION_INF(y) do { MPFR_SET_SAME_SIGN(y,x); MPFR_SET_ZERO (y); \
+ MPFR_RET(0); } while (1)
+#define ACTION_ZERO(y,x) do { MPFR_SET_SAME_SIGN(y,x); MPFR_SET_INF(y); \
+ MPFR_RET(0); } while (1)
+
+/* (This analysis is adapted from that for mpfr_csc.)
+ Near x=0, we have csch(x) = 1/x - x/6 + ..., more precisely we have
+ |csch(x) - 1/x| <= 0.2 for |x| <= 1. The error term has the opposite
+ sign as 1/x, thus |csch(x)| <= |1/x|. Then:
+ (i) either x is a power of two, then 1/x is exactly representable, and
+ as long as 1/2*ulp(1/x) > 0.2, we can conclude;
+ (ii) otherwise assume x has <= n bits, and y has <= n+1 bits, then
+ |y - 1/x| >= 2^(-2n) ufp(y), where ufp means unit in first place.
+ Since |csch(x) - 1/x| <= 0.2, if 2^(-2n) ufp(y) >= 0.4, then
+ |y - csch(x)| >= 2^(-2n-1) ufp(y), and rounding 1/x gives the correct
+ result. If x < 2^E, then y > 2^(-E), thus ufp(y) > 2^(-E-1).
+ A sufficient condition is thus EXP(x) <= -2 MAX(PREC(x),PREC(Y)). */
+#define ACTION_TINY(y,x,r) \
+ if (MPFR_EXP(x) <= -2 * (mpfr_exp_t) MAX(MPFR_PREC(x), MPFR_PREC(y))) \
+ { \
+ int signx = MPFR_SIGN(x); \
+ inexact = mpfr_ui_div (y, 1, x, r); \
+ if (inexact == 0) /* x is a power of two */ \
+ { /* result always 1/x, except when rounding to zero */ \
+ if (rnd_mode == MPFR_RNDA) \
+ rnd_mode = (signx > 0) ? MPFR_RNDU : MPFR_RNDD; \
+ if (rnd_mode == MPFR_RNDU || (rnd_mode == MPFR_RNDZ && signx < 0)) \
+ { \
+ if (signx < 0) \
+ mpfr_nextabove (y); /* -2^k + epsilon */ \
+ inexact = 1; \
+ } \
+ else if (rnd_mode == MPFR_RNDD || rnd_mode == MPFR_RNDZ) \
+ { \
+ if (signx > 0) \
+ mpfr_nextbelow (y); /* 2^k - epsilon */ \
+ inexact = -1; \
+ } \
+ else /* round to nearest */ \
+ inexact = signx; \
+ } \
+ MPFR_SAVE_EXPO_UPDATE_FLAGS (expo, __gmpfr_flags); \
+ goto end; \
+ }
+
+#include "gen_inverse.h"
diff --git a/src/d_div.c b/src/d_div.c
new file mode 100644
index 000000000..a67b28209
--- /dev/null
+++ b/src/d_div.c
@@ -0,0 +1,49 @@
+/* mpfr_d_div -- divide a machine double precision float
+ by a multiple precision floating-point number
+
+Copyright 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#include "mpfr-impl.h"
+
+int
+mpfr_d_div (mpfr_ptr a, double b, mpfr_srcptr c, mpfr_rnd_t rnd_mode)
+{
+ int inexact;
+ mpfr_t d;
+ MPFR_SAVE_EXPO_DECL (expo);
+
+ MPFR_LOG_FUNC (("b=%.20g c[%#R]=%R rnd=%d", b, c, c, rnd_mode),
+ ("a[%#R]=%R", a, a));
+
+ MPFR_SAVE_EXPO_MARK (expo);
+
+ mpfr_init2 (d, IEEE_DBL_MANT_DIG);
+ inexact = mpfr_set_d (d, b, rnd_mode);
+ MPFR_ASSERTN (inexact == 0);
+
+ mpfr_clear_flags ();
+ inexact = mpfr_div (a, d, c, rnd_mode);
+ MPFR_SAVE_EXPO_UPDATE_FLAGS (expo, __gmpfr_flags);
+
+ mpfr_clear(d);
+ MPFR_SAVE_EXPO_FREE (expo);
+ return mpfr_check_range (a, inexact, rnd_mode);
+}
diff --git a/src/d_sub.c b/src/d_sub.c
new file mode 100644
index 000000000..ef3a418c6
--- /dev/null
+++ b/src/d_sub.c
@@ -0,0 +1,49 @@
+/* mpfr_d_sub -- subtract a multiple precision floating-point number
+ from a machine double precision float
+
+Copyright 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#include "mpfr-impl.h"
+
+int
+mpfr_d_sub (mpfr_ptr a, double b, mpfr_srcptr c, mpfr_rnd_t rnd_mode)
+{
+ int inexact;
+ mpfr_t d;
+ MPFR_SAVE_EXPO_DECL (expo);
+
+ MPFR_LOG_FUNC (("b=%.20g c[%#R]=%R rnd=%d", b, c, c, rnd_mode),
+ ("a[%#R]=%R", a, a));
+
+ MPFR_SAVE_EXPO_MARK (expo);
+
+ mpfr_init2 (d, IEEE_DBL_MANT_DIG);
+ inexact = mpfr_set_d (d, b, rnd_mode);
+ MPFR_ASSERTN (inexact == 0);
+
+ mpfr_clear_flags ();
+ inexact = mpfr_sub (a, d, c, rnd_mode);
+ MPFR_SAVE_EXPO_UPDATE_FLAGS (expo, __gmpfr_flags);
+
+ mpfr_clear(d);
+ MPFR_SAVE_EXPO_FREE (expo);
+ return mpfr_check_range (a, inexact, rnd_mode);
+}
diff --git a/src/digamma.c b/src/digamma.c
new file mode 100644
index 000000000..b6d48a4a8
--- /dev/null
+++ b/src/digamma.c
@@ -0,0 +1,372 @@
+/* mpfr_digamma -- digamma function of a floating-point number
+
+Copyright 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#include "mpfr-impl.h"
+
+/* Put in s an approximation of digamma(x).
+ Assumes x >= 2.
+ Assumes s does not overlap with x.
+ Returns an integer e such that the error is bounded by 2^e ulps
+ of the result s.
+*/
+static mpfr_exp_t
+mpfr_digamma_approx (mpfr_ptr s, mpfr_srcptr x)
+{
+ mpfr_prec_t p = MPFR_PREC (s);
+ mpfr_t t, u, invxx;
+ mpfr_exp_t e, exps, f, expu;
+ mpz_t *INITIALIZED(B); /* variable B declared as initialized */
+ unsigned long n0, n; /* number of allocated B[] */
+
+ MPFR_ASSERTN(MPFR_IS_POS(x) && (MPFR_EXP(x) >= 2));
+
+ mpfr_init2 (t, p);
+ mpfr_init2 (u, p);
+ mpfr_init2 (invxx, p);
+
+ mpfr_log (s, x, MPFR_RNDN); /* error <= 1/2 ulp */
+ mpfr_ui_div (t, 1, x, MPFR_RNDN); /* error <= 1/2 ulp */
+ mpfr_div_2exp (t, t, 1, MPFR_RNDN); /* exact */
+ mpfr_sub (s, s, t, MPFR_RNDN);
+ /* error <= 1/2 + 1/2*2^(EXP(olds)-EXP(s)) + 1/2*2^(EXP(t)-EXP(s)).
+ For x >= 2, log(x) >= 2*(1/(2x)), thus olds >= 2t, and olds - t >= olds/2,
+ thus 0 <= EXP(olds)-EXP(s) <= 1, and EXP(t)-EXP(s) <= 0, thus
+ error <= 1/2 + 1/2*2 + 1/2 <= 2 ulps. */
+ e = 2; /* initial error */
+ mpfr_mul (invxx, x, x, MPFR_RNDZ); /* invxx = x^2 * (1 + theta)
+ for |theta| <= 2^(-p) */
+ mpfr_ui_div (invxx, 1, invxx, MPFR_RNDU); /* invxx = 1/x^2 * (1 + theta)^2 */
+
+ /* in the following we note err=xxx when the ratio between the approximation
+ and the exact result can be written (1 + theta)^xxx for |theta| <= 2^(-p),
+ following Higham's method */
+ B = mpfr_bernoulli_internal ((mpz_t *) 0, 0);
+ mpfr_set_ui (t, 1, MPFR_RNDN); /* err = 0 */
+ for (n = 1;; n++)
+ {
+ /* compute next Bernoulli number */
+ B = mpfr_bernoulli_internal (B, n);
+ /* The main term is Bernoulli[2n]/(2n)/x^(2n) = B[n]/(2n+1)!(2n)/x^(2n)
+ = B[n]*t[n]/(2n) where t[n]/t[n-1] = 1/(2n)/(2n+1)/x^2. */
+ mpfr_mul (t, t, invxx, MPFR_RNDU); /* err = err + 3 */
+ mpfr_div_ui (t, t, 2 * n, MPFR_RNDU); /* err = err + 1 */
+ mpfr_div_ui (t, t, 2 * n + 1, MPFR_RNDU); /* err = err + 1 */
+ /* we thus have err = 5n here */
+ mpfr_div_ui (u, t, 2 * n, MPFR_RNDU); /* err = 5n+1 */
+ mpfr_mul_z (u, u, B[n], MPFR_RNDU); /* err = 5n+2, and the
+ absolute error is bounded
+ by 10n+4 ulp(u) [Rule 11] */
+ /* if the terms 'u' are decreasing by a factor two at least,
+ then the error coming from those is bounded by
+ sum((10n+4)/2^n, n=1..infinity) = 24 */
+ exps = mpfr_get_exp (s);
+ expu = mpfr_get_exp (u);
+ if (expu < exps - (mpfr_exp_t) p)
+ break;
+ mpfr_sub (s, s, u, MPFR_RNDN); /* error <= 24 + n/2 */
+ if (mpfr_get_exp (s) < exps)
+ e <<= exps - mpfr_get_exp (s);
+ e ++; /* error in mpfr_sub */
+ f = 10 * n + 4;
+ while (expu < exps)
+ {
+ f = (1 + f) / 2;
+ expu ++;
+ }
+      e += f; /* total rounding error coming from 'u' term */
+ }
+
+ n0 = ++n;
+ while (n--)
+ mpz_clear (B[n]);
+ (*__gmp_free_func) (B, n0 * sizeof (mpz_t));
+
+ mpfr_clear (t);
+ mpfr_clear (u);
+ mpfr_clear (invxx);
+
+ f = 0;
+ while (e > 1)
+ {
+ f++;
+ e = (e + 1) / 2;
+ /* Invariant: 2^f * e does not decrease */
+ }
+ return f;
+}
+
+/* Use the reflection formula Digamma(1-x) = Digamma(x) + Pi * cot(Pi*x),
+ i.e., Digamma(x) = Digamma(1-x) - Pi * cot(Pi*x).
+ Assume x < 1/2. */
+static int
+mpfr_digamma_reflection (mpfr_ptr y, mpfr_srcptr x, mpfr_rnd_t rnd_mode)
+{
+ mpfr_prec_t p = MPFR_PREC(y) + 10, q;
+ mpfr_t t, u, v;
+ mpfr_exp_t e1, expv;
+ int inex;
+ MPFR_ZIV_DECL (loop);
+
+ /* we want that 1-x is exact with precision q: if 0 < x < 1/2, then
+ q = PREC(x)-EXP(x) is ok, otherwise if -1 <= x < 0, q = PREC(x)-EXP(x)
+ is ok, otherwise for x < -1, PREC(x) is ok if EXP(x) <= PREC(x),
+ otherwise we need EXP(x) */
+ if (MPFR_EXP(x) < 0)
+ q = MPFR_PREC(x) + 1 - MPFR_EXP(x);
+ else if (MPFR_EXP(x) <= MPFR_PREC(x))
+ q = MPFR_PREC(x) + 1;
+ else
+ q = MPFR_EXP(x);
+ mpfr_init2 (u, q);
+ MPFR_ASSERTN(mpfr_ui_sub (u, 1, x, MPFR_RNDN) == 0);
+
+ /* if x is half an integer, cot(Pi*x) = 0, thus Digamma(x) = Digamma(1-x) */
+ mpfr_mul_2exp (u, u, 1, MPFR_RNDN);
+ inex = mpfr_integer_p (u);
+ mpfr_div_2exp (u, u, 1, MPFR_RNDN);
+ if (inex)
+ {
+ inex = mpfr_digamma (y, u, rnd_mode);
+ goto end;
+ }
+
+ mpfr_init2 (t, p);
+ mpfr_init2 (v, p);
+
+ MPFR_ZIV_INIT (loop, p);
+ for (;;)
+ {
+ mpfr_const_pi (v, MPFR_RNDN); /* v = Pi*(1+theta) for |theta|<=2^(-p) */
+ mpfr_mul (t, v, x, MPFR_RNDN); /* (1+theta)^2 */
+ e1 = MPFR_EXP(t) - (mpfr_exp_t) p + 1; /* bound for t: err(t) <= 2^e1 */
+ mpfr_cot (t, t, MPFR_RNDN);
+ /* cot(t * (1+h)) = cot(t) - theta * (1 + cot(t)^2) with |theta|<=t*h */
+ if (MPFR_EXP(t) > 0)
+ e1 = e1 + 2 * MPFR_EXP(t) + 1;
+ else
+ e1 = e1 + 1;
+ /* now theta * (1 + cot(t)^2) <= 2^e1 */
+ e1 += (mpfr_exp_t) p - MPFR_EXP(t); /* error is now 2^e1 ulps */
+ mpfr_mul (t, t, v, MPFR_RNDN);
+ e1 ++;
+ mpfr_digamma (v, u, MPFR_RNDN); /* error <= 1/2 ulp */
+ expv = MPFR_EXP(v);
+ mpfr_sub (v, v, t, MPFR_RNDN);
+ if (MPFR_EXP(v) < MPFR_EXP(t))
+ e1 += MPFR_EXP(t) - MPFR_EXP(v); /* scale error for t wrt new v */
+ /* now take into account the 1/2 ulp error for v */
+ if (expv - MPFR_EXP(v) - 1 > e1)
+ e1 = expv - MPFR_EXP(v) - 1;
+ else
+ e1 ++;
+ e1 ++; /* rounding error for mpfr_sub */
+ if (MPFR_CAN_ROUND (v, p - e1, MPFR_PREC(y), rnd_mode))
+ break;
+ MPFR_ZIV_NEXT (loop, p);
+ mpfr_set_prec (t, p);
+ mpfr_set_prec (v, p);
+ }
+ MPFR_ZIV_FREE (loop);
+
+ inex = mpfr_set (y, v, rnd_mode);
+
+ mpfr_clear (t);
+ mpfr_clear (v);
+ end:
+ mpfr_clear (u);
+
+ return inex;
+}
+
+/* we have x >= 1/2 here */
+static int
+mpfr_digamma_positive (mpfr_ptr y, mpfr_srcptr x, mpfr_rnd_t rnd_mode)
+{
+ mpfr_prec_t p = MPFR_PREC(y) + 10, q;
+ mpfr_t t, u, x_plus_j;
+ int inex;
+ mpfr_exp_t errt, erru, expt;
+ unsigned long j = 0, min;
+ MPFR_ZIV_DECL (loop);
+
+ /* compute a precision q such that x+1 is exact */
+ if (MPFR_PREC(x) < MPFR_EXP(x))
+ q = MPFR_EXP(x);
+ else
+ q = MPFR_PREC(x) + 1;
+ mpfr_init2 (x_plus_j, q);
+
+ mpfr_init2 (t, p);
+ mpfr_init2 (u, p);
+ MPFR_ZIV_INIT (loop, p);
+ for(;;)
+ {
+ /* Lower bound for x+j in mpfr_digamma_approx call: since the smallest
+ term of the divergent series for Digamma(x) is about exp(-2*Pi*x), and
+ we want it to be less than 2^(-p), this gives x > p*log(2)/(2*Pi)
+ i.e., x >= 0.1103 p.
+ To be safe, we ensure x >= 0.25 * p.
+ */
+ min = (p + 3) / 4;
+ if (min < 2)
+ min = 2;
+
+ mpfr_set (x_plus_j, x, MPFR_RNDN);
+ mpfr_set_ui (u, 0, MPFR_RNDN);
+ j = 0;
+ while (mpfr_cmp_ui (x_plus_j, min) < 0)
+ {
+ j ++;
+ mpfr_ui_div (t, 1, x_plus_j, MPFR_RNDN); /* err <= 1/2 ulp */
+ mpfr_add (u, u, t, MPFR_RNDN);
+ inex = mpfr_add_ui (x_plus_j, x_plus_j, 1, MPFR_RNDZ);
+ if (inex != 0) /* we lost one bit */
+ {
+ q ++;
+ mpfr_prec_round (x_plus_j, q, MPFR_RNDZ);
+ mpfr_nextabove (x_plus_j);
+ }
+ /* since all terms are positive, the error is bounded by j ulps */
+ }
+ for (erru = 0; j > 1; erru++, j = (j + 1) / 2);
+ errt = mpfr_digamma_approx (t, x_plus_j);
+ expt = MPFR_EXP(t);
+ mpfr_sub (t, t, u, MPFR_RNDN);
+ if (MPFR_EXP(t) < expt)
+ errt += expt - MPFR_EXP(t);
+ if (MPFR_EXP(t) < MPFR_EXP(u))
+ erru += MPFR_EXP(u) - MPFR_EXP(t);
+ if (errt > erru)
+ errt = errt + 1;
+ else if (errt == erru)
+ errt = errt + 2;
+ else
+ errt = erru + 1;
+ if (MPFR_CAN_ROUND (t, p - errt, MPFR_PREC(y), rnd_mode))
+ break;
+ MPFR_ZIV_NEXT (loop, p);
+ mpfr_set_prec (t, p);
+ mpfr_set_prec (u, p);
+ }
+ MPFR_ZIV_FREE (loop);
+ inex = mpfr_set (y, t, rnd_mode);
+ mpfr_clear (t);
+ mpfr_clear (u);
+ mpfr_clear (x_plus_j);
+ return inex;
+}
+
+int
+mpfr_digamma (mpfr_ptr y, mpfr_srcptr x, mpfr_rnd_t rnd_mode)
+{
+ int inex;
+ MPFR_SAVE_EXPO_DECL (expo);
+
+ if (MPFR_UNLIKELY(MPFR_IS_SINGULAR(x)))
+ {
+ if (MPFR_IS_NAN(x))
+ {
+ MPFR_SET_NAN(y);
+ MPFR_RET_NAN;
+ }
+ else if (MPFR_IS_INF(x))
+ {
+ if (MPFR_IS_POS(x)) /* Digamma(+Inf) = +Inf */
+ {
+ MPFR_SET_SAME_SIGN(y, x);
+ MPFR_SET_INF(y);
+ MPFR_RET(0);
+ }
+ else /* Digamma(-Inf) = NaN */
+ {
+ MPFR_SET_NAN(y);
+ MPFR_RET_NAN;
+ }
+ }
+ else /* Zero case */
+ {
+ /* the following works also in case of overlap */
+ MPFR_SET_INF(y);
+ MPFR_SET_OPPOSITE_SIGN(y, x);
+ MPFR_RET(0);
+ }
+ }
+
+ /* Digamma is undefined for negative integers */
+ if (MPFR_IS_NEG(x) && mpfr_integer_p (x))
+ {
+ MPFR_SET_NAN(y);
+ MPFR_RET_NAN;
+ }
+
+ /* now x is a normal number */
+
+ MPFR_SAVE_EXPO_MARK (expo);
+ /* for x very small, we have Digamma(x) = -1/x - gamma + O(x), more precisely
+ -1 < Digamma(x) + 1/x < 0 for -0.2 < x < 0.2, thus:
+ (i) either x is a power of two, then 1/x is exactly representable, and
+ as long as 1/2*ulp(1/x) > 1, we can conclude;
+ (ii) otherwise assume x has <= n bits, and y has <= n+1 bits, then
+ |y + 1/x| >= 2^(-2n) ufp(y), where ufp means unit in first place.
+ Since |Digamma(x) + 1/x| <= 1, if 2^(-2n) ufp(y) >= 2, then
+ |y - Digamma(x)| >= 2^(-2n-1)ufp(y), and rounding -1/x gives the correct result.
+ If x < 2^E, then y > 2^(-E), thus ufp(y) > 2^(-E-1).
+ A sufficient condition is thus EXP(x) <= -2 MAX(PREC(x),PREC(Y)). */
+ if (MPFR_EXP(x) < -2)
+ {
+ if (MPFR_EXP(x) <= -2 * (mpfr_exp_t) MAX(MPFR_PREC(x), MPFR_PREC(y)))
+ {
+ int signx = MPFR_SIGN(x);
+ inex = mpfr_si_div (y, -1, x, rnd_mode);
+ if (inex == 0) /* x is a power of two */
+ { /* result always -1/x, except when rounding down */
+ if (rnd_mode == MPFR_RNDA)
+ rnd_mode = (signx > 0) ? MPFR_RNDD : MPFR_RNDU;
+ if (rnd_mode == MPFR_RNDZ)
+ rnd_mode = (signx > 0) ? MPFR_RNDU : MPFR_RNDD;
+ if (rnd_mode == MPFR_RNDU)
+ inex = 1;
+ else if (rnd_mode == MPFR_RNDD)
+ {
+ mpfr_nextbelow (y);
+ inex = -1;
+ }
+ else /* nearest */
+ inex = 1;
+ }
+ MPFR_SAVE_EXPO_UPDATE_FLAGS (expo, __gmpfr_flags);
+ goto end;
+ }
+ }
+
+ if (MPFR_IS_NEG(x))
+ inex = mpfr_digamma_reflection (y, x, rnd_mode);
+ /* if x < 1/2 we use the reflection formula */
+ else if (MPFR_EXP(x) < 0)
+ inex = mpfr_digamma_reflection (y, x, rnd_mode);
+ else
+ inex = mpfr_digamma_positive (y, x, rnd_mode);
+
+ end:
+ MPFR_SAVE_EXPO_FREE (expo);
+ return mpfr_check_range (y, inex, rnd_mode);
+}
diff --git a/src/dim.c b/src/dim.c
new file mode 100644
index 000000000..078a69e03
--- /dev/null
+++ b/src/dim.c
@@ -0,0 +1,48 @@
+/* mpfr_dim -- positive difference
+
+Copyright 2001, 2002, 2004, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#include "mpfr-impl.h"
+
+/* dim (x,y) is defined as:
+
+ x-y if x > y
+ +0 if x <= y
+*/
+
+int
+mpfr_dim (mpfr_ptr z, mpfr_srcptr x, mpfr_srcptr y, mpfr_rnd_t rnd_mode)
+{
+ if (MPFR_IS_NAN(x) || MPFR_IS_NAN(y))
+ {
+ MPFR_SET_NAN(z);
+ MPFR_RET_NAN;
+ }
+
+ if (mpfr_cmp (x,y) > 0)
+ return mpfr_sub (z, x, y, rnd_mode);
+ else
+ {
+ MPFR_SET_ZERO(z);
+ MPFR_SET_POS(z);
+ MPFR_RET(0);
+ }
+}
diff --git a/src/div.c b/src/div.c
new file mode 100644
index 000000000..534511e26
--- /dev/null
+++ b/src/div.c
@@ -0,0 +1,676 @@
+/* mpfr_div -- divide two floating-point numbers
+
+Copyright 1999, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#define MPFR_NEED_LONGLONG_H
+#include "mpfr-impl.h"
+
+#ifdef DEBUG2
+#define mpfr_mpn_print(ap,n) mpfr_mpn_print3 (ap,n,MPFR_LIMB_ZERO)
+static void
+mpfr_mpn_print3 (mp_ptr ap, mp_size_t n, mp_limb_t cy)
+{
+ mp_size_t i;
+ for (i = 0; i < n; i++)
+ printf ("+%lu*2^%lu", (unsigned long) ap[i], (unsigned long)
+ (GMP_NUMB_BITS * i));
+ if (cy)
+ printf ("+2^%lu", (unsigned long) (GMP_NUMB_BITS * n));
+ printf ("\n");
+}
+#endif
+
+/* check if {ap, an} is zero */
+static int
+mpfr_mpn_cmpzero (mp_ptr ap, mp_size_t an)
+{
+ while (an > 0)
+ if (MPFR_LIKELY(ap[--an] != MPFR_LIMB_ZERO))
+ return 1;
+ return 0;
+}
+
+/* compare {ap, an} and {bp, bn} >> extra,
+ aligned by the more significant limbs.
+ Takes into account bp[0] for extra=1.
+*/
+static int
+mpfr_mpn_cmp_aux (mp_ptr ap, mp_size_t an, mp_ptr bp, mp_size_t bn, int extra)
+{
+ int cmp = 0;
+ mp_size_t k;
+ mp_limb_t bb;
+
+ if (an >= bn)
+ {
+ k = an - bn;
+ while (cmp == 0 && bn > 0)
+ {
+ bn --;
+ bb = (extra) ? ((bp[bn+1] << (GMP_NUMB_BITS - 1)) | (bp[bn] >> 1))
+ : bp[bn];
+ cmp = (ap[k + bn] > bb) ? 1 : ((ap[k + bn] < bb) ? -1 : 0);
+ }
+ bb = (extra) ? bp[0] << (GMP_NUMB_BITS - 1) : MPFR_LIMB_ZERO;
+ while (cmp == 0 && k > 0)
+ {
+ k--;
+ cmp = (ap[k] > bb) ? 1 : ((ap[k] < bb) ? -1 : 0);
+ bb = MPFR_LIMB_ZERO; /* ensure we consider only once bp[0] & 1 */
+ }
+ if (cmp == 0 && bb != MPFR_LIMB_ZERO)
+ cmp = -1;
+ }
+ else /* an < bn */
+ {
+ k = bn - an;
+ while (cmp == 0 && an > 0)
+ {
+ an --;
+ bb = (extra) ? ((bp[k+an+1] << (GMP_NUMB_BITS - 1)) | (bp[k+an] >> 1))
+ : bp[k+an];
+ if (ap[an] > bb)
+ cmp = 1;
+ else if (ap[an] < bb)
+ cmp = -1;
+ }
+ while (cmp == 0 && k > 0)
+ {
+ k--;
+ bb = (extra) ? ((bp[k+1] << (GMP_NUMB_BITS - 1)) | (bp[k] >> 1))
+ : bp[k];
+ cmp = (bb != MPFR_LIMB_ZERO) ? -1 : 0;
+ }
+ if (cmp == 0 && extra && (bp[0] & MPFR_LIMB_ONE))
+ cmp = -1;
+ }
+ return cmp;
+}
+
+/* {ap, n} <- {ap, n} - {bp, n} >> extra - cy, with cy = 0 or 1.
+ Return borrow out.
+*/
+static mp_limb_t
+mpfr_mpn_sub_aux (mp_ptr ap, mp_ptr bp, mp_size_t n, mp_limb_t cy, int extra)
+{
+ mp_limb_t bb, rp;
+
+ MPFR_ASSERTD (cy <= 1);
+ while (n--)
+ {
+ bb = (extra) ? ((bp[1] << (GMP_NUMB_BITS-1)) | (bp[0] >> 1)) : bp[0];
+ rp = ap[0] - bb - cy;
+ cy = (ap[0] < bb) || (cy && ~rp == MPFR_LIMB_ZERO) ?
+ MPFR_LIMB_ONE : MPFR_LIMB_ZERO;
+ ap[0] = rp;
+ ap ++;
+ bp ++;
+ }
+ MPFR_ASSERTD (cy <= 1);
+ return cy;
+}
+
+int
+mpfr_div (mpfr_ptr q, mpfr_srcptr u, mpfr_srcptr v, mpfr_rnd_t rnd_mode)
+{
+ mp_size_t q0size = MPFR_LIMB_SIZE(q); /* number of limbs of destination */
+ mp_size_t usize = MPFR_LIMB_SIZE(u);
+ mp_size_t vsize = MPFR_LIMB_SIZE(v);
+ mp_size_t qsize; /* number of limbs of the computed quotient */
+ mp_size_t qqsize;
+ mp_size_t k;
+ mp_ptr q0p = MPFR_MANT(q), qp;
+ mp_ptr up = MPFR_MANT(u);
+ mp_ptr vp = MPFR_MANT(v);
+ mp_ptr ap;
+ mp_ptr bp;
+ mp_limb_t qh;
+ mp_limb_t sticky_u = MPFR_LIMB_ZERO;
+ mp_limb_t low_u;
+ mp_limb_t sticky_v = MPFR_LIMB_ZERO;
+ mp_limb_t sticky;
+ mp_limb_t sticky3;
+ mp_limb_t round_bit = MPFR_LIMB_ZERO;
+ mpfr_exp_t qexp;
+ int sign_quotient;
+ int extra_bit;
+ int sh, sh2;
+ int inex;
+ int like_rndz;
+ MPFR_TMP_DECL(marker);
+
+ MPFR_LOG_FUNC (("u[%#R]=%R v[%#R]=%R rnd=%d", u, u, v, v, rnd_mode),
+ ("q[%#R]=%R inexact=%d", q, q, inex));
+
+ /**************************************************************************
+ * *
+ * This part of the code deals with special cases *
+ * *
+ **************************************************************************/
+
+ if (MPFR_UNLIKELY(MPFR_ARE_SINGULAR(u,v)))
+ {
+ if (MPFR_IS_NAN(u) || MPFR_IS_NAN(v))
+ {
+ MPFR_SET_NAN(q);
+ MPFR_RET_NAN;
+ }
+ sign_quotient = MPFR_MULT_SIGN( MPFR_SIGN(u) , MPFR_SIGN(v) );
+ MPFR_SET_SIGN(q, sign_quotient);
+ if (MPFR_IS_INF(u))
+ {
+ if (MPFR_IS_INF(v))
+ {
+ MPFR_SET_NAN(q);
+ MPFR_RET_NAN;
+ }
+ else
+ {
+ MPFR_SET_INF(q);
+ MPFR_RET(0);
+ }
+ }
+ else if (MPFR_IS_INF(v))
+ {
+ MPFR_SET_ZERO (q);
+ MPFR_RET (0);
+ }
+ else if (MPFR_IS_ZERO (v))
+ {
+ if (MPFR_IS_ZERO (u))
+ {
+ MPFR_SET_NAN(q);
+ MPFR_RET_NAN;
+ }
+ else
+ {
+ MPFR_SET_INF(q);
+ MPFR_RET(0);
+ }
+ }
+ else
+ {
+ MPFR_ASSERTD (MPFR_IS_ZERO (u));
+ MPFR_SET_ZERO (q);
+ MPFR_RET (0);
+ }
+ }
+
+ /**************************************************************************
+ * *
+ * End of the part concerning special values. *
+ * *
+ **************************************************************************/
+
+ MPFR_TMP_MARK(marker);
+
+ /* set sign */
+ sign_quotient = MPFR_MULT_SIGN( MPFR_SIGN(u) , MPFR_SIGN(v) );
+ MPFR_SET_SIGN(q, sign_quotient);
+
+ /* determine if an extra bit comes from the division, i.e. if the
+ significand of u (as a fraction in [1/2, 1[) is larger than that
+ of v */
+ if (MPFR_LIKELY(up[usize - 1] != vp[vsize - 1]))
+ extra_bit = (up[usize - 1] > vp[vsize - 1]) ? 1 : 0;
+ else /* most significant limbs are equal, must look at further limbs */
+ {
+ mp_size_t l;
+
+ k = usize - 1;
+ l = vsize - 1;
+ while (k != 0 && l != 0 && up[--k] == vp[--l]);
+ /* now k=0 or l=0 or up[k] != vp[l] */
+ if (up[k] > vp[l])
+ extra_bit = 1;
+ else if (up[k] < vp[l])
+ extra_bit = 0;
+ /* now up[k] = vp[l], thus either k=0 or l=0 */
+ else if (l == 0) /* no more divisor limb */
+ extra_bit = 1;
+ else /* k=0: no more dividend limb */
+ extra_bit = mpfr_mpn_cmpzero (vp, l) == 0;
+ }
+#ifdef DEBUG
+ printf ("extra_bit=%d\n", extra_bit);
+#endif
+
+ /* set exponent */
+ qexp = MPFR_GET_EXP (u) - MPFR_GET_EXP (v) + extra_bit;
+
+ MPFR_UNSIGNED_MINUS_MODULO(sh, MPFR_PREC(q));
+
+ if (MPFR_UNLIKELY(rnd_mode == MPFR_RNDN && sh == 0))
+ { /* we compute the quotient with one more limb, in order to get
+ the round bit in the quotient, and the remainder only contains
+ sticky bits */
+ qsize = q0size + 1;
+ /* need to allocate memory for the quotient */
+ qp = (mp_ptr) MPFR_TMP_ALLOC (qsize * sizeof(mp_limb_t));
+ }
+ else
+ {
+ qsize = q0size;
+ qp = q0p; /* directly put the quotient in the destination */
+ }
+ qqsize = qsize + qsize;
+
+ /* prepare the dividend */
+ ap = (mp_ptr) MPFR_TMP_ALLOC (qqsize * sizeof(mp_limb_t));
+ if (MPFR_LIKELY(qqsize > usize)) /* use the full dividend */
+ {
+ k = qqsize - usize; /* k > 0 */
+ MPN_ZERO(ap, k);
+ if (extra_bit)
+ ap[k - 1] = mpn_rshift (ap + k, up, usize, 1);
+ else
+ MPN_COPY(ap + k, up, usize);
+ }
+ else /* truncate the dividend */
+ {
+ k = usize - qqsize;
+ if (extra_bit)
+ sticky_u = mpn_rshift (ap, up + k, qqsize, 1);
+ else
+ MPN_COPY(ap, up + k, qqsize);
+ sticky_u = sticky_u || mpfr_mpn_cmpzero (up, k);
+ }
+ low_u = sticky_u;
+
+ /* now sticky_u is non-zero iff the truncated part of u is non-zero */
+
+ /* prepare the divisor */
+ if (MPFR_LIKELY(vsize >= qsize))
+ {
+ k = vsize - qsize;
+ if (qp != vp)
+ bp = vp + k; /* avoid copying the divisor */
+ else /* need to copy, since mpn_divrem doesn't allow overlap
+ between quotient and divisor, necessarily k = 0
+ since quotient and divisor are the same mpfr variable */
+ {
+ bp = (mp_ptr) MPFR_TMP_ALLOC (qsize * sizeof(mp_limb_t));
+ MPN_COPY(bp, vp, vsize);
+ }
+ sticky_v = sticky_v || mpfr_mpn_cmpzero (vp, k);
+ k = 0;
+ }
+ else /* vsize < qsize: small divisor case */
+ {
+ bp = vp;
+ k = qsize - vsize;
+ }
+
+ /* we now can perform the division */
+ qh = mpn_divrem (qp, 0, ap + k, qqsize - k, bp, qsize - k);
+ /* warning: qh may be 1 if u1 == v1, but u < v */
+#ifdef DEBUG2
+ printf ("q="); mpfr_mpn_print (qp, qsize);
+ printf ("r="); mpfr_mpn_print (ap, qsize);
+#endif
+
+ k = qsize;
+ sticky_u = sticky_u || mpfr_mpn_cmpzero (ap, k);
+
+ sticky = sticky_u | sticky_v;
+
+ /* now sticky is non-zero iff one of the following holds:
+ (a) the truncated part of u is non-zero
+ (b) the truncated part of v is non-zero
+ (c) the remainder from division is non-zero */
+
+ if (MPFR_LIKELY(qsize == q0size))
+ {
+ sticky3 = qp[0] & MPFR_LIMB_MASK(sh); /* does nothing when sh=0 */
+ sh2 = sh;
+ }
+ else /* qsize = q0size + 1: only happens when rnd_mode=MPFR_RNDN and sh=0 */
+ {
+ MPN_COPY (q0p, qp + 1, q0size);
+ sticky3 = qp[0];
+ sh2 = GMP_NUMB_BITS;
+ }
+ qp[0] ^= sticky3;
+ /* sticky3 contains the truncated bits from the quotient,
+ including the round bit, and 1 <= sh2 <= GMP_NUMB_BITS
+ is the number of bits in sticky3 */
+ inex = (sticky != MPFR_LIMB_ZERO) || (sticky3 != MPFR_LIMB_ZERO);
+#ifdef DEBUG
+ printf ("sticky=%lu sticky3=%lu inex=%d\n",
+ (unsigned long) sticky, (unsigned long) sticky3, inex);
+#endif
+
+ like_rndz = rnd_mode == MPFR_RNDZ ||
+ rnd_mode == (sign_quotient < 0 ? MPFR_RNDU : MPFR_RNDD);
+
+ /* to round, we distinguish two cases:
+ (a) vsize <= qsize: we used the full divisor
+ (b) vsize > qsize: the divisor was truncated
+ */
+
+#ifdef DEBUG
+ printf ("vsize=%lu qsize=%lu\n",
+ (unsigned long) vsize, (unsigned long) qsize);
+#endif
+ if (MPFR_LIKELY(vsize <= qsize)) /* use the full divisor */
+ {
+ if (MPFR_LIKELY(rnd_mode == MPFR_RNDN))
+ {
+ round_bit = sticky3 & (MPFR_LIMB_ONE << (sh2 - 1));
+ sticky = (sticky3 ^ round_bit) | sticky_u;
+ }
+ else if (like_rndz || inex == 0)
+ sticky = (inex == 0) ? MPFR_LIMB_ZERO : MPFR_LIMB_ONE;
+ else /* round away from zero */
+ sticky = MPFR_LIMB_ONE;
+ goto case_1;
+ }
+ else /* vsize > qsize: need to truncate the divisor */
+ {
+ if (inex == 0)
+ goto truncate;
+ else
+ {
+ /* We know the estimated quotient is an upper bound of the exact
+ quotient (with rounding toward zero), with a difference of at
+ most 2 in qp[0].
+ Thus we can round except when sticky3 is 000...000 or 000...001
+ for directed rounding, and 100...000 or 100...001 for rounding
+ to nearest. (For rounding to nearest, we cannot determine the
+ inexact flag for 000...000 or 000...001.)
+ */
+ mp_limb_t sticky3orig = sticky3;
+ if (rnd_mode == MPFR_RNDN)
+ {
+ round_bit = sticky3 & (MPFR_LIMB_ONE << (sh2 - 1));
+ sticky3 = sticky3 ^ round_bit;
+#ifdef DEBUG
+ printf ("rb=%lu sb=%lu\n",
+ (unsigned long) round_bit, (unsigned long) sticky3);
+#endif
+ }
+ if (sticky3 != MPFR_LIMB_ZERO && sticky3 != MPFR_LIMB_ONE)
+ {
+ sticky = sticky3;
+ goto case_1;
+ }
+ else /* hard case: we have to compare q1 * v0 and r + low(u),
+ where q1 * v0 has qsize + (vsize-qsize) = vsize limbs, and
+ r + low(u) has qsize + (usize-2*qsize) = usize-qsize limbs */
+ {
+ mp_size_t l;
+ mp_ptr sp;
+ int cmp_s_r;
+ mp_limb_t qh2;
+
+ sp = (mp_ptr) MPFR_TMP_ALLOC (vsize * sizeof(mp_limb_t));
+ k = vsize - qsize;
+ /* sp <- {qp, qsize} * {vp, vsize-qsize} */
+ qp[0] ^= sticky3orig; /* restore original quotient */
+ if (qsize >= k)
+ mpn_mul (sp, qp, qsize, vp, k);
+ else
+ mpn_mul (sp, vp, k, qp, qsize);
+ if (qh)
+ qh2 = mpn_add_n (sp + qsize, sp + qsize, vp, k);
+ else
+ qh2 = (mp_limb_t) 0;
+ qp[0] ^= sticky3orig; /* restore truncated quotient */
+
+ /* compare qh2 + {sp, k + qsize} to {ap, qsize} + low(u) */
+ cmp_s_r = (qh2 != 0) ? 1 : mpn_cmp (sp + k, ap, qsize);
+ if (cmp_s_r == 0) /* compare {sp, k} and low(u) */
+ {
+ cmp_s_r = (usize >= qqsize) ?
+ mpfr_mpn_cmp_aux (sp, k, up, usize - qqsize, extra_bit) :
+ mpfr_mpn_cmpzero (sp, k);
+ }
+#ifdef DEBUG
+ printf ("cmp(q*v0,r+u0)=%d\n", cmp_s_r);
+#endif
+ /* now cmp_s_r > 0 if {sp, vsize} > {ap, qsize} + low(u)
+ cmp_s_r = 0 if {sp, vsize} = {ap, qsize} + low(u)
+ cmp_s_r < 0 if {sp, vsize} < {ap, qsize} + low(u) */
+ if (cmp_s_r <= 0) /* quotient is in [q1, q1+1) */
+ {
+ sticky = (cmp_s_r == 0) ? sticky3 : MPFR_LIMB_ONE;
+ goto case_1;
+ }
+ else /* cmp_s_r > 0, quotient is < q1: to determine if it is
+ in [q1-2,q1-1] or in [q1-1,q1], we need to subtract
+ the low part u0 of the dividend u0 from q*v0 */
+ {
+ mp_limb_t cy = MPFR_LIMB_ZERO;
+
+ /* subtract low(u)>>extra_bit if non-zero */
+ if (qh2 != 0) /* whatever the value of {up, m + k}, it
+ will be smaller than qh2 + {sp, k} */
+ cmp_s_r = 1;
+ else
+ {
+ if (low_u != MPFR_LIMB_ZERO)
+ {
+ mp_size_t m;
+ l = usize - qqsize; /* number of low limbs in u */
+ m = (l > k) ? l - k : 0;
+ cy = (extra_bit) ?
+ (up[m] & MPFR_LIMB_ONE) : MPFR_LIMB_ZERO;
+ if (l >= k) /* u0 has more limbs than s:
+ first look if {up, m} is not zero,
+ and compare {sp, k} and {up + m, k} */
+ {
+ cy = cy || mpfr_mpn_cmpzero (up, m);
+ low_u = cy;
+ cy = mpfr_mpn_sub_aux (sp, up + m, k,
+ cy, extra_bit);
+ }
+ else /* l < k: s has more limbs than u0 */
+ {
+ low_u = MPFR_LIMB_ZERO;
+ if (cy != MPFR_LIMB_ZERO)
+ cy = mpn_sub_1 (sp + k - l - 1, sp + k - l - 1,
+ 1, MPFR_LIMB_HIGHBIT);
+ cy = mpfr_mpn_sub_aux (sp + k - l, up, l,
+ cy, extra_bit);
+ }
+ }
+ MPFR_ASSERTD (cy <= 1);
+ cy = mpn_sub_1 (sp + k, sp + k, qsize, cy);
+ /* subtract r */
+ cy += mpn_sub_n (sp + k, sp + k, ap, qsize);
+ MPFR_ASSERTD (cy <= 1);
+ /* now compare {sp, ssize} to v */
+ cmp_s_r = mpn_cmp (sp, vp, vsize);
+ if (cmp_s_r == 0 && low_u != MPFR_LIMB_ZERO)
+ cmp_s_r = 1; /* since in fact we subtracted
+ less than 1 */
+ }
+#ifdef DEBUG
+ printf ("cmp(q*v0-(r+u0),v)=%d\n", cmp_s_r);
+#endif
+ if (cmp_s_r <= 0) /* q1-1 <= u/v < q1 */
+ {
+ if (sticky3 == MPFR_LIMB_ONE)
+ { /* q1-1 is either representable (directed rounding),
+ or the middle of two numbers (nearest) */
+ sticky = (cmp_s_r) ? MPFR_LIMB_ONE : MPFR_LIMB_ZERO;
+ goto case_1;
+ }
+ /* now necessarily sticky3=0 */
+ else if (round_bit == MPFR_LIMB_ZERO)
+ { /* round_bit=0, sticky3=0: q1-1 is exact only
+ when sh=0 */
+ inex = (cmp_s_r || sh) ? -1 : 0;
+ if (rnd_mode == MPFR_RNDN ||
+ (! like_rndz && inex != 0))
+ {
+ inex = 1;
+ goto truncate_check_qh;
+ }
+ else /* round down */
+ goto sub_one_ulp;
+ }
+ else /* sticky3=0, round_bit=1 ==> rounding to nearest */
+ {
+ inex = cmp_s_r;
+ goto truncate;
+ }
+ }
+ else /* q1-2 < u/v < q1-1 */
+ {
+ /* if rnd=MPFR_RNDN, the result is q1 when
+ q1-2 >= q1-2^(sh-1), i.e. sh >= 2,
+ otherwise (sh=1) it is q1-2 */
+ if (rnd_mode == MPFR_RNDN) /* sh > 0 */
+ {
+ /* Case sh=1: sb=0 always, and q1-rb is exactly
+ representable, like q1-rb-2.
+ rb action
+ 0 subtract two ulps, inex=-1
+ 1 truncate, inex=1
+
+ Case sh>1: one ulp is 2^(sh-1) >= 2
+ rb sb action
+ 0 0 truncate, inex=1
+ 0 1 truncate, inex=1
+ 1 x truncate, inex=-1
+ */
+ if (sh == 1)
+ {
+ if (round_bit == MPFR_LIMB_ZERO)
+ {
+ inex = -1;
+ sh = 0;
+ goto sub_two_ulp;
+ }
+ else
+ {
+ inex = 1;
+ goto truncate_check_qh;
+ }
+ }
+ else /* sh > 1 */
+ {
+ inex = (round_bit == MPFR_LIMB_ZERO) ? 1 : -1;
+ goto truncate_check_qh;
+ }
+ }
+ else if (like_rndz)
+ {
+ /* the result is down(q1-2), i.e. subtract one
+ ulp if sh > 0, and two ulps if sh=0 */
+ inex = -1;
+ if (sh > 0)
+ goto sub_one_ulp;
+ else
+ goto sub_two_ulp;
+ }
+ /* if round away from zero, the result is up(q1-1),
+ which is q1 unless sh = 0, where it is q1-1 */
+ else
+ {
+ inex = 1;
+ if (sh > 0)
+ goto truncate_check_qh;
+ else /* sh = 0 */
+ goto sub_one_ulp;
+ }
+ }
+ }
+ }
+ }
+ }
+
+ case_1: /* quotient is in [q1, q1+1),
+ round_bit is the round_bit (0 for directed rounding),
+ sticky the sticky bit */
+ if (like_rndz || (round_bit == MPFR_LIMB_ZERO && sticky == MPFR_LIMB_ZERO))
+ {
+ inex = round_bit == MPFR_LIMB_ZERO && sticky == MPFR_LIMB_ZERO ? 0 : -1;
+ goto truncate;
+ }
+ else if (rnd_mode == MPFR_RNDN) /* sticky <> 0 or round <> 0 */
+ {
+ if (round_bit == MPFR_LIMB_ZERO) /* necessarily sticky <> 0 */
+ {
+ inex = -1;
+ goto truncate;
+ }
+ /* round_bit = 1 */
+ else if (sticky != MPFR_LIMB_ZERO)
+ goto add_one_ulp; /* inex=1 */
+ else /* round_bit=1, sticky=0 */
+ goto even_rule;
+ }
+ else /* round away from zero, sticky <> 0 */
+ goto add_one_ulp; /* with inex=1 */
+
+ sub_two_ulp:
+ /* we cannot subtract MPFR_LIMB_ONE << (sh+1) since this is
+ undefined for sh = GMP_NUMB_BITS */
+ qh -= mpn_sub_1 (q0p, q0p, q0size, MPFR_LIMB_ONE << sh);
+ /* go through */
+
+ sub_one_ulp:
+ qh -= mpn_sub_1 (q0p, q0p, q0size, MPFR_LIMB_ONE << sh);
+ /* go through truncate_check_qh */
+
+ truncate_check_qh:
+ if (qh)
+ {
+ qexp ++;
+ q0p[q0size - 1] = MPFR_LIMB_HIGHBIT;
+ }
+ goto truncate;
+
+ even_rule: /* has to set inex */
+ inex = (q0p[0] & (MPFR_LIMB_ONE << sh)) ? 1 : -1;
+ if (inex < 0)
+ goto truncate;
+ /* else go through add_one_ulp */
+
+ add_one_ulp:
+ inex = 1; /* always here */
+ if (mpn_add_1 (q0p, q0p, q0size, MPFR_LIMB_ONE << sh))
+ {
+ qexp ++;
+ q0p[q0size - 1] = MPFR_LIMB_HIGHBIT;
+ }
+
+ truncate: /* inex already set */
+
+ MPFR_TMP_FREE(marker);
+
+ /* check for underflow/overflow */
+ if (MPFR_UNLIKELY(qexp > __gmpfr_emax))
+ return mpfr_overflow (q, rnd_mode, sign_quotient);
+ else if (MPFR_UNLIKELY(qexp < __gmpfr_emin))
+ {
+ if (rnd_mode == MPFR_RNDN && ((qexp < __gmpfr_emin - 1) ||
+ (inex >= 0 && mpfr_powerof2_raw (q))))
+ rnd_mode = MPFR_RNDZ;
+ return mpfr_underflow (q, rnd_mode, sign_quotient);
+ }
+ MPFR_SET_EXP(q, qexp);
+
+ inex *= sign_quotient;
+ MPFR_RET (inex);
+}
diff --git a/src/div_2exp.c b/src/div_2exp.c
new file mode 100644
index 000000000..4d321ee45
--- /dev/null
+++ b/src/div_2exp.c
@@ -0,0 +1,33 @@
+/* mpfr_div_2exp -- divide a floating-point number by a power of two
+
+Copyright 1999, 2001, 2004, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#include "mpfr-impl.h"
+
+/* Obsolete function, use mpfr_div_2ui or mpfr_div_2si instead. */
+
+#undef mpfr_div_2exp
+
+int
+mpfr_div_2exp (mpfr_ptr y, mpfr_srcptr x, unsigned long int n, mpfr_rnd_t rnd_mode)
+{
+ return mpfr_div_2ui (y, x, n, rnd_mode);
+}
diff --git a/src/div_2si.c b/src/div_2si.c
new file mode 100644
index 000000000..2837924a2
--- /dev/null
+++ b/src/div_2si.c
@@ -0,0 +1,57 @@
+/* mpfr_div_2si -- divide a floating-point number by a power of two
+
+Copyright 1999, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#include "mpfr-impl.h"
+
+int
+mpfr_div_2si (mpfr_ptr y, mpfr_srcptr x, long int n, mpfr_rnd_t rnd_mode)
+{
+ int inexact;
+
+ MPFR_LOG_FUNC (("x[%#R]=%R n=%ld rnd=%d", x, x, n, rnd_mode),
+ ("y[%#R]=%R inexact=%d", y, y, inexact));
+
+ inexact = MPFR_UNLIKELY(y != x) ? mpfr_set (y, x, rnd_mode) : 0;
+
+ if (MPFR_LIKELY( MPFR_IS_PURE_FP(y) ))
+ {
+ mpfr_exp_t exp = MPFR_GET_EXP (y);
+ if (MPFR_UNLIKELY( n > 0 && (__gmpfr_emin > MPFR_EMAX_MAX - n ||
+ exp < __gmpfr_emin + n)) )
+ {
+ if (rnd_mode == MPFR_RNDN &&
+ (__gmpfr_emin > MPFR_EMAX_MAX - (n - 1) ||
+ exp < __gmpfr_emin + (n - 1) ||
+ (inexact >= 0 && mpfr_powerof2_raw (y))))
+ rnd_mode = MPFR_RNDZ;
+ return mpfr_underflow (y, rnd_mode, MPFR_SIGN(y));
+ }
+
+ if (MPFR_UNLIKELY(n < 0 && (__gmpfr_emax < MPFR_EMIN_MIN - n ||
+ exp > __gmpfr_emax + n)) )
+ return mpfr_overflow (y, rnd_mode, MPFR_SIGN(y));
+
+ MPFR_SET_EXP (y, exp - n);
+ }
+
+ return inexact;
+}
diff --git a/src/div_2ui.c b/src/div_2ui.c
new file mode 100644
index 000000000..8526b69a9
--- /dev/null
+++ b/src/div_2ui.c
@@ -0,0 +1,71 @@
+/* mpfr_div_2ui -- divide a floating-point number by a power of two
+
+Copyright 1999, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#include "mpfr-impl.h"
+
+int
+mpfr_div_2ui (mpfr_ptr y, mpfr_srcptr x, unsigned long n, mpfr_rnd_t rnd_mode)
+{
+ int inexact;
+
+ MPFR_LOG_FUNC (("x[%#R]=%R n=%lu rnd=%d", x, x, n, rnd_mode),
+ ("y[%#R]=%R inexact=%d", y, y, inexact));
+
+ /* Most of the times, this function is called with y==x */
+ inexact = MPFR_UNLIKELY(y != x) ? mpfr_set (y, x, rnd_mode) : 0;
+
+ if (MPFR_LIKELY( MPFR_IS_PURE_FP(y)) )
+ {
+ /* n will have to be cast to long to make sure that the addition
+ and subtraction below (for overflow detection) are signed */
+ while (MPFR_UNLIKELY(n > LONG_MAX))
+ {
+ int inex2;
+
+ n -= LONG_MAX;
+ inex2 = mpfr_div_2ui(y, y, LONG_MAX, rnd_mode);
+ if (inex2)
+ return inex2; /* underflow */
+ }
+
+ /* MPFR_EMAX_MAX - (long) n is signed and doesn't lead to an integer
+ overflow; the first test is useful so that the real test can't lead
+ to an integer overflow. */
+ {
+ mpfr_exp_t exp = MPFR_GET_EXP (y);
+ if (MPFR_UNLIKELY( __gmpfr_emin > MPFR_EMAX_MAX - (long) n ||
+ exp < __gmpfr_emin + (long) n) )
+ {
+ if (rnd_mode == MPFR_RNDN &&
+ (__gmpfr_emin > MPFR_EMAX_MAX - (long) (n - 1) ||
+ exp < __gmpfr_emin + (long) (n - 1) ||
+ (inexact >= 0 && mpfr_powerof2_raw (y))))
+ rnd_mode = MPFR_RNDZ;
+ return mpfr_underflow (y, rnd_mode, MPFR_SIGN(y));
+ }
+
+ MPFR_SET_EXP(y, exp - (long) n);
+ }
+ }
+
+ return inexact;
+}
diff --git a/src/div_d.c b/src/div_d.c
new file mode 100644
index 000000000..f8af1a17f
--- /dev/null
+++ b/src/div_d.c
@@ -0,0 +1,49 @@
+/* mpfr_div_d -- divide a multiple precision floating-point number
+ by a machine double precision float
+
+Copyright 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#include "mpfr-impl.h"
+
+int
+mpfr_div_d (mpfr_ptr a, mpfr_srcptr b, double c, mpfr_rnd_t rnd_mode)
+{
+ int inexact;
+ mpfr_t d;
+ MPFR_SAVE_EXPO_DECL (expo);
+
+ MPFR_LOG_FUNC (("b[%#R]=%R c%.20g rnd=%d", b, b, c, rnd_mode),
+ ("a[%#R]=%R", a, a));
+
+ MPFR_SAVE_EXPO_MARK (expo);
+
+ mpfr_init2 (d, IEEE_DBL_MANT_DIG);
+ inexact = mpfr_set_d (d, c, rnd_mode);
+ MPFR_ASSERTN (inexact == 0);
+
+ mpfr_clear_flags ();
+ inexact = mpfr_div (a, b, d, rnd_mode);
+ MPFR_SAVE_EXPO_UPDATE_FLAGS (expo, __gmpfr_flags);
+
+ mpfr_clear(d);
+ MPFR_SAVE_EXPO_FREE (expo);
+ return mpfr_check_range (a, inexact, rnd_mode);
+}
diff --git a/src/div_ui.c b/src/div_ui.c
new file mode 100644
index 000000000..b7e9a3b7f
--- /dev/null
+++ b/src/div_ui.c
@@ -0,0 +1,267 @@
+/* mpfr_div_{ui,si} -- divide a floating-point number by a machine integer
+
+Copyright 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#define MPFR_NEED_LONGLONG_H
+#include "mpfr-impl.h"
+
+/* returns 0 if result exact, non-zero otherwise */
+int
+mpfr_div_ui (mpfr_ptr y, mpfr_srcptr x, unsigned long int u, mpfr_rnd_t rnd_mode)
+{
+ long i;
+ int sh;
+ mp_size_t xn, yn, dif;
+ mp_limb_t *xp, *yp, *tmp, c, d;
+ mpfr_exp_t exp;
+ int inexact, middle = 1, nexttoinf;
+ MPFR_TMP_DECL(marker);
+
+ if (MPFR_UNLIKELY (MPFR_IS_SINGULAR (x)))
+ {
+ if (MPFR_IS_NAN (x))
+ {
+ MPFR_SET_NAN (y);
+ MPFR_RET_NAN;
+ }
+ else if (MPFR_IS_INF (x))
+ {
+ MPFR_SET_INF (y);
+ MPFR_SET_SAME_SIGN (y, x);
+ MPFR_RET (0);
+ }
+ else
+ {
+ MPFR_ASSERTD (MPFR_IS_ZERO(x));
+ if (u == 0) /* 0/0 is NaN */
+ {
+ MPFR_SET_NAN(y);
+ MPFR_RET_NAN;
+ }
+ else
+ {
+ MPFR_SET_ZERO(y);
+ MPFR_SET_SAME_SIGN (y, x);
+ MPFR_RET(0);
+ }
+ }
+ }
+ else if (MPFR_UNLIKELY (u <= 1))
+ {
+ if (u < 1)
+ {
+ /* x/0 is Inf since x != 0*/
+ MPFR_SET_INF (y);
+ MPFR_SET_SAME_SIGN (y, x);
+ MPFR_RET (0);
+ }
+ else /* y = x/1 = x */
+ return mpfr_set (y, x, rnd_mode);
+ }
+ else if (MPFR_UNLIKELY (IS_POW2 (u)))
+ return mpfr_div_2si (y, x, MPFR_INT_CEIL_LOG2 (u), rnd_mode);
+
+ MPFR_SET_SAME_SIGN (y, x);
+
+ MPFR_TMP_MARK (marker);
+ xn = MPFR_LIMB_SIZE (x);
+ yn = MPFR_LIMB_SIZE (y);
+
+ xp = MPFR_MANT (x);
+ yp = MPFR_MANT (y);
+ exp = MPFR_GET_EXP (x);
+
+ dif = yn + 1 - xn;
+
+ /* we need to store yn+1 = xn + dif limbs of the quotient */
+ /* don't use tmp=yp since the mpn_lshift call below requires yp >= tmp+1 */
+ tmp = (mp_limb_t*) MPFR_TMP_ALLOC ((yn + 1) * BYTES_PER_MP_LIMB);
+
+ c = (mp_limb_t) u;
+ MPFR_ASSERTN (u == c);
+ if (dif >= 0)
+ c = mpn_divrem_1 (tmp, dif, xp, xn, c); /* used all the dividend */
+ else /* dif < 0 i.e. xn > yn, don't use the (-dif) low limbs from x */
+ c = mpn_divrem_1 (tmp, 0, xp - dif, yn + 1, c);
+
+ inexact = (c != 0);
+
+ /* First pass in estimating next bit of the quotient, in case of RNDN *
+ * In case we just have the right number of bits (postpone this ?), *
+ * we need to check whether the remainder is more or less than half *
+ * the divisor. The test must be performed with a subtraction, so as *
+ * to prevent carries. */
+
+ if (MPFR_LIKELY (rnd_mode == MPFR_RNDN))
+ {
+ if (c < (mp_limb_t) u - c) /* We have u > c */
+ middle = -1;
+ else if (c > (mp_limb_t) u - c)
+ middle = 1;
+ else
+ middle = 0; /* exactly in the middle */
+ }
+
+ /* If we believe that we are right in the middle or exact, we should check
+ that we did not neglect any word of x (division large / 1 -> small). */
+
+ for (i=0; ((inexact == 0) || (middle == 0)) && (i < -dif); i++)
+ if (xp[i])
+ inexact = middle = 1; /* larger than middle */
+
+ /*
+ If the high limb of the result is 0 (xp[xn-1] < u), remove it.
+ Otherwise, compute the left shift to be performed to normalize.
+ In the latter case, we discard some low bits computed. They
+ contain information useful for the rounding, hence the updating
+ of middle and inexact.
+ */
+
+ if (tmp[yn] == 0)
+ {
+ MPN_COPY(yp, tmp, yn);
+ exp -= GMP_NUMB_BITS;
+ }
+ else
+ {
+ int shlz;
+
+ count_leading_zeros (shlz, tmp[yn]);
+
+ /* shift left to normalize */
+ if (MPFR_LIKELY (shlz != 0))
+ {
+ mp_limb_t w = tmp[0] << shlz;
+
+ mpn_lshift (yp, tmp + 1, yn, shlz);
+ yp[0] += tmp[0] >> (GMP_NUMB_BITS - shlz);
+
+ if (w > (MPFR_LIMB_ONE << (GMP_NUMB_BITS - 1)))
+ { middle = 1; }
+ else if (w < (MPFR_LIMB_ONE << (GMP_NUMB_BITS - 1)))
+ { middle = -1; }
+ else
+ { middle = (c != 0); }
+
+ inexact = inexact || (w != 0);
+ exp -= shlz;
+ }
+ else
+ { /* this happens only if u == 1 and xp[xn-1] >=
+ 1<<(GMP_NUMB_BITS-1). It might be better to handle the
+ u == 1 case separately ?
+ */
+
+ MPN_COPY (yp, tmp + 1, yn);
+ }
+ }
+
+ MPFR_UNSIGNED_MINUS_MODULO (sh, MPFR_PREC (y));
+ /* it remains sh bits in less significant limb of y */
+
+ d = *yp & MPFR_LIMB_MASK (sh);
+ *yp ^= d; /* set to zero lowest sh bits */
+
+ MPFR_TMP_FREE (marker);
+
+ if (exp < __gmpfr_emin - 1)
+ return mpfr_underflow (y, rnd_mode == MPFR_RNDN ? MPFR_RNDZ : rnd_mode,
+ MPFR_SIGN (y));
+
+ if (MPFR_UNLIKELY (d == 0 && inexact == 0))
+ nexttoinf = 0; /* result is exact */
+ else
+ {
+ MPFR_UPDATE2_RND_MODE(rnd_mode, MPFR_SIGN (y));
+ switch (rnd_mode)
+ {
+ case MPFR_RNDZ:
+ inexact = - MPFR_INT_SIGN (y); /* result is inexact */
+ nexttoinf = 0;
+ break;
+
+ case MPFR_RNDA:
+ inexact = MPFR_INT_SIGN (y);
+ nexttoinf = 1;
+ break;
+
+ default: /* should be MPFR_RNDN */
+ MPFR_ASSERTD (rnd_mode == MPFR_RNDN);
+ /* We have one more significant bit in yn. */
+ if (sh && d < (MPFR_LIMB_ONE << (sh - 1)))
+ {
+ inexact = - MPFR_INT_SIGN (y);
+ nexttoinf = 0;
+ }
+ else if (sh && d > (MPFR_LIMB_ONE << (sh - 1)))
+ {
+ inexact = MPFR_INT_SIGN (y);
+ nexttoinf = 1;
+ }
+ else /* sh = 0 or d = 1 << (sh-1) */
+ {
+ /* The first case is "false" even rounding (significant bits
+ indicate even rounding, but the result is inexact, so up) ;
+ The second case is the case where middle should be used to
+ decide the direction of rounding (no further bit computed) ;
+ The third is the true even rounding.
+ */
+ if ((sh && inexact) || (!sh && middle > 0) ||
+ (!inexact && *yp & (MPFR_LIMB_ONE << sh)))
+ {
+ inexact = MPFR_INT_SIGN (y);
+ nexttoinf = 1;
+ }
+ else
+ {
+ inexact = - MPFR_INT_SIGN (y);
+ nexttoinf = 0;
+ }
+ }
+ }
+ }
+
+ if (nexttoinf &&
+ MPFR_UNLIKELY (mpn_add_1 (yp, yp, yn, MPFR_LIMB_ONE << sh)))
+ {
+ exp++;
+ yp[yn-1] = MPFR_LIMB_HIGHBIT;
+ }
+
+ /* Set the exponent. Warning! One may still have an underflow. */
+ MPFR_EXP (y) = exp;
+
+ return mpfr_check_range (y, inexact, rnd_mode);
+}
+
+int mpfr_div_si (mpfr_ptr y, mpfr_srcptr x, long int u, mpfr_rnd_t rnd_mode)
+{
+ int res;
+
+ if (u >= 0)
+ res = mpfr_div_ui (y, x, u, rnd_mode);
+ else
+ {
+ res = -mpfr_div_ui (y, x, -u, MPFR_INVERT_RND (rnd_mode));
+ MPFR_CHANGE_SIGN (y);
+ }
+ return res;
+}
diff --git a/src/dump.c b/src/dump.c
new file mode 100644
index 000000000..b6d3983a9
--- /dev/null
+++ b/src/dump.c
@@ -0,0 +1,30 @@
+/* mpfr_dump -- Dump a float to stdout.
+
+Copyright 1999, 2001, 2004, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#include "mpfr-impl.h"
+
+void
+mpfr_dump (mpfr_srcptr u)
+{
+ mpfr_print_binary(u);
+ putchar('\n');
+}
diff --git a/src/eint.c b/src/eint.c
new file mode 100644
index 000000000..b5897ba14
--- /dev/null
+++ b/src/eint.c
@@ -0,0 +1,316 @@
+/* mpfr_eint, mpfr_eint1 -- the exponential integral
+
+Copyright 2005, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#define MPFR_NEED_LONGLONG_H
+#include "mpfr-impl.h"
+
+/* eint1(x) = -gamma - log(x) - sum((-1)^k*z^k/k/k!, k=1..infinity) for x > 0
+ = - eint(-x) for x < 0
+ where
+ eint (x) = gamma + log(x) + sum(z^k/k/k!, k=1..infinity) for x > 0
+ eint (x) is undefined for x < 0.
+*/
+
+/* compute in y an approximation of sum(x^k/k/k!, k=1..infinity),
+ and return e such that the absolute error is bound by 2^e ulp(y) */
static mpfr_exp_t
mpfr_eint_aux (mpfr_t y, mpfr_srcptr x)
{
  mpfr_t eps; /* dynamic (absolute) error bound on t */
  mpfr_t erru, errs;
  mpz_t m, s, t, u;
  mpfr_exp_t e, sizeinbase;
  mpfr_prec_t w = MPFR_PREC(y);
  unsigned long k;
  MPFR_GROUP_DECL (group);

  /* for |x| <= 1, we have S := sum(x^k/k/k!, k=1..infinity) = x + R(x)
     where |R(x)| <= (x/2)^2/(1-x/2) <= 2*(x/2)^2
     thus |R(x)/x| <= |x|/2
     thus if |x| <= 2^(-PREC(y)) we have |S - o(x)| <= ulp(y) */

  if (MPFR_GET_EXP(x) <= - (mpfr_exp_t) w)
    {
      mpfr_set (y, x, MPFR_RNDN);
      return 0;
    }

  mpz_init (s); /* initializes to 0 */
  mpz_init (t);
  mpz_init (u);
  mpz_init (m);
  MPFR_GROUP_INIT_3 (group, 31, eps, erru, errs);
  e = mpfr_get_z_2exp (m, x); /* x = m * 2^e */
  MPFR_ASSERTD (mpz_sizeinbase (m, 2) == MPFR_PREC (x));
  /* reduce m to the working precision w (truncation: the discarded low
     bits are absorbed by the error analysis below) */
  if (MPFR_PREC (x) > w)
    {
      e += MPFR_PREC (x) - w;
      mpz_tdiv_q_2exp (m, m, MPFR_PREC (x) - w);
    }
  /* remove trailing zeroes from m: this will speed up much cases where
     x is a small integer divided by a power of 2 */
  k = mpz_scan1 (m, 0);
  mpz_tdiv_q_2exp (m, m, k);
  e += k;
  /* initialize t to 2^w: the series is summed in scaled integer
     arithmetic, with t[k] holding 2^w * x^k / k! truncated */
  mpz_set_ui (t, 1);
  mpz_mul_2exp (t, t, w);
  mpfr_set_ui (eps, 0, MPFR_RNDN); /* eps[0] = 0 */
  mpfr_set_ui (errs, 0, MPFR_RNDN);
  for (k = 1;; k++)
    {
      /* let eps[k] be the absolute error on t[k]:
         since t[k] = trunc(t[k-1]*m*2^e/k), we have
         eps[k+1] <= 1 + eps[k-1]*m*2^e/k + t[k-1]*m*2^(1-w)*2^e/k
                  =  1 + (eps[k-1] + t[k-1]*2^(1-w))*m*2^e/k
                  =  1 + (eps[k-1]*2^(w-1) + t[k-1])*2^(1-w)*m*2^e/k */
      mpfr_mul_2ui (eps, eps, w - 1, MPFR_RNDU);
      mpfr_add_z (eps, eps, t, MPFR_RNDU);
      MPFR_MPZ_SIZEINBASE2 (sizeinbase, m);
      mpfr_mul_2si (eps, eps, sizeinbase - (w - 1) + e, MPFR_RNDU);
      mpfr_div_ui (eps, eps, k, MPFR_RNDU);
      mpfr_add_ui (eps, eps, 1, MPFR_RNDU);
      /* t <- t * m * 2^e / k (truncated), u <- t / k, s <- s + u,
         i.e. s accumulates 2^w * sum x^k/k/k! */
      mpz_mul (t, t, m);
      if (e < 0)
        mpz_tdiv_q_2exp (t, t, -e);
      else
        mpz_mul_2exp (t, t, e);
      mpz_tdiv_q_ui (t, t, k);
      mpz_tdiv_q_ui (u, t, k);
      mpz_add (s, s, u);
      /* the absolute error on u is <= 1 + eps[k]/k */
      mpfr_div_ui (erru, eps, k, MPFR_RNDU);
      mpfr_add_ui (erru, erru, 1, MPFR_RNDU);
      /* and that on s is the sum of all errors on u */
      mpfr_add (errs, errs, erru, MPFR_RNDU);
      /* we are done when t is smaller than errs */
      if (mpz_sgn (t) == 0)
        sizeinbase = 0;
      else
        MPFR_MPZ_SIZEINBASE2 (sizeinbase, t);
      if (sizeinbase < MPFR_GET_EXP (errs))
        break;
    }
  /* the truncation error is bounded by (|t|+eps)/k*(|x|/k + |x|^2/k^2 + ...)
     <= (|t|+eps)/k*|x|/(k-|x|) */
  mpz_abs (t, t);
  mpfr_add_z (eps, eps, t, MPFR_RNDU);
  mpfr_div_ui (eps, eps, k, MPFR_RNDU);
  mpfr_abs (erru, x, MPFR_RNDU); /* |x| */
  mpfr_mul (eps, eps, erru, MPFR_RNDU);
  mpfr_ui_sub (erru, k, erru, MPFR_RNDD);
  if (MPFR_IS_NEG (erru))
    {
      /* the truncated series does not converge, return fail */
      e = w;
    }
  else
    {
      mpfr_div (eps, eps, erru, MPFR_RNDU);
      mpfr_add (errs, errs, eps, MPFR_RNDU);
      mpfr_set_z (y, s, MPFR_RNDN);
      mpfr_div_2ui (y, y, w, MPFR_RNDN);
      /* errs was an absolute error bound on s. We must convert it to an error
         in terms of ulp(y). Since ulp(y) = 2^(EXP(y)-PREC(y)), we must
         divide the error by 2^(EXP(y)-PREC(y)), but since we divided also
         y by 2^w = 2^PREC(y), we must simply divide by 2^EXP(y). */
      e = MPFR_GET_EXP (errs) - MPFR_GET_EXP (y);
    }
  MPFR_GROUP_CLEAR (group);
  mpz_clear (s);
  mpz_clear (t);
  mpz_clear (u);
  mpz_clear (m);
  return e;
}
+
+/* Return in y an approximation of Ei(x) using the asymptotic expansion:
+ Ei(x) = exp(x)/x * (1 + 1/x + 2/x^2 + ... + k!/x^k + ...)
+ Assumes x >= PREC(y) * log(2).
+ Returns the error bound in terms of ulp(y).
+*/
static mpfr_exp_t
mpfr_eint_asympt (mpfr_ptr y, mpfr_srcptr x)
{
  mpfr_prec_t p = MPFR_PREC(y);
  mpfr_t invx, t, err;
  unsigned long k;
  mpfr_exp_t err_exp;

  mpfr_init2 (t, p);
  mpfr_init2 (invx, p);
  mpfr_init2 (err, 31); /* error in ulps on y */
  mpfr_ui_div (invx, 1, x, MPFR_RNDN); /* invx = 1/x*(1+u) with |u|<=2^(1-p) */
  mpfr_set_ui (t, 1, MPFR_RNDN); /* exact */
  mpfr_set (y, t, MPFR_RNDN);
  mpfr_set_ui (err, 0, MPFR_RNDN);
  /* loop until the term t = k!/x^k is below ulp(y), i.e. until the
     asymptotic series can be truncated at working precision */
  for (k = 1; MPFR_GET_EXP(t) + (mpfr_exp_t) p > MPFR_GET_EXP(y); k++)
    {
      mpfr_mul (t, t, invx, MPFR_RNDN); /* 2 more roundings */
      mpfr_mul_ui (t, t, k, MPFR_RNDN); /* 1 more rounding: t = k!/x^k*(1+u)^e
                                           with u=2^{-p} and |e| <= 3*k */
      /* we use the fact that |(1+u)^n-1| <= 2*|n*u| for |n*u| <= 1, thus
         the error on t is less than 6*k*2^{-p}*t <= 6*k*ulp(t) */
      /* err is in terms of ulp(y): transform it in terms of ulp(t) */
      mpfr_mul_2si (err, err, MPFR_GET_EXP(y) - MPFR_GET_EXP(t), MPFR_RNDU);
      mpfr_add_ui (err, err, 6 * k, MPFR_RNDU);
      /* transform back in terms of ulp(y) */
      mpfr_div_2si (err, err, MPFR_GET_EXP(y) - MPFR_GET_EXP(t), MPFR_RNDU);
      mpfr_add (y, y, t, MPFR_RNDN);
    }
  /* add the truncation error bounded by ulp(y): 1 ulp */
  /* final scaling: y <- y * exp(x) / x, each product doubling the
     accumulated ulp error and adding 3/2 ulp */
  mpfr_mul (y, y, invx, MPFR_RNDN); /* err <= 2*err + 3/2 */
  mpfr_exp (t, x, MPFR_RNDN); /* err(t) <= 1/2*ulp(t) */
  mpfr_mul (y, y, t, MPFR_RNDN); /* again: err <= 2*err + 3/2 */
  mpfr_mul_2ui (err, err, 2, MPFR_RNDU);
  mpfr_add_ui (err, err, 8, MPFR_RNDU);
  err_exp = MPFR_GET_EXP(err);
  mpfr_clear (t);
  mpfr_clear (invx);
  mpfr_clear (err);
  return err_exp;
}
+
int
mpfr_eint (mpfr_ptr y, mpfr_srcptr x, mpfr_rnd_t rnd)
{
  int inex;
  mpfr_t tmp, ump;
  mpfr_exp_t err, te;
  mpfr_prec_t prec;
  MPFR_SAVE_EXPO_DECL (expo);
  MPFR_ZIV_DECL (loop);

  MPFR_LOG_FUNC (("x[%#R]=%R rnd=%d", x, x, rnd),
                 ("y[%#R]=%R inexact=%d", y, y, inex));

  if (MPFR_UNLIKELY (MPFR_IS_SINGULAR (x)))
    {
      /* eint(NaN) = eint(-Inf) = NaN */
      if (MPFR_IS_NAN (x) || (MPFR_IS_INF (x) && MPFR_IS_NEG(x)))
        {
          MPFR_SET_NAN (y);
          MPFR_RET_NAN;
        }
      /* eint(+inf) = +inf */
      else if (MPFR_IS_INF (x))
        {
          MPFR_SET_INF(y);
          MPFR_SET_POS(y);
          MPFR_RET(0);
        }
      else /* eint(+/-0) = -Inf */
        {
          MPFR_SET_INF(y);
          MPFR_SET_NEG(y);
          MPFR_RET(0);
        }
    }

  /* eint(x) = NaN for x < 0 */
  if (MPFR_IS_NEG(x))
    {
      MPFR_SET_NAN (y);
      MPFR_RET_NAN;
    }

  MPFR_SAVE_EXPO_MARK (expo);

  /* Since eint(x) >= exp(x)/x, we have log2(eint(x)) >= (x-log(x))/log(2).
     Let's compute k <= (x-log(x))/log(2) in a low precision. If k >= emax,
     then log2(eint(x)) >= emax, and eint(x) >= 2^emax, i.e. it overflows. */
  mpfr_init2 (tmp, 64);
  mpfr_init2 (ump, 64);
  mpfr_log (tmp, x, MPFR_RNDU);
  mpfr_sub (ump, x, tmp, MPFR_RNDD);
  mpfr_const_log2 (tmp, MPFR_RNDU);
  mpfr_div (ump, ump, tmp, MPFR_RNDD);
  /* FIXME: We really need mpfr_set_exp_t and mpfr_cmpfr_exp_t functions. */
  MPFR_ASSERTN (MPFR_EMAX_MAX <= LONG_MAX);
  if (mpfr_cmp_ui (ump, __gmpfr_emax) >= 0)
    {
      mpfr_clear (tmp);
      mpfr_clear (ump);
      MPFR_SAVE_EXPO_FREE (expo);
      return mpfr_overflow (y, rnd, 1);
    }

  /* Init stuff */
  prec = MPFR_PREC (y) + 2 * MPFR_INT_CEIL_LOG2 (MPFR_PREC (y)) + 6;

  /* eint() has a root 0.37250741078136663446..., so if x is near,
     already take more bits */
  if (MPFR_GET_EXP(x) == -1) /* 1/4 <= x < 1/2 */
    {
      double d;
      d = mpfr_get_d (x, MPFR_RNDN) - 0.37250741078136663;
      d = (d == 0.0) ? -53 : __gmpfr_ceil_log2 (d);
      prec += -d;
    }

  mpfr_set_prec (tmp, prec);
  mpfr_set_prec (ump, prec);

  MPFR_ZIV_INIT (loop, prec); /* Initialize the ZivLoop controler */
  for (;;) /* Infinite loop */
    {
      /* We need that the smallest value of k!/x^k is smaller than 2^(-p).
         The minimum is obtained for x=k, and it is smaller than e*sqrt(x)/e^x
         for x>=1. */
      if (MPFR_GET_EXP (x) > 0 && mpfr_cmp_d (x, ((double) prec +
                    0.5 * (double) MPFR_GET_EXP (x)) * LOG2 + 1.0) > 0)
        err = mpfr_eint_asympt (tmp, x);
      else
        {
          /* Taylor series around 0, then add Euler's constant and log(x):
             eint(x) = gamma + log(x) + sum(x^k/k/k!). */
          err = mpfr_eint_aux (tmp, x); /* error <= 2^err ulp(tmp) */
          te = MPFR_GET_EXP(tmp);
          mpfr_const_euler (ump, MPFR_RNDN); /* 0.577 -> EXP(ump)=0 */
          mpfr_add (tmp, tmp, ump, MPFR_RNDN);
          /* error <= 1/2 + 1/2*2^(EXP(ump)-EXP(tmp)) + 2^(te-EXP(tmp)+err)
             <= 1/2 + 2^(MAX(EXP(ump), te+err+1) - EXP(tmp))
             <= 2^(MAX(0, 1 + MAX(EXP(ump), te+err+1) - EXP(tmp))) */
          err = MAX(1, te + err + 2) - MPFR_GET_EXP(tmp);
          err = MAX(0, err);
          te = MPFR_GET_EXP(tmp);
          mpfr_log (ump, x, MPFR_RNDN);
          mpfr_add (tmp, tmp, ump, MPFR_RNDN);
          /* same formula as above, except now EXP(ump) is not 0 */
          err += te + 1;
          if (MPFR_LIKELY (!MPFR_IS_ZERO (ump)))
            err = MAX (MPFR_GET_EXP (ump), err);
          err = MAX(0, err - MPFR_GET_EXP (tmp));
        }
      if (MPFR_LIKELY (MPFR_CAN_ROUND (tmp, prec - err, MPFR_PREC (y), rnd)))
        break;
      MPFR_ZIV_NEXT (loop, prec); /* Increase used precision */
      mpfr_set_prec (tmp, prec);
      mpfr_set_prec (ump, prec);
    }
  MPFR_ZIV_FREE (loop); /* Free the ZivLoop Controler */

  inex = mpfr_set (y, tmp, rnd); /* Set y to the computed value */
  mpfr_clear (tmp);
  mpfr_clear (ump);

  MPFR_SAVE_EXPO_FREE (expo);
  return mpfr_check_range (y, inex, rnd);
}
diff --git a/src/eq.c b/src/eq.c
new file mode 100644
index 000000000..598b1c25d
--- /dev/null
+++ b/src/eq.c
@@ -0,0 +1,141 @@
+/* mpfr_eq -- Compare two floats up to a specified bit #.
+
+Copyright 1999, 2001, 2003, 2004, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+
+#include "mpfr-impl.h"
+
+/* return non-zero if the first n_bits bits of u, v are equal,
+ 0 otherwise */
int
mpfr_eq (mpfr_srcptr u, mpfr_srcptr v, unsigned long int n_bits)
{
  mp_srcptr up, vp;
  mp_size_t usize, vsize, size, i;
  mpfr_exp_t uexp, vexp;
  int k;

  if (MPFR_ARE_SINGULAR(u, v))
    {
      if (MPFR_IS_NAN(u) || MPFR_IS_NAN(v))
        return 0; /* non equal */
      else if (MPFR_IS_INF(u) && MPFR_IS_INF(v))
        return (MPFR_SIGN(u) == MPFR_SIGN(v));
      else if (MPFR_IS_ZERO(u) && MPFR_IS_ZERO(v))
        return 1;
      else
        return 0;
    }

  /* 1. Are the signs different? */
  if (MPFR_SIGN(u) != MPFR_SIGN(v))
    return 0;

  uexp = MPFR_GET_EXP (u);
  vexp = MPFR_GET_EXP (v);

  /* 2. Are the exponents different? */
  if (uexp != vexp)
    return 0; /* no bit agree */

  usize = (MPFR_PREC(u) - 1) / GMP_NUMB_BITS + 1;
  vsize = (MPFR_PREC(v) - 1) / GMP_NUMB_BITS + 1;

  if (vsize > usize) /* exchange u and v */
    {
      up = MPFR_MANT(v);
      vp = MPFR_MANT(u);
      size = vsize;
      vsize = usize;
      usize = size;
    }
  else
    {
      up = MPFR_MANT(u);
      vp = MPFR_MANT(v);
    }

  /* now usize >= vsize */
  MPFR_ASSERTD(usize >= vsize);

  if (usize > vsize)
    {
      if ((unsigned long) vsize * GMP_NUMB_BITS < n_bits)
        {
          /* check if low min(PREC(u), n_bits) - (vsize * GMP_NUMB_BITS)
             bits from u are non-zero */
          unsigned long remains = n_bits - (vsize * GMP_NUMB_BITS);
          k = usize - vsize - 1;
          /* skip whole low limbs of u that are zero */
          while (k >= 0 && remains >= GMP_NUMB_BITS && !up[k])
            {
              k--;
              remains -= GMP_NUMB_BITS;
            }
          /* now either k < 0: all low bits from u are zero
             or remains < GMP_NUMB_BITS: check high bits from up[k]
             or up[k] <> 0: different */
          if (k >= 0 && (((remains < GMP_NUMB_BITS) &&
                          (up[k] >> (GMP_NUMB_BITS - remains))) ||
                         (remains >= GMP_NUMB_BITS && up[k])))
            return 0; /* surely too different */
        }
      size = vsize;
    }
  else
    {
      size = usize;
    }

  /* now size = min (usize, vsize) */

  /* If size is too large wrt n_bits, reduce it to look only at the
     high n_bits bits.
     Otherwise, if n_bits > size * GMP_NUMB_BITS, reduce n_bits to
     size * GMP_NUMB_BITS, since the extra low bits of one of the
     operands have already been checked above. */
  if ((unsigned long) size > 1 + (n_bits - 1) / GMP_NUMB_BITS)
    size = 1 + (n_bits - 1) / GMP_NUMB_BITS;
  else if (n_bits > (unsigned long) size * GMP_NUMB_BITS)
    n_bits = size * GMP_NUMB_BITS;

  up += usize - size;
  vp += vsize - size;

  /* compare whole limbs, most significant first */
  for (i = size - 1; i > 0 && n_bits >= GMP_NUMB_BITS; i--)
    {
      if (up[i] != vp[i])
        return 0;
      n_bits -= GMP_NUMB_BITS;
    }

  /* now either i=0 or n_bits<GMP_NUMB_BITS */

  /* since n_bits <= size * GMP_NUMB_BITS before the above for-loop,
     we have the invariant n_bits <= (i+1) * GMP_NUMB_BITS, thus
     we always have n_bits <= GMP_NUMB_BITS here */
  MPFR_ASSERTD(n_bits <= GMP_NUMB_BITS);

  /* compare only the high n_bits bits of the last limb */
  if (n_bits & (GMP_NUMB_BITS - 1))
    return (up[i] >> (GMP_NUMB_BITS - (n_bits & (GMP_NUMB_BITS - 1))) ==
            vp[i] >> (GMP_NUMB_BITS - (n_bits & (GMP_NUMB_BITS - 1))));
  else
    return (up[i] == vp[i]);
}
diff --git a/src/erf.c b/src/erf.c
new file mode 100644
index 000000000..2ed4c299e
--- /dev/null
+++ b/src/erf.c
@@ -0,0 +1,261 @@
+/* mpfr_erf -- error function of a floating-point number
+
+Copyright 2001, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by Ludovic Meunier and Paul Zimmermann.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#define MPFR_NEED_LONGLONG_H
+#include "mpfr-impl.h"
+
+#define EXP1 2.71828182845904523536 /* exp(1) */
+
+static int mpfr_erf_0 (mpfr_ptr, mpfr_srcptr, double, mpfr_rnd_t);
+
int
mpfr_erf (mpfr_ptr y, mpfr_srcptr x, mpfr_rnd_t rnd_mode)
{
  mpfr_t xf;
  int inex, large;
  MPFR_SAVE_EXPO_DECL (expo);

  MPFR_LOG_FUNC (("x[%#R]=%R rnd=%d", x, x, rnd_mode),
                 ("y[%#R]=%R inexact=%d", y, y, inex));

  if (MPFR_UNLIKELY (MPFR_IS_SINGULAR (x)))
    {
      if (MPFR_IS_NAN (x))
        {
          MPFR_SET_NAN (y);
          MPFR_RET_NAN;
        }
      else if (MPFR_IS_INF (x)) /* erf(+inf) = +1, erf(-inf) = -1 */
        return mpfr_set_si (y, MPFR_INT_SIGN (x), MPFR_RNDN);
      else /* erf(+0) = +0, erf(-0) = -0 */
        {
          MPFR_ASSERTD (MPFR_IS_ZERO (x));
          return mpfr_set (y, x, MPFR_RNDN); /* should keep the sign of x */
        }
    }

  /* now x is neither NaN, Inf nor 0 */

  /* first try expansion at x=0 when x is small, or asymptotic expansion
     where x is large */

  MPFR_SAVE_EXPO_MARK (expo);

  /* around x=0, we have erf(x) = 2x/sqrt(Pi) (1 - x^2/3 + ...),
     with 1 - x^2/3 <= sqrt(Pi)*erf(x)/2/x <= 1 for x >= 0. This means that
     if x^2/3 < 2^(-PREC(y)-1) we can decide of the correct rounding,
     unless we have a worst-case for 2x/sqrt(Pi). */
  if (MPFR_EXP(x) < - (mpfr_exp_t) (MPFR_PREC(y) / 2))
    {
      /* we use 2x/sqrt(Pi) (1 - x^2/3) <= erf(x) <= 2x/sqrt(Pi) for x > 0
         and 2x/sqrt(Pi) <= erf(x) <= 2x/sqrt(Pi) (1 - x^2/3) for x < 0.
         In both cases |2x/sqrt(Pi) (1 - x^2/3)| <= |erf(x)| <= |2x/sqrt(Pi)|.
         We will compute l and h such that l <= |2x/sqrt(Pi) (1 - x^2/3)|
         and |2x/sqrt(Pi)| <= h. If l and h round to the same value to
         precision PREC(y) and rounding rnd_mode, then we are done. */
      mpfr_t l, h; /* lower and upper bounds for erf(x) */
      int ok, inex2;

      mpfr_init2 (l, MPFR_PREC(y) + 17);
      mpfr_init2 (h, MPFR_PREC(y) + 17);
      /* first compute l */
      mpfr_mul (l, x, x, MPFR_RNDU);
      mpfr_div_ui (l, l, 3, MPFR_RNDU); /* upper bound on x^2/3 */
      mpfr_ui_sub (l, 1, l, MPFR_RNDZ); /* lower bound on 1 - x^2/3 */
      mpfr_const_pi (h, MPFR_RNDU); /* upper bound of Pi */
      mpfr_sqrt (h, h, MPFR_RNDU); /* upper bound on sqrt(Pi) */
      mpfr_div (l, l, h, MPFR_RNDZ); /* lower bound on 1/sqrt(Pi) (1 - x^2/3) */
      mpfr_mul_2ui (l, l, 1, MPFR_RNDZ); /* 2/sqrt(Pi) (1 - x^2/3) */
      mpfr_mul (l, l, x, MPFR_RNDZ); /* |l| is a lower bound on
                                        |2x/sqrt(Pi) (1 - x^2/3)| */
      /* now compute h */
      mpfr_const_pi (h, MPFR_RNDD); /* lower bound on Pi */
      mpfr_sqrt (h, h, MPFR_RNDD); /* lower bound on sqrt(Pi) */
      mpfr_div_2ui (h, h, 1, MPFR_RNDD); /* lower bound on sqrt(Pi)/2 */
      /* since sqrt(Pi)/2 < 1, the following should not underflow */
      mpfr_div (h, x, h, MPFR_IS_POS(x) ? MPFR_RNDU : MPFR_RNDD);
      /* round l and h to precision PREC(y) */
      inex = mpfr_prec_round (l, MPFR_PREC(y), rnd_mode);
      inex2 = mpfr_prec_round (h, MPFR_PREC(y), rnd_mode);
      /* Caution: we also need inex=inex2 (inex might be 0). */
      ok = SAME_SIGN (inex, inex2) && mpfr_cmp (l, h) == 0;
      if (ok)
        mpfr_set (y, h, rnd_mode);
      mpfr_clear (l);
      mpfr_clear (h);
      if (ok)
        goto end;
      /* this test can still fail for small precision, for example
         for x=-0.100E-2 with a target precision of 3 bits, since
         the error term x^2/3 is not that small. */
    }

  /* estimate x^2/log(2) in double-extended precision (53 bits) to decide
     whether exp(-x^2) < 2^(-PREC(y)-1), i.e. whether the asymptotic
     regime applies (see the comment below) */
  mpfr_init2 (xf, 53);
  mpfr_const_log2 (xf, MPFR_RNDU);
  mpfr_div (xf, x, xf, MPFR_RNDZ); /* round to zero ensures we get a lower
                                      bound of |x/log(2)| */
  mpfr_mul (xf, xf, x, MPFR_RNDZ);
  large = mpfr_cmp_ui (xf, MPFR_PREC (y) + 1) > 0;
  mpfr_clear (xf);

  /* when x goes to infinity, we have erf(x) = 1 - 1/sqrt(Pi)/exp(x^2)/x + ...
     and |erf(x) - 1| <= exp(-x^2) is true for any x >= 0, thus if
     exp(-x^2) < 2^(-PREC(y)-1) the result is 1 or 1-epsilon.
     This rewrites as x^2/log(2) > p+1. */
  if (MPFR_UNLIKELY (large))
    /* |erf x| = 1 or 1- */
    {
      mpfr_rnd_t rnd2 = MPFR_IS_POS (x) ? rnd_mode : MPFR_INVERT_RND(rnd_mode);
      if (rnd2 == MPFR_RNDN || rnd2 == MPFR_RNDU || rnd2 == MPFR_RNDA)
        {
          inex = MPFR_INT_SIGN (x);
          mpfr_set_si (y, inex, rnd2);
        }
      else /* round to zero */
        {
          inex = -MPFR_INT_SIGN (x);
          mpfr_setmax (y, 0); /* warning: setmax keeps the old sign of y */
          MPFR_SET_SAME_SIGN (y, x);
        }
    }
  else  /* use Taylor */
    {
      double xf2;

      /* FIXME: get rid of doubles/mpfr_get_d here */
      xf2 = mpfr_get_d (x, MPFR_RNDN);
      xf2 = xf2 * xf2; /* xf2 ~ x^2 */
      inex = mpfr_erf_0 (y, x, xf2, rnd_mode);
    }

 end:
  MPFR_SAVE_EXPO_FREE (expo);
  return mpfr_check_range (y, inex, rnd_mode);
}
+
+/* return x*2^e */
+static double
+mul_2exp (double x, mpfr_exp_t e)
+{
+ if (e > 0)
+ {
+ while (e--)
+ x *= 2.0;
+ }
+ else
+ {
+ while (e++)
+ x /= 2.0;
+ }
+
+ return x;
+}
+
+/* evaluates erf(x) using the expansion at x=0:
+
+ erf(x) = 2/sqrt(Pi) * sum((-1)^k*x^(2k+1)/k!/(2k+1), k=0..infinity)
+
+ Assumes x is neither NaN nor infinite nor zero.
+ Assumes also that e*x^2 <= n (target precision).
+ */
static int
mpfr_erf_0 (mpfr_ptr res, mpfr_srcptr x, double xf2, mpfr_rnd_t rnd_mode)
{
  mpfr_prec_t n, m;
  mpfr_exp_t nuk, sigmak;
  double tauk; /* running ulp-error bound on the partial sum s */
  mpfr_t y, s, t, u;
  unsigned int k;
  int log2tauk;
  int inex;
  MPFR_ZIV_DECL (loop);

  n = MPFR_PREC (res); /* target precision */

  /* initial working precision: the extra xf2/LOG2 bits presumably
     compensate the cancellation in the alternating series — see
     algorithms.tex for the analysis */
  m = n + (mpfr_prec_t) (xf2 / LOG2) + 8 + MPFR_INT_CEIL_LOG2 (n);

  mpfr_init2 (y, m);
  mpfr_init2 (s, m);
  mpfr_init2 (t, m);
  mpfr_init2 (u, m);

  MPFR_ZIV_INIT (loop, m);
  for (;;)
    {
      mpfr_mul (y, x, x, MPFR_RNDU); /* err <= 1 ulp */
      mpfr_set_ui (s, 1, MPFR_RNDN);
      mpfr_set_ui (t, 1, MPFR_RNDN);
      tauk = 0.0;

      /* sum (-1)^k x^(2k+1)/k!/(2k+1); t holds x^(2k)/k!, u the term */
      for (k = 1; ; k++)
        {
          mpfr_mul (t, y, t, MPFR_RNDU);
          mpfr_div_ui (t, t, k, MPFR_RNDU);
          mpfr_div_ui (u, t, 2 * k + 1, MPFR_RNDU);
          sigmak = MPFR_GET_EXP (s);
          if (k % 2)
            mpfr_sub (s, s, u, MPFR_RNDN);
          else
            mpfr_add (s, s, u, MPFR_RNDN);
          sigmak -= MPFR_GET_EXP(s);
          nuk = MPFR_GET_EXP(u) - MPFR_GET_EXP(s);

          if ((nuk < - (mpfr_exp_t) m) && ((double) k >= xf2))
            break;

          /* tauk <- 1/2 + tauk * 2^sigmak + (1+8k)*2^nuk */
          tauk = 0.5 + mul_2exp (tauk, sigmak)
            + mul_2exp (1.0 + 8.0 * (double) k, nuk);
        }

      mpfr_mul (s, x, s, MPFR_RNDU);
      MPFR_SET_EXP (s, MPFR_GET_EXP (s) + 1); /* exact doubling: s <- 2*x*s */

      mpfr_const_pi (t, MPFR_RNDZ);
      mpfr_sqrt (t, t, MPFR_RNDZ);
      mpfr_div (s, s, t, MPFR_RNDN); /* s ~ 2x/sqrt(Pi) * sum */
      tauk = 4.0 * tauk + 11.0; /* final ulp-error on s */
      log2tauk = __gmpfr_ceil_log2 (tauk);

      if (MPFR_LIKELY (MPFR_CAN_ROUND (s, m - log2tauk, n, rnd_mode)))
        break;

      /* Actualisation of the precision */
      MPFR_ZIV_NEXT (loop, m);
      mpfr_set_prec (y, m);
      mpfr_set_prec (s, m);
      mpfr_set_prec (t, m);
      mpfr_set_prec (u, m);

    }
  MPFR_ZIV_FREE (loop);

  inex = mpfr_set (res, s, rnd_mode);

  mpfr_clear (y);
  mpfr_clear (t);
  mpfr_clear (u);
  mpfr_clear (s);

  return inex;
}
diff --git a/src/erfc.c b/src/erfc.c
new file mode 100644
index 000000000..bca363360
--- /dev/null
+++ b/src/erfc.c
@@ -0,0 +1,263 @@
+/* mpfr_erfc -- The Complementary Error Function of a floating-point number
+
+Copyright 2005, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#define MPFR_NEED_LONGLONG_H
+#include "mpfr-impl.h"
+
+/* erfc(x) = 1 - erf(x) */
+
+/* Put in y an approximation of erfc(x) for large x, using formulae 7.1.23 and
+ 7.1.24 from Abramowitz and Stegun.
+ Returns e such that the error is bounded by 2^e ulp(y),
+ or returns 0 in case of underflow.
+*/
static mpfr_exp_t
mpfr_erfc_asympt (mpfr_ptr y, mpfr_srcptr x)
{
  mpfr_t t, xx, err;
  unsigned long k;
  mpfr_prec_t prec = MPFR_PREC(y);
  mpfr_exp_t exp_err;

  mpfr_init2 (t, prec);
  mpfr_init2 (xx, prec);
  mpfr_init2 (err, 31);
  /* let u = 2^(1-p), and let us represent the error as (1+u)^err
     with a bound for err */
  mpfr_mul (xx, x, x, MPFR_RNDD); /* err <= 1 */
  mpfr_ui_div (xx, 1, xx, MPFR_RNDU); /* upper bound for 1/(2x^2), err <= 2 */
  mpfr_div_2ui (xx, xx, 1, MPFR_RNDU); /* exact */
  mpfr_set_ui (t, 1, MPFR_RNDN); /* current term, exact */
  mpfr_set (y, t, MPFR_RNDN); /* current sum */
  mpfr_set_ui (err, 0, MPFR_RNDN);
  /* sum the divergent asymptotic series, stopping at the smallest term */
  for (k = 1; ; k++)
    {
      mpfr_mul_ui (t, t, 2 * k - 1, MPFR_RNDU); /* err <= 4k-3 */
      mpfr_mul (t, t, xx, MPFR_RNDU); /* err <= 4k */
      /* for -1 < x < 1, and |nx| < 1, we have |(1+x)^n| <= 1+7/4|nx|.
         Indeed, for x>=0: log((1+x)^n) = n*log(1+x) <= n*x. Let y=n*x < 1,
         then exp(y) <= 1+7/4*y.
         For x<=0, let x=-x, we can prove by induction that (1-x)^n >= 1-n*x.*/
      mpfr_mul_2si (err, err, MPFR_GET_EXP (y) - MPFR_GET_EXP (t), MPFR_RNDU);
      mpfr_add_ui (err, err, 14 * k, MPFR_RNDU); /* 2^(1-p) * t <= 2 ulp(t) */
      mpfr_div_2si (err, err, MPFR_GET_EXP (y) - MPFR_GET_EXP (t), MPFR_RNDU);
      if (MPFR_GET_EXP (t) + (mpfr_exp_t) prec <= MPFR_GET_EXP (y))
        {
          /* the truncation error is bounded by |t| < ulp(y) */
          mpfr_add_ui (err, err, 1, MPFR_RNDU);
          break;
        }
      if (k & 1)
        mpfr_sub (y, y, t, MPFR_RNDN);
      else
        mpfr_add (y, y, t, MPFR_RNDN);
    }
  /* the error on y is bounded by err*ulp(y) */
  /* now divide by sqrt(Pi)*x*exp(x^2) to obtain erfc(x) */
  mpfr_mul (t, x, x, MPFR_RNDU); /* rel. err <= 2^(1-p) */
  mpfr_div_2ui (err, err, 3, MPFR_RNDU); /* err/8 */
  mpfr_add (err, err, t, MPFR_RNDU); /* err/8 + xx */
  mpfr_mul_2ui (err, err, 3, MPFR_RNDU); /* err + 8*xx */
  mpfr_exp (t, t, MPFR_RNDU); /* err <= 1/2*ulp(t) + err(x*x)*t
                                 <= 1/2*ulp(t)+2*|x*x|*ulp(t)
                                 <= (2*|x*x|+1/2)*ulp(t) */
  mpfr_mul (t, t, x, MPFR_RNDN); /* err <= 1/2*ulp(t) + (4*|x*x|+1)*ulp(t)
                                    <= (4*|x*x|+3/2)*ulp(t) */
  mpfr_const_pi (xx, MPFR_RNDZ); /* err <= ulp(Pi) */
  mpfr_sqrt (xx, xx, MPFR_RNDN); /* err <= 1/2*ulp(xx) + ulp(Pi)/2/sqrt(Pi)
                                    <= 3/2*ulp(xx) */
  mpfr_mul (t, t, xx, MPFR_RNDN); /* err <= (8 |xx| + 13/2) * ulp(t) */
  mpfr_div (y, y, t, MPFR_RNDN); /* the relative error on input y is bounded
                                    by (1+u)^err with u = 2^(1-p), that on
                                    t is bounded by (1+u)^(8 |xx| + 13/2),
                                    thus that on output y is bounded by
                                    8 |xx| + 7 + err. */

  if (MPFR_IS_ZERO(y))
    {
      /* If y is zero, most probably we have underflow. We check it directly
         using the fact that erfc(x) <= exp(-x^2)/sqrt(Pi)/x for x >= 0.
         We compute an upper approximation of exp(-x^2)/sqrt(Pi)/x.
      */
      mpfr_mul (t, x, x, MPFR_RNDD); /* t <= x^2 */
      mpfr_neg (t, t, MPFR_RNDU); /* -x^2 <= t */
      mpfr_exp (t, t, MPFR_RNDU); /* exp(-x^2) <= t */
      mpfr_const_pi (xx, MPFR_RNDD); /* xx <= sqrt(Pi), cached */
      mpfr_mul (xx, xx, x, MPFR_RNDD); /* xx <= sqrt(Pi)*x */
      mpfr_div (y, t, xx, MPFR_RNDN); /* if y is zero, this means that the upper
                                         approximation of exp(-x^2)/sqrt(Pi)/x
                                         is nearer from 0 than from 2^(-emin-1),
                                         thus we have underflow. */
      exp_err = 0;
    }
  else
    {
      mpfr_add_ui (err, err, 7, MPFR_RNDU);
      exp_err = MPFR_GET_EXP (err);
    }

  mpfr_clear (t);
  mpfr_clear (xx);
  mpfr_clear (err);
  return exp_err;
}
+
int
mpfr_erfc (mpfr_ptr y, mpfr_srcptr x, mpfr_rnd_t rnd)
{
  int inex;
  mpfr_t tmp;
  mpfr_exp_t te, err;
  mpfr_prec_t prec;
  MPFR_SAVE_EXPO_DECL (expo);
  MPFR_ZIV_DECL (loop);

  MPFR_LOG_FUNC (("x[%#R]=%R rnd=%d", x, x, rnd),
                 ("y[%#R]=%R inexact=%d", y, y, inex));

  if (MPFR_UNLIKELY (MPFR_IS_SINGULAR (x)))
    {
      if (MPFR_IS_NAN (x))
        {
          MPFR_SET_NAN (y);
          MPFR_RET_NAN;
        }
      /* erfc(+inf) = 0+, erfc(-inf) = 2 erfc (0) = 1 */
      else if (MPFR_IS_INF (x))
        return mpfr_set_ui (y, MPFR_IS_POS (x) ? 0 : 2, rnd);
      else
        return mpfr_set_ui (y, 1, rnd);
    }

  if (MPFR_SIGN (x) > 0)
    {
      /* for x >= 27282, erfc(x) < 2^(-2^30-1) */
      if (mpfr_cmp_ui (x, 27282) >= 0)
        return mpfr_underflow (y, (rnd == MPFR_RNDN) ? MPFR_RNDZ : rnd, 1);
    }

  if (MPFR_SIGN (x) < 0)
    {
      mpfr_exp_t e = MPFR_EXP(x);
      /* For x < 0 going to -infinity, erfc(x) tends to 2 by below.
         More precisely, we have 2 + 1/sqrt(Pi)/x/exp(x^2) < erfc(x) < 2.
         Thus log2 |2 - erfc(x)| <= -log2|x| - x^2 / log(2).
         If |2 - erfc(x)| < 2^(-PREC(y)) then the result is either 2 or
         nextbelow(2).
         For x <= -27282, -log2|x| - x^2 / log(2) <= -2^30.
      */
      if ((MPFR_PREC(y) <= 7 && e >= 2) ||  /* x <= -2 */
          (MPFR_PREC(y) <= 25 && e >= 3) || /* x <= -4 */
          (MPFR_PREC(y) <= 120 && mpfr_cmp_si (x, -9) <= 0) ||
          mpfr_cmp_si (x, -27282) <= 0)
        {
          /* result is 2 or nextbelow(2), depending on the rounding
             direction; the inexact flag is set in both cases */
        near_two:
          mpfr_set_ui (y, 2, MPFR_RNDN);
          mpfr_set_inexflag ();
          if (rnd == MPFR_RNDZ || rnd == MPFR_RNDD)
            {
              mpfr_nextbelow (y);
              return -1;
            }
          else
            return 1;
        }
      else if (e >= 3) /* more accurate test */
        {
          mpfr_t t, u;
          int near_2;
          mpfr_init2 (t, 32);
          mpfr_init2 (u, 32);
          /* the following is 1/log(2) rounded to zero on 32 bits */
          mpfr_set_str_binary (t, "1.0111000101010100011101100101001");
          mpfr_sqr (u, x, MPFR_RNDZ);
          mpfr_mul (t, t, u, MPFR_RNDZ); /* t <= x^2/log(2) */
          mpfr_neg (u, x, MPFR_RNDZ); /* 0 <= u <= |x| */
          mpfr_log2 (u, u, MPFR_RNDZ); /* u <= log2(|x|) */
          mpfr_add (t, t, u, MPFR_RNDZ); /* t <= log2|x| + x^2 / log(2) */
          near_2 = mpfr_cmp_ui (t, MPFR_PREC(y)) >= 0;
          mpfr_clear (t);
          mpfr_clear (u);
          if (near_2)
            goto near_two;
        }
    }

  /* Init stuff */
  MPFR_SAVE_EXPO_MARK (expo);

  /* erfc(x) ~ 1, with error < 2^(EXP(x)+1) */
  MPFR_FAST_COMPUTE_IF_SMALL_INPUT (y, __gmpfr_one, - MPFR_GET_EXP (x) - 1,
                                    0, MPFR_SIGN(x) < 0,
                                    rnd, inex = _inexact; goto end);

  prec = MPFR_PREC (y) + MPFR_INT_CEIL_LOG2 (MPFR_PREC (y)) + 3;
  if (MPFR_GET_EXP (x) > 0)
    prec += 2 * MPFR_GET_EXP(x);

  mpfr_init2 (tmp, prec);

  MPFR_ZIV_INIT (loop, prec); /* Initialize the ZivLoop controler */
  for (;;) /* Infinite loop */
    {
      /* use asymptotic formula only whenever x^2 >= p*log(2),
         otherwise it will not converge */
      if (MPFR_SIGN (x) > 0 &&
          2 * MPFR_GET_EXP (x) - 2 >= MPFR_INT_CEIL_LOG2 (prec))
        /* we have x^2 >= p in that case */
        {
          err = mpfr_erfc_asympt (tmp, x);
          if (err == 0) /* underflow case */
            {
              mpfr_clear (tmp);
              MPFR_SAVE_EXPO_FREE (expo);
              return mpfr_underflow (y, (rnd == MPFR_RNDN) ? MPFR_RNDZ : rnd, 1);
            }
        }
      else
        {
          /* compute erfc(x) = 1 - erf(x); the subtraction may cancel,
             hence the err = te - EXP(tmp) term below */
          mpfr_erf (tmp, x, MPFR_RNDN);
          MPFR_ASSERTD (!MPFR_IS_SINGULAR (tmp)); /* FIXME: 0 only for x=0 ? */
          te = MPFR_GET_EXP (tmp);
          mpfr_ui_sub (tmp, 1, tmp, MPFR_RNDN);
          /* See error analysis in algorithms.tex for details */
          if (MPFR_IS_ZERO (tmp))
            {
              prec *= 2;
              err = prec; /* ensures MPFR_CAN_ROUND fails */
            }
          else
            err = MAX (te - MPFR_GET_EXP (tmp), 0) + 1;
        }
      if (MPFR_LIKELY (MPFR_CAN_ROUND (tmp, prec - err, MPFR_PREC (y), rnd)))
        break;
      MPFR_ZIV_NEXT (loop, prec); /* Increase used precision */
      mpfr_set_prec (tmp, prec);
    }
  MPFR_ZIV_FREE (loop); /* Free the ZivLoop Controler */

  inex = mpfr_set (y, tmp, rnd); /* Set y to the computed value */
  mpfr_clear (tmp);

 end:
  MPFR_SAVE_EXPO_FREE (expo);
  return mpfr_check_range (y, inex, rnd);
}
diff --git a/src/exceptions.c b/src/exceptions.c
new file mode 100644
index 000000000..34e26b31f
--- /dev/null
+++ b/src/exceptions.c
@@ -0,0 +1,336 @@
+/* Exception flags and utilities.
+
+Copyright 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#include "mpfr-impl.h"
+
+unsigned int MPFR_THREAD_ATTR __gmpfr_flags = 0;
+
+mpfr_exp_t MPFR_THREAD_ATTR __gmpfr_emin = MPFR_EMIN_DEFAULT;
+mpfr_exp_t MPFR_THREAD_ATTR __gmpfr_emax = MPFR_EMAX_DEFAULT;
+
+#undef mpfr_get_emin
+
+/* Return the current minimal exponent __gmpfr_emin (thread-local when
+   MPFR_THREAD_ATTR expands to a TLS qualifier).  The #undef is there in
+   case the public header defines a same-named macro. */
+mpfr_exp_t
+mpfr_get_emin (void)
+{
+  return __gmpfr_emin;
+}
+
+#undef mpfr_set_emin
+
+/* Set the minimal exponent to EXPONENT.
+   Return 0 on success; return 1 (without changing __gmpfr_emin) when
+   EXPONENT lies outside [MPFR_EMIN_MIN, MPFR_EMIN_MAX]. */
+int
+mpfr_set_emin (mpfr_exp_t exponent)
+{
+  if (exponent >= MPFR_EMIN_MIN && exponent <= MPFR_EMIN_MAX)
+    {
+      __gmpfr_emin = exponent;
+      return 0;
+    }
+  else
+    {
+      return 1;
+    }
+}
+
+/* Return the smallest exponent value accepted by mpfr_set_emin. */
+mpfr_exp_t
+mpfr_get_emin_min (void)
+{
+  return MPFR_EMIN_MIN;
+}
+
+/* Return the largest exponent value accepted by mpfr_set_emin. */
+mpfr_exp_t
+mpfr_get_emin_max (void)
+{
+  return MPFR_EMIN_MAX;
+}
+
+#undef mpfr_get_emax
+
+/* Return the current maximal exponent __gmpfr_emax. */
+mpfr_exp_t
+mpfr_get_emax (void)
+{
+  return __gmpfr_emax;
+}
+
+#undef mpfr_set_emax
+
+/* Set the maximal exponent to EXPONENT.
+   Return 0 on success; return 1 (without changing __gmpfr_emax) when
+   EXPONENT lies outside [MPFR_EMAX_MIN, MPFR_EMAX_MAX]. */
+int
+mpfr_set_emax (mpfr_exp_t exponent)
+{
+  if (exponent >= MPFR_EMAX_MIN && exponent <= MPFR_EMAX_MAX)
+    {
+      __gmpfr_emax = exponent;
+      return 0;
+    }
+  else
+    {
+      return 1;
+    }
+}
+
+/* Return the smallest exponent value accepted by mpfr_set_emax. */
+mpfr_exp_t
+mpfr_get_emax_min (void)
+{
+  return MPFR_EMAX_MIN;
+}
+
+/* Return the largest exponent value accepted by mpfr_set_emax. */
+mpfr_exp_t
+mpfr_get_emax_max (void)
+{
+  return MPFR_EMAX_MAX;
+}
+
+
+#undef mpfr_clear_flags
+
+/* Clear all exception flags at once. */
+void
+mpfr_clear_flags (void)
+{
+  __gmpfr_flags = 0;
+}
+
+/* The functions below clear one exception flag each, by masking the
+   corresponding bit out of the global flag word __gmpfr_flags. */
+
+#undef mpfr_clear_underflow
+
+void
+mpfr_clear_underflow (void)
+{
+  __gmpfr_flags &= MPFR_FLAGS_ALL ^ MPFR_FLAGS_UNDERFLOW;
+}
+
+#undef mpfr_clear_overflow
+
+void
+mpfr_clear_overflow (void)
+{
+  __gmpfr_flags &= MPFR_FLAGS_ALL ^ MPFR_FLAGS_OVERFLOW;
+}
+
+#undef mpfr_clear_nanflag
+
+void
+mpfr_clear_nanflag (void)
+{
+  __gmpfr_flags &= MPFR_FLAGS_ALL ^ MPFR_FLAGS_NAN;
+}
+
+#undef mpfr_clear_inexflag
+
+void
+mpfr_clear_inexflag (void)
+{
+  __gmpfr_flags &= MPFR_FLAGS_ALL ^ MPFR_FLAGS_INEXACT;
+}
+
+#undef mpfr_clear_erangeflag
+
+void
+mpfr_clear_erangeflag (void)
+{
+  __gmpfr_flags &= MPFR_FLAGS_ALL ^ MPFR_FLAGS_ERANGE;
+}
+
+/* The functions below raise one exception flag each.  Note: the #undef
+   lines must name the function being defined; the previous code said
+   "#undef mpfr_clear_underflow" etc. (copy-paste from the clear
+   functions above), which would leave a possible mpfr_set_* macro from
+   the public header shadowing these definitions. */
+
+#undef mpfr_set_underflow
+
+void
+mpfr_set_underflow (void)
+{
+  __gmpfr_flags |= MPFR_FLAGS_UNDERFLOW;
+}
+
+#undef mpfr_set_overflow
+
+void
+mpfr_set_overflow (void)
+{
+  __gmpfr_flags |= MPFR_FLAGS_OVERFLOW;
+}
+
+#undef mpfr_set_nanflag
+
+void
+mpfr_set_nanflag (void)
+{
+  __gmpfr_flags |= MPFR_FLAGS_NAN;
+}
+
+#undef mpfr_set_inexflag
+
+void
+mpfr_set_inexflag (void)
+{
+  __gmpfr_flags |= MPFR_FLAGS_INEXACT;
+}
+
+#undef mpfr_set_erangeflag
+
+void
+mpfr_set_erangeflag (void)
+{
+  __gmpfr_flags |= MPFR_FLAGS_ERANGE;
+}
+
+
+#undef mpfr_check_range
+
+/* Check that X (just produced by a rounding at the current precision)
+   fits in the current exponent range [__gmpfr_emin, __gmpfr_emax]; if
+   not, replace it by the proper underflowed/overflowed value.  T is the
+   ternary value of the rounding that produced X, and is propagated as
+   the return value when no range adjustment is needed. */
+int
+mpfr_check_range (mpfr_ptr x, int t, mpfr_rnd_t rnd_mode)
+{
+  if (MPFR_LIKELY( MPFR_IS_PURE_FP(x)) )
+    { /* x is a non-zero FP */
+      mpfr_exp_t exp = MPFR_EXP (x); /* Do not use MPFR_GET_EXP */
+      if (MPFR_UNLIKELY( exp < __gmpfr_emin) )
+        {
+          /* The following test is necessary because in the rounding to the
+           * nearest mode, mpfr_underflow always rounds away from 0. In
+           * this rounding mode, we need to round to 0 if:
+           *   _ |x| < 2^(emin-2), or
+           *   _ |x| = 2^(emin-2) and the absolute value of the exact
+           *     result is <= 2^(emin-2).
+           */
+          if (rnd_mode == MPFR_RNDN &&
+              (exp + 1 < __gmpfr_emin ||
+               (mpfr_powerof2_raw(x) &&
+                (MPFR_IS_NEG(x) ? t <= 0 : t >= 0))))
+            rnd_mode = MPFR_RNDZ;
+          return mpfr_underflow(x, rnd_mode, MPFR_SIGN(x));
+        }
+      if (MPFR_UNLIKELY( exp > __gmpfr_emax) )
+        return mpfr_overflow (x, rnd_mode, MPFR_SIGN(x));
+    }
+  else if (MPFR_UNLIKELY (t != 0 && MPFR_IS_INF (x)))
+    {
+      /* We need to do the following because most MPFR functions are
+       * implemented in the following way:
+       *   Ziv's loop:
+       *   | Compute an approximation to the result and an error bound.
+       *   | Possible underflow/overflow detection -> return.
+       *   | If can_round, break (exit the loop).
+       *   | Otherwise, increase the working precision and loop.
+       *   Round the approximation in the target precision.  <== See below
+       *   Restore the flags (that could have been set due to underflows
+       *   or overflows during the internal computations).
+       *   Execute: return mpfr_check_range (...).
+       * The problem is that an overflow could be generated when rounding the
+       * approximation (in general, such an overflow could not be detected
+       * earlier), and the overflow flag is lost when the flags are restored.
+       * This can occur only when the rounding yields an exponent change
+       * and the new exponent is larger than the maximum exponent, so that
+       * an infinity is necessarily obtained.
+       * So, the simplest solution is to detect this overflow case here in
+       * mpfr_check_range, which is easy to do since the rounded result is
+       * necessarily an inexact infinity.
+       */
+      __gmpfr_flags |= MPFR_FLAGS_OVERFLOW;
+    }
+  MPFR_RET (t); /* propagate inexact ternary value, unlike most functions */
+}
+
+/* Flag predicates.  Each one returns the raw masked value of
+   __gmpfr_flags, i.e. nonzero iff the corresponding flag is set
+   (callers must not rely on the result being exactly 0 or 1). */
+
+#undef mpfr_underflow_p
+
+int
+mpfr_underflow_p (void)
+{
+  return __gmpfr_flags & MPFR_FLAGS_UNDERFLOW;
+}
+
+#undef mpfr_overflow_p
+
+int
+mpfr_overflow_p (void)
+{
+  return __gmpfr_flags & MPFR_FLAGS_OVERFLOW;
+}
+
+#undef mpfr_nanflag_p
+
+int
+mpfr_nanflag_p (void)
+{
+  return __gmpfr_flags & MPFR_FLAGS_NAN;
+}
+
+#undef mpfr_inexflag_p
+
+int
+mpfr_inexflag_p (void)
+{
+  return __gmpfr_flags & MPFR_FLAGS_INEXACT;
+}
+
+#undef mpfr_erangeflag_p
+
+int
+mpfr_erangeflag_p (void)
+{
+  return __gmpfr_flags & MPFR_FLAGS_ERANGE;
+}
+
+/* #undef mpfr_underflow */
+
+/* Note: In the rounding to the nearest mode, mpfr_underflow
+   always rounds away from 0. In this rounding mode, you must call
+   mpfr_underflow with rnd_mode = MPFR_RNDZ if the exact result
+   is <= 2^(emin-2) in absolute value. */
+
+/* Force X to an underflowed value of the given SIGN: either zero or the
+   smallest normalized magnitude (mpfr_setmin at __gmpfr_emin), depending
+   on whether RND_MODE rounds toward zero for this sign.  Raise the
+   underflow and inexact flags, and return the signed ternary value. */
+int
+mpfr_underflow (mpfr_ptr x, mpfr_rnd_t rnd_mode, int sign)
+{
+  int inex;
+
+  MPFR_ASSERT_SIGN (sign);
+
+  if (MPFR_IS_LIKE_RNDZ(rnd_mode, sign < 0))
+    {
+      MPFR_SET_ZERO(x);
+      inex = -1;
+    }
+  else
+    {
+      mpfr_setmin (x, __gmpfr_emin);
+      inex = 1;
+    }
+  MPFR_SET_SIGN(x, sign);
+  __gmpfr_flags |= MPFR_FLAGS_INEXACT | MPFR_FLAGS_UNDERFLOW;
+  return sign > 0 ? inex : -inex;
+}
+
+/* #undef mpfr_overflow */
+
+/* Force X to an overflowed value of the given SIGN: either the largest
+   finite magnitude (mpfr_setmax at __gmpfr_emax) or infinity, depending
+   on whether RND_MODE rounds toward zero for this sign.  Raise the
+   overflow and inexact flags, and return the signed ternary value. */
+int
+mpfr_overflow (mpfr_ptr x, mpfr_rnd_t rnd_mode, int sign)
+{
+  int inex;
+
+  MPFR_ASSERT_SIGN(sign);
+  if (MPFR_IS_LIKE_RNDZ(rnd_mode, sign < 0))
+    {
+      mpfr_setmax (x, __gmpfr_emax);
+      inex = -1;
+    }
+  else
+    {
+      MPFR_SET_INF(x);
+      inex = 1;
+    }
+  MPFR_SET_SIGN(x,sign);
+  __gmpfr_flags |= MPFR_FLAGS_INEXACT | MPFR_FLAGS_OVERFLOW;
+  return sign > 0 ? inex : -inex;
+}
diff --git a/src/exp.c b/src/exp.c
new file mode 100644
index 000000000..893e9a6b5
--- /dev/null
+++ b/src/exp.c
@@ -0,0 +1,162 @@
+/* mpfr_exp -- exponential of a floating-point number
+
+Copyright 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#include "mpfr-impl.h"
+
+/* #define DEBUG */
+
+/* Set y = exp(x) rounded in direction rnd_mode; return the ternary
+   value.  Dispatches between a fast path for tiny |x|, mpfr_exp_2 for
+   moderate precisions and mpfr_exp_3 for large ones, after detecting
+   most overflow/underflow cases up front. */
+int
+mpfr_exp (mpfr_ptr y, mpfr_srcptr x, mpfr_rnd_t rnd_mode)
+{
+  mpfr_exp_t expx;
+  mpfr_prec_t precy;
+  int inexact;
+  MPFR_SAVE_EXPO_DECL (expo);
+
+  MPFR_LOG_FUNC (("x[%#R]=%R rnd=%d", x, x, rnd_mode),
+                 ("y[%#R]=%R inexact=%d", y, y, inexact));
+
+  /* Singular cases: exp(NaN)=NaN, exp(+inf)=+inf, exp(-inf)=+0,
+     exp(0)=1 (exact up to rounding of 1 in the target precision). */
+  if (MPFR_UNLIKELY( MPFR_IS_SINGULAR(x) ))
+    {
+      if (MPFR_IS_NAN(x))
+        {
+          MPFR_SET_NAN(y);
+          MPFR_RET_NAN;
+        }
+      else if (MPFR_IS_INF(x))
+        {
+          if (MPFR_IS_POS(x))
+            MPFR_SET_INF(y);
+          else
+            MPFR_SET_ZERO(y);
+          MPFR_SET_POS(y);
+          MPFR_RET(0);
+        }
+      else
+        {
+          MPFR_ASSERTD(MPFR_IS_ZERO(x));
+          return mpfr_set_ui (y, 1, rnd_mode);
+        }
+    }
+
+  /* First, let's detect most overflow and underflow cases. */
+  {
+    mpfr_t e, bound;
+
+    /* We must extend the exponent range and save the flags now. */
+    MPFR_SAVE_EXPO_MARK (expo);
+
+    /* e is wide enough to hold any mpfr_exp_t exactly; bound needs only
+       a few bits since the comparisons below are one-sided. */
+    mpfr_init2 (e, sizeof (mpfr_exp_t) * CHAR_BIT);
+    mpfr_init2 (bound, 32);
+
+    inexact = mpfr_set_exp_t (e, expo.saved_emax, MPFR_RNDN);
+    MPFR_ASSERTD (inexact == 0);
+    /* Rounding directions are chosen so that bound >= emax*log(2). */
+    mpfr_const_log2 (bound, expo.saved_emax < 0 ? MPFR_RNDD : MPFR_RNDU);
+    mpfr_mul (bound, bound, e, MPFR_RNDU);
+    if (MPFR_UNLIKELY (mpfr_cmp (x, bound) >= 0))
+      {
+        /* x > log(2^emax), thus exp(x) > 2^emax */
+        mpfr_clears (e, bound, (mpfr_ptr) 0);
+        MPFR_SAVE_EXPO_FREE (expo);
+        return mpfr_overflow (y, rnd_mode, 1);
+      }
+
+    inexact = mpfr_set_exp_t (e, expo.saved_emin, MPFR_RNDN);
+    MPFR_ASSERTD (inexact == 0);
+    inexact = mpfr_sub_ui (e, e, 2, MPFR_RNDN);
+    MPFR_ASSERTD (inexact == 0);
+    /* Rounding directions chosen so that bound <= (emin-2)*log(2). */
+    mpfr_const_log2 (bound, expo.saved_emin < 0 ? MPFR_RNDU : MPFR_RNDD);
+    mpfr_mul (bound, bound, e, MPFR_RNDD);
+    if (MPFR_UNLIKELY (mpfr_cmp (x, bound) <= 0))
+      {
+        /* x < log(2^(emin - 2)), thus exp(x) < 2^(emin - 2) */
+        mpfr_clears (e, bound, (mpfr_ptr) 0);
+        MPFR_SAVE_EXPO_FREE (expo);
+        return mpfr_underflow (y, rnd_mode == MPFR_RNDN ? MPFR_RNDZ : rnd_mode,
+                               1);
+      }
+
+    /* Other overflow/underflow cases must be detected
+       by the generic routines. */
+    mpfr_clears (e, bound, (mpfr_ptr) 0);
+    MPFR_SAVE_EXPO_FREE (expo);
+  }
+
+  expx = MPFR_GET_EXP (x);
+  precy = MPFR_PREC (y);
+
+  /* if |x| < 2^(-precy), then exp(x) gives 1 +/- 1 ulp(1), so the
+     result can be built directly from the rounding direction */
+  if (MPFR_UNLIKELY (expx < 0 && (mpfr_uexp_t) (-expx) > precy))
+    {
+      /* Temporarily pin the exponent range so that mpfr_setmax /
+         mpfr_setmin build the neighbors of 1; restored below. */
+      mpfr_exp_t emin = __gmpfr_emin;
+      mpfr_exp_t emax = __gmpfr_emax;
+      int signx = MPFR_SIGN (x);
+
+      MPFR_SET_POS (y);
+      if (MPFR_IS_NEG_SIGN (signx) && (rnd_mode == MPFR_RNDD ||
+                                       rnd_mode == MPFR_RNDZ))
+        {
+          __gmpfr_emin = 0;
+          __gmpfr_emax = 0;
+          mpfr_setmax (y, 0); /* y = 1 - epsilon */
+          inexact = -1;
+        }
+      else
+        {
+          __gmpfr_emin = 1;
+          __gmpfr_emax = 1;
+          mpfr_setmin (y, 1); /* y = 1 */
+          if (MPFR_IS_POS_SIGN (signx) && (rnd_mode == MPFR_RNDU ||
+                                           rnd_mode == MPFR_RNDA))
+            {
+              /* bump the last mantissa bit: y = 1 + ulp(1) */
+              mp_size_t yn;
+              int sh;
+
+              yn = 1 + (MPFR_PREC(y) - 1) / GMP_NUMB_BITS;
+              sh = (mpfr_prec_t) yn * GMP_NUMB_BITS - MPFR_PREC(y);
+              MPFR_MANT(y)[0] += MPFR_LIMB_ONE << sh;
+              inexact = 1;
+            }
+          else
+            inexact = -MPFR_FROM_SIGN_TO_INT(signx);
+        }
+
+      __gmpfr_emin = emin;
+      __gmpfr_emax = emax;
+    }
+  else /* General case */
+    {
+      if (MPFR_UNLIKELY (precy >= MPFR_EXP_THRESHOLD))
+        /* mpfr_exp_3 saves the exponent range and flags itself, otherwise
+           the flag changes in mpfr_exp_3 are lost */
+        inexact = mpfr_exp_3 (y, x, rnd_mode); /* O(M(n) log(n)^2) */
+      else
+        {
+          MPFR_SAVE_EXPO_MARK (expo);
+          inexact = mpfr_exp_2 (y, x, rnd_mode); /* O(n^(1/3) M(n)) */
+          MPFR_SAVE_EXPO_UPDATE_FLAGS (expo, __gmpfr_flags);
+          MPFR_SAVE_EXPO_FREE (expo);
+        }
+    }
+
+  return mpfr_check_range (y, inexact, rnd_mode);
+}
diff --git a/src/exp10.c b/src/exp10.c
new file mode 100644
index 000000000..a69486a5c
--- /dev/null
+++ b/src/exp10.c
@@ -0,0 +1,29 @@
+/* mpfr_exp10 -- power of 10 function 10^y
+
+Copyright 2003, 2004, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#include "mpfr-impl.h"
+
+/* Set y = 10^x rounded in direction rnd_mode.  Delegates to the generic
+   power function mpfr_ui_pow; correct rounding, exception flags and the
+   returned ternary value all come from that routine. */
+int
+mpfr_exp10 (mpfr_ptr y, mpfr_srcptr x, mpfr_rnd_t rnd_mode)
+{
+  return mpfr_ui_pow (y, 10, x, rnd_mode);
+}
diff --git a/src/exp2.c b/src/exp2.c
new file mode 100644
index 000000000..bf0902c86
--- /dev/null
+++ b/src/exp2.c
@@ -0,0 +1,146 @@
+/* mpfr_exp2 -- power of 2 function 2^y
+
+Copyright 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#define MPFR_NEED_LONGLONG_H
+#include "mpfr-impl.h"
+
+/* The computation of y = 2^z is done by *
+ * y = exp(z*log(2)). The result is exact iff z is an integer. */
+
+/* Set y = 2^x rounded in direction rnd_mode; return the ternary value.
+   Splits x into integer and fractional parts: 2^x = exp(xfrac*log(2))
+   scaled by 2^xint at the end, so only the fractional part goes through
+   the Ziv loop. */
+int
+mpfr_exp2 (mpfr_ptr y, mpfr_srcptr x, mpfr_rnd_t rnd_mode)
+{
+  int inexact;
+  long xint;
+  mpfr_t xfrac;
+  MPFR_SAVE_EXPO_DECL (expo);
+
+  /* Singular cases: 2^NaN=NaN, 2^(+inf)=+inf, 2^(-inf)=+0, 2^0=1. */
+  if (MPFR_UNLIKELY (MPFR_IS_SINGULAR (x)))
+    {
+      if (MPFR_IS_NAN (x))
+        {
+          MPFR_SET_NAN (y);
+          MPFR_RET_NAN;
+        }
+      else if (MPFR_IS_INF (x))
+        {
+          if (MPFR_IS_POS (x))
+            MPFR_SET_INF (y);
+          else
+            MPFR_SET_ZERO (y);
+          MPFR_SET_POS (y);
+          MPFR_RET (0);
+        }
+      else /* 2^0 = 1 */
+        {
+          MPFR_ASSERTD (MPFR_IS_ZERO(x));
+          return mpfr_set_ui (y, 1, rnd_mode);
+        }
+    }
+
+  /* since the smallest representable non-zero float is 1/2*2^__gmpfr_emin,
+     if x < __gmpfr_emin - 1, the result is either 1/2*2^__gmpfr_emin or 0 */
+  MPFR_ASSERTN (MPFR_EMIN_MIN >= LONG_MIN + 2);
+  if (MPFR_UNLIKELY (mpfr_cmp_si (x, __gmpfr_emin - 1) < 0))
+    {
+      mpfr_rnd_t rnd2 = rnd_mode;
+      /* in round to nearest mode, round to zero when x <= __gmpfr_emin-2 */
+      if (rnd_mode == MPFR_RNDN &&
+          mpfr_cmp_si_2exp (x, __gmpfr_emin - 2, 0) <= 0)
+        rnd2 = MPFR_RNDZ;
+      return mpfr_underflow (y, rnd2, 1);
+    }
+
+  MPFR_ASSERTN (MPFR_EMAX_MAX <= LONG_MAX);
+  if (MPFR_UNLIKELY (mpfr_cmp_si (x, __gmpfr_emax) >= 0))
+    return mpfr_overflow (y, rnd_mode, 1);
+
+  /* We now know that emin - 1 <= x < emax. */
+
+  MPFR_SAVE_EXPO_MARK (expo);
+
+  /* 2^x = 1 + x*log(2) + O(x^2) for x near zero, and for |x| <= 1 we have
+     |2^x - 1| <= x < 2^EXP(x). If x > 0 we must round away from 0 (dir=1);
+     if x < 0 we must round toward 0 (dir=0). */
+  MPFR_SMALL_INPUT_AFTER_SAVE_EXPO (y, __gmpfr_one, - MPFR_GET_EXP (x), 0,
+                                    MPFR_SIGN(x) > 0, rnd_mode, expo, {});
+
+  /* xint = trunc(x); the remainder xfrac = x - xint is representable
+     exactly at the precision of x. */
+  xint = mpfr_get_si (x, MPFR_RNDZ);
+  mpfr_init2 (xfrac, MPFR_PREC (x));
+  mpfr_sub_si (xfrac, x, xint, MPFR_RNDN); /* exact */
+
+  if (MPFR_IS_ZERO (xfrac))
+    {
+      /* x is an integer: 2^x is exact, only the 2^xint scaling below
+         remains to be done. */
+      mpfr_set_ui (y, 1, MPFR_RNDN);
+      inexact = 0;
+    }
+  else
+    {
+      /* Declaration of the intermediary variable */
+      mpfr_t t;
+
+      /* Declaration of the size variable */
+      mpfr_prec_t Ny = MPFR_PREC(y); /* target precision */
+      mpfr_prec_t Nt; /* working precision */
+      mpfr_exp_t err; /* error */
+      MPFR_ZIV_DECL (loop);
+
+      /* compute the precision of intermediary variable */
+      /* the optimal number of bits : see algorithms.tex */
+      Nt = Ny + 5 + MPFR_INT_CEIL_LOG2 (Ny);
+
+      /* initialisation of intermediary variable */
+      mpfr_init2 (t, Nt);
+
+      /* First computation */
+      MPFR_ZIV_INIT (loop, Nt);
+      for (;;)
+        {
+          /* compute exp(x*ln(2))*/
+          mpfr_const_log2 (t, MPFR_RNDU); /* ln(2) */
+          mpfr_mul (t, xfrac, t, MPFR_RNDU); /* xfrac * ln(2) */
+          err = Nt - (MPFR_GET_EXP (t) + 2); /* Estimate of the error */
+          mpfr_exp (t, t, MPFR_RNDN); /* exp(xfrac * ln(2)) */
+
+          if (MPFR_LIKELY (MPFR_CAN_ROUND (t, err, Ny, rnd_mode)))
+            break;
+
+          /* Actualisation of the precision */
+          MPFR_ZIV_NEXT (loop, Nt);
+          mpfr_set_prec (t, Nt);
+        }
+      MPFR_ZIV_FREE (loop);
+
+      inexact = mpfr_set (y, t, rnd_mode);
+
+      mpfr_clear (t);
+    }
+
+  mpfr_clear (xfrac);
+  mpfr_clear_flags ();
+  mpfr_mul_2si (y, y, xint, MPFR_RNDN); /* exact or overflow */
+  /* Note: We can have an overflow only when t was rounded up to 2. */
+  MPFR_ASSERTD (MPFR_IS_PURE_FP (y) || inexact > 0);
+  MPFR_SAVE_EXPO_UPDATE_FLAGS (expo, __gmpfr_flags);
+  MPFR_SAVE_EXPO_FREE (expo);
+  return mpfr_check_range (y, inexact, rnd_mode);
+}
diff --git a/src/exp3.c b/src/exp3.c
new file mode 100644
index 000000000..a3998614c
--- /dev/null
+++ b/src/exp3.c
@@ -0,0 +1,333 @@
+/* mpfr_exp -- exponential of a floating-point number
+
+Copyright 1999, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#define MPFR_NEED_LONGLONG_H /* for MPFR_MPZ_SIZEINBASE2 */
+#include "mpfr-impl.h"
+
+/* y <- exp(p/2^r) within 1 ulp, using 2^m terms from the series
+ Assume |p/2^r| < 1.
+ We use the following binary splitting formula:
+ P(a,b) = p if a+1=b, P(a,c)*P(c,b) otherwise
+ Q(a,b) = a*2^r if a+1=b [except Q(0,1)=1], Q(a,c)*Q(c,b) otherwise
+ T(a,b) = P(a,b) if a+1=b, Q(c,b)*T(a,c)+P(a,c)*T(c,b) otherwise
+ Then exp(p/2^r) ~ T(0,i)/Q(0,i) for i so that (p/2^r)^i/i! is small enough.
+
+ Since P(a,b) = p^(b-a), and we consider only values of b-a of the form 2^j,
+ we don't need to compute P(), we only precompute p^(2^j) in the ptoj[] array
+ below.
+
+ Since Q(a,b) is divisible by 2^(r*(b-a-1)), we don't compute the power of
+ two part.
+*/
+/* See the binary-splitting description in the comment above.  Q points
+   to a caller-allocated array of mpz_t's that also provides the S and
+   ptoj scratch arrays (each m+1 entries); mult likewise doubles as the
+   log2_nb_terms array.  p is destroyed (normalized in place). */
+static void
+mpfr_exp_rational (mpfr_ptr y, mpz_ptr p, long r, int m,
+                   mpz_t *Q, mpfr_prec_t *mult)
+{
+  unsigned long n, i, j;
+  mpz_t *S, *ptoj;
+  mpfr_prec_t *log2_nb_terms;
+  mpfr_exp_t diff, expo;
+  mpfr_prec_t precy = MPFR_PREC(y), prec_i_have, prec_ptoj;
+  int k, l;
+
+  /* 1UL << m below must not overflow an unsigned long */
+  MPFR_ASSERTN ((size_t) m < sizeof (long) * CHAR_BIT - 1);
+
+  S = Q + (m+1);
+  ptoj = Q + 2*(m+1); /* ptoj[i] = mantissa^(2^i) */
+  log2_nb_terms = mult + (m+1);
+
+  /* Normalize p */
+  MPFR_ASSERTD (mpz_cmp_ui (p, 0) != 0);
+  n = mpz_scan1 (p, 0); /* number of trailing zeros in p */
+  mpz_tdiv_q_2exp (p, p, n);
+  r -= n; /* since |p/2^r| < 1 and p >= 1, r >= 1 */
+
+  /* Set initial var */
+  mpz_set (ptoj[0], p);
+  for (k = 1; k < m; k++)
+    mpz_mul (ptoj[k], ptoj[k-1], ptoj[k-1]); /* ptoj[k] = p^(2^k) */
+  mpz_set_ui (Q[0], 1);
+  mpz_set_ui (S[0], 1);
+  k = 0;
+  mult[0] = 0; /* the multiplier P[k]/Q[k] for the remaining terms
+                  satisfies P[k]/Q[k] <= 2^(-mult[k]) */
+  log2_nb_terms[0] = 0; /* log2(#terms) [exact in 1st loop where 2^k] */
+  prec_i_have = 0;
+
+  /* Main Loop: add terms one at a time, merging pairs of partial sums
+     of equal size (binary counter on the low bits of j). */
+  n = 1UL << m;
+  for (i = 1; (prec_i_have < precy) && (i < n); i++)
+    {
+      /* invariant: Q[0]*Q[1]*...*Q[k] equals i! */
+      k++;
+      log2_nb_terms[k] = 0; /* 1 term */
+      mpz_set_ui (Q[k], i + 1);
+      mpz_set_ui (S[k], i + 1);
+      j = i + 1; /* we have computed j = i+1 terms so far */
+      l = 0;
+      while ((j & 1) == 0) /* combine and reduce */
+        {
+          /* invariant: S[k] corresponds to 2^l consecutive terms */
+          mpz_mul (S[k], S[k], ptoj[l]);
+          mpz_mul (S[k-1], S[k-1], Q[k]);
+          /* Q[k] corresponds to 2^l consecutive terms too.
+             Since it does not contain the factor 2^(r*2^l),
+             when going from l to l+1 we need to multiply
+             by 2^(r*2^(l+1))/2^(r*2^l) = 2^(r*2^l) */
+          mpz_mul_2exp (S[k-1], S[k-1], r << l);
+          mpz_add (S[k-1], S[k-1], S[k]);
+          mpz_mul (Q[k-1], Q[k-1], Q[k]);
+          log2_nb_terms[k-1] ++; /* number of terms in S[k-1]
+                                    is a power of 2 by construction */
+          MPFR_MPZ_SIZEINBASE2 (prec_i_have, Q[k]);
+          MPFR_MPZ_SIZEINBASE2 (prec_ptoj, ptoj[l]);
+          mult[k-1] += prec_i_have + (r << l) - prec_ptoj - 1;
+          prec_i_have = mult[k] = mult[k-1];
+          /* since mult[k] >= mult[k-1] + nbits(Q[k]),
+             we have Q[0]*...*Q[k] <= 2^mult[k] = 2^prec_i_have */
+          l ++;
+          j >>= 1;
+          k --;
+        }
+    }
+
+  /* accumulate all products in S[0] and Q[0]. Warning: contrary to above,
+     here we do not have log2_nb_terms[k-1] = log2_nb_terms[k]+1. */
+  l = 0; /* number of accumulated terms in the right part S[k]/Q[k] */
+  while (k > 0)
+    {
+      j = log2_nb_terms[k-1];
+      mpz_mul (S[k], S[k], ptoj[j]);
+      mpz_mul (S[k-1], S[k-1], Q[k]);
+      l += 1 << log2_nb_terms[k];
+      mpz_mul_2exp (S[k-1], S[k-1], r * l);
+      mpz_add (S[k-1], S[k-1], S[k]);
+      mpz_mul (Q[k-1], Q[k-1], Q[k]);
+      k--;
+    }
+
+  /* Q[0] now equals i! */
+  /* Truncate S[0] to about 2*precy bits and Q[0] to about precy bits,
+     tracking the dropped scale in expo, before the final division. */
+  MPFR_MPZ_SIZEINBASE2 (prec_i_have, S[0]);
+  diff = (mpfr_exp_t) prec_i_have - 2 * (mpfr_exp_t) precy;
+  expo = diff;
+  if (diff >= 0)
+    mpz_fdiv_q_2exp (S[0], S[0], diff);
+  else
+    mpz_mul_2exp (S[0], S[0], -diff);
+
+  MPFR_MPZ_SIZEINBASE2 (prec_i_have, Q[0]);
+  diff = (mpfr_exp_t) prec_i_have - (mpfr_prec_t) precy;
+  expo -= diff;
+  if (diff > 0)
+    mpz_fdiv_q_2exp (Q[0], Q[0], diff);
+  else
+    mpz_mul_2exp (Q[0], Q[0], -diff);
+
+  mpz_tdiv_q (S[0], S[0], Q[0]);
+  mpfr_set_z (y, S[0], MPFR_RNDD);
+  /* reapply the dropped scale and the 2^(r*(i-1)) factor omitted from Q */
+  MPFR_SET_EXP (y, MPFR_GET_EXP (y) + expo - r * (i - 1) );
+}
+
+#define shift (GMP_NUMB_BITS/2)
+
+/* Set y = exp(x) rounded in direction rnd_mode, using binary splitting
+   (mpfr_exp_rational) on limb-sized slices of the argument inside a Ziv
+   loop; return the ternary value.  Used for large precisions (see the
+   MPFR_EXP_THRESHOLD dispatch in mpfr_exp). */
+int
+mpfr_exp_3 (mpfr_ptr y, mpfr_srcptr x, mpfr_rnd_t rnd_mode)
+{
+  mpfr_t t, x_copy, tmp;
+  mpz_t uk;
+  mpfr_exp_t ttt, shift_x;
+  unsigned long twopoweri;
+  mpz_t *P;
+  mpfr_prec_t *mult;
+  int i, k, loop;
+  int prec_x;
+  mpfr_prec_t realprec, Prec;
+  int iter;
+  int inexact = 0;
+  MPFR_SAVE_EXPO_DECL (expo);
+  MPFR_ZIV_DECL (ziv_loop);
+
+  MPFR_LOG_FUNC (("x[%#R]=%R rnd=%d", x, x, rnd_mode),
+                 ("y[%#R]=%R inexact=%d", y, y, inexact));
+
+  MPFR_SAVE_EXPO_MARK (expo);
+
+  /* decompose x */
+  /* we first write x = 1.xxxxxxxxxxxxx
+     ----- k bits -- */
+  prec_x = MPFR_INT_CEIL_LOG2 (MPFR_PREC (x)) - MPFR_LOG2_GMP_NUMB_BITS;
+  if (prec_x < 0)
+    prec_x = 0;
+
+  ttt = MPFR_GET_EXP (x);
+  mpfr_init2 (x_copy, MPFR_PREC(x));
+  mpfr_set (x_copy, x, MPFR_RNDD);
+
+  /* we shift to get a number less than 1; shift_x squarings at the end
+     undo the division by 2^shift_x */
+  if (ttt > 0)
+    {
+      shift_x = ttt;
+      mpfr_div_2ui (x_copy, x, ttt, MPFR_RNDN);
+      ttt = MPFR_GET_EXP (x_copy);
+    }
+  else
+    shift_x = 0;
+  MPFR_ASSERTD (ttt <= 0);
+
+  /* Init prec and vars */
+  realprec = MPFR_PREC (y) + MPFR_INT_CEIL_LOG2 (prec_x + MPFR_PREC (y));
+  Prec = realprec + shift + 2 + shift_x;
+  mpfr_init2 (t, Prec);
+  mpfr_init2 (tmp, Prec);
+  mpz_init (uk);
+
+  /* Main loop */
+  MPFR_ZIV_INIT (ziv_loop, realprec);
+  for (;;)
+    {
+      int scaled = 0;
+      MPFR_BLOCK_DECL (flags);
+
+      k = MPFR_INT_CEIL_LOG2 (Prec) - MPFR_LOG2_GMP_NUMB_BITS;
+
+      /* now we have to extract */
+      twopoweri = GMP_NUMB_BITS;
+
+      /* Allocate tables: P provides the Q/S/ptoj scratch arrays and
+         mult the mult/log2_nb_terms arrays used by mpfr_exp_rational */
+      P = (mpz_t*) (*__gmp_allocate_func) (3*(k+2)*sizeof(mpz_t));
+      for (i = 0; i < 3*(k+2); i++)
+        mpz_init (P[i]);
+      mult = (mpfr_prec_t*) (*__gmp_allocate_func) (2*(k+2)*sizeof(mpfr_prec_t));
+
+      /* Particular case for i==0 */
+      mpfr_extract (uk, x_copy, 0);
+      MPFR_ASSERTD (mpz_cmp_ui (uk, 0) != 0);
+      mpfr_exp_rational (tmp, uk, shift + twopoweri - ttt, k + 1, P, mult);
+      for (loop = 0; loop < shift; loop++)
+        mpfr_sqr (tmp, tmp, MPFR_RNDD);
+      twopoweri *= 2;
+
+      /* General case: multiply in the contribution of each remaining
+         limb-slice of the argument */
+      iter = (k <= prec_x) ? k : prec_x;
+      for (i = 1; i <= iter; i++)
+        {
+          mpfr_extract (uk, x_copy, i);
+          if (MPFR_LIKELY (mpz_cmp_ui (uk, 0) != 0))
+            {
+              mpfr_exp_rational (t, uk, twopoweri - ttt, k - i + 1, P, mult);
+              mpfr_mul (tmp, tmp, t, MPFR_RNDD);
+            }
+          MPFR_ASSERTN (twopoweri <= LONG_MAX/2);
+          twopoweri *=2;
+        }
+
+      /* Clear tables */
+      for (i = 0; i < 3*(k+2); i++)
+        mpz_clear (P[i]);
+      (*__gmp_free_func) (P, 3*(k+2)*sizeof(mpz_t));
+      (*__gmp_free_func) (mult, 2*(k+2)*sizeof(mpfr_prec_t));
+
+      if (shift_x > 0)
+        {
+          /* Undo the initial argument reduction by shift_x squarings,
+             watching the flags for overflow/underflow */
+          MPFR_BLOCK (flags, {
+              for (loop = 0; loop < shift_x - 1; loop++)
+                mpfr_sqr (tmp, tmp, MPFR_RNDD);
+              mpfr_sqr (t, tmp, MPFR_RNDD);
+            } );
+
+          if (MPFR_UNLIKELY (MPFR_OVERFLOW (flags)))
+            {
+              /* tmp <= exact result, so that it is a real overflow. */
+              inexact = mpfr_overflow (y, rnd_mode, 1);
+              MPFR_SAVE_EXPO_UPDATE_FLAGS (expo, MPFR_FLAGS_OVERFLOW);
+              break;
+            }
+
+          if (MPFR_UNLIKELY (MPFR_UNDERFLOW (flags)))
+            {
+              /* This may be a spurious underflow. So, let's scale
+                 the result. */
+              mpfr_mul_2ui (tmp, tmp, 1, MPFR_RNDD); /* no overflow, exact */
+              mpfr_sqr (t, tmp, MPFR_RNDD);
+              if (MPFR_IS_ZERO (t))
+                {
+                  /* approximate result < 2^(emin - 3), thus
+                     exact result < 2^(emin - 2). */
+                  inexact = mpfr_underflow (y, (rnd_mode == MPFR_RNDN) ?
+                                            MPFR_RNDZ : rnd_mode, 1);
+                  MPFR_SAVE_EXPO_UPDATE_FLAGS (expo, MPFR_FLAGS_UNDERFLOW);
+                  break;
+                }
+              scaled = 1;
+            }
+        }
+
+      if (mpfr_can_round (shift_x > 0 ? t : tmp, realprec, MPFR_RNDD, MPFR_RNDZ,
+                          MPFR_PREC(y) + (rnd_mode == MPFR_RNDN)))
+        {
+          inexact = mpfr_set (y, shift_x > 0 ? t : tmp, rnd_mode);
+          if (MPFR_UNLIKELY (scaled && MPFR_IS_PURE_FP (y)))
+            {
+              int inex2;
+              mpfr_exp_t ey;
+
+              /* The result has been scaled and needs to be corrected. */
+              ey = MPFR_GET_EXP (y);
+              inex2 = mpfr_mul_2si (y, y, -2, rnd_mode);
+              if (inex2) /* underflow */
+                {
+                  if (rnd_mode == MPFR_RNDN && inexact < 0 &&
+                      MPFR_IS_ZERO (y) && ey == __gmpfr_emin + 1)
+                    {
+                      /* Double rounding case: in MPFR_RNDN, the scaled
+                         result has been rounded downward to 2^emin.
+                         As the exact result is > 2^(emin - 2), correct
+                         rounding must be done upward. */
+                      /* TODO: make sure in coverage tests that this line
+                         is reached. */
+                      inexact = mpfr_underflow (y, MPFR_RNDU, 1);
+                    }
+                  else
+                    {
+                      /* No double rounding. */
+                      inexact = inex2;
+                    }
+                  MPFR_SAVE_EXPO_UPDATE_FLAGS (expo, MPFR_FLAGS_UNDERFLOW);
+                }
+            }
+          break;
+        }
+
+      MPFR_ZIV_NEXT (ziv_loop, realprec);
+      Prec = realprec + shift + 2 + shift_x;
+      mpfr_set_prec (t, Prec);
+      mpfr_set_prec (tmp, Prec);
+    }
+  MPFR_ZIV_FREE (ziv_loop);
+
+  mpz_clear (uk);
+  mpfr_clear (tmp);
+  mpfr_clear (t);
+  mpfr_clear (x_copy);
+  MPFR_SAVE_EXPO_FREE (expo);
+  return inexact;
+}
diff --git a/src/exp_2.c b/src/exp_2.c
new file mode 100644
index 000000000..f0b5777b2
--- /dev/null
+++ b/src/exp_2.c
@@ -0,0 +1,419 @@
+/* mpfr_exp_2 -- exponential of a floating-point number
+ using algorithms in O(n^(1/2)*M(n)) and O(n^(1/3)*M(n))
+
+Copyright 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+/* #define DEBUG */
+#define MPFR_NEED_LONGLONG_H /* for count_leading_zeros */
+#include "mpfr-impl.h"
+
+static unsigned long
+mpfr_exp2_aux (mpz_t, mpfr_srcptr, mpfr_prec_t, mpfr_exp_t *);
+static unsigned long
+mpfr_exp2_aux2 (mpz_t, mpfr_srcptr, mpfr_prec_t, mpfr_exp_t *);
+static mpfr_exp_t
+mpz_normalize (mpz_t, mpz_t, mpfr_exp_t);
+static mpfr_exp_t
+mpz_normalize2 (mpz_t, mpz_t, mpfr_exp_t, mpfr_exp_t);
+
+/* if k = the number of bits of z > q (or q < 0), divides z by 2^(k-q)
+   (floor division, i.e. truncation towards -infinity), stores the result
+   in rop and returns the shift amount k-q.
+   Otherwise copies z to rop (when they are distinct) and returns 0.
+ */
+static mpfr_exp_t
+mpz_normalize (mpz_t rop, mpz_t z, mpfr_exp_t q)
+{
+  size_t k;
+
+  MPFR_MPZ_SIZEINBASE2 (k, z);  /* k = number of significant bits of z */
+  /* the casts to mpfr_uexp_t below must not change the value of k */
+  MPFR_ASSERTD (k == (mpfr_uexp_t) k);
+  if (q < 0 || (mpfr_uexp_t) k > (mpfr_uexp_t) q)
+    {
+      /* drop the k-q low bits of z so the result has exactly q bits */
+      mpz_fdiv_q_2exp (rop, z, (unsigned long) ((mpfr_uexp_t) k - q));
+      return (mpfr_exp_t) k - q;
+    }
+  if (MPFR_UNLIKELY(rop != z))
+    mpz_set (rop, z);  /* nothing to shift: plain copy */
+  return 0;
+}
+
+/* if expz > target, shift z by (expz-target) bits to the left (exact).
+   if expz < target, shift z by (target-expz) bits to the right
+   (floor division: low bits are lost).
+   The result goes into rop; the new exponent, target, is returned.
+*/
+static mpfr_exp_t
+mpz_normalize2 (mpz_t rop, mpz_t z, mpfr_exp_t expz, mpfr_exp_t target)
+{
+  if (target > expz)
+    mpz_fdiv_q_2exp (rop, z, target - expz);  /* shift right: lose low bits */
+  else
+    mpz_mul_2exp (rop, z, expz - target);     /* shift left: exact */
+  return target;
+}
+
+/* use Brent's formula exp(x) = (1+r+r^2/2!+r^3/3!+...)^(2^K)*2^n
+   where x = n*log(2)+(2^K)*r
+   together with the Paterson-Stockmeyer O(t^(1/2)) algorithm for the
+   evaluation of power series. The resulting complexity is O(n^(1/3)*M(n)).
+   This function returns with the exact flags due to exp.
+*/
+int
+mpfr_exp_2 (mpfr_ptr y, mpfr_srcptr x, mpfr_rnd_t rnd_mode)
+{
+  long n;
+  unsigned long K, k, l, err; /* FIXME: Which type ? */
+  int error_r;
+  mpfr_exp_t exps, expx;
+  mpfr_prec_t q, precy;
+  int inexact;
+  mpfr_t r, s;
+  mpz_t ss;
+  MPFR_ZIV_DECL (loop);
+
+  MPFR_LOG_FUNC (("x[%#R]=%R rnd=%d", x, x, rnd_mode),
+                 ("y[%#R]=%R inexact=%d", y, y, inexact));
+
+  expx = MPFR_GET_EXP (x);
+  precy = MPFR_PREC(y);
+
+  /* Warning: we cannot use the 'double' type here, since on 64-bit machines
+     x may be as large as 2^62*log(2) without overflow, and then x/log(2)
+     is about 2^62: not every integer of that size can be represented as a
+     'double', thus the argument reduction would fail. */
+  if (expx <= -2)
+    /* |x| <= 0.25, thus n = round(x/log(2)) = 0 */
+    n = 0;
+  else
+    {
+      /* n = round(x/log(2)), computed with enough precision for a long */
+      mpfr_init2 (r, sizeof (long) * CHAR_BIT);
+      mpfr_const_log2 (r, MPFR_RNDZ);
+      mpfr_div (r, x, r, MPFR_RNDN);
+      n = mpfr_get_si (r, MPFR_RNDN);
+      mpfr_clear (r);
+    }
+  /* we have |x| <= (|n|+1)*log(2) */
+  MPFR_LOG_MSG (("d(x)=%1.30e n=%ld\n", mpfr_get_d1(x), n));
+
+  /* error_r bounds the cancelled bits in x - n*log(2) */
+  if (MPFR_UNLIKELY (n == 0))
+    error_r = 0;
+  else
+    {
+      count_leading_zeros (error_r, (mp_limb_t) SAFE_ABS (unsigned long, n) + 1);
+      error_r = GMP_NUMB_BITS - error_r;
+      /* we have |x| <= 2^error_r * log(2) */
+    }
+
+  /* for the O(n^(1/2)*M(n)) method, the Taylor series computation of
+     n/K terms costs about n/(2K) multiplications when computed in fixed
+     point */
+  K = (precy < MPFR_EXP_2_THRESHOLD) ? __gmpfr_isqrt ((precy + 1) / 2)
+    : __gmpfr_cuberoot (4*precy);
+  l = (precy - 1) / K + 1;
+  err = K + MPFR_INT_CEIL_LOG2 (2 * l + 18);
+  /* add K extra bits, i.e. failure probability <= 1/2^K = O(1/precy) */
+  q = precy + err + K + 8;
+  /* if |x| >> 1, take into account the cancelled bits */
+  if (expx > 0)
+    q += expx;
+
+  /* Note: due to the mpfr_prec_round below, it is not possible to use
+     the MPFR_GROUP_* macros here. */
+
+  mpfr_init2 (r, q + error_r);
+  mpfr_init2 (s, q + error_r);
+
+  /* the algorithm consists in computing an upper bound of exp(x) using
+     a precision of q bits, and see if we can round to MPFR_PREC(y) taking
+     into account the maximal error. Otherwise we increase q. */
+  MPFR_ZIV_INIT (loop, q);
+  for (;;)
+    {
+      MPFR_LOG_MSG (("n=%ld K=%lu l=%lu q=%lu error_r=%d\n",
+                     n, K, l, (unsigned long) q, error_r));
+
+      /* First reduce the argument to r = x - n * log(2),
+         so that r is small in absolute value. We want an upper
+         bound on r to get an upper bound on exp(x). */
+
+      /* if n<0, we have to get an upper bound of log(2)
+         in order to get an upper bound of r = x-n*log(2) */
+      mpfr_const_log2 (s, (n >= 0) ? MPFR_RNDZ : MPFR_RNDU);
+      /* s is within 1 ulp(s) of log(2) */
+
+      mpfr_mul_ui (r, s, (n < 0) ? -n : n, (n >= 0) ? MPFR_RNDZ : MPFR_RNDU);
+      /* r is within 3 ulps of |n|*log(2) */
+      if (n < 0)
+        MPFR_CHANGE_SIGN (r);
+      /* r <= n*log(2), within 3 ulps */
+
+      MPFR_LOG_VAR (x);
+      MPFR_LOG_VAR (r);
+
+      mpfr_sub (r, x, r, MPFR_RNDU);
+
+      if (MPFR_IS_PURE_FP (r))
+        {
+          while (MPFR_IS_NEG (r))
+            { /* initial approximation n was too large */
+              n--;
+              mpfr_add (r, r, s, MPFR_RNDU);
+            }
+
+          /* since there was a cancellation in x - n*log(2), the low error_r
+             bits from r are zero and thus non significant, thus we can reduce
+             the working precision */
+          if (error_r > 0)
+            mpfr_prec_round (r, q, MPFR_RNDU);
+          /* the error on r is at most 3 ulps (3 ulps if error_r = 0,
+             and 1 + 3/2 if error_r > 0) */
+          MPFR_LOG_VAR (r);
+          MPFR_ASSERTD (MPFR_IS_POS (r));
+          mpfr_div_2ui (r, r, K, MPFR_RNDU); /* r = (x-n*log(2))/2^K, exact */
+
+          mpz_init (ss);
+          exps = mpfr_get_z_2exp (ss, s);
+          /* s <- 1 + r/1! + r^2/2! + ... + r^l/l! */
+          MPFR_ASSERTD (MPFR_IS_PURE_FP (r) && MPFR_EXP (r) < 0);
+          l = (precy < MPFR_EXP_2_THRESHOLD)
+            ? mpfr_exp2_aux (ss, r, q, &exps)   /* naive method */
+            : mpfr_exp2_aux2 (ss, r, q, &exps); /* Paterson/Stockmeyer meth */
+
+          MPFR_LOG_MSG (("l=%lu q=%lu (K+l)*q^2=%1.3e\n",
+                         l, (unsigned long) q, (K + l) * (double) q * q));
+
+          /* binary exponentiation: square K times to get exp(r)^(2^K) */
+          for (k = 0; k < K; k++)
+            {
+              mpz_mul (ss, ss, ss);
+              exps <<= 1;
+              exps += mpz_normalize (ss, ss, q);
+            }
+          mpfr_set_z (s, ss, MPFR_RNDN);
+
+          MPFR_SET_EXP(s, MPFR_GET_EXP (s) + exps);
+          mpz_clear (ss);
+
+          /* error is at most 2^K*l, plus 2 to take into account of
+             the error of 3 ulps on r */
+          err = K + MPFR_INT_CEIL_LOG2 (l) + 2;
+
+          MPFR_LOG_MSG (("before mult. by 2^n:\n", 0));
+          MPFR_LOG_VAR (s);
+          /* FIX: log the error bound err just computed, not K: the
+             message text says "err=" but the old code passed K. */
+          MPFR_LOG_MSG (("err=%lu bits\n", err));
+
+          if (MPFR_LIKELY (MPFR_CAN_ROUND (s, q - err, precy, rnd_mode)))
+            {
+              mpfr_clear_flags ();
+              inexact = mpfr_mul_2si (y, s, n, rnd_mode);
+              break;
+            }
+        }
+
+      MPFR_ZIV_NEXT (loop, q);
+      mpfr_set_prec (r, q + error_r);
+      mpfr_set_prec (s, q + error_r);
+    }
+  MPFR_ZIV_FREE (loop);
+
+  mpfr_clear (r);
+  mpfr_clear (s);
+
+  return inexact;
+}
+
+/* s <- 1 + r/1! + r^2/2! + ... + r^l/l! while MPFR_EXP(r^l/l!)+MPFR_EXP(r)>-q
+   using naive method with O(l) multiplications.
+   Return the number of iterations l.
+   The absolute error on s is less than 3*l*(l+1)*2^(-q).
+   Version using fixed-point arithmetic with mpz instead
+   of mpfr for internal computations.
+   NOTE[VL]: the following sentence seems to be obsolete since MY_INIT_MPZ
+   is no longer used (r6919); qn was the number of limbs of q.
+   s must have at least qn+1 limbs (qn should be enough, but currently fails
+   since mpz_mul_2exp(s, s, q-1) reallocates qn+1 limbs)
+*/
+static unsigned long
+mpfr_exp2_aux (mpz_t s, mpfr_srcptr r, mpfr_prec_t q, mpfr_exp_t *exps)
+{
+  unsigned long l;
+  mpfr_exp_t dif, expt, expr;
+  mpz_t t, rr;
+  mp_size_t sbit, tbit;
+
+  MPFR_ASSERTN (MPFR_IS_PURE_FP (r));
+
+  expt = 0;                    /* t starts at 1, exponent 0 */
+  *exps = 1 - (mpfr_exp_t) q;  /* s = 2^(q-1), i.e. 1 in fixed point */
+  mpz_init (t);
+  mpz_init (rr);
+  mpz_set_ui(t, 1);
+  mpz_set_ui(s, 1);
+  mpz_mul_2exp(s, s, q-1);
+  expr = mpfr_get_z_2exp(rr, r); /* no error here */
+
+  l = 0;
+  for (;;) {
+    l++;
+    /* at this point t*2^expt approximates r^(l-1)/(l-1)! */
+    mpz_mul(t, t, rr);
+    expt += expr;
+    MPFR_MPZ_SIZEINBASE2 (sbit, s);
+    MPFR_MPZ_SIZEINBASE2 (tbit, t);
+    dif = *exps + sbit - expt - tbit;
+    /* truncates the bits of t which are < ulp(s) = 2^(1-q) */
+    expt += mpz_normalize(t, t, (mpfr_exp_t) q-dif); /* error at most 2^(1-q) */
+    mpz_fdiv_q_ui (t, t, l); /* error at most 2^(1-q) */
+    /* the error wrt r^l/l! is here at most 3*l*ulp(s) */
+    MPFR_ASSERTD (expt == *exps);
+    if (mpz_sgn (t) == 0)
+      break;  /* next term is below 1 ulp of s: stop summing */
+    mpz_add(s, s, t); /* no error here: exact */
+    /* ensures rr has the same size as t: after several shifts, the error
+       on rr is still at most ulp(t)=ulp(s) */
+    MPFR_MPZ_SIZEINBASE2 (tbit, t);
+    expr += mpz_normalize(rr, rr, tbit);
+  }
+
+  mpz_clear (t);
+  mpz_clear (rr);
+
+  return 3 * l * (l + 1);
+}
+
+/* s <- 1 + r/1! + r^2/2! + ... + r^l/l! while MPFR_EXP(r^l/l!)+MPFR_EXP(r)>-q
+   using Paterson-Stockmeyer algorithm with O(sqrt(l)) multiplications.
+   Return l.
+   Uses m multiplications of full size and 2l/m of decreasing size,
+   i.e. a total equivalent to about m+l/m full multiplications,
+   i.e. 2*sqrt(l) for m=sqrt(l).
+   NOTE[VL]: The following sentence seems to be obsolete since MY_INIT_MPZ
+   is no longer used (r6919); sizer was the number of limbs of r.
+   Version using mpz. ss must have at least (sizer+1) limbs.
+   The error is bounded by (l^2+4*l) ulps where l is the return value.
+*/
+static unsigned long
+mpfr_exp2_aux2 (mpz_t s, mpfr_srcptr r, mpfr_prec_t q, mpfr_exp_t *exps)
+{
+  mpfr_exp_t expr, *expR, expt;
+  mpfr_prec_t ql;
+  unsigned long l, m, i;
+  mpz_t t, *R, rr, tmp;
+  mp_size_t sbit, rrbit;
+  MPFR_TMP_DECL(marker);
+
+  /* estimate value of l */
+  MPFR_ASSERTD (MPFR_GET_EXP (r) < 0);
+  l = q / (- MPFR_GET_EXP (r));
+  m = __gmpfr_isqrt (l);  /* number of precomputed "baby-step" powers */
+  /* we access R[2], thus we need m >= 2 */
+  if (m < 2)
+    m = 2;
+
+  MPFR_TMP_MARK(marker);
+  R = (mpz_t*) MPFR_TMP_ALLOC ((m + 1) * sizeof (mpz_t)); /* R[i] is r^i */
+  expR = (mpfr_exp_t*) MPFR_TMP_ALLOC((m + 1) * sizeof (mpfr_exp_t));
+  /* expR[i] is the exponent for R[i] */
+  mpz_init (tmp);
+  mpz_init (rr);
+  mpz_init (t);
+  mpz_set_ui (s, 0);
+  *exps = 1 - q; /* 1 ulp = 2^(1-q) */
+  for (i = 0 ; i <= m ; i++)
+    mpz_init (R[i]);
+  /* precompute the powers R[i] = r^i, all scaled to exponent 1-q */
+  expR[1] = mpfr_get_z_2exp (R[1], r); /* exact operation: no error */
+  expR[1] = mpz_normalize2 (R[1], R[1], expR[1], 1 - q); /* error <= 1 ulp */
+  mpz_mul (t, R[1], R[1]); /* err(t) <= 2 ulps */
+  mpz_fdiv_q_2exp (R[2], t, q - 1); /* err(R[2]) <= 3 ulps */
+  expR[2] = 1 - q;
+  for (i = 3 ; i <= m ; i++)
+    {
+      if ((i & 1) == 1)
+        mpz_mul (t, R[i-1], R[1]); /* err(t) <= 2*i-2 */
+      else
+        mpz_mul (t, R[i/2], R[i/2]);
+      mpz_fdiv_q_2exp (R[i], t, q - 1); /* err(R[i]) <= 2*i-1 ulps */
+      expR[i] = 1 - q;
+    }
+  mpz_set_ui (R[0], 1);
+  mpz_mul_2exp (R[0], R[0], q-1);
+  expR[0] = 1-q; /* R[0]=1 */
+  mpz_set_ui (rr, 1);
+  expr = 0; /* rr contains r^l/l! */
+  /* by induction: err(rr) <= 2*l ulps */
+
+  l = 0;
+  ql = q; /* precision used for current giant step */
+  do
+    {
+      /* all R[i] must have exponent 1-ql */
+      if (l != 0)
+        for (i = 0 ; i < m ; i++)
+          expR[i] = mpz_normalize2 (R[i], R[i], expR[i], 1 - ql);
+      /* the absolute error on R[i]*rr is still 2*i-1 ulps */
+      expt = mpz_normalize2 (t, R[m-1], expR[m-1], 1 - ql);
+      /* err(t) <= 2*m-1 ulps */
+      /* computes t = 1 + r/(l+1) + ... + r^(m-1)*l!/(l+m-1)!
+         using Horner's scheme */
+      for (i = m-1 ; i-- != 0 ; )
+        {
+          mpz_fdiv_q_ui (t, t, l+i+1); /* err(t) += 1 ulp */
+          mpz_add (t, t, R[i]);
+        }
+      /* now err(t) <= (3m-2) ulps */
+
+      /* now multiplies t by r^l/l! and adds to s */
+      mpz_mul (t, t, rr);
+      expt += expr;
+      expt = mpz_normalize2 (t, t, expt, *exps);
+      /* err(t) <= (3m-1) + err_rr(l) <= (3m-2) + 2*l */
+      MPFR_ASSERTD (expt == *exps);
+      mpz_add (s, s, t); /* no error here */
+
+      /* updates rr, the multiplication of the factors l+i could be done
+         using binary splitting too, but it is not sure it would save much */
+      mpz_mul (t, rr, R[m]); /* err(t) <= err(rr) + 2m-1 */
+      expr += expR[m];
+      mpz_set_ui (tmp, 1);
+      for (i = 1 ; i <= m ; i++)
+        mpz_mul_ui (tmp, tmp, l + i);
+      mpz_fdiv_q (t, t, tmp); /* err(t) <= err(rr) + 2m */
+      l += m;
+      if (MPFR_UNLIKELY (mpz_sgn (t) == 0))
+        break;  /* next group of terms vanishes at this precision: stop */
+      expr += mpz_normalize (rr, t, ql); /* err_rr(l+1) <= err_rr(l) + 2m+1 */
+      if (MPFR_UNLIKELY (mpz_sgn (rr) == 0))
+        rrbit = 1;
+      else
+        MPFR_MPZ_SIZEINBASE2 (rrbit, rr);
+      MPFR_MPZ_SIZEINBASE2 (sbit, s);
+      ql = q - *exps - sbit + expr + rrbit;
+      /* TODO: Wrong cast in the loop condition below. The intended
+         test is presumably expr + rrbit > -q in *signed* arithmetic
+         (i.e. the next term is still >= 1 ulp); converting both sides
+         to size_t happens to work on two's-complement targets but
+         should be cleaned up. */
+    }
+  while ((size_t) expr + rrbit > (size_t) -q);
+
+  for (i = 0 ; i <= m ; i++)
+    mpz_clear (R[i]);
+  MPFR_TMP_FREE(marker);
+  mpz_clear (rr);
+  mpz_clear (t);
+  mpz_clear (tmp);
+
+  return l * (l + 4);
+}
diff --git a/src/expm1.c b/src/expm1.c
new file mode 100644
index 000000000..fd6d3c938
--- /dev/null
+++ b/src/expm1.c
@@ -0,0 +1,174 @@
+/* mpfr_expm1 -- Compute exp(x)-1
+
+Copyright 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#define MPFR_NEED_LONGLONG_H
+#include "mpfr-impl.h"
+
+ /* The computation of expm1 is done by
+      expm1(x) = exp(x) - 1,
+    with shortcuts for tiny |x| (where exp(x)-1 would cancel) and for
+    large negative x (where exp(x) underflows and the result is ~ -1).
+ */
+
+int
+mpfr_expm1 (mpfr_ptr y, mpfr_srcptr x , mpfr_rnd_t rnd_mode)
+{
+  int inexact;
+  mpfr_exp_t ex;
+  MPFR_SAVE_EXPO_DECL (expo);
+
+  if (MPFR_UNLIKELY (MPFR_IS_SINGULAR (x)))
+    {
+      if (MPFR_IS_NAN (x))
+        {
+          MPFR_SET_NAN (y);
+          MPFR_RET_NAN;
+        }
+      /* check for inf or -inf (expm1(-inf)=-1) */
+      else if (MPFR_IS_INF (x))
+        {
+          if (MPFR_IS_POS (x))
+            {
+              MPFR_SET_INF (y);
+              MPFR_SET_POS (y);
+              MPFR_RET (0);
+            }
+          else
+            return mpfr_set_si (y, -1, rnd_mode);
+        }
+      else
+        {
+          MPFR_ASSERTD (MPFR_IS_ZERO (x));
+          MPFR_SET_ZERO (y);   /* expm1(+/- 0) = +/- 0 */
+          MPFR_SET_SAME_SIGN (y, x);
+          MPFR_RET (0);
+        }
+    }
+
+  ex = MPFR_GET_EXP (x);
+  if (ex < 0)
+    {
+      /* For -1 < x < 0, abs(expm1(x)-x) < x^2/2.
+         For 0 < x < 1, abs(expm1(x)-x) < x^2.
+         So for small |x|, x itself may be a correctly rounded result. */
+      if (MPFR_IS_POS (x))
+        MPFR_FAST_COMPUTE_IF_SMALL_INPUT (y, x, - ex, 0, 1, rnd_mode, {});
+      else
+        MPFR_FAST_COMPUTE_IF_SMALL_INPUT (y, x, - ex, 1, 0, rnd_mode, {});
+    }
+
+  MPFR_SAVE_EXPO_MARK (expo);
+
+  if (MPFR_IS_NEG (x) && ex > 5) /* x <= -32 */
+    {
+      /* expm1(x) is extremely close to -1: treat the result as -1 with
+         an absolute error bounded by 2^(-err), where err bounds
+         -log2(exp(x)) from below. */
+      mpfr_t minus_one, t;
+      mpfr_exp_t err;
+
+      mpfr_init2 (minus_one, 2);
+      mpfr_init2 (t, 64);
+      mpfr_set_si (minus_one, -1, MPFR_RNDN);
+      mpfr_const_log2 (t, MPFR_RNDU); /* round upward since x is negative */
+      mpfr_div (t, x, t, MPFR_RNDU); /* > x / ln(2) */
+      /* clamp at max(MPFR_EMIN_MIN, -LONG_MAX) so that the conversion
+         to long below cannot overflow */
+      err = mpfr_cmp_si (t, MPFR_EMIN_MIN >= -LONG_MAX ?
+                         MPFR_EMIN_MIN : -LONG_MAX) <= 0 ?
+        - (MPFR_EMIN_MIN >= -LONG_MAX ? MPFR_EMIN_MIN : -LONG_MAX) :
+        - mpfr_get_si (t, MPFR_RNDU);
+      /* exp(x) = 2^(x/ln(2))
+         <= 2^max(MPFR_EMIN_MIN,-LONG_MAX,ceil(x/ln(2)+epsilon))
+         with epsilon > 0 */
+      mpfr_clear (t);
+      MPFR_SMALL_INPUT_AFTER_SAVE_EXPO (y, minus_one, err, 0, 0, rnd_mode,
+                                        expo, { mpfr_clear (minus_one); });
+      mpfr_clear (minus_one);
+    }
+
+  /* General case */
+  {
+    /* Declaration of the intermediary variable */
+    mpfr_t t;
+    /* Declaration of the size variable */
+    mpfr_prec_t Ny = MPFR_PREC(y);   /* target precision */
+    mpfr_prec_t Nt;                  /* working precision */
+    mpfr_exp_t err, exp_te;          /* error */
+    MPFR_ZIV_DECL (loop);
+
+    /* compute the precision of intermediary variable */
+    /* the optimal number of bits : see algorithms.tex */
+    Nt = Ny + MPFR_INT_CEIL_LOG2 (Ny) + 6;
+
+    /* if |x| is smaller than 2^(-e), we will loose about e bits in the
+       subtraction exp(x) - 1 */
+    if (ex < 0)
+      Nt += - ex;
+
+    /* initialize auxiliary variable */
+    mpfr_init2 (t, Nt);
+
+    /* First computation of expm1 */
+    MPFR_ZIV_INIT (loop, Nt);
+    for (;;)
+      {
+        MPFR_BLOCK_DECL (flags);
+
+        /* exp(x) may overflow and underflow */
+        MPFR_BLOCK (flags, mpfr_exp (t, x, MPFR_RNDN));
+        if (MPFR_OVERFLOW (flags))
+          {
+            /* exp(x) overflowed, hence expm1(x) overflows too */
+            inexact = mpfr_overflow (y, rnd_mode, MPFR_SIGN_POS);
+            MPFR_SAVE_EXPO_UPDATE_FLAGS (expo, MPFR_FLAGS_OVERFLOW);
+            break;
+          }
+        else if (MPFR_UNDERFLOW (flags))
+          {
+            /* exp(x) underflowed: the exact result is strictly between
+               -1 and the next representable number towards zero */
+            inexact = mpfr_set_si (y, -1, rnd_mode);
+            MPFR_ASSERTD (inexact == 0);
+            inexact = -1;
+            if (MPFR_IS_LIKE_RNDZ (rnd_mode, 1))
+              {
+                inexact = 1;
+                mpfr_nexttozero (y);
+              }
+            break;
+          }
+
+        exp_te = MPFR_GET_EXP (t); /* FIXME: exp(x) may overflow! */
+        mpfr_sub_ui (t, t, 1, MPFR_RNDN); /* exp(x)-1 */
+
+        /* error estimate */
+        /*err=Nt-(__gmpfr_ceil_log2(1+pow(2,MPFR_EXP(te)-MPFR_EXP(t))));*/
+        err = Nt - (MAX (exp_te - MPFR_GET_EXP (t), 0) + 1);
+
+        if (MPFR_LIKELY (MPFR_CAN_ROUND (t, err, Ny, rnd_mode)))
+          {
+            inexact = mpfr_set (y, t, rnd_mode);
+            break;
+          }
+
+        /* increase the precision */
+        MPFR_ZIV_NEXT (loop, Nt);
+        mpfr_set_prec (t, Nt);
+      }
+    MPFR_ZIV_FREE (loop);
+
+    mpfr_clear (t);
+  }
+
+  MPFR_SAVE_EXPO_FREE (expo);
+  return mpfr_check_range (y, inexact, rnd_mode);
+}
diff --git a/src/extract.c b/src/extract.c
new file mode 100644
index 000000000..a1dd3a046
--- /dev/null
+++ b/src/extract.c
@@ -0,0 +1,55 @@
+/* mpfr_extract -- bit-extraction function for the binary splitting algorithm
+
+Copyright 2000, 2001, 2002, 2004, 2005, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#include "mpfr-impl.h"
+
+/* given 0 <= |p| < 1, this function extracts limbs of p and puts them in y.
+   It is mainly designed for the "binary splitting" algorithm.
+
+   More precisely, if B = 2^GMP_NUMB_BITS:
+   - for i=0, y = floor(p * B)
+   - for i>0, y = (p * B^(2^i)) mod B^(2^(i-1))
+ */
+
+void
+mpfr_extract (mpz_ptr y, mpfr_srcptr p, unsigned int i)
+{
+  unsigned long two_i = 1UL << i;             /* 2^i */
+  unsigned long two_i_2 = i ? two_i / 2 : 1;  /* limb count of the result */
+  mp_size_t size_p = MPFR_LIMB_SIZE (p);
+
+  /* as 0 <= |p| < 1, we don't have to care with infinities, NaN, ... */
+  MPFR_ASSERTD (!MPFR_IS_SINGULAR (p));
+
+  _mpz_realloc (y, two_i_2);
+  if ((mpfr_uexp_t) size_p < two_i)
+    {
+      /* p has fewer than 2^i limbs: zero-fill y, then copy the part of
+         p's mantissa that falls inside the extracted limb window */
+      MPN_ZERO (PTR(y), two_i_2);
+      if ((mpfr_uexp_t) size_p >= two_i_2)
+        MPN_COPY (PTR(y) + two_i - size_p, MPFR_MANT(p), size_p - two_i_2);
+    }
+  else
+    /* general case: take the two_i_2 limbs just below the top 2^i limbs */
+    MPN_COPY (PTR(y), MPFR_MANT(p) + size_p - two_i, two_i_2);
+
+  MPN_NORMALIZE (PTR(y), two_i_2);  /* strip high zero limbs */
+  SIZ(y) = (MPFR_IS_NEG (p)) ? -two_i_2 : two_i_2;  /* y gets p's sign */
+}
diff --git a/src/factorial.c b/src/factorial.c
new file mode 100644
index 000000000..5e40809fe
--- /dev/null
+++ b/src/factorial.c
@@ -0,0 +1,113 @@
+/* mpfr_fac_ui -- factorial of a non-negative integer
+
+Copyright 2001, 2004, 2005, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#define MPFR_NEED_LONGLONG_H
+#include "mpfr-impl.h"
+
+ /* The computation of n! is done by
+
+      n! = prod^{n}_{i=1} i
+
+    i.e. a plain product loop, using directed rounding so that the final
+    inexact flag can be determined (all rounding errors share one sign).
+ */
+
+/* FIXME: efficiency problems with large arguments; see comments in gamma.c. */
+
+int
+mpfr_fac_ui (mpfr_ptr y, unsigned long int x, mpfr_rnd_t rnd_mode)
+{
+  mpfr_t t;          /* Variable of Intermediary Calculation */
+  unsigned long i;
+  int round, inexact;
+
+  mpfr_prec_t Ny;    /* Precision of output variable */
+  mpfr_prec_t Nt;    /* Precision of Intermediary Calculation variable */
+  mpfr_prec_t err;   /* Precision of error */
+
+  mpfr_rnd_t rnd;    /* directed rounding used for the product */
+  MPFR_SAVE_EXPO_DECL (expo);
+  MPFR_ZIV_DECL (loop);
+
+  /***** test x = 0 and x == 1 ******/
+  if (MPFR_UNLIKELY (x <= 1))
+    return mpfr_set_ui (y, 1, rnd_mode); /* 0! = 1 and 1! = 1 */
+
+  MPFR_SAVE_EXPO_MARK (expo);
+
+  /* Initialisation of the Precision */
+  Ny = MPFR_PREC (y);
+
+  /* compute the size of intermediary variable */
+  Nt = Ny + 2 * MPFR_INT_CEIL_LOG2 (x) + 7;
+
+  mpfr_init2 (t, Nt); /* initialise of intermediary variable */
+
+  rnd = MPFR_RNDZ;
+  MPFR_ZIV_INIT (loop, Nt);
+  for (;;)
+    {
+      /* compute factorial */
+      inexact = mpfr_set_ui (t, 1, rnd);
+      for (i = 2 ; i <= x ; i++)
+        {
+          round = mpfr_mul_ui (t, t, i, rnd);
+          /* assume the first inexact product gives the sign
+             of difference: is that always correct? */
+          if (inexact == 0)
+            inexact = round;
+        }
+
+      err = Nt - 1 - MPFR_INT_CEIL_LOG2 (Nt);
+
+      round = !inexact || mpfr_can_round (t, err, rnd, MPFR_RNDZ,
+                                          Ny + (rnd_mode == MPFR_RNDN));
+
+      if (MPFR_LIKELY (round))
+        {
+          /* If inexact = 0, then t is exactly x!, so round is the
+             correct inexact flag.
+             Otherwise, t != x! since we rounded to zero or away. */
+          round = mpfr_set (y, t, rnd_mode);
+          if (inexact == 0)
+            {
+              inexact = round;
+              break;
+            }
+          else if ((inexact < 0 && round <= 0)
+                   || (inexact > 0 && round >= 0))
+            break;  /* both errors point the same way: flag is reliable */
+          else /* inexact and round have opposite signs: we cannot
+                  compute the inexact flag. Restart using the
+                  symmetric rounding. */
+            rnd = (rnd == MPFR_RNDZ) ? MPFR_RNDU : MPFR_RNDZ;
+        }
+      MPFR_ZIV_NEXT (loop, Nt);
+      mpfr_set_prec (t, Nt);
+    }
+  MPFR_ZIV_FREE (loop);
+
+  mpfr_clear (t);
+  MPFR_SAVE_EXPO_FREE (expo);
+  return mpfr_check_range (y, inexact, rnd_mode);
+}
+
+
+
+
diff --git a/src/fits_intmax.c b/src/fits_intmax.c
new file mode 100644
index 000000000..fc6c5ef56
--- /dev/null
+++ b/src/fits_intmax.c
@@ -0,0 +1,120 @@
+/* mpfr_fits_intmax_p -- test whether an mpfr fits an intmax_t.
+
+Copyright 2004, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#ifdef HAVE_CONFIG_H
+# include "config.h" /* for a build within gmp */
+#endif
+
+/* The ISO C99 standard specifies that in C++ implementations the
+ INTMAX_MAX, ... macros should only be defined if explicitly requested. */
+#if defined __cplusplus
+# define __STDC_LIMIT_MACROS
+# define __STDC_CONSTANT_MACROS
+#endif
+
+#if HAVE_INTTYPES_H
+# include <inttypes.h>
+#endif
+#if HAVE_STDINT_H
+# include <stdint.h>
+#endif
+
+#include "mpfr-impl.h"
+
+#ifdef _MPFR_H_HAVE_INTMAX_T
+
+/* We can't use fits_s.h <= mpfr_cmp_ui */
+/* Return non-zero iff f, rounded in direction rnd to the precision of
+   intmax_t, fits in an intmax_t. */
+int
+mpfr_fits_intmax_p (mpfr_srcptr f, mpfr_rnd_t rnd)
+{
+  mpfr_exp_t e;
+  int prec;
+  mpfr_t x, y;
+  int neg;
+  int res;
+
+  if (MPFR_UNLIKELY (MPFR_IS_SINGULAR (f)))
+    /* Zero always fit */
+    return MPFR_IS_ZERO (f) ? 1 : 0;
+
+  /* now it fits if either
+     (a) MINIMUM <= f <= MAXIMUM
+     (b) or MINIMUM <= round(f, prec(slong), rnd) <= MAXIMUM */
+
+  /* FIX: use MPFR_GET_EXP (as in fits_s.h) instead of the raw MPFR_EXP
+     macro: it additionally asserts, in debug builds, that f is not
+     singular, which the early return above guarantees. */
+  e = MPFR_GET_EXP (f);
+  if (e < 1)
+    return 1; /* |f| < 1: always fits */
+
+  neg = MPFR_IS_NEG (f);
+
+  /* let EXTREMUM be MAXIMUM if f > 0, and MINIMUM if f < 0 */
+
+  /* first compute prec(EXTREMUM), this could be done at configure time,
+     but the result can depend on neg (the loop is moved inside the "if"
+     to give the compiler a better chance to compute prec statically) */
+  if (neg)
+    {
+      uintmax_t s;
+      /* In C89, the division on negative integers isn't well-defined. */
+      s = SAFE_ABS (uintmax_t, MPFR_INTMAX_MIN);
+      for (prec = 0; s != 0; s /= 2, prec ++);
+    }
+  else
+    {
+      intmax_t s;
+      s = MPFR_INTMAX_MAX;
+      for (prec = 0; s != 0; s /= 2, prec ++);
+    }
+
+  /* EXTREMUM needs prec bits, i.e. 2^(prec-1) <= |EXTREMUM| < 2^prec */
+
+  /* if e <= prec - 1, then f < 2^(prec-1) <= |EXTREMUM| */
+  if (e <= prec - 1)
+    return 1;
+
+  /* if e >= prec + 1, then f >= 2^prec > |EXTREMUM| */
+  if (e >= prec + 1)
+    return 0;
+
+  MPFR_ASSERTD (e == prec);
+
+  /* hard case: first round to prec bits, then check */
+  mpfr_init2 (x, prec);
+  mpfr_set (x, f, rnd);
+
+  if (neg)
+    {
+      /* compare against INTMAX_MIN through an mpfr value, since
+         |INTMAX_MIN| may not fit in a long (so mpfr_cmp_si is unusable) */
+      mpfr_init2 (y, prec);
+      mpfr_set_sj (y, MPFR_INTMAX_MIN, MPFR_RNDN);
+      res = mpfr_cmp (x, y) >= 0;
+      mpfr_clear (y);
+    }
+  else
+    {
+      /* f > 0: it fits iff rounding did not push x up to 2^prec,
+         i.e. iff the exponent is unchanged */
+      res = MPFR_GET_EXP (x) == e;
+    }
+
+  mpfr_clear (x);
+  return res;
+}
+
+#endif
diff --git a/src/fits_s.h b/src/fits_s.h
new file mode 100644
index 000000000..5201f2b6b
--- /dev/null
+++ b/src/fits_s.h
@@ -0,0 +1,86 @@
+/* mpfr_fits_*_p -- test whether an mpfr fits a C signed type.
+
+Copyright 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+Copied from mpf/fits_s.h.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#include "mpfr-impl.h"
+
+/* Generic body of the mpfr_fits_*_p functions: the including file must
+   define FUNCTION (the function name), TYPE (the signed C type), and
+   MINIMUM/MAXIMUM (its extrema). See fits_sint.c for an example. */
+int
+FUNCTION (mpfr_srcptr f, mpfr_rnd_t rnd)
+{
+  mpfr_exp_t e;
+  int prec;
+  mpfr_t x;
+  int neg;
+  int res;
+
+  if (MPFR_UNLIKELY (MPFR_IS_SINGULAR (f)))
+    /* Zero always fit */
+    return MPFR_IS_ZERO (f) ? 1 : 0;
+
+  /* now it fits if either
+     (a) MINIMUM <= f <= MAXIMUM
+     (b) or MINIMUM <= round(f, prec(slong), rnd) <= MAXIMUM */
+
+  e = MPFR_GET_EXP (f);
+  if (e < 1)
+    return 1; /* |f| < 1: always fits */
+
+  neg = MPFR_IS_NEG (f);
+
+  /* let EXTREMUM be MAXIMUM if f > 0, and MINIMUM if f < 0 */
+
+  /* first compute prec(EXTREMUM), this could be done at configure time,
+     but the result can depend on neg (the loop is moved inside the "if"
+     to give the compiler a better chance to compute prec statically) */
+  if (neg)
+    {
+      unsigned TYPE s;
+      /* In C89, the division on negative integers isn't well-defined. */
+      s = SAFE_ABS (unsigned TYPE, MINIMUM);
+      for (prec = 0; s != 0; s /= 2, prec ++);
+    }
+  else
+    {
+      TYPE s;
+      s = MAXIMUM;
+      for (prec = 0; s != 0; s /= 2, prec ++);
+    }
+
+  /* EXTREMUM needs prec bits, i.e. 2^(prec-1) <= |EXTREMUM| < 2^prec */
+
+  /* if e <= prec - 1, then f < 2^(prec-1) <= |EXTREMUM| */
+  if (e <= prec - 1)
+    return 1;
+
+  /* if e >= prec + 1, then f >= 2^prec > |EXTREMUM| */
+  if (e >= prec + 1)
+    return 0;
+
+  MPFR_ASSERTD (e == prec);
+
+  /* hard case: first round to prec bits, then check */
+  mpfr_init2 (x, prec);
+  mpfr_set (x, f, rnd);
+  /* for f < 0, compare against MINIMUM; for f > 0, it fits iff rounding
+     did not increase the exponent (i.e. did not reach 2^prec) */
+  res = neg ? (mpfr_cmp_si (x, MINIMUM) >= 0) : (MPFR_GET_EXP (x) == e);
+  mpfr_clear (x);
+  return res;
+}
diff --git a/src/fits_sint.c b/src/fits_sint.c
new file mode 100644
index 000000000..c9bbf3653
--- /dev/null
+++ b/src/fits_sint.c
@@ -0,0 +1,28 @@
+/* mpfr_fits_sint_p -- test whether an mpfr fits an int.
+
+Copyright 2003, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+/* Instantiate the generic signed-type test in fits_s.h for "int". */
+#define FUNCTION mpfr_fits_sint_p
+#define MAXIMUM INT_MAX
+#define MINIMUM INT_MIN
+#define TYPE int
+
+#include "fits_s.h"
diff --git a/src/fits_slong.c b/src/fits_slong.c
new file mode 100644
index 000000000..2e09fa444
--- /dev/null
+++ b/src/fits_slong.c
@@ -0,0 +1,28 @@
+/* mpfr_fits_slong_p -- test whether an mpfr fits a long.
+
+Copyright 2003, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#define FUNCTION mpfr_fits_slong_p
+#define MAXIMUM LONG_MAX
+#define MINIMUM LONG_MIN
+#define TYPE long
+
+#include "fits_s.h"
diff --git a/src/fits_sshort.c b/src/fits_sshort.c
new file mode 100644
index 000000000..582b584fe
--- /dev/null
+++ b/src/fits_sshort.c
@@ -0,0 +1,28 @@
+/* mpfr_fits_sshort_p -- test whether an mpfr fits a short.
+
+Copyright 2003, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#define FUNCTION mpfr_fits_sshort_p
+#define MAXIMUM SHRT_MAX
+#define MINIMUM SHRT_MIN
+#define TYPE short
+
+#include "fits_s.h"
diff --git a/src/fits_u.h b/src/fits_u.h
new file mode 100644
index 000000000..0a84fd85d
--- /dev/null
+++ b/src/fits_u.h
@@ -0,0 +1,67 @@
+/* mpfr_fits_*_p -- test whether an mpfr fits a C unsigned type.
+
+Copyright 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#include "mpfr-impl.h"
+
+int
+FUNCTION (mpfr_srcptr f, mpfr_rnd_t rnd)
+{
+ mpfr_exp_t e;
+ int prec;
+ TYPE s;
+ mpfr_t x;
+ int res;
+
+ if (MPFR_UNLIKELY (MPFR_IS_SINGULAR (f)))
+    /* Zero always fits */
+ return MPFR_IS_ZERO (f) ? 1 : 0;
+ else if (MPFR_IS_NEG (f))
+ /* Negative numbers don't fit */
+ return 0;
+ /* now it fits if
+ (a) f <= MAXIMUM
+     (b) round(f, prec(TYPE), rnd) <= MAXIMUM */
+
+ e = MPFR_GET_EXP (f);
+
+ /* first compute prec(MAXIMUM); fits in an int */
+ for (s = MAXIMUM, prec = 0; s != 0; s /= 2, prec ++);
+
+ /* MAXIMUM needs prec bits, i.e. MAXIMUM = 2^prec - 1 */
+
+ /* if e <= prec - 1, then f < 2^(prec-1) < MAXIMUM */
+ if (e <= prec - 1)
+ return 1;
+
+ /* if e >= prec + 1, then f >= 2^prec > MAXIMUM */
+ if (e >= prec + 1)
+ return 0;
+
+ MPFR_ASSERTD (e == prec);
+
+ /* hard case: first round to prec bits, then check */
+ mpfr_init2 (x, prec);
+ mpfr_set (x, f, rnd);
+ res = MPFR_GET_EXP (x) == e;
+ mpfr_clear (x);
+ return res;
+}
diff --git a/src/fits_uint.c b/src/fits_uint.c
new file mode 100644
index 000000000..bfdfe16e8
--- /dev/null
+++ b/src/fits_uint.c
@@ -0,0 +1,27 @@
+/* mpfr_fits_uint_p -- test whether an mpfr fits an unsigned int.
+
+Copyright 2003, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#define FUNCTION mpfr_fits_uint_p
+#define MAXIMUM UINT_MAX
+#define TYPE unsigned int
+
+#include "fits_u.h"
diff --git a/src/fits_uintmax.c b/src/fits_uintmax.c
new file mode 100644
index 000000000..54ce6d3c9
--- /dev/null
+++ b/src/fits_uintmax.c
@@ -0,0 +1,90 @@
+/* mpfr_fits_uintmax_p -- test whether an mpfr fits an uintmax_t.
+
+Copyright 2004, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#ifdef HAVE_CONFIG_H
+# include "config.h" /* for a build within gmp */
+#endif
+
+/* The ISO C99 standard specifies that in C++ implementations the
+ INTMAX_MAX, ... macros should only be defined if explicitly requested. */
+#if defined __cplusplus
+# define __STDC_LIMIT_MACROS
+# define __STDC_CONSTANT_MACROS
+#endif
+
+#if HAVE_INTTYPES_H
+# include <inttypes.h>
+#endif
+#if HAVE_STDINT_H
+# include <stdint.h>
+#endif
+
+#include "mpfr-impl.h"
+
+#ifdef _MPFR_H_HAVE_INTMAX_T
+
+/* We can't use fits_u.h <= mpfr_cmp_ui */
+int
+mpfr_fits_uintmax_p (mpfr_srcptr f, mpfr_rnd_t rnd)
+{
+ mpfr_exp_t e;
+ int prec;
+ uintmax_t s;
+ mpfr_t x;
+ int res;
+
+ if (MPFR_UNLIKELY (MPFR_IS_SINGULAR (f)))
+    /* Zero always fits */
+ return MPFR_IS_ZERO (f) ? 1 : 0;
+ else if (MPFR_IS_NEG (f))
+ /* Negative numbers don't fit */
+ return 0;
+ /* now it fits if
+ (a) f <= MAXIMUM
+     (b) round(f, prec(uintmax_t), rnd) <= MAXIMUM */
+
+ e = MPFR_GET_EXP (f);
+
+ /* first compute prec(MAXIMUM); fits in an int */
+ for (s = MPFR_UINTMAX_MAX, prec = 0; s != 0; s /= 2, prec ++);
+
+ /* MAXIMUM needs prec bits, i.e. MAXIMUM = 2^prec - 1 */
+
+ /* if e <= prec - 1, then f < 2^(prec-1) < MAXIMUM */
+ if (e <= prec - 1)
+ return 1;
+
+ /* if e >= prec + 1, then f >= 2^prec > MAXIMUM */
+ if (e >= prec + 1)
+ return 0;
+
+ MPFR_ASSERTD (e == prec);
+
+ /* hard case: first round to prec bits, then check */
+ mpfr_init2 (x, prec);
+ mpfr_set (x, f, rnd);
+ res = MPFR_GET_EXP (x) == e;
+ mpfr_clear (x);
+ return res;
+}
+
+#endif
diff --git a/src/fits_ulong.c b/src/fits_ulong.c
new file mode 100644
index 000000000..607dc4051
--- /dev/null
+++ b/src/fits_ulong.c
@@ -0,0 +1,27 @@
+/* mpfr_fits_ulong_p -- test whether an mpfr fits an unsigned long.
+
+Copyright 2003, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#define FUNCTION mpfr_fits_ulong_p
+#define MAXIMUM ULONG_MAX
+#define TYPE unsigned long
+
+#include "fits_u.h"
diff --git a/src/fits_ushort.c b/src/fits_ushort.c
new file mode 100644
index 000000000..9f82e1a03
--- /dev/null
+++ b/src/fits_ushort.c
@@ -0,0 +1,27 @@
+/* mpfr_fits_ushort_p -- test whether an mpfr fits an unsigned short.
+
+Copyright 2003, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#define FUNCTION mpfr_fits_ushort_p
+#define MAXIMUM USHRT_MAX
+#define TYPE unsigned short
+
+#include "fits_u.h"
diff --git a/src/fma.c b/src/fma.c
new file mode 100644
index 000000000..2a413fe0a
--- /dev/null
+++ b/src/fma.c
@@ -0,0 +1,294 @@
+/* mpfr_fma -- Floating multiply-add
+
+Copyright 2001, 2002, 2004, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#include "mpfr-impl.h"
+
+/* The fused-multiply-add (fma) of x, y and z is defined by:
+ fma(x,y,z)= x*y + z
+*/
+
+int
+mpfr_fma (mpfr_ptr s, mpfr_srcptr x, mpfr_srcptr y, mpfr_srcptr z,
+ mpfr_rnd_t rnd_mode)
+{
+ int inexact;
+ mpfr_t u;
+ MPFR_SAVE_EXPO_DECL (expo);
+ MPFR_GROUP_DECL(group);
+
+ /* particular cases */
+ if (MPFR_UNLIKELY( MPFR_IS_SINGULAR(x) ||
+ MPFR_IS_SINGULAR(y) ||
+ MPFR_IS_SINGULAR(z) ))
+ {
+ if (MPFR_IS_NAN(x) || MPFR_IS_NAN(y) || MPFR_IS_NAN(z))
+ {
+ MPFR_SET_NAN(s);
+ MPFR_RET_NAN;
+ }
+ /* now neither x, y or z is NaN */
+ else if (MPFR_IS_INF(x) || MPFR_IS_INF(y))
+ {
+ /* cases Inf*0+z, 0*Inf+z, Inf-Inf */
+ if ((MPFR_IS_ZERO(y)) ||
+ (MPFR_IS_ZERO(x)) ||
+ (MPFR_IS_INF(z) &&
+ ((MPFR_MULT_SIGN(MPFR_SIGN(x), MPFR_SIGN(y))) != MPFR_SIGN(z))))
+ {
+ MPFR_SET_NAN(s);
+ MPFR_RET_NAN;
+ }
+ else if (MPFR_IS_INF(z)) /* case Inf-Inf already checked above */
+ {
+ MPFR_SET_INF(s);
+ MPFR_SET_SAME_SIGN(s, z);
+ MPFR_RET(0);
+ }
+ else /* z is finite */
+ {
+ MPFR_SET_INF(s);
+ MPFR_SET_SIGN(s, MPFR_MULT_SIGN(MPFR_SIGN(x) , MPFR_SIGN(y)));
+ MPFR_RET(0);
+ }
+ }
+ /* now x and y are finite */
+ else if (MPFR_IS_INF(z))
+ {
+ MPFR_SET_INF(s);
+ MPFR_SET_SAME_SIGN(s, z);
+ MPFR_RET(0);
+ }
+ else if (MPFR_IS_ZERO(x) || MPFR_IS_ZERO(y))
+ {
+ if (MPFR_IS_ZERO(z))
+ {
+ int sign_p;
+ sign_p = MPFR_MULT_SIGN( MPFR_SIGN(x) , MPFR_SIGN(y) );
+ MPFR_SET_SIGN(s,(rnd_mode != MPFR_RNDD ?
+ ((MPFR_IS_NEG_SIGN(sign_p) && MPFR_IS_NEG(z))
+ ? -1 : 1) :
+ ((MPFR_IS_POS_SIGN(sign_p) && MPFR_IS_POS(z))
+ ? 1 : -1)));
+ MPFR_SET_ZERO(s);
+ MPFR_RET(0);
+ }
+ else
+ return mpfr_set (s, z, rnd_mode);
+ }
+ else /* necessarily z is zero here */
+ {
+ MPFR_ASSERTD(MPFR_IS_ZERO(z));
+ return mpfr_mul (s, x, y, rnd_mode);
+ }
+ }
+
+ /* If we take prec(u) >= prec(x) + prec(y), the product u <- x*y
+ is exact, except in case of overflow or underflow. */
+ MPFR_SAVE_EXPO_MARK (expo);
+ MPFR_GROUP_INIT_1 (group, MPFR_PREC(x) + MPFR_PREC(y), u);
+
+ if (MPFR_UNLIKELY (mpfr_mul (u, x, y, MPFR_RNDN)))
+ {
+ /* overflow or underflow - this case is regarded as rare, thus
+ does not need to be very efficient (even if some tests below
+ could have been done earlier).
+ It is an overflow iff u is an infinity (since MPFR_RNDN was used).
+ Alternatively, we could test the overflow flag, but in this case,
+ mpfr_clear_flags would have been necessary. */
+ if (MPFR_IS_INF (u)) /* overflow */
+ {
+ /* Let's eliminate the obvious case where x*y and z have the
+ same sign. No possible cancellation -> real overflow.
+ Also, we know that |z| < 2^emax. If E(x) + E(y) >= emax+3,
+ then |x*y| >= 2^(emax+1), and |x*y + z| >= 2^emax. This case
+ is also an overflow. */
+ if (MPFR_SIGN (u) == MPFR_SIGN (z) ||
+ MPFR_GET_EXP (x) + MPFR_GET_EXP (y) >= __gmpfr_emax + 3)
+ {
+ MPFR_GROUP_CLEAR (group);
+ MPFR_SAVE_EXPO_FREE (expo);
+ return mpfr_overflow (s, rnd_mode, MPFR_SIGN (z));
+ }
+
+ /* E(x) + E(y) <= emax+2, therefore |x*y| < 2^(emax+2), and
+ (x/4)*y does not overflow (let's recall that the result
+ is exact with an unbounded exponent range). It does not
+ underflow either, because x*y overflows and the exponent
+ range is large enough. */
+ inexact = mpfr_div_2ui (u, x, 2, MPFR_RNDN);
+ MPFR_ASSERTN (inexact == 0);
+ inexact = mpfr_mul (u, u, y, MPFR_RNDN);
+ MPFR_ASSERTN (inexact == 0);
+
+ /* Now, we need to add z/4... But it may underflow! */
+ {
+ mpfr_t zo4;
+ mpfr_srcptr zz;
+ MPFR_BLOCK_DECL (flags);
+
+ if (MPFR_GET_EXP (u) > MPFR_GET_EXP (z) &&
+ MPFR_GET_EXP (u) - MPFR_GET_EXP (z) > MPFR_PREC (u))
+ {
+ /* |z| < ulp(u)/2, therefore one can use z instead of z/4. */
+ zz = z;
+ }
+ else
+ {
+ mpfr_init2 (zo4, MPFR_PREC (z));
+ if (mpfr_div_2ui (zo4, z, 2, MPFR_RNDZ))
+ {
+ /* The division by 4 underflowed! */
+ MPFR_ASSERTN (0); /* TODO... */
+ }
+ zz = zo4;
+ }
+
+ /* Let's recall that u = x*y/4 and zz = z/4 (or z if the
+ following addition would give the same result). */
+ MPFR_BLOCK (flags, inexact = mpfr_add (s, u, zz, rnd_mode));
+ /* u and zz have different signs, so that an overflow
+ is not possible. But an underflow is theoretically
+ possible! */
+ if (MPFR_UNDERFLOW (flags))
+ {
+ MPFR_ASSERTN (zz != z);
+ MPFR_ASSERTN (0); /* TODO... */
+ mpfr_clears (zo4, u, (mpfr_ptr) 0);
+ }
+ else
+ {
+ int inex2;
+
+ if (zz != z)
+ mpfr_clear (zo4);
+ MPFR_GROUP_CLEAR (group);
+ MPFR_ASSERTN (! MPFR_OVERFLOW (flags));
+ inex2 = mpfr_mul_2ui (s, s, 2, rnd_mode);
+ if (inex2) /* overflow */
+ {
+ inexact = inex2;
+ MPFR_SAVE_EXPO_UPDATE_FLAGS (expo, __gmpfr_flags);
+ }
+ goto end;
+ }
+ }
+ }
+ else /* underflow: one has |xy| < 2^(emin-1). */
+ {
+ unsigned long scale = 0;
+ mpfr_t scaled_z;
+ mpfr_srcptr new_z;
+ mpfr_exp_t diffexp;
+ mpfr_prec_t pzs;
+ int xy_underflows;
+
+ /* Let's scale z so that ulp(z) > 2^emin and ulp(s) > 2^emin
+ (the + 1 on MPFR_PREC (s) is necessary because the exponent
+ of the result can be EXP(z) - 1). */
+ diffexp = MPFR_GET_EXP (z) - __gmpfr_emin;
+ pzs = MAX (MPFR_PREC (z), MPFR_PREC (s) + 1);
+ if (diffexp <= pzs)
+ {
+ mpfr_uexp_t uscale;
+ mpfr_t scaled_v;
+ MPFR_BLOCK_DECL (flags);
+
+ uscale = (mpfr_uexp_t) pzs - diffexp + 1;
+ MPFR_ASSERTN (uscale > 0);
+ MPFR_ASSERTN (uscale <= ULONG_MAX);
+ scale = uscale;
+ mpfr_init2 (scaled_z, MPFR_PREC (z));
+ inexact = mpfr_mul_2ui (scaled_z, z, scale, MPFR_RNDN);
+ MPFR_ASSERTN (inexact == 0); /* TODO: overflow case */
+ new_z = scaled_z;
+ /* Now we need to recompute u = xy * 2^scale. */
+ MPFR_BLOCK (flags,
+ if (MPFR_GET_EXP (x) < MPFR_GET_EXP (y))
+ {
+ mpfr_init2 (scaled_v, MPFR_PREC (x));
+ mpfr_mul_2ui (scaled_v, x, scale, MPFR_RNDN);
+ mpfr_mul (u, scaled_v, y, MPFR_RNDN);
+ }
+ else
+ {
+ mpfr_init2 (scaled_v, MPFR_PREC (y));
+ mpfr_mul_2ui (scaled_v, y, scale, MPFR_RNDN);
+ mpfr_mul (u, x, scaled_v, MPFR_RNDN);
+ });
+ mpfr_clear (scaled_v);
+ MPFR_ASSERTN (! MPFR_OVERFLOW (flags));
+ xy_underflows = MPFR_UNDERFLOW (flags);
+ }
+ else
+ {
+ new_z = z;
+ xy_underflows = 1;
+ }
+
+ if (xy_underflows)
+ {
+ /* Let's replace xy by sign(xy) * 2^(emin-1). */
+ MPFR_PREC (u) = MPFR_PREC_MIN;
+ mpfr_setmin (u, __gmpfr_emin);
+ MPFR_SET_SIGN (u, MPFR_MULT_SIGN (MPFR_SIGN (x),
+ MPFR_SIGN (y)));
+ }
+
+ {
+ MPFR_BLOCK_DECL (flags);
+
+ MPFR_BLOCK (flags, inexact = mpfr_add (s, u, new_z, rnd_mode));
+ MPFR_GROUP_CLEAR (group);
+ if (scale != 0)
+ {
+ int inex2;
+
+ mpfr_clear (scaled_z);
+ /* Here an overflow is theoretically possible, in which case
+ the result may be wrong, hence the assert. An underflow
+ is not possible, but let's check that anyway. */
+ MPFR_ASSERTN (! MPFR_OVERFLOW (flags)); /* TODO... */
+ MPFR_ASSERTN (! MPFR_UNDERFLOW (flags)); /* not possible */
+ inex2 = mpfr_div_2ui (s, s, scale, MPFR_RNDN);
+ /* FIXME: this seems incorrect. MPFR_RNDN -> rnd_mode?
+ Also, handle the double rounding case:
+ s / 2^scale = 2^(emin - 2) in MPFR_RNDN. */
+ if (inex2) /* underflow */
+ inexact = inex2;
+ }
+ }
+
+ /* FIXME/TODO: I'm not sure that the following is correct.
+ Check for possible spurious exceptions due to intermediate
+ computations. */
+ MPFR_SAVE_EXPO_UPDATE_FLAGS (expo, __gmpfr_flags);
+ goto end;
+ }
+ }
+
+ inexact = mpfr_add (s, u, z, rnd_mode);
+ MPFR_GROUP_CLEAR (group);
+ MPFR_SAVE_EXPO_UPDATE_FLAGS (expo, __gmpfr_flags);
+ end:
+ MPFR_SAVE_EXPO_FREE (expo);
+ return mpfr_check_range (s, inexact, rnd_mode);
+}
diff --git a/src/fms.c b/src/fms.c
new file mode 100644
index 000000000..0e7c887d0
--- /dev/null
+++ b/src/fms.c
@@ -0,0 +1,296 @@
+/* mpfr_fms -- Floating multiply-subtract
+
+Copyright 2001, 2002, 2004, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#include "mpfr-impl.h"
+
+/* The fused-multiply-subtract (fms) of x, y and z is defined by:
+ fms(x,y,z)= x*y - z
+ Note: this is neither in IEEE754R, nor in LIA-2, but both the
+ PowerPC and the Itanium define fms as x*y - z.
+*/
+
+int
+mpfr_fms (mpfr_ptr s, mpfr_srcptr x, mpfr_srcptr y, mpfr_srcptr z,
+ mpfr_rnd_t rnd_mode)
+{
+ int inexact;
+ mpfr_t u;
+ MPFR_SAVE_EXPO_DECL (expo);
+ MPFR_GROUP_DECL(group);
+
+ /* particular cases */
+ if (MPFR_UNLIKELY( MPFR_IS_SINGULAR(x) ||
+ MPFR_IS_SINGULAR(y) ||
+ MPFR_IS_SINGULAR(z) ))
+ {
+ if (MPFR_IS_NAN(x) || MPFR_IS_NAN(y) || MPFR_IS_NAN(z))
+ {
+ MPFR_SET_NAN(s);
+ MPFR_RET_NAN;
+ }
+ /* now neither x, y or z is NaN */
+ else if (MPFR_IS_INF(x) || MPFR_IS_INF(y))
+ {
+ /* cases Inf*0-z, 0*Inf-z, Inf-Inf */
+ if ((MPFR_IS_ZERO(y)) ||
+ (MPFR_IS_ZERO(x)) ||
+ (MPFR_IS_INF(z) &&
+ ((MPFR_MULT_SIGN(MPFR_SIGN(x), MPFR_SIGN(y))) == MPFR_SIGN(z))))
+ {
+ MPFR_SET_NAN(s);
+ MPFR_RET_NAN;
+ }
+ else if (MPFR_IS_INF(z)) /* case Inf-Inf already checked above */
+ {
+ MPFR_SET_INF(s);
+ MPFR_SET_OPPOSITE_SIGN(s, z);
+ MPFR_RET(0);
+ }
+ else /* z is finite */
+ {
+ MPFR_SET_INF(s);
+ MPFR_SET_SIGN(s, MPFR_MULT_SIGN(MPFR_SIGN(x) , MPFR_SIGN(y)));
+ MPFR_RET(0);
+ }
+ }
+ /* now x and y are finite */
+ else if (MPFR_IS_INF(z))
+ {
+ MPFR_SET_INF(s);
+ MPFR_SET_OPPOSITE_SIGN(s, z);
+ MPFR_RET(0);
+ }
+ else if (MPFR_IS_ZERO(x) || MPFR_IS_ZERO(y))
+ {
+ if (MPFR_IS_ZERO(z))
+ {
+ int sign_p;
+ sign_p = MPFR_MULT_SIGN( MPFR_SIGN(x) , MPFR_SIGN(y) );
+ MPFR_SET_SIGN(s,(rnd_mode != MPFR_RNDD ?
+ ((MPFR_IS_NEG_SIGN(sign_p) && MPFR_IS_POS(z))
+ ? -1 : 1) :
+ ((MPFR_IS_POS_SIGN(sign_p) && MPFR_IS_NEG(z))
+ ? 1 : -1)));
+ MPFR_SET_ZERO(s);
+ MPFR_RET(0);
+ }
+ else
+ return mpfr_neg (s, z, rnd_mode);
+ }
+ else /* necessarily z is zero here */
+ {
+ MPFR_ASSERTD(MPFR_IS_ZERO(z));
+ return mpfr_mul (s, x, y, rnd_mode);
+ }
+ }
+
+ /* If we take prec(u) >= prec(x) + prec(y), the product u <- x*y
+ is exact, except in case of overflow or underflow. */
+ MPFR_SAVE_EXPO_MARK (expo);
+ MPFR_GROUP_INIT_1 (group, MPFR_PREC(x) + MPFR_PREC(y), u);
+
+ if (MPFR_UNLIKELY (mpfr_mul (u, x, y, MPFR_RNDN)))
+ {
+ /* overflow or underflow - this case is regarded as rare, thus
+ does not need to be very efficient (even if some tests below
+ could have been done earlier).
+ It is an overflow iff u is an infinity (since MPFR_RNDN was used).
+ Alternatively, we could test the overflow flag, but in this case,
+ mpfr_clear_flags would have been necessary. */
+ if (MPFR_IS_INF (u)) /* overflow */
+ {
+ /* Let's eliminate the obvious case where x*y and z have the
+ same sign. No possible cancellation -> real overflow.
+ Also, we know that |z| < 2^emax. If E(x) + E(y) >= emax+3,
+ then |x*y| >= 2^(emax+1), and |x*y - z| >= 2^emax. This case
+ is also an overflow. */
+ if (MPFR_SIGN (u) != MPFR_SIGN (z) ||
+ MPFR_GET_EXP (x) + MPFR_GET_EXP (y) >= __gmpfr_emax + 3)
+ {
+ MPFR_GROUP_CLEAR (group);
+ MPFR_SAVE_EXPO_FREE (expo);
+ return mpfr_overflow (s, rnd_mode, - MPFR_SIGN (z));
+ }
+
+ /* E(x) + E(y) <= emax+2, therefore |x*y| < 2^(emax+2), and
+ (x/4)*y does not overflow (let's recall that the result
+ is exact with an unbounded exponent range). It does not
+ underflow either, because x*y overflows and the exponent
+ range is large enough. */
+ inexact = mpfr_div_2ui (u, x, 2, MPFR_RNDN);
+ MPFR_ASSERTN (inexact == 0);
+ inexact = mpfr_mul (u, u, y, MPFR_RNDN);
+ MPFR_ASSERTN (inexact == 0);
+
+ /* Now, we need to subtract z/4... But it may underflow! */
+ {
+ mpfr_t zo4;
+ mpfr_srcptr zz;
+ MPFR_BLOCK_DECL (flags);
+
+ if (MPFR_GET_EXP (u) > MPFR_GET_EXP (z) &&
+ MPFR_GET_EXP (u) - MPFR_GET_EXP (z) > MPFR_PREC (u))
+ {
+ /* |z| < ulp(u)/2, therefore one can use z instead of z/4. */
+ zz = z;
+ }
+ else
+ {
+ mpfr_init2 (zo4, MPFR_PREC (z));
+ if (mpfr_div_2ui (zo4, z, 2, MPFR_RNDZ))
+ {
+ /* The division by 4 underflowed! */
+ MPFR_ASSERTN (0); /* TODO... */
+ }
+ zz = zo4;
+ }
+
+ /* Let's recall that u = x*y/4 and zz = z/4 (or z if the
+ following subtraction would give the same result). */
+ MPFR_BLOCK (flags, inexact = mpfr_sub (s, u, zz, rnd_mode));
+ /* u and zz have the same sign, so that an overflow
+ is not possible. But an underflow is theoretically
+ possible! */
+ if (MPFR_UNDERFLOW (flags))
+ {
+ MPFR_ASSERTN (zz != z);
+ MPFR_ASSERTN (0); /* TODO... */
+ mpfr_clears (zo4, u, (mpfr_ptr) 0);
+ }
+ else
+ {
+ int inex2;
+
+ if (zz != z)
+ mpfr_clear (zo4);
+ MPFR_GROUP_CLEAR (group);
+ MPFR_ASSERTN (! MPFR_OVERFLOW (flags));
+ inex2 = mpfr_mul_2ui (s, s, 2, rnd_mode);
+ if (inex2) /* overflow */
+ {
+ inexact = inex2;
+ MPFR_SAVE_EXPO_UPDATE_FLAGS (expo, __gmpfr_flags);
+ }
+ goto end;
+ }
+ }
+ }
+ else /* underflow: one has |xy| < 2^(emin-1). */
+ {
+ unsigned long scale = 0;
+ mpfr_t scaled_z;
+ mpfr_srcptr new_z;
+ mpfr_exp_t diffexp;
+ mpfr_prec_t pzs;
+ int xy_underflows;
+
+ /* Let's scale z so that ulp(z) > 2^emin and ulp(s) > 2^emin
+ (the + 1 on MPFR_PREC (s) is necessary because the exponent
+ of the result can be EXP(z) - 1). */
+ diffexp = MPFR_GET_EXP (z) - __gmpfr_emin;
+ pzs = MAX (MPFR_PREC (z), MPFR_PREC (s) + 1);
+ if (diffexp <= pzs)
+ {
+ mpfr_uexp_t uscale;
+ mpfr_t scaled_v;
+ MPFR_BLOCK_DECL (flags);
+
+ uscale = (mpfr_uexp_t) pzs - diffexp + 1;
+ MPFR_ASSERTN (uscale > 0);
+ MPFR_ASSERTN (uscale <= ULONG_MAX);
+ scale = uscale;
+ mpfr_init2 (scaled_z, MPFR_PREC (z));
+ inexact = mpfr_mul_2ui (scaled_z, z, scale, MPFR_RNDN);
+ MPFR_ASSERTN (inexact == 0); /* TODO: overflow case */
+ new_z = scaled_z;
+ /* Now we need to recompute u = xy * 2^scale. */
+ MPFR_BLOCK (flags,
+ if (MPFR_GET_EXP (x) < MPFR_GET_EXP (y))
+ {
+ mpfr_init2 (scaled_v, MPFR_PREC (x));
+ mpfr_mul_2ui (scaled_v, x, scale, MPFR_RNDN);
+ mpfr_mul (u, scaled_v, y, MPFR_RNDN);
+ }
+ else
+ {
+ mpfr_init2 (scaled_v, MPFR_PREC (y));
+ mpfr_mul_2ui (scaled_v, y, scale, MPFR_RNDN);
+ mpfr_mul (u, x, scaled_v, MPFR_RNDN);
+ });
+ mpfr_clear (scaled_v);
+ MPFR_ASSERTN (! MPFR_OVERFLOW (flags));
+ xy_underflows = MPFR_UNDERFLOW (flags);
+ }
+ else
+ {
+ new_z = z;
+ xy_underflows = 1;
+ }
+
+ if (xy_underflows)
+ {
+ /* Let's replace xy by sign(xy) * 2^(emin-1). */
+ MPFR_PREC (u) = MPFR_PREC_MIN;
+ mpfr_setmin (u, __gmpfr_emin);
+ MPFR_SET_SIGN (u, MPFR_MULT_SIGN (MPFR_SIGN (x),
+ MPFR_SIGN (y)));
+ }
+
+ {
+ MPFR_BLOCK_DECL (flags);
+
+ MPFR_BLOCK (flags, inexact = mpfr_sub (s, u, new_z, rnd_mode));
+ MPFR_GROUP_CLEAR (group);
+ if (scale != 0)
+ {
+ int inex2;
+
+ mpfr_clear (scaled_z);
+ /* Here an overflow is theoretically possible, in which case
+ the result may be wrong, hence the assert. An underflow
+ is not possible, but let's check that anyway. */
+ MPFR_ASSERTN (! MPFR_OVERFLOW (flags)); /* TODO... */
+ MPFR_ASSERTN (! MPFR_UNDERFLOW (flags)); /* not possible */
+ inex2 = mpfr_div_2ui (s, s, scale, MPFR_RNDN);
+ /* FIXME: this seems incorrect. MPFR_RNDN -> rnd_mode?
+ Also, handle the double rounding case:
+ s / 2^scale = 2^(emin - 2) in MPFR_RNDN. */
+ if (inex2) /* underflow */
+ inexact = inex2;
+ }
+ }
+
+ /* FIXME/TODO: I'm not sure that the following is correct.
+ Check for possible spurious exceptions due to intermediate
+ computations. */
+ MPFR_SAVE_EXPO_UPDATE_FLAGS (expo, __gmpfr_flags);
+ goto end;
+ }
+ }
+
+ inexact = mpfr_sub (s, u, z, rnd_mode);
+ MPFR_GROUP_CLEAR (group);
+ MPFR_SAVE_EXPO_UPDATE_FLAGS (expo, __gmpfr_flags);
+ end:
+ MPFR_SAVE_EXPO_FREE (expo);
+ return mpfr_check_range (s, inexact, rnd_mode);
+}
diff --git a/src/frac.c b/src/frac.c
new file mode 100644
index 000000000..c13e7a155
--- /dev/null
+++ b/src/frac.c
@@ -0,0 +1,144 @@
+/* mpfr_frac -- Fractional part of a floating-point number.
+
+Copyright 2002, 2003, 2004, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+
+#define MPFR_NEED_LONGLONG_H
+#include "mpfr-impl.h"
+
+/* Optimization note: it is not a good idea to call mpfr_integer_p,
+ as some cases will take longer (the number may be parsed twice). */
+
+int
+mpfr_frac (mpfr_ptr r, mpfr_srcptr u, mpfr_rnd_t rnd_mode)
+{
+ mpfr_exp_t re, ue;
+ mpfr_prec_t uq;
+ mp_size_t un, tn, t0;
+ mp_limb_t *up, *tp, k;
+ int sh;
+ mpfr_t tmp;
+ mpfr_ptr t;
+ int inex;
+ MPFR_SAVE_EXPO_DECL (expo);
+
+ /* Special cases */
+ if (MPFR_UNLIKELY(MPFR_IS_NAN(u)))
+ {
+ MPFR_SET_NAN(r);
+ MPFR_RET_NAN;
+ }
+ else if (MPFR_UNLIKELY(MPFR_IS_INF(u) || mpfr_integer_p (u)))
+ {
+ MPFR_SET_SAME_SIGN(r, u);
+ MPFR_SET_ZERO(r);
+ MPFR_RET(0); /* zero is exact */
+ }
+
+ ue = MPFR_GET_EXP (u);
+ if (ue <= 0) /* |u| < 1 */
+ return mpfr_set (r, u, rnd_mode);
+
+ /* Now |u| >= 1, meaning that an overflow is not possible. */
+
+ uq = MPFR_PREC(u);
+ un = (uq - 1) / GMP_NUMB_BITS; /* index of most significant limb */
+ un -= (mp_size_t) (ue / GMP_NUMB_BITS);
+ /* now the index of the MSL containing bits of the fractional part */
+
+ up = MPFR_MANT(u);
+ sh = ue % GMP_NUMB_BITS;
+ k = up[un] << sh;
+ /* the first bit of the fractional part is the MSB of k */
+
+ if (k != 0)
+ {
+ int cnt;
+
+ count_leading_zeros(cnt, k);
+ /* first bit 1 of the fractional part -> MSB of the number */
+ re = -cnt;
+ sh += cnt;
+ MPFR_ASSERTN (sh < GMP_NUMB_BITS);
+ k <<= cnt;
+ }
+ else
+ {
+ re = sh - GMP_NUMB_BITS;
+ /* searching for the first bit 1 (exists since u isn't an integer) */
+ while (up[--un] == 0)
+ re -= GMP_NUMB_BITS;
+ MPFR_ASSERTN(un >= 0);
+ k = up[un];
+ count_leading_zeros(sh, k);
+ re -= sh;
+ k <<= sh;
+ }
+ /* The exponent of r will be re */
+ /* un: index of the limb of u that contains the first bit 1 of the FP */
+
+ t = (mp_size_t) (MPFR_PREC(r) - 1) / GMP_NUMB_BITS < un ?
+ (mpfr_init2 (tmp, (un + 1) * GMP_NUMB_BITS), tmp) : r;
+ /* t has enough precision to contain the fractional part of u */
+ /* If we use a temporary variable, we take the non-significant bits
+ of u into account, because of the mpn_lshift below. */
+ MPFR_SET_SAME_SIGN(t, u);
+
+ /* Put the fractional part of u into t */
+ tn = (MPFR_PREC(t) - 1) / GMP_NUMB_BITS;
+ MPFR_ASSERTN(tn >= un);
+ t0 = tn - un;
+ tp = MPFR_MANT(t);
+ if (sh == 0)
+ MPN_COPY_DECR(tp + t0, up, un + 1);
+ else /* warning: un may be 0 here */
+ tp[tn] = k | ((un) ? mpn_lshift (tp + t0, up, un, sh) : (mp_limb_t) 0);
+ if (t0 > 0)
+ MPN_ZERO(tp, t0);
+
+ MPFR_SAVE_EXPO_MARK (expo);
+
+ if (t != r)
+ { /* t is tmp */
+ MPFR_EXP (t) = 0; /* should be re, but not necessarily in the range */
+ inex = mpfr_set (r, t, rnd_mode); /* no underflow */
+ mpfr_clear (t);
+ MPFR_EXP (r) += re;
+ }
+ else
+ { /* There may be remaining non-significant bits in t (= r). */
+ int carry;
+
+ MPFR_EXP (r) = re;
+ carry = mpfr_round_raw (tp, tp,
+ (mpfr_prec_t) (tn + 1) * GMP_NUMB_BITS,
+ MPFR_IS_NEG (r), MPFR_PREC (r), rnd_mode,
+ &inex);
+ if (carry)
+ {
+ tp[tn] = MPFR_LIMB_HIGHBIT;
+ MPFR_EXP (r) ++;
+ }
+ }
+
+ MPFR_SAVE_EXPO_FREE (expo);
+ return mpfr_check_range (r, inex, rnd_mode);
+}
diff --git a/src/free_cache.c b/src/free_cache.c
new file mode 100644
index 000000000..0f66f1286
--- /dev/null
+++ b/src/free_cache.c
@@ -0,0 +1,52 @@
+/* mpfr_free_cache - Free the cache used by MPFR for internal consts.
+
+Copyright 2004, 2005, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#include "mpfr-impl.h"
+
+#if 0
+/* Disabled helper: would free the cached mpfr_t values of the __gmpfr_l2b
+   table (the log(2)/log(b) constants, two entries per base b).  Each entry
+   is released with mpfr_clear and then its storage is returned through
+   (*__gmp_free_func) -- presumably it was obtained from the matching GMP
+   allocation function; confirm before re-enabling.  Kept for reference:
+   mpfr_free_cache below contains the corresponding commented-out call. */
+static void
+free_l2b (void)
+{
+  int i, b;
+
+  for (b = 2; b <= BASE_MAX; b++)
+    for (i = 0; i < 2; i++)
+      {
+        mpfr_ptr p = __gmpfr_l2b[b-2][i];
+        if (p != NULL)
+          {
+            mpfr_clear (p);
+            (*__gmp_free_func) (p, sizeof (mpfr_t));
+          }
+      }
+}
+#endif
+
+/* Public entry point: release the memory held by the caches of the
+   internal constants (Pi, log(2), Euler's constant, Catalan's constant)
+   via mpfr_clear_cache.  The l2b table is deliberately not freed here
+   (the call is commented out; see the disabled free_l2b above). */
+void
+mpfr_free_cache (void)
+{
+  mpfr_clear_cache (__gmpfr_cache_const_pi);
+  mpfr_clear_cache (__gmpfr_cache_const_log2);
+  mpfr_clear_cache (__gmpfr_cache_const_euler);
+  mpfr_clear_cache (__gmpfr_cache_const_catalan);
+  /* free_l2b (); */
+}
diff --git a/src/gamma.c b/src/gamma.c
new file mode 100644
index 000000000..f7c1f0aee
--- /dev/null
+++ b/src/gamma.c
@@ -0,0 +1,402 @@
+/* mpfr_gamma -- gamma function
+
+Copyright 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#define MPFR_NEED_LONGLONG_H
+#include "mpfr-impl.h"
+
+#define IS_GAMMA
+#include "lngamma.c"
+#undef IS_GAMMA
+
+/* Return a precision (in bits) sufficient for 2-x to be exactly
+   representable, assuming x < 0.  With y := -x (so that 2-x = 2+y),
+   the three cases below come from how the bits of 2 and y overlap:
+   (a) EXP(y) <= 1:                 w = PREC(y) + 2 - EXP(y)
+       (no overlap between 2 and y, hence no carry);
+   (b) EXP(y) > 1 and ULP(y) <= 2:  w = PREC(y) + 1
+       (a carry can occur in the addition);
+   (c) EXP(y) > 1 and ULP(y) > 2:   w = EXP(y) - 1.
+   Note that EXP and PREC are the same for y and x. */
+static mpfr_prec_t
+mpfr_gamma_2_minus_x_exact (mpfr_srcptr x)
+{
+  mpfr_exp_t ex = MPFR_GET_EXP (x);
+  mpfr_prec_t px = MPFR_PREC (x);
+
+  if (ex <= 1)
+    return px + 2 - ex;            /* case (a) */
+  else if (ex <= px + 1)
+    return px + 1;                 /* case (b): ULP(y) <= 2 */
+  else
+    return ex - 1;                 /* case (c) */
+}
+
+/* Return a precision (in bits) sufficient for 1-x to be exactly
+   representable, assuming x < 1.  The case analysis mirrors
+   mpfr_gamma_2_minus_x_exact, with 1 in place of 2. */
+static mpfr_prec_t
+mpfr_gamma_1_minus_x_exact (mpfr_srcptr x)
+{
+  mpfr_exp_t ex = MPFR_GET_EXP (x);
+  mpfr_prec_t px = MPFR_PREC (x);
+
+  if (MPFR_IS_POS (x))
+    return px - ex;                /* 0 < x < 1: no carry possible */
+  /* from here on x < 0 */
+  return (ex <= 0) ? px + 1 - ex
+                   : (px >= ex) ? px + 1 : ex;
+}
+
+/* Return a lower bound on the number of significant bits of n!
+   (not counting the trailing zero bits).
+   Uses n! >= (n/e)^n * sqrt(2*Pi*n), valid for n >= 1, then subtracts
+   the exact count of trailing zero bits of n!, which is
+   floor(n/2) + floor(n/4) + floor(n/8) + ...
+   This approximation is exact for n <= 500000, except for
+   n = 219536, 235928, 298981, 355854, 464848, 493725, 498992,
+   where it returns a value 1 too small. */
+static unsigned long
+bits_fac (unsigned long n)
+{
+  mpfr_t t, u;
+  unsigned long bits, d;
+
+  mpfr_init2 (t, 38);
+  mpfr_init2 (u, 38);
+
+  /* t <- lower bound on (n/e)^n (every rounding toward zero) */
+  mpfr_set_ui (t, n, MPFR_RNDZ);
+  mpfr_set_str_binary (u, "10.101101111110000101010001011000101001"); /* upper bound of e */
+  mpfr_div (t, t, u, MPFR_RNDZ);
+  mpfr_pow_ui (t, t, n, MPFR_RNDZ);
+
+  /* u <- lower bound on sqrt(2*Pi*n), then t <- lower bound on n! */
+  mpfr_const_pi (u, MPFR_RNDZ);
+  mpfr_mul_ui (u, u, 2 * n, MPFR_RNDZ);
+  mpfr_sqrt (u, u, MPFR_RNDZ);
+  mpfr_mul (t, t, u, MPFR_RNDZ);
+
+  /* bits <- ceil(log2(t)) >= number of bits of n! */
+  mpfr_log2 (t, t, MPFR_RNDZ);
+  bits = mpfr_get_ui (t, MPFR_RNDU);
+
+  /* subtract the trailing zero bits of n! (d doubles; callers keep n
+     small enough that d cannot wrap around) */
+  for (d = 2; d <= n; d *= 2)
+    bits -= n / d;
+
+  mpfr_clear (t);
+  mpfr_clear (u);
+  return bits;
+}
+
+/* mpfr_gamma -- set gamma to Gamma(x), rounded according to rnd_mode;
+   returns the usual ternary inexact value.
+
+   We use the reflection formula
+     Gamma(1+t) Gamma(1-t) = - Pi t / sin(Pi (1 + t))
+   in order to treat the case x <= 1,
+   i.e. with x = 1-t, then Gamma(x) = -Pi*(1-x)/sin(Pi*(2-x))/GAMMA(2-x).
+
+   [review] Removed the unused mpz_t "fact" variable that was only
+   initialized and cleared, never used. */
+int
+mpfr_gamma (mpfr_ptr gamma, mpfr_srcptr x, mpfr_rnd_t rnd_mode)
+{
+  mpfr_t xp, GammaTrial, tmp, tmp2;
+  mpfr_prec_t realprec;
+  int compared, inex, is_integer;
+  MPFR_GROUP_DECL (group);
+  MPFR_SAVE_EXPO_DECL (expo);
+  MPFR_ZIV_DECL (loop);
+
+  MPFR_LOG_FUNC (("x[%#R]=%R rnd=%d", x, x, rnd_mode),
+                 ("gamma[%#R]=%R inexact=%d", gamma, gamma, inex));
+
+  /* Trivial cases */
+  if (MPFR_UNLIKELY (MPFR_IS_SINGULAR (x)))
+    {
+      if (MPFR_IS_NAN (x))
+        {
+          MPFR_SET_NAN (gamma);
+          MPFR_RET_NAN;
+        }
+      else if (MPFR_IS_INF (x))
+        {
+          if (MPFR_IS_NEG (x))
+            {
+              MPFR_SET_NAN (gamma);
+              MPFR_RET_NAN;
+            }
+          else
+            {
+              MPFR_SET_INF (gamma);
+              MPFR_SET_POS (gamma);
+              MPFR_RET (0);  /* exact */
+            }
+        }
+      else /* x is zero */
+        {
+          MPFR_ASSERTD(MPFR_IS_ZERO(x));
+          MPFR_SET_INF(gamma);
+          MPFR_SET_SAME_SIGN(gamma, x);
+          MPFR_RET (0);  /* exact */
+        }
+    }
+
+  /* Check for tiny arguments, where gamma(x) ~ 1/x - euler + ....
+     We know from "Bound on Runs of Zeros and Ones for Algebraic Functions",
+     Proceedings of Arith15, T. Lang and J.-M. Muller, 2001, that the maximal
+     number of consecutive zeroes or ones after the round bit is n-1 for an
+     input of n bits. But we need a more precise lower bound. Assume x has
+     n bits, and 1/x is near a floating-point number y of n+1 bits. We can
+     write x = X*2^e, y = Y/2^f with X, Y integers of n and n+1 bits.
+     Thus X*Y*2^(e-f) is near 1, i.e., X*Y is near 2^(f-e).
+     Two cases can happen:
+     (i) either X*Y is exactly 2^(f-e), but this can happen only if X and Y
+         are themselves powers of two, i.e., x is a power of two;
+     (ii) or X*Y is at distance at least one from 2^(f-e), thus
+          |xy-1| >= 2^(e-f), or |y-1/x| >= 2^(e-f)/x = 2^(-f)/X >= 2^(-f-n).
+          Since ufp(y) = 2^(n-f) [ufp = unit in first place], this means
+          that the distance |y-1/x| >= 2^(-2n) ufp(y).
+          Now assuming |gamma(x)-1/x| <= 1, which is true for x <= 1,
+          if 2^(-2n) ufp(y) >= 2, the error is at most 2^(-2n-1) ufp(y),
+          and round(1/x) with precision >= 2n+2 gives the correct result.
+          If x < 2^E, then y > 2^(-E), thus ufp(y) > 2^(-E-1).
+          A sufficient condition is thus EXP(x) + 2 <= -2 MAX(PREC(x),PREC(y)).
+  */
+  if (MPFR_EXP(x) + 2 <= -2 * (mpfr_exp_t) MAX(MPFR_PREC(x), MPFR_PREC(gamma)))
+    {
+      int positive = MPFR_IS_POS (x);
+      inex = mpfr_ui_div (gamma, 1, x, rnd_mode);
+      if (inex == 0) /* x is a power of two */
+        {
+          if (positive)
+            {
+              if (rnd_mode == MPFR_RNDU || rnd_mode == MPFR_RNDN)
+                inex = 1;
+              else /* round to zero or to -Inf */
+                {
+                  mpfr_nextbelow (gamma); /* 2^k - epsilon */
+                  inex = -1;
+                }
+            }
+          else /* negative */
+            {
+              if (rnd_mode == MPFR_RNDU || rnd_mode == MPFR_RNDZ)
+                {
+                  mpfr_nextabove (gamma); /* -2^k + epsilon */
+                  inex = 1;
+                }
+              else /* round to nearest and to -Inf */
+                inex = -1;
+            }
+        }
+      return inex;
+    }
+
+  is_integer = mpfr_integer_p (x);
+  /* gamma(x) for x a negative integer gives NaN */
+  if (is_integer && MPFR_IS_NEG(x))
+    {
+      MPFR_SET_NAN (gamma);
+      MPFR_RET_NAN;
+    }
+
+  compared = mpfr_cmp_ui (x, 1);
+  if (compared == 0)
+    return mpfr_set_ui (gamma, 1, rnd_mode);
+
+  /* if x is an integer that fits into an unsigned long, use mpfr_fac_ui
+     if argument is not too large.
+     If precision is p, fac_ui costs O(u*p), whereas gamma costs O(p*M(p)),
+     so for u <= M(p), fac_ui should be faster.
+     We approximate here M(p) by p*log(p)^2, which is not a bad guess.
+     Warning: since the generic code does not handle exact cases,
+     we want all cases where gamma(x) is exact to be treated here.
+  */
+  if (is_integer && mpfr_fits_ulong_p (x, MPFR_RNDN))
+    {
+      unsigned long int u;
+      mpfr_prec_t p = MPFR_PREC(gamma);
+      u = mpfr_get_ui (x, MPFR_RNDN);
+      if (u < 44787929UL && bits_fac (u - 1) <= p + (rnd_mode == MPFR_RNDN))
+        /* bits_fac: lower bound on the number of bits of m,
+           where gamma(x) = (u-1)! = m*2^e with m odd. */
+        return mpfr_fac_ui (gamma, u - 1, rnd_mode);
+      /* if bits_fac(...) > p (resp. p+1 for rounding to nearest),
+         then gamma(x) cannot be exact in precision p (resp. p+1).
+         FIXME: remove the test u < 44787929UL after changing bits_fac
+         to return a mpz_t or mpfr_t. */
+    }
+
+  /* check for overflow: according to (6.1.37) in Abramowitz & Stegun,
+     gamma(x) >= exp(-x) * x^(x-1/2) * sqrt(2*Pi)
+              >= 2 * (x/e)^x / x for x >= 1 */
+  if (compared > 0)
+    {
+      mpfr_t yp;
+      MPFR_BLOCK_DECL (flags);
+
+      /* 1/e rounded down to 53 bits */
+#define EXPM1_STR "0.010111100010110101011000110110001011001110111100111"
+      mpfr_init2 (xp, 53);
+      mpfr_init2 (yp, 53);
+      mpfr_set_str_binary (xp, EXPM1_STR);
+      mpfr_mul (xp, x, xp, MPFR_RNDZ);
+      mpfr_sub_ui (yp, x, 2, MPFR_RNDZ);
+      mpfr_pow (xp, xp, yp, MPFR_RNDZ); /* (x/e)^(x-2) */
+      mpfr_set_str_binary (yp, EXPM1_STR);
+      mpfr_mul (xp, xp, yp, MPFR_RNDZ); /* x^(x-2) / e^(x-1) */
+      mpfr_mul (xp, xp, yp, MPFR_RNDZ); /* x^(x-2) / e^x */
+      mpfr_mul (xp, xp, x, MPFR_RNDZ); /* lower bound on x^(x-1) / e^x */
+      MPFR_BLOCK (flags, mpfr_mul_2ui (xp, xp, 1, MPFR_RNDZ));
+      mpfr_clear (xp);
+      mpfr_clear (yp);
+      return MPFR_OVERFLOW (flags) ? mpfr_overflow (gamma, rnd_mode, 1)
+        : mpfr_gamma_aux (gamma, x, rnd_mode);
+    }
+
+  /* now compared < 0 */
+
+  MPFR_SAVE_EXPO_MARK (expo);
+
+  /* check for underflow: for x < 1,
+     gamma(x) = Pi*(x-1)/sin(Pi*(2-x))/gamma(2-x).
+     Since gamma(2-x) >= 2 * ((2-x)/e)^(2-x) / (2-x), we have
+     |gamma(x)| <= Pi*(1-x)*(2-x)/2/((2-x)/e)^(2-x) / |sin(Pi*(2-x))|
+                <= 12 * ((2-x)/e)^x / |sin(Pi*(2-x))|.
+     To avoid an underflow in ((2-x)/e)^x, we compute the logarithm.
+  */
+  if (MPFR_IS_NEG(x))
+    {
+      int underflow = 0, sgn, ck;
+      mpfr_prec_t w;
+
+      mpfr_init2 (xp, 53);
+      mpfr_init2 (tmp, 53);
+      mpfr_init2 (tmp2, 53);
+      /* we want an upper bound for x * [log(2-x)-1].
+         since x < 0, we need a lower bound on log(2-x) */
+      mpfr_ui_sub (xp, 2, x, MPFR_RNDD);
+      mpfr_log2 (xp, xp, MPFR_RNDD);
+      mpfr_sub_ui (xp, xp, 1, MPFR_RNDD);
+      mpfr_mul (xp, xp, x, MPFR_RNDU);
+
+      /* we need an upper bound on 1/|sin(Pi*(2-x))|,
+         thus a lower bound on |sin(Pi*(2-x))|.
+         If 2-x is exact, then the error of Pi*(2-x) is (1+u)^2 with u = 2^(-p)
+         thus the error on sin(Pi*(2-x)) is less than 1/2ulp + 3Pi(2-x)u,
+         assuming u <= 1, thus <= u + 3Pi(2-x)u */
+
+      w = mpfr_gamma_2_minus_x_exact (x); /* 2-x is exact for prec >= w */
+      w += 17; /* to get tmp2 small enough */
+      mpfr_set_prec (tmp, w);
+      mpfr_set_prec (tmp2, w);
+      ck = mpfr_ui_sub (tmp, 2, x, MPFR_RNDN);
+      MPFR_ASSERTD (ck == 0); (void) ck; /* use ck to avoid a warning */
+      mpfr_const_pi (tmp2, MPFR_RNDN);
+      mpfr_mul (tmp2, tmp2, tmp, MPFR_RNDN); /* Pi*(2-x) */
+      mpfr_sin (tmp, tmp2, MPFR_RNDN); /* sin(Pi*(2-x)) */
+      sgn = mpfr_sgn (tmp);
+      mpfr_abs (tmp, tmp, MPFR_RNDN);
+      mpfr_mul_ui (tmp2, tmp2, 3, MPFR_RNDU); /* 3Pi(2-x) */
+      mpfr_add_ui (tmp2, tmp2, 1, MPFR_RNDU); /* 3Pi(2-x)+1 */
+      mpfr_div_2ui (tmp2, tmp2, mpfr_get_prec (tmp), MPFR_RNDU);
+      /* if tmp2<|tmp|, we get a lower bound */
+      if (mpfr_cmp (tmp2, tmp) < 0)
+        {
+          mpfr_sub (tmp, tmp, tmp2, MPFR_RNDZ); /* low bnd on |sin(Pi*(2-x))| */
+          mpfr_ui_div (tmp, 12, tmp, MPFR_RNDU); /* upper bound */
+          mpfr_log2 (tmp, tmp, MPFR_RNDU);
+          mpfr_add (xp, tmp, xp, MPFR_RNDU);
+          /* The assert below checks that expo.saved_emin - 2 always
+             fits in a long. FIXME if we want to allow mpfr_exp_t to
+             be a long long, for instance. */
+          MPFR_ASSERTN (MPFR_EMIN_MIN - 2 >= LONG_MIN);
+          underflow = mpfr_cmp_si (xp, expo.saved_emin - 2) <= 0;
+        }
+
+      mpfr_clear (xp);
+      mpfr_clear (tmp);
+      mpfr_clear (tmp2);
+      if (underflow) /* the sign is the opposite of that of sin(Pi*(2-x)) */
+        {
+          MPFR_SAVE_EXPO_FREE (expo);
+          return mpfr_underflow (gamma, (rnd_mode == MPFR_RNDN) ? MPFR_RNDZ : rnd_mode, -sgn);
+        }
+    }
+
+  realprec = MPFR_PREC (gamma);
+  /* we want both 1-x and 2-x to be exact */
+  {
+    mpfr_prec_t w;
+    w = mpfr_gamma_1_minus_x_exact (x);
+    if (realprec < w)
+      realprec = w;
+    w = mpfr_gamma_2_minus_x_exact (x);
+    if (realprec < w)
+      realprec = w;
+  }
+  realprec = realprec + MPFR_INT_CEIL_LOG2 (realprec) + 20;
+  MPFR_ASSERTD(realprec >= 5);
+
+  MPFR_GROUP_INIT_4 (group, realprec + MPFR_INT_CEIL_LOG2 (realprec) + 20,
+                     xp, tmp, tmp2, GammaTrial);
+  MPFR_ZIV_INIT (loop, realprec);
+  for (;;)
+    {
+      mpfr_exp_t err_g;
+      int ck;
+      MPFR_GROUP_REPREC_4 (group, realprec, xp, tmp, tmp2, GammaTrial);
+
+      /* reflection formula: gamma(x) = Pi*(x-1)/sin(Pi*(2-x))/gamma(2-x) */
+
+      ck = mpfr_ui_sub (xp, 2, x, MPFR_RNDN); /* 2-x, exact */
+      MPFR_ASSERTD(ck == 0); (void) ck; /* use ck to avoid a warning */
+      mpfr_gamma (tmp, xp, MPFR_RNDN); /* gamma(2-x), error (1+u) */
+      mpfr_const_pi (tmp2, MPFR_RNDN); /* Pi, error (1+u) */
+      mpfr_mul (GammaTrial, tmp2, xp, MPFR_RNDN); /* Pi*(2-x), error (1+u)^2 */
+      err_g = MPFR_GET_EXP(GammaTrial);
+      mpfr_sin (GammaTrial, GammaTrial, MPFR_RNDN); /* sin(Pi*(2-x)) */
+      err_g = err_g + 1 - MPFR_GET_EXP(GammaTrial);
+      /* let g0 the true value of Pi*(2-x), g the computed value.
+         We have g = g0 + h with |h| <= |(1+u^2)-1|*g.
+         Thus sin(g) = sin(g0) + h' with |h'| <= |(1+u^2)-1|*g.
+         The relative error is thus bounded by |(1+u^2)-1|*g/sin(g)
+         <= |(1+u^2)-1|*2^err_g. <= 2.25*u*2^err_g for |u|<=1/4.
+         With the rounding error, this gives (0.5 + 2.25*2^err_g)*u. */
+      ck = mpfr_sub_ui (xp, x, 1, MPFR_RNDN); /* x-1, exact */
+      MPFR_ASSERTD(ck == 0); (void) ck; /* use ck to avoid a warning */
+      mpfr_mul (xp, tmp2, xp, MPFR_RNDN); /* Pi*(x-1), error (1+u)^2 */
+      mpfr_mul (GammaTrial, GammaTrial, tmp, MPFR_RNDN);
+      /* [1 + (0.5 + 2.25*2^err_g)*u]*(1+u)^2 = 1 + (2.5 + 2.25*2^err_g)*u
+         + (0.5 + 2.25*2^err_g)*u*(2u+u^2) + u^2.
+         For err_g <= realprec-2, we have (0.5 + 2.25*2^err_g)*u <=
+         0.5*u + 2.25/4 <= 0.6875 and u^2 <= u/4, thus
+         (0.5 + 2.25*2^err_g)*u*(2u+u^2) + u^2 <= 0.6875*(2u+u/4) + u/4
+         <= 1.8*u, thus the rel. error is bounded by (4.5 + 2.25*2^err_g)*u. */
+      mpfr_div (GammaTrial, xp, GammaTrial, MPFR_RNDN);
+      /* the error is of the form (1+u)^3/[1 + (4.5 + 2.25*2^err_g)*u].
+         For realprec >= 5 and err_g <= realprec-2, [(4.5 + 2.25*2^err_g)*u]^2
+         <= 0.71, and for |y|<=0.71, 1/(1-y) can be written 1+a*y with a<=4.
+         (1+u)^3 * (1+4*(4.5 + 2.25*2^err_g)*u)
+         = 1 + (21 + 9*2^err_g)*u + (57+27*2^err_g)*u^2 + (55+27*2^err_g)*u^3
+             + (18+9*2^err_g)*u^4
+         <= 1 + (21 + 9*2^err_g)*u + (57+27*2^err_g)*u^2 + (56+28*2^err_g)*u^3
+         <= 1 + (21 + 9*2^err_g)*u + (59+28*2^err_g)*u^2
+         <= 1 + (23 + 10*2^err_g)*u.
+         The final error is thus bounded by (23 + 10*2^err_g) ulps,
+         which is <= 2^6 for err_g<=2, and <= 2^(err_g+4) for err_g >= 2. */
+      err_g = (err_g <= 2) ? 6 : err_g + 4;
+
+      if (MPFR_LIKELY (MPFR_CAN_ROUND (GammaTrial, realprec - err_g,
+                                       MPFR_PREC(gamma), rnd_mode)))
+        break;
+      MPFR_ZIV_NEXT (loop, realprec);
+    }
+  MPFR_ZIV_FREE (loop);
+
+  inex = mpfr_set (gamma, GammaTrial, rnd_mode);
+  MPFR_GROUP_CLEAR (group);
+
+  MPFR_SAVE_EXPO_FREE (expo);
+  return mpfr_check_range (gamma, inex, rnd_mode);
+}
diff --git a/src/gammaonethird.c b/src/gammaonethird.c
new file mode 100644
index 000000000..4dd82210a
--- /dev/null
+++ b/src/gammaonethird.c
@@ -0,0 +1,191 @@
+/* Functions for evaluating Gamma(1/3) and Gamma(2/3). Used by mpfr_ai.
+
+Copyright 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#define MPFR_NEED_LONGLONG_H
+#include "mpfr-impl.h"
+
+/* Multiply y by v, batching factors: successive factors are accumulated
+   in the unsigned long variable "acc" as long as the product cannot
+   overflow; only when acc would overflow is an actual mpfr_mul_ui
+   performed, after which the accumulator restarts at v.  NOTE: this
+   macro deliberately captures the identifiers acc, y and mode from the
+   enclosing scope. */
+#define MPFR_ACC_OR_MUL(v)                              \
+  do                                                    \
+    {                                                   \
+      if (v <= ULONG_MAX / acc)                         \
+        acc *= v;                                       \
+      else                                              \
+        {                                               \
+          mpfr_mul_ui (y, y, acc, mode); acc = v;       \
+        }                                               \
+    }                                                   \
+  while (0)
+
+/* Same batching scheme as MPFR_ACC_OR_MUL, but the flush operation is a
+   division: divide y by the accumulated product of the v's.  Also
+   captures acc, y and mode from the enclosing scope. */
+#define MPFR_ACC_OR_DIV(v)                              \
+  do                                                    \
+    {                                                   \
+      if (v <= ULONG_MAX / acc)                         \
+        acc *= v;                                       \
+      else                                              \
+        {                                               \
+          mpfr_div_ui (y, y, acc, mode); acc = v;       \
+        }                                               \
+    }                                                   \
+  while (0)
+
+/* y <- x * v1 * v2 * v3 * v4 * v5, each operation rounded in direction
+   "mode".  The factors are batched in an unsigned long accumulator (the
+   variable acc, used by name inside MPFR_ACC_OR_MUL) so that as few
+   mpfr_mul_ui calls as possible are performed. */
+static void
+mpfr_mul_ui5 (mpfr_ptr y, mpfr_srcptr x,
+              unsigned long int v1, unsigned long int v2,
+              unsigned long int v3, unsigned long int v4,
+              unsigned long int v5, mpfr_rnd_t mode)
+{
+  unsigned long int acc = v1;  /* running product of pending factors */
+  mpfr_set (y, x, mode);
+  MPFR_ACC_OR_MUL (v2);
+  MPFR_ACC_OR_MUL (v3);
+  MPFR_ACC_OR_MUL (v4);
+  MPFR_ACC_OR_MUL (v5);
+  mpfr_mul_ui (y, y, acc, mode);  /* flush the remaining accumulator */
+}
+
+/* y <- x / (v1 * v2), rounded in direction "mode"; the two divisors are
+   combined into one mpfr_div_ui call when v1*v2 fits in an unsigned
+   long (via MPFR_ACC_OR_DIV, which uses the variable acc by name).
+   Non-static: also used outside this file. */
+void
+mpfr_div_ui2 (mpfr_ptr y, mpfr_srcptr x,
+              unsigned long int v1, unsigned long int v2, mpfr_rnd_t mode)
+{
+  unsigned long int acc = v1;  /* running product of pending divisors */
+  mpfr_set (y, x, mode);
+  MPFR_ACC_OR_DIV (v2);
+  mpfr_div_ui (y, y, acc, mode);  /* flush the remaining accumulator */
+}
+
+/* y <- x / (v1 * v2 * ... * v8), rounded in direction "mode".  The
+   divisors are batched in an unsigned long accumulator (the variable
+   acc, used by name inside MPFR_ACC_OR_DIV) so that as few mpfr_div_ui
+   calls as possible are performed. */
+static void
+mpfr_div_ui8 (mpfr_ptr y, mpfr_srcptr x,
+              unsigned long int v1, unsigned long int v2,
+              unsigned long int v3, unsigned long int v4,
+              unsigned long int v5, unsigned long int v6,
+              unsigned long int v7, unsigned long int v8, mpfr_rnd_t mode)
+{
+  unsigned long int acc = v1;  /* running product of pending divisors */
+  mpfr_set (y, x, mode);
+  MPFR_ACC_OR_DIV (v2);
+  MPFR_ACC_OR_DIV (v3);
+  MPFR_ACC_OR_DIV (v4);
+  MPFR_ACC_OR_DIV (v5);
+  MPFR_ACC_OR_DIV (v6);
+  MPFR_ACC_OR_DIV (v7);
+  MPFR_ACC_OR_DIV (v8);
+  mpfr_div_ui (y, y, acc, mode);  /* flush the remaining accumulator */
+}
+
+
+/* Store in s an approximation of the constant
+   omega = Gamma(1/3)^6 * sqrt(10) / (12*pi^4),
+   computed with C. H. Brown's series.  The computed value s satisfies
+   |s - omega| <= 2^(1-prec) * omega.
+   As usual, the variable s is supposed to be initialized. */
+static void
+mpfr_Browns_const (mpfr_ptr s, mpfr_prec_t prec)
+{
+  mpfr_t term;
+  unsigned long int i = 1;
+  mpfr_prec_t working_prec = prec + 10 + MPFR_INT_CEIL_LOG2 (2 + prec / 10);
+
+  mpfr_init2 (term, working_prec);
+  mpfr_set_prec (s, working_prec);
+
+  /* term ~ u(i-1) and s ~ sum(j=0..i-1, u(j)) at the top of each pass,
+     starting from u(0) = 1 */
+  mpfr_set_ui (term, 1, MPFR_RNDN);
+  mpfr_set (s, term, MPFR_RNDN);
+
+  do
+    {
+      /* u(i) = -u(i-1) * (6i-5)(6i-4)(6i-3)(6i-2)(6i-1)
+                        / (i * i * (3i-2)(3i-1)(3i) * 80 * 160 * 160) */
+      mpfr_mul_ui5 (term, term, 6 * i - 5, 6 * i - 4, 6 * i - 3, 6 * i - 2,
+                    6 * i - 1, MPFR_RNDN);
+      mpfr_div_ui8 (term, term, i, i, 3 * i - 2, 3 * i - 1, 3 * i, 80, 160,
+                    160, MPFR_RNDN);
+      MPFR_CHANGE_SIGN (term);
+
+      mpfr_add (s, s, term, MPFR_RNDN);
+      i++;
+    }
+  /* stop once the last term is negligible at the target precision */
+  while (MPFR_GET_EXP (term) + prec > MPFR_GET_EXP (s) + 7);
+
+  mpfr_clear (term);
+}
+
+/* Set y to an approximation of Gamma(1/3) satisfying
+   |Gamma(1/3) - y| <= 2^(1-prec) * Gamma(1/3).
+   Inverts Brown's formula: Gamma(1/3)^6 = 12*pi^4*omega / sqrt(10). */
+static void
+mpfr_gamma_one_third (mpfr_ptr y, mpfr_prec_t prec)
+{
+  mpfr_t p, q, r;
+
+  mpfr_init2 (p, prec + 9);
+  mpfr_init2 (q, prec + 9);
+  mpfr_init2 (r, prec + 4);
+  mpfr_set_prec (y, prec + 2);
+
+  /* p <- 12 * pi^4 */
+  mpfr_const_pi (p, MPFR_RNDN);
+  mpfr_sqr (p, p, MPFR_RNDN);
+  mpfr_sqr (p, p, MPFR_RNDN);
+  mpfr_mul_ui (p, p, 12, MPFR_RNDN);
+
+  /* p <- 12 * pi^4 * omega */
+  mpfr_Browns_const (q, prec + 9);
+  mpfr_mul (p, p, q, MPFR_RNDN);
+
+  /* p <- Gamma(1/3)^6 = 12 * pi^4 * omega / sqrt(10) */
+  mpfr_set_ui (q, 10, MPFR_RNDN);
+  mpfr_sqrt (q, q, MPFR_RNDN);
+  mpfr_div (p, p, q, MPFR_RNDN);
+
+  /* y <- sixth root of p: square root followed by cube root */
+  mpfr_sqrt (r, p, MPFR_RNDN);
+  mpfr_cbrt (y, r, MPFR_RNDN);
+
+  mpfr_clear (p);
+  mpfr_clear (q);
+  mpfr_clear (r);
+}
+
+/* Compute y1 and y2 such that:
+     |y1 - Gamma(1/3)| <= 2^(1-prec) * Gamma(1/3)
+     |y2 - Gamma(2/3)| <= 2^(1-prec) * Gamma(2/3)
+   Gamma(2/3) is deduced from Gamma(1/3) via the reflection formula
+   Gamma(z)*Gamma(1-z) = pi / sin(pi*z); at z = 1/3, sin(pi/3) = sqrt(3)/2,
+   hence Gamma(2/3) = 2*pi / (sqrt(3) * Gamma(1/3)). */
+void
+mpfr_gamma_one_and_two_third (mpfr_ptr y1, mpfr_ptr y2, mpfr_prec_t prec)
+{
+  mpfr_t t;
+
+  mpfr_init2 (t, prec + 4);
+  mpfr_set_prec (y2, prec + 4);
+
+  mpfr_gamma_one_third (y1, prec + 4);
+
+  /* t <- sqrt(3) * Gamma(1/3) */
+  mpfr_set_ui (t, 3, MPFR_RNDN);
+  mpfr_sqrt (t, t, MPFR_RNDN);
+  mpfr_mul (t, y1, t, MPFR_RNDN);
+
+  /* y2 <- 2*pi / t */
+  mpfr_const_pi (y2, MPFR_RNDN);
+  mpfr_mul_2ui (y2, y2, 1, MPFR_RNDN);
+  mpfr_div (y2, y2, t, MPFR_RNDN);
+
+  mpfr_clear (t);
+}
diff --git a/src/gen_inverse.h b/src/gen_inverse.h
new file mode 100644
index 000000000..dc569793d
--- /dev/null
+++ b/src/gen_inverse.h
@@ -0,0 +1,106 @@
+/* generic inverse of a function.
+
+Copyright 2005, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#define MPFR_NEED_LONGLONG_H
+#include "mpfr-impl.h"
+
+#ifndef ACTION_SPECIAL
+#define ACTION_SPECIAL
+#endif
+
+#ifndef ACTION_TINY
+#define ACTION_TINY
+#endif
+
+/* example of use:
+#define FUNCTION mpfr_sec
+#define INVERSE mpfr_cos
+#define ACTION_NAN(y) do { MPFR_SET_NAN(y); MPFR_RET_NAN; } while (1)
+#define ACTION_INF(y) do { MPFR_SET_NAN(y); MPFR_RET_NAN; } while (1)
+#define ACTION_ZERO(y) return mpfr_set_ui (y, 1, MPFR_RNDN)
+#include "gen_inverse.h"
+*/
+
+/* Generic template computing y = FUNCTION(x) = 1 / INVERSE(x), correctly
+   rounded, with Ziv's strategy: INVERSE is evaluated toward zero at a
+   working precision m (so its error is below 1 ulp), inverted, and m is
+   increased until rounding to the target precision can be guaranteed.
+   The including file must define FUNCTION, INVERSE, ACTION_NAN,
+   ACTION_INF and ACTION_ZERO, and may define ACTION_SPECIAL and
+   ACTION_TINY (see the usage example above). */
+int
+FUNCTION (mpfr_ptr y, mpfr_srcptr x, mpfr_rnd_t rnd_mode)
+{
+  mpfr_prec_t precy; /* target precision */
+  mpfr_prec_t m; /* working precision */
+  mpfr_t z; /* temporary variable to store INVERSE(x) */
+  int inexact; /* inexact flag */
+  MPFR_ZIV_DECL (loop);
+  MPFR_SAVE_EXPO_DECL (expo);
+
+  if (MPFR_UNLIKELY(MPFR_IS_SINGULAR(x)))
+    {
+      /* each ACTION_* macro is expected to return from the function */
+      if (MPFR_IS_NAN(x))
+        ACTION_NAN(y);
+      else if (MPFR_IS_INF(x))
+        ACTION_INF(y);
+      else /* x = 0 */
+        ACTION_ZERO(y,x);
+    }
+
+  /* x is neither NaN, Inf nor zero */
+  MPFR_SAVE_EXPO_MARK (expo);
+  ACTION_TINY (y, x, rnd_mode); /* special case for very small input x */
+  precy = MPFR_PREC(y);
+  m = precy + MPFR_INT_CEIL_LOG2 (precy) + 3;
+  mpfr_init2 (z, m);
+
+  MPFR_ZIV_INIT (loop, m);
+  for(;;)
+    {
+      MPFR_BLOCK_DECL (flags);
+
+      MPFR_BLOCK (flags, INVERSE (z, x, MPFR_RNDZ)); /* error k_u < 1 ulp */
+      /* FIXME: the following assumes that if an overflow happens with
+         MPFR_EMAX_MAX, then necessarily an underflow happens with
+         __gmpfr_emin */
+      if (MPFR_OVERFLOW (flags))
+        {
+          /* INVERSE(x) overflowed, so 1/INVERSE(x) underflows */
+          int s = MPFR_SIGN(z);
+          MPFR_ZIV_FREE (loop);
+          mpfr_clear (z);
+          MPFR_SAVE_EXPO_FREE (expo);
+          return mpfr_underflow (y, (rnd_mode == MPFR_RNDN) ?
+                                 MPFR_RNDZ : rnd_mode, s);
+        }
+      mpfr_ui_div (z, 1, z, MPFR_RNDN);
+      /* the error is less than c_w + 2*c_u*k_u (see algorithms.tex),
+         where c_w = 1/2, c_u = 1 since z was rounded toward zero,
+         thus 1/2 + 2 < 4 */
+      if (MPFR_LIKELY (MPFR_CAN_ROUND (z, m - 2, precy, rnd_mode)))
+        break;
+      ACTION_SPECIAL;
+      MPFR_ZIV_NEXT (loop, m);
+      mpfr_set_prec (z, m);
+    }
+  MPFR_ZIV_FREE (loop);
+
+  inexact = mpfr_set (y, z, rnd_mode);
+  mpfr_clear (z);
+
+  /* NOTE(review): "end" appears unused here; presumably it is the target
+     of a goto inside some ACTION_TINY expansions -- confirm in the
+     including files before removing. */
+ end:
+  MPFR_SAVE_EXPO_FREE (expo);
+  return mpfr_check_range (y, inexact, rnd_mode);
+}
diff --git a/src/get_d.c b/src/get_d.c
new file mode 100644
index 000000000..7db963837
--- /dev/null
+++ b/src/get_d.c
@@ -0,0 +1,183 @@
+/* mpfr_get_d, mpfr_get_d_2exp -- convert a multiple precision floating-point
+ number to a machine double precision float
+
+Copyright 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#include <float.h>
+
+#define MPFR_NEED_LONGLONG_H
+#include "mpfr-impl.h"
+
+#include "ieee_floats.h"
+
+/* Assumes IEEE-754 double precision; otherwise, only an approximated
+   result will be returned, without any guaranty (and special cases
+   such as NaN must be avoided if not supported). */
+
+/* Convert src to a double, rounded in direction rnd_mode.
+   Out-of-range values saturate to +/-0, +/-DBL_MIN, +/-DBL_MAX or
+   +/-Inf depending on the rounding direction; subnormal doubles are
+   produced with their reduced number of significant bits. */
+double
+mpfr_get_d (mpfr_srcptr src, mpfr_rnd_t rnd_mode)
+{
+  double d;
+  int negative;
+  mpfr_exp_t e;
+
+  if (MPFR_UNLIKELY (MPFR_IS_SINGULAR (src)))
+    {
+      if (MPFR_IS_NAN (src))
+        return MPFR_DBL_NAN;
+
+      negative = MPFR_IS_NEG (src);
+
+      if (MPFR_IS_INF (src))
+        return negative ? MPFR_DBL_INFM : MPFR_DBL_INFP;
+
+      MPFR_ASSERTD (MPFR_IS_ZERO(src));
+      return negative ? DBL_NEG_ZERO : 0.0;
+    }
+
+  e = MPFR_GET_EXP (src);
+  negative = MPFR_IS_NEG (src);
+
+  /* reduce away-from-zero to the equivalent directed rounding */
+  if (MPFR_UNLIKELY(rnd_mode == MPFR_RNDA))
+    rnd_mode = negative ? MPFR_RNDD : MPFR_RNDU;
+
+  /* the smallest normalized number is 2^(-1022)=0.1e-1021, and the smallest
+     subnormal is 2^(-1074)=0.1e-1073 */
+  if (MPFR_UNLIKELY (e < -1073))
+    {
+      /* |src| is below the smallest subnormal: round to +/-0 or to the
+         smallest subnormal according to rnd_mode (the comparison with
+         2^(-1075) decides the round-to-nearest tie direction). */
+      /* Note: Avoid using a constant expression DBL_MIN * DBL_EPSILON
+         as this gives 0 instead of the correct result with gcc on some
+         Alpha machines. */
+      d = negative ?
+        (rnd_mode == MPFR_RNDD ||
+         (rnd_mode == MPFR_RNDN && mpfr_cmp_si_2exp(src, -1, -1075) < 0)
+         ? -DBL_MIN : DBL_NEG_ZERO) :
+        (rnd_mode == MPFR_RNDU ||
+         (rnd_mode == MPFR_RNDN && mpfr_cmp_si_2exp(src, 1, -1075) > 0)
+         ? DBL_MIN : 0.0);
+      if (d != 0.0) /* we multiply DBL_MIN = 2^(-1022) by DBL_EPSILON = 2^(-52)
+                       to get +-2^(-1074) */
+        d *= DBL_EPSILON;
+    }
+  /* the largest normalized number is 2^1024*(1-2^(-53))=0.111...111e1024 */
+  else if (MPFR_UNLIKELY (e > 1024))
+    {
+      /* |src| exceeds the double range: saturate to +/-DBL_MAX or
+         +/-Inf according to the rounding direction */
+      d = negative ?
+        (rnd_mode == MPFR_RNDZ || rnd_mode == MPFR_RNDU ?
+         -DBL_MAX : MPFR_DBL_INFM) :
+        (rnd_mode == MPFR_RNDZ || rnd_mode == MPFR_RNDD ?
+         DBL_MAX : MPFR_DBL_INFP);
+    }
+  else
+    {
+      int nbits;
+      mp_size_t np, i;
+      mp_limb_t tp[ MPFR_LIMBS_PER_DOUBLE ];
+      int carry;
+
+      nbits = IEEE_DBL_MANT_DIG; /* 53 */
+      if (MPFR_UNLIKELY (e < -1021))
+        /*In the subnormal case, compute the exact number of significant bits*/
+        {
+          nbits += (1021 + e);
+          MPFR_ASSERTD (nbits >= 1);
+        }
+      np = (nbits + GMP_NUMB_BITS - 1) / GMP_NUMB_BITS;
+      MPFR_ASSERTD ( np <= MPFR_LIMBS_PER_DOUBLE );
+      carry = mpfr_round_raw_4 (tp, MPFR_MANT(src), MPFR_PREC(src), negative,
+                                nbits, rnd_mode);
+      if (MPFR_UNLIKELY(carry))
+        /* the rounded mantissa reached 1: e below accounts for it since
+           d = 1.0 scaled by e gives 2^e */
+        d = 1.0;
+      else
+        {
+          /* The following computations are exact thanks to the previous
+             mpfr_round_raw. */
+          d = (double) tp[0] / MP_BASE_AS_DOUBLE;
+          for (i = 1 ; i < np ; i++)
+            d = (d + tp[i]) / MP_BASE_AS_DOUBLE;
+          /* d is the mantissa (between 1/2 and 1) of the argument rounded
+             to 53 bits */
+        }
+      d = mpfr_scale2 (d, e);
+      if (negative)
+        d = -d;
+    }
+
+  return d;
+}
+
+#undef mpfr_get_d1
+/* Variant of mpfr_get_d that uses the current default rounding mode.
+   The #undef above removes any macro remapping so the real symbol is
+   defined here. */
+double
+mpfr_get_d1 (mpfr_srcptr src)
+{
+  return mpfr_get_d (src, __gmpfr_default_rounding_mode);
+}
+
+/* Return d and set *expptr such that src ~ d * 2^(*expptr) with
+   0.5 <= |d| < 1 (and *expptr = 0 for NaN, Inf and zero), rounding the
+   mantissa in direction rnd_mode. */
+double
+mpfr_get_d_2exp (long *expptr, mpfr_srcptr src, mpfr_rnd_t rnd_mode)
+{
+  double ret;
+  mpfr_exp_t exp;
+  mpfr_t tmp;
+
+  if (MPFR_UNLIKELY (MPFR_IS_SINGULAR (src)))
+    {
+      int negative;
+      *expptr = 0;
+      if (MPFR_IS_NAN (src))
+        return MPFR_DBL_NAN;
+      negative = MPFR_IS_NEG (src);
+      if (MPFR_IS_INF (src))
+        return negative ? MPFR_DBL_INFM : MPFR_DBL_INFP;
+      MPFR_ASSERTD (MPFR_IS_ZERO(src));
+      return negative ? DBL_NEG_ZERO : 0.0;
+    }
+
+  /* shallow-copy src and force its exponent to 0, so that mpfr_get_d
+     returns only the (rounded) mantissa in [0.5, 1) */
+  tmp[0] = *src;  /* Hack copy mpfr_t */
+  MPFR_SET_EXP (tmp, 0);
+  ret = mpfr_get_d (tmp, rnd_mode);
+
+  if (MPFR_IS_PURE_FP(src))
+    {
+      exp = MPFR_GET_EXP (src);
+
+      /* rounding can give 1.0, adjust back to 0.5 <= abs(ret) < 1.0 */
+      if (ret == 1.0)
+        {
+          ret = 0.5;
+          exp++;
+        }
+      else if (ret == -1.0)
+        {
+          ret = -0.5;
+          exp++;
+        }
+
+      MPFR_ASSERTN ((ret >= 0.5 && ret < 1.0)
+                    || (ret <= -0.5 && ret > -1.0));
+      MPFR_ASSERTN (exp >= LONG_MIN && exp <= LONG_MAX);
+    }
+  else
+    exp = 0;
+
+  *expptr = exp;
+  return ret;
+}
diff --git a/src/get_d64.c b/src/get_d64.c
new file mode 100644
index 000000000..22307d042
--- /dev/null
+++ b/src/get_d64.c
@@ -0,0 +1,397 @@
+/* mpfr_get_decimal64 -- convert a multiple precision floating-point number
+ to a IEEE 754r decimal64 float
+
+See http://gcc.gnu.org/ml/gcc/2006-06/msg00691.html,
+http://gcc.gnu.org/onlinedocs/gcc/Decimal-Float.html,
+and TR 24732 <http://www.open-std.org/jtc1/sc22/wg14/www/projects#24732>.
+
+Copyright 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#include <stdlib.h> /* for strtol */
+#include "mpfr-impl.h"
+
+#define ISDIGIT(c) ('0' <= c && c <= '9')
+
+#ifdef MPFR_WANT_DECIMAL_FLOATS
+
+#ifdef DPD_FORMAT
+/* T[d] is the 10-bit declet encoding the 3-digit decimal number d
+   (0 <= d <= 999), used below to build the trailing significand of a
+   DPD (Densely Packed Decimal) decimal64 — see the "10-bit encoding"
+   uses in string_to_Decimal64. */
+static int T[1000] = {
+  0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 32,
+  33, 34, 35, 36, 37, 38, 39, 40, 41, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57,
+  64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 80, 81, 82, 83, 84, 85, 86, 87, 88,
+  89, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 112, 113, 114, 115, 116,
+  117, 118, 119, 120, 121, 10, 11, 42, 43, 74, 75, 106, 107, 78, 79, 26, 27,
+  58, 59, 90, 91, 122, 123, 94, 95, 128, 129, 130, 131, 132, 133, 134, 135,
+  136, 137, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 160, 161, 162,
+  163, 164, 165, 166, 167, 168, 169, 176, 177, 178, 179, 180, 181, 182, 183,
+  184, 185, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 208, 209, 210,
+  211, 212, 213, 214, 215, 216, 217, 224, 225, 226, 227, 228, 229, 230, 231,
+  232, 233, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 138, 139, 170,
+  171, 202, 203, 234, 235, 206, 207, 154, 155, 186, 187, 218, 219, 250, 251,
+  222, 223, 256, 257, 258, 259, 260, 261, 262, 263, 264, 265, 272, 273, 274,
+  275, 276, 277, 278, 279, 280, 281, 288, 289, 290, 291, 292, 293, 294, 295,
+  296, 297, 304, 305, 306, 307, 308, 309, 310, 311, 312, 313, 320, 321, 322,
+  323, 324, 325, 326, 327, 328, 329, 336, 337, 338, 339, 340, 341, 342, 343,
+  344, 345, 352, 353, 354, 355, 356, 357, 358, 359, 360, 361, 368, 369, 370,
+  371, 372, 373, 374, 375, 376, 377, 266, 267, 298, 299, 330, 331, 362, 363,
+  334, 335, 282, 283, 314, 315, 346, 347, 378, 379, 350, 351, 384, 385, 386,
+  387, 388, 389, 390, 391, 392, 393, 400, 401, 402, 403, 404, 405, 406, 407,
+  408, 409, 416, 417, 418, 419, 420, 421, 422, 423, 424, 425, 432, 433, 434,
+  435, 436, 437, 438, 439, 440, 441, 448, 449, 450, 451, 452, 453, 454, 455,
+  456, 457, 464, 465, 466, 467, 468, 469, 470, 471, 472, 473, 480, 481, 482,
+  483, 484, 485, 486, 487, 488, 489, 496, 497, 498, 499, 500, 501, 502, 503,
+  504, 505, 394, 395, 426, 427, 458, 459, 490, 491, 462, 463, 410, 411, 442,
+  443, 474, 475, 506, 507, 478, 479, 512, 513, 514, 515, 516, 517, 518, 519,
+  520, 521, 528, 529, 530, 531, 532, 533, 534, 535, 536, 537, 544, 545, 546,
+  547, 548, 549, 550, 551, 552, 553, 560, 561, 562, 563, 564, 565, 566, 567,
+  568, 569, 576, 577, 578, 579, 580, 581, 582, 583, 584, 585, 592, 593, 594,
+  595, 596, 597, 598, 599, 600, 601, 608, 609, 610, 611, 612, 613, 614, 615,
+  616, 617, 624, 625, 626, 627, 628, 629, 630, 631, 632, 633, 522, 523, 554,
+  555, 586, 587, 618, 619, 590, 591, 538, 539, 570, 571, 602, 603, 634, 635,
+  606, 607, 640, 641, 642, 643, 644, 645, 646, 647, 648, 649, 656, 657, 658,
+  659, 660, 661, 662, 663, 664, 665, 672, 673, 674, 675, 676, 677, 678, 679,
+  680, 681, 688, 689, 690, 691, 692, 693, 694, 695, 696, 697, 704, 705, 706,
+  707, 708, 709, 710, 711, 712, 713, 720, 721, 722, 723, 724, 725, 726, 727,
+  728, 729, 736, 737, 738, 739, 740, 741, 742, 743, 744, 745, 752, 753, 754,
+  755, 756, 757, 758, 759, 760, 761, 650, 651, 682, 683, 714, 715, 746, 747,
+  718, 719, 666, 667, 698, 699, 730, 731, 762, 763, 734, 735, 768, 769, 770,
+  771, 772, 773, 774, 775, 776, 777, 784, 785, 786, 787, 788, 789, 790, 791,
+  792, 793, 800, 801, 802, 803, 804, 805, 806, 807, 808, 809, 816, 817, 818,
+  819, 820, 821, 822, 823, 824, 825, 832, 833, 834, 835, 836, 837, 838, 839,
+  840, 841, 848, 849, 850, 851, 852, 853, 854, 855, 856, 857, 864, 865, 866,
+  867, 868, 869, 870, 871, 872, 873, 880, 881, 882, 883, 884, 885, 886, 887,
+  888, 889, 778, 779, 810, 811, 842, 843, 874, 875, 846, 847, 794, 795, 826,
+  827, 858, 859, 890, 891, 862, 863, 896, 897, 898, 899, 900, 901, 902, 903,
+  904, 905, 912, 913, 914, 915, 916, 917, 918, 919, 920, 921, 928, 929, 930,
+  931, 932, 933, 934, 935, 936, 937, 944, 945, 946, 947, 948, 949, 950, 951,
+  952, 953, 960, 961, 962, 963, 964, 965, 966, 967, 968, 969, 976, 977, 978,
+  979, 980, 981, 982, 983, 984, 985, 992, 993, 994, 995, 996, 997, 998, 999,
+  1000, 1001, 1008, 1009, 1010, 1011, 1012, 1013, 1014, 1015, 1016, 1017, 906,
+  907, 938, 939, 970, 971, 1002, 1003, 974, 975, 922, 923, 954, 955, 986,
+  987, 1018, 1019, 990, 991, 12, 13, 268, 269, 524, 525, 780, 781, 46, 47, 28,
+  29, 284, 285, 540, 541, 796, 797, 62, 63, 44, 45, 300, 301, 556, 557, 812,
+  813, 302, 303, 60, 61, 316, 317, 572, 573, 828, 829, 318, 319, 76, 77,
+  332, 333, 588, 589, 844, 845, 558, 559, 92, 93, 348, 349, 604, 605, 860,
+  861, 574, 575, 108, 109, 364, 365, 620, 621, 876, 877, 814, 815, 124, 125,
+  380, 381, 636, 637, 892, 893, 830, 831, 14, 15, 270, 271, 526, 527, 782,
+  783, 110, 111, 30, 31, 286, 287, 542, 543, 798, 799, 126, 127, 140, 141,
+  396, 397, 652, 653, 908, 909, 174, 175, 156, 157, 412, 413, 668, 669, 924,
+  925, 190, 191, 172, 173, 428, 429, 684, 685, 940, 941, 430, 431, 188, 189,
+  444, 445, 700, 701, 956, 957, 446, 447, 204, 205, 460, 461, 716, 717, 972,
+  973, 686, 687, 220, 221, 476, 477, 732, 733, 988, 989, 702, 703, 236, 237,
+  492, 493, 748, 749, 1004, 1005, 942, 943, 252, 253, 508, 509, 764, 765,
+  1020, 1021, 958, 959, 142, 143, 398, 399, 654, 655, 910, 911, 238, 239, 158,
+  159, 414, 415, 670, 671, 926, 927, 254, 255};
+#endif
+
+/* construct a decimal64 quiet NaN.
+   Fix: the original left x.s.sig, x.s.manh and x.s.manl uninitialized and
+   then read the whole union through x.d, i.e. read indeterminate bytes.
+   Zero them so a well-defined canonical quiet NaN is produced. */
+static _Decimal64
+get_decimal64_nan (void)
+{
+  union ieee_double_extract x;
+  union ieee_double_decimal64 y;
+
+  x.s.sig = 0;    /* sign of a NaN is irrelevant: use +NaN */
+  x.s.exp = 1984; /* G[0]..G[4] = 11111: quiet NaN */
+  x.s.manh = 0;   /* zero payload: avoids reading indeterminate */
+  x.s.manl = 0;   /* union bytes through x.d below */
+  y.d = x.d;
+  return y.d64;
+}
+
+/* construct the decimal64 Inf with given sign.
+   Fix: the original left x.s.manh and x.s.manl uninitialized and then
+   read the whole union through x.d.  Zero them: the canonical Inf has a
+   zero trailing significand, and no indeterminate bytes are read. */
+static _Decimal64
+get_decimal64_inf (int negative)
+{
+  union ieee_double_extract x;
+  union ieee_double_decimal64 y;
+
+  x.s.sig = (negative) ? 1 : 0;
+  x.s.exp = 1920; /* G[0]..G[4] = 11110: Inf */
+  x.s.manh = 0;   /* canonical Inf: zero trailing significand */
+  x.s.manl = 0;
+  y.d = x.d;
+  return y.d64;
+}
+
+/* construct the decimal64 zero with given sign (negative != 0 gives -0) */
+static _Decimal64
+get_decimal64_zero (int negative)
+{
+  union ieee_double_decimal64 y;
+
+  /* zero has the same representation in binary64 and decimal64,
+     so the double +/-0.0 can be reinterpreted directly */
+  y.d = negative ? DBL_NEG_ZERO : 0.0;
+  return y.d64;
+}
+
+/* construct the decimal64 smallest non-zero with given sign
+   (biased exponent 0, significand 1, i.e. 1E-398 in both DPD and BID).
+   Fix: the original returned x.d directly; since the function returns
+   _Decimal64, that performed a numeric double->_Decimal64 *value*
+   conversion of the crafted bit pattern instead of reinterpreting the
+   bits, unlike the sibling nan/inf/zero constructors.  Go through
+   union ieee_double_decimal64 as they do. */
+static _Decimal64
+get_decimal64_min (int negative)
+{
+  union ieee_double_extract x;
+  union ieee_double_decimal64 y;
+
+  x.s.sig = (negative) ? 1 : 0;
+  x.s.exp = 0;
+  x.s.manh = 0;
+  x.s.manl = 1;
+  y.d = x.d;    /* reinterpret the bits, do not convert the value */
+  return y.d64;
+}
+
+/* construct the decimal64 largest finite number with given sign.
+   Fix: as in get_decimal64_min, the original returned x.d directly,
+   numerically converting the crafted bit pattern to _Decimal64 instead
+   of reinterpreting it; use union ieee_double_decimal64 like the
+   sibling nan/inf/zero constructors. */
+static _Decimal64
+get_decimal64_max (int negative)
+{
+  union ieee_double_extract x;
+  union ieee_double_decimal64 y;
+
+  x.s.sig = (negative) ? 1 : 0;
+  x.s.exp = 1919;
+  x.s.manh = 1048575; /* 2^20-1 */
+  x.s.manl = ~0;
+  y.d = x.d;    /* reinterpret the bits, do not convert the value */
+  return y.d64;
+}
+
+/* one-to-one conversion:
+ s is a decimal string representing a number x = m * 10^e which must be
+ exactly representable in the decimal64 format, i.e.
+ (a) the mantissa m has at most 16 decimal digits
+ (b1) -383 <= e <= 384 with m integer multiple of 10^(-15), |m| < 10
+ (b2) or -398 <= e <= 369 with m integer, |m| < 10^16.
+ Assumes s is neither NaN nor +Inf nor -Inf.
+*/
+/* See the contract in the comment above: s must represent a number
+   exactly representable in decimal64.  Fix below: the assertion loop
+   checking the low digits used m[16 - n] (same digit n times) instead
+   of m[16 - i]. */
+static _Decimal64
+string_to_Decimal64 (char *s)
+{
+  long int exp = 0;
+  char m[17];           /* 16 digits + terminating null */
+  long n = 0;           /* mantissa length */
+  char *endptr[1];
+  union ieee_double_extract x;
+  union ieee_double_decimal64 y;
+#ifdef DPD_FORMAT
+  unsigned int G, d1, d2, d3, d4, d5;
+#endif
+
+  /* read sign */
+  if (*s == '-')
+    {
+      x.s.sig = 1;
+      s ++;
+    }
+  else
+    x.s.sig = 0;
+  /* read mantissa digits; NOTE(review): m can hold at most 16 digits and
+     overflow is only asserted afterwards — callers must respect (a) */
+  while (ISDIGIT (*s))
+    m[n++] = *s++;
+  exp = n;
+  if (*s == '.')
+    {
+      s ++;
+      while (ISDIGIT (*s))
+        m[n++] = *s++;
+    }
+  /* we have exp digits before decimal point, and a total of n digits */
+  exp -= n; /* we will consider an integer mantissa */
+  MPFR_ASSERTN(n <= 16);
+  if (*s == 'E' || *s == 'e')
+    exp += strtol (s + 1, endptr, 10);
+  else
+    *endptr = s;
+  MPFR_ASSERTN(**endptr == '\0');
+  MPFR_ASSERTN(-398 <= exp && exp <= (long) (385 - n));
+  /* pad the mantissa to exactly 16 digits */
+  while (n < 16)
+    {
+      m[n++] = '0';
+      exp --;
+    }
+  /* now n=16 and -398 <= exp <= 369 */
+  m[n] = '\0';
+
+  /* compute biased exponent */
+  exp += 398;
+
+  MPFR_ASSERTN(exp >= -15);
+  if (exp < 0)
+    {
+      int i;
+      n = -exp;
+      /* check the last n digits of the mantissa are zero */
+      for (i = 1; i <= n; i++)
+        MPFR_ASSERTN(m[16 - i] == '0'); /* bug fix: was m[16 - n], which
+                                           tested the same digit n times */
+      /* shift the first (16-n) digits to the right */
+      for (i = 16 - n - 1; i >= 0; i--)
+        m[i + n] = m[i];
+      /* zero the first n digits */
+      for (i = 0; i < n; i ++)
+        m[i] = '0';
+      exp = 0;
+    }
+
+  /* now convert to DPD or BID */
+#ifdef DPD_FORMAT
+#define CH(d) (d - '0')
+  if (m[0] >= '8')
+    G = (3 << 11) | ((exp & 768) << 1) | ((CH(m[0]) & 1) << 8);
+  else
+    G = ((exp & 768) << 3) | (CH(m[0]) << 8);
+  /* now the most 5 significant bits of G are filled */
+  G |= exp & 255;
+  d1 = T[100 * CH(m[1]) + 10 * CH(m[2]) + CH(m[3])]; /* 10-bit encoding */
+  d2 = T[100 * CH(m[4]) + 10 * CH(m[5]) + CH(m[6])]; /* 10-bit encoding */
+  d3 = T[100 * CH(m[7]) + 10 * CH(m[8]) + CH(m[9])]; /* 10-bit encoding */
+  d4 = T[100 * CH(m[10]) + 10 * CH(m[11]) + CH(m[12])]; /* 10-bit encoding */
+  d5 = T[100 * CH(m[13]) + 10 * CH(m[14]) + CH(m[15])]; /* 10-bit encoding */
+  x.s.exp = G >> 2;
+  x.s.manh = ((G & 3) << 18) | (d1 << 8) | (d2 >> 2);
+  x.s.manl = (d2 & 3) << 30;
+  x.s.manl |= (d3 << 20) | (d4 << 10) | d5;
+#else /* BID format */
+  {
+    mp_size_t rn;
+    mp_limb_t rp[2];
+    int case_i = strcmp (m, "9007199254740992") < 0;
+
+    for (n = 0; n < 16; n++)
+      m[n] -= '0';
+    rn = mpn_set_str (rp, (unsigned char *) m, 16, 10);
+    if (rn == 1)
+      rp[1] = 0;
+#if GMP_NUMB_BITS > 32
+    rp[1] = rp[1] << (GMP_NUMB_BITS - 32);
+    rp[1] |= rp[0] >> 32;
+    rp[0] &= 4294967295UL;
+#endif
+    if (case_i)
+      { /* s < 2^53: case i) */
+        x.s.exp = exp << 1;
+        x.s.manl = rp[0]; /* 32 bits */
+        x.s.manh = rp[1] & 1048575; /* 20 low bits */
+        x.s.exp |= rp[1] >> 20; /* 1 bit */
+      }
+    else /* s >= 2^53: case ii) */
+      {
+        x.s.exp = 1536 | (exp >> 1);
+        x.s.manl = rp[0];
+        x.s.manh = (rp[1] ^ 2097152) | ((exp & 1) << 19);
+      }
+  }
+#endif /* DPD_FORMAT */
+  y.d = x.d;
+  return y.d64;
+}
+
+/* Convert src to a _Decimal64, rounding in direction rnd_mode, with
+   overflow to +/-Inf or the largest finite decimal64 and underflow to
+   +/-0 or the smallest non-zero decimal64 according to rnd_mode.
+   Fix: the two overflow conditions tested "MPFR_RNDZ || ..."; the enum
+   constant is nonzero, so the test was always true and Inf was never
+   returned.  The intended test is "rnd_mode == MPFR_RNDZ || ...". */
+_Decimal64
+mpfr_get_decimal64 (mpfr_srcptr src, mpfr_rnd_t rnd_mode)
+{
+  int negative;
+  mpfr_exp_t e;
+
+  /* the encoding of NaN, Inf, zero is the same under DPD or BID */
+  if (MPFR_UNLIKELY (MPFR_IS_SINGULAR (src)))
+    {
+      if (MPFR_IS_NAN (src))
+        return get_decimal64_nan ();
+
+      negative = MPFR_IS_NEG (src);
+
+      if (MPFR_IS_INF (src))
+        return get_decimal64_inf (negative);
+
+      MPFR_ASSERTD (MPFR_IS_ZERO(src));
+      return get_decimal64_zero (negative);
+    }
+
+  e = MPFR_GET_EXP (src);
+  negative = MPFR_IS_NEG (src);
+
+  /* away-from-zero reduces to up (positive) or down (negative) */
+  if (MPFR_UNLIKELY(rnd_mode == MPFR_RNDA))
+    rnd_mode = negative ? MPFR_RNDD : MPFR_RNDU;
+
+  /* the smallest decimal64 number is 10^(-398),
+     with 2^(-1323) < 10^(-398) < 2^(-1322) */
+  if (MPFR_UNLIKELY (e < -1323)) /* src <= 2^(-1324) < 1/2*10^(-398) */
+    {
+      if (rnd_mode == MPFR_RNDZ || rnd_mode == MPFR_RNDN
+          || (rnd_mode == MPFR_RNDD && negative == 0)
+          || (rnd_mode == MPFR_RNDU && negative != 0))
+        return get_decimal64_zero (negative);
+      else /* return the smallest non-zero number */
+        return get_decimal64_min (negative);
+    }
+  /* the largest decimal64 number is just below 10^(385) < 2^1279 */
+  else if (MPFR_UNLIKELY (e > 1279)) /* then src >= 2^1279 */
+    {
+      /* bug fix: was "if (MPFR_RNDZ || ...)", always true */
+      if (rnd_mode == MPFR_RNDZ || (rnd_mode == MPFR_RNDU && negative != 0)
+          || (rnd_mode == MPFR_RNDD && negative == 0))
+        return get_decimal64_max (negative);
+      else
+        return get_decimal64_inf (negative);
+    }
+  else
+    {
+      /* need the sign (1 char), 16 mantissa digits, 'E', the exponent
+         (at most sign + 3 digits) and the terminating null:
+         23 characters are enough */
+      char s[23];
+      mpfr_get_str (s, &e, 10, 16, src, rnd_mode);
+      /* the smallest normal number is 1.000...000E-383,
+         which corresponds to s=[0.]1000...000 and e=-382 */
+      if (e < -382)
+        {
+          /* the smallest subnormal number is 0.000...001E-383 = 1E-398,
+             which corresponds to s=[0.]1000...000 and e=-397 */
+          if (e < -397)
+            {
+              if (rnd_mode == MPFR_RNDZ || rnd_mode == MPFR_RNDN
+                  || (rnd_mode == MPFR_RNDD && negative == 0)
+                  || (rnd_mode == MPFR_RNDU && negative != 0))
+                return get_decimal64_zero (negative);
+              else /* return the smallest non-zero number */
+                return get_decimal64_min (negative);
+            }
+          else /* subnormal result: fewer significant digits available */
+            {
+              mpfr_exp_t e2;
+              long digits = 16 - (-382 - e);
+              /* if e = -397 then 16 - (-382 - e) = 1 */
+              mpfr_get_str (s, &e2, 10, digits, src, rnd_mode);
+              /* Warning: we can have e2 = e + 1 here, when rounding to
+                 nearest or away from zero. */
+              s[negative + digits] = 'E';
+              sprintf (s + negative + digits + 1, "%ld",
+                       (long int)e2 - digits);
+              return string_to_Decimal64 (s);
+            }
+        }
+      /* the largest number is 9.999...999E+384,
+         which corresponds to s=[0.]9999...999 and e=385 */
+      else if (e > 385)
+        {
+          /* bug fix: was "if (MPFR_RNDZ || ...)", always true */
+          if (rnd_mode == MPFR_RNDZ || (rnd_mode == MPFR_RNDU && negative != 0)
+              || (rnd_mode == MPFR_RNDD && negative == 0))
+            return get_decimal64_max (negative);
+          else
+            return get_decimal64_inf (negative);
+        }
+      else /* -382 <= e <= 385 */
+        {
+          s[16 + negative] = 'E';
+          sprintf (s + 17 + negative, "%ld", (long int)e - 16);
+          return string_to_Decimal64 (s);
+        }
+    }
+}
+
+#endif /* MPFR_WANT_DECIMAL_FLOATS */
diff --git a/src/get_exp.c b/src/get_exp.c
new file mode 100644
index 000000000..9e384a293
--- /dev/null
+++ b/src/get_exp.c
@@ -0,0 +1,31 @@
+/* mpfr_get_exp - get the exponent of a floating-point number
+
+Copyright 2002, 2003, 2004, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#include "mpfr-impl.h"
+
+#undef mpfr_get_exp
+/* Return the exponent of x; the assertion requires x to be an ordinary
+   non-zero number (fails on NaN, Inf and zero). */
+mpfr_exp_t
+mpfr_get_exp (mpfr_srcptr x)
+{
+  MPFR_ASSERTN(MPFR_IS_PURE_FP(x));
+  return MPFR_EXP(x); /* do not use MPFR_GET_EXP of course... */
+}
diff --git a/src/get_f.c b/src/get_f.c
new file mode 100644
index 000000000..5725407a3
--- /dev/null
+++ b/src/get_f.c
@@ -0,0 +1,148 @@
+/* mpfr_get_f -- convert a MPFR number to a GNU MPF number
+
+Copyright 2005, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#include "mpfr-impl.h"
+
+/* Convert the MPFR number y to the GNU MP mpf number x, rounding in
+   direction rnd_mode.
+   Since MPFR-3.0, return the usual inexact value.
+   The erange flag is set if an error occurred in the conversion
+   (y is NaN, +Inf, or -Inf that have no equivalent in mpf)
+*/
+int
+mpfr_get_f (mpf_ptr x, mpfr_srcptr y, mpfr_rnd_t rnd_mode)
+{
+  int inex;
+  mp_size_t sx, sy;
+  mpfr_prec_t precx, precy;
+  mp_limb_t *xp;
+  int sh;
+
+  if (MPFR_UNLIKELY(MPFR_IS_SINGULAR(y)))
+    {
+      if (MPFR_IS_ZERO(y))
+        {
+          mpf_set_ui (x, 0);
+          return 0;
+        }
+      else if (MPFR_IS_NAN (y))
+        {
+          MPFR_SET_ERANGE ();
+          return 0;
+        }
+      else /* y is plus infinity (resp. minus infinity), set x to the maximum
+              value (resp. the minimum value) in precision PREC(x) */
+        {
+          int i;
+          mp_limb_t *xp; /* intentionally shadows the outer xp */
+
+          MPFR_SET_ERANGE ();
+
+          /* To this day, [mp_exp_t] and mp_size_t are #defined as the same
+             type */
+          EXP (x) = MP_SIZE_T_MAX;
+
+          sx = PREC (x);
+          SIZ (x) = sx;
+          xp = LIMBS (x);
+          for (i = 0; i < sx; i++)
+            xp[i] = MP_LIMB_T_MAX;
+
+          if (MPFR_IS_POS (y))
+            return -1;
+          else
+            {
+              mpf_neg (x, x);
+              return +1;
+            }
+        }
+    }
+
+  sx = PREC(x); /* number of limbs of the mantissa of x */
+
+  precy = MPFR_PREC(y);
+  precx = (mpfr_prec_t) sx * GMP_NUMB_BITS;
+  sy = MPFR_LIMB_SIZE (y);
+
+  xp = PTR (x);
+
+  /* since mpf numbers are represented in base 2^GMP_NUMB_BITS,
+     we lose -EXP(y) % GMP_NUMB_BITS bits in the most significant limb */
+  sh = MPFR_GET_EXP(y) % GMP_NUMB_BITS;
+  sh = sh <= 0 ? - sh : GMP_NUMB_BITS - sh;
+  MPFR_ASSERTD (sh >= 0);
+  if (precy + sh <= precx) /* we can copy directly */
+    {
+      mp_size_t ds;
+
+      MPFR_ASSERTN (sx >= sy);
+      ds = sx - sy;
+
+      if (sh != 0)
+        {
+          mp_limb_t out;
+          out = mpn_rshift (xp + ds, MPFR_MANT(y), sy, sh);
+          MPFR_ASSERTN (ds > 0 || out == 0);
+          if (ds > 0)
+            xp[--ds] = out;
+        }
+      else
+        MPN_COPY (xp + ds, MPFR_MANT (y), sy);
+      if (ds > 0)
+        MPN_ZERO (xp, ds);
+      EXP(x) = (MPFR_GET_EXP(y) + sh) / GMP_NUMB_BITS;
+      inex = 0;
+    }
+  else /* we have to round to precx - sh bits */
+    {
+      mpfr_t z;
+      mp_size_t sz;
+
+      /* Recall that precx = (mpfr_prec_t) sx * GMP_NUMB_BITS, thus removing
+         sh bits (sh < GMP_NUMB_BITS) won't reduce the number of limbs. */
+      mpfr_init2 (z, precx - sh);
+      sz = MPFR_LIMB_SIZE (z);
+      MPFR_ASSERTN (sx == sz);
+
+      inex = mpfr_set (z, y, rnd_mode);
+      /* warning, sh may change due to rounding, but then z is a power of two,
+         thus we can safely ignore its last bit which is 0 */
+      sh = MPFR_GET_EXP(z) % GMP_NUMB_BITS;
+      sh = sh <= 0 ? - sh : GMP_NUMB_BITS - sh;
+      MPFR_ASSERTD (sh >= 0);
+      if (sh != 0)
+        {
+          mp_limb_t out;
+          out = mpn_rshift (xp, MPFR_MANT(z), sz, sh);
+          /* If sh hasn't changed, it is the number of the non-significant
+             bits in the lowest limb of z. Therefore out == 0. */
+          MPFR_ASSERTD (out == 0); (void) out; /* avoid a warning */
+        }
+      else
+        MPN_COPY (xp, MPFR_MANT(z), sz);
+      EXP(x) = (MPFR_GET_EXP(z) + sh) / GMP_NUMB_BITS;
+      mpfr_clear (z);
+    }
+
+  /* set size and sign */
+  SIZ(x) = (MPFR_FROM_SIGN_TO_INT(MPFR_SIGN(y)) < 0) ? -sx : sx;
+
+  return inex;
+}
diff --git a/src/get_flt.c b/src/get_flt.c
new file mode 100644
index 000000000..ce7d717d2
--- /dev/null
+++ b/src/get_flt.c
@@ -0,0 +1,123 @@
+/* mpfr_get_flt -- convert a mpfr_t to a machine single precision float
+
+Copyright 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#include <float.h> /* for FLT_MIN */
+
+#define MPFR_NEED_LONGLONG_H
+#include "mpfr-impl.h"
+
+#include "ieee_floats.h"
+
+#define FLT_NEG_ZERO ((float) DBL_NEG_ZERO)
+#define MPFR_FLT_INFM ((float) MPFR_DBL_INFM)
+#define MPFR_FLT_INFP ((float) MPFR_DBL_INFP)
+
+/* Convert src to a single-precision float, rounding in direction
+   rnd_mode, with underflow to +/-0 or the smallest subnormal and
+   overflow to +/-FLT_MAX or +/-Inf according to rnd_mode. */
+float
+mpfr_get_flt (mpfr_srcptr src, mpfr_rnd_t rnd_mode)
+{
+  int negative;
+  mpfr_exp_t e;
+  float d;
+
+  /* in case of NaN, +Inf, -Inf, +0, -0, the conversion from double to float
+     is exact */
+  if (MPFR_UNLIKELY (MPFR_IS_SINGULAR (src)))
+    return (float) mpfr_get_d (src, rnd_mode);
+
+  e = MPFR_GET_EXP (src);
+  negative = MPFR_IS_NEG (src);
+
+  /* away-from-zero reduces to up (positive) or down (negative) */
+  if (MPFR_UNLIKELY(rnd_mode == MPFR_RNDA))
+    rnd_mode = negative ? MPFR_RNDD : MPFR_RNDU;
+
+  /* the smallest positive normal float number is 2^(-126) = 0.5*2^(-125),
+     and the smallest positive subnormal number is 2^(-149) = 0.5*2^(-148) */
+  if (MPFR_UNLIKELY (e < -148))
+    {
+      /* |src| < 2^(-149), i.e., |src| is smaller than the smallest positive
+         subnormal number.
+         In round-to-nearest mode, 2^(-150) is rounded to zero.
+      */
+      d = negative ?
+        (rnd_mode == MPFR_RNDD ||
+         (rnd_mode == MPFR_RNDN && mpfr_cmp_si_2exp (src, -1, -150) < 0)
+         ? -FLT_MIN : FLT_NEG_ZERO) :
+        (rnd_mode == MPFR_RNDU ||
+         (rnd_mode == MPFR_RNDN && mpfr_cmp_si_2exp (src, 1, -150) > 0)
+         ? FLT_MIN : 0.0);
+      if (d != 0.0) /* we multiply FLT_MIN = 2^(-126) by FLT_EPSILON = 2^(-23)
+                       to get +-2^(-149) */
+        d *= FLT_EPSILON;
+    }
+  /* the largest normal number is 2^128*(1-2^(-24)) = 0.111...111e128 */
+  else if (MPFR_UNLIKELY (e > 128))
+    {
+      d = negative ?
+        (rnd_mode == MPFR_RNDZ || rnd_mode == MPFR_RNDU ?
+         -FLT_MAX : MPFR_FLT_INFM) :
+        (rnd_mode == MPFR_RNDZ || rnd_mode == MPFR_RNDD ?
+         FLT_MAX : MPFR_FLT_INFP);
+    }
+  else /* -148 <= e <= 127 */
+    {
+      int nbits;
+      mp_size_t np, i;
+      mp_limb_t tp[MPFR_LIMBS_PER_FLT];
+      int carry;
+      double dd;
+
+      nbits = IEEE_FLT_MANT_DIG; /* 24 */
+      if (MPFR_UNLIKELY (e < -125))
+        /* In the subnormal case, compute the exact number of significant
+           bits */
+        {
+          nbits += (125 + e);
+          MPFR_ASSERTD (nbits >= 1);
+        }
+      np = (nbits + GMP_NUMB_BITS - 1) / GMP_NUMB_BITS;
+      MPFR_ASSERTD(np <= MPFR_LIMBS_PER_FLT);
+      carry = mpfr_round_raw_4 (tp, MPFR_MANT(src), MPFR_PREC(src), negative,
+                                nbits, rnd_mode);
+      /* we perform the reconstruction using the 'double' type here,
+         knowing the result is exactly representable as 'float' */
+      if (MPFR_UNLIKELY(carry))
+        dd = 1.0;
+      else
+        {
+          /* The following computations are exact thanks to the previous
+             mpfr_round_raw. */
+          dd = (double) tp[0] / MP_BASE_AS_DOUBLE;
+          for (i = 1 ; i < np ; i++)
+            dd = (dd + tp[i]) / MP_BASE_AS_DOUBLE;
+          /* dd is the mantissa (between 1/2 and 1) of the argument rounded
+             to 24 bits */
+        }
+      dd = mpfr_scale2 (dd, e);
+      if (negative)
+        dd = -dd;
+
+      /* convert (exactly) to float */
+      d = (float) dd;
+    }
+
+  return d;
+}
+
diff --git a/src/get_ld.c b/src/get_ld.c
new file mode 100644
index 000000000..b6c05d28c
--- /dev/null
+++ b/src/get_ld.c
@@ -0,0 +1,215 @@
+/* mpfr_get_ld, mpfr_get_ld_2exp -- convert a multiple precision floating-point
+ number to a machine long double
+
+Copyright 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#include <float.h>
+
+#include "mpfr-impl.h"
+
+#ifndef HAVE_LDOUBLE_IEEE_EXT_LITTLE
+
+/* Generic version (no IEEE extended little-endian layout available):
+   build the long double as an exact sum of doubles, then scale by the
+   exponent with binary exponentiation. */
+long double
+mpfr_get_ld (mpfr_srcptr x, mpfr_rnd_t rnd_mode)
+{
+
+  if (MPFR_UNLIKELY (MPFR_IS_SINGULAR (x)))
+    return (long double) mpfr_get_d (x, rnd_mode);
+  else /* now x is a normal non-zero number */
+    {
+      long double r; /* result */
+      long double m;
+      double s; /* part of result */
+      mpfr_exp_t sh; /* exponent shift, so that x/2^sh is in the double range */
+      mpfr_t y, z;
+      int sign;
+
+      /* first round x to the target long double precision, so that
+         all subsequent operations are exact (this avoids double rounding
+         problems) */
+      mpfr_init2 (y, MPFR_LDBL_MANT_DIG);
+      mpfr_init2 (z, IEEE_DBL_MANT_DIG);
+
+      mpfr_set (y, x, rnd_mode);
+      sh = MPFR_GET_EXP (y);
+      sign = MPFR_SIGN (y);
+      MPFR_SET_EXP (y, 0);  /* work on the mantissa only */
+      MPFR_SET_POS (y);
+
+      /* accumulate r as a sum of double-precision high parts of y,
+         subtracting each part exactly until y is exhausted */
+      r = 0.0;
+      do {
+        s = mpfr_get_d (y, MPFR_RNDN); /* high part of y */
+        r += (long double) s;
+        mpfr_set_d (z, s, MPFR_RNDN); /* exact */
+        mpfr_sub (y, y, z, MPFR_RNDN); /* exact */
+      } while (!MPFR_IS_ZERO (y));
+
+      mpfr_clear (z);
+      mpfr_clear (y);
+
+      /* we now have to multiply back by 2^sh */
+      MPFR_ASSERTD (r > 0);
+      if (sh != 0)
+        {
+          /* An overflow may occur (example: 0.5*2^1024) */
+          while (r < 1.0)
+            {
+              r += r;
+              sh--;
+            }
+
+          if (sh > 0)
+            m = 2.0;
+          else
+            {
+              m = 0.5;
+              sh = -sh;
+            }
+
+          /* binary exponentiation: multiply r by m^sh */
+          for (;;)
+            {
+              if (sh % 2)
+                r = r * m;
+              sh >>= 1;
+              if (sh == 0)
+                break;
+              m = m * m;
+            }
+        }
+      if (sign < 0)
+        r = -r;
+      return r;
+    }
+}
+
+#else
+
+/* Version for IEEE extended precision, little endian: round to 64-bit
+   extended precision in the extended exponent range, then fill the
+   long double bit fields directly. */
+long double
+mpfr_get_ld (mpfr_srcptr x, mpfr_rnd_t rnd_mode)
+{
+  mpfr_long_double_t ld;
+  mpfr_t tmp;
+  int inex;
+  MPFR_SAVE_EXPO_DECL (expo);
+
+  MPFR_SAVE_EXPO_MARK (expo);
+
+  mpfr_init2 (tmp, MPFR_LDBL_MANT_DIG);
+  inex = mpfr_set (tmp, x, rnd_mode);
+
+  /* emin takes the 63 subnormal bits below 2^(-16382) into account */
+  mpfr_set_emin (-16382-63);
+  mpfr_set_emax (16384);
+  mpfr_subnormalize (tmp, mpfr_check_range (tmp, inex, rnd_mode), rnd_mode);
+  mpfr_prec_round (tmp, 64, MPFR_RNDZ); /* exact */
+  if (MPFR_UNLIKELY (MPFR_IS_SINGULAR (tmp)))
+    ld.ld = (long double) mpfr_get_d (tmp, rnd_mode);
+  else
+    {
+      mp_limb_t *tmpmant;
+      mpfr_exp_t e, denorm;
+
+      tmpmant = MPFR_MANT (tmp);
+      e = MPFR_GET_EXP (tmp);
+      /* the smallest normal number is 2^(-16382), which is 0.5*2^(-16381)
+         in MPFR, thus any exponent <= -16382 corresponds to a subnormal
+         number */
+      denorm = MPFR_UNLIKELY (e <= -16382) ? - e - 16382 + 1 : 0;
+#if GMP_NUMB_BITS >= 64
+      ld.s.manl = (tmpmant[0] >> denorm);
+      ld.s.manh = (tmpmant[0] >> denorm) >> 32;
+#elif GMP_NUMB_BITS == 32
+      if (MPFR_LIKELY (denorm == 0))
+        {
+          ld.s.manl = tmpmant[0];
+          ld.s.manh = tmpmant[1];
+        }
+      else if (denorm < 32)
+        {
+          ld.s.manl = (tmpmant[0] >> denorm) | (tmpmant[1] << (32 - denorm));
+          ld.s.manh = tmpmant[1] >> denorm;
+        }
+      else /* 32 <= denorm <= 64 */
+        {
+          ld.s.manl = tmpmant[1] >> (denorm - 32);
+          ld.s.manh = 0;
+        }
+#else
+# error "GMP_NUMB_BITS must be 32 or >= 64"
+      /* Other values have never been supported anyway. */
+#endif
+      if (MPFR_LIKELY (denorm == 0))
+        {
+          /* biased exponent: e - 1 + 16383 = e + 0x3FFE */
+          ld.s.exph = (e + 0x3FFE) >> 8;
+          ld.s.expl = (e + 0x3FFE);
+        }
+      else
+        ld.s.exph = ld.s.expl = 0; /* subnormal: exponent field is zero */
+      ld.s.sign = MPFR_IS_NEG (x);
+    }
+
+  mpfr_clear (tmp);
+  MPFR_SAVE_EXPO_FREE (expo);
+  return ld.ld;
+}
+
+#endif
+
+/* contributed by Damien Stehle */
+/* Analogue of mpfr_get_d_2exp for long double: return the mantissa of
+   src in [0.5, 1) (or (-1, -0.5]) rounded in direction rnd_mode, and
+   store the corresponding exponent in *expptr. */
+long double
+mpfr_get_ld_2exp (long *expptr, mpfr_srcptr src, mpfr_rnd_t rnd_mode)
+{
+  long double ret;
+  mpfr_exp_t exp;
+  mpfr_t tmp;
+
+  if (MPFR_UNLIKELY (MPFR_IS_SINGULAR (src)))
+    return (long double) mpfr_get_d_2exp (expptr, src, rnd_mode);
+
+  /* Shallow structure copy: tmp shares src's mantissa, but its exponent
+     field can be overwritten below without modifying src. */
+  tmp[0] = *src; /* Hack copy mpfr_t */
+  MPFR_SET_EXP (tmp, 0);
+  ret = mpfr_get_ld (tmp, rnd_mode);
+
+  if (MPFR_IS_PURE_FP(src))
+    {
+      exp = MPFR_GET_EXP (src);
+
+      /* rounding can give 1.0, adjust back to 0.5 <= abs(ret) < 1.0 */
+      if (ret == 1.0)
+        {
+          ret = 0.5;
+          exp ++;
+        }
+      else if (ret == -1.0)
+        {
+          ret = -0.5;
+          exp ++;
+        }
+
+      MPFR_ASSERTN ((ret >= 0.5 && ret < 1.0)
+                    || (ret <= -0.5 && ret > -1.0));
+      MPFR_ASSERTN (exp >= LONG_MIN && exp <= LONG_MAX);
+    }
+  else
+    exp = 0;
+
+  *expptr = exp;
+  return ret;
+}
diff --git a/src/get_si.c b/src/get_si.c
new file mode 100644
index 000000000..8f836f8e0
--- /dev/null
+++ b/src/get_si.c
@@ -0,0 +1,69 @@
+/* mpfr_get_si -- convert a floating-point number to a signed long.
+
+Copyright 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#include "mpfr-impl.h"
+
+/* Convert f to a long, rounding to an integer in direction rnd.
+   If f does not fit in a long (or is NaN), the erange flag is set and
+   0 (NaN), LONG_MIN (negative) or LONG_MAX (positive) is returned. */
+long
+mpfr_get_si (mpfr_srcptr f, mpfr_rnd_t rnd)
+{
+  mpfr_prec_t prec;
+  long s;
+  mpfr_t x;
+
+  if (MPFR_UNLIKELY (!mpfr_fits_slong_p (f, rnd)))
+    {
+      MPFR_SET_ERANGE ();
+      return MPFR_IS_NAN (f) ? 0 :
+        MPFR_IS_NEG (f) ? LONG_MIN : LONG_MAX;
+    }
+
+  if (MPFR_IS_ZERO (f))
+    return (long) 0;
+
+  /* determine prec of long: count the bits of LONG_MIN by halving */
+  for (s = LONG_MIN, prec = 0; s != 0; s /= 2, prec++)
+    { }
+
+  /* first round to prec bits */
+  mpfr_init2 (x, prec);
+  mpfr_rint (x, f, rnd);
+
+  /* warning: if x=0, taking its exponent is illegal */
+  if (MPFR_UNLIKELY (MPFR_IS_ZERO(x)))
+    s = 0;
+  else
+    {
+      mp_limb_t a;
+      mp_size_t n;
+      mpfr_exp_t exp;
+
+      /* now the result is in the most significant limb of x */
+      exp = MPFR_GET_EXP (x); /* since |x| >= 1, exp >= 1 */
+      n = MPFR_LIMB_SIZE(x);
+      a = MPFR_MANT(x)[n - 1] >> (GMP_NUMB_BITS - exp);
+      /* the a <= LONG_MAX test avoids negating a magnitude (|LONG_MIN|)
+         that does not fit in a long */
+      s = MPFR_SIGN(f) > 0 ? a : a <= LONG_MAX ? - (long) a : LONG_MIN;
+    }
+
+  mpfr_clear (x);
+
+  return s;
+}
diff --git a/src/get_sj.c b/src/get_sj.c
new file mode 100644
index 000000000..e02adff15
--- /dev/null
+++ b/src/get_sj.c
@@ -0,0 +1,136 @@
+/* mpfr_get_sj -- convert a MPFR number to a huge machine signed integer
+
+Copyright 2004, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#ifdef HAVE_CONFIG_H
+# include "config.h" /* for a build within gmp */
+#endif
+
+/* The ISO C99 standard specifies that in C++ implementations the
+ INTMAX_MAX, ... macros should only be defined if explicitly requested. */
+#if defined __cplusplus
+# define __STDC_LIMIT_MACROS
+# define __STDC_CONSTANT_MACROS
+#endif
+
+#if HAVE_INTTYPES_H
+# include <inttypes.h>
+#endif
+#if HAVE_STDINT_H
+# include <stdint.h>
+#endif
+
+#include "mpfr-impl.h"
+
+#ifdef _MPFR_H_HAVE_INTMAX_T
+
+intmax_t
+mpfr_get_sj (mpfr_srcptr f, mpfr_rnd_t rnd)
+{
+ intmax_t r;
+ mpfr_prec_t prec;
+ mpfr_t x;
+
+ if (MPFR_UNLIKELY (!mpfr_fits_intmax_p (f, rnd)))
+ {
+ MPFR_SET_ERANGE ();
+ return MPFR_IS_NAN (f) ? 0 :
+ MPFR_IS_NEG (f) ? MPFR_INTMAX_MIN : MPFR_INTMAX_MAX;
+ }
+
+ if (MPFR_IS_ZERO (f))
+ return (intmax_t) 0;
+
+ /* determine the precision of intmax_t */
+ for (r = MPFR_INTMAX_MIN, prec = 0; r != 0; r /= 2, prec++)
+ { }
+ /* Note: though INTMAX_MAX would have been sufficient for the conversion,
+ we chose INTMAX_MIN so that INTMAX_MIN - 1 is always representable in
+ precision prec; this is useful to detect overflows in MPFR_RNDZ (will
+ be needed later). */
+
+ /* Now, r = 0. */
+
+ mpfr_init2 (x, prec);
+ mpfr_rint (x, f, rnd);
+ MPFR_ASSERTN (MPFR_IS_FP (x));
+
+ if (MPFR_NOTZERO (x))
+ {
+ mp_limb_t *xp;
+ int sh, n; /* An int should be sufficient in this context. */
+
+ xp = MPFR_MANT (x);
+ sh = MPFR_GET_EXP (x);
+ MPFR_ASSERTN ((mpfr_prec_t) sh <= prec);
+ if (MPFR_INTMAX_MIN + MPFR_INTMAX_MAX != 0
+ && MPFR_UNLIKELY ((mpfr_prec_t) sh == prec))
+ {
+ /* 2's complement and x <= INTMAX_MIN: in the case mp_limb_t
+ has the same size as intmax_t, we cannot use the code in
+ the for loop since the operations would be performed in
+ unsigned arithmetic. */
+ MPFR_ASSERTN (MPFR_IS_NEG (x) && (mpfr_powerof2_raw (x)));
+ r = MPFR_INTMAX_MIN;
+ }
+ else if (MPFR_IS_POS (x))
+ {
+ /* Note: testing the condition sh >= 0 is necessary to avoid
+ an undefined behavior on xp[n] >> S when S >= GMP_NUMB_BITS
+ (even though xp[n] == 0 in such a case). This can happen if
+ sizeof(mp_limb_t) < sizeof(intmax_t) and |x| is small enough
+ because of the trailing bits due to its normalization. */
+ for (n = MPFR_LIMB_SIZE (x) - 1; n >= 0 && sh >= 0; n--)
+ {
+ sh -= GMP_NUMB_BITS;
+             /* Note concerning the casts below:
+ When sh >= 0, the cast must be performed before the shift
+ for the case sizeof(intmax_t) > sizeof(mp_limb_t).
+ When sh < 0, the cast must be performed after the shift
+ for the case sizeof(intmax_t) == sizeof(mp_limb_t), as
+ mp_limb_t is unsigned, therefore not representable as an
+ intmax_t when the MSB is 1 (this is the case here). */
+ MPFR_ASSERTD (sh < GMP_NUMB_BITS && -sh < GMP_NUMB_BITS);
+ r += (sh >= 0
+ ? (intmax_t) xp[n] << sh
+ : (intmax_t) (xp[n] >> (-sh)));
+ }
+ }
+ else
+ {
+ /* See the comments for the case x positive. */
+ for (n = MPFR_LIMB_SIZE (x) - 1; n >= 0 && sh >= 0; n--)
+ {
+ sh -= GMP_NUMB_BITS;
+ MPFR_ASSERTD (sh < GMP_NUMB_BITS && -sh < GMP_NUMB_BITS);
+ r -= (sh >= 0
+ ? (intmax_t) xp[n] << sh
+ : (intmax_t) (xp[n] >> (-sh)));
+ }
+ }
+ }
+
+ mpfr_clear (x);
+
+ return r;
+}
+
+#endif
diff --git a/src/get_str.c b/src/get_str.c
new file mode 100644
index 000000000..aa7ad18b7
--- /dev/null
+++ b/src/get_str.c
@@ -0,0 +1,2554 @@
+/* mpfr_get_str -- output a floating-point number to a string
+
+Copyright 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+Contributed by Alain Delplanque and Paul Zimmermann.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#define MPFR_NEED_LONGLONG_H
+#include "mpfr-impl.h"
+
+static int mpfr_get_str_aux (char *const, mpfr_exp_t *const, mp_limb_t *const,
+ mp_size_t, mpfr_exp_t, long, int, size_t, mpfr_rnd_t);
+
+/* The implicit \0 is useless, but we do not write num_to_text[62] otherwise
+ g++ complains. */
+static const char num_to_text36[] = "0123456789abcdefghijklmnopqrstuvwxyz";
+static const char num_to_text62[] = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
+ "abcdefghijklmnopqrstuvwxyz";
+
+/* copy most important limbs of {op, n2} in {rp, n1} */
+/* if n1 > n2 put 0 in low limbs of {rp, n1} */
+#define MPN_COPY2(rp, n1, op, n2) \
+ if ((n1) <= (n2)) \
+ { \
+ MPN_COPY ((rp), (op) + (n2) - (n1), (n1)); \
+ } \
+ else \
+ { \
+ MPN_COPY ((rp) + (n1) - (n2), (op), (n2)); \
+ MPN_ZERO ((rp), (n1) - (n2)); \
+ }
+
+#define MPFR_ROUND_FAILED 3
+
+/* Input: an approximation r*2^f of a real Y, with |r*2^f-Y| <= 2^(e+f).
+ Returns if possible in the string s the mantissa corresponding to
+ the integer nearest to Y, within the direction rnd, and returns the
+ exponent in exp.
+ n is the number of limbs of r.
+ e represents the maximal error in the approximation of Y
+ (e < 0 iff the approximation is exact, i.e., r*2^f = Y).
+ b is the wanted base (2 <= b <= 36).
+ m is the number of wanted digits in the mantissa.
+ rnd is the rounding mode.
+ It is assumed that b^(m-1) <= Y < b^(m+1), thus the returned value
+ satisfies b^(m-1) <= rnd(Y) < b^(m+1).
+
+ Rounding may fail for two reasons:
+ - the error is too large to determine the integer N nearest to Y
+   - either the number of digits of N in base b is too large (m+1),
+     or N=2*N1+(b/2) and the rounding mode is to nearest. This can
+     only happen when b is even.
+
+ Return value:
+ - the direction of rounding (-1, 0, 1) if rounding is possible
+ - -MPFR_ROUND_FAILED if rounding not possible because m+1 digits
+ - MPFR_ROUND_FAILED otherwise (too large error)
+*/
+static int
+mpfr_get_str_aux (char *const str, mpfr_exp_t *const exp, mp_limb_t *const r,
+ mp_size_t n, mpfr_exp_t f, long e, int b, size_t m,
+ mpfr_rnd_t rnd)
+{
+ const char *num_to_text;
+ int dir; /* direction of the rounded result */
+ mp_limb_t ret = 0; /* possible carry in addition */
+ mp_size_t i0, j0; /* number of limbs and bits of Y */
+ unsigned char *str1; /* string of m+2 characters */
+ size_t size_s1; /* length of str1 */
+ mpfr_rnd_t rnd1;
+ size_t i;
+ int exact = (e < 0);
+ MPFR_TMP_DECL(marker);
+
+ /* if f > 0, then the maximal error 2^(e+f) is larger than 2 so we can't
+ determine the integer Y */
+ MPFR_ASSERTN(f <= 0);
+ /* if f is too small, then r*2^f is smaller than 1 */
+ MPFR_ASSERTN(f > (-n * GMP_NUMB_BITS));
+
+ MPFR_TMP_MARK(marker);
+
+ num_to_text = b < 37 ? num_to_text36 : num_to_text62;
+
+ /* R = 2^f sum r[i]K^(i)
+ r[i] = (r_(i,k-1)...r_(i,0))_2
+ R = sum r(i,j)2^(j+ki+f)
+ the bits from R are referenced by pairs (i,j) */
+
+ /* check if is possible to round r with rnd mode
+ where |r*2^f-Y| <= 2^(e+f)
+ the exponent of R is: f + n*GMP_NUMB_BITS
+ we must have e + f == f + n*GMP_NUMB_BITS - err
+ err = n*GMP_NUMB_BITS - e
+ R contains exactly -f bits after the integer point:
+ to determine the nearest integer, we thus need a precision of
+ n * GMP_NUMB_BITS + f */
+
+ if (exact || mpfr_can_round_raw (r, n, (mp_size_t) 1,
+ n * GMP_NUMB_BITS - e, MPFR_RNDN, rnd, n * GMP_NUMB_BITS + f))
+ {
+ /* compute the nearest integer to R */
+
+ /* bit of weight 0 in R has position j0 in limb r[i0] */
+ i0 = (-f) / GMP_NUMB_BITS;
+ j0 = (-f) % GMP_NUMB_BITS;
+
+ ret = mpfr_round_raw (r + i0, r, n * GMP_NUMB_BITS, 0,
+ n * GMP_NUMB_BITS + f, rnd, &dir);
+ MPFR_ASSERTD(dir != MPFR_ROUND_FAILED);
+
+ /* warning: mpfr_round_raw_generic returns MPFR_EVEN_INEX (2) or
+ -MPFR_EVEN_INEX (-2) in case of even rounding */
+
+ if (ret) /* Y is a power of 2 */
+ {
+ if (j0)
+ r[n - 1] = MPFR_LIMB_HIGHBIT >> (j0 - 1);
+ else /* j0=0, necessarily i0 >= 1 otherwise f=0 and r is exact */
+ {
+ r[n - 1] = ret;
+ r[--i0] = 0; /* set to zero the new low limb */
+ }
+ }
+ else /* shift r to the right by (-f) bits (i0 already done) */
+ {
+ if (j0)
+ mpn_rshift (r + i0, r + i0, n - i0, j0);
+ }
+
+ /* now the rounded value Y is in {r+i0, n-i0} */
+
+ /* convert r+i0 into base b */
+ str1 = (unsigned char*) MPFR_TMP_ALLOC (m + 3); /* need one extra character for mpn_get_str */
+ size_s1 = mpn_get_str (str1, b, r + i0, n - i0);
+
+ /* round str1 */
+ MPFR_ASSERTN(size_s1 >= m);
+ *exp = size_s1 - m; /* number of superfluous characters */
+
+ /* if size_s1 = m + 2, necessarily we have b^(m+1) as result,
+ and the result will not change */
+
+ /* so we have to double-round only when size_s1 = m + 1 and
+ (i) the result is inexact
+ (ii) or the last digit is non-zero */
+ if ((size_s1 == m + 1) && ((dir != 0) || (str1[size_s1 - 1] != 0)))
+ {
+ /* rounding mode */
+ rnd1 = rnd;
+
+ /* round to nearest case */
+ if (rnd == MPFR_RNDN)
+ {
+ if (2 * str1[size_s1 - 1] == b)
+ {
+ if (dir == 0 && exact) /* exact: even rounding */
+ {
+ rnd1 = ((str1[size_s1 - 2] & 1) == 0)
+ ? MPFR_RNDD : MPFR_RNDU;
+ }
+ else
+ {
+ /* otherwise we cannot round correctly: for example
+ if b=10, we might have a mantissa of
+ xxxxxxx5.00000000 which can be rounded to nearest
+ to 8 digits but not to 7 */
+ dir = -MPFR_ROUND_FAILED;
+ MPFR_ASSERTD(dir != MPFR_EVEN_INEX);
+ goto free_and_return;
+ }
+ }
+ else if (2 * str1[size_s1 - 1] < b)
+ rnd1 = MPFR_RNDD;
+ else
+ rnd1 = MPFR_RNDU;
+ }
+
+ /* now rnd1 is either
+ MPFR_RNDD or MPFR_RNDZ -> truncate, or
+ MPFR_RNDU or MPFR_RNDA -> round toward infinity */
+
+ /* round away from zero */
+ if (rnd1 == MPFR_RNDU || rnd1 == MPFR_RNDA)
+ {
+ if (str1[size_s1 - 1] != 0)
+ {
+ /* the carry cannot propagate to the whole string, since
+ Y = x*b^(m-g) < 2*b^m <= b^(m+1)-b
+ where x is the input float */
+ MPFR_ASSERTN(size_s1 >= 2);
+ i = size_s1 - 2;
+ while (str1[i] == b - 1)
+ {
+ MPFR_ASSERTD(i > 0);
+ str1[i--] = 0;
+ }
+ str1[i]++;
+ }
+ dir = 1;
+ }
+ /* round toward zero (truncate) */
+ else
+ dir = -1;
+ }
+
+ /* copy str1 into str and convert to characters (digits and
+ lowercase letters from the source character set) */
+ for (i = 0; i < m; i++)
+ str[i] = num_to_text[(int) str1[i]]; /* str1[i] is an unsigned char */
+ str[m] = 0;
+ }
+ /* mpfr_can_round_raw failed: rounding is not possible */
+ else
+ {
+ dir = MPFR_ROUND_FAILED; /* should be different from MPFR_EVEN_INEX */
+ MPFR_ASSERTD(dir != MPFR_EVEN_INEX);
+ }
+
+ free_and_return:
+ MPFR_TMP_FREE(marker);
+
+ return dir;
+}
+
+/***************************************************************************
+ * __gmpfr_l2b[b-2][0] is a 23-bit upper approximation to log(b)/log(2), *
+ * __gmpfr_l2b[b-2][1] is a 76-bit upper approximation to log(2)/log(b). *
+ * The following code is generated by tests/tl2b (with an argument). *
+ ***************************************************************************/
+
+#if 0
+#elif GMP_NUMB_BITS == 16
+const mp_limb_t mpfr_l2b_2_0__tab[] = { 0x0000, 0x8000 };
+#elif GMP_NUMB_BITS == 32
+const mp_limb_t mpfr_l2b_2_0__tab[] = { 0x80000000 };
+#elif GMP_NUMB_BITS == 64
+const mp_limb_t mpfr_l2b_2_0__tab[] = { 0x8000000000000000 };
+#elif GMP_NUMB_BITS == 96
+const mp_limb_t mpfr_l2b_2_0__tab[] = { 0x800000000000000000000000 };
+#elif GMP_NUMB_BITS == 128
+const mp_limb_t mpfr_l2b_2_0__tab[] = { 0x80000000000000000000000000000000 };
+#elif GMP_NUMB_BITS == 256
+const mp_limb_t mpfr_l2b_2_0__tab[] = { 0x8000000000000000000000000000000000000000000000000000000000000000 };
+#endif
+
+#if 0
+#elif GMP_NUMB_BITS == 16
+const mp_limb_t mpfr_l2b_2_1__tab[] = { 0x0000, 0x0000, 0x0000, 0x0000, 0x8000 };
+#elif GMP_NUMB_BITS == 32
+const mp_limb_t mpfr_l2b_2_1__tab[] = { 0x00000000, 0x00000000, 0x80000000 };
+#elif GMP_NUMB_BITS == 64
+const mp_limb_t mpfr_l2b_2_1__tab[] = { 0x0000000000000000, 0x8000000000000000 };
+#elif GMP_NUMB_BITS == 96
+const mp_limb_t mpfr_l2b_2_1__tab[] = { 0x800000000000000000000000 };
+#elif GMP_NUMB_BITS == 128
+const mp_limb_t mpfr_l2b_2_1__tab[] = { 0x80000000000000000000000000000000 };
+#elif GMP_NUMB_BITS == 256
+const mp_limb_t mpfr_l2b_2_1__tab[] = { 0x8000000000000000000000000000000000000000000000000000000000000000 };
+#endif
+
+#if 0
+#elif GMP_NUMB_BITS == 16
+const mp_limb_t mpfr_l2b_3_0__tab[] = { 0x0e00, 0xcae0 };
+#elif GMP_NUMB_BITS == 32
+const mp_limb_t mpfr_l2b_3_0__tab[] = { 0xcae00e00 };
+#elif GMP_NUMB_BITS == 64
+const mp_limb_t mpfr_l2b_3_0__tab[] = { 0xcae00e0000000000 };
+#elif GMP_NUMB_BITS == 96
+const mp_limb_t mpfr_l2b_3_0__tab[] = { 0xcae00e000000000000000000 };
+#elif GMP_NUMB_BITS == 128
+const mp_limb_t mpfr_l2b_3_0__tab[] = { 0xcae00e00000000000000000000000000 };
+#elif GMP_NUMB_BITS == 256
+const mp_limb_t mpfr_l2b_3_0__tab[] = { 0xcae00e0000000000000000000000000000000000000000000000000000000000 };
+#endif
+
+#if 0
+#elif GMP_NUMB_BITS == 16
+const mp_limb_t mpfr_l2b_3_1__tab[] = { 0x0448, 0xe94e, 0xa9a9, 0x9cc1, 0xa184 };
+#elif GMP_NUMB_BITS == 32
+const mp_limb_t mpfr_l2b_3_1__tab[] = { 0x04480000, 0xa9a9e94e, 0xa1849cc1 };
+#elif GMP_NUMB_BITS == 64
+const mp_limb_t mpfr_l2b_3_1__tab[] = { 0x0448000000000000, 0xa1849cc1a9a9e94e };
+#elif GMP_NUMB_BITS == 96
+const mp_limb_t mpfr_l2b_3_1__tab[] = { 0xa1849cc1a9a9e94e04480000 };
+#elif GMP_NUMB_BITS == 128
+const mp_limb_t mpfr_l2b_3_1__tab[] = { 0xa1849cc1a9a9e94e0448000000000000 };
+#elif GMP_NUMB_BITS == 256
+const mp_limb_t mpfr_l2b_3_1__tab[] = { 0xa1849cc1a9a9e94e044800000000000000000000000000000000000000000000 };
+#endif
+
+#if 0
+#elif GMP_NUMB_BITS == 16
+const mp_limb_t mpfr_l2b_4_0__tab[] = { 0x0000, 0x8000 };
+#elif GMP_NUMB_BITS == 32
+const mp_limb_t mpfr_l2b_4_0__tab[] = { 0x80000000 };
+#elif GMP_NUMB_BITS == 64
+const mp_limb_t mpfr_l2b_4_0__tab[] = { 0x8000000000000000 };
+#elif GMP_NUMB_BITS == 96
+const mp_limb_t mpfr_l2b_4_0__tab[] = { 0x800000000000000000000000 };
+#elif GMP_NUMB_BITS == 128
+const mp_limb_t mpfr_l2b_4_0__tab[] = { 0x80000000000000000000000000000000 };
+#elif GMP_NUMB_BITS == 256
+const mp_limb_t mpfr_l2b_4_0__tab[] = { 0x8000000000000000000000000000000000000000000000000000000000000000 };
+#endif
+
+#if 0
+#elif GMP_NUMB_BITS == 16
+const mp_limb_t mpfr_l2b_4_1__tab[] = { 0x0000, 0x0000, 0x0000, 0x0000, 0x8000 };
+#elif GMP_NUMB_BITS == 32
+const mp_limb_t mpfr_l2b_4_1__tab[] = { 0x00000000, 0x00000000, 0x80000000 };
+#elif GMP_NUMB_BITS == 64
+const mp_limb_t mpfr_l2b_4_1__tab[] = { 0x0000000000000000, 0x8000000000000000 };
+#elif GMP_NUMB_BITS == 96
+const mp_limb_t mpfr_l2b_4_1__tab[] = { 0x800000000000000000000000 };
+#elif GMP_NUMB_BITS == 128
+const mp_limb_t mpfr_l2b_4_1__tab[] = { 0x80000000000000000000000000000000 };
+#elif GMP_NUMB_BITS == 256
+const mp_limb_t mpfr_l2b_4_1__tab[] = { 0x8000000000000000000000000000000000000000000000000000000000000000 };
+#endif
+
+#if 0
+#elif GMP_NUMB_BITS == 16
+const mp_limb_t mpfr_l2b_5_0__tab[] = { 0x7a00, 0x949a };
+#elif GMP_NUMB_BITS == 32
+const mp_limb_t mpfr_l2b_5_0__tab[] = { 0x949a7a00 };
+#elif GMP_NUMB_BITS == 64
+const mp_limb_t mpfr_l2b_5_0__tab[] = { 0x949a7a0000000000 };
+#elif GMP_NUMB_BITS == 96
+const mp_limb_t mpfr_l2b_5_0__tab[] = { 0x949a7a000000000000000000 };
+#elif GMP_NUMB_BITS == 128
+const mp_limb_t mpfr_l2b_5_0__tab[] = { 0x949a7a00000000000000000000000000 };
+#elif GMP_NUMB_BITS == 256
+const mp_limb_t mpfr_l2b_5_0__tab[] = { 0x949a7a0000000000000000000000000000000000000000000000000000000000 };
+#endif
+
+#if 0
+#elif GMP_NUMB_BITS == 16
+const mp_limb_t mpfr_l2b_5_1__tab[] = { 0x67b8, 0x9728, 0x287b, 0xa348, 0xdc81 };
+#elif GMP_NUMB_BITS == 32
+const mp_limb_t mpfr_l2b_5_1__tab[] = { 0x67b80000, 0x287b9728, 0xdc81a348 };
+#elif GMP_NUMB_BITS == 64
+const mp_limb_t mpfr_l2b_5_1__tab[] = { 0x67b8000000000000, 0xdc81a348287b9728 };
+#elif GMP_NUMB_BITS == 96
+const mp_limb_t mpfr_l2b_5_1__tab[] = { 0xdc81a348287b972867b80000 };
+#elif GMP_NUMB_BITS == 128
+const mp_limb_t mpfr_l2b_5_1__tab[] = { 0xdc81a348287b972867b8000000000000 };
+#elif GMP_NUMB_BITS == 256
+const mp_limb_t mpfr_l2b_5_1__tab[] = { 0xdc81a348287b972867b800000000000000000000000000000000000000000000 };
+#endif
+
+#if 0
+#elif GMP_NUMB_BITS == 16
+const mp_limb_t mpfr_l2b_6_0__tab[] = { 0x0800, 0xa570 };
+#elif GMP_NUMB_BITS == 32
+const mp_limb_t mpfr_l2b_6_0__tab[] = { 0xa5700800 };
+#elif GMP_NUMB_BITS == 64
+const mp_limb_t mpfr_l2b_6_0__tab[] = { 0xa570080000000000 };
+#elif GMP_NUMB_BITS == 96
+const mp_limb_t mpfr_l2b_6_0__tab[] = { 0xa57008000000000000000000 };
+#elif GMP_NUMB_BITS == 128
+const mp_limb_t mpfr_l2b_6_0__tab[] = { 0xa5700800000000000000000000000000 };
+#elif GMP_NUMB_BITS == 256
+const mp_limb_t mpfr_l2b_6_0__tab[] = { 0xa570080000000000000000000000000000000000000000000000000000000000 };
+#endif
+
+#if 0
+#elif GMP_NUMB_BITS == 16
+const mp_limb_t mpfr_l2b_6_1__tab[] = { 0xff10, 0xf9e9, 0xe054, 0x9236, 0xc611 };
+#elif GMP_NUMB_BITS == 32
+const mp_limb_t mpfr_l2b_6_1__tab[] = { 0xff100000, 0xe054f9e9, 0xc6119236 };
+#elif GMP_NUMB_BITS == 64
+const mp_limb_t mpfr_l2b_6_1__tab[] = { 0xff10000000000000, 0xc6119236e054f9e9 };
+#elif GMP_NUMB_BITS == 96
+const mp_limb_t mpfr_l2b_6_1__tab[] = { 0xc6119236e054f9e9ff100000 };
+#elif GMP_NUMB_BITS == 128
+const mp_limb_t mpfr_l2b_6_1__tab[] = { 0xc6119236e054f9e9ff10000000000000 };
+#elif GMP_NUMB_BITS == 256
+const mp_limb_t mpfr_l2b_6_1__tab[] = { 0xc6119236e054f9e9ff1000000000000000000000000000000000000000000000 };
+#endif
+
+#if 0
+#elif GMP_NUMB_BITS == 16
+const mp_limb_t mpfr_l2b_7_0__tab[] = { 0xb400, 0xb3ab };
+#elif GMP_NUMB_BITS == 32
+const mp_limb_t mpfr_l2b_7_0__tab[] = { 0xb3abb400 };
+#elif GMP_NUMB_BITS == 64
+const mp_limb_t mpfr_l2b_7_0__tab[] = { 0xb3abb40000000000 };
+#elif GMP_NUMB_BITS == 96
+const mp_limb_t mpfr_l2b_7_0__tab[] = { 0xb3abb4000000000000000000 };
+#elif GMP_NUMB_BITS == 128
+const mp_limb_t mpfr_l2b_7_0__tab[] = { 0xb3abb400000000000000000000000000 };
+#elif GMP_NUMB_BITS == 256
+const mp_limb_t mpfr_l2b_7_0__tab[] = { 0xb3abb40000000000000000000000000000000000000000000000000000000000 };
+#endif
+
+#if 0
+#elif GMP_NUMB_BITS == 16
+const mp_limb_t mpfr_l2b_7_1__tab[] = { 0x37b8, 0xa711, 0x754d, 0xc9d6, 0xb660 };
+#elif GMP_NUMB_BITS == 32
+const mp_limb_t mpfr_l2b_7_1__tab[] = { 0x37b80000, 0x754da711, 0xb660c9d6 };
+#elif GMP_NUMB_BITS == 64
+const mp_limb_t mpfr_l2b_7_1__tab[] = { 0x37b8000000000000, 0xb660c9d6754da711 };
+#elif GMP_NUMB_BITS == 96
+const mp_limb_t mpfr_l2b_7_1__tab[] = { 0xb660c9d6754da71137b80000 };
+#elif GMP_NUMB_BITS == 128
+const mp_limb_t mpfr_l2b_7_1__tab[] = { 0xb660c9d6754da71137b8000000000000 };
+#elif GMP_NUMB_BITS == 256
+const mp_limb_t mpfr_l2b_7_1__tab[] = { 0xb660c9d6754da71137b800000000000000000000000000000000000000000000 };
+#endif
+
+#if 0
+#elif GMP_NUMB_BITS == 16
+const mp_limb_t mpfr_l2b_8_0__tab[] = { 0x0000, 0xc000 };
+#elif GMP_NUMB_BITS == 32
+const mp_limb_t mpfr_l2b_8_0__tab[] = { 0xc0000000 };
+#elif GMP_NUMB_BITS == 64
+const mp_limb_t mpfr_l2b_8_0__tab[] = { 0xc000000000000000 };
+#elif GMP_NUMB_BITS == 96
+const mp_limb_t mpfr_l2b_8_0__tab[] = { 0xc00000000000000000000000 };
+#elif GMP_NUMB_BITS == 128
+const mp_limb_t mpfr_l2b_8_0__tab[] = { 0xc0000000000000000000000000000000 };
+#elif GMP_NUMB_BITS == 256
+const mp_limb_t mpfr_l2b_8_0__tab[] = { 0xc000000000000000000000000000000000000000000000000000000000000000 };
+#endif
+
+#if 0
+#elif GMP_NUMB_BITS == 16
+const mp_limb_t mpfr_l2b_8_1__tab[] = { 0xaab0, 0xaaaa, 0xaaaa, 0xaaaa, 0xaaaa };
+#elif GMP_NUMB_BITS == 32
+const mp_limb_t mpfr_l2b_8_1__tab[] = { 0xaab00000, 0xaaaaaaaa, 0xaaaaaaaa };
+#elif GMP_NUMB_BITS == 64
+const mp_limb_t mpfr_l2b_8_1__tab[] = { 0xaab0000000000000, 0xaaaaaaaaaaaaaaaa };
+#elif GMP_NUMB_BITS == 96
+const mp_limb_t mpfr_l2b_8_1__tab[] = { 0xaaaaaaaaaaaaaaaaaab00000 };
+#elif GMP_NUMB_BITS == 128
+const mp_limb_t mpfr_l2b_8_1__tab[] = { 0xaaaaaaaaaaaaaaaaaab0000000000000 };
+#elif GMP_NUMB_BITS == 256
+const mp_limb_t mpfr_l2b_8_1__tab[] = { 0xaaaaaaaaaaaaaaaaaab000000000000000000000000000000000000000000000 };
+#endif
+
+#if 0
+#elif GMP_NUMB_BITS == 16
+const mp_limb_t mpfr_l2b_9_0__tab[] = { 0x0e00, 0xcae0 };
+#elif GMP_NUMB_BITS == 32
+const mp_limb_t mpfr_l2b_9_0__tab[] = { 0xcae00e00 };
+#elif GMP_NUMB_BITS == 64
+const mp_limb_t mpfr_l2b_9_0__tab[] = { 0xcae00e0000000000 };
+#elif GMP_NUMB_BITS == 96
+const mp_limb_t mpfr_l2b_9_0__tab[] = { 0xcae00e000000000000000000 };
+#elif GMP_NUMB_BITS == 128
+const mp_limb_t mpfr_l2b_9_0__tab[] = { 0xcae00e00000000000000000000000000 };
+#elif GMP_NUMB_BITS == 256
+const mp_limb_t mpfr_l2b_9_0__tab[] = { 0xcae00e0000000000000000000000000000000000000000000000000000000000 };
+#endif
+
+#if 0
+#elif GMP_NUMB_BITS == 16
+const mp_limb_t mpfr_l2b_9_1__tab[] = { 0x0448, 0xe94e, 0xa9a9, 0x9cc1, 0xa184 };
+#elif GMP_NUMB_BITS == 32
+const mp_limb_t mpfr_l2b_9_1__tab[] = { 0x04480000, 0xa9a9e94e, 0xa1849cc1 };
+#elif GMP_NUMB_BITS == 64
+const mp_limb_t mpfr_l2b_9_1__tab[] = { 0x0448000000000000, 0xa1849cc1a9a9e94e };
+#elif GMP_NUMB_BITS == 96
+const mp_limb_t mpfr_l2b_9_1__tab[] = { 0xa1849cc1a9a9e94e04480000 };
+#elif GMP_NUMB_BITS == 128
+const mp_limb_t mpfr_l2b_9_1__tab[] = { 0xa1849cc1a9a9e94e0448000000000000 };
+#elif GMP_NUMB_BITS == 256
+const mp_limb_t mpfr_l2b_9_1__tab[] = { 0xa1849cc1a9a9e94e044800000000000000000000000000000000000000000000 };
+#endif
+
+#if 0
+#elif GMP_NUMB_BITS == 16
+const mp_limb_t mpfr_l2b_10_0__tab[] = { 0x7a00, 0xd49a };
+#elif GMP_NUMB_BITS == 32
+const mp_limb_t mpfr_l2b_10_0__tab[] = { 0xd49a7a00 };
+#elif GMP_NUMB_BITS == 64
+const mp_limb_t mpfr_l2b_10_0__tab[] = { 0xd49a7a0000000000 };
+#elif GMP_NUMB_BITS == 96
+const mp_limb_t mpfr_l2b_10_0__tab[] = { 0xd49a7a000000000000000000 };
+#elif GMP_NUMB_BITS == 128
+const mp_limb_t mpfr_l2b_10_0__tab[] = { 0xd49a7a00000000000000000000000000 };
+#elif GMP_NUMB_BITS == 256
+const mp_limb_t mpfr_l2b_10_0__tab[] = { 0xd49a7a0000000000000000000000000000000000000000000000000000000000 };
+#endif
+
+#if 0
+#elif GMP_NUMB_BITS == 16
+const mp_limb_t mpfr_l2b_10_1__tab[] = { 0x8f90, 0xf798, 0xfbcf, 0x9a84, 0x9a20 };
+#elif GMP_NUMB_BITS == 32
+const mp_limb_t mpfr_l2b_10_1__tab[] = { 0x8f900000, 0xfbcff798, 0x9a209a84 };
+#elif GMP_NUMB_BITS == 64
+const mp_limb_t mpfr_l2b_10_1__tab[] = { 0x8f90000000000000, 0x9a209a84fbcff798 };
+#elif GMP_NUMB_BITS == 96
+const mp_limb_t mpfr_l2b_10_1__tab[] = { 0x9a209a84fbcff7988f900000 };
+#elif GMP_NUMB_BITS == 128
+const mp_limb_t mpfr_l2b_10_1__tab[] = { 0x9a209a84fbcff7988f90000000000000 };
+#elif GMP_NUMB_BITS == 256
+const mp_limb_t mpfr_l2b_10_1__tab[] = { 0x9a209a84fbcff7988f9000000000000000000000000000000000000000000000 };
+#endif
+
+#if 0
+#elif GMP_NUMB_BITS == 16
+const mp_limb_t mpfr_l2b_11_0__tab[] = { 0x5400, 0xdd67 };
+#elif GMP_NUMB_BITS == 32
+const mp_limb_t mpfr_l2b_11_0__tab[] = { 0xdd675400 };
+#elif GMP_NUMB_BITS == 64
+const mp_limb_t mpfr_l2b_11_0__tab[] = { 0xdd67540000000000 };
+#elif GMP_NUMB_BITS == 96
+const mp_limb_t mpfr_l2b_11_0__tab[] = { 0xdd6754000000000000000000 };
+#elif GMP_NUMB_BITS == 128
+const mp_limb_t mpfr_l2b_11_0__tab[] = { 0xdd675400000000000000000000000000 };
+#elif GMP_NUMB_BITS == 256
+const mp_limb_t mpfr_l2b_11_0__tab[] = { 0xdd67540000000000000000000000000000000000000000000000000000000000 };
+#endif
+
+#if 0
+#elif GMP_NUMB_BITS == 16
+const mp_limb_t mpfr_l2b_11_1__tab[] = { 0xe170, 0x9d10, 0xeb22, 0x4e0e, 0x9400 };
+#elif GMP_NUMB_BITS == 32
+const mp_limb_t mpfr_l2b_11_1__tab[] = { 0xe1700000, 0xeb229d10, 0x94004e0e };
+#elif GMP_NUMB_BITS == 64
+const mp_limb_t mpfr_l2b_11_1__tab[] = { 0xe170000000000000, 0x94004e0eeb229d10 };
+#elif GMP_NUMB_BITS == 96
+const mp_limb_t mpfr_l2b_11_1__tab[] = { 0x94004e0eeb229d10e1700000 };
+#elif GMP_NUMB_BITS == 128
+const mp_limb_t mpfr_l2b_11_1__tab[] = { 0x94004e0eeb229d10e170000000000000 };
+#elif GMP_NUMB_BITS == 256
+const mp_limb_t mpfr_l2b_11_1__tab[] = { 0x94004e0eeb229d10e17000000000000000000000000000000000000000000000 };
+#endif
+
+#if 0
+#elif GMP_NUMB_BITS == 16
+const mp_limb_t mpfr_l2b_12_0__tab[] = { 0x0800, 0xe570 };
+#elif GMP_NUMB_BITS == 32
+const mp_limb_t mpfr_l2b_12_0__tab[] = { 0xe5700800 };
+#elif GMP_NUMB_BITS == 64
+const mp_limb_t mpfr_l2b_12_0__tab[] = { 0xe570080000000000 };
+#elif GMP_NUMB_BITS == 96
+const mp_limb_t mpfr_l2b_12_0__tab[] = { 0xe57008000000000000000000 };
+#elif GMP_NUMB_BITS == 128
+const mp_limb_t mpfr_l2b_12_0__tab[] = { 0xe5700800000000000000000000000000 };
+#elif GMP_NUMB_BITS == 256
+const mp_limb_t mpfr_l2b_12_0__tab[] = { 0xe570080000000000000000000000000000000000000000000000000000000000 };
+#endif
+
+#if 0
+#elif GMP_NUMB_BITS == 16
+const mp_limb_t mpfr_l2b_12_1__tab[] = { 0xfe28, 0x1c24, 0x0b03, 0x9c1a, 0x8ed1 };
+#elif GMP_NUMB_BITS == 32
+const mp_limb_t mpfr_l2b_12_1__tab[] = { 0xfe280000, 0x0b031c24, 0x8ed19c1a };
+#elif GMP_NUMB_BITS == 64
+const mp_limb_t mpfr_l2b_12_1__tab[] = { 0xfe28000000000000, 0x8ed19c1a0b031c24 };
+#elif GMP_NUMB_BITS == 96
+const mp_limb_t mpfr_l2b_12_1__tab[] = { 0x8ed19c1a0b031c24fe280000 };
+#elif GMP_NUMB_BITS == 128
+const mp_limb_t mpfr_l2b_12_1__tab[] = { 0x8ed19c1a0b031c24fe28000000000000 };
+#elif GMP_NUMB_BITS == 256
+const mp_limb_t mpfr_l2b_12_1__tab[] = { 0x8ed19c1a0b031c24fe2800000000000000000000000000000000000000000000 };
+#endif
+
+#if 0
+#elif GMP_NUMB_BITS == 16
+const mp_limb_t mpfr_l2b_13_0__tab[] = { 0x0200, 0xecd4 };
+#elif GMP_NUMB_BITS == 32
+const mp_limb_t mpfr_l2b_13_0__tab[] = { 0xecd40200 };
+#elif GMP_NUMB_BITS == 64
+const mp_limb_t mpfr_l2b_13_0__tab[] = { 0xecd4020000000000 };
+#elif GMP_NUMB_BITS == 96
+const mp_limb_t mpfr_l2b_13_0__tab[] = { 0xecd402000000000000000000 };
+#elif GMP_NUMB_BITS == 128
+const mp_limb_t mpfr_l2b_13_0__tab[] = { 0xecd40200000000000000000000000000 };
+#elif GMP_NUMB_BITS == 256
+const mp_limb_t mpfr_l2b_13_0__tab[] = { 0xecd4020000000000000000000000000000000000000000000000000000000000 };
+#endif
+
+#if 0
+#elif GMP_NUMB_BITS == 16
+const mp_limb_t mpfr_l2b_13_1__tab[] = { 0x57f8, 0xf7b4, 0xcb20, 0xa7c6, 0x8a5c };
+#elif GMP_NUMB_BITS == 32
+const mp_limb_t mpfr_l2b_13_1__tab[] = { 0x57f80000, 0xcb20f7b4, 0x8a5ca7c6 };
+#elif GMP_NUMB_BITS == 64
+const mp_limb_t mpfr_l2b_13_1__tab[] = { 0x57f8000000000000, 0x8a5ca7c6cb20f7b4 };
+#elif GMP_NUMB_BITS == 96
+const mp_limb_t mpfr_l2b_13_1__tab[] = { 0x8a5ca7c6cb20f7b457f80000 };
+#elif GMP_NUMB_BITS == 128
+const mp_limb_t mpfr_l2b_13_1__tab[] = { 0x8a5ca7c6cb20f7b457f8000000000000 };
+#elif GMP_NUMB_BITS == 256
+const mp_limb_t mpfr_l2b_13_1__tab[] = { 0x8a5ca7c6cb20f7b457f800000000000000000000000000000000000000000000 };
+#endif
+
+#if 0
+#elif GMP_NUMB_BITS == 16
+const mp_limb_t mpfr_l2b_14_0__tab[] = { 0xb400, 0xf3ab };
+#elif GMP_NUMB_BITS == 32
+const mp_limb_t mpfr_l2b_14_0__tab[] = { 0xf3abb400 };
+#elif GMP_NUMB_BITS == 64
+const mp_limb_t mpfr_l2b_14_0__tab[] = { 0xf3abb40000000000 };
+#elif GMP_NUMB_BITS == 96
+const mp_limb_t mpfr_l2b_14_0__tab[] = { 0xf3abb4000000000000000000 };
+#elif GMP_NUMB_BITS == 128
+const mp_limb_t mpfr_l2b_14_0__tab[] = { 0xf3abb400000000000000000000000000 };
+#elif GMP_NUMB_BITS == 256
+const mp_limb_t mpfr_l2b_14_0__tab[] = { 0xf3abb40000000000000000000000000000000000000000000000000000000000 };
+#endif
+
+#if 0
+#elif GMP_NUMB_BITS == 16
+const mp_limb_t mpfr_l2b_14_1__tab[] = { 0x85a8, 0x5cab, 0x96b5, 0xfff6, 0x8679 };
+#elif GMP_NUMB_BITS == 32
+const mp_limb_t mpfr_l2b_14_1__tab[] = { 0x85a80000, 0x96b55cab, 0x8679fff6 };
+#elif GMP_NUMB_BITS == 64
+const mp_limb_t mpfr_l2b_14_1__tab[] = { 0x85a8000000000000, 0x8679fff696b55cab };
+#elif GMP_NUMB_BITS == 96
+const mp_limb_t mpfr_l2b_14_1__tab[] = { 0x8679fff696b55cab85a80000 };
+#elif GMP_NUMB_BITS == 128
+const mp_limb_t mpfr_l2b_14_1__tab[] = { 0x8679fff696b55cab85a8000000000000 };
+#elif GMP_NUMB_BITS == 256
+const mp_limb_t mpfr_l2b_14_1__tab[] = { 0x8679fff696b55cab85a800000000000000000000000000000000000000000000 };
+#endif
+
+#if 0
+#elif GMP_NUMB_BITS == 16
+const mp_limb_t mpfr_l2b_15_0__tab[] = { 0x8000, 0xfa0a };
+#elif GMP_NUMB_BITS == 32
+const mp_limb_t mpfr_l2b_15_0__tab[] = { 0xfa0a8000 };
+#elif GMP_NUMB_BITS == 64
+const mp_limb_t mpfr_l2b_15_0__tab[] = { 0xfa0a800000000000 };
+#elif GMP_NUMB_BITS == 96
+const mp_limb_t mpfr_l2b_15_0__tab[] = { 0xfa0a80000000000000000000 };
+#elif GMP_NUMB_BITS == 128
+const mp_limb_t mpfr_l2b_15_0__tab[] = { 0xfa0a8000000000000000000000000000 };
+#elif GMP_NUMB_BITS == 256
+const mp_limb_t mpfr_l2b_15_0__tab[] = { 0xfa0a800000000000000000000000000000000000000000000000000000000000 };
+#endif
+
+#if 0
+#elif GMP_NUMB_BITS == 16
+const mp_limb_t mpfr_l2b_15_1__tab[] = { 0x6f80, 0xa6aa, 0x69f0, 0xee23, 0x830c };
+#elif GMP_NUMB_BITS == 32
+const mp_limb_t mpfr_l2b_15_1__tab[] = { 0x6f800000, 0x69f0a6aa, 0x830cee23 };
+#elif GMP_NUMB_BITS == 64
+const mp_limb_t mpfr_l2b_15_1__tab[] = { 0x6f80000000000000, 0x830cee2369f0a6aa };
+#elif GMP_NUMB_BITS == 96
+const mp_limb_t mpfr_l2b_15_1__tab[] = { 0x830cee2369f0a6aa6f800000 };
+#elif GMP_NUMB_BITS == 128
+const mp_limb_t mpfr_l2b_15_1__tab[] = { 0x830cee2369f0a6aa6f80000000000000 };
+#elif GMP_NUMB_BITS == 256
+const mp_limb_t mpfr_l2b_15_1__tab[] = { 0x830cee2369f0a6aa6f8000000000000000000000000000000000000000000000 };
+#endif
+
+#if 0
+#elif GMP_NUMB_BITS == 16
+const mp_limb_t mpfr_l2b_16_0__tab[] = { 0x0000, 0x8000 };
+#elif GMP_NUMB_BITS == 32
+const mp_limb_t mpfr_l2b_16_0__tab[] = { 0x80000000 };
+#elif GMP_NUMB_BITS == 64
+const mp_limb_t mpfr_l2b_16_0__tab[] = { 0x8000000000000000 };
+#elif GMP_NUMB_BITS == 96
+const mp_limb_t mpfr_l2b_16_0__tab[] = { 0x800000000000000000000000 };
+#elif GMP_NUMB_BITS == 128
+const mp_limb_t mpfr_l2b_16_0__tab[] = { 0x80000000000000000000000000000000 };
+#elif GMP_NUMB_BITS == 256
+const mp_limb_t mpfr_l2b_16_0__tab[] = { 0x8000000000000000000000000000000000000000000000000000000000000000 };
+#endif
+
+#if 0
+#elif GMP_NUMB_BITS == 16
+const mp_limb_t mpfr_l2b_16_1__tab[] = { 0x0000, 0x0000, 0x0000, 0x0000, 0x8000 };
+#elif GMP_NUMB_BITS == 32
+const mp_limb_t mpfr_l2b_16_1__tab[] = { 0x00000000, 0x00000000, 0x80000000 };
+#elif GMP_NUMB_BITS == 64
+const mp_limb_t mpfr_l2b_16_1__tab[] = { 0x0000000000000000, 0x8000000000000000 };
+#elif GMP_NUMB_BITS == 96
+const mp_limb_t mpfr_l2b_16_1__tab[] = { 0x800000000000000000000000 };
+#elif GMP_NUMB_BITS == 128
+const mp_limb_t mpfr_l2b_16_1__tab[] = { 0x80000000000000000000000000000000 };
+#elif GMP_NUMB_BITS == 256
+const mp_limb_t mpfr_l2b_16_1__tab[] = { 0x8000000000000000000000000000000000000000000000000000000000000000 };
+#endif
+
+#if 0
+#elif GMP_NUMB_BITS == 16
+const mp_limb_t mpfr_l2b_17_0__tab[] = { 0x8000, 0x82cc };
+#elif GMP_NUMB_BITS == 32
+const mp_limb_t mpfr_l2b_17_0__tab[] = { 0x82cc8000 };
+#elif GMP_NUMB_BITS == 64
+const mp_limb_t mpfr_l2b_17_0__tab[] = { 0x82cc800000000000 };
+#elif GMP_NUMB_BITS == 96
+const mp_limb_t mpfr_l2b_17_0__tab[] = { 0x82cc80000000000000000000 };
+#elif GMP_NUMB_BITS == 128
+const mp_limb_t mpfr_l2b_17_0__tab[] = { 0x82cc8000000000000000000000000000 };
+#elif GMP_NUMB_BITS == 256
+const mp_limb_t mpfr_l2b_17_0__tab[] = { 0x82cc800000000000000000000000000000000000000000000000000000000000 };
+#endif
+
+#if 0
+#elif GMP_NUMB_BITS == 16
+const mp_limb_t mpfr_l2b_17_1__tab[] = { 0x8720, 0x259b, 0x62c4, 0xabf5, 0xfa85 };
+#elif GMP_NUMB_BITS == 32
+const mp_limb_t mpfr_l2b_17_1__tab[] = { 0x87200000, 0x62c4259b, 0xfa85abf5 };
+#elif GMP_NUMB_BITS == 64
+const mp_limb_t mpfr_l2b_17_1__tab[] = { 0x8720000000000000, 0xfa85abf562c4259b };
+#elif GMP_NUMB_BITS == 96
+const mp_limb_t mpfr_l2b_17_1__tab[] = { 0xfa85abf562c4259b87200000 };
+#elif GMP_NUMB_BITS == 128
+const mp_limb_t mpfr_l2b_17_1__tab[] = { 0xfa85abf562c4259b8720000000000000 };
+#elif GMP_NUMB_BITS == 256
+const mp_limb_t mpfr_l2b_17_1__tab[] = { 0xfa85abf562c4259b872000000000000000000000000000000000000000000000 };
+#endif
+
+#if 0
+#elif GMP_NUMB_BITS == 16
+const mp_limb_t mpfr_l2b_18_0__tab[] = { 0x0800, 0x8570 };
+#elif GMP_NUMB_BITS == 32
+const mp_limb_t mpfr_l2b_18_0__tab[] = { 0x85700800 };
+#elif GMP_NUMB_BITS == 64
+const mp_limb_t mpfr_l2b_18_0__tab[] = { 0x8570080000000000 };
+#elif GMP_NUMB_BITS == 96
+const mp_limb_t mpfr_l2b_18_0__tab[] = { 0x857008000000000000000000 };
+#elif GMP_NUMB_BITS == 128
+const mp_limb_t mpfr_l2b_18_0__tab[] = { 0x85700800000000000000000000000000 };
+#elif GMP_NUMB_BITS == 256
+const mp_limb_t mpfr_l2b_18_0__tab[] = { 0x8570080000000000000000000000000000000000000000000000000000000000 };
+#endif
+
+#if 0
+#elif GMP_NUMB_BITS == 16
+const mp_limb_t mpfr_l2b_18_1__tab[] = { 0x3698, 0x1378, 0x5537, 0x6634, 0xf591 };
+#elif GMP_NUMB_BITS == 32
+const mp_limb_t mpfr_l2b_18_1__tab[] = { 0x36980000, 0x55371378, 0xf5916634 };
+#elif GMP_NUMB_BITS == 64
+const mp_limb_t mpfr_l2b_18_1__tab[] = { 0x3698000000000000, 0xf591663455371378 };
+#elif GMP_NUMB_BITS == 96
+const mp_limb_t mpfr_l2b_18_1__tab[] = { 0xf59166345537137836980000 };
+#elif GMP_NUMB_BITS == 128
+const mp_limb_t mpfr_l2b_18_1__tab[] = { 0xf5916634553713783698000000000000 };
+#elif GMP_NUMB_BITS == 256
+const mp_limb_t mpfr_l2b_18_1__tab[] = { 0xf591663455371378369800000000000000000000000000000000000000000000 };
+#endif
+
+#if 0
+#elif GMP_NUMB_BITS == 16
+const mp_limb_t mpfr_l2b_19_0__tab[] = { 0x0600, 0x87ef };
+#elif GMP_NUMB_BITS == 32
+const mp_limb_t mpfr_l2b_19_0__tab[] = { 0x87ef0600 };
+#elif GMP_NUMB_BITS == 64
+const mp_limb_t mpfr_l2b_19_0__tab[] = { 0x87ef060000000000 };
+#elif GMP_NUMB_BITS == 96
+const mp_limb_t mpfr_l2b_19_0__tab[] = { 0x87ef06000000000000000000 };
+#elif GMP_NUMB_BITS == 128
+const mp_limb_t mpfr_l2b_19_0__tab[] = { 0x87ef0600000000000000000000000000 };
+#elif GMP_NUMB_BITS == 256
+const mp_limb_t mpfr_l2b_19_0__tab[] = { 0x87ef060000000000000000000000000000000000000000000000000000000000 };
+#endif
+
+#if 0
+#elif GMP_NUMB_BITS == 16
+const mp_limb_t mpfr_l2b_19_1__tab[] = { 0x0db8, 0x558c, 0x62ed, 0x08c0, 0xf10f };
+#elif GMP_NUMB_BITS == 32
+const mp_limb_t mpfr_l2b_19_1__tab[] = { 0x0db80000, 0x62ed558c, 0xf10f08c0 };
+#elif GMP_NUMB_BITS == 64
+const mp_limb_t mpfr_l2b_19_1__tab[] = { 0x0db8000000000000, 0xf10f08c062ed558c };
+#elif GMP_NUMB_BITS == 96
+const mp_limb_t mpfr_l2b_19_1__tab[] = { 0xf10f08c062ed558c0db80000 };
+#elif GMP_NUMB_BITS == 128
+const mp_limb_t mpfr_l2b_19_1__tab[] = { 0xf10f08c062ed558c0db8000000000000 };
+#elif GMP_NUMB_BITS == 256
+const mp_limb_t mpfr_l2b_19_1__tab[] = { 0xf10f08c062ed558c0db800000000000000000000000000000000000000000000 };
+#endif
+
+#if 0
+#elif GMP_NUMB_BITS == 16
+const mp_limb_t mpfr_l2b_20_0__tab[] = { 0x3e00, 0x8a4d };
+#elif GMP_NUMB_BITS == 32
+const mp_limb_t mpfr_l2b_20_0__tab[] = { 0x8a4d3e00 };
+#elif GMP_NUMB_BITS == 64
+const mp_limb_t mpfr_l2b_20_0__tab[] = { 0x8a4d3e0000000000 };
+#elif GMP_NUMB_BITS == 96
+const mp_limb_t mpfr_l2b_20_0__tab[] = { 0x8a4d3e000000000000000000 };
+#elif GMP_NUMB_BITS == 128
+const mp_limb_t mpfr_l2b_20_0__tab[] = { 0x8a4d3e00000000000000000000000000 };
+#elif GMP_NUMB_BITS == 256
+const mp_limb_t mpfr_l2b_20_0__tab[] = { 0x8a4d3e0000000000000000000000000000000000000000000000000000000000 };
+#endif
+
+#if 0
+#elif GMP_NUMB_BITS == 16
+const mp_limb_t mpfr_l2b_20_1__tab[] = { 0x0b40, 0xa71c, 0x1cc1, 0x690a, 0xecee };
+#elif GMP_NUMB_BITS == 32
+const mp_limb_t mpfr_l2b_20_1__tab[] = { 0x0b400000, 0x1cc1a71c, 0xecee690a };
+#elif GMP_NUMB_BITS == 64
+const mp_limb_t mpfr_l2b_20_1__tab[] = { 0x0b40000000000000, 0xecee690a1cc1a71c };
+#elif GMP_NUMB_BITS == 96
+const mp_limb_t mpfr_l2b_20_1__tab[] = { 0xecee690a1cc1a71c0b400000 };
+#elif GMP_NUMB_BITS == 128
+const mp_limb_t mpfr_l2b_20_1__tab[] = { 0xecee690a1cc1a71c0b40000000000000 };
+#elif GMP_NUMB_BITS == 256
+const mp_limb_t mpfr_l2b_20_1__tab[] = { 0xecee690a1cc1a71c0b4000000000000000000000000000000000000000000000 };
+#endif
+
+#if 0
+#elif GMP_NUMB_BITS == 16
+const mp_limb_t mpfr_l2b_21_0__tab[] = { 0xde00, 0x8c8d };
+#elif GMP_NUMB_BITS == 32
+const mp_limb_t mpfr_l2b_21_0__tab[] = { 0x8c8dde00 };
+#elif GMP_NUMB_BITS == 64
+const mp_limb_t mpfr_l2b_21_0__tab[] = { 0x8c8dde0000000000 };
+#elif GMP_NUMB_BITS == 96
+const mp_limb_t mpfr_l2b_21_0__tab[] = { 0x8c8dde000000000000000000 };
+#elif GMP_NUMB_BITS == 128
+const mp_limb_t mpfr_l2b_21_0__tab[] = { 0x8c8dde00000000000000000000000000 };
+#elif GMP_NUMB_BITS == 256
+const mp_limb_t mpfr_l2b_21_0__tab[] = { 0x8c8dde0000000000000000000000000000000000000000000000000000000000 };
+#endif
+
+#if 0
+#elif GMP_NUMB_BITS == 16
+const mp_limb_t mpfr_l2b_21_1__tab[] = { 0x4108, 0x6b26, 0xb3d0, 0x63c1, 0xe922 };
+#elif GMP_NUMB_BITS == 32
+const mp_limb_t mpfr_l2b_21_1__tab[] = { 0x41080000, 0xb3d06b26, 0xe92263c1 };
+#elif GMP_NUMB_BITS == 64
+const mp_limb_t mpfr_l2b_21_1__tab[] = { 0x4108000000000000, 0xe92263c1b3d06b26 };
+#elif GMP_NUMB_BITS == 96
+const mp_limb_t mpfr_l2b_21_1__tab[] = { 0xe92263c1b3d06b2641080000 };
+#elif GMP_NUMB_BITS == 128
+const mp_limb_t mpfr_l2b_21_1__tab[] = { 0xe92263c1b3d06b264108000000000000 };
+#elif GMP_NUMB_BITS == 256
+const mp_limb_t mpfr_l2b_21_1__tab[] = { 0xe92263c1b3d06b26410800000000000000000000000000000000000000000000 };
+#endif
+
+#if 0
+#elif GMP_NUMB_BITS == 16
+const mp_limb_t mpfr_l2b_22_0__tab[] = { 0xaa00, 0x8eb3 };
+#elif GMP_NUMB_BITS == 32
+const mp_limb_t mpfr_l2b_22_0__tab[] = { 0x8eb3aa00 };
+#elif GMP_NUMB_BITS == 64
+const mp_limb_t mpfr_l2b_22_0__tab[] = { 0x8eb3aa0000000000 };
+#elif GMP_NUMB_BITS == 96
+const mp_limb_t mpfr_l2b_22_0__tab[] = { 0x8eb3aa000000000000000000 };
+#elif GMP_NUMB_BITS == 128
+const mp_limb_t mpfr_l2b_22_0__tab[] = { 0x8eb3aa00000000000000000000000000 };
+#elif GMP_NUMB_BITS == 256
+const mp_limb_t mpfr_l2b_22_0__tab[] = { 0x8eb3aa0000000000000000000000000000000000000000000000000000000000 };
+#endif
+
+#if 0
+#elif GMP_NUMB_BITS == 16
+const mp_limb_t mpfr_l2b_22_1__tab[] = { 0xdbe8, 0xf061, 0x60b9, 0x2c4d, 0xe5a0 };
+#elif GMP_NUMB_BITS == 32
+const mp_limb_t mpfr_l2b_22_1__tab[] = { 0xdbe80000, 0x60b9f061, 0xe5a02c4d };
+#elif GMP_NUMB_BITS == 64
+const mp_limb_t mpfr_l2b_22_1__tab[] = { 0xdbe8000000000000, 0xe5a02c4d60b9f061 };
+#elif GMP_NUMB_BITS == 96
+const mp_limb_t mpfr_l2b_22_1__tab[] = { 0xe5a02c4d60b9f061dbe80000 };
+#elif GMP_NUMB_BITS == 128
+const mp_limb_t mpfr_l2b_22_1__tab[] = { 0xe5a02c4d60b9f061dbe8000000000000 };
+#elif GMP_NUMB_BITS == 256
+const mp_limb_t mpfr_l2b_22_1__tab[] = { 0xe5a02c4d60b9f061dbe800000000000000000000000000000000000000000000 };
+#endif
+
+#if 0
+#elif GMP_NUMB_BITS == 16
+const mp_limb_t mpfr_l2b_23_0__tab[] = { 0x0600, 0x90c1 };
+#elif GMP_NUMB_BITS == 32
+const mp_limb_t mpfr_l2b_23_0__tab[] = { 0x90c10600 };
+#elif GMP_NUMB_BITS == 64
+const mp_limb_t mpfr_l2b_23_0__tab[] = { 0x90c1060000000000 };
+#elif GMP_NUMB_BITS == 96
+const mp_limb_t mpfr_l2b_23_0__tab[] = { 0x90c106000000000000000000 };
+#elif GMP_NUMB_BITS == 128
+const mp_limb_t mpfr_l2b_23_0__tab[] = { 0x90c10600000000000000000000000000 };
+#elif GMP_NUMB_BITS == 256
+const mp_limb_t mpfr_l2b_23_0__tab[] = { 0x90c1060000000000000000000000000000000000000000000000000000000000 };
+#endif
+
+#if 0
+#elif GMP_NUMB_BITS == 16
+const mp_limb_t mpfr_l2b_23_1__tab[] = { 0xc3e0, 0x586a, 0x46b9, 0xcadd, 0xe25e };
+#elif GMP_NUMB_BITS == 32
+const mp_limb_t mpfr_l2b_23_1__tab[] = { 0xc3e00000, 0x46b9586a, 0xe25ecadd };
+#elif GMP_NUMB_BITS == 64
+const mp_limb_t mpfr_l2b_23_1__tab[] = { 0xc3e0000000000000, 0xe25ecadd46b9586a };
+#elif GMP_NUMB_BITS == 96
+const mp_limb_t mpfr_l2b_23_1__tab[] = { 0xe25ecadd46b9586ac3e00000 };
+#elif GMP_NUMB_BITS == 128
+const mp_limb_t mpfr_l2b_23_1__tab[] = { 0xe25ecadd46b9586ac3e0000000000000 };
+#elif GMP_NUMB_BITS == 256
+const mp_limb_t mpfr_l2b_23_1__tab[] = { 0xe25ecadd46b9586ac3e000000000000000000000000000000000000000000000 };
+#endif
+
+#if 0
+#elif GMP_NUMB_BITS == 16
+const mp_limb_t mpfr_l2b_24_0__tab[] = { 0x0400, 0x92b8 };
+#elif GMP_NUMB_BITS == 32
+const mp_limb_t mpfr_l2b_24_0__tab[] = { 0x92b80400 };
+#elif GMP_NUMB_BITS == 64
+const mp_limb_t mpfr_l2b_24_0__tab[] = { 0x92b8040000000000 };
+#elif GMP_NUMB_BITS == 96
+const mp_limb_t mpfr_l2b_24_0__tab[] = { 0x92b804000000000000000000 };
+#elif GMP_NUMB_BITS == 128
+const mp_limb_t mpfr_l2b_24_0__tab[] = { 0x92b80400000000000000000000000000 };
+#elif GMP_NUMB_BITS == 256
+const mp_limb_t mpfr_l2b_24_0__tab[] = { 0x92b8040000000000000000000000000000000000000000000000000000000000 };
+#endif
+
+#if 0
+#elif GMP_NUMB_BITS == 16
+const mp_limb_t mpfr_l2b_24_1__tab[] = { 0x3668, 0x7263, 0xc7c6, 0xbb44, 0xdf56 };
+#elif GMP_NUMB_BITS == 32
+const mp_limb_t mpfr_l2b_24_1__tab[] = { 0x36680000, 0xc7c67263, 0xdf56bb44 };
+#elif GMP_NUMB_BITS == 64
+const mp_limb_t mpfr_l2b_24_1__tab[] = { 0x3668000000000000, 0xdf56bb44c7c67263 };
+#elif GMP_NUMB_BITS == 96
+const mp_limb_t mpfr_l2b_24_1__tab[] = { 0xdf56bb44c7c6726336680000 };
+#elif GMP_NUMB_BITS == 128
+const mp_limb_t mpfr_l2b_24_1__tab[] = { 0xdf56bb44c7c672633668000000000000 };
+#elif GMP_NUMB_BITS == 256
+const mp_limb_t mpfr_l2b_24_1__tab[] = { 0xdf56bb44c7c67263366800000000000000000000000000000000000000000000 };
+#endif
+
+#if 0
+#elif GMP_NUMB_BITS == 16
+const mp_limb_t mpfr_l2b_25_0__tab[] = { 0x7a00, 0x949a };
+#elif GMP_NUMB_BITS == 32
+const mp_limb_t mpfr_l2b_25_0__tab[] = { 0x949a7a00 };
+#elif GMP_NUMB_BITS == 64
+const mp_limb_t mpfr_l2b_25_0__tab[] = { 0x949a7a0000000000 };
+#elif GMP_NUMB_BITS == 96
+const mp_limb_t mpfr_l2b_25_0__tab[] = { 0x949a7a000000000000000000 };
+#elif GMP_NUMB_BITS == 128
+const mp_limb_t mpfr_l2b_25_0__tab[] = { 0x949a7a00000000000000000000000000 };
+#elif GMP_NUMB_BITS == 256
+const mp_limb_t mpfr_l2b_25_0__tab[] = { 0x949a7a0000000000000000000000000000000000000000000000000000000000 };
+#endif
+
+#if 0
+#elif GMP_NUMB_BITS == 16
+const mp_limb_t mpfr_l2b_25_1__tab[] = { 0x67b8, 0x9728, 0x287b, 0xa348, 0xdc81 };
+#elif GMP_NUMB_BITS == 32
+const mp_limb_t mpfr_l2b_25_1__tab[] = { 0x67b80000, 0x287b9728, 0xdc81a348 };
+#elif GMP_NUMB_BITS == 64
+const mp_limb_t mpfr_l2b_25_1__tab[] = { 0x67b8000000000000, 0xdc81a348287b9728 };
+#elif GMP_NUMB_BITS == 96
+const mp_limb_t mpfr_l2b_25_1__tab[] = { 0xdc81a348287b972867b80000 };
+#elif GMP_NUMB_BITS == 128
+const mp_limb_t mpfr_l2b_25_1__tab[] = { 0xdc81a348287b972867b8000000000000 };
+#elif GMP_NUMB_BITS == 256
+const mp_limb_t mpfr_l2b_25_1__tab[] = { 0xdc81a348287b972867b800000000000000000000000000000000000000000000 };
+#endif
+
+#if 0
+#elif GMP_NUMB_BITS == 16
+const mp_limb_t mpfr_l2b_26_0__tab[] = { 0x0200, 0x966a };
+#elif GMP_NUMB_BITS == 32
+const mp_limb_t mpfr_l2b_26_0__tab[] = { 0x966a0200 };
+#elif GMP_NUMB_BITS == 64
+const mp_limb_t mpfr_l2b_26_0__tab[] = { 0x966a020000000000 };
+#elif GMP_NUMB_BITS == 96
+const mp_limb_t mpfr_l2b_26_0__tab[] = { 0x966a02000000000000000000 };
+#elif GMP_NUMB_BITS == 128
+const mp_limb_t mpfr_l2b_26_0__tab[] = { 0x966a0200000000000000000000000000 };
+#elif GMP_NUMB_BITS == 256
+const mp_limb_t mpfr_l2b_26_0__tab[] = { 0x966a020000000000000000000000000000000000000000000000000000000000 };
+#endif
+
+#if 0
+#elif GMP_NUMB_BITS == 16
+const mp_limb_t mpfr_l2b_26_1__tab[] = { 0x6458, 0x78a4, 0x7583, 0x19f9, 0xd9da };
+#elif GMP_NUMB_BITS == 32
+const mp_limb_t mpfr_l2b_26_1__tab[] = { 0x64580000, 0x758378a4, 0xd9da19f9 };
+#elif GMP_NUMB_BITS == 64
+const mp_limb_t mpfr_l2b_26_1__tab[] = { 0x6458000000000000, 0xd9da19f9758378a4 };
+#elif GMP_NUMB_BITS == 96
+const mp_limb_t mpfr_l2b_26_1__tab[] = { 0xd9da19f9758378a464580000 };
+#elif GMP_NUMB_BITS == 128
+const mp_limb_t mpfr_l2b_26_1__tab[] = { 0xd9da19f9758378a46458000000000000 };
+#elif GMP_NUMB_BITS == 256
+const mp_limb_t mpfr_l2b_26_1__tab[] = { 0xd9da19f9758378a4645800000000000000000000000000000000000000000000 };
+#endif
+
+#if 0
+#elif GMP_NUMB_BITS == 16
+const mp_limb_t mpfr_l2b_27_0__tab[] = { 0x0a00, 0x9828 };
+#elif GMP_NUMB_BITS == 32
+const mp_limb_t mpfr_l2b_27_0__tab[] = { 0x98280a00 };
+#elif GMP_NUMB_BITS == 64
+const mp_limb_t mpfr_l2b_27_0__tab[] = { 0x98280a0000000000 };
+#elif GMP_NUMB_BITS == 96
+const mp_limb_t mpfr_l2b_27_0__tab[] = { 0x98280a000000000000000000 };
+#elif GMP_NUMB_BITS == 128
+const mp_limb_t mpfr_l2b_27_0__tab[] = { 0x98280a00000000000000000000000000 };
+#elif GMP_NUMB_BITS == 256
+const mp_limb_t mpfr_l2b_27_0__tab[] = { 0x98280a0000000000000000000000000000000000000000000000000000000000 };
+#endif
+
+#if 0
+#elif GMP_NUMB_BITS == 16
+const mp_limb_t mpfr_l2b_27_1__tab[] = { 0x5b08, 0xe1bd, 0xe237, 0x7bac, 0xd75b };
+#elif GMP_NUMB_BITS == 32
+const mp_limb_t mpfr_l2b_27_1__tab[] = { 0x5b080000, 0xe237e1bd, 0xd75b7bac };
+#elif GMP_NUMB_BITS == 64
+const mp_limb_t mpfr_l2b_27_1__tab[] = { 0x5b08000000000000, 0xd75b7bace237e1bd };
+#elif GMP_NUMB_BITS == 96
+const mp_limb_t mpfr_l2b_27_1__tab[] = { 0xd75b7bace237e1bd5b080000 };
+#elif GMP_NUMB_BITS == 128
+const mp_limb_t mpfr_l2b_27_1__tab[] = { 0xd75b7bace237e1bd5b08000000000000 };
+#elif GMP_NUMB_BITS == 256
+const mp_limb_t mpfr_l2b_27_1__tab[] = { 0xd75b7bace237e1bd5b0800000000000000000000000000000000000000000000 };
+#endif
+
+#if 0
+#elif GMP_NUMB_BITS == 16
+const mp_limb_t mpfr_l2b_28_0__tab[] = { 0xda00, 0x99d5 };
+#elif GMP_NUMB_BITS == 32
+const mp_limb_t mpfr_l2b_28_0__tab[] = { 0x99d5da00 };
+#elif GMP_NUMB_BITS == 64
+const mp_limb_t mpfr_l2b_28_0__tab[] = { 0x99d5da0000000000 };
+#elif GMP_NUMB_BITS == 96
+const mp_limb_t mpfr_l2b_28_0__tab[] = { 0x99d5da000000000000000000 };
+#elif GMP_NUMB_BITS == 128
+const mp_limb_t mpfr_l2b_28_0__tab[] = { 0x99d5da00000000000000000000000000 };
+#elif GMP_NUMB_BITS == 256
+const mp_limb_t mpfr_l2b_28_0__tab[] = { 0x99d5da0000000000000000000000000000000000000000000000000000000000 };
+#endif
+
+#if 0
+#elif GMP_NUMB_BITS == 16
+const mp_limb_t mpfr_l2b_28_1__tab[] = { 0xdeb8, 0xe8b8, 0x71df, 0xc758, 0xd501 };
+#elif GMP_NUMB_BITS == 32
+const mp_limb_t mpfr_l2b_28_1__tab[] = { 0xdeb80000, 0x71dfe8b8, 0xd501c758 };
+#elif GMP_NUMB_BITS == 64
+const mp_limb_t mpfr_l2b_28_1__tab[] = { 0xdeb8000000000000, 0xd501c75871dfe8b8 };
+#elif GMP_NUMB_BITS == 96
+const mp_limb_t mpfr_l2b_28_1__tab[] = { 0xd501c75871dfe8b8deb80000 };
+#elif GMP_NUMB_BITS == 128
+const mp_limb_t mpfr_l2b_28_1__tab[] = { 0xd501c75871dfe8b8deb8000000000000 };
+#elif GMP_NUMB_BITS == 256
+const mp_limb_t mpfr_l2b_28_1__tab[] = { 0xd501c75871dfe8b8deb800000000000000000000000000000000000000000000 };
+#endif
+
+#if 0
+#elif GMP_NUMB_BITS == 16
+const mp_limb_t mpfr_l2b_29_0__tab[] = { 0x9600, 0x9b74 };
+#elif GMP_NUMB_BITS == 32
+const mp_limb_t mpfr_l2b_29_0__tab[] = { 0x9b749600 };
+#elif GMP_NUMB_BITS == 64
+const mp_limb_t mpfr_l2b_29_0__tab[] = { 0x9b74960000000000 };
+#elif GMP_NUMB_BITS == 96
+const mp_limb_t mpfr_l2b_29_0__tab[] = { 0x9b7496000000000000000000 };
+#elif GMP_NUMB_BITS == 128
+const mp_limb_t mpfr_l2b_29_0__tab[] = { 0x9b749600000000000000000000000000 };
+#elif GMP_NUMB_BITS == 256
+const mp_limb_t mpfr_l2b_29_0__tab[] = { 0x9b74960000000000000000000000000000000000000000000000000000000000 };
+#endif
+
+#if 0
+#elif GMP_NUMB_BITS == 16
+const mp_limb_t mpfr_l2b_29_1__tab[] = { 0xccc8, 0x62b3, 0x9c6c, 0x8315, 0xd2c9 };
+#elif GMP_NUMB_BITS == 32
+const mp_limb_t mpfr_l2b_29_1__tab[] = { 0xccc80000, 0x9c6c62b3, 0xd2c98315 };
+#elif GMP_NUMB_BITS == 64
+const mp_limb_t mpfr_l2b_29_1__tab[] = { 0xccc8000000000000, 0xd2c983159c6c62b3 };
+#elif GMP_NUMB_BITS == 96
+const mp_limb_t mpfr_l2b_29_1__tab[] = { 0xd2c983159c6c62b3ccc80000 };
+#elif GMP_NUMB_BITS == 128
+const mp_limb_t mpfr_l2b_29_1__tab[] = { 0xd2c983159c6c62b3ccc8000000000000 };
+#elif GMP_NUMB_BITS == 256
+const mp_limb_t mpfr_l2b_29_1__tab[] = { 0xd2c983159c6c62b3ccc800000000000000000000000000000000000000000000 };
+#endif
+
+#if 0
+#elif GMP_NUMB_BITS == 16
+const mp_limb_t mpfr_l2b_30_0__tab[] = { 0x4000, 0x9d05 };
+#elif GMP_NUMB_BITS == 32
+const mp_limb_t mpfr_l2b_30_0__tab[] = { 0x9d054000 };
+#elif GMP_NUMB_BITS == 64
+const mp_limb_t mpfr_l2b_30_0__tab[] = { 0x9d05400000000000 };
+#elif GMP_NUMB_BITS == 96
+const mp_limb_t mpfr_l2b_30_0__tab[] = { 0x9d0540000000000000000000 };
+#elif GMP_NUMB_BITS == 128
+const mp_limb_t mpfr_l2b_30_0__tab[] = { 0x9d054000000000000000000000000000 };
+#elif GMP_NUMB_BITS == 256
+const mp_limb_t mpfr_l2b_30_0__tab[] = { 0x9d05400000000000000000000000000000000000000000000000000000000000 };
+#endif
+
+#if 0
+#elif GMP_NUMB_BITS == 16
+const mp_limb_t mpfr_l2b_30_1__tab[] = { 0x3588, 0x1732, 0x5cad, 0xa619, 0xd0af };
+#elif GMP_NUMB_BITS == 32
+const mp_limb_t mpfr_l2b_30_1__tab[] = { 0x35880000, 0x5cad1732, 0xd0afa619 };
+#elif GMP_NUMB_BITS == 64
+const mp_limb_t mpfr_l2b_30_1__tab[] = { 0x3588000000000000, 0xd0afa6195cad1732 };
+#elif GMP_NUMB_BITS == 96
+const mp_limb_t mpfr_l2b_30_1__tab[] = { 0xd0afa6195cad173235880000 };
+#elif GMP_NUMB_BITS == 128
+const mp_limb_t mpfr_l2b_30_1__tab[] = { 0xd0afa6195cad17323588000000000000 };
+#elif GMP_NUMB_BITS == 256
+const mp_limb_t mpfr_l2b_30_1__tab[] = { 0xd0afa6195cad1732358800000000000000000000000000000000000000000000 };
+#endif
+
+#if 0
+#elif GMP_NUMB_BITS == 16
+const mp_limb_t mpfr_l2b_31_0__tab[] = { 0xc800, 0x9e88 };
+#elif GMP_NUMB_BITS == 32
+const mp_limb_t mpfr_l2b_31_0__tab[] = { 0x9e88c800 };
+#elif GMP_NUMB_BITS == 64
+const mp_limb_t mpfr_l2b_31_0__tab[] = { 0x9e88c80000000000 };
+#elif GMP_NUMB_BITS == 96
+const mp_limb_t mpfr_l2b_31_0__tab[] = { 0x9e88c8000000000000000000 };
+#elif GMP_NUMB_BITS == 128
+const mp_limb_t mpfr_l2b_31_0__tab[] = { 0x9e88c800000000000000000000000000 };
+#elif GMP_NUMB_BITS == 256
+const mp_limb_t mpfr_l2b_31_0__tab[] = { 0x9e88c80000000000000000000000000000000000000000000000000000000000 };
+#endif
+
+#if 0
+#elif GMP_NUMB_BITS == 16
+const mp_limb_t mpfr_l2b_31_1__tab[] = { 0xd578, 0xf7ca, 0x63ee, 0x86e6, 0xceb1 };
+#elif GMP_NUMB_BITS == 32
+const mp_limb_t mpfr_l2b_31_1__tab[] = { 0xd5780000, 0x63eef7ca, 0xceb186e6 };
+#elif GMP_NUMB_BITS == 64
+const mp_limb_t mpfr_l2b_31_1__tab[] = { 0xd578000000000000, 0xceb186e663eef7ca };
+#elif GMP_NUMB_BITS == 96
+const mp_limb_t mpfr_l2b_31_1__tab[] = { 0xceb186e663eef7cad5780000 };
+#elif GMP_NUMB_BITS == 128
+const mp_limb_t mpfr_l2b_31_1__tab[] = { 0xceb186e663eef7cad578000000000000 };
+#elif GMP_NUMB_BITS == 256
+const mp_limb_t mpfr_l2b_31_1__tab[] = { 0xceb186e663eef7cad57800000000000000000000000000000000000000000000 };
+#endif
+
+#if 0
+#elif GMP_NUMB_BITS == 16
+const mp_limb_t mpfr_l2b_32_0__tab[] = { 0x0000, 0xa000 };
+#elif GMP_NUMB_BITS == 32
+const mp_limb_t mpfr_l2b_32_0__tab[] = { 0xa0000000 };
+#elif GMP_NUMB_BITS == 64
+const mp_limb_t mpfr_l2b_32_0__tab[] = { 0xa000000000000000 };
+#elif GMP_NUMB_BITS == 96
+const mp_limb_t mpfr_l2b_32_0__tab[] = { 0xa00000000000000000000000 };
+#elif GMP_NUMB_BITS == 128
+const mp_limb_t mpfr_l2b_32_0__tab[] = { 0xa0000000000000000000000000000000 };
+#elif GMP_NUMB_BITS == 256
+const mp_limb_t mpfr_l2b_32_0__tab[] = { 0xa000000000000000000000000000000000000000000000000000000000000000 };
+#endif
+
+#if 0
+#elif GMP_NUMB_BITS == 16
+const mp_limb_t mpfr_l2b_32_1__tab[] = { 0xccd0, 0xcccc, 0xcccc, 0xcccc, 0xcccc };
+#elif GMP_NUMB_BITS == 32
+const mp_limb_t mpfr_l2b_32_1__tab[] = { 0xccd00000, 0xcccccccc, 0xcccccccc };
+#elif GMP_NUMB_BITS == 64
+const mp_limb_t mpfr_l2b_32_1__tab[] = { 0xccd0000000000000, 0xcccccccccccccccc };
+#elif GMP_NUMB_BITS == 96
+const mp_limb_t mpfr_l2b_32_1__tab[] = { 0xccccccccccccccccccd00000 };
+#elif GMP_NUMB_BITS == 128
+const mp_limb_t mpfr_l2b_32_1__tab[] = { 0xccccccccccccccccccd0000000000000 };
+#elif GMP_NUMB_BITS == 256
+const mp_limb_t mpfr_l2b_32_1__tab[] = { 0xccccccccccccccccccd000000000000000000000000000000000000000000000 };
+#endif
+
+#if 0
+#elif GMP_NUMB_BITS == 16
+const mp_limb_t mpfr_l2b_33_0__tab[] = { 0xae00, 0xa16b };
+#elif GMP_NUMB_BITS == 32
+const mp_limb_t mpfr_l2b_33_0__tab[] = { 0xa16bae00 };
+#elif GMP_NUMB_BITS == 64
+const mp_limb_t mpfr_l2b_33_0__tab[] = { 0xa16bae0000000000 };
+#elif GMP_NUMB_BITS == 96
+const mp_limb_t mpfr_l2b_33_0__tab[] = { 0xa16bae000000000000000000 };
+#elif GMP_NUMB_BITS == 128
+const mp_limb_t mpfr_l2b_33_0__tab[] = { 0xa16bae00000000000000000000000000 };
+#elif GMP_NUMB_BITS == 256
+const mp_limb_t mpfr_l2b_33_0__tab[] = { 0xa16bae0000000000000000000000000000000000000000000000000000000000 };
+#endif
+
+#if 0
+#elif GMP_NUMB_BITS == 16
+const mp_limb_t mpfr_l2b_33_1__tab[] = { 0x0888, 0xa187, 0x5304, 0x6404, 0xcaff };
+#elif GMP_NUMB_BITS == 32
+const mp_limb_t mpfr_l2b_33_1__tab[] = { 0x08880000, 0x5304a187, 0xcaff6404 };
+#elif GMP_NUMB_BITS == 64
+const mp_limb_t mpfr_l2b_33_1__tab[] = { 0x0888000000000000, 0xcaff64045304a187 };
+#elif GMP_NUMB_BITS == 96
+const mp_limb_t mpfr_l2b_33_1__tab[] = { 0xcaff64045304a18708880000 };
+#elif GMP_NUMB_BITS == 128
+const mp_limb_t mpfr_l2b_33_1__tab[] = { 0xcaff64045304a1870888000000000000 };
+#elif GMP_NUMB_BITS == 256
+const mp_limb_t mpfr_l2b_33_1__tab[] = { 0xcaff64045304a187088800000000000000000000000000000000000000000000 };
+#endif
+
+#if 0
+#elif GMP_NUMB_BITS == 16
+const mp_limb_t mpfr_l2b_34_0__tab[] = { 0x8000, 0xa2cc };
+#elif GMP_NUMB_BITS == 32
+const mp_limb_t mpfr_l2b_34_0__tab[] = { 0xa2cc8000 };
+#elif GMP_NUMB_BITS == 64
+const mp_limb_t mpfr_l2b_34_0__tab[] = { 0xa2cc800000000000 };
+#elif GMP_NUMB_BITS == 96
+const mp_limb_t mpfr_l2b_34_0__tab[] = { 0xa2cc80000000000000000000 };
+#elif GMP_NUMB_BITS == 128
+const mp_limb_t mpfr_l2b_34_0__tab[] = { 0xa2cc8000000000000000000000000000 };
+#elif GMP_NUMB_BITS == 256
+const mp_limb_t mpfr_l2b_34_0__tab[] = { 0xa2cc800000000000000000000000000000000000000000000000000000000000 };
+#endif
+
+#if 0
+#elif GMP_NUMB_BITS == 16
+const mp_limb_t mpfr_l2b_34_1__tab[] = { 0xfb50, 0x17ca, 0x5a79, 0x73d8, 0xc947 };
+#elif GMP_NUMB_BITS == 32
+const mp_limb_t mpfr_l2b_34_1__tab[] = { 0xfb500000, 0x5a7917ca, 0xc94773d8 };
+#elif GMP_NUMB_BITS == 64
+const mp_limb_t mpfr_l2b_34_1__tab[] = { 0xfb50000000000000, 0xc94773d85a7917ca };
+#elif GMP_NUMB_BITS == 96
+const mp_limb_t mpfr_l2b_34_1__tab[] = { 0xc94773d85a7917cafb500000 };
+#elif GMP_NUMB_BITS == 128
+const mp_limb_t mpfr_l2b_34_1__tab[] = { 0xc94773d85a7917cafb50000000000000 };
+#elif GMP_NUMB_BITS == 256
+const mp_limb_t mpfr_l2b_34_1__tab[] = { 0xc94773d85a7917cafb5000000000000000000000000000000000000000000000 };
+#endif
+
+#if 0
+#elif GMP_NUMB_BITS == 16
+const mp_limb_t mpfr_l2b_35_0__tab[] = { 0x1800, 0xa423 };
+#elif GMP_NUMB_BITS == 32
+const mp_limb_t mpfr_l2b_35_0__tab[] = { 0xa4231800 };
+#elif GMP_NUMB_BITS == 64
+const mp_limb_t mpfr_l2b_35_0__tab[] = { 0xa423180000000000 };
+#elif GMP_NUMB_BITS == 96
+const mp_limb_t mpfr_l2b_35_0__tab[] = { 0xa42318000000000000000000 };
+#elif GMP_NUMB_BITS == 128
+const mp_limb_t mpfr_l2b_35_0__tab[] = { 0xa4231800000000000000000000000000 };
+#elif GMP_NUMB_BITS == 256
+const mp_limb_t mpfr_l2b_35_0__tab[] = { 0xa423180000000000000000000000000000000000000000000000000000000000 };
+#endif
+
+#if 0
+#elif GMP_NUMB_BITS == 16
+const mp_limb_t mpfr_l2b_35_1__tab[] = { 0x6960, 0x18c2, 0x6037, 0x567c, 0xc7a3 };
+#elif GMP_NUMB_BITS == 32
+const mp_limb_t mpfr_l2b_35_1__tab[] = { 0x69600000, 0x603718c2, 0xc7a3567c };
+#elif GMP_NUMB_BITS == 64
+const mp_limb_t mpfr_l2b_35_1__tab[] = { 0x6960000000000000, 0xc7a3567c603718c2 };
+#elif GMP_NUMB_BITS == 96
+const mp_limb_t mpfr_l2b_35_1__tab[] = { 0xc7a3567c603718c269600000 };
+#elif GMP_NUMB_BITS == 128
+const mp_limb_t mpfr_l2b_35_1__tab[] = { 0xc7a3567c603718c26960000000000000 };
+#elif GMP_NUMB_BITS == 256
+const mp_limb_t mpfr_l2b_35_1__tab[] = { 0xc7a3567c603718c2696000000000000000000000000000000000000000000000 };
+#endif
+
+#if 0
+#elif GMP_NUMB_BITS == 16
+const mp_limb_t mpfr_l2b_36_0__tab[] = { 0x0800, 0xa570 };
+#elif GMP_NUMB_BITS == 32
+const mp_limb_t mpfr_l2b_36_0__tab[] = { 0xa5700800 };
+#elif GMP_NUMB_BITS == 64
+const mp_limb_t mpfr_l2b_36_0__tab[] = { 0xa570080000000000 };
+#elif GMP_NUMB_BITS == 96
+const mp_limb_t mpfr_l2b_36_0__tab[] = { 0xa57008000000000000000000 };
+#elif GMP_NUMB_BITS == 128
+const mp_limb_t mpfr_l2b_36_0__tab[] = { 0xa5700800000000000000000000000000 };
+#elif GMP_NUMB_BITS == 256
+const mp_limb_t mpfr_l2b_36_0__tab[] = { 0xa570080000000000000000000000000000000000000000000000000000000000 };
+#endif
+
+#if 0
+#elif GMP_NUMB_BITS == 16
+const mp_limb_t mpfr_l2b_36_1__tab[] = { 0xff10, 0xf9e9, 0xe054, 0x9236, 0xc611 };
+#elif GMP_NUMB_BITS == 32
+const mp_limb_t mpfr_l2b_36_1__tab[] = { 0xff100000, 0xe054f9e9, 0xc6119236 };
+#elif GMP_NUMB_BITS == 64
+const mp_limb_t mpfr_l2b_36_1__tab[] = { 0xff10000000000000, 0xc6119236e054f9e9 };
+#elif GMP_NUMB_BITS == 96
+const mp_limb_t mpfr_l2b_36_1__tab[] = { 0xc6119236e054f9e9ff100000 };
+#elif GMP_NUMB_BITS == 128
+const mp_limb_t mpfr_l2b_36_1__tab[] = { 0xc6119236e054f9e9ff10000000000000 };
+#elif GMP_NUMB_BITS == 256
+const mp_limb_t mpfr_l2b_36_1__tab[] = { 0xc6119236e054f9e9ff1000000000000000000000000000000000000000000000 };
+#endif
+
+#if 0
+#elif GMP_NUMB_BITS == 16
+const mp_limb_t mpfr_l2b_37_0__tab[] = { 0xd800, 0xa6b3 };
+#elif GMP_NUMB_BITS == 32
+const mp_limb_t mpfr_l2b_37_0__tab[] = { 0xa6b3d800 };
+#elif GMP_NUMB_BITS == 64
+const mp_limb_t mpfr_l2b_37_0__tab[] = { 0xa6b3d80000000000 };
+#elif GMP_NUMB_BITS == 96
+const mp_limb_t mpfr_l2b_37_0__tab[] = { 0xa6b3d8000000000000000000 };
+#elif GMP_NUMB_BITS == 128
+const mp_limb_t mpfr_l2b_37_0__tab[] = { 0xa6b3d800000000000000000000000000 };
+#elif GMP_NUMB_BITS == 256
+const mp_limb_t mpfr_l2b_37_0__tab[] = { 0xa6b3d80000000000000000000000000000000000000000000000000000000000 };
+#endif
+
+#if 0
+#elif GMP_NUMB_BITS == 16
+const mp_limb_t mpfr_l2b_37_1__tab[] = { 0x1618, 0x6b36, 0x70d7, 0xd3a2, 0xc490 };
+#elif GMP_NUMB_BITS == 32
+const mp_limb_t mpfr_l2b_37_1__tab[] = { 0x16180000, 0x70d76b36, 0xc490d3a2 };
+#elif GMP_NUMB_BITS == 64
+const mp_limb_t mpfr_l2b_37_1__tab[] = { 0x1618000000000000, 0xc490d3a270d76b36 };
+#elif GMP_NUMB_BITS == 96
+const mp_limb_t mpfr_l2b_37_1__tab[] = { 0xc490d3a270d76b3616180000 };
+#elif GMP_NUMB_BITS == 128
+const mp_limb_t mpfr_l2b_37_1__tab[] = { 0xc490d3a270d76b361618000000000000 };
+#elif GMP_NUMB_BITS == 256
+const mp_limb_t mpfr_l2b_37_1__tab[] = { 0xc490d3a270d76b36161800000000000000000000000000000000000000000000 };
+#endif
+
+#if 0
+#elif GMP_NUMB_BITS == 16
+const mp_limb_t mpfr_l2b_38_0__tab[] = { 0x0600, 0xa7ef };
+#elif GMP_NUMB_BITS == 32
+const mp_limb_t mpfr_l2b_38_0__tab[] = { 0xa7ef0600 };
+#elif GMP_NUMB_BITS == 64
+const mp_limb_t mpfr_l2b_38_0__tab[] = { 0xa7ef060000000000 };
+#elif GMP_NUMB_BITS == 96
+const mp_limb_t mpfr_l2b_38_0__tab[] = { 0xa7ef06000000000000000000 };
+#elif GMP_NUMB_BITS == 128
+const mp_limb_t mpfr_l2b_38_0__tab[] = { 0xa7ef0600000000000000000000000000 };
+#elif GMP_NUMB_BITS == 256
+const mp_limb_t mpfr_l2b_38_0__tab[] = { 0xa7ef060000000000000000000000000000000000000000000000000000000000 };
+#endif
+
+#if 0
+#elif GMP_NUMB_BITS == 16
+const mp_limb_t mpfr_l2b_38_1__tab[] = { 0xa3e0, 0x9505, 0x5182, 0xe8d2, 0xc31f };
+#elif GMP_NUMB_BITS == 32
+const mp_limb_t mpfr_l2b_38_1__tab[] = { 0xa3e00000, 0x51829505, 0xc31fe8d2 };
+#elif GMP_NUMB_BITS == 64
+const mp_limb_t mpfr_l2b_38_1__tab[] = { 0xa3e0000000000000, 0xc31fe8d251829505 };
+#elif GMP_NUMB_BITS == 96
+const mp_limb_t mpfr_l2b_38_1__tab[] = { 0xc31fe8d251829505a3e00000 };
+#elif GMP_NUMB_BITS == 128
+const mp_limb_t mpfr_l2b_38_1__tab[] = { 0xc31fe8d251829505a3e0000000000000 };
+#elif GMP_NUMB_BITS == 256
+const mp_limb_t mpfr_l2b_38_1__tab[] = { 0xc31fe8d251829505a3e000000000000000000000000000000000000000000000 };
+#endif
+
+#if 0
+#elif GMP_NUMB_BITS == 16
+const mp_limb_t mpfr_l2b_39_0__tab[] = { 0x0400, 0xa922 };
+#elif GMP_NUMB_BITS == 32
+const mp_limb_t mpfr_l2b_39_0__tab[] = { 0xa9220400 };
+#elif GMP_NUMB_BITS == 64
+const mp_limb_t mpfr_l2b_39_0__tab[] = { 0xa922040000000000 };
+#elif GMP_NUMB_BITS == 96
+const mp_limb_t mpfr_l2b_39_0__tab[] = { 0xa92204000000000000000000 };
+#elif GMP_NUMB_BITS == 128
+const mp_limb_t mpfr_l2b_39_0__tab[] = { 0xa9220400000000000000000000000000 };
+#elif GMP_NUMB_BITS == 256
+const mp_limb_t mpfr_l2b_39_0__tab[] = { 0xa922040000000000000000000000000000000000000000000000000000000000 };
+#endif
+
+#if 0
+#elif GMP_NUMB_BITS == 16
+const mp_limb_t mpfr_l2b_39_1__tab[] = { 0xfcf8, 0xf1b5, 0x10ca, 0xbd32, 0xc1bd };
+#elif GMP_NUMB_BITS == 32
+const mp_limb_t mpfr_l2b_39_1__tab[] = { 0xfcf80000, 0x10caf1b5, 0xc1bdbd32 };
+#elif GMP_NUMB_BITS == 64
+const mp_limb_t mpfr_l2b_39_1__tab[] = { 0xfcf8000000000000, 0xc1bdbd3210caf1b5 };
+#elif GMP_NUMB_BITS == 96
+const mp_limb_t mpfr_l2b_39_1__tab[] = { 0xc1bdbd3210caf1b5fcf80000 };
+#elif GMP_NUMB_BITS == 128
+const mp_limb_t mpfr_l2b_39_1__tab[] = { 0xc1bdbd3210caf1b5fcf8000000000000 };
+#elif GMP_NUMB_BITS == 256
+const mp_limb_t mpfr_l2b_39_1__tab[] = { 0xc1bdbd3210caf1b5fcf800000000000000000000000000000000000000000000 };
+#endif
+
+/* Precomputed mpfr_l2b_<base>_<i>__tab limb arrays, one table variant per
+   supported limb width (GMP_NUMB_BITS == 16/32/64/96/128/256).  The leading
+   "#if 0" makes every real branch of the chain a uniform "#elif", which
+   simplifies the generator's output.  Each variant packs the same bit
+   pattern into limbs of the selected width (low limb first).  These tables
+   are referenced by the __gmpfr_l2b constant array defined at the end of
+   this file; judging by the 23-/77-bit precisions recorded there, the _0
+   tables appear to hold a 23-bit approximation of log(base)/log(2) and the
+   _1 tables a 77-bit related constant (presumably log(2)/log(base)) --
+   TODO confirm against the generator.  Auto-generated data: do not edit
+   the hex values by hand.  */
+#if 0
+#elif GMP_NUMB_BITS == 16
+const mp_limb_t mpfr_l2b_40_0__tab[] = { 0x3e00, 0xaa4d };
+#elif GMP_NUMB_BITS == 32
+const mp_limb_t mpfr_l2b_40_0__tab[] = { 0xaa4d3e00 };
+#elif GMP_NUMB_BITS == 64
+const mp_limb_t mpfr_l2b_40_0__tab[] = { 0xaa4d3e0000000000 };
+#elif GMP_NUMB_BITS == 96
+const mp_limb_t mpfr_l2b_40_0__tab[] = { 0xaa4d3e000000000000000000 };
+#elif GMP_NUMB_BITS == 128
+const mp_limb_t mpfr_l2b_40_0__tab[] = { 0xaa4d3e00000000000000000000000000 };
+#elif GMP_NUMB_BITS == 256
+const mp_limb_t mpfr_l2b_40_0__tab[] = { 0xaa4d3e0000000000000000000000000000000000000000000000000000000000 };
+#endif
+
+#if 0
+#elif GMP_NUMB_BITS == 16
+const mp_limb_t mpfr_l2b_40_1__tab[] = { 0xdce8, 0x4948, 0xeff7, 0x55ff, 0xc069 };
+#elif GMP_NUMB_BITS == 32
+const mp_limb_t mpfr_l2b_40_1__tab[] = { 0xdce80000, 0xeff74948, 0xc06955ff };
+#elif GMP_NUMB_BITS == 64
+const mp_limb_t mpfr_l2b_40_1__tab[] = { 0xdce8000000000000, 0xc06955ffeff74948 };
+#elif GMP_NUMB_BITS == 96
+const mp_limb_t mpfr_l2b_40_1__tab[] = { 0xc06955ffeff74948dce80000 };
+#elif GMP_NUMB_BITS == 128
+const mp_limb_t mpfr_l2b_40_1__tab[] = { 0xc06955ffeff74948dce8000000000000 };
+#elif GMP_NUMB_BITS == 256
+const mp_limb_t mpfr_l2b_40_1__tab[] = { 0xc06955ffeff74948dce800000000000000000000000000000000000000000000 };
+#endif
+
+#if 0
+#elif GMP_NUMB_BITS == 16
+const mp_limb_t mpfr_l2b_41_0__tab[] = { 0x1200, 0xab71 };
+#elif GMP_NUMB_BITS == 32
+const mp_limb_t mpfr_l2b_41_0__tab[] = { 0xab711200 };
+#elif GMP_NUMB_BITS == 64
+const mp_limb_t mpfr_l2b_41_0__tab[] = { 0xab71120000000000 };
+#elif GMP_NUMB_BITS == 96
+const mp_limb_t mpfr_l2b_41_0__tab[] = { 0xab7112000000000000000000 };
+#elif GMP_NUMB_BITS == 128
+const mp_limb_t mpfr_l2b_41_0__tab[] = { 0xab711200000000000000000000000000 };
+#elif GMP_NUMB_BITS == 256
+const mp_limb_t mpfr_l2b_41_0__tab[] = { 0xab71120000000000000000000000000000000000000000000000000000000000 };
+#endif
+
+#if 0
+#elif GMP_NUMB_BITS == 16
+const mp_limb_t mpfr_l2b_41_1__tab[] = { 0xdc28, 0x7cef, 0xf695, 0xcf47, 0xbf21 };
+#elif GMP_NUMB_BITS == 32
+const mp_limb_t mpfr_l2b_41_1__tab[] = { 0xdc280000, 0xf6957cef, 0xbf21cf47 };
+#elif GMP_NUMB_BITS == 64
+const mp_limb_t mpfr_l2b_41_1__tab[] = { 0xdc28000000000000, 0xbf21cf47f6957cef };
+#elif GMP_NUMB_BITS == 96
+const mp_limb_t mpfr_l2b_41_1__tab[] = { 0xbf21cf47f6957cefdc280000 };
+#elif GMP_NUMB_BITS == 128
+const mp_limb_t mpfr_l2b_41_1__tab[] = { 0xbf21cf47f6957cefdc28000000000000 };
+#elif GMP_NUMB_BITS == 256
+const mp_limb_t mpfr_l2b_41_1__tab[] = { 0xbf21cf47f6957cefdc2800000000000000000000000000000000000000000000 };
+#endif
+
+#if 0
+#elif GMP_NUMB_BITS == 16
+const mp_limb_t mpfr_l2b_42_0__tab[] = { 0xde00, 0xac8d };
+#elif GMP_NUMB_BITS == 32
+const mp_limb_t mpfr_l2b_42_0__tab[] = { 0xac8dde00 };
+#elif GMP_NUMB_BITS == 64
+const mp_limb_t mpfr_l2b_42_0__tab[] = { 0xac8dde0000000000 };
+#elif GMP_NUMB_BITS == 96
+const mp_limb_t mpfr_l2b_42_0__tab[] = { 0xac8dde000000000000000000 };
+#elif GMP_NUMB_BITS == 128
+const mp_limb_t mpfr_l2b_42_0__tab[] = { 0xac8dde00000000000000000000000000 };
+#elif GMP_NUMB_BITS == 256
+const mp_limb_t mpfr_l2b_42_0__tab[] = { 0xac8dde0000000000000000000000000000000000000000000000000000000000 };
+#endif
+
+#if 0
+#elif GMP_NUMB_BITS == 16
+const mp_limb_t mpfr_l2b_42_1__tab[] = { 0xba10, 0x7125, 0x939b, 0x594a, 0xbde6 };
+#elif GMP_NUMB_BITS == 32
+const mp_limb_t mpfr_l2b_42_1__tab[] = { 0xba100000, 0x939b7125, 0xbde6594a };
+#elif GMP_NUMB_BITS == 64
+const mp_limb_t mpfr_l2b_42_1__tab[] = { 0xba10000000000000, 0xbde6594a939b7125 };
+#elif GMP_NUMB_BITS == 96
+const mp_limb_t mpfr_l2b_42_1__tab[] = { 0xbde6594a939b7125ba100000 };
+#elif GMP_NUMB_BITS == 128
+const mp_limb_t mpfr_l2b_42_1__tab[] = { 0xbde6594a939b7125ba10000000000000 };
+#elif GMP_NUMB_BITS == 256
+const mp_limb_t mpfr_l2b_42_1__tab[] = { 0xbde6594a939b7125ba1000000000000000000000000000000000000000000000 };
+#endif
+
+#if 0
+#elif GMP_NUMB_BITS == 16
+const mp_limb_t mpfr_l2b_43_0__tab[] = { 0xf600, 0xada3 };
+#elif GMP_NUMB_BITS == 32
+const mp_limb_t mpfr_l2b_43_0__tab[] = { 0xada3f600 };
+#elif GMP_NUMB_BITS == 64
+const mp_limb_t mpfr_l2b_43_0__tab[] = { 0xada3f60000000000 };
+#elif GMP_NUMB_BITS == 96
+const mp_limb_t mpfr_l2b_43_0__tab[] = { 0xada3f6000000000000000000 };
+#elif GMP_NUMB_BITS == 128
+const mp_limb_t mpfr_l2b_43_0__tab[] = { 0xada3f600000000000000000000000000 };
+#elif GMP_NUMB_BITS == 256
+const mp_limb_t mpfr_l2b_43_0__tab[] = { 0xada3f60000000000000000000000000000000000000000000000000000000000 };
+#endif
+
+#if 0
+#elif GMP_NUMB_BITS == 16
+const mp_limb_t mpfr_l2b_43_1__tab[] = { 0x9560, 0x2ab5, 0x9118, 0x363d, 0xbcb6 };
+#elif GMP_NUMB_BITS == 32
+const mp_limb_t mpfr_l2b_43_1__tab[] = { 0x95600000, 0x91182ab5, 0xbcb6363d };
+#elif GMP_NUMB_BITS == 64
+const mp_limb_t mpfr_l2b_43_1__tab[] = { 0x9560000000000000, 0xbcb6363d91182ab5 };
+#elif GMP_NUMB_BITS == 96
+const mp_limb_t mpfr_l2b_43_1__tab[] = { 0xbcb6363d91182ab595600000 };
+#elif GMP_NUMB_BITS == 128
+const mp_limb_t mpfr_l2b_43_1__tab[] = { 0xbcb6363d91182ab59560000000000000 };
+#elif GMP_NUMB_BITS == 256
+const mp_limb_t mpfr_l2b_43_1__tab[] = { 0xbcb6363d91182ab5956000000000000000000000000000000000000000000000 };
+#endif
+
+#if 0
+#elif GMP_NUMB_BITS == 16
+const mp_limb_t mpfr_l2b_44_0__tab[] = { 0xaa00, 0xaeb3 };
+#elif GMP_NUMB_BITS == 32
+const mp_limb_t mpfr_l2b_44_0__tab[] = { 0xaeb3aa00 };
+#elif GMP_NUMB_BITS == 64
+const mp_limb_t mpfr_l2b_44_0__tab[] = { 0xaeb3aa0000000000 };
+#elif GMP_NUMB_BITS == 96
+const mp_limb_t mpfr_l2b_44_0__tab[] = { 0xaeb3aa000000000000000000 };
+#elif GMP_NUMB_BITS == 128
+const mp_limb_t mpfr_l2b_44_0__tab[] = { 0xaeb3aa00000000000000000000000000 };
+#elif GMP_NUMB_BITS == 256
+const mp_limb_t mpfr_l2b_44_0__tab[] = { 0xaeb3aa0000000000000000000000000000000000000000000000000000000000 };
+#endif
+
+#if 0
+#elif GMP_NUMB_BITS == 16
+const mp_limb_t mpfr_l2b_44_1__tab[] = { 0x1590, 0x4e90, 0x3a3d, 0xb859, 0xbb90 };
+#elif GMP_NUMB_BITS == 32
+const mp_limb_t mpfr_l2b_44_1__tab[] = { 0x15900000, 0x3a3d4e90, 0xbb90b859 };
+#elif GMP_NUMB_BITS == 64
+const mp_limb_t mpfr_l2b_44_1__tab[] = { 0x1590000000000000, 0xbb90b8593a3d4e90 };
+#elif GMP_NUMB_BITS == 96
+const mp_limb_t mpfr_l2b_44_1__tab[] = { 0xbb90b8593a3d4e9015900000 };
+#elif GMP_NUMB_BITS == 128
+const mp_limb_t mpfr_l2b_44_1__tab[] = { 0xbb90b8593a3d4e901590000000000000 };
+#elif GMP_NUMB_BITS == 256
+const mp_limb_t mpfr_l2b_44_1__tab[] = { 0xbb90b8593a3d4e90159000000000000000000000000000000000000000000000 };
+#endif
+
+#if 0
+#elif GMP_NUMB_BITS == 16
+const mp_limb_t mpfr_l2b_45_0__tab[] = { 0x4400, 0xafbd };
+#elif GMP_NUMB_BITS == 32
+const mp_limb_t mpfr_l2b_45_0__tab[] = { 0xafbd4400 };
+#elif GMP_NUMB_BITS == 64
+const mp_limb_t mpfr_l2b_45_0__tab[] = { 0xafbd440000000000 };
+#elif GMP_NUMB_BITS == 96
+const mp_limb_t mpfr_l2b_45_0__tab[] = { 0xafbd44000000000000000000 };
+#elif GMP_NUMB_BITS == 128
+const mp_limb_t mpfr_l2b_45_0__tab[] = { 0xafbd4400000000000000000000000000 };
+#elif GMP_NUMB_BITS == 256
+const mp_limb_t mpfr_l2b_45_0__tab[] = { 0xafbd440000000000000000000000000000000000000000000000000000000000 };
+#endif
+
+#if 0
+#elif GMP_NUMB_BITS == 16
+const mp_limb_t mpfr_l2b_45_1__tab[] = { 0x1e78, 0x76f5, 0x1010, 0x4026, 0xba75 };
+#elif GMP_NUMB_BITS == 32
+const mp_limb_t mpfr_l2b_45_1__tab[] = { 0x1e780000, 0x101076f5, 0xba754026 };
+#elif GMP_NUMB_BITS == 64
+const mp_limb_t mpfr_l2b_45_1__tab[] = { 0x1e78000000000000, 0xba754026101076f5 };
+#elif GMP_NUMB_BITS == 96
+const mp_limb_t mpfr_l2b_45_1__tab[] = { 0xba754026101076f51e780000 };
+#elif GMP_NUMB_BITS == 128
+const mp_limb_t mpfr_l2b_45_1__tab[] = { 0xba754026101076f51e78000000000000 };
+#elif GMP_NUMB_BITS == 256
+const mp_limb_t mpfr_l2b_45_1__tab[] = { 0xba754026101076f51e7800000000000000000000000000000000000000000000 };
+#endif
+
+#if 0
+#elif GMP_NUMB_BITS == 16
+const mp_limb_t mpfr_l2b_46_0__tab[] = { 0x0600, 0xb0c1 };
+#elif GMP_NUMB_BITS == 32
+const mp_limb_t mpfr_l2b_46_0__tab[] = { 0xb0c10600 };
+#elif GMP_NUMB_BITS == 64
+const mp_limb_t mpfr_l2b_46_0__tab[] = { 0xb0c1060000000000 };
+#elif GMP_NUMB_BITS == 96
+const mp_limb_t mpfr_l2b_46_0__tab[] = { 0xb0c106000000000000000000 };
+#elif GMP_NUMB_BITS == 128
+const mp_limb_t mpfr_l2b_46_0__tab[] = { 0xb0c10600000000000000000000000000 };
+#elif GMP_NUMB_BITS == 256
+const mp_limb_t mpfr_l2b_46_0__tab[] = { 0xb0c1060000000000000000000000000000000000000000000000000000000000 };
+#endif
+
+#if 0
+#elif GMP_NUMB_BITS == 16
+const mp_limb_t mpfr_l2b_46_1__tab[] = { 0xb670, 0x0512, 0x69aa, 0x3b01, 0xb963 };
+#elif GMP_NUMB_BITS == 32
+const mp_limb_t mpfr_l2b_46_1__tab[] = { 0xb6700000, 0x69aa0512, 0xb9633b01 };
+#elif GMP_NUMB_BITS == 64
+const mp_limb_t mpfr_l2b_46_1__tab[] = { 0xb670000000000000, 0xb9633b0169aa0512 };
+#elif GMP_NUMB_BITS == 96
+const mp_limb_t mpfr_l2b_46_1__tab[] = { 0xb9633b0169aa0512b6700000 };
+#elif GMP_NUMB_BITS == 128
+const mp_limb_t mpfr_l2b_46_1__tab[] = { 0xb9633b0169aa0512b670000000000000 };
+#elif GMP_NUMB_BITS == 256
+const mp_limb_t mpfr_l2b_46_1__tab[] = { 0xb9633b0169aa0512b67000000000000000000000000000000000000000000000 };
+#endif
+
+#if 0
+#elif GMP_NUMB_BITS == 16
+const mp_limb_t mpfr_l2b_47_0__tab[] = { 0x3200, 0xb1bf };
+#elif GMP_NUMB_BITS == 32
+const mp_limb_t mpfr_l2b_47_0__tab[] = { 0xb1bf3200 };
+#elif GMP_NUMB_BITS == 64
+const mp_limb_t mpfr_l2b_47_0__tab[] = { 0xb1bf320000000000 };
+#elif GMP_NUMB_BITS == 96
+const mp_limb_t mpfr_l2b_47_0__tab[] = { 0xb1bf32000000000000000000 };
+#elif GMP_NUMB_BITS == 128
+const mp_limb_t mpfr_l2b_47_0__tab[] = { 0xb1bf3200000000000000000000000000 };
+#elif GMP_NUMB_BITS == 256
+const mp_limb_t mpfr_l2b_47_0__tab[] = { 0xb1bf320000000000000000000000000000000000000000000000000000000000 };
+#endif
+
+#if 0
+#elif GMP_NUMB_BITS == 16
+const mp_limb_t mpfr_l2b_47_1__tab[] = { 0x5118, 0x4133, 0xfbe4, 0x21d0, 0xb85a };
+#elif GMP_NUMB_BITS == 32
+const mp_limb_t mpfr_l2b_47_1__tab[] = { 0x51180000, 0xfbe44133, 0xb85a21d0 };
+#elif GMP_NUMB_BITS == 64
+const mp_limb_t mpfr_l2b_47_1__tab[] = { 0x5118000000000000, 0xb85a21d0fbe44133 };
+#elif GMP_NUMB_BITS == 96
+const mp_limb_t mpfr_l2b_47_1__tab[] = { 0xb85a21d0fbe4413351180000 };
+#elif GMP_NUMB_BITS == 128
+const mp_limb_t mpfr_l2b_47_1__tab[] = { 0xb85a21d0fbe441335118000000000000 };
+#elif GMP_NUMB_BITS == 256
+const mp_limb_t mpfr_l2b_47_1__tab[] = { 0xb85a21d0fbe44133511800000000000000000000000000000000000000000000 };
+#endif
+
+#if 0
+#elif GMP_NUMB_BITS == 16
+const mp_limb_t mpfr_l2b_48_0__tab[] = { 0x0400, 0xb2b8 };
+#elif GMP_NUMB_BITS == 32
+const mp_limb_t mpfr_l2b_48_0__tab[] = { 0xb2b80400 };
+#elif GMP_NUMB_BITS == 64
+const mp_limb_t mpfr_l2b_48_0__tab[] = { 0xb2b8040000000000 };
+#elif GMP_NUMB_BITS == 96
+const mp_limb_t mpfr_l2b_48_0__tab[] = { 0xb2b804000000000000000000 };
+#elif GMP_NUMB_BITS == 128
+const mp_limb_t mpfr_l2b_48_0__tab[] = { 0xb2b80400000000000000000000000000 };
+#elif GMP_NUMB_BITS == 256
+const mp_limb_t mpfr_l2b_48_0__tab[] = { 0xb2b8040000000000000000000000000000000000000000000000000000000000 };
+#endif
+
+#if 0
+#elif GMP_NUMB_BITS == 16
+const mp_limb_t mpfr_l2b_48_1__tab[] = { 0x0490, 0x663d, 0x960d, 0x77de, 0xb759 };
+#elif GMP_NUMB_BITS == 32
+const mp_limb_t mpfr_l2b_48_1__tab[] = { 0x04900000, 0x960d663d, 0xb75977de };
+#elif GMP_NUMB_BITS == 64
+const mp_limb_t mpfr_l2b_48_1__tab[] = { 0x0490000000000000, 0xb75977de960d663d };
+#elif GMP_NUMB_BITS == 96
+const mp_limb_t mpfr_l2b_48_1__tab[] = { 0xb75977de960d663d04900000 };
+#elif GMP_NUMB_BITS == 128
+const mp_limb_t mpfr_l2b_48_1__tab[] = { 0xb75977de960d663d0490000000000000 };
+#elif GMP_NUMB_BITS == 256
+const mp_limb_t mpfr_l2b_48_1__tab[] = { 0xb75977de960d663d049000000000000000000000000000000000000000000000 };
+#endif
+
+#if 0
+#elif GMP_NUMB_BITS == 16
+const mp_limb_t mpfr_l2b_49_0__tab[] = { 0xb400, 0xb3ab };
+#elif GMP_NUMB_BITS == 32
+const mp_limb_t mpfr_l2b_49_0__tab[] = { 0xb3abb400 };
+#elif GMP_NUMB_BITS == 64
+const mp_limb_t mpfr_l2b_49_0__tab[] = { 0xb3abb40000000000 };
+#elif GMP_NUMB_BITS == 96
+const mp_limb_t mpfr_l2b_49_0__tab[] = { 0xb3abb4000000000000000000 };
+#elif GMP_NUMB_BITS == 128
+const mp_limb_t mpfr_l2b_49_0__tab[] = { 0xb3abb400000000000000000000000000 };
+#elif GMP_NUMB_BITS == 256
+const mp_limb_t mpfr_l2b_49_0__tab[] = { 0xb3abb40000000000000000000000000000000000000000000000000000000000 };
+#endif
+
+#if 0
+#elif GMP_NUMB_BITS == 16
+const mp_limb_t mpfr_l2b_49_1__tab[] = { 0x37b8, 0xa711, 0x754d, 0xc9d6, 0xb660 };
+#elif GMP_NUMB_BITS == 32
+const mp_limb_t mpfr_l2b_49_1__tab[] = { 0x37b80000, 0x754da711, 0xb660c9d6 };
+#elif GMP_NUMB_BITS == 64
+const mp_limb_t mpfr_l2b_49_1__tab[] = { 0x37b8000000000000, 0xb660c9d6754da711 };
+#elif GMP_NUMB_BITS == 96
+const mp_limb_t mpfr_l2b_49_1__tab[] = { 0xb660c9d6754da71137b80000 };
+#elif GMP_NUMB_BITS == 128
+const mp_limb_t mpfr_l2b_49_1__tab[] = { 0xb660c9d6754da71137b8000000000000 };
+#elif GMP_NUMB_BITS == 256
+const mp_limb_t mpfr_l2b_49_1__tab[] = { 0xb660c9d6754da71137b800000000000000000000000000000000000000000000 };
+#endif
+
+#if 0
+#elif GMP_NUMB_BITS == 16
+const mp_limb_t mpfr_l2b_50_0__tab[] = { 0x7a00, 0xb49a };
+#elif GMP_NUMB_BITS == 32
+const mp_limb_t mpfr_l2b_50_0__tab[] = { 0xb49a7a00 };
+#elif GMP_NUMB_BITS == 64
+const mp_limb_t mpfr_l2b_50_0__tab[] = { 0xb49a7a0000000000 };
+#elif GMP_NUMB_BITS == 96
+const mp_limb_t mpfr_l2b_50_0__tab[] = { 0xb49a7a000000000000000000 };
+#elif GMP_NUMB_BITS == 128
+const mp_limb_t mpfr_l2b_50_0__tab[] = { 0xb49a7a00000000000000000000000000 };
+#elif GMP_NUMB_BITS == 256
+const mp_limb_t mpfr_l2b_50_0__tab[] = { 0xb49a7a0000000000000000000000000000000000000000000000000000000000 };
+#endif
+
+#if 0
+#elif GMP_NUMB_BITS == 16
+const mp_limb_t mpfr_l2b_50_1__tab[] = { 0x27f0, 0xe532, 0x7344, 0xace3, 0xb56f };
+#elif GMP_NUMB_BITS == 32
+const mp_limb_t mpfr_l2b_50_1__tab[] = { 0x27f00000, 0x7344e532, 0xb56face3 };
+#elif GMP_NUMB_BITS == 64
+const mp_limb_t mpfr_l2b_50_1__tab[] = { 0x27f0000000000000, 0xb56face37344e532 };
+#elif GMP_NUMB_BITS == 96
+const mp_limb_t mpfr_l2b_50_1__tab[] = { 0xb56face37344e53227f00000 };
+#elif GMP_NUMB_BITS == 128
+const mp_limb_t mpfr_l2b_50_1__tab[] = { 0xb56face37344e53227f0000000000000 };
+#elif GMP_NUMB_BITS == 256
+const mp_limb_t mpfr_l2b_50_1__tab[] = { 0xb56face37344e53227f000000000000000000000000000000000000000000000 };
+#endif
+
+#if 0
+#elif GMP_NUMB_BITS == 16
+const mp_limb_t mpfr_l2b_51_0__tab[] = { 0x8400, 0xb584 };
+#elif GMP_NUMB_BITS == 32
+const mp_limb_t mpfr_l2b_51_0__tab[] = { 0xb5848400 };
+#elif GMP_NUMB_BITS == 64
+const mp_limb_t mpfr_l2b_51_0__tab[] = { 0xb584840000000000 };
+#elif GMP_NUMB_BITS == 96
+const mp_limb_t mpfr_l2b_51_0__tab[] = { 0xb58484000000000000000000 };
+#elif GMP_NUMB_BITS == 128
+const mp_limb_t mpfr_l2b_51_0__tab[] = { 0xb5848400000000000000000000000000 };
+#elif GMP_NUMB_BITS == 256
+const mp_limb_t mpfr_l2b_51_0__tab[] = { 0xb584840000000000000000000000000000000000000000000000000000000000 };
+#endif
+
+#if 0
+#elif GMP_NUMB_BITS == 16
+const mp_limb_t mpfr_l2b_51_1__tab[] = { 0x4000, 0xe9a9, 0x0f8a, 0xbde5, 0xb485 };
+#elif GMP_NUMB_BITS == 32
+const mp_limb_t mpfr_l2b_51_1__tab[] = { 0x40000000, 0x0f8ae9a9, 0xb485bde5 };
+#elif GMP_NUMB_BITS == 64
+const mp_limb_t mpfr_l2b_51_1__tab[] = { 0x4000000000000000, 0xb485bde50f8ae9a9 };
+#elif GMP_NUMB_BITS == 96
+const mp_limb_t mpfr_l2b_51_1__tab[] = { 0xb485bde50f8ae9a940000000 };
+#elif GMP_NUMB_BITS == 128
+const mp_limb_t mpfr_l2b_51_1__tab[] = { 0xb485bde50f8ae9a94000000000000000 };
+#elif GMP_NUMB_BITS == 256
+const mp_limb_t mpfr_l2b_51_1__tab[] = { 0xb485bde50f8ae9a9400000000000000000000000000000000000000000000000 };
+#endif
+
+#if 0
+#elif GMP_NUMB_BITS == 16
+const mp_limb_t mpfr_l2b_52_0__tab[] = { 0x0200, 0xb66a };
+#elif GMP_NUMB_BITS == 32
+const mp_limb_t mpfr_l2b_52_0__tab[] = { 0xb66a0200 };
+#elif GMP_NUMB_BITS == 64
+const mp_limb_t mpfr_l2b_52_0__tab[] = { 0xb66a020000000000 };
+#elif GMP_NUMB_BITS == 96
+const mp_limb_t mpfr_l2b_52_0__tab[] = { 0xb66a02000000000000000000 };
+#elif GMP_NUMB_BITS == 128
+const mp_limb_t mpfr_l2b_52_0__tab[] = { 0xb66a0200000000000000000000000000 };
+#elif GMP_NUMB_BITS == 256
+const mp_limb_t mpfr_l2b_52_0__tab[] = { 0xb66a020000000000000000000000000000000000000000000000000000000000 };
+#endif
+
+#if 0
+#elif GMP_NUMB_BITS == 16
+const mp_limb_t mpfr_l2b_52_1__tab[] = { 0x4608, 0xfcb3, 0xeecf, 0xa0bb, 0xb3a2 };
+#elif GMP_NUMB_BITS == 32
+const mp_limb_t mpfr_l2b_52_1__tab[] = { 0x46080000, 0xeecffcb3, 0xb3a2a0bb };
+#elif GMP_NUMB_BITS == 64
+const mp_limb_t mpfr_l2b_52_1__tab[] = { 0x4608000000000000, 0xb3a2a0bbeecffcb3 };
+#elif GMP_NUMB_BITS == 96
+const mp_limb_t mpfr_l2b_52_1__tab[] = { 0xb3a2a0bbeecffcb346080000 };
+#elif GMP_NUMB_BITS == 128
+const mp_limb_t mpfr_l2b_52_1__tab[] = { 0xb3a2a0bbeecffcb34608000000000000 };
+#elif GMP_NUMB_BITS == 256
+const mp_limb_t mpfr_l2b_52_1__tab[] = { 0xb3a2a0bbeecffcb3460800000000000000000000000000000000000000000000 };
+#endif
+
+#if 0
+#elif GMP_NUMB_BITS == 16
+const mp_limb_t mpfr_l2b_53_0__tab[] = { 0x2000, 0xb74b };
+#elif GMP_NUMB_BITS == 32
+const mp_limb_t mpfr_l2b_53_0__tab[] = { 0xb74b2000 };
+#elif GMP_NUMB_BITS == 64
+const mp_limb_t mpfr_l2b_53_0__tab[] = { 0xb74b200000000000 };
+#elif GMP_NUMB_BITS == 96
+const mp_limb_t mpfr_l2b_53_0__tab[] = { 0xb74b20000000000000000000 };
+#elif GMP_NUMB_BITS == 128
+const mp_limb_t mpfr_l2b_53_0__tab[] = { 0xb74b2000000000000000000000000000 };
+#elif GMP_NUMB_BITS == 256
+const mp_limb_t mpfr_l2b_53_0__tab[] = { 0xb74b200000000000000000000000000000000000000000000000000000000000 };
+#endif
+
+#if 0
+#elif GMP_NUMB_BITS == 16
+const mp_limb_t mpfr_l2b_53_1__tab[] = { 0xa360, 0x8ccb, 0xeb5f, 0xffa9, 0xb2c5 };
+#elif GMP_NUMB_BITS == 32
+const mp_limb_t mpfr_l2b_53_1__tab[] = { 0xa3600000, 0xeb5f8ccb, 0xb2c5ffa9 };
+#elif GMP_NUMB_BITS == 64
+const mp_limb_t mpfr_l2b_53_1__tab[] = { 0xa360000000000000, 0xb2c5ffa9eb5f8ccb };
+#elif GMP_NUMB_BITS == 96
+const mp_limb_t mpfr_l2b_53_1__tab[] = { 0xb2c5ffa9eb5f8ccba3600000 };
+#elif GMP_NUMB_BITS == 128
+const mp_limb_t mpfr_l2b_53_1__tab[] = { 0xb2c5ffa9eb5f8ccba360000000000000 };
+#elif GMP_NUMB_BITS == 256
+const mp_limb_t mpfr_l2b_53_1__tab[] = { 0xb2c5ffa9eb5f8ccba36000000000000000000000000000000000000000000000 };
+#endif
+
+#if 0
+#elif GMP_NUMB_BITS == 16
+const mp_limb_t mpfr_l2b_54_0__tab[] = { 0x0a00, 0xb828 };
+#elif GMP_NUMB_BITS == 32
+const mp_limb_t mpfr_l2b_54_0__tab[] = { 0xb8280a00 };
+#elif GMP_NUMB_BITS == 64
+const mp_limb_t mpfr_l2b_54_0__tab[] = { 0xb8280a0000000000 };
+#elif GMP_NUMB_BITS == 96
+const mp_limb_t mpfr_l2b_54_0__tab[] = { 0xb8280a000000000000000000 };
+#elif GMP_NUMB_BITS == 128
+const mp_limb_t mpfr_l2b_54_0__tab[] = { 0xb8280a00000000000000000000000000 };
+#elif GMP_NUMB_BITS == 256
+const mp_limb_t mpfr_l2b_54_0__tab[] = { 0xb8280a0000000000000000000000000000000000000000000000000000000000 };
+#endif
+
+#if 0
+#elif GMP_NUMB_BITS == 16
+const mp_limb_t mpfr_l2b_54_1__tab[] = { 0xf368, 0xe940, 0x3e86, 0x8ac3, 0xb1ef };
+#elif GMP_NUMB_BITS == 32
+const mp_limb_t mpfr_l2b_54_1__tab[] = { 0xf3680000, 0x3e86e940, 0xb1ef8ac3 };
+#elif GMP_NUMB_BITS == 64
+const mp_limb_t mpfr_l2b_54_1__tab[] = { 0xf368000000000000, 0xb1ef8ac33e86e940 };
+#elif GMP_NUMB_BITS == 96
+const mp_limb_t mpfr_l2b_54_1__tab[] = { 0xb1ef8ac33e86e940f3680000 };
+#elif GMP_NUMB_BITS == 128
+const mp_limb_t mpfr_l2b_54_1__tab[] = { 0xb1ef8ac33e86e940f368000000000000 };
+#elif GMP_NUMB_BITS == 256
+const mp_limb_t mpfr_l2b_54_1__tab[] = { 0xb1ef8ac33e86e940f36800000000000000000000000000000000000000000000 };
+#endif
+
+#if 0
+#elif GMP_NUMB_BITS == 16
+const mp_limb_t mpfr_l2b_55_0__tab[] = { 0xe800, 0xb900 };
+#elif GMP_NUMB_BITS == 32
+const mp_limb_t mpfr_l2b_55_0__tab[] = { 0xb900e800 };
+#elif GMP_NUMB_BITS == 64
+const mp_limb_t mpfr_l2b_55_0__tab[] = { 0xb900e80000000000 };
+#elif GMP_NUMB_BITS == 96
+const mp_limb_t mpfr_l2b_55_0__tab[] = { 0xb900e8000000000000000000 };
+#elif GMP_NUMB_BITS == 128
+const mp_limb_t mpfr_l2b_55_0__tab[] = { 0xb900e800000000000000000000000000 };
+#elif GMP_NUMB_BITS == 256
+const mp_limb_t mpfr_l2b_55_0__tab[] = { 0xb900e80000000000000000000000000000000000000000000000000000000000 };
+#endif
+
+#if 0
+#elif GMP_NUMB_BITS == 16
+const mp_limb_t mpfr_l2b_55_1__tab[] = { 0x7a40, 0xd18e, 0xa4b5, 0xf76e, 0xb11e };
+#elif GMP_NUMB_BITS == 32
+const mp_limb_t mpfr_l2b_55_1__tab[] = { 0x7a400000, 0xa4b5d18e, 0xb11ef76e };
+#elif GMP_NUMB_BITS == 64
+const mp_limb_t mpfr_l2b_55_1__tab[] = { 0x7a40000000000000, 0xb11ef76ea4b5d18e };
+#elif GMP_NUMB_BITS == 96
+const mp_limb_t mpfr_l2b_55_1__tab[] = { 0xb11ef76ea4b5d18e7a400000 };
+#elif GMP_NUMB_BITS == 128
+const mp_limb_t mpfr_l2b_55_1__tab[] = { 0xb11ef76ea4b5d18e7a40000000000000 };
+#elif GMP_NUMB_BITS == 256
+const mp_limb_t mpfr_l2b_55_1__tab[] = { 0xb11ef76ea4b5d18e7a4000000000000000000000000000000000000000000000 };
+#endif
+
+#if 0
+#elif GMP_NUMB_BITS == 16
+const mp_limb_t mpfr_l2b_56_0__tab[] = { 0xda00, 0xb9d5 };
+#elif GMP_NUMB_BITS == 32
+const mp_limb_t mpfr_l2b_56_0__tab[] = { 0xb9d5da00 };
+#elif GMP_NUMB_BITS == 64
+const mp_limb_t mpfr_l2b_56_0__tab[] = { 0xb9d5da0000000000 };
+#elif GMP_NUMB_BITS == 96
+const mp_limb_t mpfr_l2b_56_0__tab[] = { 0xb9d5da000000000000000000 };
+#elif GMP_NUMB_BITS == 128
+const mp_limb_t mpfr_l2b_56_0__tab[] = { 0xb9d5da00000000000000000000000000 };
+#elif GMP_NUMB_BITS == 256
+const mp_limb_t mpfr_l2b_56_0__tab[] = { 0xb9d5da0000000000000000000000000000000000000000000000000000000000 };
+#endif
+
+#if 0
+#elif GMP_NUMB_BITS == 16
+const mp_limb_t mpfr_l2b_56_1__tab[] = { 0xe818, 0x4c7b, 0xaa2c, 0xfff2, 0xb053 };
+#elif GMP_NUMB_BITS == 32
+const mp_limb_t mpfr_l2b_56_1__tab[] = { 0xe8180000, 0xaa2c4c7b, 0xb053fff2 };
+#elif GMP_NUMB_BITS == 64
+const mp_limb_t mpfr_l2b_56_1__tab[] = { 0xe818000000000000, 0xb053fff2aa2c4c7b };
+#elif GMP_NUMB_BITS == 96
+const mp_limb_t mpfr_l2b_56_1__tab[] = { 0xb053fff2aa2c4c7be8180000 };
+#elif GMP_NUMB_BITS == 128
+const mp_limb_t mpfr_l2b_56_1__tab[] = { 0xb053fff2aa2c4c7be818000000000000 };
+#elif GMP_NUMB_BITS == 256
+const mp_limb_t mpfr_l2b_56_1__tab[] = { 0xb053fff2aa2c4c7be81800000000000000000000000000000000000000000000 };
+#endif
+
+#if 0
+#elif GMP_NUMB_BITS == 16
+const mp_limb_t mpfr_l2b_57_0__tab[] = { 0x0a00, 0xbaa7 };
+#elif GMP_NUMB_BITS == 32
+const mp_limb_t mpfr_l2b_57_0__tab[] = { 0xbaa70a00 };
+#elif GMP_NUMB_BITS == 64
+const mp_limb_t mpfr_l2b_57_0__tab[] = { 0xbaa70a0000000000 };
+#elif GMP_NUMB_BITS == 96
+const mp_limb_t mpfr_l2b_57_0__tab[] = { 0xbaa70a000000000000000000 };
+#elif GMP_NUMB_BITS == 128
+const mp_limb_t mpfr_l2b_57_0__tab[] = { 0xbaa70a00000000000000000000000000 };
+#elif GMP_NUMB_BITS == 256
+const mp_limb_t mpfr_l2b_57_0__tab[] = { 0xbaa70a0000000000000000000000000000000000000000000000000000000000 };
+#endif
+
+#if 0
+#elif GMP_NUMB_BITS == 16
+const mp_limb_t mpfr_l2b_57_1__tab[] = { 0xefb0, 0x814f, 0x8e2f, 0x630e, 0xaf8e };
+#elif GMP_NUMB_BITS == 32
+const mp_limb_t mpfr_l2b_57_1__tab[] = { 0xefb00000, 0x8e2f814f, 0xaf8e630e };
+#elif GMP_NUMB_BITS == 64
+const mp_limb_t mpfr_l2b_57_1__tab[] = { 0xefb0000000000000, 0xaf8e630e8e2f814f };
+#elif GMP_NUMB_BITS == 96
+const mp_limb_t mpfr_l2b_57_1__tab[] = { 0xaf8e630e8e2f814fefb00000 };
+#elif GMP_NUMB_BITS == 128
+const mp_limb_t mpfr_l2b_57_1__tab[] = { 0xaf8e630e8e2f814fefb0000000000000 };
+#elif GMP_NUMB_BITS == 256
+const mp_limb_t mpfr_l2b_57_1__tab[] = { 0xaf8e630e8e2f814fefb000000000000000000000000000000000000000000000 };
+#endif
+
+#if 0
+#elif GMP_NUMB_BITS == 16
+const mp_limb_t mpfr_l2b_58_0__tab[] = { 0x9600, 0xbb74 };
+#elif GMP_NUMB_BITS == 32
+const mp_limb_t mpfr_l2b_58_0__tab[] = { 0xbb749600 };
+#elif GMP_NUMB_BITS == 64
+const mp_limb_t mpfr_l2b_58_0__tab[] = { 0xbb74960000000000 };
+#elif GMP_NUMB_BITS == 96
+const mp_limb_t mpfr_l2b_58_0__tab[] = { 0xbb7496000000000000000000 };
+#elif GMP_NUMB_BITS == 128
+const mp_limb_t mpfr_l2b_58_0__tab[] = { 0xbb749600000000000000000000000000 };
+#elif GMP_NUMB_BITS == 256
+const mp_limb_t mpfr_l2b_58_0__tab[] = { 0xbb74960000000000000000000000000000000000000000000000000000000000 };
+#endif
+
+#if 0
+#elif GMP_NUMB_BITS == 16
+const mp_limb_t mpfr_l2b_58_1__tab[] = { 0x5d18, 0x41a1, 0x6114, 0xe39d, 0xaecd };
+#elif GMP_NUMB_BITS == 32
+const mp_limb_t mpfr_l2b_58_1__tab[] = { 0x5d180000, 0x611441a1, 0xaecde39d };
+#elif GMP_NUMB_BITS == 64
+const mp_limb_t mpfr_l2b_58_1__tab[] = { 0x5d18000000000000, 0xaecde39d611441a1 };
+#elif GMP_NUMB_BITS == 96
+const mp_limb_t mpfr_l2b_58_1__tab[] = { 0xaecde39d611441a15d180000 };
+#elif GMP_NUMB_BITS == 128
+const mp_limb_t mpfr_l2b_58_1__tab[] = { 0xaecde39d611441a15d18000000000000 };
+#elif GMP_NUMB_BITS == 256
+const mp_limb_t mpfr_l2b_58_1__tab[] = { 0xaecde39d611441a15d1800000000000000000000000000000000000000000000 };
+#endif
+
+#if 0
+#elif GMP_NUMB_BITS == 16
+const mp_limb_t mpfr_l2b_59_0__tab[] = { 0x9e00, 0xbc3e };
+#elif GMP_NUMB_BITS == 32
+const mp_limb_t mpfr_l2b_59_0__tab[] = { 0xbc3e9e00 };
+#elif GMP_NUMB_BITS == 64
+const mp_limb_t mpfr_l2b_59_0__tab[] = { 0xbc3e9e0000000000 };
+#elif GMP_NUMB_BITS == 96
+const mp_limb_t mpfr_l2b_59_0__tab[] = { 0xbc3e9e000000000000000000 };
+#elif GMP_NUMB_BITS == 128
+const mp_limb_t mpfr_l2b_59_0__tab[] = { 0xbc3e9e00000000000000000000000000 };
+#elif GMP_NUMB_BITS == 256
+const mp_limb_t mpfr_l2b_59_0__tab[] = { 0xbc3e9e0000000000000000000000000000000000000000000000000000000000 };
+#endif
+
+#if 0
+#elif GMP_NUMB_BITS == 16
+const mp_limb_t mpfr_l2b_59_1__tab[] = { 0xd000, 0x97df, 0x2f97, 0x4842, 0xae12 };
+#elif GMP_NUMB_BITS == 32
+const mp_limb_t mpfr_l2b_59_1__tab[] = { 0xd0000000, 0x2f9797df, 0xae124842 };
+#elif GMP_NUMB_BITS == 64
+const mp_limb_t mpfr_l2b_59_1__tab[] = { 0xd000000000000000, 0xae1248422f9797df };
+#elif GMP_NUMB_BITS == 96
+const mp_limb_t mpfr_l2b_59_1__tab[] = { 0xae1248422f9797dfd0000000 };
+#elif GMP_NUMB_BITS == 128
+const mp_limb_t mpfr_l2b_59_1__tab[] = { 0xae1248422f9797dfd000000000000000 };
+#elif GMP_NUMB_BITS == 256
+const mp_limb_t mpfr_l2b_59_1__tab[] = { 0xae1248422f9797dfd00000000000000000000000000000000000000000000000 };
+#endif
+
+#if 0
+#elif GMP_NUMB_BITS == 16
+const mp_limb_t mpfr_l2b_60_0__tab[] = { 0x4000, 0xbd05 };
+#elif GMP_NUMB_BITS == 32
+const mp_limb_t mpfr_l2b_60_0__tab[] = { 0xbd054000 };
+#elif GMP_NUMB_BITS == 64
+const mp_limb_t mpfr_l2b_60_0__tab[] = { 0xbd05400000000000 };
+#elif GMP_NUMB_BITS == 96
+const mp_limb_t mpfr_l2b_60_0__tab[] = { 0xbd0540000000000000000000 };
+#elif GMP_NUMB_BITS == 128
+const mp_limb_t mpfr_l2b_60_0__tab[] = { 0xbd054000000000000000000000000000 };
+#elif GMP_NUMB_BITS == 256
+const mp_limb_t mpfr_l2b_60_0__tab[] = { 0xbd05400000000000000000000000000000000000000000000000000000000000 };
+#endif
+
+#if 0
+#elif GMP_NUMB_BITS == 16
+const mp_limb_t mpfr_l2b_60_1__tab[] = { 0xfe58, 0x206d, 0x3555, 0x5b1c, 0xad5b };
+#elif GMP_NUMB_BITS == 32
+const mp_limb_t mpfr_l2b_60_1__tab[] = { 0xfe580000, 0x3555206d, 0xad5b5b1c };
+#elif GMP_NUMB_BITS == 64
+const mp_limb_t mpfr_l2b_60_1__tab[] = { 0xfe58000000000000, 0xad5b5b1c3555206d };
+#elif GMP_NUMB_BITS == 96
+const mp_limb_t mpfr_l2b_60_1__tab[] = { 0xad5b5b1c3555206dfe580000 };
+#elif GMP_NUMB_BITS == 128
+const mp_limb_t mpfr_l2b_60_1__tab[] = { 0xad5b5b1c3555206dfe58000000000000 };
+#elif GMP_NUMB_BITS == 256
+const mp_limb_t mpfr_l2b_60_1__tab[] = { 0xad5b5b1c3555206dfe5800000000000000000000000000000000000000000000 };
+#endif
+
+#if 0
+#elif GMP_NUMB_BITS == 16
+const mp_limb_t mpfr_l2b_61_0__tab[] = { 0x9a00, 0xbdc8 };
+#elif GMP_NUMB_BITS == 32
+const mp_limb_t mpfr_l2b_61_0__tab[] = { 0xbdc89a00 };
+#elif GMP_NUMB_BITS == 64
+const mp_limb_t mpfr_l2b_61_0__tab[] = { 0xbdc89a0000000000 };
+#elif GMP_NUMB_BITS == 96
+const mp_limb_t mpfr_l2b_61_0__tab[] = { 0xbdc89a000000000000000000 };
+#elif GMP_NUMB_BITS == 128
+const mp_limb_t mpfr_l2b_61_0__tab[] = { 0xbdc89a00000000000000000000000000 };
+#elif GMP_NUMB_BITS == 256
+const mp_limb_t mpfr_l2b_61_0__tab[] = { 0xbdc89a0000000000000000000000000000000000000000000000000000000000 };
+#endif
+
+#if 0
+#elif GMP_NUMB_BITS == 16
+const mp_limb_t mpfr_l2b_61_1__tab[] = { 0x4df8, 0x7757, 0x31cb, 0xe982, 0xaca8 };
+#elif GMP_NUMB_BITS == 32
+const mp_limb_t mpfr_l2b_61_1__tab[] = { 0x4df80000, 0x31cb7757, 0xaca8e982 };
+#elif GMP_NUMB_BITS == 64
+const mp_limb_t mpfr_l2b_61_1__tab[] = { 0x4df8000000000000, 0xaca8e98231cb7757 };
+#elif GMP_NUMB_BITS == 96
+const mp_limb_t mpfr_l2b_61_1__tab[] = { 0xaca8e98231cb77574df80000 };
+#elif GMP_NUMB_BITS == 128
+const mp_limb_t mpfr_l2b_61_1__tab[] = { 0xaca8e98231cb77574df8000000000000 };
+#elif GMP_NUMB_BITS == 256
+const mp_limb_t mpfr_l2b_61_1__tab[] = { 0xaca8e98231cb77574df800000000000000000000000000000000000000000000 };
+#endif
+
+#if 0
+#elif GMP_NUMB_BITS == 16
+const mp_limb_t mpfr_l2b_62_0__tab[] = { 0xc800, 0xbe88 };
+#elif GMP_NUMB_BITS == 32
+const mp_limb_t mpfr_l2b_62_0__tab[] = { 0xbe88c800 };
+#elif GMP_NUMB_BITS == 64
+const mp_limb_t mpfr_l2b_62_0__tab[] = { 0xbe88c80000000000 };
+#elif GMP_NUMB_BITS == 96
+const mp_limb_t mpfr_l2b_62_0__tab[] = { 0xbe88c8000000000000000000 };
+#elif GMP_NUMB_BITS == 128
+const mp_limb_t mpfr_l2b_62_0__tab[] = { 0xbe88c800000000000000000000000000 };
+#elif GMP_NUMB_BITS == 256
+const mp_limb_t mpfr_l2b_62_0__tab[] = { 0xbe88c80000000000000000000000000000000000000000000000000000000000 };
+#endif
+
+#if 0
+#elif GMP_NUMB_BITS == 16
+const mp_limb_t mpfr_l2b_62_1__tab[] = { 0x74f8, 0xf905, 0x1831, 0xc3c4, 0xabfa };
+#elif GMP_NUMB_BITS == 32
+const mp_limb_t mpfr_l2b_62_1__tab[] = { 0x74f80000, 0x1831f905, 0xabfac3c4 };
+#elif GMP_NUMB_BITS == 64
+const mp_limb_t mpfr_l2b_62_1__tab[] = { 0x74f8000000000000, 0xabfac3c41831f905 };
+#elif GMP_NUMB_BITS == 96
+const mp_limb_t mpfr_l2b_62_1__tab[] = { 0xabfac3c41831f90574f80000 };
+#elif GMP_NUMB_BITS == 128
+const mp_limb_t mpfr_l2b_62_1__tab[] = { 0xabfac3c41831f90574f8000000000000 };
+#elif GMP_NUMB_BITS == 256
+const mp_limb_t mpfr_l2b_62_1__tab[] = { 0xabfac3c41831f90574f800000000000000000000000000000000000000000000 };
+#endif
+
+const __mpfr_struct __gmpfr_l2b[BASE_MAX-1][2] = {
+ { { 23, 1, 1, (mp_limb_t *) mpfr_l2b_2_0__tab },
+ { 77, 1, 1, (mp_limb_t *) mpfr_l2b_2_1__tab } },
+ { { 23, 1, 1, (mp_limb_t *) mpfr_l2b_3_0__tab },
+ { 77, 1, 0, (mp_limb_t *) mpfr_l2b_3_1__tab } },
+ { { 23, 1, 2, (mp_limb_t *) mpfr_l2b_4_0__tab },
+ { 77, 1, 0, (mp_limb_t *) mpfr_l2b_4_1__tab } },
+ { { 23, 1, 2, (mp_limb_t *) mpfr_l2b_5_0__tab },
+ { 77, 1, -1, (mp_limb_t *) mpfr_l2b_5_1__tab } },
+ { { 23, 1, 2, (mp_limb_t *) mpfr_l2b_6_0__tab },
+ { 77, 1, -1, (mp_limb_t *) mpfr_l2b_6_1__tab } },
+ { { 23, 1, 2, (mp_limb_t *) mpfr_l2b_7_0__tab },
+ { 77, 1, -1, (mp_limb_t *) mpfr_l2b_7_1__tab } },
+ { { 23, 1, 2, (mp_limb_t *) mpfr_l2b_8_0__tab },
+ { 77, 1, -1, (mp_limb_t *) mpfr_l2b_8_1__tab } },
+ { { 23, 1, 2, (mp_limb_t *) mpfr_l2b_9_0__tab },
+ { 77, 1, -1, (mp_limb_t *) mpfr_l2b_9_1__tab } },
+ { { 23, 1, 2, (mp_limb_t *) mpfr_l2b_10_0__tab },
+ { 77, 1, -1, (mp_limb_t *) mpfr_l2b_10_1__tab } },
+ { { 23, 1, 2, (mp_limb_t *) mpfr_l2b_11_0__tab },
+ { 77, 1, -1, (mp_limb_t *) mpfr_l2b_11_1__tab } },
+ { { 23, 1, 2, (mp_limb_t *) mpfr_l2b_12_0__tab },
+ { 77, 1, -1, (mp_limb_t *) mpfr_l2b_12_1__tab } },
+ { { 23, 1, 2, (mp_limb_t *) mpfr_l2b_13_0__tab },
+ { 77, 1, -1, (mp_limb_t *) mpfr_l2b_13_1__tab } },
+ { { 23, 1, 2, (mp_limb_t *) mpfr_l2b_14_0__tab },
+ { 77, 1, -1, (mp_limb_t *) mpfr_l2b_14_1__tab } },
+ { { 23, 1, 2, (mp_limb_t *) mpfr_l2b_15_0__tab },
+ { 77, 1, -1, (mp_limb_t *) mpfr_l2b_15_1__tab } },
+ { { 23, 1, 3, (mp_limb_t *) mpfr_l2b_16_0__tab },
+ { 77, 1, -1, (mp_limb_t *) mpfr_l2b_16_1__tab } },
+ { { 23, 1, 3, (mp_limb_t *) mpfr_l2b_17_0__tab },
+ { 77, 1, -2, (mp_limb_t *) mpfr_l2b_17_1__tab } },
+ { { 23, 1, 3, (mp_limb_t *) mpfr_l2b_18_0__tab },
+ { 77, 1, -2, (mp_limb_t *) mpfr_l2b_18_1__tab } },
+ { { 23, 1, 3, (mp_limb_t *) mpfr_l2b_19_0__tab },
+ { 77, 1, -2, (mp_limb_t *) mpfr_l2b_19_1__tab } },
+ { { 23, 1, 3, (mp_limb_t *) mpfr_l2b_20_0__tab },
+ { 77, 1, -2, (mp_limb_t *) mpfr_l2b_20_1__tab } },
+ { { 23, 1, 3, (mp_limb_t *) mpfr_l2b_21_0__tab },
+ { 77, 1, -2, (mp_limb_t *) mpfr_l2b_21_1__tab } },
+ { { 23, 1, 3, (mp_limb_t *) mpfr_l2b_22_0__tab },
+ { 77, 1, -2, (mp_limb_t *) mpfr_l2b_22_1__tab } },
+ { { 23, 1, 3, (mp_limb_t *) mpfr_l2b_23_0__tab },
+ { 77, 1, -2, (mp_limb_t *) mpfr_l2b_23_1__tab } },
+ { { 23, 1, 3, (mp_limb_t *) mpfr_l2b_24_0__tab },
+ { 77, 1, -2, (mp_limb_t *) mpfr_l2b_24_1__tab } },
+ { { 23, 1, 3, (mp_limb_t *) mpfr_l2b_25_0__tab },
+ { 77, 1, -2, (mp_limb_t *) mpfr_l2b_25_1__tab } },
+ { { 23, 1, 3, (mp_limb_t *) mpfr_l2b_26_0__tab },
+ { 77, 1, -2, (mp_limb_t *) mpfr_l2b_26_1__tab } },
+ { { 23, 1, 3, (mp_limb_t *) mpfr_l2b_27_0__tab },
+ { 77, 1, -2, (mp_limb_t *) mpfr_l2b_27_1__tab } },
+ { { 23, 1, 3, (mp_limb_t *) mpfr_l2b_28_0__tab },
+ { 77, 1, -2, (mp_limb_t *) mpfr_l2b_28_1__tab } },
+ { { 23, 1, 3, (mp_limb_t *) mpfr_l2b_29_0__tab },
+ { 77, 1, -2, (mp_limb_t *) mpfr_l2b_29_1__tab } },
+ { { 23, 1, 3, (mp_limb_t *) mpfr_l2b_30_0__tab },
+ { 77, 1, -2, (mp_limb_t *) mpfr_l2b_30_1__tab } },
+ { { 23, 1, 3, (mp_limb_t *) mpfr_l2b_31_0__tab },
+ { 77, 1, -2, (mp_limb_t *) mpfr_l2b_31_1__tab } },
+ { { 23, 1, 3, (mp_limb_t *) mpfr_l2b_32_0__tab },
+ { 77, 1, -2, (mp_limb_t *) mpfr_l2b_32_1__tab } },
+ { { 23, 1, 3, (mp_limb_t *) mpfr_l2b_33_0__tab },
+ { 77, 1, -2, (mp_limb_t *) mpfr_l2b_33_1__tab } },
+ { { 23, 1, 3, (mp_limb_t *) mpfr_l2b_34_0__tab },
+ { 77, 1, -2, (mp_limb_t *) mpfr_l2b_34_1__tab } },
+ { { 23, 1, 3, (mp_limb_t *) mpfr_l2b_35_0__tab },
+ { 77, 1, -2, (mp_limb_t *) mpfr_l2b_35_1__tab } },
+ { { 23, 1, 3, (mp_limb_t *) mpfr_l2b_36_0__tab },
+ { 77, 1, -2, (mp_limb_t *) mpfr_l2b_36_1__tab } },
+ { { 23, 1, 3, (mp_limb_t *) mpfr_l2b_37_0__tab },
+ { 77, 1, -2, (mp_limb_t *) mpfr_l2b_37_1__tab } },
+ { { 23, 1, 3, (mp_limb_t *) mpfr_l2b_38_0__tab },
+ { 77, 1, -2, (mp_limb_t *) mpfr_l2b_38_1__tab } },
+ { { 23, 1, 3, (mp_limb_t *) mpfr_l2b_39_0__tab },
+ { 77, 1, -2, (mp_limb_t *) mpfr_l2b_39_1__tab } },
+ { { 23, 1, 3, (mp_limb_t *) mpfr_l2b_40_0__tab },
+ { 77, 1, -2, (mp_limb_t *) mpfr_l2b_40_1__tab } },
+ { { 23, 1, 3, (mp_limb_t *) mpfr_l2b_41_0__tab },
+ { 77, 1, -2, (mp_limb_t *) mpfr_l2b_41_1__tab } },
+ { { 23, 1, 3, (mp_limb_t *) mpfr_l2b_42_0__tab },
+ { 77, 1, -2, (mp_limb_t *) mpfr_l2b_42_1__tab } },
+ { { 23, 1, 3, (mp_limb_t *) mpfr_l2b_43_0__tab },
+ { 77, 1, -2, (mp_limb_t *) mpfr_l2b_43_1__tab } },
+ { { 23, 1, 3, (mp_limb_t *) mpfr_l2b_44_0__tab },
+ { 77, 1, -2, (mp_limb_t *) mpfr_l2b_44_1__tab } },
+ { { 23, 1, 3, (mp_limb_t *) mpfr_l2b_45_0__tab },
+ { 77, 1, -2, (mp_limb_t *) mpfr_l2b_45_1__tab } },
+ { { 23, 1, 3, (mp_limb_t *) mpfr_l2b_46_0__tab },
+ { 77, 1, -2, (mp_limb_t *) mpfr_l2b_46_1__tab } },
+ { { 23, 1, 3, (mp_limb_t *) mpfr_l2b_47_0__tab },
+ { 77, 1, -2, (mp_limb_t *) mpfr_l2b_47_1__tab } },
+ { { 23, 1, 3, (mp_limb_t *) mpfr_l2b_48_0__tab },
+ { 77, 1, -2, (mp_limb_t *) mpfr_l2b_48_1__tab } },
+ { { 23, 1, 3, (mp_limb_t *) mpfr_l2b_49_0__tab },
+ { 77, 1, -2, (mp_limb_t *) mpfr_l2b_49_1__tab } },
+ { { 23, 1, 3, (mp_limb_t *) mpfr_l2b_50_0__tab },
+ { 77, 1, -2, (mp_limb_t *) mpfr_l2b_50_1__tab } },
+ { { 23, 1, 3, (mp_limb_t *) mpfr_l2b_51_0__tab },
+ { 77, 1, -2, (mp_limb_t *) mpfr_l2b_51_1__tab } },
+ { { 23, 1, 3, (mp_limb_t *) mpfr_l2b_52_0__tab },
+ { 77, 1, -2, (mp_limb_t *) mpfr_l2b_52_1__tab } },
+ { { 23, 1, 3, (mp_limb_t *) mpfr_l2b_53_0__tab },
+ { 77, 1, -2, (mp_limb_t *) mpfr_l2b_53_1__tab } },
+ { { 23, 1, 3, (mp_limb_t *) mpfr_l2b_54_0__tab },
+ { 77, 1, -2, (mp_limb_t *) mpfr_l2b_54_1__tab } },
+ { { 23, 1, 3, (mp_limb_t *) mpfr_l2b_55_0__tab },
+ { 77, 1, -2, (mp_limb_t *) mpfr_l2b_55_1__tab } },
+ { { 23, 1, 3, (mp_limb_t *) mpfr_l2b_56_0__tab },
+ { 77, 1, -2, (mp_limb_t *) mpfr_l2b_56_1__tab } },
+ { { 23, 1, 3, (mp_limb_t *) mpfr_l2b_57_0__tab },
+ { 77, 1, -2, (mp_limb_t *) mpfr_l2b_57_1__tab } },
+ { { 23, 1, 3, (mp_limb_t *) mpfr_l2b_58_0__tab },
+ { 77, 1, -2, (mp_limb_t *) mpfr_l2b_58_1__tab } },
+ { { 23, 1, 3, (mp_limb_t *) mpfr_l2b_59_0__tab },
+ { 77, 1, -2, (mp_limb_t *) mpfr_l2b_59_1__tab } },
+ { { 23, 1, 3, (mp_limb_t *) mpfr_l2b_60_0__tab },
+ { 77, 1, -2, (mp_limb_t *) mpfr_l2b_60_1__tab } },
+ { { 23, 1, 3, (mp_limb_t *) mpfr_l2b_61_0__tab },
+ { 77, 1, -2, (mp_limb_t *) mpfr_l2b_61_1__tab } },
+ { { 23, 1, 3, (mp_limb_t *) mpfr_l2b_62_0__tab },
+ { 77, 1, -2, (mp_limb_t *) mpfr_l2b_62_1__tab } } };
+
+/***************************************************************************/
+
+/* returns ceil(e * log2(b)^((-1)^i)), or ... + 1.
+ For i=0, uses a 23-bit upper approximation to log(beta)/log(2).
+ For i=1, uses a 76-bit upper approximation to log(2)/log(beta).
+*/
+static mpfr_exp_t
+ceil_mul (mpfr_exp_t e, int beta, int i)
+{
+ mpfr_srcptr p;
+ mpfr_t t;
+ mpfr_exp_t r;
+
+ p = &__gmpfr_l2b[beta-2][i];
+ mpfr_init2 (t, sizeof (mpfr_exp_t) * CHAR_BIT);
+ mpfr_set_exp_t (t, e, MPFR_RNDU);
+ mpfr_mul (t, t, p, MPFR_RNDU);
+ r = mpfr_get_exp_t (t, MPFR_RNDU);
+ mpfr_clear (t);
+ return r;
+}
+
+/* prints the mantissa of x in the string s, and writes the corresponding
+ exponent in e.
+ x is rounded with direction rnd, m is the number of digits of the mantissa,
+ b is the given base (2 <= b <= 36).
+
+ Return value:
+ if s=NULL, allocates a string to store the mantissa, with
+ m characters, plus a final '\0', plus a possible minus sign
+ (thus m+1 or m+2 characters).
+
+   Important: when you call this function with s=NULL, don't forget to free
+   the memory space allocated, with mpfr_free_str.
+*/
+char*
+mpfr_get_str (char *s, mpfr_exp_t *e, int b, size_t m, mpfr_srcptr x, mpfr_rnd_t rnd)
+{
+ const char *num_to_text;
+ int exact; /* exact result */
+ mpfr_exp_t exp, g;
+ mpfr_exp_t prec; /* precision of the computation */
+ long err;
+ mp_limb_t *a;
+ mpfr_exp_t exp_a;
+ mp_limb_t *result;
+ mp_limb_t *xp;
+ mp_limb_t *reste;
+ size_t nx, nx1;
+ size_t n, i;
+ char *s0;
+ int neg;
+ int ret; /* return value of mpfr_get_str_aux */
+ MPFR_ZIV_DECL (loop);
+ MPFR_SAVE_EXPO_DECL (expo);
+ MPFR_TMP_DECL(marker);
+
+ /* if exact = 1 then err is undefined */
+ /* otherwise err is such that |x*b^(m-g)-a*2^exp_a| < 2^(err+exp_a) */
+
+ /* is the base valid? */
+ if (b < 2 || b > 62)
+ return NULL;
+
+ num_to_text = b < 37 ? num_to_text36 : num_to_text62;
+
+ if (MPFR_UNLIKELY (MPFR_IS_NAN (x)))
+ {
+ if (s == NULL)
+ s = (char *) (*__gmp_allocate_func) (6);
+ strcpy (s, "@NaN@");
+ return s;
+ }
+
+ neg = MPFR_SIGN(x) < 0; /* 0 if positive, 1 if negative */
+
+ if (MPFR_UNLIKELY (MPFR_IS_INF (x)))
+ {
+ if (s == NULL)
+ s = (char *) (*__gmp_allocate_func) (neg + 6);
+ strcpy (s, (neg) ? "-@Inf@" : "@Inf@");
+ return s;
+ }
+
+ MPFR_SAVE_EXPO_MARK (expo); /* needed for ceil_mul (at least) */
+
+ if (m == 0)
+ {
+
+ /* take at least 1 + ceil(n*log(2)/log(b)) digits, where n is the
+ number of bits of the mantissa, to ensure back conversion from
+ the output gives the same floating-point.
+
+ Warning: if b = 2^k, this may be too large. The worst case is when
+ the first base-b digit contains only one bit, so we get
+ 1 + ceil((n-1)/k) = 2 + floor((n-2)/k) instead.
+ */
+ m = 1 + ceil_mul (IS_POW2(b) ? MPFR_PREC(x) - 1 : MPFR_PREC(x), b, 1);
+ if (m < 2)
+ m = 2;
+ }
+
+ /* the code below for non-power-of-two bases works for m=1 */
+ MPFR_ASSERTN (m >= 2 || (IS_POW2(b) == 0 && m >= 1));
+
+ /* x is a floating-point number */
+
+ if (MPFR_IS_ZERO(x))
+ {
+ if (s == NULL)
+ s = (char*) (*__gmp_allocate_func) (neg + m + 1);
+ s0 = s;
+ if (neg)
+ *s++ = '-';
+ memset (s, '0', m);
+ s[m] = '\0';
+ *e = 0; /* a bit like frexp() in ISO C99 */
+ MPFR_SAVE_EXPO_FREE (expo);
+ return s0; /* strlen(s0) = neg + m */
+ }
+
+ if (s == NULL)
+ s = (char*) (*__gmp_allocate_func) (neg + m + 1);
+ s0 = s;
+ if (neg)
+ *s++ = '-';
+
+ xp = MPFR_MANT(x);
+
+ if (IS_POW2(b))
+ {
+ int pow2;
+ mpfr_exp_t f, r;
+ mp_limb_t *x1;
+ mp_size_t nb;
+ int inexp;
+
+ count_leading_zeros (pow2, (mp_limb_t) b);
+ pow2 = GMP_NUMB_BITS - pow2 - 1; /* base = 2^pow2 */
+
+ /* set MPFR_EXP(x) = f*pow2 + r, 1 <= r <= pow2 */
+ f = (MPFR_GET_EXP (x) - 1) / pow2;
+ r = MPFR_GET_EXP (x) - f * pow2;
+ if (r <= 0)
+ {
+ f --;
+ r += pow2;
+ }
+
+ /* the first digit will contain only r bits */
+ prec = (m - 1) * pow2 + r; /* total number of bits */
+ n = (prec - 1) / GMP_NUMB_BITS + 1;
+
+ MPFR_TMP_MARK (marker);
+ x1 = (mp_limb_t*) MPFR_TMP_ALLOC((n + 1) * sizeof (mp_limb_t));
+ nb = n * GMP_NUMB_BITS - prec;
+ /* round xp to the precision prec, and put it into x1
+ put the carry into x1[n] */
+ if ((x1[n] = mpfr_round_raw (x1, xp, MPFR_PREC(x),
+ MPFR_IS_STRICTNEG(x),
+ prec, rnd, &inexp)))
+ {
+ /* overflow when rounding x: x1 = 2^prec */
+ if (r == pow2) /* prec = m * pow2,
+ 2^prec will need (m+1) digits in base 2^pow2 */
+ {
+ /* divide x1 by 2^pow2, and increase the exponent */
+ mpn_rshift (x1, x1, n + 1, pow2);
+ f ++;
+ }
+ else /* 2^prec needs still m digits, but x1 may need n+1 limbs */
+ n ++;
+ }
+
+ /* it remains to shift x1 by nb limbs to the right, since mpn_get_str
+ expects a right-normalized number */
+ if (nb != 0)
+ {
+ mpn_rshift (x1, x1, n, nb);
+ /* the most significant word may be zero */
+ if (x1[n - 1] == 0)
+ n --;
+ }
+
+ mpn_get_str ((unsigned char*) s, b, x1, n);
+ for (i=0; i<m; i++)
+ s[i] = num_to_text[(int) s[i]];
+ s[m] = 0;
+
+ /* the exponent of s is f + 1 */
+ *e = f + 1;
+
+ MPFR_TMP_FREE(marker);
+ MPFR_SAVE_EXPO_FREE (expo);
+ return (s0);
+ }
+
+ /* if x < 0, reduce to x > 0 */
+ if (neg)
+ rnd = MPFR_INVERT_RND(rnd);
+
+ g = ceil_mul (MPFR_GET_EXP (x) - 1, b, 1);
+ exact = 1;
+ prec = ceil_mul (m, b, 0) + 1;
+ exp = ((mpfr_exp_t) m < g) ? g - (mpfr_exp_t) m : (mpfr_exp_t) m - g;
+ prec += MPFR_INT_CEIL_LOG2 (prec); /* number of guard bits */
+ if (exp != 0) /* add maximal exponentiation error */
+ prec += 3 * (mpfr_exp_t) MPFR_INT_CEIL_LOG2 (exp);
+
+ MPFR_ZIV_INIT (loop, prec);
+ for (;;)
+ {
+ MPFR_TMP_MARK(marker);
+
+ exact = 1;
+
+ /* number of limbs */
+ n = 1 + (prec - 1) / GMP_NUMB_BITS;
+
+ /* a will contain the approximation of the mantissa */
+ a = (mp_limb_t*) MPFR_TMP_ALLOC (n * sizeof (mp_limb_t));
+
+ nx = 1 + (MPFR_PREC(x) - 1) / GMP_NUMB_BITS;
+
+ if ((mpfr_exp_t) m == g) /* final exponent is 0, no multiplication or
+ division to perform */
+ {
+ if (nx > n)
+ exact = mpn_scan1 (xp, 0) >= (nx - n) * GMP_NUMB_BITS;
+ err = !exact;
+ MPN_COPY2 (a, n, xp, nx);
+ exp_a = MPFR_GET_EXP (x) - n * GMP_NUMB_BITS;
+ }
+ else if ((mpfr_exp_t) m > g) /* we have to multiply x by b^exp */
+ {
+ mp_limb_t *x1;
+
+ /* a2*2^exp_a = b^e */
+ err = mpfr_mpn_exp (a, &exp_a, b, exp, n);
+ /* here, the error on a is at most 2^err ulps */
+ exact = (err == -1);
+
+ /* x = x1*2^(n*GMP_NUMB_BITS) */
+ x1 = (nx >= n) ? xp + nx - n : xp;
+ nx1 = (nx >= n) ? n : nx; /* nx1 = min(n, nx) */
+
+          /* test whether the result is exact */
+ if (nx > n)
+ exact = (exact &&
+ ((mpn_scan1 (xp, 0) >= (nx - n) * GMP_NUMB_BITS)));
+
+ /* we loose one more bit in the multiplication,
+ except when err=0 where we loose two bits */
+ err = (err <= 0) ? 2 : err + 1;
+
+ /* result = a * x */
+ result = (mp_limb_t*) MPFR_TMP_ALLOC ((n + nx1) * sizeof (mp_limb_t));
+ mpn_mul (result, a, n, x1, nx1);
+ exp_a += MPFR_GET_EXP (x);
+ if (mpn_scan1 (result, 0) < (nx1 * GMP_NUMB_BITS))
+ exact = 0;
+
+ /* normalize a and truncate */
+ if ((result[n + nx1 - 1] & MPFR_LIMB_HIGHBIT) == 0)
+ {
+ mpn_lshift (a, result + nx1, n , 1);
+ a[0] |= result[nx1 - 1] >> (GMP_NUMB_BITS - 1);
+ exp_a --;
+ }
+ else
+ MPN_COPY (a, result + nx1, n);
+ }
+ else
+ {
+ mp_limb_t *x1;
+
+ /* a2*2^exp_a = b^e */
+ err = mpfr_mpn_exp (a, &exp_a, b, exp, n);
+ exact = (err == -1);
+
+ /* allocate memory for x1, result and reste */
+ x1 = (mp_limb_t*) MPFR_TMP_ALLOC (2 * n * sizeof (mp_limb_t));
+ result = (mp_limb_t*) MPFR_TMP_ALLOC ((n + 1) * sizeof (mp_limb_t));
+ reste = (mp_limb_t*) MPFR_TMP_ALLOC (n * sizeof (mp_limb_t));
+
+ /* initialize x1 = x */
+ MPN_COPY2 (x1, 2 * n, xp, nx);
+ if ((exact) && (nx > 2 * n) &&
+ (mpn_scan1 (xp, 0) < (nx - 2 * n) * GMP_NUMB_BITS))
+ exact = 0;
+
+ /* result = x / a */
+ mpn_tdiv_qr (result, reste, 0, x1, 2 * n, a, n);
+ exp_a = MPFR_GET_EXP (x) - exp_a - 2 * n * GMP_NUMB_BITS;
+
+ /* test if division was exact */
+ if (exact)
+ exact = mpn_popcount (reste, n) == 0;
+
+ /* normalize the result and copy into a */
+ if (result[n] == 1)
+ {
+ mpn_rshift (a, result, n, 1);
+              a[n - 1] |= MPFR_LIMB_HIGHBIT;
+ exp_a ++;
+ }
+ else
+ MPN_COPY (a, result, n);
+
+ err = (err == -1) ? 2 : err + 2;
+ }
+
+ /* check if rounding is possible */
+ if (exact)
+ err = -1;
+ ret = mpfr_get_str_aux (s, e, a, n, exp_a, err, b, m, rnd);
+ if (ret == MPFR_ROUND_FAILED)
+ {
+ /* too large error: increment the working precision */
+ MPFR_ZIV_NEXT (loop, prec);
+ }
+ else if (ret == -MPFR_ROUND_FAILED)
+ {
+ /* too many digits in mantissa: exp = |m-g| */
+ if ((mpfr_exp_t) m > g) /* exp = m - g, multiply by b^exp */
+ {
+ g++;
+ exp --;
+ }
+ else /* exp = g - m, divide by b^exp */
+ {
+ g++;
+ exp ++;
+ }
+ }
+ else
+ break;
+
+ MPFR_TMP_FREE(marker);
+ }
+ MPFR_ZIV_FREE (loop);
+
+ *e += g;
+
+ MPFR_TMP_FREE(marker);
+ MPFR_SAVE_EXPO_FREE (expo);
+ return s0;
+}
+
+void mpfr_free_str (char *str)
+{
+ (*__gmp_free_func) (str, strlen (str) + 1);
+}
diff --git a/src/get_ui.c b/src/get_ui.c
new file mode 100644
index 000000000..9a477ad6d
--- /dev/null
+++ b/src/get_ui.c
@@ -0,0 +1,65 @@
+/* mpfr_get_ui -- convert a floating-point number to an unsigned long.
+
+Copyright 2003, 2004, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#include "mpfr-impl.h"
+
+unsigned long
+mpfr_get_ui (mpfr_srcptr f, mpfr_rnd_t rnd)
+{
+ mpfr_prec_t prec;
+ unsigned long s;
+ mpfr_t x;
+ mp_size_t n;
+ mpfr_exp_t exp;
+
+ if (MPFR_UNLIKELY (!mpfr_fits_ulong_p (f, rnd)))
+ {
+ MPFR_SET_ERANGE ();
+ return MPFR_IS_NAN (f) || MPFR_IS_NEG (f) ?
+ (unsigned long) 0 : ULONG_MAX;
+ }
+
+ if (MPFR_IS_ZERO (f))
+ return (unsigned long) 0;
+
+ for (s = ULONG_MAX, prec = 0; s != 0; s /= 2, prec ++)
+ { }
+
+ /* first round to prec bits */
+ mpfr_init2 (x, prec);
+ mpfr_rint (x, f, rnd);
+
+ /* warning: if x=0, taking its exponent is illegal */
+ if (MPFR_IS_ZERO(x))
+ s = 0;
+ else
+ {
+ /* now the result is in the most significant limb of x */
+ exp = MPFR_GET_EXP (x); /* since |x| >= 1, exp >= 1 */
+ n = MPFR_LIMB_SIZE(x);
+ s = MPFR_MANT(x)[n - 1] >> (GMP_NUMB_BITS - exp);
+ }
+
+ mpfr_clear (x);
+
+ return s;
+}
diff --git a/src/get_uj.c b/src/get_uj.c
new file mode 100644
index 000000000..064f18c21
--- /dev/null
+++ b/src/get_uj.c
@@ -0,0 +1,95 @@
+/* mpfr_get_uj -- convert a MPFR number to a huge machine unsigned integer
+
+Copyright 2004, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#ifdef HAVE_CONFIG_H
+# include "config.h" /* for a build within gmp */
+#endif
+
+/* The ISO C99 standard specifies that in C++ implementations the
+ INTMAX_MAX, ... macros should only be defined if explicitly requested. */
+#if defined __cplusplus
+# define __STDC_LIMIT_MACROS
+# define __STDC_CONSTANT_MACROS
+#endif
+
+#if HAVE_INTTYPES_H
+# include <inttypes.h>
+#endif
+#if HAVE_STDINT_H
+# include <stdint.h>
+#endif
+
+#include "mpfr-impl.h"
+
+#ifdef _MPFR_H_HAVE_INTMAX_T
+
+uintmax_t
+mpfr_get_uj (mpfr_srcptr f, mpfr_rnd_t rnd)
+{
+ uintmax_t r;
+ mpfr_prec_t prec;
+ mpfr_t x;
+
+ if (MPFR_UNLIKELY (!mpfr_fits_uintmax_p (f, rnd)))
+ {
+ MPFR_SET_ERANGE ();
+ return MPFR_IS_NAN (f) || MPFR_IS_NEG (f) ?
+ (uintmax_t) 0 : MPFR_UINTMAX_MAX;
+ }
+
+ if (MPFR_IS_ZERO (f))
+ return (uintmax_t) 0;
+
+ /* determine the precision of uintmax_t */
+ for (r = MPFR_UINTMAX_MAX, prec = 0; r != 0; r /= 2, prec++)
+ { }
+
+ /* Now, r = 0. */
+
+ mpfr_init2 (x, prec);
+ mpfr_rint (x, f, rnd);
+ MPFR_ASSERTN (MPFR_IS_FP (x));
+
+ if (MPFR_NOTZERO (x))
+ {
+ mp_limb_t *xp;
+ int sh, n; /* An int should be sufficient in this context. */
+
+ MPFR_ASSERTN (MPFR_IS_POS (x));
+ xp = MPFR_MANT (x);
+ sh = MPFR_GET_EXP (x);
+ MPFR_ASSERTN ((mpfr_prec_t) sh <= prec);
+ for (n = MPFR_LIMB_SIZE(x) - 1; n >= 0; n--)
+ {
+ sh -= GMP_NUMB_BITS;
+ r += (sh >= 0
+ ? (uintmax_t) xp[n] << sh
+ : (uintmax_t) xp[n] >> (- sh));
+ }
+ }
+
+ mpfr_clear (x);
+
+ return r;
+}
+
+#endif
diff --git a/src/get_z.c b/src/get_z.c
new file mode 100644
index 000000000..6795955e2
--- /dev/null
+++ b/src/get_z.c
@@ -0,0 +1,61 @@
+/* mpfr_get_z -- get a multiple-precision integer from
+ a floating-point number
+
+Copyright 2004, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#include "mpfr-impl.h"
+
+int
+mpfr_get_z (mpz_ptr z, mpfr_srcptr f, mpfr_rnd_t rnd)
+{
+ int inex;
+ mpfr_t r;
+ mpfr_exp_t exp;
+
+ if (MPFR_UNLIKELY (MPFR_IS_SINGULAR (f)))
+ {
+ if (MPFR_UNLIKELY (MPFR_NOTZERO (f)))
+ MPFR_SET_ERANGE ();
+ mpz_set_ui (z, 0);
+ /* The ternary value is 0 even for infinity. Giving the rounding
+ direction in this case would not make much sense anyway, and
+ the direction would not necessarily match rnd. */
+ return 0;
+ }
+
+ exp = MPFR_GET_EXP (f);
+ /* if exp <= 0, then |f|<1, thus |o(f)|<=1 */
+ MPFR_ASSERTN (exp < 0 || exp <= MPFR_PREC_MAX);
+ mpfr_init2 (r, (exp < (mpfr_exp_t) MPFR_PREC_MIN ?
+ MPFR_PREC_MIN : (mpfr_prec_t) exp));
+ inex = mpfr_rint (r, f, rnd);
+ MPFR_ASSERTN (inex != 1 && inex != -1); /* integral part of f is
+ representable in r */
+ MPFR_ASSERTN (MPFR_IS_FP (r));
+ exp = mpfr_get_z_2exp (z, r);
+ if (exp >= 0)
+ mpz_mul_2exp (z, z, exp);
+ else
+ mpz_fdiv_q_2exp (z, z, -exp);
+ mpfr_clear (r);
+
+ return inex;
+}
diff --git a/src/get_z_exp.c b/src/get_z_exp.c
new file mode 100644
index 000000000..851907f0b
--- /dev/null
+++ b/src/get_z_exp.c
@@ -0,0 +1,79 @@
+/* mpfr_get_z_2exp -- get a multiple-precision integer and an exponent
+ from a floating-point number
+
+Copyright 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#include "mpfr-impl.h"
+
+/* puts the significand of f into z, and returns 'exp' such that f = z * 2^exp
+ *
+ * 0 doesn't have an exponent, therefore the returned exponent in this case
+ * isn't really important. We choose to return __gmpfr_emin because
+ * 1) it is in the exponent range [__gmpfr_emin,__gmpfr_emax],
+ * 2) the smaller a number is (in absolute value), the smaller its
+ * exponent is. In other words, the f -> exp function is monotonous
+ * on nonnegative numbers. --> This is WRONG since the returned
+ * exponent is not necessarily in the exponent range!
+ * Note that this is different from the C function frexp().
+ *
+ * For NaN and infinities, we choose to set z = 0 (neutral value).
+ * The exponent doesn't really matter, so let's keep __gmpfr_emin
+ * for consistency. The erange flag is set.
+ */
+
+mpfr_exp_t
+mpfr_get_z_2exp (mpz_ptr z, mpfr_srcptr f)
+{
+ mp_size_t fn;
+ int sh;
+
+ if (MPFR_UNLIKELY (MPFR_IS_SINGULAR (f)))
+ {
+ if (MPFR_UNLIKELY (MPFR_NOTZERO (f)))
+ MPFR_SET_ERANGE ();
+ mpz_set_ui (z, 0);
+ return __gmpfr_emin;
+ }
+
+ fn = MPFR_LIMB_SIZE(f);
+
+ /* check whether allocated space for z is enough */
+ if (MPFR_UNLIKELY (ALLOC (z) < fn))
+ MPZ_REALLOC (z, fn);
+
+ MPFR_UNSIGNED_MINUS_MODULO (sh, MPFR_PREC (f));
+ if (MPFR_LIKELY (sh))
+ mpn_rshift (PTR (z), MPFR_MANT (f), fn, sh);
+ else
+ MPN_COPY (PTR (z), MPFR_MANT (f), fn);
+
+ SIZ(z) = MPFR_IS_NEG (f) ? -fn : fn;
+
+ if (MPFR_UNLIKELY ((mpfr_uexp_t) MPFR_GET_EXP (f) - MPFR_EXP_MIN
+ < (mpfr_uexp_t) MPFR_PREC (f)))
+ {
+ /* The exponent isn't representable in an mpfr_exp_t. */
+ MPFR_SET_ERANGE ();
+ return MPFR_EXP_MIN;
+ }
+
+ return MPFR_GET_EXP (f) - MPFR_PREC (f);
+}
diff --git a/src/gmp_op.c b/src/gmp_op.c
new file mode 100644
index 000000000..a738c2427
--- /dev/null
+++ b/src/gmp_op.c
@@ -0,0 +1,345 @@
+/* Implementations of operations between mpfr and mpz/mpq data
+
+Copyright 2001, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#define MPFR_NEED_LONGLONG_H
+#include "mpfr-impl.h"
+
+/* Init and set a mpfr_t with enough precision to store a mpz */
+static void
+init_set_z (mpfr_ptr t, mpz_srcptr z)
+{
+ mpfr_prec_t p;
+ int i;
+
+ if (mpz_size (z) <= 1)
+ p = GMP_NUMB_BITS;
+ else
+ MPFR_MPZ_SIZEINBASE2 (p, z);
+ mpfr_init2 (t, p);
+ i = mpfr_set_z (t, z, MPFR_RNDN);
+ MPFR_ASSERTD (i == 0); (void) i; /* use i to avoid a warning */
+}
+
+/* Init, set a mpfr_t with enough precision to store a mpz_t without round,
+ call the function, and clear the allocated mpfr_t */
+static int
+foo (mpfr_ptr x, mpfr_srcptr y, mpz_srcptr z, mpfr_rnd_t r,
+ int (*f)(mpfr_ptr, mpfr_srcptr, mpfr_srcptr, mpfr_rnd_t))
+{
+ mpfr_t t;
+ int i;
+ init_set_z (t, z);
+ i = (*f) (x, y, t, r);
+ mpfr_clear (t);
+ return i;
+}
+
+int
+mpfr_mul_z (mpfr_ptr y, mpfr_srcptr x, mpz_srcptr z, mpfr_rnd_t r)
+{
+ return foo (y, x, z, r, mpfr_mul);
+}
+
+int
+mpfr_div_z (mpfr_ptr y, mpfr_srcptr x, mpz_srcptr z, mpfr_rnd_t r)
+{
+ return foo (y, x, z, r, mpfr_div);
+}
+
+int
+mpfr_add_z (mpfr_ptr y, mpfr_srcptr x, mpz_srcptr z, mpfr_rnd_t r)
+{
+ /* Mpz 0 is unsigned */
+ if (MPFR_UNLIKELY (mpz_sgn (z) == 0))
+ return mpfr_set (y, x, r);
+ else
+ return foo (y, x, z, r, mpfr_add);
+}
+
+int
+mpfr_sub_z (mpfr_ptr y, mpfr_srcptr x, mpz_srcptr z, mpfr_rnd_t r)
+{
+ /* Mpz 0 is unsigned */
+ if (MPFR_UNLIKELY (mpz_sgn (z) == 0))
+ return mpfr_set (y, x, r);
+ else
+ return foo (y, x, z, r, mpfr_sub);
+}
+
+int
+mpfr_cmp_z (mpfr_srcptr x, mpz_srcptr z)
+{
+ mpfr_t t;
+ int res;
+ init_set_z (t, z);
+ res = mpfr_cmp (x, t);
+ mpfr_clear (t);
+ return res;
+}
+
+/* FIXME [VL] (for mpfr_mul_q and mpfr_div_q): an intermediate overflow
+ doesn't necessarily imply an overflow on the final result. Moreover
+ the exponent range should be extended in the usual way.
+ To fix this, I think that these functions should call a common
+ function mpfr_muldiv_z:
+ res = mpfr_muldiv_z (y, x, mpq_numref(z), mpq_denref(z), rnd_mode);
+ res = mpfr_muldiv_z (y, x, mpq_denref(z), mpq_numref(z), rnd_mode);
+ respectively, so that all the work isn't done twice. */
+
+int
+mpfr_mul_q (mpfr_ptr y, mpfr_srcptr x, mpq_srcptr z, mpfr_rnd_t rnd_mode)
+{
+ mpfr_t tmp;
+ int res;
+ mpfr_prec_t p;
+
+ if (MPFR_UNLIKELY (mpq_sgn (z) == 0))
+ return mpfr_mul_ui (y, x, 0, rnd_mode);
+ else
+ {
+ MPFR_MPZ_SIZEINBASE2 (p, mpq_numref (z));
+ mpfr_init2 (tmp, MPFR_PREC (x) + p);
+ res = mpfr_mul_z (tmp, x, mpq_numref(z), MPFR_RNDN );
+ if (MPFR_UNLIKELY (res != 0))
+ {
+ /* overflow case */
+ MPFR_ASSERTD (mpfr_inf_p (tmp));
+ mpfr_set (y, tmp, MPFR_RNDN); /* exact */
+ }
+ else
+ res = mpfr_div_z (y, tmp, mpq_denref(z), rnd_mode);
+
+ mpfr_clear (tmp);
+ return res;
+ }
+}
+
+int
+mpfr_div_q (mpfr_ptr y, mpfr_srcptr x, mpq_srcptr z, mpfr_rnd_t rnd_mode)
+{
+ mpfr_t tmp;
+ int res;
+ mpfr_prec_t p;
+
+ if (MPFR_UNLIKELY (mpq_sgn (z) == 0))
+ return mpfr_div_ui (y, x, 0, rnd_mode);
+ else if (MPFR_UNLIKELY (mpz_sgn (mpq_denref (z)) == 0))
+ p = 0;
+ else
+ MPFR_MPZ_SIZEINBASE2 (p, mpq_denref (z));
+ mpfr_init2 (tmp, MPFR_PREC(x) + p);
+ res = mpfr_mul_z (tmp, x, mpq_denref(z), MPFR_RNDN );
+ if (MPFR_UNLIKELY (res != 0))
+ {
+ /* overflow case */
+ MPFR_ASSERTD (mpfr_inf_p (tmp));
+ mpfr_set (y, tmp, MPFR_RNDN); /* exact */
+ }
+ else
+ res = mpfr_div_z (y, tmp, mpq_numref(z), rnd_mode);
+
+ mpfr_clear (tmp);
+ return res;
+}
+
+int
+mpfr_add_q (mpfr_ptr y, mpfr_srcptr x, mpq_srcptr z, mpfr_rnd_t rnd_mode)
+{
+ mpfr_t t,q;
+ mpfr_prec_t p;
+ mpfr_exp_t err;
+ int res;
+ MPFR_ZIV_DECL (loop);
+
+ if (MPFR_UNLIKELY (MPFR_IS_SINGULAR (x)))
+ {
+ if (MPFR_IS_NAN (x))
+ {
+ MPFR_SET_NAN (y);
+ MPFR_RET_NAN;
+ }
+ else if (MPFR_IS_INF (x))
+ {
+ MPFR_ASSERTD (mpz_sgn (mpq_denref (z)) != 0);
+ MPFR_SET_INF (y);
+ MPFR_SET_SAME_SIGN (y, x);
+ MPFR_RET (0);
+ }
+ else
+ {
+ MPFR_ASSERTD (MPFR_IS_ZERO (x));
+ if (MPFR_UNLIKELY (mpq_sgn (z) == 0))
+ return mpfr_set (y, x, rnd_mode); /* signed 0 - Unsigned 0 */
+ else
+ return mpfr_set_q (y, z, rnd_mode);
+ }
+ }
+
+ p = MPFR_PREC (y) + 10;
+ mpfr_init2 (t, p);
+ mpfr_init2 (q, p);
+
+ MPFR_ZIV_INIT (loop, p);
+ for (;;)
+ {
+ res = mpfr_set_q (q, z, MPFR_RNDN); /* Error <= 1/2 ulp(q) */
+      /* If z is @INF@ (1/0), res = 0, so it quits immediately */
+ if (MPFR_UNLIKELY (res == 0))
+ /* Result is exact so we can add it directly! */
+ {
+ res = mpfr_add (y, x, q, rnd_mode);
+ break;
+ }
+ mpfr_add (t, x, q, MPFR_RNDN); /* Error <= 1/2 ulp(t) */
+ /* Error / ulp(t) <= 1/2 + 1/2 * 2^(EXP(q)-EXP(t))
+ If EXP(q)-EXP(t)>0, <= 2^(EXP(q)-EXP(t)-1)*(1+2^-(EXP(q)-EXP(t)))
+ <= 2^(EXP(q)-EXP(t))
+ If EXP(q)-EXP(t)<0, <= 2^0 */
+ /* We can get 0, but we can't round since q is inexact */
+ if (MPFR_LIKELY (!MPFR_IS_ZERO (t)))
+ {
+ err = (mpfr_exp_t) p - 1 - MAX (MPFR_GET_EXP(q)-MPFR_GET_EXP(t), 0);
+ if (MPFR_LIKELY (MPFR_CAN_ROUND (t, err, MPFR_PREC (y), rnd_mode)))
+ {
+ res = mpfr_set (y, t, rnd_mode);
+ break;
+ }
+ }
+ MPFR_ZIV_NEXT (loop, p);
+ mpfr_set_prec (t, p);
+ mpfr_set_prec (q, p);
+ }
+ MPFR_ZIV_FREE (loop);
+ mpfr_clear (t);
+ mpfr_clear (q);
+ return res;
+}
+
+int
+mpfr_sub_q (mpfr_ptr y, mpfr_srcptr x, mpq_srcptr z,mpfr_rnd_t rnd_mode)
+{
+ mpfr_t t,q;
+ mpfr_prec_t p;
+ int res;
+ mpfr_exp_t err;
+ MPFR_ZIV_DECL (loop);
+
+ if (MPFR_UNLIKELY (MPFR_IS_SINGULAR (x)))
+ {
+ if (MPFR_IS_NAN (x))
+ {
+ MPFR_SET_NAN (y);
+ MPFR_RET_NAN;
+ }
+ else if (MPFR_IS_INF (x))
+ {
+ MPFR_ASSERTD (mpz_sgn (mpq_denref (z)) != 0);
+ MPFR_SET_INF (y);
+ MPFR_SET_SAME_SIGN (y, x);
+ MPFR_RET (0);
+ }
+ else
+ {
+ MPFR_ASSERTD (MPFR_IS_ZERO (x));
+
+ if (MPFR_UNLIKELY (mpq_sgn (z) == 0))
+ return mpfr_set (y, x, rnd_mode); /* signed 0 - Unsigned 0 */
+ else
+ {
+ res = mpfr_set_q (y, z, MPFR_INVERT_RND (rnd_mode));
+ MPFR_CHANGE_SIGN (y);
+ return -res;
+ }
+ }
+ }
+
+ p = MPFR_PREC (y) + 10;
+ mpfr_init2 (t, p);
+ mpfr_init2 (q, p);
+
+ MPFR_ZIV_INIT (loop, p);
+ for(;;)
+ {
+ res = mpfr_set_q(q, z, MPFR_RNDN); /* Error <= 1/2 ulp(q) */
+      /* If z is @INF@ (1/0), res = 0, so it quits immediately */
+ if (MPFR_UNLIKELY (res == 0))
+ /* Result is exact so we can add it directly!*/
+ {
+ res = mpfr_sub (y, x, q, rnd_mode);
+ break;
+ }
+ mpfr_sub (t, x, q, MPFR_RNDN); /* Error <= 1/2 ulp(t) */
+ /* Error / ulp(t) <= 1/2 + 1/2 * 2^(EXP(q)-EXP(t))
+ If EXP(q)-EXP(t)>0, <= 2^(EXP(q)-EXP(t)-1)*(1+2^-(EXP(q)-EXP(t)))
+ <= 2^(EXP(q)-EXP(t))
+ If EXP(q)-EXP(t)<0, <= 2^0 */
+ /* We can get 0, but we can't round since q is inexact */
+ if (MPFR_LIKELY (!MPFR_IS_ZERO (t)))
+ {
+ err = (mpfr_exp_t) p - 1 - MAX (MPFR_GET_EXP(q)-MPFR_GET_EXP(t), 0);
+ res = MPFR_CAN_ROUND (t, err, MPFR_PREC (y), rnd_mode);
+ if (MPFR_LIKELY (res != 0)) /* We can round! */
+ {
+ res = mpfr_set (y, t, rnd_mode);
+ break;
+ }
+ }
+ MPFR_ZIV_NEXT (loop, p);
+ mpfr_set_prec (t, p);
+ mpfr_set_prec (q, p);
+ }
+ MPFR_ZIV_FREE (loop);
+ mpfr_clear (t);
+ mpfr_clear (q);
+ return res;
+}
+
+int
+mpfr_cmp_q (mpfr_srcptr x, mpq_srcptr z)
+{
+ mpfr_t t;
+ int res;
+ mpfr_prec_t p;
+ /* x < a/b ? <=> x*b < a */
+ MPFR_ASSERTD (mpz_sgn (mpq_denref (z)) != 0);
+ MPFR_MPZ_SIZEINBASE2 (p, mpq_denref (z));
+ mpfr_init2 (t, MPFR_PREC(x) + p);
+ res = mpfr_mul_z (t, x, mpq_denref (z), MPFR_RNDN );
+ MPFR_ASSERTD (res == 0);
+ res = mpfr_cmp_z (t, mpq_numref (z) );
+ mpfr_clear (t);
+ return res;
+}
+
+int
+mpfr_cmp_f (mpfr_srcptr x, mpf_srcptr z)
+{
+ mpfr_t t;
+ int res;
+
+ mpfr_init2 (t, MPFR_PREC_MIN + ABS(SIZ(z)) * GMP_NUMB_BITS );
+ res = mpfr_set_f (t, z, MPFR_RNDN);
+ MPFR_ASSERTD (res == 0);
+ res = mpfr_cmp (x, t);
+ mpfr_clear (t);
+ return res;
+}
diff --git a/src/hypot.c b/src/hypot.c
new file mode 100644
index 000000000..4b450610a
--- /dev/null
+++ b/src/hypot.c
@@ -0,0 +1,187 @@
+/* mpfr_hypot -- Euclidean distance
+
+Copyright 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#define MPFR_NEED_LONGLONG_H
+#include "mpfr-impl.h"
+
/* The computation of hypot(x,y) = sqrt(x^2 + y^2) = z. */
+
/* Set z = hypot(x,y) = sqrt(x^2 + y^2), rounded in direction rnd_mode.
   Returns the usual ternary value (0 = exact, negative = rounded down,
   positive = rounded up), filtered through mpfr_check_range in the
   general case. */
int
mpfr_hypot (mpfr_ptr z, mpfr_srcptr x, mpfr_srcptr y, mpfr_rnd_t rnd_mode)
{
  /* NB: despite its name, "exact" is nonzero iff some intermediate
     operation was INEXACT (it accumulates ternary values with |=). */
  int inexact, exact;
  mpfr_t t, te, ti; /* auxiliary variables */
  mpfr_prec_t N, Nz; /* size variables */
  mpfr_prec_t Nt; /* precision of the intermediary variable */
  mpfr_prec_t threshold;
  mpfr_exp_t Ex, sh;
  mpfr_uexp_t diff_exp;

  MPFR_SAVE_EXPO_DECL (expo);
  MPFR_ZIV_DECL (loop);
  MPFR_BLOCK_DECL (flags);

  /* particular cases */
  if (MPFR_ARE_SINGULAR (x, y))
    {
      if (MPFR_IS_INF (x) || MPFR_IS_INF (y))
        {
          /* Return +inf, even when the other number is NaN. */
          MPFR_SET_INF (z);
          MPFR_SET_POS (z);
          MPFR_RET (0);
        }
      else if (MPFR_IS_NAN (x) || MPFR_IS_NAN (y))
        {
          MPFR_SET_NAN (z);
          MPFR_RET_NAN;
        }
      else if (MPFR_IS_ZERO (x))
        return mpfr_abs (z, y, rnd_mode);
      else /* y is necessarily 0 */
        return mpfr_abs (z, x, rnd_mode);
    }

  /* Swap the (read-only) operand pointers so that |x| >= |y|. */
  if (mpfr_cmpabs (x, y) < 0)
    {
      mpfr_srcptr u;
      u = x;
      x = y;
      y = u;
    }

  /* now |x| >= |y| */

  Ex = MPFR_GET_EXP (x);
  diff_exp = (mpfr_uexp_t) Ex - MPFR_GET_EXP (y);

  N = MPFR_PREC (x); /* Precision of input variable */
  Nz = MPFR_PREC (z); /* Precision of output variable */
  /* When the exponent gap exceeds this threshold, y^2 cannot affect the
     rounded result beyond one ulp of |x|. */
  threshold = (MAX (N, Nz) + (rnd_mode == MPFR_RNDN ? 1 : 0)) << 1;
  if (rnd_mode == MPFR_RNDA)
    rnd_mode = MPFR_RNDU; /* since the result is positive, RNDA = RNDU */

  /* Is |x| a suitable approximation to the precision Nz ?
     (see algorithms.tex for explanations) */
  if (diff_exp > threshold)
    /* result is |x| or |x|+ulp(|x|,Nz) */
    {
      if (MPFR_UNLIKELY (rnd_mode == MPFR_RNDU))
        {
          /* If z > abs(x), then it was already rounded up; otherwise
             z = abs(x), and we need to add one ulp due to y. */
          if (mpfr_abs (z, x, rnd_mode) == 0)
            mpfr_nexttoinf (z);
          MPFR_RET (1);
        }
      else /* MPFR_RNDZ, MPFR_RNDD, MPFR_RNDN */
        {
          if (MPFR_LIKELY (Nz >= N))
            {
              mpfr_abs (z, x, rnd_mode); /* exact */
              MPFR_RET (-1);
            }
          else
            {
              /* Round the raw mantissa of x into the narrower z;
                 the third handler deals with mantissa carry-out. */
              MPFR_SET_EXP (z, Ex);
              MPFR_SET_SIGN (z, 1);
              MPFR_RNDRAW_GEN (inexact, z, MPFR_MANT (x), N, rnd_mode, 1,
                               goto addoneulp,
                               if (MPFR_UNLIKELY (++ MPFR_EXP (z) >
                                                  __gmpfr_emax))
                                 return mpfr_overflow (z, rnd_mode, 1);
                               );

              if (MPFR_UNLIKELY (inexact == 0))
                inexact = -1;
              MPFR_RET (inexact);
            }
        }
    }

  /* General case */

  N = MAX (MPFR_PREC (x), MPFR_PREC (y));

  /* working precision */
  Nt = Nz + MPFR_INT_CEIL_LOG2 (Nz) + 4;

  mpfr_init2 (t, Nt);
  mpfr_init2 (te, Nt);
  mpfr_init2 (ti, Nt);

  MPFR_SAVE_EXPO_MARK (expo);

  /* Scale x and y to avoid overflow/underflow in x^2 and overflow in y^2
     (as |x| >= |y|). The scaling of y can underflow only when the target
     precision is huge, otherwise the case would already have been handled
     by the diff_exp > threshold code. */
  sh = mpfr_get_emax () / 2 - Ex - 1;

  /* Ziv loop: retry with larger working precision until rounding is
     guaranteed correct (or the scaled computation turns out exact). */
  MPFR_ZIV_INIT (loop, Nt);
  for (;;)
    {
      mpfr_prec_t err;

      exact = mpfr_mul_2si (te, x, sh, MPFR_RNDZ);
      exact |= mpfr_mul_2si (ti, y, sh, MPFR_RNDZ);
      exact |= mpfr_sqr (te, te, MPFR_RNDZ);
      /* Use fma in order to avoid underflow when diff_exp<=MPFR_EMAX_MAX-2 */
      exact |= mpfr_fma (t, ti, ti, te, MPFR_RNDZ);
      exact |= mpfr_sqrt (t, t, MPFR_RNDZ);

      /* error bound on t, in ulps -- see algorithms.tex */
      err = Nt < N ? 4 : 2;
      if (MPFR_LIKELY (exact == 0
                       || MPFR_CAN_ROUND (t, Nt-err, Nz, rnd_mode)))
        break;

      MPFR_ZIV_NEXT (loop, Nt);
      mpfr_set_prec (t, Nt);
      mpfr_set_prec (te, Nt);
      mpfr_set_prec (ti, Nt);
    }
  MPFR_ZIV_FREE (loop);

  /* Undo the 2^sh scaling; capture any overflow flag in "flags". */
  MPFR_BLOCK (flags, inexact = mpfr_div_2si (z, t, sh, rnd_mode));
  MPFR_ASSERTD (exact == 0 || inexact != 0);

  mpfr_clear (t);
  mpfr_clear (ti);
  mpfr_clear (te);

  /*
     exact inexact
     0 0 result is exact, ternary flag is 0
     0 non zero t is exact, ternary flag given by inexact
     1 0 impossible (see above)
     1 non zero ternary flag given by inexact
  */

  MPFR_SAVE_EXPO_FREE (expo);

  if (MPFR_OVERFLOW (flags))
    mpfr_set_overflow ();
  /* hypot(x,y) >= |x|, thus underflow is not possible. */

  return mpfr_check_range (z, inexact, rnd_mode);
}
diff --git a/src/ieee_floats.h b/src/ieee_floats.h
new file mode 100644
index 000000000..e544281e7
--- /dev/null
+++ b/src/ieee_floats.h
@@ -0,0 +1,76 @@
+/* auxiliary data to generate special IEEE floats (NaN, +Inf, -Inf)
+
+Copyright 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+/* "double" NaN and infinities are written as explicit bytes to be sure of
+ getting what we want, and to be sure of not depending on libm.
+
+ Could use 4-byte "float" values and let the code convert them, but it
+ seems more direct to give exactly what we want. Certainly for gcc 3.0.2
+ on alphaev56-unknown-freebsd4.3 the NaN must be 8-bytes, since that
+ compiler+system was seen incorrectly converting from a "float" NaN. */
+
#if _GMP_IEEE_FLOATS

/* The "d" member guarantees alignment to a suitable boundary for a
   double; the byte patterns below are written through the "b" member.
   IEEE-754 binary64: 1 sign bit, 11 exponent bits, 52 mantissa bits.
   Infinity = exponent all ones (0x7FF), mantissa zero; the NaN used
   here sets the top mantissa bit as well (0x7FF8...), which is a quiet
   NaN on most implementations. */
union dbl_bytes {
  unsigned char b[8];
  double d;
};

#define MPFR_DBL_INFP (dbl_infp.d)
#define MPFR_DBL_INFM (dbl_infm.d)
#define MPFR_DBL_NAN (dbl_nan.d)

/* Plain little-endian doubles (e.g. x86): sign/exponent in the last bytes. */
#if HAVE_DOUBLE_IEEE_LITTLE_ENDIAN
static const union dbl_bytes dbl_infp =
  { { 0, 0, 0, 0, 0, 0, 0xF0, 0x7F } };
static const union dbl_bytes dbl_infm =
  { { 0, 0, 0, 0, 0, 0, 0xF0, 0xFF } };
static const union dbl_bytes dbl_nan  =
  { { 0, 0, 0, 0, 0, 0, 0xF8, 0x7F } };
#endif
/* Word-swapped little-endian doubles -- presumably the old ARM FPA
   mixed-endian layout; TODO confirm against the configure test. */
#if HAVE_DOUBLE_IEEE_LITTLE_SWAPPED
static const union dbl_bytes dbl_infp =
  { { 0, 0, 0xF0, 0x7F, 0, 0, 0, 0 } };
static const union dbl_bytes dbl_infm =
  { { 0, 0, 0xF0, 0xFF, 0, 0, 0, 0 } };
static const union dbl_bytes dbl_nan  =
  { { 0, 0, 0xF8, 0x7F, 0, 0, 0, 0 } };
#endif
/* Big-endian doubles: sign/exponent in the first bytes. */
#if HAVE_DOUBLE_IEEE_BIG_ENDIAN
static const union dbl_bytes dbl_infp =
  { { 0x7F, 0xF0, 0, 0, 0, 0, 0, 0 } };
static const union dbl_bytes dbl_infm =
  { { 0xFF, 0xF0, 0, 0, 0, 0, 0, 0 } };
static const union dbl_bytes dbl_nan  =
  { { 0x7F, 0xF8, 0, 0, 0, 0, 0, 0 } };
#endif

#else /* _GMP_IEEE_FLOATS */

/* Non-IEEE case: fall back to the DBL_* constants (from mpfr-impl.h). */
#define MPFR_DBL_INFP DBL_POS_INF
#define MPFR_DBL_INFM DBL_NEG_INF
#define MPFR_DBL_NAN DBL_NAN

#endif /* _GMP_IEEE_FLOATS */
diff --git a/src/init.c b/src/init.c
new file mode 100644
index 000000000..63caa5007
--- /dev/null
+++ b/src/init.c
@@ -0,0 +1,29 @@
+/* mpfr_init -- initialize a floating-point number
+
+Copyright 1999, 2001, 2002, 2004, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#include "mpfr-impl.h"
+
/* Initialize x (to NaN) with the current global default precision,
   i.e. equivalent to mpfr_init2 (x, mpfr_get_default_prec ()). */
void
mpfr_init (mpfr_ptr x)
{
  mpfr_init2 (x, __gmpfr_default_fp_bit_precision);
}
diff --git a/src/init2.c b/src/init2.c
new file mode 100644
index 000000000..c2b5ce3a8
--- /dev/null
+++ b/src/init2.c
@@ -0,0 +1,69 @@
+/* mpfr_init2 -- initialize a floating-point number with given precision
+
+Copyright 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#include "mpfr-impl.h"
+
/* Initialize x with precision p (MPFR_PREC_MIN <= p <= MPFR_PREC_MAX):
   allocate the mantissa and set x to NaN.  Also performs a battery of
   one-time build-consistency checks (cheap, so done unconditionally). */
void
mpfr_init2 (mpfr_ptr x, mpfr_prec_t p)
{
  mp_size_t xsize;
  mp_ptr tmp;

  /* Check if we can represent the number of limbs
   * associated to the maximum of mpfr_prec_t*/
  MPFR_ASSERTN( MP_SIZE_T_MAX >= (MPFR_PREC_MAX/BYTES_PER_MP_LIMB) );

  /* Check for correct GMP_NUMB_BITS and BYTES_PER_MP_LIMB */
  MPFR_ASSERTN( GMP_NUMB_BITS == BYTES_PER_MP_LIMB * CHAR_BIT
                && sizeof(mp_limb_t) == BYTES_PER_MP_LIMB );

  /* The header GMP was compiled against must match the GMP we link with. */
  MPFR_ASSERTN (mp_bits_per_limb == GMP_NUMB_BITS);

  /* Check for correct EXP NAN, ZERO & INF in both mpfr.h and in mpfr-impl.h */
  MPFR_ASSERTN( __MPFR_EXP_NAN == MPFR_EXP_NAN );
  MPFR_ASSERTN( __MPFR_EXP_ZERO == MPFR_EXP_ZERO );
  MPFR_ASSERTN( __MPFR_EXP_INF == MPFR_EXP_INF );

  MPFR_ASSERTN( MPFR_EMAX_MAX <= (MPFR_EXP_MAX >> 1) );
  MPFR_ASSERTN( MPFR_EMIN_MIN >= -(MPFR_EXP_MAX >> 1) );

  /* p=1 is not allowed since the rounding to nearest even rule requires at
     least two bits of mantissa: the neighbours of 3/2 are 1*2^0 and 1*2^1,
     which both have an odd mantissa */
  MPFR_ASSERTN(p >= MPFR_PREC_MIN && p <= MPFR_PREC_MAX);

  /* Number of limbs needed for p bits, and the mantissa allocation. */
  xsize = (mp_size_t) ((p - 1) / GMP_NUMB_BITS) + 1;
  tmp = (mp_ptr) (*__gmp_allocate_func)(MPFR_MALLOC_SIZE(xsize));

  MPFR_PREC(x) = p; /* Set prec */
  MPFR_EXP (x) = MPFR_EXP_INVALID; /* make sure that the exp field has a
                                      valid value in the C point of view */
  MPFR_SET_POS(x); /* Set a sign */
  MPFR_SET_MANT_PTR(x, tmp); /* Set Mantissa ptr */
  MPFR_SET_ALLOC_SIZE(x, xsize); /* Fix alloc size of Mantissa */
  MPFR_SET_NAN(x); /* initializes to NaN */
}
+
#ifdef MPFR_USE_OWN_MPFR_TMP_ALLOC
/* Static arena for MPFR's own temporary-allocation scheme; mpfr_stack
   presumably acts as the bump pointer into it -- see the TMP_* macros
   in mpfr-impl.h for the exact protocol (TODO confirm). */
static unsigned char mpfr_stack_tab[8000000];
unsigned char *mpfr_stack = mpfr_stack_tab;
#endif
diff --git a/src/inits.c b/src/inits.c
new file mode 100644
index 000000000..4193db7ec
--- /dev/null
+++ b/src/inits.c
@@ -0,0 +1,62 @@
+/* mpfr_inits -- initialize several floating-point numbers
+
+Copyright 2003, 2004, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#ifdef HAVE_CONFIG_H
+#undef HAVE_STDARG
+#include "config.h" /* for a build within gmp */
+#endif
+
+#if HAVE_STDARG
+# include <stdarg.h>
+#else
+# include <varargs.h>
+#endif
+
+#include "mpfr-impl.h"
+
+/* Since it uses "...", we need an explicit support for K&R */
+
/* Initialize every mpfr_t argument (to NaN, with the default precision,
   via mpfr_init).  The argument list MUST be terminated by a null
   pointer, e.g. mpfr_inits (a, b, c, (mpfr_ptr) 0). */
void
#if HAVE_STDARG
mpfr_inits (mpfr_ptr x, ...)
#else
mpfr_inits (va_alist)
 va_dcl
#endif
{
  va_list arg;

#if HAVE_STDARG
  va_start (arg, x);
#else
  /* K&R varargs: even the first argument comes out of the va_list */
  mpfr_ptr x;
  va_start(arg);
  x = va_arg (arg, mpfr_ptr);
#endif

  /* stop at the terminating null pointer */
  while (x != 0)
    {
      mpfr_init (x);
      x = (mpfr_ptr) va_arg (arg, mpfr_ptr);
    }
  va_end (arg);
}
diff --git a/src/inits2.c b/src/inits2.c
new file mode 100644
index 000000000..a17165509
--- /dev/null
+++ b/src/inits2.c
@@ -0,0 +1,66 @@
+/* mpfr_inits2 -- initialize several floating-point numbers with given
+ precision
+
+Copyright 2003, 2004, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#ifdef HAVE_CONFIG_H
+#undef HAVE_STDARG
+#include "config.h" /* for a build within gmp */
+#endif
+
+#if HAVE_STDARG
+# include <stdarg.h>
+#else
+# include <varargs.h>
+#endif
+
+#include "mpfr-impl.h"
+
+/*
+ * Contrary to mpfr_init2, mpfr_prec_t p is the first argument
+ */
+
+/* Explicit support for K&R compiler */
/* Initialize every mpfr_t argument with precision p.  Contrary to
   mpfr_init2, the precision p is the FIRST argument; the list of
   mpfr_t pointers MUST be terminated by a null pointer. */
void
#if HAVE_STDARG
mpfr_inits2 (mpfr_prec_t p, mpfr_ptr x, ...)
#else
mpfr_inits2 (va_alist)
 va_dcl
#endif
{
  va_list arg;
#if HAVE_STDARG
  va_start (arg, x);
#else
  /* K&R varargs: fetch p and the first pointer from the va_list */
  mpfr_prec_t p;
  mpfr_ptr x;
  va_start(arg);
  p = va_arg (arg, mpfr_prec_t);
  x = va_arg (arg, mpfr_ptr);
#endif
  /* stop at the terminating null pointer */
  while (x != 0)
    {
      mpfr_init2 (x, p);
      x = (mpfr_ptr) va_arg (arg, mpfr_ptr);
    }
  va_end (arg);
}
diff --git a/src/inp_str.c b/src/inp_str.c
new file mode 100644
index 000000000..7925a3643
--- /dev/null
+++ b/src/inp_str.c
@@ -0,0 +1,87 @@
+/* mpf_inp_str(dest_float, stream, base) -- Input a number in base
+ BASE from stdio stream STREAM and store the result in DEST_FLOAT.
+
+Copyright 1999, 2001, 2002, 2004, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+(Copied from GMP, file mpf/inp_str.c)
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#include <ctype.h>
+
+#include "mpfr-impl.h"
+
+size_t
+mpfr_inp_str (mpfr_ptr rop, FILE *stream, int base, mpfr_rnd_t rnd_mode)
+{
+ unsigned char *str;
+ size_t alloc_size, str_size;
+ int c;
+ int retval;
+ size_t nread;
+
+ if (stream == NULL)
+ stream = stdin;
+
+ alloc_size = 100;
+ str = (unsigned char *) (*__gmp_allocate_func) (alloc_size);
+ str_size = 0;
+ nread = 0;
+
+ /* Skip whitespace. */
+ do
+ {
+ c = getc (stream);
+ nread++;
+ }
+ while (isspace (c));
+
+ /* number of characters read is nread */
+
+ for (;;)
+ {
+ if (str_size >= alloc_size)
+ {
+ size_t old_alloc_size = alloc_size;
+ alloc_size = alloc_size * 3 / 2;
+ str = (unsigned char *)
+ (*__gmp_reallocate_func) (str, old_alloc_size, alloc_size);
+ }
+ if (c == EOF || isspace (c))
+ break;
+ str[str_size++] = (unsigned char) c;
+ c = getc (stream);
+ }
+ ungetc (c, stream);
+
+ /* number of characters read is nread + str_size - 1 */
+
+ /* we can exit the for loop only by the break instruction,
+ then necessarily str_size >= alloc_size was checked, so
+ now str_size < alloc_size */
+
+ str[str_size] = '\0';
+
+ retval = mpfr_set_str (rop, (char *) str, base, rnd_mode);
+ (*__gmp_free_func) (str, alloc_size);
+
+ if (retval == -1)
+ return 0; /* error */
+
+ return str_size + nread - 1;
+}
diff --git a/src/int_ceil_log2.c b/src/int_ceil_log2.c
new file mode 100644
index 000000000..de6052e36
--- /dev/null
+++ b/src/int_ceil_log2.c
@@ -0,0 +1,42 @@
+/* __gmpfr_int_ceil_log2 -- Integer ceil of log2(x)
+
+Copyright 2004, 2005, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#define MPFR_NEED_LONGLONG_H /* for count_leading_zeros */
+#include "mpfr-impl.h"
+
+int
+__gmpfr_int_ceil_log2 (unsigned long n)
+{
+ if (MPFR_UNLIKELY (n == 1))
+ return 0;
+ else
+ {
+ int b;
+ mp_limb_t limb;
+
+ MPFR_ASSERTN (n > 1);
+ limb = n - 1;
+ MPFR_ASSERTN (limb == n - 1);
+ count_leading_zeros (b, limb);
+ return GMP_NUMB_BITS - b;
+ }
+}
diff --git a/src/isinf.c b/src/isinf.c
new file mode 100644
index 000000000..1027e0d1f
--- /dev/null
+++ b/src/isinf.c
@@ -0,0 +1,29 @@
+/* mpfr_inf_p -- check for infinities
+
+Copyright 2000, 2001, 2004, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#include "mpfr-impl.h"
+
/* Return nonzero iff x is an infinity (+Inf or -Inf).
   The name is parenthesized so this definition is not expanded by a
   same-named macro possibly provided by mpfr.h. */
int
(mpfr_inf_p) (mpfr_srcptr x)
{
  return MPFR_IS_INF(x);
}
diff --git a/src/isinteger.c b/src/isinteger.c
new file mode 100644
index 000000000..40bc7833f
--- /dev/null
+++ b/src/isinteger.c
@@ -0,0 +1,59 @@
+/* mpfr_integer_p -- test if a mpfr variable is integer.
+
+Copyright 2001, 2002, 2003, 2004, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+
+#include "mpfr-impl.h"
+
/* Return 1 if x is an integer, 0 otherwise.
   NaN and Inf are not integers; zero is.  Otherwise x is an integer
   iff no bit of its mantissa lies below the binary point, i.e. iff
   all bits beyond the first EXP(x) ones are zero. */
int
mpfr_integer_p (mpfr_srcptr x)
{
  mpfr_exp_t expo;
  mpfr_prec_t prec;
  mp_size_t xn;
  mp_limb_t *xp;

  /* singular cases: only zero is an integer */
  if (MPFR_UNLIKELY( MPFR_IS_SINGULAR(x)))
    return (MPFR_IS_ZERO(x));

  /* |x| < 1 and x != 0: not an integer */
  expo = MPFR_GET_EXP (x);
  if (expo <= 0)
    return 0;

  /* the whole mantissa lies left of the binary point */
  prec = MPFR_PREC(x);
  if ((mpfr_uexp_t) expo >= (mpfr_uexp_t) prec)
    return 1;

  /* 0 < expo < prec */

  xn = (mp_size_t) ((prec - 1) / GMP_NUMB_BITS); /* index of last limb */
  xn -= (mp_size_t) (expo / GMP_NUMB_BITS);
  /* now the index of the last limb containing bits of the fractional part */

  xp = MPFR_MANT(x);
  MPFR_ASSERTN(xn >= 0);
  /* shift away the integer bits of the boundary limb; what remains
     must be zero, as must all lower limbs */
  if (xp[xn] << (expo % GMP_NUMB_BITS) != 0)
    return 0;
  while (--xn >= 0)
    if (xp[xn] != 0)
      return 0;
  return 1;
}
diff --git a/src/isnan.c b/src/isnan.c
new file mode 100644
index 000000000..f91e1edc1
--- /dev/null
+++ b/src/isnan.c
@@ -0,0 +1,29 @@
+/* mpfr_nan_p -- check for NaN
+
+Copyright 2000, 2001, 2004, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#include "mpfr-impl.h"
+
/* Return nonzero iff x is NaN.
   The name is parenthesized so this definition is not expanded by a
   same-named macro possibly provided by mpfr.h. */
int
(mpfr_nan_p) (mpfr_srcptr x)
{
  return MPFR_IS_NAN (x);
}
diff --git a/src/isnum.c b/src/isnum.c
new file mode 100644
index 000000000..bf3d1d5bd
--- /dev/null
+++ b/src/isnum.c
@@ -0,0 +1,29 @@
+/* mpfr_number_p -- check for ordinary numbers
+
+Copyright 2000, 2001, 2002, 2004, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#include "mpfr-impl.h"
+
/* Return nonzero iff x is an ordinary number, i.e. neither NaN nor an
   infinity (zero counts as a number). */
int
mpfr_number_p (mpfr_srcptr x)
{
  return MPFR_IS_FP(x);
}
diff --git a/src/isqrt.c b/src/isqrt.c
new file mode 100644
index 000000000..39d03171f
--- /dev/null
+++ b/src/isqrt.c
@@ -0,0 +1,84 @@
+/* __gmpfr_isqrt && __gmpfr_cuberoot -- Integer square root and cube root
+
+Copyright 2004, 2005, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#include "mpfr-impl.h"
+
/* returns floor(sqrt(n)) */
unsigned long
__gmpfr_isqrt (unsigned long n)
{
  unsigned long rem, r;

  /* Initial guess: the power of two 2^k with 4^k <= n, so that
     r is within a factor of 2 of floor(sqrt(n)). */
  for (rem = n, r = 1; rem >= 2; rem >>= 2)
    r <<= 1;

  /* Newton iteration on r -> (r + n/r)/2, with an overflow-aware
     stopping test.  As mathematically r*(r+2) < 2*ULONG_MAX, the
     condition r*r > r*(r+2) is true exactly when r*(r+2) wraps around
     but r*r does not; mathematically one then has r*r <= n <= r*(r+2).
     If r*r itself wraps, n is "large" and n <= r*(r+2) cannot hold,
     so the loop continues. */
  do
    r = (r + n / r) / 2;
  while (!(r * r <= n && (r * r > r * (r + 2) || n <= r * (r + 2))));

  return r;
}
+
/* returns floor(n^(1/3)) */
unsigned long
__gmpfr_cuberoot (unsigned long n)
{
  unsigned long rem, r;

  /* Initial guess: the power of two 2^k with 8^k <= n, within a
     factor of 2 of floor(cbrt(n)). */
  for (rem = n, r = 1; rem >= 4; rem >>= 3)
    r <<= 1;

  /* Refine the guess a few times first (needed when n is large, so
     that mathematically (r+1)^3 is not much larger than ULONG_MAX). */
  if (n >= 256)
    {
      r = (2 * r + n / (r * r)) / 3;
      r = (2 * r + n / (r * r)) / 3;
      r = (2 * r + n / (r * r)) / 3;
    }

  /* Newton iteration r -> (2r + n/r^2)/3 with an overflow-aware
     stopping test, analogous to the one in __gmpfr_isqrt: the clause
     r^3 > (r+1)^3 detects wrap-around of (r+1)^3. */
  do
    r = (2 * r + n / (r * r)) / 3;
  while (!(r * r * r <= n && (r * r * r > (r + 1) * (r + 1) * (r + 1) ||
                              n < (r + 1) * (r + 1) * (r + 1))));

  return r;
}
diff --git a/src/isregular.c b/src/isregular.c
new file mode 100644
index 000000000..b85e08ada
--- /dev/null
+++ b/src/isregular.c
@@ -0,0 +1,29 @@
+/* mpfr_regular_p -- check for regular number (neither NaN, Inf or zero)
+
+Copyright 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#include "mpfr-impl.h"
+
/* Return nonzero iff x is a regular number, i.e. neither NaN, an
   infinity, nor zero (MPFR_IS_SINGULAR covers exactly those three).
   The name is parenthesized to avoid expansion by a same-named macro
   possibly provided by mpfr.h. */
int
(mpfr_regular_p) (mpfr_srcptr x)
{
  return MPFR_IS_SINGULAR(x) == 0;
}
diff --git a/src/iszero.c b/src/iszero.c
new file mode 100644
index 000000000..e05fa6852
--- /dev/null
+++ b/src/iszero.c
@@ -0,0 +1,29 @@
+/* mpfr_zero_p -- check for zero
+
+Copyright 2004, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#include "mpfr-impl.h"
+
/* Return nonzero iff x is zero.
   The name is parenthesized so this definition is not expanded by a
   same-named macro possibly provided by mpfr.h. */
int
(mpfr_zero_p) (mpfr_srcptr x)
{
  return MPFR_IS_ZERO(x);
}
diff --git a/src/jn.c b/src/jn.c
new file mode 100644
index 000000000..3db992e4c
--- /dev/null
+++ b/src/jn.c
@@ -0,0 +1,243 @@
+/* mpfr_j0, mpfr_j1, mpfr_jn -- Bessel functions of 1st kind, integer order.
+ http://www.opengroup.org/onlinepubs/009695399/functions/j0.html
+
+Copyright 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#define MPFR_NEED_LONGLONG_H
+#include "mpfr-impl.h"
+
+/* Relations: j(-n,z) = (-1)^n j(n,z)
+ j(n,-z) = (-1)^n j(n,z)
+*/
+
+static int mpfr_jn_asympt (mpfr_ptr, long, mpfr_srcptr, mpfr_rnd_t);
+
+/* j0(z): Bessel function of the first kind, order 0.
+   Thin wrapper around the generic mpfr_jn with n = 0. */
+int
+mpfr_j0 (mpfr_ptr res, mpfr_srcptr z, mpfr_rnd_t r)
+{
+  return mpfr_jn (res, 0, z, r);
+}
+
+/* j1(z): Bessel function of the first kind, order 1.
+   Thin wrapper around the generic mpfr_jn with n = 1. */
+int
+mpfr_j1 (mpfr_ptr res, mpfr_srcptr z, mpfr_rnd_t r)
+{
+  return mpfr_jn (res, 1, z, r);
+}
+
+/* Estimate k0 such that z^2/4 = k0 * (k0 + n),
+   i.e., k0 = (sqrt(n^2+z^2)-n)/2 = n/2 * (sqrt(1+(z/n)^2) - 1);
+   this is roughly the index of the largest term of the Taylor series.
+   Return min(2*k0/log(2), ULONG_MAX).
+   A 32-bit working precision is enough: only a rough estimate is needed.
+*/
+static unsigned long
+mpfr_jn_k0 (long n, mpfr_srcptr z)
+{
+  mpfr_t t, u;
+  unsigned long k0;
+
+  mpfr_init2 (t, 32);
+  mpfr_init2 (u, 32);
+  mpfr_div_si (t, z, n, MPFR_RNDN);
+  mpfr_sqr (t, t, MPFR_RNDN);
+  mpfr_add_ui (t, t, 1, MPFR_RNDN);
+  mpfr_sqrt (t, t, MPFR_RNDN);
+  mpfr_sub_ui (t, t, 1, MPFR_RNDN);
+  mpfr_mul_si (t, t, n, MPFR_RNDN);
+  /* now t ~ 2*k0; the following is a 32-bit approximation to nearest
+     of log(2) */
+  mpfr_set_str_binary (u, "0.10110001011100100001011111111");
+  mpfr_div (t, t, u, MPFR_RNDN);
+  if (mpfr_fits_ulong_p (t, MPFR_RNDN))
+    k0 = mpfr_get_ui (t, MPFR_RNDN);
+  else
+    k0 = ULONG_MAX;
+  mpfr_clear (t);
+  mpfr_clear (u);
+  return k0;
+}
+
+/* Set res to j(n,z), the Bessel function of the first kind of (possibly
+   negative) integer order n, rounded in direction r.  Returns the usual
+   MPFR ternary inexact value. */
+int
+mpfr_jn (mpfr_ptr res, long n, mpfr_srcptr z, mpfr_rnd_t r)
+{
+  int inex;
+  unsigned long absn;
+  mpfr_prec_t prec, pbound, err;
+  mpfr_exp_t exps, expT;
+  mpfr_t y, s, t, absz;
+  unsigned long k, zz, k0;
+  MPFR_ZIV_DECL (loop);
+
+  MPFR_LOG_FUNC (("x[%#R]=%R n=%d rnd=%d", z, z, n, r),
+                 ("y[%#R]=%R", res, res));
+
+  absn = SAFE_ABS (unsigned long, n);
+
+  /* singular input: NaN, +/-Inf or +/-0 */
+  if (MPFR_UNLIKELY (MPFR_IS_SINGULAR (z)))
+    {
+      if (MPFR_IS_NAN (z))
+        {
+          MPFR_SET_NAN (res);
+          MPFR_RET_NAN;
+        }
+      /* j(n,z) tends to zero when z goes to +Inf or -Inf, oscillating around
+         0. We choose to return +0 in that case. */
+      else if (MPFR_IS_INF (z)) /* FIXME: according to j(-n,z) = (-1)^n j(n,z)
+                                   we might want to give a sign depending on
+                                   z and n */
+        return mpfr_set_ui (res, 0, r);
+      else /* z=0: j(0,0)=1, j(n odd,+/-0) = +/-0 if n > 0, -/+0 if n < 0,
+              j(n even,+/-0) = +0 */
+        {
+          if (n == 0)
+            return mpfr_set_ui (res, 1, r);
+          else if (absn & 1) /* n odd */
+            return (n > 0) ? mpfr_set (res, z, r) : mpfr_neg (res, z, r);
+          else /* n even */
+            return mpfr_set_ui (res, 0, r);
+        }
+    }
+
+  /* check for tiny input for j0: j0(z) = 1 - z^2/4 + ..., more precisely
+     |j0(z) - 1| <= z^2/4 for -1 <= z <= 1. */
+  if (n == 0)
+    MPFR_FAST_COMPUTE_IF_SMALL_INPUT (res, __gmpfr_one, -2 * MPFR_GET_EXP (z),
+                                      2, 0, r, return _inexact);
+
+  /* idem for j1: j1(z) = z/2 - z^3/16 + ..., more precisely
+     |j1(z) - z/2| <= |z^3|/16 for -1 <= z <= 1, with the sign of j1(z) - z/2
+     being the opposite of that of z. */
+  if (n == 1)
+    /* we first compute 2j1(z) = z - z^3/8 + ..., then divide by 2 using
+       the "extra" argument of MPFR_FAST_COMPUTE_IF_SMALL_INPUT. */
+    MPFR_FAST_COMPUTE_IF_SMALL_INPUT (res, z, -2 * MPFR_GET_EXP (z), 3,
+                                      0, r, mpfr_div_2ui (res, res, 1, r));
+
+  /* we can use the asymptotic expansion as soon as |z| > p log(2)/2,
+     but to get some margin we use it for |z| > p/2 */
+  pbound = MPFR_PREC (res) / 2 + 3;
+  MPFR_ASSERTN (pbound <= ULONG_MAX);
+  /* absz aliases z with forced positive sign: no copy of the limbs */
+  MPFR_ALIAS (absz, z, 1, MPFR_EXP (z));
+  if (mpfr_cmp_ui (absz, pbound) > 0)
+    {
+      inex = mpfr_jn_asympt (res, n, z, r);
+      /* inex == 0 means the asymptotic expansion failed to round;
+         fall through to the Taylor series below */
+      if (inex != 0)
+        return inex;
+    }
+
+  mpfr_init2 (y, 32);
+
+  /* check underflow case: |j(n,z)| <= 1/sqrt(2 Pi n) (ze/2n)^n
+     (see algorithms.tex) */
+  if (absn > 0)
+    {
+      /* the following is an upper 32-bit approximation of exp(1)/2 */
+      mpfr_set_str_binary (y, "1.0101101111110000101010001011001");
+      if (MPFR_SIGN(z) > 0)
+        mpfr_mul (y, y, z, MPFR_RNDU);
+      else
+        {
+          mpfr_mul (y, y, z, MPFR_RNDD);
+          mpfr_neg (y, y, MPFR_RNDU);
+        }
+      mpfr_div_ui (y, y, absn, MPFR_RNDU);
+      /* now y is an upper approximation of |ze/2n|: y < 2^EXP(y),
+         thus |j(n,z)| < 1/2*y^n < 2^(n*EXP(y)-1).
+         If n*EXP(y) < __gmpfr_emin then we have an underflow.
+         Warning: absn is an unsigned long. */
+      if ((MPFR_EXP(y) < 0 && absn > (unsigned long) (-__gmpfr_emin))
+          || (absn <= (unsigned long) (-MPFR_EMIN_MIN) &&
+              MPFR_EXP(y) < __gmpfr_emin / (mpfr_exp_t) absn))
+        {
+          mpfr_clear (y);
+          /* the sign of the underflowed result follows
+             j(-n,z) = (-1)^n j(n,z) and j(n,-z) = (-1)^n j(n,z) */
+          return mpfr_underflow (res, (r == MPFR_RNDN) ? MPFR_RNDZ : r,
+                         (n % 2) ? ((n > 0) ? MPFR_SIGN(z) : -MPFR_SIGN(z))
+                                 : MPFR_SIGN_POS);
+        }
+    }
+
+  mpfr_init (s);
+  mpfr_init (t);
+
+  /* the logarithm of the ratio between the largest term in the series
+     and the first one is roughly bounded by k0, which we add to the
+     working precision to take into account this cancellation */
+  k0 = mpfr_jn_k0 (absn, z);
+  prec = MPFR_PREC (res) + k0 + 2 * MPFR_INT_CEIL_LOG2 (MPFR_PREC (res)) + 3;
+
+  /* Ziv loop: retry with increasing precision until rounding is possible */
+  MPFR_ZIV_INIT (loop, prec);
+  for (;;)
+    {
+      mpfr_set_prec (y, prec);
+      mpfr_set_prec (s, prec);
+      mpfr_set_prec (t, prec);
+      mpfr_pow_ui (t, z, absn, MPFR_RNDN); /* z^|n| */
+      mpfr_mul (y, z, z, MPFR_RNDN); /* z^2 */
+      zz = mpfr_get_ui (y, MPFR_RNDU);
+      MPFR_ASSERTN (zz < ULONG_MAX);
+      mpfr_div_2ui (y, y, 2, MPFR_RNDN); /* z^2/4 */
+      mpfr_fac_ui (s, absn, MPFR_RNDN); /* |n|! */
+      mpfr_div (t, t, s, MPFR_RNDN);
+      if (absn > 0)
+        mpfr_div_2ui (t, t, absn, MPFR_RNDN);
+      /* now t = z^|n| / (2^|n| |n|!), the first term of the series */
+      mpfr_set (s, t, MPFR_RNDN);
+      exps = MPFR_EXP (s);
+      expT = exps;
+      for (k = 1; ; k++)
+        {
+          /* next term: t(k) = -t(k-1) * (z^2/4) / (k (k+|n|)) */
+          mpfr_mul (t, t, y, MPFR_RNDN);
+          mpfr_neg (t, t, MPFR_RNDN);
+          if (k + absn <= ULONG_MAX / k)
+            mpfr_div_ui (t, t, k * (k + absn), MPFR_RNDN);
+          else /* k * (k + absn) would overflow: divide in two steps */
+            {
+              mpfr_div_ui (t, t, k, MPFR_RNDN);
+              mpfr_div_ui (t, t, k + absn, MPFR_RNDN);
+            }
+          exps = MPFR_EXP (t);
+          if (exps > expT)
+            expT = exps;
+          mpfr_add (s, s, t, MPFR_RNDN);
+          exps = MPFR_EXP (s);
+          if (exps > expT)
+            expT = exps;
+          /* stop when the current term is negligible with respect to s and
+             the terms have started to decrease (z^2/4 < k (k+n)).
+             NOTE(review): k is unsigned long and n is long, so k + n is
+             evaluated in unsigned arithmetic; this assumes k + n >= 0 when
+             n < 0 at this point -- confirm against the absn usage above. */
+          if (MPFR_EXP (t) + (mpfr_exp_t) prec <= MPFR_EXP (s) &&
+              zz / (2 * k) < k + n)
+            break;
+        }
+      /* the error is bounded by (4k^2+21/2k+7) ulp(s)*2^(expT-exps)
+         <= (k+2)^2 ulp(s)*2^(2+expT-exps) */
+      err = 2 * MPFR_INT_CEIL_LOG2(k + 2) + 2 + expT - MPFR_EXP (s);
+      if (MPFR_LIKELY (MPFR_CAN_ROUND (s, prec - err, MPFR_PREC(res), r)))
+        break;
+      MPFR_ZIV_NEXT (loop, prec);
+    }
+  MPFR_ZIV_FREE (loop);
+
+  /* j(-n,z) = (-1)^n j(n,z): negate the sum for negative odd n */
+  inex = ((n >= 0) || ((n & 1) == 0)) ? mpfr_set (res, s, r)
+                                      : mpfr_neg (res, s, r);
+
+  mpfr_clear (y);
+  mpfr_clear (s);
+  mpfr_clear (t);
+
+  return inex;
+}
+
+#define MPFR_JN
+#include "jyn_asympt.c"
diff --git a/src/jyn_asympt.c b/src/jyn_asympt.c
new file mode 100644
index 000000000..371300080
--- /dev/null
+++ b/src/jyn_asympt.c
@@ -0,0 +1,269 @@
+/* mpfr_jn_asympt, mpfr_yn_asympt -- shared code for mpfr_jn and mpfr_yn
+
+Copyright 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+/* Select the function name according to which caller includes this file:
+   jn.c defines MPFR_JN, yn.c defines MPFR_YN. */
+#ifdef MPFR_JN
+# define FUNCTION mpfr_jn_asympt
+#else
+# ifdef MPFR_YN
+#  define FUNCTION mpfr_yn_asympt
+# else
+#  error "neither MPFR_JN nor MPFR_YN is defined"
+# endif
+#endif
+
+/* Implements asymptotic expansion for jn or yn (formulae 9.2.5 and 9.2.6
+   from Abramowitz & Stegun).
+   Assumes |z| > p log(2)/2, where p is the target precision
+   (z can be negative only for jn).
+   Return 0 if the expansion does not converge enough (the value 0 as inexact
+   flag should not happen for normal input).
+*/
+static int
+FUNCTION (mpfr_ptr res, long n, mpfr_srcptr z, mpfr_rnd_t r)
+{
+  mpfr_t s, c, P, Q, t, iz, err_t, err_s, err_u;
+  mpfr_prec_t w;
+  long k;
+  int inex, stop, diverge = 0;
+  mpfr_exp_t err2, err;
+  MPFR_ZIV_DECL (loop);
+
+  mpfr_init (c);
+
+  w = MPFR_PREC(res) + MPFR_INT_CEIL_LOG2(MPFR_PREC(res)) + 4;
+
+  MPFR_ZIV_INIT (loop, w);
+  for (;;)
+    {
+      mpfr_set_prec (c, w);
+      /* the other temporaries are created at each iteration with the
+         current working precision w, and cleared before the rounding
+         test at the bottom of the loop */
+      mpfr_init2 (s, w);
+      mpfr_init2 (P, w);
+      mpfr_init2 (Q, w);
+      mpfr_init2 (t, w);
+      mpfr_init2 (iz, w);
+      mpfr_init2 (err_t, 31);
+      mpfr_init2 (err_s, 31);
+      mpfr_init2 (err_u, 31);
+
+      /* Approximate sin(z) and cos(z). In the following, err <= k means that
+         the approximate value y and the true value x are related by
+         y = x * (1 + u)^k with |u| <= 2^(-w), following Higham's method. */
+      mpfr_sin_cos (s, c, z, MPFR_RNDN);
+      if (MPFR_IS_NEG(z))
+        mpfr_neg (s, s, MPFR_RNDN); /* compute jn/yn(|z|), fix sign later */
+      /* The absolute error on s/c is bounded by 1/2 ulp(1/2) <= 2^(-w-1). */
+      mpfr_add (t, s, c, MPFR_RNDN);
+      mpfr_sub (c, s, c, MPFR_RNDN);
+      mpfr_swap (s, t);
+      /* now s approximates sin(z)+cos(z), and c approximates sin(z)-cos(z),
+         with total absolute error bounded by 2^(1-w). */
+
+      /* precompute 1/(8|z|) */
+      mpfr_si_div (iz, MPFR_IS_POS(z) ? 1 : -1, z, MPFR_RNDN); /* err <= 1 */
+      mpfr_div_2ui (iz, iz, 3, MPFR_RNDN);
+
+      /* compute the truncated series P and Q of the expansion */
+      mpfr_set_ui (P, 1, MPFR_RNDN);
+      mpfr_set_ui (Q, 0, MPFR_RNDN);
+      mpfr_set_ui (t, 1, MPFR_RNDN); /* current term */
+      mpfr_set_ui (err_t, 0, MPFR_RNDN); /* error on t */
+      mpfr_set_ui (err_s, 0, MPFR_RNDN); /* error on P and Q (sum of errors) */
+      for (k = 1, stop = 0; stop < 4; k++)
+        {
+          /* compute next term: t(k)/t(k-1) = (2n+2k-1)(2n-2k+1)/(8kz) */
+          mpfr_mul_si (t, t, 2 * (n + k) - 1, MPFR_RNDN); /* err <= err_k + 1 */
+          mpfr_mul_si (t, t, 2 * (n - k) + 1, MPFR_RNDN); /* err <= err_k + 2 */
+          mpfr_div_ui (t, t, k, MPFR_RNDN); /* err <= err_k + 3 */
+          mpfr_mul (t, t, iz, MPFR_RNDN); /* err <= err_k + 5 */
+          /* the relative error on t is bounded by (1+u)^(5k)-1, which is
+             bounded by 6ku for 6ku <= 0.02: first |5 log(1+u)| <= |5.5u|
+             for |u| <= 0.15, then |exp(5.5u)-1| <= 6u for |u| <= 0.02. */
+          mpfr_mul_ui (err_t, t, 6 * k, MPFR_IS_POS(t) ? MPFR_RNDU : MPFR_RNDD);
+          mpfr_abs (err_t, err_t, MPFR_RNDN); /* exact */
+          /* the absolute error on t is bounded by err_t * 2^(-w) */
+          mpfr_abs (err_u, t, MPFR_RNDU);
+          mpfr_mul_2ui (err_u, err_u, w, MPFR_RNDU); /* t * 2^w */
+          mpfr_add (err_u, err_u, err_t, MPFR_RNDU); /* max|t| * 2^w */
+          if (stop >= 2)
+            {
+              /* take into account the neglected terms: t * 2^w */
+              mpfr_div_2ui (err_s, err_s, w, MPFR_RNDU);
+              if (MPFR_IS_POS(t))
+                mpfr_add (err_s, err_s, t, MPFR_RNDU);
+              else
+                mpfr_sub (err_s, err_s, t, MPFR_RNDU);
+              mpfr_mul_2ui (err_s, err_s, w, MPFR_RNDU);
+              stop ++;
+            }
+          /* if k is odd, add to Q, otherwise to P */
+          else if (k & 1)
+            {
+              /* if k = 1 mod 4, add, otherwise subtract */
+              if ((k & 2) == 0)
+                mpfr_add (Q, Q, t, MPFR_RNDN);
+              else
+                mpfr_sub (Q, Q, t, MPFR_RNDN);
+              /* check if the next term is smaller than ulp(Q): if EXP(err_u)
+                 <= EXP(Q), since the current term is bounded by
+                 err_u * 2^(-w), it is bounded by ulp(Q) */
+              if (MPFR_EXP(err_u) <= MPFR_EXP(Q))
+                stop ++;
+              else
+                stop = 0;
+            }
+          else
+            {
+              /* if k = 0 mod 4, add, otherwise subtract */
+              if ((k & 2) == 0)
+                mpfr_add (P, P, t, MPFR_RNDN);
+              else
+                mpfr_sub (P, P, t, MPFR_RNDN);
+              /* check if the next term is smaller than ulp(P) */
+              if (MPFR_EXP(err_u) <= MPFR_EXP(P))
+                stop ++;
+              else
+                stop = 0;
+            }
+          mpfr_add (err_s, err_s, err_t, MPFR_RNDU);
+          /* the sum of the rounding errors on P and Q is bounded by
+             err_s * 2^(-w) */
+
+          /* stop when the series starts to diverge */
+          if (stop < 2 &&
+              ((MPFR_IS_POS(z) && mpfr_cmp_ui (z, (k + 1) / 2) < 0) ||
+               (MPFR_IS_NEG(z) && mpfr_cmp_si (z, - ((k + 1) / 2)) > 0)))
+            {
+              /* if we have to stop the series because it diverges, then
+                 increasing the precision will most probably fail, since
+                 we will stop to the same point, and thus compute a very
+                 similar approximation */
+              diverge = 1;
+              stop = 2; /* force stop */
+            }
+        }
+      /* the sum of the total errors on P and Q is bounded by err_s * 2^(-w) */
+
+      /* Now combine: the sum of the rounding errors on P and Q is bounded by
+         err_s * 2^(-w), and the absolute error on s/c is bounded by 2^(1-w) */
+      if ((n & 1) == 0) /* n even: P * (sin + cos) + Q (cos - sin) for jn
+                           Q * (sin + cos) + P (sin - cos) for yn */
+        {
+#ifdef MPFR_JN
+          mpfr_mul (c, c, Q, MPFR_RNDN); /* Q * (sin - cos) */
+          mpfr_mul (s, s, P, MPFR_RNDN); /* P * (sin + cos) */
+#else
+          mpfr_mul (c, c, P, MPFR_RNDN); /* P * (sin - cos) */
+          mpfr_mul (s, s, Q, MPFR_RNDN); /* Q * (sin + cos) */
+#endif
+          err = MPFR_EXP(c);
+          if (MPFR_EXP(s) > err)
+            err = MPFR_EXP(s);
+#ifdef MPFR_JN
+          mpfr_sub (s, s, c, MPFR_RNDN);
+#else
+          mpfr_add (s, s, c, MPFR_RNDN);
+#endif
+        }
+      else /* n odd: P * (sin - cos) + Q (cos + sin) for jn,
+              Q * (sin - cos) - P (cos + sin) for yn */
+        {
+#ifdef MPFR_JN
+          mpfr_mul (c, c, P, MPFR_RNDN); /* P * (sin - cos) */
+          mpfr_mul (s, s, Q, MPFR_RNDN); /* Q * (sin + cos) */
+#else
+          mpfr_mul (c, c, Q, MPFR_RNDN); /* Q * (sin - cos) */
+          mpfr_mul (s, s, P, MPFR_RNDN); /* P * (sin + cos) */
+#endif
+          err = MPFR_EXP(c);
+          if (MPFR_EXP(s) > err)
+            err = MPFR_EXP(s);
+#ifdef MPFR_JN
+          mpfr_add (s, s, c, MPFR_RNDN);
+#else
+          mpfr_sub (s, c, s, MPFR_RNDN);
+#endif
+        }
+      if ((n & 2) != 0)
+        mpfr_neg (s, s, MPFR_RNDN);
+      if (MPFR_EXP(s) > err)
+        err = MPFR_EXP(s);
+      /* the absolute error on s is bounded by P*err(s/c) + Q*err(s/c)
+         + err(P)*(s/c) + err(Q)*(s/c) + 3 * 2^(err - w - 1)
+         <= (|P|+|Q|) * 2^(1-w) + err_s * 2^(1-w) + 2^err * 2^(1-w),
+         since |c|, |old_s| <= 2. */
+      err2 = (MPFR_EXP(P) >= MPFR_EXP(Q)) ? MPFR_EXP(P) + 2 : MPFR_EXP(Q) + 2;
+      /* (|P| + |Q|) * 2^(1 - w) <= 2^(err2 - w) */
+      err = MPFR_EXP(err_s) >= err ? MPFR_EXP(err_s) + 2 : err + 2;
+      /* err_s * 2^(1-w) + 2^old_err * 2^(1-w) <= 2^err * 2^(-w) */
+      err2 = (err >= err2) ? err + 1 : err2 + 1;
+      /* now the absolute error on s is bounded by 2^(err2 - w) */
+
+      /* multiply by sqrt(1/(Pi*z)) */
+      mpfr_const_pi (c, MPFR_RNDN); /* Pi, err <= 1 */
+      mpfr_mul (c, c, z, MPFR_RNDN); /* err <= 2 */
+      mpfr_si_div (c, MPFR_IS_POS(z) ? 1 : -1, c, MPFR_RNDN); /* err <= 3 */
+      mpfr_sqrt (c, c, MPFR_RNDN); /* err<=5/2, thus the absolute error is
+                                      bounded by 3*u*|c| for |u| <= 0.25 */
+      mpfr_mul (err_t, c, s, MPFR_SIGN(c)==MPFR_SIGN(s) ? MPFR_RNDU : MPFR_RNDD);
+      mpfr_abs (err_t, err_t, MPFR_RNDU);
+      mpfr_mul_ui (err_t, err_t, 3, MPFR_RNDU);
+      /* 3*2^(-w)*|old_c|*|s| [see below] is bounded by err_t * 2^(-w) */
+      err2 += MPFR_EXP(c);
+      /* |old_c| * 2^(err2 - w) [see below] is bounded by 2^(err2-w) */
+      mpfr_mul (c, c, s, MPFR_RNDN); /* the absolute error on c is bounded by
+                                        1/2 ulp(c) + 3*2^(-w)*|old_c|*|s|
+                                        + |old_c| * 2^(err2 - w) */
+      /* compute err_t * 2^(-w) + 1/2 ulp(c) = (err_t + 2^EXP(c)) * 2^(-w) */
+      err = (MPFR_EXP(err_t) > MPFR_EXP(c)) ? MPFR_EXP(err_t) + 1 : MPFR_EXP(c) + 1;
+      /* err_t * 2^(-w) + 1/2 ulp(c) <= 2^(err - w) */
+      /* now err_t * 2^(-w) bounds 1/2 ulp(c) + 3*2^(-w)*|old_c|*|s| */
+      err = (err >= err2) ? err + 1 : err2 + 1;
+      /* the absolute error on c is bounded by 2^(err - w) */
+
+      /* free the per-iteration temporaries before the rounding test, so
+         that they can be re-created with a larger precision if we loop */
+      mpfr_clear (s);
+      mpfr_clear (P);
+      mpfr_clear (Q);
+      mpfr_clear (t);
+      mpfr_clear (iz);
+      mpfr_clear (err_t);
+      mpfr_clear (err_s);
+      mpfr_clear (err_u);
+
+      err -= MPFR_EXP(c);
+      if (MPFR_LIKELY (MPFR_CAN_ROUND (c, w - err, MPFR_PREC(res), r)))
+        break;
+      if (diverge != 0)
+        {
+          mpfr_set (c, z, r); /* will force inex=0 below, which means the
+                                 asymptotic expansion failed */
+          break;
+        }
+      MPFR_ZIV_NEXT (loop, w);
+    }
+  MPFR_ZIV_FREE (loop);
+
+  /* fix the sign for jn with negative z: j(n,-z) = (-1)^n j(n,z) */
+  inex = (MPFR_IS_POS(z) || ((n & 1) == 0)) ? mpfr_set (res, c, r)
+                                            : mpfr_neg (res, c, r);
+  mpfr_clear (c);
+
+  return inex;
+}
diff --git a/src/li2.c b/src/li2.c
new file mode 100644
index 000000000..bdf776c0b
--- /dev/null
+++ b/src/li2.c
@@ -0,0 +1,631 @@
+/* mpfr_li2 -- Dilogarithm.
+
+Copyright 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#define MPFR_NEED_LONGLONG_H
+#include "mpfr-impl.h"
+
+/* Compute the alternating series
+   s = S(z) = \sum_{k=0}^infty B_{2k} (z))^{2k+1} / (2k+1)!
+   with 0 < z <= log(2) to the precision of s rounded in the direction
+   rnd_mode.
+   Return the maximum index of the truncation which is useful
+   for determining the relative error.
+*/
+static int
+li2_series (mpfr_t sum, mpfr_srcptr z, mpfr_rnd_t rnd_mode)
+{
+  int i, Bm, Bmax;
+  mpfr_t s, u, v, w;
+  mpfr_prec_t sump, p;
+  mpfr_exp_t se, err;
+  mpz_t *B;
+  MPFR_ZIV_DECL (loop);
+
+  /* The series converges for |z| < 2 pi, but in mpfr_li2 the argument is
+     reduced so that 0 < z <= log(2). Here is an additional check that z is
+     (nearly) correct */
+  MPFR_ASSERTD (MPFR_IS_STRICTPOS (z));
+  MPFR_ASSERTD (mpfr_cmp_d (z, 0.6953125) <= 0);
+
+  sump = MPFR_PREC (sum); /* target precision */
+  p = sump + MPFR_INT_CEIL_LOG2 (sump) + 4; /* the working precision */
+  mpfr_init2 (s, p);
+  mpfr_init2 (u, p);
+  mpfr_init2 (v, p);
+  mpfr_init2 (w, p);
+
+  /* B is an array of exact mpz values B_{2i}*(2i+1)!, grown on demand
+     below; it is freed (together with its elements) before returning */
+  B = mpfr_bernoulli_internal ((mpz_t *) 0, 0);
+  Bm = Bmax = 1;
+
+  MPFR_ZIV_INIT (loop, p);
+  for (;;)
+    {
+      mpfr_sqr (u, z, MPFR_RNDU);
+      mpfr_set (v, z, MPFR_RNDU);
+      mpfr_set (s, z, MPFR_RNDU);
+      se = MPFR_GET_EXP (s);
+      err = 0;
+
+      for (i = 1;; i++)
+        {
+          if (i >= Bmax)
+            B = mpfr_bernoulli_internal (B, Bmax++); /* B_2i*(2i+1)!, exact */
+
+          mpfr_mul (v, u, v, MPFR_RNDU);
+          mpfr_div_ui (v, v, 2 * i, MPFR_RNDU);
+          mpfr_div_ui (v, v, 2 * i, MPFR_RNDU);
+          mpfr_div_ui (v, v, 2 * i + 1, MPFR_RNDU);
+          mpfr_div_ui (v, v, 2 * i + 1, MPFR_RNDU);
+          /* here, v_2i = v_{2i-2} / (2i * (2i+1))^2 */
+
+          mpfr_mul_z (w, v, B[i], MPFR_RNDN);
+          /* here, w_2i = v_2i * B_2i * (2i+1)! with
+             error(w_2i) < 2^(5 * i + 8) ulp(w_2i) (see algorithms.tex) */
+
+          mpfr_add (s, s, w, MPFR_RNDN);
+
+          err = MAX (err + se, 5 * i + 8 + MPFR_GET_EXP (w))
+            - MPFR_GET_EXP (s);
+          err = 2 + MAX (-1, err);
+          se = MPFR_GET_EXP (s);
+          if (MPFR_GET_EXP (w) <= se - (mpfr_exp_t) p)
+            break;
+        }
+
+      /* the previous value of err is the rounding error,
+         the truncation error is less than EXP(z) - 6 * i - 5
+         (see algorithms.tex) */
+      err = MAX (err, MPFR_GET_EXP (z) - 6 * i - 5) + 1;
+      if (MPFR_CAN_ROUND (s, (mpfr_exp_t) p - err, sump, rnd_mode))
+        break;
+
+      MPFR_ZIV_NEXT (loop, p);
+      mpfr_set_prec (s, p);
+      mpfr_set_prec (u, p);
+      mpfr_set_prec (v, p);
+      mpfr_set_prec (w, p);
+    }
+  MPFR_ZIV_FREE (loop);
+  mpfr_set (sum, s, rnd_mode);
+
+  Bm = Bmax;
+  while (Bm--)
+    mpz_clear (B[Bm]);
+  (*__gmp_free_func) (B, Bmax * sizeof (mpz_t));
+  mpfr_clears (s, u, v, w, (mpfr_ptr) 0);
+
+  /* Let K be the returned value.
+     1. As we compute an alternating series, the truncation error has the same
+     sign as the next term w_{K+2} which is positive iff K%4 == 0.
+     2. Assume that error(z) <= (1+t) z', where z' is the actual value, then
+     error(s) <= 2 * (K+1) * t (see algorithms.tex).
+  */
+  return 2 * i; /* i is the index of the last term added to s */
+}
+
+/* try asymptotic expansion when x is large and positive:
+   Li2(x) = -log(x)^2/2 + Pi^2/3 - 1/x + O(1/x^2).
+   More precisely for x >= 2 we have for g(x) = -log(x)^2/2 + Pi^2/3:
+   -2 <= x * (Li2(x) - g(x)) <= -1
+   thus |Li2(x) - g(x)| <= 2/x.
+   Assumes x >= 38, which ensures log(x)^2/2 >= 2*Pi^2/3, and g(x) <= -3.3.
+   Return 0 if asymptotic expansion failed (unable to round), otherwise
+   returns correct ternary value.
+*/
+static int
+mpfr_li2_asympt_pos (mpfr_ptr y, mpfr_srcptr x, mpfr_rnd_t rnd_mode)
+{
+  mpfr_t g, h;
+  mpfr_prec_t w = MPFR_PREC (y) + 20;
+  int inex = 0; /* stays 0 (= failure) unless we can round below */
+
+  MPFR_ASSERTN (mpfr_cmp_ui (x, 38) >= 0);
+
+  mpfr_init2 (g, w);
+  mpfr_init2 (h, w);
+  mpfr_log (g, x, MPFR_RNDN); /* rel. error <= |(1 + theta) - 1| */
+  mpfr_sqr (g, g, MPFR_RNDN); /* rel. error <= |(1 + theta)^3 - 1| <= 2^(2-w) */
+  mpfr_div_2ui (g, g, 1, MPFR_RNDN); /* rel. error <= 2^(2-w) */
+  mpfr_const_pi (h, MPFR_RNDN); /* error <= 2^(1-w) */
+  mpfr_sqr (h, h, MPFR_RNDN); /* rel. error <= 2^(2-w) */
+  mpfr_div_ui (h, h, 3, MPFR_RNDN); /* rel. error <= |(1 + theta)^4 - 1|
+                                       <= 5 * 2^(-w) */
+  /* since x is chosen such that log(x)^2/2 >= 2 * (Pi^2/3), we should have
+     g >= 2*h, thus |g-h| >= |h|, and the relative error on g is at most
+     multiplied by 2 in the difference, and that by h is unchanged. */
+  MPFR_ASSERTN (MPFR_EXP (g) > MPFR_EXP (h));
+  mpfr_sub (g, h, g, MPFR_RNDN); /* err <= ulp(g)/2 + g*2^(3-w) + g*5*2^(-w)
+                                    <= ulp(g) * (1/2 + 8 + 5) < 14 ulp(g).
+
+                                    If in addition 2/x <= 2 ulp(g), i.e.,
+                                    1/x <= ulp(g), then the total error is
+                                    bounded by 16 ulp(g). */
+  if ((MPFR_EXP (x) >= (mpfr_exp_t) w - MPFR_EXP (g)) &&
+      MPFR_CAN_ROUND (g, w - 4, MPFR_PREC (y), rnd_mode))
+    inex = mpfr_set (y, g, rnd_mode);
+
+  mpfr_clear (g);
+  mpfr_clear (h);
+
+  return inex;
+}
+
+/* try asymptotic expansion when x is large and negative:
+   Li2(x) = -log(-x)^2/2 - Pi^2/6 - 1/x + O(1/x^2).
+   More precisely for x <= -2 we have for g(x) = -log(-x)^2/2 - Pi^2/6:
+   |Li2(x) - g(x)| <= 1/|x|.
+   Assumes x <= -7, which ensures |log(-x)^2/2| >= Pi^2/6, and g(x) <= -3.5.
+   Return 0 if asymptotic expansion failed (unable to round), otherwise
+   returns correct ternary value.
+*/
+static int
+mpfr_li2_asympt_neg (mpfr_ptr y, mpfr_srcptr x, mpfr_rnd_t rnd_mode)
+{
+  mpfr_t g, h;
+  mpfr_prec_t w = MPFR_PREC (y) + 20;
+  int inex = 0; /* stays 0 (= failure) unless we can round below */
+
+  MPFR_ASSERTN (mpfr_cmp_si (x, -7) <= 0);
+
+  mpfr_init2 (g, w);
+  mpfr_init2 (h, w);
+  mpfr_neg (g, x, MPFR_RNDN); /* g = -x > 0, exact */
+  mpfr_log (g, g, MPFR_RNDN); /* rel. error <= |(1 + theta) - 1| */
+  mpfr_sqr (g, g, MPFR_RNDN); /* rel. error <= |(1 + theta)^3 - 1| <= 2^(2-w) */
+  mpfr_div_2ui (g, g, 1, MPFR_RNDN); /* rel. error <= 2^(2-w) */
+  mpfr_const_pi (h, MPFR_RNDN); /* error <= 2^(1-w) */
+  mpfr_sqr (h, h, MPFR_RNDN); /* rel. error <= 2^(2-w) */
+  mpfr_div_ui (h, h, 6, MPFR_RNDN); /* rel. error <= |(1 + theta)^4 - 1|
+                                       <= 5 * 2^(-w) */
+  MPFR_ASSERTN (MPFR_EXP (g) >= MPFR_EXP (h));
+  mpfr_add (g, g, h, MPFR_RNDN); /* err <= ulp(g)/2 + g*2^(2-w) + g*5*2^(-w)
+                                    <= ulp(g) * (1/2 + 4 + 5) < 10 ulp(g).
+
+                                    If in addition |1/x| <= 4 ulp(g), then the
+                                    total error is bounded by 16 ulp(g). */
+  if ((MPFR_EXP (x) >= (mpfr_exp_t) (w - 2) - MPFR_EXP (g)) &&
+      MPFR_CAN_ROUND (g, w - 4, MPFR_PREC (y), rnd_mode))
+    inex = mpfr_neg (y, g, rnd_mode);
+
+  mpfr_clear (g);
+  mpfr_clear (h);
+
+  return inex;
+}
+
+/* Compute the real part of the dilogarithm defined by
+   Li2(x) = -\Int_{t=0}^x log(1-t)/t dt,
+   rounded in direction rnd_mode.  Returns the ternary inexact value
+   after range checking.  The domain of x is split into six cases, each
+   with its own argument reduction towards li2_series. */
+int
+mpfr_li2 (mpfr_ptr y, mpfr_srcptr x, mpfr_rnd_t rnd_mode)
+{
+  int inexact;
+  mpfr_exp_t err;
+  mpfr_prec_t yp, m;
+  MPFR_ZIV_DECL (loop);
+  MPFR_SAVE_EXPO_DECL (expo);
+
+  MPFR_LOG_FUNC (("x[%#R]=%R rnd=%d", x, x, rnd_mode), ("y[%#R]=%R", y));
+
+  /* special values: NaN, +/-Inf, +/-0 */
+  if (MPFR_UNLIKELY (MPFR_IS_SINGULAR (x)))
+    {
+      if (MPFR_IS_NAN (x))
+        {
+          MPFR_SET_NAN (y);
+          MPFR_RET_NAN;
+        }
+      else if (MPFR_IS_INF (x))
+        {
+          /* Li2(+/-Inf) = -Inf */
+          MPFR_SET_NEG (y);
+          MPFR_SET_INF (y);
+          MPFR_RET (0);
+        }
+      else /* x is zero */
+        {
+          MPFR_ASSERTD (MPFR_IS_ZERO (x));
+          MPFR_SET_SAME_SIGN (y, x);
+          MPFR_SET_ZERO (y);
+          MPFR_RET (0);
+        }
+    }
+
+  /* Li2(x) = x + x^2/4 + x^3/9 + ..., more precisely for 0 < x <= 1/2
+     we have |Li2(x) - x| < x^2/2 <= 2^(2EXP(x)-1) and for -1/2 <= x < 0
+     we have |Li2(x) - x| < x^2/4 <= 2^(2EXP(x)-2) */
+  if (MPFR_IS_POS (x))
+    MPFR_FAST_COMPUTE_IF_SMALL_INPUT (y, x, -MPFR_GET_EXP (x), 1, 1, rnd_mode,
+                                      {});
+  else
+    MPFR_FAST_COMPUTE_IF_SMALL_INPUT (y, x, -MPFR_GET_EXP (x), 2, 0, rnd_mode,
+                                      {});
+
+  MPFR_SAVE_EXPO_MARK (expo);
+  yp = MPFR_PREC (y);
+  m = yp + MPFR_INT_CEIL_LOG2 (yp) + 13; /* working precision */
+
+  if (MPFR_LIKELY ((mpfr_cmp_ui (x, 0) > 0) && (mpfr_cmp_d (x, 0.5) <= 0)))
+    /* 0 < x <= 1/2: Li2(x) = S(-log(1-x))-log^2(1-x)/4 */
+    {
+      mpfr_t s, u;
+      mpfr_exp_t expo_l;
+      int k;
+
+      mpfr_init2 (u, m);
+      mpfr_init2 (s, m);
+
+      MPFR_ZIV_INIT (loop, m);
+      for (;;)
+        {
+          mpfr_ui_sub (u, 1, x, MPFR_RNDN);
+          mpfr_log (u, u, MPFR_RNDU);
+          if (MPFR_IS_ZERO(u))
+            goto next_m; /* log(1-x) rounded to zero: retry with a larger
+                            working precision */
+          mpfr_neg (u, u, MPFR_RNDN); /* u = -log(1-x) */
+          expo_l = MPFR_GET_EXP (u);
+          k = li2_series (s, u, MPFR_RNDU);
+          err = 1 + MPFR_INT_CEIL_LOG2 (k + 1);
+
+          mpfr_sqr (u, u, MPFR_RNDU);
+          mpfr_div_2ui (u, u, 2, MPFR_RNDU); /* u = log^2(1-x) / 4 */
+          mpfr_sub (s, s, u, MPFR_RNDN);
+
+          /* error(s) <= (0.5 + 2^(d-EXP(s))
+             + 2^(3 + MAX(1, - expo_l) - EXP(s))) ulp(s) */
+          err = MAX (err, MAX (1, - expo_l) - 1) - MPFR_GET_EXP (s);
+          err = 2 + MAX (-1, err);
+          if (MPFR_CAN_ROUND (s, (mpfr_exp_t) m - err, yp, rnd_mode))
+            break;
+
+        next_m:
+          MPFR_ZIV_NEXT (loop, m);
+          mpfr_set_prec (u, m);
+          mpfr_set_prec (s, m);
+        }
+      MPFR_ZIV_FREE (loop);
+      inexact = mpfr_set (y, s, rnd_mode);
+
+      mpfr_clear (u);
+      mpfr_clear (s);
+      MPFR_SAVE_EXPO_FREE (expo);
+      return mpfr_check_range (y, inexact, rnd_mode);
+    }
+  else if (!mpfr_cmp_ui (x, 1))
+    /* Li2(1)= pi^2 / 6 */
+    {
+      mpfr_t u;
+      mpfr_init2 (u, m);
+
+      MPFR_ZIV_INIT (loop, m);
+      for (;;)
+        {
+          mpfr_const_pi (u, MPFR_RNDU);
+          mpfr_sqr (u, u, MPFR_RNDN);
+          mpfr_div_ui (u, u, 6, MPFR_RNDN);
+
+          err = m - 4; /* error(u) <= 19/2 ulp(u) */
+          if (MPFR_CAN_ROUND (u, err, yp, rnd_mode))
+            break;
+
+          MPFR_ZIV_NEXT (loop, m);
+          mpfr_set_prec (u, m);
+        }
+      MPFR_ZIV_FREE (loop);
+      inexact = mpfr_set (y, u, rnd_mode);
+
+      mpfr_clear (u);
+      MPFR_SAVE_EXPO_FREE (expo);
+      return mpfr_check_range (y, inexact, rnd_mode);
+    }
+  else if (mpfr_cmp_ui (x, 2) >= 0)
+    /* x >= 2: Li2(x) = -S(-log(1-1/x))-log^2(x)/2+log^2(1-1/x)/4+pi^2/3 */
+    {
+      int k;
+      mpfr_exp_t expo_l;
+      mpfr_t s, u, xx;
+
+      if (mpfr_cmp_ui (x, 38) >= 0)
+        {
+          /* try the asymptotic expansion first; on failure (inexact == 0)
+             fall through to the generic formula below */
+          inexact = mpfr_li2_asympt_pos (y, x, rnd_mode);
+          if (inexact != 0)
+            goto end_of_case_gt2;
+        }
+
+      mpfr_init2 (u, m);
+      mpfr_init2 (s, m);
+      mpfr_init2 (xx, m);
+
+      MPFR_ZIV_INIT (loop, m);
+      for (;;)
+        {
+          mpfr_ui_div (xx, 1, x, MPFR_RNDN);
+          mpfr_neg (xx, xx, MPFR_RNDN);
+          mpfr_log1p (u, xx, MPFR_RNDD);
+          mpfr_neg (u, u, MPFR_RNDU); /* u = -log(1-1/x) */
+          expo_l = MPFR_GET_EXP (u);
+          k = li2_series (s, u, MPFR_RNDN);
+          mpfr_neg (s, s, MPFR_RNDN);
+          err = MPFR_INT_CEIL_LOG2 (k + 1) + 1; /* error(s) <= 2^err ulp(s) */
+
+          mpfr_sqr (u, u, MPFR_RNDN);
+          mpfr_div_2ui (u, u, 2, MPFR_RNDN); /* u= log^2(1-1/x)/4 */
+          mpfr_add (s, s, u, MPFR_RNDN);
+          err =
+            MAX (err,
+                 3 + MAX (1, -expo_l) + MPFR_GET_EXP (u)) - MPFR_GET_EXP (s);
+          err = 2 + MAX (-1, err); /* error(s) <= 2^err ulp(s) */
+          err += MPFR_GET_EXP (s);
+
+          mpfr_log (u, x, MPFR_RNDU);
+          mpfr_sqr (u, u, MPFR_RNDN);
+          mpfr_div_2ui (u, u, 1, MPFR_RNDN); /* u = log^2(x)/2 */
+          mpfr_sub (s, s, u, MPFR_RNDN);
+          err = MAX (err, 3 + MPFR_GET_EXP (u)) - MPFR_GET_EXP (s);
+          err = 2 + MAX (-1, err); /* error(s) <= 2^err ulp(s) */
+          err += MPFR_GET_EXP (s);
+
+          mpfr_const_pi (u, MPFR_RNDU);
+          mpfr_sqr (u, u, MPFR_RNDN);
+          mpfr_div_ui (u, u, 3, MPFR_RNDN); /* u = pi^2/3 */
+          mpfr_add (s, s, u, MPFR_RNDN);
+          err = MAX (err, 2) - MPFR_GET_EXP (s);
+          err = 2 + MAX (-1, err); /* error(s) <= 2^err ulp(s) */
+          if (MPFR_CAN_ROUND (s, (mpfr_exp_t) m - err, yp, rnd_mode))
+            break;
+
+          MPFR_ZIV_NEXT (loop, m);
+          mpfr_set_prec (u, m);
+          mpfr_set_prec (s, m);
+          mpfr_set_prec (xx, m);
+        }
+      MPFR_ZIV_FREE (loop);
+      inexact = mpfr_set (y, s, rnd_mode);
+      mpfr_clears (s, u, xx, (mpfr_ptr) 0);
+
+    end_of_case_gt2:
+      MPFR_SAVE_EXPO_FREE (expo);
+      return mpfr_check_range (y, inexact, rnd_mode);
+    }
+  else if (mpfr_cmp_ui (x, 1) > 0)
+    /* 2 > x > 1: Li2(x) = S(log(x))+log^2(x)/4-log(x)log(x-1)+pi^2/6 */
+    {
+      int k;
+      mpfr_exp_t e1, e2;
+      mpfr_t s, u, v, xx;
+      mpfr_init2 (s, m);
+      mpfr_init2 (u, m);
+      mpfr_init2 (v, m);
+      mpfr_init2 (xx, m);
+
+      MPFR_ZIV_INIT (loop, m);
+      for (;;)
+        {
+          mpfr_log (v, x, MPFR_RNDU);
+          k = li2_series (s, v, MPFR_RNDN);
+          e1 = MPFR_GET_EXP (s);
+
+          mpfr_sqr (u, v, MPFR_RNDN);
+          mpfr_div_2ui (u, u, 2, MPFR_RNDN); /* u = log^2(x)/4 */
+          mpfr_add (s, s, u, MPFR_RNDN);
+
+          mpfr_sub_ui (xx, x, 1, MPFR_RNDN);
+          mpfr_log (u, xx, MPFR_RNDU);
+          e2 = MPFR_GET_EXP (u);
+          mpfr_mul (u, v, u, MPFR_RNDN); /* u = log(x) * log(x-1) */
+          mpfr_sub (s, s, u, MPFR_RNDN);
+
+          mpfr_const_pi (u, MPFR_RNDU);
+          mpfr_sqr (u, u, MPFR_RNDN);
+          mpfr_div_ui (u, u, 6, MPFR_RNDN); /* u = pi^2/6 */
+          mpfr_add (s, s, u, MPFR_RNDN);
+          /* error(s) <= (31 + (k+1) * 2^(1-e1) + 2^(1-e2)) ulp(s)
+             see algorithms.tex */
+          err = MAX (MPFR_INT_CEIL_LOG2 (k + 1) + 1 - e1, 1 - e2);
+          err = 2 + MAX (5, err);
+          if (MPFR_CAN_ROUND (s, (mpfr_exp_t) m - err, yp, rnd_mode))
+            break;
+
+          MPFR_ZIV_NEXT (loop, m);
+          mpfr_set_prec (s, m);
+          mpfr_set_prec (u, m);
+          mpfr_set_prec (v, m);
+          mpfr_set_prec (xx, m);
+        }
+      MPFR_ZIV_FREE (loop);
+      inexact = mpfr_set (y, s, rnd_mode);
+
+      mpfr_clears (s, u, v, xx, (mpfr_ptr) 0);
+      MPFR_SAVE_EXPO_FREE (expo);
+      return mpfr_check_range (y, inexact, rnd_mode);
+    }
+  else if (mpfr_cmp_ui_2exp (x, 1, -1) > 0) /* 1/2 < x < 1 */
+    /* 1 > x > 1/2: Li2(x) = -S(-log(x))+log^2(x)/4-log(x)log(1-x)+pi^2/6 */
+    {
+      int k;
+      mpfr_t s, u, v, xx;
+      mpfr_init2 (s, m);
+      mpfr_init2 (u, m);
+      mpfr_init2 (v, m);
+      mpfr_init2 (xx, m);
+
+      MPFR_ZIV_INIT (loop, m);
+      for (;;)
+        {
+          mpfr_log (u, x, MPFR_RNDD);
+          mpfr_neg (u, u, MPFR_RNDN); /* u = -log(x) */
+          k = li2_series (s, u, MPFR_RNDN);
+          mpfr_neg (s, s, MPFR_RNDN);
+          err = 1 + MPFR_INT_CEIL_LOG2 (k + 1) - MPFR_GET_EXP (s);
+
+          mpfr_ui_sub (xx, 1, x, MPFR_RNDN);
+          mpfr_log (v, xx, MPFR_RNDU);
+          mpfr_mul (v, v, u, MPFR_RNDN); /* v = - log(x) * log(1-x) */
+          mpfr_add (s, s, v, MPFR_RNDN);
+          err = MAX (err, 1 - MPFR_GET_EXP (v));
+          err = 2 + MAX (3, err) - MPFR_GET_EXP (s);
+
+          mpfr_sqr (u, u, MPFR_RNDN);
+          mpfr_div_2ui (u, u, 2, MPFR_RNDN); /* u = log^2(x)/4 */
+          mpfr_add (s, s, u, MPFR_RNDN);
+          err = MAX (err, 2 + MPFR_GET_EXP (u)) - MPFR_GET_EXP (s);
+          err = 2 + MAX (-1, err) + MPFR_GET_EXP (s);
+
+          mpfr_const_pi (u, MPFR_RNDU);
+          mpfr_sqr (u, u, MPFR_RNDN);
+          mpfr_div_ui (u, u, 6, MPFR_RNDN); /* u = pi^2/6 */
+          mpfr_add (s, s, u, MPFR_RNDN);
+          err = MAX (err, 3) - MPFR_GET_EXP (s);
+          err = 2 + MAX (-1, err);
+
+          if (MPFR_CAN_ROUND (s, (mpfr_exp_t) m - err, yp, rnd_mode))
+            break;
+
+          MPFR_ZIV_NEXT (loop, m);
+          mpfr_set_prec (s, m);
+          mpfr_set_prec (u, m);
+          mpfr_set_prec (v, m);
+          mpfr_set_prec (xx, m);
+        }
+      MPFR_ZIV_FREE (loop);
+      inexact = mpfr_set (y, s, rnd_mode);
+
+      mpfr_clears (s, u, v, xx, (mpfr_ptr) 0);
+      MPFR_SAVE_EXPO_FREE (expo);
+      return mpfr_check_range (y, inexact, rnd_mode);
+    }
+  else if (mpfr_cmp_si (x, -1) >= 0)
+    /* 0 > x >= -1: Li2(x) = -S(log(1-x))-log^2(1-x)/4 */
+    {
+      int k;
+      mpfr_exp_t expo_l;
+      mpfr_t s, u, xx;
+      mpfr_init2 (s, m);
+      mpfr_init2 (u, m);
+      mpfr_init2 (xx, m);
+
+      MPFR_ZIV_INIT (loop, m);
+      for (;;)
+        {
+          mpfr_neg (xx, x, MPFR_RNDN);
+          mpfr_log1p (u, xx, MPFR_RNDN); /* u = log(1-x) */
+          k = li2_series (s, u, MPFR_RNDN);
+          mpfr_neg (s, s, MPFR_RNDN);
+          expo_l = MPFR_GET_EXP (u);
+          err = 1 + MPFR_INT_CEIL_LOG2 (k + 1) - MPFR_GET_EXP (s);
+
+          mpfr_sqr (u, u, MPFR_RNDN);
+          mpfr_div_2ui (u, u, 2, MPFR_RNDN); /* u = log^2(1-x)/4 */
+          mpfr_sub (s, s, u, MPFR_RNDN);
+          err = MAX (err, - expo_l);
+          err = 2 + MAX (err, 3);
+          if (MPFR_CAN_ROUND (s, (mpfr_exp_t) m - err, yp, rnd_mode))
+            break;
+
+          MPFR_ZIV_NEXT (loop, m);
+          mpfr_set_prec (s, m);
+          mpfr_set_prec (u, m);
+          mpfr_set_prec (xx, m);
+        }
+      MPFR_ZIV_FREE (loop);
+      inexact = mpfr_set (y, s, rnd_mode);
+
+      mpfr_clears (s, u, xx, (mpfr_ptr) 0);
+      MPFR_SAVE_EXPO_FREE (expo);
+      return mpfr_check_range (y, inexact, rnd_mode);
+    }
+  else
+    /* x < -1: Li2(x)
+       = S(log(1-1/x))-log^2(-x)/4-log(1-x)log(-x)/2+log^2(1-x)/4-pi^2/6 */
+    {
+      int k;
+      mpfr_t s, u, v, w, xx;
+
+      if (mpfr_cmp_si (x, -7) <= 0)
+        {
+          /* try the asymptotic expansion first; on failure (inexact == 0)
+             fall through to the generic formula below */
+          inexact = mpfr_li2_asympt_neg (y, x, rnd_mode);
+          if (inexact != 0)
+            goto end_of_case_ltm1;
+        }
+
+      mpfr_init2 (s, m);
+      mpfr_init2 (u, m);
+      mpfr_init2 (v, m);
+      mpfr_init2 (w, m);
+      mpfr_init2 (xx, m);
+
+      MPFR_ZIV_INIT (loop, m);
+      for (;;)
+        {
+          mpfr_ui_div (xx, 1, x, MPFR_RNDN);
+          mpfr_neg (xx, xx, MPFR_RNDN);
+          mpfr_log1p (u, xx, MPFR_RNDN); /* u = log(1-1/x) */
+          k = li2_series (s, u, MPFR_RNDN);
+
+          mpfr_ui_sub (xx, 1, x, MPFR_RNDN);
+          mpfr_log (u, xx, MPFR_RNDU); /* u = log(1-x) */
+          mpfr_neg (xx, x, MPFR_RNDN);
+          mpfr_log (v, xx, MPFR_RNDU); /* v = log(-x) */
+          mpfr_mul (w, v, u, MPFR_RNDN);
+          mpfr_div_2ui (w, w, 1, MPFR_RNDN); /* w = log(-x) * log(1-x) / 2 */
+          mpfr_sub (s, s, w, MPFR_RNDN);
+          err = 1 + MAX (3, MPFR_INT_CEIL_LOG2 (k+1) + 1 - MPFR_GET_EXP (s))
+            + MPFR_GET_EXP (s);
+
+          mpfr_sqr (w, v, MPFR_RNDN);
+          mpfr_div_2ui (w, w, 2, MPFR_RNDN); /* w = log^2(-x) / 4 */
+          mpfr_sub (s, s, w, MPFR_RNDN);
+          err = MAX (err, 3 + MPFR_GET_EXP(w)) - MPFR_GET_EXP (s);
+          err = 2 + MAX (-1, err) + MPFR_GET_EXP (s);
+
+          mpfr_sqr (w, u, MPFR_RNDN);
+          mpfr_div_2ui (w, w, 2, MPFR_RNDN); /* w = log^2(1-x) / 4 */
+          mpfr_add (s, s, w, MPFR_RNDN);
+          err = MAX (err, 3 + MPFR_GET_EXP (w)) - MPFR_GET_EXP (s);
+          err = 2 + MAX (-1, err) + MPFR_GET_EXP (s);
+
+          mpfr_const_pi (w, MPFR_RNDU);
+          mpfr_sqr (w, w, MPFR_RNDN);
+          mpfr_div_ui (w, w, 6, MPFR_RNDN); /* w = pi^2 / 6 */
+          mpfr_sub (s, s, w, MPFR_RNDN);
+          err = MAX (err, 3) - MPFR_GET_EXP (s);
+          err = 2 + MAX (-1, err) + MPFR_GET_EXP (s);
+
+          if (MPFR_CAN_ROUND (s, (mpfr_exp_t) m - err, yp, rnd_mode))
+            break;
+
+          MPFR_ZIV_NEXT (loop, m);
+          mpfr_set_prec (s, m);
+          mpfr_set_prec (u, m);
+          mpfr_set_prec (v, m);
+          mpfr_set_prec (w, m);
+          mpfr_set_prec (xx, m);
+        }
+      MPFR_ZIV_FREE (loop);
+      inexact = mpfr_set (y, s, rnd_mode);
+      mpfr_clears (s, u, v, w, xx, (mpfr_ptr) 0);
+
+    end_of_case_ltm1:
+      MPFR_SAVE_EXPO_FREE (expo);
+      return mpfr_check_range (y, inexact, rnd_mode);
+    }
+
+  MPFR_ASSERTN (0); /* should never reach this point */
+}
diff --git a/src/lngamma.c b/src/lngamma.c
new file mode 100644
index 000000000..e1c0c00b7
--- /dev/null
+++ b/src/lngamma.c
@@ -0,0 +1,637 @@
+/* mpfr_lngamma -- lngamma function
+
+Copyright 2005, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#define MPFR_NEED_LONGLONG_H
+#include "mpfr-impl.h"
+
+/* given a precision p, return alpha, such that the argument reduction
+ will use k = alpha*p*log(2).
+
+ Warning: we should always have alpha >= log(2)/(2Pi) ~ 0.11,
+ and the smallest value of alpha multiplied by the smallest working
+ precision should be >= 4.
+*/
+static void
+mpfr_gamma_alpha (mpfr_t s, mpfr_prec_t p)
+{
+  /* Table of experimentally determined values of alpha, indexed by an
+     upper bound on the working precision p.  Each entry stores
+     alpha * 2^10, so that s is set exactly with mpfr_set_ui_2exp. */
+  static const struct
+  {
+    mpfr_prec_t bound;
+    unsigned long alpha1024;
+  } tab[] = {
+    {   100,  614 },   /* about 0.6 */
+    {   500,  819 },   /* about 0.8 */
+    {  1000, 1331 },   /* about 1.3 */
+    {  2000, 1741 },   /* about 1.7 */
+    {  5000, 2253 },   /* about 2.2 */
+    { 10000, 3482 }    /* about 3.4 */
+  };
+  int i;
+
+  for (i = 0; i < (int) (sizeof (tab) / sizeof (tab[0])); i++)
+    if (p <= tab[i].bound)
+      {
+        mpfr_set_ui_2exp (s, tab[i].alpha1024, -10, MPFR_RNDN);
+        return;
+      }
+
+  /* very large precision: alpha = 9/2 = 4.5 */
+  mpfr_set_ui_2exp (s, 9, -1, MPFR_RNDN);
+}
+
+#ifndef IS_GAMMA
+/* Return the bit of x of weight 2^0 (the "unit bit"):
+   - 0 when |x| < 1 (the unit bit is below the significand),
+   - 0 when x is a multiple of 2 (the unit bit is above the represented
+     bits, so x is an even integer),
+   - otherwise the represented bit of weight 2^0 in the significand.
+   Used below to determine the sign of gamma(x) for negative x
+   (see mpfr_lngamma and mpfr_lgamma). */
+static int
+unit_bit (mpfr_srcptr (x))
+{
+  mpfr_exp_t expo;
+  mpfr_prec_t prec;
+  mp_limb_t x0;
+
+  expo = MPFR_GET_EXP (x);
+  if (expo <= 0)
+    return 0; /* |x| < 1 */
+
+  prec = MPFR_PREC (x);
+  if (expo > prec)
+    return 0; /* x is a multiple of 2^(expo-prec), thus an even integer */
+
+  /* Now, the unit bit is represented. */
+
+  prec = ((prec - 1) / GMP_NUMB_BITS + 1) * GMP_NUMB_BITS - expo;
+  /* number of represented fractional bits (including the trailing 0's) */
+
+  x0 = *(MPFR_MANT (x) + prec / GMP_NUMB_BITS);
+  /* limb containing the unit bit */
+
+  return (x0 >> (prec % GMP_NUMB_BITS)) & 1;
+}
+#endif
+
+/* lngamma(x) = log(gamma(x)).
+ We use formula [6.1.40] from Abramowitz&Stegun:
+ lngamma(z) = (z-1/2)*log(z) - z + 1/2*log(2*Pi)
+ + sum (Bernoulli[2m]/(2m)/(2m-1)/z^(2m-1),m=1..infinity)
+ According to [6.1.42], if the sum is truncated after m=n, the error
+ R_n(z) is bounded by |B[2n+2]|*K(z)/(2n+1)/(2n+2)/|z|^(2n+1)
+ where K(z) = max (z^2/(u^2+z^2)) for u >= 0.
+ For z real, |K(z)| <= 1 thus R_n(z) is bounded by the first neglected term.
+ */
+#ifdef IS_GAMMA
+#define GAMMA_FUNC mpfr_gamma_aux
+#else
+#define GAMMA_FUNC mpfr_lngamma_aux
+#endif
+
+/* Compute y = lngamma(z0) -- or gamma(z0) when IS_GAMMA is defined --
+   rounded in direction rnd, and return the corresponding ternary value
+   (after reducing y to the current exponent range). */
+static int
+GAMMA_FUNC (mpfr_ptr y, mpfr_srcptr z0, mpfr_rnd_t rnd)
+{
+  mpfr_prec_t precy, w; /* working precision */
+  mpfr_t s, t, u, v, z;
+  unsigned long m, k, maxm;
+  mpz_t *INITIALIZED(B); /* variable B declared as initialized */
+  int inexact, compared;
+  mpfr_exp_t err_s, err_t;
+  unsigned long Bm = 0; /* number of allocated B[] */
+  unsigned long oldBm;
+  double d;
+  MPFR_SAVE_EXPO_DECL (expo);
+
+  compared = mpfr_cmp_ui (z0, 1);
+
+  MPFR_SAVE_EXPO_MARK (expo);
+
+#ifndef IS_GAMMA /* lngamma or lgamma */
+  if (compared == 0 || (compared > 0 && mpfr_cmp_ui (z0, 2) == 0))
+    {
+      MPFR_SAVE_EXPO_FREE (expo);
+      return mpfr_set_ui (y, 0, MPFR_RNDN); /* lngamma(1 or 2) = +0 */
+    }
+
+  /* Deal here with tiny inputs. We have for -0.3 <= x <= 0.3:
+     - log|x| - gamma*x <= log|gamma(x)| <= - log|x| - gamma*x + x^2 */
+  if (MPFR_EXP(z0) <= - (mpfr_exp_t) MPFR_PREC(y))
+    {
+      mpfr_t l, h, g;
+      int ok, inex2;
+      mpfr_prec_t prec = MPFR_PREC(y) + 14;
+      MPFR_ZIV_DECL (loop);
+
+      MPFR_ZIV_INIT (loop, prec);
+      do
+        {
+          mpfr_init2 (l, prec);
+          if (MPFR_IS_POS(z0))
+            {
+              mpfr_log (l, z0, MPFR_RNDU); /* upper bound for log(z0) */
+              mpfr_init2 (h, MPFR_PREC(l));
+            }
+          else
+            {
+              mpfr_init2 (h, MPFR_PREC(z0));
+              mpfr_neg (h, z0, MPFR_RNDN); /* exact */
+              mpfr_log (l, h, MPFR_RNDU); /* upper bound for log(-z0) */
+              mpfr_set_prec (h, MPFR_PREC(l));
+            }
+          mpfr_neg (l, l, MPFR_RNDD); /* lower bound for -log(|z0|) */
+          mpfr_set (h, l, MPFR_RNDD); /* exact */
+          mpfr_nextabove (h); /* upper bound for -log(|z0|), avoids two calls
+                                 to mpfr_log */
+          mpfr_init2 (g, MPFR_PREC(l));
+          /* if z0>0, we need an upper approximation of Euler's constant
+             for the left bound */
+          mpfr_const_euler (g, MPFR_IS_POS(z0) ? MPFR_RNDU : MPFR_RNDD);
+          mpfr_mul (g, g, z0, MPFR_RNDD);
+          mpfr_sub (l, l, g, MPFR_RNDD);
+          mpfr_const_euler (g, MPFR_IS_POS(z0) ? MPFR_RNDD : MPFR_RNDU); /* cached */
+          mpfr_mul (g, g, z0, MPFR_RNDU);
+          mpfr_sub (h, h, g, MPFR_RNDD);
+          mpfr_mul (g, z0, z0, MPFR_RNDU);
+          mpfr_add (h, h, g, MPFR_RNDU);
+          inexact = mpfr_prec_round (l, MPFR_PREC(y), rnd);
+          inex2 = mpfr_prec_round (h, MPFR_PREC(y), rnd);
+          /* Caution: we not only need l = h, but both inexact flags should
+             agree. Indeed, one of the inexact flags might be zero. In that
+             case if we assume lngamma(z0) cannot be exact, the other flag
+             should be correct. We are conservative here and request that both
+             inexact flags agree. */
+          ok = SAME_SIGN (inexact, inex2) && mpfr_cmp (l, h) == 0;
+          if (ok)
+            mpfr_set (y, h, rnd); /* exact */
+          mpfr_clear (l);
+          mpfr_clear (h);
+          mpfr_clear (g);
+          if (ok)
+            {
+              MPFR_SAVE_EXPO_FREE (expo);
+              return mpfr_check_range (y, inexact, rnd);
+            }
+          /* since we have log|gamma(x)| = - log|x| - gamma*x + O(x^2),
+             if x ~ 2^(-n), then we have a n-bit approximation, thus
+             we can try again with a working precision of n bits,
+             especially when n >> PREC(y).
+             Otherwise we would use the reflection formula evaluating x-1,
+             which would need precision n. */
+          MPFR_ZIV_NEXT (loop, prec);
+        }
+      while (prec <= -MPFR_EXP(z0));
+      MPFR_ZIV_FREE (loop);
+    }
+#endif
+
+  precy = MPFR_PREC(y);
+
+  mpfr_init2 (s, MPFR_PREC_MIN);
+  mpfr_init2 (t, MPFR_PREC_MIN);
+  mpfr_init2 (u, MPFR_PREC_MIN);
+  mpfr_init2 (v, MPFR_PREC_MIN);
+  mpfr_init2 (z, MPFR_PREC_MIN);
+
+  if (compared < 0)
+    {
+      mpfr_exp_t err_u;
+
+      /* use reflection formula:
+         gamma(x) = Pi*(x-1)/sin(Pi*(2-x))/gamma(2-x)
+         thus lngamma(x) = log(Pi*(x-1)/sin(Pi*(2-x))) - lngamma(2-x) */
+
+      w = precy + MPFR_INT_CEIL_LOG2 (precy);
+      while (1)
+        {
+          w += MPFR_INT_CEIL_LOG2 (w) + 14;
+          MPFR_ASSERTD(w >= 3);
+          mpfr_set_prec (s, w);
+          mpfr_set_prec (t, w);
+          mpfr_set_prec (u, w);
+          mpfr_set_prec (v, w);
+          /* In the following, we write r for a real of absolute value
+             at most 2^(-w). Different instances of r may represent different
+             values. */
+          mpfr_ui_sub (s, 2, z0, MPFR_RNDD); /* s = (2-z0) * (1+2r) >= 1 */
+          mpfr_const_pi (t, MPFR_RNDN); /* t = Pi * (1+r) */
+          mpfr_lngamma (u, s, MPFR_RNDN); /* lngamma(2-x) */
+          /* Let s = (2-z0) + h. By construction, -(2-z0)*2^(1-w) <= h <= 0.
+             We have lngamma(s) = lngamma(2-z0) + h*Psi(z), z in [2-z0+h,2-z0].
+             Since 2-z0+h = s >= 1 and |Psi(x)| <= max(1,log(x)) for x >= 1,
+             the error on u is bounded by
+             ulp(u)/2 + (2-z0)*max(1,log(2-z0))*2^(1-w)
+             = (1/2 + (2-z0)*max(1,log(2-z0))*2^(1-E(u))) ulp(u) */
+          d = (double) MPFR_GET_EXP(s) * 0.694; /* upper bound for log(2-z0) */
+          err_u = MPFR_GET_EXP(s) + __gmpfr_ceil_log2 (d) + 1 - MPFR_GET_EXP(u);
+          err_u = (err_u >= 0) ? err_u + 1 : 0;
+          /* now the error on u is bounded by 2^err_u ulps */
+
+          mpfr_mul (s, s, t, MPFR_RNDN); /* Pi*(2-x) * (1+r)^4 */
+          err_s = MPFR_GET_EXP(s); /* 2-x <= 2^err_s */
+          mpfr_sin (s, s, MPFR_RNDN); /* sin(Pi*(2-x)) */
+          /* the error on s is bounded by 1/2*ulp(s) + [(1+2^(-w))^4-1]*(2-x)
+             <= 1/2*ulp(s) + 5*2^(-w)*(2-x) for w >= 3
+             <= (1/2 + 5 * 2^(-E(s)) * (2-x)) ulp(s) */
+          err_s += 3 - MPFR_GET_EXP(s);
+          err_s = (err_s >= 0) ? err_s + 1 : 0;
+          /* the error on s is bounded by 2^err_s ulp(s), thus by
+             2^(err_s+1)*2^(-w)*|s| since ulp(s) <= 2^(1-w)*|s|.
+             Now n*2^(-w) can always be written |(1+r)^n-1| for some
+             |r|<=2^(-w), thus taking n=2^(err_s+1) we see that
+             |S - s| <= |(1+r)^(2^(err_s+1))-1| * |s|, where S is the
+             true value.
+             In fact if ulp(s) <= ulp(S) the same inequality holds for
+             |S| instead of |s| in the right hand side, i.e., we can
+             write s = (1+r)^(2^(err_s+1)) * S.
+             But if ulp(S) < ulp(s), we need to add one ``bit'' to the error,
+             to get s = (1+r)^(2^(err_s+2)) * S. This is true since with
+             E = n*2^(-w) we have |s - S| <= E * |s|, thus
+             |s - S| <= E/(1-E) * |S|.
+             Now E/(1-E) is bounded by 2E as long as E<=1/2,
+             and 2E can be written (1+r)^(2n)-1 as above.
+          */
+          err_s += 2; /* exponent of relative error */
+
+          mpfr_sub_ui (v, z0, 1, MPFR_RNDN); /* v = (x-1) * (1+r) */
+          mpfr_mul (v, v, t, MPFR_RNDN); /* v = Pi*(x-1) * (1+r)^3 */
+          mpfr_div (v, v, s, MPFR_RNDN); /* Pi*(x-1)/sin(Pi*(2-x)) */
+          mpfr_abs (v, v, MPFR_RNDN);
+          /* (1+r)^(3+2^err_s+1) */
+          err_s = (err_s <= 1) ? 3 : err_s + 1;
+          /* now (1+r)^M with M <= 2^err_s */
+          mpfr_log (v, v, MPFR_RNDN);
+          /* log(v*(1+e)) = log(v)+log(1+e) where |e| <= 2^(err_s-w).
+             Since |log(1+e)| <= 2*e for |e| <= 1/4, the error on v is
+             bounded by ulp(v)/2 + 2^(err_s+1-w). */
+          if (err_s + 2 > w)
+            {
+              w += err_s + 2;
+            }
+          else
+            {
+              err_s += 1 - MPFR_GET_EXP(v);
+              err_s = (err_s >= 0) ? err_s + 1 : 0;
+              /* the error on v is bounded by 2^err_s ulps */
+              err_u += MPFR_GET_EXP(u); /* absolute error on u */
+              err_s += MPFR_GET_EXP(v); /* absolute error on v */
+              mpfr_sub (s, v, u, MPFR_RNDN);
+              /* the total error on s is bounded by ulp(s)/2 + 2^(err_u-w)
+                 + 2^(err_s-w) <= ulp(s)/2 + 2^(max(err_u,err_s)+1-w) */
+              err_s = (err_s >= err_u) ? err_s : err_u;
+              err_s += 1 - MPFR_GET_EXP(s); /* error is 2^err_s ulp(s) */
+              err_s = (err_s >= 0) ? err_s + 1 : 0;
+              if (mpfr_can_round (s, w - err_s, MPFR_RNDN, MPFR_RNDZ, precy
+                                  + (rnd == MPFR_RNDN)))
+                goto end;
+            }
+        }
+    }
+
+  /* now z0 > 1 */
+
+  MPFR_ASSERTD (compared > 0);
+
+  /* since k is O(w), the value of log(z0*...*(z0+k-1)) is about w*log(w),
+     so there is a cancellation of ~log(w) in the argument reconstruction */
+  w = precy + MPFR_INT_CEIL_LOG2 (precy);
+
+  do
+    {
+      w += MPFR_INT_CEIL_LOG2 (w) + 13;
+      MPFR_ASSERTD (w >= 3);
+
+      /* argument reduction: we compute gamma(z0 + k), where the series
+         has error term B_{2n}/(z0+k)^(2n) ~ (n/(Pi*e*(z0+k)))^(2n)
+         and we need k steps of argument reconstruction. Assuming k is large
+         with respect to z0, and k = n, we get 1/(Pi*e)^(2n) ~ 2^(-w), i.e.,
+         k ~ w*log(2)/2/log(Pi*e) ~ 0.1616 * w.
+         However, since the series is more expensive to compute, the optimal
+         value seems to be k ~ alpha * w experimentally, where alpha depends
+         on the precision (see mpfr_gamma_alpha). */
+      mpfr_set_prec (s, 53);
+      /* s <- alpha(w); do NOT overwrite it with a hard-coded constant:
+         that would make the precision-dependent tuning above dead code */
+      mpfr_gamma_alpha (s, w);
+      mpfr_mul_ui (s, s, w, MPFR_RNDU);
+      if (mpfr_cmp (z0, s) < 0)
+        {
+          mpfr_sub (s, s, z0, MPFR_RNDU);
+          k = mpfr_get_ui (s, MPFR_RNDU);
+          if (k < 3)
+            k = 3;
+        }
+      else
+        k = 3;
+
+      mpfr_set_prec (s, w);
+      mpfr_set_prec (t, w);
+      mpfr_set_prec (u, w);
+      mpfr_set_prec (v, w);
+      mpfr_set_prec (z, w);
+
+      mpfr_add_ui (z, z0, k, MPFR_RNDN);
+      /* z = (z0+k)*(1+t1) with |t1| <= 2^(-w) */
+
+      /* z >= 4 ensures the relative error on log(z) is small,
+         and also (z-1/2)*log(z)-z >= 0 */
+      MPFR_ASSERTD (mpfr_cmp_ui (z, 4) >= 0);
+
+      mpfr_log (s, z, MPFR_RNDN); /* log(z) */
+      /* we have s = log((z0+k)*(1+t1))*(1+t2) with |t1|, |t2| <= 2^(-w).
+         Since w >= 2 and z0+k >= 4, we can write log((z0+k)*(1+t1))
+         = log(z0+k) * (1+t3) with |t3| <= 2^(-w), thus we have
+         s = log(z0+k) * (1+t4)^2 with |t4| <= 2^(-w) */
+      mpfr_mul_2ui (t, z, 1, MPFR_RNDN); /* t = 2z * (1+t5) */
+      mpfr_sub_ui (t, t, 1, MPFR_RNDN); /* t = 2z-1 * (1+t6)^3 */
+      /* since we can write 2z*(1+t5) = (2z-1)*(1+t5') with
+         t5' = 2z/(2z-1) * t5, thus |t5'| <= 8/7 * t5 */
+      mpfr_mul (s, s, t, MPFR_RNDN); /* (2z-1)*log(z) * (1+t7)^6 */
+      mpfr_div_2ui (s, s, 1, MPFR_RNDN); /* (z-1/2)*log(z) * (1+t7)^6 */
+      mpfr_sub (s, s, z, MPFR_RNDN); /* (z-1/2)*log(z)-z */
+      /* s = [(z-1/2)*log(z)-z]*(1+u)^14, s >= 1/2 */
+
+      mpfr_ui_div (u, 1, z, MPFR_RNDN); /* 1/z * (1+u), u <= 1/4 since z >= 4 */
+
+      /* the first term is B[2]/2/z = 1/12/z: t=1/12/z, C[2]=1 */
+      mpfr_div_ui (t, u, 12, MPFR_RNDN); /* 1/(12z) * (1+u)^2, t <= 3/128 */
+      mpfr_set (v, t, MPFR_RNDN); /* (1+u)^2, v < 2^(-5) */
+      mpfr_add (s, s, v, MPFR_RNDN); /* (1+u)^15 */
+
+      mpfr_mul (u, u, u, MPFR_RNDN); /* 1/z^2 * (1+u)^3 */
+
+      if (Bm == 0)
+        {
+          B = mpfr_bernoulli_internal ((mpz_t *) 0, 0);
+          B = mpfr_bernoulli_internal (B, 1);
+          Bm = 2;
+        }
+
+      /* m <= maxm ensures that 2*m*(2*m+1) <= ULONG_MAX */
+      maxm = 1UL << (GMP_NUMB_BITS / 2 - 1);
+
+      /* s:(1+u)^15, t:(1+u)^2, t <= 3/128 */
+
+      for (m = 2; MPFR_GET_EXP(v) + (mpfr_exp_t) w >= MPFR_GET_EXP(s); m++)
+        {
+          mpfr_mul (t, t, u, MPFR_RNDN); /* (1+u)^(10m-14) */
+          if (m <= maxm)
+            {
+              mpfr_mul_ui (t, t, 2*(m-1)*(2*m-3), MPFR_RNDN);
+              mpfr_div_ui (t, t, 2*m*(2*m-1), MPFR_RNDN);
+              mpfr_div_ui (t, t, 2*m*(2*m+1), MPFR_RNDN);
+            }
+          else
+            {
+              mpfr_mul_ui (t, t, 2*(m-1), MPFR_RNDN);
+              mpfr_mul_ui (t, t, 2*m-3, MPFR_RNDN);
+              mpfr_div_ui (t, t, 2*m, MPFR_RNDN);
+              mpfr_div_ui (t, t, 2*m-1, MPFR_RNDN);
+              mpfr_div_ui (t, t, 2*m, MPFR_RNDN);
+              mpfr_div_ui (t, t, 2*m+1, MPFR_RNDN);
+            }
+          /* (1+u)^(10m-8) */
+          /* invariant: t=1/(2m)/(2m-1)/z^(2m-1)/(2m+1)! */
+          if (Bm <= m)
+            {
+              B = mpfr_bernoulli_internal (B, m); /* B[2m]*(2m+1)!, exact */
+              Bm ++;
+            }
+          mpfr_mul_z (v, t, B[m], MPFR_RNDN); /* (1+u)^(10m-7) */
+          MPFR_ASSERTD(MPFR_GET_EXP(v) <= - (2 * m + 3));
+          mpfr_add (s, s, v, MPFR_RNDN);
+        }
+      /* m <= 1/2*Pi*e*z ensures that |v[m]| < 1/2^(2m+3) */
+      MPFR_ASSERTD ((double) m <= 4.26 * mpfr_get_d (z, MPFR_RNDZ));
+
+      /* We have sum([(1+u)^(10m-7)-1]*1/2^(2m+3), m=2..infinity)
+         <= 1.46*u for u <= 2^(-3).
+         We have 0 < lngamma(z) - [(z - 1/2) ln(z) - z + 1/2 ln(2 Pi)] < 0.021
+         for z >= 4, thus since the initial s >= 0.85, the different values of
+         s differ by at most one binade, and the total rounding error on s
+         in the for-loop is bounded by 2*(m-1)*ulp(final_s).
+         The error coming from the v's is bounded by
+         1.46*2^(-w) <= 2*ulp(final_s).
+         Thus the total error so far is bounded by [(1+u)^15-1]*s+2m*ulp(s)
+         <= (2m+47)*ulp(s).
+         Taking into account the truncation error (which is bounded by the last
+         term v[] according to 6.1.42 in A&S), the bound is (2m+48)*ulp(s).
+      */
+
+      /* add 1/2*log(2*Pi) and subtract log(z0*(z0+1)*...*(z0+k-1)) */
+      mpfr_const_pi (v, MPFR_RNDN); /* v = Pi*(1+u) */
+      mpfr_mul_2ui (v, v, 1, MPFR_RNDN); /* v = 2*Pi * (1+u) */
+      if (k)
+        {
+          unsigned long l;
+          mpfr_set (t, z0, MPFR_RNDN); /* t = z0*(1+u) */
+          for (l = 1; l < k; l++)
+            {
+              mpfr_add_ui (u, z0, l, MPFR_RNDN); /* u = (z0+l)*(1+u) */
+              mpfr_mul (t, t, u, MPFR_RNDN); /* (1+u)^(2l+1) */
+            }
+          /* now t: (1+u)^(2k-1) */
+          /* instead of computing log(sqrt(2*Pi)/t), we compute
+             1/2*log(2*Pi/t^2), which trades a square root for a square */
+          mpfr_mul (t, t, t, MPFR_RNDN); /* (z0*...*(z0+k-1))^2, (1+u)^(4k-1) */
+          mpfr_div (v, v, t, MPFR_RNDN);
+          /* 2*Pi/(z0*...*(z0+k-1))^2 (1+u)^(4k+1) */
+        }
+#ifdef IS_GAMMA
+      err_s = MPFR_GET_EXP(s);
+      mpfr_exp (s, s, MPFR_RNDN);
+      /* before the exponential, we have s = s0 + h where
+         |h| <= (2m+48)*ulp(s), thus exp(s0) = exp(s) * exp(-h).
+         For |h| <= 1/4, we have |exp(h)-1| <= 1.2*|h| thus
+         |exp(s) - exp(s0)| <= 1.2 * exp(s) * (2m+48)* 2^(EXP(s)-w). */
+      d = 1.2 * (2.0 * (double) m + 48.0);
+      /* the error on s is bounded by d*2^err_s * 2^(-w) */
+      mpfr_sqrt (t, v, MPFR_RNDN);
+      /* let v0 be the exact value of v. We have v = v0*(1+u)^(4k+1),
+         thus t = sqrt(v0)*(1+u)^(2k+3/2). */
+      mpfr_mul (s, s, t, MPFR_RNDN);
+      /* the error on input s is bounded by (1+u)^(d*2^err_s),
+         and that on t is (1+u)^(2k+3/2), thus the
+         total error is (1+u)^(d*2^err_s+2k+5/2) */
+      err_s += __gmpfr_ceil_log2 (d);
+      err_t = __gmpfr_ceil_log2 (2.0 * (double) k + 2.5);
+      err_s = (err_s >= err_t) ? err_s + 1 : err_t + 1;
+#else
+      mpfr_log (t, v, MPFR_RNDN);
+      /* let v0 be the exact value of v. We have v = v0*(1+u)^(4k+1),
+         thus log(v) = log(v0) + (4k+1)*log(1+u). Since |log(1+u)/u| <= 1.07
+         for |u| <= 2^(-3), the absolute error on log(v) is bounded by
+         1.07*(4k+1)*u, and the rounding error by ulp(t). */
+      mpfr_div_2ui (t, t, 1, MPFR_RNDN);
+      /* the error on t is now bounded by ulp(t) + 0.54*(4k+1)*2^(-w).
+         We have sqrt(2*Pi)/(z0*(z0+1)*...*(z0+k-1)) <= sqrt(2*Pi)/k! <= 0.5
+         since k>=3, thus t <= -0.5 and ulp(t) >= 2^(-w).
+         Thus the error on t is bounded by (2.16*k+1.54)*ulp(t). */
+      err_t = MPFR_GET_EXP(t) + (mpfr_exp_t)
+        __gmpfr_ceil_log2 (2.2 * (double) k + 1.6);
+      err_s = MPFR_GET_EXP(s) + (mpfr_exp_t)
+        __gmpfr_ceil_log2 (2.0 * (double) m + 48.0);
+      mpfr_add (s, s, t, MPFR_RNDN); /* this is a subtraction in fact */
+      /* the final error in ulp(s) is
+         <= 1 + 2^(err_t-EXP(s)) + 2^(err_s-EXP(s))
+         <= 2^(1+max(err_t,err_s)-EXP(s)) if err_t <> err_s
+         <= 2^(2+max(err_t,err_s)-EXP(s)) if err_t = err_s */
+      err_s = (err_t == err_s) ? 1 + err_s : ((err_t > err_s) ? err_t : err_s);
+      err_s += 1 - MPFR_GET_EXP(s);
+#endif
+    }
+  while (MPFR_UNLIKELY (!MPFR_CAN_ROUND (s, w - err_s, precy, rnd)));
+
+  oldBm = Bm;
+  while (Bm--)
+    mpz_clear (B[Bm]);
+  (*__gmp_free_func) (B, oldBm * sizeof (mpz_t));
+
+ end:
+  inexact = mpfr_set (y, s, rnd);
+
+  mpfr_clear (s);
+  mpfr_clear (t);
+  mpfr_clear (u);
+  mpfr_clear (v);
+  mpfr_clear (z);
+
+  MPFR_SAVE_EXPO_FREE (expo);
+  return mpfr_check_range (y, inexact, rnd);
+}
+
+#ifndef IS_GAMMA
+
+int
+mpfr_lngamma (mpfr_ptr y, mpfr_srcptr x, mpfr_rnd_t rnd)
+{
+  int inex;
+
+  MPFR_LOG_FUNC (("x[%#R]=%R rnd=%d", x, x, rnd),
+                 ("lngamma[%#R]=%R inexact=%d", y, y, inex));
+
+  /* Singular operands: NaN and any negative singular value give NaN;
+     the remaining cases +0 and +Inf both give +Inf. */
+  if (MPFR_UNLIKELY (MPFR_IS_SINGULAR (x)))
+    {
+      if (!MPFR_IS_NAN (x) && !MPFR_IS_NEG (x))
+        {
+          /* lngamma(+Inf) = lngamma(+0) = +Inf, exactly */
+          MPFR_SET_INF (y);
+          MPFR_SET_POS (y);
+          MPFR_RET (0);
+        }
+      MPFR_SET_NAN (y);
+      MPFR_RET_NAN;
+    }
+
+  /* For x < 0 with -2k-1 <= x <= -2k (k a nonnegative integer), gamma(x)
+     is negative or has a pole, so lngamma(x) = NaN; unit_bit(x) == 0
+     detects that range, and mpfr_integer_p the poles. */
+  if (MPFR_IS_NEG (x) && (unit_bit (x) == 0 || mpfr_integer_p (x)))
+    {
+      MPFR_SET_NAN (y);
+      MPFR_RET_NAN;
+    }
+
+  /* regular case; inex must stay a named variable: MPFR_LOG_FUNC logs it
+     on function exit */
+  inex = mpfr_lngamma_aux (y, x, rnd);
+  return inex;
+}
+
+/* Set y to log(|Gamma(x)|) rounded in direction rnd, set *signp to the
+   sign (+1 or -1) of Gamma(x), and return the ternary value. */
+int
+mpfr_lgamma (mpfr_ptr y, int *signp, mpfr_srcptr x, mpfr_rnd_t rnd)
+{
+  int inex;
+
+  MPFR_LOG_FUNC (("x[%#R]=%R rnd=%d", x, x, rnd),
+                 ("lgamma[%#R]=%R inexact=%d", y, y, inex));
+
+  *signp = 1; /* most common case */
+
+  if (MPFR_UNLIKELY (MPFR_IS_SINGULAR (x)))
+    {
+      if (MPFR_IS_NAN (x))
+        {
+          MPFR_SET_NAN (y);
+          MPFR_RET_NAN;
+        }
+      else
+        {
+          /* x = +/-0 or +/-Inf: |gamma(x)| = +Inf, and the sign
+             reported for gamma(x) is the sign of x */
+          *signp = MPFR_INT_SIGN (x);
+          MPFR_SET_INF (y);
+          MPFR_SET_POS (y);
+          MPFR_RET (0);
+        }
+    }
+
+  if (MPFR_IS_NEG (x))
+    {
+      if (mpfr_integer_p (x))
+        {
+          /* negative integer: pole of gamma, |gamma(x)| = +Inf */
+          MPFR_SET_INF (y);
+          MPFR_SET_POS (y);
+          MPFR_RET (0);
+        }
+
+      /* for -2k-1 < x < -2k (k integer) gamma(x) < 0; unit_bit(x) == 0
+         detects that range (see unit_bit and mpfr_lngamma above) */
+      if (unit_bit (x) == 0)
+        *signp = -1;
+
+      /* For tiny negative x, we have gamma(x) = 1/x - euler + O(x),
+         thus |gamma(x)| = -1/x + euler + O(x), and
+         log |gamma(x)| = -log(-x) - euler*x + O(x^2).
+         More precisely we have for -0.4 <= x < 0:
+         -log(-x) <= log |gamma(x)| <= -log(-x) - x.
+         Since -log(-x) is in general not exactly representable, we may have
+         an instance of the Table Maker Dilemma. The only way to ensure
+         correct rounding is to compute an interval [l,h] such that
+         l <= -log(-x) and
+         -log(-x) - x <= h, and check whether l and h round to the same number
+         for the target precision and rounding modes. */
+      if (MPFR_EXP(x) + 1 <= - (mpfr_exp_t) MPFR_PREC(y))
+        /* since PREC(y) >= 1, this ensures EXP(x) <= -2,
+           thus |x| <= 0.25 < 0.4 */
+        {
+          mpfr_t l, h;
+          int ok, inex2;
+          mpfr_prec_t w = MPFR_PREC (y) + 14; /* working precision */
+
+          while (1)
+            {
+              mpfr_init2 (l, w);
+              mpfr_init2 (h, w);
+              /* we want a lower bound on -log(-x), thus an upper bound
+                 on log(-x), thus an upper bound on -x. */
+              mpfr_neg (l, x, MPFR_RNDU); /* upper bound on -x */
+              mpfr_log (l, l, MPFR_RNDU); /* upper bound for log(-x) */
+              mpfr_neg (l, l, MPFR_RNDD); /* lower bound for -log(-x) */
+              mpfr_neg (h, x, MPFR_RNDD); /* lower bound on -x */
+              mpfr_log (h, h, MPFR_RNDD); /* lower bound on log(-x) */
+              mpfr_neg (h, h, MPFR_RNDU); /* upper bound for -log(-x) */
+              mpfr_sub (h, h, x, MPFR_RNDU); /* upper bound for -log(-x) - x */
+              inex = mpfr_prec_round (l, MPFR_PREC (y), rnd);
+              inex2 = mpfr_prec_round (h, MPFR_PREC (y), rnd);
+              /* Caution: we not only need l = h, but both inexact flags
+                 should agree. Indeed, one of the inexact flags might be
+                 zero. In that case if we assume ln|gamma(x)| cannot be
+                 exact, the other flag should be correct. We are conservative
+                 here and request that both inexact flags agree. */
+              ok = SAME_SIGN (inex, inex2) && mpfr_equal_p (l, h);
+              if (ok)
+                mpfr_set (y, h, rnd); /* exact */
+              mpfr_clear (l);
+              mpfr_clear (h);
+              if (ok)
+                return inex;
+              /* if ulp(log(-x)) <= |x| there is no reason to loop,
+                 since the width of [l, h] will be at least |x| */
+              if (MPFR_EXP(l) < MPFR_EXP(x) + (mpfr_exp_t) w)
+                break;
+              w += MPFR_INT_CEIL_LOG2(w) + 3;
+            }
+        }
+    }
+
+  /* general case: mpfr_lngamma_aux computes log|gamma(x)| (it takes the
+     absolute value in its reflection branch); the sign was stored above */
+  inex = mpfr_lngamma_aux (y, x, rnd);
+  return inex;
+}
+
+#endif
diff --git a/src/log.c b/src/log.c
new file mode 100644
index 000000000..c657a6ac2
--- /dev/null
+++ b/src/log.c
@@ -0,0 +1,174 @@
+/* mpfr_log -- natural logarithm of a floating-point number
+
+Copyright 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#define MPFR_NEED_LONGLONG_H
+#include "mpfr-impl.h"
+
+/* The computation of log(x) is done using the formula :
+ if we want p bits of the result,
+
+ pi
+ log(x) ~ ------------ - m log 2
+ 2 AG(1,4/s)
+
+ where s = x 2^m > 2^(p/2)
+
+ More precisely, if F(x) = int(1/sqrt(1-(1-x^2)*sin(t)^2), t=0..PI/2),
+ then for s>=1.26 we have log(s) < F(4/s) < log(s)*(1+4/s^2)
+ from which we deduce pi/2/AG(1,4/s)*(1-4/s^2) < log(s) < pi/2/AG(1,4/s)
+ so the relative error 4/s^2 is < 4/2^p i.e. 4 ulps.
+*/
+
+/* Set r to log(a) rounded in direction rnd_mode, using the AGM-based
+   formula described above; return the ternary value (after reducing r
+   to the current exponent range). */
+int
+mpfr_log (mpfr_ptr r, mpfr_srcptr a, mpfr_rnd_t rnd_mode)
+{
+  int inexact;
+  mpfr_prec_t p, q;
+  mpfr_t tmp1, tmp2;
+  mp_limb_t *tmp1p, *tmp2p; /* limb storage for tmp1/tmp2 (stack-allocated) */
+  MPFR_SAVE_EXPO_DECL (expo);
+  MPFR_ZIV_DECL (loop);
+  MPFR_TMP_DECL(marker);
+
+  MPFR_LOG_FUNC (("a[%#R]=%R rnd=%d", a, a, rnd_mode),
+                 ("r[%#R]=%R inexact=%d", r, r, inexact));
+
+  /* Special cases */
+  if (MPFR_UNLIKELY (MPFR_IS_SINGULAR (a)))
+    {
+      /* If a is NaN, the result is NaN */
+      if (MPFR_IS_NAN (a))
+        {
+          MPFR_SET_NAN (r);
+          MPFR_RET_NAN;
+        }
+      /* check for infinity before zero */
+      else if (MPFR_IS_INF (a))
+        {
+          if (MPFR_IS_NEG (a))
+            /* log(-Inf) = NaN */
+            {
+              MPFR_SET_NAN (r);
+              MPFR_RET_NAN;
+            }
+          else /* log(+Inf) = +Inf */
+            {
+              MPFR_SET_INF (r);
+              MPFR_SET_POS (r);
+              MPFR_RET (0);
+            }
+        }
+      else /* a is zero */
+        {
+          MPFR_ASSERTD (MPFR_IS_ZERO (a));
+          MPFR_SET_INF (r);
+          MPFR_SET_NEG (r);
+          MPFR_RET (0); /* log(0) is an exact -infinity */
+        }
+    }
+  /* If a is negative, the result is NaN */
+  else if (MPFR_UNLIKELY (MPFR_IS_NEG (a)))
+    {
+      MPFR_SET_NAN (r);
+      MPFR_RET_NAN;
+    }
+  /* If a is 1, the result is 0 */
+  else if (MPFR_UNLIKELY (MPFR_GET_EXP (a) == 1 && mpfr_cmp_ui (a, 1) == 0))
+    {
+      MPFR_SET_ZERO (r);
+      MPFR_SET_POS (r);
+      MPFR_RET (0); /* only "normal" case where the result is exact */
+    }
+
+  q = MPFR_PREC (r);
+
+  /* use initial precision about q+lg(q)+5 */
+  p = q + 5 + 2 * MPFR_INT_CEIL_LOG2 (q);
+  /* % ~(mpfr_prec_t)GMP_NUMB_BITS  ;
+     m=q; while (m) { p++; m >>= 1; }  */
+  /* if (MPFR_LIKELY(p % GMP_NUMB_BITS != 0))
+      p += GMP_NUMB_BITS - (p%GMP_NUMB_BITS); */
+
+  MPFR_TMP_MARK(marker);
+  MPFR_SAVE_EXPO_MARK (expo);
+
+  MPFR_ZIV_INIT (loop, p);
+  for (;;)
+    {
+      mp_size_t size;
+      long m;
+      mpfr_exp_t cancel;
+
+      /* Calculus of m (depends on p) */
+      /* m is chosen so that s = a * 2^m has exponent about (p+1)/2 + 1,
+         hence s > 2^(p/2) as required by the formula above */
+      m = (p + 1) / 2 - MPFR_GET_EXP (a) + 1;
+
+      /* All the mpfr_t needed have a precision of p */
+      size = (p-1)/GMP_NUMB_BITS+1;
+      MPFR_TMP_INIT (tmp1p, tmp1, p, size);
+      MPFR_TMP_INIT (tmp2p, tmp2, p, size);
+
+      mpfr_mul_2si (tmp2, a, m, MPFR_RNDN);    /* s=a*2^m,        err<=1 ulp  */
+      mpfr_div (tmp1, __gmpfr_four, tmp2, MPFR_RNDN);/* 4/s,      err<=2 ulps */
+      mpfr_agm (tmp2, __gmpfr_one, tmp1, MPFR_RNDN); /* AG(1,4/s),err<=3 ulps */
+      mpfr_mul_2ui (tmp2, tmp2, 1, MPFR_RNDN); /* 2*AG(1,4/s),    err<=3 ulps */
+      mpfr_const_pi (tmp1, MPFR_RNDN);         /* compute pi,     err<=1ulp   */
+      mpfr_div (tmp2, tmp1, tmp2, MPFR_RNDN);  /* pi/2*AG(1,4/s), err<=5ulps  */
+      mpfr_const_log2 (tmp1, MPFR_RNDN);      /* compute log(2),  err<=1ulp   */
+      mpfr_mul_si (tmp1, tmp1, m, MPFR_RNDN); /* compute m*log(2),err<=2ulps  */
+      mpfr_sub (tmp1, tmp2, tmp1, MPFR_RNDN); /* log(a),    err<=7ulps+cancel */
+
+      if (MPFR_LIKELY (MPFR_IS_PURE_FP (tmp1) && MPFR_IS_PURE_FP (tmp2)))
+        {
+          cancel = MPFR_GET_EXP (tmp2) - MPFR_GET_EXP (tmp1);
+          MPFR_LOG_MSG (("canceled bits=%ld\n", (long) cancel));
+          MPFR_LOG_VAR (tmp1);
+          if (MPFR_UNLIKELY (cancel < 0))
+            cancel = 0;
+
+          /* we have 7 ulps of error from the above roundings,
+             4 ulps from the 4/s^2 second order term,
+             plus the canceled bits */
+          if (MPFR_LIKELY (MPFR_CAN_ROUND (tmp1, p-cancel-4, q, rnd_mode)))
+            break;
+
+          /* VL: I think it is better to have an increment that it isn't
+             too low; in particular, the increment must be positive even
+             if cancel = 0 (can this occur?). */
+          p += cancel >= 8 ? cancel : 8;
+        }
+      else
+        {
+          /* TODO: find why this case can occur and what is best to do
+             with it. */
+          p += 32;
+        }
+
+      MPFR_ZIV_NEXT (loop, p);
+    }
+  MPFR_ZIV_FREE (loop);
+  inexact = mpfr_set (r, tmp1, rnd_mode);
+  /* We clean */
+  MPFR_TMP_FREE(marker);
+
+  MPFR_SAVE_EXPO_FREE (expo);
+  return mpfr_check_range (r, inexact, rnd_mode);
+}
diff --git a/src/log10.c b/src/log10.c
new file mode 100644
index 000000000..dfde9a6c5
--- /dev/null
+++ b/src/log10.c
@@ -0,0 +1,144 @@
+/* mpfr_log10 -- logarithm in base 10.
+
+Copyright 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#define MPFR_NEED_LONGLONG_H
+#include "mpfr-impl.h"
+
+ /* The computation of r=log10(a)
+
+ r=log10(a)=log(a)/log(10)
+ */
+
+int
+mpfr_log10 (mpfr_ptr r, mpfr_srcptr a, mpfr_rnd_t rnd_mode)
+{
+  int inexact;
+  MPFR_SAVE_EXPO_DECL (expo);
+
+  /* Singular operands: NaN, +/-Inf, +/-0. */
+  if (MPFR_UNLIKELY (MPFR_IS_SINGULAR (a)))
+    {
+      if (MPFR_IS_NAN (a))
+        {
+          MPFR_SET_NAN (r);
+          MPFR_RET_NAN;
+        }
+      /* infinity must be tested before zero */
+      else if (MPFR_IS_INF (a))
+        {
+          if (MPFR_IS_NEG (a)) /* log10(-Inf) = NaN */
+            {
+              MPFR_SET_NAN (r);
+              MPFR_RET_NAN;
+            }
+          /* log10(+Inf) = +Inf, exactly */
+          MPFR_SET_INF (r);
+          MPFR_SET_POS (r);
+          MPFR_RET (0);
+        }
+      else /* log10(+/-0) = -Inf, exactly */
+        {
+          MPFR_ASSERTD (MPFR_IS_ZERO (a));
+          MPFR_SET_INF (r);
+          MPFR_SET_NEG (r);
+          MPFR_RET (0);
+        }
+    }
+
+  /* log10 of a negative number is NaN */
+  if (MPFR_UNLIKELY (MPFR_IS_NEG (a)))
+    {
+      MPFR_SET_NAN (r);
+      MPFR_RET_NAN;
+    }
+
+  /* log10(1) = +0, exactly */
+  if (mpfr_cmp_ui (a, 1) == 0)
+    {
+      MPFR_SET_ZERO (r);
+      MPFR_SET_POS (r);
+      MPFR_RET (0);
+    }
+
+  MPFR_SAVE_EXPO_MARK (expo);
+
+  /* General case: r = log(a) / log(10), computed with Ziv's strategy. */
+  {
+    mpfr_t num, den;                  /* log(a) and log(10), then den holds
+                                         the quotient */
+    mpfr_prec_t py = MPFR_PREC (r);   /* target precision */
+    mpfr_prec_t pw;                   /* working precision */
+    mpfr_exp_t err;                   /* number of reliable bits */
+    MPFR_ZIV_DECL (loop);
+
+    /* working precision: see algorithms.tex for this choice */
+    pw = py + 4 + MPFR_INT_CEIL_LOG2 (py);
+
+    mpfr_init2 (num, pw);
+    mpfr_init2 (den, pw);
+
+    MPFR_ZIV_INIT (loop, pw);
+    for (;;)
+      {
+        mpfr_set_ui (den, 10, MPFR_RNDN);  /* 10 */
+        mpfr_log (den, den, MPFR_RNDD);    /* log(10) */
+        mpfr_log (num, a, MPFR_RNDN);      /* log(a) */
+        mpfr_div (den, num, den, MPFR_RNDN); /* log(a)/log(10) */
+
+        /* error bound: see algorithms.tex */
+        err = pw - 4;
+        if (MPFR_LIKELY (MPFR_CAN_ROUND (den, err, py, rnd_mode)))
+          break;
+
+        /* log10(10^n) = n is exact:
+           FIXME: Can we have 10^n exactly representable as a mpfr_t
+           but n can't fit an unsigned long? */
+        if (MPFR_IS_POS (den)
+            && mpfr_integer_p (den) && mpfr_fits_ulong_p (den, MPFR_RNDN)
+            && !mpfr_ui_pow_ui (num, 10, mpfr_get_ui (den, MPFR_RNDN),
+                                MPFR_RNDN)
+            && mpfr_cmp (a, num) == 0)
+          break;
+
+        /* increase the working precision and try again */
+        MPFR_ZIV_NEXT (loop, pw);
+        mpfr_set_prec (num, pw);
+        mpfr_set_prec (den, pw);
+      }
+    MPFR_ZIV_FREE (loop);
+
+    inexact = mpfr_set (r, den, rnd_mode);
+
+    mpfr_clear (num);
+    mpfr_clear (den);
+  }
+
+  MPFR_SAVE_EXPO_FREE (expo);
+  return mpfr_check_range (r, inexact, rnd_mode);
+}
diff --git a/src/log1p.c b/src/log1p.c
new file mode 100644
index 000000000..39644c1ec
--- /dev/null
+++ b/src/log1p.c
@@ -0,0 +1,152 @@
+/* mpfr_log1p -- Compute log(1+x)
+
+Copyright 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#define MPFR_NEED_LONGLONG_H
+#include "mpfr-impl.h"
+
+ /* log1p(x) = log(1 + x), computed via log(1+x) at an extended
+    working precision that absorbs the cancellation occurring when
+    |x| is small (see algorithms.tex). */
+
+int
+mpfr_log1p (mpfr_ptr y, mpfr_srcptr x, mpfr_rnd_t rnd_mode)
+{
+  int comp, inexact;
+  mpfr_exp_t ex;
+  MPFR_SAVE_EXPO_DECL (expo);
+
+  if (MPFR_UNLIKELY (MPFR_IS_SINGULAR (x)))
+    {
+      if (MPFR_IS_NAN (x))
+        {
+          MPFR_SET_NAN (y);
+          MPFR_RET_NAN;
+        }
+      /* check for inf or -inf (result is not defined for -inf) */
+      else if (MPFR_IS_INF (x))
+        {
+          if (MPFR_IS_POS (x))
+            {
+              /* log1p(+Inf) = +Inf, exact */
+              MPFR_SET_INF (y);
+              MPFR_SET_POS (y);
+              MPFR_RET (0);
+            }
+          else
+            {
+              MPFR_SET_NAN (y);
+              MPFR_RET_NAN;
+            }
+        }
+      else /* x is zero */
+        {
+          MPFR_ASSERTD (MPFR_IS_ZERO (x));
+          MPFR_SET_ZERO (y);   /* log1p(+/- 0) = +/- 0 */
+          MPFR_SET_SAME_SIGN (y, x);
+          MPFR_RET (0);
+        }
+    }
+
+  ex = MPFR_GET_EXP (x);
+  if (ex < 0)  /* -0.5 < x < 0.5 */
+    {
+      /* Fast path for tiny x: log1p(x) ~ x.
+         For x > 0,    abs(log(1+x)-x) < x^2/2.
+         For x > -0.5, abs(log(1+x)-x) < x^2. */
+      if (MPFR_IS_POS (x))
+        MPFR_FAST_COMPUTE_IF_SMALL_INPUT (y, x, - ex - 1, 0, 0, rnd_mode, {});
+      else
+        MPFR_FAST_COMPUTE_IF_SMALL_INPUT (y, x, - ex, 0, 1, rnd_mode, {});
+    }
+
+  comp = mpfr_cmp_si (x, -1);
+  /* log1p(x) is undefined for x < -1 */
+  if (MPFR_UNLIKELY(comp <= 0))
+    {
+      if (comp == 0)
+        /* x = -1: log1p(-1) = -Inf (like a division by zero) */
+        {
+          MPFR_SET_INF (y);
+          MPFR_SET_NEG (y);
+          MPFR_RET (0);
+        }
+      MPFR_SET_NAN (y);
+      MPFR_RET_NAN;
+    }
+
+  MPFR_SAVE_EXPO_MARK (expo);
+
+  /* General case */
+  {
+    /* Declaration of the intermediary variable */
+    mpfr_t t;
+    /* Declaration of the size variable */
+    mpfr_prec_t Ny = MPFR_PREC(y); /* target precision */
+    mpfr_prec_t Nt;                /* working precision */
+    mpfr_exp_t err;                /* error */
+    MPFR_ZIV_DECL (loop);
+
+    /* compute the precision of intermediary variable */
+    /* the optimal number of bits : see algorithms.tex */
+    Nt = Ny + MPFR_INT_CEIL_LOG2 (Ny) + 6;
+
+    /* if |x| is smaller than 2^(-e), we will loose about e bits
+       in log(1+x) because of the cancellation in 1+x */
+    if (MPFR_EXP(x) < 0)
+      Nt += -MPFR_EXP(x);
+
+    /* initialise of intermediary variable */
+    mpfr_init2 (t, Nt);
+
+    /* First computation of log1p (Ziv loop: retry with more
+       precision until the result can be rounded correctly) */
+    MPFR_ZIV_INIT (loop, Nt);
+    for (;;)
+      {
+        /* compute log1p */
+        inexact = mpfr_add_ui (t, x, 1, MPFR_RNDN); /* 1+x */
+        /* if inexact = 0, then t = x+1 exactly, and the result is
+           simply log(t), correctly rounded by mpfr_log */
+        if (inexact == 0)
+          {
+            inexact = mpfr_log (y, t, rnd_mode);
+            goto end;
+          }
+        mpfr_log (t, t, MPFR_RNDN); /* log(1+x) */
+
+        /* the error is bounded by (1/2 + 2^(1-EXP(t))) * ulp(t)
+           (cf algorithms.tex):
+           if EXP(t) >= 2, then error <= ulp(t)
+           if EXP(t) <= 1, then error <= 2^(2-EXP(t)) * ulp(t) */
+        err = Nt - MAX (0, 2 - MPFR_GET_EXP (t));
+
+        if (MPFR_LIKELY (MPFR_CAN_ROUND (t, err, Ny, rnd_mode)))
+          break;
+
+        /* increase the precision */
+        MPFR_ZIV_NEXT (loop, Nt);
+        mpfr_set_prec (t, Nt);
+      }
+    inexact = mpfr_set (y, t, rnd_mode);
+
+  end:
+    MPFR_ZIV_FREE (loop);
+    mpfr_clear (t);
+  }
+
+  MPFR_SAVE_EXPO_FREE (expo);
+  return mpfr_check_range (y, inexact, rnd_mode);
+}
diff --git a/src/log2.c b/src/log2.c
new file mode 100644
index 000000000..cc1dc4542
--- /dev/null
+++ b/src/log2.c
@@ -0,0 +1,136 @@
+/* mpfr_log2 -- log base 2
+
+Copyright 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#define MPFR_NEED_LONGLONG_H
+#include "mpfr-impl.h"
+
+ /* r = log2(a), computed as log(a) / log(2) at an extended working
+    precision, with a Ziv loop to guarantee correct rounding
+    (see algorithms.tex). */
+
+int
+mpfr_log2 (mpfr_ptr r, mpfr_srcptr a, mpfr_rnd_t rnd_mode)
+{
+  int inexact;
+  MPFR_SAVE_EXPO_DECL (expo);
+
+  if (MPFR_UNLIKELY (MPFR_IS_SINGULAR (a)))
+    {
+      /* If a is NaN, the result is NaN */
+      if (MPFR_IS_NAN (a))
+        {
+          MPFR_SET_NAN (r);
+          MPFR_RET_NAN;
+        }
+      /* check for infinity before zero */
+      else if (MPFR_IS_INF (a))
+        {
+          if (MPFR_IS_NEG (a))
+            /* log2(-Inf) = NaN */
+            {
+              MPFR_SET_NAN (r);
+              MPFR_RET_NAN;
+            }
+          else /* log2(+Inf) = +Inf */
+            {
+              MPFR_SET_INF (r);
+              MPFR_SET_POS (r);
+              MPFR_RET (0);
+            }
+        }
+      else /* a is zero */
+        {
+          MPFR_ASSERTD (MPFR_IS_ZERO (a));
+          MPFR_SET_INF (r);
+          MPFR_SET_NEG (r);
+          MPFR_RET (0); /* log2(0) is an exact -infinity */
+        }
+    }
+
+  /* If a is negative, the result is NaN */
+  if (MPFR_UNLIKELY (MPFR_IS_NEG (a)))
+    {
+      MPFR_SET_NAN (r);
+      MPFR_RET_NAN;
+    }
+
+  /* If a is 1, the result is 0 */
+  if (MPFR_UNLIKELY (mpfr_cmp_ui (a, 1) == 0))
+    {
+      MPFR_SET_ZERO (r);
+      MPFR_SET_POS (r);
+      MPFR_RET (0); /* only "normal" case where the result is exact */
+    }
+
+  /* If a is 2^N, log2(a) = N is an exact integer (it is then rounded
+     to the target precision by mpfr_set_si) */
+  if (MPFR_UNLIKELY (mpfr_cmp_ui_2exp (a, 1, MPFR_GET_EXP (a) - 1) == 0))
+    return mpfr_set_si(r, MPFR_GET_EXP (a) - 1, rnd_mode);
+
+  MPFR_SAVE_EXPO_MARK (expo);
+
+  /* General case */
+  {
+    /* Declaration of the intermediary variable */
+    mpfr_t t, tt;
+    /* Declaration of the size variable */
+    mpfr_prec_t Ny = MPFR_PREC(r); /* target precision */
+    mpfr_prec_t Nt;                /* working precision */
+    mpfr_exp_t err;                /* error */
+    MPFR_ZIV_DECL (loop);
+
+    /* compute the precision of intermediary variable */
+    /* the optimal number of bits : see algorithms.tex */
+    Nt = Ny + 3 + MPFR_INT_CEIL_LOG2 (Ny);
+
+    /* initialise of intermediary variable */
+    mpfr_init2 (t, Nt);
+    mpfr_init2 (tt, Nt);
+
+    /* First computation of log2 (Ziv loop: retry with more
+       precision until the result can be rounded correctly) */
+    MPFR_ZIV_INIT (loop, Nt);
+    for (;;)
+      {
+        /* compute log2 */
+        mpfr_const_log2(t,MPFR_RNDD); /* log(2) */
+        mpfr_log(tt,a,MPFR_RNDN); /* log(a) */
+        mpfr_div(t,tt,t,MPFR_RNDN); /* log(a)/log(2) */
+
+        /* estimation of the error: 3 guard bits suffice
+           (see algorithms.tex) */
+        err = Nt-3;
+        if (MPFR_LIKELY (MPFR_CAN_ROUND (t, err, Ny, rnd_mode)))
+          break;
+
+        /* actualisation of the precision */
+        MPFR_ZIV_NEXT (loop, Nt);
+        mpfr_set_prec (t, Nt);
+        mpfr_set_prec (tt, Nt);
+      }
+    MPFR_ZIV_FREE (loop);
+
+    inexact = mpfr_set (r, t, rnd_mode);
+
+    mpfr_clear (t);
+    mpfr_clear (tt);
+  }
+
+  MPFR_SAVE_EXPO_FREE (expo);
+  return mpfr_check_range (r, inexact, rnd_mode);
+}
diff --git a/src/logging.c b/src/logging.c
new file mode 100644
index 000000000..19116abdc
--- /dev/null
+++ b/src/logging.c
@@ -0,0 +1,165 @@
+/* MPFR Logging functions.
+
+Copyright 2005, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#include "mpfr-impl.h"
+
+/* Logging MPFR needs GCC >= 3.0 and GLIBC >= 2.0. */
+
+#ifdef MPFR_USE_LOGGING
+
+/* Can't include them before (in particular, printf.h) */
+#include <stdlib.h>
+#include <printf.h>
+#include <stdarg.h>
+#include <time.h>
+
+/* Define LOGGING variables (initialized by the mpfr_log_begin
+   constructor below, from MPFR_LOG_* environment variables) */
+
+FILE *mpfr_log_file;           /* destination stream of the log output */
+int mpfr_log_type;             /* bitmask of enabled MPFR_LOG_*_F categories */
+int mpfr_log_level;            /* verbosity level (MPFR_LOG_LEVEL, default 7) */
+int mpfr_log_base;             /* numeric base for printing mpfr_t values */
+int mpfr_log_current;          /* current level counter, starts at 0 */
+int mpfr_log_worstcase_limit;  /* NOTE(review): not initialized in this file;
+                                  confirm where it is set and used */
+mpfr_prec_t mpfr_log_prec;     /* forced print precision (0: use field width) */
+
+/* GLIBC printf hook implementing the custom '%R' conversion: prints an
+   mpfr_t passed by pointer using mpfr_out_str, or, with the alternate
+   ('#') flag, only its precision.  Returns the number of characters
+   written (mpfr_out_str uses the same convention). */
+static int
+mpfr_printf_mpfr_print (FILE *stream, const struct printf_info *info,
+                        const void * const *arg)
+{
+  int length;
+  int org_type_logging;
+
+  /* TODO: Use much more flag from info */
+  mpfr_srcptr w = *((mpfr_srcptr *) (arg[0]));
+  /* Output precision: a nonzero MPFR_LOG_PREC setting overrides the
+     field width; width -1 means "not specified", mapped to 0. */
+  mpfr_prec_t prec = mpfr_log_prec != 0 ? mpfr_log_prec
+    : info->width == -1 ? 0 : (mpfr_prec_t) info->width;
+
+  org_type_logging = mpfr_log_type;
+  mpfr_log_type = 0; /* We disable the logging during this print!
+                        (mpfr_out_str calls MPFR functions, which would
+                        otherwise log recursively) */
+  if (info->alt)
+    length = fprintf (stream, "%lu", (unsigned long) MPFR_PREC (w));
+  else
+    length = mpfr_out_str (stream, mpfr_log_base, prec, w, MPFR_RNDN);
+  mpfr_log_type = org_type_logging;
+
+  return length;
+}
+
+/* GLIBC printf hook describing the arguments of the '%R' conversion:
+   it consumes exactly one argument, a pointer (to an mpfr_t). */
+static int
+mpfr_printf_mpfr_arginfo (const struct printf_info *info, size_t n,
+                          int *argtypes)
+{
+  if (n != 0)
+    argtypes[0] = PA_POINTER;
+  return 1;
+}
+
+static void mpfr_log_begin (void) __attribute__((constructor));
+
+/* We let the system close the LOG itself
+ (Otherwise functions called by destructor can't use LOG File */
+/* Constructor run before main(): reads the MPFR_LOG_* environment
+   variables, registers the '%R' printf conversion and, if any log
+   category is enabled, opens the log file (MPFR_LOG_FILE, default
+   "mpfr.log"), aborting if it cannot be opened. */
+static void
+mpfr_log_begin (void)
+{
+  const char *var;
+  time_t tt;
+
+  /* Grab some information */
+  var = getenv ("MPFR_LOG_BASE");
+  mpfr_log_base = var == NULL || *var == 0 ? 10 : atoi (var); /* default 10 */
+
+  var = getenv ("MPFR_LOG_LEVEL");
+  mpfr_log_level = var == NULL || *var == 0 ? 7 : atoi (var); /* default 7 */
+  mpfr_log_current = 0;
+
+  var = getenv ("MPFR_LOG_PREC");
+  mpfr_log_prec = var == NULL || *var == 0 ? 0 : atol (var); /* 0: width */
+
+  /* Get what we need to log: each environment variable, when set
+     (to any value), enables one event category */
+  mpfr_log_type = 0;
+  if (getenv ("MPFR_LOG_INPUT") != NULL)
+    mpfr_log_type |= MPFR_LOG_INPUT_F;
+  if (getenv ("MPFR_LOG_OUTPUT") != NULL)
+    mpfr_log_type |= MPFR_LOG_OUTPUT_F;
+  if (getenv ("MPFR_LOG_TIME") != NULL)
+    mpfr_log_type |= MPFR_LOG_TIME_F;
+  if (getenv ("MPFR_LOG_INTERNAL") != NULL)
+    mpfr_log_type |= MPFR_LOG_INTERNAL_F;
+  if (getenv ("MPFR_LOG_MSG") != NULL)
+    mpfr_log_type |= MPFR_LOG_MSG_F;
+  if (getenv ("MPFR_LOG_ZIV") != NULL)
+    mpfr_log_type |= MPFR_LOG_BADCASE_F;
+  if (getenv ("MPFR_LOG_STAT") != NULL)
+    mpfr_log_type |= MPFR_LOG_STAT_F;
+  if (getenv ("MPFR_LOG_ALL") != NULL)
+    mpfr_log_type = MPFR_LOG_INPUT_F|MPFR_LOG_OUTPUT_F|MPFR_LOG_TIME_F
+      |MPFR_LOG_INTERNAL_F|MPFR_LOG_MSG_F|MPFR_LOG_BADCASE_F|MPFR_LOG_STAT_F;
+
+  /* Register printf functions: '%R' prints an mpfr_t */
+  register_printf_function ('R', mpfr_printf_mpfr_print,
+                            mpfr_printf_mpfr_arginfo);
+
+  /* Open filename if needed; the file is only opened when at least
+     one category was enabled above */
+  var = getenv ("MPFR_LOG_FILE");
+  if (var == NULL || *var == 0)
+    var = "mpfr.log";
+  if (mpfr_log_type != 0)
+    {
+      mpfr_log_file = fopen (var, "w");
+      if (mpfr_log_file == NULL)
+        {
+          fprintf (stderr, "MPFR LOG: Can't open '%s' with w.\n", var);
+          abort ();
+        }
+      time (&tt);
+      fprintf (mpfr_log_file, "MPFR LOG FILE %s\n", ctime (&tt));
+    }
+}
+
+/* Return user CPU time measured in milliseconds. Thanks to Torbjorn. */
+
+#if defined (ANSIONLY) || defined (USG) || defined (__SVR4) \
+ || defined (_UNICOS) || defined(__hpux)
+
+int
+mpfr_get_cputime (void)
+{
+  /* Scale clock() ticks to milliseconds; the 64-bit intermediate
+     avoids overflow in the multiplication by 1000. */
+  unsigned long long ticks = (unsigned long long) clock ();
+  return (int) (ticks * 1000 / CLOCKS_PER_SEC);
+}
+
+#else /* Use getrusage for cputime */
+
+#include <sys/types.h>
+#include <sys/resource.h>
+
+int
+mpfr_get_cputime (void)
+{
+  /* User CPU time of the current process, in milliseconds. */
+  struct rusage usage;
+
+  getrusage (0, &usage);
+  return 1000 * usage.ru_utime.tv_sec + usage.ru_utime.tv_usec / 1000;
+}
+
+#endif /* cputime */
+
+#endif /* MPFR_USE_LOGGING */
diff --git a/src/min_prec.c b/src/min_prec.c
new file mode 100644
index 000000000..542731992
--- /dev/null
+++ b/src/min_prec.c
@@ -0,0 +1,61 @@
+/* mpfr_min_prec -- minimal size in bits to hold the mantissa
+
+Copyright 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#include "mpfr-impl.h"
+
+/* Return the minimal number of bits needed to hold the significand of
+   x exactly, i.e. MPFR_PREC(x) minus the number of trailing zero bits
+   (ignoring the padding bits of the least significant limb).
+   Returns 0 when x is NaN, an infinity or zero. */
+mpfr_prec_t
+mpfr_min_prec (mpfr_srcptr x)
+{
+  mp_limb_t *mx;
+  mpfr_prec_t px, res;
+  mp_size_t n;
+  int i;
+
+  if (MPFR_UNLIKELY (MPFR_IS_SINGULAR (x)))
+    return 0;
+
+  mx = MPFR_MANT (x);
+  px = MPFR_PREC (x);
+
+  res = 0;
+  /* Count full limbs set to zero (the loop terminates because the
+     most significant limb of a regular number is nonzero) */
+  for (n = 0; mx[n] == 0; n++)
+    {
+      res += GMP_NUMB_BITS;
+    }
+
+  i = 0;
+  /* mx[n] is now the first limb which is not null. Count number
+   * of null bits in mx[n], from the right */
+  while ((mx[n] & (MPFR_LIMB_ONE << i)) == 0)
+    i++;
+
+  res += i;
+  /* If we have trailing zero bits because the precision
+   * is not a multiple of GMP_NUMB_BITS, we must not count
+   * those (the low GMP_NUMB_BITS - px % GMP_NUMB_BITS bits of the
+   * least significant limb are always zero padding). */
+  i = px % GMP_NUMB_BITS;
+  if (i != 0)
+    res -= GMP_NUMB_BITS - i;
+
+  return px - res;
+}
diff --git a/src/minmax.c b/src/minmax.c
new file mode 100644
index 000000000..8d23f5946
--- /dev/null
+++ b/src/minmax.c
@@ -0,0 +1,92 @@
+/* mpfr_min -- min and max of x, y
+
+Copyright 2001, 2003, 2004, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+
+#include "mpfr-impl.h"
+
+ /* The computation of z=min(x,y)
+
+ z=x if x <= y
+ z=y if x > y
+ */
+
+int
+mpfr_min (mpfr_ptr z, mpfr_srcptr x, mpfr_srcptr y, mpfr_rnd_t rnd_mode)
+{
+  if (MPFR_ARE_SINGULAR (x, y))
+    {
+      int x_is_nan = MPFR_IS_NAN (x);
+      int y_is_nan = MPFR_IS_NAN (y);
+
+      /* min(NaN, NaN) = NaN; otherwise a single NaN argument is
+         ignored and the other operand is returned. */
+      if (x_is_nan && y_is_nan)
+        {
+          MPFR_SET_NAN (z);
+          MPFR_RET_NAN;
+        }
+      if (x_is_nan)
+        return mpfr_set (z, y, rnd_mode);
+      if (y_is_nan)
+        return mpfr_set (z, x, rnd_mode);
+      /* min(+0, -0) = -0, whatever the order of the arguments. */
+      if (MPFR_IS_ZERO (x) && MPFR_IS_ZERO (y))
+        return mpfr_set (z, MPFR_IS_NEG (x) ? x : y, rnd_mode);
+      /* Infinities are handled by the generic comparison below. */
+    }
+  return mpfr_set (z, mpfr_cmp (x, y) <= 0 ? x : y, rnd_mode);
+}
+
+ /* The computation of z=max(x,y)
+
+ z=x if x >= y
+ z=y if x < y
+ */
+
+int
+mpfr_max (mpfr_ptr z, mpfr_srcptr x, mpfr_srcptr y, mpfr_rnd_t rnd_mode)
+{
+  if (MPFR_ARE_SINGULAR (x, y))
+    {
+      int x_is_nan = MPFR_IS_NAN (x);
+      int y_is_nan = MPFR_IS_NAN (y);
+
+      /* max(NaN, NaN) = NaN; otherwise a single NaN argument is
+         ignored and the other operand is returned. */
+      if (x_is_nan && y_is_nan)
+        {
+          MPFR_SET_NAN (z);
+          MPFR_RET_NAN;
+        }
+      if (x_is_nan)
+        return mpfr_set (z, y, rnd_mode);
+      if (y_is_nan)
+        return mpfr_set (z, x, rnd_mode);
+      /* max(+0, -0) = +0, whatever the order of the arguments. */
+      if (MPFR_IS_ZERO (x) && MPFR_IS_ZERO (y))
+        return mpfr_set (z, MPFR_IS_NEG (x) ? y : x, rnd_mode);
+      /* Infinities are handled by the generic comparison below. */
+    }
+  return mpfr_set (z, mpfr_cmp (x, y) <= 0 ? y : x, rnd_mode);
+}
diff --git a/src/modf.c b/src/modf.c
new file mode 100644
index 000000000..4e9a0b4ad
--- /dev/null
+++ b/src/modf.c
@@ -0,0 +1,98 @@
+/* mpfr_modf -- Integral and fractional part.
+
+Copyright 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#include "mpfr-impl.h"
+
+/* Pack two ternary (inexact) values into one int: INEXPOS maps a
+   ternary value to 0 (exact), 1 (rounded up) or 2 (rounded down);
+   INEX stores the first argument in bits 0-1 and the second in
+   bits 2-3. */
+#define INEXPOS(y) ((y) == 0 ? 0 : (((y) > 0) ? 1 : 2))
+#define INEX(y,z) (INEXPOS(y) | (INEXPOS(z) << 2))
+
+/* Set iop to the integral part of op and fop to its fractional part,
+   each rounded in direction rnd_mode.  The return value combines both
+   ternary values with INEX(): bits 0-1 for iop, bits 2-3 for fop.
+   iop and fop must be distinct; op may alias one of them. */
+int
+mpfr_modf (mpfr_ptr iop, mpfr_ptr fop, mpfr_srcptr op, mpfr_rnd_t rnd_mode)
+{
+  mpfr_exp_t ope;
+  mpfr_prec_t opq;
+  int inexi, inexf;
+
+  MPFR_LOG_FUNC (("op[%#R]=%R rnd=%d", op, op, rnd_mode),
+                 ("iop[%#R]=%R fop[%#R]=%R", iop, iop, fop, fop));
+
+  MPFR_ASSERTN (iop != fop);
+
+  if ( MPFR_UNLIKELY (MPFR_IS_SINGULAR (op)) )
+    {
+      if (MPFR_IS_NAN (op))
+        {
+          MPFR_SET_NAN (iop);
+          MPFR_SET_NAN (fop);
+          MPFR_RET_NAN;
+        }
+      MPFR_SET_SAME_SIGN (iop, op);
+      MPFR_SET_SAME_SIGN (fop, op);
+      if (MPFR_IS_INF (op))
+        {
+          /* modf(+/-Inf) = (+/-Inf, +/-0), both exact */
+          MPFR_SET_INF (iop);
+          MPFR_SET_ZERO (fop);
+          MPFR_RET (0);
+        }
+      else /* op is zero */
+        {
+          MPFR_ASSERTD (MPFR_IS_ZERO (op));
+          MPFR_SET_ZERO (iop);
+          MPFR_SET_ZERO (fop);
+          MPFR_RET (0);
+        }
+    }
+
+  ope = MPFR_GET_EXP (op);
+  opq = MPFR_PREC (op);
+
+  if (ope <= 0)   /* 0 < |op| < 1 */
+    {
+      /* integral part is +/-0, fractional part is op itself */
+      inexf = (fop != op) ? mpfr_set (fop, op, rnd_mode) : 0;
+      MPFR_SET_SAME_SIGN (iop, op);
+      MPFR_SET_ZERO (iop);
+      MPFR_RET (INEX(0, inexf));
+    }
+  else if (ope >= opq) /* op has no fractional part */
+    {
+      inexi = (iop != op) ? mpfr_set (iop, op, rnd_mode) : 0;
+      MPFR_SET_SAME_SIGN (fop, op);
+      MPFR_SET_ZERO (fop);
+      MPFR_RET (INEX(inexi, 0));
+    }
+  else /* op has both integral and fractional parts */
+    {
+      /* When iop aliases op, extract the fractional part first so
+         that op is still intact when truncated into iop. */
+      if (iop != op)
+        {
+          inexi = mpfr_rint_trunc (iop, op, rnd_mode);
+          inexf = mpfr_frac (fop, op, rnd_mode);
+        }
+      else
+        {
+          MPFR_ASSERTN (fop != op);
+          inexf = mpfr_frac (fop, op, rnd_mode);
+          inexi = mpfr_rint_trunc (iop, op, rnd_mode);
+        }
+      MPFR_RET (INEX(inexi, inexf));
+    }
+}
diff --git a/src/mp_clz_tab.c b/src/mp_clz_tab.c
new file mode 100644
index 000000000..92db98cd6
--- /dev/null
+++ b/src/mp_clz_tab.c
@@ -0,0 +1,38 @@
+/* __clz_tab -- support for longlong.h
+
+ THE CONTENTS OF THIS FILE ARE FOR INTERNAL USE AND MAY CHANGE
+ INCOMPATIBLY OR DISAPPEAR IN A FUTURE GNU MP RELEASE.
+
+Copyright 1991, 1993, 1994, 1996, 1997, 2000, 2001, 2004, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+
+This file is part of the GNU MP Library.
+Adapted to be used by the GNU MPFR library.
+
+The GNU MP Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MP Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MP Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#define MPFR_NEED_LONGLONG_H
+#include "mpfr-impl.h"
+
+#if defined(COUNT_LEADING_ZEROS_NEED_CLZ_TAB) && defined(__GMPFR_GMP_H__)
+/* Lookup table used by count_leading_zeros() in longlong.h on CPUs
+   without a dedicated instruction.  __clz_tab[i] is the bit length of
+   i plus one, with __clz_tab[0] = 1 (adapted from GMP; the contents
+   must stay identical to GMP's table). */
+const
+unsigned char __clz_tab[128] =
+{
+  1,2,3,3,4,4,4,4,5,5,5,5,5,5,5,5,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,
+  7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,
+  8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
+  8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8
+};
+#endif
diff --git a/src/mparam_h.in b/src/mparam_h.in
new file mode 100644
index 000000000..cb1a8169a
--- /dev/null
+++ b/src/mparam_h.in
@@ -0,0 +1,1431 @@
+/* Various Thresholds of MPFR, not exported. -*- mode: C -*-
+
+Copyright 2005, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#ifndef __MPFR_IMPL_H__
+# error "MPFR Internal not included"
+#endif
+
+/* Note: the different macros used here are those defined by gcc,
+ for example with gcc -dM -E -xc /dev/null
+ As of gcc 4.2, you can also use: -march=native or -mtune=native */
+
+/*****************************
+ * Threshold for Pentium 4 *
+ *****************************/
+#if defined (__tune_pentium4__)
+
+/* Generated by MPFR's tuneup.c, 2009-02-09, gcc 4.3 */
+/* crumble.loria.fr with gmp-4.2.4 */
+
+#define MPFR_MULHIGH_TAB \
+ -1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, \
+ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, \
+ 0,0,0,18,19,19,24,26,0,0,24,26,28,27,26,28, \
+ 30,32,32,32,30,30,34,34,32,34,34,36,32,34,36,34, \
+ 35,0,0,36,35,0,36,48,0,0,48,48,51,0,48,52, \
+ 51,0,52,52,51,55,52,56,55,0,56,60,59,59,60,64, \
+ 63,63,64,64,67,67,68,72,63,71,72,60,67,67,60,72, \
+ 63,61,64,64,63,71,68,68,71,67,68,68,67,71,68,72, \
+ 68,68,72,72,76,76,68,68,72,72,70,103,104,71,72,96, \
+ 103,94,95,104,96,96,94,103,104,104,96,96,103,103,104,104, \
+ 120,120,103,103,104,104,120,111,103,112,104,104,120,120,109,103, \
+ 119,119,120,120,127,112,128,128,120,120,136,127,128,128,120,126, \
+ 121,129,126,134,135,135,136,120,113,113,114,118,127,127,136,120, \
+ 121,125,126,118,119,119,120,136,121,153,122,122,127,119,120,128, \
+ 165,153,162,134,163,159,136,136,153,153,134,158,135,135,136,136, \
+ 153,153,162,158,159,159,156,152,153,189,158,186,187,163,156,168, \
+ 189,159,165,165,176,176,172,172,183,173,164,174,165,165,176,176, \
+ 177,177,183,188,189,189,180,185,176,186,177,177,193,188,189,189, \
+ 200,200,201,201,177,207,188,198,199,189,200,200,186,201,207,207, \
+ 213,203,189,189,200,210,201,196,212,207,208,213,189,199,200,200, \
+ 203,209,216,204,199,223,206,200,213,225,208,208,203,203,216,204, \
+ 205,205,224,212,213,213,184,208,209,203,198,210,199,187,206,200, \
+ 201,189,208,208,203,185,216,198,187,199,200,212,213,213,202,208, \
+ 203,197,198,198,199,211,212,212,285,213,280,274,227,275,288,204, \
+ 284,284,278,285,307,216,273,308,309,309,275,219,213,311,305,284, \
+ 306,306,307,307,308,308,309,309,303,303,311,311,284,312,285,285, \
+ 307,286,287,308,309,309,303,303,304,332,305,305,306,299,328,300, \
+ 308,301,309,309,303,310,311,311,333,305,285,285,307,314,308,308, \
+ 321,305,354,322,331,355,332,308,309,333,334,334,303,303,304,312, \
+ 305,305,306,306,307,307,308,308,309,309,302,302,375,311,312,360, \
+ 353,305,330,354,307,355,380,308,357,309,358,358,359,311,312,312, \
+ 345,329,378,354,331,355,356,356,357,357,374,334,375,311,336,384, \
+ 356,329,330,357,331,358,404,332,369,333,334,334,335,353,354,354, \
+ 355,382,356,347,357,357,358,358,377,359,405,333,352,379,353,353, \
+ 354,354,355,355,356,356,357,357,358,376,359,377,369,360,379,370, \
+ 380,380,354,372,355,382,383,401,357,357,376,358,377,377,405,378, \
+ 379,379,380,380,381,381,402,402,353,403,404,404,405,405,376,376, \
+ 377,357,368,358,379,379,360,400,401,381,372,452,373,383,454,384, \
+ 405,405,456,356,357,357,358,378,379,379,490,370,381,381,492,402, \
+ 463,403,404,404,405,405,456,456,527,357,448,448,489,449,400,450, \
+ 453,453,454,454,455,455,456,456,468,490,491,491,492,404,405,482, \
+ 472,450,451,462,463,463,464,453,465,465,455,455,456,489,490,490, \
+ 491,491,492,492,405,526,483,527,484,484,452,452,486,453,454,454, \
+ 455,477,456,522,490,490,491,524,481,492,526,482,483,527,495,517, \
+ 521,533,522,474,499,523,524,524,489,489,490,526,527,527,528,528, \
+ 529,529,518,554,495,483,496,472,521,557,486,522,535,535,524,512, \
+ 525,561,526,526,491,527,528,528,529,517,518,554,531,519,544,520, \
+ 521,521,558,546,559,535,560,536,489,573,490,514,491,563,492,492, \
+ 528,489,555,555,491,491,492,596,597,545,546,546,560,599,600,600, \
+ 523,562,563,511,564,525,526,630,553,527,528,528,607,555,595,543, \
+ 544,531,532,571,559,559,599,560,535,535,562,562,563,537,564,564, \
+ 565,630,514,631,528,632,555,542,634,595,557,557,597,558,559,559, \
+ 571,599,530,530,559,545,546,616,561,547,548,534,633,563,564,564, \
+ 607,537,594,636,567,539,624,554,555,555,598,598,557,557,558,572, \
+ 559,559,560,574,603,561,562,632,563,563,578,592,593,607,580,608, \
+ 553,609,624,596,597,597,598,612,627,585,600,614,573,629,616,602, \
+ 599,599,585,585,616,616,587,632,603,603,634,634,635,605,636,621, \
+ 622,607,623,593,609,609,625,610,611,626,612,597,628,598,599,599, \
+ 600,630,631,616,602,632,633,603,634,634,635,635,636,636,607,637, \
+ 668,608,609,609,610,610,611,611,597,657,628,628,629,629,630,630, \
+ 609,625,562,626,627,563,564,628,629,629,630,630,631,599,664,632, \
+ 681,633,634,634,635,635,620,636,621,621,622,622,623,639,672,592, \
+ 609,641,594,594,595,627,596,564,629,597,598,598,631,599,600,600, \
+ 601,681,618,634,603,635,636,636,557,621,622,606,623,623,608,608 \
+
+#define MPFR_SQRHIGH_TAB \
+ -1,-1,-1,-1,-1,-1,-1,-1,0,0,0,0,7,7,8,8, \
+ 9,9,10,10,11,11,12,12,13,13,14,14,15,15,16,16, \
+ 17,17,18,18,19,19,20,20,21,21,22,22,26,23,24,24, \
+ 25,25,26,26,27,27,28,28,29,29,30,30,31,31,32,32, \
+ 33,33,34,34,35,35,36,38,39,39,38,38,39,39,40,40, \
+ 41,41,42,42,43,43,44,46,45,45,46,46,47,47,48,48, \
+ 49,49,50,50,51,51,52,52,53,53,54,54,55,55,56,56, \
+ 57,57,58,58,59,59,60,60,61,61,62,68,63,63,64,64, \
+ 65,68,66,69,67,67,71,68,69,75,76,76,71,71,72,72, \
+ 73,82,74,80,78,75,76,76,77,83,78,81,82,79,80,80, \
+ 81,81,82,85,83,83,84,84,85,85,86,86,87,87,88,88, \
+ 89,92,90,93,94,91,92,92,93,93,94,94,95,95,96,96, \
+ 97,97,98,98,99,99,100,100,101,101,102,102,103,103,104,104, \
+ 105,105,106,106,107,107,108,112,109,109,110,118,111,111,112,112, \
+ 113,113,114,122,123,115,116,116,117,117,118,118,119,119,120,120, \
+ 121,121,122,122,123,123,124,124,125,125,126,126,127,127,128,128, \
+ 129,154,135,130,131,131,132,132,133,133,134,134,135,140,151,136, \
+ 142,137,138,143,144,154,155,150,151,156,152,142,153,158,144,144, \
+ 165,145,146,146,152,162,148,148,149,149,150,150,151,171,152,152, \
+ 153,153,154,154,155,155,156,156,157,157,158,158,159,159,160,160, \
+ 161,161,162,162,163,163,164,164,165,165,166,166,167,167,168,168, \
+ 169,169,170,170,171,183,172,172,173,173,174,174,175,187,194,182, \
+ 183,177,178,178,179,179,180,180,181,181,182,182,195,183,184,184, \
+ 185,197,186,186,187,187,188,188,189,189,190,190,191,191,192,204, \
+ 207,200,201,208,209,195,196,203,204,197,198,198,199,199,200,200, \
+ 201,215,216,230,217,210,211,218,212,205,206,206,207,207,208,208, \
+ 209,230,231,231,232,225,226,226,227,227,228,242,222,257,216,216, \
+ 217,245,246,218,219,240,241,234,235,235,236,264,230,258,224,224, \
+ 225,225,226,226,227,227,228,260,261,229,230,270,247,231,232,232, \
+ 233,233,234,234,235,235,236,236,237,237,238,238,239,239,240,240, \
+ 241,241,242,242,243,243,244,244,245,245,246,246,247,247,248,248, \
+ 249,249,250,250,251,251,252,252,253,253,254,254,255,255,256,256, \
+ 257,257,258,258,259,259,260,260,261,261,262,262,263,263,264,264, \
+ 265,265,266,266,267,267,268,268,269,269,279,270,271,271,272,272, \
+ 273,273,274,274,275,275,276,276,277,277,278,278,279,279,280,280, \
+ 281,281,282,282,283,283,284,284,294,285,286,286,287,287,288,288, \
+ 289,289,290,290,291,291,292,292,293,293,294,294,295,295,296,296, \
+ 297,297,298,298,299,299,300,300,301,301,302,302,303,303,304,304, \
+ 305,305,306,306,307,307,308,308,309,309,310,310,311,311,312,312, \
+ 313,313,314,314,315,315,316,316,317,317,318,318,319,319,320,320, \
+ 321,321,322,322,323,323,324,324,325,325,326,337,349,338,328,328, \
+ 329,329,330,330,331,331,332,332,333,333,334,334,335,335,336,336, \
+ 337,337,338,360,339,339,340,340,341,341,342,342,343,354,355,344, \
+ 345,345,368,368,369,369,359,348,393,382,383,361,362,362,363,363, \
+ 365,365,366,390,367,391,380,404,357,393,406,382,383,395,396,396, \
+ 385,409,386,410,387,399,388,412,413,401,390,366,367,403,404,416, \
+ 417,405,382,406,407,407,408,408,409,409,410,410,435,411,412,412, \
+ 413,413,414,378,439,427,428,392,393,441,442,430,479,455,444,432, \
+ 437,385,451,386,465,439,427,388,389,454,455,455,404,417,444,444, \
+ 393,393,394,446,460,460,461,487,410,475,398,476,477,464,400,465, \
+ 401,492,402,402,403,403,404,404,405,405,406,419,407,407,408,408, \
+ 409,409,410,410,411,411,412,412,413,413,414,414,415,415,416,416, \
+ 417,417,418,418,419,419,420,420,421,421,422,422,423,423,424,508, \
+ 425,425,426,426,427,427,428,428,429,429,430,430,431,431,432,432, \
+ 433,433,434,434,435,435,436,436,437,437,438,536,439,439,440,440, \
+ 441,441,442,442,443,443,444,444,445,445,446,446,447,447,448,476, \
+ 449,449,450,450,451,451,482,452,453,453,454,454,455,455,456,456, \
+ 457,457,458,458,459,459,460,460,461,461,462,462,463,463,464,464, \
+ 465,465,466,466,467,467,468,468,469,469,470,470,471,471,472,472, \
+ 473,473,474,474,475,475,476,476,477,477,478,478,479,479,480,480, \
+ 481,481,482,482,483,483,484,484,485,485,486,486,487,487,488,488, \
+ 489,489,490,490,491,491,492,492,493,493,494,494,495,495,496,496, \
+ 497,497,498,498,499,499,500,500,501,501,502,502,503,503,504,504, \
+ 505,505,506,506,507,507,508,508,509,509,510,510,511,511,512,512 \
+
+#define MPFR_MUL_THRESHOLD 8 /* limbs */
+#define MPFR_EXP_2_THRESHOLD 519 /* bits */
+#define MPFR_EXP_THRESHOLD 6533 /* bits */
+
+/****************************
+ * Threshold for Core 2 *
+ ****************************/
+#elif defined (__tune_core2__) && !defined (__i386) /* 64-bit Core 2 */
+
+/* Generated by MPFR's tuneup.c, 2009-12-18, gcc 4.4.2 */
+/* tarte.loria.fr with gmp-4.3.1 */
+
+#define MPFR_MULHIGH_TAB \
+ -1,0,-1,-1,-1,0,0,0,0,0,0,0,0,0,8,0, \
+ 9,10,10,10,11,12,12,12,13,14,15,16,18,18,18,18, \
+ 19,24,22,22,26,26,26,26,28,26,24,28,32,24,24,32, \
+ 26,34,26,30,30,28,36,40,38,40,40,32,34,34,39,36, \
+ 39,43,44,40,47,47,48,48,43,51,48,48,47,51,48,52, \
+ 55,55,48,48,55,59,60,52,51,51,52,64,51,53,56,56, \
+ 63,59,64,56,61,61,68,64,69,69,68,78,81,69,72,74, \
+ 69,75,74,74,69,81,78,78,81,81,78,78,81,81,78,90, \
+ 86,80,90,90,88,91,83,86,90,90,85,88,89,86,84,90, \
+ 100,91,92,89,90,90,88,85,104,98,99,90,109,115,92,116, \
+ 117,117,115,103,104,116,111,93,112,112,116,113,114,114,115,109, \
+ 110,128,129,117,109,115,116,116,129,129,112,112,128,134,129,129, \
+ 141,129,138,138,139,127,128,128,141,141,134,134,139,135,132,140, \
+ 141,141,134,134,135,139,140,140,141,141,162,162,151,151,140,140, \
+ 141,141,158,146,151,151,156,188,141,153,166,162,187,155,172,188, \
+ 161,181,186,158,159,171,164,164,165,165,170,182,179,179,184,180, \
+ 179,179,175,185,186,186,187,187,188,188,179,184,185,185,176,176, \
+ 187,182,188,188,204,179,180,200,186,186,187,212,218,188,204,184, \
+ 180,185,216,186,217,202,208,208,214,204,215,215,216,216,212,212, \
+ 218,218,219,204,215,215,216,216,217,217,218,188,214,219,220,220, \
+ 203,251,204,204,205,235,236,218,219,219,220,214,215,251,252,252, \
+ 217,217,218,236,219,219,220,220,233,269,252,252,235,259,272,260, \
+ 249,267,256,250,251,251,252,234,271,271,272,272,243,243,250,250, \
+ 251,251,270,252,271,271,272,272,243,249,250,250,251,251,252,270, \
+ 263,305,320,320,307,272,294,308,267,316,296,268,269,318,319,305, \
+ 320,271,272,272,294,294,295,316,317,282,318,318,305,312,313,306, \
+ 307,307,308,294,295,295,296,296,304,304,305,305,306,306,307,307, \
+ 308,308,316,316,317,317,318,318,319,305,320,320,307,314,315,308, \
+ 361,305,306,314,307,307,324,356,317,341,318,318,343,319,320,320, \
+ 361,305,362,306,347,315,340,332,365,365,366,366,367,319,368,320, \
+ 353,353,346,346,363,355,356,356,341,365,366,358,359,367,368,360, \
+ 353,353,354,362,363,363,348,348,341,365,358,366,343,343,368,368, \
+ 320,392,366,366,367,367,368,341,342,342,343,343,362,344,390,390, \
+ 391,355,392,356,366,366,367,367,368,368,360,342,343,343,344,416, \
+ 354,363,364,355,356,356,366,366,439,439,368,431,459,387,415,343, \
+ 344,416,363,462,355,364,392,437,366,366,367,367,368,359,360,450, \
+ 439,459,390,440,391,451,462,462,463,463,414,414,415,455,406,416, \
+ 457,447,458,438,439,439,440,440,391,461,462,462,463,413,414,464, \
+ 455,455,416,456,407,457,438,438,439,459,440,410,461,451,452,462, \
+ 463,433,464,444,415,415,416,416,507,457,438,508,459,439,440,440, \
+ 464,464,443,454,444,444,456,511,512,435,458,524,437,536,460,416, \
+ 439,461,440,451,452,452,464,464,509,509,510,510,511,456,457,512, \
+ 524,458,459,459,460,460,461,560,462,539,452,452,464,464,509,509, \
+ 510,499,500,555,512,512,557,524,525,525,537,559,560,549,440,539, \
+ 557,521,510,558,559,511,512,536,537,525,454,538,539,527,528,504, \
+ 505,505,530,554,555,531,532,532,509,521,546,510,511,511,512,512, \
+ 537,537,538,526,539,539,552,528,553,529,530,530,543,531,532,532, \
+ 521,545,558,510,511,559,560,560,537,513,526,550,551,551,528,552, \
+ 554,554,555,555,530,543,544,531,545,558,533,559,560,560,509,509, \
+ 510,510,511,511,655,655,539,656,527,605,541,541,542,555,556,556, \
+ 531,544,545,545,559,533,534,547,548,600,549,549,550,550,551,551, \
+ 552,604,605,605,554,554,555,555,556,556,648,635,636,545,546,559, \
+ 557,557,642,656,559,629,630,560,617,603,604,604,605,605,620,606, \
+ 607,607,650,608,651,553,554,652,653,653,654,654,627,641,656,656, \
+ 629,559,560,742,603,617,604,604,619,605,606,606,607,747,748,748, \
+ 651,651,652,624,625,639,626,682,627,655,628,642,699,699,672,630, \
+ 704,704,720,615,736,736,647,647,648,648,724,724,725,635,741,651, \
+ 652,652,653,653,654,654,655,655,656,656,747,732,688,748,644,629, \
+ 735,735,736,736,632,737,738,738,724,739,740,680,681,726,682,682, \
+ 683,683,744,684,700,700,746,746,747,747,748,748,734,704,720,720, \
+ 721,737,738,738,707,675,724,708,709,709,710,710,711,727,712,728, \
+ 745,713,682,746,747,683,684,684,717,733,734,734,735,735,736,736, \
+ 721,721,722,738,723,723,724,724,725,741,742,742,743,743,744,744, \
+ 745,729,746,746,747,731,732,732,653,717,718,718,719,719,720,736 \
+
+#define MPFR_SQRHIGH_TAB \
+ -1,0,-1,-1,-1,-1,-1,4,5,5,7,6,7,7,8,8, \
+ 9,9,10,10,11,11,12,12,13,14,14,16,17,16,17,16, \
+ 17,18,19,18,19,19,23,20,21,21,22,22,23,23,24,24, \
+ 25,25,26,26,27,27,28,28,29,29,30,30,32,31,32,32, \
+ 33,33,34,34,35,35,36,36,37,37,38,38,39,39,40,40, \
+ 41,41,42,46,43,45,48,44,45,45,46,46,47,47,48,48, \
+ 49,49,50,50,51,51,52,52,53,53,54,54,55,55,56,56, \
+ 71,57,68,64,67,59,60,68,63,67,64,64,67,67,64,72, \
+ 68,68,72,72,67,76,68,68,69,69,70,70,71,71,72,72, \
+ 76,76,80,80,75,75,76,76,77,80,84,84,79,79,80,80, \
+ 84,81,88,82,83,95,96,96,88,91,92,92,87,99,91,88, \
+ 92,95,96,105,94,91,116,92,114,99,103,97,98,110,105,105, \
+ 105,109,110,114,111,111,104,104,105,105,102,102,135,135,104,104, \
+ 117,105,114,114,115,111,116,116,117,109,134,134,135,123,124,112, \
+ 129,117,138,114,115,127,132,116,141,129,122,118,119,139,140,120, \
+ 121,121,122,122,123,147,152,128,129,125,126,126,127,135,128,128, \
+ 129,129,140,135,136,131,132,132,133,138,149,134,135,145,146,141, \
+ 152,152,153,138,139,144,180,140,141,141,152,147,153,148,159,164, \
+ 155,180,171,171,162,172,188,158,159,179,180,180,171,166,172,172, \
+ 178,188,179,164,180,180,181,171,172,172,183,178,184,184,170,180, \
+ 203,185,186,180,187,163,164,188,195,171,184,196,203,203,180,204, \
+ 187,211,194,188,171,171,172,184,185,185,186,186,187,175,188,188, \
+ 195,177,184,184,179,179,180,180,181,187,188,182,183,183,184,184, \
+ 203,185,186,186,187,187,188,188,195,195,196,196,191,191,192,192, \
+ 193,193,194,194,195,195,196,196,197,204,198,212,199,199,200,200, \
+ 201,201,202,202,203,203,204,204,205,282,283,206,228,284,208,208, \
+ 209,209,210,210,211,211,268,212,262,248,228,249,215,215,300,216, \
+ 217,252,218,274,275,268,220,283,256,284,250,264,244,251,252,252, \
+ 249,249,250,242,243,275,252,252,253,261,262,246,255,279,264,272, \
+ 281,265,250,274,267,251,300,300,277,261,262,262,255,263,256,264, \
+ 265,265,266,282,299,267,284,252,293,269,294,286,279,279,272,280, \
+ 297,281,298,298,299,283,284,300,293,269,270,294,287,255,256,296, \
+ 284,275,276,276,277,268,314,260,315,297,262,316,308,263,264,264, \
+ 265,265,266,266,267,294,313,268,305,278,279,297,280,280,281,281, \
+ 282,300,283,283,284,284,276,276,313,313,314,296,297,297,298,298, \
+ 299,299,300,300,364,283,284,284,294,294,295,304,296,314,315,315, \
+ 299,299,300,300,321,361,362,312,363,313,294,314,295,315,316,316, \
+ 297,297,298,298,299,299,300,300,331,341,312,302,303,303,364,414, \
+ 355,305,346,346,307,307,308,308,359,379,380,380,361,361,362,362, \
+ 313,363,364,364,315,315,416,356,327,347,348,368,319,319,320,380, \
+ 376,332,377,377,389,378,379,346,347,347,348,359,360,393,416,361, \
+ 362,329,396,385,364,331,332,409,355,410,345,356,346,368,380,391, \
+ 392,348,360,404,416,416,384,362,363,363,364,364,464,387,377,377, \
+ 378,411,379,346,347,457,458,348,393,393,394,460,406,439,440,429, \
+ 401,401,402,402,391,391,392,392,453,441,454,454,455,443,444,408, \
+ 445,457,458,458,459,363,364,364,461,389,402,414,463,415,380,380, \
+ 405,405,406,406,395,395,396,396,409,373,446,446,435,447,400,448, \
+ 377,413,462,414,451,415,416,416,405,393,406,430,431,407,432,456, \
+ 411,411,412,425,426,439,440,440,389,415,416,390,391,456,392,392, \
+ 458,432,446,459,460,395,500,448,436,462,463,463,464,464,400,439, \
+ 440,401,402,402,403,455,456,456,457,457,458,458,459,511,512,408, \
+ 461,461,410,436,437,411,412,412,413,452,453,414,545,454,416,416, \
+ 459,459,460,460,447,475,546,448,449,463,464,436,437,437,438,480, \
+ 523,495,510,440,483,553,428,512,555,555,458,458,543,459,460,460, \
+ 475,559,560,532,505,519,436,464,437,437,438,438,439,439,440,440, \
+ 441,441,442,484,499,443,444,444,445,529,544,516,545,559,546,532, \
+ 464,509,450,510,511,511,512,557,543,498,499,499,500,560,546,456, \
+ 457,547,548,458,459,534,460,460,461,461,462,582,463,463,464,464, \
+ 540,540,496,556,497,542,468,498,499,499,500,500,546,591,532,517, \
+ 518,548,534,534,535,595,476,476,552,582,478,523,524,554,480,525, \
+ 529,529,546,546,579,531,532,532,533,485,486,534,535,535,536,536, \
+ 537,537,506,490,555,491,492,540,509,509,558,590,591,591,560,560, \
+ 577,561,578,562,563,563,564,596,581,581,534,566,567,519,520,520, \
+ 521,537,538,506,507,571,524,508,605,541,558,558,607,511,512,512 \
+
+#define MPFR_MUL_THRESHOLD 9 /* limbs */
+#define MPFR_EXP_2_THRESHOLD 64 /* bits */
+#define MPFR_EXP_THRESHOLD 11062 /* bits */
+#define MPFR_SINCOS_THRESHOLD 25954 /* bits */
+
+#elif defined (__tune_core2__) && defined (__i386) /* 32-bit Core 2,
+ for example a 64-bit machine with gmp/mpfr compiled with ABI=32 */
+
+/* Generated by MPFR's tuneup.c, 2007-12-21, gcc 4.1 */
+
+#define MPFR_MULHIGH_TAB \
+ -1,-1,-1,-1,0,0,0,0,0,0,0,0,0,0,0,0, \
+ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, \
+ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,28, \
+ 30,28,30,28,28,32,30,30,30,32,32,34,32,32,32,32, \
+ 33,35,44,40,35,41,40,42,41,47,48,48,47,51,56,48, \
+ 47,55,48,56,51,55,56,60,55,55,56,56,55,55,56,64, \
+ 63,59,60,64,59,59,64,64,63,63,60,68,63,67,60,64, \
+ 67,67,64,68,63,81,64,64,81,81,64,80,81,81,92,86, \
+ 86,80,81,81,79,91,86,92,93,84,85,85,86,86,87,87, \
+ 91,88,89,98,99,96,91,91,92,92,93,93,94,91,92,92, \
+ 99,99,103,97,98,98,96,93,121,115,104,116,93,99,121,127, \
+ 128,110,129,117,121,127,128,128,129,126,124,127,122,128,123,129, \
+ 129,129,134,138,139,139,140,140,129,141,122,138,139,139,128,128, \
+ 129,165,162,138,139,139,164,164,165,153,138,162,139,139,164,140, \
+ 165,165,150,146,139,163,164,164,141,177,158,162,151,151,160,156, \
+ 165,153,162,158,163,163,164,164,165,153,174,174,163,163,164,164, \
+ 164,164,165,165,176,176,172,177,168,163,189,189,165,165,176,176, \
+ 177,177,188,188,189,189,175,180,176,176,177,177,188,183,184,189, \
+ 180,180,191,201,192,177,188,188,189,199,200,200,201,201,212,212, \
+ 213,188,189,189,200,200,201,201,177,212,188,213,189,189,200,195, \
+ 239,239,240,240,199,199,212,212,213,201,232,196,215,197,240,240, \
+ 247,211,200,236,237,213,256,256,239,257,240,240,247,247,254,248, \
+ 237,255,256,238,257,257,240,240,253,247,272,272,237,237,256,256, \
+ 275,257,258,276,265,271,272,254,255,267,274,256,257,257,258,276, \
+ 256,249,257,285,258,258,266,294,267,267,275,275,276,276,291,270, \
+ 257,257,258,293,273,294,274,267,261,261,276,276,270,277,278,271, \
+ 293,293,294,294,267,267,268,275,276,276,284,284,292,271,272,293, \
+ 294,273,274,274,275,289,276,276,291,291,285,285,293,293,294,294, \
+ 321,305,290,290,291,275,276,348,285,293,294,310,311,303,312,288, \
+ 289,329,330,306,347,291,292,348,381,381,294,294,327,343,384,384, \
+ 345,337,306,346,347,307,308,340,341,381,366,366,343,383,384,384, \
+ 345,329,330,330,347,339,380,380,357,381,366,358,359,375,376,376, \
+ 383,383,366,366,340,367,368,359,360,360,361,379,362,362,417,381, \
+ 364,382,383,383,384,384,412,376,413,359,360,378,361,379,380,416, \
+ 417,381,382,418,419,365,384,384,412,412,413,377,378,378,379,379, \
+ 380,380,381,363,418,382,383,383,384,384,385,376,377,413,414,378, \
+ 379,379,380,380,381,381,382,382,383,363,364,414,415,365,366,366, \
+ 417,377,378,418,379,419,420,420,491,441,492,492,403,383,384,384, \
+ 415,415,386,416,417,417,418,418,419,419,420,490,491,491,492,492, \
+ 423,483,414,414,415,525,416,486,497,417,418,418,419,489,490,490, \
+ 486,420,487,421,488,455,456,423,424,490,491,491,492,492,471,471, \
+ 483,417,418,528,419,419,420,420,465,487,488,488,489,456,490,490, \
+ 491,491,492,492,526,493,527,494,484,528,419,452,453,486,487,454, \
+ 455,488,489,467,468,490,491,480,492,492,482,482,483,527,528,506, \
+ 521,485,522,498,487,487,488,524,525,489,490,490,491,491,492,528, \
+ 481,505,506,494,495,495,496,520,497,497,486,498,487,487,488,524, \
+ 489,489,490,526,527,491,492,492,529,529,506,518,519,483,496,484, \
+ 485,569,510,522,523,487,488,488,489,489,490,490,491,491,492,492, \
+ 528,528,490,490,491,491,492,492,506,506,507,507,521,521,535,522, \
+ 523,523,524,563,564,525,526,526,527,527,528,528,529,542,543,556, \
+ 570,557,571,558,533,533,534,560,535,522,523,536,537,563,564,525, \
+ 526,526,527,527,528,528,542,594,595,543,557,557,597,532,559,559, \
+ 557,557,558,558,559,531,546,560,561,547,548,562,563,563,564,564, \
+ 565,565,566,566,567,553,582,568,569,583,584,584,585,557,558,600, \
+ 559,573,560,588,533,561,590,562,563,563,564,564,593,579,594,566, \
+ 567,581,582,568,569,569,598,584,571,585,600,600,629,559,560,560, \
+ 599,599,600,600,601,586,587,602,603,603,634,604,605,635,636,636, \
+ 562,637,638,563,564,564,565,565,566,611,612,552,628,568,569,584, \
+ 585,600,631,616,617,632,633,633,634,634,635,635,636,606,592,712, \
+ 713,638,639,564,595,715,716,716,717,597,583,568,569,704,600,585, \
+ 625,593,594,770,771,595,596,708,597,597,598,694,695,599,600,712, \
+ 633,633,714,634,715,715,716,636,717,717,686,750,751,751,752,768, \
+ 753,753,754,770,771,771,756,708,709,709,710,742,743,711,712,760, \
+ 761,713,714,714,715,715,716,716,765,717,718,750,751,751,768,736 \
+
+#define MPFR_SQRHIGH_TAB \
+ -1,0,-1,-1,0,0,0,0,0,0,0,0,-1,-1,0,8, \
+ 9,9,10,10,11,12,12,13,13,13,14,14,15,15,16,16, \
+ 17,17,18,18,19,19,20,20,21,21,22,22,23,23,24,24, \
+ 25,25,26,26,27,27,28,28,29,29,30,30,31,31,32,32, \
+ 33,33,34,34,35,35,36,36,37,37,38,42,39,39,40,40, \
+ 41,41,42,42,43,43,44,44,45,45,46,46,47,47,48,48, \
+ 49,49,50,50,51,51,52,52,53,53,54,54,55,55,56,56, \
+ 57,57,58,58,59,59,60,60,61,61,62,62,63,63,64,64, \
+ 65,74,66,66,67,67,68,68,69,78,70,70,74,71,72,72, \
+ 73,73,74,74,75,75,76,76,77,77,78,78,79,79,80,80, \
+ 81,84,82,82,83,83,84,84,85,85,92,92,87,87,88,88, \
+ 89,89,90,96,91,91,92,92,93,96,100,100,95,95,96,96, \
+ 97,97,98,98,99,99,100,100,101,101,114,102,103,103,104,104, \
+ 105,105,114,106,107,123,108,108,117,109,110,110,111,111,112,112, \
+ 113,113,114,114,115,115,116,116,117,117,118,118,119,119,120,132, \
+ 141,121,122,122,123,135,140,144,141,125,126,126,127,127,128,128, \
+ 129,129,130,135,131,131,132,132,133,158,134,134,135,135,136,136, \
+ 137,137,138,138,169,139,140,140,141,141,142,142,143,143,144,144, \
+ 145,145,146,171,177,147,153,148,149,159,150,150,151,171,177,177, \
+ 153,153,154,159,180,165,156,156,157,177,183,168,159,159,170,165, \
+ 161,161,162,162,163,163,164,164,165,165,166,166,167,167,168,168, \
+ 169,169,170,170,171,171,172,172,173,173,174,174,175,175,176,176, \
+ 177,177,178,178,179,179,180,180,181,181,182,182,183,183,184,184, \
+ 185,185,186,186,187,187,188,188,189,189,190,190,191,191,192,192, \
+ 193,193,194,194,195,195,196,196,197,197,198,198,199,199,200,200, \
+ 201,201,202,202,203,210,225,204,205,205,206,206,207,207,208,208, \
+ 209,209,210,210,211,211,212,212,213,213,214,214,215,215,216,216, \
+ 217,217,218,218,219,219,220,220,221,221,222,222,223,223,224,224, \
+ 225,225,226,226,227,227,228,228,229,229,230,230,231,231,232,232, \
+ 233,249,234,234,235,235,236,236,237,237,238,238,239,239,240,240, \
+ 241,241,242,242,243,243,244,244,245,245,246,246,247,247,248,248, \
+ 249,249,250,250,251,251,252,252,253,253,254,254,255,255,256,256, \
+ 257,257,258,258,259,259,260,260,261,261,262,262,263,263,264,264, \
+ 265,283,266,284,285,267,268,268,269,269,270,270,271,271,272,272, \
+ 273,273,274,274,275,275,276,276,277,277,278,278,279,279,280,280, \
+ 281,281,282,282,283,283,284,284,285,285,286,295,332,287,288,288, \
+ 289,309,320,320,321,291,292,292,293,293,294,324,295,295,296,296, \
+ 297,297,298,298,299,299,300,300,301,301,302,302,303,333,304,304, \
+ 345,305,306,306,307,307,348,338,339,309,310,310,311,311,312,312, \
+ 333,323,324,354,355,345,336,316,317,337,338,338,319,339,320,340, \
+ 365,365,344,333,323,345,346,324,325,347,348,348,349,327,328,328, \
+ 384,329,330,330,331,364,332,354,366,366,356,356,357,335,336,336, \
+ 337,337,338,338,339,383,384,340,341,341,342,342,343,343,344,344, \
+ 345,345,346,346,347,347,348,348,349,382,383,350,351,351,352,352, \
+ 353,353,402,354,355,355,356,356,357,357,358,358,359,419,420,420, \
+ 361,361,362,362,363,363,364,364,365,365,366,366,367,367,368,368, \
+ 369,369,370,370,371,371,372,372,373,373,374,374,375,375,376,376, \
+ 377,377,378,378,379,379,380,380,381,381,382,382,383,383,384,384, \
+ 385,385,399,490,387,387,388,388,389,389,390,390,391,391,392,392, \
+ 393,393,420,394,395,395,396,396,397,397,398,398,399,399,400,400, \
+ 401,401,402,402,507,507,508,404,405,509,510,510,407,407,408,473, \
+ 474,474,527,436,437,411,412,412,413,491,492,492,415,415,416,416, \
+ 417,417,418,418,419,419,420,420,421,463,492,492,521,479,480,424, \
+ 425,425,426,426,427,427,428,484,429,429,430,472,473,473,474,432, \
+ 433,433,434,504,491,491,492,436,437,507,508,508,481,467,468,468, \
+ 469,483,484,512,513,471,486,500,501,515,516,488,489,447,448,490, \
+ 509,509,510,510,481,526,527,512,513,528,544,544,545,455,456,456, \
+ 517,487,488,488,489,489,490,460,461,461,462,492,508,508,509,509, \
+ 510,510,511,526,527,467,468,528,544,469,470,515,546,531,517,472, \
+ 473,473,474,474,475,535,581,476,477,477,478,478,479,479,480,480, \
+ 481,481,482,482,483,547,548,484,485,485,486,566,535,487,488,488, \
+ 489,489,490,490,491,491,492,492,493,493,494,494,495,495,496,496, \
+ 497,561,498,578,579,579,580,580,581,501,502,598,599,503,504,504, \
+ 505,505,506,506,507,507,508,508,509,509,510,510,511,511,512,512 \
+
+#define MPFR_MUL_THRESHOLD 11 /* limbs */
+#define MPFR_EXP_2_THRESHOLD 441 /* bits */
+#define MPFR_EXP_THRESHOLD 9145 /* bits */
+
+/****************************
+ * Threshold for AMD 64 *
+ ****************************/
+#elif defined (__tune_k8__)
+
+/* Generated by MPFR's tuneup.c, 2009-02-09, gcc 4.3 */
+/* achille.loria.fr with gmp-4.2.4 */
+
+#define MPFR_MULHIGH_TAB \
+ -1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, \
+ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, \
+ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, \
+ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, \
+ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, \
+ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, \
+ 0,0,0,0,0,0,0,0,0,0,68,68,0,67,66,66, \
+ 65,65,64,64,67,67,64,72,65,79,68,72,67,79,72,72, \
+ 80,80,81,72,76,82,77,77,84,84,88,88,83,83,81,84, \
+ 88,88,80,80,96,96,100,97,98,89,96,84,100,97,98,98, \
+ 96,96,100,100,98,98,93,93,100,97,101,98,96,99,121,121, \
+ 104,101,108,108,112,109,110,122,111,117,115,115,122,122,123,123, \
+ 117,117,122,122,123,123,128,128,129,129,126,126,135,131,128,128, \
+ 129,129,126,126,147,143,132,132,141,137,134,134,135,147,140,140, \
+ 141,141,146,146,147,147,144,140,149,141,146,146,147,147,152,144, \
+ 153,165,146,146,147,147,152,156,165,153,162,170,167,171,168,168, \
+ 159,159,170,170,171,166,167,172,168,168,189,189,200,170,171,171, \
+ 177,167,168,168,169,199,195,200,201,201,192,192,198,198,199,189, \
+ 195,195,201,201,207,207,188,188,189,199,195,200,201,201,202,197, \
+ 198,198,189,199,210,200,196,196,197,192,213,208,219,224,225,200, \
+ 233,227,228,204,211,211,212,206,237,213,202,220,203,239,204,240, \
+ 247,211,212,212,213,243,232,232,239,215,216,222,229,235,236,224, \
+ 225,225,226,226,227,245,246,234,241,223,248,224,225,249,226,226, \
+ 227,233,234,252,247,235,236,236,237,237,250,238,239,245,246,240, \
+ 249,249,236,236,237,251,252,252,253,253,254,254,297,241,242,249, \
+ 257,257,258,251,245,245,246,267,247,261,248,248,249,249,285,278, \
+ 258,258,259,273,281,281,282,261,276,297,270,270,271,271,272,272, \
+ 273,273,288,281,282,289,290,283,284,277,285,278,286,286,287,287, \
+ 289,281,282,290,291,291,284,284,293,301,302,310,311,303,304,312, \
+ 273,297,330,282,283,299,308,348,309,285,310,366,287,327,288,288, \
+ 297,297,330,338,339,339,348,348,349,349,302,366,335,335,344,312, \
+ 297,361,330,354,355,339,348,348,341,365,342,342,359,351,360,328, \
+ 329,365,357,357,358,349,350,341,342,351,352,361,362,380,363,363, \
+ 364,364,347,347,348,348,349,367,368,341,342,342,343,343,380,380, \
+ 345,345,346,346,347,347,348,357,358,385,386,359,360,360,361,361, \
+ 362,362,363,363,364,364,365,410,411,357,358,358,359,422,414,378, \
+ 379,369,410,380,381,381,432,362,363,423,434,434,425,435,436,366, \
+ 417,437,438,438,369,399,400,440,441,441,402,382,383,393,384,384, \
+ 445,405,406,436,437,417,418,418,409,429,410,390,411,431,402,432, \
+ 423,393,414,404,445,425,436,436,417,417,438,438,399,449,450,440, \
+ 431,420,421,443,433,433,434,445,435,435,414,414,426,426,427,427, \
+ 417,417,418,418,419,430,431,442,443,443,433,433,434,434,435,501, \
+ 425,447,470,459,416,427,428,450,429,429,430,474,486,464,432,432, \
+ 455,455,489,456,468,457,469,447,459,459,438,471,472,450,462,473, \
+ 497,509,510,486,463,487,452,452,501,489,490,454,467,491,492,456, \
+ 469,469,470,470,483,447,508,508,509,509,510,510,451,499,452,452, \
+ 513,501,526,490,491,455,456,492,493,505,506,470,495,483,484,472, \
+ 473,521,510,474,535,487,488,500,501,501,502,562,563,563,528,528, \
+ 528,528,581,490,491,491,492,492,506,493,494,520,495,495,561,587, \
+ 588,510,563,589,590,499,513,513,566,566,528,515,516,516,517,569, \
+ 583,583,571,597,598,585,599,586,600,600,510,575,576,563,525,564, \
+ 578,591,527,579,567,528,594,607,556,582,583,583,558,597,598,546, \
+ 571,585,586,600,601,545,588,546,589,561,562,562,591,591,578,592, \
+ 593,593,594,594,567,595,596,596,597,597,598,598,571,599,600,572, \
+ 573,573,574,574,575,589,618,590,563,633,634,578,593,579,580,580, \
+ 581,595,596,582,597,625,626,570,571,571,600,600,601,601,602,602, \
+ 599,599,600,600,601,616,602,602,618,618,589,634,590,605,621,591, \
+ 607,607,623,623,624,609,625,595,596,596,597,597,598,628,629,599, \
+ 645,645,616,631,632,632,633,708,634,604,605,680,681,636,652,607, \
+ 608,623,624,744,670,640,641,626,627,672,673,658,659,704,630,630, \
+ 625,705,674,642,643,675,676,708,709,709,710,742,679,743,744,696, \
+ 681,633,698,682,683,699,668,732,717,669,670,670,687,703,704,704, \
+ 657,689,674,690,707,675,676,708,709,709,710,710,695,727,728,744, \
+ 713,681,682,714,715,699,700,684,701,701,702,702,703,703,704,672 \
+
+#define MPFR_SQRHIGH_TAB \
+ -1,0,0,0,0,0,0,0,0,5,6,6,7,7,8,8, \
+ 9,10,10,10,11,11,12,12,13,13,14,14,15,15,16,16, \
+ 17,17,18,18,19,19,20,20,21,21,22,22,23,23,24,24, \
+ 25,25,26,26,27,27,28,28,29,29,30,30,31,31,32,32, \
+ 33,33,34,34,35,35,36,36,37,37,38,38,39,39,40,40, \
+ 41,41,42,42,43,43,44,44,45,45,46,46,47,47,48,48, \
+ 49,49,50,50,51,51,52,52,53,53,54,54,55,55,56,56, \
+ 57,57,58,58,59,59,60,60,61,61,62,62,63,63,64,64, \
+ 65,65,66,66,67,67,68,68,69,69,70,70,71,71,72,72, \
+ 73,73,74,74,75,75,76,76,77,77,78,78,79,79,80,80, \
+ 81,81,82,82,83,83,84,84,85,85,86,86,87,87,88,88, \
+ 89,89,90,90,91,91,92,92,93,93,94,94,95,95,96,96, \
+ 97,97,98,98,99,99,100,100,101,101,102,102,103,103,104,104, \
+ 105,105,106,106,107,107,108,108,109,109,110,110,111,111,112,112, \
+ 113,113,114,114,115,115,116,116,117,117,118,118,119,119,120,120, \
+ 121,121,122,122,123,123,124,124,125,125,126,126,127,127,128,128, \
+ 129,129,130,130,131,131,132,132,133,133,134,134,135,135,136,136, \
+ 137,137,138,138,139,139,140,140,141,141,142,142,143,143,144,144, \
+ 145,145,146,146,147,147,148,158,159,149,150,150,151,151,152,152, \
+ 153,153,154,154,155,155,156,156,157,157,158,158,159,159,160,160, \
+ 161,161,162,162,163,163,164,164,165,165,166,166,167,167,180,186, \
+ 199,169,170,170,171,171,172,172,173,209,198,198,205,175,176,176, \
+ 177,177,178,214,215,179,180,210,211,199,200,200,201,183,184,184, \
+ 185,185,216,186,187,211,212,212,213,189,190,190,191,191,192,192, \
+ 193,193,194,194,195,195,196,196,197,197,198,198,199,199,200,200, \
+ 201,201,202,202,203,203,204,204,205,205,206,206,207,207,208,208, \
+ 209,209,210,210,211,211,212,240,213,213,214,214,215,215,216,216, \
+ 217,217,218,218,219,219,248,248,249,221,222,222,223,223,224,224, \
+ 225,225,226,226,227,227,228,228,229,229,230,230,231,231,232,264, \
+ 273,233,234,234,267,235,236,236,237,237,238,238,239,239,240,288, \
+ 273,241,242,242,243,243,244,284,261,277,278,270,271,247,248,248, \
+ 249,249,250,250,251,251,252,252,253,253,254,254,255,255,256,256, \
+ 257,284,258,294,295,259,260,260,261,261,262,262,263,263,264,264, \
+ 265,265,266,266,267,267,268,268,269,269,270,270,271,271,272,272, \
+ 273,273,274,274,320,320,276,276,277,277,278,278,279,279,280,280, \
+ 281,281,282,282,283,283,284,284,285,285,286,286,287,287,288,288, \
+ 289,289,290,290,291,291,292,292,293,293,294,294,295,295,296,296, \
+ 297,297,298,298,299,299,300,300,301,301,302,302,303,303,304,304, \
+ 305,305,306,306,307,307,308,308,309,309,310,310,311,311,312,342, \
+ 333,313,314,314,315,315,316,316,317,317,318,318,319,319,320,320, \
+ 321,321,322,322,323,323,324,324,325,325,326,326,327,327,328,328, \
+ 329,329,330,330,331,331,332,332,333,333,334,334,335,335,336,336, \
+ 337,337,338,338,339,339,340,340,341,341,342,342,343,343,344,344, \
+ 345,345,346,401,402,402,348,414,349,349,350,350,351,351,352,418, \
+ 413,425,426,354,427,427,356,428,429,429,430,358,359,359,360,420, \
+ 361,361,362,422,423,363,364,436,437,401,402,366,367,415,416,416, \
+ 417,417,418,454,371,371,456,372,373,373,374,374,375,375,376,376, \
+ 377,377,378,378,379,379,380,380,381,381,382,382,383,383,384,384, \
+ 385,385,386,386,387,465,388,440,441,441,390,390,391,391,392,392, \
+ 393,393,394,446,447,395,396,396,397,397,398,437,438,464,465,400, \
+ 401,401,402,402,468,403,404,404,405,444,445,497,407,472,473,408, \
+ 409,409,501,410,411,411,451,438,465,465,414,492,493,415,416,416, \
+ 417,417,418,418,419,419,420,420,421,421,422,422,423,423,424,424, \
+ 425,425,426,426,427,427,428,428,429,429,486,430,431,431,432,432, \
+ 433,433,434,434,435,435,436,436,437,437,438,438,439,439,440,440, \
+ 441,441,442,498,499,443,444,528,445,445,446,446,447,447,448,448, \
+ 449,449,450,510,526,451,452,452,453,453,454,454,455,455,456,546, \
+ 487,457,458,458,459,459,460,460,461,461,462,462,463,463,464,464, \
+ 465,540,541,466,467,467,468,513,514,499,560,470,471,471,472,472, \
+ 473,473,474,474,475,535,536,476,477,582,553,478,479,599,600,585, \
+ 481,481,482,578,579,483,484,484,485,485,486,486,487,487,488,488, \
+ 489,489,490,490,491,603,620,492,493,589,590,494,495,495,496,496, \
+ 497,593,594,498,499,499,500,500,501,501,502,502,503,503,504,504, \
+ 505,505,506,506,507,507,508,508,509,509,510,510,511,511,512,608 \
+
+#define MPFR_MUL_THRESHOLD 6 /* limbs */
+#define MPFR_EXP_2_THRESHOLD 595 /* bits */
+#define MPFR_EXP_THRESHOLD 10606 /* bits */
+
+/*****************************
+ * Threshold for Athlon *
+ *****************************/
+#elif defined (__tune_athlon__)
+
+#define MPFR_MULHIGH_TAB \
+ -1,0,-1,0,-1,0,-1,-1,0,-1,-1,0,0,0,0,10, \
+ 0,12,13,14,15,16,0,0,0,0,0,19,0,0,0,0, \
+ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,32, \
+ 30,32,30,34,32,32,36,34,36,36,38,38,42,38,38,42, \
+ 40,38,42,46,42,42,42,44,44,44,44,44,48,50,46,46, \
+ 60,50,50,48,50,56,56,56,56,56,56,56,60,60,64,60, \
+ 64,64,60,60,60,72,64,64,68,72,76,72,76,72,72,76, \
+ 75,76,76,72,76,72,75,72,72,72,84,76,84,75,72,76, \
+ 84,80,76,84,84,76,76,76,76,88,84,88,80,84,83,96, \
+ 84,96,92,84,88,91,92,88,92,90,96,96,92,91,92,96, \
+ 96,120,95,100,112,120,100,119,112,120,120,112,119,112,119,119, \
+ 120,118,119,119,119,118,119,120,128,127,120,128,128,120,120,127, \
+ 120,128,128,128,128,120,128,127,120,128,128,127,128,127,144,136, \
+ 128,144,152,152,152,136,144,144,144,136,151,152,144,152,128,151, \
+ 144,152,144,144,151,144,150,150,136,151,152,143,143,143,142,168, \
+ 144,144,168,151,168,144,152,151,152,152,160,152,151,152,160,151, \
+ 152,151,152,167,152,152,166,152,167,160,168,204,160,160,168,168, \
+ 204,176,202,167,168,166,167,168,204,184,202,204,204,204,204,192, \
+ 184,192,204,202,202,216,204,227,202,204,216,203,203,204,203,227, \
+ 202,204,202,202,202,203,202,215,204,202,203,202,202,215,228,214, \
+ 226,214,228,214,228,215,228,216,226,214,228,225,226,228,226,226, \
+ 226,215,216,216,226,225,226,227,216,227,240,216,252,252,226,227, \
+ 228,239,227,250,227,226,227,227,252,238,228,239,252,227,228,228, \
+ 252,226,246,228,227,227,252,247,250,263,262,250,252,250,250,252, \
+ 250,288,288,250,250,250,251,264,264,271,287,276,250,288,252,286, \
+ 251,287,288,251,276,274,288,262,263,263,282,252,288,252,276,284, \
+ 287,274,264,274,288,275,275,274,274,288,276,264,273,286,274,276, \
+ 300,287,288,286,286,276,288,285,275,300,275,273,275,299,300,299, \
+ 279,336,336,286,287,284,360,359,336,335,288,360,333,359,334,335, \
+ 335,287,359,335,336,336,359,333,334,359,360,288,360,332,335,336, \
+ 335,336,336,333,336,335,336,359,334,334,335,360,336,334,356,357, \
+ 335,336,358,359,359,336,335,336,334,355,384,356,354,384,334,357, \
+ 336,359,384,359,360,356,384,382,336,383,384,384,354,355,383,384, \
+ 382,359,384,382,382,383,383,359,381,382,383,356,384,382,381,384, \
+ 357,384,358,357,358,358,380,383,382,382,383,360,360,381,382,360, \
+ 377,360,378,360,360,381,382,383,381,380,383,383,360,382,383,380, \
+ 383,383,384,381,379,360,381,381,382,431,380,380,378,380,384,381, \
+ 382,382,384,407,384,382,379,384,430,431,383,384,408,384,456,431, \
+ 431,455,431,428,384,454,455,403,432,383,407,430,430,456,408,427, \
+ 455,455,456,455,432,431,431,429,455,432,432,424,430,427,427,429, \
+ 452,454,426,455,456,431,430,454,452,454,431,456,431,428,455,430, \
+ 454,430,432,455,454,442,443,432,443,431,451,452,456,430,451,456, \
+ 444,455,449,456,432,454,449,454,502,432,448,504,450,503,503,453, \
+ 454,502,446,452,453,454,455,499,504,453,456,504,454,453,503,455, \
+ 456,456,500,478,502,454,456,499,478,454,480,499,501,496,502,455, \
+ 499,502,503,456,478,500,501,500,479,503,504,451,452,479,454,455, \
+ 502,503,504,504,454,499,503,501,454,503,456,503,504,501,455,502, \
+ 503,503,502,504,610,503,504,574,480,551,528,504,609,576,480,503, \
+ 496,502,503,552,551,552,551,503,504,503,610,608,609,610,611,608, \
+ 608,551,648,611,612,550,576,572,608,611,610,606,608,611,610,574, \
+ 575,576,606,607,606,606,608,606,606,575,576,604,604,604,608,609, \
+ 606,606,606,608,604,604,611,604,604,576,610,606,610,606,604,610, \
+ 606,612,604,608,610,609,604,608,606,647,612,610,604,606,648,610, \
+ 610,606,646,608,604,647,606,647,606,609,610,647,648,609,641,611, \
+ 643,611,643,648,643,640,646,611,644,644,645,611,642,646,611,612, \
+ 640,647,610,611,610,611,606,647,648,647,648,644,682,642,682,683, \
+ 610,611,680,678,642,681,682,647,682,612,682,648,611,665,680,683, \
+ 609,646,666,676,680,646,679,647,646,646,648,679,684,611,612,643, \
+ 684,645,646,647,683,647,608,645,682,647,682,646,610,648,682,610, \
+ 684,612,612,683,684,647,647,646,680,646,646,745,648,646,644,647, \
+ 648,647,678,646,647,683,642,682,682,684,646,645,642,647,646,677, \
+ 646,647,683,678,680,646,646,643,681,647,683,645,666,755,756,645, \
+ 643,647,646,647,647,648,674,755,756,674,647,643,680,682,684,680, \
+ 680,682,682,675,682,677,666,683,680,682,682,684,680,681,674,674 \
+
+#define MPFR_MUL_THRESHOLD 19
+#define MPFR_EXP_2_THRESHOLD 411 /* bits */
+#define MPFR_EXP_THRESHOLD 45200 /* bits */
+
+/**************************************
+ * Threshold for PentiumPro/Pentium M *
+ **************************************/
+#elif defined (__tune_pentiumpro__) || defined (__tune_i686__) || defined (__i386) /* we consider all other 386's here */
+
+/* Generated by MPFR's tuneup.c, 2009-02-09, gcc 4.3 */
+/* toto.loria.fr (Pentium M) with gmp-4.2.4 */
+
+#define MPFR_MULHIGH_TAB \
+ -1,-1,-1,-1,0,0,0,0,0,0,0,0,0,0,0,0, \
+ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, \
+ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,30, \
+ 30,30,28,30,30,30,30,30,30,34,34,34,32,32,44,38, \
+ 43,39,44,44,41,41,44,44,47,43,48,44,47,51,48,48, \
+ 47,55,48,48,55,51,56,52,51,55,60,56,55,55,60,60, \
+ 55,63,60,60,63,59,60,60,63,63,60,60,59,67,68,60, \
+ 63,67,60,68,75,81,68,68,81,81,78,86,81,81,84,80, \
+ 80,80,81,81,85,85,86,86,84,84,85,91,86,86,87,87, \
+ 85,91,92,92,93,99,91,97,98,86,93,93,97,91,92,92, \
+ 90,99,91,97,98,95,96,93,97,115,128,116,117,111,115,109, \
+ 128,128,129,129,121,115,128,116,126,129,127,121,140,128,129,129, \
+ 141,129,126,126,127,127,128,128,129,129,138,138,139,127,128,128, \
+ 129,153,134,150,139,139,140,140,141,141,134,162,139,147,140,144, \
+ 141,141,150,174,175,151,140,164,165,165,150,154,155,151,152,152, \
+ 153,165,150,162,163,151,152,176,177,165,158,162,171,163,164,164, \
+ 164,164,165,165,176,166,177,172,168,163,189,189,165,175,176,176, \
+ 177,177,183,188,179,189,180,180,186,176,177,177,183,188,189,189, \
+ 185,180,186,201,177,187,183,188,189,189,200,200,201,191,192,177, \
+ 188,198,189,204,195,200,201,201,212,192,188,188,189,194,200,200, \
+ 221,239,240,240,199,235,236,230,237,231,220,208,239,239,234,240, \
+ 229,247,236,230,231,231,238,238,239,239,240,240,247,265,248,236, \
+ 237,237,256,238,239,257,258,240,253,253,254,266,249,255,256,274, \
+ 275,275,276,276,247,253,254,272,267,273,256,256,257,257,276,258, \
+ 256,256,257,257,258,258,266,294,288,267,275,275,276,276,291,270, \
+ 257,271,293,272,294,294,260,267,261,275,276,276,291,270,271,285, \
+ 293,293,294,273,288,267,275,275,276,276,277,284,285,285,293,293, \
+ 273,294,274,274,275,289,276,290,291,277,292,285,293,293,294,294, \
+ 297,297,330,290,267,307,276,276,309,293,294,310,311,319,328,328, \
+ 329,289,330,290,283,283,348,292,293,381,294,382,327,343,384,384, \
+ 289,289,290,330,291,339,308,308,381,341,342,366,383,383,384,384, \
+ 345,345,378,378,379,323,324,380,381,381,382,366,375,383,384,376, \
+ 383,365,384,384,376,340,341,341,342,378,379,379,380,371,417,381, \
+ 382,382,383,383,384,384,376,376,377,377,378,378,379,379,380,380, \
+ 381,381,382,382,383,419,420,384,376,412,413,377,378,414,415,388, \
+ 416,416,417,381,382,382,419,419,384,384,412,385,386,395,396,378, \
+ 379,419,380,420,381,381,382,382,383,383,384,384,395,405,456,406, \
+ 417,417,418,418,419,419,420,420,381,491,492,492,383,413,414,384, \
+ 415,455,456,426,427,417,488,418,419,419,420,420,441,491,492,492, \
+ 383,413,434,454,455,415,416,416,417,417,418,418,459,429,490,420, \
+ 486,420,487,454,422,499,423,489,490,490,491,491,492,492,416,438, \
+ 450,527,528,528,419,485,453,420,432,465,455,455,456,456,479,479, \
+ 480,491,492,459,493,526,527,494,495,484,485,452,453,453,487,454, \
+ 455,499,489,456,490,490,480,491,492,481,482,526,527,527,528,528, \
+ 521,485,486,474,499,487,524,524,489,489,490,490,491,491,492,492, \
+ 505,505,506,482,483,459,520,472,497,485,498,498,499,535,488,524, \
+ 489,525,490,490,491,527,528,492,529,493,494,506,495,483,484,484, \
+ 485,521,522,486,487,487,488,488,489,489,490,562,563,491,492,492, \
+ 528,528,490,555,556,491,492,492,506,519,520,507,521,521,522,522, \
+ 523,523,524,524,525,564,552,500,501,527,528,528,529,542,556,530, \
+ 531,531,558,532,533,520,534,521,548,561,562,562,563,563,564,564, \
+ 526,526,527,527,528,528,542,555,556,530,531,557,558,558,598,559, \
+ 585,557,558,558,559,531,560,560,561,561,562,548,535,549,564,564, \
+ 579,537,580,552,567,567,582,568,569,597,556,584,585,571,600,572, \
+ 573,559,588,588,533,561,590,604,605,563,564,564,565,607,608,580, \
+ 567,609,582,582,597,597,528,598,599,571,558,600,559,559,560,602, \
+ 599,599,600,600,556,601,602,617,633,603,604,634,635,560,561,636, \
+ 562,562,563,563,564,564,580,715,716,596,717,717,553,568,569,569, \
+ 570,600,631,601,602,662,663,663,604,604,695,710,711,711,712,712, \
+ 713,713,714,714,715,715,596,656,657,657,583,598,599,599,600,600, \
+ 689,769,770,594,691,707,708,596,597,693,710,662,695,711,712,712, \
+ 713,697,698,714,715,699,716,716,717,717,750,702,719,751,720,704, \
+ 705,705,722,738,755,771,708,708,709,741,742,710,711,743,744,712, \
+ 713,761,714,714,715,715,716,716,717,717,766,750,751,751,736,736 \
+
+#define MPFR_SQRHIGH_TAB \
+ -1,0,-1,-1,-1,-1,-1,-1,-1,-1,0,0,-1,-1,8,8, \
+ 9,9,10,12,11,11,12,12,13,13,14,14,15,15,16,16, \
+ 17,17,18,18,19,19,20,20,21,21,22,22,23,23,24,24, \
+ 25,25,26,26,27,27,28,28,29,29,30,30,31,31,32,32, \
+ 33,33,34,34,35,35,36,36,37,37,38,38,39,39,40,42, \
+ 41,41,42,42,43,43,44,44,45,45,46,46,47,47,48,48, \
+ 49,49,50,50,51,51,52,52,53,53,54,54,55,55,56,56, \
+ 57,57,58,58,59,59,60,60,61,61,62,62,63,63,64,64, \
+ 65,65,66,66,67,67,68,68,69,84,70,70,74,71,72,72, \
+ 73,73,74,74,78,75,76,76,77,77,84,78,79,79,80,80, \
+ 81,84,88,82,83,83,84,84,85,85,92,92,87,87,88,88, \
+ 89,89,96,90,91,91,92,92,93,93,100,94,95,95,96,96, \
+ 97,97,98,98,99,99,100,100,101,101,102,102,103,103,104,104, \
+ 105,105,106,106,107,123,108,108,129,129,126,126,135,135,112,128, \
+ 129,129,134,134,123,135,128,140,141,129,134,134,135,135,140,132, \
+ 133,141,122,122,123,135,136,140,153,141,126,126,127,159,128,128, \
+ 129,129,135,135,141,131,132,132,133,133,134,134,135,135,136,136, \
+ 137,137,138,138,159,139,140,140,141,141,142,142,143,143,144,144, \
+ 145,165,146,146,147,147,148,148,149,149,150,150,151,171,152,152, \
+ 153,153,154,159,155,155,156,156,157,157,158,158,159,159,170,170, \
+ 161,161,162,162,163,163,188,164,165,165,166,166,167,167,168,186, \
+ 187,169,170,170,171,171,172,172,173,173,174,174,175,175,176,176, \
+ 177,177,178,178,179,179,180,180,181,181,182,182,183,183,184,184, \
+ 185,185,186,186,187,187,188,188,189,189,190,190,191,191,192,192, \
+ 193,193,194,194,195,195,196,196,197,197,198,198,199,199,200,200, \
+ 201,201,202,202,203,203,204,204,205,205,206,206,207,207,208,208, \
+ 209,209,210,210,211,211,212,212,213,213,214,214,215,215,216,216, \
+ 217,217,218,218,219,261,248,220,221,249,222,222,223,223,224,224, \
+ 225,225,226,226,227,227,228,228,229,229,230,230,231,231,248,232, \
+ 233,233,234,234,235,235,236,236,261,285,270,238,239,239,240,248, \
+ 249,249,242,242,243,251,260,244,261,245,246,246,247,247,248,248, \
+ 249,249,250,250,251,251,252,252,285,253,254,254,255,255,256,256, \
+ 257,257,258,258,259,259,260,260,261,261,262,262,263,263,264,264, \
+ 265,265,266,284,285,267,268,268,269,296,297,270,271,271,272,272, \
+ 273,273,274,274,275,275,276,276,277,277,278,278,279,279,280,280, \
+ 281,281,282,282,283,283,284,284,285,321,295,286,287,287,288,288, \
+ 289,289,320,320,321,321,292,292,293,313,314,294,295,295,296,296, \
+ 297,297,298,298,309,299,300,300,301,301,302,332,333,333,304,384, \
+ 375,305,306,306,307,307,308,308,309,309,320,310,311,311,312,312, \
+ 313,363,394,384,375,375,316,376,357,317,348,348,369,319,320,320, \
+ 321,321,366,366,378,323,324,324,402,369,359,381,382,382,383,383, \
+ 373,384,396,374,375,375,387,365,366,399,389,400,401,390,391,402, \
+ 381,381,382,404,405,405,384,373,374,374,375,386,387,387,388,388, \
+ 378,378,390,401,391,391,403,414,404,382,383,383,395,395,396,418, \
+ 401,401,378,402,403,427,428,404,405,417,382,418,419,419,420,384, \
+ 409,421,422,410,423,423,436,436,437,401,402,366,427,463,464,368, \
+ 369,369,370,370,455,371,372,456,373,433,434,434,435,471,472,472, \
+ 473,377,474,378,379,451,380,380,381,381,382,382,383,383,384,384, \
+ 398,398,399,438,465,387,427,440,441,389,455,390,391,391,392,392, \
+ 393,393,394,420,395,395,396,396,397,397,398,398,399,399,400,400, \
+ 401,401,402,402,455,403,404,508,405,405,406,406,407,433,434,447, \
+ 474,435,436,410,411,411,412,412,413,413,414,492,415,454,455,416, \
+ 417,417,418,418,419,419,420,420,421,421,422,464,465,423,424,424, \
+ 425,425,426,426,427,455,456,484,485,429,430,472,473,431,432,432, \
+ 433,461,434,434,435,477,436,436,437,437,438,438,509,453,468,510, \
+ 441,441,512,456,471,527,528,472,543,445,544,446,447,447,448,504, \
+ 449,509,510,450,451,451,452,527,453,483,514,544,545,455,456,546, \
+ 502,517,518,518,504,519,535,460,461,551,552,507,508,463,464,509, \
+ 510,525,526,526,527,542,543,543,544,544,545,515,516,546,472,472, \
+ 473,563,474,474,475,535,551,476,477,582,478,553,479,479,480,480, \
+ 481,545,546,530,531,547,516,564,597,549,550,486,487,551,488,536, \
+ 537,489,490,490,491,491,492,492,493,493,494,494,495,495,496,496, \
+ 497,497,578,546,499,499,500,500,501,501,502,598,599,503,504,504, \
+ 505,505,506,506,507,507,508,508,509,509,510,510,511,511,512,512 \
+
+#define MPFR_MUL_THRESHOLD 10 /* limbs */
+#define MPFR_EXP_2_THRESHOLD 32 /* bits */
+#define MPFR_EXP_THRESHOLD 6268 /* bits */
+
+/*****************************
+ * Threshold for IA64 *
+ *****************************/
+#elif defined (__ia64) || defined (__itanium__) || defined (__tune_ia64__)
+
+/* Generated by MPFR's tuneup.c, 2009-02-10, gcc 4.3 */
+/* pipol13.inrialpes.fr (McKinley) with gmp-4.2.4 */
+
+#define MPFR_MULHIGH_TAB \
+ -1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, \
+ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, \
+ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, \
+ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, \
+ 0,0,0,0,0,0,0,0,0,0,44,44,47,47,44,44, \
+ 45,51,48,48,49,49,48,48,55,51,56,52,51,51,52,60, \
+ 59,59,56,56,63,63,64,64,63,63,64,60,67,67,68,68, \
+ 67,67,64,64,71,71,72,72,71,71,72,90,75,73,76,76, \
+ 89,89,84,90,82,88,92,89,90,90,88,88,89,89,90,90, \
+ 88,88,89,98,90,99,97,94,98,95,96,96,94,103,104,104, \
+ 108,105,109,106,110,110,108,108,109,103,104,104,111,111,109,118, \
+ 116,116,120,120,121,124,128,116,117,123,124,121,125,125,126,141, \
+ 141,129,134,126,127,123,140,132,137,141,126,150,139,143,132,140, \
+ 141,141,146,146,135,139,140,140,141,141,146,138,139,147,152,140, \
+ 141,141,162,162,143,139,140,156,165,153,158,154,147,147,152,152, \
+ 153,153,150,162,155,155,156,152,153,153,174,170,171,171,156,156, \
+ 159,164,165,165,176,166,167,167,168,168,164,179,165,165,166,176, \
+ 177,177,188,188,189,174,180,180,176,171,177,192,173,183,189,189, \
+ 180,195,186,186,182,177,183,188,189,184,190,190,201,191,192,192, \
+ 188,203,204,199,200,200,201,201,192,207,208,213,204,189,210,210, \
+ 191,203,204,216,223,211,212,212,213,225,196,202,203,215,216,216, \
+ 217,211,212,236,237,267,208,220,233,263,264,228,259,223,260,260, \
+ 261,267,220,262,257,275,276,258,259,259,266,260,261,267,262,268, \
+ 263,263,264,270,271,283,272,266,267,267,268,262,269,263,264,276, \
+ 270,284,278,264,272,293,294,273,274,274,261,261,262,269,270,263, \
+ 264,264,265,265,266,287,267,267,303,275,269,262,312,270,285,285, \
+ 279,279,266,301,267,274,303,275,276,276,277,284,285,292,272,272, \
+ 294,294,330,288,289,303,276,290,291,284,285,285,286,279,287,287, \
+ 281,313,314,314,315,291,348,308,301,285,294,294,303,303,296,304, \
+ 305,305,306,330,307,307,308,292,309,309,294,326,311,311,312,328, \
+ 321,321,306,322,347,323,308,308,333,317,326,326,343,311,328,384, \
+ 329,337,338,330,315,323,348,348,357,341,342,374,343,343,312,368, \
+ 365,347,339,366,367,340,332,368,360,378,352,370,353,380,336,381, \
+ 346,328,365,383,348,366,358,358,350,359,387,369,361,379,344,407, \
+ 408,354,355,346,383,383,420,420,367,421,377,377,378,378,406,415, \
+ 398,416,372,372,418,418,419,419,411,384,385,412,413,395,414,414, \
+ 419,419,410,420,381,381,372,422,383,393,414,384,415,415,416,416, \
+ 427,417,408,418,419,419,420,420,391,391,442,392,453,413,414,454, \
+ 435,445,446,456,407,427,448,438,449,409,410,420,411,431,432,422, \
+ 423,453,434,414,415,425,436,446,427,417,418,468,489,419,420,420, \
+ 420,486,410,454,422,444,445,456,446,446,447,458,459,492,482,438, \
+ 439,417,418,418,452,452,453,420,432,454,444,444,445,445,479,446, \
+ 458,447,448,459,460,427,450,494,495,528,441,452,453,453,454,487, \
+ 488,488,456,500,501,490,491,480,492,492,526,449,450,483,484,484, \
+ 461,497,474,474,463,499,488,488,489,453,490,490,527,491,492,492, \
+ 529,505,506,482,471,495,508,484,485,497,498,498,523,487,488,524, \
+ 525,465,490,490,515,515,516,492,493,481,482,494,495,483,544,520, \
+ 509,485,546,486,535,499,512,488,525,489,490,526,527,527,528,492, \
+ 567,528,490,503,504,491,531,492,571,506,507,572,573,573,522,561, \
+ 562,510,524,524,538,525,500,565,527,514,515,528,516,555,569,556, \
+ 557,518,519,571,533,533,560,560,522,561,536,536,537,537,564,525, \
+ 526,552,527,566,567,580,607,542,543,543,596,596,545,558,559,559, \
+ 599,599,600,600,531,531,532,588,631,603,548,576,577,563,536,564, \
+ 565,593,594,566,567,567,582,582,555,555,598,556,557,571,572,558, \
+ 559,559,560,560,561,603,618,604,563,633,634,564,593,621,594,636, \
+ 567,567,568,554,555,597,598,598,599,571,572,600,629,629,588,630, \
+ 599,644,630,570,571,586,587,632,633,618,634,634,635,605,621,606, \
+ 607,592,593,593,594,594,595,595,596,626,627,627,628,643,569,644, \
+ 600,600,601,586,587,632,633,618,634,634,635,605,606,636,637,637, \
+ 638,593,594,669,670,625,641,596,672,582,643,598,644,644,600,585, \
+ 673,657,674,674,643,643,644,596,597,597,598,710,663,599,600,600, \
+ 633,681,698,602,603,619,620,796,797,797,798,798,623,671,672,640, \
+ 801,785,786,786,787,819,788,628,629,661,662,790,791,807,824,696, \
+ 681,777,714,698,699,795,796,796,781,717,798,798,815,735,704,816 \
+
+#define MPFR_SQRHIGH_TAB \
+ -1,0,0,0,0,0,0,0,0,0,0,0,0,7,8,8, \
+ 9,9,10,10,11,11,12,12,13,13,14,14,15,15,16,16, \
+ 17,17,18,18,19,19,20,20,21,21,22,22,23,23,24,24, \
+ 25,25,26,26,27,27,28,28,29,29,30,30,31,31,32,32, \
+ 33,33,34,34,35,35,36,36,37,37,38,38,39,39,40,40, \
+ 41,41,42,42,43,43,44,44,45,45,46,46,47,47,48,48, \
+ 49,49,50,50,51,51,52,52,53,53,54,54,55,55,56,56, \
+ 57,57,58,58,59,59,60,60,61,61,62,62,63,63,64,64, \
+ 65,65,66,66,67,67,68,68,69,69,70,70,71,71,72,72, \
+ 73,73,74,74,75,75,76,76,77,77,78,78,79,79,80,80, \
+ 81,81,82,82,83,83,84,84,85,85,86,86,87,87,88,88, \
+ 89,89,90,90,91,91,92,92,93,93,94,94,95,95,96,96, \
+ 97,97,98,98,99,99,100,100,101,101,102,102,103,103,104,104, \
+ 105,105,106,106,107,107,108,108,109,109,110,110,111,111,112,112, \
+ 113,113,114,114,115,115,116,116,137,117,118,118,119,119,120,120, \
+ 121,141,138,138,147,123,124,124,153,141,146,138,143,147,128,152, \
+ 144,129,130,130,131,151,152,132,153,138,144,159,165,135,136,136, \
+ 137,137,138,138,139,139,140,140,141,141,142,142,143,143,144,144, \
+ 145,145,146,146,147,147,148,148,149,149,150,150,151,151,152,152, \
+ 153,153,154,154,155,155,156,156,157,177,183,158,159,159,160,160, \
+ 161,161,162,162,163,163,164,164,165,165,166,166,167,167,168,168, \
+ 169,169,170,170,171,171,172,172,173,173,174,174,175,175,176,176, \
+ 177,177,178,178,179,179,180,180,181,181,182,182,183,183,184,184, \
+ 185,185,186,186,187,187,188,188,189,189,190,190,191,191,192,192, \
+ 193,193,194,194,195,195,196,196,197,197,198,198,199,199,200,200, \
+ 201,201,202,202,203,203,204,204,205,205,206,206,207,207,208,208, \
+ 209,209,210,210,211,211,212,212,213,213,214,214,215,215,216,216, \
+ 217,217,218,218,219,219,220,220,221,221,222,222,223,223,224,224, \
+ 225,225,226,226,227,227,228,228,229,229,230,230,231,231,232,232, \
+ 233,233,234,234,235,235,236,236,237,237,238,238,239,239,240,240, \
+ 241,241,242,242,243,243,244,244,245,245,246,246,247,247,248,248, \
+ 249,249,250,250,251,251,252,252,253,253,254,254,255,255,256,256, \
+ 257,257,258,258,259,259,260,260,261,261,262,262,263,263,264,264, \
+ 265,265,266,266,267,267,268,268,269,269,270,270,271,271,272,272, \
+ 273,273,274,274,275,275,276,276,277,277,278,278,279,279,280,280, \
+ 281,281,282,282,283,283,284,284,285,285,286,286,287,287,288,288, \
+ 289,289,290,290,291,291,292,292,293,333,294,294,295,295,296,296, \
+ 297,297,298,308,309,299,300,300,301,301,302,302,303,303,304,304, \
+ 305,305,306,306,307,307,308,308,309,309,310,310,311,311,312,312, \
+ 313,353,354,314,315,315,316,316,317,317,318,318,319,319,320,320, \
+ 321,409,355,322,323,323,324,324,325,325,326,326,415,393,328,328, \
+ 329,329,407,330,331,342,354,420,432,410,334,411,335,335,336,336, \
+ 414,425,338,415,416,372,406,340,341,341,342,419,409,376,377,410, \
+ 411,411,379,412,424,347,348,348,437,382,416,394,395,351,429,418, \
+ 437,353,354,354,355,415,404,428,429,441,406,358,443,359,360,420, \
+ 409,421,446,362,411,411,364,424,413,413,366,366,367,415,416,452, \
+ 441,369,430,370,419,455,456,456,469,373,374,374,375,375,376,376, \
+ 377,377,378,378,379,379,380,464,381,381,466,454,455,455,456,456, \
+ 385,411,464,386,465,387,388,388,389,389,455,390,391,391,392,483, \
+ 393,393,394,472,473,421,422,474,423,410,411,437,438,490,452,426, \
+ 427,427,428,428,429,429,456,417,418,444,432,406,407,407,408,408, \
+ 409,409,410,410,411,411,464,438,413,413,414,414,415,415,416,416, \
+ 417,417,418,418,419,531,420,420,421,463,492,422,423,423,424,424, \
+ 425,425,426,426,427,427,428,428,429,429,430,430,431,487,488,474, \
+ 489,433,434,434,435,435,436,436,437,437,438,438,439,509,510,440, \
+ 441,441,442,498,499,527,528,444,445,515,516,502,503,517,504,490, \
+ 449,509,510,450,451,451,452,452,453,453,454,454,455,455,456,456, \
+ 457,457,458,503,504,459,460,460,461,461,462,462,463,463,464,464, \
+ 465,465,466,466,467,467,468,468,469,469,470,470,471,471,472,472, \
+ 473,473,474,474,565,520,536,476,477,537,478,523,479,479,480,540, \
+ 481,481,482,482,483,483,532,484,485,485,486,486,487,519,552,552, \
+ 569,489,490,554,491,491,492,492,493,573,494,494,495,495,496,496, \
+ 497,497,498,498,499,499,500,500,533,501,550,566,567,503,504,504, \
+ 505,505,506,506,507,507,508,508,509,509,510,510,511,607,608,512 \
+
+#define MPFR_MUL_THRESHOLD 6 /* limbs */
+#define MPFR_EXP_2_THRESHOLD 455 /* bits */
+#define MPFR_EXP_THRESHOLD 10001 /* bits */
+
+/*********************
+ * Threshold for ARM *
+ *********************/
+#elif defined (__arm__)
+
+/* Generated by MPFR's tuneup.c, 2009-02-10, gcc 4.3 */
+/* gcc50.fsffrance.org (armv5tel-unknown-linux-gnueabi) with gmp-4.2.4 */
+
+#define MPFR_MULHIGH_TAB \
+ -1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, \
+ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, \
+ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, \
+ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, \
+ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, \
+ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, \
+ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, \
+ 0,0,0,0,0,0,0,0,0,0,0,80,0,0,72,72, \
+ 80,77,72,72,76,76,80,77,84,84,76,76,77,77,84,84, \
+ 76,76,80,80,84,84,76,76,80,80,84,84,79,79,80,80, \
+ 81,81,82,82,83,83,84,84,103,103,104,104,96,111,103,112, \
+ 104,104,111,111,112,112,128,128,120,120,127,127,119,119,111,111, \
+ 113,113,114,126,127,127,128,128,129,129,118,134,123,135,128,136, \
+ 137,137,134,134,127,127,128,128,141,137,142,142,143,143,144,144, \
+ 145,145,142,142,143,143,144,144,145,145,150,150,151,151,152,144, \
+ 149,149,158,150,159,159,160,160,161,153,158,154,159,159,160,160, \
+ 159,159,160,160,166,166,152,167,168,168,159,159,160,160,161,161, \
+ 167,167,163,163,164,159,150,160,161,166,167,162,168,158,159,159, \
+ 160,155,156,156,162,157,158,158,159,159,160,160,166,161,162,167, \
+ 168,168,159,159,160,160,206,206,207,167,168,168,224,224,210,205, \
+ 167,167,168,168,223,223,224,224,219,225,226,208,221,239,240,240, \
+ 223,205,224,224,225,207,208,256,239,209,222,222,253,223,224,224, \
+ 225,255,256,256,221,221,222,222,223,223,224,224,255,255,232,256, \
+ 233,233,234,234,223,253,254,254,243,249,250,250,239,233,240,288, \
+ 256,242,243,271,272,251,252,287,288,288,240,240,241,255,256,256, \
+ 257,257,258,272,252,252,253,253,254,254,255,255,256,256,271,257, \
+ 272,272,266,273,288,274,268,268,290,269,284,284,271,271,272,272, \
+ 280,280,288,281,282,282,290,304,333,298,271,299,307,286,287,287, \
+ 305,305,298,306,315,299,300,300,301,309,326,286,287,303,304,288, \
+ 289,305,330,330,331,331,332,324,333,333,286,286,287,335,336,336, \
+ 353,329,330,330,331,331,332,332,333,357,302,342,359,351,352,328, \
+ 353,345,354,330,331,331,332,332,333,357,358,358,359,367,368,368, \
+ 329,356,357,330,331,331,332,377,378,378,379,379,380,380,372,381, \
+ 355,382,383,356,357,384,367,403,404,404,405,405,379,352,353,362, \
+ 363,363,373,382,383,374,402,384,403,403,404,404,405,378,379,352, \
+ 353,362,381,381,427,427,428,428,429,429,367,376,377,431,405,432, \
+ 429,379,380,380,381,381,402,402,403,453,404,384,405,375,406,406, \
+ 407,377,378,378,379,379,380,380,381,381,432,432,383,383,384,384, \
+ 385,425,426,396,407,477,408,408,419,399,400,380,381,401,402,392, \
+ 393,403,404,404,405,405,476,426,427,417,428,428,429,429,430,430, \
+ 409,420,421,476,477,477,401,401,402,402,403,403,426,426,427,405, \
+ 417,428,429,429,430,430,431,431,432,421,477,422,423,423,424,424, \
+ 425,425,426,459,471,449,428,428,429,429,430,452,453,475,465,454, \
+ 455,455,456,456,468,468,469,458,470,492,438,471,472,450,473,473, \
+ 449,449,450,450,451,451,452,452,453,453,454,478,479,467,468,456, \
+ 481,469,470,458,459,471,496,472,473,461,462,462,463,475,476,476, \
+ 477,477,478,466,467,467,468,468,469,469,470,482,483,483,484,496, \
+ 497,485,486,474,475,475,476,500,501,477,490,490,491,491,492,492, \
+ 463,476,477,477,478,478,479,492,480,493,494,481,495,495,483,483, \
+ 497,497,498,498,499,473,474,474,475,488,489,463,464,464,465,465, \
+ 466,453,467,467,468,494,495,482,483,470,471,497,498,498,499,499, \
+ 500,474,475,475,476,463,464,490,491,491,492,492,493,480,481,572, \
+ 501,473,474,474,475,475,476,476,477,477,478,478,479,479,620,480, \
+ 481,495,496,496,497,595,596,498,499,499,612,486,613,669,614,600, \
+ 489,615,616,672,477,477,618,618,619,619,620,620,607,621,664,622, \
+ 623,623,624,624,625,611,612,668,669,669,614,614,615,671,672,616, \
+ 614,644,645,645,616,616,647,617,618,618,619,664,650,665,666,651, \
+ 667,667,668,668,669,654,655,655,656,656,657,657,658,658,659,659, \
+ 660,645,646,661,662,617,648,648,619,649,650,665,666,711,712,667, \
+ 668,668,714,669,715,715,716,671,672,672,673,658,674,704,690,675, \
+ 657,657,658,658,659,691,708,708,693,709,710,710,711,711,712,712, \
+ 713,713,714,666,667,667,668,716,765,717,718,718,719,719,720,720, \
+ 737,705,690,722,723,755,756,740,725,757,758,710,759,759,712,712, \
+ 761,713,714,714,715,715,764,764,765,717,766,766,767,767,768,768 \
+
+#define MPFR_SQRHIGH_TAB \
+ -1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, \
+ 0,0,0,0,0,0,0,12,13,13,14,14,15,15,16,16, \
+ 17,17,18,18,19,19,20,20,21,21,22,22,23,23,24,24, \
+ 25,25,26,26,27,27,28,28,29,29,30,30,31,31,32,32, \
+ 33,33,34,34,35,35,36,36,37,37,38,38,39,39,40,40, \
+ 41,41,42,42,43,43,44,44,45,45,46,46,47,47,48,48, \
+ 49,49,50,50,51,51,52,52,53,53,54,54,55,55,56,56, \
+ 57,57,58,58,59,59,60,60,61,61,62,62,63,63,64,64, \
+ 65,65,66,66,67,67,68,68,69,69,70,70,71,71,72,72, \
+ 73,73,74,74,75,75,76,76,77,77,78,78,79,79,80,80, \
+ 81,81,82,82,83,83,84,84,85,85,86,86,87,87,88,88, \
+ 89,89,90,90,91,91,92,92,93,93,94,94,95,95,96,96, \
+ 97,97,98,98,99,99,100,100,101,101,102,102,103,103,104,104, \
+ 105,105,106,106,107,107,108,108,109,109,110,110,111,111,112,112, \
+ 113,113,114,114,115,115,116,116,117,117,118,118,119,119,120,120, \
+ 121,121,122,122,123,123,124,124,125,125,126,126,127,127,128,128, \
+ 129,129,130,130,131,131,132,132,133,133,134,134,135,135,136,136, \
+ 137,137,138,138,139,139,140,140,141,141,142,142,143,143,144,144, \
+ 145,145,146,146,147,147,148,148,149,149,150,150,151,151,152,152, \
+ 168,153,154,154,155,155,156,156,157,177,168,168,174,159,160,160, \
+ 161,173,174,168,175,163,164,164,165,183,184,184,167,167,168,168, \
+ 169,169,170,170,171,171,172,172,173,173,174,174,175,175,176,176, \
+ 177,177,178,178,179,179,180,180,181,181,182,182,183,183,184,184, \
+ 185,185,186,186,187,187,188,188,189,189,190,190,191,191,192,192, \
+ 193,193,194,194,195,195,196,196,197,197,198,198,199,199,200,200, \
+ 201,201,202,202,203,224,204,204,205,205,206,206,207,207,208,208, \
+ 209,209,210,210,211,211,212,212,213,213,214,214,215,215,216,216, \
+ 217,217,218,218,219,219,220,220,221,221,222,222,223,223,224,224, \
+ 225,225,226,226,227,227,228,228,229,229,230,230,231,231,232,232, \
+ 233,233,234,234,235,235,236,236,237,237,238,238,239,239,240,240, \
+ 241,241,242,242,243,243,244,244,245,245,246,246,247,247,248,248, \
+ 249,249,250,250,251,251,252,252,253,253,254,254,255,255,256,256, \
+ 257,257,258,258,259,259,260,260,261,261,262,262,263,263,264,264, \
+ 265,265,266,266,267,267,268,268,269,269,270,270,271,271,272,272, \
+ 273,273,274,274,275,302,303,276,304,277,278,296,279,288,316,280, \
+ 281,308,309,282,283,319,320,302,303,285,286,313,314,287,288,288, \
+ 329,289,320,310,311,321,312,312,313,313,334,324,325,325,326,326, \
+ 327,337,328,328,329,329,330,330,331,301,302,332,333,333,334,304, \
+ 305,305,306,306,357,337,338,328,349,349,350,350,351,351,352,352, \
+ 343,343,344,354,355,365,366,366,367,387,388,388,319,349,370,370, \
+ 365,354,355,355,356,378,379,368,369,380,381,392,393,393,328,328, \
+ 329,373,374,396,386,386,387,387,388,399,378,400,412,423,336,424, \
+ 403,403,393,371,372,405,406,406,396,407,408,397,398,409,410,432, \
+ 345,422,423,401,402,413,414,392,393,415,416,427,428,417,429,440, \
+ 437,401,402,438,439,451,452,452,453,417,418,430,431,431,432,432, \
+ 433,421,422,446,447,459,460,424,425,365,366,366,367,451,452,368, \
+ 369,429,430,454,455,443,444,456,457,445,446,482,483,375,376,376, \
+ 377,437,438,450,451,475,476,488,501,465,478,430,431,383,384,384, \
+ 385,385,386,503,504,491,440,388,389,389,390,507,391,391,392,392, \
+ 393,393,394,394,395,434,396,487,449,449,450,398,399,399,400,400, \
+ 401,401,402,402,403,403,404,404,405,405,445,458,459,446,460,408, \
+ 461,409,410,410,411,411,412,412,413,413,414,492,415,415,416,416, \
+ 417,417,418,418,419,489,420,420,421,421,422,422,423,423,424,480, \
+ 425,425,426,468,497,427,428,428,429,429,430,430,431,431,432,432, \
+ 433,433,434,434,435,435,436,436,437,437,438,438,439,439,440,440, \
+ 441,441,442,442,443,443,444,444,445,445,446,446,447,447,448,448, \
+ 449,449,450,450,451,451,452,452,453,453,454,454,455,455,456,456, \
+ 457,457,458,458,459,459,460,520,521,461,462,462,463,463,464,464, \
+ 465,465,466,511,512,467,468,468,469,469,470,470,471,471,472,472, \
+ 473,473,474,474,475,535,536,551,477,477,478,478,479,479,480,480, \
+ 481,481,482,482,483,483,484,484,485,485,486,486,487,487,488,488, \
+ 489,489,490,490,491,491,492,492,493,493,494,494,495,495,496,496, \
+ 497,497,498,498,499,499,500,500,501,501,502,502,503,503,504,504, \
+ 505,505,506,506,507,507,508,508,509,509,510,510,511,511,512,512 \
+
+#define MPFR_MUL_THRESHOLD 7 /* limbs */
+#define MPFR_EXP_2_THRESHOLD 407 /* bits */
+#define MPFR_EXP_THRESHOLD 4030 /* bits */
+
+/*************************
+ * Threshold for PowerPC *
+ *************************/
+#elif defined (__PPC64__)
+
+/* Generated by MPFR's tuneup.c, 2009-02-18, gcc 4.1.2 */
+/* gcc40.fsffrance.org (powerpc64-unknown-linux-gnu) with gmp-4.2.4 */
+
+#define MPFR_MULHIGH_TAB \
+ -1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, \
+ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, \
+ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, \
+ 26,28,26,27,28,32,32,32,35,36,35,35,35,35,36,40, \
+ 39,39,44,40,39,39,40,40,43,43,44,44,43,43,44,48, \
+ 51,47,48,48,51,47,56,52,51,51,52,52,59,57,60,56, \
+ 57,63,56,64,63,63,72,64,71,71,72,72,71,69,72,72, \
+ 71,71,72,80,77,79,72,80,71,79,72,80,87,87,88,80, \
+ 80,80,87,87,79,79,80,80,84,105,88,82,92,104,93,87, \
+ 88,88,104,104,105,105,103,88,104,104,105,102,103,103,101,104, \
+ 105,105,103,100,101,104,105,105,112,103,104,104,105,111,112,109, \
+ 116,116,117,117,127,124,116,116,117,117,124,115,122,116,117,117, \
+ 117,125,122,122,123,123,128,128,129,129,126,138,127,131,132,128, \
+ 129,129,130,134,147,147,152,140,141,141,138,146,139,139,140,140, \
+ 141,141,146,146,147,147,144,144,153,141,158,146,147,163,164,152, \
+ 153,161,150,162,163,159,164,160,165,165,170,170,171,175,152,164, \
+ 174,164,175,180,156,171,172,177,188,158,159,189,165,165,171,211, \
+ 177,187,188,213,174,189,210,180,176,201,207,177,213,213,189,204, \
+ 205,205,206,211,212,207,188,213,189,189,210,210,211,206,212,212, \
+ 208,213,189,209,210,205,211,206,212,212,213,208,209,209,235,215, \
+ 209,209,210,216,211,211,212,212,213,213,220,214,209,227,216,216, \
+ 211,211,212,236,213,237,232,232,239,239,240,234,235,235,236,230, \
+ 237,237,232,232,233,233,234,234,235,235,236,236,237,237,238,238, \
+ 239,257,240,240,235,235,236,260,261,237,268,256,275,275,276,264, \
+ 235,249,257,264,237,237,252,259,309,260,261,254,311,311,312,312, \
+ 264,264,272,293,259,259,260,260,261,261,311,311,312,305,285,285, \
+ 293,272,308,301,309,309,261,310,276,276,305,312,299,348,307,300, \
+ 294,315,302,309,310,310,311,311,305,305,306,306,321,300,343,301, \
+ 313,321,330,306,315,347,308,332,341,357,342,310,343,343,312,312, \
+ 337,305,306,306,347,347,348,348,325,317,326,326,311,343,312,312, \
+ 321,321,346,346,339,347,348,348,357,357,342,366,335,343,384,384, \
+ 377,321,338,338,347,339,348,380,357,381,382,350,351,375,376,376, \
+ 347,347,348,348,340,340,341,341,342,342,343,343,344,344,345,381, \
+ 382,346,347,347,348,348,349,376,350,359,378,351,352,415,416,389, \
+ 345,345,346,346,347,383,384,384,376,412,377,377,378,378,379,379, \
+ 380,380,381,381,373,382,383,383,384,384,403,376,395,377,378,396, \
+ 419,419,380,400,381,381,382,382,383,383,404,374,375,375,376,376, \
+ 377,377,378,378,419,389,420,400,401,381,382,382,383,423,454,384, \
+ 405,415,416,456,387,427,398,418,419,399,420,420,401,441,402,422, \
+ 423,413,414,454,415,415,416,416,417,407,418,398,419,419,420,420, \
+ 420,420,443,454,444,455,456,456,457,435,447,436,437,415,438,416, \
+ 417,450,451,418,419,463,442,420,432,454,488,444,489,445,446,446, \
+ 458,458,459,459,427,438,461,439,451,440,419,452,486,453,454,454, \
+ 455,455,445,445,446,490,491,447,448,448,449,482,461,450,451,484, \
+ 461,461,474,450,451,487,464,488,489,489,466,478,479,491,492,456, \
+ 481,481,482,494,507,519,520,460,461,497,486,474,487,535,536,488, \
+ 513,525,454,526,455,527,528,528,469,505,482,494,519,519,496,508, \
+ 509,521,486,546,523,523,512,488,489,513,514,490,563,527,528,492, \
+ 515,502,555,542,491,556,557,492,558,519,520,559,495,534,561,535, \
+ 562,510,511,563,564,564,630,552,553,631,632,528,529,633,634,634, \
+ 635,635,636,636,520,520,560,534,535,561,627,562,537,550,629,564, \
+ 617,526,527,618,619,632,633,633,621,634,609,609,636,636,546,611, \
+ 557,627,628,614,573,629,630,630,603,631,562,632,619,619,634,634, \
+ 635,635,622,636,567,623,624,596,611,625,626,626,641,627,628,614, \
+ 643,643,630,616,617,617,618,618,619,633,634,620,635,621,622,622, \
+ 707,609,624,624,625,639,612,612,627,641,642,628,629,643,602,616, \
+ 614,629,630,615,616,616,617,707,708,633,634,634,635,620,621,636, \
+ 607,622,623,623,624,639,625,670,686,701,702,627,643,643,644,644, \
+ 630,705,706,706,707,632,633,633,709,619,620,710,711,636,697,652, \
+ 698,638,639,624,625,685,686,626,702,627,703,643,689,629,630,630, \
+ 705,705,690,690,691,707,708,708,629,677,630,630,631,631,632,632, \
+ 633,697,698,634,635,699,700,684,701,701,702,702,703,639,704,672, \
+ 705,641,706,690,707,707,708,708,709,693,710,710,775,695,712,712, \
+ 713,777,778,778,763,715,716,780,717,701,702,686,703,703,704,704 \
+
+#define MPFR_SQRHIGH_TAB \
+ -1,0,0,0,0,0,0,0,0,0,6,6,7,7,8,8, \
+ 9,9,10,10,11,11,12,12,13,13,14,14,15,15,16,16, \
+ 17,17,18,18,19,19,20,20,21,21,22,22,23,23,24,24, \
+ 25,25,26,26,27,27,28,28,29,29,32,32,32,32,32,32, \
+ 33,33,34,34,35,35,36,36,37,37,38,38,39,39,40,40, \
+ 41,41,42,42,43,43,44,44,45,45,46,46,47,47,48,48, \
+ 51,49,50,50,51,51,52,52,53,53,54,54,55,55,56,56, \
+ 57,57,58,58,59,59,64,64,63,67,64,64,63,63,64,64, \
+ 65,65,66,66,67,67,68,68,69,69,70,76,71,71,72,72, \
+ 73,73,74,80,75,75,76,76,77,77,78,78,79,79,80,80, \
+ 81,81,82,82,83,83,84,84,85,85,86,92,102,87,88,88, \
+ 89,104,102,105,106,91,92,104,102,105,94,112,104,110,96,105, \
+ 105,105,102,114,123,107,108,120,117,117,122,122,123,103,104,120, \
+ 105,105,106,106,107,107,108,108,109,109,110,110,111,111,112,112, \
+ 113,113,114,114,115,115,116,116,117,117,118,118,119,131,128,120, \
+ 121,129,122,122,123,135,140,124,125,125,126,126,127,127,128,128, \
+ 129,129,130,130,131,131,132,132,133,158,159,134,135,135,136,156, \
+ 137,147,138,138,139,139,140,140,141,141,142,142,143,143,144,144, \
+ 145,145,146,146,147,147,148,188,189,149,150,150,151,191,187,192, \
+ 153,153,189,189,155,155,156,196,187,187,188,188,189,189,190,195, \
+ 191,161,162,204,163,187,188,194,189,189,190,190,191,209,192,216, \
+ 199,193,194,188,189,189,190,190,191,191,192,192,187,193,194,194, \
+ 195,201,202,196,197,197,198,210,199,199,206,212,213,213,184,184, \
+ 215,185,216,204,187,187,188,188,189,189,190,220,191,227,228,192, \
+ 193,193,194,194,195,237,224,224,225,225,240,198,213,199,200,200, \
+ 201,201,202,202,203,224,225,204,205,226,227,206,207,207,208,236, \
+ 237,209,210,210,211,260,240,240,248,213,214,214,215,215,216,216, \
+ 217,217,218,218,219,219,220,220,221,221,222,222,223,223,224,224, \
+ 225,225,226,226,227,227,228,228,229,229,230,230,231,231,264,232, \
+ 249,233,234,266,267,235,236,268,237,237,302,238,239,239,240,240, \
+ 241,241,242,242,243,243,308,244,245,245,246,246,247,247,248,312, \
+ 249,249,250,250,251,251,252,324,285,309,254,254,327,295,272,312, \
+ 311,329,312,303,259,259,260,260,261,306,298,307,308,308,309,327, \
+ 328,310,311,293,294,285,304,268,269,305,306,315,316,307,308,326, \
+ 327,327,319,310,311,311,312,348,277,304,314,323,324,324,334,316, \
+ 317,308,309,336,319,328,329,320,312,366,286,286,359,332,333,342, \
+ 309,329,320,320,321,341,332,342,363,353,354,344,345,325,326,366, \
+ 297,337,338,328,329,339,350,350,351,351,362,352,353,373,374,344, \
+ 345,325,326,366,307,337,328,348,339,369,360,360,361,381,382,382, \
+ 333,373,314,364,315,315,346,336,337,377,378,368,369,319,320,380, \
+ 365,398,366,344,389,345,379,324,402,347,348,392,360,327,328,328, \
+ 329,329,330,374,375,375,387,365,366,366,356,389,390,357,380,369, \
+ 381,370,371,382,383,339,373,384,396,396,397,342,343,343,344,344, \
+ 345,345,346,346,347,347,348,348,393,415,416,350,351,351,407,385, \
+ 401,353,354,402,355,391,392,416,417,429,418,418,419,419,420,420, \
+ 445,409,362,362,363,363,364,436,365,437,438,366,427,415,416,416, \
+ 441,417,418,454,455,431,432,420,373,433,434,434,435,375,436,376, \
+ 377,377,378,402,415,379,380,380,381,381,382,382,383,383,384,384, \
+ 385,411,425,425,426,387,388,388,389,402,416,429,391,456,470,392, \
+ 432,393,394,446,447,395,396,474,397,397,398,398,399,399,400,400, \
+ 401,401,402,402,429,429,404,404,405,405,458,406,459,472,473,408, \
+ 474,409,410,410,411,411,412,412,413,413,414,414,415,454,455,455, \
+ 417,417,418,418,419,419,420,420,421,421,422,422,423,423,494,424, \
+ 425,425,426,510,567,427,428,428,429,429,556,430,431,431,432,558, \
+ 489,489,490,434,435,435,562,436,437,437,438,438,439,439,440,440, \
+ 441,567,568,456,569,569,570,584,585,445,446,558,573,447,448,560, \
+ 569,569,570,600,451,571,452,557,558,453,454,454,455,560,456,456, \
+ 562,457,458,563,564,564,595,580,581,461,462,582,583,598,599,599, \
+ 585,585,571,556,557,557,558,573,574,469,470,470,471,636,472,472, \
+ 473,563,564,564,550,580,581,581,582,567,568,598,599,584,585,585, \
+ 561,593,594,594,563,483,580,564,581,597,598,630,487,487,488,600, \
+ 601,601,602,618,635,571,572,572,557,573,574,558,591,591,608,592, \
+ 593,641,498,562,563,579,564,564,565,501,566,550,551,503,600,584, \
+ 585,617,618,506,635,603,604,572,573,573,574,622,591,575,576,592 \
+
+#define MPFR_MUL_THRESHOLD 6 /* limbs */
+#define MPFR_EXP_2_THRESHOLD 744 /* bits */
+#define MPFR_EXP_THRESHOLD 11929 /* bits */
+
+/*******************************
+ * Threshold for Sparc 64 bits *
+ *******************************/
+#elif defined (__sparc_v9__)
+
+/* Generated by MPFR's tuneup.c, 2009-02-20, gcc 4.1.2 */
+/* gcc54.fsffrance.org (sparc64-unknown-linux-gnu) with gmp-4.2.4 */
+
+#define MPFR_MULHIGH_TAB \
+ -1,-1,0,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,11,-1, \
+ 11,13,13,15,13,15,15,16,17,17,17,18,19,19,19,21, \
+ 21,19,23,21,23,24,25,25,26,27,26,30,25,27,27,34, \
+ 30,25,30,33,34,31,34,34,34,34,34,37,38,37,38,38, \
+ 39,39,42,40,43,41,42,42,45,43,50,46,45,45,46,48, \
+ 49,49,50,54,49,49,50,60,59,59,60,60,59,59,60,60, \
+ 67,67,60,68,61,65,68,66,67,67,68,68,69,75,76,68, \
+ 73,75,76,68,69,81,76,76,75,81,84,76,81,75,74,80, \
+ 98,92,96,84,76,97,80,83,84,99,76,85,92,110,99,96, \
+ 97,97,92,95,111,102,106,97,92,101,111,108,109,109,110,101, \
+ 99,99,100,109,110,110,108,108,112,109,110,110,111,111,109,118, \
+ 119,122,120,117,118,121,122,122,114,123,133,121,134,122,132,132, \
+ 129,133,134,126,135,123,132,140,125,137,138,126,135,147,140,144, \
+ 133,137,138,134,147,147,132,140,153,141,138,142,147,147,148,136, \
+ 137,177,138,138,159,147,152,156,177,177,174,146,175,159,176,168, \
+ 173,177,170,174,167,171,180,156,177,177,158,174,159,191,164,200, \
+ 159,174,180,180,176,201,172,177,173,178,179,199,180,180,176,201, \
+ 177,177,183,173,179,199,195,175,201,201,177,177,198,203,199,194, \
+ 200,200,201,201,177,197,198,193,189,199,200,225,201,191,197,192, \
+ 198,198,199,224,200,200,201,201,202,207,198,213,199,224,200,225, \
+ 221,203,228,204,223,223,218,224,201,225,208,220,221,245,228,228, \
+ 241,223,218,224,225,237,220,220,221,227,228,252,229,235,236,224, \
+ 243,225,226,250,293,293,228,276,229,289,248,272,249,237,244,280, \
+ 269,227,282,282,271,271,272,266,243,249,250,292,293,293,288,288, \
+ 256,291,285,285,293,286,294,287,288,281,303,289,283,276,270,277, \
+ 285,278,279,272,294,294,302,288,296,289,283,290,291,284,278,278, \
+ 293,286,294,294,288,288,310,275,325,283,312,284,313,292,293,314, \
+ 301,315,330,316,282,324,325,276,291,291,348,292,286,300,301,294, \
+ 337,313,330,314,315,323,324,292,325,357,366,350,311,303,344,320, \
+ 321,329,330,330,339,363,292,348,293,333,294,294,327,295,320,328, \
+ 329,329,322,322,371,331,316,324,333,325,334,366,327,311,312,312, \
+ 313,353,362,346,355,355,372,348,341,349,366,342,327,351,360,328, \
+ 329,329,330,357,331,358,332,350,324,333,325,352,380,371,363,363, \
+ 391,373,365,365,339,366,349,358,359,395,333,378,370,343,344,371, \
+ 354,363,364,364,365,365,411,366,358,349,404,350,360,360,361,388, \
+ 362,380,390,381,400,382,401,392,402,375,376,403,395,368,378,378, \
+ 369,379,380,380,381,411,372,402,453,443,394,374,425,435,436,426, \
+ 427,427,438,438,399,409,400,420,431,401,432,432,413,393,424,414, \
+ 425,375,406,436,387,397,418,418,399,419,390,430,401,401,442,432, \
+ 433,393,394,474,405,435,436,426,437,437,388,438,429,449,400,440, \
+ 486,409,443,476,411,477,423,434,468,446,425,458,459,437,471,526, \
+ 450,450,473,528,441,474,519,442,432,410,477,433,478,434,435,435, \
+ 436,502,481,437,471,449,483,516,473,528,518,452,453,519,454,520, \
+ 433,532,533,434,479,512,513,513,525,459,427,438,516,516,517,528, \
+ 437,437,474,474,511,523,512,524,525,525,514,502,539,527,480,528, \
+ 517,529,506,518,519,531,520,520,533,521,522,498,499,487,500,524, \
+ 525,525,526,490,527,539,504,600,493,589,518,518,507,483,544,496, \
+ 521,593,474,522,535,511,524,524,585,597,526,514,599,587,564,528, \
+ 580,580,594,594,569,582,596,596,532,519,585,533,534,599,561,600, \
+ 523,588,524,524,590,564,591,526,579,527,528,515,516,529,608,595, \
+ 570,596,597,597,598,598,599,521,535,587,588,523,524,602,577,590, \
+ 526,578,540,605,671,593,594,594,608,582,557,609,597,558,585,598, \
+ 599,599,600,586,587,601,602,532,589,589,590,576,661,535,578,578, \
+ 593,579,566,594,581,581,554,596,611,597,570,626,599,599,614,600, \
+ 573,671,588,574,603,589,590,590,591,605,606,662,663,579,594,608, \
+ 609,595,680,652,597,667,598,598,599,599,600,600,573,587,672,588, \
+ 584,659,600,600,601,616,602,602,618,588,619,664,605,590,591,591, \
+ 592,652,593,608,609,669,595,595,596,581,672,597,613,673,599,629, \
+ 690,600,601,616,617,662,663,663,739,679,650,635,621,681,622,652, \
+ 668,623,609,669,655,670,671,671,657,672,733,658,644,674,600,660, \
+ 641,657,722,658,675,723,660,676,661,677,678,710,663,743,664,680, \
+ 649,665,714,730,699,747,684,636,685,605,654,670,671,671,752,672, \
+ 721,673,658,706,675,659,676,676,677,597,742,742,743,679,728,744, \
+ 681,665,666,730,651,747,652,668,669,685,750,670,735,687,736,752 \
+
+#define MPFR_SQRHIGH_TAB \
+ -1,0,0,0,0,-1,0,-1,5,5,6,6,7,7,9,8, \
+ 9,9,10,10,12,12,13,12,13,13,14,14,15,16,17,18, \
+ 18,17,18,19,19,24,20,20,22,24,22,22,23,23,24,25, \
+ 25,25,26,26,28,30,28,28,29,29,30,32,31,31,32,33, \
+ 33,33,36,36,39,35,36,36,43,37,38,38,39,39,40,40, \
+ 41,41,44,42,43,47,50,44,45,47,52,46,47,47,48,48, \
+ 49,49,50,50,53,51,52,52,53,55,54,54,57,57,56,56, \
+ 57,57,58,58,59,59,60,60,61,61,62,62,63,63,64,64, \
+ 65,65,66,69,70,67,68,74,69,75,70,73,74,71,72,72, \
+ 73,73,80,77,75,75,76,76,86,92,78,81,82,79,80,80, \
+ 81,87,88,85,86,83,84,84,85,88,92,101,87,87,88,88, \
+ 89,89,90,90,91,91,92,92,93,102,97,94,101,95,96,117, \
+ 97,97,98,98,99,123,108,100,113,113,106,114,107,103,104,108, \
+ 125,105,122,114,123,107,124,108,129,109,114,110,111,111,112,112, \
+ 113,113,114,114,115,115,116,132,117,125,130,126,119,119,120,120, \
+ 121,121,122,130,131,123,124,124,149,137,126,126,143,143,132,144, \
+ 129,144,130,130,131,131,132,132,133,138,144,134,135,150,156,136, \
+ 137,137,138,138,139,144,150,140,141,141,142,142,143,143,144,159, \
+ 145,145,146,146,147,147,148,148,149,149,150,150,151,151,152,152, \
+ 153,178,184,154,155,155,166,171,157,177,158,178,159,174,160,160, \
+ 161,161,162,162,163,163,164,164,165,165,166,166,167,197,180,198, \
+ 187,193,170,182,171,201,172,172,173,191,174,174,175,181,176,176, \
+ 177,177,178,178,179,179,180,180,181,181,182,182,183,183,184,184, \
+ 185,185,186,186,187,187,188,188,189,189,190,190,191,191,192,192, \
+ 193,200,194,194,195,195,196,196,197,197,198,198,199,199,200,221, \
+ 222,229,230,237,224,203,204,218,219,205,206,206,207,207,208,208, \
+ 209,237,210,210,211,225,212,212,213,213,214,214,215,222,216,216, \
+ 217,245,218,246,240,219,241,234,228,221,222,222,223,223,259,231, \
+ 233,233,226,266,267,227,228,260,261,229,230,246,255,255,240,256, \
+ 233,233,234,234,235,235,236,236,237,245,238,262,239,239,240,240, \
+ 249,241,250,242,243,243,252,244,245,245,246,246,247,255,312,256, \
+ 249,249,250,250,315,267,268,300,261,293,294,294,295,303,304,304, \
+ 293,293,303,294,295,295,305,296,288,288,289,316,317,335,318,318, \
+ 319,319,275,293,294,303,268,295,269,296,306,297,271,316,272,272, \
+ 273,273,301,292,302,311,357,303,286,286,287,278,342,279,325,298, \
+ 326,353,336,318,328,319,320,311,303,303,313,304,305,332,315,297, \
+ 309,309,300,290,311,301,292,292,373,363,354,334,355,375,356,346, \
+ 297,327,348,328,319,299,310,330,301,301,322,312,313,373,334,324, \
+ 335,345,306,356,307,327,348,348,309,329,330,370,321,311,312,312, \
+ 313,343,354,354,355,345,336,346,317,387,348,318,319,339,340,340, \
+ 321,321,322,344,411,323,324,357,402,358,370,326,371,338,350,427, \
+ 373,362,341,330,375,342,409,332,333,366,389,334,357,335,402,336, \
+ 392,337,338,338,405,350,384,373,363,341,342,364,376,365,366,388, \
+ 389,356,357,346,347,358,370,348,349,382,383,372,351,384,385,352, \
+ 413,353,366,354,355,355,404,392,393,357,358,358,359,407,408,360, \
+ 421,397,398,398,399,363,364,364,365,449,390,366,379,367,368,368, \
+ 369,369,370,382,371,371,372,444,373,373,374,374,375,375,388,400, \
+ 377,377,378,378,427,379,380,380,381,381,382,382,383,383,384,384, \
+ 385,437,399,386,387,387,440,388,389,389,390,442,391,482,392,392, \
+ 445,445,407,420,408,395,396,474,397,410,398,476,399,399,400,413, \
+ 401,401,415,402,403,416,404,404,444,405,406,406,407,407,408,408, \
+ 409,409,410,410,411,411,412,412,413,413,414,414,415,519,468,416, \
+ 445,417,418,418,419,433,420,434,435,435,436,436,423,423,424,466, \
+ 425,425,426,426,427,427,428,498,499,429,430,430,431,431,474,432, \
+ 433,433,434,462,463,435,436,436,437,493,480,438,439,439,440,454, \
+ 497,441,442,442,443,443,444,444,459,445,446,446,447,447,462,448, \
+ 449,449,450,465,451,451,452,452,453,453,454,454,455,455,486,456, \
+ 517,457,518,458,459,459,460,460,461,461,462,462,463,463,464,464, \
+ 465,465,466,466,467,467,468,468,469,469,470,470,471,501,502,472, \
+ 473,473,474,474,475,520,521,476,477,477,478,478,509,479,480,480, \
+ 481,481,482,482,483,483,484,484,501,485,486,486,487,487,488,488, \
+ 489,489,490,490,491,491,492,492,493,493,494,494,559,495,528,512, \
+ 497,497,514,498,499,499,500,500,533,501,502,502,503,503,504,504, \
+ 505,505,506,506,507,507,508,508,509,509,510,510,511,511,512,512 \
+
+#define MPFR_MUL_THRESHOLD 16 /* limbs */
+#define MPFR_EXP_2_THRESHOLD 64 /* bits */
+#define MPFR_EXP_THRESHOLD 9331 /* bits */
+
+/* __mips64? __mips? */
+#endif
+
+/*******************************
+ * Default values of Threshold *
+ *******************************/
+#ifndef MPFR_MULHIGH_TAB
+# define MPFR_MULHIGH_TAB -1,-1,-1,-1,-1,-1,-1,-1,0,0,0,0,0,0,0,0,0
+#endif
+#ifndef MPFR_SQRHIGH_TAB
+# define MPFR_SQRHIGH_TAB -1,-1,-1,-1,-1,-1,-1,-1,0,0,0,0,0,0,0,0,0
+#endif
+#ifndef MPFR_MUL_THRESHOLD
+# define MPFR_MUL_THRESHOLD 40
+#endif
+#ifndef MPFR_EXP_2_THRESHOLD
+# define MPFR_EXP_2_THRESHOLD 100 /* bits */
+#endif
+#ifndef MPFR_EXP_THRESHOLD
+# define MPFR_EXP_THRESHOLD 25000 /* bits */
+#endif
+#ifndef MPFR_SINCOS_THRESHOLD
+# define MPFR_SINCOS_THRESHOLD 30000 /* bits */
+#endif
+#ifndef MPFR_AI_THRESHOLD1
+# define MPFR_AI_THRESHOLD1 -13107
+#endif
+#ifndef MPFR_AI_THRESHOLD2
+# define MPFR_AI_THRESHOLD2 1311
+#endif
+#ifndef MPFR_AI_THRESHOLD3
+# define MPFR_AI_THRESHOLD3 19661
+#endif
diff --git a/src/mpf2mpfr.h b/src/mpf2mpfr.h
new file mode 100644
index 000000000..f7f6971d6
--- /dev/null
+++ b/src/mpf2mpfr.h
@@ -0,0 +1,175 @@
+/* mpf2mpfr.h -- Compatibility include file with mpf.
+
+Copyright 1999, 2000, 2001, 2002, 2004, 2005, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#ifndef __MPFR_FROM_MPF__
+#define __MPFR_FROM_MPF__
+
+/* types */
+#define mpf_t mpfr_t
+#define mpf_srcptr mpfr_srcptr
+#define mpf_ptr mpfr_ptr
+
+/* Get current Rounding Mode */
+#ifndef MPFR_DEFAULT_RND
+# define MPFR_DEFAULT_RND mpfr_get_default_rounding_mode ()
+#endif
+
+/* mpf_init initalizes at 0 */
+#undef mpf_init
+#define mpf_init(x) mpfr_init_set_ui ((x), 0, MPFR_DEFAULT_RND)
+#undef mpf_init2
+#define mpf_init2(x,p) (mpfr_init2((x),(p)), mpfr_set_ui ((x), 0, MPFR_DEFAULT_RND))
+
+/* functions which don't take as argument the rounding mode */
+#undef mpf_ceil
+#define mpf_ceil mpfr_ceil
+#undef mpf_clear
+#define mpf_clear mpfr_clear
+#undef mpf_cmp
+#define mpf_cmp mpfr_cmp
+#undef mpf_cmp_si
+#define mpf_cmp_si mpfr_cmp_si
+#undef mpf_cmp_ui
+#define mpf_cmp_ui mpfr_cmp_ui
+#undef mpf_cmp_d
+#define mpf_cmp_d mpfr_cmp_d
+#undef mpf_eq
+#define mpf_eq mpfr_eq
+#undef mpf_floor
+#define mpf_floor mpfr_floor
+#undef mpf_get_prec
+#define mpf_get_prec mpfr_get_prec
+#undef mpf_integer_p
+#define mpf_integer_p mpfr_integer_p
+#undef mpf_random2
+#define mpf_random2 mpfr_random2
+#undef mpf_set_default_prec
+#define mpf_set_default_prec mpfr_set_default_prec
+#undef mpf_get_default_prec
+#define mpf_get_default_prec mpfr_get_default_prec
+#undef mpf_set_prec
+#define mpf_set_prec mpfr_set_prec
+#undef mpf_set_prec_raw
+#define mpf_set_prec_raw(x,p) mpfr_prec_round(x,p,MPFR_DEFAULT_RND)
+#undef mpf_trunc
+#define mpf_trunc mpfr_trunc
+#undef mpf_sgn
+#define mpf_sgn mpfr_sgn
+#undef mpf_swap
+#define mpf_swap mpfr_swap
+#undef mpf_dump
+#define mpf_dump mpfr_dump
+
+/* functions which take as argument the rounding mode */
+#undef mpf_abs
+#define mpf_abs(x,y) mpfr_abs(x,y,MPFR_DEFAULT_RND)
+#undef mpf_add
+#define mpf_add(x,y,z) mpfr_add(x,y,z,MPFR_DEFAULT_RND)
+#undef mpf_add_ui
+#define mpf_add_ui(x,y,z) mpfr_add_ui(x,y,z,MPFR_DEFAULT_RND)
+#undef mpf_div
+#define mpf_div(x,y,z) mpfr_div(x,y,z,MPFR_DEFAULT_RND)
+#undef mpf_div_ui
+#define mpf_div_ui(x,y,z) mpfr_div_ui(x,y,z,MPFR_DEFAULT_RND)
+#undef mpf_div_2exp
+#define mpf_div_2exp(x,y,z) mpfr_div_2exp(x,y,z,MPFR_DEFAULT_RND)
+#undef mpf_fits_slong_p
+#define mpf_fits_slong_p(x) mpfr_fits_slong_p(x,MPFR_DEFAULT_RND) /* was mpfr_fits_ulong_p: the unsigned variant rejects all negative values */
+#undef mpf_fits_ulong_p
+#define mpf_fits_ulong_p(x) mpfr_fits_ulong_p(x,MPFR_DEFAULT_RND)
+#undef mpf_fits_sint_p
+#define mpf_fits_sint_p(x) mpfr_fits_sint_p(x,MPFR_DEFAULT_RND) /* was mpfr_fits_uint_p: the unsigned variant rejects all negative values */
+#undef mpf_fits_uint_p
+#define mpf_fits_uint_p(x) mpfr_fits_uint_p(x,MPFR_DEFAULT_RND)
+#undef mpf_fits_sshort_p
+#define mpf_fits_sshort_p(x) mpfr_fits_sshort_p(x,MPFR_DEFAULT_RND) /* was mpfr_fits_ushort_p: the unsigned variant rejects all negative values */
+#undef mpf_fits_ushort_p
+#define mpf_fits_ushort_p(x) mpfr_fits_ushort_p(x,MPFR_DEFAULT_RND)
+#undef mpf_get_str
+#define mpf_get_str(x,y,z,t,u) mpfr_get_str(x,y,z,t,u,MPFR_DEFAULT_RND)
+#undef mpf_get_d
+#define mpf_get_d(x) mpfr_get_d(x,MPFR_DEFAULT_RND)
+#undef mpf_get_d_2exp
+#define mpf_get_d_2exp(e,x) mpfr_get_d_2exp(e,x,MPFR_DEFAULT_RND)
+#undef mpf_get_ui
+#define mpf_get_ui(x) mpfr_get_ui(x,MPFR_DEFAULT_RND)
+#undef mpf_get_si
+#define mpf_get_si(x) mpfr_get_si(x,MPFR_DEFAULT_RND) /* was mpfr_get_ui: returned the wrong (unsigned) conversion for negative values */
+#undef mpf_inp_str
+#define mpf_inp_str(x,y,z) mpfr_inp_str(x,y,z,MPFR_DEFAULT_RND)
+#undef mpf_set_str
+#define mpf_set_str(x,y,z) mpfr_set_str(x,y,z,MPFR_DEFAULT_RND)
+#undef mpf_init_set
+#define mpf_init_set(x,y) mpfr_init_set(x,y,MPFR_DEFAULT_RND)
+#undef mpf_init_set_d
+#define mpf_init_set_d(x,y) mpfr_init_set_d(x,y,MPFR_DEFAULT_RND)
+#undef mpf_init_set_si
+#define mpf_init_set_si(x,y) mpfr_init_set_si(x,y,MPFR_DEFAULT_RND)
+#undef mpf_init_set_str
+#define mpf_init_set_str(x,y,z) mpfr_init_set_str(x,y,z,MPFR_DEFAULT_RND)
+#undef mpf_init_set_ui
+#define mpf_init_set_ui(x,y) mpfr_init_set_ui(x,y,MPFR_DEFAULT_RND)
+#undef mpf_mul
+#define mpf_mul(x,y,z) mpfr_mul(x,y,z,MPFR_DEFAULT_RND)
+#undef mpf_mul_2exp
+#define mpf_mul_2exp(x,y,z) mpfr_mul_2exp(x,y,z,MPFR_DEFAULT_RND)
+#undef mpf_mul_ui
+#define mpf_mul_ui(x,y,z) mpfr_mul_ui(x,y,z,MPFR_DEFAULT_RND)
+#undef mpf_neg
+#define mpf_neg(x,y) mpfr_neg(x,y,MPFR_DEFAULT_RND)
+#undef mpf_out_str
+#define mpf_out_str(x,y,z,t) mpfr_out_str(x,y,z,t,MPFR_DEFAULT_RND)
+#undef mpf_pow_ui
+#define mpf_pow_ui(x,y,z) mpfr_pow_ui(x,y,z,MPFR_DEFAULT_RND)
+#undef mpf_reldiff
+#define mpf_reldiff(x,y,z) mpfr_reldiff(x,y,z,MPFR_DEFAULT_RND)
+#undef mpf_set
+#define mpf_set(x,y) mpfr_set(x,y,MPFR_DEFAULT_RND)
+#undef mpf_set_d
+#define mpf_set_d(x,y) mpfr_set_d(x,y,MPFR_DEFAULT_RND)
+#undef mpf_set_q
+#define mpf_set_q(x,y) mpfr_set_q(x,y,MPFR_DEFAULT_RND)
+#undef mpf_set_si
+#define mpf_set_si(x,y) mpfr_set_si(x,y,MPFR_DEFAULT_RND)
+#undef mpf_set_ui
+#define mpf_set_ui(x,y) mpfr_set_ui(x,y,MPFR_DEFAULT_RND)
+#undef mpf_set_z
+#define mpf_set_z(x,y) mpfr_set_z(x,y,MPFR_DEFAULT_RND)
+#undef mpf_sqrt
+#define mpf_sqrt(x,y) mpfr_sqrt(x,y,MPFR_DEFAULT_RND)
+#undef mpf_sqrt_ui
+#define mpf_sqrt_ui(x,y) mpfr_sqrt_ui(x,y,MPFR_DEFAULT_RND)
+#undef mpf_sub
+#define mpf_sub(x,y,z) mpfr_sub(x,y,z,MPFR_DEFAULT_RND)
+#undef mpf_sub_ui
+#define mpf_sub_ui(x,y,z) mpfr_sub_ui(x,y,z,MPFR_DEFAULT_RND)
+#undef mpf_ui_div
+#define mpf_ui_div(x,y,z) mpfr_ui_div(x,y,z,MPFR_DEFAULT_RND)
+#undef mpf_ui_sub
+#define mpf_ui_sub(x,y,z) mpfr_ui_sub(x,y,z,MPFR_DEFAULT_RND)
+#undef mpf_urandomb
+#define mpf_urandomb(x,y,n) mpfr_urandomb(x,y)
+
+#undef mpz_set_f
+#define mpz_set_f(z,f) mpfr_get_z(z,f,MPFR_DEFAULT_RND)
+
+#endif /* __MPFR_FROM_MPF__ */
diff --git a/src/mpfr-gmp.c b/src/mpfr-gmp.c
new file mode 100644
index 000000000..e8a3f2fbe
--- /dev/null
+++ b/src/mpfr-gmp.c
@@ -0,0 +1,386 @@
+/* mpfr_gmp -- Limited gmp-impl emulator
+ Modified version of the GMP files.
+
+Copyright 2004, 2005, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#include <stdlib.h> /* For malloc, free, realloc and abort */
+
+#include "mpfr-impl.h"
+
+#ifndef MPFR_HAVE_GMP_IMPL
+
+/* Global random state, created lazily: the RANDS macro (mpfr-gmp.h)
+ initializes mpfr_rands on first use and sets mpfr_rands_initialized;
+ RANDS_CLEAR disposes of the state and resets the flag. */
+char mpfr_rands_initialized = 0;
+gmp_randstate_t mpfr_rands;
+
+const struct bases mpfr_bases[257] =
+{
+ /* 0 */ {0.0},
+ /* 1 */ {1e37},
+ /* 2 */ {1.0000000000000000},
+ /* 3 */ {0.6309297535714574},
+ /* 4 */ {0.5000000000000000},
+ /* 5 */ {0.4306765580733931},
+ /* 6 */ {0.3868528072345416},
+ /* 7 */ {0.3562071871080222},
+ /* 8 */ {0.3333333333333333},
+ /* 9 */ {0.3154648767857287},
+ /* 10 */ {0.3010299956639812},
+ /* 11 */ {0.2890648263178878},
+ /* 12 */ {0.2789429456511298},
+ /* 13 */ {0.2702381544273197},
+ /* 14 */ {0.2626495350371935},
+ /* 15 */ {0.2559580248098155},
+ /* 16 */ {0.2500000000000000},
+ /* 17 */ {0.2446505421182260},
+ /* 18 */ {0.2398124665681314},
+ /* 19 */ {0.2354089133666382},
+ /* 20 */ {0.2313782131597592},
+ /* 21 */ {0.2276702486969530},
+ /* 22 */ {0.2242438242175754},
+ /* 23 */ {0.2210647294575037},
+ /* 24 */ {0.2181042919855316},
+ /* 25 */ {0.2153382790366965},
+ /* 26 */ {0.2127460535533632},
+ /* 27 */ {0.2103099178571525},
+ /* 28 */ {0.2080145976765095},
+ /* 29 */ {0.2058468324604344},
+ /* 30 */ {0.2037950470905062},
+ /* 31 */ {0.2018490865820999},
+ /* 32 */ {0.2000000000000000},
+ /* 33 */ {0.1982398631705605},
+ /* 34 */ {0.1965616322328226},
+ /* 35 */ {0.1949590218937863},
+ /* 36 */ {0.1934264036172708},
+ /* 37 */ {0.1919587200065601},
+ /* 38 */ {0.1905514124267734},
+ /* 39 */ {0.1892003595168700},
+ /* 40 */ {0.1879018247091076},
+ /* 41 */ {0.1866524112389434},
+ /* 42 */ {0.1854490234153689},
+ /* 43 */ {0.1842888331487062},
+ /* 44 */ {0.1831692509136336},
+ /* 45 */ {0.1820879004699383},
+ /* 46 */ {0.1810425967800402},
+ /* 47 */ {0.1800313266566926},
+ /* 48 */ {0.1790522317510414},
+ /* 49 */ {0.1781035935540111},
+ /* 50 */ {0.1771838201355579},
+ /* 51 */ {0.1762914343888821},
+ /* 52 */ {0.1754250635819545},
+ /* 53 */ {0.1745834300480449},
+ /* 54 */ {0.1737653428714400},
+ /* 55 */ {0.1729696904450771},
+ /* 56 */ {0.1721954337940981},
+ /* 57 */ {0.1714416005739134},
+ /* 58 */ {0.1707072796637201},
+ /* 59 */ {0.1699916162869140},
+ /* 60 */ {0.1692938075987814},
+ /* 61 */ {0.1686130986895011},
+ /* 62 */ {0.1679487789570419},
+ /* 63 */ {0.1673001788101741},
+ /* 64 */ {0.1666666666666667},
+ /* 65 */ {0.1660476462159378},
+ /* 66 */ {0.1654425539190583},
+ /* 67 */ {0.1648508567221603},
+ /* 68 */ {0.1642720499620502},
+ /* 69 */ {0.1637056554452156},
+ /* 70 */ {0.1631512196835108},
+ /* 71 */ {0.1626083122716342},
+ /* 72 */ {0.1620765243931223},
+ /* 73 */ {0.1615554674429964},
+ /* 74 */ {0.1610447717564444},
+ /* 75 */ {0.1605440854340214},
+ /* 76 */ {0.1600530732548213},
+ /* 77 */ {0.1595714156699382},
+ /* 78 */ {0.1590988078692941},
+ /* 79 */ {0.1586349589155960},
+ /* 80 */ {0.1581795909397823},
+ /* 81 */ {0.1577324383928644},
+ /* 82 */ {0.1572932473495469},
+ /* 83 */ {0.1568617748594410},
+ /* 84 */ {0.1564377883420715},
+ /* 85 */ {0.1560210650222250},
+ /* 86 */ {0.1556113914024939},
+ /* 87 */ {0.1552085627701551},
+ /* 88 */ {0.1548123827357682},
+ /* 89 */ {0.1544226628011101},
+ /* 90 */ {0.1540392219542636},
+ /* 91 */ {0.1536618862898642},
+ /* 92 */ {0.1532904886526781},
+ /* 93 */ {0.1529248683028321},
+ /* 94 */ {0.1525648706011593},
+ /* 95 */ {0.1522103467132434},
+ /* 96 */ {0.1518611533308632},
+ /* 97 */ {0.1515171524096389},
+ /* 98 */ {0.1511782109217764},
+ /* 99 */ {0.1508442006228941},
+ /* 100 */ {0.1505149978319906},
+ /* 101 */ {0.1501904832236880},
+ /* 102 */ {0.1498705416319474},
+ /* 103 */ {0.1495550618645152},
+ /* 104 */ {0.1492439365274121},
+ /* 105 */ {0.1489370618588283},
+ /* 106 */ {0.1486343375718350},
+ /* 107 */ {0.1483356667053617},
+ /* 108 */ {0.1480409554829326},
+ /* 109 */ {0.1477501131786861},
+ /* 110 */ {0.1474630519902391},
+ /* 111 */ {0.1471796869179852},
+ /* 112 */ {0.1468999356504447},
+ /* 113 */ {0.1466237184553111},
+ /* 114 */ {0.1463509580758620},
+ /* 115 */ {0.1460815796324244},
+ /* 116 */ {0.1458155105286054},
+ /* 117 */ {0.1455526803620167},
+ /* 118 */ {0.1452930208392429},
+ /* 119 */ {0.1450364656948130},
+ /* 120 */ {0.1447829506139581},
+ /* 121 */ {0.1445324131589439},
+ /* 122 */ {0.1442847926987864},
+ /* 123 */ {0.1440400303421672},
+ /* 124 */ {0.1437980688733776},
+ /* 125 */ {0.1435588526911310},
+ /* 126 */ {0.1433223277500932},
+ /* 127 */ {0.1430884415049874},
+ /* 128 */ {0.1428571428571428},
+ /* 129 */ {0.1426283821033600},
+ /* 130 */ {0.1424021108869747},
+ /* 131 */ {0.1421782821510107},
+ /* 132 */ {0.1419568500933153},
+ /* 133 */ {0.1417377701235801},
+ /* 134 */ {0.1415209988221527},
+ /* 135 */ {0.1413064939005528},
+ /* 136 */ {0.1410942141636095},
+ /* 137 */ {0.1408841194731412},
+ /* 138 */ {0.1406761707131039},
+ /* 139 */ {0.1404703297561400},
+ /* 140 */ {0.1402665594314587},
+ /* 141 */ {0.1400648234939879},
+ /* 142 */ {0.1398650865947379},
+ /* 143 */ {0.1396673142523192},
+ /* 144 */ {0.1394714728255649},
+ /* 145 */ {0.1392775294872041},
+ /* 146 */ {0.1390854521985406},
+ /* 147 */ {0.1388952096850913},
+ /* 148 */ {0.1387067714131417},
+ /* 149 */ {0.1385201075671774},
+ /* 150 */ {0.1383351890281539},
+ /* 151 */ {0.1381519873525671},
+ /* 152 */ {0.1379704747522905},
+ /* 153 */ {0.1377906240751463},
+ /* 154 */ {0.1376124087861776},
+ /* 155 */ {0.1374358029495937},
+ /* 156 */ {0.1372607812113589},
+ /* 157 */ {0.1370873187823978},
+ /* 158 */ {0.1369153914223921},
+ /* 159 */ {0.1367449754241439},
+ /* 160 */ {0.1365760475984821},
+ /* 161 */ {0.1364085852596902},
+ /* 162 */ {0.1362425662114337},
+ /* 163 */ {0.1360779687331669},
+ /* 164 */ {0.1359147715670014},
+ /* 165 */ {0.1357529539050150},
+ /* 166 */ {0.1355924953769864},
+ /* 167 */ {0.1354333760385373},
+ /* 168 */ {0.1352755763596663},
+ /* 169 */ {0.1351190772136599},
+ /* 170 */ {0.1349638598663645},
+ /* 171 */ {0.1348099059658080},
+ /* 172 */ {0.1346571975321549},
+ /* 173 */ {0.1345057169479844},
+ /* 174 */ {0.1343554469488779},
+ /* 175 */ {0.1342063706143054},
+ /* 176 */ {0.1340584713587979},
+ /* 177 */ {0.1339117329233981},
+ /* 178 */ {0.1337661393673756},
+ /* 179 */ {0.1336216750601996},
+ /* 180 */ {0.1334783246737591},
+ /* 181 */ {0.1333360731748201},
+ /* 182 */ {0.1331949058177136},
+ /* 183 */ {0.1330548081372441},
+ /* 184 */ {0.1329157659418126},
+ /* 185 */ {0.1327777653067443},
+ /* 186 */ {0.1326407925678156},
+ /* 187 */ {0.1325048343149731},
+ /* 188 */ {0.1323698773862368},
+ /* 189 */ {0.1322359088617821},
+ /* 190 */ {0.1321029160581950},
+ /* 191 */ {0.1319708865228925},
+ /* 192 */ {0.1318398080287045},
+ /* 193 */ {0.1317096685686114},
+ /* 194 */ {0.1315804563506306},
+ /* 195 */ {0.1314521597928493},
+ /* 196 */ {0.1313247675185968},
+ /* 197 */ {0.1311982683517524},
+ /* 198 */ {0.1310726513121843},
+ /* 199 */ {0.1309479056113158},
+ /* 200 */ {0.1308240206478128},
+ /* 201 */ {0.1307009860033912},
+ /* 202 */ {0.1305787914387386},
+ /* 203 */ {0.1304574268895465},
+ /* 204 */ {0.1303368824626505},
+ /* 205 */ {0.1302171484322746},
+ /* 206 */ {0.1300982152363760},
+ /* 207 */ {0.1299800734730872},
+ /* 208 */ {0.1298627138972530},
+ /* 209 */ {0.1297461274170591},
+ /* 210 */ {0.1296303050907487},
+ /* 211 */ {0.1295152381234257},
+ /* 212 */ {0.1294009178639407},
+ /* 213 */ {0.1292873358018581},
+ /* 214 */ {0.1291744835645007},
+ /* 215 */ {0.1290623529140715},
+ /* 216 */ {0.1289509357448472},
+ /* 217 */ {0.1288402240804449},
+ /* 218 */ {0.1287302100711566},
+ /* 219 */ {0.1286208859913518},
+ /* 220 */ {0.1285122442369443},
+ /* 221 */ {0.1284042773229231},
+ /* 222 */ {0.1282969778809442},
+ /* 223 */ {0.1281903386569819},
+ /* 224 */ {0.1280843525090381},
+ /* 225 */ {0.1279790124049077},
+ /* 226 */ {0.1278743114199984},
+ /* 227 */ {0.1277702427352035},
+ /* 228 */ {0.1276667996348261},
+ /* 229 */ {0.1275639755045533},
+ /* 230 */ {0.1274617638294791},
+ /* 231 */ {0.1273601581921740},
+ /* 232 */ {0.1272591522708010},
+ /* 233 */ {0.1271587398372755},
+ /* 234 */ {0.1270589147554692},
+ /* 235 */ {0.1269596709794558},
+ /* 236 */ {0.1268610025517973},
+ /* 237 */ {0.1267629036018709},
+ /* 238 */ {0.1266653683442337},
+ /* 239 */ {0.1265683910770258},
+ /* 240 */ {0.1264719661804097},
+ /* 241 */ {0.1263760881150453},
+ /* 242 */ {0.1262807514205999},
+ /* 243 */ {0.1261859507142915},
+ /* 244 */ {0.1260916806894653},
+ /* 245 */ {0.1259979361142023},
+ /* 246 */ {0.1259047118299582},
+ /* 247 */ {0.1258120027502338},
+ /* 248 */ {0.1257198038592741},
+ /* 249 */ {0.1256281102107963},
+ /* 250 */ {0.1255369169267456},
+ /* 251 */ {0.1254462191960791},
+ /* 252 */ {0.1253560122735751},
+ /* 253 */ {0.1252662914786691},
+ /* 254 */ {0.1251770521943144},
+ /* 255 */ {0.1250882898658681},
+ /* 256 */ {0.1250000000000000},
+};
+
+/* Report a failed MPFR assertion on stderr and abort (never returns).
+ A NULL or empty filename suppresses the "file:" prefix; linenum == -1
+ suppresses the "line:" part. Called through ASSERT_FAIL (mpfr-gmp.h). */
+void
+mpfr_assert_fail (const char *filename, int linenum,
+ const char *expr)
+{
+ if (filename != NULL && filename[0] != '\0')
+ {
+ fprintf (stderr, "%s:", filename);
+ if (linenum != -1)
+ fprintf (stderr, "%d: ", linenum);
+ }
+ fprintf (stderr, "MPFR assertion failed: %s\n", expr);
+ abort();
+}
+
+#ifdef mp_get_memory_functions
+
+/* Caches of GMP's current memory functions, filled in lazily by the
+ MPFR_GET_MEMFUNC macro (mpfr-gmp.h) via mp_get_memory_functions. */
+/* putting 0 as initial values forces those symbols to be fully defined,
+ and always resolved, otherwise they are only tentatively defined, which
+ leads to problems on e.g. MacOS, cf
+ http://lists.gforge.inria.fr/pipermail/mpc-discuss/2008-November/000048.html
+ and http://software.intel.com/en-us/articles/intelr-fortran-compiler-for-mac-os-non_lazy_ptr-unresolved-references-from-linking
+ Note that using ranlib -c or libtool -c is another fix.
+*/
+void * (*mpfr_allocate_func) (size_t) = 0;
+void * (*mpfr_reallocate_func) (void *,size_t, size_t) = 0;
+void (*mpfr_free_func) (void *, size_t) = 0;
+
+#endif
+
+/* malloc wrapper: returns a block of 'size' bytes, or prints a message
+ on stderr and aborts on failure (there is no way to report an
+ out-of-memory condition to MPFR callers). */
+void *
+mpfr_default_allocate (size_t size)
+{
+ void *ret;
+ ret = malloc (size);
+ if (ret == NULL)
+ {
+ fprintf (stderr, "MPFR: Can't allocate memory (size=%lu)\n",
+ (unsigned long) size);
+ abort ();
+ }
+ return ret;
+}
+
+/* realloc wrapper: grows/shrinks oldptr to new_size bytes, aborting with
+ a message on stderr on failure. old_size is unused by realloc itself
+ but is part of the GMP reallocate-function signature. */
+void *
+mpfr_default_reallocate (void *oldptr, size_t old_size, size_t new_size)
+{
+ void *ret;
+ ret = realloc (oldptr, new_size);
+ if (ret == NULL)
+ {
+ fprintf (stderr,
+ "MPFR: Can't reallocate memory (old_size=%lu new_size=%lu)\n",
+ (unsigned long) old_size, (unsigned long) new_size);
+ abort ();
+ }
+ return ret;
+}
+
+/* free wrapper; blk_size is unused but required by the GMP
+ free-function signature. */
+void
+mpfr_default_free (void *blk_ptr, size_t blk_size)
+{
+ free (blk_ptr);
+}
+
+/* Heap fallback for TMP_ALLOC (mpfr-gmp.h), used when a request is too
+ large for alloca: allocates 'size' bytes, records the block in a new
+ marker node pushed onto the *tmp_marker list (so mpfr_tmp_free can
+ release everything later), and returns the block. Aborts on OOM via
+ mpfr_default_allocate. */
+void *
+mpfr_tmp_allocate (struct tmp_marker **tmp_marker, size_t size)
+{
+ struct tmp_marker *head;
+
+ head = (struct tmp_marker *)
+ mpfr_default_allocate (sizeof (struct tmp_marker));
+ head->ptr = mpfr_default_allocate (size);
+ head->size = size;
+ head->next = *tmp_marker;
+ *tmp_marker = head;
+ return head->ptr;
+}
+
+/* Release every block registered by mpfr_tmp_allocate: walks the marker
+ list, freeing each payload and then its marker node. Safe on an empty
+ (NULL) list; used by the TMP_FREE macro (mpfr-gmp.h). */
+void
+mpfr_tmp_free (struct tmp_marker *tmp_marker)
+{
+ struct tmp_marker *t;
+
+ while (tmp_marker != NULL)
+ {
+ t = tmp_marker;
+ mpfr_default_free (t->ptr, t->size);
+ tmp_marker = t->next;
+ mpfr_default_free (t, sizeof (struct tmp_marker));
+ }
+}
+
+#endif /* Have gmp-impl.h */
diff --git a/src/mpfr-gmp.h b/src/mpfr-gmp.h
new file mode 100644
index 000000000..6faed59cf
--- /dev/null
+++ b/src/mpfr-gmp.h
@@ -0,0 +1,314 @@
+/* Interface to replace gmp-impl.h
+
+Copyright 2004, 2005, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#ifndef __GMPFR_GMP_H__
+#define __GMPFR_GMP_H__
+
+#ifndef __MPFR_IMPL_H__
+# error "mpfr-impl.h not included"
+#endif
+
+#include <limits.h> /* For INT_MAX, ... */
+#include <string.h> /* For memcpy, memset and memmove */
+
+/* The following tries to get a good version of alloca.
+ See gmp-impl.h for implementation details and original version */
+/* FIXME: the autoconf manual gives a different piece of code under the
+ documentation of the AC_FUNC_ALLOCA macro. Should we switch to it? */
+#ifndef alloca
+# if defined ( __GNUC__ )
+# define alloca __builtin_alloca
+# elif defined (__DECC)
+# define alloca(x) __ALLOCA(x)
+# elif defined (_MSC_VER)
+# include <malloc.h>
+# define alloca _alloca
+# elif defined (HAVE_ALLOCA_H)
+# include <alloca.h>
+# elif defined (_AIX) || defined (_IBMR2)
+# pragma alloca
+# else
+void *alloca (size_t);
+# endif
+#endif
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+/* Define GMP_NUMB_BITS
+ Can't use sizeof(mp_limb_t) since it should be a preprocessor constant */
+#if defined(GMP_NUMB_BITS) /* GMP 4.1.2 or above */
+# ifndef GMP_NAIL_BITS
+#  define GMP_NAIL_BITS 0
+# endif
+#elif defined (__GMP_BITS_PER_MP_LIMB) /* Older versions 4.x.x */
+# define GMP_NUMB_BITS __GMP_BITS_PER_MP_LIMB
+/* GMP_NAIL_BITS gets its default of 0 just below */
+# ifndef GMP_NAIL_BITS
+# define GMP_NAIL_BITS 0
+# endif
+#else
+# error "Could not detect GMP_NUMB_BITS. Try with gmp internal files."
+#endif
+
+/* Define some macros */
+#define BYTES_PER_MP_LIMB (GMP_NUMB_BITS/CHAR_BIT)
+
+#define MP_LIMB_T_MAX (~(mp_limb_t)0)
+
+#define ULONG_HIGHBIT (ULONG_MAX ^ ((unsigned long) ULONG_MAX >> 1))
+#define UINT_HIGHBIT (UINT_MAX ^ ((unsigned) UINT_MAX >> 1))
+#define USHRT_HIGHBIT ((unsigned short) (USHRT_MAX ^ ((unsigned short) USHRT_MAX >> 1)))
+
+#define GMP_LIMB_HIGHBIT (MP_LIMB_T_MAX ^ (MP_LIMB_T_MAX >> 1))
+
+
+#if __GMP_MP_SIZE_T_INT
+#define MP_SIZE_T_MAX INT_MAX
+#define MP_SIZE_T_MIN INT_MIN
+#else
+#define MP_SIZE_T_MAX LONG_MAX
+#define MP_SIZE_T_MIN LONG_MIN
+#endif
+
+#define LONG_HIGHBIT LONG_MIN
+#define INT_HIGHBIT INT_MIN
+#define SHRT_HIGHBIT SHRT_MIN
+
+/* MP_LIMB macros */
+#define MPN_ZERO(dst, n) memset((dst), 0, (n)*BYTES_PER_MP_LIMB)
+#define MPN_COPY_DECR(dst,src,n) memmove((dst),(src),(n)*BYTES_PER_MP_LIMB)
+#define MPN_COPY_INCR(dst,src,n) memmove((dst),(src),(n)*BYTES_PER_MP_LIMB)
+#define MPN_COPY(dst,src,n) \
+ do \
+ { \
+ if ((dst) != (src)) \
+ { \
+ MPFR_ASSERTD ((char *) (dst) >= (char *) (src) + \
+ (n) * BYTES_PER_MP_LIMB || \
+ (char *) (src) >= (char *) (dst) + \
+ (n) * BYTES_PER_MP_LIMB); \
+ memcpy ((dst), (src), (n) * BYTES_PER_MP_LIMB); \
+ } \
+ } \
+ while (0)
+
+/* MPN macros taken from gmp-impl.h */
+#define MPN_NORMALIZE(DST, NLIMBS) \
+ do { \
+ while (NLIMBS > 0) \
+ { \
+ if ((DST)[(NLIMBS) - 1] != 0) \
+ break; \
+ NLIMBS--; \
+ } \
+ } while (0)
+#define MPN_NORMALIZE_NOT_ZERO(DST, NLIMBS) \
+ do { \
+ MPFR_ASSERTD ((NLIMBS) >= 1); \
+ while (1) \
+ { \
+ if ((DST)[(NLIMBS) - 1] != 0) \
+ break; \
+ NLIMBS--; \
+ } \
+ } while (0)
+#define MPN_OVERLAP_P(xp, xsize, yp, ysize) \
+ ((xp) + (xsize) > (yp) && (yp) + (ysize) > (xp))
+#define MPN_SAME_OR_INCR2_P(dst, dsize, src, ssize) \
+ ((dst) <= (src) || ! MPN_OVERLAP_P (dst, dsize, src, ssize))
+#define MPN_SAME_OR_INCR_P(dst, src, size) \
+ MPN_SAME_OR_INCR2_P(dst, size, src, size)
+#define MPN_SAME_OR_DECR2_P(dst, dsize, src, ssize) \
+ ((dst) >= (src) || ! MPN_OVERLAP_P (dst, dsize, src, ssize))
+#define MPN_SAME_OR_DECR_P(dst, src, size) \
+ MPN_SAME_OR_DECR2_P(dst, size, src, size)
+
+/* If mpn_mul_basecase or mpn_sqr_basecase are not exported, use mpn_mul instead */
+#ifndef mpn_mul_basecase
+# define mpn_mul_basecase(dst,s1,n1,s2,n2) mpn_mul((dst),(s1),(n1),(s2),(n2))
+#endif
+#ifndef mpn_sqr_basecase
+# define mpn_sqr_basecase(dst,src,n) mpn_mul((dst),(src),(n),(src),(n))
+#endif
+
+/* ASSERT */
+__MPFR_DECLSPEC void mpfr_assert_fail _MPFR_PROTO((const char *, int,
+ const char *));
+
+#define ASSERT_FAIL(expr) mpfr_assert_fail (__FILE__, __LINE__, #expr)
+#define ASSERT(expr) MPFR_ASSERTD(expr)
+
+/* Access fields of GMP structs */
+#define SIZ(x) ((x)->_mp_size)
+#define ABSIZ(x) ABS (SIZ (x))
+#define PTR(x) ((x)->_mp_d)
+#define LIMBS(x) ((x)->_mp_d)
+#define EXP(x) ((x)->_mp_exp)
+#define PREC(x) ((x)->_mp_prec)
+#define ALLOC(x) ((x)->_mp_alloc)
+#define MPZ_REALLOC(z,n) ((n) > ALLOC(z) ? _mpz_realloc(z,n) : PTR(z))
+
+/* Support for non-IEEE floats -- should be detected with a proper configure test */
+#undef XDEBUG
+#define XDEBUG
+
+/* For longlong.h */
+#ifdef HAVE_ATTRIBUTE_MODE
+typedef unsigned int UQItype __attribute__ ((mode (QI)));
+typedef int SItype __attribute__ ((mode (SI)));
+typedef unsigned int USItype __attribute__ ((mode (SI)));
+typedef int DItype __attribute__ ((mode (DI)));
+typedef unsigned int UDItype __attribute__ ((mode (DI)));
+#else
+typedef unsigned char UQItype;
+typedef long SItype;
+typedef unsigned long USItype;
+#ifdef HAVE_LONG_LONG
+typedef long long int DItype;
+typedef unsigned long long int UDItype;
+#else /* Assume `long' gives us a wide enough type. Needed for hppa2.0w. */
+typedef long int DItype;
+typedef unsigned long int UDItype;
+#endif
+#endif
+typedef mp_limb_t UWtype;
+typedef unsigned int UHWtype;
+#define W_TYPE_SIZE GMP_NUMB_BITS
+
+/* Remap names of internal mpn functions (for longlong.h). */
+#undef __clz_tab
+#define __clz_tab mpfr_clz_tab
+
+/* Use (4.0 * ...) instead of (2.0 * ...) to work around buggy compilers
+ that don't convert ulong->double correctly (eg. SunOS 4 native cc). */
+#undef MP_BASE_AS_DOUBLE
+#define MP_BASE_AS_DOUBLE (4.0 * ((mp_limb_t) 1 << (GMP_NUMB_BITS - 2)))
+
+/* Structure for conversion between internal binary format and
+ strings in base 2..36. */
+struct bases
+{
+ /* log(2)/log(conversion_base) */
+ double chars_per_bit_exactly;
+};
+#undef __mp_bases
+#define __mp_bases mpfr_bases
+__MPFR_DECLSPEC extern const struct bases mpfr_bases[257];
+
+/* Standard macros */
+#undef ABS
+#undef MIN
+#undef MAX
+#undef numberof
+#define ABS(x) ((x) >= 0 ? (x) : -(x))
+#define MIN(l,o) ((l) < (o) ? (l) : (o))
+#define MAX(h,i) ((h) > (i) ? (h) : (i))
+#define numberof(x) (sizeof (x) / sizeof ((x)[0]))
+
+/* Random */
+#undef __gmp_rands_initialized
+#undef __gmp_rands
+#define __gmp_rands_initialized mpfr_rands_initialized
+#define __gmp_rands mpfr_rands
+
+__MPFR_DECLSPEC extern char mpfr_rands_initialized;
+__MPFR_DECLSPEC extern gmp_randstate_t mpfr_rands;
+
+#undef RANDS
+#define RANDS \
+ ((__gmp_rands_initialized ? 0 \
+ : (__gmp_rands_initialized = 1, \
+ gmp_randinit_default (__gmp_rands), 0)), \
+ __gmp_rands)
+
+#undef RANDS_CLEAR
+#define RANDS_CLEAR() \
+ do { \
+ if (__gmp_rands_initialized) \
+ { \
+ __gmp_rands_initialized = 0; \
+ gmp_randclear (__gmp_rands); \
+ } \
+ } while (0)
+
+typedef __gmp_randstate_struct *gmp_randstate_ptr;
+
+/* Allocation functions are defined in gmp-impl.h */
+
+/* In newer GMP, there aren't anymore __gmp_allocate_func,
+ __gmp_reallocate_func & __gmp_free_func in gmp.h
+ Just getting the correct value by calling mp_get_memory_functions */
+#ifdef mp_get_memory_functions
+
+#undef __gmp_allocate_func
+#undef __gmp_reallocate_func
+#undef __gmp_free_func
+#define MPFR_GET_MEMFUNC mp_get_memory_functions(&mpfr_allocate_func, &mpfr_reallocate_func, &mpfr_free_func)
+#define __gmp_allocate_func (MPFR_GET_MEMFUNC, mpfr_allocate_func)
+#define __gmp_reallocate_func (MPFR_GET_MEMFUNC, mpfr_reallocate_func)
+#define __gmp_free_func (MPFR_GET_MEMFUNC, mpfr_free_func)
+__MPFR_DECLSPEC extern void * (*mpfr_allocate_func) _MPFR_PROTO ((size_t));
+__MPFR_DECLSPEC extern void * (*mpfr_reallocate_func) _MPFR_PROTO ((void *,
+ size_t, size_t));
+__MPFR_DECLSPEC extern void (*mpfr_free_func) _MPFR_PROTO ((void *,
+ size_t));
+
+#endif
+
+#undef __gmp_default_allocate
+#undef __gmp_default_reallocate
+#undef __gmp_default_free
+#define __gmp_default_allocate mpfr_default_allocate
+#define __gmp_default_reallocate mpfr_default_reallocate
+#define __gmp_default_free mpfr_default_free
+__MPFR_DECLSPEC void *__gmp_default_allocate _MPFR_PROTO ((size_t));
+__MPFR_DECLSPEC void *__gmp_default_reallocate _MPFR_PROTO ((void *, size_t,
+ size_t));
+__MPFR_DECLSPEC void __gmp_default_free _MPFR_PROTO ((void *, size_t));
+
+/* Temp memory allocate */
+
+struct tmp_marker
+{
+ void *ptr;
+ size_t size;
+ struct tmp_marker *next;
+};
+
+__MPFR_DECLSPEC void *mpfr_tmp_allocate _MPFR_PROTO ((struct tmp_marker **,
+ size_t));
+__MPFR_DECLSPEC void mpfr_tmp_free _MPFR_PROTO ((struct tmp_marker *));
+
+/* Do not define TMP_SALLOC (see the test in mpfr-impl.h)! */
+#define TMP_ALLOC(n) (MPFR_LIKELY ((n) < 16384) ? \
+ alloca (n) : mpfr_tmp_allocate (&tmp_marker, (n)))
+#define TMP_DECL(m) struct tmp_marker *tmp_marker
+#define TMP_MARK(m) (tmp_marker = 0)
+#define TMP_FREE(m) mpfr_tmp_free (tmp_marker)
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* Gmp internal emulator */
diff --git a/src/mpfr-impl.h b/src/mpfr-impl.h
new file mode 100644
index 000000000..4663cbed0
--- /dev/null
+++ b/src/mpfr-impl.h
@@ -0,0 +1,1758 @@
+/* Utilities for MPFR developers, not exported.
+
+Copyright 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#ifndef __MPFR_IMPL_H__
+#define __MPFR_IMPL_H__
+
+/* Let's include some standard headers unconditionally as they are
+ already needed by several source files or when some options are
+ enabled/disabled, and it is easy to forget them (some configure
+ options may hide the error).
+ Note: If some source file must not have such a header included
+ (which is very unlikely and probably means something broken in
+ this source file), we should do that with some macro (that would
+ also force to disable incompatible features). */
+#if defined (__cplusplus)
+#include <cstdio>
+#include <cstring>
+#else
+#include <stdio.h>
+#include <string.h>
+#endif
+#include <limits.h>
+
+/* Check if we are inside a build of MPFR or inside the test suite.
+ This is needed in mpfr.h to export or import the functions.
+ It matters only for Windows DLL */
+#ifndef __MPFR_TEST_H__
+# define __MPFR_WITHIN_MPFR 1
+#endif
+
+/******************************************************
+ ****************** Include files *********************
+ ******************************************************/
+
+/* Include 'config.h' before using ANY configure macros if needed
+ NOTE: It isn't MPFR 'config.h', but GMP's one! */
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+
+#ifdef MPFR_HAVE_GMP_IMPL /* Build with gmp internals */
+
+# ifndef __GMP_H__
+# include "gmp.h"
+# endif
+# ifndef __GMP_IMPL_H__
+# include "gmp-impl.h"
+# endif
+# ifdef MPFR_NEED_LONGLONG_H
+# include "longlong.h"
+# endif
+# ifndef __MPFR_H
+# include "mpfr.h"
+# endif
+
+#else /* Build without gmp internals */
+
+# ifndef __GMP_H__
+# include "gmp.h"
+# endif
+# ifndef __MPFR_H
+# include "mpfr.h"
+# endif
+# ifndef __GMPFR_GMP_H__
+# include "mpfr-gmp.h"
+# endif
+# ifdef MPFR_NEED_LONGLONG_H
+# include "mpfr-longlong.h"
+# endif
+
+#endif
+#undef MPFR_NEED_LONGLONG_H
+
+/* if mpn_sqr_n is not exported, use mpn_mul instead */
+#ifndef mpn_sqr_n
+# define mpn_sqr_n(dst,src,n) mpn_mul((dst),(src),(n),(src),(n))
+#endif
+
+/* For the definition of MPFR_THREAD_ATTR. GCC/ICC detection macros are
+ no longer used, as they sometimes gave incorrect information about
+ the support of thread-local variables. A configure check is now done.
+ If the use of detection macros is needed in the future, this could be
+ moved below (after the detection macros are defined). */
+#include "mpfr-thread.h"
+
+
+/******************************************************
+ ***************** Detection macros *******************
+ ******************************************************/
+
+/* Macros to detect STDC, GCC, GLIBC, GMP and ICC version */
+#if defined(__STDC_VERSION__)
+# define __MPFR_STDC(version) (__STDC_VERSION__>=(version))
+#elif defined(__STDC__)
+# define __MPFR_STDC(version) (0 == (version))
+#else
+# define __MPFR_STDC(version) 0
+#endif
+
+#if defined(__ICC)
+# define __MPFR_ICC(a,b,c) (__ICC >= (a)*100+(b)*10+(c))
+#elif defined(__INTEL_COMPILER)
+# define __MPFR_ICC(a,b,c) (__INTEL_COMPILER >= (a)*100+(b)*10+(c))
+#else
+# define __MPFR_ICC(a,b,c) 0
+#endif
+
+#if defined(__GNUC__) && defined(__GNUC_MINOR__) && ! __MPFR_ICC(0,0,0)
+# define __MPFR_GNUC(a,i) \
+ (MPFR_VERSION_NUM(__GNUC__,__GNUC_MINOR__,0) >= MPFR_VERSION_NUM(a,i,0))
+#else
+# define __MPFR_GNUC(a,i) 0
+#endif
+
+#if defined(__GLIBC__) && defined(__GLIBC_MINOR__)
+# define __MPFR_GLIBC(a,i) \
+ (MPFR_VERSION_NUM(__GLIBC__,__GLIBC_MINOR__,0) >= MPFR_VERSION_NUM(a,i,0))
+#else
+# define __MPFR_GLIBC(a,i) 0
+#endif
+
+#if defined(__GNU_MP_VERSION) && \
+ defined(__GNU_MP_VERSION_MINOR) && \
+ defined(__GNU_MP_VERSION_PATCHLEVEL)
+# define __MPFR_GMP(a,b,c) \
+ (MPFR_VERSION_NUM(__GNU_MP_VERSION,__GNU_MP_VERSION_MINOR,__GNU_MP_VERSION_PATCHLEVEL) >= MPFR_VERSION_NUM(a,b,c))
+#else
+# define __MPFR_GMP(a,b,c) 0
+#endif
+
+
+
+/******************************************************
+ ****************** (U)INTMAX_MAX *********************
+ ******************************************************/
+
+/* Let's try to fix UINTMAX_MAX and INTMAX_MAX if these macros don't work
+ (e.g. with gcc -ansi -pedantic-errors in 32-bit mode under GNU/Linux),
+ see <http://bugs.debian.org/cgi-bin/bugreport.cgi?bug=582698>. */
+#ifdef _MPFR_H_HAVE_INTMAX_T
+# ifdef MPFR_HAVE_INTMAX_MAX
+# define MPFR_UINTMAX_MAX UINTMAX_MAX
+# define MPFR_INTMAX_MAX INTMAX_MAX
+# define MPFR_INTMAX_MIN INTMAX_MIN
+# else
+# define MPFR_UINTMAX_MAX ((uintmax_t) -1)
+# define MPFR_INTMAX_MAX ((intmax_t) (MPFR_UINTMAX_MAX >> 1))
+# define MPFR_INTMAX_MIN (INT_MIN + INT_MAX - MPFR_INTMAX_MAX)
+# endif
+#endif
+
+
+
+/******************************************************
+ ******************** Check GMP ***********************
+ ******************************************************/
+
+#if !__MPFR_GMP(4,1,0)
+# error "GMP 4.1.0 or newer needed"
+#endif
+
+#if GMP_NAIL_BITS != 0
+# error "MPFR doesn't support nonzero values of GMP_NAIL_BITS"
+#endif
+
+#if (GMP_NUMB_BITS<32) || (GMP_NUMB_BITS & (GMP_NUMB_BITS - 1))
+# error "GMP_NUMB_BITS must be a power of 2, and >= 32"
+#endif
+
+#if GMP_NUMB_BITS == 16
+# define MPFR_LOG2_GMP_NUMB_BITS 4
+#elif GMP_NUMB_BITS == 32
+# define MPFR_LOG2_GMP_NUMB_BITS 5
+#elif GMP_NUMB_BITS == 64
+# define MPFR_LOG2_GMP_NUMB_BITS 6
+#elif GMP_NUMB_BITS == 128
+# define MPFR_LOG2_GMP_NUMB_BITS 7
+#elif GMP_NUMB_BITS == 256
+# define MPFR_LOG2_GMP_NUMB_BITS 8
+#else
+# error "Can't compute log2(GMP_NUMB_BITS)"
+#endif
+
+#if __MPFR_GNUC(3,0) || __MPFR_ICC(8,1,0)
+/* For the future: N1478: Supporting the 'noreturn' property in C1x
+ http://www.open-std.org/JTC1/SC22/WG14/www/docs/n1478.htm */
+# define MPFR_NORETURN_ATTR __attribute__ ((noreturn))
+# define MPFR_CONST_ATTR __attribute__ ((const))
+#else
+# define MPFR_NORETURN_ATTR
+# define MPFR_CONST_ATTR
+#endif
+
+/******************************************************
+ ************* Global Internal Variables **************
+ ******************************************************/
+
+/* Cache struct */
+struct __gmpfr_cache_s {
+ mpfr_t x;
+ int inexact;
+ int (*func)(mpfr_ptr, mpfr_rnd_t);
+};
+typedef struct __gmpfr_cache_s mpfr_cache_t[1];
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+__MPFR_DECLSPEC extern MPFR_THREAD_ATTR unsigned int __gmpfr_flags;
+__MPFR_DECLSPEC extern MPFR_THREAD_ATTR mpfr_exp_t __gmpfr_emin;
+__MPFR_DECLSPEC extern MPFR_THREAD_ATTR mpfr_exp_t __gmpfr_emax;
+__MPFR_DECLSPEC extern MPFR_THREAD_ATTR mpfr_prec_t __gmpfr_default_fp_bit_precision;
+__MPFR_DECLSPEC extern MPFR_THREAD_ATTR mpfr_rnd_t __gmpfr_default_rounding_mode;
+__MPFR_DECLSPEC extern MPFR_THREAD_ATTR mpfr_cache_t __gmpfr_cache_const_pi;
+__MPFR_DECLSPEC extern MPFR_THREAD_ATTR mpfr_cache_t __gmpfr_cache_const_log2;
+__MPFR_DECLSPEC extern MPFR_THREAD_ATTR mpfr_cache_t __gmpfr_cache_const_euler;
+__MPFR_DECLSPEC extern MPFR_THREAD_ATTR mpfr_cache_t __gmpfr_cache_const_catalan;
+
+#define BASE_MAX 62
+__MPFR_DECLSPEC extern const __mpfr_struct __gmpfr_l2b[BASE_MAX-1][2];
+
+/* Note: do not use the following values when they can be outside the
+ current exponent range, e.g. when the exponent range has not been
+ extended yet; under such a condition, they can be used only in
+ mpfr_cmpabs. */
+__MPFR_DECLSPEC extern const mpfr_t __gmpfr_one;
+__MPFR_DECLSPEC extern const mpfr_t __gmpfr_two;
+__MPFR_DECLSPEC extern const mpfr_t __gmpfr_four;
+
+
+#if defined (__cplusplus)
+ }
+#endif
+
+/* Flags of __gmpfr_flags */
+#define MPFR_FLAGS_UNDERFLOW 1
+#define MPFR_FLAGS_OVERFLOW 2
+#define MPFR_FLAGS_NAN 4
+#define MPFR_FLAGS_INEXACT 8
+#define MPFR_FLAGS_ERANGE 16
+#define MPFR_FLAGS_ALL 31
+
+/* Replace some common functions by direct access to the global variables */
+#define mpfr_get_emin() (__gmpfr_emin + 0)
+#define mpfr_get_emax() (__gmpfr_emax + 0)
+#define mpfr_get_default_rounding_mode() (__gmpfr_default_rounding_mode + 0)
+#define mpfr_get_default_prec() (__gmpfr_default_fp_bit_precision + 0)
+
+#define mpfr_clear_flags() \
+ ((void) (__gmpfr_flags = 0))
+#define mpfr_clear_underflow() \
+ ((void) (__gmpfr_flags &= MPFR_FLAGS_ALL ^ MPFR_FLAGS_UNDERFLOW))
+#define mpfr_clear_overflow() \
+ ((void) (__gmpfr_flags &= MPFR_FLAGS_ALL ^ MPFR_FLAGS_OVERFLOW))
+#define mpfr_clear_nanflag() \
+ ((void) (__gmpfr_flags &= MPFR_FLAGS_ALL ^ MPFR_FLAGS_NAN))
+#define mpfr_clear_inexflag() \
+ ((void) (__gmpfr_flags &= MPFR_FLAGS_ALL ^ MPFR_FLAGS_INEXACT))
+#define mpfr_clear_erangeflag() \
+ ((void) (__gmpfr_flags &= MPFR_FLAGS_ALL ^ MPFR_FLAGS_ERANGE))
+#define mpfr_underflow_p() \
+ ((int) (__gmpfr_flags & MPFR_FLAGS_UNDERFLOW))
+#define mpfr_overflow_p() \
+ ((int) (__gmpfr_flags & MPFR_FLAGS_OVERFLOW))
+#define mpfr_nanflag_p() \
+ ((int) (__gmpfr_flags & MPFR_FLAGS_NAN))
+#define mpfr_inexflag_p() \
+ ((int) (__gmpfr_flags & MPFR_FLAGS_INEXACT))
+#define mpfr_erangeflag_p() \
+ ((int) (__gmpfr_flags & MPFR_FLAGS_ERANGE))
+
+/* Testing an exception flag correctly is tricky. There are mainly two
+ pitfalls: First, one needs to remember to clear the corresponding
+ flag, in case it was set before the function call or during some
+ intermediate computations (in practice, one can clear all the flags).
+ Secondly, one needs to test the flag early enough, i.e. before it
+ can be modified by another function. Moreover, it is quite difficult
+ (if not impossible) to reliably check problems with "make check". To
+ avoid these pitfalls, it is recommended to use the following macros.
+ Other use of the exception-flag predicate functions/macros will be
+ detected by mpfrlint.
+ Note: _op can be either a statement or an expression.
+ MPFR_BLOCK_EXCEP should be used only inside a block; it is useful to
+ detect some exception in order to exit the block as soon as possible. */
+#define MPFR_BLOCK_DECL(_flags) unsigned int _flags
+#define MPFR_BLOCK(_flags,_op) \
+ do \
+ { \
+ mpfr_clear_flags (); \
+ _op; \
+ (_flags) = __gmpfr_flags; \
+ } \
+ while (0)
+#define MPFR_BLOCK_TEST(_flags,_f) MPFR_UNLIKELY ((_flags) & (_f))
+#define MPFR_BLOCK_EXCEP (__gmpfr_flags & (MPFR_FLAGS_UNDERFLOW | \
+ MPFR_FLAGS_OVERFLOW | \
+ MPFR_FLAGS_NAN))
+/* Let's use a MPFR_ prefix, because e.g. OVERFLOW is defined by glibc's
+ math.h, though this is not a reserved identifier! */
+#define MPFR_UNDERFLOW(_flags) MPFR_BLOCK_TEST (_flags, MPFR_FLAGS_UNDERFLOW)
+#define MPFR_OVERFLOW(_flags) MPFR_BLOCK_TEST (_flags, MPFR_FLAGS_OVERFLOW)
+#define MPFR_NANFLAG(_flags) MPFR_BLOCK_TEST (_flags, MPFR_FLAGS_NAN)
+#define MPFR_INEXFLAG(_flags) MPFR_BLOCK_TEST (_flags, MPFR_FLAGS_INEXACT)
+#define MPFR_ERANGEFLAG(_flags) MPFR_BLOCK_TEST (_flags, MPFR_FLAGS_ERANGE)
+
+
+/******************************************************
+ ******************** Assertions **********************
+ ******************************************************/
+
+/* Compile with -DWANT_ASSERT to check all assert statements */
+
+/* Note: do not use GMP macros ASSERT_ALWAYS and ASSERT as they are not
+ expressions, and as a consequence, they cannot be used in a for(),
+ with a comma operator and so on. */
+
+/* MPFR_ASSERTN(expr): assertions that should always be checked
+   (compiled in unconditionally).  The expression is always evaluated;
+   on failure GMP's ASSERT_FAIL is invoked.  The ",0" turns the right
+   operand of || into an int so the whole expression is well formed. */
+#define MPFR_ASSERTN(expr) \
+   ((void) ((MPFR_UNLIKELY(expr)) || MPFR_UNLIKELY( (ASSERT_FAIL(expr),0) )))
+
+/* MPFR_ASSERTD(expr): assertions that should be checked when testing.
+   Enabling WANT_ASSERT also turns on MPFR_EXP_CHECK (exponent-field
+   checking, see the MPFR_GET_EXP/MPFR_SET_EXP macros). */
+#ifdef WANT_ASSERT
+# define MPFR_EXP_CHECK 1
+# define MPFR_ASSERTD(expr) MPFR_ASSERTN (expr)
+#else
+# define MPFR_ASSERTD(expr) ((void) 0)
+#endif
+
+/* Code to mark branches that should be unreachable: assert, then
+   "return 0" to silence missing-return warnings.
+   WARNING: It doesn't use do { } while (0) for Insure++ */
+#define MPFR_RET_NEVER_GO_HERE() {MPFR_ASSERTN(0); return 0;}
+
+
+/******************************************************
+ ******************** Warnings ************************
+ ******************************************************/
+
+/* MPFR_WARNING is no longer useful, but let's keep the macro in case
+ it needs to be used again in the future. */
+
+#ifdef MPFR_USE_WARNINGS
+# include <stdlib.h>
+# define MPFR_WARNING(W) \
+ do \
+ { \
+ char *q = getenv ("MPFR_QUIET"); \
+ if (q == NULL || *q == 0) \
+ fprintf (stderr, "MPFR: %s\n", W); \
+ } \
+ while (0)
+#else
+# define MPFR_WARNING(W) ((void) 0)
+#endif
+
+
+/******************************************************
+ ****************** double macros *********************
+ ******************************************************/
+
+/* Precision used for lower precision computations */
+#define MPFR_SMALL_PRECISION 32
+
+/* Definition of constants */
+#define LOG2 0.69314718055994528622 /* log(2) rounded to zero on 53 bits */
+#define ALPHA 4.3191365662914471407 /* a+2 = a*log(a), rounded to +infinity */
+#define EXPM1 0.36787944117144227851 /* exp(-1), rounded to zero */
+
+/* MPFR_DOUBLE_SPEC = 1 if the C type 'double' corresponds to IEEE-754
+ double precision, 0 if it doesn't, and undefined if one doesn't know.
+ On all the tested machines, MPFR_DOUBLE_SPEC = 1. To have this macro
+ defined here, #include <float.h> is needed. If need be, other values
+ could be defined for other specs (once they are known). */
+#if !defined(MPFR_DOUBLE_SPEC) && defined(FLT_RADIX) && \
+ defined(DBL_MANT_DIG) && defined(DBL_MIN_EXP) && defined(DBL_MAX_EXP)
+# if FLT_RADIX == 2 && DBL_MANT_DIG == 53 && \
+ DBL_MIN_EXP == -1021 && DBL_MAX_EXP == 1024
+# define MPFR_DOUBLE_SPEC 1
+# else
+# define MPFR_DOUBLE_SPEC 0
+# endif
+#endif
+
+/* Debug non IEEE floats */
+#ifdef XDEBUG
+# undef _GMP_IEEE_FLOATS
+#endif
+#ifndef _GMP_IEEE_FLOATS
+# define _GMP_IEEE_FLOATS 0
+#endif
+
+#ifndef IEEE_DBL_MANT_DIG
+#define IEEE_DBL_MANT_DIG 53
+#endif
+#define MPFR_LIMBS_PER_DOUBLE ((IEEE_DBL_MANT_DIG-1)/GMP_NUMB_BITS+1)
+
+#ifndef IEEE_FLT_MANT_DIG
+#define IEEE_FLT_MANT_DIG 24
+#endif
+#define MPFR_LIMBS_PER_FLT ((IEEE_FLT_MANT_DIG-1)/GMP_NUMB_BITS+1)
+
+/* Visual C++ doesn't support +1.0/0.0, -1.0/0.0 and 0.0/0.0
+   at compile time. */
+#if defined(_MSC_VER) && defined(_WIN32) && (_MSC_VER >= 1200)
+static double double_zero = 0.0;
+# define DBL_NAN (double_zero/double_zero)
+# define DBL_POS_INF ((double) 1.0/double_zero)
+# define DBL_NEG_INF ((double)-1.0/double_zero)
+# define DBL_NEG_ZERO (-double_zero)
+#else
+# define DBL_POS_INF ((double) 1.0/0.0)
+# define DBL_NEG_INF ((double)-1.0/0.0)
+# define DBL_NAN ((double) 0.0/0.0)
+# define DBL_NEG_ZERO (-0.0)
+#endif
+
+/* Note: In the past, there was specific code for _GMP_IEEE_FLOATS, which
+ was based on NaN and Inf memory representations. This code was breaking
+ the aliasing rules (see ISO C99, 6.5#6 and 6.5#7 on the effective type)
+ and for this reason it did not behave correctly with GCC 4.5.0 20091119.
+ The code needed a memory transfer and was probably not better than the
+ macros below with a good compiler (a fix based on the NaN / Inf memory
+ representation would be even worse due to C limitations), and this code
+ could be selected only when MPFR was built with --with-gmp-build, thus
+ introducing a difference (bad for maintaining/testing MPFR); therefore
+ it has been removed. The old code required that the argument x be an
+ lvalue of type double. We still require that, in case one would need
+ to change the macros below, e.g. for some broken compiler. But the
+ LVALUE(x) condition could be removed if really necessary. */
+/* Below, the &(x) == &(x) || &(x) != &(x) allows to make sure that x
+ is a lvalue without (probably) any warning from the compiler. The
+ &(x) != &(x) is needed to avoid a failure under Mac OS X 10.4.11
+ (with Xcode 2.4.1, i.e. the latest one). */
+#define LVALUE(x) (&(x) == &(x) || &(x) != &(x))
+#define DOUBLE_ISINF(x) (LVALUE(x) && ((x) > DBL_MAX || (x) < -DBL_MAX))
+#ifdef MPFR_NANISNAN
+/* Avoid MIPSpro / IRIX64 / gcc -ffast-math (incorrect) optimizations.
+ The + must not be replaced by a ||. With gcc -ffast-math, NaN is
+ regarded as a positive number or something like that; the second
+ test catches this case. */
+# define DOUBLE_ISNAN(x) \
+ (LVALUE(x) && !((((x) >= 0.0) + ((x) <= 0.0)) && -(x)*(x) <= 0.0))
+#else
+# define DOUBLE_ISNAN(x) (LVALUE(x) && (x) != (x))
+#endif
+
+/******************************************************
+ *************** Long double macros *******************
+ ******************************************************/
+
+/* We try to get the exact value of the precision of long double
+ (provided by the implementation) in order to provide correct
+ rounding in this case (not guaranteed if the C implementation
+ does not have an adequate long double arithmetic). Note that
+ it may be lower than the precision of some numbers that can
+ be represented in a long double; e.g. on FreeBSD/x86, it is
+ 53 because the processor is configured to round in double
+ precision (even when using the long double type -- this is a
+ limitation of the x87 arithmetic), and on Mac OS X, it is 106
+ because the implementation is a double-double arithmetic.
+ Otherwise (e.g. in base 10), we get an upper bound of the
+ precision, and correct rounding isn't currently provided.
+*/
+#if defined(LDBL_MANT_DIG) && FLT_RADIX == 2
+# define MPFR_LDBL_MANT_DIG LDBL_MANT_DIG
+#else
+# define MPFR_LDBL_MANT_DIG \
+ (sizeof(long double)*GMP_NUMB_BITS/sizeof(mp_limb_t))
+#endif
+#define MPFR_LIMBS_PER_LONG_DOUBLE \
+ ((sizeof(long double)-1)/sizeof(mp_limb_t)+1)
+
+/* LONGDOUBLE_NAN_ACTION executes the code "action" if x is a NaN. */
+
+/* On hppa2.0n-hp-hpux10 with the unbundled HP cc, the test x!=x on a NaN
+ has been seen false, meaning NaNs are not detected. This seemed to
+ happen only after other comparisons, not sure what's really going on. In
+ any case we can pick apart the bytes to identify a NaN. */
+#ifdef HAVE_LDOUBLE_IEEE_QUAD_BIG
+# define LONGDOUBLE_NAN_ACTION(x, action) \
+ do { \
+ union { \
+ long double ld; \
+ struct { \
+ unsigned int sign : 1; \
+ unsigned int exp : 15; \
+ unsigned int man3 : 16; \
+ unsigned int man2 : 32; \
+ unsigned int man1 : 32; \
+ unsigned int man0 : 32; \
+ } s; \
+ } u; \
+ u.ld = (x); \
+ if (u.s.exp == 0x7FFFL \
+ && (u.s.man0 | u.s.man1 | u.s.man2 | u.s.man3) != 0) \
+ { action; } \
+ } while (0)
+#endif
+
+#ifdef HAVE_LDOUBLE_IEEE_QUAD_LITTLE
+# define LONGDOUBLE_NAN_ACTION(x, action) \
+ do { \
+ union { \
+ long double ld; \
+ struct { \
+ unsigned int man0 : 32; \
+ unsigned int man1 : 32; \
+ unsigned int man2 : 32; \
+ unsigned int man3 : 16; \
+ unsigned int exp : 15; \
+ unsigned int sign : 1; \
+ } s; \
+ } u; \
+ u.ld = (x); \
+ if (u.s.exp == 0x7FFFL \
+ && (u.s.man0 | u.s.man1 | u.s.man2 | u.s.man3) != 0) \
+ { action; } \
+ } while (0)
+#endif
+
+/* Under IEEE rules, NaN is not equal to anything, including itself.
+ "volatile" here stops "cc" on mips64-sgi-irix6.5 from optimizing away
+ x!=x. */
+#ifndef LONGDOUBLE_NAN_ACTION
+# define LONGDOUBLE_NAN_ACTION(x, action) \
+ do { \
+ volatile long double __x = LONGDOUBLE_VOLATILE (x); \
+ if ((x) != __x) \
+ { action; } \
+ } while (0)
+# define WANT_LONGDOUBLE_VOLATILE 1
+#endif
+
+/* If we don't have a proper "volatile" then volatile is #defined to empty,
+ in this case call through an external function to stop the compiler
+ optimizing anything. */
+#ifdef WANT_LONGDOUBLE_VOLATILE
+# ifdef volatile
+__MPFR_DECLSPEC long double __gmpfr_longdouble_volatile _MPFR_PROTO ((long double)) MPFR_CONST_ATTR;
+# define LONGDOUBLE_VOLATILE(x) (__gmpfr_longdouble_volatile (x))
+# define WANT_GMPFR_LONGDOUBLE_VOLATILE 1
+# else
+# define LONGDOUBLE_VOLATILE(x) (x)
+# endif
+#endif
+
+/* Some special cases for the IEEE_EXT format, Little Endian */
+#if HAVE_LDOUBLE_IEEE_EXT_LITTLE
+
+typedef union {
+ long double ld;
+ struct {
+ unsigned int manl : 32;
+ unsigned int manh : 32;
+ unsigned int expl : 8 ;
+ unsigned int exph : 7;
+ unsigned int sign : 1;
+ } s;
+} mpfr_long_double_t;
+
+/* #undef MPFR_LDBL_MANT_DIG */
+#undef MPFR_LIMBS_PER_LONG_DOUBLE
+/* #define MPFR_LDBL_MANT_DIG 64 */
+#define MPFR_LIMBS_PER_LONG_DOUBLE ((64-1)/GMP_NUMB_BITS+1)
+
+#endif
+
+/******************************************************
+ *************** _Decimal64 support *******************
+ ******************************************************/
+
+#ifdef MPFR_WANT_DECIMAL_FLOATS
+/* to cast between binary64 and decimal64 */
+union ieee_double_decimal64 { double d; _Decimal64 d64; };
+#endif
+
+/******************************************************
+ **************** mpfr_t properties *******************
+ ******************************************************/
+
+#define MPFR_PREC(x) ((x)->_mpfr_prec)
+#define MPFR_EXP(x) ((x)->_mpfr_exp)
+#define MPFR_MANT(x) ((x)->_mpfr_d)
+#define MPFR_LIMB_SIZE(x) ((MPFR_PREC((x))-1)/GMP_NUMB_BITS+1)
+
+
+/******************************************************
+ ***************** exponent limits ********************
+ ******************************************************/
+
+/* Define limits and unsigned type of exponent. The following definitions
+ * depend on [mp_exp_t]; if this type changes in GMP, these definitions
+ * will need to be modified.
+ */
+#if __GMP_MP_SIZE_T_INT == 1
+typedef unsigned int mpfr_uexp_t;
+# define MPFR_EXP_MAX (INT_MAX)
+# define MPFR_EXP_MIN (INT_MIN)
+#else
+typedef unsigned long int mpfr_uexp_t;
+# define MPFR_EXP_MAX (LONG_MAX)
+# define MPFR_EXP_MIN (LONG_MIN)
+#endif
+#ifndef mp_exp_unsigned_t
+# define mp_exp_unsigned_t mpfr_uexp_t
+#endif
+
+#if MPFR_EXP_MIN >= LONG_MIN && MPFR_EXP_MAX <= LONG_MAX
+typedef long int mpfr_eexp_t;
+# define mpfr_get_exp_t(x,r) mpfr_get_si((x),(r))
+# define mpfr_set_exp_t(x,e,r) mpfr_set_si((x),(e),(r))
+#elif defined (_MPFR_H_HAVE_INTMAX_T)
+typedef intmax_t mpfr_eexp_t;
+# define mpfr_get_exp_t(x,r) mpfr_get_sj((x),(r))
+# define mpfr_set_exp_t(x,e,r) mpfr_set_sj((x),(e),(r))
+#else
+# error "Cannot define mpfr_get_exp_t and mpfr_set_exp_t"
+#endif
+
+/* Invalid exponent value (to track bugs...) */
+#define MPFR_EXP_INVALID \
+ ((mpfr_exp_t) 1 << (GMP_NUMB_BITS*sizeof(mpfr_exp_t)/sizeof(mp_limb_t)-2))
+
+/* Definition of the exponent limits for MPFR numbers.
+ * These limits are chosen so that if e is such an exponent, then 2e-1 and
+ * 2e+1 are representable. This is useful for intermediate computations,
+ * in particular the multiplication.
+ */
+#undef MPFR_EMIN_MIN
+#undef MPFR_EMIN_MAX
+#undef MPFR_EMAX_MIN
+#undef MPFR_EMAX_MAX
+#define MPFR_EMIN_MIN (1-MPFR_EXP_INVALID)
+#define MPFR_EMIN_MAX (MPFR_EXP_INVALID-1)
+#define MPFR_EMAX_MIN (1-MPFR_EXP_INVALID)
+#define MPFR_EMAX_MAX (MPFR_EXP_INVALID-1)
+
+/* Use MPFR_GET_EXP and MPFR_SET_EXP instead of MPFR_EXP directly,
+ unless when the exponent may be out-of-range, for instance when
+ setting the exponent before calling mpfr_check_range.
+ MPFR_EXP_CHECK is defined when WANT_ASSERT is defined, but if you
+ don't use WANT_ASSERT (for speed reasons), you can still define
+ MPFR_EXP_CHECK by setting -DMPFR_EXP_CHECK in $CFLAGS. */
+
+#ifdef MPFR_EXP_CHECK
+# define MPFR_GET_EXP(x) (mpfr_get_exp) (x)
+# define MPFR_SET_EXP(x, exp) MPFR_ASSERTN (!mpfr_set_exp ((x), (exp)))
+# define MPFR_SET_INVALID_EXP(x) ((void) (MPFR_EXP (x) = MPFR_EXP_INVALID))
+#else
+# define MPFR_GET_EXP(x) MPFR_EXP (x)
+# define MPFR_SET_EXP(x, exp) ((void) (MPFR_EXP (x) = (exp)))
+# define MPFR_SET_INVALID_EXP(x) ((void) 0)
+#endif
+
+
+
+/******************************************************
+ ********** Singular Values (NAN, INF, ZERO) **********
+ ******************************************************/
+
+/*
+ * Clear flags macros are still defined and should be still used
+ * since the functions must not assume the internal format.
+ * How to deal with special values ?
+ * 1. Check if it is a special value (Zero, NaN, Inf) with MPFR_IS_SINGULAR
+ * 2. Deal with the special value with MPFR_IS_NAN, MPFR_IS_INF, etc
+ * 3. Else clear the flags of the dest (it must be done after since src
+ * may be also the dest!)
+ * MPFR_SET_INF, MPFR_SET_NAN, MPFR_SET_ZERO must clear by
+ * themselves the other flags.
+ */
+
+/* Special exponent values encoding singular numbers: the kind (zero,
+   NaN, infinity) is stored entirely in the exponent field.
+   MPFR_EXP_INF is the largest of the three, which is what makes the
+   single comparison in MPFR_IS_SINGULAR below work. */
+# define MPFR_EXP_ZERO (MPFR_EXP_MIN+1)
+# define MPFR_EXP_NAN (MPFR_EXP_MIN+2)
+# define MPFR_EXP_INF (MPFR_EXP_MIN+3)
+
+/* Note: the MPFR_SET_* macros write only the exponent field; the sign
+   field and the flags are left to the caller (see the comment above). */
+#define MPFR_IS_NAN(x) (MPFR_EXP(x) == MPFR_EXP_NAN)
+#define MPFR_SET_NAN(x) (MPFR_EXP(x) = MPFR_EXP_NAN)
+#define MPFR_IS_INF(x) (MPFR_EXP(x) == MPFR_EXP_INF)
+#define MPFR_SET_INF(x) (MPFR_EXP(x) = MPFR_EXP_INF)
+#define MPFR_IS_ZERO(x) (MPFR_EXP(x) == MPFR_EXP_ZERO)
+#define MPFR_SET_ZERO(x) (MPFR_EXP(x) = MPFR_EXP_ZERO)
+#define MPFR_NOTZERO(x) (MPFR_EXP(x) != MPFR_EXP_ZERO)
+
+#define MPFR_IS_FP(x) (!MPFR_IS_NAN(x) && !MPFR_IS_INF(x))
+/* All three special exponents are <= MPFR_EXP_INF, hence one test. */
+#define MPFR_IS_SINGULAR(x) (MPFR_EXP(x) <= MPFR_EXP_INF)
+#define MPFR_IS_PURE_FP(x) (!MPFR_IS_SINGULAR(x))
+
+#define MPFR_ARE_SINGULAR(x,y) \
+ (MPFR_UNLIKELY(MPFR_IS_SINGULAR(x)) || MPFR_UNLIKELY(MPFR_IS_SINGULAR(y)))
+
+
+
+/******************************************************
+ ********************* Sign Macros ********************
+ ******************************************************/
+
/* Sign values stored in the _mpfr_sign field. */
#define MPFR_SIGN_POS (1)
#define MPFR_SIGN_NEG (-1)

/* Strictly positive / strictly negative: nonzero AND the right sign. */
#define MPFR_IS_STRICTPOS(x)  (MPFR_NOTZERO((x)) && MPFR_SIGN(x) > 0)
#define MPFR_IS_STRICTNEG(x)  (MPFR_NOTZERO((x)) && MPFR_SIGN(x) < 0)

/* Sign tests that do not exclude zero (zeros carry a sign too, as the
   STRICTPOS/STRICTNEG macros above imply). */
#define MPFR_IS_NEG(x) (MPFR_SIGN(x) < 0)
#define MPFR_IS_POS(x) (MPFR_SIGN(x) > 0)

#define MPFR_SET_POS(x) (MPFR_SIGN(x) = MPFR_SIGN_POS)
#define MPFR_SET_NEG(x) (MPFR_SIGN(x) = MPFR_SIGN_NEG)

#define MPFR_CHANGE_SIGN(x) (MPFR_SIGN(x) = -MPFR_SIGN(x))
#define MPFR_SET_SAME_SIGN(x, y) (MPFR_SIGN(x) = MPFR_SIGN(y))
#define MPFR_SET_OPPOSITE_SIGN(x, y) (MPFR_SIGN(x) = -MPFR_SIGN(y))
#define MPFR_ASSERT_SIGN(s) \
 (MPFR_ASSERTD((s) == MPFR_SIGN_POS || (s) == MPFR_SIGN_NEG))
#define MPFR_SET_SIGN(x, s) \
  (MPFR_ASSERT_SIGN(s), MPFR_SIGN(x) = (s))
/* Fixed: the macro arguments are now parenthesized, so that expression
   arguments using operators of lower precedence than the comparison
   (e.g. "a ^ b") are evaluated as a whole instead of being split by
   the relational operator. */
#define MPFR_IS_POS_SIGN(s1) ((s1) > 0)
#define MPFR_IS_NEG_SIGN(s1) ((s1) < 0)
#define MPFR_MULT_SIGN(s1, s2) ((s1) * (s2))
/* Transform a sign to 1 or -1 */
#define MPFR_FROM_SIGN_TO_INT(s) (s)
#define MPFR_INT_SIGN(x) MPFR_FROM_SIGN_TO_INT(MPFR_SIGN(x))
+
+
+
+/******************************************************
+ ***************** Ternary Value Macros ***************
+ ******************************************************/
+
/* Special inexact value */
#define MPFR_EVEN_INEX 2

/* When returning the ternary inexact value, ALWAYS use one of the
   following two macros, unless the flag comes from another function
   returning the ternary inexact value: as a side effect they raise
   the corresponding global flag before returning. */
#define MPFR_RET(I) return \
  (I) ? ((__gmpfr_flags |= MPFR_FLAGS_INEXACT), (I)) : 0
#define MPFR_RET_NAN return (__gmpfr_flags |= MPFR_FLAGS_NAN), 0

/* Raise the erange flag. */
#define MPFR_SET_ERANGE() (__gmpfr_flags |= MPFR_FLAGS_ERANGE)

/* Sign of an integer as -1 / 0 / +1, and same-sign comparison.
   (I) > 0 and (I) < 0 each yield 0 or 1, so their difference is
   exactly the sign (branch-free form of the usual ternary). */
#define SIGN(I) (((I) > 0) - ((I) < 0))
#define SAME_SIGN(I1,I2) (SIGN (I1) == SIGN (I2))
+
+
+
+/******************************************************
+ ************** Rounding mode macros *****************
+ ******************************************************/
+
+/* MPFR_RND_MAX gives the number of supported rounding modes by all functions.
+ * Once faithful rounding is implemented, MPFR_RNDA should be changed
+ * to MPFR_RNDF. But this will also require more changes in the tests.
+ */
+#define MPFR_RND_MAX ((mpfr_rnd_t)((MPFR_RNDA)+1))
+
+/* We want to test this :
+ * (rnd == MPFR_RNDU && test) || (rnd == RNDD && !test)
+ * ie it transforms RNDU or RNDD to Away or Zero according to the sign */
+#define MPFR_IS_RNDUTEST_OR_RNDDNOTTEST(rnd, test) \
+ (((rnd) + (test)) == MPFR_RNDD)
+
+/* We want to test if rnd = Zero, or Away.
+ 'test' is 1 if negative, and 0 if positive. */
+#define MPFR_IS_LIKE_RNDZ(rnd, test) \
+ ((rnd==MPFR_RNDZ) || MPFR_IS_RNDUTEST_OR_RNDDNOTTEST (rnd, test))
+
+#define MPFR_IS_LIKE_RNDU(rnd, sign) \
+ ((rnd==MPFR_RNDU) || (rnd==MPFR_RNDZ && sign<0) || (rnd==MPFR_RNDA && sign>0))
+
+#define MPFR_IS_LIKE_RNDD(rnd, sign) \
+ ((rnd==MPFR_RNDD) || (rnd==MPFR_RNDZ && sign>0) || (rnd==MPFR_RNDA && sign<0))
+
+/* Invert a rounding mode, RNDZ and RNDA are unchanged */
+#define MPFR_INVERT_RND(rnd) ((rnd == MPFR_RNDU) ? MPFR_RNDD : \
+ ((rnd == MPFR_RNDD) ? MPFR_RNDU : rnd))
+
+/* Transform RNDU and RNDD to RNDZ according to test */
+#define MPFR_UPDATE_RND_MODE(rnd, test) \
+ do { \
+ if (MPFR_UNLIKELY(MPFR_IS_RNDUTEST_OR_RNDDNOTTEST(rnd, test))) \
+ rnd = MPFR_RNDZ; \
+ } while (0)
+
+/* Transform RNDU and RNDD to RNDZ or RNDA according to sign,
+ leave the other modes unchanged */
+#define MPFR_UPDATE2_RND_MODE(rnd, sign) \
+ do { \
+ if (rnd == MPFR_RNDU) \
+ rnd = (sign > 0) ? MPFR_RNDA : MPFR_RNDZ; \
+ else if (rnd == MPFR_RNDD) \
+ rnd = (sign < 0) ? MPFR_RNDA : MPFR_RNDZ; \
+ } while (0)
+
+
+/******************************************************
+ ******************* Limb Macros **********************
+ ******************************************************/
+
+ /* Definition of MPFR_LIMB_HIGHBIT */
+#if defined(GMP_LIMB_HIGHBIT)
+# define MPFR_LIMB_HIGHBIT GMP_LIMB_HIGHBIT
+#elif defined(MP_LIMB_T_HIGHBIT)
+# define MPFR_LIMB_HIGHBIT MP_LIMB_T_HIGHBIT
+#else
+# error "Neither GMP_LIMB_HIGHBIT nor MP_LIMB_T_HIGHBIT defined in GMP"
+#endif
+
+/* Mask to get the Most Significant Bit of a limb */
+#define MPFR_LIMB_MSB(l) ((l)&MPFR_LIMB_HIGHBIT)
+
+/* Definition of MPFR_LIMB_ONE & MPFR_LIMB_ZERO*/
+#ifdef CNST_LIMB
+# define MPFR_LIMB_ONE CNST_LIMB(1)
+# define MPFR_LIMB_ZERO CNST_LIMB(0)
+#else
+# define MPFR_LIMB_ONE ((mp_limb_t) 1L)
+# define MPFR_LIMB_ZERO ((mp_limb_t) 0L)
+#endif
+
+/* Mask for the low 's' bits of a limb */
+#define MPFR_LIMB_MASK(s) ((MPFR_LIMB_ONE<<(s))-MPFR_LIMB_ONE)
+
+
+
+/******************************************************
+ ********************** Memory ************************
+ ******************************************************/
+
+/* Heap memory management */
+typedef union { mp_size_t s; mp_limb_t l; } mpfr_size_limb_t;
+#define MPFR_GET_ALLOC_SIZE(x) \
+ ( ((mp_size_t*) MPFR_MANT(x))[-1] + 0)
+#define MPFR_SET_ALLOC_SIZE(x, n) \
+ ( ((mp_size_t*) MPFR_MANT(x))[-1] = n)
+#define MPFR_MALLOC_SIZE(s) \
+ ( sizeof(mpfr_size_limb_t) + BYTES_PER_MP_LIMB * ((size_t) s) )
+#define MPFR_SET_MANT_PTR(x,p) \
+ (MPFR_MANT(x) = (mp_limb_t*) ((mpfr_size_limb_t*) p + 1))
+#define MPFR_GET_REAL_PTR(x) \
+ ((mp_limb_t*) ((mpfr_size_limb_t*) MPFR_MANT(x) - 1))
+
+/* Temporary memory management */
+#ifndef TMP_SALLOC
+/* GMP 4.1.x or below or internals */
+#define MPFR_TMP_DECL TMP_DECL
+#define MPFR_TMP_MARK TMP_MARK
+#define MPFR_TMP_ALLOC TMP_ALLOC
+#define MPFR_TMP_FREE TMP_FREE
+#else
+#define MPFR_TMP_DECL(x) TMP_DECL
+#define MPFR_TMP_MARK(x) TMP_MARK
+#define MPFR_TMP_ALLOC(s) TMP_ALLOC(s)
+#define MPFR_TMP_FREE(x) TMP_FREE
+#endif
+
+/* This code is experimental: don't use it */
+#ifdef MPFR_USE_OWN_MPFR_TMP_ALLOC
+extern unsigned char *mpfr_stack;
+#undef MPFR_TMP_DECL
+#undef MPFR_TMP_MARK
+#undef MPFR_TMP_ALLOC
+#undef MPFR_TMP_FREE
+#define MPFR_TMP_DECL(_x) unsigned char *(_x)
+#define MPFR_TMP_MARK(_x) ((_x) = mpfr_stack)
+#define MPFR_TMP_ALLOC(_s) (mpfr_stack += (_s), mpfr_stack - (_s))
+#define MPFR_TMP_FREE(_x) (mpfr_stack = (_x))
+#endif
+
+/* temporary allocate 1 limb at xp, and initialize mpfr variable x */
+/* The temporary var doesn't have any size field, but it doesn't matter
+ * since only functions dealing with the Heap care about it */
+#define MPFR_TMP_INIT1(xp, x, p) \
+ ( MPFR_PREC(x) = (p), \
+ MPFR_MANT(x) = (xp), \
+ MPFR_SET_POS(x), \
+ MPFR_SET_INVALID_EXP(x))
+
+#define MPFR_TMP_INIT(xp, x, p, s) \
+ (xp = (mp_ptr) MPFR_TMP_ALLOC(BYTES_PER_MP_LIMB * ((size_t) s)), \
+ MPFR_TMP_INIT1(xp, x, p))
+
+#define MPFR_TMP_INIT_ABS(d, s) \
+ ( MPFR_PREC(d) = MPFR_PREC(s), \
+ MPFR_MANT(d) = MPFR_MANT(s), \
+ MPFR_SET_POS(d), \
+ MPFR_EXP(d) = MPFR_EXP(s))
+
+
+
+/******************************************************
+ ***************** Cache macros **********************
+ ******************************************************/
+
+#define mpfr_const_pi(_d,_r) mpfr_cache(_d, __gmpfr_cache_const_pi,_r)
+#define mpfr_const_log2(_d,_r) mpfr_cache(_d, __gmpfr_cache_const_log2, _r)
+#define mpfr_const_euler(_d,_r) mpfr_cache(_d, __gmpfr_cache_const_euler, _r)
+#define mpfr_const_catalan(_d,_r) mpfr_cache(_d,__gmpfr_cache_const_catalan,_r)
+
+#define MPFR_DECL_INIT_CACHE(_cache,_func) \
+ mpfr_cache_t MPFR_THREAD_ATTR _cache = \
+ {{{{0,MPFR_SIGN_POS,0,(mp_limb_t*)0}},0,_func}}
+
+
+
+/******************************************************
+ ******************* Threshold ***********************
+ ******************************************************/
+
+#include "mparam.h"
+
+/******************************************************
+ ***************** Useful macros *********************
+ ******************************************************/
+
+/* These macros help the compiler determine whether a test is
+ * likely or unlikely to succeed. */
+#if __MPFR_GNUC(3,0) || __MPFR_ICC(8,1,0)
+# define MPFR_LIKELY(x) (__builtin_expect(!!(x),1))
+# define MPFR_UNLIKELY(x) (__builtin_expect((x),0))
+#else
+# define MPFR_LIKELY(x) (x)
+# define MPFR_UNLIKELY(x) (x)
+#endif
+
+/* Declare that some variable is initialized before being used (without a
+ dummy initialization) in order to avoid some compiler warnings. Use the
+ VAR = VAR trick (see http://gcc.gnu.org/bugzilla/show_bug.cgi?id=36296)
+ only with gcc as this is undefined behavior, and we don't know what
+ other compilers do (they may also be smarter). This trick could be
+ disabled with future gcc versions. */
+#if defined(__GNUC__)
+# define INITIALIZED(VAR) VAR = VAR
+#else
+# define INITIALIZED(VAR) VAR
+#endif
+
+/* Ceil log 2: If GCC, uses a GCC extension, otherwise calls a function */
+/* Warning:
+ * Needs to define MPFR_NEED_LONGLONG.
+ * Computes ceil(log2(x)) only for x integer (unsigned long)
+ * Undefined if x is 0 */
+#if __MPFR_GNUC(2,95) || __MPFR_ICC(8,1,0)
+# define MPFR_INT_CEIL_LOG2(x) \
+ (MPFR_UNLIKELY ((x) == 1) ? 0 : \
+ __extension__ ({ int _b; mp_limb_t _limb; \
+ MPFR_ASSERTN ((x) > 1); \
+ _limb = (x) - 1; \
+ MPFR_ASSERTN (_limb == (x) - 1); \
+ count_leading_zeros (_b, _limb); \
+ (GMP_NUMB_BITS - _b); }))
+#else
+# define MPFR_INT_CEIL_LOG2(x) (__gmpfr_int_ceil_log2(x))
+#endif
+
/* Add two unsigned integers: c = a + b, executing ACTION_IF_OVERFLOW
   when the addition wraps around.  For unsigned operands the wrapped
   sum is smaller than either operand, hence the (c) < (a) test. */
#define MPFR_UADD_OVERFLOW(c,a,b,ACTION_IF_OVERFLOW)                  \
  do {                                                                \
    (c) = (a) + (b);                                                  \
    if ((c) < (a)) ACTION_IF_OVERFLOW;                                \
  } while (0)

/* Add two signed integers with overflow handling.  STYPE/UTYPE are a
   signed type and its unsigned counterpart, MIN/MAX the bounds of
   STYPE.  The sum is computed in the unsigned type, where overflow is
   well defined, and converted back afterwards.
   Example: MPFR_SADD_OVERFLOW (c, a, b, long, unsigned long,
                                LONG_MIN, LONG_MAX,
                                goto overflow, goto underflow);
   Fixed: the a and b arguments are parenthesized inside the casts, so
   that expression arguments are converted as a whole.  Previously
   "-(UTYPE) a" with a == x - 1 parsed as "(-(UTYPE) x) - 1", giving a
   wrong magnitude in the negative branch. */
#define MPFR_SADD_OVERFLOW(c,a,b,STYPE,UTYPE,MIN,MAX,ACTION_IF_POS_OVERFLOW,ACTION_IF_NEG_OVERFLOW) \
  do {                                                                \
    if ((a) >= 0 && (b) >= 0) {                                       \
      UTYPE uc,ua,ub;                                                 \
      ua = (UTYPE) (a); ub = (UTYPE) (b);                             \
      MPFR_UADD_OVERFLOW (uc, ua, ub, ACTION_IF_POS_OVERFLOW);        \
      if (uc > (UTYPE)(MAX)) ACTION_IF_POS_OVERFLOW;                  \
      else (c) = (STYPE) uc;                                          \
    } else if ((a) < 0 && (b) < 0) {                                  \
      UTYPE uc,ua,ub;                                                 \
      ua = -(UTYPE) (a); ub = -(UTYPE) (b);                           \
      MPFR_UADD_OVERFLOW (uc, ua, ub, ACTION_IF_NEG_OVERFLOW);        \
      if (uc >= -(UTYPE)(MIN) || uc > (UTYPE)(MAX)) {                 \
        if (uc ==  -(UTYPE)(MIN)) (c) = (MIN);                        \
        else  ACTION_IF_NEG_OVERFLOW; }                               \
      else (c) = -(STYPE) uc;                                         \
    } else (c) = (a) + (b);                                           \
  } while (0)
+
+
+/* Set a number to 1 (Fast) - It doesn't check if 1 is in the exponent range */
+#define MPFR_SET_ONE(x) \
+do { \
+ mp_size_t _size = MPFR_LIMB_SIZE(x) - 1; \
+ MPFR_SET_POS(x); \
+ MPFR_EXP(x) = 1; \
+ MPN_ZERO ( MPFR_MANT(x), _size); \
+ MPFR_MANT(x)[_size] = MPFR_LIMB_HIGHBIT; \
+} while (0)
+
+/* Compute s = (-a) % GMP_NUMB_BITS as unsigned, i.e. the number of
+   bits needed to pad a bits up to a whole limb (0 when a is already a
+   multiple of GMP_NUMB_BITS).  When GMP_NUMB_BITS is a power of two,
+   unsigned negation already reduces modulo a power of two, so a single
+   remainder suffices; IS_POW2 is applied to a constant, so the
+   compiler can discard the dead branch. */
+#define MPFR_UNSIGNED_MINUS_MODULO(s, a)                              \
+  do                                                                  \
+    {                                                                 \
+      if (IS_POW2 (GMP_NUMB_BITS))                                    \
+        (s) = (- (unsigned int) (a)) % GMP_NUMB_BITS;                 \
+      else                                                            \
+        {                                                             \
+          (s) = (a) % GMP_NUMB_BITS;                                  \
+          if ((s) != 0)                                               \
+            (s) = GMP_NUMB_BITS - (s);                                \
+        }                                                             \
+      MPFR_ASSERTD ((s) >= 0 && (s) < GMP_NUMB_BITS);                 \
+    }                                                                 \
+  while (0)
+
+/* Use it only for debug reasons */
+/* MPFR_TRACE (operation) : execute operation iff DEBUG flag is set */
+/* MPFR_DUMP (x) : print x (a mpfr_t) on stdout */
+#ifdef DEBUG
+# define MPFR_TRACE(x) x
+#else
+# define MPFR_TRACE(x) (void) 0
+#endif
+#define MPFR_DUMP(x) ( printf(#x"="), mpfr_dump(x) )
+
+/* Test if X (positive) is a power of 2.
+   Note: X == 0 would (incorrectly) satisfy IS_POW2, hence the
+   requirement that X be positive. */
+#define IS_POW2(X) (((X) & ((X) - 1)) == 0)
+#define NOT_POW2(X) (((X) & ((X) - 1)) != 0)
+
+/* Safe absolute value (to avoid possible integer overflow) */
+/* type is the target (unsigned) type: the value is converted to that
+   unsigned type *before* negation, so that the most negative value of
+   a signed type is handled without signed overflow. */
+#define SAFE_ABS(type,x) ((x) >= 0 ? (type)(x) : -(type)(x))
+
+#define mpfr_get_d1(x) mpfr_get_d(x,__gmpfr_default_rounding_mode)
+
+/* Store in r the size in bits of the mpz_t z */
+#define MPFR_MPZ_SIZEINBASE2(r, z) \
+ do { \
+ int _cnt; \
+ mp_size_t _size; \
+ MPFR_ASSERTD (mpz_sgn (z) != 0); \
+ _size = ABSIZ(z); \
+ count_leading_zeros (_cnt, PTR(z)[_size-1]); \
+ (r) = _size * GMP_NUMB_BITS - _cnt; \
+ } while (0)
+
+/* Needs <locale.h> */
+#ifdef HAVE_LOCALE_H
+#include <locale.h>
+/* Warning! In case of signed char, the value of MPFR_DECIMAL_POINT may
+ be negative (the ISO C99 does not seem to forbid negative values). */
+#define MPFR_DECIMAL_POINT (localeconv()->decimal_point[0])
+#define MPFR_THOUSANDS_SEPARATOR (localeconv()->thousands_sep[0])
+#else
+#define MPFR_DECIMAL_POINT ((char) '.')
+#define MPFR_THOUSANDS_SEPARATOR ('\0')
+#endif
+
+
+/* Set y to s*significand(x)*2^e, for example MPFR_ALIAS(y,x,1,MPFR_EXP(x))
+ sets y to |x|, and MPFR_ALIAS(y,x,MPFR_SIGN(x),0) sets y to x*2^f such
+ that 1/2 <= |y| < 1. Does not check y is in the valid exponent range.
+ WARNING! x and y share the same mantissa. So, some operations are
+ not valid if x has been provided via an argument, e.g., trying to
+ modify the mantissa of y, even temporarily, or calling mpfr_clear on y.
+*/
+#define MPFR_ALIAS(y,x,s,e) \
+ do \
+ { \
+ MPFR_PREC(y) = MPFR_PREC(x); \
+ MPFR_SIGN(y) = (s); \
+ MPFR_EXP(y) = (e); \
+ MPFR_MANT(y) = MPFR_MANT(x); \
+ } while (0)
+
+
+/******************************************************
+ ************** Save exponent macros ****************
+ ******************************************************/
+
+/* See README.dev for details on how to use the macros.
+ They are used to set the exponent range to the maximum
+ temporarily */
+
+/* Saved environment: exception flags and exponent range. */
+typedef struct {
+  unsigned int saved_flags;
+  mpfr_exp_t saved_emin;
+  mpfr_exp_t saved_emax;
+} mpfr_save_expo_t;
+
+#define MPFR_SAVE_EXPO_DECL(x) mpfr_save_expo_t x
+/* Save the current flags and exponent range in x, then extend the
+   exponent range to the widest supported one. */
+#define MPFR_SAVE_EXPO_MARK(x)     \
+ ((x).saved_flags = __gmpfr_flags, \
+  (x).saved_emin = __gmpfr_emin,   \
+  (x).saved_emax = __gmpfr_emax,   \
+  __gmpfr_emin = MPFR_EMIN_MIN,    \
+  __gmpfr_emax = MPFR_EMAX_MAX)
+/* Restore what MPFR_SAVE_EXPO_MARK saved. */
+#define MPFR_SAVE_EXPO_FREE(x)     \
+ (__gmpfr_flags = (x).saved_flags, \
+  __gmpfr_emin = (x).saved_emin,   \
+  __gmpfr_emax = (x).saved_emax)
+/* Add flags to the saved set so that they survive MPFR_SAVE_EXPO_FREE. */
+#define MPFR_SAVE_EXPO_UPDATE_FLAGS(x, flags)  \
+  (x).saved_flags |= (flags)
+
+/* Speed up final checking: when the exponent is already inside the
+   current range, only the inexact flag may need to be raised; otherwise
+   fall back to the real function.  The inner mpfr_check_range call is
+   NOT recursive macro expansion: a function-like macro is not
+   re-expanded within its own replacement list, so the call resolves to
+   the actual mpfr_check_range function. */
+#define mpfr_check_range(x,t,r) \
+ (MPFR_LIKELY (MPFR_EXP (x) >= __gmpfr_emin && MPFR_EXP (x) <= __gmpfr_emax) \
+  ? ((t) ? (__gmpfr_flags |= MPFR_FLAGS_INEXACT, (t)) : 0)                   \
+  : mpfr_check_range(x,t,r))
+
+
+/******************************************************
+ ***************** Inline Rounding *******************
+ ******************************************************/
+
+/*
+ * Note: due to the labels, one cannot use a macro MPFR_RNDRAW* more than
+ * once in a function (otherwise these labels would not be unique).
+ */
+
+/*
+ * Round mantissa (srcp, sprec) to mpfr_t dest using rounding mode rnd
+ * assuming dest's sign is sign.
+ * 'inexact' receives the ternary value: 0 if exact, sign if the value
+ * was rounded away from zero, -sign if it was truncated.
+ * In rounding to nearest mode, execute MIDDLE_HANDLER when the value
+ * is the middle of two consecutive numbers in dest precision.
+ * Execute OVERFLOW_HANDLER in case of overflow when rounding (i.e. when
+ * adding one ulp produces a carry out of the most significant limb).
+ */
+#define MPFR_RNDRAW_GEN(inexact, dest, srcp, sprec, rnd, sign, \
+                        MIDDLE_HANDLER, OVERFLOW_HANDLER) \
+  do { \
+    mp_size_t _dests, _srcs; \
+    mp_limb_t *_destp; \
+    mpfr_prec_t _destprec, _srcprec; \
+ \
+    /* Check Trivial Case when Dest Mantissa has more bits than source */ \
+    _srcprec = sprec; \
+    _destprec = MPFR_PREC (dest); \
+    _destp = MPFR_MANT (dest); \
+    if (MPFR_UNLIKELY (_destprec >= _srcprec)) \
+      { \
+        /* Copy the source limbs into the high part of dest and \
+           zero-fill the rest: the result is exact. */ \
+        _srcs = (_srcprec + GMP_NUMB_BITS-1)/GMP_NUMB_BITS; \
+        _dests = (_destprec + GMP_NUMB_BITS-1)/GMP_NUMB_BITS - _srcs; \
+        MPN_COPY (_destp + _dests, srcp, _srcs); \
+        MPN_ZERO (_destp, _dests); \
+        inexact = 0; \
+      } \
+    else \
+      { \
+        /* Non trivial case: rounding needed */ \
+        mpfr_prec_t _sh; \
+        mp_limb_t *_sp; \
+        mp_limb_t _rb, _sb, _ulp; \
+ \
+        /* Compute Position and shift */ \
+        _srcs = (_srcprec + GMP_NUMB_BITS-1)/GMP_NUMB_BITS; \
+        _dests = (_destprec + GMP_NUMB_BITS-1)/GMP_NUMB_BITS; \
+        MPFR_UNSIGNED_MINUS_MODULO (_sh, _destprec); \
+        /* Align _sp with _destp: _sp[0] is the source limb that holds \
+           dest's least significant limb. */ \
+        _sp = srcp + _srcs - _dests; \
+ \
+        /* General case when prec % GMP_NUMB_BITS != 0 */ \
+        if (MPFR_LIKELY (_sh != 0)) \
+          { \
+            mp_limb_t _mask; \
+            /* Compute Rounding Bit and Sticky Bit */ \
+            _mask = MPFR_LIMB_ONE << (_sh - 1); \
+            _rb = _sp[0] & _mask; \
+            _sb = _sp[0] & (_mask - 1); \
+            if (MPFR_UNLIKELY (_sb == 0)) \
+              { /* TODO: Improve it (scan of the remaining low limbs) */ \
+                mp_limb_t *_tmp; \
+                mp_size_t _n; \
+                for (_tmp = _sp, _n = _srcs - _dests ; \
+                     _n != 0 && _sb == 0 ; _n--) \
+                  _sb = *--_tmp; \
+              } \
+            _ulp = 2 * _mask; \
+          } \
+        else /* _sh == 0 */ \
+          { \
+            MPFR_ASSERTD (_dests < _srcs); \
+            /* Compute Rounding Bit and Sticky Bit */ \
+            _rb = _sp[-1] & MPFR_LIMB_HIGHBIT; \
+            _sb = _sp[-1] & (MPFR_LIMB_HIGHBIT-1); \
+            if (MPFR_UNLIKELY (_sb == 0)) \
+              { \
+                mp_limb_t *_tmp; \
+                mp_size_t _n; \
+                for (_tmp = _sp - 1, _n = _srcs - _dests - 1 ; \
+                     _n != 0 && _sb == 0 ; _n--) \
+                  _sb = *--_tmp; \
+              } \
+            _ulp = MPFR_LIMB_ONE; \
+          } \
+        /* Rounding */ \
+        if (MPFR_LIKELY (rnd == MPFR_RNDN)) \
+          { \
+            if (_rb == 0) \
+              { \
+              trunc: \
+                inexact = MPFR_LIKELY ((_sb | _rb) != 0) ? -sign : 0; \
+              trunc_doit: \
+                MPN_COPY (_destp, _sp, _dests); \
+                _destp[0] &= ~(_ulp - 1); \
+              } \
+            else if (MPFR_UNLIKELY (_sb == 0)) \
+              { /* Middle of two consecutive representable numbers */ \
+                MIDDLE_HANDLER; \
+              } \
+            else \
+              { \
+                if (0) \
+                  goto addoneulp_doit; /* dummy code to avoid warning */ \
+              addoneulp: \
+                inexact = sign; \
+              addoneulp_doit: \
+                if (MPFR_UNLIKELY (mpn_add_1 (_destp, _sp, _dests, _ulp))) \
+                  { \
+                    _destp[_dests - 1] = MPFR_LIMB_HIGHBIT; \
+                    OVERFLOW_HANDLER; \
+                  } \
+                _destp[0] &= ~(_ulp - 1); \
+              } \
+          } \
+        else \
+          { /* Directed rounding mode */ \
+            if (MPFR_LIKELY (MPFR_IS_LIKE_RNDZ (rnd, \
+                                                MPFR_IS_NEG_SIGN (sign)))) \
+              goto trunc; \
+            else if (MPFR_UNLIKELY ((_sb | _rb) == 0)) \
+              { \
+                inexact = 0; \
+                goto trunc_doit; \
+              } \
+            else \
+              goto addoneulp; \
+          } \
+      } \
+  } while (0)
+
+/*
+ * Round mantissa (srcp, sprec) to mpfr_t dest using rounding mode rnd
+ * assuming dest's sign is sign.
+ * Halfway cases in MPFR_RNDN are rounded to even: if the last kept bit
+ * (_sp[0] & _ulp) is already 0 the value is truncated, otherwise one
+ * ulp is added.
+ * Execute OVERFLOW_HANDLER in case of overflow when rounding.
+ */
+#define MPFR_RNDRAW(inexact, dest, srcp, sprec, rnd, sign, OVERFLOW_HANDLER) \
+  MPFR_RNDRAW_GEN (inexact, dest, srcp, sprec, rnd, sign, \
+                   if ((_sp[0] & _ulp) == 0) \
+                     { \
+                       inexact = -sign; \
+                       goto trunc_doit; \
+                     } \
+                   else \
+                     goto addoneulp; \
+                   , OVERFLOW_HANDLER)
+
+/*
+ * Round mantissa (srcp, sprec) to mpfr_t dest using rounding mode rnd
+ * assuming dest's sign is sign.
+ * Execute OVERFLOW_HANDLER in case of overflow when rounding.
+ * Same round-to-even rule as MPFR_RNDRAW, but in the halfway case
+ * inexact is set to +/- MPFR_EVEN_INEX (instead of +/- 1) so that the
+ * caller can detect that even rounding took place.
+ */
+#define MPFR_RNDRAW_EVEN(inexact, dest, srcp, sprec, rnd, sign, \
+                         OVERFLOW_HANDLER) \
+  MPFR_RNDRAW_GEN (inexact, dest, srcp, sprec, rnd, sign, \
+                   if ((_sp[0] & _ulp) == 0) \
+                     { \
+                       inexact = -MPFR_EVEN_INEX * sign; \
+                       goto trunc_doit; \
+                     } \
+                   else \
+                     { \
+                       inexact = MPFR_EVEN_INEX * sign; \
+                       goto addoneulp_doit; \
+                     } \
+                   , OVERFLOW_HANDLER)
+
+/* Return TRUE if b is non singular and we can round it to precision 'prec'
+   and determine the ternary value, with rounding mode 'rnd', and with
+   error at most 'err' (an error bound expressed as an exponent, as used
+   by mpfr_round_p -- TODO confirm the exact convention there).
+   One extra bit of precision is requested for MPFR_RNDN so that the
+   ternary value can still be determined in halfway cases. */
+#define MPFR_CAN_ROUND(b,err,prec,rnd) \
+ (!MPFR_IS_SINGULAR (b) && mpfr_round_p (MPFR_MANT (b), MPFR_LIMB_SIZE (b), \
+                                         (err), (prec) + ((rnd)==MPFR_RNDN)))
+
+/* TODO: fix this description (see round_near_x.c). */
+/* Assuming that the function has a Taylor expansion which looks like:
+ y=o(f(x)) = o(v + g(x)) with |g(x)| <= 2^(EXP(v)-err)
+ we can quickly set y to v if x is small (ie err > prec(y)+1) in most
+ cases. It assumes that f(x) is not representable exactly as a FP number.
+ v must not be a singular value (NAN, INF or ZERO); usual values are
+ v=1 or v=x.
+
+ y is the destination (a mpfr_t), v the value to set (a mpfr_t),
+ err1+err2 with err2 <= 3 the error term (mpfr_exp_t's), dir (an int) is
+ the direction of the committed error (if dir = 0, it rounds toward 0,
+ if dir=1, it rounds away from 0), rnd the rounding mode.
+
+ It returns from the function a ternary value in case of success.
+ If you need to free resources before returning, put the cleanup code
+ in the "extra" argument accordingly; otherwise leave it empty.
+
+ The test is less restrictive than necessary, but the function
+ will finish the check itself.
+
+ Note: err1 + err2 is allowed to overflow as mpfr_exp_t, but it must give
+ its real value as mpfr_uexp_t.
+*/
+/* See the long comment above.  Beware: on success this macro executes a
+   'return' from the ENCLOSING FUNCTION with the ternary value. */
+#define MPFR_FAST_COMPUTE_IF_SMALL_INPUT(y,v,err1,err2,dir,rnd,extra) \
+  do { \
+    mpfr_ptr _y = (y); \
+    mpfr_exp_t _err1 = (err1); \
+    mpfr_exp_t _err2 = (err2); \
+    if (_err1 > 0) \
+      { \
+        /* unsigned sum: see note above on overflow of err1 + err2 */ \
+        mpfr_uexp_t _err = (mpfr_uexp_t) _err1 + _err2; \
+        if (MPFR_UNLIKELY (_err > MPFR_PREC (_y) + 1)) \
+          { \
+            int _inexact = mpfr_round_near_x (_y,(v),_err,(dir),(rnd)); \
+            if (_inexact != 0) \
+              { \
+                extra; \
+                return _inexact; \
+              } \
+          } \
+      } \
+  } while (0)
+
+/* Variant, to be called somewhere after MPFR_SAVE_EXPO_MARK.  This variant
+   is needed when there are some computations before or when some non-zero
+   real constant is used, such as __gmpfr_one for mpfr_cos.  On success it
+   restores the saved exponent range/flags and returns from the enclosing
+   function via mpfr_check_range. */
+#define MPFR_SMALL_INPUT_AFTER_SAVE_EXPO(y,v,err1,err2,dir,rnd,expo,extra) \
+  do { \
+    mpfr_ptr _y = (y); \
+    mpfr_exp_t _err1 = (err1); \
+    mpfr_exp_t _err2 = (err2); \
+    if (_err1 > 0) \
+      { \
+        /* unsigned sum: see overflow note before the previous macro */ \
+        mpfr_uexp_t _err = (mpfr_uexp_t) _err1 + _err2; \
+        if (MPFR_UNLIKELY (_err > MPFR_PREC (_y) + 1)) \
+          { \
+            int _inexact; \
+            mpfr_clear_flags (); \
+            _inexact = mpfr_round_near_x (_y,(v),_err,(dir),(rnd)); \
+            if (_inexact != 0) \
+              { \
+                extra; \
+                MPFR_SAVE_EXPO_UPDATE_FLAGS (expo, __gmpfr_flags); \
+                MPFR_SAVE_EXPO_FREE (expo); \
+                return mpfr_check_range (_y, _inexact, (rnd)); \
+              } \
+          } \
+      } \
+  } while (0)
+
+/******************************************************
+ *************** Ziv Loop Macro *********************
+ ******************************************************/
+
+#ifndef MPFR_USE_LOGGING
+
+/* Ziv loop bookkeeping (no logging): _x holds the number of extra guard
+   bits.  The working precision _p starts with GMP_NUMB_BITS guard bits
+   (MPFR_ZIV_INIT) and then grows by about 50% at each failed iteration
+   (MPFR_ZIV_NEXT). */
+#define MPFR_ZIV_DECL(_x) mpfr_prec_t _x
+#define MPFR_ZIV_INIT(_x, _p) (_x) = GMP_NUMB_BITS
+#define MPFR_ZIV_NEXT(_x, _p) ((_p) += (_x), (_x) = (_p)/2)
+#define MPFR_ZIV_FREE(x)
+
+#else
+
+/* The following test on glibc is there mainly for Darwin (Mac OS X), to
+ obtain a better error message. The real test should have been a test
+ concerning nested functions in gcc, which are disabled by default on
+ Darwin; but it is not possible to do that without a configure test. */
+# if defined (__cplusplus) || !(__MPFR_GNUC(3,0) && __MPFR_GLIBC(2,0))
+# error "Logging not supported (needs gcc >= 3.0 and GNU C Library >= 2.0)."
+# endif
+
+/* Use LOGGING */
+
+/* Ziv loop bookkeeping with logging: besides the guard-bit count _x,
+   track the iteration count of the current call (_x_cpt), and, across
+   all calls of the enclosing function, the total number of calls
+   (_x_loop) and of "bad cases" that needed at least one extra iteration
+   (_x_bad).  A gcc nested function with the destructor attribute prints
+   the statistics at program exit.
+   Fixed: the last two token pastes used the literal token 'x' instead of
+   the macro parameter '_x', so every expansion defined the same
+   identifier 'x_f' (colliding if the macro were used twice in a scope
+   and capturing any user identifier 'x_f'). */
+#define MPFR_ZIV_DECL(_x) \
+  mpfr_prec_t _x; \
+  int _x ## _cpt = 1; \
+  static unsigned long _x ## _loop = 0, _x ## _bad = 0; \
+  static const char *_x ## _fname = __func__; \
+  auto void __attribute__ ((destructor)) _x ## _f (void); \
+  void __attribute__ ((destructor)) _x ## _f (void) { \
+   if (_x ## _loop != 0 && MPFR_LOG_STAT_F&mpfr_log_type) \
+     fprintf (mpfr_log_file, \
+   "%s: Ziv failed %2.2f%% (%lu bad cases / %lu calls)\n", _x ## _fname, \
+       (double) 100.0 * _x ## _bad / _x ## _loop, _x ## _bad, _x ## _loop ); }
+
+/* Start a Ziv iteration: GMP_NUMB_BITS guard bits, count the call, and
+   optionally log the first working precision. */
+#define MPFR_ZIV_INIT(_x, _p) ((_x) = GMP_NUMB_BITS, _x ## _loop ++); \
+  if (MPFR_LOG_BADCASE_F&mpfr_log_type && mpfr_log_current<=mpfr_log_level) \
+   fprintf (mpfr_log_file, "%s:ZIV 1st prec=%lu\n", __func__, \
+            (unsigned long) (_p))
+
+/* Grow the working precision by ~50%; the first failure of a call marks
+   it as a "bad case" (_x_bad is incremented only when _x_cpt == 1). */
+#define MPFR_ZIV_NEXT(_x, _p) \
+ ((_p)+=(_x),(_x)=(_p)/2, _x ## _bad += (_x ## _cpt == 1), _x ## _cpt ++); \
+ if (MPFR_LOG_BADCASE_F&mpfr_log_type && mpfr_log_current<=mpfr_log_level) \
+  fprintf (mpfr_log_file, "%s:ZIV new prec=%lu\n", __func__, \
+           (unsigned long) (_p))
+
+/* Log the number of iterations if the loop ran more than once. */
+#define MPFR_ZIV_FREE(_x) \
+ if (MPFR_LOG_BADCASE_F&mpfr_log_type && _x##_cpt>1 \
+     && mpfr_log_current<=mpfr_log_level) \
+  fprintf (mpfr_log_file, "%s:ZIV %d loops\n", __func__, _x ## _cpt)
+
+#endif
+
+
+/******************************************************
+ *************** Logging Macros *********************
+ ******************************************************/
+
+/* The different kind of LOG */
+#define MPFR_LOG_INPUT_F 1
+#define MPFR_LOG_OUTPUT_F 2
+#define MPFR_LOG_INTERNAL_F 4
+#define MPFR_LOG_TIME_F 8
+#define MPFR_LOG_BADCASE_F 16
+#define MPFR_LOG_MSG_F 32
+#define MPFR_LOG_STAT_F 64
+
+#ifdef MPFR_USE_LOGGING
+
+/* Check if we can support this feature */
+# ifdef MPFR_USE_THREAD_SAFE
+# error "Enable either `Logging' or `thread-safe', not both"
+# endif
+# if !__MPFR_GNUC(3,0)
+# error "Logging not supported (GCC >= 3.0)"
+# endif
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+__MPFR_DECLSPEC extern FILE *mpfr_log_file;
+__MPFR_DECLSPEC extern int mpfr_log_type;
+__MPFR_DECLSPEC extern int mpfr_log_level;
+__MPFR_DECLSPEC extern int mpfr_log_current;
+__MPFR_DECLSPEC extern int mpfr_log_base;
+__MPFR_DECLSPEC extern mpfr_prec_t mpfr_log_prec;
+
+#if defined (__cplusplus)
+ }
+#endif
+
+/* NOTE(review): the MPFR_LOG_* macros below expand to a bare if-statement;
+   do not use them as the body of an unbraced if/else in callers.
+   NOTE(review): "%R"/"%#R" are not standard fprintf conversions --
+   presumably handled by a custom formatter; confirm before relying on it. */
+#define MPFR_LOG_VAR(x) \
+ if((MPFR_LOG_INTERNAL_F&mpfr_log_type)&&(mpfr_log_current<=mpfr_log_level))\
+  fprintf (mpfr_log_file, "%s.%d:%s[%#R]=%R\n", __func__,__LINE__, #x, x, x);
+
+/* MPFR_LOG_MSG takes a double-parenthesized argument list:
+   MPFR_LOG_MSG (("fmt", args...)) -- the outer parentheses are stripped
+   by the MPFR_LOG_MSG2 indirection. */
+#define MPFR_LOG_MSG2(format, ...) \
+ if ((MPFR_LOG_MSG_F&mpfr_log_type)&&(mpfr_log_current<=mpfr_log_level)) \
+  fprintf (mpfr_log_file, "%s.%d: "format, __func__, __LINE__, __VA_ARGS__);
+#define MPFR_LOG_MSG(x) MPFR_LOG_MSG2 x
+
+/* Log function entry and start the cpu-time counter. */
+#define MPFR_LOG_BEGIN2(format, ...) \
+ mpfr_log_current ++; \
+ if ((MPFR_LOG_INPUT_F&mpfr_log_type)&&(mpfr_log_current<=mpfr_log_level)) \
+  fprintf (mpfr_log_file, "%s:IN "format"\n",__func__,__VA_ARGS__); \
+ if ((MPFR_LOG_TIME_F&mpfr_log_type)&&(mpfr_log_current<=mpfr_log_level)) \
+  __gmpfr_log_time = mpfr_get_cputime ();
+#define MPFR_LOG_BEGIN(x) \
+ int __gmpfr_log_time = 0; \
+ MPFR_LOG_BEGIN2 x
+
+/* Log elapsed time and function output. */
+#define MPFR_LOG_END2(format, ...) \
+ if ((MPFR_LOG_TIME_F&mpfr_log_type)&&(mpfr_log_current<=mpfr_log_level)) \
+  fprintf (mpfr_log_file, "%s:TIM %dms\n", __mpfr_log_fname, \
+           mpfr_get_cputime () - __gmpfr_log_time); \
+ if ((MPFR_LOG_OUTPUT_F&mpfr_log_type)&&(mpfr_log_current<=mpfr_log_level)) \
+  fprintf (mpfr_log_file, "%s:OUT "format"\n",__mpfr_log_fname,__VA_ARGS__);\
+ mpfr_log_current --;
+#define MPFR_LOG_END(x) \
+ static const char *__mpfr_log_fname = __func__; \
+ MPFR_LOG_END2 x
+
+/* Log both entry and every exit of the function: a gcc nested function
+   attached via the 'cleanup' attribute runs MPFR_LOG_END2 on all return
+   paths. */
+#define MPFR_LOG_FUNC(begin,end) \
+ static const char *__mpfr_log_fname = __func__; \
+ auto void __mpfr_log_cleanup (int *time); \
+ void __mpfr_log_cleanup (int *time) { \
+   int __gmpfr_log_time = *time; \
+   MPFR_LOG_END2 end; } \
+ int __gmpfr_log_time __attribute__ ((cleanup (__mpfr_log_cleanup))); \
+ __gmpfr_log_time = 0; \
+ MPFR_LOG_BEGIN2 begin
+
+#else /* MPFR_USE_LOGGING */
+
+/* Define void macro for logging */
+
+#define MPFR_LOG_VAR(x)
+#define MPFR_LOG_BEGIN(x)
+#define MPFR_LOG_END(x)
+#define MPFR_LOG_MSG(x)
+#define MPFR_LOG_FUNC(x,y)
+
+#endif /* MPFR_USE_LOGGING */
+
+
+/**************************************************************
+ ************ Group Initialize Functions Macros *************
+ **************************************************************/
+
+#ifndef MPFR_GROUP_STATIC_SIZE
+# define MPFR_GROUP_STATIC_SIZE 16
+#endif
+
+/* A group of mpfr_t sharing one mantissa allocation: 'mant' points to the
+   limbs -- the static buffer 'tab' when everything fits, otherwise heap
+   memory of 'alloc' bytes (alloc == 0 means the static buffer is used). */
+struct mpfr_group_t {
+  size_t alloc;   /* heap size in bytes; 0 when tab is used */
+  mp_limb_t *mant;   /* limbs of all numbers of the group */
+  mp_limb_t tab[MPFR_GROUP_STATIC_SIZE];   /* static storage, small precs */
+};
+
+/* Free the group's mantissa storage; a no-op when the static buffer was
+   in use (alloc == 0). */
+#define MPFR_GROUP_DECL(g) struct mpfr_group_t g
+#define MPFR_GROUP_CLEAR(g) do { \
+  MPFR_LOG_MSG (("GROUP_CLEAR: ptr = 0x%lX, size = %lu\n", \
+                 (unsigned long) (g).mant, \
+                 (unsigned long) (g).alloc)); \
+  if (MPFR_UNLIKELY ((g).alloc != 0)) { \
+    MPFR_ASSERTD ((g).mant != (g).tab); \
+    (*__gmp_free_func) ((g).mant, (g).alloc); \
+  }} while (0)
+
+/* Initialize a group of 'num' numbers of precision 'prec': compute the
+   per-number limb count _size, choose static vs heap storage, then run
+   'handler' (a sequence of MPFR_GROUP_TINIT) to point each mpfr_t into
+   the shared buffer.  _prec and _size are deliberately left visible to
+   the handler. */
+#define MPFR_GROUP_INIT_TEMPLATE(g, prec, num, handler) do { \
+  mpfr_prec_t _prec = (prec); \
+  mp_size_t _size; \
+  MPFR_ASSERTD (_prec >= MPFR_PREC_MIN); \
+  if (MPFR_UNLIKELY (_prec > MPFR_PREC_MAX)) \
+    mpfr_abort_prec_max (); \
+  _size = (mpfr_prec_t) (_prec + GMP_NUMB_BITS - 1) / GMP_NUMB_BITS; \
+  if (MPFR_UNLIKELY (_size * (num) > MPFR_GROUP_STATIC_SIZE)) \
+    { \
+      (g).alloc = (num) * _size * sizeof (mp_limb_t); \
+      (g).mant = (mp_limb_t *) (*__gmp_allocate_func) ((g).alloc); \
+    } \
+  else \
+    { \
+      (g).alloc = 0; \
+      (g).mant = (g).tab; \
+    } \
+  MPFR_LOG_MSG (("GROUP_INIT: ptr = 0x%lX, size = %lu\n", \
+                 (unsigned long) (g).mant, (unsigned long) (g).alloc)); \
+  handler; \
+ } while (0)
+/* Attach the n-th slot of the group's buffer to the mpfr_t x. */
+#define MPFR_GROUP_TINIT(g, n, x) \
+  MPFR_TMP_INIT1 ((g).mant + _size * (n), x, _prec)
+
+/* Convenience wrappers: initialize a group of 1 to 6 numbers that share
+   the same precision. */
+#define MPFR_GROUP_INIT_1(g, prec, x) \
+ MPFR_GROUP_INIT_TEMPLATE(g, prec, 1, MPFR_GROUP_TINIT(g, 0, x))
+#define MPFR_GROUP_INIT_2(g, prec, x, y) \
+ MPFR_GROUP_INIT_TEMPLATE(g, prec, 2, \
+   MPFR_GROUP_TINIT(g, 0, x);MPFR_GROUP_TINIT(g, 1, y))
+#define MPFR_GROUP_INIT_3(g, prec, x, y, z) \
+ MPFR_GROUP_INIT_TEMPLATE(g, prec, 3, \
+   MPFR_GROUP_TINIT(g, 0, x);MPFR_GROUP_TINIT(g, 1, y); \
+   MPFR_GROUP_TINIT(g, 2, z))
+#define MPFR_GROUP_INIT_4(g, prec, x, y, z, t) \
+ MPFR_GROUP_INIT_TEMPLATE(g, prec, 4, \
+   MPFR_GROUP_TINIT(g, 0, x);MPFR_GROUP_TINIT(g, 1, y); \
+   MPFR_GROUP_TINIT(g, 2, z);MPFR_GROUP_TINIT(g, 3, t))
+#define MPFR_GROUP_INIT_5(g, prec, x, y, z, t, a) \
+ MPFR_GROUP_INIT_TEMPLATE(g, prec, 5, \
+   MPFR_GROUP_TINIT(g, 0, x);MPFR_GROUP_TINIT(g, 1, y); \
+   MPFR_GROUP_TINIT(g, 2, z);MPFR_GROUP_TINIT(g, 3, t); \
+   MPFR_GROUP_TINIT(g, 4, a))
+#define MPFR_GROUP_INIT_6(g, prec, x, y, z, t, a, b) \
+ MPFR_GROUP_INIT_TEMPLATE(g, prec, 6, \
+   MPFR_GROUP_TINIT(g, 0, x);MPFR_GROUP_TINIT(g, 1, y); \
+   MPFR_GROUP_TINIT(g, 2, z);MPFR_GROUP_TINIT(g, 3, t); \
+   MPFR_GROUP_TINIT(g, 4, a);MPFR_GROUP_TINIT(g, 5, b))
+
+/* Change the precision of an existing group.  The handler must
+   re-run MPFR_GROUP_TINIT for every member: 'mant' may move.  Note that
+   when the group previously used the static buffer (_oalloc == 0), it
+   switches unconditionally to heap storage, even if the new size would
+   still fit in 'tab' -- MPFR_GROUP_CLEAR frees it correctly either way. */
+#define MPFR_GROUP_REPREC_TEMPLATE(g, prec, num, handler) do { \
+  mpfr_prec_t _prec = (prec); \
+  size_t    _oalloc = (g).alloc; \
+  mp_size_t _size; \
+  MPFR_LOG_MSG (("GROUP_REPREC: oldptr = 0x%lX, oldsize = %lu\n", \
+                 (unsigned long) (g).mant, (unsigned long) _oalloc)); \
+  MPFR_ASSERTD (_prec >= MPFR_PREC_MIN); \
+  if (MPFR_UNLIKELY (_prec > MPFR_PREC_MAX)) \
+    mpfr_abort_prec_max (); \
+  _size = (mpfr_prec_t) (_prec + GMP_NUMB_BITS - 1) / GMP_NUMB_BITS; \
+  (g).alloc = (num) * _size * sizeof (mp_limb_t); \
+  if (MPFR_LIKELY (_oalloc == 0)) \
+    (g).mant = (mp_limb_t *) (*__gmp_allocate_func) ((g).alloc); \
+  else \
+    (g).mant = (mp_limb_t *) \
+      (*__gmp_reallocate_func) ((g).mant, _oalloc, (g).alloc); \
+  MPFR_LOG_MSG (("GROUP_REPREC: newptr = 0x%lX, newsize = %lu\n", \
+                 (unsigned long) (g).mant, (unsigned long) (g).alloc)); \
+  handler; \
+ } while (0)
+
+/* Convenience wrappers: change the precision of a group of 1 to 6
+   numbers, re-attaching each mpfr_t to the (possibly moved) buffer. */
+#define MPFR_GROUP_REPREC_1(g, prec, x) \
+ MPFR_GROUP_REPREC_TEMPLATE(g, prec, 1, MPFR_GROUP_TINIT(g, 0, x))
+#define MPFR_GROUP_REPREC_2(g, prec, x, y) \
+ MPFR_GROUP_REPREC_TEMPLATE(g, prec, 2, \
+   MPFR_GROUP_TINIT(g, 0, x);MPFR_GROUP_TINIT(g, 1, y))
+#define MPFR_GROUP_REPREC_3(g, prec, x, y, z) \
+ MPFR_GROUP_REPREC_TEMPLATE(g, prec, 3, \
+   MPFR_GROUP_TINIT(g, 0, x);MPFR_GROUP_TINIT(g, 1, y); \
+   MPFR_GROUP_TINIT(g, 2, z))
+#define MPFR_GROUP_REPREC_4(g, prec, x, y, z, t) \
+ MPFR_GROUP_REPREC_TEMPLATE(g, prec, 4, \
+   MPFR_GROUP_TINIT(g, 0, x);MPFR_GROUP_TINIT(g, 1, y); \
+   MPFR_GROUP_TINIT(g, 2, z);MPFR_GROUP_TINIT(g, 3, t))
+#define MPFR_GROUP_REPREC_5(g, prec, x, y, z, t, a) \
+ MPFR_GROUP_REPREC_TEMPLATE(g, prec, 5, \
+   MPFR_GROUP_TINIT(g, 0, x);MPFR_GROUP_TINIT(g, 1, y); \
+   MPFR_GROUP_TINIT(g, 2, z);MPFR_GROUP_TINIT(g, 3, t); \
+   MPFR_GROUP_TINIT(g, 4, a))
+#define MPFR_GROUP_REPREC_6(g, prec, x, y, z, t, a, b) \
+ MPFR_GROUP_REPREC_TEMPLATE(g, prec, 6, \
+   MPFR_GROUP_TINIT(g, 0, x);MPFR_GROUP_TINIT(g, 1, y); \
+   MPFR_GROUP_TINIT(g, 2, z);MPFR_GROUP_TINIT(g, 3, t); \
+   MPFR_GROUP_TINIT(g, 4, a);MPFR_GROUP_TINIT(g, 5, b))
+
+
+/******************************************************
+ *************** Internal Functions *****************
+ ******************************************************/
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+__MPFR_DECLSPEC int mpfr_underflow _MPFR_PROTO ((mpfr_ptr, mpfr_rnd_t, int));
+__MPFR_DECLSPEC int mpfr_overflow _MPFR_PROTO ((mpfr_ptr, mpfr_rnd_t, int));
+
+__MPFR_DECLSPEC int mpfr_add1 _MPFR_PROTO ((mpfr_ptr, mpfr_srcptr,
+ mpfr_srcptr, mpfr_rnd_t));
+__MPFR_DECLSPEC int mpfr_sub1 _MPFR_PROTO ((mpfr_ptr, mpfr_srcptr,
+ mpfr_srcptr, mpfr_rnd_t));
+__MPFR_DECLSPEC int mpfr_add1sp _MPFR_PROTO ((mpfr_ptr, mpfr_srcptr,
+ mpfr_srcptr, mpfr_rnd_t));
+__MPFR_DECLSPEC int mpfr_sub1sp _MPFR_PROTO ((mpfr_ptr, mpfr_srcptr,
+ mpfr_srcptr, mpfr_rnd_t));
+__MPFR_DECLSPEC int mpfr_can_round_raw _MPFR_PROTO ((const mp_limb_t *,
+ mp_size_t, int, mpfr_exp_t, mpfr_rnd_t, mpfr_rnd_t, mpfr_prec_t));
+
+__MPFR_DECLSPEC int mpfr_cmp2 _MPFR_PROTO ((mpfr_srcptr, mpfr_srcptr,
+ mpfr_prec_t *));
+
+__MPFR_DECLSPEC long __gmpfr_ceil_log2 _MPFR_PROTO ((double));
+__MPFR_DECLSPEC long __gmpfr_floor_log2 _MPFR_PROTO ((double));
+__MPFR_DECLSPEC double __gmpfr_ceil_exp2 _MPFR_PROTO ((double));
+__MPFR_DECLSPEC unsigned long __gmpfr_isqrt _MPFR_PROTO ((unsigned long));
+__MPFR_DECLSPEC unsigned long __gmpfr_cuberoot _MPFR_PROTO ((unsigned long));
+__MPFR_DECLSPEC int __gmpfr_int_ceil_log2 _MPFR_PROTO ((unsigned long));
+
+__MPFR_DECLSPEC int mpfr_exp_2 _MPFR_PROTO ((mpfr_ptr, mpfr_srcptr,mpfr_rnd_t));
+__MPFR_DECLSPEC int mpfr_exp_3 _MPFR_PROTO ((mpfr_ptr, mpfr_srcptr,mpfr_rnd_t));
+__MPFR_DECLSPEC int mpfr_powerof2_raw _MPFR_PROTO ((mpfr_srcptr));
+
+__MPFR_DECLSPEC int mpfr_pow_general _MPFR_PROTO ((mpfr_ptr, mpfr_srcptr,
+ mpfr_srcptr, mpfr_rnd_t, int, mpfr_save_expo_t *));
+
+__MPFR_DECLSPEC void mpfr_setmax _MPFR_PROTO ((mpfr_ptr, mpfr_exp_t));
+__MPFR_DECLSPEC void mpfr_setmin _MPFR_PROTO ((mpfr_ptr, mpfr_exp_t));
+
+__MPFR_DECLSPEC long mpfr_mpn_exp _MPFR_PROTO ((mp_limb_t *, mpfr_exp_t *, int,
+ mpfr_exp_t, size_t));
+
+#ifdef _MPFR_H_HAVE_FILE
+__MPFR_DECLSPEC void mpfr_fprint_binary _MPFR_PROTO ((FILE *, mpfr_srcptr));
+#endif
+__MPFR_DECLSPEC void mpfr_print_binary _MPFR_PROTO ((mpfr_srcptr));
+__MPFR_DECLSPEC void mpfr_print_mant_binary _MPFR_PROTO ((const char*,
+ const mp_limb_t*, mpfr_prec_t));
+__MPFR_DECLSPEC void mpfr_set_str_binary _MPFR_PROTO((mpfr_ptr, const char*));
+
+__MPFR_DECLSPEC int mpfr_round_raw _MPFR_PROTO ((mp_limb_t *,
+ const mp_limb_t *, mpfr_prec_t, int, mpfr_prec_t, mpfr_rnd_t, int *));
+__MPFR_DECLSPEC int mpfr_round_raw_2 _MPFR_PROTO ((const mp_limb_t *,
+ mpfr_prec_t, int, mpfr_prec_t, mpfr_rnd_t));
+__MPFR_DECLSPEC int mpfr_round_raw_3 _MPFR_PROTO ((const mp_limb_t *,
+ mpfr_prec_t, int, mpfr_prec_t, mpfr_rnd_t, int *));
+__MPFR_DECLSPEC int mpfr_round_raw_4 _MPFR_PROTO ((mp_limb_t *,
+ const mp_limb_t *, mpfr_prec_t, int, mpfr_prec_t, mpfr_rnd_t));
+
+/* Wrapper around mpfr_round_raw_2 taking the input size in limbs (xn)
+   instead of bits.  Note the argument order passed to mpfr_round_raw_2:
+   (..., prec, rnd), matching its prototype above, even though this
+   macro's own parameter list names r before prec. */
+#define mpfr_round_raw2(xp, xn, neg, r, prec) \
+  mpfr_round_raw_2((xp),(xn)*GMP_NUMB_BITS,(neg),(prec),(r))
+
+__MPFR_DECLSPEC int mpfr_check _MPFR_PROTO ((mpfr_srcptr));
+
+__MPFR_DECLSPEC int mpfr_sum_sort _MPFR_PROTO ((mpfr_srcptr *const,
+ unsigned long, mpfr_srcptr *));
+
+__MPFR_DECLSPEC int mpfr_get_cputime _MPFR_PROTO ((void));
+
+__MPFR_DECLSPEC void mpfr_nexttozero _MPFR_PROTO ((mpfr_ptr));
+__MPFR_DECLSPEC void mpfr_nexttoinf _MPFR_PROTO ((mpfr_ptr));
+
+__MPFR_DECLSPEC int mpfr_const_pi_internal _MPFR_PROTO ((mpfr_ptr,mpfr_rnd_t));
+__MPFR_DECLSPEC int mpfr_const_log2_internal _MPFR_PROTO((mpfr_ptr,mpfr_rnd_t));
+__MPFR_DECLSPEC int mpfr_const_euler_internal _MPFR_PROTO((mpfr_ptr, mpfr_rnd_t));
+__MPFR_DECLSPEC int mpfr_const_catalan_internal _MPFR_PROTO((mpfr_ptr, mpfr_rnd_t));
+
+#if 0
+__MPFR_DECLSPEC void mpfr_init_cache _MPFR_PROTO ((mpfr_cache_t,
+ int(*)(mpfr_ptr,mpfr_rnd_t)));
+#endif
+__MPFR_DECLSPEC void mpfr_clear_cache _MPFR_PROTO ((mpfr_cache_t));
+__MPFR_DECLSPEC int mpfr_cache _MPFR_PROTO ((mpfr_ptr, mpfr_cache_t,
+ mpfr_rnd_t));
+
+__MPFR_DECLSPEC void mpfr_mulhigh_n _MPFR_PROTO ((mp_ptr, mp_srcptr,
+ mp_srcptr, mp_size_t));
+__MPFR_DECLSPEC void mpfr_sqrhigh_n _MPFR_PROTO ((mp_ptr, mp_srcptr, mp_size_t));
+
+__MPFR_DECLSPEC int mpfr_round_p _MPFR_PROTO ((mp_limb_t *, mp_size_t,
+ mpfr_exp_t, mpfr_prec_t));
+
+__MPFR_DECLSPEC void mpfr_dump_mant _MPFR_PROTO ((const mp_limb_t *,
+ mpfr_prec_t, mpfr_prec_t,
+ mpfr_prec_t));
+
+__MPFR_DECLSPEC int mpfr_round_near_x _MPFR_PROTO ((mpfr_ptr, mpfr_srcptr,
+ mpfr_uexp_t, int,
+ mpfr_rnd_t));
+__MPFR_DECLSPEC void mpfr_abort_prec_max _MPFR_PROTO ((void))
+ MPFR_NORETURN_ATTR;
+
+__MPFR_DECLSPEC void mpfr_rand_raw _MPFR_PROTO((mp_ptr, gmp_randstate_t,
+ unsigned long));
+
+__MPFR_DECLSPEC mpz_t* mpfr_bernoulli_internal _MPFR_PROTO((mpz_t*,
+ unsigned long));
+
+__MPFR_DECLSPEC int mpfr_sincos_fast _MPFR_PROTO((mpfr_t, mpfr_t,
+ mpfr_srcptr, mpfr_rnd_t));
+
+__MPFR_DECLSPEC double mpfr_scale2 _MPFR_PROTO((double, int));
+
+__MPFR_DECLSPEC void mpfr_div_ui2 _MPFR_PROTO((mpfr_ptr, mpfr_srcptr,
+ unsigned long int, unsigned long int,
+ mpfr_rnd_t));
+
+__MPFR_DECLSPEC void mpfr_gamma_one_and_two_third _MPFR_PROTO((mpfr_ptr, mpfr_ptr, mpfr_prec_t));
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif
diff --git a/src/mpfr-longlong.h b/src/mpfr-longlong.h
new file mode 100644
index 000000000..9d33d5e4c
--- /dev/null
+++ b/src/mpfr-longlong.h
@@ -0,0 +1,1938 @@
+/* longlong.h -- definitions for mixed size 32/64 bit arithmetic.
+
+Copyright 1991, 1992, 1993, 1994, 1996, 1997, 1999, 2000, 2001, 2002, 2003,
+2004, 2005, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+
+This file is free software; you can redistribute it and/or modify it under the
+terms of the GNU Lesser General Public License as published by the Free
+Software Foundation; either version 3 of the License, or (at your option) any
+later version.
+
+This file is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this file. If not, see http://www.gnu.org/licenses/. */
+
+/* You have to define the following before including this file:
+
+ UWtype -- An unsigned type, default type for operations (typically a "word")
+ UHWtype -- An unsigned type, at least half the size of UWtype.
+ UDWtype -- An unsigned type, at least twice as large as UWtype
+ W_TYPE_SIZE -- size in bits of UWtype
+
+ SItype, USItype -- Signed and unsigned 32 bit types.
+ DItype, UDItype -- Signed and unsigned 64 bit types.
+
+ On a 32 bit machine UWtype should typically be USItype;
+ on a 64 bit machine, UWtype should typically be UDItype.
+
+ CAUTION! Using this file outside of GMP is not safe. You need to include
+ gmp.h and gmp-impl.h, or certain things might not work as expected.
+*/
+
+/* Half-word helpers: __ll_B is 2^(W_TYPE_SIZE/2); __ll_lowpart and
+   __ll_highpart extract the low/high half of a full word t; __BITS4 is
+   a quarter of a word in bits. */
+#define __BITS4 (W_TYPE_SIZE / 4)
+#define __ll_B ((UWtype) 1 << (W_TYPE_SIZE / 2))
+#define __ll_lowpart(t) ((UWtype) (t) & (__ll_B - 1))
+#define __ll_highpart(t) ((UWtype) (t) >> (W_TYPE_SIZE / 2))
+
+/* This is used to make sure no undesirable sharing between different libraries
+ that use this file takes place. */
+#ifndef __MPN
+#define __MPN(x) __##x
+#endif
+
+#ifndef _PROTO
+#if (__STDC__-0) || defined (__cplusplus)
+#define _PROTO(x) x
+#else
+#define _PROTO(x) ()
+#endif
+#endif
+
+/* Define auxiliary asm macros.
+
+ 1) umul_ppmm(high_prod, low_prod, multiplier, multiplicand) multiplies two
+ UWtype integers MULTIPLIER and MULTIPLICAND, and generates a two UWtype
+ word product in HIGH_PROD and LOW_PROD.
+
+ 2) __umulsidi3(a,b) multiplies two UWtype integers A and B, and returns a
+ UDWtype product. This is just a variant of umul_ppmm.
+
+ 3) udiv_qrnnd(quotient, remainder, high_numerator, low_numerator,
+ denominator) divides a UDWtype, composed by the UWtype integers
+ HIGH_NUMERATOR and LOW_NUMERATOR, by DENOMINATOR and places the quotient
+ in QUOTIENT and the remainder in REMAINDER. HIGH_NUMERATOR must be less
+ than DENOMINATOR for correct operation. If, in addition, the most
+ significant bit of DENOMINATOR must be 1, then the pre-processor symbol
+ UDIV_NEEDS_NORMALIZATION is defined to 1.
+
+ 4) sdiv_qrnnd(quotient, remainder, high_numerator, low_numerator,
+ denominator). Like udiv_qrnnd but the numbers are signed. The quotient
+ is rounded toward 0.
+
+ 5) count_leading_zeros(count, x) counts the number of zero-bits from the
+ msb to the first non-zero bit in the UWtype X. This is the number of
+ steps X needs to be shifted left to set the msb. Undefined for X == 0,
+ unless the symbol COUNT_LEADING_ZEROS_0 is defined to some value.
+
+ 6) count_trailing_zeros(count, x) like count_leading_zeros, but counts
+ from the least significant end.
+
+ 7) add_ssaaaa(high_sum, low_sum, high_addend_1, low_addend_1,
+ high_addend_2, low_addend_2) adds two UWtype integers, composed by
+ HIGH_ADDEND_1 and LOW_ADDEND_1, and HIGH_ADDEND_2 and LOW_ADDEND_2
+ respectively. The result is placed in HIGH_SUM and LOW_SUM. Overflow
+ (i.e. carry out) is not stored anywhere, and is lost.
+
+ 8) sub_ddmmss(high_difference, low_difference, high_minuend, low_minuend,
+ high_subtrahend, low_subtrahend) subtracts two two-word UWtype integers,
+ composed by HIGH_MINUEND_1 and LOW_MINUEND_1, and HIGH_SUBTRAHEND_2 and
+ LOW_SUBTRAHEND_2 respectively. The result is placed in HIGH_DIFFERENCE
+ and LOW_DIFFERENCE. Overflow (i.e. carry out) is not stored anywhere,
+ and is lost.
+
+ If any of these macros are left undefined for a particular CPU,
+ C macros are used.
+
+
+ Notes:
+
+ For add_ssaaaa the two high and two low addends can both commute, but
+ unfortunately gcc only supports one "%" commutative in each asm block.
+ This has always been so but is only documented in recent versions
+ (eg. pre-release 3.3). Having two or more "%"s can cause an internal
+ compiler error in certain rare circumstances.
+
+ Apparently it was only the last "%" that was ever actually respected, so
+ the code has been updated to leave just that. Clearly there's a free
+ choice whether high or low should get it, if there's a reason to favour
+ one over the other. Also obviously when the constraints on the two
+ operands are identical there's no benefit to the reloader in any "%" at
+ all.
+
+ */
+
+/* The CPUs come in alphabetical order below.
+
+ Please add support for more CPUs here, or improve the current support
+ for the CPUs below! */
+
+
+/* count_leading_zeros_gcc_clz is count_leading_zeros implemented with gcc
+ 3.4 __builtin_clzl or __builtin_clzll, according to our limb size.
+ Similarly count_trailing_zeros_gcc_ctz using __builtin_ctzl or
+ __builtin_ctzll.
+
+ These builtins are only used when we check what code comes out, on some
+ chips they're merely libgcc calls, where we will instead want an inline
+ in that case (either asm or generic C).
+
+ These builtins are better than an asm block of the same insn, since an
+ asm block doesn't give gcc any information about scheduling or resource
+ usage. We keep an asm block for use on prior versions of gcc though.
+
+ For reference, __builtin_ffs existed in gcc prior to __builtin_clz, but
+ it's not used (for count_leading_zeros) because it generally gives extra
+ code to ensure the result is 0 when the input is 0, which we don't need
+ or want. */
+
+/* Use __builtin_clzll when limbs are 'long long', __builtin_clzl
+   otherwise.  The builtins are undefined for a zero argument, hence the
+   ASSERT (see the long comment above). */
+#ifdef _LONG_LONG_LIMB
+#define count_leading_zeros_gcc_clz(count,x) \
+  do { \
+    ASSERT ((x) != 0); \
+    (count) = __builtin_clzll (x); \
+  } while (0)
+#else
+#define count_leading_zeros_gcc_clz(count,x) \
+  do { \
+    ASSERT ((x) != 0); \
+    (count) = __builtin_clzl (x); \
+  } while (0)
+#endif
+
+/* Same limb-size dispatch for the trailing-zeros builtin; also undefined
+   at 0, hence the ASSERT. */
+#ifdef _LONG_LONG_LIMB
+#define count_trailing_zeros_gcc_ctz(count,x) \
+  do { \
+    ASSERT ((x) != 0); \
+    (count) = __builtin_ctzll (x); \
+  } while (0)
+#else
+#define count_trailing_zeros_gcc_ctz(count,x) \
+  do { \
+    ASSERT ((x) != 0); \
+    (count) = __builtin_ctzl (x); \
+  } while (0)
+#endif
+
+
+/* FIXME: The macros using external routines like __MPN(count_leading_zeros)
+ don't need to be under !NO_ASM */
+#if ! defined (NO_ASM)
+
+#if defined (__alpha) && W_TYPE_SIZE == 64
+/* Most alpha-based machines, except Cray systems. */
+#if defined (__GNUC__)
+#if __GMP_GNUC_PREREQ (3,3)
+#define umul_ppmm(ph, pl, m0, m1) \
+ do { \
+ UDItype __m0 = (m0), __m1 = (m1); \
+ (ph) = __builtin_alpha_umulh (__m0, __m1); \
+ (pl) = __m0 * __m1; \
+ } while (0)
+#else
+/* Alpha umul_ppmm for gcc < 3.3: umulh computes the high word of the
+   64x64 product.  Fixed: the asm inputs now use the single-evaluation
+   copies __m0/__m1; passing the raw macro arguments m0/m1 evaluated them
+   a second time (wrong for arguments with side effects), unlike the
+   __GMP_GNUC_PREREQ(3,3) branch above. */
+#define umul_ppmm(ph, pl, m0, m1) \
+  do { \
+    UDItype __m0 = (m0), __m1 = (m1); \
+    __asm__ ("umulh %r1,%2,%0" \
+             : "=r" (ph) \
+             : "%rJ" (__m0), "rI" (__m1)); \
+    (pl) = __m0 * __m1; \
+  } while (0)
+#endif
+#define UMUL_TIME 18
+#else /* ! __GNUC__ */
+#include <machine/builtins.h>
+/* Alpha umul_ppmm for non-gcc compilers, via the __UMULH intrinsic.
+   Fixed: use the single-evaluation copies __m0/__m1 in the intrinsic
+   call; the raw macro arguments m0/m1 were evaluated twice. */
+#define umul_ppmm(ph, pl, m0, m1) \
+  do { \
+    UDItype __m0 = (m0), __m1 = (m1); \
+    (ph) = __UMULH (__m0, __m1); \
+    (pl) = __m0 * __m1; \
+  } while (0)
+#endif
+#ifndef LONGLONG_STANDALONE
+#define udiv_qrnnd(q, r, n1, n0, d) \
+ do { UWtype __di; \
+ __di = __MPN(invert_limb) (d); \
+ udiv_qrnnd_preinv (q, r, n1, n0, d, __di); \
+ } while (0)
+#define UDIV_PREINV_ALWAYS 1
+#define UDIV_NEEDS_NORMALIZATION 1
+#define UDIV_TIME 220
+#endif /* LONGLONG_STANDALONE */
+
+/* clz_tab is required in all configurations, since mpn/alpha/cntlz.asm
+ always goes into libgmp.so, even when not actually used. */
+#define COUNT_LEADING_ZEROS_NEED_CLZ_TAB
+
+#if defined (__GNUC__) && HAVE_HOST_CPU_alpha_CIX
+#define count_leading_zeros(COUNT,X) \
+ __asm__("ctlz %1,%0" : "=r"(COUNT) : "r"(X))
+#define count_trailing_zeros(COUNT,X) \
+ __asm__("cttz %1,%0" : "=r"(COUNT) : "r"(X))
+#endif /* clz/ctz using cix */
+
+#if ! defined (count_leading_zeros) \
+ && defined (__GNUC__) && ! defined (LONGLONG_STANDALONE)
+/* ALPHA_CMPBGE_0 gives "cmpbge $31,src,dst", ie. test src bytes == 0.
+ "$31" is written explicitly in the asm, since an "r" constraint won't
+ select reg 31. There seems no need to worry about "r31" syntax for cray,
+ since gcc itself (pre-release 3.4) emits just $31 in various places. */
+#define ALPHA_CMPBGE_0(dst, src) \
+ do { asm ("cmpbge $31, %1, %0" : "=r" (dst) : "r" (src)); } while (0)
+/* Zero bytes are turned into bits with cmpbge, a __clz_tab lookup counts
+ them, locating the highest non-zero byte. A second __clz_tab lookup
+ counts the leading zero bits in that byte, giving the result. */
+#define count_leading_zeros(count, x) \
+ do { \
+ UWtype __clz__b, __clz__c, __clz__x = (x); \
+ ALPHA_CMPBGE_0 (__clz__b, __clz__x); /* zero bytes */ \
+ __clz__b = __clz_tab [(__clz__b >> 1) ^ 0x7F]; /* 8 to 1 byte */ \
+ __clz__b = __clz__b * 8 - 7; /* 57 to 1 shift */ \
+ __clz__x >>= __clz__b; \
+ __clz__c = __clz_tab [__clz__x]; /* 8 to 1 bit */ \
+ __clz__b = 65 - __clz__b; \
+ (count) = __clz__b - __clz__c; \
+ } while (0)
+#define COUNT_LEADING_ZEROS_NEED_CLZ_TAB
+#endif /* clz using cmpbge */
+
+#if ! defined (count_leading_zeros) && ! defined (LONGLONG_STANDALONE)
+#if HAVE_ATTRIBUTE_CONST
+long __MPN(count_leading_zeros) _PROTO ((UDItype)) __attribute__ ((const));
+#else
+long __MPN(count_leading_zeros) _PROTO ((UDItype));
+#endif
+#define count_leading_zeros(count, x) \
+ ((count) = __MPN(count_leading_zeros) (x))
+#endif /* clz using mpn */
+#endif /* __alpha */
+
+#if defined (_CRAY) && W_TYPE_SIZE == 64
+#include <intrinsics.h>
+#define UDIV_PREINV_ALWAYS 1
+#define UDIV_NEEDS_NORMALIZATION 1
+#define UDIV_TIME 220
+long __MPN(count_leading_zeros) _PROTO ((UDItype));
+#define count_leading_zeros(count, x) \
+ ((count) = _leadz ((UWtype) (x)))
+#if defined (_CRAYIEEE) /* I.e., Cray T90/ieee, T3D, and T3E */
+/* Cray IEEE umul_ppmm via the _int_mult_upper intrinsic.
+   Fixed: use the single-evaluation copies __m0/__m1 in the intrinsic
+   call; the raw macro arguments m0/m1 were evaluated twice. */
+#define umul_ppmm(ph, pl, m0, m1) \
+  do { \
+    UDItype __m0 = (m0), __m1 = (m1); \
+    (ph) = _int_mult_upper (__m0, __m1); \
+    (pl) = __m0 * __m1; \
+  } while (0)
+#ifndef LONGLONG_STANDALONE
+#define udiv_qrnnd(q, r, n1, n0, d) \
+ do { UWtype __di; \
+ __di = __MPN(invert_limb) (d); \
+ udiv_qrnnd_preinv (q, r, n1, n0, d, __di); \
+ } while (0)
+#endif /* LONGLONG_STANDALONE */
+#endif /* _CRAYIEEE */
+#endif /* _CRAY */
+
+#if defined (__ia64) && W_TYPE_SIZE == 64
+/* This form encourages gcc (pre-release 3.4 at least) to emit predicated
+   "sub r=r,r" and "sub r=r,r,1", giving a 2 cycle latency.  The generic
+   code using "al<bl" arithmetically comes out making an actual 0 or 1 in a
+   register, which takes an extra cycle.  */
+#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
+  do { \
+    UWtype __x; \
+    __x = (al) - (bl); \
+    if ((al) < (bl)) \
+      (sh) = (ah) - (bh) - 1; \
+    else \
+      (sh) = (ah) - (bh); \
+    (sl) = __x; \
+  } while (0)
+#if defined (__GNUC__) && ! defined (__INTEL_COMPILER)
+/* Do both product parts in assembly, since that gives better code with
+   all gcc versions.  Some callers will just use the upper part, and in
+   that situation we waste an instruction, but not any cycles.  */
+#define umul_ppmm(ph, pl, m0, m1) \
+    __asm__ ("xma.hu %0 = %2, %3, f0\n\txma.l %1 = %2, %3, f0" \
+	     : "=&f" (ph), "=f" (pl) \
+	     : "f" (m0), "f" (m1))
+#define UMUL_TIME 14
+/* mux1 @rev byte-reverses x, czx1.l locates its first zero byte; the
+   remainder is a small binary search within the top nonzero byte
+   (see the Itanium instruction reference for mux1/czx1).  */
+#define count_leading_zeros(count, x) \
+  do { \
+    UWtype _x = (x), _y, _a, _c; \
+    __asm__ ("mux1 %0 = %1, @rev" : "=r" (_y) : "r" (_x)); \
+    __asm__ ("czx1.l %0 = %1" : "=r" (_a) : "r" (-_y | _y)); \
+    _c = (_a - 1) << 3; \
+    _x >>= _c; \
+    if (_x >= 1 << 4) \
+      _x >>= 4, _c += 4; \
+    if (_x >= 1 << 2) \
+      _x >>= 2, _c += 2; \
+    _c += _x >> 1; \
+    (count) =  W_TYPE_SIZE - 1 - _c; \
+  } while (0)
+/* similar to what gcc does for __builtin_ffs, but 0 based rather than 1
+   based, and we don't need a special case for x==0 here */
+#define count_trailing_zeros(count, x) \
+  do { \
+    UWtype __ctz_x = (x); \
+    __asm__ ("popcnt %0 = %1" \
+	     : "=r" (count) \
+	     : "r" ((__ctz_x-1) & ~__ctz_x)); \
+  } while (0)
+#endif
+#if defined (__INTEL_COMPILER)
+/* icc spelling of the same xma.hu sequence, via <ia64intrin.h>.  */
+#include <ia64intrin.h>
+#define umul_ppmm(ph, pl, m0, m1) \
+  do { \
+    UWtype _m0 = (m0), _m1 = (m1); \
+    ph = _m64_xmahu (_m0, _m1, 0); \
+    pl = _m0 * _m1; \
+  } while (0)
+#endif
+#ifndef LONGLONG_STANDALONE
+#define udiv_qrnnd(q, r, n1, n0, d) \
+  do { UWtype __di; \
+    __di = __MPN(invert_limb) (d); \
+    udiv_qrnnd_preinv (q, r, n1, n0, d, __di); \
+  } while (0)
+#define UDIV_PREINV_ALWAYS 1
+#define UDIV_NEEDS_NORMALIZATION 1
+#endif
+#define UDIV_TIME 220
+#endif
+
+
+#if defined (__GNUC__)
+
+/* We sometimes need to clobber "cc" with gcc2, but that would not be
+   understood by gcc1.  Use cpp to avoid major code duplication.
+   These are pasted at the end of an asm's operand lists: they carry
+   their own leading ":" or ",", so under gcc1 they expand to nothing.  */
+#if __GNUC__ < 2
+#define __CLOBBER_CC
+#define __AND_CLOBBER_CC
+#else /* __GNUC__ >= 2 */
+#define __CLOBBER_CC : "cc"
+#define __AND_CLOBBER_CC , "cc"
+#endif /* __GNUC__ < 2 */
+
+#if (defined (__a29k__) || defined (_AM29K)) && W_TYPE_SIZE == 32
+/* AMD 29000 family.  */
+#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
+  __asm__ ("add %1,%4,%5\n\taddc %0,%2,%3" \
+	   : "=r" (sh), "=&r" (sl) \
+	   : "r" (ah), "rI" (bh), "%r" (al), "rI" (bl))
+#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
+  __asm__ ("sub %1,%4,%5\n\tsubc %0,%2,%3" \
+	   : "=r" (sh), "=&r" (sl) \
+	   : "r" (ah), "rI" (bh), "r" (al), "rI" (bl))
+#define umul_ppmm(xh, xl, m0, m1) \
+  do { \
+    USItype __m0 = (m0), __m1 = (m1); \
+    __asm__ ("multiplu %0,%1,%2" \
+	     : "=r" (xl) \
+	     : "r" (__m0), "r" (__m1)); \
+    __asm__ ("multmu %0,%1,%2" \
+	     : "=r" (xh) \
+	     : "r" (__m0), "r" (__m1)); \
+  } while (0)
+#define udiv_qrnnd(q, r, n1, n0, d) \
+  __asm__ ("dividu %0,%3,%4" \
+	   : "=r" (q), "=q" (r) \
+	   : "1" (n1), "r" (n0), "r" (d))
+#define count_leading_zeros(count, x) \
+    __asm__ ("clz %0,%1" \
+	     : "=r" (count) \
+	     : "r" (x))
+#define COUNT_LEADING_ZEROS_0 32
+#endif /* __a29k__ */
+
+#if defined (__arc__)
+/* ARC: only carry-chained add/sub; multiply and clz fall through to the
+   generic C versions.  */
+#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
+  __asm__ ("add.f\t%1, %4, %5\n\tadc\t%0, %2, %3" \
+	   : "=r" (sh), \
+	     "=&r" (sl) \
+	   : "r"  ((USItype) (ah)), \
+	     "rIJ" ((USItype) (bh)), \
+	     "%r" ((USItype) (al)), \
+	     "rIJ" ((USItype) (bl)))
+#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
+  __asm__ ("sub.f\t%1, %4, %5\n\tsbc\t%0, %2, %3" \
+	   : "=r" (sh), \
+	     "=&r" (sl) \
+	   : "r" ((USItype) (ah)), \
+	     "rIJ" ((USItype) (bh)), \
+	     "r" ((USItype) (al)), \
+	     "rIJ" ((USItype) (bl)))
+#endif
+
+#if defined (__arm__) && W_TYPE_SIZE == 32
+/* ARM.  sub_ddmmss picks among subs/rsbs + sbc/rsc variants according to
+   which operands are compile-time constants, so a constant can sit in an
+   "I" immediate slot; all branches compute the same difference.  */
+#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
+  __asm__ ("adds\t%1, %4, %5\n\tadc\t%0, %2, %3" \
+	   : "=r" (sh), "=&r" (sl) \
+	   : "r" (ah), "rI" (bh), "%r" (al), "rI" (bl) __CLOBBER_CC)
+#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
+  do { \
+    if (__builtin_constant_p (al)) \
+      { \
+	if (__builtin_constant_p (ah)) \
+	  __asm__ ("rsbs\t%1, %5, %4\n\trsc\t%0, %3, %2" \
+		   : "=r" (sh), "=&r" (sl) \
+		   : "rI" (ah), "r" (bh), "rI" (al), "r" (bl) __CLOBBER_CC); \
+	else \
+	  __asm__ ("rsbs\t%1, %5, %4\n\tsbc\t%0, %2, %3" \
+		   : "=r" (sh), "=&r" (sl) \
+		   : "r" (ah), "rI" (bh), "rI" (al), "r" (bl) __CLOBBER_CC); \
+      } \
+    else if (__builtin_constant_p (ah)) \
+      { \
+	if (__builtin_constant_p (bl)) \
+	  __asm__ ("subs\t%1, %4, %5\n\trsc\t%0, %3, %2" \
+		   : "=r" (sh), "=&r" (sl) \
+		   : "rI" (ah), "r" (bh), "r" (al), "rI" (bl) __CLOBBER_CC); \
+	else \
+	  __asm__ ("rsbs\t%1, %5, %4\n\trsc\t%0, %3, %2" \
+		   : "=r" (sh), "=&r" (sl) \
+		   : "rI" (ah), "r" (bh), "rI" (al), "r" (bl) __CLOBBER_CC); \
+      } \
+    else if (__builtin_constant_p (bl)) \
+      { \
+	if (__builtin_constant_p (bh)) \
+	  __asm__ ("subs\t%1, %4, %5\n\tsbc\t%0, %2, %3" \
+		   : "=r" (sh), "=&r" (sl) \
+		   : "r" (ah), "rI" (bh), "r" (al), "rI" (bl) __CLOBBER_CC); \
+	else \
+	  __asm__ ("subs\t%1, %4, %5\n\trsc\t%0, %3, %2" \
+		   : "=r" (sh), "=&r" (sl) \
+		   : "rI" (ah), "r" (bh), "r" (al), "rI" (bl) __CLOBBER_CC); \
+      } \
+    else /* only bh might be a constant */ \
+      __asm__ ("subs\t%1, %4, %5\n\tsbc\t%0, %2, %3" \
+	       : "=r" (sh), "=&r" (sl) \
+	       : "r" (ah), "rI" (bh), "r" (al), "rI" (bl) __CLOBBER_CC);\
+    } while (0)
+#if 1 || defined (__arm_m__)	/* `M' series has widening multiply support */
+#define umul_ppmm(xh, xl, a, b) \
+  __asm__ ("umull %0,%1,%2,%3" : "=&r" (xl), "=&r" (xh) : "r" (a), "r" (b))
+#define UMUL_TIME 5
+#define smul_ppmm(xh, xl, a, b) \
+  __asm__ ("smull %0,%1,%2,%3" : "=&r" (xl), "=&r" (xh) : "r" (a), "r" (b))
+#ifndef LONGLONG_STANDALONE
+#define udiv_qrnnd(q, r, n1, n0, d) \
+  do { UWtype __di; \
+    __di = __MPN(invert_limb) (d); \
+    udiv_qrnnd_preinv (q, r, n1, n0, d, __di); \
+  } while (0)
+#define UDIV_PREINV_ALWAYS 1
+#define UDIV_NEEDS_NORMALIZATION 1
+#define UDIV_TIME 70
+#endif /* LONGLONG_STANDALONE */
+#else
+/* Pre-umull chips: build the 32x32->64 product from four 16x16 halves.  */
+#define umul_ppmm(xh, xl, a, b) \
+  __asm__ ("%@ Inlined umul_ppmm\n" \
+"	mov	%|r0, %2, lsr #16\n" \
+"	mov	%|r2, %3, lsr #16\n" \
+"	bic	%|r1, %2, %|r0, lsl #16\n" \
+"	bic	%|r2, %3, %|r2, lsl #16\n" \
+"	mul	%1, %|r1, %|r2\n" \
+"	mul	%|r2, %|r0, %|r2\n" \
+"	mul	%|r1, %0, %|r1\n" \
+"	mul	%0, %|r0, %0\n" \
+"	adds	%|r1, %|r2, %|r1\n" \
+"	addcs	%0, %0, #65536\n" \
+"	adds	%1, %1, %|r1, lsl #16\n" \
+"	adc	%0, %0, %|r1, lsr #16" \
+	   : "=&r" (xh), "=r" (xl) \
+	   : "r" (a), "r" (b) \
+	   : "r0", "r1", "r2")
+#define UMUL_TIME 20
+#ifndef LONGLONG_STANDALONE
+#define udiv_qrnnd(q, r, n1, n0, d) \
+  do { UWtype __r; \
+    (q) = __MPN(udiv_qrnnd) (&__r, (n1), (n0), (d)); \
+    (r) = __r; \
+  } while (0)
+extern UWtype __MPN(udiv_qrnnd) _PROTO ((UWtype *, UWtype, UWtype, UWtype));
+#define UDIV_TIME 200
+#endif /* LONGLONG_STANDALONE */
+#endif
+#endif /* __arm__ */
+
+#if defined (__clipper__) && W_TYPE_SIZE == 32
+/* Clipper: widening multiplies via a union splitting the 64-bit result.  */
+#define umul_ppmm(w1, w0, u, v) \
+  ({union {UDItype __ll; \
+	   struct {USItype __l, __h;} __i; \
+	  } __x; \
+  __asm__ ("mulwux %2,%0" \
+	   : "=r" (__x.__ll) \
+	   : "%0" ((USItype)(u)), "r" ((USItype)(v))); \
+  (w1) = __x.__i.__h; (w0) = __x.__i.__l;})
+#define smul_ppmm(w1, w0, u, v) \
+  ({union {DItype __ll; \
+	   struct {SItype __l, __h;} __i; \
+	  } __x; \
+  __asm__ ("mulwx %2,%0" \
+	   : "=r" (__x.__ll) \
+	   : "%0" ((SItype)(u)), "r" ((SItype)(v))); \
+  (w1) = __x.__i.__h; (w0) = __x.__i.__l;})
+#define __umulsidi3(u, v) \
+  ({UDItype __w; \
+    __asm__ ("mulwux %2,%0" \
+	     : "=r" (__w) : "%0" ((USItype)(u)), "r" ((USItype)(v))); \
+    __w; })
+#endif /* __clipper__ */
+
+/* Fujitsu vector computers.  Note the union here has __h first, the
+   opposite layout to the clipper/i960 sections above.  */
+#if defined (__uxp__) && W_TYPE_SIZE == 32
+#define umul_ppmm(ph, pl, u, v) \
+  do { \
+    union {UDItype __ll; \
+	   struct {USItype __h, __l;} __i; \
+	  } __x; \
+    __asm__ ("mult.lu %1,%2,%0" : "=r" (__x.__ll) : "%r" (u), "rK" (v));\
+    (ph) = __x.__i.__h; \
+    (pl) = __x.__i.__l; \
+  } while (0)
+#define smul_ppmm(ph, pl, u, v) \
+  do { \
+    union {UDItype __ll; \
+	   struct {USItype __h, __l;} __i; \
+	  } __x; \
+    __asm__ ("mult.l %1,%2,%0" : "=r" (__x.__ll) : "%r" (u), "rK" (v)); \
+    (ph) = __x.__i.__h; \
+    (pl) = __x.__i.__l; \
+  } while (0)
+#endif
+
+#if defined (__gmicro__) && W_TYPE_SIZE == 32
+/* Gmicro (TRON) family.  */
+#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
+  __asm__ ("add.w %5,%1\n\taddx %3,%0" \
+	   : "=g" (sh), "=&g" (sl) \
+	   : "0" ((USItype)(ah)), "g" ((USItype)(bh)), \
+	     "%1" ((USItype)(al)), "g" ((USItype)(bl)))
+#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
+  __asm__ ("sub.w %5,%1\n\tsubx %3,%0" \
+	   : "=g" (sh), "=&g" (sl) \
+	   : "0" ((USItype)(ah)), "g" ((USItype)(bh)), \
+	     "1" ((USItype)(al)), "g" ((USItype)(bl)))
+#define umul_ppmm(ph, pl, m0, m1) \
+  __asm__ ("mulx %3,%0,%1" \
+	   : "=g" (ph), "=r" (pl) \
+	   : "%0" ((USItype)(m0)), "g" ((USItype)(m1)))
+#define udiv_qrnnd(q, r, nh, nl, d) \
+  __asm__ ("divx %4,%0,%1" \
+	   : "=g" (q), "=r" (r) \
+	   : "1" ((USItype)(nh)), "0" ((USItype)(nl)), "g" ((USItype)(d)))
+#define count_leading_zeros(count, x) \
+  __asm__ ("bsch/1 %1,%0" \
+	   : "=g" (count) : "g" ((USItype)(x)), "0" ((USItype)0))
+#endif
+
+#if defined (__hppa) && W_TYPE_SIZE == 32
+/* HP PA-RISC, 32-bit.  xmpyu (FPU multiply) needs PA 1.1; otherwise the
+   generic C umul_ppmm is used.  count_leading_zeros is a branch-free
+   binary search done with extru's conditional-nullify forms.  */
+#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
+  __asm__ ("add%I5 %5,%r4,%1\n\taddc %r2,%r3,%0" \
+	   : "=r" (sh), "=&r" (sl) \
+	   : "rM" (ah), "rM" (bh), "%rM" (al), "rI" (bl))
+#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
+  __asm__ ("sub%I4 %4,%r5,%1\n\tsubb %r2,%r3,%0" \
+	   : "=r" (sh), "=&r" (sl) \
+	   : "rM" (ah), "rM" (bh), "rI" (al), "rM" (bl))
+#if defined (_PA_RISC1_1)
+#define umul_ppmm(wh, wl, u, v) \
+  do { \
+    union {UDItype __ll; \
+	   struct {USItype __h, __l;} __i; \
+	  } __x; \
+    __asm__ ("xmpyu %1,%2,%0" : "=*f" (__x.__ll) : "*f" (u), "*f" (v)); \
+    (wh) = __x.__i.__h; \
+    (wl) = __x.__i.__l; \
+  } while (0)
+#define UMUL_TIME 8
+#define UDIV_TIME 60
+#else
+#define UMUL_TIME 40
+#define UDIV_TIME 80
+#endif
+#define count_leading_zeros(count, x) \
+  do { \
+    USItype __tmp; \
+    __asm__ ( \
+       "ldi		1,%0\n" \
+"	extru,=	%1,15,16,%%r0	; Bits 31..16 zero?\n" \
+"	extru,tr	%1,15,16,%1	; No.  Shift down, skip add.\n" \
+"	ldo		16(%0),%0	; Yes.  Perform add.\n" \
+"	extru,=	%1,23,8,%%r0	; Bits 15..8 zero?\n" \
+"	extru,tr	%1,23,8,%1	; No.  Shift down, skip add.\n" \
+"	ldo		8(%0),%0	; Yes.  Perform add.\n" \
+"	extru,=	%1,27,4,%%r0	; Bits 7..4 zero?\n" \
+"	extru,tr	%1,27,4,%1	; No.  Shift down, skip add.\n" \
+"	ldo		4(%0),%0	; Yes.  Perform add.\n" \
+"	extru,=	%1,29,2,%%r0	; Bits 3..2 zero?\n" \
+"	extru,tr	%1,29,2,%1	; No.  Shift down, skip add.\n" \
+"	ldo		2(%0),%0	; Yes.  Perform add.\n" \
+"	extru	%1,30,1,%1	; Extract bit 1.\n" \
+"	sub	%0,%1,%0	; Subtract it.\n" \
+	: "=r" (count), "=r" (__tmp) : "1" (x)); \
+  } while (0)
+#endif /* hppa */
+
+/* These macros are for ABI=2.0w.  In ABI=2.0n they can't be used, since GCC
+   (3.2) puts longlong into two adjacent 32-bit registers.  Presumably this
+   is just a case of no direct support for 2.0n but treating it like 1.0.  */
+#if defined (__hppa) && W_TYPE_SIZE == 64 && ! defined (_LONG_LONG_LIMB)
+#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
+  __asm__ ("add%I5 %5,%r4,%1\n\tadd,dc %r2,%r3,%0" \
+	   : "=r" (sh), "=&r" (sl) \
+	   : "rM" (ah), "rM" (bh), "%rM" (al), "rI" (bl))
+#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
+  __asm__ ("sub%I4 %4,%r5,%1\n\tsub,db %r2,%r3,%0" \
+	   : "=r" (sh), "=&r" (sl) \
+	   : "rM" (ah), "rM" (bh), "rI" (al), "rM" (bl))
+#endif /* hppa */
+
+#if (defined (__i370__) || defined (__s390__) || defined (__mvs__)) && W_TYPE_SIZE == 32
+/* S/370 and 31-bit S/390: mr/dr work on an even/odd register pair, modeled
+   here as one 64-bit union value (%N0 names the low register of the pair).  */
+#define smul_ppmm(xh, xl, m0, m1) \
+  do { \
+    union {DItype __ll; \
+	   struct {USItype __h, __l;} __i; \
+	  } __x; \
+    __asm__ ("lr %N0,%1\n\tmr %0,%2" \
+	     : "=&r" (__x.__ll) \
+	     : "r" (m0), "r" (m1)); \
+    (xh) = __x.__i.__h; (xl) = __x.__i.__l; \
+  } while (0)
+#define sdiv_qrnnd(q, r, n1, n0, d) \
+  do { \
+    union {DItype __ll; \
+	   struct {USItype __h, __l;} __i; \
+	  } __x; \
+    __x.__i.__h = n1; __x.__i.__l = n0; \
+    __asm__ ("dr %0,%2" \
+	     : "=r" (__x.__ll) \
+	     : "0" (__x.__ll), "r" (d)); \
+    (q) = __x.__i.__l; (r) = __x.__i.__h; \
+  } while (0)
+#endif
+
+#if (defined (__i386__) || defined (__i486__)) && W_TYPE_SIZE == 32
+/* x86: mull and divl fix the operands to eax:edx, hence the "a"/"d"
+   constraints; udiv_qrnnd requires n1 < d (quotient must fit a limb).  */
+#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
+  __asm__ ("addl %5,%k1\n\tadcl %3,%k0" \
+	   : "=r" (sh), "=&r" (sl) \
+	   : "0"  ((USItype)(ah)), "g" ((USItype)(bh)), \
+	     "%1" ((USItype)(al)), "g" ((USItype)(bl)))
+#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
+  __asm__ ("subl %5,%k1\n\tsbbl %3,%k0" \
+	   : "=r" (sh), "=&r" (sl) \
+	   : "0" ((USItype)(ah)), "g" ((USItype)(bh)), \
+	     "1" ((USItype)(al)), "g" ((USItype)(bl)))
+#define umul_ppmm(w1, w0, u, v) \
+  __asm__ ("mull %3" \
+	   : "=a" (w0), "=d" (w1) \
+	   : "%0" ((USItype)(u)), "rm" ((USItype)(v)))
+#define udiv_qrnnd(q, r, n1, n0, dx) /* d renamed to dx avoiding "=d" */\
+  __asm__ ("divl %4"		     /* stringification in K&R C */ \
+	   : "=a" (q), "=d" (r) \
+	   : "0" ((USItype)(n0)), "1" ((USItype)(n1)), "rm" ((USItype)(dx)))
+
+#if HAVE_HOST_CPU_i586 || HAVE_HOST_CPU_pentium || HAVE_HOST_CPU_pentiummmx
+/* Pentium bsrl takes between 10 and 72 cycles depending where the most
+   significant 1 bit is, hence the use of the following alternatives.  bsfl
+   is slow too, between 18 and 42 depending where the least significant 1
+   bit is, so let the generic count_trailing_zeros below make use of the
+   count_leading_zeros here too.  */
+
+#if HAVE_HOST_CPU_pentiummmx && ! defined (LONGLONG_STANDALONE)
+/* The following should be a fixed 14 or 15 cycles, but possibly plus an L1
+   cache miss reading from __clz_tab.  For P55 it's favoured over the float
+   below so as to avoid mixing MMX and x87, since the penalty for switching
+   between the two is about 100 cycles.
+
+   The asm block sets __shift to -3 if the high 24 bits are clear, -2 for
+   16, -1 for 8, or 0 otherwise.  This could be written equivalently as
+   follows, but as of gcc 2.95.2 it results in conditional jumps.
+
+       __shift = -(__n < 0x1000000);
+       __shift -= (__n < 0x10000);
+       __shift -= (__n < 0x100);
+
+   The middle two sbbl and cmpl's pair, and with luck something gcc
+   generates might pair with the first cmpl and the last sbbl.  The "32+1"
+   constant could be folded into __clz_tab[], but it doesn't seem worth
+   making a different table just for that.  */
+
+#define count_leading_zeros(c,n) \
+  do { \
+    USItype  __n = (n); \
+    USItype  __shift; \
+    __asm__ ("cmpl  $0x1000000, %1\n" \
+	     "sbbl  %0, %0\n" \
+	     "cmpl  $0x10000, %1\n" \
+	     "sbbl  $0, %0\n" \
+	     "cmpl  $0x100, %1\n" \
+	     "sbbl  $0, %0\n" \
+	     : "=&r" (__shift) : "r"  (__n)); \
+    __shift = __shift*8 + 24 + 1; \
+    (c) = 32 + 1 - __shift - __clz_tab[__n >> __shift]; \
+  } while (0)
+#define COUNT_LEADING_ZEROS_NEED_CLZ_TAB
+#define COUNT_LEADING_ZEROS_0   31   /* n==0 indistinguishable from n==1 */
+
+#else /* ! pentiummmx || LONGLONG_STANDALONE */
+/* The following should be a fixed 14 cycles or so.  Some scheduling
+   opportunities should be available between the float load/store too.  This
+   sort of code is used in gcc 3 for __builtin_ffs (with "n&-n") and is
+   apparently suggested by the Intel optimizing manual (don't know exactly
+   where).  gcc 2.95 or up will be best for this, so the "double" is
+   correctly aligned on the stack.
+   The trick: converting n to double puts floor(log2(n)) into the exponent
+   field, which the a[1]>>20 extracts (IEEE double, little-endian).  */
+#define count_leading_zeros(c,n) \
+  do { \
+    union { \
+      double    d; \
+      unsigned  a[2]; \
+    } __u; \
+    ASSERT ((n) != 0); \
+    __u.d = (UWtype) (n); \
+    (c) = 0x3FF + 31 - (__u.a[1] >> 20); \
+  } while (0)
+#define COUNT_LEADING_ZEROS_0   (0x3FF + 31)
+#endif /* pentiummx */
+
+#else /* ! pentium */
+
+#if __GMP_GNUC_PREREQ (3,4)  /* using bsrl */
+#define count_leading_zeros(count,x)  count_leading_zeros_gcc_clz(count,x)
+#endif /* gcc clz */
+
+/* On P6, gcc prior to 3.0 generates a partial register stall for
+   __cbtmp^31, due to using "xorb $31" instead of "xorl $31", the former
+   being 1 code byte smaller.  "31-__cbtmp" is a workaround, probably at the
+   cost of one extra instruction.  Do this for "i386" too, since that means
+   generic x86.  */
+#if ! defined (count_leading_zeros) && __GNUC__ < 3 \
+  && (HAVE_HOST_CPU_i386 \
+      || HAVE_HOST_CPU_i686 \
+      || HAVE_HOST_CPU_pentiumpro \
+      || HAVE_HOST_CPU_pentium2 \
+      || HAVE_HOST_CPU_pentium3)
+#define count_leading_zeros(count, x) \
+  do { \
+    USItype __cbtmp; \
+    ASSERT ((x) != 0); \
+    __asm__ ("bsrl %1,%0" : "=r" (__cbtmp) : "rm" ((USItype)(x))); \
+    (count) = 31 - __cbtmp; \
+  } while (0)
+#endif /* gcc<3 asm bsrl */
+
+#ifndef count_leading_zeros
+#define count_leading_zeros(count, x) \
+  do { \
+    USItype __cbtmp; \
+    ASSERT ((x) != 0); \
+    __asm__ ("bsrl %1,%0" : "=r" (__cbtmp) : "rm" ((USItype)(x))); \
+    (count) = __cbtmp ^ 31; \
+  } while (0)
+#endif /* asm bsrl */
+
+#if __GMP_GNUC_PREREQ (3,4)  /* using bsfl */
+#define count_trailing_zeros(count,x)  count_trailing_zeros_gcc_ctz(count,x)
+#endif /* gcc ctz */
+
+#ifndef count_trailing_zeros
+#define count_trailing_zeros(count, x) \
+  do { \
+    ASSERT ((x) != 0); \
+    __asm__ ("bsfl %1,%k0" : "=r" (count) : "rm" ((USItype)(x))); \
+  } while (0)
+#endif /* asm bsfl */
+
+#endif /* ! pentium */
+
+/* Default cycle-count estimates for x86 when not set above.  */
+#ifndef UMUL_TIME
+#define UMUL_TIME 10
+#endif
+#ifndef UDIV_TIME
+#define UDIV_TIME 40
+#endif
+#endif /* 80x86 */
+
+#if defined (__amd64__) && W_TYPE_SIZE == 64
+/* x86-64: same scheme as 32-bit x86 but with the q-suffixed instructions;
+   "rme" additionally allows 32-bit sign-extended immediates.  */
+#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
+  __asm__ ("addq %5,%q1\n\tadcq %3,%q0" \
+	   : "=r" (sh), "=&r" (sl) \
+	   : "0"  ((UDItype)(ah)), "rme" ((UDItype)(bh)), \
+	     "%1" ((UDItype)(al)), "rme" ((UDItype)(bl)))
+#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
+  __asm__ ("subq %5,%q1\n\tsbbq %3,%q0" \
+	   : "=r" (sh), "=&r" (sl) \
+	   : "0" ((UDItype)(ah)), "rme" ((UDItype)(bh)), \
+	     "1" ((UDItype)(al)), "rme" ((UDItype)(bl)))
+#define umul_ppmm(w1, w0, u, v) \
+  __asm__ ("mulq %3" \
+	   : "=a" (w0), "=d" (w1) \
+	   : "%0" ((UDItype)(u)), "rm" ((UDItype)(v)))
+#define udiv_qrnnd(q, r, n1, n0, dx) /* d renamed to dx avoiding "=d" */\
+  __asm__ ("divq %4"		     /* stringification in K&R C */ \
+	   : "=a" (q), "=d" (r) \
+	   : "0" ((UDItype)(n0)), "1" ((UDItype)(n1)), "rm" ((UDItype)(dx)))
+/* bsrq destination must be a 64-bit register, hence UDItype for __cbtmp. */
+#define count_leading_zeros(count, x) \
+  do { \
+    UDItype __cbtmp; \
+    ASSERT ((x) != 0); \
+    __asm__ ("bsrq %1,%0" : "=r" (__cbtmp) : "rm" ((UDItype)(x))); \
+    (count) = __cbtmp ^ 63; \
+  } while (0)
+/* bsfq destination must be a 64-bit register, "%q0" forces this in case
+   count is only an int. */
+#define count_trailing_zeros(count, x) \
+  do { \
+    ASSERT ((x) != 0); \
+    __asm__ ("bsfq %1,%q0" : "=r" (count) : "rm" ((UDItype)(x))); \
+  } while (0)
+#endif /* x86_64 */
+
+#if defined (__i860__) && W_TYPE_SIZE == 32
+/* i860 double-word right shift.  Note: the ":" before the output operand
+   was missing, making any expansion of this macro a syntax error.  */
+#define rshift_rhlc(r,h,l,c) \
+  __asm__ ("shr %3,r0,r0\;shrd %1,%2,%0" \
+	   : "=r" (r) : "r" (h), "r" (l), "rn" (c))
+#endif /* i860 */
+
+#if defined (__i960__) && W_TYPE_SIZE == 32
+/* Intel i960.  */
+#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
+  __asm__ ("cmpo 1,0\;addc %5,%4,%1\;addc %3,%2,%0" \
+	   : "=r" (sh), "=&r" (sl) \
+	   : "dI" (ah), "dI" (bh), "%dI" (al), "dI" (bl))
+#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
+  __asm__ ("cmpo 0,0\;subc %5,%4,%1\;subc %3,%2,%0" \
+	   : "=r" (sh), "=&r" (sl) \
+	   : "dI" (ah), "dI" (bh), "dI" (al), "dI" (bl))
+#define umul_ppmm(w1, w0, u, v) \
+  ({union {UDItype __ll; \
+	   struct {USItype __l, __h;} __i; \
+	  } __x; \
+  __asm__ ("emul %2,%1,%0" \
+	   : "=d" (__x.__ll) : "%dI" (u), "dI" (v)); \
+  (w1) = __x.__i.__h; (w0) = __x.__i.__l;})
+#define __umulsidi3(u, v) \
+  ({UDItype __w; \
+    __asm__ ("emul %2,%1,%0" : "=d" (__w) : "%dI" (u), "dI" (v)); \
+    __w; })
+/* ediv divides the 64-bit __nn by d, leaving remainder:quotient in the
+   result pair __rq.  Note: __rq was previously used without being
+   declared, which could never have compiled.  */
+#define udiv_qrnnd(q, r, nh, nl, d) \
+  do { \
+    union {UDItype __ll; \
+	   struct {USItype __l, __h;} __i; \
+	  } __nn, __rq; \
+    __nn.__i.__h = (nh); __nn.__i.__l = (nl); \
+    __asm__ ("ediv %d,%n,%0" \
+	     : "=d" (__rq.__ll) : "dI" (__nn.__ll), "dI" (d)); \
+    (r) = __rq.__i.__l; (q) = __rq.__i.__h; \
+  } while (0)
+#define count_leading_zeros(count, x) \
+  do { \
+    USItype __cbtmp; \
+    __asm__ ("scanbit %1,%0" : "=r" (__cbtmp) : "r" (x)); \
+    (count) = __cbtmp ^ 31; \
+  } while (0)
+#define COUNT_LEADING_ZEROS_0 (-32) /* sic */
+#if defined (__i960mx)		/* what is the proper symbol to test??? */
+/* Note: the "while (0)" was previously missing, breaking use of this
+   macro in an unbraced if/else.  */
+#define rshift_rhlc(r,h,l,c) \
+  do { \
+    union {UDItype __ll; \
+	   struct {USItype __l, __h;} __i; \
+	  } __nn; \
+    __nn.__i.__h = (h); __nn.__i.__l = (l); \
+    __asm__ ("shre %2,%1,%0" : "=d" (r) : "dI" (__nn.__ll), "dI" (c)); \
+  } while (0)
+#endif /* i960mx */
+#endif /* i960 */
+
+#if (defined (__mc68000__) || defined (__mc68020__) || defined(mc68020) \
+     || defined (__m68k__) || defined (__mc5200__) || defined (__mc5206e__) \
+     || defined (__mc5307__)) && W_TYPE_SIZE == 32
+/* Motorola 68k family and ColdFire.  */
+#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
+  __asm__ ("add%.l %5,%1\n\taddx%.l %3,%0" \
+	   : "=d" (sh), "=&d" (sl) \
+	   : "0"  ((USItype)(ah)), "d" ((USItype)(bh)), \
+	     "%1" ((USItype)(al)), "g" ((USItype)(bl)))
+#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
+  __asm__ ("sub%.l %5,%1\n\tsubx%.l %3,%0" \
+	   : "=d" (sh), "=&d" (sl) \
+	   : "0" ((USItype)(ah)), "d" ((USItype)(bh)), \
+	     "1" ((USItype)(al)), "g" ((USItype)(bl)))
+/* The '020, '030, '040 and CPU32 have 32x32->64 and 64/32->32q-32r.  */
+#if defined (__mc68020__) || defined(mc68020) \
+     || defined (__mc68030__) || defined (mc68030) \
+     || defined (__mc68040__) || defined (mc68040) \
+     || defined (__mcpu32__) || defined (mcpu32) \
+     || defined (__NeXT__)
+#define umul_ppmm(w1, w0, u, v) \
+  __asm__ ("mulu%.l %3,%1:%0" \
+	   : "=d" (w0), "=d" (w1) \
+	   : "%0" ((USItype)(u)), "dmi" ((USItype)(v)))
+#define UMUL_TIME 45
+#define udiv_qrnnd(q, r, n1, n0, d) \
+  __asm__ ("divu%.l %4,%1:%0" \
+	   : "=d" (q), "=d" (r) \
+	   : "0" ((USItype)(n0)), "1" ((USItype)(n1)), "dmi" ((USItype)(d)))
+#define UDIV_TIME 90
+#define sdiv_qrnnd(q, r, n1, n0, d) \
+  __asm__ ("divs%.l %4,%1:%0" \
+	   : "=d" (q), "=d" (r) \
+	   : "0" ((USItype)(n0)), "1" ((USItype)(n1)), "dmi" ((USItype)(d)))
+#else /* for other 68k family members use 16x16->32 multiplication */
+#define umul_ppmm(xh, xl, a, b) \
+  do { USItype __umul_tmp1, __umul_tmp2; \
+	__asm__ ("| Inlined umul_ppmm\n" \
+"	move%.l %5,%3\n" \
+"	move%.l %2,%0\n" \
+"	move%.w %3,%1\n" \
+"	swap	%3\n" \
+"	swap	%0\n" \
+"	mulu%.w	%2,%1\n" \
+"	mulu%.w	%3,%0\n" \
+"	mulu%.w	%2,%3\n" \
+"	swap	%2\n" \
+"	mulu%.w	%5,%2\n" \
+"	add%.l	%3,%2\n" \
+"	jcc	1f\n" \
+"	add%.l	%#0x10000,%0\n" \
+"1:	move%.l %2,%3\n" \
+"	clr%.w	%2\n" \
+"	swap	%2\n" \
+"	swap	%3\n" \
+"	clr%.w	%3\n" \
+"	add%.l	%3,%1\n" \
+"	addx%.l %2,%0\n" \
+"	| End inlined umul_ppmm" \
+	      : "=&d" (xh), "=&d" (xl), \
+		"=d" (__umul_tmp1), "=&d" (__umul_tmp2) \
+	      : "%2" ((USItype)(a)), "d" ((USItype)(b))); \
+  } while (0)
+#define UMUL_TIME 100
+#define UDIV_TIME 400
+#endif /* not mc68020 */
+/* The '020, '030, '040 and '060 have bitfield insns.
+   GCC 3.4 defines __mc68020__ when in CPU32 mode, check for __mcpu32__ to
+   exclude bfffo on that chip (bitfield insns not available).  */
+#if (defined (__mc68020__) || defined (mc68020) \
+     || defined (__mc68030__) || defined (mc68030) \
+     || defined (__mc68040__) || defined (mc68040) \
+     || defined (__mc68060__) || defined (mc68060) \
+     || defined (__NeXT__)) \
+  && ! defined (__mcpu32__)
+#define count_leading_zeros(count, x) \
+  __asm__ ("bfffo %1{%b2:%b2},%0" \
+	   : "=d" (count) \
+	   : "od" ((USItype) (x)), "n" (0))
+#define COUNT_LEADING_ZEROS_0 32
+#endif
+#endif /* mc68000 */
+
+#if defined (__m88000__) && W_TYPE_SIZE == 32
+/* Motorola 88000 family.  */
+#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
+  __asm__ ("addu.co %1,%r4,%r5\n\taddu.ci %0,%r2,%r3" \
+	   : "=r" (sh), "=&r" (sl) \
+	   : "rJ" (ah), "rJ" (bh), "%rJ" (al), "rJ" (bl))
+#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
+  __asm__ ("subu.co %1,%r4,%r5\n\tsubu.ci %0,%r2,%r3" \
+	   : "=r" (sh), "=&r" (sl) \
+	   : "rJ" (ah), "rJ" (bh), "rJ" (al), "rJ" (bl))
+#define count_leading_zeros(count, x) \
+  do { \
+    USItype __cbtmp; \
+    __asm__ ("ff1 %0,%1" : "=r" (__cbtmp) : "r" (x)); \
+    (count) = __cbtmp ^ 31; \
+  } while (0)
+#define COUNT_LEADING_ZEROS_0 63 /* sic */
+#if defined (__m88110__)
+#define umul_ppmm(wh, wl, u, v) \
+  do { \
+    union {UDItype __ll; \
+	   struct {USItype __h, __l;} __i; \
+	  } __x; \
+    __asm__ ("mulu.d %0,%1,%2" : "=r" (__x.__ll) : "r" (u), "r" (v)); \
+    (wh) = __x.__i.__h; \
+    (wl) = __x.__i.__l; \
+  } while (0)
+/* divu.d yields only the quotient; the remainder is reconstructed as
+   n0 - q*d.  Note: the quotient was previously read as __q.__l, but the
+   union's low word is __q.__i.__l -- __q has no member named __l.  */
+#define udiv_qrnnd(q, r, n1, n0, d) \
+  ({union {UDItype __ll; \
+	   struct {USItype __h, __l;} __i; \
+	  } __x, __q; \
+  __x.__i.__h = (n1); __x.__i.__l = (n0); \
+  __asm__ ("divu.d %0,%1,%2" \
+	   : "=r" (__q.__ll) : "r" (__x.__ll), "r" (d)); \
+  (r) = (n0) - __q.__i.__l * (d); (q) = __q.__i.__l; })
+#define UMUL_TIME 5
+#define UDIV_TIME 25
+#else
+#define UMUL_TIME 17
+#define UDIV_TIME 150
+#endif /* __m88110__ */
+#endif /* __m88000__ */
+
+#if defined (__mips) && W_TYPE_SIZE == 32
+/* MIPS 32-bit.  Prefer plain C with a 64-bit type for gcc >= 4.4 (which
+   no longer accepts the "l"/"h" hi/lo register constraints), then the
+   constraint form for gcc >= 2.7, then explicit mflo/mfhi.  */
+#if __GMP_GNUC_PREREQ (4,4)
+#define umul_ppmm(w1, w0, u, v) \
+  do { \
+    UDItype __ll = (UDItype)(u) * (v); \
+    w1 = __ll >> 32; \
+    w0 = __ll; \
+  } while (0)
+#endif
+#if !defined (umul_ppmm) && __GMP_GNUC_PREREQ (2,7)
+#define umul_ppmm(w1, w0, u, v) \
+  __asm__ ("multu %2,%3" : "=l" (w0), "=h" (w1) : "d" (u), "d" (v))
+#endif
+#if !defined (umul_ppmm)
+#define umul_ppmm(w1, w0, u, v) \
+  __asm__ ("multu %2,%3\n\tmflo %0\n\tmfhi %1" \
+	   : "=d" (w0), "=d" (w1) : "d" (u), "d" (v))
+#endif
+#define UMUL_TIME 10
+#define UDIV_TIME 100
+#endif /* __mips */
+
+#if (defined (__mips) && __mips >= 3) && W_TYPE_SIZE == 64
+/* MIPS 64-bit: same three-way fallback as the 32-bit case above, using a
+   128-bit TImode type for modern gcc.  */
+#if __GMP_GNUC_PREREQ (4,4)
+#define umul_ppmm(w1, w0, u, v) \
+  do { \
+    typedef unsigned int __ll_UTItype __attribute__((mode(TI))); \
+    __ll_UTItype __ll = (__ll_UTItype)(u) * (v); \
+    w1 = __ll >> 64; \
+    w0 = __ll; \
+  } while (0)
+#endif
+#if !defined (umul_ppmm) && __GMP_GNUC_PREREQ (2,7)
+#define umul_ppmm(w1, w0, u, v) \
+  __asm__ ("dmultu %2,%3" : "=l" (w0), "=h" (w1) : "d" (u), "d" (v))
+#endif
+#if !defined (umul_ppmm)
+#define umul_ppmm(w1, w0, u, v) \
+  __asm__ ("dmultu %2,%3\n\tmflo %0\n\tmfhi %1" \
+	   : "=d" (w0), "=d" (w1) : "d" (u), "d" (v))
+#endif
+#define UMUL_TIME 20
+#define UDIV_TIME 140
+#endif /* __mips */
+
+#if defined (__mmix__) && W_TYPE_SIZE == 64
+/* Knuth's MMIX: MULU leaves the high half in rH (the "z" constraint).  */
+#define umul_ppmm(w1, w0, u, v) \
+  __asm__ ("MULU %0,%2,%3" : "=r" (w0), "=z" (w1) : "r" (u), "r" (v))
+#endif
+
+#if defined (__ns32000__) && W_TYPE_SIZE == 32
+/* National Semiconductor 32000 series.  */
+#define umul_ppmm(w1, w0, u, v) \
+  ({union {UDItype __ll; \
+	   struct {USItype __l, __h;} __i; \
+	  } __x; \
+  __asm__ ("meid %2,%0" \
+	   : "=g" (__x.__ll) \
+	   : "%0" ((USItype)(u)), "g" ((USItype)(v))); \
+  (w1) = __x.__i.__h; (w0) = __x.__i.__l;})
+#define __umulsidi3(u, v) \
+  ({UDItype __w; \
+    __asm__ ("meid %2,%0" \
+	     : "=g" (__w) \
+	     : "%0" ((USItype)(u)), "g" ((USItype)(v))); \
+    __w; })
+#define udiv_qrnnd(q, r, n1, n0, d) \
+  ({union {UDItype __ll; \
+	   struct {USItype __l, __h;} __i; \
+	  } __x; \
+  __x.__i.__h = (n1); __x.__i.__l = (n0); \
+  __asm__ ("deid %2,%0" \
+	   : "=g" (__x.__ll) \
+	   : "0" (__x.__ll), "g" ((USItype)(d))); \
+  (r) = __x.__i.__l; (q) = __x.__i.__h; })
+#define count_trailing_zeros(count,x) \
+  do { \
+    __asm__ ("ffsd %2,%0" \
+	     : "=r" (count) \
+	     : "0" ((USItype) 0), "r" ((USItype) (x))); \
+  } while (0)
+#endif /* __ns32000__ */
+
+/* In the past we had a block of various #defines tested
+ _ARCH_PPC - AIX
+ _ARCH_PWR - AIX
+ __powerpc__ - gcc
+ __POWERPC__ - BEOS
+ __ppc__ - Darwin
+ PPC - old gcc, GNU/Linux, SysV
+ The plain PPC test was not good for vxWorks, since PPC is defined on all
+ CPUs there (eg. m68k too), as a constant one is expected to compare
+ CPU_FAMILY against.
+
+ At any rate, this was pretty unattractive and a bit fragile. The use of
+ HAVE_HOST_CPU_FAMILY is designed to cut through it all and be sure of
+ getting the desired effect.
+
+ ENHANCE-ME: We should test _IBMR2 here when we add assembly support for
+ the system vendor compilers. (Is that vendor compilers with inline asm,
+ or what?) */
+
+#if (HAVE_HOST_CPU_FAMILY_power || HAVE_HOST_CPU_FAMILY_powerpc) \
+  && W_TYPE_SIZE == 32
+/* 32-bit POWER/PowerPC.  The __builtin_constant_p branches pick shorter
+   two-instruction sequences when one high word is a known 0 or ~0, using
+   the carry-absorbing addze/addme/subfze/subfme forms.  The {old|new}
+   mnemonic pairs cover both POWER and PowerPC assembler syntax.  */
+#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
+  do { \
+    if (__builtin_constant_p (bh) && (bh) == 0) \
+      __asm__ ("{a%I4|add%I4c} %1,%3,%4\n\t{aze|addze} %0,%2" \
+	     : "=r" (sh), "=&r" (sl) : "r" (ah), "%r" (al), "rI" (bl));\
+    else if (__builtin_constant_p (bh) && (bh) == ~(USItype) 0) \
+      __asm__ ("{a%I4|add%I4c} %1,%3,%4\n\t{ame|addme} %0,%2" \
+	     : "=r" (sh), "=&r" (sl) : "r" (ah), "%r" (al), "rI" (bl));\
+    else \
+      __asm__ ("{a%I5|add%I5c} %1,%4,%5\n\t{ae|adde} %0,%2,%3" \
+	     : "=r" (sh), "=&r" (sl) \
+	     : "r" (ah), "r" (bh), "%r" (al), "rI" (bl)); \
+  } while (0)
+#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
+  do { \
+    if (__builtin_constant_p (ah) && (ah) == 0) \
+      __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{sfze|subfze} %0,%2" \
+	       : "=r" (sh), "=&r" (sl) : "r" (bh), "rI" (al), "r" (bl));\
+    else if (__builtin_constant_p (ah) && (ah) == ~(USItype) 0) \
+      __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{sfme|subfme} %0,%2" \
+	       : "=r" (sh), "=&r" (sl) : "r" (bh), "rI" (al), "r" (bl));\
+    else if (__builtin_constant_p (bh) && (bh) == 0) \
+      __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{ame|addme} %0,%2" \
+	       : "=r" (sh), "=&r" (sl) : "r" (ah), "rI" (al), "r" (bl));\
+    else if (__builtin_constant_p (bh) && (bh) == ~(USItype) 0) \
+      __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{aze|addze} %0,%2" \
+	       : "=r" (sh), "=&r" (sl) : "r" (ah), "rI" (al), "r" (bl));\
+    else \
+      __asm__ ("{sf%I4|subf%I4c} %1,%5,%4\n\t{sfe|subfe} %0,%3,%2" \
+	       : "=r" (sh), "=&r" (sl) \
+	       : "r" (ah), "r" (bh), "rI" (al), "r" (bl)); \
+  } while (0)
+#define count_leading_zeros(count, x) \
+  __asm__ ("{cntlz|cntlzw} %0,%1" : "=r" (count) : "r" (x))
+#define COUNT_LEADING_ZEROS_0 32
+#if HAVE_HOST_CPU_FAMILY_powerpc
+#if __GMP_GNUC_PREREQ (4,4)
+#define umul_ppmm(w1, w0, u, v) \
+  do { \
+    UDItype __ll = (UDItype)(u) * (v); \
+    w1 = __ll >> 32; \
+    w0 = __ll; \
+  } while (0)
+#endif
+#if !defined (umul_ppmm)
+/* Pass the local copies __m0/__m1 to the asm too, so the m0/m1 argument
+   expressions are evaluated exactly once (previously the raw arguments
+   were re-evaluated by the asm inputs).  */
+#define umul_ppmm(ph, pl, m0, m1) \
+  do { \
+    USItype __m0 = (m0), __m1 = (m1); \
+    __asm__ ("mulhwu %0,%1,%2" : "=r" (ph) : "%r" (__m0), "r" (__m1)); \
+    (pl) = __m0 * __m1; \
+  } while (0)
+#endif
+#define UMUL_TIME 15
+#define smul_ppmm(ph, pl, m0, m1) \
+  do { \
+    SItype __m0 = (m0), __m1 = (m1); \
+    __asm__ ("mulhw %0,%1,%2" : "=r" (ph) : "%r" (__m0), "r" (__m1)); \
+    (pl) = __m0 * __m1; \
+  } while (0)
+#define SMUL_TIME 14
+#define UDIV_TIME 120
+#else
+#define UMUL_TIME 8
+#define smul_ppmm(xh, xl, m0, m1) \
+  __asm__ ("mul %0,%2,%3" : "=r" (xh), "=q" (xl) : "r" (m0), "r" (m1))
+#define SMUL_TIME 4
+#define sdiv_qrnnd(q, r, nh, nl, d) \
+  __asm__ ("div %0,%2,%4" : "=r" (q), "=q" (r) : "r" (nh), "1" (nl), "r" (d))
+#define UDIV_TIME 100
+#endif
+#endif /* 32-bit POWER architecture variants. */
+
+/* We should test _IBMR2 here when we add assembly support for the system
+ vendor compilers. */
+#if HAVE_HOST_CPU_FAMILY_powerpc && W_TYPE_SIZE == 64
+#if !defined (_LONG_LONG_LIMB)
+/* _LONG_LONG_LIMB is ABI=mode32 where adde operates on 32-bit values. So
+ use adde etc only when not _LONG_LONG_LIMB. */
+#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
+ do { \
+ if (__builtin_constant_p (bh) && (bh) == 0) \
+ __asm__ ("{a%I4|add%I4c} %1,%3,%4\n\t{aze|addze} %0,%2" \
+ : "=r" (sh), "=&r" (sl) : "r" (ah), "%r" (al), "rI" (bl));\
+ else if (__builtin_constant_p (bh) && (bh) == ~(UDItype) 0) \
+ __asm__ ("{a%I4|add%I4c} %1,%3,%4\n\t{ame|addme} %0,%2" \
+ : "=r" (sh), "=&r" (sl) : "r" (ah), "%r" (al), "rI" (bl));\
+ else \
+ __asm__ ("{a%I5|add%I5c} %1,%4,%5\n\t{ae|adde} %0,%2,%3" \
+ : "=r" (sh), "=&r" (sl) \
+ : "r" (ah), "r" (bh), "%r" (al), "rI" (bl)); \
+ } while (0)
+/* We use "*rI" for the constant operand here, since with just "I", gcc barfs.
+ This might seem strange, but gcc folds away the dead code late. */
+/* sub_ddmmss: two-limb subtraction {sh,sl} = {ah,al} - {bh,bl}, done via
+   addic/subfe so the borrow travels in the carry bit.  When bl is a small
+   constant, addic with the negated immediate -bl is used (hence the range
+   test below: bl in (-0x8000, 0x8000] makes -bl a valid signed 16-bit
+   immediate).  Special cases for constant ah/bh of 0 or all-ones pick the
+   shorter subfze/subfme/addme/addze forms.  */
+#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
+ do { \
+ if (__builtin_constant_p (bl) && bl > -0x8000 && bl <= 0x8000) { \
+ if (__builtin_constant_p (ah) && (ah) == 0) \
+ __asm__ ("{ai|addic} %1,%3,%4\n\t{sfze|subfze} %0,%2" \
+ : "=r" (sh), "=&r" (sl) : "r" (bh), "rI" (al), "*rI" (-bl)); \
+ else if (__builtin_constant_p (ah) && (ah) == ~(UDItype) 0) \
+ __asm__ ("{ai|addic} %1,%3,%4\n\t{sfme|subfme} %0,%2" \
+ : "=r" (sh), "=&r" (sl) : "r" (bh), "rI" (al), "*rI" (-bl)); \
+ else if (__builtin_constant_p (bh) && (bh) == 0) \
+ __asm__ ("{ai|addic} %1,%3,%4\n\t{ame|addme} %0,%2" \
+ : "=r" (sh), "=&r" (sl) : "r" (ah), "rI" (al), "*rI" (-bl)); \
+ else if (__builtin_constant_p (bh) && (bh) == ~(UDItype) 0) \
+ __asm__ ("{ai|addic} %1,%3,%4\n\t{aze|addze} %0,%2" \
+ : "=r" (sh), "=&r" (sl) : "r" (ah), "rI" (al), "*rI" (-bl)); \
+ else \
+ __asm__ ("{ai|addic} %1,%4,%5\n\t{sfe|subfe} %0,%3,%2" \
+ : "=r" (sh), "=&r" (sl) \
+ : "r" (ah), "r" (bh), "rI" (al), "*rI" (-bl)); \
+ } else { \
+ if (__builtin_constant_p (ah) && (ah) == 0) \
+ __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{sfze|subfze} %0,%2" \
+ : "=r" (sh), "=&r" (sl) : "r" (bh), "rI" (al), "r" (bl)); \
+ else if (__builtin_constant_p (ah) && (ah) == ~(UDItype) 0) \
+ __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{sfme|subfme} %0,%2" \
+ : "=r" (sh), "=&r" (sl) : "r" (bh), "rI" (al), "r" (bl)); \
+ else if (__builtin_constant_p (bh) && (bh) == 0) \
+ __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{ame|addme} %0,%2" \
+ : "=r" (sh), "=&r" (sl) : "r" (ah), "rI" (al), "r" (bl)); \
+ else if (__builtin_constant_p (bh) && (bh) == ~(UDItype) 0) \
+ __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{aze|addze} %0,%2" \
+ : "=r" (sh), "=&r" (sl) : "r" (ah), "rI" (al), "r" (bl)); \
+ else \
+ __asm__ ("{sf%I4|subf%I4c} %1,%5,%4\n\t{sfe|subfe} %0,%3,%2" \
+ : "=r" (sh), "=&r" (sl) \
+ : "r" (ah), "r" (bh), "rI" (al), "r" (bl)); \
+ } \
+ } while (0)
+#endif /* ! _LONG_LONG_LIMB */
+/* cntlzd counts leading zero bits of the 64-bit operand; for x == 0 it
+   produces 64, which is what COUNT_LEADING_ZEROS_0 advertises.  */
+#define count_leading_zeros(count, x) \
+ __asm__ ("cntlzd %0,%1" : "=r" (count) : "r" (x))
+#define COUNT_LEADING_ZEROS_0 64
+#if __GMP_GNUC_PREREQ (4,4)
+/* With gcc >= 4.4 a 128-bit integer type (mode TI) is available on 64-bit
+   PowerPC, so let the compiler do the widening multiply itself.  */
+#define umul_ppmm(w1, w0, u, v) \
+ do { \
+ typedef unsigned int __ll_UTItype __attribute__((mode(TI))); \
+ __ll_UTItype __ll = (__ll_UTItype)(u) * (v); \
+ w1 = __ll >> 64; \
+ w0 = __ll; \
+ } while (0)
+#endif
#if !defined (umul_ppmm)
/* 64x64 -> 128 bit unsigned multiply: mulhdu produces the high limb, a
   plain C multiply the low limb.  The locals __m0/__m1 ensure the macro
   arguments are evaluated exactly once; the asm must therefore use them
   (using (m0)/(m1) directly would re-evaluate side-effecting arguments). */
#define umul_ppmm(ph, pl, m0, m1) \
  do {                                                                  \
    UDItype __m0 = (m0), __m1 = (m1);                                   \
    __asm__ ("mulhdu %0,%1,%2" : "=r" (ph) : "%r" (__m0), "r" (__m1));  \
    (pl) = __m0 * __m1;                                                 \
  } while (0)
#endif
#define UMUL_TIME 15
/* Signed 64x64 -> 128 bit multiply: mulhd produces the (signed) high limb.
   As in umul_ppmm, the asm operands must be the once-evaluated locals
   __m0/__m1, not the raw macro arguments, to avoid double evaluation.  */
#define smul_ppmm(ph, pl, m0, m1) \
  do {                                                                  \
    DItype __m0 = (m0), __m1 = (m1);                                    \
    __asm__ ("mulhd %0,%1,%2" : "=r" (ph) : "%r" (__m0), "r" (__m1));   \
    (pl) = __m0 * __m1;                                                 \
  } while (0)
#define SMUL_TIME 14 /* ??? */
#define UDIV_TIME 120 /* ??? */
+#endif /* 64-bit PowerPC. */
+
+#if defined (__pyr__) && W_TYPE_SIZE == 32
+/* Pyramid: 32-bit add/sub with carry via addwc/subwb.  Constraint "0"/"1"
+   tie the high/low inputs to the corresponding outputs (two-address ISA). */
+#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
+ __asm__ ("addw %5,%1\n\taddwc %3,%0" \
+ : "=r" (sh), "=&r" (sl) \
+ : "0" ((USItype)(ah)), "g" ((USItype)(bh)), \
+ "%1" ((USItype)(al)), "g" ((USItype)(bl)))
+#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
+ __asm__ ("subw %5,%1\n\tsubwb %3,%0" \
+ : "=r" (sh), "=&r" (sl) \
+ : "0" ((USItype)(ah)), "g" ((USItype)(bh)), \
+ "1" ((USItype)(al)), "g" ((USItype)(bl)))
+/* This insn works on Pyramids with AP, XP, or MI CPUs, but not with SP. */
+/* uemul leaves the 64-bit product in a register pair; the union splits it
+   into high/low 32-bit words.  */
+#define umul_ppmm(w1, w0, u, v) \
+ ({union {UDItype __ll; \
+ struct {USItype __h, __l;} __i; \
+ } __x; \
+ __asm__ ("movw %1,%R0\n\tuemul %2,%0" \
+ : "=&r" (__x.__ll) \
+ : "g" ((USItype) (u)), "g" ((USItype)(v))); \
+ (w1) = __x.__i.__h; (w0) = __x.__i.__l;})
+#endif /* __pyr__ */
+
+#if defined (__ibm032__) /* RT/ROMP */ && W_TYPE_SIZE == 32
+/* IBM RT/ROMP.  add/sub with carry via a/ae and s/se; smul_ppmm uses the
+   16-step "m" multiply-step instruction (16 steps x 2 bits = 32 bits),
+   accumulating through r2 and the MQ register (r10).  */
+#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
+ __asm__ ("a %1,%5\n\tae %0,%3" \
+ : "=r" (sh), "=&r" (sl) \
+ : "0" ((USItype)(ah)), "r" ((USItype)(bh)), \
+ "%1" ((USItype)(al)), "r" ((USItype)(bl)))
+#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
+ __asm__ ("s %1,%5\n\tse %0,%3" \
+ : "=r" (sh), "=&r" (sl) \
+ : "0" ((USItype)(ah)), "r" ((USItype)(bh)), \
+ "1" ((USItype)(al)), "r" ((USItype)(bl)))
+#define smul_ppmm(ph, pl, m0, m1) \
+ __asm__ ( \
+ "s r2,r2\n" \
+" mts r10,%2\n" \
+" m r2,%3\n" \
+" m r2,%3\n" \
+" m r2,%3\n" \
+" m r2,%3\n" \
+" m r2,%3\n" \
+" m r2,%3\n" \
+" m r2,%3\n" \
+" m r2,%3\n" \
+" m r2,%3\n" \
+" m r2,%3\n" \
+" m r2,%3\n" \
+" m r2,%3\n" \
+" m r2,%3\n" \
+" m r2,%3\n" \
+" m r2,%3\n" \
+" m r2,%3\n" \
+" cas %0,r2,r0\n" \
+" mfs r10,%1" \
+ : "=r" (ph), "=r" (pl) \
+ : "%r" ((USItype)(m0)), "r" ((USItype)(m1)) \
+ : "r2")
+#define UMUL_TIME 20
+#define UDIV_TIME 200
+/* clz only sees 16 bits at a time, so dispatch on whether the high half
+   is nonzero and adjust by 16 for the low half.  */
+#define count_leading_zeros(count, x) \
+ do { \
+ if ((x) >= 0x10000) \
+ __asm__ ("clz %0,%1" \
+ : "=r" (count) : "r" ((USItype)(x) >> 16)); \
+ else \
+ { \
+ __asm__ ("clz %0,%1" \
+ : "=r" (count) : "r" ((USItype)(x))); \
+ (count) += 16; \
+ } \
+ } while (0)
+#endif /* RT/ROMP */
+
+#if defined (__sh2__) && W_TYPE_SIZE == 32
+/* SH-2: dmulu.l leaves the 64-bit product in MACH:MACL; read both halves
+   with sts.  MACL/MACH are listed as clobbers.  */
+#define umul_ppmm(w1, w0, u, v) \
+ __asm__ ("dmulu.l %2,%3\n\tsts macl,%1\n\tsts mach,%0" \
+ : "=r" (w1), "=r" (w0) : "r" (u), "r" (v) : "macl", "mach")
+#define UMUL_TIME 5
+#endif
+
+#if defined (__sparc__) && W_TYPE_SIZE == 32
+/* 32-bit SPARC.  add/sub with carry via addcc/addx and subcc/subx; "rJ"
+   allows %g0 for a zero operand, "rI" a 13-bit signed immediate.  The
+   nested #ifs then pick umul/udiv per CPU variant: v9 (32-bit mode),
+   v8/supersparc, sparclite, and finally plain v7 mulscc/library fallback. */
+#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
+ __asm__ ("addcc %r4,%5,%1\n\taddx %r2,%3,%0" \
+ : "=r" (sh), "=&r" (sl) \
+ : "rJ" (ah), "rI" (bh),"%rJ" (al), "rI" (bl) \
+ __CLOBBER_CC)
+#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
+ __asm__ ("subcc %r4,%5,%1\n\tsubx %r2,%3,%0" \
+ : "=r" (sh), "=&r" (sl) \
+ : "rJ" (ah), "rI" (bh), "rJ" (al), "rI" (bl) \
+ __CLOBBER_CC)
+/* FIXME: When gcc -mcpu=v9 is used on solaris, gcc/config/sol2-sld-64.h
+ doesn't define anything to indicate that to us, it only sets __sparcv8. */
+#if defined (__sparc_v9__) || defined (__sparcv9)
+/* Perhaps we should use floating-point operations here? */
+#if 0
+/* Triggers a bug making mpz/tests/t-gcd.c fail.
+ Perhaps we simply need explicitly zero-extend the inputs? */
+#define umul_ppmm(w1, w0, u, v) \
+ __asm__ ("mulx %2,%3,%%g1; srl %%g1,0,%1; srlx %%g1,32,%0" : \
+ "=r" (w1), "=r" (w0) : "r" (u), "r" (v) : "g1")
+#else
+/* Use v8 umul until above bug is fixed. */
+#define umul_ppmm(w1, w0, u, v) \
+ __asm__ ("umul %2,%3,%1;rd %%y,%0" : "=r" (w1), "=r" (w0) : "r" (u), "r" (v))
+#endif
+/* Use a plain v8 divide for v9. */
+/* udiv divides the 64-bit value {%y,n0-ish} by d; the nops cover the
+   delayed write to %y.  Remainder is reconstructed as n0 - q*d.  */
+#define udiv_qrnnd(q, r, n1, n0, d) \
+ do { \
+ USItype __q; \
+ __asm__ ("mov %1,%%y;nop;nop;nop;udiv %2,%3,%0" \
+ : "=r" (__q) : "r" (n1), "r" (n0), "r" (d)); \
+ (r) = (n0) - __q * (d); \
+ (q) = __q; \
+ } while (0)
+#else
+#if defined (__sparc_v8__) /* gcc normal */ \
+ || defined (__sparcv8) /* gcc solaris */ \
+ || HAVE_HOST_CPU_supersparc
+/* Don't match immediate range because, 1) it is not often useful,
+ 2) the 'I' flag thinks of the range as a 13 bit signed interval,
+ while we want to match a 13 bit interval, sign extended to 32 bits,
+ but INTERPRETED AS UNSIGNED. */
+#define umul_ppmm(w1, w0, u, v) \
+ __asm__ ("umul %2,%3,%1;rd %%y,%0" : "=r" (w1), "=r" (w0) : "r" (u), "r" (v))
+#define UMUL_TIME 5
+
+#if HAVE_HOST_CPU_supersparc
+#define UDIV_TIME 60 /* SuperSPARC timing */
+#else
+/* Don't use this on SuperSPARC because its udiv only handles 53 bit
+ dividends and will trap to the kernel for the rest. */
+#define udiv_qrnnd(q, r, n1, n0, d) \
+ do { \
+ USItype __q; \
+ __asm__ ("mov %1,%%y;nop;nop;nop;udiv %2,%3,%0" \
+ : "=r" (__q) : "r" (n1), "r" (n0), "r" (d)); \
+ (r) = (n0) - __q * (d); \
+ (q) = __q; \
+ } while (0)
+#define UDIV_TIME 25
+#endif /* HAVE_HOST_CPU_supersparc */
+
+#else /* ! __sparc_v8__ */
+#if defined (__sparclite__)
+/* This has hardware multiply but not divide. It also has two additional
+ instructions scan (ffs from high bit) and divscc. */
+#define umul_ppmm(w1, w0, u, v) \
+ __asm__ ("umul %2,%3,%1;rd %%y,%0" : "=r" (w1), "=r" (w0) : "r" (u), "r" (v))
+#define UMUL_TIME 5
+#define udiv_qrnnd(q, r, n1, n0, d) \
+ __asm__ ("! Inlined udiv_qrnnd\n" \
+" wr %%g0,%2,%%y ! Not a delayed write for sparclite\n" \
+" tst %%g0\n" \
+" divscc %3,%4,%%g1\n" \
+" divscc %%g1,%4,%%g1\n" \
+" divscc %%g1,%4,%%g1\n" \
+" divscc %%g1,%4,%%g1\n" \
+" divscc %%g1,%4,%%g1\n" \
+" divscc %%g1,%4,%%g1\n" \
+" divscc %%g1,%4,%%g1\n" \
+" divscc %%g1,%4,%%g1\n" \
+" divscc %%g1,%4,%%g1\n" \
+" divscc %%g1,%4,%%g1\n" \
+" divscc %%g1,%4,%%g1\n" \
+" divscc %%g1,%4,%%g1\n" \
+" divscc %%g1,%4,%%g1\n" \
+" divscc %%g1,%4,%%g1\n" \
+" divscc %%g1,%4,%%g1\n" \
+" divscc %%g1,%4,%%g1\n" \
+" divscc %%g1,%4,%%g1\n" \
+" divscc %%g1,%4,%%g1\n" \
+" divscc %%g1,%4,%%g1\n" \
+" divscc %%g1,%4,%%g1\n" \
+" divscc %%g1,%4,%%g1\n" \
+" divscc %%g1,%4,%%g1\n" \
+" divscc %%g1,%4,%%g1\n" \
+" divscc %%g1,%4,%%g1\n" \
+" divscc %%g1,%4,%%g1\n" \
+" divscc %%g1,%4,%%g1\n" \
+" divscc %%g1,%4,%%g1\n" \
+" divscc %%g1,%4,%%g1\n" \
+" divscc %%g1,%4,%%g1\n" \
+" divscc %%g1,%4,%%g1\n" \
+" divscc %%g1,%4,%%g1\n" \
+" divscc %%g1,%4,%0\n" \
+" rd %%y,%1\n" \
+" bl,a 1f\n" \
+" add %1,%4,%1\n" \
+"1: ! End of inline udiv_qrnnd" \
+ : "=r" (q), "=r" (r) : "r" (n1), "r" (n0), "rI" (d) \
+ : "%g1" __AND_CLOBBER_CC)
+#define UDIV_TIME 37
+#define count_leading_zeros(count, x) \
+ __asm__ ("scan %1,1,%0" : "=r" (count) : "r" (x))
+/* Early sparclites return 63 for an argument of 0, but they warn that future
+ implementations might change this. Therefore, leave COUNT_LEADING_ZEROS_0
+ undefined. */
+#endif /* __sparclite__ */
+#endif /* __sparc_v8__ */
+#endif /* __sparc_v9__ */
+/* Default to sparc v7 versions of umul_ppmm and udiv_qrnnd. */
+/* v7 has no multiply instruction: 32 mulscc steps plus a sign-correction
+   add implement a 32x32->64 unsigned multiply in 39 instructions.  */
+#ifndef umul_ppmm
+#define umul_ppmm(w1, w0, u, v) \
+ __asm__ ("! Inlined umul_ppmm\n" \
+" wr %%g0,%2,%%y ! SPARC has 0-3 delay insn after a wr\n" \
+" sra %3,31,%%g2 ! Don't move this insn\n" \
+" and %2,%%g2,%%g2 ! Don't move this insn\n" \
+" andcc %%g0,0,%%g1 ! Don't move this insn\n" \
+" mulscc %%g1,%3,%%g1\n" \
+" mulscc %%g1,%3,%%g1\n" \
+" mulscc %%g1,%3,%%g1\n" \
+" mulscc %%g1,%3,%%g1\n" \
+" mulscc %%g1,%3,%%g1\n" \
+" mulscc %%g1,%3,%%g1\n" \
+" mulscc %%g1,%3,%%g1\n" \
+" mulscc %%g1,%3,%%g1\n" \
+" mulscc %%g1,%3,%%g1\n" \
+" mulscc %%g1,%3,%%g1\n" \
+" mulscc %%g1,%3,%%g1\n" \
+" mulscc %%g1,%3,%%g1\n" \
+" mulscc %%g1,%3,%%g1\n" \
+" mulscc %%g1,%3,%%g1\n" \
+" mulscc %%g1,%3,%%g1\n" \
+" mulscc %%g1,%3,%%g1\n" \
+" mulscc %%g1,%3,%%g1\n" \
+" mulscc %%g1,%3,%%g1\n" \
+" mulscc %%g1,%3,%%g1\n" \
+" mulscc %%g1,%3,%%g1\n" \
+" mulscc %%g1,%3,%%g1\n" \
+" mulscc %%g1,%3,%%g1\n" \
+" mulscc %%g1,%3,%%g1\n" \
+" mulscc %%g1,%3,%%g1\n" \
+" mulscc %%g1,%3,%%g1\n" \
+" mulscc %%g1,%3,%%g1\n" \
+" mulscc %%g1,%3,%%g1\n" \
+" mulscc %%g1,%3,%%g1\n" \
+" mulscc %%g1,%3,%%g1\n" \
+" mulscc %%g1,%3,%%g1\n" \
+" mulscc %%g1,%3,%%g1\n" \
+" mulscc %%g1,%3,%%g1\n" \
+" mulscc %%g1,0,%%g1\n" \
+" add %%g1,%%g2,%0\n" \
+" rd %%y,%1" \
+ : "=r" (w1), "=r" (w0) : "%rI" (u), "r" (v) \
+ : "%g1", "%g2" __AND_CLOBBER_CC)
+#define UMUL_TIME 39 /* 39 instructions */
+#endif
+#ifndef udiv_qrnnd
+#ifndef LONGLONG_STANDALONE
+/* No usable divide instruction: call the out-of-line mpn udiv_qrnnd.  */
+#define udiv_qrnnd(q, r, n1, n0, d) \
+ do { UWtype __r; \
+ (q) = __MPN(udiv_qrnnd) (&__r, (n1), (n0), (d)); \
+ (r) = __r; \
+ } while (0)
+extern UWtype __MPN(udiv_qrnnd) _PROTO ((UWtype *, UWtype, UWtype, UWtype));
+#ifndef UDIV_TIME
+#define UDIV_TIME 140
+#endif
+#endif /* LONGLONG_STANDALONE */
+#endif /* udiv_qrnnd */
+#endif /* __sparc__ */
+
+#if defined (__sparc__) && W_TYPE_SIZE == 64
+/* 64-bit SPARC.  The integer condition codes only expose a 32-bit carry,
+   so the 64-bit carry out of al+bl is re-derived by adding the high
+   32-bit halves (operands %6/%7, the arguments shifted right by 32) with
+   addccc, discarding the sum into %g0 and keeping only the carry for the
+   final addc of the high limbs.  Same scheme for subtraction.  */
+#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
+ __asm__ ( \
+ "addcc %r4,%5,%1\n" \
+ " addccc %r6,%7,%%g0\n" \
+ " addc %r2,%3,%0" \
+ : "=r" (sh), "=&r" (sl) \
+ : "rJ" (ah), "rI" (bh), "%rJ" (al), "rI" (bl), \
+ "%rJ" ((al) >> 32), "rI" ((bl) >> 32) \
+ __CLOBBER_CC)
+#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
+ __asm__ ( \
+ "subcc %r4,%5,%1\n" \
+ " subccc %r6,%7,%%g0\n" \
+ " subc %r2,%3,%0" \
+ : "=r" (sh), "=&r" (sl) \
+ : "rJ" (ah), "rI" (bh), "rJ" (al), "rI" (bl), \
+ "rJ" ((al) >> 32), "rI" ((bl) >> 32) \
+ __CLOBBER_CC)
+#endif
+
+#if defined (__vax__) && W_TYPE_SIZE == 32
+/* VAX: carry via addl2/adwc and subl2/sbwc; emul/ediv give signed 32x32->64
+   multiply and 64/32 divide.  Note __i here is {__l, __h}: VAX is
+   little-endian, low word first.  */
+#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
+ __asm__ ("addl2 %5,%1\n\tadwc %3,%0" \
+ : "=g" (sh), "=&g" (sl) \
+ : "0" ((USItype)(ah)), "g" ((USItype)(bh)), \
+ "%1" ((USItype)(al)), "g" ((USItype)(bl)))
+#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
+ __asm__ ("subl2 %5,%1\n\tsbwc %3,%0" \
+ : "=g" (sh), "=&g" (sl) \
+ : "0" ((USItype)(ah)), "g" ((USItype)(bh)), \
+ "1" ((USItype)(al)), "g" ((USItype)(bl)))
+#define smul_ppmm(xh, xl, m0, m1) \
+ do { \
+ union {UDItype __ll; \
+ struct {USItype __l, __h;} __i; \
+ } __x; \
+ USItype __m0 = (m0), __m1 = (m1); \
+ __asm__ ("emul %1,%2,$0,%0" \
+ : "=g" (__x.__ll) : "g" (__m0), "g" (__m1)); \
+ (xh) = __x.__i.__h; (xl) = __x.__i.__l; \
+ } while (0)
+#define sdiv_qrnnd(q, r, n1, n0, d) \
+ do { \
+ union {DItype __ll; \
+ struct {SItype __l, __h;} __i; \
+ } __x; \
+ __x.__i.__h = n1; __x.__i.__l = n0; \
+ __asm__ ("ediv %3,%2,%0,%1" \
+ : "=g" (q), "=g" (r) : "g" (__x.__ll), "g" (d)); \
+ } while (0)
+#if 0
+/* FIXME: This instruction appears to be unimplemented on some systems (vax
+ 8800 maybe). */
+#define count_trailing_zeros(count,x) \
+ do { \
+ __asm__ ("ffs 0, 31, %1, %0" \
+ : "=g" (count) \
+ : "g" ((USItype) (x))); \
+ } while (0)
+#endif
+#endif /* __vax__ */
+
+#if defined (__z8000__) && W_TYPE_SIZE == 16
+/* Zilog Z8000, 16-bit limbs.  mult gives a signed 16x16->32 product; the
+   final two lines correct it to an unsigned product by conditionally
+   adding the other operand when a sign bit was set.
+   NOTE(review): the asm inputs use (m0)/(m1) rather than the locals
+   __m0/__m1, so the macro arguments look like they are evaluated twice --
+   verify against callers before relying on side-effecting arguments.  */
+#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
+ __asm__ ("add %H1,%H5\n\tadc %H0,%H3" \
+ : "=r" (sh), "=&r" (sl) \
+ : "0" ((unsigned int)(ah)), "r" ((unsigned int)(bh)), \
+ "%1" ((unsigned int)(al)), "rQR" ((unsigned int)(bl)))
+#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
+ __asm__ ("sub %H1,%H5\n\tsbc %H0,%H3" \
+ : "=r" (sh), "=&r" (sl) \
+ : "0" ((unsigned int)(ah)), "r" ((unsigned int)(bh)), \
+ "1" ((unsigned int)(al)), "rQR" ((unsigned int)(bl)))
+#define umul_ppmm(xh, xl, m0, m1) \
+ do { \
+ union {long int __ll; \
+ struct {unsigned int __h, __l;} __i; \
+ } __x; \
+ unsigned int __m0 = (m0), __m1 = (m1); \
+ __asm__ ("mult %S0,%H3" \
+ : "=r" (__x.__i.__h), "=r" (__x.__i.__l) \
+ : "%1" (m0), "rQR" (m1)); \
+ (xh) = __x.__i.__h; (xl) = __x.__i.__l; \
+ (xh) += ((((signed int) __m0 >> 15) & __m1) \
+ + (((signed int) __m1 >> 15) & __m0)); \
+ } while (0)
+#endif /* __z8000__ */
+
+#endif /* __GNUC__ */
+
+#endif /* NO_ASM */
+
+
#if !defined (umul_ppmm) && defined (__umulsidi3)
/* Derive umul_ppmm from __umulsidi3 (a double-limb multiply provided by
   libgcc or defined below).  Wrapped in do/while (0) so the expansion plus
   the caller's semicolon forms exactly one statement (safe in an unbraced
   if/else); the output arguments are parenthesized for macro hygiene.  */
#define umul_ppmm(ph, pl, m0, m1) \
  do {                                                                  \
    UDWtype __umul_ll = __umulsidi3 (m0, m1);                           \
    (ph) = (UWtype) (__umul_ll >> W_TYPE_SIZE);                         \
    (pl) = (UWtype) __umul_ll;                                          \
  } while (0)
#endif
+
+#if !defined (__umulsidi3)
+/* Fallback __umulsidi3 built from umul_ppmm: returns the full double-limb
+   product as a UDWtype (GCC statement-expression).  */
+#define __umulsidi3(u, v) \
+ ({UWtype __hi, __lo; \
+ umul_ppmm (__hi, __lo, u, v); \
+ ((UDWtype) __hi << W_TYPE_SIZE) | __lo; })
+#endif
+
+
+/* Use mpn_umul_ppmm or mpn_udiv_qrnnd functions, if they exist. The "_r"
+ forms have "reversed" arguments, meaning the pointer is last, which
+ sometimes allows better parameter passing, in particular on 64-bit
+ hppa. */
+
+#define mpn_umul_ppmm __MPN(umul_ppmm)
+extern UWtype mpn_umul_ppmm _PROTO ((UWtype *, UWtype, UWtype));
+
+#if ! defined (umul_ppmm) && HAVE_NATIVE_mpn_umul_ppmm \
+ && ! defined (LONGLONG_STANDALONE)
+/* umul_ppmm via the assembly mpn_umul_ppmm: returns the high limb, stores
+   the low limb through the pointer argument.  */
+#define umul_ppmm(wh, wl, u, v) \
+ do { \
+ UWtype __umul_ppmm__p0; \
+ (wh) = mpn_umul_ppmm (&__umul_ppmm__p0, (UWtype) (u), (UWtype) (v)); \
+ (wl) = __umul_ppmm__p0; \
+ } while (0)
+#endif
+
+#define mpn_umul_ppmm_r __MPN(umul_ppmm_r)
+extern UWtype mpn_umul_ppmm_r _PROTO ((UWtype, UWtype, UWtype *));
+
+#if ! defined (umul_ppmm) && HAVE_NATIVE_mpn_umul_ppmm_r \
+ && ! defined (LONGLONG_STANDALONE)
+/* Same as above but with the "reversed" calling convention: the low-limb
+   pointer comes last (better parameter passing on e.g. 64-bit hppa).  */
+#define umul_ppmm(wh, wl, u, v) \
+ do { \
+ UWtype __umul_ppmm__p0; \
+ (wh) = mpn_umul_ppmm_r ((UWtype) (u), (UWtype) (v), &__umul_ppmm__p0); \
+ (wl) = __umul_ppmm__p0; \
+ } while (0)
+#endif
+
+#define mpn_udiv_qrnnd __MPN(udiv_qrnnd)
+extern UWtype mpn_udiv_qrnnd _PROTO ((UWtype *, UWtype, UWtype, UWtype));
+
+#if ! defined (udiv_qrnnd) && HAVE_NATIVE_mpn_udiv_qrnnd \
+ && ! defined (LONGLONG_STANDALONE)
+#define udiv_qrnnd(q, r, n1, n0, d) \
+ do { \
+ UWtype __udiv_qrnnd__r; \
+ (q) = mpn_udiv_qrnnd (&__udiv_qrnnd__r, \
+ (UWtype) (n1), (UWtype) (n0), (UWtype) d); \
+ (r) = __udiv_qrnnd__r; \
+ } while (0)
+#endif
+
+#define mpn_udiv_qrnnd_r __MPN(udiv_qrnnd_r)
+extern UWtype mpn_udiv_qrnnd_r _PROTO ((UWtype, UWtype, UWtype, UWtype *));
+
+#if ! defined (udiv_qrnnd) && HAVE_NATIVE_mpn_udiv_qrnnd_r \
+ && ! defined (LONGLONG_STANDALONE)
+#define udiv_qrnnd(q, r, n1, n0, d) \
+ do { \
+ UWtype __udiv_qrnnd__r; \
+ (q) = mpn_udiv_qrnnd_r ((UWtype) (n1), (UWtype) (n0), (UWtype) d, \
+ &__udiv_qrnnd__r); \
+ (r) = __udiv_qrnnd__r; \
+ } while (0)
+#endif
+
+
+/* If this machine has no inline assembler, use C macros. */
+
#if !defined (add_ssaaaa)
/* Generic C fallback: two-limb addition {sh,sl} = {ah,al} + {bh,bl}.
   The carry out of the low limbs is recovered by the unsigned wrap-around
   test (__lo_sum < (al)).  As with all these macros, the arguments are
   macro parameters and may be evaluated more than once.  */
#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
  do { \
    UWtype __lo_sum = (al) + (bl); \
    (sh) = (ah) + (bh) + (__lo_sum < (al)); \
    (sl) = __lo_sum; \
  } while (0)
#endif
+
#if !defined (sub_ddmmss)
/* Generic C fallback: two-limb subtraction {sh,sl} = {ah,al} - {bh,bl}.
   The borrow out of the low limbs is the comparison (al) < (bl); the low
   difference is computed first so that sl may alias any input.  */
#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
  do { \
    UWtype __lo_diff = (al) - (bl); \
    (sh) = (ah) - (bh) - ((al) < (bl)); \
    (sl) = __lo_diff; \
  } while (0)
#endif
+
+/* If we lack umul_ppmm but have smul_ppmm, define umul_ppmm in terms of
+ smul_ppmm. */
+/* Correction: for each operand whose sign bit was set, the signed product
+   is short by the other operand times 2^W_TYPE_SIZE; add those back into
+   the high limb.  (-(x >> (W-1)) is an all-ones mask iff x is "negative".) */
+#define umul_ppmm(w1, w0, u, v) \
+ do { \
+ UWtype __w1; \
+ UWtype __xm0 = (u), __xm1 = (v); \
+ smul_ppmm (__w1, w0, __xm0, __xm1); \
+ (w1) = __w1 + (-(__xm0 >> (W_TYPE_SIZE - 1)) & __xm1) \
+ + (-(__xm1 >> (W_TYPE_SIZE - 1)) & __xm0); \
+ } while (0)
+#endif
+
+/* If we still don't have umul_ppmm, define it using plain C.
+
+ For reference, when this code is used for squaring (ie. u and v identical
+ expressions), gcc recognises __x1 and __x2 are the same and generates 3
+ multiplies, not 4. The subsequent additions could be optimized a bit,
+ but the only place GMP currently uses such a square is mpn_sqr_basecase,
+ and chips obliged to use this generic C umul will have plenty of worse
+ performance problems than a couple of extra instructions on the diagonal
+ of sqr_basecase. */
+
+#if !defined (umul_ppmm)
+/* Plain C schoolbook multiply on half-limbs: four UHWtype x UHWtype
+   products combined with explicit carry detection (__x1 < __x2 after the
+   addition means a carry into __x3's position, worth __ll_B).  */
+#define umul_ppmm(w1, w0, u, v) \
+ do { \
+ UWtype __x0, __x1, __x2, __x3; \
+ UHWtype __ul, __vl, __uh, __vh; \
+ UWtype __u = (u), __v = (v); \
+ \
+ __ul = __ll_lowpart (__u); \
+ __uh = __ll_highpart (__u); \
+ __vl = __ll_lowpart (__v); \
+ __vh = __ll_highpart (__v); \
+ \
+ __x0 = (UWtype) __ul * __vl; \
+ __x1 = (UWtype) __ul * __vh; \
+ __x2 = (UWtype) __uh * __vl; \
+ __x3 = (UWtype) __uh * __vh; \
+ \
+ __x1 += __ll_highpart (__x0);/* this can't give carry */ \
+ __x1 += __x2; /* but this indeed can */ \
+ if (__x1 < __x2) /* did we get it? */ \
+ __x3 += __ll_B; /* yes, add it in the proper pos. */ \
+ \
+ (w1) = __x3 + __ll_highpart (__x1); \
+ (w0) = (__x1 << W_TYPE_SIZE/2) + __ll_lowpart (__x0); \
+ } while (0)
+#endif
+
+/* If we don't have smul_ppmm, define it using umul_ppmm (which surely will
+ exist in one form or another). */
+/* Inverse of the correction above: subtract the other operand from the
+   high limb for each operand whose sign bit was set.  */
+#if !defined (smul_ppmm)
+#define smul_ppmm(w1, w0, u, v) \
+ do { \
+ UWtype __w1; \
+ UWtype __xm0 = (u), __xm1 = (v); \
+ umul_ppmm (__w1, w0, __xm0, __xm1); \
+ (w1) = __w1 - (-(__xm0 >> (W_TYPE_SIZE - 1)) & __xm1) \
+ - (-(__xm1 >> (W_TYPE_SIZE - 1)) & __xm0); \
+ } while (0)
+#endif
+
+/* Define this unconditionally, so it can be used for debugging. */
+/* Schoolbook 2-limb / 1-limb division on half-limbs (cf. Knuth TAoCP vol. 2,
+   Algorithm D with two half-limb digits): compute the quotient half-digit
+   __q1 against the divisor's high half, then correct it at most twice when
+   the partial remainder would go negative; repeat for __q0.  Requires
+   (n1) < (d), and d normalized unless UDIV_NEEDS_NORMALIZATION is handled
+   by the caller.  */
+#define __udiv_qrnnd_c(q, r, n1, n0, d) \
+ do { \
+ UWtype __d1, __d0, __q1, __q0, __r1, __r0, __m; \
+ \
+ ASSERT ((d) != 0); \
+ ASSERT ((n1) < (d)); \
+ \
+ __d1 = __ll_highpart (d); \
+ __d0 = __ll_lowpart (d); \
+ \
+ __q1 = (n1) / __d1; \
+ __r1 = (n1) - __q1 * __d1; \
+ __m = __q1 * __d0; \
+ __r1 = __r1 * __ll_B | __ll_highpart (n0); \
+ if (__r1 < __m) \
+ { \
+ __q1--, __r1 += (d); \
+ if (__r1 >= (d)) /* i.e. we didn't get carry when adding to __r1 */\
+ if (__r1 < __m) \
+ __q1--, __r1 += (d); \
+ } \
+ __r1 -= __m; \
+ \
+ __q0 = __r1 / __d1; \
+ __r0 = __r1 - __q0 * __d1; \
+ __m = __q0 * __d0; \
+ __r0 = __r0 * __ll_B | __ll_lowpart (n0); \
+ if (__r0 < __m) \
+ { \
+ __q0--, __r0 += (d); \
+ if (__r0 >= (d)) \
+ if (__r0 < __m) \
+ __q0--, __r0 += (d); \
+ } \
+ __r0 -= __m; \
+ \
+ (q) = __q1 * __ll_B | __q0; \
+ (r) = __r0; \
+ } while (0)
+
+/* If the processor has no udiv_qrnnd but sdiv_qrnnd, go through
+ __udiv_w_sdiv (defined in libgcc or elsewhere). */
+#if !defined (udiv_qrnnd) && defined (sdiv_qrnnd)
+#define udiv_qrnnd(q, r, nh, nl, d) \
+ do { \
+ UWtype __r; \
+ (q) = __MPN(udiv_w_sdiv) (&__r, nh, nl, d); \
+ (r) = __r; \
+ } while (0)
+#endif
+
+/* If udiv_qrnnd was not defined for this processor, use __udiv_qrnnd_c. */
+/* The generic version requires a normalized divisor (high bit set).  */
+#if !defined (udiv_qrnnd)
+#define UDIV_NEEDS_NORMALIZATION 1
+#define udiv_qrnnd __udiv_qrnnd_c
+#endif
+
+#if !defined (count_leading_zeros)
+/* Generic count_leading_zeros: locate the highest nonzero 8-bit chunk
+   (by nested compares for 32-bit words, by a byte loop otherwise), then
+   finish with the 128-entry lookup table __clz_tab.  */
+#define count_leading_zeros(count, x) \
+ do { \
+ UWtype __xr = (x); \
+ UWtype __a; \
+ \
+ if (W_TYPE_SIZE == 32) \
+ { \
+ __a = __xr < ((UWtype) 1 << 2*__BITS4) \
+ ? (__xr < ((UWtype) 1 << __BITS4) ? 1 : __BITS4 + 1) \
+ : (__xr < ((UWtype) 1 << 3*__BITS4) ? 2*__BITS4 + 1 \
+ : 3*__BITS4 + 1); \
+ } \
+ else \
+ { \
+ for (__a = W_TYPE_SIZE - 8; __a > 0; __a -= 8) \
+ if (((__xr >> __a) & 0xff) != 0) \
+ break; \
+ ++__a; \
+ } \
+ \
+ (count) = W_TYPE_SIZE + 1 - __a - __clz_tab[__xr >> __a]; \
+ } while (0)
+/* This version gives a well-defined value for zero. */
+#define COUNT_LEADING_ZEROS_0 (W_TYPE_SIZE - 1)
+#define COUNT_LEADING_ZEROS_NEED_CLZ_TAB
+#endif
+
+/* clz_tab needed by mpn/x86/pentium/mod_1.asm in a fat binary */
+#if HAVE_HOST_CPU_FAMILY_x86 && WANT_FAT_BINARY
+#define COUNT_LEADING_ZEROS_NEED_CLZ_TAB
+#endif
+
+/* The table lives in GMP when MPFR is built inside GMP's tree, hence the
+   __GMP_DECLSPEC in that configuration only.  */
+#ifdef COUNT_LEADING_ZEROS_NEED_CLZ_TAB
+# ifdef MPFR_HAVE_GMP_IMPL
+ extern const unsigned char __GMP_DECLSPEC __clz_tab[128];
+# else
+ extern const unsigned char __clz_tab[128];
+# endif
+#endif
+
+#if !defined (count_trailing_zeros)
+/* Define count_trailing_zeros using count_leading_zeros. The latter might be
+ defined in asm, but if it is not, the C version above is good enough. */
+/* x & -x isolates the lowest set bit; x must be nonzero (asserted).  */
+#define count_trailing_zeros(count, x) \
+ do { \
+ UWtype __ctz_x = (x); \
+ UWtype __ctz_c; \
+ ASSERT (__ctz_x != 0); \
+ count_leading_zeros (__ctz_c, __ctz_x & -__ctz_x); \
+ (count) = W_TYPE_SIZE - 1 - __ctz_c; \
+ } while (0)
+#endif
+
+/* Defaults for the tuning/feature knobs any asm section above may have
+   set: normalization requirement, preinverted-division policy, and the
+   rough cycle counts used by algorithm selection.  */
+#ifndef UDIV_NEEDS_NORMALIZATION
+#define UDIV_NEEDS_NORMALIZATION 0
+#endif
+
+/* Whether udiv_qrnnd is actually implemented with udiv_qrnnd_preinv, and
+ that hence the latter should always be used. */
+#ifndef UDIV_PREINV_ALWAYS
+#define UDIV_PREINV_ALWAYS 0
+#endif
+
+/* Give defaults for UMUL_TIME and UDIV_TIME. */
+#ifndef UMUL_TIME
+#define UMUL_TIME 1
+#endif
+
+#ifndef UDIV_TIME
+#define UDIV_TIME UMUL_TIME
+#endif
diff --git a/src/mpfr-thread.h b/src/mpfr-thread.h
new file mode 100644
index 000000000..1e39a44ee
--- /dev/null
+++ b/src/mpfr-thread.h
@@ -0,0 +1,48 @@
+/* MPFR internal header related to thread-local variables.
+
+Copyright 2005, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#ifndef __MPFR_THREAD_H__
+#define __MPFR_THREAD_H__
+
+/* Note: Let's define MPFR_THREAD_ATTR even after a #error to make the
+ error message more visible (e.g. gcc doesn't immediately stop after
+ the #error line and outputs many error messages if MPFR_THREAD_ATTR
+ is not defined). But some compilers will just output a message and
+ may build MPFR "successfully" (without thread support). */
+/* MPFR_THREAD_ATTR expands to the compiler's thread-local storage
+   qualifier when MPFR_USE_THREAD_SAFE is set (MSVC __declspec(thread),
+   GCC-style __thread otherwise), and to nothing in a non-TLS build.  */
+#ifndef MPFR_THREAD_ATTR
+# ifdef MPFR_USE_THREAD_SAFE
+# if defined(_MSC_VER)
+# if defined(_WINDLL)
+# error "Can't build MPFR DLL as thread safe."
+# define MPFR_THREAD_ATTR
+# else
+# define MPFR_THREAD_ATTR __declspec( thread )
+# endif
+# else
+# define MPFR_THREAD_ATTR __thread
+# endif
+# else
+# define MPFR_THREAD_ATTR
+# endif
+#endif
+
+#endif
diff --git a/src/mpfr.h b/src/mpfr.h
new file mode 100644
index 000000000..0bcff0dc4
--- /dev/null
+++ b/src/mpfr.h
@@ -0,0 +1,912 @@
+/* mpfr.h -- Include file for mpfr.
+
+Copyright 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#ifndef __MPFR_H
+#define __MPFR_H
+
+/* Define MPFR version number */
+#define MPFR_VERSION_MAJOR 3
+#define MPFR_VERSION_MINOR 1
+#define MPFR_VERSION_PATCHLEVEL 0
+#define MPFR_VERSION_STRING "3.1.0-dev"
+
+/* Macros dealing with MPFR VERSION */
+/* Packs major/minor/patchlevel into one integer so that version numbers
+   compare with ordinary <, ==, >.  */
+#define MPFR_VERSION_NUM(a,b,c) (((a) << 16L) | ((b) << 8) | (c))
+#define MPFR_VERSION \
+MPFR_VERSION_NUM(MPFR_VERSION_MAJOR,MPFR_VERSION_MINOR,MPFR_VERSION_PATCHLEVEL)
+
+/* Check if GMP is included, and try to include it (Works with local GMP) */
+#ifndef __GMP_H__
+# include <gmp.h>
+#endif
+
+/* Check if stdio.h is included or if the user wants FILE */
+#if defined (_GMP_H_HAVE_FILE) || defined (MPFR_USE_FILE)
+# define _MPFR_H_HAVE_FILE 1
+#endif
+
+#if defined (_GMP_H_HAVE_VA_LIST)
+# define _MPFR_H_HAVE_VA_LIST 1
+#endif
+
+/* Check if <stdint.h> / <inttypes.h> is included or if the user
+ explicitly wants intmax_t. Automatical detection is done by
+ checking:
+ - INTMAX_C and UINTMAX_C, but not if the compiler is a C++ one
+ (as suggested by Patrick Pelissier) because the test does not
+ work well in this case. See:
+ http://websympa.loria.fr/wwsympa/arc/mpfr/2010-02/msg00025.html
+ We do not check INTMAX_MAX and UINTMAX_MAX because under Solaris,
+ these macros are always defined by <limits.h> (i.e. even when
+ <stdint.h> and <inttypes.h> are not included).
+ - _STDINT_H (defined by the glibc), _STDINT_H_ (defined under
+ Mac OS X) and _STDINT (defined under MS Visual Studio), but
+ this test may not work with all implementations.
+ Portable software should not rely on these tests.
+*/
+#if (defined (INTMAX_C) && defined (UINTMAX_C) && !defined(__cplusplus)) || \
+ defined (MPFR_USE_INTMAX_T) || \
+ defined (_STDINT_H) || defined (_STDINT_H_) || defined (_STDINT)
+# define _MPFR_H_HAVE_INTMAX_T 1
+#endif
+
+/* Definition of rounding modes (DON'T USE MPFR_RNDNA!).
+ Warning! Changing the contents of this enum should be seen as an
+ interface change since the old and the new types are not compatible
+ (the integer type compatible with the enumerated type can even change,
+ see ISO C99, 6.7.2.2#4), and in Makefile.am, AGE should be set to 0.
+
+ MPFR_RNDU must appear just before MPFR_RNDD (see
+ MPFR_IS_RNDUTEST_OR_RNDDNOTTEST in mpfr-impl.h).
+
+ MPFR_RNDF has been added, though not implemented yet, in order to avoid
+ to break the ABI once faithful rounding gets implemented.
+
+ If you change the order of the rounding modes, please update the routines
+ in texceptions.c which assume 0=RNDN, 1=RNDZ, 2=RNDU, 3=RNDD, 4=RNDA.
+*/
+typedef enum {
+ MPFR_RNDN=0, /* round to nearest, with ties to even */
+ MPFR_RNDZ, /* round toward zero */
+ MPFR_RNDU, /* round toward +Inf */
+ MPFR_RNDD, /* round toward -Inf */
+ MPFR_RNDA, /* round away from zero */
+ MPFR_RNDF, /* faithful rounding (not implemented yet) */
+ MPFR_RNDNA=-1 /* round to nearest, with ties away from zero (mpfr_round) */
+} mpfr_rnd_t;
+
+/* kept for compatibility with MPFR 2.4.x and before */
+#define GMP_RNDN MPFR_RNDN
+#define GMP_RNDZ MPFR_RNDZ
+#define GMP_RNDU MPFR_RNDU
+#define GMP_RNDD MPFR_RNDD
+
+/* Define precision : 1 (short), 2 (int) or 3 (long) (DON'T USE IT!)*/
+#ifndef _MPFR_PREC_FORMAT
+# if __GMP_MP_SIZE_T_INT == 1
+# define _MPFR_PREC_FORMAT 2
+# else
+# define _MPFR_PREC_FORMAT 3
+# endif
+#endif
+
+/* Let's make mpfr_prec_t signed in order to avoid problems due to the
+ usual arithmetic conversions when mixing mpfr_prec_t and mpfr_exp_t
+ in an expression (for error analysis) if casts are forgotten. */
+#if _MPFR_PREC_FORMAT == 1
+typedef short mpfr_prec_t;
+typedef unsigned short mpfr_uprec_t;
+#elif _MPFR_PREC_FORMAT == 2
+typedef int mpfr_prec_t;
+typedef unsigned int mpfr_uprec_t;
+#elif _MPFR_PREC_FORMAT == 3
+typedef long mpfr_prec_t;
+typedef unsigned long mpfr_uprec_t;
+#else
+# error "Invalid MPFR Prec format"
+#endif
+
+/* Definition of precision limits without needing <limits.h> */
+/* Note: the casts allows the expression to yield the wanted behavior
+ for _MPFR_PREC_FORMAT == 1 (due to integer promotion rules). */
+/* MPFR_PREC_MAX is the all-ones unsigned value shifted right once,
+   i.e. the maximum of the signed mpfr_prec_t.  */
+#define MPFR_PREC_MIN 2
+#define MPFR_PREC_MAX ((mpfr_prec_t)((mpfr_uprec_t)(~(mpfr_uprec_t)0)>>1))
+
+/* Definition of sign */
+typedef int mpfr_sign_t;
+
+/* Definition of the exponent: same as in GMP. */
+typedef mp_exp_t mpfr_exp_t;
+
+/* Definition of the standard exponent limits */
+#define MPFR_EMAX_DEFAULT ((mpfr_exp_t) (((unsigned long) 1 << 30) - 1))
+#define MPFR_EMIN_DEFAULT (-(MPFR_EMAX_DEFAULT))
+
+/* Definition of the main structure */
+/* Precision in bits, sign (+1/-1), exponent, and pointer to the limbs
+   (most significant limb last; see the representation comment below).  */
+typedef struct {
+ mpfr_prec_t _mpfr_prec;
+ mpfr_sign_t _mpfr_sign;
+ mpfr_exp_t _mpfr_exp;
+ mp_limb_t *_mpfr_d;
+} __mpfr_struct;
+
+/* Compatibility with previous types of MPFR */
+#ifndef mp_rnd_t
+# define mp_rnd_t mpfr_rnd_t
+#endif
+#ifndef mp_prec_t
+# define mp_prec_t mpfr_prec_t
+#endif
+
+/*
+ The represented number is
+ _sign*(_d[k-1]/B+_d[k-2]/B^2+...+_d[0]/B^k)*2^_exp
+ where k=ceil(_mp_prec/GMP_NUMB_BITS) and B=2^GMP_NUMB_BITS.
+
+ For the msb (most significant bit) normalized representation, we must have
+ _d[k-1]>=B/2, unless the number is singular.
+
+ We must also have the last k*GMP_NUMB_BITS-_prec bits set to zero.
+*/
+
+typedef __mpfr_struct mpfr_t[1];
+typedef __mpfr_struct *mpfr_ptr;
+typedef __gmp_const __mpfr_struct *mpfr_srcptr;
+
+/* For those who need a direct and fast access to the sign field.
+ However it is not in the API, thus use it at your own risk: it might
+ not be supported, or change name, in further versions!
+ Unfortunately, it must be defined here (instead of MPFR's internal
+ header file mpfr-impl.h) because it is used by some macros below.
+*/
+#define MPFR_SIGN(x) ((x)->_mpfr_sign)
+
+/* Stack interface */
+typedef enum {
+ MPFR_NAN_KIND = 0,
+ MPFR_INF_KIND = 1, MPFR_ZERO_KIND = 2, MPFR_REGULAR_KIND = 3
+} mpfr_kind_t;
+
+/* GMP defines:
+ + size_t: Standard size_t
+ + __GMP_ATTRIBUTE_PURE Attribute for math functions.
+ + __GMP_NOTHROW For C++: can't throw .
+ + __GMP_EXTERN_INLINE Attribute for inline function.
+ * __gmp_const const (Supports for K&R compiler only for mpfr.h).
+ + __GMP_DECLSPEC_EXPORT compiling to go into a DLL
+ + __GMP_DECLSPEC_IMPORT compiling to go into a application
+*/
+/* Extra MPFR defines */
+/* Sentinel attribute (gcc >= 4) for the NULL-terminated varargs functions
+   (mpfr_inits etc.), so gcc warns when the terminating NULL is missing.  */
+#define __MPFR_SENTINEL_ATTR
+#if defined (__GNUC__)
+# if __GNUC__ >= 4
+# undef __MPFR_SENTINEL_ATTR
+# define __MPFR_SENTINEL_ATTR __attribute__ ((sentinel))
+# endif
+#endif
+
+/* Prototypes: Support of K&R compiler */
+#if defined (__GMP_PROTO)
+# define _MPFR_PROTO __GMP_PROTO
+#elif defined (__STDC__) || defined (__cplusplus)
+# define _MPFR_PROTO(x) x
+#else
+# define _MPFR_PROTO(x) ()
+#endif
+/* Support for WINDOWS Dll:
+ Check if we are inside a MPFR build, and if so export the functions.
+ Otherwise does the same thing as GMP */
+#if defined(__MPFR_WITHIN_MPFR) && __GMP_LIBGMP_DLL
+# define __MPFR_DECLSPEC __GMP_DECLSPEC_EXPORT
+#else
+# define __MPFR_DECLSPEC __GMP_DECLSPEC
+#endif
+
+/* Note: In order to be declared, some functions need a specific
+ system header to be included *before* "mpfr.h". If the user
+ forgets to include the header, the MPFR function prototype in
+ the user object file is not correct. To avoid wrong results,
+ we raise a linker error in that case by changing their internal
+ name in the library (prefixed by __gmpfr instead of mpfr). See
+ the lines of the form "#define mpfr_xxx __gmpfr_xxx" below. */
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+/* Version/build information.  */
+__MPFR_DECLSPEC __gmp_const char * mpfr_get_version _MPFR_PROTO ((void));
+__MPFR_DECLSPEC __gmp_const char * mpfr_get_patches _MPFR_PROTO ((void));
+__MPFR_DECLSPEC int mpfr_buildopt_tls_p _MPFR_PROTO ((void));
+__MPFR_DECLSPEC int mpfr_buildopt_decimal_p _MPFR_PROTO ((void));
+
+/* Exponent-range control (emin/emax getters and setters, plus the
+   implementation's extreme supported values).  */
+__MPFR_DECLSPEC mpfr_exp_t mpfr_get_emin _MPFR_PROTO ((void));
+__MPFR_DECLSPEC int mpfr_set_emin _MPFR_PROTO ((mpfr_exp_t));
+__MPFR_DECLSPEC mpfr_exp_t mpfr_get_emin_min _MPFR_PROTO ((void));
+__MPFR_DECLSPEC mpfr_exp_t mpfr_get_emin_max _MPFR_PROTO ((void));
+__MPFR_DECLSPEC mpfr_exp_t mpfr_get_emax _MPFR_PROTO ((void));
+__MPFR_DECLSPEC int mpfr_set_emax _MPFR_PROTO ((mpfr_exp_t));
+__MPFR_DECLSPEC mpfr_exp_t mpfr_get_emax_min _MPFR_PROTO ((void));
+__MPFR_DECLSPEC mpfr_exp_t mpfr_get_emax_max _MPFR_PROTO ((void));
+
+/* Default rounding mode.  */
+__MPFR_DECLSPEC void mpfr_set_default_rounding_mode _MPFR_PROTO((mpfr_rnd_t));
+__MPFR_DECLSPEC mpfr_rnd_t mpfr_get_default_rounding_mode _MPFR_PROTO((void));
+__MPFR_DECLSPEC __gmp_const char *
+ mpfr_print_rnd_mode _MPFR_PROTO((mpfr_rnd_t));
+
+/* Exception flags: clear, set, and test each global flag.  */
+__MPFR_DECLSPEC void mpfr_clear_flags _MPFR_PROTO ((void));
+__MPFR_DECLSPEC void mpfr_clear_underflow _MPFR_PROTO ((void));
+__MPFR_DECLSPEC void mpfr_clear_overflow _MPFR_PROTO ((void));
+__MPFR_DECLSPEC void mpfr_clear_nanflag _MPFR_PROTO ((void));
+__MPFR_DECLSPEC void mpfr_clear_inexflag _MPFR_PROTO ((void));
+__MPFR_DECLSPEC void mpfr_clear_erangeflag _MPFR_PROTO ((void));
+
+__MPFR_DECLSPEC void mpfr_set_underflow _MPFR_PROTO ((void));
+__MPFR_DECLSPEC void mpfr_set_overflow _MPFR_PROTO ((void));
+__MPFR_DECLSPEC void mpfr_set_nanflag _MPFR_PROTO ((void));
+__MPFR_DECLSPEC void mpfr_set_inexflag _MPFR_PROTO ((void));
+__MPFR_DECLSPEC void mpfr_set_erangeflag _MPFR_PROTO ((void));
+
+__MPFR_DECLSPEC int mpfr_underflow_p _MPFR_PROTO ((void));
+__MPFR_DECLSPEC int mpfr_overflow_p _MPFR_PROTO ((void));
+__MPFR_DECLSPEC int mpfr_nanflag_p _MPFR_PROTO ((void));
+__MPFR_DECLSPEC int mpfr_inexflag_p _MPFR_PROTO ((void));
+__MPFR_DECLSPEC int mpfr_erangeflag_p _MPFR_PROTO ((void));
+
+__MPFR_DECLSPEC int
+ mpfr_check_range _MPFR_PROTO ((mpfr_ptr, int, mpfr_rnd_t));
+
+__MPFR_DECLSPEC void mpfr_init2 _MPFR_PROTO ((mpfr_ptr, mpfr_prec_t));
+__MPFR_DECLSPEC void mpfr_init _MPFR_PROTO ((mpfr_ptr));
+__MPFR_DECLSPEC void mpfr_clear _MPFR_PROTO ((mpfr_ptr));
+
+__MPFR_DECLSPEC void
+ mpfr_inits2 _MPFR_PROTO ((mpfr_prec_t, mpfr_ptr, ...)) __MPFR_SENTINEL_ATTR;
+__MPFR_DECLSPEC void
+ mpfr_inits _MPFR_PROTO ((mpfr_ptr, ...)) __MPFR_SENTINEL_ATTR;
+__MPFR_DECLSPEC void
+ mpfr_clears _MPFR_PROTO ((mpfr_ptr, ...)) __MPFR_SENTINEL_ATTR;
+
+__MPFR_DECLSPEC int
+ mpfr_prec_round _MPFR_PROTO ((mpfr_ptr, mpfr_prec_t, mpfr_rnd_t));
+__MPFR_DECLSPEC int
+ mpfr_can_round _MPFR_PROTO ((mpfr_srcptr, mpfr_exp_t, mpfr_rnd_t, mpfr_rnd_t,
+ mpfr_prec_t));
+__MPFR_DECLSPEC mpfr_prec_t mpfr_min_prec _MPFR_PROTO ((mpfr_srcptr));
+
+__MPFR_DECLSPEC mpfr_exp_t mpfr_get_exp _MPFR_PROTO ((mpfr_srcptr));
+__MPFR_DECLSPEC int mpfr_set_exp _MPFR_PROTO ((mpfr_ptr, mpfr_exp_t));
+__MPFR_DECLSPEC mpfr_prec_t mpfr_get_prec _MPFR_PROTO((mpfr_srcptr));
+__MPFR_DECLSPEC void mpfr_set_prec _MPFR_PROTO((mpfr_ptr, mpfr_prec_t));
+__MPFR_DECLSPEC void mpfr_set_prec_raw _MPFR_PROTO((mpfr_ptr, mpfr_prec_t));
+__MPFR_DECLSPEC void mpfr_set_default_prec _MPFR_PROTO((mpfr_prec_t));
+__MPFR_DECLSPEC mpfr_prec_t mpfr_get_default_prec _MPFR_PROTO((void));
+
+__MPFR_DECLSPEC int mpfr_set_d _MPFR_PROTO ((mpfr_ptr, double, mpfr_rnd_t));
+__MPFR_DECLSPEC int mpfr_set_flt _MPFR_PROTO ((mpfr_ptr, float, mpfr_rnd_t));
+#ifdef MPFR_WANT_DECIMAL_FLOATS
+__MPFR_DECLSPEC int mpfr_set_decimal64 _MPFR_PROTO ((mpfr_ptr, _Decimal64,
+ mpfr_rnd_t));
+#endif
+__MPFR_DECLSPEC int
+ mpfr_set_ld _MPFR_PROTO ((mpfr_ptr, long double, mpfr_rnd_t));
+__MPFR_DECLSPEC int
+ mpfr_set_z _MPFR_PROTO ((mpfr_ptr, mpz_srcptr, mpfr_rnd_t));
+__MPFR_DECLSPEC int
+ mpfr_set_z_2exp _MPFR_PROTO ((mpfr_ptr, mpz_srcptr, mpfr_exp_t, mpfr_rnd_t));
+__MPFR_DECLSPEC void mpfr_set_nan _MPFR_PROTO ((mpfr_ptr));
+__MPFR_DECLSPEC void mpfr_set_inf _MPFR_PROTO ((mpfr_ptr, int));
+__MPFR_DECLSPEC void mpfr_set_zero _MPFR_PROTO ((mpfr_ptr, int));
+__MPFR_DECLSPEC int
+ mpfr_set_f _MPFR_PROTO ((mpfr_ptr, mpf_srcptr, mpfr_rnd_t));
+__MPFR_DECLSPEC int
+ mpfr_get_f _MPFR_PROTO ((mpf_ptr, mpfr_srcptr, mpfr_rnd_t));
+__MPFR_DECLSPEC int mpfr_set_si _MPFR_PROTO ((mpfr_ptr, long, mpfr_rnd_t));
+__MPFR_DECLSPEC int
+ mpfr_set_ui _MPFR_PROTO ((mpfr_ptr, unsigned long, mpfr_rnd_t));
+__MPFR_DECLSPEC int
+ mpfr_set_si_2exp _MPFR_PROTO ((mpfr_ptr, long, mpfr_exp_t, mpfr_rnd_t));
+__MPFR_DECLSPEC int
+ mpfr_set_ui_2exp _MPFR_PROTO ((mpfr_ptr,unsigned long,mpfr_exp_t,mpfr_rnd_t));
+__MPFR_DECLSPEC int
+ mpfr_set_q _MPFR_PROTO ((mpfr_ptr, mpq_srcptr, mpfr_rnd_t));
+__MPFR_DECLSPEC int
+ mpfr_set_str _MPFR_PROTO ((mpfr_ptr, __gmp_const char *, int, mpfr_rnd_t));
+__MPFR_DECLSPEC int
+ mpfr_init_set_str _MPFR_PROTO ((mpfr_ptr, __gmp_const char *, int,
+ mpfr_rnd_t));
+__MPFR_DECLSPEC int
+ mpfr_set4 _MPFR_PROTO ((mpfr_ptr, mpfr_srcptr, mpfr_rnd_t, int));
+__MPFR_DECLSPEC int
+ mpfr_abs _MPFR_PROTO ((mpfr_ptr, mpfr_srcptr, mpfr_rnd_t));
+__MPFR_DECLSPEC int
+ mpfr_set _MPFR_PROTO ((mpfr_ptr, mpfr_srcptr, mpfr_rnd_t));
+__MPFR_DECLSPEC int mpfr_neg _MPFR_PROTO ((mpfr_ptr, mpfr_srcptr, mpfr_rnd_t));
+__MPFR_DECLSPEC int mpfr_signbit _MPFR_PROTO ((mpfr_srcptr));
+__MPFR_DECLSPEC int
+ mpfr_setsign _MPFR_PROTO ((mpfr_ptr, mpfr_srcptr, int, mpfr_rnd_t));
+__MPFR_DECLSPEC int
+ mpfr_copysign _MPFR_PROTO ((mpfr_ptr, mpfr_srcptr, mpfr_srcptr, mpfr_rnd_t));
+
+#ifdef _MPFR_H_HAVE_INTMAX_T
+#define mpfr_set_sj __gmpfr_set_sj
+#define mpfr_set_sj_2exp __gmpfr_set_sj_2exp
+#define mpfr_set_uj __gmpfr_set_uj
+#define mpfr_set_uj_2exp __gmpfr_set_uj_2exp
+#define mpfr_get_sj __gmpfr_mpfr_get_sj
+#define mpfr_get_uj __gmpfr_mpfr_get_uj
+__MPFR_DECLSPEC int mpfr_set_sj _MPFR_PROTO ((mpfr_t, intmax_t, mpfr_rnd_t));
+__MPFR_DECLSPEC int
+ mpfr_set_sj_2exp _MPFR_PROTO ((mpfr_t, intmax_t, intmax_t, mpfr_rnd_t));
+__MPFR_DECLSPEC int mpfr_set_uj _MPFR_PROTO ((mpfr_t, uintmax_t, mpfr_rnd_t));
+__MPFR_DECLSPEC int
+ mpfr_set_uj_2exp _MPFR_PROTO ((mpfr_t, uintmax_t, intmax_t, mpfr_rnd_t));
+__MPFR_DECLSPEC intmax_t mpfr_get_sj _MPFR_PROTO ((mpfr_srcptr, mpfr_rnd_t));
+__MPFR_DECLSPEC uintmax_t mpfr_get_uj _MPFR_PROTO ((mpfr_srcptr, mpfr_rnd_t));
+#endif
+
+__MPFR_DECLSPEC mpfr_exp_t mpfr_get_z_2exp _MPFR_PROTO ((mpz_ptr, mpfr_srcptr));
+__MPFR_DECLSPEC float mpfr_get_flt _MPFR_PROTO ((mpfr_srcptr, mpfr_rnd_t));
+__MPFR_DECLSPEC double mpfr_get_d _MPFR_PROTO ((mpfr_srcptr, mpfr_rnd_t));
+#ifdef MPFR_WANT_DECIMAL_FLOATS
+__MPFR_DECLSPEC _Decimal64 mpfr_get_decimal64 _MPFR_PROTO ((mpfr_srcptr,
+ mpfr_rnd_t));
+#endif
+__MPFR_DECLSPEC long double mpfr_get_ld _MPFR_PROTO ((mpfr_srcptr,
+ mpfr_rnd_t));
+__MPFR_DECLSPEC double mpfr_get_d1 _MPFR_PROTO ((mpfr_srcptr));
+__MPFR_DECLSPEC double mpfr_get_d_2exp _MPFR_PROTO ((long*, mpfr_srcptr,
+ mpfr_rnd_t));
+__MPFR_DECLSPEC long double mpfr_get_ld_2exp _MPFR_PROTO ((long*, mpfr_srcptr,
+ mpfr_rnd_t));
+__MPFR_DECLSPEC long mpfr_get_si _MPFR_PROTO ((mpfr_srcptr, mpfr_rnd_t));
+__MPFR_DECLSPEC unsigned long mpfr_get_ui _MPFR_PROTO ((mpfr_srcptr,
+ mpfr_rnd_t));
+__MPFR_DECLSPEC char*mpfr_get_str _MPFR_PROTO ((char*, mpfr_exp_t*, int, size_t,
+ mpfr_srcptr, mpfr_rnd_t));
+__MPFR_DECLSPEC int mpfr_get_z _MPFR_PROTO ((mpz_ptr z, mpfr_srcptr f,
+ mpfr_rnd_t));
+
+__MPFR_DECLSPEC void mpfr_free_str _MPFR_PROTO ((char *));
+
+__MPFR_DECLSPEC int mpfr_urandom _MPFR_PROTO ((mpfr_ptr, gmp_randstate_t,
+ mpfr_rnd_t));
+__MPFR_DECLSPEC int mpfr_urandomb _MPFR_PROTO ((mpfr_ptr, gmp_randstate_t));
+
+__MPFR_DECLSPEC void mpfr_nextabove _MPFR_PROTO ((mpfr_ptr));
+__MPFR_DECLSPEC void mpfr_nextbelow _MPFR_PROTO ((mpfr_ptr));
+__MPFR_DECLSPEC void mpfr_nexttoward _MPFR_PROTO ((mpfr_ptr, mpfr_srcptr));
+
+#ifdef _MPFR_H_HAVE_FILE
+#define mpfr_inp_str __gmpfr_inp_str
+#define mpfr_out_str __gmpfr_out_str
+__MPFR_DECLSPEC size_t mpfr_inp_str _MPFR_PROTO ((mpfr_ptr, FILE*, int,
+ mpfr_rnd_t));
+__MPFR_DECLSPEC size_t mpfr_out_str _MPFR_PROTO ((FILE*, int, size_t,
+ mpfr_srcptr, mpfr_rnd_t));
+#define mpfr_fprintf __gmpfr_fprintf
+__MPFR_DECLSPEC int mpfr_fprintf _MPFR_PROTO ((FILE*, __gmp_const char*,
+ ...));
+#endif
+__MPFR_DECLSPEC int mpfr_printf _MPFR_PROTO ((__gmp_const char*, ...));
+__MPFR_DECLSPEC int mpfr_asprintf _MPFR_PROTO ((char**, __gmp_const char*,
+ ...));
+__MPFR_DECLSPEC int mpfr_sprintf _MPFR_PROTO ((char*, __gmp_const char*,
+ ...));
+__MPFR_DECLSPEC int mpfr_snprintf _MPFR_PROTO ((char*, size_t,
+ __gmp_const char*, ...));
+
+#ifdef _MPFR_H_HAVE_VA_LIST
+#ifdef _MPFR_H_HAVE_FILE
+#define mpfr_vfprintf __gmpfr_vfprintf
+__MPFR_DECLSPEC int mpfr_vfprintf _MPFR_PROTO ((FILE*, __gmp_const char*,
+ va_list));
+#endif /* _MPFR_H_HAVE_FILE */
+#define mpfr_vprintf __gmpfr_vprintf
+#define mpfr_vasprintf __gmpfr_vasprintf
+#define mpfr_vsprintf __gmpfr_vsprintf
+#define mpfr_vsnprintf __gmpfr_vsnprintf
+__MPFR_DECLSPEC int mpfr_vprintf _MPFR_PROTO ((__gmp_const char*, va_list));
+__MPFR_DECLSPEC int mpfr_vasprintf _MPFR_PROTO ((char**, __gmp_const char*,
+ va_list));
+__MPFR_DECLSPEC int mpfr_vsprintf _MPFR_PROTO ((char*, __gmp_const char*,
+ va_list));
+__MPFR_DECLSPEC int mpfr_vsnprintf _MPFR_PROTO ((char*, size_t,
+ __gmp_const char*, va_list));
+#endif /* _MPFR_H_HAVE_VA_LIST */
+
+__MPFR_DECLSPEC int mpfr_pow _MPFR_PROTO ((mpfr_ptr, mpfr_srcptr,
+ mpfr_srcptr, mpfr_rnd_t));
+__MPFR_DECLSPEC int mpfr_pow_si _MPFR_PROTO ((mpfr_ptr, mpfr_srcptr,
+ long int, mpfr_rnd_t));
+__MPFR_DECLSPEC int mpfr_pow_ui _MPFR_PROTO ((mpfr_ptr, mpfr_srcptr,
+ unsigned long int, mpfr_rnd_t));
+__MPFR_DECLSPEC int mpfr_ui_pow_ui _MPFR_PROTO ((mpfr_ptr, unsigned long int,
+ unsigned long int, mpfr_rnd_t));
+__MPFR_DECLSPEC int mpfr_ui_pow _MPFR_PROTO ((mpfr_ptr, unsigned long int,
+ mpfr_srcptr, mpfr_rnd_t));
+__MPFR_DECLSPEC int mpfr_pow_z _MPFR_PROTO ((mpfr_ptr, mpfr_srcptr,
+ mpz_srcptr, mpfr_rnd_t));
+
+__MPFR_DECLSPEC int mpfr_sqrt _MPFR_PROTO ((mpfr_ptr, mpfr_srcptr,
+ mpfr_rnd_t));
+__MPFR_DECLSPEC int mpfr_sqrt_ui _MPFR_PROTO ((mpfr_ptr, unsigned long,
+ mpfr_rnd_t));
+__MPFR_DECLSPEC int mpfr_rec_sqrt _MPFR_PROTO ((mpfr_ptr, mpfr_srcptr,
+ mpfr_rnd_t));
+
+__MPFR_DECLSPEC int mpfr_add _MPFR_PROTO ((mpfr_ptr, mpfr_srcptr,
+ mpfr_srcptr, mpfr_rnd_t));
+__MPFR_DECLSPEC int mpfr_sub _MPFR_PROTO ((mpfr_ptr, mpfr_srcptr,
+ mpfr_srcptr, mpfr_rnd_t));
+__MPFR_DECLSPEC int mpfr_mul _MPFR_PROTO ((mpfr_ptr, mpfr_srcptr,
+ mpfr_srcptr, mpfr_rnd_t));
+__MPFR_DECLSPEC int mpfr_div _MPFR_PROTO ((mpfr_ptr, mpfr_srcptr,
+ mpfr_srcptr, mpfr_rnd_t));
+
+__MPFR_DECLSPEC int mpfr_add_ui _MPFR_PROTO ((mpfr_ptr, mpfr_srcptr,
+ unsigned long, mpfr_rnd_t));
+__MPFR_DECLSPEC int mpfr_sub_ui _MPFR_PROTO ((mpfr_ptr, mpfr_srcptr,
+ unsigned long, mpfr_rnd_t));
+__MPFR_DECLSPEC int mpfr_ui_sub _MPFR_PROTO ((mpfr_ptr, unsigned long,
+ mpfr_srcptr, mpfr_rnd_t));
+__MPFR_DECLSPEC int mpfr_mul_ui _MPFR_PROTO ((mpfr_ptr, mpfr_srcptr,
+ unsigned long, mpfr_rnd_t));
+__MPFR_DECLSPEC int mpfr_div_ui _MPFR_PROTO ((mpfr_ptr, mpfr_srcptr,
+ unsigned long, mpfr_rnd_t));
+__MPFR_DECLSPEC int mpfr_ui_div _MPFR_PROTO ((mpfr_ptr, unsigned long,
+ mpfr_srcptr, mpfr_rnd_t));
+
+__MPFR_DECLSPEC int mpfr_add_si _MPFR_PROTO ((mpfr_ptr, mpfr_srcptr,
+ long int, mpfr_rnd_t));
+__MPFR_DECLSPEC int mpfr_sub_si _MPFR_PROTO ((mpfr_ptr, mpfr_srcptr,
+ long int, mpfr_rnd_t));
+__MPFR_DECLSPEC int mpfr_si_sub _MPFR_PROTO ((mpfr_ptr, long int,
+ mpfr_srcptr, mpfr_rnd_t));
+__MPFR_DECLSPEC int mpfr_mul_si _MPFR_PROTO ((mpfr_ptr, mpfr_srcptr,
+ long int, mpfr_rnd_t));
+__MPFR_DECLSPEC int mpfr_div_si _MPFR_PROTO ((mpfr_ptr, mpfr_srcptr,
+ long int, mpfr_rnd_t));
+__MPFR_DECLSPEC int mpfr_si_div _MPFR_PROTO ((mpfr_ptr, long int,
+ mpfr_srcptr, mpfr_rnd_t));
+
+__MPFR_DECLSPEC int mpfr_add_d _MPFR_PROTO ((mpfr_ptr, mpfr_srcptr,
+ double, mpfr_rnd_t));
+__MPFR_DECLSPEC int mpfr_sub_d _MPFR_PROTO ((mpfr_ptr, mpfr_srcptr,
+ double, mpfr_rnd_t));
+__MPFR_DECLSPEC int mpfr_d_sub _MPFR_PROTO ((mpfr_ptr, double,
+ mpfr_srcptr, mpfr_rnd_t));
+__MPFR_DECLSPEC int mpfr_mul_d _MPFR_PROTO ((mpfr_ptr, mpfr_srcptr,
+ double, mpfr_rnd_t));
+__MPFR_DECLSPEC int mpfr_div_d _MPFR_PROTO ((mpfr_ptr, mpfr_srcptr,
+ double, mpfr_rnd_t));
+__MPFR_DECLSPEC int mpfr_d_div _MPFR_PROTO ((mpfr_ptr, double,
+ mpfr_srcptr, mpfr_rnd_t));
+
+__MPFR_DECLSPEC int mpfr_sqr _MPFR_PROTO ((mpfr_ptr, mpfr_srcptr,mpfr_rnd_t));
+
+__MPFR_DECLSPEC int mpfr_const_pi _MPFR_PROTO ((mpfr_ptr, mpfr_rnd_t));
+__MPFR_DECLSPEC int mpfr_const_log2 _MPFR_PROTO ((mpfr_ptr, mpfr_rnd_t));
+__MPFR_DECLSPEC int mpfr_const_euler _MPFR_PROTO ((mpfr_ptr, mpfr_rnd_t));
+__MPFR_DECLSPEC int mpfr_const_catalan _MPFR_PROTO ((mpfr_ptr, mpfr_rnd_t));
+
+__MPFR_DECLSPEC int mpfr_agm _MPFR_PROTO ((mpfr_ptr, mpfr_srcptr, mpfr_srcptr,
+ mpfr_rnd_t));
+
+__MPFR_DECLSPEC int mpfr_log _MPFR_PROTO ((mpfr_ptr, mpfr_srcptr,mpfr_rnd_t));
+__MPFR_DECLSPEC int mpfr_log2 _MPFR_PROTO ((mpfr_ptr,mpfr_srcptr,mpfr_rnd_t));
+__MPFR_DECLSPEC int mpfr_log10 _MPFR_PROTO ((mpfr_ptr, mpfr_srcptr,
+ mpfr_rnd_t));
+__MPFR_DECLSPEC int mpfr_log1p _MPFR_PROTO ((mpfr_ptr, mpfr_srcptr,
+ mpfr_rnd_t));
+
+__MPFR_DECLSPEC int mpfr_exp _MPFR_PROTO ((mpfr_ptr, mpfr_srcptr,mpfr_rnd_t));
+__MPFR_DECLSPEC int mpfr_exp2 _MPFR_PROTO ((mpfr_ptr,mpfr_srcptr,mpfr_rnd_t));
+__MPFR_DECLSPEC int mpfr_exp10 _MPFR_PROTO ((mpfr_ptr, mpfr_srcptr,
+ mpfr_rnd_t));
+__MPFR_DECLSPEC int mpfr_expm1 _MPFR_PROTO ((mpfr_ptr, mpfr_srcptr,
+ mpfr_rnd_t));
+__MPFR_DECLSPEC int mpfr_eint _MPFR_PROTO ((mpfr_ptr,mpfr_srcptr,mpfr_rnd_t));
+__MPFR_DECLSPEC int mpfr_li2 _MPFR_PROTO ((mpfr_ptr,mpfr_srcptr,mpfr_rnd_t));
+
+__MPFR_DECLSPEC int mpfr_cmp _MPFR_PROTO ((mpfr_srcptr, mpfr_srcptr));
+__MPFR_DECLSPEC int mpfr_cmp3 _MPFR_PROTO ((mpfr_srcptr, mpfr_srcptr, int));
+__MPFR_DECLSPEC int mpfr_cmp_d _MPFR_PROTO ((mpfr_srcptr, double));
+__MPFR_DECLSPEC int mpfr_cmp_ld _MPFR_PROTO ((mpfr_srcptr, long double));
+__MPFR_DECLSPEC int mpfr_cmpabs _MPFR_PROTO ((mpfr_srcptr, mpfr_srcptr));
+__MPFR_DECLSPEC int mpfr_cmp_ui _MPFR_PROTO ((mpfr_srcptr, unsigned long));
+__MPFR_DECLSPEC int mpfr_cmp_si _MPFR_PROTO ((mpfr_srcptr, long));
+__MPFR_DECLSPEC int mpfr_cmp_ui_2exp _MPFR_PROTO ((mpfr_srcptr, unsigned long,
+ mpfr_exp_t));
+__MPFR_DECLSPEC int mpfr_cmp_si_2exp _MPFR_PROTO ((mpfr_srcptr, long,
+ mpfr_exp_t));
+__MPFR_DECLSPEC void mpfr_reldiff _MPFR_PROTO ((mpfr_ptr, mpfr_srcptr,
+ mpfr_srcptr, mpfr_rnd_t));
+__MPFR_DECLSPEC int mpfr_eq _MPFR_PROTO((mpfr_srcptr, mpfr_srcptr,
+ unsigned long));
+__MPFR_DECLSPEC int mpfr_sgn _MPFR_PROTO ((mpfr_srcptr));
+
+__MPFR_DECLSPEC int mpfr_mul_2exp _MPFR_PROTO ((mpfr_ptr, mpfr_srcptr,
+ unsigned long, mpfr_rnd_t));
+__MPFR_DECLSPEC int mpfr_div_2exp _MPFR_PROTO ((mpfr_ptr, mpfr_srcptr,
+ unsigned long, mpfr_rnd_t));
+__MPFR_DECLSPEC int mpfr_mul_2ui _MPFR_PROTO ((mpfr_ptr, mpfr_srcptr,
+ unsigned long, mpfr_rnd_t));
+__MPFR_DECLSPEC int mpfr_div_2ui _MPFR_PROTO ((mpfr_ptr, mpfr_srcptr,
+ unsigned long, mpfr_rnd_t));
+__MPFR_DECLSPEC int mpfr_mul_2si _MPFR_PROTO ((mpfr_ptr, mpfr_srcptr,
+ long, mpfr_rnd_t));
+__MPFR_DECLSPEC int mpfr_div_2si _MPFR_PROTO ((mpfr_ptr, mpfr_srcptr,
+ long, mpfr_rnd_t));
+
+__MPFR_DECLSPEC int mpfr_rint _MPFR_PROTO((mpfr_ptr,mpfr_srcptr, mpfr_rnd_t));
+__MPFR_DECLSPEC int mpfr_round _MPFR_PROTO((mpfr_ptr, mpfr_srcptr));
+__MPFR_DECLSPEC int mpfr_trunc _MPFR_PROTO((mpfr_ptr, mpfr_srcptr));
+__MPFR_DECLSPEC int mpfr_ceil _MPFR_PROTO((mpfr_ptr, mpfr_srcptr));
+__MPFR_DECLSPEC int mpfr_floor _MPFR_PROTO((mpfr_ptr, mpfr_srcptr));
+__MPFR_DECLSPEC int mpfr_rint_round _MPFR_PROTO ((mpfr_ptr, mpfr_srcptr,
+ mpfr_rnd_t));
+__MPFR_DECLSPEC int mpfr_rint_trunc _MPFR_PROTO ((mpfr_ptr, mpfr_srcptr,
+ mpfr_rnd_t));
+__MPFR_DECLSPEC int mpfr_rint_ceil _MPFR_PROTO ((mpfr_ptr, mpfr_srcptr,
+ mpfr_rnd_t));
+__MPFR_DECLSPEC int mpfr_rint_floor _MPFR_PROTO ((mpfr_ptr, mpfr_srcptr,
+ mpfr_rnd_t));
+__MPFR_DECLSPEC int mpfr_frac _MPFR_PROTO ((mpfr_ptr,mpfr_srcptr,mpfr_rnd_t));
+__MPFR_DECLSPEC int mpfr_modf _MPFR_PROTO ((mpfr_ptr, mpfr_ptr, mpfr_srcptr,
+ mpfr_rnd_t));
+__MPFR_DECLSPEC int mpfr_remquo _MPFR_PROTO ((mpfr_ptr, long*, mpfr_srcptr,
+ mpfr_srcptr, mpfr_rnd_t));
+__MPFR_DECLSPEC int mpfr_remainder _MPFR_PROTO ((mpfr_ptr, mpfr_srcptr,
+ mpfr_srcptr, mpfr_rnd_t));
+__MPFR_DECLSPEC int mpfr_fmod _MPFR_PROTO ((mpfr_ptr, mpfr_srcptr,
+ mpfr_srcptr, mpfr_rnd_t));
+
+__MPFR_DECLSPEC int mpfr_fits_ulong_p _MPFR_PROTO((mpfr_srcptr, mpfr_rnd_t));
+__MPFR_DECLSPEC int mpfr_fits_slong_p _MPFR_PROTO((mpfr_srcptr, mpfr_rnd_t));
+__MPFR_DECLSPEC int mpfr_fits_uint_p _MPFR_PROTO((mpfr_srcptr, mpfr_rnd_t));
+__MPFR_DECLSPEC int mpfr_fits_sint_p _MPFR_PROTO((mpfr_srcptr, mpfr_rnd_t));
+__MPFR_DECLSPEC int mpfr_fits_ushort_p _MPFR_PROTO((mpfr_srcptr, mpfr_rnd_t));
+__MPFR_DECLSPEC int mpfr_fits_sshort_p _MPFR_PROTO((mpfr_srcptr, mpfr_rnd_t));
+__MPFR_DECLSPEC int mpfr_fits_uintmax_p _MPFR_PROTO((mpfr_srcptr,mpfr_rnd_t));
+__MPFR_DECLSPEC int mpfr_fits_intmax_p _MPFR_PROTO((mpfr_srcptr, mpfr_rnd_t));
+
+__MPFR_DECLSPEC void mpfr_extract _MPFR_PROTO ((mpz_ptr, mpfr_srcptr,
+ unsigned int));
+__MPFR_DECLSPEC void mpfr_swap _MPFR_PROTO ((mpfr_ptr, mpfr_ptr));
+__MPFR_DECLSPEC void mpfr_dump _MPFR_PROTO ((mpfr_srcptr));
+
+__MPFR_DECLSPEC int mpfr_nan_p _MPFR_PROTO((mpfr_srcptr));
+__MPFR_DECLSPEC int mpfr_inf_p _MPFR_PROTO((mpfr_srcptr));
+__MPFR_DECLSPEC int mpfr_number_p _MPFR_PROTO((mpfr_srcptr));
+__MPFR_DECLSPEC int mpfr_integer_p _MPFR_PROTO ((mpfr_srcptr));
+__MPFR_DECLSPEC int mpfr_zero_p _MPFR_PROTO ((mpfr_srcptr));
+__MPFR_DECLSPEC int mpfr_regular_p _MPFR_PROTO ((mpfr_srcptr));
+
+__MPFR_DECLSPEC int mpfr_greater_p _MPFR_PROTO ((mpfr_srcptr, mpfr_srcptr));
+__MPFR_DECLSPEC int mpfr_greaterequal_p _MPFR_PROTO ((mpfr_srcptr,
+ mpfr_srcptr));
+__MPFR_DECLSPEC int mpfr_less_p _MPFR_PROTO ((mpfr_srcptr, mpfr_srcptr));
+__MPFR_DECLSPEC int mpfr_lessequal_p _MPFR_PROTO ((mpfr_srcptr, mpfr_srcptr));
+__MPFR_DECLSPEC int mpfr_lessgreater_p _MPFR_PROTO((mpfr_srcptr,mpfr_srcptr));
+__MPFR_DECLSPEC int mpfr_equal_p _MPFR_PROTO ((mpfr_srcptr, mpfr_srcptr));
+__MPFR_DECLSPEC int mpfr_unordered_p _MPFR_PROTO ((mpfr_srcptr, mpfr_srcptr));
+
+__MPFR_DECLSPEC int mpfr_atanh _MPFR_PROTO((mpfr_ptr,mpfr_srcptr,mpfr_rnd_t));
+__MPFR_DECLSPEC int mpfr_acosh _MPFR_PROTO((mpfr_ptr,mpfr_srcptr,mpfr_rnd_t));
+__MPFR_DECLSPEC int mpfr_asinh _MPFR_PROTO((mpfr_ptr,mpfr_srcptr,mpfr_rnd_t));
+__MPFR_DECLSPEC int mpfr_cosh _MPFR_PROTO((mpfr_ptr,mpfr_srcptr, mpfr_rnd_t));
+__MPFR_DECLSPEC int mpfr_sinh _MPFR_PROTO((mpfr_ptr,mpfr_srcptr, mpfr_rnd_t));
+__MPFR_DECLSPEC int mpfr_tanh _MPFR_PROTO((mpfr_ptr,mpfr_srcptr, mpfr_rnd_t));
+__MPFR_DECLSPEC int mpfr_sinh_cosh _MPFR_PROTO ((mpfr_ptr, mpfr_ptr,
+ mpfr_srcptr, mpfr_rnd_t));
+
+__MPFR_DECLSPEC int mpfr_sech _MPFR_PROTO ((mpfr_ptr, mpfr_srcptr,mpfr_rnd_t));
+__MPFR_DECLSPEC int mpfr_csch _MPFR_PROTO ((mpfr_ptr, mpfr_srcptr,mpfr_rnd_t));
+__MPFR_DECLSPEC int mpfr_coth _MPFR_PROTO ((mpfr_ptr, mpfr_srcptr,mpfr_rnd_t));
+
+__MPFR_DECLSPEC int mpfr_acos _MPFR_PROTO ((mpfr_ptr,mpfr_srcptr,mpfr_rnd_t));
+__MPFR_DECLSPEC int mpfr_asin _MPFR_PROTO ((mpfr_ptr,mpfr_srcptr,mpfr_rnd_t));
+__MPFR_DECLSPEC int mpfr_atan _MPFR_PROTO ((mpfr_ptr,mpfr_srcptr,mpfr_rnd_t));
+__MPFR_DECLSPEC int mpfr_sin _MPFR_PROTO ((mpfr_ptr, mpfr_srcptr,mpfr_rnd_t));
+__MPFR_DECLSPEC int mpfr_sin_cos _MPFR_PROTO ((mpfr_ptr, mpfr_ptr,
+ mpfr_srcptr, mpfr_rnd_t));
+__MPFR_DECLSPEC int mpfr_cos _MPFR_PROTO ((mpfr_ptr, mpfr_srcptr,mpfr_rnd_t));
+__MPFR_DECLSPEC int mpfr_tan _MPFR_PROTO ((mpfr_ptr, mpfr_srcptr,mpfr_rnd_t));
+__MPFR_DECLSPEC int mpfr_atan2 _MPFR_PROTO ((mpfr_ptr,mpfr_srcptr,mpfr_srcptr,
+ mpfr_rnd_t));
+__MPFR_DECLSPEC int mpfr_sec _MPFR_PROTO ((mpfr_ptr, mpfr_srcptr,mpfr_rnd_t));
+__MPFR_DECLSPEC int mpfr_csc _MPFR_PROTO ((mpfr_ptr, mpfr_srcptr,mpfr_rnd_t));
+__MPFR_DECLSPEC int mpfr_cot _MPFR_PROTO ((mpfr_ptr, mpfr_srcptr,mpfr_rnd_t));
+
+__MPFR_DECLSPEC int mpfr_hypot _MPFR_PROTO ((mpfr_ptr, mpfr_srcptr,
+ mpfr_srcptr, mpfr_rnd_t));
+__MPFR_DECLSPEC int mpfr_erf _MPFR_PROTO ((mpfr_ptr, mpfr_srcptr,mpfr_rnd_t));
+__MPFR_DECLSPEC int mpfr_erfc _MPFR_PROTO ((mpfr_ptr, mpfr_srcptr,mpfr_rnd_t));
+__MPFR_DECLSPEC int mpfr_cbrt _MPFR_PROTO ((mpfr_ptr,mpfr_srcptr,mpfr_rnd_t));
+__MPFR_DECLSPEC int mpfr_root _MPFR_PROTO ((mpfr_ptr,mpfr_srcptr,unsigned long,mpfr_rnd_t));
+__MPFR_DECLSPEC int mpfr_gamma _MPFR_PROTO((mpfr_ptr,mpfr_srcptr,mpfr_rnd_t));
+__MPFR_DECLSPEC int mpfr_lngamma _MPFR_PROTO((mpfr_ptr,mpfr_srcptr,mpfr_rnd_t));
+__MPFR_DECLSPEC int mpfr_lgamma _MPFR_PROTO((mpfr_ptr,int*,mpfr_srcptr,mpfr_rnd_t));
+__MPFR_DECLSPEC int mpfr_digamma _MPFR_PROTO((mpfr_ptr,mpfr_srcptr,mpfr_rnd_t));
+__MPFR_DECLSPEC int mpfr_zeta _MPFR_PROTO ((mpfr_ptr,mpfr_srcptr,mpfr_rnd_t));
+__MPFR_DECLSPEC int mpfr_zeta_ui _MPFR_PROTO ((mpfr_ptr,unsigned long,mpfr_rnd_t));
+__MPFR_DECLSPEC int mpfr_fac_ui _MPFR_PROTO ((mpfr_ptr, unsigned long int,
+ mpfr_rnd_t));
+__MPFR_DECLSPEC int mpfr_j0 _MPFR_PROTO ((mpfr_ptr, mpfr_srcptr, mpfr_rnd_t));
+__MPFR_DECLSPEC int mpfr_j1 _MPFR_PROTO ((mpfr_ptr, mpfr_srcptr, mpfr_rnd_t));
+__MPFR_DECLSPEC int mpfr_jn _MPFR_PROTO ((mpfr_ptr, long, mpfr_srcptr,
+ mpfr_rnd_t));
+__MPFR_DECLSPEC int mpfr_y0 _MPFR_PROTO ((mpfr_ptr, mpfr_srcptr, mpfr_rnd_t));
+__MPFR_DECLSPEC int mpfr_y1 _MPFR_PROTO ((mpfr_ptr, mpfr_srcptr, mpfr_rnd_t));
+__MPFR_DECLSPEC int mpfr_yn _MPFR_PROTO ((mpfr_ptr, long, mpfr_srcptr,
+ mpfr_rnd_t));
+
+__MPFR_DECLSPEC int mpfr_ai _MPFR_PROTO ((mpfr_ptr, mpfr_srcptr, mpfr_rnd_t));
+
+__MPFR_DECLSPEC int mpfr_min _MPFR_PROTO ((mpfr_ptr, mpfr_srcptr, mpfr_srcptr,
+ mpfr_rnd_t));
+__MPFR_DECLSPEC int mpfr_max _MPFR_PROTO ((mpfr_ptr, mpfr_srcptr, mpfr_srcptr,
+ mpfr_rnd_t));
+__MPFR_DECLSPEC int mpfr_dim _MPFR_PROTO ((mpfr_ptr, mpfr_srcptr, mpfr_srcptr,
+ mpfr_rnd_t));
+
+__MPFR_DECLSPEC int mpfr_mul_z _MPFR_PROTO ((mpfr_ptr, mpfr_srcptr,
+ mpz_srcptr, mpfr_rnd_t));
+__MPFR_DECLSPEC int mpfr_div_z _MPFR_PROTO ((mpfr_ptr, mpfr_srcptr,
+ mpz_srcptr, mpfr_rnd_t));
+__MPFR_DECLSPEC int mpfr_add_z _MPFR_PROTO ((mpfr_ptr, mpfr_srcptr,
+ mpz_srcptr, mpfr_rnd_t));
+__MPFR_DECLSPEC int mpfr_sub_z _MPFR_PROTO ((mpfr_ptr, mpfr_srcptr,
+ mpz_srcptr, mpfr_rnd_t));
+__MPFR_DECLSPEC int mpfr_cmp_z _MPFR_PROTO ((mpfr_srcptr, mpz_srcptr));
+
+__MPFR_DECLSPEC int mpfr_mul_q _MPFR_PROTO ((mpfr_ptr, mpfr_srcptr,
+ mpq_srcptr, mpfr_rnd_t));
+__MPFR_DECLSPEC int mpfr_div_q _MPFR_PROTO ((mpfr_ptr, mpfr_srcptr,
+ mpq_srcptr, mpfr_rnd_t));
+__MPFR_DECLSPEC int mpfr_add_q _MPFR_PROTO ((mpfr_ptr, mpfr_srcptr,
+ mpq_srcptr, mpfr_rnd_t));
+__MPFR_DECLSPEC int mpfr_sub_q _MPFR_PROTO ((mpfr_ptr, mpfr_srcptr,
+ mpq_srcptr, mpfr_rnd_t));
+__MPFR_DECLSPEC int mpfr_cmp_q _MPFR_PROTO ((mpfr_srcptr, mpq_srcptr));
+
+__MPFR_DECLSPEC int mpfr_cmp_f _MPFR_PROTO ((mpfr_srcptr, mpf_srcptr));
+
+__MPFR_DECLSPEC int mpfr_fma _MPFR_PROTO ((mpfr_ptr, mpfr_srcptr, mpfr_srcptr,
+ mpfr_srcptr, mpfr_rnd_t));
+__MPFR_DECLSPEC int mpfr_fms _MPFR_PROTO ((mpfr_ptr, mpfr_srcptr, mpfr_srcptr,
+ mpfr_srcptr, mpfr_rnd_t));
+__MPFR_DECLSPEC int mpfr_sum _MPFR_PROTO ((mpfr_ptr, mpfr_ptr *__gmp_const,
+ unsigned long, mpfr_rnd_t));
+
+__MPFR_DECLSPEC void mpfr_free_cache _MPFR_PROTO ((void));
+
+__MPFR_DECLSPEC int mpfr_subnormalize _MPFR_PROTO ((mpfr_ptr, int,
+ mpfr_rnd_t));
+
+__MPFR_DECLSPEC int mpfr_strtofr _MPFR_PROTO ((mpfr_ptr, __gmp_const char *,
+ char **, int, mpfr_rnd_t));
+
+__MPFR_DECLSPEC size_t mpfr_custom_get_size _MPFR_PROTO ((mpfr_prec_t));
+__MPFR_DECLSPEC void mpfr_custom_init _MPFR_PROTO ((void *, mpfr_prec_t));
+__MPFR_DECLSPEC void * mpfr_custom_get_significand _MPFR_PROTO ((mpfr_srcptr));
+__MPFR_DECLSPEC mpfr_exp_t mpfr_custom_get_exp _MPFR_PROTO ((mpfr_srcptr));
+__MPFR_DECLSPEC void mpfr_custom_move _MPFR_PROTO ((mpfr_ptr, void *));
+__MPFR_DECLSPEC void mpfr_custom_init_set _MPFR_PROTO ((mpfr_ptr, int,
+ mpfr_exp_t, mpfr_prec_t, void *));
+__MPFR_DECLSPEC int mpfr_custom_get_kind _MPFR_PROTO ((mpfr_srcptr));
+
+#if defined (__cplusplus)
+}
+#endif
+
+/* DON'T USE THIS! (For MPFR-public macros only, see below.)
+ The mpfr_sgn macro uses the fact that __MPFR_EXP_NAN and __MPFR_EXP_ZERO
+ are the smallest values.
+ FIXME: In the following macros, the cast of an unsigned type with MSB set
+ to the signed type mpfr_exp_t yields an integer overflow, which can give
+ unexpected results with future compilers and aggressive optimisations.
+ Why not work only with signed types, using INT_MIN and LONG_MIN? */
+#if __GMP_MP_SIZE_T_INT
+#define __MPFR_EXP_NAN ((mpfr_exp_t)((~((~(unsigned int)0)>>1))+2))
+#define __MPFR_EXP_ZERO ((mpfr_exp_t)((~((~(unsigned int)0)>>1))+1))
+#define __MPFR_EXP_INF ((mpfr_exp_t)((~((~(unsigned int)0)>>1))+3))
+#else
+#define __MPFR_EXP_NAN ((mpfr_exp_t)((~((~(unsigned long)0)>>1))+2))
+#define __MPFR_EXP_ZERO ((mpfr_exp_t)((~((~(unsigned long)0)>>1))+1))
+#define __MPFR_EXP_INF ((mpfr_exp_t)((~((~(unsigned long)0)>>1))+3))
+#endif
+
+/* Define MPFR_USE_EXTENSION to avoid "gcc -pedantic" warnings. */
+#ifndef MPFR_EXTENSION
+# if defined(MPFR_USE_EXTENSION)
+# define MPFR_EXTENSION __extension__
+# else
+# define MPFR_EXTENSION
+# endif
+#endif
+
+/* Warning! This macro doesn't work with K&R C (e.g., compare the "gcc -E"
+ output with and without -traditional) and shouldn't be used internally.
+ For public use only, but see the MPFR manual. */
+#define MPFR_DECL_INIT(_x, _p) \
+ MPFR_EXTENSION mp_limb_t __gmpfr_local_tab_##_x[((_p)-1)/GMP_NUMB_BITS+1]; \
+ MPFR_EXTENSION mpfr_t _x = {{(_p),1,__MPFR_EXP_NAN,__gmpfr_local_tab_##_x}}
+
+/* Fast access macros to replace the function interface.
+ If the user does not want the macro interface, it can be disabled by
+ defining MPFR_USE_NO_MACRO, even though the macros produce faster and
+ smaller code. */
+#ifndef MPFR_USE_NO_MACRO
+
+/* Inlining these functions is both faster and smaller. */
+#define mpfr_nan_p(_x) ((_x)->_mpfr_exp == __MPFR_EXP_NAN)
+#define mpfr_inf_p(_x) ((_x)->_mpfr_exp == __MPFR_EXP_INF)
+#define mpfr_zero_p(_x) ((_x)->_mpfr_exp == __MPFR_EXP_ZERO)
+#define mpfr_regular_p(_x) ((_x)->_mpfr_exp > __MPFR_EXP_INF)
+#define mpfr_sgn(_x) \
+ ((_x)->_mpfr_exp < __MPFR_EXP_INF ? \
+ (mpfr_nan_p (_x) ? mpfr_set_erangeflag () : (void) 0), 0 : \
+ MPFR_SIGN (_x))
+
+/* Prevent these from being used as lvalues. */
+#define MPFR_VALUE_OF(x) (0 ? (x) : (x))
+#define mpfr_get_prec(_x) MPFR_VALUE_OF((_x)->_mpfr_prec)
+#define mpfr_get_exp(_x) MPFR_VALUE_OF((_x)->_mpfr_exp)
+/* Note: if need be, the MPFR_VALUE_OF can be used for other expressions
+ (of any type). Thanks to Wojtek Lerch and Tim Rentsch for the idea. */
+
+#define mpfr_round(a,b) mpfr_rint((a), (b), MPFR_RNDNA)
+#define mpfr_trunc(a,b) mpfr_rint((a), (b), MPFR_RNDZ)
+#define mpfr_ceil(a,b) mpfr_rint((a), (b), MPFR_RNDU)
+#define mpfr_floor(a,b) mpfr_rint((a), (b), MPFR_RNDD)
+
+#define mpfr_cmp_ui(b,i) mpfr_cmp_ui_2exp((b),(i),0)
+#define mpfr_cmp_si(b,i) mpfr_cmp_si_2exp((b),(i),0)
+#define mpfr_set(a,b,r) mpfr_set4(a,b,r,MPFR_SIGN(b))
+#define mpfr_abs(a,b,r) mpfr_set4(a,b,r,1)
+#define mpfr_copysign(a,b,c,r) mpfr_set4(a,b,r,MPFR_SIGN(c))
+#define mpfr_setsign(a,b,s,r) mpfr_set4(a,b,r,(s) ? -1 : 1)
+#define mpfr_signbit(x) (MPFR_SIGN(x) < 0)
+#define mpfr_cmp(b, c) mpfr_cmp3(b, c, 1)
+#define mpfr_mul_2exp(y,x,n,r) mpfr_mul_2ui((y),(x),(n),(r))
+#define mpfr_div_2exp(y,x,n,r) mpfr_div_2ui((y),(x),(n),(r))
+
+
+/* When using GCC, optimize certain common comparisons and affectations.
+ + Remove ICC since it defines __GNUC__ but produces a
+ huge number of warnings if you use this code.
+ VL: I couldn't reproduce a single warning when enabling these macros
+ with icc 10.1 20080212 on Itanium. But with this version, __ICC isn't
+ defined (__INTEL_COMPILER is, though), so that these macros are enabled
+ anyway. Checking with other ICC versions is needed. Possibly detect
+ whether warnings are produced or not with a configure test.
+ + Remove C++ too, since it complains too much. */
+#if defined (__GNUC__) && !defined(__ICC) && !defined(__cplusplus)
+#if (__GNUC__ >= 2)
+#undef mpfr_cmp_ui
+/* We use the fact that mpfr_sgn on NaN sets the erange flag and returns 0. */
+#define mpfr_cmp_ui(_f,_u) \
+ (__builtin_constant_p (_u) && (_u) == 0 ? \
+ mpfr_sgn (_f) : \
+ mpfr_cmp_ui_2exp ((_f),(_u),0))
+#undef mpfr_cmp_si
+#define mpfr_cmp_si(_f,_s) \
+ (__builtin_constant_p (_s) && (_s) >= 0 ? \
+ mpfr_cmp_ui ((_f), (_s)) : \
+ mpfr_cmp_si_2exp ((_f), (_s), 0))
+#if __GNUC__ > 2 || __GNUC_MINOR__ >= 95
+#undef mpfr_set_ui
+#define mpfr_set_ui(_f,_u,_r) \
+ (__builtin_constant_p (_u) && (_u) == 0 ? \
+ __extension__ ({ \
+ mpfr_ptr _p = (_f); \
+ _p->_mpfr_sign = 1; \
+ _p->_mpfr_exp = __MPFR_EXP_ZERO; \
+ (void) (_r); 0; }) : \
+ mpfr_set_ui_2exp ((_f), (_u), 0, (_r)))
+#endif
+#undef mpfr_set_si
+#define mpfr_set_si(_f,_s,_r) \
+ (__builtin_constant_p (_s) && (_s) >= 0 ? \
+ mpfr_set_ui ((_f), (_s), (_r)) : \
+ mpfr_set_si_2exp ((_f), (_s), 0, (_r)))
+#endif
+#endif
+
+/* Macro version of mpfr_stack interface for fast access */
+#define mpfr_custom_get_size(p) ((size_t) \
+ (((p)+GMP_NUMB_BITS-1)/GMP_NUMB_BITS*sizeof (mp_limb_t)))
+#define mpfr_custom_init(m,p) do {} while (0)
+#define mpfr_custom_get_significand(x) ((void*)((x)->_mpfr_d))
+#define mpfr_custom_get_exp(x) ((x)->_mpfr_exp)
+#define mpfr_custom_move(x,m) do { ((x)->_mpfr_d = (mp_limb_t*)(m)); } while (0)
+/* Initialize x in place over the user-supplied significand buffer m,
+   with kind k (sign folded into its sign), exponent e (used only for
+   regular numbers) and precision p.  Macro version of the function
+   mpfr_custom_init_set declared above; same contract. */
+#define mpfr_custom_init_set(x,k,e,p,m) do {                   \
+   mpfr_ptr _x = (x);                                          \
+   mpfr_exp_t _e;                                              \
+   mpfr_kind_t _t;                                             \
+   int _s, _k;                                                 \
+   _k = (k);                                                   \
+   if (_k >= 0)  {                                             \
+     _t = (mpfr_kind_t) _k;                                    \
+     _s = 1;                                                   \
+   } else {                                                    \
+     /* Negate the captured local _k, NOT the raw argument k:  \
+        "-k" would re-evaluate the argument (side effects) and \
+        mis-parse non-atomic expressions, e.g. k == a-b would  \
+        expand to -a-b. */                                     \
+     _t = (mpfr_kind_t) -_k;                                   \
+     _s = -1;                                                  \
+   }                                                           \
+   _e = _t == MPFR_REGULAR_KIND ? (e) :                        \
+     _t == MPFR_NAN_KIND ? __MPFR_EXP_NAN :                    \
+     _t == MPFR_INF_KIND ? __MPFR_EXP_INF : __MPFR_EXP_ZERO;   \
+   _x->_mpfr_prec = (p);                                       \
+   _x->_mpfr_sign = _s;                                        \
+   _x->_mpfr_exp  = _e;                                        \
+   _x->_mpfr_d    = (mp_limb_t*) (m);                          \
+ } while (0)
+#define mpfr_custom_get_kind(x) \
+ ( (x)->_mpfr_exp > __MPFR_EXP_INF ? (int)MPFR_REGULAR_KIND*MPFR_SIGN (x) \
+ : (x)->_mpfr_exp == __MPFR_EXP_INF ? (int)MPFR_INF_KIND*MPFR_SIGN (x) \
+ : (x)->_mpfr_exp == __MPFR_EXP_NAN ? (int)MPFR_NAN_KIND \
+ : (int) MPFR_ZERO_KIND * MPFR_SIGN (x) )
+
+
+#endif /* MPFR_USE_NO_MACRO */
+
+/* These are defined to be macros. */
+#define mpfr_init_set_si(x, i, rnd) \
+ ( mpfr_init(x), mpfr_set_si((x), (i), (rnd)) )
+#define mpfr_init_set_ui(x, i, rnd) \
+ ( mpfr_init(x), mpfr_set_ui((x), (i), (rnd)) )
+#define mpfr_init_set_d(x, d, rnd) \
+ ( mpfr_init(x), mpfr_set_d((x), (d), (rnd)) )
+#define mpfr_init_set_ld(x, d, rnd) \
+ ( mpfr_init(x), mpfr_set_ld((x), (d), (rnd)) )
+#define mpfr_init_set_z(x, i, rnd) \
+ ( mpfr_init(x), mpfr_set_z((x), (i), (rnd)) )
+#define mpfr_init_set_q(x, i, rnd) \
+ ( mpfr_init(x), mpfr_set_q((x), (i), (rnd)) )
+#define mpfr_init_set(x, y, rnd) \
+ ( mpfr_init(x), mpfr_set((x), (y), (rnd)) )
+#define mpfr_init_set_f(x, y, rnd) \
+ ( mpfr_init(x), mpfr_set_f((x), (y), (rnd)) )
+
+/* Compatibility layer -- obsolete functions and macros */
+#define mpfr_cmp_abs mpfr_cmpabs
+#define mpfr_round_prec(x,r,p) mpfr_prec_round(x,p,r)
+#define __gmp_default_rounding_mode (mpfr_get_default_rounding_mode())
+#define __mpfr_emin (mpfr_get_emin())
+#define __mpfr_emax (mpfr_get_emax())
+#define __mpfr_default_fp_bit_precision (mpfr_get_default_fp_bit_precision())
+#define MPFR_EMIN_MIN mpfr_get_emin_min()
+#define MPFR_EMIN_MAX mpfr_get_emin_max()
+#define MPFR_EMAX_MIN mpfr_get_emax_min()
+#define MPFR_EMAX_MAX mpfr_get_emax_max()
+#define mpfr_version (mpfr_get_version())
+#ifndef mpz_set_fr
+# define mpz_set_fr mpfr_get_z
+#endif
+#define mpfr_add_one_ulp(x,r) \
+ (mpfr_sgn (x) > 0 ? mpfr_nextabove (x) : mpfr_nextbelow (x))
+#define mpfr_sub_one_ulp(x,r) \
+ (mpfr_sgn (x) > 0 ? mpfr_nextbelow (x) : mpfr_nextabove (x))
+#define mpfr_get_z_exp mpfr_get_z_2exp
+#define mpfr_custom_get_mantissa mpfr_custom_get_significand
+
+#endif /* __MPFR_H*/
diff --git a/src/mpn_exp.c b/src/mpn_exp.c
new file mode 100644
index 000000000..ea2921feb
--- /dev/null
+++ b/src/mpn_exp.c
@@ -0,0 +1,175 @@
+/* mpfr_mpn_exp -- auxiliary function for mpfr_get_str and mpfr_set_str
+
+Copyright 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+Contributed by Alain Delplanque and Paul Zimmermann.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+
+#define MPFR_NEED_LONGLONG_H
+#include "mpfr-impl.h"
+
+/* this function computes an approximation of b^e in {a, n}, with exponent
+   stored in exp_r. The computed value is rounded toward zero (truncated).
+   It returns an integer f such that the final error is bounded by 2^f ulps,
+   that is:
+   a*2^exp_r <= b^e <= 2^exp_r (a + 2^f),
+   where a represents {a, n}, i.e. the integer
+   a[0] + a[1]*B + ... + a[n-1]*B^(n-1) where B=2^GMP_NUMB_BITS
+
+   Return -1 if the result is exact.
+   Return -2 if an overflow occurred in the computation of exp_r.
+*/
+
+long
+mpfr_mpn_exp (mp_limb_t *a, mpfr_exp_t *exp_r, int b, mpfr_exp_t e, size_t n)
+{
+  mp_limb_t *c, B;
+  mpfr_exp_t f, h;
+  int i;
+  unsigned long t; /* number of bits in e */
+  unsigned long bits;
+  size_t n1;
+  unsigned int error; /* loop index i of the first inexact squaring step;
+                         error == t means no error (exact so far) */
+  int err_s_a2 = 0;
+  int err_s_ab = 0; /* numbers of 1-bit renormalization shifts performed
+                       after an already-inexact A^2 resp. A*B step */
+  MPFR_TMP_DECL(marker);
+
+  MPFR_ASSERTN(e > 0);
+  MPFR_ASSERTN((2 <= b) && (b <= 62));
+
+  MPFR_TMP_MARK(marker);
+
+  /* initialization of a, b, f, h */
+
+  /* normalize the base: shift B left so its most significant bit is set */
+  B = (mp_limb_t) b;
+  count_leading_zeros (h, B);
+
+  bits = GMP_NUMB_BITS - h;
+
+  B = B << h;
+  h = - h;
+
+  /* allocate space for A and set it to B */
+  c = (mp_limb_t*) MPFR_TMP_ALLOC(2 * n * BYTES_PER_MP_LIMB);
+  a [n - 1] = B;
+  MPN_ZERO (a, n - 1);
+  /* initial exponent for A: invariant is A = {a, n} * 2^f */
+  f = h - (n - 1) * GMP_NUMB_BITS;
+
+  /* determine number of bits in e */
+  count_leading_zeros (t, (mp_limb_t) e);
+
+  t = GMP_NUMB_BITS - t; /* number of bits of exponent e */
+
+  error = t; /* error <= GMP_NUMB_BITS */
+
+  MPN_ZERO (c, 2 * n);
+
+  /* Left-to-right binary exponentiation: A already holds b (accounting
+     for the most significant bit of e); for each remaining bit of e,
+     square A, then multiply by B when the bit is 1, renormalizing A
+     (msb set) after each step. */
+  for (i = t - 2; i >= 0; i--)
+    {
+
+      /* determine precision needed */
+      bits = n * GMP_NUMB_BITS - mpn_scan1 (a, 0);
+      n1 = (n * GMP_NUMB_BITS - bits) / GMP_NUMB_BITS;
+
+      /* square of A : {c+2n1, 2(n-n1)} = {a+n1, n-n1}^2 */
+      mpn_sqr_n (c + 2 * n1, a + n1, n - n1);
+
+      /* set {c+n, 2n1-n} to 0 : {c, n} = {a, n}^2*K^n */
+
+      /* check overflow on f */
+      if (MPFR_UNLIKELY(f < MPFR_EXP_MIN/2 || f > MPFR_EXP_MAX/2))
+        {
+        overflow:
+          MPFR_TMP_FREE(marker);
+          return -2;
+        }
+      /* FIXME: Could f = 2*f + n * GMP_NUMB_BITS be used? */
+      f = 2*f;
+      MPFR_SADD_OVERFLOW (f, f, n * GMP_NUMB_BITS,
+                          mpfr_exp_t, mpfr_uexp_t,
+                          MPFR_EXP_MIN, MPFR_EXP_MAX,
+                          goto overflow, goto overflow);
+      if ((c[2*n - 1] & MPFR_LIMB_HIGHBIT) == 0)
+        {
+          /* shift A by one bit to the left */
+          mpn_lshift (a, c + n, n, 1);
+          a[0] |= mpn_lshift (c + n - 1, c + n - 1, 1, 1);
+          f --;
+          if (error != t)
+            err_s_a2 ++;
+        }
+      else
+        MPN_COPY (a, c + n, n);
+
+      /* the truncated low part of the square is nonzero: the result
+         became inexact at this step; remember its loop index */
+      if ((error == t) && (2 * n1 <= n) &&
+          (mpn_scan1 (c + 2 * n1, 0) < (n - 2 * n1) * GMP_NUMB_BITS))
+        error = i;
+
+      if (e & ((mpfr_exp_t) 1 << i))
+        {
+          /* multiply A by B */
+          c[2 * n - 1] = mpn_mul_1 (c + n - 1, a, n, B);
+          f += h + GMP_NUMB_BITS;
+          if ((c[2 * n - 1] & MPFR_LIMB_HIGHBIT) == 0)
+            { /* shift A by one bit to the left */
+              mpn_lshift (a, c + n, n, 1);
+              a[0] |= mpn_lshift (c + n - 1, c + n - 1, 1, 1);
+              f --;
+            }
+          else
+            {
+              MPN_COPY (a, c + n, n);
+              if (error != t)
+                err_s_ab ++;
+            }
+          /* the discarded low limb c[n-1] is nonzero: inexact step */
+          if ((error == t) && (c[n - 1] != 0))
+            error = i;
+        }
+    }
+
+  MPFR_TMP_FREE(marker);
+
+  *exp_r = f;
+
+  if (error == t)
+    return -1; /* result is exact */
+  else /* error <= t-2 <= GMP_NUMB_BITS-2
+          err_s_ab, err_s_a2 <= t-1 */
+    {
+      /* if there are p loops after the first inexact result, with
+         j shifts in a^2 and l shifts in a*b, then the final error is
+         at most 2^(p+ceil((j+1)/2)+l+1)*ulp(res).
+         This is bounded by 2^(5/2*t-1/2) where t is the number of bits of e.
+      */
+      error = error + err_s_ab + err_s_a2 / 2 + 3; /* <= 5t/2-1/2 */
+#if 0
+      if ((error - 1) >= ((n * GMP_NUMB_BITS - 1) / 2))
+        error = n * GMP_NUMB_BITS; /* result is completely wrong:
+                                      this is very unlikely since error is
+                                      at most 5/2*log_2(e), and
+                                      n * GMP_NUMB_BITS is at least
+                                      3*log_2(e) */
+#endif
+      return error;
+    }
+}
diff --git a/src/mul.c b/src/mul.c
new file mode 100644
index 000000000..a02ad788f
--- /dev/null
+++ b/src/mul.c
@@ -0,0 +1,511 @@
+/* mpfr_mul -- multiply two floating-point numbers
+
+Copyright 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#define MPFR_NEED_LONGLONG_H
+#include "mpfr-impl.h"
+
+
+/********* BEGINNING CHECK *************/
+
+/* Check if we have to check the result of mpfr_mul.
+ TODO: Find a better (and faster?) check than using old implementation */
+#ifdef WANT_ASSERT
+# if WANT_ASSERT >= 3
+
+int mpfr_mul2 (mpfr_ptr a, mpfr_srcptr b, mpfr_srcptr c, mpfr_rnd_t rnd_mode);
+/* mpfr_mul3: reference (pre-Mulders) multiplication, kept only so that
+   the result of the optimized mpfr_mul2 can be cross-checked against it
+   when WANT_ASSERT >= 3.  Same contract as mpfr_mul. */
+static int
+mpfr_mul3 (mpfr_ptr a, mpfr_srcptr b, mpfr_srcptr c, mpfr_rnd_t rnd_mode)
+{
+  /* Old implementation */
+  int sign_product, cc, inexact;
+  mpfr_exp_t ax;
+  mp_limb_t *tmp;
+  mp_limb_t b1;
+  mpfr_prec_t bq, cq;
+  mp_size_t bn, cn, tn, k;
+  MPFR_TMP_DECL(marker);
+
+  /* deal with special cases */
+  if (MPFR_ARE_SINGULAR(b,c))
+    {
+      if (MPFR_IS_NAN(b) || MPFR_IS_NAN(c))
+        {
+          MPFR_SET_NAN(a);
+          MPFR_RET_NAN;
+        }
+      sign_product = MPFR_MULT_SIGN( MPFR_SIGN(b) , MPFR_SIGN(c) );
+      if (MPFR_IS_INF(b))
+        {
+          if (MPFR_IS_INF(c) || MPFR_NOTZERO(c))
+            {
+              MPFR_SET_SIGN(a,sign_product);
+              MPFR_SET_INF(a);
+              MPFR_RET(0); /* exact */
+            }
+          else
+            {
+              /* inf * 0 gives NaN */
+              MPFR_SET_NAN(a);
+              MPFR_RET_NAN;
+            }
+        }
+      else if (MPFR_IS_INF(c))
+        {
+          if (MPFR_NOTZERO(b))
+            {
+              MPFR_SET_SIGN(a, sign_product);
+              MPFR_SET_INF(a);
+              MPFR_RET(0); /* exact */
+            }
+          else
+            {
+              /* 0 * inf gives NaN */
+              MPFR_SET_NAN(a);
+              MPFR_RET_NAN;
+            }
+        }
+      else
+        {
+          MPFR_ASSERTD(MPFR_IS_ZERO(b) || MPFR_IS_ZERO(c));
+          MPFR_SET_SIGN(a, sign_product);
+          MPFR_SET_ZERO(a);
+          MPFR_RET(0); /* 0 * 0 is exact */
+        }
+    }
+  sign_product = MPFR_MULT_SIGN( MPFR_SIGN(b) , MPFR_SIGN(c) );
+
+  ax = MPFR_GET_EXP (b) + MPFR_GET_EXP (c);
+
+  bq = MPFR_PREC(b);
+  cq = MPFR_PREC(c);
+
+  MPFR_ASSERTD(bq+cq > bq); /* PREC_MAX is /2 so no integer overflow */
+
+  bn = (bq+GMP_NUMB_BITS-1)/GMP_NUMB_BITS; /* number of limbs of b */
+  cn = (cq+GMP_NUMB_BITS-1)/GMP_NUMB_BITS; /* number of limbs of c */
+  k = bn + cn; /* effective nb of limbs used by b*c (= tn or tn+1) below */
+  tn = (bq + cq + GMP_NUMB_BITS - 1) / GMP_NUMB_BITS;
+  /* <= k, thus no int overflow */
+  MPFR_ASSERTD(tn <= k);
+
+  /* Check for no size_t overflow*/
+  MPFR_ASSERTD((size_t) k <= ((size_t) -1) / BYTES_PER_MP_LIMB);
+  MPFR_TMP_MARK(marker);
+  tmp = (mp_limb_t *) MPFR_TMP_ALLOC((size_t) k * BYTES_PER_MP_LIMB);
+
+  /* multiplies the two significands in temporarily allocated space;
+     mpn_mul requires its first operand to have at least as many limbs */
+  b1 = (MPFR_LIKELY(bn >= cn)) ?
+    mpn_mul (tmp, MPFR_MANT(b), bn, MPFR_MANT(c), cn)
+    : mpn_mul (tmp, MPFR_MANT(c), cn, MPFR_MANT(b), bn);
+
+  /* now tmp[0]..tmp[k-1] contains the product of both mantissas,
+     with tmp[k-1]>=2^(GMP_NUMB_BITS-2) */
+  b1 >>= GMP_NUMB_BITS - 1; /* msb from the product */
+
+  /* if the mantissas of b and c are uniformly distributed in ]1/2, 1],
+     then their product is in ]1/4, 1/2] with probability 2*ln(2)-1 ~ 0.386
+     and in [1/2, 1] with probability 2-2*ln(2) ~ 0.614 */
+  tmp += k - tn;
+  if (MPFR_UNLIKELY(b1 == 0))
+    mpn_lshift (tmp, tmp, tn, 1); /* tn <= k, so no stack corruption */
+  cc = mpfr_round_raw (MPFR_MANT (a), tmp, bq + cq,
+                       MPFR_IS_NEG_SIGN(sign_product),
+                       MPFR_PREC (a), rnd_mode, &inexact);
+
+  /* cc = 1 ==> result is a power of two */
+  if (MPFR_UNLIKELY(cc))
+    MPFR_MANT(a)[MPFR_LIMB_SIZE(a)-1] = MPFR_LIMB_HIGHBIT;
+
+  MPFR_TMP_FREE(marker);
+
+  {
+    mpfr_exp_t ax2 = ax + (mpfr_exp_t) (b1 - 1 + cc);
+    if (MPFR_UNLIKELY( ax2 > __gmpfr_emax))
+      return mpfr_overflow (a, rnd_mode, sign_product);
+    if (MPFR_UNLIKELY( ax2 < __gmpfr_emin))
+      {
+        /* In the rounding to the nearest mode, if the exponent of the exact
+           result (i.e. before rounding, i.e. without taking cc into account)
+           is < __gmpfr_emin - 1 or the exact result is a power of 2 (i.e. if
+           both arguments are powers of 2), then round to zero. */
+        if (rnd_mode == MPFR_RNDN &&
+            (ax + (mpfr_exp_t) b1 < __gmpfr_emin ||
+             (mpfr_powerof2_raw (b) && mpfr_powerof2_raw (c))))
+          rnd_mode = MPFR_RNDZ;
+        return mpfr_underflow (a, rnd_mode, sign_product);
+      }
+    MPFR_SET_EXP (a, ax2);
+    MPFR_SET_SIGN(a, sign_product);
+  }
+  MPFR_RET (inexact);
+}
+
+/* WANT_ASSERT >= 3 wrapper: compute the product with both the old
+   implementation (mpfr_mul3, on copies of the inputs) and the new one
+   (mpfr_mul2), and abort with a diagnostic on stderr if the results or
+   the signs of the ternary values disagree. */
+int
+mpfr_mul (mpfr_ptr a, mpfr_srcptr b, mpfr_srcptr c, mpfr_rnd_t rnd_mode)
+{
+  mpfr_t ta, tb, tc;
+  int inexact1, inexact2;
+
+  mpfr_init2 (ta, MPFR_PREC (a));
+  mpfr_init2 (tb, MPFR_PREC (b));
+  mpfr_init2 (tc, MPFR_PREC (c));
+  /* the copies have the same precision as the originals, so they are exact */
+  MPFR_ASSERTN (mpfr_set (tb, b, MPFR_RNDN) == 0);
+  MPFR_ASSERTN (mpfr_set (tc, c, MPFR_RNDN) == 0);
+
+  /* run the old implementation on the copies first, since a may alias b or c */
+  inexact2 = mpfr_mul3 (ta, tb, tc, rnd_mode);
+  inexact1 = mpfr_mul2 (a, b, c, rnd_mode);
+  /* mismatch if the values differ or the ternary values disagree in sign */
+  if (mpfr_cmp (ta, a) || inexact1*inexact2 < 0
+      || (inexact1*inexact2 == 0 && (inexact1|inexact2) != 0))
+    {
+      fprintf (stderr, "mpfr_mul return different values for %s\n"
+               "Prec_a = %lu, Prec_b = %lu, Prec_c = %lu\nB = ",
+               mpfr_print_rnd_mode (rnd_mode),
+               MPFR_PREC (a), MPFR_PREC (b), MPFR_PREC (c));
+      mpfr_out_str (stderr, 16, 0, tb, MPFR_RNDN);
+      fprintf (stderr, "\nC = ");
+      mpfr_out_str (stderr, 16, 0, tc, MPFR_RNDN);
+      fprintf (stderr, "\nOldMul: ");
+      mpfr_out_str (stderr, 16, 0, ta, MPFR_RNDN);
+      fprintf (stderr, "\nNewMul: ");
+      mpfr_out_str (stderr, 16, 0, a, MPFR_RNDN);
+      fprintf (stderr, "\nNewInexact = %d | OldInexact = %d\n",
+               inexact1, inexact2);
+      MPFR_ASSERTN(0);
+    }
+
+  mpfr_clears (ta, tb, tc, (mpfr_ptr) 0);
+  return inexact1;
+}
+
+# define mpfr_mul mpfr_mul2
+# endif
+#endif
+
+/****** END OF CHECK *******/
+
+/* Multiply 2 mpfr_t */
+
+int
+mpfr_mul (mpfr_ptr a, mpfr_srcptr b, mpfr_srcptr c, mpfr_rnd_t rnd_mode)
+{
+  int sign, inexact;
+  mpfr_exp_t ax, ax2;
+  mp_limb_t *tmp;
+  mp_limb_t b1;
+  mpfr_prec_t bq, cq;
+  mp_size_t bn, cn, tn, k;
+  MPFR_TMP_DECL (marker);
+
+  MPFR_LOG_FUNC (("b[%#R]=%R c[%#R]=%R rnd=%d", b, b, c, c, rnd_mode),
+                 ("a[%#R]=%R inexact=%d", a, a, inexact));
+
+  /* deal with special cases */
+  if (MPFR_ARE_SINGULAR (b, c))
+    {
+      if (MPFR_IS_NAN (b) || MPFR_IS_NAN (c))
+        {
+          MPFR_SET_NAN (a);
+          MPFR_RET_NAN;
+        }
+      sign = MPFR_MULT_SIGN (MPFR_SIGN (b), MPFR_SIGN (c));
+      if (MPFR_IS_INF (b))
+        {
+          if (!MPFR_IS_ZERO (c))
+            {
+              MPFR_SET_SIGN (a, sign);
+              MPFR_SET_INF (a);
+              MPFR_RET (0);
+            }
+          else
+            {
+              /* inf * 0 gives NaN */
+              MPFR_SET_NAN (a);
+              MPFR_RET_NAN;
+            }
+        }
+      else if (MPFR_IS_INF (c))
+        {
+          if (!MPFR_IS_ZERO (b))
+            {
+              MPFR_SET_SIGN (a, sign);
+              MPFR_SET_INF (a);
+              MPFR_RET(0);
+            }
+          else
+            {
+              /* 0 * inf gives NaN */
+              MPFR_SET_NAN (a);
+              MPFR_RET_NAN;
+            }
+        }
+      else
+        {
+          MPFR_ASSERTD (MPFR_IS_ZERO(b) || MPFR_IS_ZERO(c));
+          MPFR_SET_SIGN (a, sign);
+          MPFR_SET_ZERO (a);
+          MPFR_RET (0);
+        }
+    }
+  sign = MPFR_MULT_SIGN (MPFR_SIGN (b), MPFR_SIGN (c));
+
+  ax = MPFR_GET_EXP (b) + MPFR_GET_EXP (c);
+  /* Note: the exponent of the exact result will be e = bx + cx + ec with
+     ec in {-1,0,1} and the following assumes that e is representable. */
+
+  /* FIXME: Useful since we do an exponent check after ?
+   * It is useful iff the precision is big, there is an overflow
+   * and we are doing further mults...*/
+#ifdef HUGE
+  if (MPFR_UNLIKELY (ax > __gmpfr_emax + 1))
+    return mpfr_overflow (a, rnd_mode, sign);
+  if (MPFR_UNLIKELY (ax < __gmpfr_emin - 2))
+    return mpfr_underflow (a, rnd_mode == MPFR_RNDN ? MPFR_RNDZ : rnd_mode,
+                           sign);
+#endif
+
+  bq = MPFR_PREC (b);
+  cq = MPFR_PREC (c);
+
+  MPFR_ASSERTD (bq+cq > bq); /* PREC_MAX is /2 so no integer overflow */
+
+  bn = (bq+GMP_NUMB_BITS-1)/GMP_NUMB_BITS; /* number of limbs of b */
+  cn = (cq+GMP_NUMB_BITS-1)/GMP_NUMB_BITS; /* number of limbs of c */
+  k = bn + cn; /* effective nb of limbs used by b*c (= tn or tn+1) below */
+  tn = (bq + cq + GMP_NUMB_BITS - 1) / GMP_NUMB_BITS;
+  MPFR_ASSERTD (tn <= k); /* tn <= k, thus no int overflow */
+
+  /* Check for no size_t overflow*/
+  MPFR_ASSERTD ((size_t) k <= ((size_t) -1) / BYTES_PER_MP_LIMB);
+  MPFR_TMP_MARK (marker);
+  tmp = (mp_limb_t *) MPFR_TMP_ALLOC ((size_t) k * BYTES_PER_MP_LIMB);
+
+  /* multiplies the two significands in temporarily allocated space;
+     first swap b and c so that b always has at least as many limbs */
+  if (MPFR_UNLIKELY (bn < cn))
+    {
+      mpfr_srcptr z = b;
+      mp_size_t zn = bn;
+      b = c;
+      bn = cn;
+      c = z;
+      cn = zn;
+    }
+  MPFR_ASSERTD (bn >= cn);
+  if (MPFR_LIKELY (bn <= 2))
+    {
+      /* short operands: compute the product directly with umul_ppmm */
+      if (bn == 1)
+        {
+          /* 1 limb * 1 limb */
+          umul_ppmm (tmp[1], tmp[0], MPFR_MANT (b)[0], MPFR_MANT (c)[0]);
+          b1 = tmp[1];
+        }
+      else if (MPFR_UNLIKELY (cn == 1))
+        {
+          /* 2 limbs * 1 limb */
+          mp_limb_t t;
+          umul_ppmm (tmp[1], tmp[0], MPFR_MANT (b)[0], MPFR_MANT (c)[0]);
+          umul_ppmm (tmp[2], t, MPFR_MANT (b)[1], MPFR_MANT (c)[0]);
+          add_ssaaaa (tmp[2], tmp[1], tmp[2], tmp[1], 0, t);
+          b1 = tmp[2];
+        }
+      else
+        {
+          /* 2 limbs * 2 limbs */
+          mp_limb_t t1, t2, t3;
+          /* First 2 limbs * 1 limb */
+          umul_ppmm (tmp[1], tmp[0], MPFR_MANT (b)[0], MPFR_MANT (c)[0]);
+          umul_ppmm (tmp[2], t1, MPFR_MANT (b)[1], MPFR_MANT (c)[0]);
+          add_ssaaaa (tmp[2], tmp[1], tmp[2], tmp[1], 0, t1);
+          /* Second, the other 2 limbs * 1 limb product */
+          umul_ppmm (t1, t2, MPFR_MANT (b)[0], MPFR_MANT (c)[1]);
+          umul_ppmm (tmp[3], t3, MPFR_MANT (b)[1], MPFR_MANT (c)[1]);
+          add_ssaaaa (tmp[3], t1, tmp[3], t1, 0, t3);
+          /* Sum those two partial products */
+          add_ssaaaa (tmp[2], tmp[1], tmp[2], tmp[1], t1, t2);
+          tmp[3] += (tmp[2] < t1);
+          b1 = tmp[3];
+        }
+      b1 >>= (GMP_NUMB_BITS - 1);
+      tmp += k - tn;
+      if (MPFR_UNLIKELY (b1 == 0))
+        mpn_lshift (tmp, tmp, tn, 1); /* tn <= k, so no stack corruption */
+    }
+  else
+    /* Mulders' mulhigh. Disable if squaring, since it is not tuned for
+       such a case */
+    if (MPFR_UNLIKELY (bn > MPFR_MUL_THRESHOLD && b != c))
+      {
+        mp_limb_t *bp, *cp;
+        mp_size_t n;
+        mpfr_prec_t p;
+
+        /* First check if we can reduce the precision of b or c:
+           exact values are a nightmare for the short product trick */
+        bp = MPFR_MANT (b);
+        cp = MPFR_MANT (c);
+        MPFR_ASSERTN (MPFR_MUL_THRESHOLD >= 1);
+        if (MPFR_UNLIKELY ((bp[0] == 0 && bp[1] == 0) ||
+                           (cp[0] == 0 && cp[1] == 0)))
+          {
+            mpfr_t b_tmp, c_tmp;
+
+            MPFR_TMP_FREE (marker);
+            /* Check for b */
+            while (*bp == 0)
+              {
+                bp++;
+                bn--;
+                MPFR_ASSERTD (bn > 0);
+              } /* This must end since the MSL is != 0 */
+
+            /* Check for c too */
+            while (*cp == 0)
+              {
+                cp++;
+                cn--;
+                MPFR_ASSERTD (cn > 0);
+              } /* This must end since the MSL is != 0 */
+
+            /* It is not the fastest way, but it is safer: build views of
+               b and c with the zero low limbs stripped */
+            MPFR_SET_SAME_SIGN (b_tmp, b);
+            MPFR_SET_EXP (b_tmp, MPFR_GET_EXP (b));
+            MPFR_PREC (b_tmp) = bn * GMP_NUMB_BITS;
+            MPFR_MANT (b_tmp) = bp;
+
+            MPFR_SET_SAME_SIGN (c_tmp, c);
+            MPFR_SET_EXP (c_tmp, MPFR_GET_EXP (c));
+            MPFR_PREC (c_tmp) = cn * GMP_NUMB_BITS;
+            MPFR_MANT (c_tmp) = cp;
+
+            /* Call again mpfr_mul with the fixed arguments */
+            return mpfr_mul (a, b_tmp, c_tmp, rnd_mode);
+          }
+
+        /* Compute estimated precision of mulhigh.
+           We could use `+ (n < cn) + (n < bn)' instead of `+ 2',
+           but is it worth it? */
+        n = MPFR_LIMB_SIZE (a) + 1;
+        n = MIN (n, cn);
+        MPFR_ASSERTD (n >= 1 && 2*n <= k && n <= cn && n <= bn);
+        p = n * GMP_NUMB_BITS - MPFR_INT_CEIL_LOG2 (n + 2);
+        bp += bn - n;
+        cp += cn - n;
+
+        /* Check if MulHigh can produce a roundable result.
+           We may lose 1 bit due to RNDN, 1 due to final shift. */
+        if (MPFR_UNLIKELY (MPFR_PREC (a) > p - 5))
+          {
+            if (MPFR_UNLIKELY (MPFR_PREC (a) > p - 5 + GMP_NUMB_BITS
+                               || bn <= MPFR_MUL_THRESHOLD+1))
+              {
+                /* MulHigh can't produce a roundable result. */
+                MPFR_LOG_MSG (("mpfr_mulhigh can't be used (%lu VS %lu)\n",
+                               MPFR_PREC (a), p));
+                goto full_multiply;
+              }
+            /* Add one extra limb to mantissa of b and c. */
+            if (bn > n)
+              bp --;
+            else
+              {
+                bp = (mp_limb_t*) MPFR_TMP_ALLOC ((n+1) * sizeof (mp_limb_t));
+                bp[0] = 0;
+                MPN_COPY (bp + 1, MPFR_MANT (b) + bn - n, n);
+              }
+            if (cn > n)
+              cp --; /* FIXME: Could this happen? */
+            else
+              {
+                cp = (mp_limb_t*) MPFR_TMP_ALLOC ((n+1) * sizeof (mp_limb_t));
+                cp[0] = 0;
+                MPN_COPY (cp + 1, MPFR_MANT (c) + cn - n, n);
+              }
+            /* We will compute with one extra limb */
+            n++;
+            p = n * GMP_NUMB_BITS - MPFR_INT_CEIL_LOG2 (n + 2);
+            /* Due to some nasty reasons we can have only 4 bits */
+            MPFR_ASSERTD (MPFR_PREC (a) <= p - 4);
+
+            if (MPFR_LIKELY (k < 2*n))
+              {
+                /* the 2n-limb short product no longer fits in the k-limb
+                   buffer: allocate a bigger one */
+                tmp = (mp_limb_t*) MPFR_TMP_ALLOC (2 * n * sizeof (mp_limb_t));
+                tmp += 2*n-k; /* `tmp' still points to an area of `k' limbs */
+              }
+          }
+        MPFR_LOG_MSG (("Use mpfr_mulhigh (%lu VS %lu)\n", MPFR_PREC (a), p));
+        /* Compute an approximation of the product of b and c */
+        mpfr_mulhigh_n (tmp + k - 2 * n, bp, cp, n);
+        /* now tmp[0]..tmp[k-1] contains the product of both mantissas,
+           with tmp[k-1]>=2^(GMP_NUMB_BITS-2) */
+        b1 = tmp[k-1] >> (GMP_NUMB_BITS - 1); /* msb from the product */
+
+        /* If the mantissas of b and c are uniformly distributed in (1/2, 1],
+           then their product is in (1/4, 1/2] with probability 2*ln(2)-1
+           ~ 0.386 and in [1/2, 1] with probability 2-2*ln(2) ~ 0.614 */
+        if (MPFR_UNLIKELY (b1 == 0))
+          /* Warning: the mpfr_mulhigh_n call above only surely affects
+             tmp[k-n-1..k-1], thus we shift only those limbs */
+          mpn_lshift (tmp + k - n - 1, tmp + k - n - 1, n + 1, 1);
+        tmp += k - tn;
+        MPFR_ASSERTD (MPFR_LIMB_MSB (tmp[tn-1]) != 0);
+
+        /* the approximation does not carry enough correct bits to round:
+           fall back to a full product */
+        if (MPFR_UNLIKELY (!mpfr_round_p (tmp, tn, p+b1-1, MPFR_PREC(a)
+                                          + (rnd_mode == MPFR_RNDN))))
+          {
+            tmp -= k - tn; /* tmp may have changed, FIX IT!!!!! */
+            goto full_multiply;
+          }
+      }
+    else
+      {
+      full_multiply:
+        MPFR_LOG_MSG (("Use mpn_mul\n", 0));
+        b1 = mpn_mul (tmp, MPFR_MANT (b), bn, MPFR_MANT (c), cn);
+
+        /* now tmp[0]..tmp[k-1] contains the product of both mantissas,
+           with tmp[k-1]>=2^(GMP_NUMB_BITS-2) */
+        b1 >>= GMP_NUMB_BITS - 1; /* msb from the product */
+
+        /* if the mantissas of b and c are uniformly distributed in (1/2, 1],
+           then their product is in (1/4, 1/2] with probability 2*ln(2)-1
+           ~ 0.386 and in [1/2, 1] with probability 2-2*ln(2) ~ 0.614 */
+        tmp += k - tn;
+        if (MPFR_UNLIKELY (b1 == 0))
+          mpn_lshift (tmp, tmp, tn, 1); /* tn <= k, so no stack corruption */
+      }
+
+  ax2 = ax + (mpfr_exp_t) (b1 - 1);
+  MPFR_RNDRAW (inexact, a, tmp, bq+cq, rnd_mode, sign, ax2++);
+  MPFR_TMP_FREE (marker);
+  MPFR_EXP (a) = ax2; /* Can't use MPFR_SET_EXP: Expo may be out of range */
+  MPFR_SET_SIGN (a, sign);
+  if (MPFR_UNLIKELY (ax2 > __gmpfr_emax))
+    return mpfr_overflow (a, rnd_mode, sign);
+  if (MPFR_UNLIKELY (ax2 < __gmpfr_emin))
+    {
+      /* In the rounding to the nearest mode, if the exponent of the exact
+         result (i.e. before rounding, i.e. without taking cc into account)
+         is < __gmpfr_emin - 1 or the exact result is a power of 2 (i.e. if
+         both arguments are powers of 2), then round to zero. */
+      if (rnd_mode == MPFR_RNDN
+          && (ax + (mpfr_exp_t) b1 < __gmpfr_emin
+              || (mpfr_powerof2_raw (b) && mpfr_powerof2_raw (c))))
+        rnd_mode = MPFR_RNDZ;
+      return mpfr_underflow (a, rnd_mode, sign);
+    }
+  MPFR_RET (inexact);
+}
diff --git a/src/mul_2exp.c b/src/mul_2exp.c
new file mode 100644
index 000000000..db6ff760e
--- /dev/null
+++ b/src/mul_2exp.c
@@ -0,0 +1,33 @@
+/* mpfr_mul_2exp -- multiply a floating-point number by a power of two
+
+Copyright 1999, 2001, 2004, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#include "mpfr-impl.h"
+
+/* Obsolete function, use mpfr_mul_2ui or mpfr_mul_2si instead. */
+
+#undef mpfr_mul_2exp
+
+int
+mpfr_mul_2exp (mpfr_ptr y, mpfr_srcptr x, unsigned long int n, mpfr_rnd_t rnd_mode)
+{
+  /* kept for backward compatibility: simply forwards to mpfr_mul_2ui */
+  return mpfr_mul_2ui (y, x, n, rnd_mode);
+}
diff --git a/src/mul_2si.c b/src/mul_2si.c
new file mode 100644
index 000000000..392348d0e
--- /dev/null
+++ b/src/mul_2si.c
@@ -0,0 +1,56 @@
+/* mpfr_mul_2si -- multiply a floating-point number by a power of two
+
+Copyright 1999, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#include "mpfr-impl.h"
+
+int
+mpfr_mul_2si (mpfr_ptr y, mpfr_srcptr x, long int n, mpfr_rnd_t rnd_mode)
+{
+  int inexact;
+
+  MPFR_LOG_FUNC (("x[%#R]=%R n=%ld rnd=%d", x, x, n, rnd_mode),
+                 ("y[%#R]=%R inexact=%d", y, y, inexact));
+
+  /* copy x into y; inexact records any rounding done by mpfr_set */
+  inexact = MPFR_UNLIKELY(y != x) ? mpfr_set (y, x, rnd_mode) : 0;
+
+  if (MPFR_LIKELY( MPFR_IS_PURE_FP(y)) )
+    {
+      mpfr_exp_t exp = MPFR_GET_EXP (y);
+      /* in each test below the first comparison is there so that the
+         second one (on exp) cannot overflow */
+      if (MPFR_UNLIKELY( n > 0 && (__gmpfr_emax < MPFR_EMIN_MIN + n ||
+                                   exp > __gmpfr_emax - n)))
+        return mpfr_overflow (y, rnd_mode, MPFR_SIGN(y));
+
+      else if (MPFR_UNLIKELY(n < 0 && (__gmpfr_emin > MPFR_EMAX_MAX + n ||
+                                       exp < __gmpfr_emin - n)))
+        {
+          /* underflow; in round-to-nearest, demote to round-toward-zero
+             when the exact result is close enough to zero (cf. the
+             analogous underflow handling in mpfr_mul) */
+          if (rnd_mode == MPFR_RNDN &&
+              (__gmpfr_emin > MPFR_EMAX_MAX + (n + 1) ||
+               exp < __gmpfr_emin - (n + 1) ||
+               (inexact >= 0 && mpfr_powerof2_raw (y))))
+            rnd_mode = MPFR_RNDZ;
+          return mpfr_underflow (y, rnd_mode, MPFR_SIGN(y));
+        }
+      MPFR_SET_EXP (y, exp + n);
+    }
+
+  return inexact;
+}
diff --git a/src/mul_2ui.c b/src/mul_2ui.c
new file mode 100644
index 000000000..dcc5d8a44
--- /dev/null
+++ b/src/mul_2ui.c
@@ -0,0 +1,63 @@
+/* mpfr_mul_2ui -- multiply a floating-point number by a power of two
+
+Copyright 1999, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#include "mpfr-impl.h"
+
+int
+mpfr_mul_2ui (mpfr_ptr y, mpfr_srcptr x, unsigned long int n, mpfr_rnd_t rnd_mode)
+{
+  int inexact;
+
+  MPFR_LOG_FUNC (("x[%#R]=%R n=%lu rnd=%d", x, x, n, rnd_mode),
+                 ("y[%#R]=%R inexact=%d", y, y, inexact));
+
+  /* copy x into y; inexact records any rounding done by mpfr_set */
+  inexact = MPFR_UNLIKELY(y != x) ? mpfr_set (y, x, rnd_mode) : 0;
+
+  if (MPFR_LIKELY( MPFR_IS_PURE_FP(y)) )
+    {
+      /* n will have to be cast to long to make sure that the addition
+         and subtraction below (for overflow detection) are signed */
+      while (MPFR_UNLIKELY(n > LONG_MAX))
+        {
+          int inex2;
+
+          /* reduce n by LONG_MAX at a time; a nonzero ternary value from
+             this recursive call can only come from overflow */
+          n -= LONG_MAX;
+          inex2 = mpfr_mul_2ui(y, y, LONG_MAX, rnd_mode);
+          if (inex2)
+            return inex2; /* overflow */
+        }
+
+      /* MPFR_EMIN_MIN + (long) n is signed and doesn't lead to an overflow;
+         the first test is useful so that the real test can't lead to an
+         overflow. */
+      {
+        mpfr_exp_t exp = MPFR_GET_EXP (y);
+        if (MPFR_UNLIKELY( __gmpfr_emax < MPFR_EMIN_MIN + (long) n ||
+                           exp > __gmpfr_emax - (long) n))
+          return mpfr_overflow (y, rnd_mode, MPFR_SIGN(y));
+
+        MPFR_SET_EXP (y, exp + (long) n);
+      }
+    }
+
+  return inexact;
+}
diff --git a/src/mul_d.c b/src/mul_d.c
new file mode 100644
index 000000000..b989b9eaa
--- /dev/null
+++ b/src/mul_d.c
@@ -0,0 +1,49 @@
+/* mpfr_mul_d -- multiply a multiple precision floating-point number
+ by a machine double precision float
+
+Copyright 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#include "mpfr-impl.h"
+
+int
+mpfr_mul_d (mpfr_ptr a, mpfr_srcptr b, double c, mpfr_rnd_t rnd_mode)
+{
+  int inexact;
+  mpfr_t d;
+  MPFR_SAVE_EXPO_DECL (expo);
+
+  MPFR_LOG_FUNC (("b[%#R]=%R c=%.20g rnd=%d", b, b, c, rnd_mode),
+                 ("a[%#R]=%R", a, a));
+
+  MPFR_SAVE_EXPO_MARK (expo);
+
+  /* d has IEEE_DBL_MANT_DIG bits of precision, so converting the double
+     c is exact (checked by the assertion below) */
+  mpfr_init2 (d, IEEE_DBL_MANT_DIG);
+  inexact = mpfr_set_d (d, c, rnd_mode);
+  MPFR_ASSERTN (inexact == 0);
+
+  mpfr_clear_flags ();
+  inexact = mpfr_mul (a, b, d, rnd_mode);
+  MPFR_SAVE_EXPO_UPDATE_FLAGS (expo, __gmpfr_flags);
+
+  mpfr_clear(d);
+  MPFR_SAVE_EXPO_FREE (expo);
+  /* restore the caller's exponent range and check a against it */
+  return mpfr_check_range (a, inexact, rnd_mode);
+}
diff --git a/src/mul_ui.c b/src/mul_ui.c
new file mode 100644
index 000000000..19cf42f90
--- /dev/null
+++ b/src/mul_ui.c
@@ -0,0 +1,133 @@
+/* mpfr_mul_ui -- multiply a floating-point number by a machine integer
+ mpfr_mul_si -- multiply a floating-point number by a machine integer
+
+Copyright 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#define MPFR_NEED_LONGLONG_H
+#include "mpfr-impl.h"
+
+int
+mpfr_mul_ui (mpfr_ptr y, mpfr_srcptr x, unsigned long int u, mpfr_rnd_t rnd_mode)
+{
+  mp_limb_t *yp;
+  mp_size_t xn;
+  int cnt, inexact;
+  MPFR_TMP_DECL (marker);
+
+  if (MPFR_UNLIKELY (MPFR_IS_SINGULAR (x)))
+    {
+      if (MPFR_IS_NAN (x))
+        {
+          MPFR_SET_NAN (y);
+          MPFR_RET_NAN;
+        }
+      else if (MPFR_IS_INF (x))
+        {
+          if (u != 0)
+            {
+              MPFR_SET_INF (y);
+              MPFR_SET_SAME_SIGN (y, x);
+              MPFR_RET (0); /* infinity is exact */
+            }
+          else /* 0 * infinity */
+            {
+              MPFR_SET_NAN (y);
+              MPFR_RET_NAN;
+            }
+        }
+      else /* x is zero */
+        {
+          MPFR_ASSERTD (MPFR_IS_ZERO (x));
+          MPFR_SET_ZERO (y);
+          MPFR_SET_SAME_SIGN (y, x);
+          MPFR_RET (0); /* zero is exact */
+        }
+    }
+  else if (MPFR_UNLIKELY (u <= 1))
+    {
+      if (u < 1)
+        {
+          /* u == 0: result is zero with the sign of x */
+          MPFR_SET_ZERO (y);
+          MPFR_SET_SAME_SIGN (y, x);
+          MPFR_RET (0); /* zero is exact */
+        }
+      else
+        return mpfr_set (y, x, rnd_mode);
+    }
+  else if (MPFR_UNLIKELY (IS_POW2 (u)))
+    /* u is a power of two: the multiplication is an exact exponent shift */
+    return mpfr_mul_2si (y, x, MPFR_INT_CEIL_LOG2 (u), rnd_mode);
+
+  yp = MPFR_MANT (y); /* NOTE(review): dead store, overwritten by the
+                         MPFR_TMP_ALLOC below before any use */
+  xn = MPFR_LIMB_SIZE (x);
+
+  MPFR_ASSERTD (xn < MP_SIZE_T_MAX);
+  MPFR_TMP_MARK(marker);
+  yp = (mp_ptr) MPFR_TMP_ALLOC ((size_t) (xn + 1) * BYTES_PER_MP_LIMB);
+
+  MPFR_ASSERTN (u == (mp_limb_t) u);
+  yp[xn] = mpn_mul_1 (yp, MPFR_MANT (x), xn, u);
+
+  /* x * u is stored in yp[xn], ..., yp[0] */
+
+  /* since the case u=1 was treated above, we have u >= 2, thus
+     yp[xn] >= 1 since x was msb-normalized */
+  MPFR_ASSERTD (yp[xn] != 0);
+  if (MPFR_LIKELY (MPFR_LIMB_MSB (yp[xn]) == 0))
+    {
+      /* renormalize: shift left so the top bit of yp[xn] is set */
+      count_leading_zeros (cnt, yp[xn]);
+      mpn_lshift (yp, yp, xn + 1, cnt);
+    }
+  else
+    {
+      cnt = 0;
+    }
+
+  /* now yp[xn], ..., yp[0] is msb-normalized too, and has at most
+     PREC(x) + (GMP_NUMB_BITS - cnt) non-zero bits */
+  MPFR_RNDRAW (inexact, y, yp, (mpfr_prec_t) (xn + 1) * GMP_NUMB_BITS,
+               rnd_mode, MPFR_SIGN (x), cnt -- );
+
+  MPFR_TMP_FREE (marker);
+
+  cnt = GMP_NUMB_BITS - cnt;
+  /* overflow check; the first comparison is there so that the second
+     one cannot overflow (same idiom as in mpfr_mul_2ui) */
+  if (MPFR_UNLIKELY (__gmpfr_emax < MPFR_EMAX_MIN + cnt
+                     || MPFR_GET_EXP (x) > __gmpfr_emax - cnt))
+    return mpfr_overflow (y, rnd_mode, MPFR_SIGN(x));
+
+  MPFR_SET_EXP (y, MPFR_GET_EXP (x) + cnt);
+  MPFR_SET_SAME_SIGN (y, x);
+
+  return inexact;
+}
+
+int mpfr_mul_si (mpfr_ptr y, mpfr_srcptr x, long int u, mpfr_rnd_t rnd_mode)
+{
+  int res;
+
+  if (u >= 0)
+    res = mpfr_mul_ui (y, x, u, rnd_mode);
+  else
+    {
+      /* compute x * |u| with the opposite rounding direction, then negate
+         the result and the ternary value.
+         NOTE(review): `-u' is evaluated in type long, hence it is signed
+         overflow (undefined behavior) when u == LONG_MIN;
+         `- (unsigned long) u' would be safe -- confirm and fix upstream. */
+      res = -mpfr_mul_ui (y, x, -u, MPFR_INVERT_RND (rnd_mode));
+      MPFR_CHANGE_SIGN (y);
+    }
+  return res;
+}
diff --git a/src/mulders.c b/src/mulders.c
new file mode 100644
index 000000000..5faacb3be
--- /dev/null
+++ b/src/mulders.c
@@ -0,0 +1,115 @@
+/* Mulder's MulHigh function (short product)
+
+Copyright 2005, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#define MPFR_NEED_LONGLONG_H
+#include "mpfr-impl.h"
+
+#ifndef MUL_FFT_THRESHOLD
+#define MUL_FFT_THRESHOLD 8448
+#endif
+
+/* Don't use MPFR_MULHIGH_SIZE since it is handled by tuneup */
+#ifdef MPFR_MULHIGH_TAB_SIZE
+static short mulhigh_ktab[MPFR_MULHIGH_TAB_SIZE];
+#else
+static short mulhigh_ktab[] = {MPFR_MULHIGH_TAB};
+#define MPFR_MULHIGH_TAB_SIZE \
+ ((mp_size_t) (sizeof(mulhigh_ktab) / sizeof(mulhigh_ktab[0])))
+#endif
+
+/* Put in rp[n..2n-1] an approximation of the n high limbs
+ of {mp, n} * {np, n}.
+ The error is at worst of ln(n) for rp[n] and rp[n-1] is totally wrong */
+static void
+mpfr_mulhigh_n_basecase (mp_ptr rp, mp_srcptr up, mp_srcptr vp, mp_size_t n)
+{
+  mp_size_t i;
+
+  rp += n-1; /* point rp at the old rp[n-1]: only rp[n-1..2n-1] is written */
+  /* first partial product: top limb of up times low limb of vp */
+  umul_ppmm (rp[1], rp[0], up[n-1], vp[0]);
+  for (i = 1 ; i < n ; i++)
+    /* accumulate vp[i] times the i+1 highest limbs of up; the lower
+       partial products are deliberately skipped, which is where the
+       error bound stated above comes from */
+    rp[i+1] = mpn_addmul_1 (rp, up + (n - i - 1), i+1, vp[i]);
+}
+
+/* Put in rp[n..2n-1] an approximation of the n high limbs of
+   {np, n} * {mp, n}.  The tuned table mulhigh_ktab selects the strategy
+   for each size n: k = -1 -> full basecase product, k = 0 -> quadratic
+   short product, otherwise Mulders' recursive scheme with an (k, l = n-k)
+   split.  For n beyond the table, k = 2n/3 is used. */
+void
+mpfr_mulhigh_n (mp_ptr rp, mp_srcptr np, mp_srcptr mp, mp_size_t n)
+{
+  mp_size_t k;
+
+  MPFR_ASSERTD (MPFR_MULHIGH_TAB_SIZE > 4);
+  k = MPFR_LIKELY (n < MPFR_MULHIGH_TAB_SIZE) ? mulhigh_ktab[n] : 2*n/3;
+  MPFR_ASSERTD (k == -1 || k == 0 || (k > n/2 && k < n));
+  if (k < 0)
+    mpn_mul_basecase (rp, np, n, mp, n); /* k = -1: compute the full product */
+  else if (k == 0)
+    mpfr_mulhigh_n_basecase (rp, np, mp, n); /* quadratic short product */
+  else if (n > MUL_FFT_THRESHOLD)
+    mpn_mul_n (rp, np, mp, n); /* above the FFT threshold: full product */
+  else
+    {
+      mp_size_t l = n - k;
+      mp_limb_t cy;
+
+      /* Mulders: one full k x k product of the high parts, plus two
+         recursive short products for the cross terms. */
+      mpn_mul_n (rp + 2 * l, np + l, mp + l, k); /* fills rp[2l..2n-1] */
+      mpfr_mulhigh_n (rp, np + k, mp, l); /* fills rp[l-1..2l-1] */
+      cy = mpn_add_n (rp + n - 1, rp + n - 1, rp + l - 1, l + 1);
+      mpfr_mulhigh_n (rp, np, mp + k, l); /* fills rp[l-1..2l-1] */
+      cy += mpn_add_n (rp + n - 1, rp + n - 1, rp + l - 1, l + 1);
+      mpn_add_1 (rp + n + l, rp + n + l, k, cy); /* propagate carry */
+    }
+}
+
+#ifdef MPFR_SQRHIGH_TAB_SIZE
+static short sqrhigh_ktab[MPFR_SQRHIGH_TAB_SIZE];
+#else
+static short sqrhigh_ktab[] = {MPFR_SQRHIGH_TAB};
+#define MPFR_SQRHIGH_TAB_SIZE (sizeof(sqrhigh_ktab) / sizeof(sqrhigh_ktab[0]))
+#endif
+
+/* Put in rp[n..2n-1] an approximation of the n high limbs of {np, n}^2.
+   Strategy selection mirrors mpfr_mulhigh_n, driven by sqrhigh_ktab. */
+void
+mpfr_sqrhigh_n (mp_ptr rp, mp_srcptr np, mp_size_t n)
+{
+  mp_size_t k;
+
+  MPFR_ASSERTD (MPFR_SQRHIGH_TAB_SIZE > 4);
+  k = MPFR_LIKELY (n < MPFR_SQRHIGH_TAB_SIZE) ? sqrhigh_ktab[n] : 2*n/3;
+  MPFR_ASSERTD (k == -1 || k == 0 || (k > n/2 && k < n));
+  if (k < 0)
+    /* we can't use mpn_sqr_basecase here, since it requires
+       n <= SQR_KARATSUBA_THRESHOLD, where SQR_KARATSUBA_THRESHOLD
+       is not exported by GMP */
+    mpn_sqr_n (rp, np, n);
+  else if (k == 0)
+    mpfr_mulhigh_n_basecase (rp, np, np, n); /* quadratic short square */
+  else
+    {
+      mp_size_t l = n - k;
+      mp_limb_t cy;
+
+      mpn_sqr_n (rp + 2 * l, np + l, k); /* fills rp[2l..2n-1] */
+      /* by symmetry of squaring, the single cross product below must be
+         counted twice, hence the two additions */
+      mpfr_mulhigh_n (rp, np, np + k, l); /* fills rp[l-1..2l-1] */
+      /* FIXME: maybe shift by 2 is a better idea but it has to handle carry*/
+      cy = mpn_add_n (rp + n - 1, rp + n - 1, rp + l - 1, l + 1);
+      cy += mpn_add_n (rp + n - 1, rp + n - 1, rp + l - 1, l + 1);
+      mpn_add_1 (rp + n + l, rp + n + l, k, cy); /* propagate carry */
+    }
+}
diff --git a/src/neg.c b/src/neg.c
new file mode 100644
index 000000000..f45922de1
--- /dev/null
+++ b/src/neg.c
@@ -0,0 +1,39 @@
+/* mpfr_neg -- change the sign of a floating-point number
+
+Copyright 1999, 2000, 2001, 2004, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#include "mpfr-impl.h"
+
+int
+mpfr_neg (mpfr_ptr a, mpfr_srcptr b, mpfr_rnd_t rnd_mode)
+{
+  /* In-place negation: just flip the sign bit, the significand is
+     untouched, so the operation is always exact. */
+  if (MPFR_LIKELY (a == b))
+    {
+      if (MPFR_UNLIKELY (MPFR_IS_NAN (b)))
+        {
+          /* -NaN stays NaN: only the NaN flag/ternary is signalled. */
+          MPFR_RET_NAN;
+        }
+      MPFR_CHANGE_SIGN (a);
+      MPFR_RET (0);
+    }
+
+  /* Distinct operands: delegate to mpfr_set4, which copies b into a
+     with the opposite sign under the given rounding mode. */
+  return mpfr_set4 (a, b, rnd_mode, -MPFR_SIGN (b));
+}
diff --git a/src/next.c b/src/next.c
new file mode 100644
index 000000000..79099b750
--- /dev/null
+++ b/src/next.c
@@ -0,0 +1,150 @@
+/* mpfr_nextabove, mpfr_nextbelow, mpfr_nexttoward -- next representable
+floating-point number
+
+Copyright 1999, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#include "mpfr-impl.h"
+
+/* Replace x by the next representable number toward zero.
+   x is assumed not to be NaN. */
+void
+mpfr_nexttozero (mpfr_ptr x)
+{
+  if (MPFR_UNLIKELY(MPFR_IS_INF(x)))
+    {
+      /* +/-Inf -> largest finite number (same sign, exponent emax) */
+      mpfr_setmax (x, __gmpfr_emax);
+      return;
+    }
+  else if (MPFR_UNLIKELY( MPFR_IS_ZERO(x) ))
+    {
+      /* crossing zero: flip the sign and go to the smallest magnitude
+         on the other side */
+      MPFR_CHANGE_SIGN(x);
+      mpfr_setmin (x, __gmpfr_emin);
+    }
+  else
+    {
+      mp_size_t xn;
+      int sh;
+      mp_limb_t *xp;
+
+      xn = MPFR_LIMB_SIZE (x);
+      MPFR_UNSIGNED_MINUS_MODULO (sh, MPFR_PREC(x));
+      xp = MPFR_MANT(x);
+      /* subtract one ulp; the low limb keeps sh unused (zero) bits */
+      mpn_sub_1 (xp, xp, xn, MPFR_LIMB_ONE << sh);
+      if (MPFR_UNLIKELY( MPFR_LIMB_MSB(xp[xn-1]) == 0) )
+        { /* was an exact power of two: not normalized any more */
+          mpfr_exp_t exp = MPFR_EXP (x);
+          if (MPFR_UNLIKELY(exp == __gmpfr_emin))
+            MPFR_SET_ZERO(x);
+          else
+            {
+              mp_size_t i;
+              /* renormalize: decrement the exponent and fill the
+                 significand with all ones */
+              MPFR_SET_EXP (x, exp - 1);
+              xp[0] = MP_LIMB_T_MAX << sh;
+              for (i = 1; i < xn; i++)
+                xp[i] = MP_LIMB_T_MAX;
+            }
+        }
+    }
+}
+
+/* Replace x by the next representable number away from zero.
+   x is assumed not to be NaN. */
+void
+mpfr_nexttoinf (mpfr_ptr x)
+{
+  if (MPFR_UNLIKELY(MPFR_IS_INF(x)))
+    return; /* nothing beyond infinity */
+  else if (MPFR_UNLIKELY(MPFR_IS_ZERO(x)))
+    mpfr_setmin (x, __gmpfr_emin); /* zero -> smallest nonzero magnitude */
+  else
+    {
+      mp_size_t xn;
+      int sh;
+      mp_limb_t *xp;
+
+      xn = MPFR_LIMB_SIZE (x);
+      MPFR_UNSIGNED_MINUS_MODULO (sh, MPFR_PREC(x));
+      xp = MPFR_MANT(x);
+      /* add one ulp; a carry out means the significand was all ones */
+      if (MPFR_UNLIKELY( mpn_add_1 (xp, xp, xn, MPFR_LIMB_ONE << sh)) )
+        /* got 1.0000... */
+        {
+          mpfr_exp_t exp = MPFR_EXP (x);
+          if (MPFR_UNLIKELY(exp == __gmpfr_emax))
+            MPFR_SET_INF(x);
+          else
+            {
+              /* renormalize: bump the exponent and set only the
+                 leading bit of the significand */
+              MPFR_SET_EXP (x, exp + 1);
+              xp[xn-1] = MPFR_LIMB_HIGHBIT;
+            }
+        }
+    }
+}
+
+/* Replace x by the next representable number toward +Inf. */
+void
+mpfr_nextabove (mpfr_ptr x)
+{
+  if (MPFR_UNLIKELY (MPFR_IS_NAN (x)))
+    {
+      /* NaN has no neighbour: only raise the NaN flag. */
+      __gmpfr_flags |= MPFR_FLAGS_NAN;
+      return;
+    }
+
+  /* Toward +Inf means away from zero for positive x,
+     toward zero for negative x. */
+  if (MPFR_IS_POS (x))
+    mpfr_nexttoinf (x);
+  else
+    mpfr_nexttozero (x);
+}
+
+/* Replace x by the next representable number toward -Inf. */
+void
+mpfr_nextbelow (mpfr_ptr x)
+{
+  if (MPFR_UNLIKELY (MPFR_IS_NAN (x)))
+    {
+      /* NaN has no neighbour: only raise the NaN flag. */
+      __gmpfr_flags |= MPFR_FLAGS_NAN;
+      return;
+    }
+
+  /* Toward -Inf means toward zero for positive x,
+     away from zero for negative x. */
+  if (MPFR_IS_POS (x))
+    mpfr_nexttozero (x);
+  else
+    mpfr_nexttoinf (x);
+}
+
+/* Replace x by the next representable number toward y.
+   If x or y is NaN, x is (or becomes) NaN and the NaN flag is raised;
+   if x == y, x is left unchanged. */
+void
+mpfr_nexttoward (mpfr_ptr x, mpfr_srcptr y)
+{
+  int s;
+
+  if (MPFR_UNLIKELY(MPFR_IS_NAN(x)))
+    {
+      /* x is already NaN: keep it, just raise the NaN flag. */
+      __gmpfr_flags |= MPFR_FLAGS_NAN;
+      return;
+    }
+  else if (MPFR_UNLIKELY(MPFR_IS_NAN(y)))
+    {
+      /* The previous branch already returned for NaN x, so only y
+         needs to be tested here (the former "MPFR_IS_NAN(x) || ..."
+         test contained dead code). */
+      MPFR_SET_NAN(x);
+      __gmpfr_flags |= MPFR_FLAGS_NAN;
+      return;
+    }
+
+  s = mpfr_cmp (x, y);
+  if (s == 0)
+    return;
+  else if (s < 0)
+    mpfr_nextabove (x);
+  else
+    mpfr_nextbelow (x);
+}
diff --git a/src/out_str.c b/src/out_str.c
new file mode 100644
index 000000000..cf1941a55
--- /dev/null
+++ b/src/out_str.c
@@ -0,0 +1,98 @@
+/* mpfr_out_str -- output a floating-point number to a stream
+
+Copyright 1999, 2001, 2002, 2004, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#include "mpfr-impl.h"
+
+/* Warning! S should not contain "%" (it is passed to fprintf as the
+   format string). */
+#define OUT_STR_RET(S)                          \
+  do                                            \
+    {                                           \
+      int r;                                    \
+      r = fprintf (stream, (S));                \
+      return r < 0 ? 0 : r;                     \
+    }                                           \
+  while (0)
+
+/* Output op on stream in base 'base' with n_digits significant digits
+   (n_digits is forwarded to mpfr_get_str; 0 lets it choose -- see its
+   documentation), rounded in rnd_mode.  Returns the number of
+   characters written, or 0 on write error. */
+size_t
+mpfr_out_str (FILE *stream, int base, size_t n_digits, mpfr_srcptr op,
+              mpfr_rnd_t rnd_mode)
+{
+  char *s, *s0;
+  size_t l;
+  mpfr_exp_t e;
+  int err;
+
+  MPFR_ASSERTN (base >= 2 && base <= 62);
+
+  /* when stream=NULL, output to stdout */
+  if (stream == NULL)
+    stream = stdout;
+
+  if (MPFR_UNLIKELY (MPFR_IS_SINGULAR (op)))
+    {
+      if (MPFR_IS_NAN (op))
+        OUT_STR_RET ("@NaN@");
+      else if (MPFR_IS_INF (op))
+        OUT_STR_RET (MPFR_IS_POS (op) ? "@Inf@" : "-@Inf@");
+      else
+        {
+          MPFR_ASSERTD (MPFR_IS_ZERO (op));
+          OUT_STR_RET (MPFR_IS_POS (op) ? "0" : "-0");
+        }
+    }
+
+  s = mpfr_get_str (NULL, &e, base, n_digits, op, rnd_mode);
+
+  s0 = s;
+  /* for op=3.1416 we have s = "31416" and e = 1 */
+
+  l = strlen (s) + 1; /* size of allocated block returned by mpfr_get_str
+                         - may be incorrect, as only an upper bound? */
+
+  /* outputs possible sign and significand; note that by short-circuit
+     evaluation each fputc/fputs runs only if the previous one
+     succeeded, and the sign is printed only when present */
+  err = (*s == '-' && fputc (*s++, stream) == EOF)
+    || fputc (*s++, stream) == EOF  /* leading digit */
+    || fputc ((unsigned char) MPFR_DECIMAL_POINT, stream) == EOF
+    || fputs (s, stream) == EOF;     /* trailing significand */
+  (*__gmp_free_func) (s0, l);
+  if (MPFR_UNLIKELY (err))
+    return 0;
+
+  e--; /* due to the leading digit */
+
+  /* outputs exponent */
+  if (e)
+    {
+      int r;
+
+      MPFR_ASSERTN(e >= LONG_MIN);
+      MPFR_ASSERTN(e <= LONG_MAX);
+
+      r = fprintf (stream, (base <= 10 ? "e%ld" : "@%ld"), (long) e);
+      if (MPFR_UNLIKELY (r < 0))
+        return 0;
+
+      l += r;
+    }
+
+  return l;
+}
diff --git a/src/pow.c b/src/pow.c
new file mode 100644
index 000000000..ac806ac63
--- /dev/null
+++ b/src/pow.c
@@ -0,0 +1,675 @@
+/* mpfr_pow -- power function x^y
+
+Copyright 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#define MPFR_NEED_LONGLONG_H
+#include "mpfr-impl.h"
+
+/* return non zero iff x^y is exact.
+ Assumes x and y are ordinary numbers,
+ y is not an integer, x is not a power of 2 and x is positive
+
+ If x^y is exact, it computes it and sets *inexact.
+*/
+static int
+mpfr_pow_is_exact (mpfr_ptr z, mpfr_srcptr x, mpfr_srcptr y,
+                   mpfr_rnd_t rnd_mode, int *inexact)
+{
+  mpz_t a, c;
+  mpfr_exp_t d, b;
+  unsigned long i;
+  int res;
+
+  MPFR_ASSERTD (!MPFR_IS_SINGULAR (y));
+  MPFR_ASSERTD (!MPFR_IS_SINGULAR (x));
+  MPFR_ASSERTD (!mpfr_integer_p (y));
+  MPFR_ASSERTD (mpfr_cmp_si_2exp (x, MPFR_INT_SIGN (x),
+                                  MPFR_GET_EXP (x) - 1) != 0);
+  MPFR_ASSERTD (MPFR_IS_POS (x));
+
+  if (MPFR_IS_NEG (y))
+    return 0; /* x is not a power of two => x^-y is not exact */
+
+  /* compute d such that y = c*2^d with c odd integer */
+  mpz_init (c);
+  d = mpfr_get_z_2exp (c, y);
+  i = mpz_scan1 (c, 0);
+  mpz_fdiv_q_2exp (c, c, i);
+  d += i;
+  /* now y=c*2^d with c odd */
+  /* Since y is not an integer, d is necessarily < 0 */
+  MPFR_ASSERTD (d < 0);
+
+  /* Compute a,b such that x=a*2^b */
+  mpz_init (a);
+  b = mpfr_get_z_2exp (a, x);
+  i = mpz_scan1 (a, 0);
+  mpz_fdiv_q_2exp (a, a, i);
+  b += i;
+  /* now x=a*2^b with a is odd */
+
+  /* Take -d successive square roots of a*2^b; if any of them fails to
+     be exact, x^y cannot be exact. */
+  for (res = 1 ; d != 0 ; d++)
+    {
+      /* a*2^b is a square iff
+            (i)  a is a square when b is even
+            (ii) 2*a is a square when b is odd */
+      if (b % 2 != 0)
+        {
+          mpz_mul_2exp (a, a, 1); /* 2*a */
+          b --;
+        }
+      MPFR_ASSERTD ((b % 2) == 0);
+      if (!mpz_perfect_square_p (a))
+        {
+          res = 0;
+          goto end;
+        }
+      mpz_sqrt (a, a);
+      b = b / 2;
+    }
+  /* Now x = (a'*2^b')^(2^-d) with d < 0
+     so x^y = ((a'*2^b')^(2^-d))^(c*2^d)
+            = ((a'*2^b')^c with c odd integer */
+  {
+    mpfr_t tmp;
+    mpfr_prec_t p;
+    MPFR_MPZ_SIZEINBASE2 (p, a);
+    mpfr_init2 (tmp, p); /* prec = 1 should not be possible */
+    /* a and b fit exactly, so the two settings below are exact */
+    res = mpfr_set_z (tmp, a, MPFR_RNDN);
+    MPFR_ASSERTD (res == 0);
+    res = mpfr_mul_2si (tmp, tmp, b, MPFR_RNDN);
+    MPFR_ASSERTD (res == 0);
+    *inexact = mpfr_pow_z (z, tmp, c, rnd_mode);
+    mpfr_clear (tmp);
+    res = 1;
+  }
+ end:
+  mpz_clear (a);
+  mpz_clear (c);
+  return res;
+}
+
+/* Return 1 if y is an odd integer, 0 otherwise. */
+static int
+is_odd (mpfr_srcptr y)
+{
+  mpfr_exp_t expo;
+  mpfr_prec_t prec;
+  mp_size_t yn;
+  mp_limb_t *yp;
+
+  /* NAN, INF or ZERO are not allowed */
+  MPFR_ASSERTD (!MPFR_IS_SINGULAR (y));
+
+  expo = MPFR_GET_EXP (y);
+  if (expo <= 0)
+    return 0; /* |y| < 1 and not 0 */
+
+  prec = MPFR_PREC(y);
+  if ((mpfr_prec_t) expo > prec)
+    return 0; /* y is a multiple of 2^(expo-prec), thus not odd */
+
+  /* 0 < expo <= prec:
+     y = 1xxxxxxxxxt.zzzzzzzzzzzzzzzzzz[000]
+          expo bits   (prec-expo) bits
+
+     We have to check that:
+     (a) the bit 't' is set
+     (b) all the 'z' bits are zero
+   */
+
+  prec = ((prec - 1) / GMP_NUMB_BITS + 1) * GMP_NUMB_BITS - expo;
+  /* number of z+0 bits */
+
+  yn = prec / GMP_NUMB_BITS;
+  MPFR_ASSERTN(yn >= 0);
+  /* yn is the index of limb containing the 't' bit */
+
+  yp = MPFR_MANT(y);
+  /* if expo is a multiple of GMP_NUMB_BITS, t is bit 0 */
+  if (expo % GMP_NUMB_BITS == 0 ? (yp[yn] & 1) == 0
+      : yp[yn] << ((expo % GMP_NUMB_BITS) - 1) != MPFR_LIMB_HIGHBIT)
+    return 0;
+  /* the unit bit 't' is set; all lower limbs must be zero for y to be
+     an (odd) integer */
+  while (--yn >= 0)
+    if (yp[yn] != 0)
+      return 0;
+  return 1;
+}
+
+/* Assumes that the exponent range has already been extended and if y is
+ an integer, then the result is not exact in unbounded exponent range. */
+int
+mpfr_pow_general (mpfr_ptr z, mpfr_srcptr x, mpfr_srcptr y,
+                  mpfr_rnd_t rnd_mode, int y_is_integer, mpfr_save_expo_t *expo)
+{
+  mpfr_t t, u, k, absx;
+  int k_non_zero = 0; /* non-zero iff we rescale: t = y*ln|x| - k*ln(2),
+                         and the final result is multiplied back by 2^k */
+  int check_exact_case = 0;
+  int inexact;
+  /* Declaration of the size variable */
+  mpfr_prec_t Nz = MPFR_PREC(z);               /* target precision */
+  mpfr_prec_t Nt;                              /* working precision */
+  mpfr_exp_t err;                              /* error */
+  MPFR_ZIV_DECL (ziv_loop);
+
+
+  MPFR_LOG_FUNC (("x[%#R]=%R y[%#R]=%R rnd=%d", x, x, y, y, rnd_mode),
+                 ("z[%#R]=%R inexact=%d", z, z, inexact));
+
+  /* We put the absolute value of x in absx, pointing to the significand
+     of x to avoid allocating memory for the significand of absx. */
+  MPFR_ALIAS(absx, x, /*sign=*/ 1, /*EXP=*/ MPFR_EXP(x));
+
+  /* We will compute the absolute value of the result. So, let's
+     invert the rounding mode if the result is negative. */
+  if (MPFR_IS_NEG (x) && is_odd (y))
+    rnd_mode = MPFR_INVERT_RND (rnd_mode);
+
+  /* compute the precision of intermediary variable */
+  /* the optimal number of bits : see algorithms.tex */
+  Nt = Nz + 5 + MPFR_INT_CEIL_LOG2 (Nz);
+
+  /* initialise of intermediary variable */
+  mpfr_init2 (t, Nt);
+
+  /* Ziv loop: raise the working precision until the approximation of
+     exp(y*ln|x|) can be rounded correctly to Nz bits. */
+  MPFR_ZIV_INIT (ziv_loop, Nt);
+  for (;;)
+    {
+      MPFR_BLOCK_DECL (flags1);
+
+      /* compute exp(y*ln|x|), using MPFR_RNDU to get an upper bound, so
+         that we can detect underflows. */
+      mpfr_log (t, absx, MPFR_IS_NEG (y) ? MPFR_RNDD : MPFR_RNDU); /* ln|x| */
+      mpfr_mul (t, y, t, MPFR_RNDU);                              /* y*ln|x| */
+      if (k_non_zero)
+        {
+          MPFR_LOG_MSG (("subtract k * ln(2)\n", 0));
+          mpfr_const_log2 (u, MPFR_RNDD);
+          mpfr_mul (u, u, k, MPFR_RNDD);
+          /* Error on u = k * log(2): < k * 2^(-Nt) < 1. */
+          mpfr_sub (t, t, u, MPFR_RNDU);
+          MPFR_LOG_MSG (("t = y * ln|x| - k * ln(2)\n", 0));
+          MPFR_LOG_VAR (t);
+        }
+      /* estimate of the error -- see pow function in algorithms.tex.
+         The error on t is at most 1/2 + 3*2^(EXP(t)+1) ulps, which is
+         <= 2^(EXP(t)+3) for EXP(t) >= -1, and <= 2 ulps for EXP(t) <= -2.
+         Additional error if k_no_zero: treal = t * errk, with
+         1 - |k| * 2^(-Nt) <= exp(-|k| * 2^(-Nt)) <= errk <= 1,
+         i.e., additional absolute error <= 2^(EXP(k)+EXP(t)-Nt).
+         Total error <= 2^err1 + 2^err2 <= 2^(max(err1,err2)+1). */
+      err = MPFR_NOTZERO (t) && MPFR_GET_EXP (t) >= -1 ?
+        MPFR_GET_EXP (t) + 3 : 1;
+      if (k_non_zero)
+        {
+          if (MPFR_GET_EXP (k) > err)
+            err = MPFR_GET_EXP (k);
+          err++;
+        }
+      MPFR_BLOCK (flags1, mpfr_exp (t, t, MPFR_RNDN));  /* exp(y*ln|x|)*/
+      /* We need to test */
+      if (MPFR_UNLIKELY (MPFR_IS_SINGULAR (t) || MPFR_UNDERFLOW (flags1)))
+        {
+          mpfr_prec_t Ntmin;
+          MPFR_BLOCK_DECL (flags2);
+
+          MPFR_ASSERTN (!k_non_zero);
+          MPFR_ASSERTN (!MPFR_IS_NAN (t));
+
+          /* Real underflow? */
+          if (MPFR_IS_ZERO (t))
+            {
+              /* Underflow. We computed rndn(exp(t)), where t >= y*ln|x|.
+                 Therefore rndn(|x|^y) = 0, and we have a real underflow on
+                 |x|^y. */
+              inexact = mpfr_underflow (z, rnd_mode == MPFR_RNDN ? MPFR_RNDZ
+                                        : rnd_mode, MPFR_SIGN_POS);
+              if (expo != NULL)
+                MPFR_SAVE_EXPO_UPDATE_FLAGS (*expo, MPFR_FLAGS_INEXACT
+                                             | MPFR_FLAGS_UNDERFLOW);
+              break;
+            }
+
+          /* Real overflow? */
+          if (MPFR_IS_INF (t))
+            {
+              /* Note: we can probably use a low precision for this test. */
+              mpfr_log (t, absx, MPFR_IS_NEG (y) ? MPFR_RNDU : MPFR_RNDD);
+              mpfr_mul (t, y, t, MPFR_RNDD);            /* y * ln|x| */
+              MPFR_BLOCK (flags2, mpfr_exp (t, t, MPFR_RNDD));
+              /* t = lower bound on exp(y * ln|x|) */
+              if (MPFR_OVERFLOW (flags2))
+                {
+                  /* We have computed a lower bound on |x|^y, and it
+                     overflowed. Therefore we have a real overflow
+                     on |x|^y. */
+                  inexact = mpfr_overflow (z, rnd_mode, MPFR_SIGN_POS);
+                  if (expo != NULL)
+                    MPFR_SAVE_EXPO_UPDATE_FLAGS (*expo, MPFR_FLAGS_INEXACT
+                                                 | MPFR_FLAGS_OVERFLOW);
+                  break;
+                }
+            }
+
+          /* Spurious underflow/overflow in exp(): retry with the
+             argument reduced by k = round(y*log2|x|) powers of two. */
+          k_non_zero = 1;
+          Ntmin = sizeof(mpfr_exp_t) * CHAR_BIT;
+          if (Ntmin > Nt)
+            {
+              Nt = Ntmin;
+              mpfr_set_prec (t, Nt);
+            }
+          mpfr_init2 (u, Nt);
+          mpfr_init2 (k, Ntmin);
+          mpfr_log2 (k, absx, MPFR_RNDN);
+          mpfr_mul (k, y, k, MPFR_RNDN);
+          mpfr_round (k, k);
+          MPFR_LOG_VAR (k);
+          /* |y| < 2^Ntmin, therefore |k| < 2^Nt. */
+          continue;
+        }
+      if (MPFR_LIKELY (MPFR_CAN_ROUND (t, Nt - err, Nz, rnd_mode)))
+        {
+          inexact = mpfr_set (z, t, rnd_mode);
+          break;
+        }
+
+      /* check exact power, except when y is an integer (since the
+         exact cases for y integer have already been filtered out) */
+      if (check_exact_case == 0 && ! y_is_integer)
+        {
+          if (mpfr_pow_is_exact (z, absx, y, rnd_mode, &inexact))
+            break;
+          check_exact_case = 1;
+        }
+
+      /* reactualisation of the precision */
+      MPFR_ZIV_NEXT (ziv_loop, Nt);
+      mpfr_set_prec (t, Nt);
+      if (k_non_zero)
+        mpfr_set_prec (u, Nt);
+    }
+  MPFR_ZIV_FREE (ziv_loop);
+
+  if (k_non_zero)
+    {
+      int inex2;
+      long lk;
+
+      /* The rounded result in an unbounded exponent range is z * 2^k. As
+       * MPFR chooses underflow after rounding, the mpfr_mul_2si below will
+       * correctly detect underflows and overflows. However, in rounding to
+       * nearest, if z * 2^k = 2^(emin - 2), then the double rounding may
+       * affect the result. We need to cope with that before overwriting z.
+       * If inexact >= 0, then the real result is <= 2^(emin - 2), so that
+       * o(2^(emin - 2)) = +0 is correct. If inexact < 0, then the real
+       * result is > 2^(emin - 2) and we need to round to 2^(emin - 1).
+       */
+      MPFR_ASSERTN (MPFR_EMAX_MAX <= LONG_MAX);
+      lk = mpfr_get_si (k, MPFR_RNDN);
+      if (rnd_mode == MPFR_RNDN && inexact < 0 &&
+          MPFR_GET_EXP (z) + lk == __gmpfr_emin - 1 && mpfr_powerof2_raw (z))
+        {
+          /* Rounding to nearest, real result > z * 2^k = 2^(emin - 2),
+           * underflow case: as the minimum precision is > 1, we will
+           * obtain the correct result and exceptions by replacing z by
+           * nextabove(z).
+           */
+          MPFR_ASSERTN (MPFR_PREC_MIN > 1);
+          mpfr_nextabove (z);
+        }
+      mpfr_clear_flags ();
+      inex2 = mpfr_mul_2si (z, z, lk, rnd_mode);
+      if (inex2)  /* underflow or overflow */
+        {
+          inexact = inex2;
+          if (expo != NULL)
+            MPFR_SAVE_EXPO_UPDATE_FLAGS (*expo, __gmpfr_flags);
+        }
+      mpfr_clears (u, k, (mpfr_ptr) 0);
+    }
+  mpfr_clear (t);
+
+  /* update the sign of the result if x was negative */
+  if (MPFR_IS_NEG (x) && is_odd (y))
+    {
+      MPFR_SET_NEG(z);
+      inexact = -inexact;
+    }
+
+  return inexact;
+}
+
+/* The computation of z = pow(x,y) is done by
+ z = exp(y * log(x)) = x^y
+ For the special cases, see Section F.9.4.4 of the C standard:
+ _ pow(±0, y) = ±inf for y an odd integer < 0.
+ _ pow(±0, y) = +inf for y < 0 and not an odd integer.
+ _ pow(±0, y) = ±0 for y an odd integer > 0.
+ _ pow(±0, y) = +0 for y > 0 and not an odd integer.
+ _ pow(-1, ±inf) = 1.
+ _ pow(+1, y) = 1 for any y, even a NaN.
+ _ pow(x, ±0) = 1 for any x, even a NaN.
+ _ pow(x, y) = NaN for finite x < 0 and finite non-integer y.
+ _ pow(x, -inf) = +inf for |x| < 1.
+ _ pow(x, -inf) = +0 for |x| > 1.
+ _ pow(x, +inf) = +0 for |x| < 1.
+ _ pow(x, +inf) = +inf for |x| > 1.
+ _ pow(-inf, y) = -0 for y an odd integer < 0.
+ _ pow(-inf, y) = +0 for y < 0 and not an odd integer.
+ _ pow(-inf, y) = -inf for y an odd integer > 0.
+ _ pow(-inf, y) = +inf for y > 0 and not an odd integer.
+ _ pow(+inf, y) = +0 for y < 0.
+ _ pow(+inf, y) = +inf for y > 0. */
+int
+mpfr_pow (mpfr_ptr z, mpfr_srcptr x, mpfr_srcptr y, mpfr_rnd_t rnd_mode)
+{
+  int inexact;
+  int cmp_x_1;
+  int y_is_integer;
+  MPFR_SAVE_EXPO_DECL (expo);
+
+  MPFR_LOG_FUNC (("x[%#R]=%R y[%#R]=%R rnd=%d", x, x, y, y, rnd_mode),
+                 ("z[%#R]=%R inexact=%d", z, z, inexact));
+
+  /* Singular operands: implement the NaN/Inf/zero table above. */
+  if (MPFR_ARE_SINGULAR (x, y))
+    {
+      /* pow(x, 0) returns 1 for any x, even a NaN. */
+      if (MPFR_UNLIKELY (MPFR_IS_ZERO (y)))
+        return mpfr_set_ui (z, 1, rnd_mode);
+      else if (MPFR_IS_NAN (x))
+        {
+          MPFR_SET_NAN (z);
+          MPFR_RET_NAN;
+        }
+      else if (MPFR_IS_NAN (y))
+        {
+          /* pow(+1, NaN) returns 1. */
+          if (mpfr_cmp_ui (x, 1) == 0)
+            return mpfr_set_ui (z, 1, rnd_mode);
+          MPFR_SET_NAN (z);
+          MPFR_RET_NAN;
+        }
+      else if (MPFR_IS_INF (y))
+        {
+          if (MPFR_IS_INF (x))
+            {
+              if (MPFR_IS_POS (y))
+                MPFR_SET_INF (z);
+              else
+                MPFR_SET_ZERO (z);
+              MPFR_SET_POS (z);
+              MPFR_RET (0);
+            }
+          else
+            {
+              /* cmp > 0 iff |x| > 1 and y = +Inf, or |x| < 1 and y = -Inf */
+              int cmp;
+              cmp = mpfr_cmpabs (x, __gmpfr_one) * MPFR_INT_SIGN (y);
+              MPFR_SET_POS (z);
+              if (cmp > 0)
+                {
+                  /* Return +inf. */
+                  MPFR_SET_INF (z);
+                  MPFR_RET (0);
+                }
+              else if (cmp < 0)
+                {
+                  /* Return +0. */
+                  MPFR_SET_ZERO (z);
+                  MPFR_RET (0);
+                }
+              else
+                {
+                  /* Return 1. */
+                  return mpfr_set_ui (z, 1, rnd_mode);
+                }
+            }
+        }
+      else if (MPFR_IS_INF (x))
+        {
+          int negative;
+          /* Determine the sign now, in case y and z are the same object */
+          negative = MPFR_IS_NEG (x) && is_odd (y);
+          if (MPFR_IS_POS (y))
+            MPFR_SET_INF (z);
+          else
+            MPFR_SET_ZERO (z);
+          if (negative)
+            MPFR_SET_NEG (z);
+          else
+            MPFR_SET_POS (z);
+          MPFR_RET (0);
+        }
+      else
+        {
+          int negative;
+          MPFR_ASSERTD (MPFR_IS_ZERO (x));
+          /* Determine the sign now, in case y and z are the same object */
+          negative = MPFR_IS_NEG(x) && is_odd (y);
+          if (MPFR_IS_NEG (y))
+            MPFR_SET_INF (z);
+          else
+            MPFR_SET_ZERO (z);
+          if (negative)
+            MPFR_SET_NEG (z);
+          else
+            MPFR_SET_POS (z);
+          MPFR_RET (0);
+        }
+    }
+
+  /* x^y for x < 0 and y not an integer is not defined */
+  y_is_integer = mpfr_integer_p (y);
+  if (MPFR_IS_NEG (x) && ! y_is_integer)
+    {
+      MPFR_SET_NAN (z);
+      MPFR_RET_NAN;
+    }
+
+  /* now the result cannot be NaN:
+     (1) either x > 0
+     (2) or x < 0 and y is an integer */
+
+  cmp_x_1 = mpfr_cmpabs (x, __gmpfr_one);
+  if (cmp_x_1 == 0)
+    return mpfr_set_si (z, MPFR_IS_NEG (x) && is_odd (y) ? -1 : 1, rnd_mode);
+
+  /* now we have:
+     (1) either x > 0
+     (2) or x < 0 and y is an integer
+     and in addition |x| <> 1.
+  */
+
+  /* detect overflow: an overflow is possible if
+     (a) |x| > 1 and y > 0
+     (b) |x| < 1 and y < 0.
+     FIXME: this assumes 1 is always representable.
+
+     FIXME2: maybe we can test overflow and underflow simultaneously.
+     The idea is the following: first compute an approximation to
+     y * log2|x|, using rounding to nearest. If |x| is not too near from 1,
+     this approximation should be accurate enough, and in most cases enable
+     one to prove that there is no underflow nor overflow.
+     Otherwise, it should enable one to check only underflow or overflow,
+     instead of both cases as in the present case.
+  */
+  if (cmp_x_1 * MPFR_SIGN (y) > 0)
+    {
+      mpfr_t t;
+      int negative, overflow;
+
+      MPFR_SAVE_EXPO_MARK (expo);
+      mpfr_init2 (t, 53);
+      /* we want a lower bound on y*log2|x|:
+         (i) if x > 0, it suffices to round log2(x) toward zero, and
+         to round y*o(log2(x)) toward zero too;
+         (ii) if x < 0, we first compute t = o(-x), with rounding toward 1,
+         and then follow as in case (1). */
+      if (MPFR_SIGN (x) > 0)
+        mpfr_log2 (t, x, MPFR_RNDZ);
+      else
+        {
+          mpfr_neg (t, x, (cmp_x_1 > 0) ? MPFR_RNDZ : MPFR_RNDU);
+          mpfr_log2 (t, t, MPFR_RNDZ);
+        }
+      mpfr_mul (t, t, y, MPFR_RNDZ);
+      overflow = mpfr_cmp_si (t, __gmpfr_emax) > 0;
+      mpfr_clear (t);
+      MPFR_SAVE_EXPO_FREE (expo);
+      if (overflow)
+        {
+          MPFR_LOG_MSG (("early overflow detection\n", 0));
+          negative = MPFR_SIGN(x) < 0 && is_odd (y);
+          return mpfr_overflow (z, rnd_mode, negative ? -1 : 1);
+        }
+    }
+
+  /* Basic underflow checking. One has:
+   * - if y > 0, |x^y| < 2^(EXP(x) * y);
+   * - if y < 0, |x^y| <= 2^((EXP(x) - 1) * y);
+   * so that one can compute a value ebound such that |x^y| < 2^ebound.
+   * If we have ebound <= emin - 2 (emin - 1 in directed rounding modes),
+   * then there is an underflow and we can decide the return value.
+   */
+  if (MPFR_IS_NEG (y) ? (MPFR_GET_EXP (x) > 1) : (MPFR_GET_EXP (x) < 0))
+    {
+      mpfr_t tmp;
+      mpfr_eexp_t ebound;
+      int inex2;
+
+      /* We must restore the flags. */
+      MPFR_SAVE_EXPO_MARK (expo);
+      mpfr_init2 (tmp, sizeof (mpfr_exp_t) * CHAR_BIT);
+      inex2 = mpfr_set_exp_t (tmp, MPFR_GET_EXP (x), MPFR_RNDN);
+      MPFR_ASSERTN (inex2 == 0);
+      if (MPFR_IS_NEG (y))
+        {
+          inex2 = mpfr_sub_ui (tmp, tmp, 1, MPFR_RNDN);
+          MPFR_ASSERTN (inex2 == 0);
+        }
+      mpfr_mul (tmp, tmp, y, MPFR_RNDU);
+      if (MPFR_IS_NEG (y))
+        mpfr_nextabove (tmp);
+      /* tmp doesn't necessarily fit in ebound, but that doesn't matter
+         since we get the minimum value in such a case. */
+      ebound = mpfr_get_exp_t (tmp, MPFR_RNDU);
+      mpfr_clear (tmp);
+      MPFR_SAVE_EXPO_FREE (expo);
+      if (MPFR_UNLIKELY (ebound <=
+                         __gmpfr_emin - (rnd_mode == MPFR_RNDN ? 2 : 1)))
+        {
+          /* warning: mpfr_underflow rounds away from 0 for MPFR_RNDN */
+          MPFR_LOG_MSG (("early underflow detection\n", 0));
+          return mpfr_underflow (z,
+                                 rnd_mode == MPFR_RNDN ? MPFR_RNDZ : rnd_mode,
+                                 MPFR_SIGN (x) < 0 && is_odd (y) ? -1 : 1);
+        }
+    }
+
+  /* If y is an integer, we can use mpfr_pow_z (based on multiplications),
+     but if y is very large (I'm not sure about the best threshold -- VL),
+     we shouldn't use it, as it can be very slow and take a lot of memory
+     (and even crash or make other programs crash, as several hundred of
+     MBs may be necessary). Note that in such a case, either x = +/-2^b
+     (this case is handled below) or x^y cannot be represented exactly in
+     any precision supported by MPFR (the general case uses this property).
+  */
+  if (y_is_integer && (MPFR_GET_EXP (y) <= 256))
+    {
+      mpz_t zi;
+
+      MPFR_LOG_MSG (("special code for y not too large integer\n", 0));
+      mpz_init (zi);
+      mpfr_get_z (zi, y, MPFR_RNDN);
+      inexact = mpfr_pow_z (z, x, zi, rnd_mode);
+      mpz_clear (zi);
+      return inexact;
+    }
+
+  /* Special case (+/-2^b)^Y which could be exact. If x is negative, then
+     necessarily y is a large integer. */
+  {
+    mpfr_exp_t b = MPFR_GET_EXP (x) - 1;
+
+    MPFR_ASSERTN (b >= LONG_MIN && b <= LONG_MAX);  /* FIXME... */
+    if (mpfr_cmp_si_2exp (x, MPFR_SIGN(x), b) == 0)
+      {
+        mpfr_t tmp;
+        int sgnx = MPFR_SIGN (x);
+
+        MPFR_LOG_MSG (("special case (+/-2^b)^Y\n", 0));
+        /* now x = +/-2^b, so x^y = (+/-1)^y*2^(b*y) is exact whenever b*y is
+           an integer */
+        MPFR_SAVE_EXPO_MARK (expo);
+        mpfr_init2 (tmp, MPFR_PREC (y) + sizeof (long) * CHAR_BIT);
+        inexact = mpfr_mul_si (tmp, y, b, MPFR_RNDN);  /* exact */
+        MPFR_ASSERTN (inexact == 0);
+        /* Note: as the exponent range has been extended, an overflow is not
+           possible (due to basic overflow and underflow checking above, as
+           the result is ~ 2^tmp), and an underflow is not possible either
+           because b is an integer (thus either 0 or >= 1). */
+        mpfr_clear_flags ();
+        inexact = mpfr_exp2 (z, tmp, rnd_mode);
+        mpfr_clear (tmp);
+        if (sgnx < 0 && is_odd (y))
+          {
+            mpfr_neg (z, z, rnd_mode);
+            inexact = -inexact;
+          }
+        /* Without the following, the overflows3 test in tpow.c fails. */
+        MPFR_SAVE_EXPO_UPDATE_FLAGS (expo, __gmpfr_flags);
+        MPFR_SAVE_EXPO_FREE (expo);
+        return mpfr_check_range (z, inexact, rnd_mode);
+      }
+  }
+
+  MPFR_SAVE_EXPO_MARK (expo);
+
+  /* Case where |y * log(x)| is very small. Warning: x can be negative, in
+     that case y is a large integer. */
+  {
+    mpfr_t t;
+    mpfr_exp_t err;
+
+    /* We need an upper bound on the exponent of y * log(x). */
+    mpfr_init2 (t, 16);
+    if (MPFR_IS_POS(x))
+      mpfr_log (t, x, cmp_x_1 < 0 ? MPFR_RNDD : MPFR_RNDU); /* away from 0 */
+    else
+      {
+        /* if x < -1, round to +Inf, else round to zero */
+        mpfr_neg (t, x, (mpfr_cmp_si (x, -1) < 0) ? MPFR_RNDU : MPFR_RNDD);
+        mpfr_log (t, t, (mpfr_cmp_ui (t, 1) < 0) ? MPFR_RNDD : MPFR_RNDU);
+      }
+    MPFR_ASSERTN (MPFR_IS_PURE_FP (t));
+    err = MPFR_GET_EXP (y) + MPFR_GET_EXP (t);
+    mpfr_clear (t);
+    mpfr_clear_flags ();
+    MPFR_SMALL_INPUT_AFTER_SAVE_EXPO (z, __gmpfr_one, - err, 0,
+                                      (MPFR_SIGN (y) > 0) ^ (cmp_x_1 < 0),
+                                      rnd_mode, expo, {});
+  }
+
+  /* General case */
+  inexact = mpfr_pow_general (z, x, y, rnd_mode, y_is_integer, &expo);
+
+  MPFR_SAVE_EXPO_FREE (expo);
+  return mpfr_check_range (z, inexact, rnd_mode);
+}
diff --git a/src/pow_si.c b/src/pow_si.c
new file mode 100644
index 000000000..32c405acb
--- /dev/null
+++ b/src/pow_si.c
@@ -0,0 +1,250 @@
+/* mpfr_pow_si -- power function x^y with y a signed int
+
+Copyright 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#define MPFR_NEED_LONGLONG_H
+#include "mpfr-impl.h"
+
+/* The computation of y = pow_si(x,n) is done by
+ * y = pow_ui(x,n) if n >= 0
+ * y = 1 / pow_ui(x,-n) if n < 0
+ */
+
+int
+mpfr_pow_si (mpfr_ptr y, mpfr_srcptr x, long int n, mpfr_rnd_t rnd)
+{
+ MPFR_LOG_FUNC (("x[%#R]=%R n=%ld rnd=%d", x, x, n, rnd),
+ ("y[%#R]=%R", y, y));
+
+ if (n >= 0)
+ return mpfr_pow_ui (y, x, n, rnd);
+ else
+ {
+ if (MPFR_UNLIKELY (MPFR_IS_SINGULAR (x)))
+ {
+ if (MPFR_IS_NAN (x))
+ {
+ MPFR_SET_NAN (y);
+ MPFR_RET_NAN;
+ }
+ else if (MPFR_IS_INF (x))
+ {
+ MPFR_SET_ZERO (y);
+ if (MPFR_IS_POS (x) || ((unsigned) n & 1) == 0)
+ MPFR_SET_POS (y);
+ else
+ MPFR_SET_NEG (y);
+ MPFR_RET (0);
+ }
+ else /* x is zero */
+ {
+ MPFR_ASSERTD (MPFR_IS_ZERO (x));
+ MPFR_SET_INF(y);
+ if (MPFR_IS_POS (x) || ((unsigned) n & 1) == 0)
+ MPFR_SET_POS (y);
+ else
+ MPFR_SET_NEG (y);
+ MPFR_RET(0);
+ }
+ }
+
+ /* detect exact powers: x^(-n) is exact iff x is a power of 2 */
+ if (mpfr_cmp_si_2exp (x, MPFR_SIGN(x), MPFR_EXP(x) - 1) == 0)
+ {
+ mpfr_exp_t expx = MPFR_EXP (x) - 1, expy;
+ MPFR_ASSERTD (n < 0);
+ /* Warning: n * expx may overflow!
+ *
+ * Some systems (apparently alpha-freebsd) abort with
+ * LONG_MIN / 1, and LONG_MIN / -1 is undefined.
+ * http://www.freebsd.org/cgi/query-pr.cgi?pr=72024
+ *
+ * Proof of the overflow checking. The expressions below are
+ * assumed to be on the rational numbers, but the word "overflow"
+ * still has its own meaning in the C context. / still denotes
+ * the integer (truncated) division, and // denotes the exact
+ * division.
+ * - First, (__gmpfr_emin - 1) / n and (__gmpfr_emax - 1) / n
+ * cannot overflow due to the constraints on the exponents of
+ * MPFR numbers.
+ * - If n = -1, then n * expx = - expx, which is representable
+ * because of the constraints on the exponents of MPFR numbers.
+ * - If expx = 0, then n * expx = 0, which is representable.
+ * - If n < -1 and expx > 0:
+ * + If expx > (__gmpfr_emin - 1) / n, then
+ * expx >= (__gmpfr_emin - 1) / n + 1
+ * > (__gmpfr_emin - 1) // n,
+ * and
+ * n * expx < __gmpfr_emin - 1,
+ * i.e.
+ * n * expx <= __gmpfr_emin - 2.
+ * This corresponds to an underflow, with a null result in
+ * the rounding-to-nearest mode.
+ * + If expx <= (__gmpfr_emin - 1) / n, then n * expx cannot
+ * overflow since 0 < expx <= (__gmpfr_emin - 1) / n and
+ * 0 > n * expx >= n * ((__gmpfr_emin - 1) / n)
+ * >= __gmpfr_emin - 1.
+ * - If n < -1 and expx < 0:
+ * + If expx < (__gmpfr_emax - 1) / n, then
+ * expx <= (__gmpfr_emax - 1) / n - 1
+ * < (__gmpfr_emax - 1) // n,
+ * and
+ * n * expx > __gmpfr_emax - 1,
+ * i.e.
+ * n * expx >= __gmpfr_emax.
+ * This corresponds to an overflow (2^(n * expx) has an
+ * exponent > __gmpfr_emax).
+ * + If expx >= (__gmpfr_emax - 1) / n, then n * expx cannot
+ * overflow since 0 > expx >= (__gmpfr_emax - 1) / n and
+ * 0 < n * expx <= n * ((__gmpfr_emax - 1) / n)
+ * <= __gmpfr_emax - 1.
+ * Note: one could use expx bounds based on MPFR_EXP_MIN and
+ * MPFR_EXP_MAX instead of __gmpfr_emin and __gmpfr_emax. The
+ * current bounds do not lead to noticeably slower code and
+ * allow us to avoid a bug in Sun's compiler for Solaris/x86
+ * (when optimizations are enabled); known affected versions:
+ * cc: Sun C 5.8 2005/10/13
+ * cc: Sun C 5.8 Patch 121016-02 2006/03/31
+ * cc: Sun C 5.8 Patch 121016-04 2006/10/18
+ */
+ expy =
+ n != -1 && expx > 0 && expx > (__gmpfr_emin - 1) / n ?
+ MPFR_EMIN_MIN - 2 /* Underflow */ :
+ n != -1 && expx < 0 && expx < (__gmpfr_emax - 1) / n ?
+ MPFR_EMAX_MAX /* Overflow */ : n * expx;
+ return mpfr_set_si_2exp (y, n % 2 ? MPFR_INT_SIGN (x) : 1,
+ expy, rnd);
+ }
+
+ /* General case */
+ {
+ /* Declaration of the intermediary variable */
+ mpfr_t t;
+ /* Declaration of the size variable */
+ mpfr_prec_t Ny; /* target precision */
+ mpfr_prec_t Nt; /* working precision */
+ mpfr_rnd_t rnd1;
+ int size_n;
+ int inexact;
+ unsigned long abs_n;
+ MPFR_SAVE_EXPO_DECL (expo);
+ MPFR_ZIV_DECL (loop);
+
+ abs_n = - (unsigned long) n;
+ count_leading_zeros (size_n, (mp_limb_t) abs_n);
+ size_n = GMP_NUMB_BITS - size_n;
+
+ /* initial working precision */
+ Ny = MPFR_PREC (y);
+ Nt = Ny + size_n + 3 + MPFR_INT_CEIL_LOG2 (Ny);
+
+ MPFR_SAVE_EXPO_MARK (expo);
+
+ /* initialise of intermediary variable */
+ mpfr_init2 (t, Nt);
+
+ /* We will compute rnd(rnd1(1/x) ^ |n|), where rnd1 is the rounding
+ toward sign(x), to avoid spurious overflow or underflow, as in
+ mpfr_pow_z. */
+ rnd1 = MPFR_EXP (x) < 1 ? MPFR_RNDZ :
+ (MPFR_SIGN (x) > 0 ? MPFR_RNDU : MPFR_RNDD);
+
+ MPFR_ZIV_INIT (loop, Nt);
+ for (;;)
+ {
+ MPFR_BLOCK_DECL (flags);
+
+ /* compute (1/x)^|n| */
+ MPFR_BLOCK (flags, mpfr_ui_div (t, 1, x, rnd1));
+ MPFR_ASSERTD (! MPFR_UNDERFLOW (flags));
+ /* t = (1/x)*(1+theta) where |theta| <= 2^(-Nt) */
+ if (MPFR_UNLIKELY (MPFR_OVERFLOW (flags)))
+ goto overflow;
+ MPFR_BLOCK (flags, mpfr_pow_ui (t, t, abs_n, rnd));
+ /* t = (1/x)^|n|*(1+theta')^(|n|+1) where |theta'| <= 2^(-Nt).
+ If (|n|+1)*2^(-Nt) <= 1/2, which is satisfied as soon as
+ Nt >= bits(n)+2, then we can use Lemma \ref{lemma_graillat}
+ from algorithms.tex, which yields x^n*(1+theta) with
+ |theta| <= 2(|n|+1)*2^(-Nt), thus the error is bounded by
+ 2(|n|+1) ulps <= 2^(bits(n)+2) ulps. */
+ if (MPFR_UNLIKELY (MPFR_OVERFLOW (flags)))
+ {
+ overflow:
+ MPFR_ZIV_FREE (loop);
+ mpfr_clear (t);
+ MPFR_SAVE_EXPO_FREE (expo);
+ MPFR_LOG_MSG (("overflow\n", 0));
+ return mpfr_overflow (y, rnd, abs_n & 1 ?
+ MPFR_SIGN (x) : MPFR_SIGN_POS);
+ }
+ if (MPFR_UNLIKELY (MPFR_UNDERFLOW (flags)))
+ {
+ MPFR_ZIV_FREE (loop);
+ mpfr_clear (t);
+ MPFR_LOG_MSG (("underflow\n", 0));
+ if (rnd == MPFR_RNDN)
+ {
+ mpfr_t y2, nn;
+
+ /* We cannot decide now whether the result should be
+ rounded toward zero or away from zero. So, like
+ in mpfr_pow_pos_z, let's use the general case of
+ mpfr_pow in precision 2. */
+ MPFR_ASSERTD (mpfr_cmp_si_2exp (x, MPFR_SIGN (x),
+ MPFR_EXP (x) - 1) != 0);
+ mpfr_init2 (y2, 2);
+ mpfr_init2 (nn, sizeof (long) * CHAR_BIT);
+ inexact = mpfr_set_si (nn, n, MPFR_RNDN);
+ MPFR_ASSERTN (inexact == 0);
+ inexact = mpfr_pow_general (y2, x, nn, rnd, 1,
+ (mpfr_save_expo_t *) NULL);
+ mpfr_clear (nn);
+ mpfr_set (y, y2, MPFR_RNDN);
+ mpfr_clear (y2);
+ MPFR_SAVE_EXPO_UPDATE_FLAGS (expo, MPFR_FLAGS_UNDERFLOW);
+ goto end;
+ }
+ else
+ {
+ MPFR_SAVE_EXPO_FREE (expo);
+ return mpfr_underflow (y, rnd, abs_n & 1 ?
+ MPFR_SIGN (x) : MPFR_SIGN_POS);
+ }
+ }
+ /* error estimate -- see pow function in algorithms.ps */
+ if (MPFR_LIKELY (MPFR_CAN_ROUND (t, Nt - size_n - 2, Ny, rnd)))
+ break;
+
+ /* actualisation of the precision */
+ MPFR_ZIV_NEXT (loop, Nt);
+ mpfr_set_prec (t, Nt);
+ }
+ MPFR_ZIV_FREE (loop);
+
+ inexact = mpfr_set (y, t, rnd);
+ mpfr_clear (t);
+
+ end:
+ MPFR_SAVE_EXPO_FREE (expo);
+ return mpfr_check_range (y, inexact, rnd);
+ }
+ }
+}
diff --git a/src/pow_ui.c b/src/pow_ui.c
new file mode 100644
index 000000000..c7c1a35ab
--- /dev/null
+++ b/src/pow_ui.c
@@ -0,0 +1,161 @@
+/* mpfr_pow_ui -- compute the power of a floating-point number
+                                  by a machine integer
+
+Copyright 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#define MPFR_NEED_LONGLONG_H
+#include "mpfr-impl.h"
+
+/* sets y to x^n, and returns 0 if exact, non-zero otherwise */
+int
+mpfr_pow_ui (mpfr_ptr y, mpfr_srcptr x, unsigned long int n, mpfr_rnd_t rnd)
+{
+  unsigned long m;
+  mpfr_t res;
+  mpfr_prec_t prec, err;
+  int inexact;
+  mpfr_rnd_t rnd1;
+  MPFR_SAVE_EXPO_DECL (expo);
+  MPFR_ZIV_DECL (loop);
+  MPFR_BLOCK_DECL (flags);
+
+  MPFR_LOG_FUNC (("x[%#R]=%R n=%lu rnd=%d", x, x, n, rnd),
+                 ("y[%#R]=%R inexact=%d", y, y, inexact));
+
+  /* x^0 = 1 for any x, even a NaN */
+  if (MPFR_UNLIKELY (n == 0))
+    return mpfr_set_ui (y, 1, rnd);
+
+  if (MPFR_UNLIKELY (MPFR_IS_SINGULAR (x)))
+    {
+      if (MPFR_IS_NAN (x))
+        {
+          MPFR_SET_NAN (y);
+          MPFR_RET_NAN;
+        }
+      else if (MPFR_IS_INF (x))
+        {
+          /* Inf^n = Inf, (-Inf)^n = Inf for n even, -Inf for n odd */
+          if (MPFR_IS_NEG (x) && (n & 1) == 1)
+            MPFR_SET_NEG (y);
+          else
+            MPFR_SET_POS (y);
+          MPFR_SET_INF (y);
+          MPFR_RET (0);
+        }
+      else /* x is zero */
+        {
+          MPFR_ASSERTD (MPFR_IS_ZERO (x));
+          /* 0^n = 0 for any n; the sign is negative iff x = -0 and n odd */
+          MPFR_SET_ZERO (y);
+          if (MPFR_IS_POS (x) || (n & 1) == 0)
+            MPFR_SET_POS (y);
+          else
+            MPFR_SET_NEG (y);
+          MPFR_RET (0);
+        }
+    }
+  else if (MPFR_UNLIKELY (n <= 2))
+    {
+      if (n < 2)
+        /* x^1 = x */
+        return mpfr_set (y, x, rnd);
+      else
+        /* x^2 = sqr(x) */
+        return mpfr_sqr (y, x, rnd);
+    }
+
+  /* Augment exponent range */
+  MPFR_SAVE_EXPO_MARK (expo);
+
+  /* setup initial precision */
+  prec = MPFR_PREC (y) + 3 + GMP_NUMB_BITS
+    + MPFR_INT_CEIL_LOG2 (MPFR_PREC (y));
+  mpfr_init2 (res, prec);
+
+  rnd1 = MPFR_IS_POS (x) ? MPFR_RNDU : MPFR_RNDD; /* round away from zero */
+
+  MPFR_ZIV_INIT (loop, prec);
+  for (;;)
+    {
+      int i;
+
+      /* i = number of bits of n (binary left-to-right exponentiation) */
+      for (m = n, i = 0; m; i++, m >>= 1)
+        ;
+      /* now 2^(i-1) <= n < 2^i */
+      MPFR_ASSERTD (prec > (mpfr_prec_t) i);
+      err = prec - 1 - (mpfr_prec_t) i;
+      /* First step: compute square from x */
+      MPFR_BLOCK (flags,
+                  inexact = mpfr_mul (res, x, x, MPFR_RNDU);
+                  MPFR_ASSERTD (i >= 2);
+                  if (n & (1UL << (i-2)))
+                    inexact |= mpfr_mul (res, res, x, rnd1);
+                  for (i -= 3; i >= 0 && !MPFR_BLOCK_EXCEP; i--)
+                    {
+                      inexact |= mpfr_mul (res, res, res, MPFR_RNDU);
+                      if (n & (1UL << i))
+                        inexact |= mpfr_mul (res, res, x, rnd1);
+                    });
+      /* let r(n) be the number of roundings: we have r(2)=1, r(3)=2,
+         and r(2n)=2r(n)+1, r(2n+1)=2r(n)+2, thus r(n)=n-1.
+         Using Higham's method, to each rounding corresponds a factor
+         (1-theta) with 0 <= theta <= 2^(1-p), thus at the end the
+         absolute error is bounded by (n-1)*2^(1-p)*res <= 2*(n-1)*ulp(res)
+         since 2^(-p)*x <= ulp(x). Since n < 2^i, this gives a maximal
+         error of 2^(1+i)*ulp(res).
+      */
+      if (MPFR_LIKELY (inexact == 0
+                       || MPFR_OVERFLOW (flags) || MPFR_UNDERFLOW (flags)
+                       || MPFR_CAN_ROUND (res, err, MPFR_PREC (y), rnd)))
+        break;
+      /* Actualisation of the precision */
+      MPFR_ZIV_NEXT (loop, prec);
+      mpfr_set_prec (res, prec);
+    }
+  MPFR_ZIV_FREE (loop);
+
+  if (MPFR_UNLIKELY (MPFR_OVERFLOW (flags) || MPFR_UNDERFLOW (flags)))
+    {
+      mpz_t z;
+
+      /* Internal overflow or underflow. However the approximation error has
+       * not been taken into account. So, let's solve this problem by using
+       * mpfr_pow_z, which can handle it. This case could be improved in the
+       * future, without having to use mpfr_pow_z.
+       */
+      MPFR_LOG_MSG (("Internal overflow or underflow,"
+                     " let's use mpfr_pow_z.\n", 0));
+      mpfr_clear (res);
+      MPFR_SAVE_EXPO_FREE (expo);
+      mpz_init (z);
+      mpz_set_ui (z, n);
+      inexact = mpfr_pow_z (y, x, z, rnd);
+      mpz_clear (z);
+      return inexact;
+    }
+
+  inexact = mpfr_set (y, res, rnd);
+  mpfr_clear (res);
+
+  MPFR_SAVE_EXPO_FREE (expo);
+  return mpfr_check_range (y, inexact, rnd);
+}
diff --git a/src/pow_z.c b/src/pow_z.c
new file mode 100644
index 000000000..061d6407c
--- /dev/null
+++ b/src/pow_z.c
@@ -0,0 +1,365 @@
+/* mpfr_pow_z -- power function x^z with z a MPZ
+
+Copyright 2005, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#define MPFR_NEED_LONGLONG_H
+#include "mpfr-impl.h"
+
+/* y <- x^|z| with z != 0
+ if cr=1: ensures correct rounding of y
+ if cr=0: does not ensure correct rounding, but avoid spurious overflow
+ or underflow, and uses the precision of y as working precision (warning,
+ y and x might be the same variable). */
+static int
+mpfr_pow_pos_z (mpfr_ptr y, mpfr_srcptr x, mpz_srcptr z, mpfr_rnd_t rnd, int cr)
+{
+  mpfr_t res;
+  mpfr_prec_t prec, err;
+  int inexact;
+  mpfr_rnd_t rnd1, rnd2;
+  mpz_t absz;
+  mp_size_t size_z;
+  MPFR_ZIV_DECL (loop);
+  MPFR_BLOCK_DECL (flags);
+
+  MPFR_LOG_FUNC (("x[%#R]=%R z=? rnd=%d cr=%d", x, x, rnd, cr),
+                 ("y[%#R]=%R inexact=%d", y, y, inexact));
+
+  MPFR_ASSERTD (mpz_sgn (z) != 0);
+
+  /* |z| = 1: x^|z| = x, no computation needed */
+  if (MPFR_UNLIKELY (mpz_cmpabs_ui (z, 1) == 0))
+    return mpfr_set (y, x, rnd);
+
+  absz[0] = z[0];
+  SIZ (absz) = ABS(SIZ(absz)); /* Hack to get abs(z) */
+  /* Note: absz shares its limb data with z; it is read-only and must
+     not be passed to mpz_clear. */
+  MPFR_MPZ_SIZEINBASE2 (size_z, z);
+
+  /* round toward 1 (or -1) to avoid spurious overflow or underflow,
+     i.e. if an overflow or underflow occurs, it is a real exception
+     and is not just due to the rounding error. */
+  rnd1 = (MPFR_EXP(x) >= 1) ? MPFR_RNDZ
+    : (MPFR_IS_POS(x) ? MPFR_RNDU : MPFR_RNDD);
+  rnd2 = (MPFR_EXP(x) >= 1) ? MPFR_RNDD : MPFR_RNDU;
+
+  /* cr = 0: use the target precision directly (no Ziv refinement) */
+  if (cr != 0)
+    prec = MPFR_PREC (y) + 3 + size_z + MPFR_INT_CEIL_LOG2 (MPFR_PREC (y));
+  else
+    prec = MPFR_PREC (y);
+  mpfr_init2 (res, prec);
+
+  MPFR_ZIV_INIT (loop, prec);
+  for (;;)
+    {
+      unsigned int inexmul; /* will be non-zero if res may be inexact */
+      mp_size_t i = size_z;
+
+      /* now 2^(i-1) <= z < 2^i */
+      /* see below (case z < 0) for the error analysis, which is identical,
+         except if z=n, the maximal relative error is here 2(n-1)2^(-prec)
+         instead of 2(2n-1)2^(-prec) for z<0. */
+      MPFR_ASSERTD (prec > (mpfr_prec_t) i);
+      err = prec - 1 - (mpfr_prec_t) i;
+
+      /* binary (left-to-right) exponentiation on the bits of |z| */
+      MPFR_BLOCK (flags,
+                  inexmul = mpfr_mul (res, x, x, rnd2);
+                  MPFR_ASSERTD (i >= 2);
+                  if (mpz_tstbit (absz, i - 2))
+                    inexmul |= mpfr_mul (res, res, x, rnd1);
+                  for (i -= 3; i >= 0 && !MPFR_BLOCK_EXCEP; i--)
+                    {
+                      inexmul |= mpfr_mul (res, res, res, rnd2);
+                      if (mpz_tstbit (absz, i))
+                        inexmul |= mpfr_mul (res, res, x, rnd1);
+                    });
+      if (MPFR_LIKELY (inexmul == 0 || cr == 0
+                       || MPFR_OVERFLOW (flags) || MPFR_UNDERFLOW (flags)
+                       || MPFR_CAN_ROUND (res, err, MPFR_PREC (y), rnd)))
+        break;
+      /* Can't decide correct rounding, increase the precision */
+      MPFR_ZIV_NEXT (loop, prec);
+      mpfr_set_prec (res, prec);
+    }
+  MPFR_ZIV_FREE (loop);
+
+  /* Check Overflow */
+  if (MPFR_OVERFLOW (flags))
+    {
+      MPFR_LOG_MSG (("overflow\n", 0));
+      inexact = mpfr_overflow (y, rnd, mpz_odd_p (absz) ?
+                               MPFR_SIGN (x) : MPFR_SIGN_POS);
+    }
+  /* Check Underflow */
+  else if (MPFR_UNDERFLOW (flags))
+    {
+      MPFR_LOG_MSG (("underflow\n", 0));
+      if (rnd == MPFR_RNDN)
+        {
+          mpfr_t y2, zz;
+
+          /* We cannot decide now whether the result should be rounded
+             toward zero or +Inf. So, let's use the general case of
+             mpfr_pow, which can do that. But the problem is that the
+             result can be exact! However, it is sufficient to try to
+             round on 2 bits (the precision does not matter in case of
+             underflow, since MPFR does not have subnormals), in which
+             case, the result cannot be exact due to previous filtering
+             of trivial cases. */
+          MPFR_ASSERTD (mpfr_cmp_si_2exp (x, MPFR_SIGN (x),
+                                          MPFR_EXP (x) - 1) != 0);
+          mpfr_init2 (y2, 2);
+          mpfr_init2 (zz, ABS (SIZ (z)) * GMP_NUMB_BITS);
+          inexact = mpfr_set_z (zz, z, MPFR_RNDN);
+          MPFR_ASSERTN (inexact == 0);
+          inexact = mpfr_pow_general (y2, x, zz, rnd, 1,
+                                      (mpfr_save_expo_t *) NULL);
+          mpfr_clear (zz);
+          mpfr_set (y, y2, MPFR_RNDN);
+          mpfr_clear (y2);
+          __gmpfr_flags = MPFR_FLAGS_INEXACT | MPFR_FLAGS_UNDERFLOW;
+        }
+      else
+        {
+          inexact = mpfr_underflow (y, rnd, mpz_odd_p (absz) ?
+                                    MPFR_SIGN (x) : MPFR_SIGN_POS);
+        }
+    }
+  else
+    inexact = mpfr_set (y, res, rnd);
+
+  mpfr_clear (res);
+  return inexact;
+}
+
+/* The computation of y = pow(x,z) is done by
+ * y = set_ui(1) if z = 0
+ * y = pow_pos_z(x,z) if z > 0
+ * y = pow_pos_z(1/x,-z) if z < 0
+ *
+ * Note: in case z < 0, we could also compute 1/pow_ui(x,-z). However, in
+ * case MAX < 1/MIN, where MAX is the largest positive value, i.e.,
+ * MAX = nextbelow(+Inf), and MIN is the smallest positive value, i.e.,
+ * MIN = nextabove(+0), then x^(-z) might produce an overflow, whereas
+ * x^z is representable.
+ */
+
+int
+mpfr_pow_z (mpfr_ptr y, mpfr_srcptr x, mpz_srcptr z, mpfr_rnd_t rnd)
+{
+  int inexact;
+  mpz_t tmp;
+  MPFR_SAVE_EXPO_DECL (expo);
+
+  MPFR_LOG_FUNC (("x[%#R]=%R z=? rnd=%d", x, x, rnd),
+                 ("y[%#R]=%R inexact=%d", y, y, inexact));
+
+  /* x^0 = 1 for any x, even a NaN */
+  if (MPFR_UNLIKELY (mpz_sgn (z) == 0))
+    return mpfr_set_ui (y, 1, rnd);
+
+  if (MPFR_UNLIKELY (MPFR_IS_SINGULAR (x)))
+    {
+      if (MPFR_IS_NAN (x))
+        {
+          MPFR_SET_NAN (y);
+          MPFR_RET_NAN;
+        }
+      else if (MPFR_IS_INF (x))
+        {
+          /* Inf^n = Inf, (-Inf)^n = Inf for n even, -Inf for n odd */
+          /* Inf ^(-n) = 0, sign = + if x>0 or z even */
+          if (mpz_sgn (z) > 0)
+            MPFR_SET_INF (y);
+          else
+            MPFR_SET_ZERO (y);
+          if (MPFR_UNLIKELY (MPFR_IS_NEG (x) && mpz_odd_p (z)))
+            MPFR_SET_NEG (y);
+          else
+            MPFR_SET_POS (y);
+          MPFR_RET (0);
+        }
+      else /* x is zero */
+        {
+          MPFR_ASSERTD (MPFR_IS_ZERO(x));
+          if (mpz_sgn (z) > 0)
+            /* 0^n = +/-0 for any n */
+            MPFR_SET_ZERO (y);
+          else
+            /* 0^(-n) is +/-Inf */
+            MPFR_SET_INF (y);
+          if (MPFR_LIKELY (MPFR_IS_POS (x) || mpz_even_p (z)))
+            MPFR_SET_POS (y);
+          else
+            MPFR_SET_NEG (y);
+          MPFR_RET(0);
+        }
+    }
+
+  /* detect exact powers: x^-n is exact iff x is a power of 2
+     Do it if n > 0 too as this is faster and this filtering is
+     needed in case of underflow. */
+  if (MPFR_UNLIKELY (mpfr_cmp_si_2exp (x, MPFR_SIGN (x),
+                                       MPFR_EXP (x) - 1) == 0))
+    {
+      mpfr_exp_t expx = MPFR_EXP (x); /* warning: x and y may be the same
+                                         variable */
+
+      MPFR_LOG_MSG (("x^n with x power of two\n", 0));
+      /* The result is +/-2^(z*(expx-1)): set y to +/-1, then compute
+         the exact target exponent z*(expx-1)+1 in tmp (an mpz_t, so no
+         overflow), and compare it against the exponent range. */
+      mpfr_set_si (y, mpz_odd_p (z) ? MPFR_INT_SIGN(x) : 1, rnd);
+      MPFR_ASSERTD (MPFR_IS_FP (y));
+      mpz_init (tmp);
+      mpz_mul_si (tmp, z, expx - 1);
+      MPFR_ASSERTD (MPFR_GET_EXP (y) == 1);
+      mpz_add_ui (tmp, tmp, 1);
+      inexact = 0;
+      if (MPFR_UNLIKELY (mpz_cmp_si (tmp, __gmpfr_emin) < 0))
+        {
+          MPFR_LOG_MSG (("underflow\n", 0));
+          /* |y| is a power of two, thus |y| <= 2^(emin-2), and in
+             rounding to nearest, the value must be rounded to 0. */
+          if (rnd == MPFR_RNDN)
+            rnd = MPFR_RNDZ;
+          inexact = mpfr_underflow (y, rnd, MPFR_SIGN (y));
+        }
+      else if (MPFR_UNLIKELY (mpz_cmp_si (tmp, __gmpfr_emax) > 0))
+        {
+          MPFR_LOG_MSG (("overflow\n", 0));
+          inexact = mpfr_overflow (y, rnd, MPFR_SIGN (y));
+        }
+      else
+        MPFR_SET_EXP (y, mpz_get_si (tmp));
+      mpz_clear (tmp);
+      MPFR_RET (inexact);
+    }
+
+  MPFR_SAVE_EXPO_MARK (expo);
+
+  if (mpz_sgn (z) > 0)
+    {
+      inexact = mpfr_pow_pos_z (y, x, z, rnd, 1);
+      MPFR_SAVE_EXPO_UPDATE_FLAGS (expo, __gmpfr_flags);
+    }
+  else
+    {
+      /* Declaration of the intermediary variable */
+      mpfr_t t;
+      mpfr_prec_t Nt; /* Precision of the intermediary variable */
+      mpfr_rnd_t rnd1;
+      mp_size_t size_z;
+      MPFR_ZIV_DECL (loop);
+
+      MPFR_MPZ_SIZEINBASE2 (size_z, z);
+
+      /* initial working precision */
+      Nt = MPFR_PREC (y);
+      Nt = Nt + size_z + 3 + MPFR_INT_CEIL_LOG2 (Nt);
+      /* ensures Nt >= bits(z)+2 */
+
+      /* initialize the intermediate variable */
+      mpfr_init2 (t, Nt);
+
+      /* We will compute rnd(rnd1(1/x) ^ (-z)), where rnd1 is the rounding
+         toward sign(x), to avoid spurious overflow or underflow. */
+      rnd1 = MPFR_EXP (x) < 1 ? MPFR_RNDZ :
+        (MPFR_SIGN (x) > 0 ? MPFR_RNDU : MPFR_RNDD);
+
+      MPFR_ZIV_INIT (loop, Nt);
+      for (;;)
+        {
+          MPFR_BLOCK_DECL (flags);
+
+          /* compute (1/x)^(-z), -z>0 */
+          /* As emin = -emax, an underflow cannot occur in the division.
+             And if an overflow occurs, then this means that x^z overflows
+             too (since we have rounded toward 1 or -1). */
+          MPFR_BLOCK (flags, mpfr_ui_div (t, 1, x, rnd1));
+          MPFR_ASSERTD (! MPFR_UNDERFLOW (flags));
+          /* t = (1/x)*(1+theta) where |theta| <= 2^(-Nt) */
+          if (MPFR_UNLIKELY (MPFR_OVERFLOW (flags)))
+            goto overflow;
+          MPFR_BLOCK (flags, mpfr_pow_pos_z (t, t, z, rnd, 0));
+          /* Now if z=-n, t = x^z*(1+theta)^(2n-1) where |theta| <= 2^(-Nt),
+             with theta maybe different from above. If (2n-1)*2^(-Nt) <= 1/2,
+             which is satisfied as soon as Nt >= bits(z)+2, then we can use
+             Lemma \ref{lemma_graillat} from algorithms.tex, which yields
+             t = x^z*(1+theta) with |theta| <= 2(2n-1)*2^(-Nt), thus the
+             error is bounded by 2(2n-1) ulps <= 2^(bits(z)+2) ulps. */
+          if (MPFR_UNLIKELY (MPFR_OVERFLOW (flags)))
+            {
+            overflow:
+              MPFR_ZIV_FREE (loop);
+              mpfr_clear (t);
+              MPFR_SAVE_EXPO_FREE (expo);
+              MPFR_LOG_MSG (("overflow\n", 0));
+              return mpfr_overflow (y, rnd,
+                                    mpz_odd_p (z) ? MPFR_SIGN (x) :
+                                    MPFR_SIGN_POS);
+            }
+          if (MPFR_UNLIKELY (MPFR_UNDERFLOW (flags)))
+            {
+              MPFR_ZIV_FREE (loop);
+              mpfr_clear (t);
+              MPFR_LOG_MSG (("underflow\n", 0));
+              if (rnd == MPFR_RNDN)
+                {
+                  mpfr_t y2, zz;
+
+                  /* We cannot decide now whether the result should be
+                     rounded toward zero or away from zero. So, like
+                     in mpfr_pow_pos_z, let's use the general case of
+                     mpfr_pow in precision 2. */
+                  MPFR_ASSERTD (mpfr_cmp_si_2exp (x, MPFR_SIGN (x),
+                                                  MPFR_EXP (x) - 1) != 0);
+                  mpfr_init2 (y2, 2);
+                  mpfr_init2 (zz, ABS (SIZ (z)) * GMP_NUMB_BITS);
+                  inexact = mpfr_set_z (zz, z, MPFR_RNDN);
+                  MPFR_ASSERTN (inexact == 0);
+                  inexact = mpfr_pow_general (y2, x, zz, rnd, 1,
+                                              (mpfr_save_expo_t *) NULL);
+                  mpfr_clear (zz);
+                  mpfr_set (y, y2, MPFR_RNDN);
+                  mpfr_clear (y2);
+                  MPFR_SAVE_EXPO_UPDATE_FLAGS (expo, MPFR_FLAGS_UNDERFLOW);
+                  goto end;
+                }
+              else
+                {
+                  MPFR_SAVE_EXPO_FREE (expo);
+                  return mpfr_underflow (y, rnd, mpz_odd_p (z) ?
+                                         MPFR_SIGN (x) : MPFR_SIGN_POS);
+                }
+            }
+          if (MPFR_LIKELY (MPFR_CAN_ROUND (t, Nt - size_z - 2, MPFR_PREC (y),
+                                           rnd)))
+            break;
+          /* actualisation of the precision */
+          MPFR_ZIV_NEXT (loop, Nt);
+          mpfr_set_prec (t, Nt);
+        }
+      MPFR_ZIV_FREE (loop);
+
+      inexact = mpfr_set (y, t, rnd);
+      mpfr_clear (t);
+    }
+
+ end:
+  MPFR_SAVE_EXPO_FREE (expo);
+  return mpfr_check_range (y, inexact, rnd);
+}
diff --git a/src/powerof2.c b/src/powerof2.c
new file mode 100644
index 000000000..4283ee455
--- /dev/null
+++ b/src/powerof2.c
@@ -0,0 +1,46 @@
+/* mpfr_powerof2_raw -- test whether a floating-point number is a power of 2
+
+Copyright 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#include "mpfr-impl.h"
+
+/* This is an internal function and one assumes that x is a real number. */
+
+int
+mpfr_powerof2_raw (mpfr_srcptr x)
+{
+  mp_limb_t *xp;
+  mp_size_t xn;
+
+  /* This is an internal function, and we may call it with some
+     wrong numbers (ie good mantissa but wrong flags or exp)
+     So we don't want to test if it is a pure FP.
+     MPFR_ASSERTN(MPFR_IS_PURE_FP(x)); */
+  xp = MPFR_MANT(x);
+  xn = (MPFR_PREC(x) - 1) / GMP_NUMB_BITS;
+  /*if (NOT_POW2(xp[xn]))*/
+  /* x is a power of 2 iff the most significant limb is exactly the
+     high bit and every lower limb is zero. */
+  if (xp[xn] != MPFR_LIMB_HIGHBIT)
+    return 0;
+  while (xn > 0)
+    if (xp[--xn] != 0)
+      return 0;
+  return 1;
+}
diff --git a/src/print_raw.c b/src/print_raw.c
new file mode 100644
index 000000000..a8a1d36e1
--- /dev/null
+++ b/src/print_raw.c
@@ -0,0 +1,129 @@
+/* mpfr_print_binary -- print the internal binary representation of a
+ floating-point number
+
+Copyright 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#include "mpfr-impl.h"
+
+/* Print on stream the binary representation of x, in the form
+   [-]0.<mantissa bits>E<exponent>; special values are printed as
+   "@NaN@", "[-]@Inf@" or "[-]0". */
+void
+mpfr_fprint_binary (FILE *stream, mpfr_srcptr x)
+{
+  if (MPFR_IS_NAN (x))
+    {
+      fprintf (stream, "@NaN@");
+      return;
+    }
+
+  if (MPFR_SIGN (x) < 0)
+    fprintf (stream, "-");
+
+  if (MPFR_IS_INF (x))
+    fprintf (stream, "@Inf@");
+  else if (MPFR_IS_ZERO (x))
+    fprintf (stream, "0");
+  else
+    {
+      mp_limb_t *mx;
+      mpfr_prec_t px;
+      mp_size_t n;
+
+      mx = MPFR_MANT (x);
+      px = MPFR_PREC (x);
+
+      fprintf (stream, "0.");
+      /* start from the highest-index limb, which holds the most
+         significant bits of the mantissa */
+      for (n = (px - 1) / GMP_NUMB_BITS; ; n--)
+        {
+          mp_limb_t wd, t;
+
+          MPFR_ASSERTN (n >= 0);
+          wd = mx[n];
+          /* scan the limb from its most significant bit */
+          for (t = MPFR_LIMB_HIGHBIT; t != 0; t >>= 1)
+            {
+              putc ((wd & t) == 0 ? '0' : '1', stream);
+              /* stop after the px significant bits, then emit the exponent */
+              if (--px == 0)
+                {
+                  mpfr_exp_t ex;
+
+                  ex = MPFR_GET_EXP (x);
+                  MPFR_ASSERTN (ex >= LONG_MIN && ex <= LONG_MAX);
+                  fprintf (stream, "E%ld", (long) ex);
+                  return;
+                }
+            }
+        }
+    }
+}
+
+/* Convenience wrapper: print the binary representation of x on stdout. */
+void
+mpfr_print_binary (mpfr_srcptr x)
+{
+  mpfr_fprint_binary (stdout, x);
+}
+
+/* Debug helper: print the label str, then the bits of the limbs holding
+   the r-bit mantissa p (highest-index limb first), a '[' right after the
+   r-th printed bit and a '.' after each limb, ending with a newline. */
+void
+mpfr_print_mant_binary(const char *str, const mp_limb_t *p, mpfr_prec_t r)
+{
+  int i;
+  mpfr_prec_t count = 0;
+  char c;
+  mp_size_t n = (r - 1) / GMP_NUMB_BITS + 1;  /* number of limbs */
+
+  printf("%s ", str);
+  for(n-- ; n>=0 ; n--)
+    {
+      for(i = GMP_NUMB_BITS-1 ; i >=0 ; i--)
+        {
+          c = (p[n] & (((mp_limb_t)1L)<<i)) ? '1' : '0';
+          putchar(c);
+          count++;
+          if (count == r)
+            putchar('[');
+        }
+      putchar('.');
+    }
+  putchar('\n');
+}
+
+/* Debug helper: like mpfr_print_mant_binary, but without a label; prints
+   a ',' right after the precx-th bit and a '[' right after the error-th
+   bit, with a '.' after each limb, ending with a newline. */
+void
+mpfr_dump_mant (const mp_limb_t *p, mpfr_prec_t r, mpfr_prec_t precx,
+                mpfr_prec_t error)
+{
+  int i;
+  mpfr_prec_t count = 0;
+  char c;
+  mp_size_t n = (r - 1) / GMP_NUMB_BITS + 1;  /* number of limbs */
+
+  for(n-- ; n>=0 ; n--)
+    {
+      for(i = GMP_NUMB_BITS-1 ; i >=0 ; i--)
+        {
+          c = (p[n] & (((mp_limb_t)1L)<<i)) ? '1' : '0';
+          putchar(c);
+          count++;
+          if (count == precx)
+            putchar (',');
+          if (count == error)
+            putchar('[');
+        }
+      putchar('.');
+    }
+  putchar('\n');
+}
diff --git a/src/print_rnd_mode.c b/src/print_rnd_mode.c
new file mode 100644
index 000000000..60827d374
--- /dev/null
+++ b/src/print_rnd_mode.c
@@ -0,0 +1,46 @@
+/* mpfr_print_rnd_mode -- convert a given rounding mode to a string
+
+Copyright 1999, 2001, 2002, 2003, 2004, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#include "mpfr-impl.h"
+
+/* Return the name of rnd_mode as a static string, or a null pointer if
+   rnd_mode is not a valid rounding mode. */
+const char *
+mpfr_print_rnd_mode (mpfr_rnd_t rnd_mode)
+{
+  /* If we forget to update this function after a new rounding mode
+     is added, this will be detected by the following assertion. */
+  MPFR_ASSERTN (MPFR_RND_MAX == MPFR_RNDA + 1);
+  switch (rnd_mode)
+    {
+    case MPFR_RNDD:
+      return "MPFR_RNDD";
+    case MPFR_RNDU:
+      return "MPFR_RNDU";
+    case MPFR_RNDN:
+      return "MPFR_RNDN";
+    case MPFR_RNDZ:
+      return "MPFR_RNDZ";
+    case MPFR_RNDA:
+      return "MPFR_RNDA";
+    default:
+      return (const char*) 0;  /* invalid rounding mode */
+    }
+}
diff --git a/src/printf.c b/src/printf.c
new file mode 100644
index 000000000..b55c29f6f
--- /dev/null
+++ b/src/printf.c
@@ -0,0 +1,215 @@
+/* mpfr_printf -- printf function and friends.
+
+Copyright 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+/* The mpfr_printf-like functions are defined only if stdarg.h exist */
+#ifdef HAVE_STDARG
+
+#include <stdarg.h>
+
+#ifndef HAVE_VA_COPY
+# ifdef HAVE___VA_COPY
+# define va_copy(dst,src) __va_copy(dst, src)
+# else
+/* autoconf manual advocates this fallback.
+ This is also the solution chosen by gmp */
+# define va_copy(dst,src) \
+ do { memcpy(&(dst), &(src), sizeof(va_list)); } while (0)
+# endif /* HAVE___VA_COPY */
+#endif /* HAVE_VA_COPY */
+
+#include <errno.h>
+#include "mpfr-impl.h"
+
+#ifdef _MPFR_H_HAVE_FILE
+
+/* Each printf-like function calls mpfr_vasprintf which
+ - returns the number of characters in the returned string excluding the
+ terminating null
+ - returns -1 and sets the erange flag if the number of produced characters
+ exceeds INT_MAX (in that case, also sets errno to EOVERFLOW in POSIX
+ systems) */
+
+#define GET_STR_VA(sz, str, fmt, ap) \
+ do \
+ { \
+ sz = mpfr_vasprintf (&(str), fmt, ap); \
+ if (sz < 0) \
+ { \
+ if (str) \
+ mpfr_free_str (str); \
+ return -1; \
+ } \
+ } while (0)
+
+#define GET_STR(sz, str, fmt) \
+ do \
+ { \
+ va_list ap; \
+ va_start(ap, fmt); \
+ sz = mpfr_vasprintf (&(str), fmt, ap); \
+ va_end (ap); \
+ if (sz < 0) \
+ { \
+ if (str) \
+ mpfr_free_str (str); \
+ return -1; \
+ } \
+ } while (0)
+
+int
+mpfr_printf (const char *fmt, ...)
+{
+ char *str;
+ int ret;
+
+ GET_STR (ret, str, fmt);
+ ret = printf ("%s", str);
+
+ mpfr_free_str (str);
+ return ret;
+}
+
+int
+mpfr_vprintf (const char *fmt, va_list ap)
+{
+ char *str;
+ int ret;
+
+ GET_STR_VA (ret, str, fmt, ap);
+ ret = printf ("%s", str);
+
+ mpfr_free_str (str);
+ return ret;
+}
+
+
+int
+mpfr_fprintf (FILE *fp, const char *fmt, ...)
+{
+ char *str;
+ int ret;
+
+ GET_STR (ret, str, fmt);
+ ret = fprintf (fp, "%s", str);
+
+ mpfr_free_str (str);
+ return ret;
+}
+
+int
+mpfr_vfprintf (FILE *fp, const char *fmt, va_list ap)
+{
+ char *str;
+ int ret;
+
+ GET_STR_VA (ret, str, fmt, ap);
+ ret = fprintf (fp, "%s", str);
+
+ mpfr_free_str (str);
+ return ret;
+}
+#endif /* _MPFR_H_HAVE_FILE */
+
+int
+mpfr_sprintf (char *buf, const char *fmt, ...)
+{
+ char *str;
+ int ret;
+
+ GET_STR (ret, str, fmt);
+ ret = sprintf (buf, "%s", str);
+
+ mpfr_free_str (str);
+ return ret;
+}
+
+int
+mpfr_vsprintf (char *buf, const char *fmt, va_list ap)
+{
+ char *str;
+ int ret;
+
+ GET_STR_VA (ret, str, fmt, ap);
+ ret = sprintf (buf, "%s", str);
+
+ mpfr_free_str (str);
+ return ret;
+}
+
+int
+mpfr_snprintf (char *buf, size_t size, const char *fmt, ...)
+{
+ char *str;
+ int ret;
+ size_t min_size;
+
+ GET_STR (ret, str, fmt);
+
+ /* C99 allows SIZE to be zero */
+ if (size != 0)
+ {
+ MPFR_ASSERTN (buf != NULL);
+ min_size = (size_t)ret < size ? (size_t)ret : size - 1;
+ strncpy (buf, str, min_size);
+ buf[min_size] = '\0';
+ }
+
+ mpfr_free_str (str);
+ return ret;
+}
+
+int
+mpfr_vsnprintf (char *buf, size_t size, const char *fmt, va_list ap)
+{
+ char *str;
+ int ret;
+ int min_size;
+
+ GET_STR_VA (ret, str, fmt, ap);
+
+ /* C99 allows SIZE to be zero */
+ if (size != 0)
+ {
+ MPFR_ASSERTN (buf != NULL);
+ min_size = (size_t)ret < size ? (size_t)ret : size - 1;
+ strncpy (buf, str, min_size);
+ buf[min_size] = '\0';
+ }
+
+ mpfr_free_str (str);
+ return ret;
+}
+
+int
+mpfr_asprintf (char **pp, const char *fmt, ...)
+{
+ int ret;
+
+ GET_STR (ret, *pp, fmt);
+
+ return ret;
+}
+#endif /* HAVE_STDARG */
diff --git a/src/rec_sqrt.c b/src/rec_sqrt.c
new file mode 100644
index 000000000..0b5df8b0e
--- /dev/null
+++ b/src/rec_sqrt.c
@@ -0,0 +1,535 @@
+/* mpfr_rec_sqrt -- inverse square root
+
+Copyright 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#define MPFR_NEED_LONGLONG_H /* for umul_ppmm */
+#include "mpfr-impl.h"
+
+#define LIMB_SIZE(x) ((((x)-1)>>MPFR_LOG2_GMP_NUMB_BITS) + 1)
+
+#define MPFR_COM_N(x,y,n) \
+ { \
+ mp_size_t i; \
+ for (i = 0; i < n; i++) \
+ *((x)+i) = ~*((y)+i); \
+ }
+
+/* Put in X a p-bit approximation of 1/sqrt(A),
+ where X = {x, n}/B^n, n = ceil(p/GMP_NUMB_BITS),
+ A = 2^(1+as)*{a, an}/B^an, as is 0 or 1, an = ceil(ap/GMP_NUMB_BITS),
+ where B = 2^GMP_NUMB_BITS.
+
+ We have 1 <= A < 4 and 1/2 <= X < 1.
+
+ The error in the approximate result with respect to the true
+ value 1/sqrt(A) is bounded by 1 ulp(X), i.e., 2^{-p} since 1/2 <= X < 1.
+
+ Note: x and a are left-aligned, i.e., the most significant bit of
+ a[an-1] is set, and so is the most significant bit of the output x[n-1].
+
+ If p is not a multiple of GMP_NUMB_BITS, the extra low bits of the input
+ A are taken into account to compute the approximation of 1/sqrt(A), but
+ whether or not they are zero, the error between X and 1/sqrt(A) is bounded
+ by 1 ulp(X) [in precision p].
+ The extra low bits of the output X (if p is not a multiple of GMP_NUMB_BITS)
+ are set to 0.
+
+ Assumptions:
+ (1) A should be normalized, i.e., the most significant bit of a[an-1]
+ should be 1. If as=0, we have 1 <= A < 2; if as=1, we have 2 <= A < 4.
+ (2) p >= 12
+ (3) {a, an} and {x, n} should not overlap
+ (4) GMP_NUMB_BITS >= 12 and is even
+
+ Note: this routine is much more efficient when ap is small compared to p,
+ including the case where ap <= GMP_NUMB_BITS, thus it can be used to
+ implement an efficient mpfr_rec_sqrt_ui function.
+
+ Reference: Modern Computer Algebra, Richard Brent and Paul Zimmermann,
+ http://www.loria.fr/~zimmerma/mca/pub226.html
+*/
+static void
+mpfr_mpn_rec_sqrt (mp_ptr x, mpfr_prec_t p,
+ mp_srcptr a, mpfr_prec_t ap, int as)
+
+{
+ /* the following T1 and T2 are bipartite tables giving initial
+ approximation for the inverse square root, with 13-bit input split in
+ 5+4+4, and 11-bit output. More precisely, if 2048 <= i < 8192,
+ with i = a*2^8 + b*2^4 + c, we use for approximation of
+ 2048/sqrt(i/2048) the value x = T1[16*(a-8)+b] + T2[16*(a-8)+c].
+ The largest error is obtained for i = 2054, where x = 2044,
+ and 2048/sqrt(i/2048) = 2045.006576...
+ */
+ static short int T1[384] = {
+2040, 2033, 2025, 2017, 2009, 2002, 1994, 1987, 1980, 1972, 1965, 1958, 1951,
+1944, 1938, 1931, /* a=8 */
+1925, 1918, 1912, 1905, 1899, 1892, 1886, 1880, 1874, 1867, 1861, 1855, 1849,
+1844, 1838, 1832, /* a=9 */
+1827, 1821, 1815, 1810, 1804, 1799, 1793, 1788, 1783, 1777, 1772, 1767, 1762,
+1757, 1752, 1747, /* a=10 */
+1742, 1737, 1733, 1728, 1723, 1718, 1713, 1709, 1704, 1699, 1695, 1690, 1686,
+1681, 1677, 1673, /* a=11 */
+1669, 1664, 1660, 1656, 1652, 1647, 1643, 1639, 1635, 1631, 1627, 1623, 1619,
+1615, 1611, 1607, /* a=12 */
+1603, 1600, 1596, 1592, 1588, 1585, 1581, 1577, 1574, 1570, 1566, 1563, 1559,
+1556, 1552, 1549, /* a=13 */
+1545, 1542, 1538, 1535, 1532, 1528, 1525, 1522, 1518, 1515, 1512, 1509, 1505,
+1502, 1499, 1496, /* a=14 */
+1493, 1490, 1487, 1484, 1481, 1478, 1475, 1472, 1469, 1466, 1463, 1460, 1457,
+1454, 1451, 1449, /* a=15 */
+1446, 1443, 1440, 1438, 1435, 1432, 1429, 1427, 1424, 1421, 1419, 1416, 1413,
+1411, 1408, 1405, /* a=16 */
+1403, 1400, 1398, 1395, 1393, 1390, 1388, 1385, 1383, 1380, 1378, 1375, 1373,
+1371, 1368, 1366, /* a=17 */
+1363, 1360, 1358, 1356, 1353, 1351, 1349, 1346, 1344, 1342, 1340, 1337, 1335,
+1333, 1331, 1329, /* a=18 */
+1327, 1325, 1323, 1321, 1319, 1316, 1314, 1312, 1310, 1308, 1306, 1304, 1302,
+1300, 1298, 1296, /* a=19 */
+1294, 1292, 1290, 1288, 1286, 1284, 1282, 1280, 1278, 1276, 1274, 1272, 1270,
+1268, 1266, 1265, /* a=20 */
+1263, 1261, 1259, 1257, 1255, 1253, 1251, 1250, 1248, 1246, 1244, 1242, 1241,
+1239, 1237, 1235, /* a=21 */
+1234, 1232, 1230, 1229, 1227, 1225, 1223, 1222, 1220, 1218, 1217, 1215, 1213,
+1212, 1210, 1208, /* a=22 */
+1206, 1204, 1203, 1201, 1199, 1198, 1196, 1195, 1193, 1191, 1190, 1188, 1187,
+1185, 1184, 1182, /* a=23 */
+1181, 1180, 1178, 1177, 1175, 1174, 1172, 1171, 1169, 1168, 1166, 1165, 1163,
+1162, 1160, 1159, /* a=24 */
+1157, 1156, 1154, 1153, 1151, 1150, 1149, 1147, 1146, 1144, 1143, 1142, 1140,
+1139, 1137, 1136, /* a=25 */
+1135, 1133, 1132, 1131, 1129, 1128, 1127, 1125, 1124, 1123, 1121, 1120, 1119,
+1117, 1116, 1115, /* a=26 */
+1114, 1113, 1111, 1110, 1109, 1108, 1106, 1105, 1104, 1103, 1101, 1100, 1099,
+1098, 1096, 1095, /* a=27 */
+1093, 1092, 1091, 1090, 1089, 1087, 1086, 1085, 1084, 1083, 1081, 1080, 1079,
+1078, 1077, 1076, /* a=28 */
+1075, 1073, 1072, 1071, 1070, 1069, 1068, 1067, 1065, 1064, 1063, 1062, 1061,
+1060, 1059, 1058, /* a=29 */
+1057, 1056, 1055, 1054, 1052, 1051, 1050, 1049, 1048, 1047, 1046, 1045, 1044,
+1043, 1042, 1041, /* a=30 */
+1040, 1039, 1038, 1037, 1036, 1035, 1034, 1033, 1032, 1031, 1030, 1029, 1028,
+1027, 1026, 1025 /* a=31 */
+};
+ static unsigned char T2[384] = {
+ 7, 7, 6, 6, 5, 5, 4, 4, 4, 3, 3, 2, 2, 1, 1, 0, /* a=8 */
+ 6, 5, 5, 5, 4, 4, 3, 3, 3, 2, 2, 2, 1, 1, 0, 0, /* a=9 */
+ 5, 5, 4, 4, 4, 3, 3, 3, 2, 2, 2, 1, 1, 1, 0, 0, /* a=10 */
+ 4, 4, 3, 3, 3, 3, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, /* a=11 */
+ 3, 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0, /* a=12 */
+ 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0, 0, /* a=13 */
+ 3, 3, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 0, 0, 0, 0, /* a=14 */
+ 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, /* a=15 */
+ 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, /* a=16 */
+ 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, /* a=17 */
+ 3, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 0, /* a=18 */
+ 2, 2, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, /* a=19 */
+ 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, /* a=20 */
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, /* a=21 */
+ 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* a=22 */
+ 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, /* a=23 */
+ 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* a=24 */
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, /* a=25 */
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, /* a=26 */
+ 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* a=27 */
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, /* a=28 */
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, /* a=29 */
+ 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* a=30 */
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 /* a=31 */
+};
+ mp_size_t n = LIMB_SIZE(p); /* number of limbs of X */
+ mp_size_t an = LIMB_SIZE(ap); /* number of limbs of A */
+
+ /* A should be normalized */
+ MPFR_ASSERTD((a[an - 1] & MPFR_LIMB_HIGHBIT) != 0);
+ /* We should have enough bits in one limb and GMP_NUMB_BITS should be even.
+ Since that does not depend on MPFR, we always check this. */
+ MPFR_ASSERTN((GMP_NUMB_BITS >= 12) && ((GMP_NUMB_BITS & 1) == 0));
+ /* {a, an} and {x, n} should not overlap */
+ MPFR_ASSERTD((a + an <= x) || (x + n <= a));
+ MPFR_ASSERTD(p >= 11);
+
+ if (MPFR_UNLIKELY(an > n)) /* we can cut the input to n limbs */
+ {
+ a += an - n;
+ an = n;
+ }
+
+ if (p == 11) /* should happen only from recursive calls */
+ {
+ unsigned long i, ab, ac;
+ mp_limb_t t;
+
+ /* take the 12+as most significant bits of A */
+ i = a[an - 1] >> (GMP_NUMB_BITS - (12 + as));
+ /* if one wants faithful rounding for p=11, replace #if 0 by #if 1 */
+ ab = i >> 4;
+ ac = (ab & 0x3F0) | (i & 0x0F);
+ t = (mp_limb_t) T1[ab - 0x80] + (mp_limb_t) T2[ac - 0x80];
+ x[0] = t << (GMP_NUMB_BITS - p);
+ }
+ else /* p >= 12 */
+ {
+ mpfr_prec_t h, pl;
+ mp_ptr r, s, t, u;
+ mp_size_t xn, rn, th, ln, tn, sn, ahn, un;
+ mp_limb_t neg, cy, cu;
+ MPFR_TMP_DECL(marker);
+
+ /* h = max(11, ceil((p+3)/2)) is the bitsize of the recursive call */
+ h = (p < 18) ? 11 : (p >> 1) + 2;
+
+ xn = LIMB_SIZE(h); /* limb size of the recursive Xh */
+ rn = LIMB_SIZE(2 * h); /* a priori limb size of Xh^2 */
+ ln = n - xn; /* remaining limbs to be computed */
+
+ /* Since |Xh - A^{-1/2}| <= 2^{-h}, then by multiplying by Xh + A^{-1/2}
+ we get |Xh^2 - 1/A| <= 2^{-h+1}, thus |A*Xh^2 - 1| <= 2^{-h+3},
+ thus the h-3 most significant bits of t should be zero,
+ which is in fact h+1+as-3 because of the normalization of A.
+ This corresponds to th=floor((h+1+as-3)/GMP_NUMB_BITS) limbs. */
+ th = (h + 1 + as - 3) >> MPFR_LOG2_GMP_NUMB_BITS;
+ tn = LIMB_SIZE(2 * h + 1 + as);
+
+ /* we need h+1+as bits of a */
+ ahn = LIMB_SIZE(h + 1 + as); /* number of high limbs of A
+ needed for the recursive call*/
+ if (MPFR_UNLIKELY(ahn > an))
+ ahn = an;
+ mpfr_mpn_rec_sqrt (x + ln, h, a + an - ahn, ahn * GMP_NUMB_BITS, as);
+ /* the most h significant bits of X are set, X has ceil(h/GMP_NUMB_BITS)
+ limbs, the low (-h) % GMP_NUMB_BITS bits are zero */
+
+ MPFR_TMP_MARK (marker);
+ /* first step: square X in r, result is exact */
+ un = xn + (tn - th);
+ /* We use the same temporary buffer to store r and u: r needs 2*xn
+ limbs where u needs xn+(tn-th) limbs. Since tn can store at least
+ 2h bits, and th at most h bits, then tn-th can store at least h bits,
+ thus tn - th >= xn, and reserving the space for u is enough. */
+ MPFR_ASSERTD(2 * xn <= un);
+ u = r = (mp_ptr) MPFR_TMP_ALLOC (un * sizeof (mp_limb_t));
+ if (2 * h <= GMP_NUMB_BITS) /* xn=rn=1, and since p <= 2h-3, n=1,
+ thus ln = 0 */
+ {
+ MPFR_ASSERTD(ln == 0);
+ cy = x[0] >> (GMP_NUMB_BITS >> 1);
+ r ++;
+ r[0] = cy * cy;
+ }
+ else if (xn == 1) /* xn=1, rn=2 */
+ umul_ppmm(r[1], r[0], x[ln], x[ln]);
+ else
+ {
+ mpn_mul_n (r, x + ln, x + ln, xn);
+ if (rn < 2 * xn)
+ r ++;
+ }
+ /* now the 2h most significant bits of {r, rn} contains X^2, r has rn
+ limbs, and the low (-2h) % GMP_NUMB_BITS bits are zero */
+
+ /* Second step: s <- A * (r^2), and truncate the low ap bits,
+ i.e., at weight 2^{-2h} (s is aligned to the low significant bits)
+ */
+ sn = an + rn;
+ s = (mp_ptr) MPFR_TMP_ALLOC (sn * sizeof (mp_limb_t));
+ if (rn == 1) /* rn=1 implies n=1, since rn*GMP_NUMB_BITS >= 2h,
+ and 2h >= p+3 */
+ {
+ /* necessarily p <= GMP_NUMB_BITS-3: we can ignore the two low
+ bits from A */
+ /* since n=1, and we ensured an <= n, we also have an=1 */
+ MPFR_ASSERTD(an == 1);
+ umul_ppmm (s[1], s[0], r[0], a[0]);
+ }
+ else
+ {
+ /* we have p <= n * GMP_NUMB_BITS
+ 2h <= rn * GMP_NUMB_BITS with p+3 <= 2h <= p+4
+ thus n <= rn <= n + 1 */
+ MPFR_ASSERTD(rn <= n + 1);
+ /* since we ensured an <= n, we have an <= rn */
+ MPFR_ASSERTD(an <= rn);
+ mpn_mul (s, r, rn, a, an);
+ /* s should be near B^sn/2^(1+as), thus s[sn-1] is either
+ 100000... or 011111... if as=0, or
+ 010000... or 001111... if as=1.
+ We ignore the bits of s after the first 2h+1+as ones.
+ */
+ }
+
+ /* We ignore the bits of s after the first 2h+1+as ones: s has rn + an
+ limbs, where rn = LIMBS(2h), an=LIMBS(a), and tn = LIMBS(2h+1+as). */
+ t = s + sn - tn; /* pointer to low limb of the high part of t */
+ /* the upper h-3 bits of 1-t should be zero,
+ where 1 corresponds to the most significant bit of t[tn-1] if as=0,
+ and to the 2nd most significant bit of t[tn-1] if as=1 */
+
+ /* compute t <- 1 - t, which is B^tn - {t, tn+1},
+ with rounding toward -Inf, i.e., rounding the input t toward +Inf.
+ We could only modify the low tn - th limbs from t, but it gives only
+ a small speedup, and would make the code more complex.
+ */
+ neg = t[tn - 1] & (MPFR_LIMB_HIGHBIT >> as);
+ if (neg == 0) /* Ax^2 < 1: we have t = th + eps, where 0 <= eps < ulp(th)
+ is the part truncated above, thus 1 - t rounded to -Inf
+ is 1 - th - ulp(th) */
+ {
+ /* since the 1+as most significant bits of t are zero, set them
+ to 1 before the one-complement */
+ t[tn - 1] |= MPFR_LIMB_HIGHBIT | (MPFR_LIMB_HIGHBIT >> as);
+ MPFR_COM_N (t, t, tn);
+ /* we should add 1 here to get 1-th complement, and subtract 1 for
+ -ulp(th), thus we do nothing */
+ }
+ else /* negative case: we want 1 - t rounded toward -Inf, i.e.,
+ th + eps rounded toward +Inf, which is th + ulp(th):
+ we discard the bit corresponding to 1,
+ and we add 1 to the least significant bit of t */
+ {
+ t[tn - 1] ^= neg;
+ mpn_add_1 (t, t, tn, 1);
+ }
+ tn -= th; /* we know at least th = floor((h+1+as-3)/GMP_NUMB_BITS) of
+ the high limbs of {t, tn} are zero */
+
+ /* tn = rn - th, where rn * GMP_NUMB_BITS >= 2*h and
+ th * GMP_NUMB_BITS <= h+1+as-3, thus tn > 0 */
+ MPFR_ASSERTD(tn > 0);
+
+ /* u <- x * t, where {t, tn} contains at least h+3 bits,
+ and {x, xn} contains h bits, thus tn >= xn */
+ MPFR_ASSERTD(tn >= xn);
+ if (tn == 1) /* necessarily xn=1 */
+ umul_ppmm (u[1], u[0], t[0], x[ln]);
+ else
+ mpn_mul (u, t, tn, x + ln, xn);
+
+ /* we have already discarded the upper th high limbs of t, thus we only
+ have to consider the upper n - th limbs of u */
+ un = n - th; /* un cannot be zero, since p <= n*GMP_NUMB_BITS,
+ h = ceil((p+3)/2) <= (p+4)/2,
+ th*GMP_NUMB_BITS <= h-1 <= p/2+1,
+ thus (n-th)*GMP_NUMB_BITS >= p/2-1.
+ */
+ MPFR_ASSERTD(un > 0);
+ u += (tn + xn) - un; /* xn + tn - un = xn + (original_tn - th) - (n - th)
+ = xn + original_tn - n
+ = LIMBS(h) + LIMBS(2h+1+as) - LIMBS(p) > 0
+ since 2h >= p+3 */
+ MPFR_ASSERTD(tn + xn > un); /* will allow to access u[-1] below */
+
+ /* In case as=0, u contains |x*(1-Ax^2)/2|, which is exactly what we
+ need to add or subtract.
+ In case as=1, u contains |x*(1-Ax^2)/4|, thus we need to multiply
+ u by 2. */
+
+ if (as == 1)
+ /* shift on un+1 limbs to get most significant bit of u[-1] into
+ least significant bit of u[0] */
+ mpn_lshift (u - 1, u - 1, un + 1, 1);
+
+ pl = n * GMP_NUMB_BITS - p; /* low bits from x */
+ /* We want that the low pl bits are zero after rounding to nearest,
+ thus we round u to nearest at bit pl-1 of u[0] */
+ if (pl > 0)
+ {
+ cu = mpn_add_1 (u, u, un, u[0] & (MPFR_LIMB_ONE << (pl - 1)));
+ /* mask bits 0..pl-1 of u[0] */
+ u[0] &= ~MPFR_LIMB_MASK(pl);
+ }
+ else /* round bit is in u[-1] */
+ cu = mpn_add_1 (u, u, un, u[-1] >> (GMP_NUMB_BITS - 1));
+
+ /* We already have filled {x + ln, xn = n - ln}, and we want to add or
+ subtract cu*B^un + {u, un} at position x.
+ un = n - th, where th contains <= h+1+as-3<=h-1 bits
+ ln = n - xn, where xn contains >= h bits
+ thus un > ln.
+ Warning: ln might be zero.
+ */
+ MPFR_ASSERTD(un > ln);
+ /* we can have un = ln + 2, for example with GMP_NUMB_BITS=32 and
+ p=62, as=0, then h=33, n=2, th=0, xn=2, thus un=2 and ln=0. */
+ MPFR_ASSERTD(un == ln + 1 || un == ln + 2);
+ /* the high un-ln limbs of u will overlap the low part of {x+ln,xn},
+ we need to add or subtract the overlapping part {u + ln, un - ln} */
+ if (neg == 0)
+ {
+ if (ln > 0)
+ MPN_COPY (x, u, ln);
+ cy = mpn_add (x + ln, x + ln, xn, u + ln, un - ln);
+ /* add cu at x+un */
+ cy += mpn_add_1 (x + un, x + un, th, cu);
+ }
+ else /* negative case */
+ {
+ /* subtract {u+ln, un-ln} from {x+ln,un} */
+ cy = mpn_sub (x + ln, x + ln, xn, u + ln, un - ln);
+ /* carry cy is at x+un, like cu */
+ cy = mpn_sub_1 (x + un, x + un, th, cy + cu); /* n - un = th */
+ /* cy cannot be zero, since the most significant bit of Xh is 1,
+ and the correction is bounded by 2^{-h+3} */
+ MPFR_ASSERTD(cy == 0);
+ if (ln > 0)
+ {
+ MPFR_COM_N (x, u, ln);
+ /* we must add one for the 2-complement ... */
+ cy = mpn_add_1 (x, x, n, MPFR_LIMB_ONE);
+ /* ... and subtract 1 at x[ln], where n = ln + xn */
+ cy -= mpn_sub_1 (x + ln, x + ln, xn, MPFR_LIMB_ONE);
+ }
+ }
+
+ /* cy can be 1 when A=1, i.e., {a, n} = B^n. In that case we should
+ have X = B^n, and setting X to 1-2^{-p} satisfies the error bound
+ of 1 ulp. */
+ if (MPFR_UNLIKELY(cy != 0))
+ {
+ cy -= mpn_sub_1 (x, x, n, MPFR_LIMB_ONE << pl);
+ MPFR_ASSERTD(cy == 0);
+ }
+
+ MPFR_TMP_FREE (marker);
+ }
+}
+
+int
+mpfr_rec_sqrt (mpfr_ptr r, mpfr_srcptr u, mpfr_rnd_t rnd_mode)
+{
+ mpfr_prec_t rp, up, wp;
+ mp_size_t rn, wn;
+ int s, cy, inex;
+ mp_ptr x;
+ int out_of_place;
+ MPFR_TMP_DECL(marker);
+
+ MPFR_LOG_FUNC (("x[%#R]=%R rnd=%d", u, u, rnd_mode),
+ ("y[%#R]=%R inexact=%d", r, r, inex));
+
+ /* special values */
+ if (MPFR_UNLIKELY(MPFR_IS_SINGULAR(u)))
+ {
+ if (MPFR_IS_NAN(u))
+ {
+ MPFR_SET_NAN(r);
+ MPFR_RET_NAN;
+ }
+ else if (MPFR_IS_ZERO(u)) /* 1/sqrt(+0) = 1/sqrt(-0) = +Inf */
+ {
+ /* 0+ or 0- */
+ MPFR_SET_INF(r);
+ MPFR_SET_POS(r);
+ MPFR_RET(0); /* Inf is exact */
+ }
+ else
+ {
+ MPFR_ASSERTD(MPFR_IS_INF(u));
+ /* 1/sqrt(-Inf) = NAN */
+ if (MPFR_IS_NEG(u))
+ {
+ MPFR_SET_NAN(r);
+ MPFR_RET_NAN;
+ }
+ /* 1/sqrt(+Inf) = +0 */
+ MPFR_SET_POS(r);
+ MPFR_SET_ZERO(r);
+ MPFR_RET(0);
+ }
+ }
+
+ /* if u < 0, 1/sqrt(u) is NaN */
+ if (MPFR_UNLIKELY(MPFR_IS_NEG(u)))
+ {
+ MPFR_SET_NAN(r);
+ MPFR_RET_NAN;
+ }
+
+ MPFR_SET_POS(r);
+
+ rp = MPFR_PREC(r); /* output precision */
+ up = MPFR_PREC(u); /* input precision */
+ wp = rp + 11; /* initial working precision */
+
+ /* Let u = U*2^e, where e = EXP(u), and 1/2 <= U < 1.
+ If e is even, we compute an approximation of X of (4U)^{-1/2},
+ and the result is X*2^(-(e-2)/2) [case s=1].
+ If e is odd, we compute an approximation of X of (2U)^{-1/2},
+ and the result is X*2^(-(e-1)/2) [case s=0]. */
+
+ /* parity of the exponent of u */
+ s = 1 - ((mpfr_uexp_t) MPFR_GET_EXP (u) & 1);
+
+ rn = LIMB_SIZE(rp);
+
+ /* for the first iteration, if rp + 11 fits into rn limbs, we round up
+ up to a full limb to maximize the chance of rounding, while avoiding
+ to allocate extra space */
+ wp = rp + 11;
+ if (wp < rn * GMP_NUMB_BITS)
+ wp = rn * GMP_NUMB_BITS;
+ for (;;)
+ {
+ MPFR_TMP_MARK (marker);
+ wn = LIMB_SIZE(wp);
+ out_of_place = (r == u) || (wn > rn);
+ if (out_of_place)
+ x = (mp_ptr) MPFR_TMP_ALLOC (wn * sizeof (mp_limb_t));
+ else
+ x = MPFR_MANT(r);
+ mpfr_mpn_rec_sqrt (x, wp, MPFR_MANT(u), up, s);
+ /* If the input was not truncated, the error is at most one ulp;
+ if the input was truncated, the error is at most two ulps
+ (see algorithms.tex). */
+ if (MPFR_LIKELY (mpfr_round_p (x, wn, wp - (wp < up),
+ rp + (rnd_mode == MPFR_RNDN))))
+ break;
+
+ /* We detect only now the exact case where u=2^(2e), to avoid
+ slowing down the average case. This can happen only when the
+ mantissa is exactly 1/2 and the exponent is odd. */
+ if (s == 0 && mpfr_cmp_ui_2exp (u, 1, MPFR_EXP(u) - 1) == 0)
+ {
+ mpfr_prec_t pl = wn * GMP_NUMB_BITS - wp;
+
+ /* we should have x=111...111 */
+ mpn_add_1 (x, x, wn, MPFR_LIMB_ONE << pl);
+ x[wn - 1] = MPFR_LIMB_HIGHBIT;
+ s += 2;
+ break; /* go through */
+ }
+ MPFR_TMP_FREE(marker);
+
+ wp += GMP_NUMB_BITS;
+ }
+ cy = mpfr_round_raw (MPFR_MANT(r), x, wp, 0, rp, rnd_mode, &inex);
+ MPFR_EXP(r) = - (MPFR_EXP(u) - 1 - s) / 2;
+ if (MPFR_UNLIKELY(cy != 0))
+ {
+ MPFR_EXP(r) ++;
+ MPFR_MANT(r)[rn - 1] = MPFR_LIMB_HIGHBIT;
+ }
+ MPFR_TMP_FREE(marker);
+ return mpfr_check_range (r, inex, rnd_mode);
+}
diff --git a/src/reldiff.c b/src/reldiff.c
new file mode 100644
index 000000000..979049135
--- /dev/null
+++ b/src/reldiff.c
@@ -0,0 +1,73 @@
+/* mpfr_reldiff -- compute relative difference of two floating-point numbers.
+
+Copyright 2000, 2001, 2004, 2005, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#include "mpfr-impl.h"
+
+/* reldiff(b, c) = abs(b-c)/b */
+void
+mpfr_reldiff (mpfr_ptr a, mpfr_srcptr b, mpfr_srcptr c, mpfr_rnd_t rnd_mode)
+{
+ mpfr_t b_copy;
+
+ if (MPFR_ARE_SINGULAR (b, c))
+ {
+ if (MPFR_IS_NAN(b) || MPFR_IS_NAN(c))
+ {
+ MPFR_SET_NAN(a);
+ return;
+ }
+ else if (MPFR_IS_INF(b))
+ {
+ if (MPFR_IS_INF (c) && (MPFR_SIGN (c) == MPFR_SIGN (b)))
+ MPFR_SET_ZERO(a);
+ else
+ MPFR_SET_NAN(a);
+ return;
+ }
+ else if (MPFR_IS_INF(c))
+ {
+ MPFR_SET_SAME_SIGN (a, b);
+ MPFR_SET_INF (a);
+ return;
+ }
+ else if (MPFR_IS_ZERO(b)) /* reldiff = abs(c)/c = sign(c) */
+ {
+ mpfr_set_si (a, MPFR_INT_SIGN (c), rnd_mode);
+ return;
+ }
+ /* Fall through */
+ }
+
+ if (a == b)
+ {
+ mpfr_init2 (b_copy, MPFR_PREC(b));
+ mpfr_set (b_copy, b, MPFR_RNDN);
+ }
+
+ mpfr_sub (a, b, c, rnd_mode);
+ mpfr_abs (a, a, rnd_mode); /* for compatibility with MPF */
+ mpfr_div (a, a, (a == b) ? b_copy : b, rnd_mode);
+
+ if (a == b)
+ mpfr_clear (b_copy);
+
+}
diff --git a/src/rem1.c b/src/rem1.c
new file mode 100644
index 000000000..381a53e64
--- /dev/null
+++ b/src/rem1.c
@@ -0,0 +1,231 @@
+/* mpfr_rem1 -- internal function
+ mpfr_fmod -- compute the floating-point remainder of x/y
+ mpfr_remquo and mpfr_remainder -- argument reduction functions
+
+Copyright 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+# include "mpfr-impl.h"
+
+/* we return as many bits as we can, keeping just one bit for the sign */
+# define WANTED_BITS (sizeof(long) * CHAR_BIT - 1)
+
+/*
+ rem1 works as follows:
+ The first rounding mode rnd_q indicate if we are actually computing
+ a fmod (MPFR_RNDZ) or a remainder/remquo (MPFR_RNDN).
+
+ Let q = x/y rounded to an integer in the direction rnd_q.
+ Put x - q*y in rem, rounded according to rnd.
+ If quo is not null, the value stored in *quo has the sign of q,
+ and agrees with q with the 2^n low order bits.
+ In other words, *quo = q (mod 2^n) and *quo * q >= 0.
+ If rem is zero, then it has the sign of x.
+ The returned 'int' is the inexact flag giving the place of rem wrt x - q*y.
+
+ If x or y is NaN: *quo is undefined, rem is NaN.
+ If x is Inf, whatever y: *quo is undefined, rem is NaN.
+ If y is Inf, x not NaN nor Inf: *quo is 0, rem is x.
+ If y is 0, whatever x: *quo is undefined, rem is NaN.
+ If x is 0, whatever y (not NaN nor 0): *quo is 0, rem is x.
+
+ Otherwise if x and y are neither NaN, Inf nor 0, q is always defined,
+ thus *quo is.
+ Since |x - q*y| <= y/2, no overflow is possible.
+ Only an underflow is possible when y is very small.
+ */
+
+static int
+mpfr_rem1 (mpfr_ptr rem, long *quo, mpfr_rnd_t rnd_q,
+ mpfr_srcptr x, mpfr_srcptr y, mpfr_rnd_t rnd)
+{
+ mpfr_exp_t ex, ey;
+ int compare, inex, q_is_odd, sign, signx = MPFR_SIGN (x);
+ mpz_t mx, my, r;
+
+ MPFR_ASSERTD (rnd_q == MPFR_RNDN || rnd_q == MPFR_RNDZ);
+
+ if (MPFR_UNLIKELY (MPFR_IS_SINGULAR (x) || MPFR_IS_SINGULAR (y)))
+ {
+ if (MPFR_IS_NAN (x) || MPFR_IS_NAN (y) || MPFR_IS_INF (x)
+ || MPFR_IS_ZERO (y))
+ {
+ /* for remquo, quo is undefined */
+ MPFR_SET_NAN (rem);
+ MPFR_RET_NAN;
+ }
+ else /* either y is Inf and x is 0 or non-special,
+ or x is 0 and y is non-special,
+ in both cases the quotient is zero. */
+ {
+ if (quo)
+ *quo = 0;
+ return mpfr_set (rem, x, rnd);
+ }
+ }
+
+ /* now neither x nor y is NaN, Inf or zero */
+
+ mpz_init (mx);
+ mpz_init (my);
+ mpz_init (r);
+
+ ex = mpfr_get_z_2exp (mx, x); /* x = mx*2^ex */
+ ey = mpfr_get_z_2exp (my, y); /* y = my*2^ey */
+
+ /* to get rid of sign problems, we compute it separately:
+ quo(-x,-y) = quo(x,y), rem(-x,-y) = -rem(x,y)
+ quo(-x,y) = -quo(x,y), rem(-x,y) = -rem(x,y)
+ thus quo = sign(x/y)*quo(|x|,|y|), rem = sign(x)*rem(|x|,|y|) */
+ sign = (signx == MPFR_SIGN (y)) ? 1 : -1;
+ mpz_abs (mx, mx);
+ mpz_abs (my, my);
+ q_is_odd = 0;
+
+ /* divide my by 2^k if possible to make operations mod my easier */
+ {
+ unsigned long k = mpz_scan1 (my, 0);
+ ey += k;
+ mpz_fdiv_q_2exp (my, my, k);
+ }
+
+ if (ex <= ey)
+ {
+ /* q = x/y = mx/(my*2^(ey-ex)) */
+ mpz_mul_2exp (my, my, ey - ex); /* divide mx by my*2^(ey-ex) */
+ if (rnd_q == MPFR_RNDZ)
+ /* 0 <= |r| <= |my|, r has the same sign as mx */
+ mpz_tdiv_qr (mx, r, mx, my);
+ else
+ /* 0 <= |r| <= |my|, r has the same sign as my */
+ mpz_fdiv_qr (mx, r, mx, my);
+
+ if (rnd_q == MPFR_RNDN)
+ q_is_odd = mpz_tstbit (mx, 0);
+ if (quo) /* mx is the quotient */
+ {
+ mpz_tdiv_r_2exp (mx, mx, WANTED_BITS);
+ *quo = mpz_get_si (mx);
+ }
+ }
+ else /* ex > ey */
+ {
+ if (quo) /* remquo case */
+ /* for remquo, to get the low WANTED_BITS more bits of the quotient,
+ we first compute R = X mod Y*2^WANTED_BITS, where X and Y are
+ defined below. Then the low WANTED_BITS of the quotient are
+ floor(R/Y). */
+ mpz_mul_2exp (my, my, WANTED_BITS); /* 2^WANTED_BITS*Y */
+
+ else if (rnd_q == MPFR_RNDN) /* remainder case */
+ /* Let X = mx*2^(ex-ey) and Y = my. Then both X and Y are integers.
+ Assume X = R mod Y, then x = X*2^ey = R*2^ey mod (Y*2^ey=y).
+ To be able to perform the rounding, we need the least significant
+ bit of the quotient, i.e., one more bit in the remainder,
+ which is obtained by dividing by 2Y. */
+ mpz_mul_2exp (my, my, 1); /* 2Y */
+
+ mpz_set_ui (r, 2);
+ mpz_powm_ui (r, r, ex - ey, my); /* 2^(ex-ey) mod my */
+ mpz_mul (r, r, mx);
+ mpz_mod (r, r, my);
+
+ if (quo) /* now 0 <= r < 2^WANTED_BITS*Y */
+ {
+ mpz_fdiv_q_2exp (my, my, WANTED_BITS); /* back to Y */
+ mpz_tdiv_qr (mx, r, r, my);
+ /* oldr = mx*my + newr */
+ *quo = mpz_get_si (mx);
+ q_is_odd = *quo & 1;
+ }
+ else if (rnd_q == MPFR_RNDN) /* now 0 <= r < 2Y in the remainder case */
+ {
+ mpz_fdiv_q_2exp (my, my, 1); /* back to Y */
+ /* least significant bit of q */
+ q_is_odd = mpz_cmpabs (r, my) >= 0;
+ if (q_is_odd)
+ mpz_sub (r, r, my);
+ }
+ /* now 0 <= |r| < |my|, and if needed,
+ q_is_odd is the least significant bit of q */
+ }
+
+ if (mpz_cmp_ui (r, 0) == 0)
+ {
+ inex = mpfr_set_ui (rem, 0, MPFR_RNDN);
+ /* take into account sign of x */
+ if (signx < 0)
+ mpfr_neg (rem, rem, MPFR_RNDN);
+ }
+ else
+ {
+ if (rnd_q == MPFR_RNDN)
+ {
+ /* FIXME: the comparison 2*r < my could be done more efficiently
+ at the mpn level */
+ mpz_mul_2exp (r, r, 1);
+ compare = mpz_cmpabs (r, my);
+ mpz_fdiv_q_2exp (r, r, 1);
+ compare = ((compare > 0) ||
+ ((rnd_q == MPFR_RNDN) && (compare == 0) && q_is_odd));
+ /* if compare != 0, we need to subtract my to r, and add 1 to quo */
+ if (compare)
+ {
+ mpz_sub (r, r, my);
+ if (quo && (rnd_q == MPFR_RNDN))
+ *quo += 1;
+ }
+ }
+ /* take into account sign of x */
+ if (signx < 0)
+ mpz_neg (r, r);
+ inex = mpfr_set_z (rem, r, rnd);
+ /* if ex > ey, rem should be multiplied by 2^ey, else by 2^ex */
+ MPFR_EXP (rem) += (ex > ey) ? ey : ex;
+ }
+
+ if (quo)
+ *quo *= sign;
+
+ mpz_clear (mx);
+ mpz_clear (my);
+ mpz_clear (r);
+
+ return inex;
+}
+
+int
+mpfr_remainder (mpfr_ptr rem, mpfr_srcptr x, mpfr_srcptr y, mpfr_rnd_t rnd)
+{
+ return mpfr_rem1 (rem, (long *) 0, MPFR_RNDN, x, y, rnd);
+}
+
+int
+mpfr_remquo (mpfr_ptr rem, long *quo,
+ mpfr_srcptr x, mpfr_srcptr y, mpfr_rnd_t rnd)
+{
+ return mpfr_rem1 (rem, quo, MPFR_RNDN, x, y, rnd);
+}
+
+int
+mpfr_fmod (mpfr_ptr rem, mpfr_srcptr x, mpfr_srcptr y, mpfr_rnd_t rnd)
+{
+ return mpfr_rem1 (rem, (long *) 0, MPFR_RNDZ, x, y, rnd);
+}
diff --git a/src/rint.c b/src/rint.c
new file mode 100644
index 000000000..6e8047f17
--- /dev/null
+++ b/src/rint.c
@@ -0,0 +1,437 @@
+/* mpfr_rint -- Round to an integer.
+
+Copyright 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#include "mpfr-impl.h"
+
+/* Merge the following mpfr_rint code with mpfr_round_raw_generic? */
+
+/* Set r to u rounded to an integer in direction rnd_mode (MPFR_RNDNA is
+   also accepted: round to nearest, ties away from zero).
+   Return value (see uflags below): 0 iff u is an integer exactly
+   representable in r; otherwise a ternary value with the usual sign
+   (positive iff r > u) and absolute value 1 if u is an integer not
+   representable in r, 2 if u is not an integer. */
+int
+mpfr_rint (mpfr_ptr r, mpfr_srcptr u, mpfr_rnd_t rnd_mode)
+{
+  int sign;
+  int rnd_away;
+  mpfr_exp_t exp;
+
+  if (MPFR_UNLIKELY( MPFR_IS_SINGULAR(u) ))
+    {
+      if (MPFR_IS_NAN(u))
+        {
+          MPFR_SET_NAN(r);
+          MPFR_RET_NAN;
+        }
+      MPFR_SET_SAME_SIGN(r, u);
+      if (MPFR_IS_INF(u))
+        {
+          MPFR_SET_INF(r);
+          MPFR_RET(0); /* infinity is exact */
+        }
+      else /* now u is zero */
+        {
+          MPFR_ASSERTD(MPFR_IS_ZERO(u));
+          MPFR_SET_ZERO(r);
+          MPFR_RET(0); /* zero is exact */
+        }
+    }
+  MPFR_SET_SAME_SIGN (r, u); /* Does nothing if r==u */
+
+  sign = MPFR_INT_SIGN (u);
+  exp = MPFR_GET_EXP (u);
+
+  rnd_away =
+    rnd_mode == MPFR_RNDD ? sign < 0 :
+    rnd_mode == MPFR_RNDU ? sign > 0 :
+    rnd_mode == MPFR_RNDZ ? 0 :
+    rnd_mode == MPFR_RNDA ? 1 :
+    -1; /* round to nearest-even (RNDN) or nearest-away (RNDNA) */
+
+  /* rnd_away:
+     1 if round away from zero,
+     0 if round to zero,
+     -1 if not decided yet.
+   */
+
+  if (MPFR_UNLIKELY (exp <= 0)) /* 0 < |u| < 1 ==> round |u| to 0 or 1 */
+    {
+      /* Note: in the MPFR_RNDN mode, 0.5 must be rounded to 0. */
+      if (rnd_away != 0 &&
+          (rnd_away > 0 ||
+           (exp == 0 && (rnd_mode == MPFR_RNDNA ||
+                         !mpfr_powerof2_raw (u)))))
+        {
+          mp_limb_t *rp;
+          mp_size_t rm;
+
+          rp = MPFR_MANT(r);
+          rm = (MPFR_PREC(r) - 1) / GMP_NUMB_BITS;
+          rp[rm] = MPFR_LIMB_HIGHBIT;
+          MPN_ZERO(rp, rm);
+          MPFR_SET_EXP (r, 1); /* |r| = 1 */
+          MPFR_RET(sign > 0 ? 2 : -2);
+        }
+      else
+        {
+          MPFR_SET_ZERO(r); /* r = 0 */
+          MPFR_RET(sign > 0 ? -2 : 2);
+        }
+    }
+  else /* exp > 0, |u| >= 1 */
+    {
+      mp_limb_t *up, *rp;
+      mp_size_t un, rn, ui;
+      int sh, idiff;
+      int uflags;
+
+      /*
+       * uflags will contain:
+       *   _ 0 if u is an integer representable in r,
+       *   _ 1 if u is an integer not representable in r,
+       *   _ 2 if u is not an integer.
+       */
+
+      up = MPFR_MANT(u);
+      rp = MPFR_MANT(r);
+
+      un = MPFR_LIMB_SIZE(u);
+      rn = MPFR_LIMB_SIZE(r);
+      MPFR_UNSIGNED_MINUS_MODULO (sh, MPFR_PREC (r));
+
+      MPFR_SET_EXP (r, exp); /* Does nothing if r==u */
+
+      if ((exp - 1) / GMP_NUMB_BITS >= un)
+        {
+          ui = un;
+          idiff = 0;
+          uflags = 0; /* u is an integer, representable or not in r */
+        }
+      else
+        {
+          mp_size_t uj;
+
+          ui = (exp - 1) / GMP_NUMB_BITS + 1; /* #limbs of the int part */
+          MPFR_ASSERTD (un >= ui);
+          uj = un - ui; /* lowest limb of the integer part */
+          idiff = exp % GMP_NUMB_BITS; /* #int-part bits in up[uj] or 0 */
+
+          uflags = idiff == 0 || (up[uj] << idiff) == 0 ? 0 : 2;
+          if (uflags == 0)
+            while (uj > 0)
+              if (up[--uj] != 0)
+                {
+                  uflags = 2;
+                  break;
+                }
+        }
+
+      if (ui > rn)
+        {
+          /* More limbs in the integer part of u than in r.
+             Just round u with the precision of r. */
+          MPFR_ASSERTD (rp != up && un > rn);
+          MPN_COPY (rp, up + (un - rn), rn); /* r != u */
+          if (rnd_away < 0)
+            {
+              /* This is a rounding to nearest mode (MPFR_RNDN or MPFR_RNDNA).
+                 Decide the rounding direction here. */
+              if (rnd_mode == MPFR_RNDN &&
+                  (rp[0] & (MPFR_LIMB_ONE << sh)) == 0)
+                { /* halfway cases rounded toward zero */
+                  mp_limb_t a, b;
+                  /* a: rounding bit and some of the following bits */
+                  /* b: boundary for a (weight of the rounding bit in a) */
+                  if (sh != 0)
+                    {
+                      a = rp[0] & ((MPFR_LIMB_ONE << sh) - 1);
+                      b = MPFR_LIMB_ONE << (sh - 1);
+                    }
+                  else
+                    {
+                      a = up[un - rn - 1];
+                      b = MPFR_LIMB_HIGHBIT;
+                    }
+                  rnd_away = a > b;
+                  if (a == b)
+                    {
+                      mp_size_t i;
+                      for (i = un - rn - 1 - (sh == 0); i >= 0; i--)
+                        if (up[i] != 0)
+                          {
+                            rnd_away = 1;
+                            break;
+                          }
+                    }
+                }
+              else /* halfway cases rounded away from zero */
+                rnd_away = /* rounding bit */
+                  ((sh != 0 && (rp[0] & (MPFR_LIMB_ONE << (sh - 1))) != 0) ||
+                   (sh == 0 && (up[un - rn - 1] & MPFR_LIMB_HIGHBIT) != 0));
+            }
+          if (uflags == 0)
+            { /* u is an integer; determine if it is representable in r */
+              if (sh != 0 && rp[0] << (GMP_NUMB_BITS - sh) != 0)
+                uflags = 1; /* u is not representable in r */
+              else
+                {
+                  mp_size_t i;
+                  for (i = un - rn - 1; i >= 0; i--)
+                    if (up[i] != 0)
+                      {
+                        uflags = 1; /* u is not representable in r */
+                        break;
+                      }
+                }
+            }
+        }
+      else /* ui <= rn */
+        {
+          mp_size_t uj, rj;
+          int ush;
+
+          uj = un - ui; /* lowest limb of the integer part in u */
+          rj = rn - ui; /* lowest limb of the integer part in r */
+
+          if (MPFR_LIKELY (rp != up))
+            MPN_COPY(rp + rj, up + uj, ui);
+
+          /* Ignore the lowest rj limbs, all equal to zero. */
+          rp += rj;
+          rn = ui;
+
+          /* number of fractional bits in whole rp[0] */
+          ush = idiff == 0 ? 0 : GMP_NUMB_BITS - idiff;
+
+          if (rj == 0 && ush < sh)
+            {
+              /* If u is an integer (uflags == 0), we need to determine
+                 if it is representable in r, i.e. if its sh - ush bits
+                 in the non-significant part of r are all 0. */
+              if (uflags == 0 && (rp[0] & ((MPFR_LIMB_ONE << sh) -
+                                           (MPFR_LIMB_ONE << ush))) != 0)
+                uflags = 1; /* u is an integer not representable in r */
+            }
+          else /* The integer part of u fits in r, we'll round to it. */
+            sh = ush;
+
+          if (rnd_away < 0)
+            {
+              /* This is a rounding to nearest mode.
+                 Decide the rounding direction here. */
+              if (uj == 0 && sh == 0)
+                rnd_away = 0; /* rounding bit = 0 (not represented in u) */
+              else if (rnd_mode == MPFR_RNDN &&
+                       (rp[0] & (MPFR_LIMB_ONE << sh)) == 0)
+                { /* halfway cases rounded toward zero */
+                  mp_limb_t a, b;
+                  /* a: rounding bit and some of the following bits */
+                  /* b: boundary for a (weight of the rounding bit in a) */
+                  if (sh != 0)
+                    {
+                      a = rp[0] & ((MPFR_LIMB_ONE << sh) - 1);
+                      b = MPFR_LIMB_ONE << (sh - 1);
+                    }
+                  else
+                    {
+                      MPFR_ASSERTD (uj >= 1); /* see above */
+                      a = up[uj - 1];
+                      b = MPFR_LIMB_HIGHBIT;
+                    }
+                  rnd_away = a > b;
+                  if (a == b)
+                    {
+                      mp_size_t i;
+                      for (i = uj - 1 - (sh == 0); i >= 0; i--)
+                        if (up[i] != 0)
+                          {
+                            rnd_away = 1;
+                            break;
+                          }
+                    }
+                }
+              else /* halfway cases rounded away from zero */
+                rnd_away = /* rounding bit */
+                  ((sh != 0 && (rp[0] & (MPFR_LIMB_ONE << (sh - 1))) != 0) ||
+                   (sh == 0 && (MPFR_ASSERTD (uj >= 1),
+                                up[uj - 1] & MPFR_LIMB_HIGHBIT) != 0));
+            }
+          /* Now we can make the low rj limbs to 0 */
+          MPN_ZERO (rp-rj, rj);
+        }
+
+      if (sh != 0)
+        rp[0] &= MP_LIMB_T_MAX << sh;
+
+      /* If u is a representable integer, there is no rounding. */
+      if (uflags == 0)
+        MPFR_RET(0);
+
+      MPFR_ASSERTD (rnd_away >= 0); /* rounding direction is defined */
+      /* mpn_add_1 returns a carry iff the increment overflowed rn limbs,
+         in which case the result is the next power of two (or overflows). */
+      if (rnd_away && mpn_add_1(rp, rp, rn, MPFR_LIMB_ONE << sh))
+        {
+          if (exp == __gmpfr_emax)
+            return mpfr_overflow(r, rnd_mode, MPFR_SIGN(r)) >= 0 ?
+              uflags : -uflags;
+          else
+            {
+              MPFR_SET_EXP(r, exp + 1);
+              rp[rn-1] = MPFR_LIMB_HIGHBIT;
+            }
+        }
+
+      /* (rnd_away ^ (sign < 0)) != 0 iff r > u, hence positive ternary */
+      MPFR_RET (rnd_away ^ (sign < 0) ? uflags : -uflags);
+    }  /* exp > 0, |u| >= 1 */
+}
+
+#undef mpfr_round
+
+/* Round to nearest integer, ties away from zero (MPFR_RNDNA). */
+int
+mpfr_round (mpfr_ptr r, mpfr_srcptr u)
+{
+  return mpfr_rint (r, u, MPFR_RNDNA);
+}
+
+#undef mpfr_trunc
+
+/* Round to the next integer toward zero. */
+int
+mpfr_trunc (mpfr_ptr r, mpfr_srcptr u)
+{
+  return mpfr_rint (r, u, MPFR_RNDZ);
+}
+
+#undef mpfr_ceil
+
+/* Round to the next integer toward +Inf. */
+int
+mpfr_ceil (mpfr_ptr r, mpfr_srcptr u)
+{
+  return mpfr_rint (r, u, MPFR_RNDU);
+}
+
+#undef mpfr_floor
+
+/* Round to the next integer toward -Inf. */
+int
+mpfr_floor (mpfr_ptr r, mpfr_srcptr u)
+{
+  return mpfr_rint (r, u, MPFR_RNDD);
+}
+
+#undef mpfr_rint_round
+
+/* r = round(u) (nearest integer, ties away from zero), then rounded into
+   r's precision with rnd_mode.  Singular and integer inputs are just
+   copied; otherwise round(u) is computed at u's precision (exact unless
+   it overflows, which MPFR_BLOCK detects). */
+int
+mpfr_rint_round (mpfr_ptr r, mpfr_srcptr u, mpfr_rnd_t rnd_mode)
+{
+  if (MPFR_UNLIKELY( MPFR_IS_SINGULAR(u) ) || mpfr_integer_p (u))
+    return mpfr_set (r, u, rnd_mode);
+  else
+    {
+      mpfr_t tmp;
+      int inex;
+      MPFR_SAVE_EXPO_DECL (expo);
+      MPFR_BLOCK_DECL (flags);
+
+      MPFR_SAVE_EXPO_MARK (expo);
+      mpfr_init2 (tmp, MPFR_PREC (u));
+      /* round(u) is representable in tmp unless an overflow occurs */
+      MPFR_BLOCK (flags, mpfr_round (tmp, u));
+      inex = (MPFR_OVERFLOW (flags)
+              ? mpfr_overflow (r, rnd_mode, MPFR_SIGN (u))
+              : mpfr_set (r, tmp, rnd_mode));
+      mpfr_clear (tmp);
+      MPFR_SAVE_EXPO_FREE (expo);
+      return mpfr_check_range (r, inex, rnd_mode);
+    }
+}
+
+#undef mpfr_rint_trunc
+
+/* r = trunc(u), then rounded into r's precision with rnd_mode.
+   trunc never increases the magnitude, so no overflow check is needed
+   (unlike mpfr_rint_round / mpfr_rint_ceil / mpfr_rint_floor). */
+int
+mpfr_rint_trunc (mpfr_ptr r, mpfr_srcptr u, mpfr_rnd_t rnd_mode)
+{
+  if (MPFR_UNLIKELY( MPFR_IS_SINGULAR(u) ) || mpfr_integer_p (u))
+    return mpfr_set (r, u, rnd_mode);
+  else
+    {
+      mpfr_t tmp;
+      int inex;
+      MPFR_SAVE_EXPO_DECL (expo);
+
+      MPFR_SAVE_EXPO_MARK (expo);
+      mpfr_init2 (tmp, MPFR_PREC (u));
+      /* trunc(u) is always representable in tmp */
+      mpfr_trunc (tmp, u);
+      inex = mpfr_set (r, tmp, rnd_mode);
+      mpfr_clear (tmp);
+      MPFR_SAVE_EXPO_FREE (expo);
+      return mpfr_check_range (r, inex, rnd_mode);
+    }
+}
+
+#undef mpfr_rint_ceil
+
+/* r = ceil(u), then rounded into r's precision with rnd_mode.
+   An overflow from ceil is necessarily toward +Inf, hence the
+   MPFR_SIGN_POS passed to mpfr_overflow. */
+int
+mpfr_rint_ceil (mpfr_ptr r, mpfr_srcptr u, mpfr_rnd_t rnd_mode)
+{
+  if (MPFR_UNLIKELY( MPFR_IS_SINGULAR(u) ) || mpfr_integer_p (u))
+    return mpfr_set (r, u, rnd_mode);
+  else
+    {
+      mpfr_t tmp;
+      int inex;
+      MPFR_SAVE_EXPO_DECL (expo);
+      MPFR_BLOCK_DECL (flags);
+
+      MPFR_SAVE_EXPO_MARK (expo);
+      mpfr_init2 (tmp, MPFR_PREC (u));
+      /* ceil(u) is representable in tmp unless an overflow occurs */
+      MPFR_BLOCK (flags, mpfr_ceil (tmp, u));
+      inex = (MPFR_OVERFLOW (flags)
+              ? mpfr_overflow (r, rnd_mode, MPFR_SIGN_POS)
+              : mpfr_set (r, tmp, rnd_mode));
+      mpfr_clear (tmp);
+      MPFR_SAVE_EXPO_FREE (expo);
+      return mpfr_check_range (r, inex, rnd_mode);
+    }
+}
+
+#undef mpfr_rint_floor
+
+/* r = floor(u), then rounded into r's precision with rnd_mode.
+   An overflow from floor is necessarily toward -Inf, hence the
+   MPFR_SIGN_NEG passed to mpfr_overflow. */
+int
+mpfr_rint_floor (mpfr_ptr r, mpfr_srcptr u, mpfr_rnd_t rnd_mode)
+{
+  if (MPFR_UNLIKELY( MPFR_IS_SINGULAR(u) ) || mpfr_integer_p (u))
+    return mpfr_set (r, u, rnd_mode);
+  else
+    {
+      mpfr_t tmp;
+      int inex;
+      MPFR_SAVE_EXPO_DECL (expo);
+      MPFR_BLOCK_DECL (flags);
+
+      MPFR_SAVE_EXPO_MARK (expo);
+      mpfr_init2 (tmp, MPFR_PREC (u));
+      /* floor(u) is representable in tmp unless an overflow occurs */
+      MPFR_BLOCK (flags, mpfr_floor (tmp, u));
+      inex = (MPFR_OVERFLOW (flags)
+              ? mpfr_overflow (r, rnd_mode, MPFR_SIGN_NEG)
+              : mpfr_set (r, tmp, rnd_mode));
+      mpfr_clear (tmp);
+      MPFR_SAVE_EXPO_FREE (expo);
+      return mpfr_check_range (r, inex, rnd_mode);
+    }
+}
diff --git a/src/root.c b/src/root.c
new file mode 100644
index 000000000..40fce3750
--- /dev/null
+++ b/src/root.c
@@ -0,0 +1,199 @@
+/* mpfr_root -- kth root.
+
+Copyright 2005, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#define MPFR_NEED_LONGLONG_H
+#include "mpfr-impl.h"
+
+ /* The computation of y = x^(1/k) is done as follows:
+
+ Let x = sign * m * 2^(k*e) where m is an integer
+
+ with 2^(k*(n-1)) <= m < 2^(k*n) where n = PREC(y)
+
+ and m = s^k + r where 0 <= r and m < (s+1)^k
+
+ we want that s has n bits i.e. s >= 2^(n-1), or m >= 2^(k*(n-1))
+ i.e. m must have at least k*(n-1)+1 bits
+
+ then, not taking into account the sign, the result will be
+ x^(1/k) = s * 2^e or (s+1) * 2^e according to the rounding mode.
+ */
+
+/* Set y = x^(1/k) rounded in direction rnd_mode; see the algorithm
+   description above.  Returns the usual ternary value, via
+   mpfr_check_range.  k == 0 yields NaN (the x^(+Inf) semantics are
+   kept in the disabled #if 0 block); k == 1 reduces to mpfr_set. */
+int
+mpfr_root (mpfr_ptr y, mpfr_srcptr x, unsigned long k, mpfr_rnd_t rnd_mode)
+{
+  mpz_t m;
+  mpfr_exp_t e, r, sh;
+  mpfr_prec_t n, size_m, tmp;
+  int inexact, negative;
+  MPFR_SAVE_EXPO_DECL (expo);
+
+  if (MPFR_UNLIKELY (k <= 1))
+    {
+      if (k < 1) /* k==0 => y=x^(1/0)=x^(+Inf) */
+#if 0
+        /* For 0 <= x < 1 => +0.
+           For x = 1      => 1.
+           For x > 1,     => +Inf.
+           For x < 0      => NaN.
+        */
+        {
+          if (MPFR_IS_NEG (x) && !MPFR_IS_ZERO (x))
+            {
+              MPFR_SET_NAN (y);
+              MPFR_RET_NAN;
+            }
+          inexact = mpfr_cmp (x, __gmpfr_one);
+          if (inexact == 0)
+            return mpfr_set_ui (y, 1, rnd_mode); /* 1 may be Out of Range */
+          else if (inexact < 0)
+            return mpfr_set_ui (y, 0, rnd_mode); /* 0+ */
+          else
+            {
+              mpfr_set_inf (y, 1);
+              return 0;
+            }
+        }
+#endif
+        {
+          MPFR_SET_NAN (y);
+          MPFR_RET_NAN;
+        }
+      else /* y =x^(1/1)=x */
+        return mpfr_set (y, x, rnd_mode);
+    }
+
+  /* Singular values */
+  else if (MPFR_UNLIKELY (MPFR_IS_SINGULAR (x)))
+    {
+      if (MPFR_IS_NAN (x))
+        {
+          MPFR_SET_NAN (y); /* NaN^(1/k) = NaN */
+          MPFR_RET_NAN;
+        }
+      else if (MPFR_IS_INF (x)) /* +Inf^(1/k) = +Inf
+                                   -Inf^(1/k) = -Inf if k odd
+                                   -Inf^(1/k) = NaN if k even */
+        {
+          if (MPFR_IS_NEG(x) && (k % 2 == 0))
+            {
+              MPFR_SET_NAN (y);
+              MPFR_RET_NAN;
+            }
+          MPFR_SET_INF (y);
+          MPFR_SET_SAME_SIGN (y, x);
+          MPFR_RET (0);
+        }
+      else /* x is necessarily 0: (+0)^(1/k) = +0
+                                  (-0)^(1/k) = -0 */
+        {
+          MPFR_ASSERTD (MPFR_IS_ZERO (x));
+          MPFR_SET_ZERO (y);
+          MPFR_SET_SAME_SIGN (y, x);
+          MPFR_RET (0);
+        }
+    }
+
+  /* Returns NAN for x < 0 and k even */
+  else if (MPFR_IS_NEG (x) && (k % 2 == 0))
+    {
+      MPFR_SET_NAN (y);
+      MPFR_RET_NAN;
+    }
+
+  /* General case */
+  MPFR_SAVE_EXPO_MARK (expo);
+  mpz_init (m);
+
+  e = mpfr_get_z_2exp (m, x);         /* x = m * 2^e */
+  if ((negative = MPFR_IS_NEG(x)))
+    mpz_neg (m, m);
+  r = e % (mpfr_exp_t) k;
+  if (r < 0)
+    r += k; /* now r = e (mod k) with 0 <= r < k */
+  /* x = (m*2^r) * 2^(e-r) where e-r is a multiple of k */
+
+  MPFR_MPZ_SIZEINBASE2 (size_m, m);
+  /* for rounding to nearest, we want the round bit to be in the root */
+  n = MPFR_PREC (y) + (rnd_mode == MPFR_RNDN);
+
+  /* we now multiply m by 2^(r+k*sh) so that root(m,k) will give
+     exactly n bits: we want k*(n-1)+1 <= size_m + k*sh + r <= k*n
+     i.e. sh = floor ((kn-size_m-r)/k) */
+  if ((mpfr_exp_t) size_m + r > k * (mpfr_exp_t) n)
+    sh = 0; /* we already have too many bits */
+  else
+    sh = (k * (mpfr_exp_t) n - (mpfr_exp_t) size_m - r) / k;
+  sh = k * sh + r;
+  if (sh >= 0)
+    {
+      mpz_mul_2exp (m, m, sh);
+      e = e - sh;
+    }
+  else if (r > 0)
+    {
+      mpz_mul_2exp (m, m, r);
+      e = e - r;
+    }
+
+  /* invariant: x = m*2^e, with e divisible by k */
+
+  /* we reuse the variable m to store the kth root, since it is not needed
+     any more: we just need to know if the root is exact */
+  inexact = mpz_root (m, m, k) == 0;
+
+  MPFR_MPZ_SIZEINBASE2 (tmp, m);
+  sh = tmp - n;
+  if (sh > 0) /* we have to flush to 0 the last sh bits from m */
+    {
+      /* the root is inexact if any of the discarded sh bits is nonzero */
+      inexact = inexact || ((mpfr_exp_t) mpz_scan1 (m, 0) < sh);
+      mpz_fdiv_q_2exp (m, m, sh);
+      e += k * sh;
+    }
+
+  if (inexact)
+    {
+      if (negative)
+        rnd_mode = MPFR_INVERT_RND (rnd_mode);
+      if (rnd_mode == MPFR_RNDU || rnd_mode == MPFR_RNDA
+          || (rnd_mode == MPFR_RNDN && mpz_tstbit (m, 0)))
+        inexact = 1, mpz_add_ui (m, m, 1);
+      else
+        inexact = -1;
+    }
+
+  /* either inexact is not zero, and the conversion is exact, i.e. inexact
+     is not changed; or inexact=0, and inexact is set only when
+     rnd_mode=MPFR_RNDN and bit (n+1) from m is 1 */
+  inexact += mpfr_set_z (y, m, MPFR_RNDN);
+  MPFR_SET_EXP (y, MPFR_GET_EXP (y) + e / (mpfr_exp_t) k);
+
+  if (negative)
+    {
+      MPFR_CHANGE_SIGN (y);
+      inexact = -inexact;
+    }
+
+  mpz_clear (m);
+  MPFR_SAVE_EXPO_FREE (expo);
+  return mpfr_check_range (y, inexact, rnd_mode);
+}
diff --git a/src/round_near_x.c b/src/round_near_x.c
new file mode 100644
index 000000000..fc564adf2
--- /dev/null
+++ b/src/round_near_x.c
@@ -0,0 +1,233 @@
+/* mpfr_round_near_x -- Round a floating point number nears another one.
+
+Copyright 2005, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library, and was contributed by Mathieu Dutour.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#include "mpfr-impl.h"
+
+/* Use MPFR_FAST_COMPUTE_IF_SMALL_INPUT instead (a simple wrapper) */
+
+/* int mpfr_round_near_x (mpfr_ptr y, mpfr_srcptr v, mpfr_uexp_t err, int dir,
+ mpfr_rnd_t rnd)
+
+ TODO: fix this description.
+ Assuming y = o(f(x)) = o(x + g(x)) with |g(x)| < 2^(EXP(v)-error)
+ If x is small enough, y ~= v. This function checks and does this.
+
+ It assumes that f(x) is not representable exactly as a FP number.
+ v must not be a singular value (NAN, INF or ZERO), usual values are
+ v=1 or v=x.
+
+ y is the destination (a mpfr_t), v the value to set (a mpfr_t),
+ err the error term (a mpfr_uexp_t) such that |g(x)| < 2^(EXP(x)-err),
+ dir (an int) is the direction of the error (if dir = 0,
+ it rounds toward 0, if dir=1, it rounds away from 0),
+ rnd the rounding mode.
+
+ It returns 0 if it can't round.
+ Otherwise it returns the ternary flag (It can't return an exact value).
+*/
+
+/* What "small enough" means?
+
+ We work with the positive values.
+ Assuming err > Prec (y)+1
+
+ i = [ y = o(x)] // i = inexact flag
+ If i == 0
+ Setting x in y is exact. We have:
+ y = [XXXXXXXXX[...]]0[...] + error where [..] are optional zeros
+ if dirError = ToInf,
+ x < f(x) < x + 2^(EXP(x)-err)
+ since x=y, and ulp (y)/2 > 2^(EXP(x)-err), we have:
+ y < f(x) < y+ulp(y) and |y-f(x)| < ulp(y)/2
+ if rnd = RNDN, nothing
+ if rnd = RNDZ, nothing
+ if rnd = RNDA, addoneulp
+ elif dirError = ToZero
+ x -2^(EXP(x)-err) < f(x) < x
+ since x=y, and ulp (y)/2 > 2^(EXP(x)-err), we have:
+ y-ulp(y) < f(x) < y and |y-f(x)| < ulp(y)/2
+ if rnd = RNDN, nothing
+ if rnd = RNDZ, nexttozero
+ if rnd = RNDA, nothing
+ NOTE: err > prec (y)+1 is needed only for RNDN.
+ elif i > 0 and i = EVEN_ROUNDING
+ So rnd = RNDN and we have y = x + ulp(y)/2
+ if dirError = ToZero,
+ we have x -2^(EXP(x)-err) < f(x) < x
+ so y - ulp(y)/2 - 2^(EXP(x)-err) < f(x) < y-ulp(y)/2
+ so y -ulp(y) < f(x) < y-ulp(y)/2
+ => nexttozero(y)
+ elif dirError = ToInf
+ we have x < f(x) < x + 2^(EXP(x)-err)
+ so y - ulp(y)/2 < f(x) < y+ulp(y)/2-ulp(y)/2
+ so y - ulp(y)/2 < f(x) < y
+ => do nothing
+ elif i < 0 and i = -EVEN_ROUNDING
+ So rnd = RNDN and we have y = x - ulp(y)/2
+ if dirError = ToZero,
+ y < f(x) < y + ulp(y)/2 => do nothing
+ if dirError = ToInf
+ y + ulp(y)/2 < f(x) < y + ulp(y) => AddOneUlp
+ elif i > 0
+ we can't have rnd = RNDZ, and prec(x) > prec(y), so ulp(x) < ulp(y)
+ we have y - ulp (y) < x < y
+ or more exactly y - ulp(y) + ulp(x)/2 <= x <= y - ulp(x)/2
+ if rnd = RNDA,
+ if dirError = ToInf,
+ we have x < f(x) < x + 2^(EXP(x)-err)
+ if err > prec (x),
+ we have 2^(EXP(x)-err) < ulp(x), so 2^(EXP(x)-err) <= ulp(x)/2
+ so f(x) <= y - ulp(x)/2+ulp(x)/2 <= y
+ and y - ulp(y) < x < f(x)
+ so we have y - ulp(y) < f(x) < y
+ so do nothing.
+ elif we can round, ie y - ulp(y) < x + 2^(EXP(x)-err) < y
+ we have y - ulp(y) < x < f(x) < x + 2^(EXP(x)-err) < y
+ so do nothing
+ otherwise
+ Wrong. Example X=[0.11101]111111110000
+ + 1111111111111111111....
+ elif dirError = ToZero
+ we have x - 2^(EXP(x)-err) < f(x) < x
+ so f(x) < x < y
+ if err > prec (x)
+ x-2^(EXP(x)-err) >= x-ulp(x)/2 >= y - ulp(y) + ulp(x)/2-ulp(x)/2
+ so y - ulp(y) < f(x) < y
+ so do nothing
+ elif we can round, ie y - ulp(y) < x - 2^(EXP(x)-err) < y
+ y - ulp(y) < x - 2^(EXP(x)-err) < f(x) < y
+ so do nothing
+ otherwise
+ Wrong. Example: X=[1.111010]00000010
+ - 10000001000000000000100....
+ elif rnd = RNDN,
+ y - ulp(y)/2 < x < y and we can't have x = y-ulp(y)/2:
+ so we have:
+ y - ulp(y)/2 + ulp(x)/2 <= x <= y - ulp(x)/2
+ if dirError = ToInf
+ we have x < f(x) < x+2^(EXP(x)-err) and ulp(y) > 2^(EXP(x)-err)
+ so y - ulp(y)/2 + ulp (x)/2 < f(x) < y + ulp (y)/2 - ulp (x)/2
+ we can round but we can't compute inexact flag.
+ if err > prec (x)
+ y - ulp(y)/2 + ulp (x)/2 < f(x) < y + ulp(x)/2 - ulp(x)/2
+ so y - ulp(y)/2 + ulp (x)/2 < f(x) < y
+ we can round and compute inexact flag. do nothing
+ elif we can round, ie y - ulp(y)/2 < x + 2^(EXP(x)-err) < y
+ we have y - ulp(y)/2 + ulp (x)/2 < f(x) < y
+ so do nothing
+ otherwise
+ Wrong
+ elif dirError = ToZero
+ we have x -2^(EXP(x)-err) < f(x) < x and ulp(y)/2 > 2^(EXP(x)-err)
+ so y-ulp(y)+ulp(x)/2 < f(x) < y - ulp(x)/2
+ if err > prec (x)
+ x- ulp(x)/2 < f(x) < x
+ so y - ulp(y)/2+ulp(x)/2 - ulp(x)/2 < f(x) < x <= y - ulp(x)/2 < y
+ do nothing
+ elif we can round, ie y-ulp(y)/2 < x-2^(EXP(x)-err) < y
+ we have y-ulp(y)/2 < x-2^(EXP(x)-err) < f(x) < x < y
+ do nothing
+ otherwise
+ Wrong
+ elif i < 0
+ same thing?
+ */
+
+/* See the long analysis above for the case distinctions.  Returns 0 if it
+   cannot round (y untouched), otherwise a nonzero ternary value. */
+int
+mpfr_round_near_x (mpfr_ptr y, mpfr_srcptr v, mpfr_uexp_t err, int dir,
+                   mpfr_rnd_t rnd)
+{
+  int inexact, sign;
+  unsigned int old_flags = __gmpfr_flags;
+
+  MPFR_ASSERTD (!MPFR_IS_SINGULAR (v));
+  MPFR_ASSERTD (dir == 0 || dir == 1);
+
+  /* First check if we can round. The test is more restrictive than
+     necessary. Note that if err is not representable in an mpfr_exp_t,
+     then err > MPFR_PREC (v) and the conversion to mpfr_exp_t will not
+     occur. */
+  if (!(err > MPFR_PREC (y) + 1
+        && (err > MPFR_PREC (v)
+            || mpfr_round_p (MPFR_MANT (v), MPFR_LIMB_SIZE (v),
+                             (mpfr_exp_t) err,
+                             MPFR_PREC (y) + (rnd == MPFR_RNDN)))))
+    /* If we assume we can not round, return 0, and y is not modified */
+    return 0;
+
+  /* First round v in y */
+  sign = MPFR_SIGN (v);
+  MPFR_SET_EXP (y, MPFR_GET_EXP (v));
+  MPFR_SET_SIGN (y, sign);
+  MPFR_RNDRAW_GEN (inexact, y, MPFR_MANT (v), MPFR_PREC (v), rnd, sign,
+                   if (dir == 0)
+                     {
+                       inexact = -sign;
+                       goto trunc_doit;
+                     }
+                   else
+                     goto addoneulp;
+                   , if (MPFR_UNLIKELY (++MPFR_EXP (y) > __gmpfr_emax))
+                     mpfr_overflow (y, rnd, sign)
+                   );
+
+  /* Fix it in some cases */
+  MPFR_ASSERTD (!MPFR_IS_NAN (y) && !MPFR_IS_ZERO (y));
+  /* If inexact == 0, setting y from v is exact but we haven't
+     take into account yet the error term */
+  if (inexact == 0)
+    {
+      if (dir == 0) /* The error term is negative for v positive */
+        {
+          inexact = sign;
+          if (MPFR_IS_LIKE_RNDZ (rnd, MPFR_IS_NEG_SIGN (sign)))
+            {
+              /* case nexttozero */
+              /* The underflow flag should be set if the result is zero */
+              __gmpfr_flags = old_flags; /* restore: RNDRAW may have set flags */
+              inexact = -sign;
+              mpfr_nexttozero (y);
+              if (MPFR_UNLIKELY (MPFR_IS_ZERO (y)))
+                mpfr_set_underflow ();
+            }
+        }
+      else /* The error term is positive for v positive */
+        {
+          inexact = -sign;
+          /* Round Away */
+          if (rnd != MPFR_RNDN && !MPFR_IS_LIKE_RNDZ (rnd, MPFR_IS_NEG_SIGN(sign)))
+            {
+              /* case nexttoinf */
+              /* The overflow flag should be set if the result is infinity */
+              inexact = sign;
+              mpfr_nexttoinf (y);
+              if (MPFR_UNLIKELY (MPFR_IS_INF (y)))
+                mpfr_set_overflow ();
+            }
+        }
+    }
+
+  /* the inexact flag cannot be 0, since this would mean an exact value,
+     and in this case we cannot round correctly */
+  MPFR_ASSERTD(inexact != 0);
+  MPFR_RET (inexact);
+}
diff --git a/src/round_p.c b/src/round_p.c
new file mode 100644
index 000000000..d1bfb42bb
--- /dev/null
+++ b/src/round_p.c
@@ -0,0 +1,123 @@
+/* mpfr_round_p -- check if an approximation is roundable.
+
+Copyright 2005, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#include "mpfr-impl.h"
+
+/* Check against mpfr_can_round ? */
+#ifdef WANT_ASSERT
+# if WANT_ASSERT >= 2
+/* Debug build: rename the real implementation to mpfr_round_p_2 (via the
+   #define below) and wrap it so every call is cross-checked against
+   mpfr_can_round_raw; any disagreement aborts with a diagnostic dump. */
+int mpfr_round_p_2 (mp_limb_t *, mp_size_t, mpfr_exp_t, mpfr_prec_t);
+int
+mpfr_round_p (mp_limb_t *bp, mp_size_t bn, mpfr_exp_t err0, mpfr_prec_t prec)
+{
+  int i1, i2;
+
+  i1 = mpfr_round_p_2 (bp, bn, err0, prec);
+  i2 = mpfr_can_round_raw (bp, bn, MPFR_SIGN_POS, err0,
+                           MPFR_RNDN, MPFR_RNDZ, prec);
+  if (i1 != i2)
+    {
+      fprintf (stderr, "mpfr_round_p(%d) != mpfr_can_round(%d)!\n"
+               "bn = %lu, err0 = %ld, prec = %lu\nbp = ", i1, i2,
+               (unsigned long) bn, (long) err0, (unsigned long) prec);
+      gmp_fprintf (stderr, "%NX\n", bp, bn);
+      MPFR_ASSERTN (0);
+    }
+  return i1;
+}
+# define mpfr_round_p mpfr_round_p_2
+# endif
+#endif
+
+/*
+ * Assuming {bp, bn} is an approximation of a non-singular number
+ * with error at most equal to 2^(EXP(b)-err0) (`err0' bits of b are known)
+ * of direction unknown, check if we can round b toward zero with
+ * precision prec.
+ */
+int
+mpfr_round_p (mp_limb_t *bp, mp_size_t bn, mpfr_exp_t err0, mpfr_prec_t prec)
+{
+  mpfr_prec_t err;
+  mp_size_t k, n;
+  mp_limb_t tmp, mask;
+  int s;
+
+  err = (mpfr_prec_t) bn * GMP_NUMB_BITS;
+  if (MPFR_UNLIKELY (err0 <= 0 || (mpfr_uexp_t) err0 <= prec || prec >= err))
+    return 0;  /* can't round */
+  err = MIN (err, (mpfr_uexp_t) err0);
+
+  k = prec / GMP_NUMB_BITS;
+  s = GMP_NUMB_BITS - prec%GMP_NUMB_BITS;
+  n = err / GMP_NUMB_BITS - k;
+
+  MPFR_ASSERTD (n >= 0);
+  MPFR_ASSERTD (bn > k);
+
+  /* Check first limb */
+  bp += bn-1-k;
+  tmp = *bp--;
+  mask = s == GMP_NUMB_BITS ? MP_LIMB_T_MAX : MPFR_LIMB_MASK (s);
+  tmp &= mask;
+
+  if (MPFR_LIKELY (n == 0))
+    {
+      /* prec and error are in the same limb */
+      s = GMP_NUMB_BITS - err % GMP_NUMB_BITS;
+      MPFR_ASSERTD (s < GMP_NUMB_BITS);
+      tmp  >>= s;
+      mask >>= s;
+      /* roundable iff the bits between prec and err are neither
+         all zeros nor all ones */
+      return tmp != 0 && tmp != mask;
+    }
+  else if (MPFR_UNLIKELY (tmp == 0))
+    {
+      /* Check if all (n-1) limbs are 0 */
+      while (--n)
+        if (*bp-- != 0)
+          return 1;
+      /* Check if final error limb is 0 */
+      s = GMP_NUMB_BITS - err % GMP_NUMB_BITS;
+      if (s == GMP_NUMB_BITS)
+        return 0;
+      tmp = *bp >> s;
+      return tmp != 0;
+    }
+  else if (MPFR_UNLIKELY (tmp == mask))
+    {
+      /* Check if all (n-1) limbs are 11111111111111111 */
+      while (--n)
+        if (*bp-- != MP_LIMB_T_MAX)
+          return 1;
+      /* Check if the final error limb is all ones */
+      s = GMP_NUMB_BITS - err % GMP_NUMB_BITS;
+      if (s == GMP_NUMB_BITS)
+        return 0;
+      tmp = *bp >> s;
+      return tmp != (MP_LIMB_T_MAX >> s);
+    }
+  else
+    {
+      /* First limb is different from 000000 or 1111111 */
+      return 1;
+    }
+}
diff --git a/src/round_prec.c b/src/round_prec.c
new file mode 100644
index 000000000..d5ac5c93c
--- /dev/null
+++ b/src/round_prec.c
@@ -0,0 +1,240 @@
+/* mpfr_round_raw_generic, mpfr_round_raw2, mpfr_round_raw, mpfr_prec_round,
+ mpfr_can_round, mpfr_can_round_raw -- various rounding functions
+
+Copyright 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#include "mpfr-impl.h"
+
+#define mpfr_round_raw_generic mpfr_round_raw
+#define flag 0
+#define use_inexp 1
+#include "round_raw_generic.c"
+
+#define mpfr_round_raw_generic mpfr_round_raw_2
+#define flag 1
+#define use_inexp 0
+#include "round_raw_generic.c"
+
+/* Seems to be unused. Remove comment to implement it.
+#define mpfr_round_raw_generic mpfr_round_raw_3
+#define flag 1
+#define use_inexp 1
+#include "round_raw_generic.c"
+*/
+
+#define mpfr_round_raw_generic mpfr_round_raw_4
+#define flag 0
+#define use_inexp 0
+#include "round_raw_generic.c"
+
+/* Change the precision of x to prec, rounding the value in direction
+   rnd_mode; reallocates the significand if prec needs more limbs.
+   Returns the ternary inexact value. */
+int
+mpfr_prec_round (mpfr_ptr x, mpfr_prec_t prec, mpfr_rnd_t rnd_mode)
+{
+  mp_limb_t *tmp, *xp;
+  int carry, inexact;
+  mpfr_prec_t nw, ow;
+  MPFR_TMP_DECL(marker);
+
+  MPFR_ASSERTN(prec >= MPFR_PREC_MIN && prec <= MPFR_PREC_MAX);
+
+  nw = 1 + (prec - 1) / GMP_NUMB_BITS; /* needed allocated limbs */
+
+  /* check if x has enough allocated space for the significand */
+  /* Get the number of limbs from the precision.
+     (Compatible with all allocation methods) */
+  ow = (MPFR_PREC (x) + GMP_NUMB_BITS - 1) / GMP_NUMB_BITS;
+  if (nw > ow)
+    {
+      /* FIXME: Variable can't be created using custom allocation,
+         MPFR_DECL_INIT or GROUP_ALLOC: How to detect? */
+      ow = MPFR_GET_ALLOC_SIZE(x);
+      if (nw > ow)
+       {
+         /* Realloc significand */
+         mp_ptr tmpx = (mp_ptr) (*__gmp_reallocate_func)
+           (MPFR_GET_REAL_PTR(x), MPFR_MALLOC_SIZE(ow), MPFR_MALLOC_SIZE(nw));
+         MPFR_SET_MANT_PTR(x, tmpx); /* mant ptr must be set
+                                        before alloc size */
+         MPFR_SET_ALLOC_SIZE(x, nw); /* new number of allocated limbs */
+       }
+    }
+
+  if (MPFR_UNLIKELY( MPFR_IS_SINGULAR(x) ))
+    {
+      MPFR_PREC(x) = prec; /* Special value: need to set prec */
+      if (MPFR_IS_NAN(x))
+        MPFR_RET_NAN;
+      MPFR_ASSERTD(MPFR_IS_INF(x) || MPFR_IS_ZERO(x));
+      return 0; /* infinity and zero are exact */
+    }
+
+  /* x is a non-zero real number */
+
+  MPFR_TMP_MARK(marker);
+  tmp = (mp_limb_t*) MPFR_TMP_ALLOC (nw * BYTES_PER_MP_LIMB);
+  xp = MPFR_MANT(x);
+  carry = mpfr_round_raw (tmp, xp, MPFR_PREC(x), MPFR_IS_NEG(x),
+                          prec, rnd_mode, &inexact);
+  MPFR_PREC(x) = prec;
+
+  /* carry means the rounded significand overflowed to the next power of 2 */
+  if (MPFR_UNLIKELY(carry))
+    {
+      mpfr_exp_t exp = MPFR_EXP (x);
+
+      if (MPFR_UNLIKELY(exp == __gmpfr_emax))
+        (void) mpfr_overflow(x, rnd_mode, MPFR_SIGN(x));
+      else
+        {
+          MPFR_ASSERTD (exp < __gmpfr_emax);
+          MPFR_SET_EXP (x, exp + 1);
+          xp[nw - 1] = MPFR_LIMB_HIGHBIT;
+          if (nw - 1 > 0)
+            MPN_ZERO(xp, nw - 1);
+        }
+    }
+  else
+    MPN_COPY(xp, tmp, nw);
+
+  MPFR_TMP_FREE(marker);
+  return inexact;
+}
+
+/* assumption: GMP_NUMB_BITS is a power of 2 */
+
+/* assuming b is an approximation to x in direction rnd1 with error at
+ most 2^(MPFR_EXP(b)-err), returns 1 if one is able to round exactly
+ x to precision prec with direction rnd2, and 0 otherwise.
+
+ Side effects: none.
+*/
+
+/* Public wrapper: singular values (NaN, Inf, zero) are never roundable;
+   otherwise defer to mpfr_can_round_raw on b's significand. */
+int
+mpfr_can_round (mpfr_srcptr b, mpfr_exp_t err, mpfr_rnd_t rnd1,
+                mpfr_rnd_t rnd2, mpfr_prec_t prec)
+{
+  if (MPFR_UNLIKELY(MPFR_IS_SINGULAR(b)))
+    return 0; /* We cannot round if Zero, Nan or Inf */
+  else
+    return mpfr_can_round_raw (MPFR_MANT(b), MPFR_LIMB_SIZE(b),
+                               MPFR_SIGN(b), err, rnd1, rnd2, prec);
+}
+
+/* Limb-level core of mpfr_can_round: decide whether rounding {bp, bn}
+   (approximation in direction rnd1, error < 2^(EXP(b)-err0)) to prec bits
+   in direction rnd2 is safe, by rounding both error bounds and comparing
+   the resulting round bits. */
+int
+mpfr_can_round_raw (const mp_limb_t *bp, mp_size_t bn, int neg, mpfr_exp_t err0,
+                    mpfr_rnd_t rnd1, mpfr_rnd_t rnd2, mpfr_prec_t prec)
+{
+  mpfr_prec_t err;
+  mp_size_t k, k1, tn;
+  int s, s1;
+  mp_limb_t cc, cc2;
+  mp_limb_t *tmp;
+  MPFR_TMP_DECL(marker);
+
+  if (MPFR_UNLIKELY(err0 < 0 || (mpfr_uexp_t) err0 <= prec))
+    return 0;  /* can't round */
+  else if (MPFR_UNLIKELY (prec > (mpfr_prec_t) bn * GMP_NUMB_BITS))
+    { /* then ulp(b) < precision < error */
+      return rnd2 == MPFR_RNDN && (mpfr_uexp_t) err0 - 2 >= prec;
+      /* can round only in rounding to the nearest and err0 >= prec + 2 */
+    }
+
+  MPFR_ASSERT_SIGN(neg);
+  neg = MPFR_IS_NEG_SIGN(neg);
+
+  /* if the error is smaller than ulp(b), then anyway it will propagate
+     up to ulp(b) */
+  err = ((mpfr_uexp_t) err0 > (mpfr_prec_t) bn * GMP_NUMB_BITS) ?
+    (mpfr_prec_t) bn * GMP_NUMB_BITS : (mpfr_prec_t) err0;
+
+  /* warning: if k = m*GMP_NUMB_BITS, consider limb m-1 and not m */
+  k = (err - 1) / GMP_NUMB_BITS;
+  MPFR_UNSIGNED_MINUS_MODULO(s, err);
+  /* the error corresponds to bit s in limb k, the most significant limb
+     being limb 0 */
+
+  k1 = (prec - 1) / GMP_NUMB_BITS;
+  MPFR_UNSIGNED_MINUS_MODULO(s1, prec);
+  /* the last significant bit is bit s1 in limb k1 */
+
+  /* don't need to consider the k1 most significant limbs */
+  k -= k1;
+  bn -= k1;
+  prec -= (mpfr_prec_t) k1 * GMP_NUMB_BITS;
+
+  /* if when adding or subtracting (1 << s) in bp[bn-1-k], it does not
+     change bp[bn-1] >> s1, then we can round */
+  MPFR_TMP_MARK(marker);
+  tn = bn;
+  k++; /* since we work with k+1 everywhere */
+  tmp = (mp_limb_t*) MPFR_TMP_ALLOC(tn * BYTES_PER_MP_LIMB);
+  if (bn > k)
+    MPN_COPY (tmp, bp, bn - k);
+
+  MPFR_ASSERTD (k > 0);
+
+  /* Transform RNDD and RNDU to Zero / Away */
+  MPFR_ASSERTD((neg == 0) || (neg ==1));
+  if (MPFR_IS_RNDUTEST_OR_RNDDNOTTEST(rnd1, neg))
+    rnd1 = MPFR_RNDZ;
+
+  switch (rnd1)
+    {
+    case MPFR_RNDZ:
+      /* Round to Zero */
+      cc = (bp[bn - 1] >> s1) & 1;
+      /* mpfr_round_raw2 returns 1 if one should add 1 at ulp(b,prec),
+         and 0 otherwise */
+      cc ^= mpfr_round_raw2 (bp, bn, neg, rnd2, prec);
+      /* cc is the new value of bit s1 in bp[bn-1] */
+      /* now round b + 2^(MPFR_EXP(b)-err) */
+      cc2 = mpn_add_1 (tmp + bn - k, bp + bn - k, k, MPFR_LIMB_ONE << s);
+      break;
+    case MPFR_RNDN:
+      /* Round to nearest */
+      /* first round b+2^(MPFR_EXP(b)-err) */
+      cc = mpn_add_1 (tmp + bn - k, bp + bn - k, k, MPFR_LIMB_ONE << s);
+      cc = (tmp[bn - 1] >> s1) & 1; /* gives 0 when cc=1 */
+      cc ^= mpfr_round_raw2 (tmp, bn, neg, rnd2, prec);
+      /* now round b-2^(MPFR_EXP(b)-err) */
+      cc2 = mpn_sub_1 (tmp + bn - k, bp + bn - k, k, MPFR_LIMB_ONE << s);
+      break;
+    default:
+      /* Round away */
+      cc = (bp[bn - 1] >> s1) & 1;
+      cc ^= mpfr_round_raw2 (bp, bn, neg, rnd2, prec);
+      /* now round b +/- 2^(MPFR_EXP(b)-err) */
+      cc2 = mpn_sub_1 (tmp + bn - k, bp + bn - k, k, MPFR_LIMB_ONE << s);
+      break;
+    }
+
+  /* if cc2 is 1, then a carry or borrow propagates to the next limb */
+  if (cc2 && cc)
+    {
+      MPFR_TMP_FREE(marker);
+      return 0;
+    }
+
+  /* compare the round bit obtained from the other error bound */
+  cc2 = (tmp[bn - 1] >> s1) & 1;
+  cc2 ^= mpfr_round_raw2 (tmp, bn, neg, rnd2, prec);
+
+  MPFR_TMP_FREE(marker);
+  return cc == cc2;
+}
diff --git a/src/round_raw_generic.c b/src/round_raw_generic.c
new file mode 100644
index 000000000..aadd8b461
--- /dev/null
+++ b/src/round_raw_generic.c
@@ -0,0 +1,259 @@
+/* mpfr_round_raw_generic -- Generic rounding function
+
+Copyright 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#ifndef flag
+# error "ERROR: flag must be defined (0 / 1)"
+#endif
+#ifndef use_inexp
+# error "ERROR: use_inexp must be defined (0 / 1)"
+#endif
+#ifndef mpfr_round_raw_generic
+# error "ERROR: mpfr_round_raw_generic must be defined"
+#endif
+
+/*
+ * If flag = 0, puts in y the value of xp (with precision xprec and
+ * sign 1 if negative=0, -1 otherwise) rounded to precision yprec and
+ * direction rnd_mode. Supposes x is not zero nor NaN nor +/- Infinity
+ * (i.e. *xp != 0). In that case, the return value is a possible carry
+ * (0 or 1) that may happen during the rounding, in which case the result
+ * is a power of two.
+ *
+ * If inexp != NULL, put in *inexp the inexact flag of the rounding (0, 1, -1).
+ * In case of even rounding when rnd = MPFR_RNDN, put MPFR_EVEN_INEX (2) or
+ * -MPFR_EVEN_INEX (-2) in *inexp.
+ *
+ * If flag = 1, just returns whether one should add 1 or not for rounding.
+ *
+ * Note: yprec may be < MPFR_PREC_MIN; in particular, it may be equal
+ * to 1. In this case, the even rounding is done away from 0, which is
+ * a natural generalization. Indeed, a number with 1-bit precision can
+ * be seen as a denormalized number with more precision.
+ */
+
+int
+mpfr_round_raw_generic(
+#if flag == 0
+ mp_limb_t *yp,
+#endif
+ const mp_limb_t *xp, mpfr_prec_t xprec,
+ int neg, mpfr_prec_t yprec, mpfr_rnd_t rnd_mode
+#if use_inexp != 0
+ , int *inexp
+#endif
+ )
+{
+ mp_size_t xsize, nw;
+ mp_limb_t himask, lomask, sb;
+ int rw;
+#if flag == 0
+ int carry;
+#endif
+#if use_inexp == 0
+ int *inexp;
+#endif
+
+ if (use_inexp)
+ MPFR_ASSERTD(inexp != ((int*) 0));
+ MPFR_ASSERTD(neg == 0 || neg == 1);
+
+ if (flag && !use_inexp &&
+ (xprec <= yprec || MPFR_IS_LIKE_RNDZ (rnd_mode, neg)))
+ return 0;
+
+ xsize = (xprec-1)/GMP_NUMB_BITS + 1;
+ nw = yprec / GMP_NUMB_BITS;
+ rw = yprec & (GMP_NUMB_BITS - 1);
+
+ if (MPFR_UNLIKELY(xprec <= yprec))
+ { /* No rounding is necessary. */
+ /* if yp=xp, maybe an overlap: MPN_COPY_DECR is ok when src <= dst */
+ if (MPFR_LIKELY(rw))
+ nw++;
+ MPFR_ASSERTD(nw >= 1);
+ MPFR_ASSERTD(nw >= xsize);
+ if (use_inexp)
+ *inexp = 0;
+#if flag == 0
+ MPN_COPY_DECR(yp + (nw - xsize), xp, xsize);
+ MPN_ZERO(yp, nw - xsize);
+#endif
+ return 0;
+ }
+
+ if (use_inexp || !MPFR_IS_LIKE_RNDZ(rnd_mode, neg))
+ {
+ mp_size_t k = xsize - nw - 1;
+
+ if (MPFR_LIKELY(rw))
+ {
+ nw++;
+ lomask = MPFR_LIMB_MASK (GMP_NUMB_BITS - rw);
+ himask = ~lomask;
+ }
+ else
+ {
+ lomask = ~(mp_limb_t) 0;
+ himask = ~(mp_limb_t) 0;
+ }
+ MPFR_ASSERTD(k >= 0);
+ sb = xp[k] & lomask; /* First non-significant bits */
+ /* Rounding to nearest ? */
+ if (MPFR_LIKELY( rnd_mode == MPFR_RNDN) )
+ {
+ /* Rounding to nearest */
+ mp_limb_t rbmask = MPFR_LIMB_ONE << (GMP_NUMB_BITS - 1 - rw);
+ if (sb & rbmask) /* rounding bit */
+ sb &= ~rbmask; /* it is 1, clear it */
+ else
+ {
+ /* Rounding bit is 0, behave like rounding to 0 */
+ goto rnd_RNDZ;
+ }
+ while (MPFR_UNLIKELY(sb == 0) && k > 0)
+ sb = xp[--k];
+ /* rounding to nearest, with rounding bit = 1 */
+ if (MPFR_UNLIKELY(sb == 0)) /* Even rounding. */
+ {
+ /* sb == 0 && rnd_mode == MPFR_RNDN */
+ sb = xp[xsize - nw] & (himask ^ (himask << 1));
+ if (sb == 0)
+ {
+ if (use_inexp)
+ *inexp = 2*MPFR_EVEN_INEX*neg-MPFR_EVEN_INEX;
+ /* ((neg!=0)^(sb!=0)) ? MPFR_EVEN_INEX : -MPFR_EVEN_INEX;*/
+ /* Since neg = 0 or 1 and sb=0*/
+#if flag == 1
+ return 0 /*sb != 0 && rnd_mode != MPFR_RNDZ */;
+#else
+ MPN_COPY_INCR(yp, xp + xsize - nw, nw);
+ yp[0] &= himask;
+ return 0;
+#endif
+ }
+ else
+ {
+ /* sb != 0 && rnd_mode == MPFR_RNDN */
+ if (use_inexp)
+ *inexp = MPFR_EVEN_INEX-2*MPFR_EVEN_INEX*neg;
+ /*((neg!=0)^(sb!=0))? MPFR_EVEN_INEX : -MPFR_EVEN_INEX; */
+ /*Since neg= 0 or 1 and sb != 0 */
+ goto rnd_RNDN_add_one_ulp;
+ }
+ }
+ else /* sb != 0 && rnd_mode == MPFR_RNDN*/
+ {
+ if (use_inexp)
+ /* *inexp = (neg == 0) ? 1 : -1; but since neg = 0 or 1 */
+ *inexp = 1-2*neg;
+ rnd_RNDN_add_one_ulp:
+#if flag == 1
+ return 1; /*sb != 0 && rnd_mode != MPFR_RNDZ;*/
+#else
+ carry = mpn_add_1 (yp, xp + xsize - nw, nw,
+ rw ?
+ MPFR_LIMB_ONE << (GMP_NUMB_BITS - rw)
+ : MPFR_LIMB_ONE);
+ yp[0] &= himask;
+ return carry;
+#endif
+ }
+ }
+ /* Rounding to Zero ? */
+ else if (MPFR_IS_LIKE_RNDZ(rnd_mode, neg))
+ {
+ /* rnd_mode == MPFR_RNDZ */
+ rnd_RNDZ:
+ while (MPFR_UNLIKELY(sb == 0) && k > 0)
+ sb = xp[--k];
+ if (use_inexp)
+ /* rnd_mode == MPFR_RNDZ and neg = 0 or 1 */
+ /* (neg != 0) ^ (rnd_mode != MPFR_RNDZ)) ? 1 : -1);*/
+ *inexp = MPFR_UNLIKELY(sb == 0) ? 0 : (2*neg-1);
+#if flag == 1
+ return 0; /*sb != 0 && rnd_mode != MPFR_RNDZ;*/
+#else
+ MPN_COPY_INCR(yp, xp + xsize - nw, nw);
+ yp[0] &= himask;
+ return 0;
+#endif
+ }
+ else
+ {
+ /* rnd_mode = Away */
+ while (MPFR_UNLIKELY(sb == 0) && k > 0)
+ sb = xp[--k];
+ if (MPFR_UNLIKELY(sb == 0))
+ {
+ /* sb = 0 && rnd_mode != MPFR_RNDZ */
+ if (use_inexp)
+ /* (neg != 0) ^ (rnd_mode != MPFR_RNDZ)) ? 1 : -1);*/
+ *inexp = 0;
+#if flag == 1
+ return 0;
+#else
+ MPN_COPY_INCR(yp, xp + xsize - nw, nw);
+ yp[0] &= himask;
+ return 0;
+#endif
+ }
+ else
+ {
+ /* sb != 0 && rnd_mode != MPFR_RNDZ */
+ if (use_inexp)
+ /* (neg != 0) ^ (rnd_mode != MPFR_RNDZ)) ? 1 : -1);*/
+ *inexp = 1-2*neg;
+#if flag == 1
+ return 1;
+#else
+ carry = mpn_add_1(yp, xp + xsize - nw, nw,
+ rw ? MPFR_LIMB_ONE << (GMP_NUMB_BITS - rw)
+ : 1);
+ yp[0] &= himask;
+ return carry;
+#endif
+ }
+ }
+ }
+ else
+ {
+      /* Rounding mode = Zero / No inexact flag */
+#if flag == 1
+ return 0 /*sb != 0 && rnd_mode != MPFR_RNDZ*/;
+#else
+ if (MPFR_LIKELY(rw))
+ {
+ nw++;
+ himask = ~MPFR_LIMB_MASK (GMP_NUMB_BITS - rw);
+ }
+ else
+ himask = ~(mp_limb_t) 0;
+ MPN_COPY_INCR(yp, xp + xsize - nw, nw);
+ yp[0] &= himask;
+ return 0;
+#endif
+ }
+}
+
+#undef flag
+#undef use_inexp
+#undef mpfr_round_raw_generic
diff --git a/src/scale2.c b/src/scale2.c
new file mode 100644
index 000000000..47f30db8a
--- /dev/null
+++ b/src/scale2.c
@@ -0,0 +1,91 @@
+/* mpfr_scale2 -- multiply a double float by 2^exp
+
+Copyright 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#include <float.h> /* for DBL_EPSILON */
+#include "mpfr-impl.h"
+
+/* Note: we could use the ldexp function, but since we want not to depend on
+ math.h, we write our own implementation. */
+
+/* multiplies 1/2 <= d <= 1 by 2^exp */
+double
+mpfr_scale2 (double d, int exp)
+{
+#if _GMP_IEEE_FLOATS
+ {
+ union ieee_double_extract x;
+
+ if (MPFR_UNLIKELY (d == 1.0))
+ {
+ d = 0.5;
+ exp ++;
+ }
+
+ /* now 1/2 <= d < 1 */
+
+ /* infinities and zeroes have already been checked */
+ MPFR_ASSERTD (-1073 <= exp && exp <= 1025);
+
+ x.d = d;
+ if (MPFR_UNLIKELY (exp < -1021)) /* subnormal case */
+ {
+ x.s.exp += exp + 52;
+ x.d *= DBL_EPSILON;
+ }
+ else /* normalized case */
+ {
+ x.s.exp += exp;
+ }
+ return x.d;
+ }
+#else /* _GMP_IEEE_FLOATS */
+ {
+ double factor;
+
+    /* An overflow may occur (example: 0.5*2^1024) */
+ if (d < 1.0)
+ {
+ d += d;
+ exp--;
+ }
+ /* Now 1.0 <= d < 2.0 */
+
+ if (exp < 0)
+ {
+ factor = 0.5;
+ exp = -exp;
+ }
+ else
+ {
+ factor = 2.0;
+ }
+ while (exp != 0)
+ {
+ if ((exp & 1) != 0)
+ d *= factor;
+ exp >>= 1;
+ factor *= factor;
+ }
+ return d;
+ }
+#endif
+}
diff --git a/src/sec.c b/src/sec.c
new file mode 100644
index 000000000..d2f9c56b5
--- /dev/null
+++ b/src/sec.c
@@ -0,0 +1,34 @@
+/* mpfr_sec - secant function = 1/cos.
+
+Copyright 2005, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#define FUNCTION mpfr_sec
+#define INVERSE mpfr_cos
+#define ACTION_NAN(y) do { MPFR_SET_NAN(y); MPFR_RET_NAN; } while (1)
+#define ACTION_INF(y) do { MPFR_SET_NAN(y); MPFR_RET_NAN; } while (1)
+#define ACTION_ZERO(y,x) return mpfr_set_ui (y, 1, rnd_mode)
+/* for x near 0, sec(x) = 1 + x^2/2 + ..., more precisely |sec(x)-1| < x^2
+ for |x| <= 1. */
+#define ACTION_TINY(y,x,r) \
+ MPFR_FAST_COMPUTE_IF_SMALL_INPUT(y, __gmpfr_one, -2 * MPFR_GET_EXP (x), 0, \
+ 1, r, inexact = _inexact; goto end)
+
+#include "gen_inverse.h"
diff --git a/src/sech.c b/src/sech.c
new file mode 100644
index 000000000..995b5ee7c
--- /dev/null
+++ b/src/sech.c
@@ -0,0 +1,40 @@
+/* mpfr_sech - Hyperbolic secant function = 1/cosh.
+
+Copyright 2005, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+/* The hyperbolic secant function is defined by sech(x)=1/cosh(x):
+   sech (NaN) = NaN.
+   sech (+Inf) = sech (-Inf) = 0+.
+   sech (+0) = sech (-0) = 1.
+ */
+
+#define FUNCTION mpfr_sech
+#define INVERSE mpfr_cosh
+#define ACTION_NAN(y) do { MPFR_SET_NAN(y); MPFR_RET_NAN; } while (1)
+#define ACTION_INF(y) return mpfr_set_ui (y, 0, MPFR_RNDN)
+#define ACTION_ZERO(y,x) return mpfr_set_ui (y, 1, rnd_mode)
+/* for x near 0, sech(x) = 1 - x^2/2 + ..., more precisely |sech(x)-1| <= x^2/2
+ for |x| <= 1. The tiny action is the same as for cos(x). */
+#define ACTION_TINY(y,x,r) \
+ MPFR_FAST_COMPUTE_IF_SMALL_INPUT(y, __gmpfr_one, -2 * MPFR_GET_EXP (x), 1, \
+ 0, r, inexact = _inexact; goto end)
+
+#include "gen_inverse.h"
diff --git a/src/set.c b/src/set.c
new file mode 100644
index 000000000..7245e6b22
--- /dev/null
+++ b/src/set.c
@@ -0,0 +1,81 @@
+/* mpfr_set -- copy of a floating-point number
+
+Copyright 1999, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#include "mpfr-impl.h"
+
+/* set a to abs(b) * signb: a=b when signb = SIGN(b), a=abs(b) when signb=1 */
+int
+mpfr_set4 (mpfr_ptr a, mpfr_srcptr b, mpfr_rnd_t rnd_mode, int signb)
+{
+ /* Sign is ALWAYS copied */
+ MPFR_SET_SIGN (a, signb);
+
+ /* Exponent is also always copied since if the number is singular,
+     the exponent field determines the number.
+ Can't use MPFR_SET_EXP since the exponent may be singular */
+ MPFR_EXP (a) = MPFR_EXP (b);
+
+ if (MPFR_UNLIKELY (MPFR_IS_SINGULAR (b)))
+ {
+ /* MPFR_SET_NAN, MPFR_SET_ZERO and MPFR_SET_INF are useless
+ since MPFR_EXP (a) = MPFR_EXP (b) does the job */
+ if (MPFR_IS_NAN (b))
+ MPFR_RET_NAN;
+ else
+ MPFR_RET (0);
+ }
+ else if (MPFR_LIKELY (MPFR_PREC (b) == MPFR_PREC (a)))
+ {
+ /* Same precision and b is not singular:
+ * just copy the mantissa, and set the exponent and the sign
+ * The result is exact. */
+ MPN_COPY (MPFR_MANT (a), MPFR_MANT (b),
+ (MPFR_PREC (b) + GMP_NUMB_BITS-1)/GMP_NUMB_BITS);
+ MPFR_RET (0);
+ }
+ else
+ {
+ int inex;
+
+ /* Else Round B inside a */
+ MPFR_RNDRAW (inex, a, MPFR_MANT (b), MPFR_PREC (b), rnd_mode, signb,
+ if (MPFR_UNLIKELY ( ++MPFR_EXP (a) > __gmpfr_emax))
+ return mpfr_overflow (a, rnd_mode, signb) );
+ MPFR_RET (inex);
+ }
+}
+
+/* Set a to b */
+#undef mpfr_set
+int
+mpfr_set (mpfr_ptr a, mpfr_srcptr b, mpfr_rnd_t rnd_mode)
+{
+ return mpfr_set4 (a, b, rnd_mode, MPFR_SIGN (b));
+}
+
+/* Set a to |b| */
+#undef mpfr_abs
+int
+mpfr_abs (mpfr_ptr a, mpfr_srcptr b, mpfr_rnd_t rnd_mode)
+{
+ return mpfr_set4 (a, b, rnd_mode, MPFR_SIGN_POS);
+}
diff --git a/src/set_d.c b/src/set_d.c
new file mode 100644
index 000000000..c7037b35c
--- /dev/null
+++ b/src/set_d.c
@@ -0,0 +1,255 @@
+/* mpfr_set_d -- convert a machine double precision float to
+ a multiple precision floating-point number
+
+Copyright 1999, 2000, 2001, 2002, 2003, 2004, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#include <float.h> /* For DOUBLE_ISINF and DOUBLE_ISNAN */
+
+#define MPFR_NEED_LONGLONG_H
+#include "mpfr-impl.h"
+
+/* extracts the bits of d in rp[0..n-1] where n=ceil(53/GMP_NUMB_BITS).
+ Assumes d is neither 0 nor NaN nor Inf. */
+static long
+__gmpfr_extract_double (mp_ptr rp, double d)
+ /* e=0 iff GMP_NUMB_BITS=32 and rp has only one limb */
+{
+ long exp;
+ mp_limb_t manl;
+#if GMP_NUMB_BITS == 32
+ mp_limb_t manh;
+#endif
+
+ /* BUGS
+ 1. Should handle Inf and NaN in IEEE specific code.
+ 2. Handle Inf and NaN also in default code, to avoid hangs.
+ 3. Generalize to handle all GMP_NUMB_BITS.
+     4. This list is incomplete and misspelled.
+ */
+
+ MPFR_ASSERTD(!DOUBLE_ISNAN(d));
+ MPFR_ASSERTD(!DOUBLE_ISINF(d));
+ MPFR_ASSERTD(d != 0.0);
+
+#if _GMP_IEEE_FLOATS
+
+ {
+ union ieee_double_extract x;
+ x.d = d;
+
+ exp = x.s.exp;
+ if (exp)
+ {
+#if GMP_NUMB_BITS >= 64
+ manl = ((MPFR_LIMB_ONE << 63)
+ | ((mp_limb_t) x.s.manh << 43) | ((mp_limb_t) x.s.manl << 11));
+#else
+ manh = (MPFR_LIMB_ONE << 31) | (x.s.manh << 11) | (x.s.manl >> 21);
+ manl = x.s.manl << 11;
+#endif
+ }
+ else /* denormalized number */
+ {
+#if GMP_NUMB_BITS >= 64
+ manl = ((mp_limb_t) x.s.manh << 43) | ((mp_limb_t) x.s.manl << 11);
+#else
+ manh = (x.s.manh << 11) /* high 21 bits */
+ | (x.s.manl >> 21); /* middle 11 bits */
+ manl = x.s.manl << 11; /* low 21 bits */
+#endif
+ }
+
+ if (exp)
+ exp -= 1022;
+ else
+ exp = -1021;
+ }
+
+#else /* _GMP_IEEE_FLOATS */
+
+ {
+ /* Unknown (or known to be non-IEEE) double format. */
+ exp = 0;
+ if (d >= 1.0)
+ {
+ MPFR_ASSERTN (d * 0.5 != d);
+ while (d >= 32768.0)
+ {
+ d *= (1.0 / 65536.0);
+ exp += 16;
+ }
+ while (d >= 1.0)
+ {
+ d *= 0.5;
+ exp += 1;
+ }
+ }
+ else if (d < 0.5)
+ {
+ while (d < (1.0 / 65536.0))
+ {
+ d *= 65536.0;
+ exp -= 16;
+ }
+ while (d < 0.5)
+ {
+ d *= 2.0;
+ exp -= 1;
+ }
+ }
+
+ d *= MP_BASE_AS_DOUBLE;
+#if GMP_NUMB_BITS >= 64
+ manl = d;
+#else
+ manh = (mp_limb_t) d;
+ manl = (mp_limb_t) ((d - manh) * MP_BASE_AS_DOUBLE);
+#endif
+ }
+
+#endif /* _GMP_IEEE_FLOATS */
+
+#if GMP_NUMB_BITS >= 64
+ rp[0] = manl;
+#else
+ rp[1] = manh;
+ rp[0] = manl;
+#endif
+
+ return exp;
+}
+
+/* End of part included from gmp-2.0.2 */
+
+int
+mpfr_set_d (mpfr_ptr r, double d, mpfr_rnd_t rnd_mode)
+{
+ int signd, inexact;
+ unsigned int cnt;
+ mp_size_t i, k;
+ mpfr_t tmp;
+ mp_limb_t tmpmant[MPFR_LIMBS_PER_DOUBLE];
+ MPFR_SAVE_EXPO_DECL (expo);
+
+ if (MPFR_UNLIKELY(DOUBLE_ISNAN(d)))
+ {
+ MPFR_SET_NAN(r);
+ MPFR_RET_NAN;
+ }
+ else if (MPFR_UNLIKELY(d == 0))
+ {
+#if _GMP_IEEE_FLOATS
+ union ieee_double_extract x;
+
+ MPFR_SET_ZERO(r);
+ /* set correct sign */
+ x.d = d;
+ if (x.s.sig == 1)
+ MPFR_SET_NEG(r);
+ else
+ MPFR_SET_POS(r);
+#else /* _GMP_IEEE_FLOATS */
+ MPFR_SET_ZERO(r);
+ {
+ /* This is to get the sign of zero on non-IEEE hardware
+ Some systems support +0.0, -0.0 and unsigned zero.
+ We can't use d==+0.0 since it should be always true,
+ so we check that the memory representation of d is the
+           same as +0.0, etc. */
+ /* FIXME: consider the case where +0.0 or -0.0 may have several
+ representations. */
+ double poszero = +0.0, negzero = DBL_NEG_ZERO;
+ if (memcmp(&d, &poszero, sizeof(double)) == 0)
+ MPFR_SET_POS(r);
+ else if (memcmp(&d, &negzero, sizeof(double)) == 0)
+ MPFR_SET_NEG(r);
+ else
+ MPFR_SET_POS(r);
+ }
+#endif
+ return 0; /* 0 is exact */
+ }
+ else if (MPFR_UNLIKELY(DOUBLE_ISINF(d)))
+ {
+ MPFR_SET_INF(r);
+ if (d > 0)
+ MPFR_SET_POS(r);
+ else
+ MPFR_SET_NEG(r);
+ return 0; /* infinity is exact */
+ }
+
+ /* now d is neither 0, nor NaN nor Inf */
+
+ MPFR_SAVE_EXPO_MARK (expo);
+
+ /* warning: don't use tmp=r here, even if SIZE(r) >= MPFR_LIMBS_PER_DOUBLE,
+ since PREC(r) may be different from PREC(tmp), and then both variables
+ would have same precision in the mpfr_set4 call below. */
+ MPFR_MANT(tmp) = tmpmant;
+ MPFR_PREC(tmp) = IEEE_DBL_MANT_DIG;
+
+ signd = (d < 0) ? MPFR_SIGN_NEG : MPFR_SIGN_POS;
+ d = ABS (d);
+
+ /* don't use MPFR_SET_EXP here since the exponent may be out of range */
+ MPFR_EXP(tmp) = __gmpfr_extract_double (tmpmant, d);
+
+#ifdef WANT_ASSERT
+ /* Failed assertion if the stored value is 0 (e.g., if the exponent range
+ has been reduced at the wrong moment and an underflow to 0 occurred).
+ Probably a bug in the C implementation if this happens. */
+ i = 0;
+ while (tmpmant[i] == 0)
+ {
+ i++;
+ MPFR_ASSERTN(i < MPFR_LIMBS_PER_DOUBLE);
+ }
+#endif
+
+ /* determine the index i-1 of the most significant non-zero limb
+ and the number k of zero high limbs */
+ i = MPFR_LIMBS_PER_DOUBLE;
+ MPN_NORMALIZE_NOT_ZERO(tmpmant, i);
+ k = MPFR_LIMBS_PER_DOUBLE - i;
+
+ count_leading_zeros (cnt, tmpmant[i - 1]);
+
+ if (MPFR_LIKELY(cnt != 0))
+ mpn_lshift (tmpmant + k, tmpmant, i, cnt);
+ else if (k != 0)
+ MPN_COPY (tmpmant + k, tmpmant, i);
+
+ if (MPFR_UNLIKELY(k != 0))
+ MPN_ZERO (tmpmant, k);
+
+ /* don't use MPFR_SET_EXP here since the exponent may be out of range */
+ MPFR_EXP(tmp) -= (mpfr_exp_t) (cnt + k * GMP_NUMB_BITS);
+
+ /* tmp is exact since PREC(tmp)=53 */
+ inexact = mpfr_set4 (r, tmp, rnd_mode, signd);
+
+ MPFR_SAVE_EXPO_FREE (expo);
+ return mpfr_check_range (r, inexact, rnd_mode);
+}
+
+
+
diff --git a/src/set_d64.c b/src/set_d64.c
new file mode 100644
index 000000000..1b510e40f
--- /dev/null
+++ b/src/set_d64.c
@@ -0,0 +1,224 @@
+/* mpfr_set_decimal64 -- convert an IEEE 754r decimal64 float to
+ a multiple precision floating-point number
+
+See http://gcc.gnu.org/ml/gcc/2006-06/msg00691.html,
+http://gcc.gnu.org/onlinedocs/gcc/Decimal-Float.html,
+and TR 24732 <http://www.open-std.org/jtc1/sc22/wg14/www/projects#24732>.
+
+Copyright 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#define MPFR_NEED_LONGLONG_H
+#include "mpfr-impl.h"
+
+#ifdef MPFR_WANT_DECIMAL_FLOATS
+
+#ifdef DPD_FORMAT
+ /* conversion 10-bits to 3 digits */
+static unsigned int T[1024] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 80, 81, 800, 801, 880, 881, 10, 11, 12, 13,
+ 14, 15, 16, 17, 18, 19, 90, 91, 810, 811, 890, 891, 20, 21, 22, 23, 24, 25,
+ 26, 27, 28, 29, 82, 83, 820, 821, 808, 809, 30, 31, 32, 33, 34, 35, 36, 37,
+ 38, 39, 92, 93, 830, 831, 818, 819, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
+ 84, 85, 840, 841, 88, 89, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 94, 95,
+ 850, 851, 98, 99, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 86, 87, 860, 861,
+ 888, 889, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 96, 97, 870, 871, 898,
+ 899, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 180, 181, 900, 901,
+ 980, 981, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 190, 191, 910,
+ 911, 990, 991, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 182, 183,
+ 920, 921, 908, 909, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 192,
+ 193, 930, 931, 918, 919, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149,
+ 184, 185, 940, 941, 188, 189, 150, 151, 152, 153, 154, 155, 156, 157, 158,
+ 159, 194, 195, 950, 951, 198, 199, 160, 161, 162, 163, 164, 165, 166, 167,
+ 168, 169, 186, 187, 960, 961, 988, 989, 170, 171, 172, 173, 174, 175, 176,
+ 177, 178, 179, 196, 197, 970, 971, 998, 999, 200, 201, 202, 203, 204, 205,
+ 206, 207, 208, 209, 280, 281, 802, 803, 882, 883, 210, 211, 212, 213, 214,
+ 215, 216, 217, 218, 219, 290, 291, 812, 813, 892, 893, 220, 221, 222, 223,
+ 224, 225, 226, 227, 228, 229, 282, 283, 822, 823, 828, 829, 230, 231, 232,
+ 233, 234, 235, 236, 237, 238, 239, 292, 293, 832, 833, 838, 839, 240, 241,
+ 242, 243, 244, 245, 246, 247, 248, 249, 284, 285, 842, 843, 288, 289, 250,
+ 251, 252, 253, 254, 255, 256, 257, 258, 259, 294, 295, 852, 853, 298, 299,
+ 260, 261, 262, 263, 264, 265, 266, 267, 268, 269, 286, 287, 862, 863, 888,
+ 889, 270, 271, 272, 273, 274, 275, 276, 277, 278, 279, 296, 297, 872, 873,
+ 898, 899, 300, 301, 302, 303, 304, 305, 306, 307, 308, 309, 380, 381, 902,
+ 903, 982, 983, 310, 311, 312, 313, 314, 315, 316, 317, 318, 319, 390, 391,
+ 912, 913, 992, 993, 320, 321, 322, 323, 324, 325, 326, 327, 328, 329, 382,
+ 383, 922, 923, 928, 929, 330, 331, 332, 333, 334, 335, 336, 337, 338, 339,
+ 392, 393, 932, 933, 938, 939, 340, 341, 342, 343, 344, 345, 346, 347, 348,
+ 349, 384, 385, 942, 943, 388, 389, 350, 351, 352, 353, 354, 355, 356, 357,
+ 358, 359, 394, 395, 952, 953, 398, 399, 360, 361, 362, 363, 364, 365, 366,
+ 367, 368, 369, 386, 387, 962, 963, 988, 989, 370, 371, 372, 373, 374, 375,
+ 376, 377, 378, 379, 396, 397, 972, 973, 998, 999, 400, 401, 402, 403, 404,
+ 405, 406, 407, 408, 409, 480, 481, 804, 805, 884, 885, 410, 411, 412, 413,
+ 414, 415, 416, 417, 418, 419, 490, 491, 814, 815, 894, 895, 420, 421, 422,
+ 423, 424, 425, 426, 427, 428, 429, 482, 483, 824, 825, 848, 849, 430, 431,
+ 432, 433, 434, 435, 436, 437, 438, 439, 492, 493, 834, 835, 858, 859, 440,
+ 441, 442, 443, 444, 445, 446, 447, 448, 449, 484, 485, 844, 845, 488, 489,
+ 450, 451, 452, 453, 454, 455, 456, 457, 458, 459, 494, 495, 854, 855, 498,
+ 499, 460, 461, 462, 463, 464, 465, 466, 467, 468, 469, 486, 487, 864, 865,
+ 888, 889, 470, 471, 472, 473, 474, 475, 476, 477, 478, 479, 496, 497, 874,
+ 875, 898, 899, 500, 501, 502, 503, 504, 505, 506, 507, 508, 509, 580, 581,
+ 904, 905, 984, 985, 510, 511, 512, 513, 514, 515, 516, 517, 518, 519, 590,
+ 591, 914, 915, 994, 995, 520, 521, 522, 523, 524, 525, 526, 527, 528, 529,
+ 582, 583, 924, 925, 948, 949, 530, 531, 532, 533, 534, 535, 536, 537, 538,
+ 539, 592, 593, 934, 935, 958, 959, 540, 541, 542, 543, 544, 545, 546, 547,
+ 548, 549, 584, 585, 944, 945, 588, 589, 550, 551, 552, 553, 554, 555, 556,
+ 557, 558, 559, 594, 595, 954, 955, 598, 599, 560, 561, 562, 563, 564, 565,
+ 566, 567, 568, 569, 586, 587, 964, 965, 988, 989, 570, 571, 572, 573, 574,
+ 575, 576, 577, 578, 579, 596, 597, 974, 975, 998, 999, 600, 601, 602, 603,
+ 604, 605, 606, 607, 608, 609, 680, 681, 806, 807, 886, 887, 610, 611, 612,
+ 613, 614, 615, 616, 617, 618, 619, 690, 691, 816, 817, 896, 897, 620, 621,
+ 622, 623, 624, 625, 626, 627, 628, 629, 682, 683, 826, 827, 868, 869, 630,
+ 631, 632, 633, 634, 635, 636, 637, 638, 639, 692, 693, 836, 837, 878, 879,
+ 640, 641, 642, 643, 644, 645, 646, 647, 648, 649, 684, 685, 846, 847, 688,
+ 689, 650, 651, 652, 653, 654, 655, 656, 657, 658, 659, 694, 695, 856, 857,
+ 698, 699, 660, 661, 662, 663, 664, 665, 666, 667, 668, 669, 686, 687, 866,
+ 867, 888, 889, 670, 671, 672, 673, 674, 675, 676, 677, 678, 679, 696, 697,
+ 876, 877, 898, 899, 700, 701, 702, 703, 704, 705, 706, 707, 708, 709, 780,
+ 781, 906, 907, 986, 987, 710, 711, 712, 713, 714, 715, 716, 717, 718, 719,
+ 790, 791, 916, 917, 996, 997, 720, 721, 722, 723, 724, 725, 726, 727, 728,
+ 729, 782, 783, 926, 927, 968, 969, 730, 731, 732, 733, 734, 735, 736, 737,
+ 738, 739, 792, 793, 936, 937, 978, 979, 740, 741, 742, 743, 744, 745, 746,
+ 747, 748, 749, 784, 785, 946, 947, 788, 789, 750, 751, 752, 753, 754, 755,
+ 756, 757, 758, 759, 794, 795, 956, 957, 798, 799, 760, 761, 762, 763, 764,
+ 765, 766, 767, 768, 769, 786, 787, 966, 967, 988, 989, 770, 771, 772, 773,
+ 774, 775, 776, 777, 778, 779, 796, 797, 976, 977, 998, 999 };
+#endif
+
+/* Convert d to a decimal string (one-to-one correspondence, no rounding).
+ The string s needs to have at least 23 characters.
+ */
+static void
+decimal64_to_string (char *s, _Decimal64 d)
+{
+ union ieee_double_extract x;
+ union ieee_double_decimal64 y;
+ char *t;
+ unsigned int Gh; /* most 5 significant bits from combination field */
+ int exp; /* exponent */
+ mp_limb_t rp[2];
+ mp_size_t rn = 2;
+ unsigned int i;
+#ifdef DPD_FORMAT
+ unsigned int d0, d1, d2, d3, d4, d5;
+#endif
+
+ /* now convert BID or DPD to string */
+ y.d64 = d;
+ x.d = y.d;
+ Gh = x.s.exp >> 6;
+ if (Gh == 31)
+ {
+ sprintf (s, "NaN");
+ return;
+ }
+ else if (Gh == 30)
+ {
+ if (x.s.sig == 0)
+ sprintf (s, "Inf");
+ else
+ sprintf (s, "-Inf");
+ return;
+ }
+ t = s;
+ if (x.s.sig)
+ *t++ = '-';
+
+#ifdef DPD_FORMAT
+ if (Gh < 24)
+ {
+ exp = (x.s.exp >> 1) & 768;
+ d0 = Gh & 7;
+ }
+ else
+ {
+ exp = (x.s.exp & 384) << 1;
+ d0 = 8 | (Gh & 1);
+ }
+ exp |= (x.s.exp & 63) << 2;
+ exp |= x.s.manh >> 18;
+ d1 = (x.s.manh >> 8) & 1023;
+ d2 = ((x.s.manh << 2) | (x.s.manl >> 30)) & 1023;
+ d3 = (x.s.manl >> 20) & 1023;
+ d4 = (x.s.manl >> 10) & 1023;
+ d5 = x.s.manl & 1023;
+ sprintf (t, "%1u%3u%3u%3u%3u%3u", d0, T[d1], T[d2], T[d3], T[d4], T[d5]);
+ /* Warning: some characters may be blank */
+ for (i = 0; i < 16; i++)
+ if (t[i] == ' ')
+ t[i] = '0';
+ t += 16;
+#else /* BID */
+ if (Gh < 24)
+ {
+ /* the biased exponent E is formed from G[0] to G[9] and the
+ significand from bits G[10] through the end of the decoding */
+ exp = x.s.exp >> 1;
+ /* manh has 20 bits, manl has 32 bits */
+ rp[1] = ((x.s.exp & 1) << 20) | x.s.manh;
+ rp[0] = x.s.manl;
+ }
+ else
+ {
+ /* the biased exponent is formed from G[2] to G[11] */
+ exp = (x.s.exp & 511) << 1;
+ rp[1] = x.s.manh;
+ rp[0] = x.s.manl;
+ exp |= rp[1] >> 19;
+ rp[1] &= 524287; /* 2^19-1: cancel G[11] */
+ rp[1] |= 2097152; /* add 2^21 */
+ }
+#if GMP_NUMB_BITS >= 54
+ rp[0] |= rp[1] << 32;
+ rn = 1;
+#endif
+ while (rn > 0 && rp[rn - 1] == 0)
+ rn --;
+ if (rn == 0)
+ {
+ *t = 0;
+ i = 1;
+ }
+ else
+ {
+ i = mpn_get_str ((unsigned char*)t, 10, rp, rn);
+ }
+ while (i-- > 0)
+ *t++ += '0';
+#endif /* DPD or BID */
+
+ exp -= 398; /* unbiased exponent */
+ t += sprintf (t, "E%d", exp);
+}
+
+int
+mpfr_set_decimal64 (mpfr_ptr r, _Decimal64 d, mpfr_rnd_t rnd_mode)
+{
+ char s[23]; /* need 1 character for sign,
+ 16 characters for mantissa,
+                 1 character for the exponent marker 'E',
+ 4 characters for exponent (including sign),
+ 1 character for terminating \0. */
+
+ decimal64_to_string (s, d);
+ return mpfr_set_str (r, s, 10, rnd_mode);
+}
+
+#endif /* MPFR_WANT_DECIMAL_FLOATS */
diff --git a/src/set_dfl_prec.c b/src/set_dfl_prec.c
new file mode 100644
index 000000000..eeb4ccd19
--- /dev/null
+++ b/src/set_dfl_prec.c
@@ -0,0 +1,41 @@
+/* mpfr_set_default_prec, mpfr_get_default_prec -- set/get default precision
+
+Copyright 1999, 2000, 2001, 2004, 2005, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#include "mpfr-impl.h"
+
+/* default is IEEE double precision, i.e. 53 bits */
+mpfr_prec_t MPFR_THREAD_ATTR __gmpfr_default_fp_bit_precision \
+ = IEEE_DBL_MANT_DIG;
+
+/* Set the default precision to prec.
+   prec must lie in [MPFR_PREC_MIN, MPFR_PREC_MAX]; otherwise the
+   MPFR_ASSERTN assertion fails (checked even in non-debug builds). */
+void
+mpfr_set_default_prec (mpfr_prec_t prec)
+{
+ MPFR_ASSERTN (prec >= MPFR_PREC_MIN && prec <= MPFR_PREC_MAX);
+ __gmpfr_default_fp_bit_precision = prec;
+}
+
+/* Return the current default precision.
+   The #undef ensures a real function is emitted even when mpfr.h
+   provides a macro version of mpfr_get_default_prec. */
+#undef mpfr_get_default_prec
+mpfr_prec_t
+mpfr_get_default_prec (void)
+{
+ return __gmpfr_default_fp_bit_precision;
+}
diff --git a/src/set_exp.c b/src/set_exp.c
new file mode 100644
index 000000000..b79bf4faa
--- /dev/null
+++ b/src/set_exp.c
@@ -0,0 +1,37 @@
+/* mpfr_set_exp - set the exponent of a floating-point number
+
+Copyright 2002, 2003, 2004, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#include "mpfr-impl.h"
+
+/* Set the exponent of x to 'exponent' if it lies within the current
+   exponent range [__gmpfr_emin, __gmpfr_emax].
+   Return 0 on success and 1 if the exponent is out of range,
+   in which case x is left unchanged. */
+int
+mpfr_set_exp (mpfr_ptr x, mpfr_exp_t exponent)
+{
+ if (exponent >= __gmpfr_emin && exponent <= __gmpfr_emax)
+ {
+ MPFR_EXP(x) = exponent; /* do not use MPFR_SET_EXP of course... */
+ return 0;
+ }
+ else
+ {
+ return 1;
+ }
+}
diff --git a/src/set_f.c b/src/set_f.c
new file mode 100644
index 000000000..bbd6e5efd
--- /dev/null
+++ b/src/set_f.c
@@ -0,0 +1,99 @@
+/* mpfr_set_f -- set a MPFR number from a GNU MPF number
+
+Copyright 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#define MPFR_NEED_LONGLONG_H
+#include "mpfr-impl.h"
+
+/* Set y to the GNU MP mpf number x, rounded in direction rnd_mode.
+   The sign of y is flipped when it disagrees with the sign of x.
+   Returns the ternary value from mpfr_check_range (0 iff the
+   conversion is exact and in range). */
+int
+mpfr_set_f (mpfr_ptr y, mpf_srcptr x, mpfr_rnd_t rnd_mode)
+{
+ mp_limb_t *my, *mx, *tmp;
+ unsigned long cnt, sx, sy;
+ int inexact, carry = 0;
+ MPFR_TMP_DECL(marker);
+
+ sx = ABS(SIZ(x)); /* number of limbs of the mantissa of x */
+
+ if (sx == 0) /* x is zero */
+ {
+ MPFR_SET_ZERO(y);
+ MPFR_SET_POS(y);
+ return 0; /* 0 is exact */
+ }
+
+ /* Give y the sign of x (SIZ(x) < 0 iff x < 0 for an mpf). */
+ if (SIZ(x) * MPFR_FROM_SIGN_TO_INT(MPFR_SIGN(y)) < 0)
+ MPFR_CHANGE_SIGN (y);
+
+ sy = 1 + (MPFR_PREC(y) - 1) / GMP_NUMB_BITS;
+ my = MPFR_MANT(y);
+ mx = PTR(x);
+
+ count_leading_zeros(cnt, mx[sx - 1]);
+
+ if (sy <= sx) /* we may have to round even when sy = sx */
+ {
+ unsigned long xprec = sx * GMP_NUMB_BITS;
+
+ MPFR_TMP_MARK(marker);
+ tmp = (mp_limb_t*) MPFR_TMP_ALLOC(sx * BYTES_PER_MP_LIMB);
+ /* Left-normalize the mantissa of x into tmp before rounding. */
+ if (cnt)
+ mpn_lshift (tmp, mx, sx, cnt);
+ else
+ /* FIXME: we may avoid the copy here, and directly call mpfr_round_raw
+ on mx instead of tmp */
+ MPN_COPY (tmp, mx, sx);
+ carry = mpfr_round_raw (my, tmp, xprec, (SIZ(x) < 0), MPFR_PREC(y),
+ rnd_mode, &inexact);
+ if (MPFR_UNLIKELY(carry)) /* result is a power of two */
+ my[sy - 1] = MPFR_LIMB_HIGHBIT;
+ MPFR_TMP_FREE(marker);
+ }
+ else
+ {
+ /* y has more limbs than x: put the (normalized) mantissa in the
+ high limbs of y and zero the low ones; conversion is exact. */
+ if (cnt)
+ mpn_lshift (my + sy - sx, mx, sx, cnt);
+ else
+ MPN_COPY (my + sy - sx, mx, sx);
+ MPN_ZERO(my, sy - sx);
+ /* no rounding necessary, since y has a larger mantissa */
+ inexact = 0;
+ }
+
+ /* warning: EXP(x) * GMP_NUMB_BITS may exceed the maximal exponent */
+ if (EXP(x) > 1 + (__gmpfr_emax - 1) / GMP_NUMB_BITS)
+ {
+ /* EXP(x) >= 2 + floor((__gmpfr_emax-1)/GMP_NUMB_BITS)
+ EXP(x) >= 2 + (__gmpfr_emax - GMP_NUMB_BITS) / GMP_NUMB_BITS
+ >= 1 + __gmpfr_emax / GMP_NUMB_BITS
+ EXP(x) * GMP_NUMB_BITS >= __gmpfr_emax + GMP_NUMB_BITS
+ Since 0 <= cnt <= GMP_NUMB_BITS-1, and 0 <= carry <= 1,
+ we have then EXP(x) * GMP_NUMB_BITS - cnt + carry > __gmpfr_emax */
+ return mpfr_overflow (y, rnd_mode, MPFR_SIGN (y));
+ }
+ else
+ {
+ /* Do not use MPFR_SET_EXP as the exponent may be out of range. */
+ MPFR_EXP (y) = EXP (x) * GMP_NUMB_BITS - (mpfr_exp_t) cnt + carry;
+ }
+
+ return mpfr_check_range (y, inexact, rnd_mode);
+}
diff --git a/src/set_flt.c b/src/set_flt.c
new file mode 100644
index 000000000..79c106e69
--- /dev/null
+++ b/src/set_flt.c
@@ -0,0 +1,34 @@
+/* mpfr_set_flt -- convert a machine single precision float to mpfr_t
+
+Copyright 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#include "mpfr-impl.h"
+
+/* Set r to the single-precision float f, rounded in direction rnd_mode.
+   Returns the ternary value of mpfr_set_d. */
+int
+mpfr_set_flt (mpfr_ptr r, float f, mpfr_rnd_t rnd_mode)
+{
+ /* we convert f to double precision and use mpfr_set_d;
+ NaN and infinities should be preserved, and all single precision
+ numbers are exactly representable in the double format, thus the
+ conversion is always exact */
+ return mpfr_set_d (r, (double) f, rnd_mode);
+}
+
diff --git a/src/set_inf.c b/src/set_inf.c
new file mode 100644
index 000000000..be91f62f0
--- /dev/null
+++ b/src/set_inf.c
@@ -0,0 +1,33 @@
+/* mpfr_set_inf -- set a number to plus or minus infinity.
+
+Copyright 2002, 2004, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#include "mpfr-impl.h"
+
+/* Set x to plus infinity if sign >= 0, minus infinity otherwise.
+   Note that sign == 0 yields +Inf. */
+void
+mpfr_set_inf (mpfr_ptr x, int sign)
+{
+ MPFR_SET_INF(x);
+ if (sign >= 0)
+ MPFR_SET_POS(x);
+ else
+ MPFR_SET_NEG(x);
+}
diff --git a/src/set_ld.c b/src/set_ld.c
new file mode 100644
index 000000000..162ac96c9
--- /dev/null
+++ b/src/set_ld.c
@@ -0,0 +1,321 @@
+/* mpfr_set_ld -- convert a machine long double to
+ a multiple precision floating-point number
+
+Copyright 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#include <float.h>
+
+#define MPFR_NEED_LONGLONG_H
+#include "mpfr-impl.h"
+
+/* Various i386 systems have been seen with float.h LDBL constants equal to
+ the DBL ones, whereas they ought to be bigger, reflecting the 10-byte
+ IEEE extended format on that processor. gcc 3.2.1 on FreeBSD and Solaris
+ has been seen with the problem, and gcc 2.95.4 on FreeBSD 4.7. */
+
+#if HAVE_LDOUBLE_IEEE_EXT_LITTLE
+static const union {
+ char bytes[10];
+ long double d;
+} ldbl_max_struct = {
+ { '\377','\377','\377','\377',
+ '\377','\377','\377','\377',
+ '\376','\177' }
+};
+#define MPFR_LDBL_MAX (ldbl_max_struct.d)
+#else
+#define MPFR_LDBL_MAX LDBL_MAX
+#endif
+
+#ifndef HAVE_LDOUBLE_IEEE_EXT_LITTLE
+
+/* Generic code: set r to the long double d, rounded in direction
+   rnd_mode.  The algorithm repeatedly peels off the part of d that is
+   exactly representable as a double (after exact power-of-two scaling
+   when |d| over/underflows the double range), accumulating the chunks
+   in t (precision MPFR_LDBL_MANT_DIG) and tracking the scaling in
+   shift_exp.  Returns the ternary value from mpfr_check_range. */
+int
+mpfr_set_ld (mpfr_ptr r, long double d, mpfr_rnd_t rnd_mode)
+{
+ mpfr_t t, u;
+ int inexact, shift_exp;
+ long double x;
+ MPFR_SAVE_EXPO_DECL (expo);
+
+ /* Check for NAN */
+ LONGDOUBLE_NAN_ACTION (d, goto nan);
+
+ /* Check for INF */
+ if (d > MPFR_LDBL_MAX)
+ {
+ mpfr_set_inf (r, 1);
+ return 0;
+ }
+ else if (d < -MPFR_LDBL_MAX)
+ {
+ mpfr_set_inf (r, -1);
+ return 0;
+ }
+ /* Check for ZERO */
+ else if (d == 0.0)
+ return mpfr_set_d (r, (double) d, rnd_mode);
+
+ mpfr_init2 (t, MPFR_LDBL_MANT_DIG);
+ mpfr_init2 (u, IEEE_DBL_MANT_DIG);
+
+ MPFR_SAVE_EXPO_MARK (expo);
+
+ convert:
+ x = d;
+ MPFR_SET_ZERO (t); /* The sign doesn't matter. */
+ shift_exp = 0; /* invariant: remainder to deal with is d*2^shift_exp */
+ while (x != (long double) 0.0)
+ {
+ /* Check overflow of double */
+ if (x > (long double) DBL_MAX || (-x) > (long double) DBL_MAX)
+ {
+ long double div9, div10, div11, div12, div13;
+
+#define TWO_64 18446744073709551616.0 /* 2^64 */
+#define TWO_128 (TWO_64 * TWO_64)
+#define TWO_256 (TWO_128 * TWO_128)
+ div9 = (long double) (double) (TWO_256 * TWO_256); /* 2^(2^9) */
+ div10 = div9 * div9;
+ div11 = div10 * div10; /* 2^(2^11) */
+ div12 = div11 * div11; /* 2^(2^12) */
+ div13 = div12 * div12; /* 2^(2^13) */
+ /* Binary reduction of the exponent: each division by a power
+ of two is exact, only shift_exp changes. */
+ if (ABS (x) >= div13)
+ {
+ x /= div13; /* exact */
+ shift_exp += 8192;
+ }
+ if (ABS (x) >= div12)
+ {
+ x /= div12; /* exact */
+ shift_exp += 4096;
+ }
+ if (ABS (x) >= div11)
+ {
+ x /= div11; /* exact */
+ shift_exp += 2048;
+ }
+ if (ABS (x) >= div10)
+ {
+ x /= div10; /* exact */
+ shift_exp += 1024;
+ }
+ /* warning: we may have DBL_MAX=2^1024*(1-2^(-53)) < x < 2^1024,
+ therefore we have one extra exponent reduction step */
+ if (ABS (x) >= div9)
+ {
+ x /= div9; /* exact */
+ shift_exp += 512;
+ }
+ } /* Check overflow of double */
+ else
+ {
+ long double div9, div10, div11;
+
+ div9 = (long double) (double) 7.4583407312002067432909653e-155;
+ /* div9 = 2^(-2^9) */
+ div10 = div9 * div9; /* 2^(-2^10) */
+ div11 = div10 * div10; /* 2^(-2^11) if extended precision */
+ /* since -DBL_MAX <= x <= DBL_MAX, the cast to double should not
+ overflow here */
+ if (ABS(x) < div10 &&
+ div11 != (long double) 0.0 &&
+ div11 / div10 == div10) /* possible underflow */
+ {
+ long double div12, div13;
+ /* After the divisions, any bit of x must be >= div10,
+ hence the possible division by div9. */
+ div12 = div11 * div11; /* 2^(-2^12) */
+ div13 = div12 * div12; /* 2^(-2^13) */
+ if (ABS (x) <= div13)
+ {
+ x /= div13; /* exact */
+ shift_exp -= 8192;
+ }
+ if (ABS (x) <= div12)
+ {
+ x /= div12; /* exact */
+ shift_exp -= 4096;
+ }
+ if (ABS (x) <= div11)
+ {
+ x /= div11; /* exact */
+ shift_exp -= 2048;
+ }
+ if (ABS (x) <= div10)
+ {
+ x /= div10; /* exact */
+ shift_exp -= 1024;
+ }
+ if (ABS(x) <= div9)
+ {
+ x /= div9; /* exact */
+ shift_exp -= 512;
+ }
+ }
+ else
+ {
+ /* x now fits in a double; u has IEEE_DBL_MANT_DIG bits of
+ precision, so this mpfr_set_d must be exact. */
+ inexact = mpfr_set_d (u, (double) x, MPFR_RNDZ);
+ MPFR_ASSERTD (inexact == 0);
+ if (mpfr_add (t, t, u, MPFR_RNDZ) != 0)
+ {
+ if (!mpfr_number_p (t))
+ break;
+ /* Inexact. This cannot happen unless the C implementation
+ "lies" on the precision or when long doubles are
+ implemented with FP expansions like under Mac OS X. */
+ if (MPFR_PREC (t) != MPFR_PREC (r) + 1)
+ {
+ /* We assume that MPFR_PREC (r) < MPFR_PREC_MAX.
+ The precision MPFR_PREC (r) + 1 allows us to
+ deduce the rounding bit and the sticky bit. */
+ mpfr_set_prec (t, MPFR_PREC (r) + 1);
+ goto convert;
+ }
+ else
+ {
+ mp_limb_t *tp;
+ int rb_mask;
+
+ /* Since mpfr_add was inexact, the sticky bit is 1. */
+ tp = MPFR_MANT (t);
+ rb_mask = MPFR_LIMB_ONE <<
+ (GMP_NUMB_BITS - 1 -
+ (MPFR_PREC (r) & (GMP_NUMB_BITS - 1)));
+ if (rnd_mode == MPFR_RNDN)
+ rnd_mode = (*tp & rb_mask) ^ MPFR_IS_NEG (t) ?
+ MPFR_RNDU : MPFR_RNDD;
+ *tp |= rb_mask;
+ break;
+ }
+ }
+ x -= (long double) mpfr_get_d1 (u); /* exact */
+ }
+ }
+ }
+ /* Undo the power-of-two scaling accumulated in shift_exp. */
+ inexact = mpfr_mul_2si (r, t, shift_exp, rnd_mode);
+ mpfr_clear (t);
+ mpfr_clear (u);
+
+ MPFR_SAVE_EXPO_FREE (expo);
+ return mpfr_check_range (r, inexact, rnd_mode);
+
+ nan:
+ MPFR_SET_NAN(r);
+ MPFR_RET_NAN;
+}
+
+#else /* IEEE Extended Little Endian Code */
+
+/* IEEE extended little-endian variant: decode the 80-bit long double
+   fields directly (sign, 15-bit exponent, 64-bit mantissa) into a
+   stack-allocated 64-bit mpfr_t, then round into r with mpfr_set4.
+   Returns the ternary value from mpfr_check_range. */
+int
+mpfr_set_ld (mpfr_ptr r, long double d, mpfr_rnd_t rnd_mode)
+{
+ int inexact, i, k, cnt;
+ mpfr_t tmp;
+ mp_limb_t tmpmant[MPFR_LIMBS_PER_LONG_DOUBLE];
+ mpfr_long_double_t x;
+ mpfr_exp_t exp;
+ int signd;
+ MPFR_SAVE_EXPO_DECL (expo);
+
+ /* Check for NAN (d != d is true only for NaN) */
+ if (MPFR_UNLIKELY (d != d))
+ {
+ MPFR_SET_NAN (r);
+ MPFR_RET_NAN;
+ }
+ /* Check for INF */
+ else if (MPFR_UNLIKELY (d > MPFR_LDBL_MAX))
+ {
+ MPFR_SET_INF (r);
+ MPFR_SET_POS (r);
+ return 0;
+ }
+ else if (MPFR_UNLIKELY (d < -MPFR_LDBL_MAX))
+ {
+ MPFR_SET_INF (r);
+ MPFR_SET_NEG (r);
+ return 0;
+ }
+ /* Check for ZERO; the sign bit of the encoding preserves -0.0 */
+ else if (MPFR_UNLIKELY (d == 0.0))
+ {
+ x.ld = d;
+ MPFR_SET_ZERO (r);
+ if (x.s.sign == 1)
+ MPFR_SET_NEG(r);
+ else
+ MPFR_SET_POS(r);
+ return 0;
+ }
+
+ /* now d is neither 0, nor NaN nor Inf */
+ MPFR_SAVE_EXPO_MARK (expo);
+
+ /* tmp lives on the stack: only mantissa pointer and precision are
+ set; it must not be passed to mpfr_clear. */
+ MPFR_MANT (tmp) = tmpmant;
+ MPFR_PREC (tmp) = 64;
+
+ /* Extract sign */
+ x.ld = d;
+ signd = MPFR_SIGN_POS;
+ if (x.ld < 0.0)
+ {
+ signd = MPFR_SIGN_NEG;
+ x.ld = -x.ld;
+ }
+
+ /* Extract mantissa */
+#if GMP_NUMB_BITS >= 64
+ tmpmant[0] = ((mp_limb_t) x.s.manh << 32) | ((mp_limb_t) x.s.manl);
+#else
+ tmpmant[0] = (mp_limb_t) x.s.manl;
+ tmpmant[1] = (mp_limb_t) x.s.manh;
+#endif
+
+ /* Normalize mantissa (shift so the top bit of the top limb is set) */
+ i = MPFR_LIMBS_PER_LONG_DOUBLE;
+ MPN_NORMALIZE_NOT_ZERO (tmpmant, i);
+ k = MPFR_LIMBS_PER_LONG_DOUBLE - i;
+ count_leading_zeros (cnt, tmpmant[i - 1]);
+ if (MPFR_LIKELY (cnt != 0))
+ mpn_lshift (tmpmant + k, tmpmant, i, cnt);
+ else if (k != 0)
+ MPN_COPY (tmpmant + k, tmpmant, i);
+ if (MPFR_UNLIKELY (k != 0))
+ MPN_ZERO (tmpmant, k);
+
+ /* Set exponent */
+ exp = (mpfr_exp_t) ((x.s.exph << 8) + x.s.expl); /* 15-bit unsigned int */
+ /* NOTE(review): exp == 0 is the subnormal encoding, whose bias
+ differs by one from the normal case -- confirm against the
+ x86 extended-precision format definition. */
+ if (MPFR_UNLIKELY (exp == 0))
+ exp -= 0x3FFD;
+ else
+ exp -= 0x3FFE;
+
+ MPFR_SET_EXP (tmp, exp - cnt - k * GMP_NUMB_BITS);
+
+ /* tmp is exact */
+ inexact = mpfr_set4 (r, tmp, rnd_mode, signd);
+
+ MPFR_SAVE_EXPO_FREE (expo);
+ return mpfr_check_range (r, inexact, rnd_mode);
+}
+
+#endif
diff --git a/src/set_nan.c b/src/set_nan.c
new file mode 100644
index 000000000..2782365ec
--- /dev/null
+++ b/src/set_nan.c
@@ -0,0 +1,31 @@
+/* mpfr_set_nan -- set a number to NaN.
+
+Copyright 2002, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+
+#include "mpfr-impl.h"
+
+/* Set x to NaN and raise the global (or thread-local) NaN flag. */
+void
+mpfr_set_nan (mpfr_ptr x)
+{
+ MPFR_SET_NAN (x);
+ __gmpfr_flags |= MPFR_FLAGS_NAN;
+}
diff --git a/src/set_prc_raw.c b/src/set_prc_raw.c
new file mode 100644
index 000000000..339882988
--- /dev/null
+++ b/src/set_prc_raw.c
@@ -0,0 +1,31 @@
+/* mpfr_set_prec_raw -- reset the precision of a floating-point number
+
+Copyright 2000, 2001, 2004, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#include "mpfr-impl.h"
+
+/* Reset the precision of x to p without any reallocation: p must not
+   exceed the capacity already allocated for x (second assertion).
+   The significand of x is NOT modified, unlike mpfr_set_prec. */
+void
+mpfr_set_prec_raw (mpfr_ptr x, mpfr_prec_t p)
+{
+ MPFR_ASSERTN (p >= MPFR_PREC_MIN && p <= MPFR_PREC_MAX);
+ MPFR_ASSERTN (p <= (mpfr_prec_t) MPFR_GET_ALLOC_SIZE(x) * GMP_NUMB_BITS);
+ MPFR_PREC(x) = p;
+}
diff --git a/src/set_prec.c b/src/set_prec.c
new file mode 100644
index 000000000..8dde77d94
--- /dev/null
+++ b/src/set_prec.c
@@ -0,0 +1,55 @@
+/* mpfr_set_prec -- reset the precision of a floating-point number
+
+Copyright 1999, 2001, 2002, 2004, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#include "mpfr-impl.h"
+
+/* Reset the precision of x to p, reallocating the significand if (and
+   only if) the new limb count exceeds the currently allocated size.
+   The previous value of x is lost: x is set to NaN. */
+void
+mpfr_set_prec (mpfr_ptr x, mpfr_prec_t p)
+{
+ mp_size_t xsize, xoldsize;
+ mp_ptr tmp;
+
+ /* first, check if p is correct */
+ MPFR_ASSERTN (p >= MPFR_PREC_MIN && p <= MPFR_PREC_MAX);
+
+ /* Calculate the new number of limbs */
+ xsize = (p - 1) / GMP_NUMB_BITS + 1;
+
+ /* Realloc only if the new size is greater than the old */
+ xoldsize = MPFR_GET_ALLOC_SIZE (x);
+ if (xsize > xoldsize)
+ {
+ tmp = (mp_ptr) (*__gmp_reallocate_func)
+ (MPFR_GET_REAL_PTR(x), MPFR_MALLOC_SIZE(xoldsize), MPFR_MALLOC_SIZE(xsize));
+ MPFR_SET_MANT_PTR(x, tmp);
+ MPFR_SET_ALLOC_SIZE(x, xsize);
+ }
+ MPFR_PREC (x) = p;
+ MPFR_SET_NAN (x); /* initializes to NaN */
+}
+
+/* Return the precision of x.  The #undef ensures a real function is
+   emitted even when mpfr.h provides a macro version. */
+#undef mpfr_get_prec
+mpfr_prec_t
+mpfr_get_prec (mpfr_srcptr x)
+{
+ return MPFR_PREC(x);
+}
diff --git a/src/set_q.c b/src/set_q.c
new file mode 100644
index 000000000..213b2aa3d
--- /dev/null
+++ b/src/set_q.c
@@ -0,0 +1,133 @@
+/* mpfr_set_q -- set a floating-point number from a multiple-precision rational
+
+Copyright 2000, 2001, 2002, 2004, 2005, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#define MPFR_NEED_LONGLONG_H
+#include "mpfr-impl.h"
+
+/*
+ * Set f to z, choosing the smallest precision for f
+ * so that z = f*(2^BPML)*zs*2^(RetVal)
+ * where BPML = GMP_NUMB_BITS (bits per mp_limb_t).
+ * f is initialized here with mpfr_init2; the caller must clear it.
+ * *zs receives the full limb count |SIZ(z)| (before trailing-zero
+ * removal); the return value is -c where c is the left shift used
+ * to normalize the mantissa.
+ */
+static int
+set_z (mpfr_ptr f, mpz_srcptr z, mp_size_t *zs)
+{
+ mp_limb_t *p;
+ mp_size_t s;
+ int c;
+ mpfr_prec_t pf;
+
+ MPFR_ASSERTD (mpz_sgn (z) != 0);
+
+ /* Remove useless ending 0 */
+ for (p = PTR (z), s = *zs = ABS (SIZ (z)) ; *p == 0; p++, s--)
+ MPFR_ASSERTD (s >= 0);
+
+ /* Get working precision */
+ count_leading_zeros (c, p[s-1]);
+ pf = s * GMP_NUMB_BITS - c;
+ if (pf < MPFR_PREC_MIN)
+ pf = MPFR_PREC_MIN;
+ mpfr_init2 (f, pf);
+
+ /* Copy Mantissa */
+ if (MPFR_LIKELY (c))
+ mpn_lshift (MPFR_MANT (f), p, s, c);
+ else
+ MPN_COPY (MPFR_MANT (f), p, s);
+
+ MPFR_SET_SIGN (f, mpz_sgn (z));
+ MPFR_SET_EXP (f, 0);
+
+ return -c;
+}
+
+/* Set f to the rational q, rounded in direction rnd.
+   q with a zero denominator maps to NaN (0/0) or a signed infinity
+   (n/0, n != 0); 0/d maps to +0.  Otherwise f = num/den is computed
+   exactly via two temporary mpfr numbers built by set_z, then scaled.
+   Returns the ternary value of the conversion. */
+int
+mpfr_set_q (mpfr_ptr f, mpq_srcptr q, mpfr_rnd_t rnd)
+{
+ mpz_srcptr num, den;
+ mpfr_t n, d;
+ int inexact;
+ int cn, cd;
+ long shift;
+ mp_size_t sn, sd;
+ MPFR_SAVE_EXPO_DECL (expo);
+
+ num = mpq_numref (q);
+ den = mpq_denref (q);
+ /* NAN and INF for mpq are not really documented, but could be found */
+ if (MPFR_UNLIKELY (mpz_sgn (num) == 0))
+ {
+ if (MPFR_UNLIKELY (mpz_sgn (den) == 0))
+ {
+ MPFR_SET_NAN (f);
+ MPFR_RET_NAN;
+ }
+ else
+ {
+ MPFR_SET_ZERO (f);
+ MPFR_SET_POS (f);
+ MPFR_RET (0);
+ }
+ }
+ if (MPFR_UNLIKELY (mpz_sgn (den) == 0))
+ {
+ MPFR_SET_INF (f);
+ MPFR_SET_SIGN (f, mpz_sgn (num));
+ MPFR_RET (0);
+ }
+
+ MPFR_SAVE_EXPO_MARK (expo);
+
+ cn = set_z (n, num, &sn);
+ cd = set_z (d, den, &sd);
+
+ /* sn - sd bounds the magnitude of the quotient in limbs. */
+ sn -= sd;
+ if (MPFR_UNLIKELY (sn > MPFR_EMAX_MAX / GMP_NUMB_BITS))
+ {
+ /* NOTE(review): MPFR_SIGN (f) is read before f has been given a
+ value by this call -- presumably f's previous sign is the
+ intended behavior here; confirm against upstream. */
+ inexact = mpfr_overflow (f, rnd, MPFR_SIGN (f));
+ goto end;
+ }
+ if (MPFR_UNLIKELY (sn < MPFR_EMIN_MIN / GMP_NUMB_BITS -1))
+ {
+ if (rnd == MPFR_RNDN)
+ rnd = MPFR_RNDZ;
+ inexact = mpfr_underflow (f, rnd, MPFR_SIGN (f));
+ goto end;
+ }
+
+ inexact = mpfr_div (f, n, d, rnd);
+ /* Undo the normalizations applied by set_z to num and den. */
+ shift = GMP_NUMB_BITS*sn+cn-cd;
+ MPFR_ASSERTD (shift == GMP_NUMB_BITS*sn+cn-cd);
+ cd = mpfr_mul_2si (f, f, shift, rnd);
+ MPFR_SAVE_EXPO_FREE (expo);
+ if (MPFR_UNLIKELY (cd != 0))
+ inexact = cd;
+ else
+ inexact = mpfr_check_range (f, inexact, rnd);
+ end:
+ mpfr_clear (d);
+ mpfr_clear (n);
+ return inexact;
+}
+
+
diff --git a/src/set_rnd.c b/src/set_rnd.c
new file mode 100644
index 000000000..447f2fee1
--- /dev/null
+++ b/src/set_rnd.c
@@ -0,0 +1,40 @@
+/* mpfr_set_default_rounding_mode -- set the default rounding mode
+ mpfr_get_default_rounding_mode -- get the default rounding mode
+
+Copyright 1999, 2001, 2004, 2005, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#include "mpfr-impl.h"
+
+mpfr_rnd_t MPFR_THREAD_ATTR __gmpfr_default_rounding_mode = MPFR_RNDN;
+
+/* Set the default rounding mode.  An out-of-range rnd_mode is
+   silently ignored (no error is reported to the caller). */
+void
+mpfr_set_default_rounding_mode (mpfr_rnd_t rnd_mode)
+{
+ if (rnd_mode >= MPFR_RNDN && rnd_mode < MPFR_RND_MAX)
+ __gmpfr_default_rounding_mode = rnd_mode;
+}
+
+/* Return the current default rounding mode.  The #undef ensures a
+   real function is emitted even when mpfr.h provides a macro. */
+#undef mpfr_get_default_rounding_mode
+mpfr_rnd_t
+mpfr_get_default_rounding_mode (void)
+{
+ return __gmpfr_default_rounding_mode;
+}
diff --git a/src/set_si.c b/src/set_si.c
new file mode 100644
index 000000000..86008e94d
--- /dev/null
+++ b/src/set_si.c
@@ -0,0 +1,30 @@
+/* mpfr_set_si -- set a MPFR number from a machine signed integer
+
+Copyright 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#include "mpfr-impl.h"
+
+/* Set x to the signed long i, rounded in direction rnd_mode.
+   Thin wrapper over mpfr_set_si_2exp with exponent 0; returns its
+   ternary value.  The #undef bypasses any macro version in mpfr.h. */
+#undef mpfr_set_si
+int
+mpfr_set_si (mpfr_ptr x, long i, mpfr_rnd_t rnd_mode)
+{
+ return mpfr_set_si_2exp (x, i, 0, rnd_mode);
+}
diff --git a/src/set_si_2exp.c b/src/set_si_2exp.c
new file mode 100644
index 000000000..fcf5acf65
--- /dev/null
+++ b/src/set_si_2exp.c
@@ -0,0 +1,73 @@
+/* mpfr_set_si_2exp -- set a MPFR number from a machine signed integer with
+ a shift
+
+Copyright 2004, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#define MPFR_NEED_LONGLONG_H
+#include "mpfr-impl.h"
+
+/* Set x to i * 2^e, rounded in direction rnd_mode.
+   Returns the ternary value from mpfr_check_range (0 when i fits in
+   the precision of x, so the result is exact up to range checking). */
+int
+mpfr_set_si_2exp (mpfr_ptr x, long i, mpfr_exp_t e, mpfr_rnd_t rnd_mode)
+{
+ if (i == 0)
+ {
+ MPFR_SET_ZERO (x);
+ MPFR_SET_POS (x);
+ MPFR_RET (0);
+ }
+ else
+ {
+ mp_size_t xn;
+ unsigned int cnt, nbits;
+ mp_limb_t ai, *xp;
+ int inex = 0;
+
+ /* FIXME: support int limbs (e.g. 16-bit limbs on 16-bit proc) */
+ ai = SAFE_ABS (unsigned long, i);
+ /* Check that |i| was not truncated when stored in a limb. */
+ MPFR_ASSERTN (SAFE_ABS (unsigned long, i) == ai);
+
+ /* Position of the highest limb */
+ xn = (MPFR_PREC (x) - 1) / GMP_NUMB_BITS;
+ count_leading_zeros (cnt, ai);
+ MPFR_ASSERTD (cnt < GMP_NUMB_BITS); /* OK since i != 0 */
+
+ /* Store |i| left-normalized in the top limb. */
+ xp = MPFR_MANT(x);
+ xp[xn] = ai << cnt;
+ /* Zero the xn lower limbs. */
+ MPN_ZERO(xp, xn);
+ MPFR_SET_SIGN (x, i < 0 ? MPFR_SIGN_NEG : MPFR_SIGN_POS);
+
+ nbits = GMP_NUMB_BITS - cnt;
+ e += nbits; /* exponent _before_ the rounding */
+
+ /* round if MPFR_PREC(x) smaller than length of i */
+ if (MPFR_UNLIKELY (MPFR_PREC (x) < nbits) &&
+ MPFR_UNLIKELY (mpfr_round_raw (xp + xn, xp + xn, nbits, i < 0,
+ MPFR_PREC (x), rnd_mode, &inex)))
+ {
+ /* Rounding carried out of the mantissa: result is 2^e. */
+ e++;
+ xp[xn] = MPFR_LIMB_HIGHBIT;
+ }
+
+ /* Direct assignment: e may be out of range, mpfr_check_range
+ deals with that. */
+ MPFR_EXP (x) = e;
+ return mpfr_check_range (x, inex, rnd_mode);
+ }
+}
diff --git a/src/set_sj.c b/src/set_sj.c
new file mode 100644
index 000000000..2620f2321
--- /dev/null
+++ b/src/set_sj.c
@@ -0,0 +1,65 @@
+/* mpfr_set_sj -- set a MPFR number from a huge machine signed integer
+
+Copyright 2004, 2005, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#ifdef HAVE_CONFIG_H
+# include "config.h" /* for a build within gmp */
+#endif
+
+/* The ISO C99 standard specifies that in C++ implementations the
+ INTMAX_MAX, ... macros should only be defined if explicitly requested. */
+#if defined __cplusplus
+# define __STDC_LIMIT_MACROS
+# define __STDC_CONSTANT_MACROS
+#endif
+
+#if HAVE_INTTYPES_H
+# include <inttypes.h>
+#endif
+#if HAVE_STDINT_H
+# include <stdint.h>
+#endif
+
+#include "mpfr-impl.h"
+
+#ifdef _MPFR_H_HAVE_INTMAX_T
+
+/* Set x to the signed huge integer j, rounded with rnd.
+   Thin wrapper around mpfr_set_sj_2exp with a zero exponent shift. */
+int
+mpfr_set_sj (mpfr_t x, intmax_t j, mpfr_rnd_t rnd)
+{
+  int inexact;
+
+  inexact = mpfr_set_sj_2exp (x, j, 0, rnd);
+  return inexact;
+}
+
+/* Set x to j * 2^e, rounded with rnd.
+   A negative j is handled by converting |j| with the rounding
+   direction mirrored, then flipping the sign of the result and
+   of the ternary value. */
+int
+mpfr_set_sj_2exp (mpfr_t x, intmax_t j, intmax_t e, mpfr_rnd_t rnd)
+{
+  int inexact;
+
+  if (j < 0)
+    {
+      /* - (uintmax_t) j is well defined even for j = INTMAX_MIN. */
+      inexact = mpfr_set_uj_2exp (x, - (uintmax_t) j, e,
+                                  MPFR_INVERT_RND (rnd));
+      MPFR_CHANGE_SIGN (x);
+      return -inexact;
+    }
+  return mpfr_set_uj_2exp (x, j, e, rnd);
+}
+
+#endif
diff --git a/src/set_str.c b/src/set_str.c
new file mode 100644
index 000000000..85cdfa202
--- /dev/null
+++ b/src/set_str.c
@@ -0,0 +1,42 @@
+/* mpfr_set_str -- set a floating-point number from a string
+
+Copyright 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#include "mpfr-impl.h"
+
+/* Set x from the string str interpreted in the given base.
+   Return 0 if the entire string is a valid number, -1 otherwise
+   (x may still have been modified on failure). */
+int
+mpfr_set_str (mpfr_t x, const char *str, int base, mpfr_rnd_t rnd)
+{
+  char *end;
+
+  /* An empty string is never a valid number. */
+  if (MPFR_UNLIKELY (str[0] == '\0'))
+    return -1;
+  mpfr_strtofr (x, str, &end, base, rnd);
+  /* Success iff the parser consumed everything up to the terminator. */
+  if (*end != '\0')
+    return -1;
+  return 0;
+}
+
+
+/* Initialize x with the default precision, then set it from str.
+   The return value is that of mpfr_set_str (0 on full parse, -1 on
+   failure). */
+int
+mpfr_init_set_str (mpfr_ptr x, const char *str, int base, mpfr_rnd_t rnd)
+{
+  int ret;
+
+  mpfr_init (x);
+  ret = mpfr_set_str (x, str, base, rnd);
+  return ret;
+}
diff --git a/src/set_str_raw.c b/src/set_str_raw.c
new file mode 100644
index 000000000..a457eb2e9
--- /dev/null
+++ b/src/set_str_raw.c
@@ -0,0 +1,55 @@
+/* mpfr_set_str_binary -- set a floating-point number from a binary string
+
+Copyright 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#include "mpfr-impl.h"
+
+/* Currently the number should be of the form +/- xxxx.xxxxxxEyy, where
+   the mantissa digits are binary and the exponent yy is written in
+   decimal. "N..." denotes NaN and [+/-]"I..." denotes infinity. The
+   mantissa of x is supposed to be large enough to hold all the bits of
+   str exactly (the conversion asserts exactness). */
+
+void
+mpfr_set_str_binary (mpfr_ptr x, const char *str)
+{
+  int has_sign;
+  int res;
+
+  /* A leading 'N' means NaN; also raise the NaN flag. */
+  if (*str == 'N')
+    {
+      MPFR_SET_NAN(x);
+      __gmpfr_flags |= MPFR_FLAGS_NAN;
+      return;
+    }
+
+  /* 'I' after an optional sign means (signed) infinity. */
+  has_sign = *str == '-' || *str == '+';
+  if (str[has_sign] == 'I')
+    {
+      MPFR_SET_INF(x);
+      if (*str == '-')
+        MPFR_SET_NEG(x);
+      else
+        MPFR_SET_POS(x);
+      return;
+    }
+
+  /* Regular number: parse in base 2. MPFR_RNDZ is irrelevant here
+     since the conversion must be exact (res == 0 asserted below). */
+  res = mpfr_strtofr (x, str, 0, 2, MPFR_RNDZ);
+  MPFR_ASSERTN (res == 0);
+}
diff --git a/src/set_ui.c b/src/set_ui.c
new file mode 100644
index 000000000..b897a4b8a
--- /dev/null
+++ b/src/set_ui.c
@@ -0,0 +1,30 @@
+/* mpfr_set_ui -- set a MPFR number from a machine unsigned integer
+
+Copyright 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#include "mpfr-impl.h"
+
+/* Set x to the machine unsigned integer i, rounded with rnd_mode.
+   (#undef in case mpfr.h defines a macro version.) */
+#undef mpfr_set_ui
+int
+mpfr_set_ui (mpfr_ptr x, unsigned long i, mpfr_rnd_t rnd_mode)
+{
+  int inexact;
+
+  /* Delegate to the general scaled version with a zero exponent. */
+  inexact = mpfr_set_ui_2exp (x, i, 0, rnd_mode);
+  return inexact;
+}
diff --git a/src/set_ui_2exp.c b/src/set_ui_2exp.c
new file mode 100644
index 000000000..71ad4085b
--- /dev/null
+++ b/src/set_ui_2exp.c
@@ -0,0 +1,72 @@
+/* mpfr_set_ui_2exp -- set a MPFR number from a machine unsigned integer with
+ a shift
+
+Copyright 2004, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#define MPFR_NEED_LONGLONG_H
+#include "mpfr-impl.h"
+
+/* Set x to i * 2^e, rounded in direction rnd_mode.
+   Returns the usual ternary value (0 iff the result is exact). */
+int
+mpfr_set_ui_2exp (mpfr_ptr x, unsigned long i, mpfr_exp_t e, mpfr_rnd_t rnd_mode)
+{
+  /* The result is always nonnegative. */
+  MPFR_SET_POS (x);
+
+  if (i == 0)
+    {
+      /* i = 0: the exponent e is irrelevant; the result is +0, exact. */
+      MPFR_SET_ZERO (x);
+      MPFR_RET (0);
+    }
+  else
+    {
+      mp_size_t xn;
+      unsigned int cnt, nbits;
+      mp_limb_t *xp;
+      int inex = 0;
+
+      /* FIXME: support int limbs (e.g. 16-bit limbs on 16-bit proc) */
+      MPFR_ASSERTD (i == (mp_limb_t) i);  /* i must fit in one limb */
+
+      /* Position of the highest limb */
+      xn = (MPFR_PREC (x) - 1) / GMP_NUMB_BITS;
+      count_leading_zeros (cnt, (mp_limb_t) i);
+      MPFR_ASSERTD (cnt < GMP_NUMB_BITS); /* OK since i != 0 */
+
+      /* Store i left-justified in the most significant limb. */
+      xp = MPFR_MANT(x);
+      xp[xn] = ((mp_limb_t) i) << cnt;
+      /* Zero the xn lower limbs. */
+      MPN_ZERO(xp, xn);
+
+      nbits = GMP_NUMB_BITS - cnt;  /* number of significant bits of i */
+      e += nbits; /* exponent _before_ the rounding */
+
+      /* round if MPFR_PREC(x) smaller than length of i */
+      if (MPFR_UNLIKELY (MPFR_PREC (x) < nbits) &&
+          MPFR_UNLIKELY (mpfr_round_raw (xp + xn, xp + xn, nbits, 0,
+                                         MPFR_PREC (x), rnd_mode, &inex)))
+        {
+          /* Rounding carried out of the top bit: the significand
+             becomes 100...0 and the exponent goes up by one. */
+          e++;
+          xp[xn] = MPFR_LIMB_HIGHBIT;
+        }
+
+      MPFR_EXP (x) = e;
+      /* e may now lie outside [emin, emax]: mpfr_check_range handles
+         underflow/overflow and returns the final ternary value. */
+      return mpfr_check_range (x, inex, rnd_mode);
+    }
+}
diff --git a/src/set_uj.c b/src/set_uj.c
new file mode 100644
index 000000000..00307984e
--- /dev/null
+++ b/src/set_uj.c
@@ -0,0 +1,136 @@
+/* mpfr_set_uj -- set a MPFR number from a huge machine unsigned integer
+
+Copyright 2004, 2005, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#ifdef HAVE_CONFIG_H
+# include "config.h" /* for a build within gmp */
+#endif
+
+/* The ISO C99 standard specifies that in C++ implementations the
+ INTMAX_MAX, ... macros should only be defined if explicitly requested. */
+#if defined(__cplusplus)
+# define __STDC_LIMIT_MACROS
+# define __STDC_CONSTANT_MACROS
+#endif
+
+#if HAVE_INTTYPES_H
+# include <inttypes.h>
+#endif
+#if HAVE_STDINT_H
+# include <stdint.h>
+#endif
+
+#define MPFR_NEED_LONGLONG_H
+#include "mpfr-impl.h"
+
+#ifdef _MPFR_H_HAVE_INTMAX_T
+
+/* Set x to the unsigned huge integer j, rounded with rnd.
+   Thin wrapper around mpfr_set_uj_2exp with a zero exponent shift. */
+int
+mpfr_set_uj (mpfr_t x, uintmax_t j, mpfr_rnd_t rnd)
+{
+  int inexact;
+
+  inexact = mpfr_set_uj_2exp (x, j, 0, rnd);
+  return inexact;
+}
+
+/* Set x to j * 2^e, rounded with rnd.
+   Returns the usual ternary value (0 iff the result is exact). */
+int
+mpfr_set_uj_2exp (mpfr_t x, uintmax_t j, intmax_t e, mpfr_rnd_t rnd)
+{
+  unsigned int cnt, i;
+  mp_size_t k, len;
+  mp_limb_t limb;
+  /* Enough limbs to hold a full uintmax_t. */
+  mp_limb_t yp[sizeof(uintmax_t) / sizeof(mp_limb_t)];
+  mpfr_t y;
+  unsigned long uintmax_bit_size = sizeof(uintmax_t) * CHAR_BIT;
+  unsigned long bpml = GMP_NUMB_BITS % uintmax_bit_size;
+
+  /* Special case */
+  if (j == 0)
+    {
+      MPFR_SET_POS(x);
+      MPFR_SET_ZERO(x);
+      MPFR_RET(0);
+    }
+
+  /* A uintmax_t must be an exact multiple of limbs. */
+  MPFR_ASSERTN (sizeof(uintmax_t) % sizeof(mp_limb_t) == 0);
+
+  /* Create an auxiliary variable y, exact image of j, on the stack. */
+  MPFR_TMP_INIT1 (yp, y, uintmax_bit_size);
+  k = numberof (yp);
+  if (k == 1)
+    limb = yp[0] = j;
+  else
+    {
+      /* Note: either GMP_NUMB_BITS = uintmax_bit_size, then k = 1 the
+         shift j >>= bpml is never done, or GMP_NUMB_BITS < uintmax_bit_size
+         and bpml = GMP_NUMB_BITS. */
+      for (i = 0; i < k; i++, j >>= bpml)
+        yp[i] = j; /* Only the low bits are copied */
+
+      /* Find the first limb not equal to zero. */
+      do
+        {
+          MPFR_ASSERTD (k > 0);
+          limb = yp[--k];
+        }
+      while (limb == 0);
+      k++;
+    }
+  count_leading_zeros(cnt, limb);
+  len = numberof (yp) - k;
+
+  /* Normalize it: len = number of last 0 limb, k number of non-zero limbs */
+  if (MPFR_LIKELY(cnt))
+    mpn_lshift (yp+len, yp, k, cnt); /* Normalize the High Limb*/
+  else if (len != 0)
+    MPN_COPY_DECR (yp+len, yp, k); /* Must use DECR */
+  if (len != 0)
+    /* Note: when numberof(yp)==1, len is constant and null, so the compiler
+       can optimize out this code. */
+    {
+      if (len == 1)
+        yp[0] = (mp_limb_t) 0;
+      else
+        MPN_ZERO (yp, len); /* Zeroing the last limbs */
+    }
+  e += k * GMP_NUMB_BITS - cnt; /* Update Expo */
+  MPFR_ASSERTD (MPFR_LIMB_MSB(yp[numberof (yp) - 1]) != 0);
+
+  /* Check expo underflow / overflow (can't use mpfr_check_range) */
+  if (MPFR_UNLIKELY(e < __gmpfr_emin))
+    {
+      /* The following test is necessary because in the rounding to the
+       * nearest mode, mpfr_underflow always rounds away from 0. In
+       * this rounding mode, we need to round to 0 if:
+       *   _ |x| < 2^(emin-2), or
+       *   _ |x| = 2^(emin-2) and the absolute value of the exact
+       *     result is <= 2^(emin-2). */
+      if (rnd == MPFR_RNDN && (e+1 < __gmpfr_emin || mpfr_powerof2_raw(y)))
+        rnd = MPFR_RNDZ;
+      return mpfr_underflow (x, rnd, MPFR_SIGN_POS);
+    }
+  if (MPFR_UNLIKELY(e > __gmpfr_emax))
+    return mpfr_overflow (x, rnd, MPFR_SIGN_POS);
+  MPFR_SET_EXP (y, e);
+
+  /* Final: set x to y (rounding if necessary) */
+  return mpfr_set (x, y, rnd);
+}
+
+#endif
diff --git a/src/set_z.c b/src/set_z.c
new file mode 100644
index 000000000..83c789c37
--- /dev/null
+++ b/src/set_z.c
@@ -0,0 +1,30 @@
+/* mpfr_set_z -- set a floating-point number from a multiple-precision integer
+
+Copyright 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#include "mpfr-impl.h"
+
+/* Convert the multiple-precision integer z to the floating-point
+   number f, rounding with rnd_mode; return the ternary value. */
+int
+mpfr_set_z (mpfr_ptr f, mpz_srcptr z, mpfr_rnd_t rnd_mode)
+{
+  int inexact;
+
+  /* z = z * 2^0: reuse the scaled-integer conversion. */
+  inexact = mpfr_set_z_2exp (f, z, 0, rnd_mode);
+  return inexact;
+}
diff --git a/src/set_z_exp.c b/src/set_z_exp.c
new file mode 100644
index 000000000..5aeea98dc
--- /dev/null
+++ b/src/set_z_exp.c
@@ -0,0 +1,180 @@
+/* mpfr_set_z_2exp -- set a floating-point number from a multiple-precision
+ integer and an exponent
+
+Copyright 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#define MPFR_NEED_LONGLONG_H
+#include "mpfr-impl.h"
+
+/* set f to the integer z multiplied by 2^e, rounded with rnd_mode;
+   return the usual ternary value (0 iff exact, sign-adjusted). */
+int
+mpfr_set_z_2exp (mpfr_ptr f, mpz_srcptr z, mpfr_exp_t e, mpfr_rnd_t rnd_mode)
+{
+  mp_size_t fn, zn, dif, en;
+  int k, sign_z, inex;
+  mp_limb_t *fp, *zp;
+  mpfr_exp_t exp;
+
+  sign_z = mpz_sgn (z);
+  if (MPFR_UNLIKELY (sign_z == 0)) /* ignore the exponent for 0 */
+    {
+      MPFR_SET_ZERO(f);
+      MPFR_SET_POS(f);
+      MPFR_RET(0);
+    }
+  /* mpz_sgn returns +/-1 here, matching MPFR's sign encoding. */
+  MPFR_ASSERTD (sign_z == MPFR_SIGN_POS || sign_z == MPFR_SIGN_NEG);
+
+  zn = ABS(SIZ(z)); /* limb size of z */
+  /* compute en = floor(e/GMP_NUMB_BITS) */
+  en = (e >= 0) ? e / GMP_NUMB_BITS : (e + 1) / GMP_NUMB_BITS - 1;
+  MPFR_ASSERTD (zn >= 1);
+  /* Early overflow check, before computing exp (which could itself
+     overflow the mpfr_exp_t type). */
+  if (MPFR_UNLIKELY (zn + en > MPFR_EMAX_MAX / GMP_NUMB_BITS + 1))
+    return mpfr_overflow (f, rnd_mode, sign_z);
+  /* because zn + en >= MPFR_EMAX_MAX / GMP_NUMB_BITS + 2
+     implies (zn + en) * GMP_NUMB_BITS >= MPFR_EMAX_MAX + GMP_NUMB_BITS + 1
+     and exp = zn * GMP_NUMB_BITS + e - k
+             >= (zn + en) * GMP_NUMB_BITS - k > MPFR_EMAX_MAX */
+
+  fp = MPFR_MANT (f);
+  fn = MPFR_LIMB_SIZE (f);
+  dif = zn - fn;  /* number of limbs of z that do not fit in f */
+  zp = PTR(z);
+  count_leading_zeros (k, zp[zn-1]);
+
+  /* now zn + en <= MPFR_EMAX_MAX / GMP_NUMB_BITS + 1
+     thus (zn + en) * GMP_NUMB_BITS <= MPFR_EMAX_MAX + GMP_NUMB_BITS
+     and exp = zn * GMP_NUMB_BITS + e - k
+             <= (zn + en) * GMP_NUMB_BITS - k + GMP_NUMB_BITS - 1
+             <= MPFR_EMAX_MAX + 2 * GMP_NUMB_BITS - 1 */
+  exp = (mpfr_prec_t) zn * GMP_NUMB_BITS + e - k;
+  /* The exponent will be exp or exp + 1 (due to rounding) */
+  if (MPFR_UNLIKELY (exp > __gmpfr_emax))
+    return mpfr_overflow (f, rnd_mode, sign_z);
+  if (MPFR_UNLIKELY (exp + 1 < __gmpfr_emin))
+    return mpfr_underflow (f, rnd_mode == MPFR_RNDN ? MPFR_RNDZ : rnd_mode,
+                           sign_z);
+
+  if (MPFR_LIKELY (dif >= 0))
+    {
+      mp_limb_t rb, sb, ulp;
+      int sh;
+
+      /* number has to be truncated */
+      if (MPFR_LIKELY (k != 0))
+        {
+          /* Left-justify z's top fn limbs into f's mantissa. */
+          mpn_lshift (fp, &zp[dif], fn, k);
+          if (MPFR_LIKELY (dif > 0))
+            fp[0] |= zp[dif - 1] >> (GMP_NUMB_BITS - k);
+        }
+      else
+        MPN_COPY (fp, zp + dif, fn);
+
+      /* Compute Rounding Bit and Sticky Bit */
+      MPFR_UNSIGNED_MINUS_MODULO (sh, MPFR_PREC (f) );
+      if (MPFR_LIKELY (sh != 0))
+        {
+          /* Precision does not end on a limb boundary: the rounding
+             bit and part of the sticky bits live in fp[0]. */
+          mp_limb_t mask = MPFR_LIMB_ONE << (sh-1);
+          mp_limb_t limb = fp[0];
+          rb = limb & mask;
+          sb = limb & (mask-1);
+          ulp = 2*mask;
+          fp[0] = limb & ~(ulp-1);  /* clear the bits below the ulp */
+        }
+      else /* sh == 0 */
+        {
+          /* Precision ends exactly on a limb boundary: the rounding
+             bit is the top untaken bit of z. */
+          mp_limb_t mask = MPFR_LIMB_ONE << (GMP_NUMB_BITS - 1 - k);
+          if (MPFR_LIKELY (dif > 0))
+            {
+              rb = zp[--dif] & mask;
+              sb = zp[dif] & (mask-1);
+            }
+          else
+            rb = sb = 0;
+          k = 0;
+          ulp = MPFR_LIMB_ONE;
+        }
+      /* Accumulate the remaining discarded limbs into the sticky bit. */
+      if (MPFR_UNLIKELY (sb == 0) && MPFR_LIKELY (dif > 0))
+        {
+          sb = zp[--dif];
+          if (MPFR_LIKELY (k != 0))
+            sb &= MPFR_LIMB_MASK (GMP_NUMB_BITS - k);
+          if (MPFR_UNLIKELY (sb == 0) && MPFR_LIKELY (dif > 0))
+            do {
+              sb = zp[--dif];
+            } while (dif > 0 && sb == 0);
+        }
+
+      /* Rounding */
+      if (MPFR_LIKELY (rnd_mode == MPFR_RNDN))
+        {
+          /* Round to even: truncate when the rounding bit is clear, or
+             on a tie with an even last mantissa bit. */
+          if (rb == 0 || MPFR_UNLIKELY (sb == 0 && (fp[0] & ulp) == 0))
+            goto trunc;
+          else
+            goto addoneulp;
+        }
+      else /* Not Nearest */
+        {
+          if (MPFR_LIKELY (MPFR_IS_LIKE_RNDZ (rnd_mode, sign_z < 0))
+              || MPFR_UNLIKELY ( (sb | rb) == 0 ))
+            goto trunc;
+          else
+            goto addoneulp;
+        }
+
+    trunc:
+      inex = MPFR_LIKELY ((sb | rb) != 0) ? -1 : 0;
+      goto end;
+
+    addoneulp:
+      inex = 1;
+      if (MPFR_UNLIKELY (mpn_add_1 (fp, fp, fn, ulp)))
+        {
+          /* Pow 2 case */
+          if (MPFR_UNLIKELY (exp == __gmpfr_emax))
+            return mpfr_overflow (f, rnd_mode, sign_z);
+          exp ++;
+          fp[fn-1] = MPFR_LIMB_HIGHBIT;
+        }
+    end:
+      (void) 0;
+    }
+  else /* dif < 0: Mantissa F is strictly bigger than z's one */
+    {
+      if (MPFR_LIKELY (k != 0))
+        mpn_lshift (fp - dif, zp, zn, k);
+      else
+        MPN_COPY (fp - dif, zp, zn);
+      /* fill with zeroes */
+      MPN_ZERO (fp, -dif);
+      inex = 0; /* result is exact */
+    }
+
+  if (MPFR_UNLIKELY (exp < __gmpfr_emin))
+    {
+      /* In round-to-nearest, mpfr_underflow rounds away from 0, so an
+         exact power of two at the underflow bound must round to 0. */
+      if (rnd_mode == MPFR_RNDN && inex == 0 && mpfr_powerof2_raw (f))
+        rnd_mode = MPFR_RNDZ;
+      return mpfr_underflow (f, rnd_mode, sign_z);
+    }
+
+  MPFR_SET_EXP (f, exp);
+  MPFR_SET_SIGN (f, sign_z);
+  MPFR_RET (inex*sign_z);
+}
diff --git a/src/set_zero.c b/src/set_zero.c
new file mode 100644
index 000000000..466d2abf6
--- /dev/null
+++ b/src/set_zero.c
@@ -0,0 +1,31 @@
+/* mpfr_set_zero -- set a number to plus or minus zero.
+
+Copyright 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#include "mpfr-impl.h"
+
+/* Set x to +0 if sign >= 0, to -0 if sign < 0.
+   mpfr_set_ui with value 0 yields +0, so only a negative sign
+   needs a fix-up of the sign bit afterwards. */
+void
+mpfr_set_zero (mpfr_ptr x, int sign)
+{
+  mpfr_set_ui (x, 0, MPFR_RNDN);
+  if (sign < 0)
+    MPFR_SET_NEG(x);
+}
diff --git a/src/setmax.c b/src/setmax.c
new file mode 100644
index 000000000..055b4342c
--- /dev/null
+++ b/src/setmax.c
@@ -0,0 +1,41 @@
+/* mpfr_setmax -- maximum representable floating-point number (raw version)
+
+Copyright 2002, 2003, 2004, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#include "mpfr-impl.h"
+
+/* Note: the flags are not cleared and the current sign is kept. */
+
+/* Set x to the largest finite value of its precision with exponent e:
+   a mantissa of all ones (1.111...1 in binary). */
+void
+mpfr_setmax (mpfr_ptr x, mpfr_exp_t e)
+{
+  mp_size_t xn, i;
+  int sh;
+  mp_limb_t *xp;
+
+  MPFR_SET_EXP (x, e);
+  xn = 1 + (MPFR_PREC(x) - 1) / GMP_NUMB_BITS;  /* number of limbs */
+  /* sh = number of unused low bits in the least significant limb */
+  sh = (mpfr_prec_t) xn * GMP_NUMB_BITS - MPFR_PREC(x);
+  xp = MPFR_MANT(x);
+  /* Low limb: ones in the significant positions, zeros below. */
+  xp[0] = MP_LIMB_T_MAX << sh;
+  for (i = 1; i < xn; i++)
+    xp[i] = MP_LIMB_T_MAX;
+}
diff --git a/src/setmin.c b/src/setmin.c
new file mode 100644
index 000000000..0319b54cf
--- /dev/null
+++ b/src/setmin.c
@@ -0,0 +1,38 @@
+/* mpfr_setmin -- minimum representable floating-point number (raw version)
+
+Copyright 2002, 2003, 2004, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#include "mpfr-impl.h"
+
+/* Note: the flags are not cleared and the current sign is kept. */
+
+/* Set x to the smallest normalized value with exponent e:
+   a mantissa of 1.000...0 in binary. */
+void
+mpfr_setmin (mpfr_ptr x, mpfr_exp_t e)
+{
+  mp_size_t xn;
+  mp_limb_t *xp;
+
+  MPFR_SET_EXP (x, e);
+  xn = (MPFR_PREC(x) - 1) / GMP_NUMB_BITS;  /* index of the top limb */
+  xp = MPFR_MANT(x);
+  xp[xn] = MPFR_LIMB_HIGHBIT;  /* leading 1 bit */
+  MPN_ZERO(xp, xn);            /* all lower limbs are zero */
+}
diff --git a/src/setsign.c b/src/setsign.c
new file mode 100644
index 000000000..cfc80b43e
--- /dev/null
+++ b/src/setsign.c
@@ -0,0 +1,30 @@
+/* mpfr_setsign -- Produce a value with the magnitude of x and sign bit s
+
+Copyright 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#include "mpfr-impl.h"
+
+/* Copy x to z with the sign bit forced according to s (nonzero s
+   selects negative), rounding with rnd_mode; return the ternary
+   value of the conversion.
+   (#undef in case mpfr.h defines a macro version.) */
+#undef mpfr_setsign
+int
+mpfr_setsign (mpfr_ptr z, mpfr_srcptr x, int s, mpfr_rnd_t rnd_mode)
+{
+  int sign;
+
+  /* Map the signbit flag onto MPFR's -1 / +1 sign convention. */
+  sign = (s != 0) ? -1 : 1;
+  return mpfr_set4 (z, x, rnd_mode, sign);
+}
diff --git a/src/sgn.c b/src/sgn.c
new file mode 100644
index 000000000..189d797dd
--- /dev/null
+++ b/src/sgn.c
@@ -0,0 +1,40 @@
+/* mpfr_sgn -- Sign of a floating point number.
+
+Copyright 2003, 2004, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#include "mpfr-impl.h"
+
+/* Return the sign of a: +1, -1, or 0 for zero.
+   For NaN, set the erange flag and return 0.
+   Parenthesized name: define the real function even if mpfr.h
+   provides a macro version. */
+int
+(mpfr_sgn) (mpfr_srcptr a)
+{
+  if (MPFR_UNLIKELY (MPFR_IS_SINGULAR (a)))
+    {
+      /* The singular cases are mutually exclusive, so the order of
+         these tests does not matter for correctness. */
+      if (MPFR_IS_NAN (a))
+        {
+          MPFR_SET_ERANGE ();
+          return 0;
+        }
+      if (MPFR_IS_ZERO (a))
+        return 0;
+      /* Infinity: fall through to the generic sign extraction. */
+    }
+  return MPFR_INT_SIGN (a);
+}
diff --git a/src/si_op.c b/src/si_op.c
new file mode 100644
index 000000000..5938f898d
--- /dev/null
+++ b/src/si_op.c
@@ -0,0 +1,57 @@
+/* mpfr_add_si -- add a floating-point number with a machine integer
+ mpfr_sub_si -- sub a floating-point number with a machine integer
+ mpfr_si_sub -- sub a machine number with a floating-point number
+
+Copyright 2004, 2005, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#include "mpfr-impl.h"
+
+/* y = x + u, where u is a machine signed long.
+   Return the usual ternary inexact value. */
+int
+mpfr_add_si (mpfr_ptr y, mpfr_srcptr x, long int u, mpfr_rnd_t rnd_mode)
+{
+  if (u >= 0)
+    return mpfr_add_ui (y, x, u, rnd_mode);
+  else
+    /* Cast before negating: -u would be signed-overflow undefined
+       behavior for u = LONG_MIN, while - (unsigned long) u is well
+       defined and yields exactly |u|. */
+    return mpfr_sub_ui (y, x, - (unsigned long) u, rnd_mode);
+}
+
+/* y = x - u, where u is a machine signed long.
+   Return the usual ternary inexact value. */
+int
+mpfr_sub_si (mpfr_ptr y, mpfr_srcptr x, long int u, mpfr_rnd_t rnd_mode)
+{
+  if (u >= 0)
+    return mpfr_sub_ui (y, x, u, rnd_mode);
+  else
+    /* Cast before negating: -u would be signed-overflow undefined
+       behavior for u = LONG_MIN, while - (unsigned long) u is well
+       defined and yields exactly |u|. */
+    return mpfr_add_ui (y, x, - (unsigned long) u, rnd_mode);
+}
+
+/* y = u - x, where u is a machine signed long.
+   Return the usual ternary inexact value. */
+int
+mpfr_si_sub (mpfr_ptr y, long int u, mpfr_srcptr x, mpfr_rnd_t rnd_mode)
+{
+  if (u >= 0)
+    return mpfr_ui_sub (y, u, x, rnd_mode);
+  else
+    {
+      /* u - x = -(x + |u|): compute with the rounding direction
+         mirrored, then negate the result and the ternary value.
+         Cast before negating to avoid signed-overflow undefined
+         behavior for u = LONG_MIN. */
+      int res = -mpfr_add_ui (y, x, - (unsigned long) u,
+                              MPFR_INVERT_RND (rnd_mode));
+      MPFR_CHANGE_SIGN (y);
+      return res;
+    }
+}
+
diff --git a/src/signbit.c b/src/signbit.c
new file mode 100644
index 000000000..20b1e18cb
--- /dev/null
+++ b/src/signbit.c
@@ -0,0 +1,30 @@
+/* mpfr_signbit -- Signbit of a MPFR number
+
+Copyright 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#include "mpfr-impl.h"
+
+/* Return a nonzero value iff the sign bit of x is set.
+   (#undef in case mpfr.h defines a macro version.) */
+#undef mpfr_signbit
+int
+mpfr_signbit (mpfr_srcptr x)
+{
+  return (MPFR_SIGN (x) < 0) ? 1 : 0;
+}
diff --git a/src/sin.c b/src/sin.c
new file mode 100644
index 000000000..010d2406f
--- /dev/null
+++ b/src/sin.c
@@ -0,0 +1,180 @@
+/* mpfr_sin -- sine of a floating-point number
+
+Copyright 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#define MPFR_NEED_LONGLONG_H
+#include "mpfr-impl.h"
+
+/* Compute sin(x) via mpfr_sincos_fast (asking only for the sine) and
+   convert its packed double ternary value back into the standard single
+   ternary convention (negative / zero / positive).  */
+static int
+mpfr_sin_fast (mpfr_ptr y, mpfr_srcptr x, mpfr_rnd_t rnd_mode)
+{
+  int inex;
+
+  inex = mpfr_sincos_fast (y, NULL, x, rnd_mode);
+  inex = inex & 3; /* bits 0-1 encode the sine: 0: exact, 1: rounded up,
+                      2: rounded down (see the INEX macro in sin_cos.c) */
+  return (inex == 2) ? -1 : inex;
+}
+
+/* Set y to sin(x) rounded in direction rnd_mode, using a Ziv loop:
+   compute cos(x) at working precision m with directed roundings, derive
+   |sin(x)| = sqrt(1 - cos(x)^2) so the error can be bounded, and retry
+   with larger m until correct rounding is possible.
+   Returns the ternary value.  */
+int
+mpfr_sin (mpfr_ptr y, mpfr_srcptr x, mpfr_rnd_t rnd_mode)
+{
+  mpfr_t c, xr;
+  mpfr_srcptr xx;          /* points to the (possibly reduced) argument */
+  mpfr_exp_t expx, err;
+  mpfr_prec_t precy, m;
+  int inexact, sign, reduce;
+  MPFR_ZIV_DECL (loop);
+  MPFR_SAVE_EXPO_DECL (expo);
+
+  MPFR_LOG_FUNC (("x[%#R]=%R rnd=%d", x, x, rnd_mode),
+                 ("y[%#R]=%R inexact=%d", y, y, inexact));
+
+  if (MPFR_UNLIKELY (MPFR_IS_SINGULAR (x)))
+    {
+      if (MPFR_IS_NAN (x) || MPFR_IS_INF (x))
+        {
+          MPFR_SET_NAN (y);
+          MPFR_RET_NAN;
+
+        }
+      else /* x is zero */
+        {
+          MPFR_ASSERTD (MPFR_IS_ZERO (x));
+          MPFR_SET_ZERO (y);
+          MPFR_SET_SAME_SIGN (y, x);
+          MPFR_RET (0);
+        }
+    }
+
+  /* sin(x) = x - x^3/6 + ... so the error is < 2^(3*EXP(x)-2) */
+  MPFR_FAST_COMPUTE_IF_SMALL_INPUT (y, x, -2 * MPFR_GET_EXP (x), 2, 0,
+                                    rnd_mode, {});
+
+  MPFR_SAVE_EXPO_MARK (expo);
+
+  /* Compute initial precision */
+  precy = MPFR_PREC (y);
+
+  if (precy >= MPFR_SINCOS_THRESHOLD)
+    {
+      /* Bug fix: restore the exponent range and flags saved by
+         MPFR_SAVE_EXPO_MARK before returning, as mpfr_sin_cos does in the
+         same threshold case; the previous direct return leaked the
+         extended exponent range to the caller.  */
+      MPFR_SAVE_EXPO_FREE (expo);
+      return mpfr_sin_fast (y, x, rnd_mode);
+    }
+
+  m = precy + MPFR_INT_CEIL_LOG2 (precy) + 13;
+  expx = MPFR_GET_EXP (x);
+
+  mpfr_init (c);
+  mpfr_init (xr);
+
+  MPFR_ZIV_INIT (loop, m);
+  for (;;)
+    {
+      /* first perform argument reduction modulo 2*Pi (if needed),
+         also helps to determine the sign of sin(x) */
+      if (expx >= 2) /* If Pi < x < 4, we need to reduce too, to determine
+                        the sign of sin(x). For 2 <= |x| < Pi, we could avoid
+                        the reduction. */
+        {
+          reduce = 1;
+          /* As expx + m - 1 will silently be converted into mpfr_prec_t
+             in the mpfr_set_prec call, the assert below may be useful to
+             avoid undefined behavior. */
+          MPFR_ASSERTN (expx + m - 1 <= MPFR_PREC_MAX);
+          mpfr_set_prec (c, expx + m - 1);
+          mpfr_set_prec (xr, m);
+          mpfr_const_pi (c, MPFR_RNDN);
+          mpfr_mul_2ui (c, c, 1, MPFR_RNDN);       /* c ~ 2*Pi */
+          mpfr_remainder (xr, x, c, MPFR_RNDN);    /* xr = x mod 2*Pi */
+          /* The analysis is similar to that of cos.c:
+             |xr - x - 2kPi| <= 2^(2-m). Thus we can decide the sign
+             of sin(x) if xr is at distance at least 2^(2-m) of both
+             0 and +/-Pi. */
+          mpfr_div_2ui (c, c, 1, MPFR_RNDN);       /* back to c ~ Pi */
+          /* Since c approximates Pi with an error <= 2^(2-expx-m) <= 2^(-m),
+             it suffices to check that c - |xr| >= 2^(2-m). */
+          if (MPFR_SIGN (xr) > 0)
+            mpfr_sub (c, c, xr, MPFR_RNDZ);
+          else
+            mpfr_add (c, c, xr, MPFR_RNDZ);
+          if (MPFR_IS_ZERO(xr)
+              || MPFR_EXP(xr) < (mpfr_exp_t) 3 - (mpfr_exp_t) m
+              || MPFR_EXP(c) < (mpfr_exp_t) 3 - (mpfr_exp_t) m)
+            goto ziv_next;  /* xr too close to 0 or +/-Pi: cannot decide
+                               the sign at this precision */
+
+          /* |xr - x - 2kPi| <= 2^(2-m), thus |sin(xr) - sin(x)| <= 2^(2-m) */
+          xx = xr;
+        }
+      else /* the input argument is already reduced */
+        {
+          reduce = 0;
+          xx = x;
+        }
+
+      sign = MPFR_SIGN(xx);
+      /* now that the argument is reduced, precision m is enough */
+      mpfr_set_prec (c, m);
+      mpfr_cos (c, xx, MPFR_RNDZ);    /* can't be exact */
+      mpfr_nexttoinf (c);             /* now c = cos(x) rounded away */
+      mpfr_mul (c, c, c, MPFR_RNDU);  /* away */
+      mpfr_ui_sub (c, 1, c, MPFR_RNDZ);
+      mpfr_sqrt (c, c, MPFR_RNDZ);    /* c = |sin(x)| rounded toward zero */
+      if (MPFR_IS_NEG_SIGN(sign))
+        MPFR_CHANGE_SIGN(c);
+
+      /* Warning: c may be 0! */
+      if (MPFR_UNLIKELY (MPFR_IS_ZERO (c)))
+        {
+          /* Huge cancellation: increase prec a lot! */
+          m = MAX (m, MPFR_PREC (x));
+          m = 2 * m;
+        }
+      else
+        {
+          /* the absolute error on c is at most 2^(3-m-EXP(c)),
+             plus 2^(2-m) if there was an argument reduction.
+             Since EXP(c) <= 1, 3-m-EXP(c) >= 2-m, thus the error
+             is at most 2^(3-m-EXP(c)) in case of argument reduction. */
+          err = 2 * MPFR_GET_EXP (c) + (mpfr_exp_t) m - 3 - (reduce != 0);
+          if (MPFR_CAN_ROUND (c, err, precy, rnd_mode))
+            break;
+
+          /* check for huge cancellation (Near 0) */
+          if (err < (mpfr_exp_t) MPFR_PREC (y))
+            m += MPFR_PREC (y) - err;
+          /* Check if near 1 */
+          if (MPFR_GET_EXP (c) == 1)
+            m += m;
+        }
+
+    ziv_next:
+      /* Else generic increase */
+      MPFR_ZIV_NEXT (loop, m);
+    }
+  MPFR_ZIV_FREE (loop);
+
+  inexact = mpfr_set (y, c, rnd_mode);
+  /* inexact cannot be 0, since this would mean that c was representable
+     within the target precision, but in that case mpfr_can_round will fail */
+
+  mpfr_clear (c);
+  mpfr_clear (xr);
+
+  MPFR_SAVE_EXPO_FREE (expo);
+  return mpfr_check_range (y, inexact, rnd_mode);
+}
diff --git a/src/sin_cos.c b/src/sin_cos.c
new file mode 100644
index 000000000..fd3f56577
--- /dev/null
+++ b/src/sin_cos.c
@@ -0,0 +1,662 @@
+/* mpfr_sin_cos -- sine and cosine of a floating-point number
+
+Copyright 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#define MPFR_NEED_LONGLONG_H
+#include "mpfr-impl.h"
+
+/* Pack two ternary values into one int: bits 0-1 encode y's ternary value
+   (0: exact, 1: positive/rounded up, 2: negative/rounded down) and bits
+   2-3 encode z's, so the whole result is 0 iff both are exact.  */
+#define INEXPOS(y) ((y) == 0 ? 0 : (((y) > 0) ? 1 : 2))
+#define INEX(y,z) (INEXPOS(y) | (INEXPOS(z) << 2))
+
+/* Set (y, z) <- (sin(x), cos(x)), both rounded in direction rnd_mode.
+   y and z must be distinct variables.  The return value packs the two
+   ternary values via INEX (bits 0-1 for y, bits 2-3 for z) and is 0 iff
+   both results are exact, i.e., iff x = 0.  */
+int
+mpfr_sin_cos (mpfr_ptr y, mpfr_ptr z, mpfr_srcptr x, mpfr_rnd_t rnd_mode)
+{
+  mpfr_prec_t prec, m;
+  int neg, reduce;
+  mpfr_t c, xr;
+  mpfr_srcptr xx;          /* points to the (possibly reduced) argument */
+  mpfr_exp_t err, expx;
+  int inexy, inexz;
+  MPFR_ZIV_DECL (loop);
+  MPFR_SAVE_EXPO_DECL (expo);
+
+  MPFR_ASSERTN (y != z);
+
+  if (MPFR_UNLIKELY (MPFR_IS_SINGULAR (x)))
+    {
+      if (MPFR_IS_NAN(x) || MPFR_IS_INF(x))
+        {
+          MPFR_SET_NAN (y);
+          MPFR_SET_NAN (z);
+          MPFR_RET_NAN;
+        }
+      else /* x is zero */
+        {
+          MPFR_ASSERTD (MPFR_IS_ZERO (x));
+          MPFR_SET_ZERO (y);
+          MPFR_SET_SAME_SIGN (y, x);
+          /* y = 0, thus exact, but z is inexact in case of underflow
+             or overflow */
+          inexy = 0; /* y is exact */
+          inexz = mpfr_set_ui (z, 1, rnd_mode);
+          return INEX(inexy,inexz);
+        }
+    }
+
+  MPFR_LOG_FUNC (("x[%#R]=%R rnd=%d", x, x, rnd_mode),
+                 ("sin[%#R]=%R cos[%#R]=%R", y, y, z, z));
+
+  MPFR_SAVE_EXPO_MARK (expo);
+
+  prec = MAX (MPFR_PREC (y), MPFR_PREC (z));
+  m = prec + MPFR_INT_CEIL_LOG2 (prec) + 13;
+  expx = MPFR_GET_EXP (x);
+
+  /* When x is close to 0, say 2^(-k), then there is a cancellation of about
+     2k bits in 1-cos(x)^2. FIXME: in that case, it would be more efficient
+     to compute sin(x) directly. VL: This is partly done by using
+     MPFR_FAST_COMPUTE_IF_SMALL_INPUT from the mpfr_sin and mpfr_cos
+     functions. Moreover, any overflow on m is avoided. */
+  if (expx < 0)
+    {
+      /* Warning: in case y = x, and the first call to
+         MPFR_FAST_COMPUTE_IF_SMALL_INPUT succeeds but the second fails,
+         we will have clobbered the original value of x.
+         The workaround is to first compute z = cos(x) in that case, since
+         y and z are different. */
+      if (y != x)
+        /* y and x differ, thus we can safely try to compute y first */
+        {
+          /* The "if (0) { label: ... }" trick below runs the second
+             small-input expansion only when the first one succeeded
+             (it jumps in via goto), and falls through to the generic
+             code otherwise.  */
+          MPFR_FAST_COMPUTE_IF_SMALL_INPUT (
+            y, x, -2 * expx, 2, 0, rnd_mode,
+            { inexy = _inexact;
+              goto small_input; });
+          if (0)
+            {
+            small_input:
+              /* we can go here only if we can round sin(x) */
+              MPFR_FAST_COMPUTE_IF_SMALL_INPUT (
+                z, __gmpfr_one, -2 * expx, 1, 0, rnd_mode,
+                { inexz = _inexact;
+                  MPFR_SAVE_EXPO_UPDATE_FLAGS (expo, __gmpfr_flags);
+                  goto end; });
+            }
+
+          /* if we go here, one of the two MPFR_FAST_COMPUTE_IF_SMALL_INPUT
+             calls failed */
+        }
+      else /* y and x are the same variable: try to compute z first, which
+              necessarily differs */
+        {
+          MPFR_FAST_COMPUTE_IF_SMALL_INPUT (
+            z, __gmpfr_one, -2 * expx, 1, 0, rnd_mode,
+            { inexz = _inexact;
+              goto small_input2; });
+          if (0)
+            {
+            small_input2:
+              /* we can go here only if we can round cos(x) */
+              MPFR_FAST_COMPUTE_IF_SMALL_INPUT (
+                y, x, -2 * expx, 2, 0, rnd_mode,
+                { inexy = _inexact;
+                  MPFR_SAVE_EXPO_UPDATE_FLAGS (expo, __gmpfr_flags);
+                  goto end; });
+            }
+        }
+      m += 2 * (-expx);  /* compensate the cancellation in 1 - cos(x)^2 */
+    }
+
+  if (prec >= MPFR_SINCOS_THRESHOLD)
+    {
+      MPFR_SAVE_EXPO_FREE (expo);
+      return mpfr_sincos_fast (y, z, x, rnd_mode);
+    }
+
+  mpfr_init (c);
+  mpfr_init (xr);
+
+  MPFR_ZIV_INIT (loop, m);
+  for (;;)
+    {
+      /* the following is copied from sin.c */
+      if (expx >= 2) /* reduce the argument */
+        {
+          reduce = 1;
+          /* NOTE(review): sin.c guards the same call with
+             MPFR_ASSERTN (expx + m - 1 <= MPFR_PREC_MAX) against a silent
+             conversion to mpfr_prec_t; the same assertion is presumably
+             wanted here — confirm.  */
+          mpfr_set_prec (c, expx + m - 1);
+          mpfr_set_prec (xr, m);
+          mpfr_const_pi (c, MPFR_RNDN);
+          mpfr_mul_2ui (c, c, 1, MPFR_RNDN);       /* c ~ 2*Pi */
+          mpfr_remainder (xr, x, c, MPFR_RNDN);    /* xr = x mod 2*Pi */
+          mpfr_div_2ui (c, c, 1, MPFR_RNDN);       /* back to c ~ Pi */
+          if (MPFR_SIGN (xr) > 0)
+            mpfr_sub (c, c, xr, MPFR_RNDZ);
+          else
+            mpfr_add (c, c, xr, MPFR_RNDZ);
+          if (MPFR_IS_ZERO(xr)
+              || MPFR_EXP(xr) < (mpfr_exp_t) 3 - (mpfr_exp_t) m
+              || MPFR_EXP(c) < (mpfr_exp_t) 3 - (mpfr_exp_t) m)
+            goto next_step;  /* too close to 0 or +/-Pi to decide the sign */
+          xx = xr;
+        }
+      else /* the input argument is already reduced */
+        {
+          reduce = 0;
+          xx = x;
+        }
+
+      neg = MPFR_IS_NEG (xx); /* gives sign of sin(x) */
+      mpfr_set_prec (c, m);
+      mpfr_cos (c, xx, MPFR_RNDZ);
+      /* If no argument reduction was performed, the error is at most ulp(c),
+         otherwise it is at most ulp(c) + 2^(2-m). Since |c| < 1, we have
+         ulp(c) <= 2^(-m), thus the error is bounded by 2^(3-m) in that later
+         case. */
+      if (reduce == 0)
+        err = m;
+      else
+        err = MPFR_GET_EXP (c) + (mpfr_exp_t) (m - 3);
+      if (!mpfr_can_round (c, err, MPFR_RNDN, MPFR_RNDZ,
+                           MPFR_PREC (z) + (rnd_mode == MPFR_RNDN)))
+        goto next_step;
+
+      /* we can't set z now, because in case z = x, and the mpfr_can_round()
+         call below fails, we will have clobbered the input */
+      mpfr_set_prec (xr, MPFR_PREC(c));
+      mpfr_swap (xr, c); /* save the approximation of the cosine in xr */
+      mpfr_sqr (c, xr, MPFR_RNDU); /* the absolute error is bounded by
+                                      2^(5-m) if reduce=1, and by 2^(2-m)
+                                      otherwise */
+      mpfr_ui_sub (c, 1, c, MPFR_RNDN); /* error bounded by 2^(6-m) if reduce
+                                           is 1, and 2^(3-m) otherwise */
+      mpfr_sqrt (c, c, MPFR_RNDN); /* the absolute error is bounded by
+                                      2^(6-m-Exp(c)) if reduce=1, and
+                                      2^(3-m-Exp(c)) otherwise */
+      err = 3 + 3 * reduce - MPFR_GET_EXP (c);
+      if (neg)
+        MPFR_CHANGE_SIGN (c);
+
+      /* the absolute error on c is at most 2^(err-m), which we must put
+         in the form 2^(EXP(c)-err). */
+      err = MPFR_GET_EXP (c) + (mpfr_exp_t) m - err;
+      if (mpfr_can_round (c, err, MPFR_RNDN, MPFR_RNDZ,
+                          MPFR_PREC (y) + (rnd_mode == MPFR_RNDN)))
+        break;
+      /* check for huge cancellation */
+      if (err < (mpfr_exp_t) MPFR_PREC (y))
+        m += MPFR_PREC (y) - err;
+      /* Check if near 1 */
+      if (MPFR_GET_EXP (c) == 1
+          && MPFR_MANT (c)[MPFR_LIMB_SIZE (c)-1] == MPFR_LIMB_HIGHBIT)
+        m += m;
+
+    next_step:
+      MPFR_ZIV_NEXT (loop, m);
+      mpfr_set_prec (c, m);
+    }
+  MPFR_ZIV_FREE (loop);
+
+  inexy = mpfr_set (y, c, rnd_mode);  /* c holds the sine here */
+  inexz = mpfr_set (z, xr, rnd_mode); /* xr holds the cosine (swapped above) */
+
+  mpfr_clear (c);
+  mpfr_clear (xr);
+
+ end:
+  MPFR_SAVE_EXPO_FREE (expo);
+  mpfr_check_range (y, inexy, rnd_mode);
+  mpfr_check_range (z, inexz, rnd_mode);
+  MPFR_RET (INEX(inexy,inexz));
+}
+
+/*************** asymptotically fast implementation below ********************/
+
+/* Truncate R to at most prec bits, storing the result in Q
+   (Q = floor(R / 2^l)).  Return l, the number of truncated (dropped)
+   low-order bits; l is 0 when R already fits in prec bits.  */
+static mpfr_prec_t
+reduce (mpz_t Q, mpz_srcptr R, mpfr_prec_t prec)
+{
+  mpfr_prec_t l = mpz_sizeinbase (R, 2);
+
+  l = (l > prec) ? l - prec : 0;
+  mpz_fdiv_q_2exp (Q, R, l);
+  return l;
+}
+
+/* Truncate S and C in place by the same number of bits l, chosen so that
+   the smaller of the two ends up with (at most) prec bits — both must be
+   shifted equally since they share the common denominator Q*2^l.
+   Return l, the number of truncated bits.  */
+static unsigned long
+reduce2 (mpz_t S, mpz_t C, mpfr_prec_t prec)
+{
+  unsigned long ls = mpz_sizeinbase (S, 2);
+  unsigned long lc = mpz_sizeinbase (C, 2);
+  unsigned long l;
+
+  l = (ls < lc) ? ls : lc; /* smaller length */
+  l = (l > prec) ? l - prec : 0;
+  mpz_fdiv_q_2exp (S, S, l);
+  mpz_fdiv_q_2exp (C, C, l);
+  return l;
+}
+
+/* return in S0/Q0 a rational approximation of sin(X) with absolute error
+   bounded by 9*2^(-prec), where 0 <= X=p/2^r <= 1/2,
+   and in C0/Q0 a rational approximation of cos(X), with relative error
+   bounded by 9*2^(-prec) (and also absolute error, since
+   |cos(X)| <= 1).
+   We have sin(X)/X = sum((-1)^i*(p/2^r)^(2i)/(2i+1)!, i=0..infinity).
+   We use the following binary splitting formula:
+   P(a,b) = (-p)^(b-a)
+   Q(a,b) = (2a)*(2a+1)*2^r if a+1=b [except Q(0,1)=1], Q(a,c)*Q(c,b) otherwise
+   T(a,b) = 1 if a+1=b, Q(c,b)*T(a,c)+P(a,c)*T(c,b) otherwise.
+
+   Since we use P(a,b) for b-a=2^k only, we compute only p^(2^k).
+   We do not store the factor 2^r in Q().
+
+   Then sin(X)/X ~ T(0,i)/Q(0,i) for i so that (p/2^r)^i/i! is small enough.
+
+   Return l such that Q0 has to be multiplied by 2^l.
+
+   Assumes prec >= 10.
+*/
+static unsigned long
+sin_bs_aux (mpz_t Q0, mpz_t S0, mpz_t C0, mpz_srcptr p, mpfr_prec_t r,
+            mpfr_prec_t prec)
+{
+  /* One slot per binary-splitting level; GMP_NUMB_BITS levels suffice
+     since the number of combined terms doubles at each level.  */
+  mpz_t T[GMP_NUMB_BITS], Q[GMP_NUMB_BITS], ptoj[GMP_NUMB_BITS], pp;
+  mpfr_prec_t log2_nb_terms[GMP_NUMB_BITS], mult[GMP_NUMB_BITS];
+  mpfr_prec_t accu[GMP_NUMB_BITS], size_ptoj[GMP_NUMB_BITS];
+  mpfr_prec_t prec_i_have, r0 = r;
+  unsigned long alloc, i, j, k;
+  mpfr_prec_t l;
+
+  if (MPFR_UNLIKELY(mpz_cmp_ui (p, 0) == 0)) /* sin(x)/x -> 1 */
+    {
+      mpz_set_ui (Q0, 1);
+      mpz_set_ui (S0, 1);
+      mpz_set_ui (C0, 1);
+      return 0;
+    }
+
+  /* check that X=p/2^r <= 1/2 */
+  MPFR_ASSERTN(mpz_sizeinbase (p, 2) - (mpfr_exp_t) r <= -1);
+
+  mpz_init (pp);
+
+  /* normalize p (non-zero here): strip trailing zero bits */
+  l = mpz_scan1 (p, 0);
+  mpz_fdiv_q_2exp (pp, p, l); /* p = pp * 2^l */
+  mpz_mul (pp, pp, pp);       /* pp <- pp^2, so the series runs over x^2 */
+  r = 2 * (r - l); /* x^2 = (p/2^r0)^2 = pp / 2^r */
+
+  /* now p is odd */
+  alloc = 2;
+  mpz_init_set_ui (T[0], 6);
+  mpz_init_set_ui (Q[0], 6);
+  mpz_init_set (ptoj[0], pp); /* ptoj[i] = pp^(2^i) */
+  mpz_init (T[1]);
+  mpz_init (Q[1]);
+  mpz_init (ptoj[1]);
+  mpz_mul (ptoj[1], pp, pp); /* ptoj[1] = pp^2 */
+  size_ptoj[1] = mpz_sizeinbase (ptoj[1], 2);
+
+  mpz_mul_2exp (T[0], T[0], r);
+  mpz_sub (T[0], T[0], pp); /* 6*2^r - pp = 6*2^r*(1 - x^2/6) */
+  log2_nb_terms[0] = 1;
+
+  /* already take into account the factor x=p/2^r in sin(x) = x * (...) */
+  mult[0] = r - mpz_sizeinbase (pp, 2) + r0 - mpz_sizeinbase (p, 2);
+  /* we have x^3 < 1/2^mult[0] */
+
+  for (i = 2, k = 0, prec_i_have = mult[0]; prec_i_have < prec; i += 2)
+    {
+      /* i is even here */
+      /* invariant: Q[0]*Q[1]*...*Q[k] equals (2i-1)!,
+         we have already summed terms of index < i
+         in S[0]/Q[0], ..., S[k]/Q[k] */
+      k ++;
+      if (k + 1 >= alloc) /* necessarily k + 1 = alloc */
+        {
+          alloc ++;
+          mpz_init (T[k+1]);
+          mpz_init (Q[k+1]);
+          mpz_init (ptoj[k+1]);
+          mpz_mul (ptoj[k+1], ptoj[k], ptoj[k]); /* pp^(2^(k+1)) */
+          size_ptoj[k+1] = mpz_sizeinbase (ptoj[k+1], 2);
+        }
+      /* for i even, we have Q[k] = (2*i)*(2*i+1), T[k] = 1,
+         then Q[k+1] = (2*i+2)*(2*i+3), T[k+1] = 1,
+         which reduces to T[k] = (2*i+2)*(2*i+3)*2^r-pp,
+         Q[k] = (2*i)*(2*i+1)*(2*i+2)*(2*i+3). */
+      log2_nb_terms[k] = 1;
+      mpz_set_ui (Q[k], (2 * i + 2) * (2 * i + 3));
+      mpz_mul_2exp (T[k], Q[k], r);
+      mpz_sub (T[k], T[k], pp);
+      mpz_mul_ui (Q[k], Q[k], (2 * i) * (2 * i + 1));
+      /* the next term of the series is divided by Q[k] and multiplied
+         by pp^2/2^(2r), thus the mult. factor < 1/2^mult[k] */
+      mult[k] = mpz_sizeinbase (Q[k], 2) + 2 * r - size_ptoj[1] - 1;
+      /* the absolute contribution of the next term is 1/2^accu[k] */
+      accu[k] = (k == 0) ? mult[k] : mult[k] + accu[k-1];
+      prec_i_have = accu[k]; /* the current term is < 1/2^accu[k] */
+      j = (i + 2) / 2;
+      l = 1;
+      while ((j & 1) == 0) /* combine and reduce: merge the two rightmost
+                              partial sums whenever their term counts are
+                              equal (binary-splitting combination step) */
+        {
+          mpz_mul (T[k], T[k], ptoj[l]);
+          mpz_mul (T[k-1], T[k-1], Q[k]);
+          mpz_mul_2exp (T[k-1], T[k-1], r << l);
+          mpz_add (T[k-1], T[k-1], T[k]);
+          mpz_mul (Q[k-1], Q[k-1], Q[k]);
+          log2_nb_terms[k-1] ++; /* number of terms in S[k-1]
+                                    is a power of 2 by construction */
+          prec_i_have = mpz_sizeinbase (Q[k], 2);
+          mult[k-1] += prec_i_have + (r << l) - size_ptoj[l] - 1;
+          accu[k-1] = (k == 1) ? mult[k-1] : mult[k-1] + accu[k-2];
+          prec_i_have = accu[k-1];
+          l ++;
+          j >>= 1;
+          k --;
+        }
+    }
+
+  /* accumulate all products in T[0] and Q[0]. Warning: contrary to above,
+     here we do not have log2_nb_terms[k-1] = log2_nb_terms[k]+1. */
+  l = 0; /* number of accumulated terms in the right part T[k]/Q[k] */
+  while (k > 0)
+    {
+      j = log2_nb_terms[k-1];
+      mpz_mul (T[k], T[k], ptoj[j]);
+      mpz_mul (T[k-1], T[k-1], Q[k]);
+      l += 1 << log2_nb_terms[k];
+      mpz_mul_2exp (T[k-1], T[k-1], r * l);
+      mpz_add (T[k-1], T[k-1], T[k]);
+      mpz_mul (Q[k-1], Q[k-1], Q[k]);
+      k--;
+    }
+
+  l = r0 + r * (i - 1); /* implicit multiplier 2^l for Q0 */
+  /* at this point T[0]/(2^l*Q[0]) is an approximation of sin(x) where the 1st
+     neglected term has contribution < 1/2^prec, thus since the series has
+     alternate signs, the error is < 1/2^prec */
+
+  /* we truncate Q0 to prec bits: the relative error is at most 2^(1-prec),
+     which means that Q0 = Q[0] * (1+theta) with |theta| <= 2^(1-prec)
+     [up to a power of two] */
+  l += reduce (Q0, Q[0], prec);
+  l -= reduce (T[0], T[0], prec);
+  /* multiply by x = p/2^l */
+  mpz_mul (S0, T[0], p);
+  l -= reduce (S0, S0, prec); /* S0 = T[0] * (1 + theta)^2 up to power of 2 */
+  /* sin(X) ~ S0/Q0*(1 + theta)^3 + err with |theta| <= 2^(1-prec) and
+     |err| <= 2^(-prec), thus since |S0/Q0| <= 1:
+     |sin(X) - S0/Q0| <= 4*|theta*S0/Q0| + |err| <= 9*2^(-prec) */
+
+  mpz_clear (pp);
+  for (j = 0; j < alloc; j ++)
+    {
+      mpz_clear (T[j]);
+      mpz_clear (Q[j]);
+      mpz_clear (ptoj[j]);
+    }
+
+  /* compute cos(X) from sin(X): sqrt(1-(S/Q)^2) = sqrt(Q^2-S^2)/Q
+     = sqrt(Q0^2*2^(2l)-S0^2)/Q0.
+     Write S/Q = sin(X) + eps with |eps| <= 9*2^(-prec),
+     then sqrt(Q^2-S^2) = sqrt(Q^2-Q^2*(sin(X)+eps)^2)
+     = sqrt(Q^2*cos(X)^2-Q^2*(2*sin(X)*eps+eps^2))
+     = sqrt(Q^2*cos(X)^2-Q^2*eps1) with |eps1|<=9*2^(-prec)
+     [using X<=1/2 and eps<=9*2^(-prec) and prec>=10]
+
+     Since we truncate the square root, we get:
+     sqrt(Q^2*cos(X)^2-Q^2*eps1)+eps2 with |eps2|<1
+     = Q*sqrt(cos(X)^2-eps1)+eps2
+     = Q*cos(X)*(1+eps3)+eps2 with |eps3| <= 6*2^(-prec)
+     = Q*cos(X)*(1+eps3+eps2/(Q*cos(X)))
+     = Q*cos(X)*(1+eps4) with |eps4| <= 9*2^(-prec)
+     since |Q| >= 2^(prec-1) */
+  /* we assume that Q0*2^l >= 2^(prec-1) */
+  MPFR_ASSERTN(l + mpz_sizeinbase (Q0, 2) >= prec);
+  mpz_mul (C0, Q0, Q0);
+  mpz_mul_2exp (C0, C0, 2 * l);
+  mpz_submul (C0, S0, S0);
+  mpz_sqrt (C0, C0); /* truncated integer square root */
+
+  return l;
+}
+
+/* Put in s and c approximations of sin(x) and cos(x) respectively.
+   Assumes 0 < x < Pi/4 and PREC(s) = PREC(c) >= 10.
+   Return err such that the relative error is bounded by 2^err ulps.
+   Strategy: split x bitwise into pieces X_k of doubling width, compute
+   (sin, cos) of each piece by binary splitting (sin_bs_aux), and combine
+   them with the angle-addition formulas.  */
+static int
+sincos_aux (mpfr_t s, mpfr_t c, mpfr_srcptr x, mpfr_rnd_t rnd_mode)
+{
+  mpfr_prec_t prec_s, sh;
+  mpz_t Q, S, C, Q2, S2, C2, y;
+  mpfr_t x2;
+  unsigned long l, l2, j, err;
+
+  MPFR_ASSERTD(MPFR_PREC(s) == MPFR_PREC(c));
+
+  prec_s = MPFR_PREC(s);
+
+  mpfr_init2 (x2, MPFR_PREC(x));
+  mpz_init (Q);
+  mpz_init (S);
+  mpz_init (C);
+  mpz_init (Q2);
+  mpz_init (S2);
+  mpz_init (C2);
+  mpz_init (y);
+
+  mpfr_set (x2, x, MPFR_RNDN); /* exact */
+  mpz_set_ui (Q, 1);
+  l = 0;
+  mpz_set_ui (S, 0); /* sin(0) = S/(2^l*Q), exact */
+  mpz_set_ui (C, 1); /* cos(0) = C/(2^l*Q), exact */
+
+  /* Invariant: x = X + x2/2^(sh-1), where the part X was already treated,
+     S/(2^l*Q) ~ sin(X), C/(2^l*Q) ~ cos(X), and x2/2^(sh-1) < Pi/4.
+     'sh-1' is the number of already shifted bits in x2.
+  */
+
+  for (sh = 1, j = 0; mpfr_cmp_ui (x2, 0) != 0 && sh <= prec_s; sh <<= 1, j++)
+    {
+      if (sh > prec_s / 2) /* sin(x) = x + O(x^3), cos(x) = 1 + O(x^2):
+                              the remaining part is small enough that the
+                              first-order approximation is exact to prec_s */
+        {
+          l2 = -mpfr_get_z_2exp (S2, x2); /* S2/2^l2 = x2 */
+          l2 += sh - 1;
+          mpz_set_ui (Q2, 1);
+          mpz_set_ui (C2, 1);
+          mpz_mul_2exp (C2, C2, l2);
+          mpfr_set_ui (x2, 0, MPFR_RNDN);
+        }
+      else
+        {
+          /* y <- trunc(x2 * 2^sh) = trunc(x * 2^(2*sh-1)) */
+          mpfr_mul_2exp (x2, x2, sh, MPFR_RNDN); /* exact */
+          mpfr_get_z (y, x2, MPFR_RNDZ); /* round towards zero: now
+                                            0 <= x2 < 2^sh, thus
+                                            0 <= x2/2^(sh-1) < 2^(1-sh) */
+          if (mpz_cmp_ui (y, 0) == 0)
+            continue; /* this piece of x is entirely zero bits */
+          mpfr_sub_z (x2, x2, y, MPFR_RNDN); /* should be exact */
+          l2 = sin_bs_aux (Q2, S2, C2, y, 2 * sh - 1, prec_s);
+          /* we now have |S2/Q2/2^l2 - sin(X)| <= 9*2^(-prec_s)
+             and |C2/Q2/2^l2 - cos(X)| <= 6*2^(-prec_s), with X=y/2^(2sh-1) */
+        }
+      if (sh == 1) /* S=0, C=1 */
+        {
+          l = l2;
+          mpz_swap (Q, Q2);
+          mpz_swap (S, S2);
+          mpz_swap (C, C2);
+        }
+      else
+        {
+          /* s <- s*c2+c*s2, c <- c*c2-s*s2, using Karatsuba:
+             a = s+c, b = s2+c2, t = a*b, d = s*s2, e = c*c2,
+             s <- t - d - e, c <- e - d */
+          mpz_add (y, S, C); /* a */
+          mpz_mul (C, C, C2); /* e */
+          mpz_add (C2, C2, S2); /* b */
+          mpz_mul (S2, S, S2); /* d */
+          mpz_mul (y, y, C2); /* a*b */
+          mpz_sub (S, y, S2); /* t - d */
+          mpz_sub (S, S, C); /* t - d - e */
+          mpz_sub (C, C, S2); /* e - d */
+          mpz_mul (Q, Q, Q2);
+          /* after j loops, the error is <= (11j-2)*2^(-prec_s) */
+          l += l2;
+          /* reduce Q to prec_s bits */
+          l += reduce (Q, Q, prec_s);
+          /* reduce S,C to prec_s bits, error <= 11*j*2^(-prec_s) */
+          l -= reduce2 (S, C, prec_s);
+        }
+    }
+
+  /* convert the accumulated error bound 11*j into a power-of-two
+     exponent: err = ceil(log2(11*j)) */
+  j = 11 * j;
+  for (err = 0; j > 1; j = (j + 1) / 2, err ++);
+
+  mpfr_set_z (s, S, MPFR_RNDN);
+  mpfr_div_z (s, s, Q, MPFR_RNDN);
+  mpfr_div_2exp (s, s, l, MPFR_RNDN);
+
+  mpfr_set_z (c, C, MPFR_RNDN);
+  mpfr_div_z (c, c, Q, MPFR_RNDN);
+  mpfr_div_2exp (c, c, l, MPFR_RNDN);
+
+  mpz_clear (Q);
+  mpz_clear (S);
+  mpz_clear (C);
+  mpz_clear (Q2);
+  mpz_clear (S2);
+  mpz_clear (C2);
+  mpz_clear (y);
+  mpfr_clear (x2);
+  return err;
+}
+
+/* Assumes x is neither NaN, +/-Inf, nor +/- 0.
+   One of s and c might be NULL, in which case the corresponding value is
+   not computed.
+   Assumes s differs from c.
+   Computes sin(x) (in s) and/or cos(x) (in c) rounded in direction rnd,
+   via sincos_aux inside a Ziv loop, with quadrant-based argument
+   reduction modulo Pi/2 when |x| > Pi/4.  Returns the two ternary values
+   packed with INEX (bits 0-1: s, bits 2-3: c).  */
+int
+mpfr_sincos_fast (mpfr_t s, mpfr_t c, mpfr_srcptr x, mpfr_rnd_t rnd)
+{
+  int inexs, inexc;
+  mpfr_t x_red, ts, tc;
+  mpfr_prec_t w;           /* working precision */
+  mpfr_exp_t err, errs, errc;
+  MPFR_ZIV_DECL (loop);
+
+  MPFR_ASSERTN(s != c);
+  if (s == NULL)
+    w = MPFR_PREC(c);
+  else if (c == NULL)
+    w = MPFR_PREC(s);
+  else
+    w = MPFR_PREC(s) >= MPFR_PREC(c) ? MPFR_PREC(s) : MPFR_PREC(c);
+  w += MPFR_INT_CEIL_LOG2(w) + 9; /* ensures w >= 10 (needed by sincos_aux) */
+  mpfr_init2 (ts, w);
+  mpfr_init2 (tc, w);
+
+  MPFR_ZIV_INIT (loop, w);
+  for (;;)
+    {
+      /* if 0 < x <= Pi/4, we can call sincos_aux directly
+         (1686629713/2^31 = 0.78539816325..., slightly below Pi/4) */
+      if (MPFR_IS_POS(x) && mpfr_cmp_ui_2exp (x, 1686629713, -31) <= 0)
+        {
+          err = sincos_aux (ts, tc, x, MPFR_RNDN);
+        }
+      /* if -Pi/4 <= x < 0, use sin(-x)=-sin(x) */
+      else if (MPFR_IS_NEG(x) && mpfr_cmp_si_2exp (x, -1686629713, -31) >= 0)
+        {
+          mpfr_init2 (x_red, MPFR_PREC(x));
+          mpfr_neg (x_red, x, rnd); /* exact */
+          err = sincos_aux (ts, tc, x_red, MPFR_RNDN);
+          mpfr_neg (ts, ts, MPFR_RNDN);
+          mpfr_clear (x_red);
+        }
+      else /* argument reduction is needed */
+        {
+          long q;              /* quadrant number from mpfr_remquo */
+          mpfr_t pi;
+          int neg = 0;
+
+          mpfr_init2 (x_red, w);
+          mpfr_init2 (pi, (MPFR_EXP(x) > 0) ? w + MPFR_EXP(x) : w);
+          mpfr_const_pi (pi, MPFR_RNDN);
+          mpfr_div_2exp (pi, pi, 1, MPFR_RNDN); /* Pi/2 */
+          mpfr_remquo (x_red, &q, x, pi, MPFR_RNDN);
+          /* x = q * (Pi/2 + eps1) + x_red + eps2,
+             where |eps1| <= 1/2*ulp(Pi/2) = 2^(-w-MAX(0,EXP(x))),
+             and eps2 <= 1/2*ulp(x_red) <= 1/2*ulp(Pi/2) = 2^(-w)
+             Since |q| <= x/(Pi/2) <= |x|, we have
+             q*|eps1| <= 2^(-w), thus
+             |x - q * Pi/2 - x_red| <= 2^(1-w) */
+          /* now -Pi/4 <= x_red <= Pi/4: if x_red < 0, consider -x_red */
+          if (MPFR_IS_NEG(x_red))
+            {
+              mpfr_neg (x_red, x_red, MPFR_RNDN);
+              neg = 1;
+            }
+          err = sincos_aux (ts, tc, x_red, MPFR_RNDN);
+          err ++; /* to take into account the argument reduction */
+          if (neg) /* sin(-x) = -sin(x), cos(-x) = cos(x) */
+            mpfr_neg (ts, ts, MPFR_RNDN);
+          if (q & 2) /* sin(x+Pi) = -sin(x), cos(x+Pi) = -cos(x) */
+            {
+              mpfr_neg (ts, ts, MPFR_RNDN);
+              mpfr_neg (tc, tc, MPFR_RNDN);
+            }
+          if (q & 1) /* sin(x+Pi/2) = cos(x), cos(x+Pi/2) = -sin(x) */
+            {
+              mpfr_neg (ts, ts, MPFR_RNDN);
+              mpfr_swap (ts, tc);
+            }
+          mpfr_clear (x_red);
+          mpfr_clear (pi);
+        }
+      /* adjust errors with respect to absolute values */
+      errs = err - MPFR_EXP(ts);
+      errc = err - MPFR_EXP(tc);
+      if ((s == NULL || MPFR_CAN_ROUND (ts, w - errs, MPFR_PREC(s), rnd)) &&
+          (c == NULL || MPFR_CAN_ROUND (tc, w - errc, MPFR_PREC(c), rnd)))
+        break;
+      MPFR_ZIV_NEXT (loop, w);
+      mpfr_set_prec (ts, w);
+      mpfr_set_prec (tc, w);
+    }
+  MPFR_ZIV_FREE (loop);
+
+  inexs = (s == NULL) ? 0 : mpfr_set (s, ts, rnd);
+  inexc = (c == NULL) ? 0 : mpfr_set (c, tc, rnd);
+
+  mpfr_clear (ts);
+  mpfr_clear (tc);
+  return INEX(inexs,inexc);
+}
diff --git a/src/sinh.c b/src/sinh.c
new file mode 100644
index 000000000..d3d30ac66
--- /dev/null
+++ b/src/sinh.c
@@ -0,0 +1,182 @@
+/* mpfr_sinh -- hyperbolic sine
+
+Copyright 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#define MPFR_NEED_LONGLONG_H
+#include "mpfr-impl.h"
+
+/* The computation of sinh is done by
+     sinh(x) = 1/2 [e^(x) - e^(-x)]
+   with a fallback on sinh(x) = 2 * sinh(x/2) * cosh(x/2) when e^x
+   overflows.  */
+
+/* Set y to sinh(xt) rounded in direction rnd_mode; return the ternary
+   value (after mpfr_check_range).  */
+int
+mpfr_sinh (mpfr_ptr y, mpfr_srcptr xt, mpfr_rnd_t rnd_mode)
+{
+  mpfr_t x;       /* |xt|; sinh is odd, the sign is restored via mpfr_set4 */
+  int inexact;
+
+  MPFR_LOG_FUNC (("x[%#R]=%R rnd=%d", xt, xt, rnd_mode),
+                 ("y[%#R]=%R inexact=%d", y, y, inexact));
+
+  if (MPFR_UNLIKELY (MPFR_IS_SINGULAR (xt)))
+    {
+      if (MPFR_IS_NAN (xt))
+        {
+          MPFR_SET_NAN (y);
+          MPFR_RET_NAN;
+        }
+      else if (MPFR_IS_INF (xt))
+        {
+          /* sinh(+/-Inf) = +/-Inf, exact */
+          MPFR_SET_INF (y);
+          MPFR_SET_SAME_SIGN (y, xt);
+          MPFR_RET (0);
+        }
+      else /* xt is zero */
+        {
+          MPFR_ASSERTD (MPFR_IS_ZERO (xt));
+          MPFR_SET_ZERO (y); /* sinh(0) = 0 */
+          MPFR_SET_SAME_SIGN (y, xt);
+          MPFR_RET (0);
+        }
+    }
+
+  /* sinh(x) = x + x^3/6 + ... so the error is < 2^(3*EXP(x)-2) */
+  MPFR_FAST_COMPUTE_IF_SMALL_INPUT (y, xt, -2 * MPFR_GET_EXP(xt), 2, 1,
+                                    rnd_mode, {});
+
+  MPFR_TMP_INIT_ABS (x, xt);
+
+  {
+    mpfr_t t, ti;
+    mpfr_exp_t d;
+    mpfr_prec_t Nt;   /* Precision of the intermediary variable */
+    long int err;     /* Precision of error */
+    MPFR_ZIV_DECL (loop);
+    MPFR_SAVE_EXPO_DECL (expo);
+    MPFR_GROUP_DECL (group);
+
+    MPFR_SAVE_EXPO_MARK (expo);
+
+    /* compute the precision of intermediary variable */
+    Nt = MAX (MPFR_PREC (x), MPFR_PREC (y));
+    /* the optimal number of bits : see algorithms.ps */
+    Nt = Nt + MPFR_INT_CEIL_LOG2 (Nt) + 4;
+    /* If x is near 0, exp(x) - 1/exp(x) = 2*x+x^3/3+O(x^5): compensate
+       the ~2*|EXP(x)| bits of cancellation in the subtraction below */
+    if (MPFR_GET_EXP (x) < 0)
+      Nt -= 2*MPFR_GET_EXP (x);
+
+    /* initialise of intermediary variables */
+    MPFR_GROUP_INIT_2 (group, Nt, t, ti);
+
+    /* First computation of sinh */
+    MPFR_ZIV_INIT (loop, Nt);
+    for (;;)
+      {
+        MPFR_BLOCK_DECL (flags);
+
+        /* compute sinh */
+        MPFR_BLOCK (flags, mpfr_exp (t, x, MPFR_RNDD));
+        if (MPFR_OVERFLOW (flags))
+          /* exp(x) does overflow */
+          {
+            /* sinh(x) = 2 * sinh(x/2) * cosh(x/2) */
+            mpfr_div_2ui (ti, x, 1, MPFR_RNDD); /* exact */
+
+            /* t <- cosh(x/2): error(t) <= 1 ulp(t) */
+            MPFR_BLOCK (flags, mpfr_cosh (t, ti, MPFR_RNDD));
+            if (MPFR_OVERFLOW (flags))
+              /* when x>1 we have |sinh(x)| >= cosh(x/2), so sinh(x)
+                 overflows too */
+              {
+                inexact = mpfr_overflow (y, rnd_mode, MPFR_SIGN (xt));
+                MPFR_SAVE_EXPO_UPDATE_FLAGS (expo, MPFR_FLAGS_OVERFLOW);
+                break;
+              }
+
+            /* ti <- sinh(x/2): error(ti) <= 1 ulp(ti);
+               cannot overflow because 0 < sinh(x) < cosh(x) when x > 0 */
+            mpfr_sinh (ti, ti, MPFR_RNDD);
+
+            /* multiplication below, error(t) <= 5 ulp(t) */
+            MPFR_BLOCK (flags, mpfr_mul (t, t, ti, MPFR_RNDD));
+            if (MPFR_OVERFLOW (flags))
+              {
+                inexact = mpfr_overflow (y, rnd_mode, MPFR_SIGN (xt));
+                MPFR_SAVE_EXPO_UPDATE_FLAGS (expo, MPFR_FLAGS_OVERFLOW);
+                break;
+              }
+
+            /* doubling below, exact */
+            MPFR_BLOCK (flags, mpfr_mul_2ui (t, t, 1, MPFR_RNDN));
+            if (MPFR_OVERFLOW (flags))
+              {
+                inexact = mpfr_overflow (y, rnd_mode, MPFR_SIGN (xt));
+                MPFR_SAVE_EXPO_UPDATE_FLAGS (expo, MPFR_FLAGS_OVERFLOW);
+                break;
+              }
+
+            /* we have lost at most 3 bits of precision */
+            err = Nt - 3;
+            if (MPFR_LIKELY (MPFR_CAN_ROUND (t, err, MPFR_PREC (y),
+                                             rnd_mode)))
+              {
+                inexact = mpfr_set4 (y, t, rnd_mode, MPFR_SIGN (xt));
+                break;
+              }
+            err = Nt; /* double the precision */
+          }
+        else
+          {
+            d = MPFR_GET_EXP (t);              /* exponent of exp(x) */
+            mpfr_ui_div (ti, 1, t, MPFR_RNDU); /* 1/exp(x) */
+            mpfr_sub (t, t, ti, MPFR_RNDN);    /* exp(x) - 1/exp(x) */
+            mpfr_div_2ui (t, t, 1, MPFR_RNDN); /* 1/2(exp(x) - 1/exp(x)) */
+
+            /* it may be that t is zero (this can only occur when the
+               rounded-down exp(x), i.e. t, equals 1, and thus ti=1 too,
+               making the subtraction cancel completely) */
+            if (MPFR_IS_ZERO (t))
+              err = Nt; /* double the precision */
+            else
+              {
+                /* calculation of the error: d now measures the
+                   cancellation between exp(x) and the difference */
+                d = d - MPFR_GET_EXP (t) + 2;
+                /* error estimate: err = Nt-(__gmpfr_ceil_log2(1+pow(2,d)));*/
+                err = Nt - (MAX (d, 0) + 1);
+                if (MPFR_LIKELY (MPFR_CAN_ROUND (t, err, MPFR_PREC (y),
+                                                 rnd_mode)))
+                  {
+                    inexact = mpfr_set4 (y, t, rnd_mode, MPFR_SIGN (xt));
+                    break;
+                  }
+              }
+          }
+
+        /* actualisation of the precision */
+        Nt += err;
+        MPFR_ZIV_NEXT (loop, Nt);
+        MPFR_GROUP_REPREC_2 (group, Nt, t, ti);
+      }
+    MPFR_ZIV_FREE (loop);
+    MPFR_GROUP_CLEAR (group);
+    MPFR_SAVE_EXPO_FREE (expo);
+  }
+
+  return mpfr_check_range (y, inexact, rnd_mode);
+}
diff --git a/src/sinh_cosh.c b/src/sinh_cosh.c
new file mode 100644
index 000000000..af83fff68
--- /dev/null
+++ b/src/sinh_cosh.c
@@ -0,0 +1,157 @@
+/* mpfr_sinh_cosh -- hyperbolic sine and cosine
+
+Copyright 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#define MPFR_NEED_LONGLONG_H
+#include "mpfr-impl.h"
+
+#define INEXPOS(y) ((y) == 0 ? 0 : (((y) > 0) ? 1 : 2))
+#define INEX(y,z) (INEXPOS(y) | (INEXPOS(z) << 2))
+
+ /* The computations are done by
+ cosh(x) = 1/2 [e^(x)+e^(-x)]
+ sinh(x) = 1/2 [e^(x)-e^(-x)]
+ Adapted from mpfr_sinh.c */
+
+int
+mpfr_sinh_cosh (mpfr_ptr sh, mpfr_ptr ch, mpfr_srcptr xt, mpfr_rnd_t rnd_mode)
+{
+ mpfr_t x;
+ int inexact_sh, inexact_ch;
+
+ MPFR_ASSERTN (sh != ch);
+
+ MPFR_LOG_FUNC (("x[%#R]=%R rnd=%d", xt, xt, rnd_mode),
+ ("sh[%#R]=%R ch[%#R]=%R", sh, sh, ch, ch));
+
+ if (MPFR_UNLIKELY (MPFR_IS_SINGULAR (xt)))
+ {
+ if (MPFR_IS_NAN (xt))
+ {
+ MPFR_SET_NAN (ch);
+ MPFR_SET_NAN (sh);
+ MPFR_RET_NAN;
+ }
+ else if (MPFR_IS_INF (xt))
+ {
+ MPFR_SET_INF (sh);
+ MPFR_SET_SAME_SIGN (sh, xt);
+ MPFR_SET_INF (ch);
+ MPFR_SET_POS (ch);
+ MPFR_RET (0);
+ }
+ else /* xt is zero */
+ {
+ MPFR_ASSERTD (MPFR_IS_ZERO (xt));
+ MPFR_SET_ZERO (sh); /* sinh(0) = 0 */
+ MPFR_SET_SAME_SIGN (sh, xt);
+ inexact_sh = 0;
+ inexact_ch = mpfr_set_ui (ch, 1, rnd_mode); /* cosh(0) = 1 */
+ return INEX(inexact_sh,inexact_ch);
+ }
+ }
+
+ /* Warning: if we use MPFR_FAST_COMPUTE_IF_SMALL_INPUT here, make sure
+ that the code also works in case of overlap (see sin_cos.c) */
+
+ MPFR_TMP_INIT_ABS (x, xt);
+
+ {
+ mpfr_t s, c, ti;
+ mpfr_exp_t d;
+ mpfr_prec_t N; /* Precision of the intermediary variables */
+ long int err; /* Precision of error */
+ MPFR_ZIV_DECL (loop);
+ MPFR_SAVE_EXPO_DECL (expo);
+ MPFR_GROUP_DECL (group);
+
+ MPFR_SAVE_EXPO_MARK (expo);
+
+ /* compute the precision of intermediary variable */
+ N = MPFR_PREC (ch);
+ N = MAX (N, MPFR_PREC (sh));
+ /* the optimal number of bits : see algorithms.ps */
+ N = N + MPFR_INT_CEIL_LOG2 (N) + 4;
+
+    /* initialisation of intermediary variables */
+ MPFR_GROUP_INIT_3 (group, N, s, c, ti);
+
+ /* First computation of sinh_cosh */
+ MPFR_ZIV_INIT (loop, N);
+ for (;;)
+ {
+ MPFR_BLOCK_DECL (flags);
+
+ /* compute sinh_cosh */
+ MPFR_BLOCK (flags, mpfr_exp (s, x, MPFR_RNDD));
+ if (MPFR_OVERFLOW (flags))
+ /* exp(x) does overflow */
+ {
+ /* since cosh(x) >= exp(x), cosh(x) overflows too */
+ inexact_ch = mpfr_overflow (ch, rnd_mode, MPFR_SIGN_POS);
+ /* sinh(x) may be representable */
+ inexact_sh = mpfr_sinh (sh, xt, rnd_mode);
+ MPFR_SAVE_EXPO_UPDATE_FLAGS (expo, MPFR_FLAGS_OVERFLOW);
+ break;
+ }
+ d = MPFR_GET_EXP (s);
+ mpfr_ui_div (ti, 1, s, MPFR_RNDU); /* 1/exp(x) */
+ mpfr_add (c, s, ti, MPFR_RNDU); /* exp(x) + 1/exp(x) */
+ mpfr_sub (s, s, ti, MPFR_RNDN); /* exp(x) - 1/exp(x) */
+ mpfr_div_2ui (c, c, 1, MPFR_RNDN); /* 1/2(exp(x) + 1/exp(x)) */
+ mpfr_div_2ui (s, s, 1, MPFR_RNDN); /* 1/2(exp(x) - 1/exp(x)) */
+
+ /* it may be that s is zero (in fact, it can only occur when exp(x)=1,
+ and thus ti=1 too) */
+ if (MPFR_IS_ZERO (s))
+ err = N; /* double the precision */
+ else
+ {
+ /* calculation of the error */
+ d = d - MPFR_GET_EXP (s) + 2;
+ /* error estimate: err = N-(__gmpfr_ceil_log2(1+pow(2,d)));*/
+ err = N - (MAX (d, 0) + 1);
+ if (MPFR_LIKELY (MPFR_CAN_ROUND (s, err, MPFR_PREC (sh),
+ rnd_mode) && \
+ MPFR_CAN_ROUND (c, err, MPFR_PREC (ch),
+ rnd_mode)))
+ {
+ inexact_sh = mpfr_set4 (sh, s, rnd_mode, MPFR_SIGN (xt));
+ inexact_ch = mpfr_set (ch, c, rnd_mode);
+ break;
+ }
+ }
+ /* actualisation of the precision */
+ N += err;
+ MPFR_ZIV_NEXT (loop, N);
+ MPFR_GROUP_REPREC_3 (group, N, s, c, ti);
+ }
+ MPFR_ZIV_FREE (loop);
+ MPFR_GROUP_CLEAR (group);
+ MPFR_SAVE_EXPO_FREE (expo);
+ }
+
+ /* now, let's raise the flags if needed */
+ inexact_sh = mpfr_check_range (sh, inexact_sh, rnd_mode);
+ inexact_ch = mpfr_check_range (ch, inexact_ch, rnd_mode);
+
+ return INEX(inexact_sh,inexact_ch);
+}
diff --git a/src/sqr.c b/src/sqr.c
new file mode 100644
index 000000000..2ac7d0b2f
--- /dev/null
+++ b/src/sqr.c
@@ -0,0 +1,107 @@
+/* mpfr_sqr -- Floating square
+
+Copyright 2004, 2005, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#include "mpfr-impl.h"
+
+int
+mpfr_sqr (mpfr_ptr a, mpfr_srcptr b, mpfr_rnd_t rnd_mode)
+{
+ int cc, inexact;
+ mpfr_exp_t ax;
+ mp_limb_t *tmp;
+ mp_limb_t b1;
+ mpfr_prec_t bq;
+ mp_size_t bn, tn;
+ MPFR_TMP_DECL(marker);
+
+ MPFR_LOG_FUNC (("x[%#R]=%R rnd=%d", b, b, rnd_mode),
+ ("y[%#R]=%R inexact=%d", a, a, inexact));
+
+ /* deal with special cases */
+ if (MPFR_UNLIKELY(MPFR_IS_SINGULAR(b)))
+ {
+ if (MPFR_IS_NAN(b))
+ {
+ MPFR_SET_NAN(a);
+ MPFR_RET_NAN;
+ }
+ MPFR_SET_POS (a);
+ if (MPFR_IS_INF(b))
+ MPFR_SET_INF(a);
+ else
+ ( MPFR_ASSERTD(MPFR_IS_ZERO(b)), MPFR_SET_ZERO(a) );
+ MPFR_RET(0);
+ }
+ ax = 2 * MPFR_GET_EXP (b);
+ bq = MPFR_PREC(b);
+
+ MPFR_ASSERTD (2 * bq > bq); /* PREC_MAX is /2 so no integer overflow */
+
+ bn = MPFR_LIMB_SIZE(b); /* number of limbs of b */
+ tn = 1 + (2 * bq - 1) / GMP_NUMB_BITS; /* number of limbs of square,
+ 2*bn or 2*bn-1 */
+
+ MPFR_TMP_MARK(marker);
+ tmp = (mp_limb_t *) MPFR_TMP_ALLOC((size_t) 2 * bn * BYTES_PER_MP_LIMB);
+
+ /* Multiplies the mantissa in temporary allocated space */
+ mpn_sqr_n (tmp, MPFR_MANT(b), bn);
+ b1 = tmp[2 * bn - 1];
+
+ /* now tmp[0]..tmp[2*bn-1] contains the product of both mantissa,
+ with tmp[2*bn-1]>=2^(GMP_NUMB_BITS-2) */
+ b1 >>= GMP_NUMB_BITS - 1; /* msb from the product */
+
+ /* if the mantissas of b and c are uniformly distributed in ]1/2, 1],
+ then their product is in ]1/4, 1/2] with probability 2*ln(2)-1 ~ 0.386
+ and in [1/2, 1] with probability 2-2*ln(2) ~ 0.614 */
+ tmp += 2 * bn - tn; /* +0 or +1 */
+ if (MPFR_UNLIKELY(b1 == 0))
+ mpn_lshift (tmp, tmp, tn, 1); /* tn <= k, so no stack corruption */
+
+ cc = mpfr_round_raw (MPFR_MANT (a), tmp, 2 * bq, 0,
+ MPFR_PREC (a), rnd_mode, &inexact);
+ /* cc = 1 ==> result is a power of two */
+ if (MPFR_UNLIKELY(cc))
+ MPFR_MANT(a)[MPFR_LIMB_SIZE(a)-1] = MPFR_LIMB_HIGHBIT;
+
+ MPFR_TMP_FREE(marker);
+ {
+ mpfr_exp_t ax2 = ax + (mpfr_exp_t) (b1 - 1 + cc);
+ if (MPFR_UNLIKELY( ax2 > __gmpfr_emax))
+ return mpfr_overflow (a, rnd_mode, MPFR_SIGN_POS);
+ if (MPFR_UNLIKELY( ax2 < __gmpfr_emin))
+ {
+ /* In the rounding to the nearest mode, if the exponent of the exact
+ result (i.e. before rounding, i.e. without taking cc into account)
+ is < __gmpfr_emin - 1 or the exact result is a power of 2 (i.e. if
+ both arguments are powers of 2), then round to zero. */
+ if (rnd_mode == MPFR_RNDN &&
+ (ax + (mpfr_exp_t) b1 < __gmpfr_emin || mpfr_powerof2_raw (b)))
+ rnd_mode = MPFR_RNDZ;
+ return mpfr_underflow (a, rnd_mode, MPFR_SIGN_POS);
+ }
+ MPFR_SET_EXP (a, ax2);
+ MPFR_SET_POS (a);
+ }
+ MPFR_RET (inexact);
+}
diff --git a/src/sqrt.c b/src/sqrt.c
new file mode 100644
index 000000000..fa9a63ac2
--- /dev/null
+++ b/src/sqrt.c
@@ -0,0 +1,256 @@
+/* mpfr_sqrt -- square root of a floating-point number
+
+Copyright 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#include "mpfr-impl.h"
+
+int
+mpfr_sqrt (mpfr_ptr r, mpfr_srcptr u, mpfr_rnd_t rnd_mode)
+{
+ mp_size_t rsize; /* number of limbs of r */
+ mp_size_t rrsize;
+ mp_size_t usize; /* number of limbs of u */
+ mp_size_t tsize; /* number of limbs of the sqrtrem remainder */
+ mp_size_t k;
+ mp_size_t l;
+ mp_ptr rp;
+ mp_ptr up;
+ mp_ptr sp;
+ mp_ptr tp;
+ mp_limb_t sticky0; /* truncated part of input */
+ mp_limb_t sticky1; /* truncated part of rp[0] */
+ mp_limb_t sticky;
+ int odd_exp;
+ int sh; /* number of extra bits in rp[0] */
+ int inexact; /* return ternary flag */
+ mpfr_exp_t expr;
+ MPFR_TMP_DECL(marker);
+
+ MPFR_LOG_FUNC (("x[%#R]=%R rnd=%d", u, u, rnd_mode),
+ ("y[%#R]=%R inexact=%d", r, r, inexact));
+
+ if (MPFR_UNLIKELY(MPFR_IS_SINGULAR(u)))
+ {
+ if (MPFR_IS_NAN(u))
+ {
+ MPFR_SET_NAN(r);
+ MPFR_RET_NAN;
+ }
+ else if (MPFR_IS_ZERO(u))
+ {
+ /* 0+ or 0- */
+ MPFR_SET_SAME_SIGN(r, u);
+ MPFR_SET_ZERO(r);
+ MPFR_RET(0); /* zero is exact */
+ }
+ else
+ {
+ MPFR_ASSERTD(MPFR_IS_INF(u));
+ /* sqrt(-Inf) = NAN */
+ if (MPFR_IS_NEG(u))
+ {
+ MPFR_SET_NAN(r);
+ MPFR_RET_NAN;
+ }
+ MPFR_SET_POS(r);
+ MPFR_SET_INF(r);
+ MPFR_RET(0);
+ }
+ }
+ if (MPFR_UNLIKELY(MPFR_IS_NEG(u)))
+ {
+ MPFR_SET_NAN(r);
+ MPFR_RET_NAN;
+ }
+ MPFR_SET_POS(r);
+
+ rsize = MPFR_LIMB_SIZE(r); /* number of limbs of r */
+ rrsize = rsize + rsize;
+ usize = MPFR_LIMB_SIZE(u); /* number of limbs of u */
+ rp = MPFR_MANT(r);
+ up = MPFR_MANT(u);
+ sticky0 = MPFR_LIMB_ZERO; /* truncated part of input */
+ sticky1 = MPFR_LIMB_ZERO; /* truncated part of rp[0] */
+ odd_exp = (unsigned int) MPFR_GET_EXP (u) & 1;
+ inexact = -1; /* return ternary flag */
+
+ MPFR_TMP_MARK (marker);
+ sp = (mp_limb_t *) MPFR_TMP_ALLOC (rrsize * sizeof (mp_limb_t));
+
+ /* copy the most significant limbs of u to {sp, rrsize} */
+ if (MPFR_LIKELY(usize <= rrsize)) /* in case r and u have the same precision,
+ we have indeed rrsize = 2 * usize */
+ {
+ k = rrsize - usize;
+ if (MPFR_LIKELY(k))
+ MPN_ZERO (sp, k);
+ if (odd_exp)
+ {
+ if (MPFR_LIKELY(k))
+ sp[k - 1] = mpn_rshift (sp + k, up, usize, 1);
+ else
+ sticky0 = mpn_rshift (sp, up, usize, 1);
+ }
+ else
+ MPN_COPY (sp + rrsize - usize, up, usize);
+ }
+ else /* usize > rrsize: truncate the input */
+ {
+ k = usize - rrsize;
+ if (odd_exp)
+ sticky0 = mpn_rshift (sp, up + k, rrsize, 1);
+ else
+ MPN_COPY (sp, up + k, rrsize);
+ l = k;
+ while (sticky0 == MPFR_LIMB_ZERO && l != 0)
+ sticky0 = up[--l];
+ }
+
+ /* sticky0 is non-zero iff the truncated part of the input is non-zero */
+
+ tsize = mpn_sqrtrem (rp, tp = sp, sp, rrsize);
+
+ l = tsize;
+ sticky = sticky0;
+ while (sticky == MPFR_LIMB_ZERO && l != 0)
+ sticky = tp[--l];
+
+ /* truncated low bits of rp[0] */
+ MPFR_UNSIGNED_MINUS_MODULO(sh,MPFR_PREC(r));
+ sticky1 = rp[0] & MPFR_LIMB_MASK(sh);
+ rp[0] -= sticky1;
+
+ sticky = sticky || sticky1;
+
+ expr = (MPFR_GET_EXP(u) + odd_exp) / 2; /* exact */
+
+ if (rnd_mode == MPFR_RNDZ || rnd_mode == MPFR_RNDD || sticky == MPFR_LIMB_ZERO)
+ {
+ inexact = (sticky == MPFR_LIMB_ZERO) ? 0 : -1;
+ goto truncate;
+ }
+ else if (rnd_mode == MPFR_RNDN)
+ {
+ /* if sh>0, the round bit is bit (sh-1) of sticky1
+ and the sticky bit is formed by the low sh-1 bits from
+ sticky1, together with {tp, tsize} and sticky0. */
+ if (sh)
+ {
+ if (sticky1 & (MPFR_LIMB_ONE << (sh - 1)))
+ { /* round bit is set */
+ if (sticky1 == (MPFR_LIMB_ONE << (sh - 1)) && tsize == 0
+ && sticky0 == 0)
+ goto even_rule;
+ else
+ goto add_one_ulp;
+ }
+ else /* round bit is zero */
+ goto truncate; /* with the default inexact=-1 */
+ }
+ else
+ {
+ /* if sh=0, we have to compare {tp, tsize} with {rp, rsize}:
+ if {tp, tsize} < {rp, rsize}: truncate
+ if {tp, tsize} > {rp, rsize}: round up
+ if {tp, tsize} = {rp, rsize}: compare the truncated part of the
+ input to 1/4
+ if < 1/4: truncate
+ if > 1/4: round up
+ if = 1/4: even rounding rule
+ Set inexact = -1 if truncate
+ inexact = 1 if add one ulp
+ inexact = 0 if even rounding rule
+ */
+ if (tsize < rsize)
+ inexact = -1;
+ else if (tsize > rsize) /* FIXME: may happen? */
+ inexact = 1;
+ else /* tsize = rsize */
+ {
+ int cmp;
+
+ cmp = mpn_cmp (tp, rp, rsize);
+ if (cmp > 0)
+ inexact = 1;
+ else if (cmp < 0 || sticky0 == MPFR_LIMB_ZERO)
+ inexact = -1;
+ /* now tricky case {tp, tsize} = {rp, rsize} */
+ /* in case usize <= rrsize, the only case where sticky0 <> 0
+ is when the exponent of u is odd and usize = rrsize (k=0),
+ but in that case the truncated part is exactly 1/2, thus
+ we have to round up.
+ If the exponent of u is odd, and up[k] is odd, the truncated
+ part is >= 1/2, so we round up too. */
+ else if (usize <= rrsize || (odd_exp && (up[k] & MPFR_LIMB_ONE)))
+ inexact = 1;
+ else
+ {
+ /* now usize > rrsize:
+ (a) if the exponent of u is even, the 1/4 bit is the
+ 2nd most significant bit of up[k-1];
+ (b) if the exponent of u is odd, the 1/4 bit is the
+ 1st most significant bit of up[k-1]; */
+ sticky1 = MPFR_LIMB_ONE << (GMP_NUMB_BITS - 2 + odd_exp);
+ if (up[k - 1] < sticky1)
+ inexact = -1;
+ else if (up[k - 1] > sticky1)
+ inexact = 1;
+ else
+ {
+ /* up[k - 1] == sticky1: consider low k-1 limbs */
+ while (--k > 0 && up[k - 1] == MPFR_LIMB_ZERO)
+ ;
+ inexact = (k != 0);
+ }
+ } /* end of case {tp, tsize} = {rp, rsize} */
+ } /* end of case tsize = rsize */
+ if (inexact == -1)
+ goto truncate;
+ else if (inexact == 1)
+ goto add_one_ulp;
+ /* else go through even_rule */
+ }
+ }
+  else /* rnd_mode = MPFR_RNDU, necessarily sticky <> 0, thus add 1 ulp */
+ goto add_one_ulp;
+
+ even_rule: /* has to set inexact */
+ inexact = (rp[0] & (MPFR_LIMB_ONE << sh)) ? 1 : -1;
+ if (inexact == -1)
+ goto truncate;
+ /* else go through add_one_ulp */
+
+ add_one_ulp:
+ inexact = 1; /* always here */
+ if (mpn_add_1 (rp, rp, rsize, MPFR_LIMB_ONE << sh))
+ {
+ expr ++;
+ rp[rsize - 1] = MPFR_LIMB_HIGHBIT;
+ }
+
+ truncate: /* inexact = 0 or -1 */
+
+ MPFR_ASSERTN (expr >= MPFR_EMIN_MIN && expr <= MPFR_EMAX_MAX);
+ MPFR_EXP (r) = expr;
+
+ MPFR_TMP_FREE(marker);
+ return mpfr_check_range (r, inexact, rnd_mode);
+}
diff --git a/src/sqrt_ui.c b/src/sqrt_ui.c
new file mode 100644
index 000000000..270540a07
--- /dev/null
+++ b/src/sqrt_ui.c
@@ -0,0 +1,54 @@
+/* mpfr_sqrt_ui -- square root of a machine integer
+
+Copyright 2000, 2001, 2002, 2003, 2004, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#define MPFR_NEED_LONGLONG_H
+#include "mpfr-impl.h"
+
+int
+mpfr_sqrt_ui (mpfr_ptr r, unsigned long u, mpfr_rnd_t rnd_mode)
+{
+ if (u)
+ {
+ mpfr_t uu;
+ mp_limb_t up[1];
+ unsigned long cnt;
+ int inex;
+ MPFR_SAVE_EXPO_DECL (expo);
+
+ MPFR_TMP_INIT1 (up, uu, GMP_NUMB_BITS);
+ MPFR_ASSERTN (u == (mp_limb_t) u);
+ count_leading_zeros (cnt, (mp_limb_t) u);
+ *up = (mp_limb_t) u << cnt;
+
+ MPFR_SAVE_EXPO_MARK (expo);
+ MPFR_SET_EXP (uu, GMP_NUMB_BITS - cnt);
+ inex = mpfr_sqrt(r, uu, rnd_mode);
+ MPFR_SAVE_EXPO_FREE (expo);
+ return mpfr_check_range(r, inex, rnd_mode);
+ }
+ else /* sqrt(0) = 0 */
+ {
+ MPFR_SET_ZERO(r);
+ MPFR_SET_POS(r);
+ MPFR_RET(0);
+ }
+}
diff --git a/src/stack_interface.c b/src/stack_interface.c
new file mode 100644
index 000000000..e0a58e21f
--- /dev/null
+++ b/src/stack_interface.c
@@ -0,0 +1,104 @@
+/* mpfr_stack -- initialize a floating-point number with given allocation area
+
+Copyright 2005, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#include "mpfr-impl.h"
+
+#undef mpfr_custom_get_size
+size_t
+mpfr_custom_get_size (mpfr_prec_t prec)
+{
+ return (prec + GMP_NUMB_BITS -1) / GMP_NUMB_BITS * BYTES_PER_MP_LIMB;
+}
+
+#undef mpfr_custom_init
+void
+mpfr_custom_init (void *mantissa, mpfr_prec_t prec)
+{
+ return ;
+}
+
+#undef mpfr_custom_get_significand
+void *
+mpfr_custom_get_significand (mpfr_srcptr x)
+{
+ return (void*) MPFR_MANT (x);
+}
+
+#undef mpfr_custom_get_exp
+mpfr_exp_t
+mpfr_custom_get_exp (mpfr_srcptr x)
+{
+ return MPFR_EXP (x);
+}
+
+#undef mpfr_custom_move
+void
+mpfr_custom_move (mpfr_ptr x, void *new_position)
+{
+ MPFR_MANT (x) = (mp_limb_t *) new_position;
+}
+
+#undef mpfr_custom_init_set
+void
+mpfr_custom_init_set (mpfr_ptr x, int kind, mpfr_exp_t exp,
+ mpfr_prec_t prec, void *mantissa)
+{
+ mpfr_kind_t t;
+ int s;
+ mpfr_exp_t e;
+
+ if (kind >= 0)
+ {
+ t = (mpfr_kind_t) kind;
+ s = MPFR_SIGN_POS;
+ }
+ else
+ {
+ t = (mpfr_kind_t) -kind;
+ s = MPFR_SIGN_NEG;
+ }
+ MPFR_ASSERTD (t <= MPFR_REGULAR_KIND);
+ e = MPFR_LIKELY (t == MPFR_REGULAR_KIND) ? exp :
+ MPFR_UNLIKELY (t == MPFR_NAN_KIND) ? MPFR_EXP_NAN :
+ MPFR_UNLIKELY (t == MPFR_INF_KIND) ? MPFR_EXP_INF : MPFR_EXP_ZERO;
+
+ MPFR_PREC (x) = prec;
+ MPFR_SET_SIGN (x, s);
+ MPFR_EXP (x) = e;
+ MPFR_MANT (x) = (mp_limb_t*) mantissa;
+ return;
+}
+
+#undef mpfr_custom_get_kind
+int
+mpfr_custom_get_kind (mpfr_srcptr x)
+{
+ if (MPFR_LIKELY (!MPFR_IS_SINGULAR (x)))
+ return (int) MPFR_REGULAR_KIND * MPFR_INT_SIGN (x);
+ if (MPFR_IS_INF (x))
+ return (int) MPFR_INF_KIND * MPFR_INT_SIGN (x);
+ if (MPFR_IS_NAN (x))
+ return (int) MPFR_NAN_KIND;
+ MPFR_ASSERTD (MPFR_IS_ZERO (x));
+ return (int) MPFR_ZERO_KIND * MPFR_INT_SIGN (x);
+}
+
diff --git a/src/strtofr.c b/src/strtofr.c
new file mode 100644
index 000000000..242d24adb
--- /dev/null
+++ b/src/strtofr.c
@@ -0,0 +1,825 @@
+/* mpfr_strtofr -- set a floating-point number from a string
+
+Copyright 2004, 2005, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#include <stdlib.h> /* For strtol */
+#include <ctype.h> /* For isspace */
+
+#define MPFR_NEED_LONGLONG_H
+#include "mpfr-impl.h"
+
+#define MPFR_MAX_BASE 62
+
+struct parsed_string {
+ int negative; /* non-zero iff the number is negative */
+ int base; /* base of the string */
+ unsigned char *mantissa; /* raw significand (without any point) */
+ unsigned char *mant; /* stripped significand (without starting and
+ ending zeroes) */
+ size_t prec; /* length of mant (zero for +/-0) */
+ size_t alloc; /* allocation size of mantissa */
+ mpfr_exp_t exp_base; /* number of digits before the point */
+ mpfr_exp_t exp_bin; /* exponent in case base=2 or 16, and the pxxx
+ format is used (i.e., exponent is given in
+ base 10) */
+};
+
+/* This table has been generated by the following program.
+ For 2 <= b <= MPFR_MAX_BASE,
+ RedInvLog2Table[b-2][0] / RedInvLog2Table[b-2][1]
+ is an upper approximation of log(2)/log(b).
+*/
+static const unsigned long RedInvLog2Table[MPFR_MAX_BASE-1][2] = {
+ {1UL, 1UL},
+ {53UL, 84UL},
+ {1UL, 2UL},
+ {4004UL, 9297UL},
+ {53UL, 137UL},
+ {2393UL, 6718UL},
+ {1UL, 3UL},
+ {665UL, 2108UL},
+ {4004UL, 13301UL},
+ {949UL, 3283UL},
+ {53UL, 190UL},
+ {5231UL, 19357UL},
+ {2393UL, 9111UL},
+ {247UL, 965UL},
+ {1UL, 4UL},
+ {4036UL, 16497UL},
+ {665UL, 2773UL},
+ {5187UL, 22034UL},
+ {4004UL, 17305UL},
+ {51UL, 224UL},
+ {949UL, 4232UL},
+ {3077UL, 13919UL},
+ {53UL, 243UL},
+ {73UL, 339UL},
+ {5231UL, 24588UL},
+ {665UL, 3162UL},
+ {2393UL, 11504UL},
+ {4943UL, 24013UL},
+ {247UL, 1212UL},
+ {3515UL, 17414UL},
+ {1UL, 5UL},
+ {4415UL, 22271UL},
+ {4036UL, 20533UL},
+ {263UL, 1349UL},
+ {665UL, 3438UL},
+ {1079UL, 5621UL},
+ {5187UL, 27221UL},
+ {2288UL, 12093UL},
+ {4004UL, 21309UL},
+ {179UL, 959UL},
+ {51UL, 275UL},
+ {495UL, 2686UL},
+ {949UL, 5181UL},
+ {3621UL, 19886UL},
+ {3077UL, 16996UL},
+ {229UL, 1272UL},
+ {53UL, 296UL},
+ {109UL, 612UL},
+ {73UL, 412UL},
+ {1505UL, 8537UL},
+ {5231UL, 29819UL},
+ {283UL, 1621UL},
+ {665UL, 3827UL},
+ {32UL, 185UL},
+ {2393UL, 13897UL},
+ {1879UL, 10960UL},
+ {4943UL, 28956UL},
+ {409UL, 2406UL},
+ {247UL, 1459UL},
+ {231UL, 1370UL},
+ {3515UL, 20929UL} };
+#if 0
+#define N 8
+int main ()
+{
+ unsigned long tab[N];
+ int i, n, base;
+ mpfr_t x, y;
+ mpq_t q1, q2;
+ int overflow = 0, base_overflow;
+
+ mpfr_init2 (x, 200);
+ mpfr_init2 (y, 200);
+ mpq_init (q1);
+ mpq_init (q2);
+
+ for (base = 2 ; base < 63 ; base ++)
+ {
+ mpfr_set_ui (x, base, MPFR_RNDN);
+ mpfr_log2 (x, x, MPFR_RNDN);
+ mpfr_ui_div (x, 1, x, MPFR_RNDN);
+ printf ("Base: %d x=%e ", base, mpfr_get_d1 (x));
+ for (i = 0 ; i < N ; i++)
+ {
+ mpfr_floor (y, x);
+ tab[i] = mpfr_get_ui (y, MPFR_RNDN);
+ mpfr_sub (x, x, y, MPFR_RNDN);
+ mpfr_ui_div (x, 1, x, MPFR_RNDN);
+ }
+ for (i = N-1 ; i >= 0 ; i--)
+ if (tab[i] != 0)
+ break;
+ mpq_set_ui (q1, tab[i], 1);
+ for (i = i-1 ; i >= 0 ; i--)
+ {
+ mpq_inv (q1, q1);
+ mpq_set_ui (q2, tab[i], 1);
+ mpq_add (q1, q1, q2);
+ }
+ printf("Approx: ", base);
+ mpq_out_str (stdout, 10, q1);
+ printf (" = %e\n", mpq_get_d (q1) );
+ fprintf (stderr, "{");
+ mpz_out_str (stderr, 10, mpq_numref (q1));
+ fprintf (stderr, "UL, ");
+ mpz_out_str (stderr, 10, mpq_denref (q1));
+ fprintf (stderr, "UL},\n");
+ if (mpz_cmp_ui (mpq_numref (q1), 1<<16-1) >= 0
+ || mpz_cmp_ui (mpq_denref (q1), 1<<16-1) >= 0)
+ overflow = 1, base_overflow = base;
+ }
+
+ mpq_clear (q2);
+ mpq_clear (q1);
+ mpfr_clear (y);
+ mpfr_clear (x);
+ if (overflow )
+ printf ("OVERFLOW for base =%d!\n", base_overflow);
+}
+#endif
+
+
+/* Compatible with any locale, but one still assumes that 'a', 'b', 'c',
+ ..., 'z', and 'A', 'B', 'C', ..., 'Z' are consecutive values (like
+ in any ASCII-based character set). */
+static int
+digit_value_in_base (int c, int base)
+{
+ int digit;
+
+ MPFR_ASSERTD (base > 0 && base <= MPFR_MAX_BASE);
+
+ if (c >= '0' && c <= '9')
+ digit = c - '0';
+ else if (c >= 'a' && c <= 'z')
+ digit = (base >= 37) ? c - 'a' + 36 : c - 'a' + 10;
+ else if (c >= 'A' && c <= 'Z')
+ digit = c - 'A' + 10;
+ else
+ return -1;
+
+ return MPFR_LIKELY (digit < base) ? digit : -1;
+}
+
+/* Compatible with any locale, but one still assumes that 'a', 'b', 'c',
+ ..., 'z', and 'A', 'B', 'C', ..., 'Z' are consecutive values (like
+ in any ASCII-based character set). */
+/* TODO: support EBCDIC. */
+static int
+fast_casecmp (const char *s1, const char *s2)
+{
+ unsigned char c1, c2;
+
+ do
+ {
+ c2 = *(const unsigned char *) s2++;
+ if (c2 == '\0')
+ return 0;
+ c1 = *(const unsigned char *) s1++;
+ if (c1 >= 'A' && c1 <= 'Z')
+ c1 = c1 - 'A' + 'a';
+ }
+ while (c1 == c2);
+ return 1;
+}
+
+/* Parse a string and fill pstr.
+ Return the advanced ptr too.
+ It returns:
+ -1 if invalid string,
+ 0 if special string (like nan),
+ 1 if the string is ok.
+ 2 if overflows
+ So it doesn't return the ternary value
+ BUT if it returns 0 (NAN or INF), the ternary value is also '0'
+ (ie NAN and INF are exact) */
+static int
+parse_string (mpfr_t x, struct parsed_string *pstr,
+ const char **string, int base)
+{
+ const char *str = *string;
+ unsigned char *mant;
+ int point;
+ int res = -1; /* Invalid input return value */
+ const char *prefix_str;
+ int decimal_point;
+
+ decimal_point = (unsigned char) MPFR_DECIMAL_POINT;
+
+ /* Init variable */
+ pstr->mantissa = NULL;
+
+ /* Optional leading whitespace */
+ while (isspace((unsigned char) *str)) str++;
+
+ /* An optional sign `+' or `-' */
+ pstr->negative = (*str == '-');
+ if (*str == '-' || *str == '+')
+ str++;
+
+ /* Can be case-insensitive NAN */
+ if (fast_casecmp (str, "@nan@") == 0)
+ {
+ str += 5;
+ goto set_nan;
+ }
+ if (base <= 16 && fast_casecmp (str, "nan") == 0)
+ {
+ str += 3;
+ set_nan:
+ /* Check for "(dummychars)" */
+ if (*str == '(')
+ {
+ const char *s;
+ for (s = str+1 ; *s != ')' ; s++)
+ if (!(*s >= 'A' && *s <= 'Z')
+ && !(*s >= 'a' && *s <= 'z')
+ && !(*s >= '0' && *s <= '9')
+ && *s != '_')
+ break;
+ if (*s == ')')
+ str = s+1;
+ }
+ *string = str;
+ MPFR_SET_NAN(x);
+ /* MPFR_RET_NAN not used as the return value isn't a ternary value */
+ __gmpfr_flags |= MPFR_FLAGS_NAN;
+ return 0;
+ }
+
+ /* Can be case-insensitive INF */
+ if (fast_casecmp (str, "@inf@") == 0)
+ {
+ str += 5;
+ goto set_inf;
+ }
+ if (base <= 16 && fast_casecmp (str, "infinity") == 0)
+ {
+ str += 8;
+ goto set_inf;
+ }
+ if (base <= 16 && fast_casecmp (str, "inf") == 0)
+ {
+ str += 3;
+ set_inf:
+ *string = str;
+ MPFR_SET_INF (x);
+ (pstr->negative) ? MPFR_SET_NEG (x) : MPFR_SET_POS (x);
+ return 0;
+ }
+
+ /* If base=0 or 16, it may include '0x' prefix */
+ prefix_str = NULL;
+ if ((base == 0 || base == 16) && str[0]=='0'
+ && (str[1]=='x' || str[1] == 'X'))
+ {
+ prefix_str = str;
+ base = 16;
+ str += 2;
+ }
+ /* If base=0 or 2, it may include '0b' prefix */
+ if ((base == 0 || base == 2) && str[0]=='0'
+ && (str[1]=='b' || str[1] == 'B'))
+ {
+ prefix_str = str;
+ base = 2;
+ str += 2;
+ }
+ /* Else if base=0, we assume decimal base */
+ if (base == 0)
+ base = 10;
+ pstr->base = base;
+
+ /* Alloc mantissa */
+ pstr->alloc = (size_t) strlen (str) + 1;
+ pstr->mantissa = (unsigned char*) (*__gmp_allocate_func) (pstr->alloc);
+
+ /* Read mantissa digits */
+ parse_begin:
+ mant = pstr->mantissa;
+ point = 0;
+ pstr->exp_base = 0;
+ pstr->exp_bin = 0;
+
+ for (;;) /* Loop until an invalid character is read */
+ {
+ int c = (unsigned char) *str++;
+ /* The cast to unsigned char is needed because of digit_value_in_base;
+ decimal_point uses this convention too. */
+ if (c == '.' || c == decimal_point)
+ {
+ if (MPFR_UNLIKELY(point)) /* Second '.': stop parsing */
+ break;
+ point = 1;
+ continue;
+ }
+ c = digit_value_in_base (c, base);
+ if (c == -1)
+ break;
+ MPFR_ASSERTN (c >= 0); /* c is representable in an unsigned char */
+ *mant++ = (unsigned char) c;
+ if (!point)
+ pstr->exp_base ++;
+ }
+ str--; /* The last read character was invalid */
+
+ /* Update the # of char in the mantissa */
+ pstr->prec = mant - pstr->mantissa;
+ /* Check if there are no characters in the mantissa (Invalid argument) */
+ if (pstr->prec == 0)
+ {
+ /* Check if there was a prefix (in such a case, we have to read
+ again the mantissa without skipping the prefix)
+ The allocated mantissa is still big enough since we will
+ read only 0, and we alloc one more char than needed.
+ FIXME: Not really friendly. Maybe cleaner code? */
+ if (prefix_str != NULL)
+ {
+ str = prefix_str;
+ prefix_str = NULL;
+ goto parse_begin;
+ }
+ goto end;
+ }
+
+ /* Valid entry */
+ res = 1;
+ MPFR_ASSERTD (pstr->exp_base >= 0);
+
+ /* an optional exponent (e or E, p or P, @) */
+ if ( (*str == '@' || (base <= 10 && (*str == 'e' || *str == 'E')))
+ && (!isspace((unsigned char) str[1])) )
+ {
+ char *endptr[1];
+ /* the exponent digits are kept in ASCII */
+ mpfr_exp_t read_exp = strtol (str + 1, endptr, 10);
+ mpfr_exp_t sum = 0;
+ if (endptr[0] != str+1)
+ str = endptr[0];
+ MPFR_ASSERTN (read_exp == (long) read_exp);
+ MPFR_SADD_OVERFLOW (sum, read_exp, pstr->exp_base,
+ mpfr_exp_t, mpfr_uexp_t,
+ MPFR_EXP_MIN, MPFR_EXP_MAX,
+ res = 2, res = 3);
+ /* Since exp_base was positive, read_exp + exp_base can't
+ do a negative overflow. */
+ MPFR_ASSERTD (res != 3);
+ pstr->exp_base = sum;
+ }
+ else if ((base == 2 || base == 16)
+ && (*str == 'p' || *str == 'P')
+ && (!isspace((unsigned char) str[1])))
+ {
+ char *endptr[1];
+ pstr->exp_bin = (mpfr_exp_t) strtol (str + 1, endptr, 10);
+ if (endptr[0] != str+1)
+ str = endptr[0];
+ }
+
+ /* Remove 0's at the beginning and end of mant_s[0..prec_s-1] */
+ mant = pstr->mantissa;
+ for ( ; (pstr->prec > 0) && (*mant == 0) ; mant++, pstr->prec--)
+ pstr->exp_base--;
+ for ( ; (pstr->prec > 0) && (mant[pstr->prec - 1] == 0); pstr->prec--);
+ pstr->mant = mant;
+
+ /* Check if x = 0 */
+ if (pstr->prec == 0)
+ {
+ MPFR_SET_ZERO (x);
+ if (pstr->negative)
+ MPFR_SET_NEG(x);
+ else
+ MPFR_SET_POS(x);
+ res = 0;
+ }
+
+ *string = str;
+ end:
+ if (pstr->mantissa != NULL && res != 1)
+ (*__gmp_free_func) (pstr->mantissa, pstr->alloc);
+ return res;
+}
+
+/* Transform a parsed string to a mpfr_t according to the rounding mode
+   and the precision of x.
+   Returns the ternary value. */
+static int
+parsed_string_to_mpfr (mpfr_t x, struct parsed_string *pstr, mpfr_rnd_t rnd)
+{
+  mpfr_prec_t prec;
+  mpfr_exp_t exp;
+  mpfr_exp_t ysize_bits;
+  mp_limb_t *y, *result;
+  int count, exact;
+  size_t pstr_size;
+  mp_size_t ysize, real_ysize;
+  int res, err;
+  MPFR_ZIV_DECL (loop);
+  MPFR_TMP_DECL (marker);
+
+  /* initialize the working precision */
+  prec = MPFR_PREC (x) + MPFR_INT_CEIL_LOG2 (MPFR_PREC (x));
+
+  /* compute y as long as rounding is not possible */
+  MPFR_TMP_MARK(marker);
+  MPFR_ZIV_INIT (loop, prec);
+  for (;;)
+    {
+      /* Set y to the value of the ~prec most significant bits of pstr->mant
+         (as long as we guarantee correct rounding, we don't need to get
+         exactly prec bits). */
+      ysize = (prec - 1) / GMP_NUMB_BITS + 1;
+      /* prec bits corresponds to ysize limbs */
+      ysize_bits = ysize * GMP_NUMB_BITS;
+      /* and to ysize_bits >= prec > MPFR_PREC (x) bits */
+      y = (mp_limb_t*) MPFR_TMP_ALLOC ((2 * ysize + 1) * sizeof (mp_limb_t));
+      y += ysize; /* y has (ysize+1) allocated limbs */
+
+      /* pstr_size is the number of characters we read in pstr->mant
+         to have at least ysize full limbs.
+         We must have base^(pstr_size-1) >= (2^(GMP_NUMB_BITS))^ysize
+         (in the worst case, the first digit is one and all others are zero).
+         i.e., pstr_size >= 1 + ysize*GMP_NUMB_BITS/log2(base)
+          Since ysize ~ prec/GMP_NUMB_BITS and prec < Umax/2 =>
+          ysize*GMP_NUMB_BITS can not overflow.
+         We compute pstr_size = 1 + ceil(ysize_bits * Num / Den)
+          where Num/Den >= 1/log2(base)
+         It is not exactly ceil(1/log2(base)) but could be one more (base 2)
+         Quite ugly since it tries to avoid overflow:
+         let Num = RedInvLog2Table[pstr->base-2][0]
+         and Den = RedInvLog2Table[pstr->base-2][1],
+         and ysize_bits = a*Den+b,
+         then ysize_bits * Num/Den = a*Num + (b * Num)/Den,
+         thus ceil(ysize_bits * Num/Den) = a*Num + floor(b * Num + Den - 1)/Den
+      */
+      {
+        unsigned long Num = RedInvLog2Table[pstr->base-2][0];
+        unsigned long Den = RedInvLog2Table[pstr->base-2][1];
+        pstr_size = ((ysize_bits / Den) * Num)
+          + (((ysize_bits % Den) * Num + Den - 1) / Den)
+          + 1;
+      }
+
+      /* since pstr_size corresponds to at least ysize_bits full bits,
+         and ysize_bits > prec, the weight of the neglected part of
+         pstr->mant (if any) is < ulp(y) < ulp(x) */
+
+      /* if the number of wanted characters is more than what we have in
+         pstr->mant, round it down */
+      if (pstr_size >= pstr->prec)
+        pstr_size = pstr->prec;
+      MPFR_ASSERTD (pstr_size == (mpfr_exp_t) pstr_size);
+
+      /* convert str into binary: note that pstr->mant is big endian,
+         thus no offset is needed */
+      real_ysize = mpn_set_str (y, pstr->mant, pstr_size, pstr->base);
+      MPFR_ASSERTD (real_ysize <= ysize+1);
+
+      /* normalize y: warning, we can even get ysize+1 limbs! */
+      MPFR_ASSERTD (y[real_ysize - 1] != 0); /* mpn_set_str guarantees this */
+      count_leading_zeros (count, y[real_ysize - 1]);
+      /* exact means that the number of limbs of the output of mpn_set_str
+         is less or equal to ysize */
+      exact = real_ysize <= ysize;
+      if (exact) /* shift y to the left in that case y should be exact */
+        {
+          /* we have enough limbs to store {y, real_ysize} */
+          /* shift {y, num_limb} for count bits to the left */
+          if (count != 0)
+            mpn_lshift (y + ysize - real_ysize, y, real_ysize, count);
+          if (real_ysize != ysize)
+            {
+              if (count == 0)
+                MPN_COPY_DECR (y + ysize - real_ysize, y, real_ysize);
+              MPN_ZERO (y, ysize - real_ysize);
+            }
+          /* for each bit shift decrease exponent of y */
+          /* (This should not overflow) */
+          exp = - ((ysize - real_ysize) * GMP_NUMB_BITS + count);
+        }
+      else  /* shift y to the right, by doing this we might lose some
+               bits from the result of mpn_set_str (in addition to the
+               characters neglected from pstr->mant) */
+        {
+          /* shift {y, num_limb} for (GMP_NUMB_BITS - count) bits
+             to the right. FIXME: can we prove that count cannot be zero here,
+             since mpn_rshift does not accept a shift of GMP_NUMB_BITS? */
+          MPFR_ASSERTD (count != 0);
+          exact = mpn_rshift (y, y, real_ysize, GMP_NUMB_BITS - count) ==
+            MPFR_LIMB_ZERO;
+          /* for each bit shift increase exponent of y */
+          exp = GMP_NUMB_BITS - count;
+        }
+
+      /* compute base^(exp_s-pr) on n limbs */
+      if (IS_POW2 (pstr->base))
+        {
+          /* Base: 2, 4, 8, 16, 32 */
+          int pow2;
+          mpfr_exp_t tmp;
+
+          count_leading_zeros (pow2, (mp_limb_t) pstr->base);
+          pow2 = GMP_NUMB_BITS - pow2 - 1; /* base = 2^pow2 */
+          MPFR_ASSERTD (0 < pow2 && pow2 <= 5);
+          /* exp += pow2 * (pstr->exp_base - pstr_size) + pstr->exp_bin
+             with overflow checking
+             and check that we can add/substract 2 to exp without overflow */
+          MPFR_SADD_OVERFLOW (tmp, pstr->exp_base, -(mpfr_exp_t) pstr_size,
+                              mpfr_exp_t, mpfr_uexp_t,
+                              MPFR_EXP_MIN, MPFR_EXP_MAX,
+                              goto overflow, goto underflow);
+          /* On some FreeBsd/Alpha, LONG_MIN/1 produced an exception
+             so we used to check for this before doing the division.
+             Since this bug is closed now (Nov 26, 2009), we remove
+             that check (http://www.freebsd.org/cgi/query-pr.cgi?pr=72024) */
+          if (tmp > 0 && MPFR_EXP_MAX / pow2 <= tmp)
+            goto overflow;
+          else if (tmp < 0 && MPFR_EXP_MIN / pow2 >= tmp)
+            goto underflow;
+          tmp *= pow2;
+          MPFR_SADD_OVERFLOW (tmp, tmp, pstr->exp_bin,
+                              mpfr_exp_t, mpfr_uexp_t,
+                              MPFR_EXP_MIN, MPFR_EXP_MAX,
+                              goto overflow, goto underflow);
+          MPFR_SADD_OVERFLOW (exp, exp, tmp,
+                              mpfr_exp_t, mpfr_uexp_t,
+                              MPFR_EXP_MIN+2, MPFR_EXP_MAX-2,
+                              goto overflow, goto underflow);
+          result = y;
+          err = 0;
+        }
+      /* case non-power-of-two-base, and pstr->exp_base > pstr_size */
+      else if (pstr->exp_base > (mpfr_exp_t) pstr_size)
+        {
+          mp_limb_t *z;
+          mpfr_exp_t exp_z;
+
+          result = (mp_limb_t*) MPFR_TMP_ALLOC ((2*ysize+1)*BYTES_PER_MP_LIMB);
+
+          /* z = base^(exp_base-pstr_size) using space allocated at y-ysize */
+          z = y - ysize;
+          /* NOTE: exp_base-pstr_size can't overflow since pstr_size > 0 */
+          err = mpfr_mpn_exp (z, &exp_z, pstr->base,
+                              pstr->exp_base - pstr_size, ysize);
+          if (err == -2)
+            goto overflow;
+          exact = exact && (err == -1);
+
+          /* If exact is non zero, then z equals exactly the value of the
+             pstr_size most significant digits from pstr->mant, i.e., the
+             only difference can come from the neglected pstr->prec-pstr_size
+             least significant digits of pstr->mant.
+             If exact is zero, then z is rounded toward zero with respect
+             to that value. */
+
+          /* multiply(y = 0.mant_s[0]...mant_s[pr-1])_base by base^(exp_s-g) */
+          mpn_mul_n (result, y, z, ysize);
+
+          /* compute the error on the product */
+          if (err == -1)
+            err = 0;
+          err ++;
+
+          /* compute the exponent of y */
+          /* exp += exp_z + ysize_bits with overflow checking
+             and check that we can add/substract 2 to exp without overflow */
+          MPFR_SADD_OVERFLOW (exp_z, exp_z, ysize_bits,
+                              mpfr_exp_t, mpfr_uexp_t,
+                              MPFR_EXP_MIN, MPFR_EXP_MAX,
+                              goto overflow, goto underflow);
+          MPFR_SADD_OVERFLOW (exp, exp, exp_z,
+                              mpfr_exp_t, mpfr_uexp_t,
+                              MPFR_EXP_MIN+2, MPFR_EXP_MAX-2,
+                              goto overflow, goto underflow);
+
+          /* normalize result */
+          if (MPFR_LIMB_MSB (result[2 * ysize - 1]) == 0)
+            {
+              mp_limb_t *r = result + ysize - 1;
+              mpn_lshift (r, r, ysize + 1, 1);
+              /* Overflow checking not needed */
+              exp --;
+            }
+
+          /* if the low ysize limbs of {result, 2*ysize} are all zero,
+             then the result is still "exact" (if it was before) */
+          exact = exact && (mpn_scan1 (result, 0)
+                            >= (unsigned long) ysize_bits);
+          result += ysize;
+        }
+      /* case exp_base < pstr_size */
+      else if (pstr->exp_base < (mpfr_exp_t) pstr_size)
+        {
+          mp_limb_t *z;
+          mpfr_exp_t exp_z;
+
+          result = (mp_limb_t*) MPFR_TMP_ALLOC ((3*ysize+1) * BYTES_PER_MP_LIMB);
+
+          /* set y to y * K^ysize */
+          y = y - ysize;  /* we have allocated ysize limbs at y - ysize */
+          MPN_ZERO (y, ysize);
+
+          /* pstr_size - pstr->exp_base can overflow */
+          MPFR_SADD_OVERFLOW (exp_z, (mpfr_exp_t) pstr_size, -pstr->exp_base,
+                              mpfr_exp_t, mpfr_uexp_t,
+                              MPFR_EXP_MIN, MPFR_EXP_MAX,
+                              goto underflow, goto overflow);
+
+          /* (z, exp_z) = base^(exp_base-pstr_size) */
+          z = result + 2*ysize + 1;
+          err = mpfr_mpn_exp (z, &exp_z, pstr->base, exp_z, ysize);
+          exact = exact && (err == -1);
+          if (err == -2)
+            goto underflow; /* FIXME: Sure? */
+          if (err == -1)
+            err = 0;
+
+          /* compute y / z */
+          /* result will be put into result + n, and remainder into result */
+          mpn_tdiv_qr (result + ysize, result, (mp_size_t) 0, y,
+                       2 * ysize, z, ysize);
+
+          /* exp -= exp_z + ysize_bits with overflow checking
+             and check that we can add/substract 2 to exp without overflow */
+          MPFR_SADD_OVERFLOW (exp_z, exp_z, ysize_bits,
+                              mpfr_exp_t, mpfr_uexp_t,
+                              MPFR_EXP_MIN, MPFR_EXP_MAX,
+                              goto underflow, goto overflow);
+          MPFR_SADD_OVERFLOW (exp, exp, -exp_z,
+                              mpfr_exp_t, mpfr_uexp_t,
+                              MPFR_EXP_MIN+2, MPFR_EXP_MAX-2,
+                              goto overflow, goto underflow);
+          err += 2;
+          /* if the remainder of the division is zero, then the result is
+             still "exact" if it was before */
+          exact = exact && (mpn_popcount (result, ysize) == 0);
+
+          /* normalize result */
+          if (result[2 * ysize] == MPFR_LIMB_ONE)
+            {
+              mp_limb_t *r = result + ysize;
+              exact = exact && ((*r & MPFR_LIMB_ONE) == 0);
+              mpn_rshift (r, r, ysize + 1, 1);
+              /* Overflow Checking not needed */
+              exp ++;
+            }
+          result += ysize;
+        }
+      /* case exp_base = pstr_size: no multiplication or division needed */
+      else
+        {
+          /* base^(exp_s-pr) = 1 nothing to compute */
+          result = y;
+          err = 0;
+        }
+
+      /* If result is exact, we still have to consider the neglected part
+         of the input string. For a directed rounding, in that case we could
+         still correctly round, since the neglected part is less than
+         one ulp, but that would make the code more complex, and give a
+         speedup for rare cases only. */
+      exact = exact && (pstr_size == pstr->prec);
+
+      /* at this point, result is an approximation rounded toward zero
+         of the pstr_size most significant digits of pstr->mant, with
+         equality in case exact is non-zero. */
+
+      /* test if rounding is possible, and if so exit the loop */
+      if (exact || mpfr_can_round_raw (result, ysize,
+                                       (pstr->negative) ? -1 : 1,
+                                       ysize_bits - err - 1,
+                                       MPFR_RNDN, rnd, MPFR_PREC(x)))
+        break;
+
+      /* update the prec for next loop */
+      MPFR_ZIV_NEXT (loop, prec);
+    } /* loop */
+  MPFR_ZIV_FREE (loop);
+
+  /* round y */
+  if (mpfr_round_raw (MPFR_MANT (x), result,
+                      ysize_bits,
+                      pstr->negative, MPFR_PREC(x), rnd, &res ))
+    {
+      /* overflow when rounding y */
+      MPFR_MANT (x)[MPFR_LIMB_SIZE (x) - 1] = MPFR_LIMB_HIGHBIT;
+      /* Overflow Checking not needed */
+      exp ++;
+    }
+
+  if (res == 0) /* fix ternary value */
+    {
+      exact = exact && (pstr_size == pstr->prec);
+      if (!exact)
+        res = (pstr->negative) ? 1 : -1;
+    }
+
+  /* Set sign of x before exp since check_range needs a valid sign */
+  (pstr->negative) ? MPFR_SET_NEG (x) : MPFR_SET_POS (x);
+
+  /* DO NOT USE MPFR_SET_EXP. The exp may be out of range! */
+  MPFR_SADD_OVERFLOW (exp, exp, ysize_bits,
+                      mpfr_exp_t, mpfr_uexp_t,
+                      MPFR_EXP_MIN, MPFR_EXP_MAX,
+                      goto overflow, goto underflow);
+  MPFR_EXP (x) = exp;
+  res = mpfr_check_range (x, res, rnd);
+  goto end;
+
+ underflow:
+  /* This is called when there is a huge underflow
+     (real exponent < MPFR_EXP_MIN << __gmpfr_emin) */
+  if (rnd == MPFR_RNDN)
+    rnd = MPFR_RNDZ;
+  res = mpfr_underflow (x, rnd, (pstr->negative) ? -1 : 1);
+  goto end;
+
+ overflow:
+  res = mpfr_overflow (x, rnd, (pstr->negative) ? -1 : 1);
+
+ end:
+  MPFR_TMP_FREE (marker);
+  return res;
+}
+
+/* Release the mantissa buffer that parse_string allocated in *pstr. */
+static void
+free_parsed_string (struct parsed_string *pstr)
+{
+  (*__gmp_free_func) (pstr->mantissa, pstr->alloc);
+}
+
+/* mpfr_strtofr: read a floating-point number from STRING in base BASE
+   (0, or 2 to 62), round it into x in direction rnd, and, if END is not
+   NULL, store in *END a pointer just past the last character read.
+   Returns the ternary value (0 iff exact; on invalid data x is set to
+   +0 and 0 is returned). */
+int
+mpfr_strtofr (mpfr_t x, const char *string, char **end, int base,
+              mpfr_rnd_t rnd)
+{
+  int res;
+  struct parsed_string pstr;
+
+  /* For base <= 36, parsing is case-insensitive. */
+  MPFR_ASSERTN (base == 0 || (base >= 2 && base <= 62));
+
+  /* If an error occurred, it must return 0 */
+  MPFR_SET_ZERO (x);
+  MPFR_SET_POS (x);
+
+  MPFR_ASSERTN (MPFR_MAX_BASE >= 62);
+  res = parse_string (x, &pstr, &string, base);
+  /* If res == 0, then it was exact (NAN or INF),
+     so it is also the ternary value */
+  if (MPFR_UNLIKELY (res == -1))  /* invalid data */
+    res = 0;  /* x is set to 0, which is exact, thus ternary value is 0 */
+  else if (res == 1)
+    {
+      res = parsed_string_to_mpfr (x, &pstr, rnd);
+      free_parsed_string (&pstr);
+    }
+  else if (res == 2)
+    res = mpfr_overflow (x, rnd, (pstr.negative) ? -1 : 1);
+  MPFR_ASSERTD (res != 3);
+#if 0
+  else if (res == 3)
+    {
+      /* This is called when there is a huge underflow
+         (real exponent < MPFR_EXP_MIN << __gmpfr_emin) */
+      if (rnd == MPFR_RNDN)
+        rnd = MPFR_RNDZ;
+      res = mpfr_underflow (x, rnd, (pstr.negative) ? -1 : 1);
+    }
+#endif
+
+  if (end != NULL)
+    *end = (char *) string;
+  return res;
+}
diff --git a/src/sub.c b/src/sub.c
new file mode 100644
index 000000000..5eb44d45a
--- /dev/null
+++ b/src/sub.c
@@ -0,0 +1,111 @@
+/* mpfr_sub -- subtract two floating-point numbers
+
+Copyright 2001, 2002, 2003, 2004, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#include "mpfr-impl.h"
+
+/* mpfr_sub -- compute a = b - c, rounded in direction rnd_mode, and
+   return the usual ternary value (0 iff the result is exact).
+   Singular operands (NaN, Inf, zero) are resolved here; generic
+   operands are delegated to mpfr_sub1/mpfr_sub1sp (same signs: a true
+   subtraction) or mpfr_add1/mpfr_add1sp (opposite signs: an addition
+   of the magnitudes). */
+int
+mpfr_sub (mpfr_ptr a, mpfr_srcptr b, mpfr_srcptr c, mpfr_rnd_t rnd_mode)
+{
+  MPFR_LOG_FUNC (("b[%#R]=%R c[%#R]=%R rnd=%d", b, b, c, c, rnd_mode),
+                 ("a[%#R]=%R", a, a));
+
+  if (MPFR_ARE_SINGULAR (b,c))
+    {
+      /* NaN must be tested first, then Inf, then zero. */
+      if (MPFR_IS_NAN (b) || MPFR_IS_NAN (c))
+        {
+          MPFR_SET_NAN (a);
+          MPFR_RET_NAN;
+        }
+      else if (MPFR_IS_INF (b))
+        {
+          if (!MPFR_IS_INF (c) || MPFR_SIGN (b) != MPFR_SIGN(c))
+            {
+              MPFR_SET_INF (a);
+              MPFR_SET_SAME_SIGN (a, b);
+              MPFR_RET (0); /* exact */
+            }
+          else
+            {
+              MPFR_SET_NAN (a); /* Inf - Inf */
+              MPFR_RET_NAN;
+            }
+        }
+      else if (MPFR_IS_INF (c))
+        {
+          MPFR_SET_INF (a);
+          MPFR_SET_OPPOSITE_SIGN (a, c);
+          MPFR_RET (0); /* exact */
+        }
+      else if (MPFR_IS_ZERO (b))
+        {
+          if (MPFR_IS_ZERO (c))
+            {
+              /* 0 - 0: the sign of the exact zero result depends on the
+                 operand signs and on the rounding mode; only MPFR_RNDD
+                 selects -0 in the ambiguous (+0)-(+0) / (-0)-(-0)
+                 cases. */
+              int sign = rnd_mode != MPFR_RNDD
+                ? ((MPFR_IS_NEG(b) && MPFR_IS_POS(c)) ? -1 : 1)
+                : ((MPFR_IS_POS(b) && MPFR_IS_NEG(c)) ? 1 : -1);
+              MPFR_SET_SIGN (a, sign);
+              MPFR_SET_ZERO (a);
+              MPFR_RET(0); /* 0 - 0 is exact */
+            }
+          else
+            return mpfr_neg (a, c, rnd_mode);
+        }
+      else
+        {
+          MPFR_ASSERTD (MPFR_IS_ZERO (c));
+          return mpfr_set (a, b, rnd_mode);
+        }
+    }
+  MPFR_ASSERTD (MPFR_IS_PURE_FP (b) && MPFR_IS_PURE_FP (c));
+
+  if (MPFR_LIKELY (MPFR_SIGN (b) == MPFR_SIGN (c)))
+    { /* signs are equal, it's a real subtraction */
+      /* mpfr_sub1sp requires all operands to have the same precision */
+      if (MPFR_LIKELY (MPFR_PREC (a) == MPFR_PREC (b)
+                       && MPFR_PREC (b) == MPFR_PREC (c)))
+        return mpfr_sub1sp (a, b, c, rnd_mode);
+      else
+        return mpfr_sub1 (a, b, c, rnd_mode);
+    }
+  else
+    { /* signs differ, it's an addition */
+      if (MPFR_GET_EXP (b) < MPFR_GET_EXP (c))
+        { /* exchange rounding modes toward +/- infinity */
+          /* The operands are passed swapped (c first), the sign of the
+             result is flipped afterwards, and the ternary value is
+             negated; the rounding direction must be inverted to
+             compensate. */
+          int inexact;
+          rnd_mode = MPFR_INVERT_RND (rnd_mode);
+          if (MPFR_LIKELY (MPFR_PREC (a) == MPFR_PREC (b)
+                           && MPFR_PREC (b) == MPFR_PREC (c)))
+            inexact = mpfr_add1sp (a, c, b, rnd_mode);
+          else
+            inexact = mpfr_add1 (a, c, b, rnd_mode);
+          MPFR_CHANGE_SIGN (a);
+          return -inexact;
+        }
+      else
+        {
+          if (MPFR_LIKELY (MPFR_PREC (a) == MPFR_PREC (b)
+                           && MPFR_PREC (b) == MPFR_PREC (c)))
+            return mpfr_add1sp (a, b, c, rnd_mode);
+          else
+            return mpfr_add1 (a, b, c, rnd_mode);
+        }
+    }
+}
diff --git a/src/sub1.c b/src/sub1.c
new file mode 100644
index 000000000..f67b392fb
--- /dev/null
+++ b/src/sub1.c
@@ -0,0 +1,538 @@
+/* mpfr_sub1 -- internal function to perform a "real" subtraction
+
+Copyright 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#include "mpfr-impl.h"
+
+/* compute sign(b) * (|b| - |c|), with |b| > |c|, diff_exp = EXP(b) - EXP(c)
+ Returns 0 iff result is exact,
+ a negative value when the result is less than the exact value,
+ a positive value otherwise.
+*/
+
+int
+mpfr_sub1 (mpfr_ptr a, mpfr_srcptr b, mpfr_srcptr c, mpfr_rnd_t rnd_mode)
+{
+  int sign;
+  mpfr_uexp_t diff_exp;
+  mpfr_prec_t cancel, cancel1;
+  mp_size_t cancel2, an, bn, cn, cn0;
+  mp_limb_t *ap, *bp, *cp;
+  mp_limb_t carry, bb, cc, borrow = 0;
+  int inexact, shift_b, shift_c, is_exact = 1, down = 0, add_exp = 0;
+  int sh, k;
+  MPFR_TMP_DECL(marker);
+
+  MPFR_TMP_MARK(marker);
+  ap = MPFR_MANT(a);
+  an = MPFR_LIMB_SIZE(a);
+
+  /* sign = sign(|b| - |c|); cancel = number of canceled high bits */
+  sign = mpfr_cmp2 (b, c, &cancel);
+  if (MPFR_UNLIKELY(sign == 0))
+    {
+      /* |b| == |c|: result is an exact zero, negative only for RNDD */
+      if (rnd_mode == MPFR_RNDD)
+        MPFR_SET_NEG (a);
+      else
+        MPFR_SET_POS (a);
+      MPFR_SET_ZERO (a);
+      MPFR_RET (0);
+    }
+
+  /*
+   * If subtraction: sign(a) = sign * sign(b)
+   * If addition: sign(a) = sign of the larger argument in absolute value.
+   *
+   * Both cases can be simplified in:
+   * if (sign>0)
+   *    if addition: sign(a) = sign * sign(b) = sign(b)
+   *    if subtraction, b is greater, so sign(a) = sign(b)
+   * else
+   *    if subtraction, sign(a) = - sign(b)
+   *    if addition, sign(a) = sign(c) (since c is greater)
+   *      But if it is an addition, sign(b) and sign(c) are opposed!
+   *      So sign(a) = - sign(b)
+   */
+
+  if (sign < 0) /* swap b and c so that |b| > |c| */
+    {
+      mpfr_srcptr t;
+      MPFR_SET_OPPOSITE_SIGN (a,b);
+      t = b; b = c; c = t;
+    }
+  else
+    MPFR_SET_SAME_SIGN (a,b);
+
+  /* Check if c is too small.
+     A more precise test is to replace 2 by
+      (rnd == MPFR_RNDN) + mpfr_power2_raw (b)
+      but it is more expensive and not very useful */
+  if (MPFR_UNLIKELY (MPFR_GET_EXP (c) <= MPFR_GET_EXP (b)
+                     - (mpfr_exp_t) MAX (MPFR_PREC (a), MPFR_PREC (b)) - 2))
+    {
+      /* Remember, we can't have an exact result! */
+      /*   A.AAAAAAAAAAAAAAAAA
+         = B.BBBBBBBBBBBBBBB
+          - C.CCCCCCCCCCCCC */
+      /* A = S*ABS(B) +/- ulp(a) */
+      MPFR_SET_EXP (a, MPFR_GET_EXP (b));
+      MPFR_RNDRAW_EVEN (inexact, a, MPFR_MANT (b), MPFR_PREC (b),
+                        rnd_mode, MPFR_SIGN (a),
+                        if (MPFR_UNLIKELY ( ++MPFR_EXP (a) > __gmpfr_emax))
+                          inexact = mpfr_overflow (a, rnd_mode, MPFR_SIGN (a)));
+      /* inexact = mpfr_set4 (a, b, rnd_mode, MPFR_SIGN (a)); */
+      if (inexact == 0)
+        {
+          /* a = b (Exact)
+             But we know it isn't (Since we have to remove `c')
+             So if we round to Zero, we have to remove one ulp.
+             Otherwise the result is correctly rounded. */
+          if (MPFR_IS_LIKE_RNDZ (rnd_mode, MPFR_IS_NEG (a)))
+            {
+              mpfr_nexttozero (a);
+              MPFR_RET (- MPFR_INT_SIGN (a));
+            }
+          MPFR_RET (MPFR_INT_SIGN (a));
+        }
+      else
+        {
+          /*   A.AAAAAAAAAAAAAA
+             = B.BBBBBBBBBBBBBBB
+              - C.CCCCCCCCCCCCC */
+          /* It isn't exact so Prec(b) > Prec(a) and the last
+             Prec(b)-Prec(a) bits of `b' are not zeros.
+             Which means that removing c from b can't generate a carry
+             except in case of even rounding.
+             In all other cases the result and the inexact flag should be
+             correct (We can't have an exact result).
+             In case of EVEN rounding:
+               1.BBBBBBBBBBBBBx10
+             -                   1.CCCCCCCCCCCC
+             = 1.BBBBBBBBBBBBBx01  Rounded to Prec(b)
+             = 1.BBBBBBBBBBBBBx    Nearest / Rounded to Prec(a)
+             Set gives:
+               1.BBBBBBBBBBBBB(x+1)  if inexact == EVEN_INEX  (x == 0)
+               1.BBBBBBBBBBBBBx      if inexact == -EVEN_INEX (x == 1)
+             which means we get a wrong rounded result if x==1,
+             i.e. inexact= MPFR_EVEN_INEX */
+          if (MPFR_UNLIKELY (inexact == MPFR_EVEN_INEX*MPFR_INT_SIGN (a)))
+            {
+              mpfr_nexttozero (a);
+              inexact = -MPFR_INT_SIGN (a);
+            }
+          MPFR_RET (inexact);
+        }
+    }
+
+  diff_exp = (mpfr_uexp_t) MPFR_GET_EXP (b) - MPFR_GET_EXP (c);
+
+  /* reserve a space to store b aligned with the result, i.e. shifted by
+     (-cancel) % GMP_NUMB_BITS to the right */
+  bn      = MPFR_LIMB_SIZE (b);
+  MPFR_UNSIGNED_MINUS_MODULO (shift_b, cancel);
+  cancel1 = (cancel + shift_b) / GMP_NUMB_BITS;
+
+  /* the high cancel1 limbs from b should not be taken into account */
+  if (MPFR_UNLIKELY (shift_b == 0))
+    {
+      bp = MPFR_MANT(b); /* no need of an extra space */
+      /* Ensure ap != bp */
+      if (MPFR_UNLIKELY (ap == bp))
+        {
+          bp = (mp_ptr) MPFR_TMP_ALLOC(bn * BYTES_PER_MP_LIMB);
+          MPN_COPY (bp, ap, bn);
+        }
+    }
+  else
+    {
+      bp = (mp_ptr) MPFR_TMP_ALLOC ((bn + 1) * BYTES_PER_MP_LIMB);
+      bp[0] = mpn_rshift (bp + 1, MPFR_MANT(b), bn++, shift_b);
+    }
+
+  /* reserve a space to store c aligned with the result, i.e. shifted by
+     (diff_exp-cancel) % GMP_NUMB_BITS to the right */
+  cn = MPFR_LIMB_SIZE(c);
+  if ((UINT_MAX % GMP_NUMB_BITS) == (GMP_NUMB_BITS-1)
+      && ((-(unsigned) 1)%GMP_NUMB_BITS > 0))
+    shift_c = (diff_exp - cancel) % GMP_NUMB_BITS;
+  else
+    {
+      shift_c = diff_exp - (cancel % GMP_NUMB_BITS);
+      shift_c = (shift_c + GMP_NUMB_BITS) % GMP_NUMB_BITS;
+    }
+  MPFR_ASSERTD( shift_c >= 0 && shift_c < GMP_NUMB_BITS);
+
+  if (MPFR_UNLIKELY(shift_c == 0))
+    {
+      cp = MPFR_MANT(c);
+      /* Ensure ap != cp */
+      if (ap == cp)
+        {
+          cp = (mp_ptr) MPFR_TMP_ALLOC (cn * BYTES_PER_MP_LIMB);
+          MPN_COPY(cp, ap, cn);
+        }
+    }
+  else
+    {
+      cp = (mp_ptr) MPFR_TMP_ALLOC ((cn + 1) * BYTES_PER_MP_LIMB);
+      cp[0] = mpn_rshift (cp + 1, MPFR_MANT(c), cn++, shift_c);
+    }
+
+#ifdef DEBUG
+  printf ("shift_b=%d shift_c=%d diffexp=%lu\n", shift_b, shift_c,
+          (unsigned long) diff_exp);
+#endif
+
+  MPFR_ASSERTD (ap != cp);
+  MPFR_ASSERTD (bp != cp);
+
+  /* here we have shift_c = (diff_exp - cancel) % GMP_NUMB_BITS,
+        0 <= shift_c < GMP_NUMB_BITS
+     thus we want cancel2 = ceil((cancel - diff_exp) / GMP_NUMB_BITS) */
+
+  cancel2 = (long int) (cancel - (diff_exp - shift_c)) / GMP_NUMB_BITS;
+  /* the high cancel2 limbs from b should not be taken into account */
+#ifdef DEBUG
+  printf ("cancel=%lu cancel1=%lu cancel2=%ld\n",
+          (unsigned long) cancel, (unsigned long) cancel1, (long) cancel2);
+#endif
+
+  /*               ap[an-1]        ap[0]
+             <----------------+-----------|---->
+             <----------PREC(a)----------><-sh->
+ cancel1
+ limbs        bp[bn-cancel1-1]
+ <--...-----><----------------+-----------+----------->
+ cancel2
+ limbs       cp[cn-cancel2-1]                cancel2 >= 0
+   <--...--><----------------+----------------+---------------->
+                (-cancel2)                    cancel2 < 0
+                   limbs      <----------------+---------------->
+  */
+
+  /* first part: put in ap[0..an-1] the value of high(b) - high(c),
+     where high(b) consists of the high an+cancel1 limbs of b,
+     and high(c) consists of the high an+cancel2 limbs of c.
+   */
+
+  /* copy high(b) into a */
+  if (MPFR_LIKELY(an + (mp_size_t) cancel1 <= bn))
+    /* a: <----------------+-----------|---->
+       b: <-----------------------------------------> */
+      MPN_COPY (ap, bp + bn - (an + cancel1), an);
+  else
+    /* a: <----------------+-----------|---->
+       b: <-------------------------> */
+    if ((mp_size_t) cancel1 < bn) /* otherwise b does not overlap with a */
+      {
+        MPN_ZERO (ap, an + cancel1 - bn);
+        MPN_COPY (ap + (an + cancel1 - bn), bp, bn - cancel1);
+      }
+    else
+      MPN_ZERO (ap, an);
+
+#ifdef DEBUG
+  printf("after copying high(b), a="); mpfr_print_binary(a); putchar('\n');
+#endif
+
+  /* subtract high(c) */
+  if (MPFR_LIKELY(an + cancel2 > 0)) /* otherwise c does not overlap with a */
+    {
+      mp_limb_t *ap2;
+
+      if (cancel2 >= 0)
+        {
+          if (an + cancel2 <= cn)
+            /* a: <----------------------------->
+               c: <-----------------------------------------> */
+            mpn_sub_n (ap, ap, cp + cn - (an + cancel2), an);
+          else
+            /* a: <---------------------------->
+               c: <-------------------------> */
+            {
+              ap2 = ap + an + (cancel2 - cn);
+              if (cn > cancel2)
+                mpn_sub_n (ap2, ap2, cp, cn - cancel2);
+            }
+        }
+      else /* cancel2 < 0 */
+        {
+          if (an + cancel2 <= cn)
+            /* a: <----------------------------->
+               c: <-----------------------------> */
+            borrow = mpn_sub_n (ap, ap, cp + cn - (an + cancel2),
+                                an + cancel2);
+          else
+            /* a: <---------------------------->
+               c: <----------------> */
+            {
+              ap2 = ap + an + (cancel2 - cn);
+              borrow = mpn_sub_n (ap2, ap2, cp, cn);
+            }
+          ap2 = ap + an + cancel2;
+          mpn_sub_1 (ap2, ap2, -cancel2, borrow);
+        }
+    }
+
+#ifdef DEBUG
+  printf("after subtracting high(c), a=");
+  mpfr_print_binary(a);
+  putchar('\n');
+#endif
+
+  /* now perform rounding */
+  sh = (mpfr_prec_t) an * GMP_NUMB_BITS - MPFR_PREC(a);
+  /* last unused bits from a */
+  carry = ap[0] & MPFR_LIMB_MASK (sh);
+  ap[0] -= carry;
+
+  if (MPFR_LIKELY(rnd_mode == MPFR_RNDN))
+    {
+      if (MPFR_LIKELY(sh))
+        {
+          is_exact = (carry == 0);
+          /* can decide except when carry = 2^(sh-1) [middle]
+             or carry = 0 [truncate, but cannot decide inexact flag] */
+          down = (carry < (MPFR_LIMB_ONE << (sh - 1)));
+          if (carry > (MPFR_LIMB_ONE << (sh - 1)))
+            goto add_one_ulp;
+          else if ((0 < carry) && down)
+            {
+              inexact = -1; /* result is smaller than exact value */
+              goto truncate;
+            }
+        }
+    }
+  else /* directed rounding: set rnd_mode to RNDZ iff toward zero */
+    {
+      if (MPFR_IS_RNDUTEST_OR_RNDDNOTTEST(rnd_mode, MPFR_IS_NEG(a)))
+        rnd_mode = MPFR_RNDZ;
+
+      if (carry)
+        {
+          if (rnd_mode == MPFR_RNDZ)
+            {
+              inexact = -1;
+              goto truncate;
+            }
+          else /* round away */
+            goto add_one_ulp;
+        }
+    }
+
+  /* we have to consider the low (bn - (an+cancel1)) limbs from b,
+     and the (cn - (an+cancel2)) limbs from c. */
+  bn -= an + cancel1;
+  cn0 = cn;
+  cn -= (long int) an + cancel2;
+
+#ifdef DEBUG
+  printf ("last %d bits from a are %lu, bn=%ld, cn=%ld\n",
+          sh, (unsigned long) carry, (long) bn, (long) cn);
+#endif
+
+  for (k = 0; (bn > 0) || (cn > 0); k = 1)
+    {
+      /* get next limbs */
+      bb = (bn > 0) ? bp[--bn] : 0;
+      if ((cn > 0) && (cn-- <= cn0))
+        cc = cp[cn];
+      else
+        cc = 0;
+
+      /* down is set when low(b) < low(c) */
+      if (down == 0)
+        down = (bb < cc);
+
+      /* the case rounding to nearest with sh=0 is special since one couldn't
+         subtract above 1/2 ulp in the trailing limb of the result */
+      if ((rnd_mode == MPFR_RNDN) && sh == 0 && k == 0)
+        {
+          mp_limb_t half = MPFR_LIMB_HIGHBIT;
+
+          is_exact = (bb == cc);
+
+          /* add one ulp if bb > cc + half
+             truncate if cc - half < bb < cc + half
+             sub one ulp if bb < cc - half
+          */
+
+          if (down)
+            {
+              if (cc >= half)
+                cc -= half;
+              else
+                bb += half;
+            }
+          else /* bb >= cc */
+            {
+              if (cc < half)
+                cc += half;
+              else
+                bb -= half;
+            }
+        }
+
+#ifdef DEBUG
+      printf ("    bb=%lu cc=%lu down=%d is_exact=%d\n",
+              (unsigned long) bb, (unsigned long) cc, down, is_exact);
+#endif
+      if (bb < cc)
+        {
+          if (rnd_mode == MPFR_RNDZ)
+            goto sub_one_ulp;
+          else if (rnd_mode != MPFR_RNDN) /* round away */
+            {
+              inexact = 1;
+              goto truncate;
+            }
+          else /* round to nearest: special case here since for sh=k=0
+                  bb = bb0 - MPFR_LIMB_HIGHBIT */
+            {
+              if (is_exact && sh == 0)
+                {
+                  /* For k=0 we can't decide exactness since it may depend
+                     from low order bits.
+                     For k=1, the first low limbs matched: low(b)-low(c)<0. */
+                  if (k)
+                    {
+                      inexact = 1;
+                      goto truncate;
+                    }
+                }
+              else if (down && sh == 0)
+                goto sub_one_ulp;
+              else
+                {
+                  inexact = (is_exact) ? 1 : -1;
+                  goto truncate;
+                }
+            }
+        }
+      else if (bb > cc)
+        {
+          if (rnd_mode == MPFR_RNDZ)
+            {
+              inexact = -1;
+              goto truncate;
+            }
+          else if (rnd_mode != MPFR_RNDN) /* round away */
+            goto add_one_ulp;
+          else /* round to nearest */
+            {
+              if (is_exact)
+                {
+                  inexact = -1;
+                  goto truncate;
+                }
+              else if (down)
+                {
+                  inexact = 1;
+                  goto truncate;
+                }
+              else
+                goto add_one_ulp;
+            }
+        }
+    }
+
+  if ((rnd_mode == MPFR_RNDN) && !is_exact)
+    {
+      /* even rounding rule */
+      if ((ap[0] >> sh) & 1)
+        {
+          if (down)
+            goto sub_one_ulp;
+          else
+            goto add_one_ulp;
+        }
+      else
+        inexact = (down) ? 1 : -1;
+    }
+  else
+    inexact = 0;
+  goto truncate;
+
+ sub_one_ulp: /* sub one unit in last place to a */
+  mpn_sub_1 (ap, ap, an, MPFR_LIMB_ONE << sh);
+  inexact = -1;
+  goto end_of_sub;
+
+ add_one_ulp: /* add one unit in last place to a */
+  if (MPFR_UNLIKELY(mpn_add_1 (ap, ap, an, MPFR_LIMB_ONE << sh)))
+    /* result is a power of 2: 11111111111111 + 1 = 1000000000000000 */
+    {
+      ap[an-1] = MPFR_LIMB_HIGHBIT;
+      add_exp = 1;
+    }
+  inexact = 1; /* result larger than exact value */
+
+ truncate:
+  if (MPFR_UNLIKELY((ap[an-1] >> (GMP_NUMB_BITS - 1)) == 0))
+    /* case 1 - epsilon */
+    {
+      ap[an-1] = MPFR_LIMB_HIGHBIT;
+      add_exp = 1;
+    }
+
+ end_of_sub:
+  /* we have to set MPFR_EXP(a) to MPFR_EXP(b) - cancel + add_exp, taking
+     care of underflows/overflows in that computation, and of the allowed
+     exponent range */
+  if (MPFR_LIKELY(cancel))
+    {
+      mpfr_exp_t exp_a;
+
+      cancel -= add_exp; /* still valid as unsigned long */
+      exp_a = MPFR_GET_EXP (b) - cancel;
+      if (MPFR_UNLIKELY(exp_a < __gmpfr_emin))
+        {
+          MPFR_TMP_FREE(marker);
+          if (rnd_mode == MPFR_RNDN &&
+              (exp_a < __gmpfr_emin - 1 ||
+               (inexact >= 0 && mpfr_powerof2_raw (a))))
+            rnd_mode = MPFR_RNDZ;
+          return mpfr_underflow (a, rnd_mode, MPFR_SIGN(a));
+        }
+      MPFR_SET_EXP (a, exp_a);
+    }
+  else /* cancel = 0: MPFR_EXP(a) <- MPFR_EXP(b) + add_exp */
+    {
+      /* in case cancel = 0, add_exp can still be 1, in case b is just
+         below a power of two, c is very small, prec(a) < prec(b),
+         and rnd=away or nearest */
+      mpfr_exp_t exp_b;
+
+      exp_b = MPFR_GET_EXP (b);
+      if (MPFR_UNLIKELY(add_exp && exp_b == __gmpfr_emax))
+        {
+          MPFR_TMP_FREE(marker);
+          return mpfr_overflow (a, rnd_mode, MPFR_SIGN(a));
+        }
+      MPFR_SET_EXP (a, exp_b + add_exp);
+    }
+  MPFR_TMP_FREE(marker);
+#ifdef DEBUG
+  printf ("result is a="); mpfr_print_binary(a); putchar('\n');
+#endif
+  /* check that result is msb-normalized */
+  MPFR_ASSERTD(ap[an-1] > ~ap[an-1]);
+  MPFR_RET (inexact * MPFR_INT_SIGN (a));
+}
diff --git a/src/sub1sp.c b/src/sub1sp.c
new file mode 100644
index 000000000..728101930
--- /dev/null
+++ b/src/sub1sp.c
@@ -0,0 +1,809 @@
+/* mpfr_sub1sp -- internal function to perform a "real" subtraction
+   All the operands must have the same precision
+
+Copyright 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#define MPFR_NEED_LONGLONG_H
+#include "mpfr-impl.h"
+
+/* Check if we have to check the result of mpfr_sub1sp with mpfr_sub1 */
+#ifdef WANT_ASSERT
+# if WANT_ASSERT >= 2
+
+int mpfr_sub1sp2 (mpfr_ptr a, mpfr_srcptr b, mpfr_srcptr c, mpfr_rnd_t rnd_mode);
+/* Checking wrapper, only built when WANT_ASSERT >= 2: compute the same
+   subtraction with both the generic mpfr_sub1 and the same-precision
+   fast path mpfr_sub1sp2 (the real implementation, renamed via #define
+   below this function), and abort with a binary dump of the operands if
+   either the rounded results or the ternary values disagree.
+   Returns the ternary value of the fast path. */
+int mpfr_sub1sp (mpfr_ptr a, mpfr_srcptr b, mpfr_srcptr c, mpfr_rnd_t rnd_mode)
+{
+  mpfr_t tmpa, tmpb, tmpc;
+  int inexb, inexc, inexact, inexact2;
+
+  mpfr_init2 (tmpa, MPFR_PREC (a));
+  mpfr_init2 (tmpb, MPFR_PREC (b));
+  mpfr_init2 (tmpc, MPFR_PREC (c));
+
+  /* Copies are exact (same precision); they also preserve b and c in case
+     a aliases one of them. */
+  inexb = mpfr_set (tmpb, b, MPFR_RNDN);
+  MPFR_ASSERTN (inexb == 0);
+
+  inexc = mpfr_set (tmpc, c, MPFR_RNDN);
+  MPFR_ASSERTN (inexc == 0);
+
+  /* Reference result is computed first, into tmpa, before a (which may
+     alias b or c) is overwritten by mpfr_sub1sp2. */
+  inexact2 = mpfr_sub1 (tmpa, tmpb, tmpc, rnd_mode);
+  inexact = mpfr_sub1sp2(a, b, c, rnd_mode);
+
+  /* NOTE(review): mpfr_cmp treats +0 and -0 as equal, so a sign-of-zero
+     mismatch between the two implementations would not be detected here. */
+  if (mpfr_cmp (tmpa, a) || inexact != inexact2)
+    {
+      fprintf (stderr, "sub1 & sub1sp return different values for %s\n"
+               "Prec_a = %lu, Prec_b = %lu, Prec_c = %lu\nB = ",
+               mpfr_print_rnd_mode (rnd_mode), (unsigned long) MPFR_PREC (a),
+               (unsigned long) MPFR_PREC (b), (unsigned long) MPFR_PREC (c));
+      mpfr_fprint_binary (stderr, tmpb);
+      fprintf (stderr, "\nC = ");
+      mpfr_fprint_binary (stderr, tmpc);
+      fprintf (stderr, "\nSub1 : ");
+      mpfr_fprint_binary (stderr, tmpa);
+      fprintf (stderr, "\nSub1sp: ");
+      mpfr_fprint_binary (stderr, a);
+      fprintf (stderr, "\nInexact sp = %d | Inexact = %d\n",
+               inexact, inexact2);
+      MPFR_ASSERTN (0);
+    }
+  mpfr_clears (tmpa, tmpb, tmpc, (mpfr_ptr) 0);
+  return inexact;
+}
+# define mpfr_sub1sp mpfr_sub1sp2
+# endif
+#endif
+
+/* Debugging support */
+#ifdef DEBUG
+# undef DEBUG
+# define DEBUG(x) (x)
+#else
+# define DEBUG(x) /**/
+#endif
+
+/* Rounding Sub */
+
+/*
+ compute sgn(b)*(|b| - |c|) if |b|>|c| else -sgn(b)*(|c| -|b|)
+ Returns 0 iff result is exact,
+ a negative value when the result is less than the exact value,
+ a positive value otherwise.
+*/
+
+/* A0...Ap-1
+ * Cp Cp+1 ....
+ * <- C'p+1 ->
+ * Cp = -1 if calculated from c mantissa
+ * Cp = 0 if 0 from a or c
+ * Cp = 1 if calculated from a.
+ * C'p+1 = First bit not null or 0 if there isn't one
+ *
+ * Can't have Cp=-1 and C'p+1=1*/
+
+/* RND = MPFR_RNDZ:
+ * + if Cp=0 and C'p+1=0,1, Truncate.
+ * + if Cp=0 and C'p+1=-1, SubOneUlp
+ * + if Cp=-1, SubOneUlp
+ * + if Cp=1, AddOneUlp
+ * RND = MPFR_RNDA (Away)
+ * + if Cp=0 and C'p+1=0,-1, Truncate
+ * + if Cp=0 and C'p+1=1, AddOneUlp
+ * + if Cp=1, AddOneUlp
+ * + if Cp=-1, Truncate
+ * RND = MPFR_RNDN
+ * + if Cp=0, Truncate
+ * + if Cp=1 and C'p+1=1, AddOneUlp
+ * + if Cp=1 and C'p+1=-1, Truncate
+ * + if Cp=1 and C'p+1=0, Truncate if Ap-1=0, AddOneUlp else
+ * + if Cp=-1 and C'p+1=-1, SubOneUlp
+ * + if Cp=-1 and C'p+1=0, Truncate if Ap-1=0, SubOneUlp else
+ *
+ * If AddOneUlp:
+ *   If carry, then it is 11111111111 + 1 = 10000000000000
+ *   ap[n-1]=MPFR_LIMB_HIGHBIT
+ * If SubOneUlp:
+ *   If we lose one bit, it is 1000000000 - 1 = 0111111111111
+ *   Then shift, and put as last bit x which is calculated
+ *   according to Cp, Cp-1 and rnd_mode.
+ * If Truncate,
+ *   If it is a power of 2,
+ *   we may have to SubOneUlp in some special cases.
+ *
+ * To simplify, we don't use Cp = 1.
+ *
+ */
+
+/* Fast-path subtraction when all three operands share one precision p:
+   a <- rounded (b - c) with signs handled as described above.
+   Dispatches on d = EXP(b) - EXP(c) (after ensuring |b| > |c|):
+     d == 0     : limb-wise exact subtraction, then normalize;
+     d == 1     : compute b - c/2 (no bit lost) or 2*b - c (exact) as needed;
+     d >= p     : c only contributes rounding bits; a is b or b - ulp(b);
+     2 <= d < p : general case with a shifted copy of c.
+   Rounding is decided from Cp (bcp), C'p+1 (bcp1) and, when a normalization
+   shift may occur, Cp+1 (bbcp) and C'p+2 (bbcp1) -- see the tables above.
+   Returns the ternary value: 0 iff exact, sign follows MPFR convention. */
+int
+mpfr_sub1sp (mpfr_ptr a, mpfr_srcptr b, mpfr_srcptr c, mpfr_rnd_t rnd_mode)
+{
+  mpfr_exp_t bx,cx;
+  mpfr_uexp_t d;                /* exponent difference, always >= 0 */
+  mpfr_prec_t p, sh, cnt;       /* sh = number of unused low bits in last limb */
+  mp_size_t n;
+  mp_limb_t *ap, *bp, *cp;
+  mp_limb_t limb;
+  int inexact;
+  mp_limb_t bcp,bcp1; /* Cp and C'p+1 */
+  mp_limb_t bbcp = (mp_limb_t) -1, bbcp1 = (mp_limb_t) -1; /* Cp+1 and C'p+2,
+     gcc claims that they might be used uninitialized. We fill them with invalid
+     values, which should produce a failure if so. See README.dev file. */
+
+  MPFR_TMP_DECL(marker);
+
+  MPFR_TMP_MARK(marker);
+
+  MPFR_ASSERTD(MPFR_PREC(a) == MPFR_PREC(b) && MPFR_PREC(b) == MPFR_PREC(c));
+  MPFR_ASSERTD(MPFR_IS_PURE_FP(b) && MPFR_IS_PURE_FP(c));
+
+  /* Read prec and num of limbs */
+  p = MPFR_PREC(b);
+  n = (p-1)/GMP_NUMB_BITS+1;
+
+  /* Fast cmp of |b| and |c| */
+  bx = MPFR_GET_EXP (b);
+  cx = MPFR_GET_EXP (c);
+  if (MPFR_UNLIKELY(bx == cx))
+    {
+      mp_size_t k = n - 1;
+      /* Check mantissa since exponents are equal */
+      bp = MPFR_MANT(b);
+      cp = MPFR_MANT(c);
+      while (k>=0 && MPFR_UNLIKELY(bp[k] == cp[k]))
+        k--;
+      if (MPFR_UNLIKELY(k < 0))
+        /* b == c ! */
+        {
+          /* Return exact number 0: the zero is negative iff rounding
+             downward (IEEE-754 rule for an exact zero difference) */
+          if (rnd_mode == MPFR_RNDD)
+            MPFR_SET_NEG(a);
+          else
+            MPFR_SET_POS(a);
+          MPFR_SET_ZERO(a);
+          MPFR_RET(0);
+        }
+      else if (bp[k] > cp[k])
+        goto BGreater;
+      else
+        {
+          MPFR_ASSERTD(bp[k]<cp[k]);
+          goto CGreater;
+        }
+    }
+  else if (MPFR_UNLIKELY(bx < cx))
+    {
+      /* Swap b and c and set sign */
+      mpfr_srcptr t;
+      mpfr_exp_t tx;
+    CGreater:
+      MPFR_SET_OPPOSITE_SIGN(a,b);
+      t = b; b = c; c = t;
+      tx = bx; bx = cx; cx = tx;
+    }
+  else
+    {
+      /* b > c */
+    BGreater:
+      MPFR_SET_SAME_SIGN(a,b);
+    }
+
+  /* Now |b| > |c| */
+  MPFR_ASSERTD(bx >= cx);
+  d = (mpfr_uexp_t) bx - cx;
+  DEBUG (printf ("New with diff=%lu\n", (unsigned long) d));
+
+  if (MPFR_UNLIKELY(d <= 1))
+    {
+      if (MPFR_LIKELY(d < 1))
+        {
+          /* d == 0: both mantissas are aligned.
+             <-- b -->
+             <-- c --> : exact sub */
+          ap = MPFR_MANT(a);
+          mpn_sub_n (ap, MPFR_MANT(b), MPFR_MANT(c), n);
+          /* Normalize */
+        ExactNormalize:
+          limb = ap[n-1];
+          if (MPFR_LIKELY(limb))
+            {
+              /* First limb is not zero. */
+              count_leading_zeros(cnt, limb);
+              /* cnt could be == 0 <= SubD1Lose */
+              if (MPFR_LIKELY(cnt))
+                {
+                  mpn_lshift(ap, ap, n, cnt); /* Normalize number */
+                  bx -= cnt; /* Update final expo */
+                }
+              /* Last limb should be ok */
+              MPFR_ASSERTD(!(ap[0] & MPFR_LIMB_MASK((unsigned int) (-p)
+                                                    % GMP_NUMB_BITS)));
+            }
+          else
+            {
+              /* First limb is zero */
+              mp_size_t k = n-1, len;
+              /* Find the first limb not equal to zero.
+                 FIXME: It is assumed to exist (since |b| > |c| and same prec)*/
+              do
+                {
+                  MPFR_ASSERTD( k > 0 );
+                  limb = ap[--k];
+                }
+              while (limb == 0);
+              MPFR_ASSERTD(limb != 0);
+              count_leading_zeros(cnt, limb);
+              k++;
+              len = n - k; /* Number of zero high limbs */
+              MPFR_ASSERTD(k >= 0);
+              if (MPFR_LIKELY(cnt))
+                mpn_lshift(ap+len, ap, k, cnt); /* Normalize the High Limb*/
+              else
+                {
+                  /* Must use DECR since src and dest may overlap & dest>=src*/
+                  MPN_COPY_DECR(ap+len, ap, k);
+                }
+              MPN_ZERO(ap, len); /* Zeroing the last limbs */
+              bx -= cnt + len*GMP_NUMB_BITS; /* Update Expo */
+              /* Last limb should be ok */
+              MPFR_ASSERTD(!(ap[len]&MPFR_LIMB_MASK((unsigned int) (-p)
+                                                    % GMP_NUMB_BITS)));
+            }
+          /* Check expo underflow */
+          if (MPFR_UNLIKELY(bx < __gmpfr_emin))
+            {
+              MPFR_TMP_FREE(marker);
+              /* inexact=0 */
+              DEBUG( printf("(D==0 Underflow)\n") );
+              /* For RNDN, round to zero unless the exact result is
+                 representable at emin-1 or beyond the halfway point */
+              if (rnd_mode == MPFR_RNDN &&
+                  (bx < __gmpfr_emin - 1 ||
+                   (/*inexact >= 0 &&*/ mpfr_powerof2_raw (a))))
+                rnd_mode = MPFR_RNDZ;
+              return mpfr_underflow (a, rnd_mode, MPFR_SIGN(a));
+            }
+          MPFR_SET_EXP (a, bx);
+          /* No rounding is necessary since the result is exact */
+          MPFR_ASSERTD(ap[n-1] > ~ap[n-1]);
+          MPFR_TMP_FREE(marker);
+          return 0;
+        }
+      else /* if (d == 1) */
+        {
+          /* | <-- b -->
+             | <-- c --> */
+          mp_limb_t c0, mask;
+          mp_size_t k;
+          MPFR_UNSIGNED_MINUS_MODULO(sh, p);
+          /* If we lose at least one bit, compute 2*b-c (Exact)
+           * else compute b-c/2 */
+          bp = MPFR_MANT(b);
+          cp = MPFR_MANT(c);
+          k = n-1;
+          limb = bp[k] - cp[k]/2;
+          if (limb > MPFR_LIMB_HIGHBIT)
+            {
+              /* We can't lose precision: compute b-c/2 */
+              /* Shift c in the allocated temporary block */
+            SubD1NoLose:
+              c0 = cp[0] & (MPFR_LIMB_ONE<<sh); /* bit of c shifted out below */
+              cp = (mp_limb_t*) MPFR_TMP_ALLOC(n * BYTES_PER_MP_LIMB);
+              mpn_rshift(cp, MPFR_MANT(c), n, 1);
+              if (MPFR_LIKELY(c0 == 0))
+                {
+                  /* Result is exact: no need of rounding! */
+                  ap = MPFR_MANT(a);
+                  mpn_sub_n (ap, bp, cp, n);
+                  MPFR_SET_EXP(a, bx); /* No expo overflow! */
+                  /* No truncate or normalize is needed */
+                  MPFR_ASSERTD(ap[n-1] > ~ap[n-1]);
+                  /* No rounding is necessary since the result is exact */
+                  MPFR_TMP_FREE(marker);
+                  return 0;
+                }
+              ap = MPFR_MANT(a);
+              mask = ~MPFR_LIMB_MASK(sh);
+              cp[0] &= mask; /* Delete last bit of c */
+              mpn_sub_n (ap, bp, cp, n);
+              MPFR_SET_EXP(a, bx); /* No expo overflow! */
+              MPFR_ASSERTD( !(ap[0] & ~mask) ); /* Check last bits */
+              /* No normalize is needed */
+              MPFR_ASSERTD(ap[n-1] > ~ap[n-1]);
+              /* Rounding is necessary since c0 = 1 */
+              /* Cp =-1 and C'p+1=0 */
+              bcp = 1; bcp1 = 0;
+              if (MPFR_LIKELY(rnd_mode == MPFR_RNDN))
+                {
+                  /* Even Rule apply: Check Ap-1 */
+                  if (MPFR_LIKELY( (ap[0] & (MPFR_LIMB_ONE<<sh)) == 0) )
+                    goto truncate;
+                  else
+                    goto sub_one_ulp;
+                }
+              MPFR_UPDATE_RND_MODE(rnd_mode, MPFR_IS_NEG(a));
+              if (rnd_mode == MPFR_RNDZ)
+                goto sub_one_ulp;
+              else
+                goto truncate;
+            }
+          else if (MPFR_LIKELY(limb < MPFR_LIMB_HIGHBIT))
+            {
+              /* We lose at least one bit of prec */
+              /* Computation of 2*b-c (exact) */
+              /* Shift b in the allocated temporary block */
+            SubD1Lose:
+              bp = (mp_limb_t*) MPFR_TMP_ALLOC (n * BYTES_PER_MP_LIMB);
+              mpn_lshift (bp, MPFR_MANT(b), n, 1);
+              ap = MPFR_MANT(a);
+              mpn_sub_n (ap, bp, cp, n);
+              bx--;
+              goto ExactNormalize;
+            }
+          else
+            {
+              /* Case: limb = 100000000000 */
+              /* Check while b[k] == c'[k] (C' is C shifted by 1) */
+              /* If b[k]<c'[k] => We lose at least one bit*/
+              /* If b[k]>c'[k] => We don't lose any bit */
+              /* If k==-1 => We don't lose any bit
+                 AND the result is 100000000000 0000000000 00000000000 */
+              mp_limb_t carry;
+              do {
+                carry = cp[k]&MPFR_LIMB_ONE;
+                k--;
+              } while (k>=0 &&
+                       bp[k]==(carry=cp[k]/2+(carry<<(GMP_NUMB_BITS-1))));
+              if (MPFR_UNLIKELY(k<0))
+                {
+                  /*If carry then (sh==0 and Virtual c'[-1] > Virtual b[-1]) */
+                  if (MPFR_UNLIKELY(carry)) /* carry = cp[0]&MPFR_LIMB_ONE */
+                    {
+                      /* FIXME: Can be faster? */
+                      MPFR_ASSERTD(sh == 0);
+                      goto SubD1Lose;
+                    }
+                  /* Result is a power of 2 */
+                  ap = MPFR_MANT (a);
+                  MPN_ZERO (ap, n);
+                  ap[n-1] = MPFR_LIMB_HIGHBIT;
+                  MPFR_SET_EXP (a, bx); /* No expo overflow! */
+                  /* No Normalize is needed */
+                  /* No Rounding is needed */
+                  MPFR_TMP_FREE (marker);
+                  return 0;
+                }
+              /* carry = cp[k]/2+(cp[k-1]&1)<<(GMP_NUMB_BITS-1) = c'[k]*/
+              else if (bp[k] > carry)
+                goto SubD1NoLose;
+              else
+                {
+                  MPFR_ASSERTD(bp[k]<carry);
+                  goto SubD1Lose;
+                }
+            }
+        }
+    }
+  else if (MPFR_UNLIKELY(d >= p))
+    {
+      /* c is entirely below the last bit of b: a = b or a = b - ulp(b) */
+      ap = MPFR_MANT(a);
+      MPFR_UNSIGNED_MINUS_MODULO(sh, p);
+      /* We can't set A before since we use cp for rounding... */
+      /* Perform rounding: check if a=b or a=b-ulp(b) */
+      if (MPFR_UNLIKELY(d == p))
+        {
+          /* cp == -1 and c'p+1 = ? */
+          bcp = 1;
+          /* We need Cp+1 later for a very improbable case. */
+          bbcp = (MPFR_MANT(c)[n-1] & (MPFR_LIMB_ONE<<(GMP_NUMB_BITS-2)));
+          /* We need also C'p+1 for an even more improbable case... */
+          if (MPFR_LIKELY( bbcp ))
+            bcp1 = 1;
+          else
+            {
+              cp = MPFR_MANT(c);
+              if (MPFR_UNLIKELY(cp[n-1] == MPFR_LIMB_HIGHBIT))
+                {
+                  mp_size_t k = n-1;
+                  do {
+                    k--;
+                  } while (k>=0 && cp[k]==0);
+                  bcp1 = (k>=0);
+                }
+              else
+                bcp1 = 1;
+            }
+          DEBUG( printf("(D=P) Cp=-1 Cp+1=%d C'p+1=%d \n", bbcp!=0, bcp1!=0) );
+          bp = MPFR_MANT (b);
+
+          /* Even if src and dest overlap, it is ok using MPN_COPY */
+          if (MPFR_LIKELY(rnd_mode == MPFR_RNDN))
+            {
+              if (MPFR_UNLIKELY( bcp && bcp1==0 ))
+                /* Cp=-1 and C'p+1=0: Even rule Apply! */
+                /* Check Ap-1 = Bp-1 */
+                if ((bp[0] & (MPFR_LIMB_ONE<<sh)) == 0)
+                  {
+                    MPN_COPY(ap, bp, n);
+                    goto truncate;
+                  }
+              /* otherwise fall through: round by subtracting one ulp */
+              MPN_COPY(ap, bp, n);
+              goto sub_one_ulp;
+            }
+          MPFR_UPDATE_RND_MODE(rnd_mode, MPFR_IS_NEG(a));
+          if (rnd_mode == MPFR_RNDZ)
+            {
+              MPN_COPY(ap, bp, n);
+              goto sub_one_ulp;
+            }
+          else
+            {
+              MPN_COPY(ap, bp, n);
+              goto truncate;
+            }
+        }
+      else
+        {
+          /* Cp=0, Cp+1=-1 if d==p+1, C'p+1=-1 */
+          bcp = 0; bbcp = (d==p+1); bcp1 = 1;
+          DEBUG( printf("(D>P) Cp=%d Cp+1=%d C'p+1=%d\n", bcp!=0,bbcp!=0,bcp1!=0) );
+          /* Need to compute C'p+2 if d==p+1 and if rnd_mode=NEAREST
+             (Because of a very improbable case) */
+          if (MPFR_UNLIKELY(d==p+1 && rnd_mode==MPFR_RNDN))
+            {
+              cp = MPFR_MANT(c);
+              if (MPFR_UNLIKELY(cp[n-1] == MPFR_LIMB_HIGHBIT))
+                {
+                  mp_size_t k = n-1;
+                  do {
+                    k--;
+                  } while (k>=0 && cp[k]==0);
+                  bbcp1 = (k>=0);
+                }
+              else
+                bbcp1 = 1;
+              DEBUG( printf("(D>P) C'p+2=%d\n", bbcp1!=0) );
+            }
+          /* Copy mantissa B in A */
+          MPN_COPY(ap, MPFR_MANT(b), n);
+          /* Round */
+          if (MPFR_LIKELY(rnd_mode == MPFR_RNDN))
+            goto truncate;
+          MPFR_UPDATE_RND_MODE(rnd_mode, MPFR_IS_NEG(a));
+          if (rnd_mode == MPFR_RNDZ)
+            goto sub_one_ulp;
+          else /* rnd_mode = AWAY */
+            goto truncate;
+        }
+    }
+  else
+    {
+      mpfr_uexp_t dm;
+      mp_size_t m;
+      mp_limb_t mask;
+
+      /* General case: 2 <= d < p */
+      MPFR_UNSIGNED_MINUS_MODULO(sh, p);
+      cp = (mp_limb_t*) MPFR_TMP_ALLOC(n * BYTES_PER_MP_LIMB);
+
+      /* Shift c in temporary allocated place */
+      dm = d % GMP_NUMB_BITS;
+      m = d / GMP_NUMB_BITS;
+      if (MPFR_UNLIKELY(dm == 0))
+        {
+          /* dm = 0 and m > 0: Just copy */
+          MPFR_ASSERTD(m!=0);
+          MPN_COPY(cp, MPFR_MANT(c)+m, n-m);
+          MPN_ZERO(cp+n-m, m);
+        }
+      else if (MPFR_LIKELY(m == 0))
+        {
+          /* dm >=2 and m == 0: just shift */
+          MPFR_ASSERTD(dm >= 2);
+          mpn_rshift(cp, MPFR_MANT(c), n, dm);
+        }
+      else
+        {
+          /* dm > 0 and m > 0: shift and zero */
+          mpn_rshift(cp, MPFR_MANT(c)+m, n-m, dm);
+          MPN_ZERO(cp+n-m, m);
+        }
+
+      DEBUG( mpfr_print_mant_binary("Before", MPFR_MANT(c), p) );
+      DEBUG( mpfr_print_mant_binary("B=    ", MPFR_MANT(b), p) );
+      DEBUG( mpfr_print_mant_binary("After ", cp, p) );
+
+      /* Compute bcp=Cp and bcp1=C'p+1 */
+      if (MPFR_LIKELY(sh))
+        {
+          /* Try to compute them from C' rather than C (FIXME: Faster?) */
+          bcp = (cp[0] & (MPFR_LIMB_ONE<<(sh-1))) ;
+          if (MPFR_LIKELY( cp[0] & MPFR_LIMB_MASK(sh-1) ))
+            bcp1 = 1;
+          else
+            {
+              /* We can't compute C'p+1 from C'. Compute it from C */
+              /* Start from bit x=p-d+sh in mantissa C
+                 (+sh since we have already looked sh bits in C'!) */
+              mpfr_prec_t x = p-d+sh-1;
+              if (MPFR_LIKELY(x>p))
+                /* We have already looked at all the bits of c, so C'p+1 = 0*/
+                bcp1 = 0;
+              else
+                {
+                  mp_limb_t *tp = MPFR_MANT(c);
+                  mp_size_t kx = n-1 - (x / GMP_NUMB_BITS);
+                  mpfr_prec_t sx = GMP_NUMB_BITS-1-(x%GMP_NUMB_BITS);
+                  DEBUG (printf ("(First) x=%lu Kx=%ld Sx=%lu\n",
+                                 (unsigned long) x, (long) kx,
+                                 (unsigned long) sx));
+                  /* Looks at the last bits of limb kx (if sx=0 does nothing)*/
+                  if (tp[kx] & MPFR_LIMB_MASK(sx))
+                    bcp1 = 1;
+                  else
+                    {
+                      /*kx += (sx==0);*/
+                      /*If sx==0, tp[kx] hasn't been checked*/
+                      do {
+                        kx--;
+                      } while (kx>=0 && tp[kx]==0);
+                      bcp1 = (kx >= 0);
+                    }
+                }
+            }
+        }
+      else
+        {
+          /* Compute Cp and C'p+1 from C with sh=0 */
+          mp_limb_t *tp = MPFR_MANT(c);
+          /* Start from bit x=p-d in mantissa C */
+          mpfr_prec_t x = p-d;
+          mp_size_t kx = n-1 - (x / GMP_NUMB_BITS);
+          mpfr_prec_t sx = GMP_NUMB_BITS-1-(x%GMP_NUMB_BITS);
+          MPFR_ASSERTD(p >= d);
+          bcp = (tp[kx] & (MPFR_LIMB_ONE<<sx));
+          /* Looks at the last bits of limb kx (If sx=0, does nothing)*/
+          if (tp[kx] & MPFR_LIMB_MASK(sx))
+            bcp1 = 1;
+          else
+            {
+              /*kx += (sx==0);*/ /*If sx==0, tp[kx] hasn't been checked*/
+              do {
+                kx--;
+              } while (kx>=0 && tp[kx]==0);
+              bcp1 = (kx>=0);
+            }
+        }
+      DEBUG( printf("sh=%lu Cp=%d C'p+1=%d\n", sh, bcp!=0, bcp1!=0) );
+
+      /* Check if we can lose a bit, and if so compute Cp+1 and C'p+2 */
+      bp = MPFR_MANT(b);
+      if (MPFR_UNLIKELY((bp[n-1]-cp[n-1]) <= MPFR_LIMB_HIGHBIT))
+        {
+          /* We can lose a bit so we precompute Cp+1 and C'p+2 */
+          /* Test for trivial case: since C'p+1=0, Cp+1=0 and C'p+2 =0 */
+          if (MPFR_LIKELY(bcp1 == 0))
+            {
+              bbcp = 0;
+              bbcp1 = 0;
+            }
+          else /* bcp1 != 0 */
+            {
+              /* We can lose a bit:
+                 compute Cp+1 and C'p+2 from mantissa C */
+              mp_limb_t *tp = MPFR_MANT(c);
+              /* Start from bit x=(p+1)-d in mantissa C */
+              mpfr_prec_t x = p+1-d;
+              mp_size_t kx = n-1 - (x/GMP_NUMB_BITS);
+              mpfr_prec_t sx = GMP_NUMB_BITS-1-(x%GMP_NUMB_BITS);
+              MPFR_ASSERTD(p > d);
+              DEBUG (printf ("(pre) x=%lu Kx=%ld Sx=%lu\n",
+                             (unsigned long) x, (long) kx,
+                             (unsigned long) sx));
+              bbcp = (tp[kx] & (MPFR_LIMB_ONE<<sx)) ;
+              /* Looks at the last bits of limb kx (If sx=0, does nothing)*/
+              /* If Cp+1=0, since C'p+1!=0, C'p+2=1 ! */
+              if (MPFR_LIKELY(bbcp==0 || (tp[kx]&MPFR_LIMB_MASK(sx))))
+                bbcp1 = 1;
+              else
+                {
+                  /*kx += (sx==0);*/ /*If sx==0, tp[kx] hasn't been checked*/
+                  do {
+                    kx--;
+                  } while (kx>=0 && tp[kx]==0);
+                  bbcp1 = (kx>=0);
+                  DEBUG (printf ("(Pre) Scan done for %ld\n", (long) kx));
+                }
+            } /*End of Bcp1 != 0*/
+          DEBUG( printf("(Pre) Cp+1=%d C'p+2=%d\n", bbcp!=0, bbcp1!=0) );
+        } /* End of "can lose a bit" */
+
+      /* Clean shifted C' */
+      mask = ~MPFR_LIMB_MASK (sh);
+      cp[0] &= mask;
+
+      /* Subtract the mantissa c from b in a */
+      ap = MPFR_MANT(a);
+      mpn_sub_n (ap, bp, cp, n);
+      DEBUG( mpfr_print_mant_binary("Sub=  ", ap, p) );
+
+      /* Normalize: we lose at most one bit */
+      if (MPFR_UNLIKELY(MPFR_LIMB_MSB(ap[n-1]) == 0))
+        {
+          /* High bit is not set and we have to fix it! */
+          /* Ap >= 010000xxx001 */
+          mpn_lshift(ap, ap, n, 1);
+          /* Ap >= 100000xxx010 */
+          if (MPFR_UNLIKELY(bcp!=0)) /* Check if Cp = -1 */
+            /* Since Cp == -1, we have to subtract one more */
+            {
+              mpn_sub_1(ap, ap, n, MPFR_LIMB_ONE<<sh);
+              MPFR_ASSERTD(MPFR_LIMB_MSB(ap[n-1]) != 0);
+            }
+          /* Ap >= 10000xxx001 */
+          /* Final exponent -1 since we have shifted the mantissa */
+          bx--;
+          /* Update bcp and bcp1 */
+          MPFR_ASSERTN(bbcp != (mp_limb_t) -1);
+          MPFR_ASSERTN(bbcp1 != (mp_limb_t) -1);
+          bcp = bbcp;
+          bcp1 = bbcp1;
+          /* We don't have a valid Cp+1 anymore!
+             But since Ap >= 100000xxx001, the final sub can't unnormalize!*/
+        }
+      MPFR_ASSERTD( !(ap[0] & ~mask) );
+
+      /* Rounding */
+      if (MPFR_LIKELY(rnd_mode == MPFR_RNDN))
+        {
+          if (MPFR_LIKELY(bcp==0))
+            goto truncate;
+          else if ((bcp1) || ((ap[0] & (MPFR_LIMB_ONE<<sh)) != 0))
+            goto sub_one_ulp;
+          else
+            goto truncate;
+        }
+
+      /* Update rounding mode */
+      MPFR_UPDATE_RND_MODE(rnd_mode, MPFR_IS_NEG(a));
+      if (rnd_mode == MPFR_RNDZ && (MPFR_LIKELY(bcp || bcp1)))
+        goto sub_one_ulp;
+      goto truncate;
+    }
+  MPFR_RET_NEVER_GO_HERE ();
+
+  /* Sub one ulp to the result */
+ sub_one_ulp:
+  mpn_sub_1 (ap, ap, n, MPFR_LIMB_ONE << sh);
+  /* Result should be smaller than exact value: inexact=-1 */
+  inexact = -1;
+  /* Check normalization */
+  if (MPFR_UNLIKELY(MPFR_LIMB_MSB(ap[n-1]) == 0))
+    {
+      /* ap was a power of 2, and we lose a bit */
+      /* Now it is 0111111111111111111[00000 */
+      mpn_lshift(ap, ap, n, 1);
+      bx--;
+      /* And the lost bit x depends on Cp+1, and Cp */
+      /* Compute Cp+1 if it isn't already computed (ie d==1) */
+      /* FIXME: Is this case possible? */
+      if (MPFR_UNLIKELY(d == 1))
+        bbcp = 0;
+      DEBUG( printf("(SubOneUlp)Cp=%d, Cp+1=%d C'p+1=%d\n", bcp!=0,bbcp!=0,bcp1!=0));
+      /* Compute the last bit (Since we have shifted the mantissa)
+         we need one more bit!*/
+      MPFR_ASSERTN(bbcp != (mp_limb_t) -1);
+      if ( (rnd_mode == MPFR_RNDZ && bcp==0)
+           || (rnd_mode==MPFR_RNDN && bbcp==0)
+           || (bcp && bcp1==0) ) /*Exact result*/
+        {
+          ap[0] |= MPFR_LIMB_ONE<<sh;
+          if (rnd_mode == MPFR_RNDN)
+            inexact = 1;
+          DEBUG( printf("(SubOneUlp) Last bit set\n") );
+        }
+      /* Result could be exact if C'p+1 = 0 and rnd == Zero
+         since we have added one more bit to the result */
+      /* Fixme: rnd_mode == MPFR_RNDZ needed ? */
+      if (bcp1==0 && rnd_mode==MPFR_RNDZ)
+        {
+          DEBUG( printf("(SubOneUlp) Exact result\n") );
+          inexact = 0;
+        }
+    }
+
+  goto end_of_sub;
+
+ truncate:
+  /* Check if the result is an exact power of 2: 100000000000
+     in which case we could have to do sub_one_ulp due to some nasty reasons:
+     If Result is a Power of 2:
+      + If rnd = AWAY,
+      | If Cp=-1 and C'p+1 = 0, SubOneUlp and the result is EXACT.
+         If Cp=-1 and C'p+1 =-1, SubOneUlp and the result is above.
+         Otherwise truncate
+      + If rnd = NEAREST,
+         If Cp= 0 and Cp+1  =-1 and C'p+2=-1, SubOneUlp and the result is above
+         If cp=-1 and C'p+1 = 0, SubOneUlp and the result is exact.
+         Otherwise truncate.
+     X bit should always be set if SubOneUlp*/
+  if (MPFR_UNLIKELY(ap[n-1] == MPFR_LIMB_HIGHBIT))
+    {
+      mp_size_t k = n-1;
+      do {
+        k--;
+      } while (k>=0 && ap[k]==0);
+      if (MPFR_UNLIKELY(k<0))
+        {
+          /* It is a power of 2! */
+          /* Compute Cp+1 if it isn't already computed (ie d==1) */
+          /* FIXME: Is this case possible? */
+          if (d == 1)
+            bbcp=0;
+          DEBUG( printf("(Truncate) Cp=%d, Cp+1=%d C'p+1=%d C'p+2=%d\n", \
+                 bcp!=0, bbcp!=0, bcp1!=0, bbcp1!=0) );
+          MPFR_ASSERTN(bbcp != (mp_limb_t) -1);
+          MPFR_ASSERTN((rnd_mode != MPFR_RNDN) || (bcp != 0) || (bbcp == 0) || (bbcp1 != (mp_limb_t) -1));
+          if (((rnd_mode != MPFR_RNDZ) && bcp)
+              ||
+              ((rnd_mode == MPFR_RNDN) && (bcp == 0) && (bbcp) && (bbcp1)))
+            {
+              DEBUG( printf("(Truncate) Do sub\n") );
+              mpn_sub_1 (ap, ap, n, MPFR_LIMB_ONE << sh);
+              mpn_lshift(ap, ap, n, 1);
+              ap[0] |= MPFR_LIMB_ONE<<sh;
+              bx--;
+              /* FIXME: Explain why it works (or why not)... */
+              inexact = (bcp1 == 0) ? 0 : (rnd_mode==MPFR_RNDN) ? -1 : 1;
+              goto end_of_sub;
+            }
+        }
+    }
+
+  /* Computation of the inexact flag. */
+  inexact = MPFR_LIKELY(bcp || bcp1) ? 1 : 0;
+
+ end_of_sub:
+  /* Update Expo */
+  /* FIXME: Is this test really useful?
+      If d==0      : Exact case. This is never called.
+      if 1 < d < p : bx=MPFR_EXP(b) or MPFR_EXP(b)-1 > MPFR_EXP(c) > emin
+      if d == 1    : bx=MPFR_EXP(b). If we could lose any bits, the exact
+                     normalization is called.
+      if d >= p    : bx=MPFR_EXP(b) >= MPFR_EXP(c) + p > emin
+     After SubOneUlp, we could have one bit less.
+      if 1 < d < p : bx >= MPFR_EXP(b)-2 >= MPFR_EXP(c) > emin
+      if d == 1    : bx >= MPFR_EXP(b)-1 = MPFR_EXP(c) > emin.
+      if d >= p    : bx >= MPFR_EXP(b)-1 > emin since p>=2.
+  */
+  MPFR_ASSERTD( bx >= __gmpfr_emin);
+  /*
+    if (MPFR_UNLIKELY(bx < __gmpfr_emin))
+    {
+      DEBUG( printf("(Final Underflow)\n") );
+      if (rnd_mode == MPFR_RNDN &&
+          (bx < __gmpfr_emin - 1 ||
+           (inexact >= 0 && mpfr_powerof2_raw (a))))
+        rnd_mode = MPFR_RNDZ;
+      MPFR_TMP_FREE(marker);
+      return mpfr_underflow (a, rnd_mode, MPFR_SIGN(a));
+    }
+  */
+  MPFR_SET_EXP (a, bx);
+
+  MPFR_TMP_FREE(marker);
+  MPFR_RET (inexact * MPFR_INT_SIGN (a));
+}
diff --git a/src/sub_d.c b/src/sub_d.c
new file mode 100644
index 000000000..c06e45e04
--- /dev/null
+++ b/src/sub_d.c
@@ -0,0 +1,49 @@
+/* mpfr_sub_d -- subtract a machine double precision float from
+ a multiple precision floating-point number
+
+Copyright 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#include "mpfr-impl.h"
+
+/* a <- rounded (b - c) where c is a machine double.
+   c is first converted to a temporary mpfr_t of IEEE_DBL_MANT_DIG bits
+   (the conversion is checked to be exact), then mpfr_sub does the work.
+   Returns the ternary value of the subtraction, adjusted by
+   mpfr_check_range for the caller-visible exponent range. */
+int
+mpfr_sub_d (mpfr_ptr a, mpfr_srcptr b, double c, mpfr_rnd_t rnd_mode)
+{
+  int inexact;
+  mpfr_t d;  /* temporary holding the exact value of c */
+  MPFR_SAVE_EXPO_DECL (expo);
+
+  MPFR_LOG_FUNC (("b[%#R]=%R c=%.20g rnd=%d", b, b, c, rnd_mode),
+                 ("a[%#R]=%R", a, a));
+
+  /* Extend the exponent range during the computation; it is restored by
+     MPFR_SAVE_EXPO_FREE below, and mpfr_check_range then accounts for
+     any overflow/underflow with respect to the user's range. */
+  MPFR_SAVE_EXPO_MARK (expo);
+
+  mpfr_init2 (d, IEEE_DBL_MANT_DIG);
+  inexact = mpfr_set_d (d, c, rnd_mode);
+  MPFR_ASSERTN (inexact == 0);  /* the conversion must be exact */
+
+  mpfr_clear_flags ();
+  inexact = mpfr_sub (a, b, d, rnd_mode);
+  /* Merge the flags raised by the subtraction into the saved flags */
+  MPFR_SAVE_EXPO_UPDATE_FLAGS (expo, __gmpfr_flags);
+
+  mpfr_clear(d);
+  MPFR_SAVE_EXPO_FREE (expo);
+  return mpfr_check_range (a, inexact, rnd_mode);
+}
diff --git a/src/sub_ui.c b/src/sub_ui.c
new file mode 100644
index 000000000..ed184a1c0
--- /dev/null
+++ b/src/sub_ui.c
@@ -0,0 +1,54 @@
+/* mpfr_sub_ui -- subtract a floating-point number and a machine integer
+
+Copyright 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+
+#define MPFR_NEED_LONGLONG_H
+#include "mpfr-impl.h"
+
+int
+mpfr_sub_ui (mpfr_ptr y, mpfr_srcptr x, unsigned long int u, mpfr_rnd_t rnd_mode)
+{
+ if (MPFR_LIKELY (u != 0)) /* if u=0, do nothing */
+ {
+ mpfr_t uu;
+ mp_limb_t up[1];
+ unsigned long cnt;
+ int inex;
+
+ MPFR_SAVE_EXPO_DECL (expo);
+
+ MPFR_TMP_INIT1 (up, uu, GMP_NUMB_BITS);
+ MPFR_ASSERTN (u == (mp_limb_t) u);
+ count_leading_zeros (cnt, (mp_limb_t) u);
+ *up = (mp_limb_t) u << cnt;
+
+ /* Optimization note: Exponent save/restore operations may be
+ removed if mpfr_sub works even when uu is out-of-range. */
+ MPFR_SAVE_EXPO_MARK (expo);
+ MPFR_SET_EXP (uu, GMP_NUMB_BITS - cnt);
+ inex = mpfr_sub (y, x, uu, rnd_mode);
+ MPFR_SAVE_EXPO_FREE (expo);
+ return mpfr_check_range (y, inex, rnd_mode);
+ }
+ else
+ return mpfr_set (y, x, rnd_mode);
+}
diff --git a/src/subnormal.c b/src/subnormal.c
new file mode 100644
index 000000000..e7e19cbd9
--- /dev/null
+++ b/src/subnormal.c
@@ -0,0 +1,146 @@
+/* mpfr_subnormalize -- Subnormalize a floating point number
+ emulating sub-normal numbers.
+
+Copyright 2005, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#include "mpfr-impl.h"
+
+/* For MPFR_RNDN, we can have a problem of double rounding.
+ In such a case, this table helps to conclude what to do (y positive):
+ Rounding Bit | Sticky Bit | inexact | Action | new inexact
+ 0 | ? | ? | Trunc | sticky
+ 1 | 0 | 1 | Trunc |
+ 1 | 0 | 0 | Trunc if even |
+ 1 | 0 | -1 | AddOneUlp |
+ 1 | 1 | ? | AddOneUlp |
+
+ For other rounding mode, there isn't such a problem.
+ Just round it again and merge the inexact flags.
+*/
+
+/* Emulate gradual underflow (subnormals) for y, which was computed with the
+   normal MPFR semantics: re-round y to the reduced precision implied by its
+   exponent when EXP(y) lies below emin + PREC(y) - 1.  old_inexact is the
+   ternary value of the operation that produced y; the corrected ternary
+   value is returned.  The double-rounding hazard for MPFR_RNDN is resolved
+   using the table above.
+   NOTE(review): this appears to assume the exponent range is unchanged
+   between the computation of y and this call -- confirm with callers. */
+int
+mpfr_subnormalize (mpfr_ptr y, int old_inexact, mpfr_rnd_t rnd)
+{
+  int inexact = 0;
+
+  /* The subnormal exponent range is from:
+     mpfr_emin to mpfr_emin + MPFR_PREC(y) - 1 */
+  if (MPFR_LIKELY (MPFR_IS_SINGULAR (y)
+                   || (MPFR_GET_EXP (y) >=
+                       __gmpfr_emin + (mpfr_exp_t) MPFR_PREC (y) - 1)))
+    inexact = old_inexact;  /* not in the subnormal range: nothing to do */
+
+  /* We have to emulate one bit rounding if EXP(y) = emin */
+  else if (MPFR_GET_EXP (y) == __gmpfr_emin)
+    {
+      /* If this is a power of 2, we don't need rounding.
+         It handles cases when rounding away and y=0.1*2^emin */
+      if (mpfr_powerof2_raw (y))
+        inexact = old_inexact;
+      /* We keep the same sign for y.
+         Assuming Y is the real value and y the approximation
+         and since y is not a power of 2:  0.5*2^emin < Y < 1*2^emin
+         We also know the direction of the error thanks to inexact flag */
+      else if (rnd == MPFR_RNDN)
+        {
+          mp_limb_t *mant, rb ,sb;
+          mp_size_t s;
+          /* We need the rounding bit and the sticky bit. Read them
+             and use the previous table to conclude. */
+          s = MPFR_LIMB_SIZE (y) - 1;
+          mant = MPFR_MANT (y) + s;
+          rb = *mant & (MPFR_LIMB_HIGHBIT >> 1);
+          if (rb == 0)
+            goto set_min;
+          sb = *mant & ((MPFR_LIMB_HIGHBIT >> 1) - 1);
+          while (sb == 0 && s-- != 0)
+            sb = *--mant;
+          if (sb != 0)
+            goto set_min_p1;
+          /* Rounding bit is 1 and sticky bit is 0.
+             We need to examine old inexact flag to conclude. */
+          if ((old_inexact > 0 && MPFR_SIGN (y) > 0) ||
+              (old_inexact < 0 && MPFR_SIGN (y) < 0))
+            goto set_min;
+          /* If inexact != 0, return 0.1*2^(emin+1).
+             Otherwise, rounding bit = 1, sticky bit = 0 and inexact = 0
+             So we have 0.1100000000000000000000000*2^emin exactly.
+             We return 0.1*2^(emin+1) according to the even-rounding
+             rule on subnormals. */
+          goto set_min_p1;
+        }
+      else if (MPFR_IS_LIKE_RNDZ (rnd, MPFR_IS_NEG (y)))
+        {
+          /* NB: label also reached by goto from the RNDN branch above */
+        set_min:
+          mpfr_setmin (y, __gmpfr_emin);
+          inexact = -MPFR_SIGN (y);
+        }
+      else
+        {
+          /* NB: label also reached by goto from the RNDN branch above */
+        set_min_p1:
+          /* Note: mpfr_setmin will abort if __gmpfr_emax == __gmpfr_emin. */
+          mpfr_setmin (y, __gmpfr_emin + 1);
+          inexact = MPFR_SIGN (y);
+        }
+    }
+
+  else /* Hard case: It is more or less the same problem as mpfr_cache */
+    {
+      mpfr_t dest;
+      mpfr_prec_t q;
+      int sign;
+
+      /* Compute the intermediary precision: the number of significant
+         bits allowed at this subnormal exponent */
+      q = (mpfr_uexp_t) MPFR_GET_EXP (y) - __gmpfr_emin + 1;
+      mpfr_init2 (dest, q);
+      /* Round y in dest */
+      sign = MPFR_SIGN (y);
+      MPFR_SET_EXP (dest, MPFR_GET_EXP (y));
+      MPFR_SET_SIGN (dest, sign);
+      MPFR_RNDRAW_EVEN (inexact, dest,
+                        MPFR_MANT (y), MPFR_PREC (y), rnd, sign,
+                        MPFR_SET_EXP (dest, MPFR_GET_EXP (dest)+1));
+      if (MPFR_LIKELY (old_inexact != 0))
+        {
+          /* Fix double rounding: the halfway case was broken by the even
+             rule although the first rounding already moved the value */
+          if (MPFR_UNLIKELY(rnd == MPFR_RNDN && (inexact == MPFR_EVEN_INEX
+                                                 || inexact == -MPFR_EVEN_INEX)))
+            {
+              /* if both roundings are in the same direction, we have to go
+                 back in the other direction */
+              if (SAME_SIGN (inexact, old_inexact))
+                {
+                  if (SAME_SIGN (inexact, MPFR_INT_SIGN (y)))
+                    mpfr_nexttozero (dest);
+                  else
+                    mpfr_nexttoinf (dest);
+                  inexact = -inexact;
+                }
+            }
+          else if (MPFR_UNLIKELY (inexact == 0))
+            inexact = old_inexact;
+        }
+      /* Copy dest back into y; exact since PREC(dest) <= PREC(y) */
+      old_inexact = mpfr_set (y, dest, rnd);
+      MPFR_ASSERTN (old_inexact == 0);
+      MPFR_ASSERTN (MPFR_IS_PURE_FP (y));
+      mpfr_clear (dest);
+    }
+  return inexact;
+}
diff --git a/src/sum.c b/src/sum.c
new file mode 100644
index 000000000..a25e75c93
--- /dev/null
+++ b/src/sum.c
@@ -0,0 +1,315 @@
+/* Sum -- efficiently sum a list of floating-point numbers
+
+Copyright 2004, 2005, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+/* Reference: James Demmel and Yozo Hida, Fast and accurate floating-point
+ summation with application to computational geometry, Numerical Algorithms,
+ volume 37, number 1-4, pages 101--112, 2004. */
+
+#define MPFR_NEED_LONGLONG_H
+#include "mpfr-impl.h"
+
+/* I would really like to use "mpfr_srcptr const []" but the standard is
+   unhelpful here: it doesn't automatically convert a "mpfr_ptr []" to a
+   "mpfr_srcptr const []" when necessary. So the choices are:
+     mpfr_s **                : ok
+     mpfr_s *const*           : ok
+     mpfr_s **const           : ok
+     mpfr_s *const*const      : ok
+     const mpfr_s *const*     : no
+     const mpfr_s **const     : no
+     const mpfr_s *const*const: no
+   VL: this is not a bug, but a feature. See the reason here:
+   http://c-faq.com/ansi/constmismatch.html
+*/
+static void heap_sort (mpfr_srcptr *const, unsigned long, mpfr_srcptr *);
+static void count_sort (mpfr_srcptr *const, unsigned long, mpfr_srcptr *,
+ mpfr_exp_t, mpfr_uexp_t);
+
+/* Either sort the entries of tab into perm (by exponent) and return 0,
+   or return 1 for +INF, -1 for -INF and 2 for NAN (in which case the
+   sum is already determined and no sort is performed).  tab itself is
+   left untouched; only the permutation array perm is written. */
+int
+mpfr_sum_sort (mpfr_srcptr *const tab, unsigned long n, mpfr_srcptr *perm)
+{
+  mpfr_exp_t min, max;
+  mpfr_uexp_t exp_num;
+  unsigned long i;
+  int sign_inf;
+
+  sign_inf = 0;
+  /* Start from the extreme bounds so that the first regular entry
+     replaces them. */
+  min = MPFR_EMIN_MAX;
+  max = MPFR_EMAX_MIN;
+  for (i = 0; i < n; i++)
+    {
+      if (MPFR_UNLIKELY (MPFR_IS_SINGULAR (tab[i])))
+        {
+          if (MPFR_IS_NAN (tab[i]))
+            return 2; /* Return NAN code */
+          else if (MPFR_IS_INF (tab[i]))
+            {
+              if (sign_inf == 0) /* No previous INF */
+                sign_inf = MPFR_SIGN (tab[i]);
+              else if (sign_inf != MPFR_SIGN (tab[i]))
+                return 2; /* Return NAN: (+INF) + (-INF) */
+            }
+        }
+      else
+        {
+          if (MPFR_GET_EXP (tab[i]) < min)
+            min = MPFR_GET_EXP(tab[i]);
+          if (MPFR_GET_EXP (tab[i]) > max)
+            max = MPFR_GET_EXP(tab[i]);
+        }
+    }
+  if (MPFR_UNLIKELY (sign_inf != 0))
+    return sign_inf;
+
+  /* Number of distinct exponent slots; pick the cheaper sort:
+     counting sort is O(n + exp_num), heap sort is O(n log n). */
+  exp_num = max - min + 1;
+  /* FIXME : better test */
+  if (exp_num > n * MPFR_INT_CEIL_LOG2 (n))
+    heap_sort (tab, n, perm);
+  else
+    count_sort (tab, n, perm, min, exp_num);
+  return 0;
+}
+
+/* Exponent used for sorting; zeros get the local value `min' (adjusted
+   to min-1 below) so that they end up after every nonzero entry. */
+#define GET_EXP1(x) (MPFR_IS_ZERO (x) ? min : MPFR_GET_EXP (x))
+/* Performs a count sort of the entries: perm receives the pointers of
+   tab ordered by non-increasing exponent (zeros last). */
+static void
+count_sort (mpfr_srcptr *const tab, unsigned long n,
+            mpfr_srcptr *perm, mpfr_exp_t min, mpfr_uexp_t exp_num)
+{
+  unsigned long *account;
+  unsigned long target_rank, i;
+  MPFR_TMP_DECL(marker);
+
+  /* Reserve a place for potential 0 (with EXP min-1)
+     If there is no zero, we only lose one unused entry */
+  min--;
+  exp_num++;
+
+  /* Performs a counting sort of the entries */
+  MPFR_TMP_MARK (marker);
+  account = (unsigned long *) MPFR_TMP_ALLOC (exp_num * sizeof * account);
+  for (i = 0; i < exp_num; i++)
+    account[i] = 0;
+  /* Histogram of exponents (index 0 corresponds to exponent min). */
+  for (i = 0; i < n; i++)
+    account[GET_EXP1 (tab[i]) - min]++;
+  /* Suffix sums: account[j] becomes the number of entries with exponent
+     index >= j, so larger exponents are assigned the earlier ranks. */
+  for (i = exp_num - 1; i >= 1; i--)
+    account[i - 1] += account[i];
+  for (i = 0; i < n; i++)
+    {
+      /* Pre-decrement yields a distinct rank for each entry sharing
+         the same exponent. */
+      target_rank = --account[GET_EXP1 (tab[i]) - min];
+      perm[target_rank] = tab[i];
+    }
+  MPFR_TMP_FREE (marker);
+}
+
+
+/* Exponent used for sorting; zeros compare below every regular number
+   so that they end up last in the (non-increasing) output order. */
+#define GET_EXP2(x) (MPFR_IS_ZERO (x) ? MPFR_EMIN_MIN : MPFR_GET_EXP (x))
+
+/* Performs a heap sort of the entries: a min-heap (on the exponent) is
+   built in perm, then each extracted minimum is moved to the end, so
+   perm ends up ordered by non-increasing exponent. */
+static void
+heap_sort (mpfr_srcptr *const tab, unsigned long n, mpfr_srcptr *perm)
+{
+  unsigned long dernier_traite;
+  unsigned long i, pere;
+  mpfr_srcptr tmp;
+  unsigned long fils_gauche, fils_droit, fils_indigne;
+  /* Reminder of a heap structure :
+     node(i) has left child node(2i + 1) and right child node(2i + 2),
+     and father(node(i)) = node((i - 1) / 2)
+  */
+
+  /* initialize the permutation to identity */
+  for (i = 0; i < n; i++)
+    perm[i] = tab[i];
+
+  /* insertion phase: sift each new element up while its exponent is
+     smaller than its father's (min-heap invariant) */
+  for (dernier_traite = 1; dernier_traite < n; dernier_traite++)
+    {
+      i = dernier_traite;
+      while (i > 0)
+        {
+          pere = (i - 1) / 2;
+          if (GET_EXP2 (perm[pere]) > GET_EXP2 (perm[i]))
+            {
+              tmp = perm[pere];
+              perm[pere] = perm[i];
+              perm[i] = tmp;
+              i = pere;
+            }
+          else
+            break;
+        }
+    }
+
+  /* extraction phase: swap the current minimum to the end of the live
+     region, then sift the displaced element down to restore the heap */
+  for (dernier_traite = n - 1; dernier_traite > 0; dernier_traite--)
+    {
+      tmp = perm[0];
+      perm[0] = perm[dernier_traite];
+      perm[dernier_traite] = tmp;
+
+      i = 0;
+      while (1)
+        {
+          fils_gauche = 2 * i + 1;
+          fils_droit = fils_gauche + 1;
+          if (fils_gauche < dernier_traite)
+            {
+              if (fils_droit < dernier_traite)
+                {
+                  /* fils_indigne: the child with the smaller exponent */
+                  if (GET_EXP2(perm[fils_droit]) < GET_EXP2(perm[fils_gauche]))
+                    fils_indigne = fils_droit;
+                  else
+                    fils_indigne = fils_gauche;
+
+                  if (GET_EXP2 (perm[i]) > GET_EXP2 (perm[fils_indigne]))
+                    {
+                      tmp = perm[i];
+                      perm[i] = perm[fils_indigne];
+                      perm[fils_indigne] = tmp;
+                      i = fils_indigne;
+                    }
+                  else
+                    break;
+                }
+              else /* there is a left child but no right child */
+                {
+                  if (GET_EXP2 (perm[i]) > GET_EXP2 (perm[fils_gauche]))
+                    {
+                      tmp = perm[i];
+                      perm[i] = perm[fils_gauche];
+                      perm[fils_gauche] = tmp;
+                    }
+                  break;
+                }
+            }
+          else /* no children: the sift-down is finished */
+            break;
+        }
+    }
+}
+
+
+/* Add the n numbers of tab (in that order) with working precision F,
+ * rounding to nearest at each step; the final addition is written
+ * directly into ret.  Returns the OR of all ternary values, i.e. 0 iff
+ * every operation was exact.  Internal use function.
+ */
+static int
+sum_once (mpfr_ptr ret, mpfr_srcptr *const tab, unsigned long n, mpfr_prec_t F)
+{
+  mpfr_t acc;
+  unsigned long k;
+  int flags;
+
+  MPFR_ASSERTD (n >= 2);
+
+  mpfr_init2 (acc, F);
+  flags = mpfr_set (acc, tab[0], MPFR_RNDN);
+  for (k = 1; k < n - 1; k++)
+    {
+      MPFR_ASSERTD (!MPFR_IS_NAN (acc) && !MPFR_IS_INF (acc));
+      flags |= mpfr_add (acc, acc, tab[k], MPFR_RNDN);
+    }
+  flags |= mpfr_add (ret, acc, tab[n - 1], MPFR_RNDN);
+  mpfr_clear (acc);
+  return flags;
+}
+
+/* Sum the n floating-point numbers of tab_p into ret, rounded in
+   direction rnd.  The empty sum is +0; a single term is just rounded.
+   A NaN input, or infinities of both signs, give NaN.  The return
+   value is 0 when the result is known to be exact and nonzero when it
+   may be inexact; it is NOT the usual ternary value (see final line).
+   Reference: Demmel & Hida, "Fast and accurate floating-point
+   summation with application to computational geometry", 2004. */
+
+int
+mpfr_sum (mpfr_ptr ret, mpfr_ptr *const tab_p, unsigned long n, mpfr_rnd_t rnd)
+{
+  mpfr_t cur_sum;
+  mpfr_prec_t prec;
+  mpfr_srcptr *perm, *const tab = (mpfr_srcptr *) tab_p;
+  int k, error_trap;
+  MPFR_ZIV_DECL (loop);
+  MPFR_SAVE_EXPO_DECL (expo);
+  MPFR_TMP_DECL (marker);
+
+  if (MPFR_UNLIKELY (n <= 1))
+    {
+      if (n < 1)
+        {
+          /* Empty sum: +0 by convention. */
+          MPFR_SET_ZERO (ret);
+          MPFR_SET_POS (ret);
+          return 0;
+        }
+      else
+        return mpfr_set (ret, tab[0], rnd);
+    }
+
+  /* Sort and treat special cases */
+  MPFR_TMP_MARK (marker);
+  perm = (mpfr_srcptr *) MPFR_TMP_ALLOC (n * sizeof *perm);
+  error_trap = mpfr_sum_sort (tab, n, perm);
+  /* Check if there was a NAN or a INF */
+  if (MPFR_UNLIKELY (error_trap != 0))
+    {
+      MPFR_TMP_FREE (marker);
+      if (error_trap == 2)
+        {
+          MPFR_SET_NAN (ret);
+          MPFR_RET_NAN;
+        }
+      /* error_trap is +1 or -1: the sign of the resulting infinity. */
+      MPFR_SET_INF (ret);
+      MPFR_SET_SIGN (ret, error_trap);
+      MPFR_RET (0);
+    }
+
+  /* Initial precision: the target precision plus k + 2 guard bits,
+     where k = ceil(log2(n)) + 1 accounts for n-1 cascaded additions. */
+  prec = MAX (MPFR_PREC (tab[0]), MPFR_PREC (ret));
+  k = MPFR_INT_CEIL_LOG2 (n) + 1;
+  prec += k + 2;
+  mpfr_init2 (cur_sum, prec);
+
+  /* Ziv Loop: raise the working precision until the partial sum can be
+     correctly rounded to the target precision (or is exact). */
+  MPFR_SAVE_EXPO_MARK (expo);
+  MPFR_ZIV_INIT (loop, prec);
+  for (;;)
+    {
+      error_trap = sum_once (cur_sum, perm, n, prec + k);
+      if (MPFR_LIKELY (error_trap == 0 ||
+                       (!MPFR_IS_ZERO (cur_sum) &&
+                        mpfr_can_round (cur_sum,
+                                        MPFR_GET_EXP (cur_sum) - prec + 2,
+                                        MPFR_RNDN, rnd, MPFR_PREC (ret)))))
+        break;
+      MPFR_ZIV_NEXT (loop, prec);
+      mpfr_set_prec (cur_sum, prec);
+    }
+  MPFR_ZIV_FREE (loop);
+  MPFR_TMP_FREE (marker);
+
+  error_trap |= mpfr_set (ret, cur_sum, rnd);
+  mpfr_clear (cur_sum);
+
+  MPFR_SAVE_EXPO_FREE (expo);
+  error_trap |= mpfr_check_range (ret, 0, rnd);
+  return error_trap; /* It doesn't return the ternary value */
+}
+
+/* __END__ */
diff --git a/src/swap.c b/src/swap.c
new file mode 100644
index 000000000..4059ef288
--- /dev/null
+++ b/src/swap.c
@@ -0,0 +1,54 @@
+/* mpfr_swap (U, V) -- Swap U and V.
+
+Copyright 2000, 2001, 2002, 2003, 2004, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#include "mpfr-impl.h"
+
+/* Using memcpy would be slightly slower than swapping the fields by hand. */
+
+/* Exchange the contents of u and v by swapping each of the four fields
+   (precision, sign, exponent, significand pointer) in turn.  The limb
+   data itself is not copied, only the pointers are exchanged. */
+void
+mpfr_swap (mpfr_ptr u, mpfr_ptr v)
+{
+  mpfr_prec_t prec_tmp;
+  mpfr_sign_t sign_tmp;
+  mpfr_exp_t exp_tmp;
+  mp_limb_t *mant_tmp;
+
+  prec_tmp = MPFR_PREC (u);
+  MPFR_PREC (u) = MPFR_PREC (v);
+  MPFR_PREC (v) = prec_tmp;
+
+  sign_tmp = MPFR_SIGN (u);
+  MPFR_SIGN (u) = MPFR_SIGN (v);
+  MPFR_SIGN (v) = sign_tmp;
+
+  exp_tmp = MPFR_EXP (u);
+  MPFR_EXP (u) = MPFR_EXP (v);
+  MPFR_EXP (v) = exp_tmp;
+
+  mant_tmp = MPFR_MANT (u);
+  MPFR_MANT (u) = MPFR_MANT (v);
+  MPFR_MANT (v) = mant_tmp;
+}
diff --git a/src/tan.c b/src/tan.c
new file mode 100644
index 000000000..49adb3672
--- /dev/null
+++ b/src/tan.c
@@ -0,0 +1,87 @@
+/* mpfr_tan -- tangent of a floating-point number
+
+Copyright 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#define MPFR_NEED_LONGLONG_H
+#include "mpfr-impl.h"
+
+/* computes tan(x) = sin(x)/cos(x), rounded in direction rnd_mode;
+   returns the ternary value */
+int
+mpfr_tan (mpfr_ptr y, mpfr_srcptr x, mpfr_rnd_t rnd_mode)
+{
+  mpfr_prec_t precy, m;
+  int inexact;
+  mpfr_t s, c;
+  MPFR_ZIV_DECL (loop);
+  MPFR_SAVE_EXPO_DECL (expo);
+  MPFR_GROUP_DECL (group);
+
+  MPFR_LOG_FUNC (("x[%#R]=%R rnd=%d", x, x, rnd_mode),
+                 ("y[%#R]=%R inexact=%d", y, y, inexact));
+
+  /* Special cases: tan(NaN) = tan(+-Inf) = NaN, tan(+-0) = +-0. */
+  if (MPFR_UNLIKELY(MPFR_IS_SINGULAR(x)))
+    {
+      if (MPFR_IS_NAN(x) || MPFR_IS_INF(x))
+        {
+          MPFR_SET_NAN(y);
+          MPFR_RET_NAN;
+        }
+      else /* x is zero */
+        {
+          MPFR_ASSERTD(MPFR_IS_ZERO(x));
+          MPFR_SET_ZERO(y);
+          MPFR_SET_SAME_SIGN(y, x);
+          MPFR_RET(0);
+        }
+    }
+
+  /* tan(x) = x + x^3/3 + ... so the error is < 2^(3*EXP(x)-1) */
+  MPFR_FAST_COMPUTE_IF_SMALL_INPUT (y, x, -2 * MPFR_GET_EXP (x), 1, 1,
+                                    rnd_mode, {});
+
+  MPFR_SAVE_EXPO_MARK (expo);
+
+  /* Compute initial precision */
+  precy = MPFR_PREC (y);
+  m = precy + MPFR_INT_CEIL_LOG2 (precy) + 13;
+  MPFR_ASSERTD (m >= 2); /* needed for the error analysis in algorithms.tex */
+
+  MPFR_GROUP_INIT_2 (group, m, s, c);
+  MPFR_ZIV_INIT (loop, m);
+  for (;;)
+    {
+      /* The only way to get an overflow is to get ~ Pi/2
+         But the result will be ~ 2^Prec(y). */
+      mpfr_sin_cos (s, c, x, MPFR_RNDN); /* err <= 1/2 ulp on s and c */
+      mpfr_div (c, s, c, MPFR_RNDN); /* err <= 4 ulps */
+      MPFR_ASSERTD (!MPFR_IS_SINGULAR (c));
+      if (MPFR_LIKELY (MPFR_CAN_ROUND (c, m - 2, precy, rnd_mode)))
+        break;
+      /* not enough correct bits: increase the working precision */
+      MPFR_ZIV_NEXT (loop, m);
+      MPFR_GROUP_REPREC_2 (group, m, s, c);
+    }
+  MPFR_ZIV_FREE (loop);
+  inexact = mpfr_set (y, c, rnd_mode);
+  MPFR_GROUP_CLEAR (group);
+
+  MPFR_SAVE_EXPO_FREE (expo);
+  return mpfr_check_range (y, inexact, rnd_mode);
+}
diff --git a/src/tanh.c b/src/tanh.c
new file mode 100644
index 000000000..23661b960
--- /dev/null
+++ b/src/tanh.c
@@ -0,0 +1,151 @@
+/* mpfr_tanh -- hyperbolic tangent
+
+Copyright 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#define MPFR_NEED_LONGLONG_H
+#include "mpfr-impl.h"
+
+/* Compute y = tanh(xt) rounded in direction rnd_mode; return the
+   ternary value.  Uses tanh(x) = (exp(2x)-1)/(exp(2x)+1), evaluated
+   on |xt|, with the sign restored at the end. */
+int
+mpfr_tanh (mpfr_ptr y, mpfr_srcptr xt , mpfr_rnd_t rnd_mode)
+{
+  /****** Declaration ******/
+  mpfr_t x;
+  int inexact;
+  MPFR_SAVE_EXPO_DECL (expo);
+
+  MPFR_LOG_FUNC (("x[%#R]=%R rnd=%d", xt, xt, rnd_mode),
+                 ("y[%#R]=%R inexact=%d", y, y, inexact));
+
+  /* Special value checking */
+  if (MPFR_UNLIKELY (MPFR_IS_SINGULAR (xt)))
+    {
+      if (MPFR_IS_NAN (xt))
+        {
+          MPFR_SET_NAN (y);
+          MPFR_RET_NAN;
+        }
+      else if (MPFR_IS_INF (xt))
+        {
+          /* tanh(inf) = 1 && tanh(-inf) = -1 */
+          return mpfr_set_si (y, MPFR_INT_SIGN (xt), rnd_mode);
+        }
+      else /* tanh (0) = 0 and xt is zero */
+        {
+          MPFR_ASSERTD (MPFR_IS_ZERO(xt));
+          MPFR_SET_ZERO (y);
+          MPFR_SET_SAME_SIGN (y, xt);
+          MPFR_RET (0);
+        }
+    }
+
+  /* tanh(x) = x - x^3/3 + ... so the error is < 2^(3*EXP(x)-1) */
+  MPFR_FAST_COMPUTE_IF_SMALL_INPUT (y, xt, -2 * MPFR_GET_EXP (xt), 1, 0,
+                                    rnd_mode, {});
+
+  /* x = |xt|: shares the significand of xt, no copy is made. */
+  MPFR_TMP_INIT_ABS (x, xt);
+
+  MPFR_SAVE_EXPO_MARK (expo);
+
+  /* General case */
+  {
+    /* Declaration of the intermediary variable */
+    mpfr_t t, te;
+    mpfr_exp_t d;
+
+    /* Declaration of the size variable */
+    mpfr_prec_t Ny = MPFR_PREC(y);   /* target precision */
+    mpfr_prec_t Nt;                  /* working precision */
+    long int err;                    /* error */
+    int sign = MPFR_SIGN (xt);
+    MPFR_ZIV_DECL (loop);
+    MPFR_GROUP_DECL (group);
+
+    /* First check for BIG overflow of exp(2*x):
+       For x > 0, exp(2*x) > 2^(2*x)
+       If 2 ^(2*x) > 2^emax or x>emax/2, there is an overflow */
+    if (MPFR_UNLIKELY (mpfr_cmp_si (x, __gmpfr_emax/2) >= 0)) {
+      /* initialise of intermediary variables
+         since 'set_one' label assumes the variables have been
+         initialize */
+      MPFR_GROUP_INIT_2 (group, MPFR_PREC_MIN, t, te);
+      goto set_one;
+    }
+
+    /* Compute the precision of intermediary variable */
+    /* The optimal number of bits: see algorithms.tex */
+    Nt = Ny + MPFR_INT_CEIL_LOG2 (Ny) + 4;
+    /* if x is small, there will be a cancellation in exp(2x)-1 */
+    if (MPFR_GET_EXP (x) < 0)
+      Nt += -MPFR_GET_EXP (x);
+
+    /* initialise of intermediary variable */
+    MPFR_GROUP_INIT_2 (group, Nt, t, te);
+
+    MPFR_ZIV_INIT (loop, Nt);
+    for (;;) {
+      /* tanh = (exp(2x)-1)/(exp(2x)+1) */
+      mpfr_mul_2ui (te, x, 1, MPFR_RNDN);  /* 2x */
+      /* since x > 0, we can only have an overflow */
+      mpfr_exp (te, te, MPFR_RNDN);        /* exp(2x) */
+      if (MPFR_UNLIKELY (MPFR_IS_INF (te))) {
+        /* exp(2x) overflowed: tanh(x) rounds to 1 (or just below). */
+      set_one:
+        inexact = MPFR_FROM_SIGN_TO_INT (sign);
+        mpfr_set4 (y, __gmpfr_one, MPFR_RNDN, sign);
+        if (MPFR_IS_LIKE_RNDZ (rnd_mode, MPFR_IS_NEG_SIGN (sign)))
+          {
+            /* rounding toward zero: |tanh| < 1, so step one ulp below */
+            inexact = -inexact;
+            mpfr_nexttozero (y);
+          }
+        break;
+      }
+      d = MPFR_GET_EXP (te);               /* For Error calculation */
+      mpfr_add_ui (t, te, 1, MPFR_RNDD);   /* exp(2x) + 1 */
+      mpfr_sub_ui (te, te, 1, MPFR_RNDU);  /* exp(2x) - 1 */
+      d = d - MPFR_GET_EXP (te);           /* bits cancelled in exp(2x)-1 */
+      mpfr_div (t, te, t, MPFR_RNDN);      /* (exp(2x)-1)/(exp(2x)+1) */
+
+      /* Calculation of the error */
+      d = MAX(3, d + 1);
+      err = Nt - (d + 1);
+
+      if (MPFR_LIKELY ((d <= Nt / 2) && MPFR_CAN_ROUND (t, err, Ny, rnd_mode)))
+        {
+          inexact = mpfr_set4 (y, t, rnd_mode, sign);
+          break;
+        }
+
+      /* if t=1, we still can round since |tanh(x)| < 1 */
+      if (MPFR_GET_EXP (t) == 1)
+        goto set_one;
+
+      /* Actualisation of the precision */
+      MPFR_ZIV_NEXT (loop, Nt);
+      MPFR_GROUP_REPREC_2 (group, Nt, t, te);
+    }
+    MPFR_ZIV_FREE (loop);
+    MPFR_GROUP_CLEAR (group);
+  }
+  MPFR_SAVE_EXPO_FREE (expo);
+  inexact = mpfr_check_range (y, inexact, rnd_mode);
+
+  return inexact;
+}
+
diff --git a/src/uceil_exp2.c b/src/uceil_exp2.c
new file mode 100644
index 000000000..b2ebc5d45
--- /dev/null
+++ b/src/uceil_exp2.c
@@ -0,0 +1,65 @@
+/* __gmpfr_ceil_exp2 - returns y >= 2^d
+
+Copyright 1999, 2000, 2001, 2002, 2003, 2004, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#include "mpfr-impl.h"
+
+/* returns y >= 2^d, assuming that d <= 1024
+   for d integer, returns exactly 2^d
+*/
+double
+__gmpfr_ceil_exp2 (double d)
+{
+  long exp;
+#if _GMP_IEEE_FLOATS
+  union ieee_double_extract x;
+#else
+  struct {double d;} x;
+#endif
+
+  MPFR_ASSERTN(d <= 1024.0);
+  exp = (long) d;
+  if (d != (double) exp)
+    exp++;
+  /* now exp >= ceil(d): truncation toward zero followed by the
+     increment gives exactly ceil(d) for d >= 0 or integer d, and
+     ceil(d)+1 for negative non-integer d -- still an upper bound,
+     so 2^exp >= 2^d in all cases. */
+  x.d = 1.0;
+#if _GMP_IEEE_FLOATS
+  /* Write 2^exp directly into the biased exponent field; clamp to the
+     smallest normal exponent (field value 1) when exp <= -1022, which
+     keeps the result >= 2^d. */
+  x.s.exp = exp <= -1022 ? 1 : 1023 + exp;
+#else
+  /* Portable fallback: build 2^exp by repeated doubling/halving. */
+  if (exp >= 0)
+    {
+      while (exp != 0)
+        {
+          x.d *= 2.0;
+          exp--;
+        }
+    }
+  else
+    {
+      while (exp != 0)
+        {
+          x.d *= (1.0 / 2.0);
+          exp++;
+        }
+    }
+#endif
+  return x.d;
+}
diff --git a/src/uceil_log2.c b/src/uceil_log2.c
new file mode 100644
index 000000000..da06dd9ff
--- /dev/null
+++ b/src/uceil_log2.c
@@ -0,0 +1,63 @@
+/* __gmpfr_ceil_log2 - returns ceil(log(d)/log(2))
+
+Copyright 1999, 2000, 2001, 2002, 2003, 2004, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#include "mpfr-impl.h"
+
+/* returns ceil(log(d)/log(2)) if d > 0,
+   -1023 if d = +0,
+   and floor(log(-d)/log(2))+1 if d < 0 */
+long
+__gmpfr_ceil_log2 (double d)
+{
+  long exp;
+#if _GMP_IEEE_FLOATS
+  union ieee_double_extract x;
+
+  x.d = d;
+  /* Unbiased exponent field: floor(log2(|d|)) for a normal d. */
+  exp = x.s.exp - 1023;
+  x.s.exp = 1023; /* value for 1 <= d < 2 */
+  if (x.d != 1.0) /* d: not a power of two? */
+    exp++;
+  return exp;
+#else
+  double m;
+
+  if (d < 0.0)
+    return __gmpfr_floor_log2(-d)+1;
+  else if (d == 0.0)
+    return -1023;
+  else if (d >= 1.0)
+    {
+      /* exponent of the smallest power of two >= d */
+      exp = 0;
+      for( m= 1.0 ; m < d ; m *=2.0 )
+        exp++;
+    }
+  else
+    {
+      /* 0 < d < 1: halve until m < d; the pre-increment of exp by 1
+         compensates for the final extra decrement */
+      exp = 1;
+      for( m= 1.0 ; m >= d ; m *= (1.0/2.0) )
+        exp--;
+    }
+#endif
+  return exp;
+}
+
diff --git a/src/ufloor_log2.c b/src/ufloor_log2.c
new file mode 100644
index 000000000..04b66e565
--- /dev/null
+++ b/src/ufloor_log2.c
@@ -0,0 +1,53 @@
+/* __gmpfr_floor_log2 - returns floor(log(d)/log(2))
+
+Copyright 1999, 2000, 2001, 2002, 2003, 2004, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#include "mpfr-impl.h"
+
+/* returns floor(log2(d)), assuming d >= 0 (in the IEEE branch, d = 0
+   has exponent field 0 and thus yields -1023) */
+long
+__gmpfr_floor_log2 (double d)
+{
+#if _GMP_IEEE_FLOATS
+  union ieee_double_extract x;
+
+  x.d = d;
+  /* Unbiased exponent field: floor(log2(d)) for a normal positive d. */
+  return (long) x.s.exp - 1023;
+#else
+  long exp;
+  double m;
+
+  MPFR_ASSERTD (d >= 0);
+  if (d >= 1.0)
+    {
+      /* double until m > d; start at -1 to compensate the first step */
+      exp = -1;
+      for( m= 1.0 ; m <= d ; m *=2.0 )
+        exp++;
+    }
+  else
+    {
+      /* 0 <= d < 1: halve until m <= d */
+      exp = 0;
+      for( m= 1.0 ; m > d ; m *= (1.0/2.0) )
+        exp--;
+    }
+  return exp;
+#endif
+}
diff --git a/src/ui_div.c b/src/ui_div.c
new file mode 100644
index 000000000..42d184f41
--- /dev/null
+++ b/src/ui_div.c
@@ -0,0 +1,96 @@
+/* mpfr_ui_div -- divide a machine integer by a floating-point number
+ mpfr_si_div -- divide a machine number by a floating-point number
+
+Copyright 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+
+#define MPFR_NEED_LONGLONG_H
+#include "mpfr-impl.h"
+
+/* y = u / x rounded in direction rnd_mode; returns the ternary value.
+   Special cases: u/NaN = NaN, u/Inf = +-0, u/0 = +-Inf for u != 0 and
+   NaN for u = 0 (0/0), 0/x = +-0. */
+int
+mpfr_ui_div (mpfr_ptr y, unsigned long int u, mpfr_srcptr x, mpfr_rnd_t rnd_mode)
+{
+  mpfr_t uu;
+  mp_limb_t up[1];
+  unsigned long cnt;
+
+  if (MPFR_UNLIKELY(MPFR_IS_SINGULAR(x)))
+    {
+      if (MPFR_IS_NAN(x))
+        {
+          MPFR_SET_NAN(y);
+          MPFR_RET_NAN;
+        }
+      else if (MPFR_IS_INF(x)) /* u/Inf = 0 */
+        {
+          MPFR_SET_ZERO(y);
+          MPFR_SET_SAME_SIGN(y,x);
+          MPFR_RET(0);
+        }
+      else /* u / 0 */
+        {
+          MPFR_ASSERTD(MPFR_IS_ZERO(x));
+          if (u)
+            {
+              /* u > 0, so y = sign(x) * Inf */
+              MPFR_SET_SAME_SIGN(y, x);
+              MPFR_SET_INF(y);
+              MPFR_RET(0);
+            }
+          else
+            {
+              /* 0 / 0 */
+              MPFR_SET_NAN(y);
+              MPFR_RET_NAN;
+            }
+        }
+    }
+  else if (MPFR_LIKELY(u != 0))
+    {
+      /* Wrap u in a temporary one-limb mpfr number uu, with the limb
+         shifted so that its most significant bit is set. */
+      MPFR_TMP_INIT1(up, uu, GMP_NUMB_BITS);
+      MPFR_ASSERTN(u == (mp_limb_t) u);
+      count_leading_zeros(cnt, (mp_limb_t) u);
+      up[0] = (mp_limb_t) u << cnt;
+      MPFR_SET_EXP (uu, GMP_NUMB_BITS - cnt);
+      return mpfr_div (y, uu, x, rnd_mode);
+    }
+  else /* u = 0, and x != 0 */
+    {
+      MPFR_SET_ZERO(y);         /* if u=0, then set y to 0 */
+      MPFR_SET_SAME_SIGN(y, x); /* u considered as +0: sign(+0/x) = sign(x) */
+      MPFR_RET(0);
+    }
+}
+
+
+/* y = u / x for a signed machine integer u, rounded in direction
+   rnd_mode; returns the ternary value.  Implemented on top of
+   mpfr_ui_div with |u|, inverting the rounding direction and negating
+   the result's sign and ternary value when u < 0. */
+int
+mpfr_si_div (mpfr_ptr y, long int u, mpfr_srcptr x, mpfr_rnd_t rnd_mode)
+{
+  int res;
+
+  if (u >= 0)
+    res = mpfr_ui_div (y, u, x, rnd_mode);
+  else
+    {
+      /* Compute |u| as - (unsigned long) u: writing -u directly would
+         be undefined behavior (signed overflow) when u == LONG_MIN. */
+      res = -mpfr_ui_div (y, - (unsigned long) u, x,
+                          MPFR_INVERT_RND (rnd_mode));
+      MPFR_CHANGE_SIGN (y);
+    }
+  return res;
+}
diff --git a/src/ui_pow.c b/src/ui_pow.c
new file mode 100644
index 000000000..348394d5f
--- /dev/null
+++ b/src/ui_pow.c
@@ -0,0 +1,41 @@
+/* mpfr_ui_pow -- power of n function n^x
+
+Copyright 2001, 2002, 2003, 2004, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#include "mpfr-impl.h"
+
+/* Compute y = n^x rounded in direction rnd_mode and return the ternary
+   value.  n is converted exactly to a temporary mpfr number, and the
+   computation is delegated to mpfr_pow. */
+int
+mpfr_ui_pow (mpfr_ptr y, unsigned long int n, mpfr_srcptr x, mpfr_rnd_t rnd_mode)
+{
+  mpfr_t base;
+  int inex;
+  MPFR_SAVE_EXPO_DECL (expo);
+
+  MPFR_SAVE_EXPO_MARK (expo);
+  /* sizeof(n) * CHAR_BIT bits hold any unsigned long exactly */
+  mpfr_init2 (base, sizeof (n) * CHAR_BIT);
+  inex = mpfr_set_ui (base, n, MPFR_RNDN);
+  MPFR_ASSERTN (!inex);
+  inex = mpfr_pow (y, base, x, rnd_mode);
+  mpfr_clear (base);
+  MPFR_SAVE_EXPO_UPDATE_FLAGS (expo, __gmpfr_flags);
+  MPFR_SAVE_EXPO_FREE (expo);
+  return mpfr_check_range (y, inex, rnd_mode);
+}
diff --git a/src/ui_pow_ui.c b/src/ui_pow_ui.c
new file mode 100644
index 000000000..74d4556a2
--- /dev/null
+++ b/src/ui_pow_ui.c
@@ -0,0 +1,95 @@
+/* mpfr_ui_pow_ui -- compute the power y^n of two machine integers
+
+Copyright 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#include "mpfr-impl.h"
+
+/* Compute x = y^n for machine integers y and n, rounded in direction
+   rnd; returns the ternary value.  Uses binary exponentiation with
+   every intermediate operation rounded up, so the cumulated error is
+   bounded by 1 + floor(log2(n)) ulps. */
+int
+mpfr_ui_pow_ui (mpfr_ptr x, unsigned long int y, unsigned long int n,
+                mpfr_rnd_t rnd)
+{
+  mpfr_exp_t err;
+  unsigned long m;
+  mpfr_t res;
+  mpfr_prec_t prec;
+  int size_n;
+  int inexact;
+  MPFR_ZIV_DECL (loop);
+  MPFR_SAVE_EXPO_DECL (expo);
+
+  if (MPFR_UNLIKELY (n <= 1))
+    {
+      if (n == 1)
+        return mpfr_set_ui (x, y, rnd);     /* y^1 = y */
+      else
+        return mpfr_set_ui (x, 1, rnd);     /* y^0 = 1 for any y */
+    }
+  else if (MPFR_UNLIKELY (y <= 1))
+    {
+      if (y == 1)
+        return mpfr_set_ui (x, 1, rnd);     /* 1^n = 1 for any n > 0 */
+      else
+        return mpfr_set_ui (x, 0, rnd);     /* 0^n = 0 for any n > 0 */
+    }
+
+  /* size_n = number of significant bits of n */
+  for (size_n = 0, m = n; m; size_n++, m >>= 1);
+
+  MPFR_SAVE_EXPO_MARK (expo);
+  prec = MPFR_PREC (x) + 3 + size_n;
+  mpfr_init2 (res, prec);
+
+  /* Ziv loop: raise the working precision until rounding is possible */
+  MPFR_ZIV_INIT (loop, prec);
+  for (;;)
+    {
+      int i = size_n;
+
+      inexact = mpfr_set_ui (res, y, MPFR_RNDU);
+      err = 1;
+      /* now 2^(i-1) <= n < 2^i: i=1+floor(log2(n)) */
+      /* Square-and-multiply, scanning the remaining bits of n from the
+         most significant downwards. */
+      for (i -= 2; i >= 0; i--)
+        {
+          inexact |= mpfr_mul (res, res, res, MPFR_RNDU);
+          err++;
+          if (n & (1UL << i))
+            inexact |= mpfr_mul_ui (res, res, y, MPFR_RNDU);
+        }
+      /* since the loop is executed floor(log2(n)) times,
+         we have err = 1+floor(log2(n)).
+         Since prec >= MPFR_PREC(x) + 4 + floor(log2(n)), prec > err */
+      err = prec - err;
+
+      if (MPFR_LIKELY (inexact == 0
+                       || MPFR_CAN_ROUND (res, err, MPFR_PREC (x), rnd)))
+        break;
+
+      /* Actualisation of the precision */
+      MPFR_ZIV_NEXT (loop, prec);
+      mpfr_set_prec (res, prec);
+    }
+  MPFR_ZIV_FREE (loop);
+
+  inexact = mpfr_set (x, res, rnd);
+
+  mpfr_clear (res);
+
+  MPFR_SAVE_EXPO_FREE (expo);
+  return mpfr_check_range (x, inexact, rnd);
+}
diff --git a/src/ui_sub.c b/src/ui_sub.c
new file mode 100644
index 000000000..7b775aebb
--- /dev/null
+++ b/src/ui_sub.c
@@ -0,0 +1,63 @@
+/* mpfr_ui_sub -- subtract a floating-point number from an integer
+
+Copyright 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#define MPFR_NEED_LONGLONG_H
+#include "mpfr-impl.h"
+
+/* y = u - x rounded in direction rnd_mode; returns the ternary value.
+   Special cases: 0 - x = -x, u - NaN = NaN, u - (+-Inf) = -+Inf,
+   u - 0 = u. */
+int
+mpfr_ui_sub (mpfr_ptr y, unsigned long int u, mpfr_srcptr x, mpfr_rnd_t rnd_mode)
+{
+  mpfr_t uu;
+  mp_limb_t up[1];
+  unsigned long cnt;
+
+  /* 0 - x = -x */
+  if (MPFR_UNLIKELY (u == 0))
+    return mpfr_neg (y, x, rnd_mode);
+
+  if (MPFR_UNLIKELY(MPFR_IS_SINGULAR(x)))
+    {
+      if (MPFR_IS_NAN(x))
+        {
+          MPFR_SET_NAN(y);
+          MPFR_RET_NAN;
+        }
+      else if (MPFR_IS_INF(x))
+        {
+          /* u - Inf = -Inf and u - -Inf = +Inf */
+          MPFR_SET_INF(y);
+          MPFR_SET_OPPOSITE_SIGN(y,x);
+          MPFR_RET(0); /* +/-infinity is exact */
+        }
+      else /* x is zero */
+        /* u - 0 = u */
+        return mpfr_set_ui(y, u, rnd_mode);
+    }
+  else
+    {
+      /* Wrap u in a temporary one-limb mpfr number uu, with the limb
+         shifted so that its most significant bit is set. */
+      MPFR_TMP_INIT1 (up, uu, GMP_NUMB_BITS);
+      MPFR_ASSERTN(u == (mp_limb_t) u);
+      count_leading_zeros (cnt, (mp_limb_t) u);
+      *up = (mp_limb_t) u << cnt;
+      MPFR_SET_EXP (uu, GMP_NUMB_BITS - cnt);
+      return mpfr_sub (y, uu, x, rnd_mode);
+    }
+}
diff --git a/src/urandom.c b/src/urandom.c
new file mode 100644
index 000000000..842d31e16
--- /dev/null
+++ b/src/urandom.c
@@ -0,0 +1,143 @@
+/* mpfr_urandom (rop, state, rnd_mode) -- Generate a uniform pseudorandom
+ real number between 0 and 1 (exclusive) and round it to the precision of rop
+ according to the given rounding mode.
+
+Copyright 2000, 2001, 2002, 2003, 2004, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+
+#define MPFR_NEED_LONGLONG_H
+#include "mpfr-impl.h"
+
+
+/* Draw one uniformly distributed random bit (0 or 1) from RSTATE; used
+   to break ties when rounding to nearest. */
+static int
+random_rounding_bit (gmp_randstate_t rstate)
+{
+  mp_limb_t bit;
+
+  mpfr_rand_raw (&bit, rstate, 1);
+  return (int) (bit & MPFR_LIMB_ONE);
+}
+
+
+/* mpfr_urandom -- generate a uniformly distributed random real in (0,1)
+   and round it to the precision of rop with rounding mode rnd_mode.
+   Return a ternary value: +1 when rounded up, -1 when rounded down
+   (the underlying real number is never exactly representable). */
+int
+mpfr_urandom (mpfr_ptr rop, gmp_randstate_t rstate, mpfr_rnd_t rnd_mode)
+{
+  mp_ptr rp;
+  mpfr_prec_t nbits;
+  mp_size_t nlimbs;
+  mp_size_t n;
+  mpfr_exp_t exp;
+  mpfr_exp_t emin;
+  int cnt;
+  int inex;
+
+  rp = MPFR_MANT (rop);
+  nbits = MPFR_PREC (rop);
+  nlimbs = MPFR_LIMB_SIZE (rop);
+  MPFR_SET_POS (rop);
+  exp = 0;
+  emin = mpfr_get_emin ();
+  if (MPFR_UNLIKELY (emin > 0))
+    {
+      /* The whole interval (0,1) underflows: return either zero or the
+         smallest representable positive number 2^(emin-1).  For RNDN,
+         only emin == 1 makes both results possible (midpoint 1/2), and
+         the choice is then a fair coin flip. */
+      if (rnd_mode == MPFR_RNDU || rnd_mode == MPFR_RNDA
+          || (emin == 1 && rnd_mode == MPFR_RNDN
+              && random_rounding_bit (rstate)))
+        {
+          mpfr_set_ui_2exp (rop, 1, emin - 1, rnd_mode);
+          return +1;
+        }
+      else
+        {
+          MPFR_SET_ZERO (rop);
+          return -1;
+        }
+    }
+
+  /* Exponent: draw limbs until one is non-zero; each zero limb lowers
+     the exponent by GMP_NUMB_BITS. */
+  cnt = GMP_NUMB_BITS;
+  while (cnt == GMP_NUMB_BITS)
+    {
+      /* generate one random limb rp[0] */
+      mpfr_rand_raw (rp, rstate, GMP_NUMB_BITS);
+      if (MPFR_UNLIKELY (rp[0] == 0))
+        cnt = GMP_NUMB_BITS;
+      else
+        count_leading_zeros (cnt, rp[0]);
+
+      if (MPFR_UNLIKELY (exp < emin + cnt))
+        {
+          /* To get here, we have been drawing more than -emin zeros
+             in a row, then return 0 or the smallest representable
+             positive number.
+
+             The rounding to nearest mode is subtle:
+             If exp - cnt == emin - 1, the rounding bit is set, except
+             if cnt == GMP_NUMB_BITS in which case the rounding bit is
+             outside rp[0] and must be generated. */
+          /* exp - cnt == emin - 1 is equivalent to
+             cnt == exp - emin + 1; the previous test used "- 1", which
+             is incompatible with the guard exp < emin + cnt and made
+             the RNDN round-up unreachable. */
+          if (rnd_mode == MPFR_RNDU || rnd_mode == MPFR_RNDA
+              || (rnd_mode == MPFR_RNDN && cnt == exp - emin + 1
+                  && (cnt != GMP_NUMB_BITS || random_rounding_bit (rstate))))
+            {
+              mpfr_set_ui_2exp (rop, 1, emin - 1, rnd_mode);
+              return +1;
+            }
+          else
+            {
+              MPFR_SET_ZERO (rop);
+              return -1;
+            }
+        }
+      exp -= cnt;
+    }
+  MPFR_EXP (rop) = exp; /* Warning: may be outside the current
+                           exponent range */
+
+
+  /* Significand: fill all limbs with random bits */
+  mpfr_rand_raw (rp, rstate, nlimbs * GMP_NUMB_BITS);
+
+  /* Set the msb to 1 since it was fixed by the exponent choice */
+  rp[nlimbs - 1] |= MPFR_LIMB_HIGHBIT;
+
+  /* If nbits isn't a multiple of GMP_NUMB_BITS, mask the low bits */
+  n = nlimbs * GMP_NUMB_BITS - nbits;
+  if (MPFR_LIKELY (n != 0))
+    rp[0] &= ~MPFR_LIMB_MASK (n);
+
+
+  /* Rounding: the infinite random tail makes the value inexact; round
+     up with probability 1/2 for RNDN. */
+  if (rnd_mode == MPFR_RNDU || rnd_mode == MPFR_RNDA
+      || (rnd_mode == MPFR_RNDN && random_rounding_bit (rstate)))
+    {
+      /* Take care of the exponent range: it may have been reduced */
+      if (exp < emin)
+        mpfr_set_ui_2exp (rop, 1, emin - 1, rnd_mode);
+      else if (exp > mpfr_get_emax ())
+        mpfr_set_inf (rop, +1); /* overflow, flag set by mpfr_check_range */
+      else
+        mpfr_nextabove (rop);
+      inex = +1;
+    }
+  else
+    inex = -1;
+
+  return mpfr_check_range (rop, inex, rnd_mode);
+}
diff --git a/src/urandomb.c b/src/urandomb.c
new file mode 100644
index 000000000..e632ca83d
--- /dev/null
+++ b/src/urandomb.c
@@ -0,0 +1,98 @@
+/* mpfr_urandomb (rop, state, nbits) -- Generate a uniform pseudorandom
+ real number between 0 (inclusive) and 1 (exclusive) of size NBITS,
+ using STATE as the random state previously initialized by a call to
+ gmp_randinit_lc_2exp_size().
+
+Copyright 2000, 2001, 2002, 2003, 2004, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+
+#define MPFR_NEED_LONGLONG_H
+#include "mpfr-impl.h"
+
+/* Fill the NBITS first bits of the limb array MP -- whose capacity is
+   ceil(NBITS/GMP_NUMB_BITS) limbs -- with uniform random bits drawn
+   from RSTATE.  NBITS must be at least 1. */
+void
+mpfr_rand_raw (mp_ptr mp, gmp_randstate_t rstate, unsigned long int nbits)
+{
+  mpz_t z;
+
+  MPFR_ASSERTN (nbits >= 1);
+  /* To be sure to avoid the potential allocation of mpz_urandomb,
+     build a fake mpz_t whose limb array is the caller's buffer.  The
+     announced size must be exactly ceil(nbits/GMP_NUMB_BITS): the
+     previous formula nbits/GMP_NUMB_BITS + 1 claimed one limb more
+     than the buffer holds whenever nbits is a multiple of
+     GMP_NUMB_BITS (as in the calls from mpfr_urandomb). */
+  ALLOC(z) = SIZ(z) = (nbits - 1) / GMP_NUMB_BITS + 1;
+  PTR(z) = mp;
+  mpz_urandomb(z, rstate, nbits);
+}
+
+/* mpfr_urandomb -- generate a uniformly distributed random number in
+   [0,1) whose fractional bits are drawn from RSTATE.
+   Return 0 on success, non-zero if the resulting exponent is outside
+   the current exponent range (rop is then set to NaN). */
+int
+mpfr_urandomb (mpfr_ptr rop, gmp_randstate_t rstate)
+{
+  mp_ptr rp;
+  mpfr_prec_t nbits;
+  mp_size_t nlimbs;
+  mp_size_t k; /* number of high zero limbs */
+  mpfr_exp_t exp;
+  int cnt;
+
+  rp = MPFR_MANT (rop);
+  nbits = MPFR_PREC (rop);
+  nlimbs = MPFR_LIMB_SIZE (rop);
+  MPFR_SET_POS (rop);
+
+  /* Uniform non-normalized significand */
+  mpfr_rand_raw (rp, rstate, nlimbs * GMP_NUMB_BITS);
+
+  /* If nbits isn't a multiple of GMP_NUMB_BITS, mask the low bits */
+  cnt = nlimbs * GMP_NUMB_BITS - nbits;
+  if (MPFR_LIKELY (cnt != 0))
+    rp[0] &= ~MPFR_LIMB_MASK (cnt);
+
+  /* Count the null significant limbs and remaining limbs */
+  exp = 0;
+  k = 0;
+  while (nlimbs != 0 && rp[nlimbs - 1] == 0)
+    {
+      k ++;
+      nlimbs --;
+      exp -= GMP_NUMB_BITS;
+    }
+
+  if (MPFR_LIKELY (nlimbs != 0)) /* otherwise value is zero */
+    {
+      count_leading_zeros (cnt, rp[nlimbs - 1]);
+      /* Normalization */
+      if (mpfr_set_exp (rop, exp - cnt))
+        {
+          /* If the exponent is not in the current exponent range, we
+             choose to return a NaN as this is probably a user error.
+             Indeed this can happen only if the exponent range has been
+             reduced to a very small interval and/or the precision is
+             huge (very unlikely). */
+          MPFR_SET_NAN (rop);
+          __gmpfr_flags |= MPFR_FLAGS_NAN; /* Can't use MPFR_RET_NAN */
+          return 1;
+        }
+      if (cnt != 0)
+        mpn_lshift (rp + k, rp, nlimbs, cnt);
+      else if (k != 0)
+        {
+          /* The significand is already bit-normalized (cnt == 0), but
+             since high zero limbs were skipped it must still be moved
+             up to the most significant limbs; otherwise MPN_ZERO below
+             would destroy its low limbs.  Copy from the high end, as
+             source and destination overlap. */
+          mp_size_t i;
+
+          for (i = nlimbs - 1; i >= 0; i--)
+            rp[k + i] = rp[i];
+        }
+      if (k != 0)
+        MPN_ZERO (rp, k);
+    }
+  else
+    MPFR_SET_ZERO (rop);
+
+  return 0;
+}
diff --git a/src/vasprintf.c b/src/vasprintf.c
new file mode 100644
index 000000000..d9efc9459
--- /dev/null
+++ b/src/vasprintf.c
@@ -0,0 +1,2204 @@
+/* mpfr_vasprintf -- main function for the printf functions family
+ plus helper macros & functions.
+
+Copyright 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+/* The mpfr_printf-like functions are defined only if stdarg.h exists */
+#ifdef HAVE_STDARG
+
+#include <stdarg.h>
+
+#ifndef HAVE_VA_COPY
+# ifdef HAVE___VA_COPY
+# define va_copy(dst,src) __va_copy(dst, src)
+# else
+/* autoconf manual advocates this fallback.
+ This is also the solution chosen by gmp */
+# define va_copy(dst,src) \
+ do { memcpy(&(dst), &(src), sizeof(va_list)); } while (0)
+# endif /* HAVE___VA_COPY */
+#endif /* HAVE_VA_COPY */
+
+#ifdef HAVE_WCHAR_H
+#include <wchar.h>
+#endif
+
+#if defined (__cplusplus)
+#include <cstddef>
+#define __STDC_LIMIT_MACROS /* SIZE_MAX defined with stdint.h inclusion */
+#else
+#include <stddef.h> /* for ptrdiff_t */
+#endif
+
+#if HAVE_INTTYPES_H
+# include <inttypes.h>
+#endif
+#if HAVE_STDINT_H
+# include <stdint.h>
+#endif
+
+#define MPFR_NEED_LONGLONG_H
+#include "mpfr-impl.h"
+
+/* Define a length modifier corresponding to mpfr_prec_t.
+ We use literal string instead of literal character so as to permit future
+ extension to long long int ("ll"). */
+#if _MPFR_PREC_FORMAT == 1
+#define MPFR_PREC_FORMAT_TYPE "h"
+#define MPFR_PREC_FORMAT_SIZE 1
+#elif _MPFR_PREC_FORMAT == 2
+#define MPFR_PREC_FORMAT_TYPE ""
+#define MPFR_PREC_FORMAT_SIZE 0
+#elif _MPFR_PREC_FORMAT == 3
+#define MPFR_PREC_FORMAT_TYPE "l"
+#define MPFR_PREC_FORMAT_SIZE 1
+#else
+#error "mpfr_prec_t size not supported"
+#endif
+
+#if (__GMP_MP_SIZE_T_INT == 1)
+#define MPFR_EXP_FORMAT_SPEC "i"
+#elif (__GMP_MP_SIZE_T_INT == 0)
+#define MPFR_EXP_FORMAT_SPEC "li"
+#else
+#error "mpfr_exp_t size not supported"
+#endif
+
+/* Output for special values defined in the C99 standard */
+#define MPFR_NAN_STRING_LC "nan"
+#define MPFR_NAN_STRING_UC "NAN"
+#define MPFR_NAN_STRING_LENGTH 3
+#define MPFR_INF_STRING_LC "inf"
+#define MPFR_INF_STRING_UC "INF"
+#define MPFR_INF_STRING_LENGTH 3
+
+/* The implicit \0 is useless, but we do not write num_to_text[16]
+ otherwise g++ complains. */
+static const char num_to_text[] = "0123456789abcdef";
+
+/* Some macros and functions for parsing the format string */
+
+/* Read an integer from FORMAT into SPECINFO.FIELD; the value saturates
+   to INT_MAX instead of overflowing.  A '*' fetches the value from the
+   argument list AP; the missing 'break' after the '*' case is
+   deliberate: control falls through to the default case, which leaves
+   the parsing loop via LABEL_OUT. */
+#define READ_INT(ap, format, specinfo, field, label_out)        \
+  do {                                                          \
+    while (*(format))                                           \
+      {                                                         \
+        int _i;                                                 \
+        switch (*(format))                                      \
+          {                                                     \
+          case '0':                                             \
+          case '1':                                             \
+          case '2':                                             \
+          case '3':                                             \
+          case '4':                                             \
+          case '5':                                             \
+          case '6':                                             \
+          case '7':                                             \
+          case '8':                                             \
+          case '9':                                             \
+            specinfo.field = (specinfo.field <= INT_MAX / 10) ? \
+              specinfo.field * 10 : INT_MAX;                    \
+            _i = *(format) - '0';                               \
+            MPFR_ASSERTN (_i >= 0 && _i <= 9);                  \
+            specinfo.field = (specinfo.field <= INT_MAX - _i) ? \
+              specinfo.field + _i : INT_MAX;                    \
+            ++(format);                                         \
+            break;                                              \
+          case '*':                                             \
+            specinfo.field = va_arg ((ap), int);                \
+            ++(format);                                         \
+          default:                                              \
+            goto label_out;                                     \
+          }                                                     \
+      }                                                         \
+  } while (0)
+
+/* arg_t contains all the types described by the 'type' field of the
+   format string.  NONE (value 0) means no length modifier was seen;
+   UNSUPPORTED marks modifiers disabled at configure time (NPRINTF_*). */
+enum arg_t
+  {
+    NONE,
+    CHAR_ARG,            /* hh */
+    SHORT_ARG,           /* h  */
+    LONG_ARG,            /* l  */
+    LONG_LONG_ARG,       /* ll */
+    INTMAX_ARG,          /* j  */
+    SIZE_ARG,            /* z  */
+    PTRDIFF_ARG,         /* t  */
+    LONG_DOUBLE_ARG,     /* L  */
+    MPF_ARG,             /* F  */
+    MPQ_ARG,             /* Q  */
+    MP_LIMB_ARG,         /* M  */
+    MP_LIMB_ARRAY_ARG,   /* N  */
+    MPZ_ARG,             /* Z  */
+    MPFR_PREC_ARG,       /* P (mpfr-specific) */
+    MPFR_ARG,            /* R (mpfr-specific) */
+    UNSUPPORTED
+  };
+
+/* Each conversion specification of the format string will be translated in a
+   printf_spec structure by the parser.
+   This structure is adapted from the GNU libc one.
+   A negative 'prec' means "no precision specified" -- TODO confirm
+   against the full parser, which is outside this excerpt. */
+struct printf_spec
+{
+  unsigned int alt:1;           /* # flag */
+  unsigned int space:1;         /* Space flag */
+  unsigned int left:1;          /* - flag */
+  unsigned int showsign:1;      /* + flag */
+  unsigned int group:1;         /* ' flag */
+
+  int width;                    /* Width */
+  int prec;                     /* Precision */
+
+  enum arg_t arg_type;          /* Type of argument */
+  mpfr_rnd_t rnd_mode;          /* Rounding mode */
+  char spec;                    /* Conversion specifier */
+
+  char pad;                     /* Padding character */
+};
+
+/* Reset SPECINFO to the defaults used before parsing a conversion
+   specification: all flags cleared, width and precision 0, no length
+   modifier, round to nearest, pad with spaces. */
+static void
+specinfo_init (struct printf_spec *specinfo)
+{
+  /* clear every flag and numeric field (arg_type NONE, spec '\0') */
+  memset (specinfo, 0, sizeof (struct printf_spec));
+  /* non-zero defaults */
+  specinfo->rnd_mode = MPFR_RNDN;
+  specinfo->pad = ' ';
+}
+
+/* Classify an enum arg_t value: does the length modifier denote a
+   floating-point argument?  (used by specinfo_is_valid) */
+#define FLOATING_POINT_ARG_TYPE(at) \
+  ((at) == MPFR_ARG || (at) == MPF_ARG || (at) == LONG_DOUBLE_ARG)
+
+/* Does the length modifier denote an integer-like argument? */
+#define INTEGER_LIKE_ARG_TYPE(at)                                       \
+  ((at) == SHORT_ARG || (at) == LONG_ARG || (at) == LONG_LONG_ARG       \
+   || (at) == INTMAX_ARG || (at) == MPFR_PREC_ARG || (at) == MPZ_ARG    \
+   || (at) == MPQ_ARG || (at) == MP_LIMB_ARG || (at) == MP_LIMB_ARRAY_ARG \
+   || (at) == CHAR_ARG || (at) == SIZE_ARG || (at) == PTRDIFF_ARG)
+
+/* Is the (specifier, length modifier) combination in SPEC meaningful?
+   Return 1 if valid, 0 if invalid, and -1 for the rejected '%n'. */
+static int
+specinfo_is_valid (struct printf_spec spec)
+{
+  const char c = spec.spec;
+
+  if (c == 'n')
+    return -1;                  /* %n is never supported */
+
+  /* floating-point conversions */
+  if (c == 'a' || c == 'A' || c == 'e' || c == 'E'
+      || c == 'f' || c == 'F' || c == 'g' || c == 'G')
+    return (spec.arg_type == NONE
+            || FLOATING_POINT_ARG_TYPE (spec.arg_type));
+
+  /* binary output is mpfr-specific */
+  if (c == 'b')
+    return spec.arg_type == MPFR_ARG;
+
+  /* integer conversions */
+  if (c == 'd' || c == 'i' || c == 'u' || c == 'o'
+      || c == 'x' || c == 'X')
+    return (spec.arg_type == NONE
+            || INTEGER_LIKE_ARG_TYPE (spec.arg_type));
+
+  /* characters and strings, possibly wide ('l') */
+  if (c == 'c' || c == 's')
+    return (spec.arg_type == NONE || spec.arg_type == LONG_ARG);
+
+  /* pointers take no length modifier */
+  if (c == 'p')
+    return spec.arg_type == NONE;
+
+  return 0;
+}
+
+/* Parse the flag characters following the '%' of a conversion
+   specification, recording them in SPECINFO.  Return a pointer to the
+   first character that is not a flag (possibly the terminating '\0'). */
+static const char *
+parse_flags (const char *format, struct printf_spec *specinfo)
+{
+  for (;;)
+    {
+      switch (*format)
+        {
+        case '0':
+          specinfo->pad = '0';
+          break;
+        case '#':
+          specinfo->alt = 1;
+          break;
+        case '+':
+          specinfo->showsign = 1;
+          break;
+        case ' ':
+          specinfo->space = 1;
+          break;
+        case '-':
+          specinfo->left = 1;
+          break;
+        case '\'':
+          /* Single UNIX Specification for thousand separator */
+          specinfo->group = 1;
+          break;
+        default:
+          /* '\0' or any non-flag character ends the flag field */
+          return format;
+        }
+      ++format;
+    }
+}
+
+/* Parse the length modifier ('h', 'hh', 'l', 'll', 'j', 'z', 't', 'L',
+   and the GMP/MPFR extensions 'F', 'Q', 'M', 'N', 'Z', 'P', 'R') and
+   record the corresponding enum arg_t in SPECINFO.  Return a pointer to
+   the first character after the modifier.  Modifiers disabled at
+   configure time (NPRINTF_*) are mapped to UNSUPPORTED. */
+static const char *
+parse_arg_type (const char *format, struct printf_spec *specinfo)
+{
+  switch (*format)
+    {
+    case '\0':
+      break;
+    case 'h':
+      if (*++format == 'h')
+#ifndef NPRINTF_HH
+        {
+          ++format;
+          specinfo->arg_type = CHAR_ARG;
+        }
+#else
+        /* NOTE(review): in this branch the second 'h' is NOT consumed,
+           so it will be seen as the conversion specifier -- presumably
+           harmless since the spec is then invalid anyway; confirm. */
+        specinfo->arg_type = UNSUPPORTED;
+#endif
+      else
+        specinfo->arg_type = SHORT_ARG;
+      break;
+    case 'l':
+      if (*++format == 'l')
+        {
+          ++format;
+#if defined (HAVE_LONG_LONG) && !defined(NPRINTF_LL)
+          specinfo->arg_type = LONG_LONG_ARG;
+#else
+          specinfo->arg_type = UNSUPPORTED;
+#endif
+          break;
+        }
+      else
+        {
+          /* both branches break: no fallthrough into case 'j' */
+          specinfo->arg_type = LONG_ARG;
+          break;
+        }
+    case 'j':
+      ++format;
+#if defined(_MPFR_H_HAVE_INTMAX_T) && !defined(NPRINTF_J)
+      specinfo->arg_type = INTMAX_ARG;
+#else
+      specinfo->arg_type = UNSUPPORTED;
+#endif
+      break;
+    case 'z':
+      ++format;
+      specinfo->arg_type = SIZE_ARG;
+      break;
+    case 't':
+      ++format;
+#ifndef NPRINTF_T
+      specinfo->arg_type = PTRDIFF_ARG;
+#else
+      specinfo->arg_type = UNSUPPORTED;
+#endif
+      break;
+    case 'L':
+      ++format;
+#ifndef NPRINTF_L
+      specinfo->arg_type = LONG_DOUBLE_ARG;
+#else
+      specinfo->arg_type = UNSUPPORTED;
+#endif
+      break;
+    case 'F':
+      ++format;
+      specinfo->arg_type = MPF_ARG;
+      break;
+    case 'Q':
+      ++format;
+      specinfo->arg_type = MPQ_ARG;
+      break;
+    case 'M':
+      ++format;
+      /* The 'M' specifier was added in gmp 4.2.0 */
+      specinfo->arg_type = MP_LIMB_ARG;
+      break;
+    case 'N':
+      ++format;
+      specinfo->arg_type = MP_LIMB_ARRAY_ARG;
+      break;
+    case 'Z':
+      ++format;
+      specinfo->arg_type = MPZ_ARG;
+      break;
+
+      /* mpfr specific specifiers */
+    case 'P':
+      ++format;
+      specinfo->arg_type = MPFR_PREC_ARG;
+      break;
+    case 'R':
+      ++format;
+      specinfo->arg_type = MPFR_ARG;
+    }
+  return format;
+}
+
+
+/* some macros and functions filling the buffer */
+
+/* CONSUME_VA_ARG removes from va_list AP the type expected by SPECINFO */
+
+/* With a C++ compiler wchar_t and enumeration in va_list are converted to
+   integer type: int, unsigned int, long or unsigned long (unfortunately,
+   this is implementation dependent).
+   We follow gmp which assumes in print/doprnt.c that wchar_t is converted
+   to int (because wchar_t <= int).
+   For wint_t, we assume that the case WINT_MAX < INT_MAX yields an
+   integer promotion. */
+#ifdef HAVE_WCHAR_H
+#if defined(WINT_MAX) && WINT_MAX < INT_MAX
+typedef int mpfr_va_wint;  /* integer promotion */
+#else
+typedef wint_t mpfr_va_wint;
+#endif
+/* Skip one 'l'-modified argument from AP: the consumed type depends on
+   the conversion specifier (%lc takes a wint_t, %ls a wchar_t*). */
+#define CASE_LONG_ARG(specinfo, ap)                                     \
+  case LONG_ARG:                                                        \
+  if (((specinfo).spec == 'd') || ((specinfo).spec == 'i')              \
+      || ((specinfo).spec == 'o') || ((specinfo).spec == 'u')           \
+      || ((specinfo).spec == 'x') || ((specinfo).spec == 'X'))          \
+    (void) va_arg ((ap), long);                                         \
+  else if ((specinfo).spec == 'c')                                      \
+    (void) va_arg ((ap), mpfr_va_wint);                                 \
+  else if ((specinfo).spec == 's')                                      \
+    (void) va_arg ((ap), int); /* we assume integer promotion */        \
+  break;
+#else
+/* Without wchar.h only %ld-style arguments can occur. */
+#define CASE_LONG_ARG(specinfo, ap) \
+  case LONG_ARG:                    \
+  (void) va_arg ((ap), long);       \
+  break;
+#endif
+
+/* Skip one intmax_t argument, when the platform has the type. */
+#if defined(_MPFR_H_HAVE_INTMAX_T)
+#define CASE_INTMAX_ARG(specinfo, ap) \
+  case INTMAX_ARG:                    \
+  (void) va_arg ((ap), intmax_t);     \
+  break;
+#else
+#define CASE_INTMAX_ARG(specinfo, ap)
+#endif
+
+/* Skip one long long argument, when the platform has the type. */
+#ifdef HAVE_LONG_LONG
+#define CASE_LONG_LONG_ARG(specinfo, ap) \
+  case LONG_LONG_ARG:                    \
+  (void) va_arg ((ap), long long);       \
+  break;
+#else
+#define CASE_LONG_LONG_ARG(specinfo, ap)
+#endif
+
+/* Remove from the va_list AP exactly one argument of the type announced
+   by SPECINFO (length modifier first, otherwise the conversion
+   specifier).  Keeping AP synchronized is required so that later
+   specifications read the right arguments. */
+#define CONSUME_VA_ARG(specinfo, ap)            \
+  do {                                          \
+    switch ((specinfo).arg_type)                \
+      {                                         \
+      case CHAR_ARG:                            \
+      case SHORT_ARG:                           \
+        /* char and short are promoted to int in va_list */ \
+        (void) va_arg ((ap), int);              \
+        break;                                  \
+        CASE_LONG_ARG (specinfo, ap)            \
+        CASE_LONG_LONG_ARG (specinfo, ap)       \
+        CASE_INTMAX_ARG (specinfo, ap)          \
+      case SIZE_ARG:                            \
+        (void) va_arg ((ap), size_t);           \
+        break;                                  \
+      case PTRDIFF_ARG:                         \
+        (void) va_arg ((ap), ptrdiff_t);        \
+        break;                                  \
+      case LONG_DOUBLE_ARG:                     \
+        (void) va_arg ((ap), long double);      \
+        break;                                  \
+      case MPF_ARG:                             \
+        (void) va_arg ((ap), mpf_srcptr);       \
+        break;                                  \
+      case MPQ_ARG:                             \
+        (void) va_arg ((ap), mpq_srcptr);       \
+        break;                                  \
+      case MP_LIMB_ARG:                         \
+        (void) va_arg ((ap), mp_limb_t);        \
+        break;                                  \
+      case MP_LIMB_ARRAY_ARG:                   \
+        /* gmp's %N consumes a limb pointer AND a size */ \
+        (void) va_arg ((ap), mp_ptr);           \
+        (void) va_arg ((ap), mp_size_t);        \
+        break;                                  \
+      case MPZ_ARG:                             \
+        (void) va_arg ((ap), mpz_srcptr);       \
+        break;                                  \
+      default:                                  \
+        /* no length modifier: fall back on the specifier */ \
+        switch ((specinfo).spec)                \
+          {                                     \
+          case 'd':                             \
+          case 'i':                             \
+          case 'o':                             \
+          case 'u':                             \
+          case 'x':                             \
+          case 'X':                             \
+          case 'c':                             \
+            (void) va_arg ((ap), int);          \
+            break;                              \
+          case 'f':                             \
+          case 'F':                             \
+          case 'e':                             \
+          case 'E':                             \
+          case 'g':                             \
+          case 'G':                             \
+          case 'a':                             \
+          case 'A':                             \
+            /* float is promoted to double in va_list */ \
+            (void) va_arg ((ap), double);       \
+            break;                              \
+          case 's':                             \
+            (void) va_arg ((ap), char *);       \
+            break;                              \
+          case 'p':                             \
+            (void) va_arg ((ap), void *);       \
+          }                                     \
+      }                                         \
+  } while (0)
+
+/* Process the part of the format string from START to END (excluded)
+   which does not deal with mpfr types: when FLAG is set the span holds
+   conversion specifications understood by gmp_printf, so hand a
+   null-terminated copy to sprntf_gmp together with AP; otherwise just
+   copy the plain characters.  Jump to the external label 'error' if
+   sprntf_gmp returns -1. */
+#define FLUSH(flag, start, end, ap, buf_ptr)                    \
+  do {                                                          \
+    const size_t n = (end) - (start);                           \
+    if ((flag))                                                 \
+      /* previous specifiers are understood by gmp_printf */    \
+      {                                                         \
+        MPFR_TMP_DECL (marker);                                 \
+        char *fmt_copy;                                         \
+        MPFR_TMP_MARK (marker);                                 \
+        fmt_copy = (char*) MPFR_TMP_ALLOC (n + 1);              \
+        strncpy (fmt_copy, (start), n);                         \
+        fmt_copy[n] = '\0';                                     \
+        if (sprntf_gmp ((buf_ptr), (fmt_copy), (ap)) == -1)     \
+          {                                                     \
+            MPFR_TMP_FREE (marker);                             \
+            goto error;                                         \
+          }                                                     \
+        (flag) = 0;                                             \
+        MPFR_TMP_FREE (marker);                                 \
+      }                                                         \
+    else if ((start) != (end))                                  \
+      /* no conversion specification, just simple characters */ \
+      buffer_cat ((buf_ptr), (start), n);                       \
+  } while (0)
+
+/* Growable output buffer; 'curr' always points to the terminating null
+   character (buffer_init stores an empty string). */
+struct string_buffer
+{
+  char *start;                  /* beginning of the buffer */
+  char *curr;                   /* null terminating character */
+  size_t size;                  /* buffer capacity */
+};
+
+/* Allocate a buffer of S characters for B and make it hold the empty
+   string. */
+static void
+buffer_init (struct string_buffer *b, size_t s)
+{
+  char *p;
+
+  p = (char *) (*__gmp_allocate_func) (s);
+  p[0] = '\0';
+  b->start = p;
+  b->curr = p;
+  b->size = s;
+}
+
+/* Increase the buffer capacity by the least multiple of 4096 that is at
+   least LEN+1 characters (n = 4096 + 4096*floor(len/4096)). */
+static void
+buffer_widen (struct string_buffer *b, size_t len)
+{
+  const size_t pos = b->curr - b->start;
+  const size_t n = 0x1000 + (len & ~((size_t) 0xfff));
+  MPFR_ASSERTD (pos < b->size);
+
+  /* guard the size computations against size_t overflow */
+  MPFR_ASSERTN ((len & ~((size_t) 4095)) <= (size_t)(SIZE_MAX - 4096));
+  MPFR_ASSERTN (b->size < SIZE_MAX - n);
+
+  b->start =
+    (char *) (*__gmp_reallocate_func) (b->start, b->size, b->size + n);
+  b->size += n;
+  b->curr = b->start + pos;
+
+  MPFR_ASSERTD (pos < b->size);
+  MPFR_ASSERTD (*b->curr == '\0');
+}
+
+/* Append the LEN first characters of the string S to the buffer B,
+   widening B beforehand if needed.  S must contain at least LEN
+   non-null characters. */
+static void
+buffer_cat (struct string_buffer *b, const char *s, size_t len)
+{
+  MPFR_ASSERTD (len != 0);
+  MPFR_ASSERTD (len <= strlen (s));
+
+  if (MPFR_UNLIKELY ((b->curr + len) >= (b->start + b->size)))
+    buffer_widen (b, len);
+
+  /* b->curr points to the terminating null character, so copying LEN
+     characters and re-terminating is equivalent to strncat. */
+  memcpy (b->curr, s, len);
+  b->curr += len;
+  *b->curr = '\0';
+
+  MPFR_ASSERTD (b->curr < b->start + b->size);
+  MPFR_ASSERTD (*b->curr == '\0');
+}
+
+/* Append N copies of the character C to the buffer B, widening it
+   beforehand if needed. */
+static void
+buffer_pad (struct string_buffer *b, const char c, const size_t n)
+{
+  MPFR_ASSERTD (n != 0);
+
+  MPFR_ASSERTN (b->size < SIZE_MAX - n - 1);
+  if (MPFR_UNLIKELY ((b->curr + n + 1) > (b->start + b->size)))
+    buffer_widen (b, n);
+
+  memset (b->curr, c, n);
+  b->curr += n;
+  *b->curr = '\0';
+
+  MPFR_ASSERTD (b->curr < b->start + b->size);
+}
+
+/* Form a string by concatenating the first LEN characters of STR to TZ
+   zero(s), insert one character C between each group of 3 characters
+   counted from the end toward the beginning, and concatenate the result
+   to the buffer B.  R is the size of the leading (possibly short)
+   group, Q the number of separators inserted. */
+static void
+buffer_sandwich (struct string_buffer *b, char *str, size_t len,
+                 const size_t tz, const char c)
+{
+  const size_t step = 3;
+  const size_t size = len + tz;
+  const size_t r = size % step == 0 ? step : size % step;
+  const size_t q = size % step == 0 ? size / step - 1 : size / step;
+  size_t i;
+
+  MPFR_ASSERTD (size != 0);
+  if (c == '\0')
+    /* no separator: plain copy plus trailing zeros */
+    {
+      buffer_cat (b, str, len);
+      buffer_pad (b, '0', tz);
+      return;
+    }
+
+  MPFR_ASSERTN (b->size < SIZE_MAX - size - 1 - q);
+  MPFR_ASSERTD (len <= strlen (str));
+  if (MPFR_UNLIKELY ((b->curr + size + 1 + q) > (b->start + b->size)))
+    buffer_widen (b, size + q);
+
+  /* first R significant digits */
+  memcpy (b->curr, str, r);
+  b->curr += r;
+  str += r;
+  len -= r;
+
+  /* blocks of thousands. Warning: STR might end in the middle of a block */
+  for (i = 0; i < q; ++i)
+    {
+      *b->curr++ = c;
+      if (MPFR_LIKELY (len > 0))
+        {
+          if (MPFR_LIKELY (len >= step))
+            /* step significant digits */
+            {
+              memcpy (b->curr, str, step);
+              len -= step;
+            }
+          else
+            /* last digits in STR, fill up thousand block with zeros */
+            {
+              memcpy (b->curr, str, len);
+              memset (b->curr + len, '0', step - len);
+              len = 0;
+            }
+        }
+      else
+        /* trailing zeros */
+        memset (b->curr, '0', step);
+
+      /* NOTE(review): str advances by step even once len == 0, so it may
+         end up far past the end of its array; it is never dereferenced
+         afterwards, but the pointer arithmetic itself is questionable --
+         consider clamping. */
+      b->curr += step;
+      str += step;
+    }
+
+  *b->curr = '\0';
+
+  MPFR_ASSERTD (b->curr < b->start + b->size);
+}
+
+/* Let gmp_vasprintf process the part of the format string it can
+   understand, appending its output to the buffer B.  Return the number
+   of characters written, or gmp_vasprintf's negative result on error. */
+static int
+sprntf_gmp (struct string_buffer *b, const char *fmt, va_list ap)
+{
+  int length;
+  char *s;
+
+  length = gmp_vasprintf (&s, fmt, ap);
+  if (length > 0)
+    buffer_cat (b, s, length);
+
+  /* NOTE(review): if gmp_vasprintf can return a negative value, s may be
+     left indeterminate here and freeing it would be invalid -- confirm
+     gmp_vasprintf's contract. */
+  mpfr_free_str (s);
+  return length;
+}
+
+/* Helper struct and functions for temporary strings management */
+/* Singly-linked list used to free all intermediate strings at once;
+   the head node created by init_string_list keeps a NULL string. */
+struct string_list
+{
+  char *string;
+  struct string_list *next;     /* NULL in last node */
+};
+
+/* Initialize SL as a one-node list holding no string. */
+static void
+init_string_list (struct string_list *sl)
+{
+  sl->next = NULL;
+  sl->string = NULL;
+}
+
+/* Free every string registered in the list SL, then the list nodes
+   themselves (SL included). */
+static void
+clear_string_list (struct string_list *sl)
+{
+  while (sl != NULL)
+    {
+      struct string_list *next = sl->next;
+
+      if (sl->string != NULL)
+        mpfr_free_str (sl->string);
+      (*__gmp_free_func) (sl, sizeof (struct string_list));
+      sl = next;
+    }
+}
+
+/* Append NEW_STRING to the list SL (in a freshly allocated tail node)
+   so that clear_string_list will eventually free it.  Return
+   NEW_STRING. */
+static char *
+register_string (struct string_list *sl, char *new_string)
+{
+  struct string_list *node;
+
+  /* walk to the current tail node */
+  for (node = sl; node->next != NULL; node = node->next)
+    ;
+
+  node->next = (struct string_list *)
+    (*__gmp_allocate_func) (sizeof (struct string_list));
+  node = node->next;
+  node->next = NULL;
+  node->string = new_string;
+  return new_string;
+}
+
+/* padding type: where the padding characters are placed in the
+   formatted number */
+enum pad_t
+  {
+    LEFT,          /* spaces in left hand side for right justification */
+    LEADING_ZEROS, /* padding with '0' characters in integral part */
+    RIGHT          /* spaces in right hand side for left justification */
+  };
+
+/* number_parts details how many characters are needed in each part of a
+   float print.  Parts that are absent presumably keep a NULL pointer
+   and/or a zero size -- the initialization code is outside this
+   excerpt; confirm before relying on it. */
+struct number_parts
+{
+  enum pad_t pad_type;    /* Padding type */
+  size_t pad_size;        /* Number of padding characters */
+
+  char sign;              /* Sign character */
+
+  char *prefix_ptr;       /* Pointer to prefix part (e.g. "0x") */
+  size_t prefix_size;     /* Number of characters in *prefix_ptr */
+
+  char thousands_sep;     /* Thousands separator (only with style 'f') */
+
+  char *ip_ptr;           /* Pointer to integral part characters*/
+  size_t ip_size;         /* Number of digits in *ip_ptr */
+  int ip_trailing_zeros;  /* Number of additional null digits in integral
+                             part */
+
+  char point;             /* Decimal point character */
+
+  int fp_leading_zeros;   /* Number of additional leading zeros in fractional
+                             part */
+  char *fp_ptr;           /* Pointer to fractional part characters */
+  size_t fp_size;         /* Number of digits in *fp_ptr */
+  int fp_trailing_zeros;  /* Number of additional trailing zeros in fractional
+                             part */
+
+  char *exp_ptr;          /* Pointer to exponent part */
+  size_t exp_size;        /* Number of characters in *exp_ptr */
+
+  struct string_list *sl; /* List of string buffers in use: we need such a
+                             mechanism because fp_ptr may point into the same
+                             string as ip_ptr */
+};
+
+/* For a real non zero number x, what is the base exponent f when rounding x
+   with rounding mode r to r(x) = m*b^f, where m is a digit and 1 <= m < b ?
+   Return non zero value if x is rounded up to b^f, return zero otherwise */
+static int
+next_base_power_p (mpfr_srcptr x, int base, mpfr_rnd_t rnd)
+{
+  mpfr_prec_t nbits;
+  mp_limb_t pm;
+  mp_limb_t xm;
+
+  MPFR_ASSERTD (MPFR_IS_PURE_FP (x));
+  MPFR_ASSERTD (base == 2 || base == 16);
+
+  /* Warning: the decimal point is AFTER THE FIRST DIGIT in this output
+     representation. */
+  nbits = base == 2 ? 1 : 4;    /* bits per output digit */
+
+  if (rnd == MPFR_RNDZ
+      || (rnd == MPFR_RNDD && MPFR_IS_POS (x))
+      || (rnd == MPFR_RNDU && MPFR_IS_NEG (x))
+      || MPFR_PREC (x) <= nbits)
+    /* no rounding when printing x with 1 digit */
+    return 0;
+
+  xm = MPFR_MANT (x) [MPFR_LIMB_SIZE (x) - 1];
+  pm = MPFR_LIMB_MASK (GMP_NUMB_BITS - nbits);
+  /* ~pm selects the nbits leading bits of the limb; the expression is
+     non-zero iff those bits are not all ones */
+  if ((xm & ~pm) ^ ~pm)
+    /* do not round up if some of the nbits first bits are 0s. */
+    return 0;
+
+  if (rnd == MPFR_RNDN)
+    /* mask for rounding bit */
+    pm = (MPFR_LIMB_ONE << (GMP_NUMB_BITS - nbits - 1));
+
+  /* round up if some remaining bits are 1 */
+  /* warning: the return value must be an int */
+  return xm & pm ? 1 : 0;
+}
+
+/* For a real non zero number x, what is the exponent f when rounding x with
+   rounding mode r to r(x) = m*10^f, where m has p+1 digits and 1 <= m < 10 ?
+
+   Return +1 if x is rounded up to the next power of ten, return zero
+   otherwise.  If e is not NULL, *e is set to f (already incremented in
+   the round-up case). */
+static int
+round_to_10_power (mpfr_exp_t *e, mpfr_srcptr x, mpfr_prec_t p, mpfr_rnd_t r)
+{
+  mpfr_t f, u, v, y;
+  mpfr_prec_t m;
+  mpfr_exp_t ex;
+  mpfr_uexp_t uexp;
+  int roundup = -1; /* boolean (-1: not set) */
+
+  MPFR_ZIV_DECL (loop);
+
+  /* y = abs(x) */
+  MPFR_ALIAS (y, x, 1, MPFR_EXP(x));
+
+  /* we want f = floor(log(|x|)/log(10)) exactly.
+     we have |f| >= |Exp(x)|/3,
+     then m = ceil(log(uexp/3)/log(2)) > log(f)/log(2)
+     is a sufficient precision for f. */
+  ex = mpfr_get_exp (x);
+  uexp = SAFE_ABS (mpfr_uexp_t, ex) / 3;
+  m = 1;
+  /* m = bit length of uexp (at least 2) */
+  while (uexp)
+    {
+      uexp >>= 1;
+      m++;
+    }
+  if (m < 2)
+    m = 2;
+  mpfr_init2 (f, m);
+  mpfr_log10 (f, y, MPFR_RNDD);
+  mpfr_floor (f, f);
+
+  /* In most cases, the output exponent is f. */
+  if (e != NULL)
+    *e = (mpfr_exp_t)mpfr_get_si (f, MPFR_RNDD);
+
+  if (r == MPFR_RNDZ
+      || (MPFR_IS_POS (x) && r == MPFR_RNDD)
+      || (MPFR_IS_NEG (x) && r == MPFR_RNDU))
+    /* If rounding toward zero, the exponent is f */
+    {
+      mpfr_clear (f);
+      return 0;
+    }
+
+  /* Is |x| less than 10^(f+1) - 10^(f-p)?  (Ziv-style loop: retry with
+     more precision while the comparison is inconclusive.) */
+  {
+    int cmp;
+    int inex_u, inex_v, inex_w;
+    mpfr_exp_t exp_u, exp_v, exp_w;
+
+    m = MPFR_PREC (x);
+    m += MPFR_INT_CEIL_LOG2 (m);
+    mpfr_init2 (u, m);
+    mpfr_init2 (v, m);
+
+    MPFR_ZIV_INIT (loop, m);
+    for (;;)
+      {
+        mpfr_set_prec (u, m);
+        mpfr_set_prec (v, m);
+
+        /* u = o(10^(f+1)) rounding toward -infinity
+           error (u) < 1 ulp(u)
+           error(u) = 0 if inex_u = 0 */
+        mpfr_add_ui (u, f, 1, MPFR_RNDN);
+        inex_u = mpfr_ui_pow (u, 10, u, MPFR_RNDD);
+        exp_u = MPFR_EXP (u);
+
+        /* if r = rounding to nearest
+           v = o(0.5 * 10^(f-p)) rounding toward +infinity
+           else
+           v = o(10^(f-p)) rounding toward +infinity
+
+           error(v) < 1 ulp(v)
+           error(v) = 0 if inex_v = 0 */
+        mpfr_sub_ui (v, f, p, MPFR_RNDN);
+        inex_v = mpfr_ui_pow (v, 10, v, MPFR_RNDU);
+        if (r == MPFR_RNDN)
+          mpfr_div_2ui (v, v, 1, MPFR_RNDN);
+        exp_v = MPFR_EXP (v);
+
+        /* w = o(u-v) rounding toward -infinity
+           w is an approximation of 10^(f+1) - v with
+           error(w) < 1 ulp(w) + error(u) + error(v)
+           error(w) = 0 iff inex_u = inex_v = inex_diff = 0 */
+        inex_w = mpfr_sub (u, u, v, MPFR_RNDD);
+        exp_w = MPFR_EXP (u);
+
+        cmp = mpfr_cmp (y, u);
+
+        if (cmp < 0)
+          /* |x| < u <= 10^(f+1) - v, the exponent is f */
+          {
+            roundup = 0;
+            break;
+          }
+        else if (cmp == 0 && inex_u == 0 && inex_v == 0 && inex_w == 0)
+          /* |x| = u = 10^(f+1) - v, the exponent is f+1 */
+          {
+            if (e != NULL)
+              (*e)++;
+
+            roundup = +1;
+            break;
+          }
+
+        /* compare |x| with w + error(w) */
+        /* NOTE(review): the three error terms below overwrite v instead
+           of being accumulated, so the bound used is only the last
+           non-zero one -- confirm this is a deliberate (sufficient)
+           over/under-estimate. */
+        if (inex_u)
+          mpfr_set_ui_2exp (v, 1, exp_u - m, MPFR_RNDU);
+        else
+          mpfr_set_ui (v, 0, MPFR_RNDN);
+        if (inex_v)
+          mpfr_set_ui_2exp (v, 1, exp_v - m, MPFR_RNDU);
+        if (inex_w)
+          mpfr_set_ui_2exp (v, 1, exp_w - m, MPFR_RNDU);
+
+        mpfr_add (u, u, v, MPFR_RNDU);
+        if (mpfr_cmp (y, u) >= 0)
+          /* |x| >= 10^(f+1) - v for sure: round up to 10^(f+1) */
+          {
+            if (e != NULL)
+              *e = (mpfr_exp_t)mpfr_get_si (f, MPFR_RNDD) + 1;
+
+            roundup = +1;
+            break;
+          }
+
+        /* inconclusive: increase the working precision */
+        MPFR_ZIV_NEXT (loop, m);
+      }
+    MPFR_ZIV_FREE (loop);
+    mpfr_clear (u);
+    mpfr_clear (v);
+  }
+
+  MPFR_ASSERTD (roundup != -1);
+  mpfr_clear (f);
+  return roundup;
+}
+
+/* Determine the different parts of the string representation of the regular
+ number P when SPEC.SPEC is 'a', 'A', or 'b', i.e. C99 %a-style hexadecimal
+ ("0x1.8p+1") or binary output. Fills NP in place; every string allocated
+ here is recorded in NP->SL so the caller can free them all at once.
+
+ return 0 on success,
+ return -1 if some field > INT_MAX */
+static int
+regular_ab (struct number_parts *np, mpfr_srcptr p,
+ const struct printf_spec spec)
+{
+ int uppercase;
+ int base;
+ char *str;
+ mpfr_exp_t exp;
+
+ uppercase = spec.spec == 'A';
+
+ /* sign */
+ if (MPFR_IS_NEG (p))
+ np->sign = '-';
+ else if (spec.showsign || spec.space)
+ np->sign = spec.showsign ? '+' : ' ';
+
+ if (spec.spec == 'a' || spec.spec == 'A')
+ /* prefix part: "0x" or "0X" (binary 'b' output has no prefix) */
+ {
+ np->prefix_size = 2;
+ str = (char *) (*__gmp_allocate_func) (1 + np->prefix_size);
+ str[0] = '0';
+ str[1] = uppercase ? 'X' : 'x';
+ str[2] = '\0';
+ np->prefix_ptr = register_string (np->sl, str);
+ }
+
+ /* integral part: always exactly one digit before the decimal point */
+ np->ip_size = 1;
+ base = (spec.spec == 'b') ? 2 : 16;
+
+ if (spec.prec != 0)
+ {
+ size_t nsd;
+
+ /* Number of significant digits:
+ - if no given precision, let mpfr_get_str determine it;
+ - if a non-zero precision is specified, then one digit before decimal
+ point plus SPEC.PREC after it. */
+ nsd = spec.prec < 0 ? 0 : spec.prec + np->ip_size;
+ str = mpfr_get_str (0, &exp, base, nsd, p, spec.rnd_mode);
+ register_string (np->sl, str);
+ np->ip_ptr = MPFR_IS_NEG (p) ? ++str : str; /* skip sign if any */
+
+ if (base == 16)
+ /* EXP is the exponent for radix sixteen with decimal point BEFORE the
+ first digit, we want the exponent for radix two and the decimal
+ point AFTER the first digit. */
+ {
+ MPFR_ASSERTN (exp > MPFR_EMIN_MIN /4); /* possible overflow */
+ exp = (exp - 1) * 4;
+ }
+ else
+ /* EXP is the exponent for decimal point BEFORE the first digit, we
+ want the exponent for decimal point AFTER the first digit. */
+ {
+ MPFR_ASSERTN (exp > MPFR_EMIN_MIN); /* possible overflow */
+ --exp;
+ }
+ }
+ else if (next_base_power_p (p, base, spec.rnd_mode))
+ /* precision is zero and |P| rounds (in the requested direction) up to
+ the next power of the base: the single printed digit is '1'. */
+ {
+ str = (char *)(*__gmp_allocate_func) (2);
+ str[0] = '1';
+ str[1] = '\0';
+ np->ip_ptr = register_string (np->sl, str);
+
+ exp = MPFR_GET_EXP (p);
+ }
+ else if (base == 2)
+ /* precision zero, binary: the one digit is the implicit leading '1'
+ of a normalized significand. */
+ {
+ str = (char *)(*__gmp_allocate_func) (2);
+ str[0] = '1';
+ str[1] = '\0';
+ np->ip_ptr = register_string (np->sl, str);
+
+ exp = MPFR_GET_EXP (p) - 1;
+ }
+ else
+ /* precision zero, hexadecimal: round the 4 leading significand bits
+ by hand. */
+ {
+ int digit;
+ mp_limb_t msl = MPFR_MANT (p)[MPFR_LIMB_SIZE (p) - 1];
+ int rnd_bit = GMP_NUMB_BITS - 5; /* first bit below the kept nibble */
+
+ /* pick up the 4 first bits */
+ digit = msl >> (rnd_bit+1);
+ if (spec.rnd_mode == MPFR_RNDA
+ || (spec.rnd_mode == MPFR_RNDU && MPFR_IS_POS (p))
+ || (spec.rnd_mode == MPFR_RNDD && MPFR_IS_NEG (p))
+ || (spec.rnd_mode == MPFR_RNDN
+ && (msl & (MPFR_LIMB_ONE << rnd_bit))))
+ digit++;
+ /* NOTE(review): digit cannot reach 16 here — the case where rounding
+ carries out of the nibble is a power of the base and was handled by
+ next_base_power_p above; confirm that helper covers all modes. */
+ MPFR_ASSERTD ((0 <= digit) && (digit <= 15));
+
+ str = (char *)(*__gmp_allocate_func) (1 + np->ip_size);
+ str[0] = num_to_text [digit];
+ str[1] = '\0';
+ np->ip_ptr = register_string (np->sl, str);
+
+ exp = MPFR_GET_EXP (p) - 4;
+ }
+
+ if (uppercase)
+ /* All digits in upper case */
+ {
+ char *s1 = str;
+ while (*s1)
+ {
+ switch (*s1)
+ {
+ case 'a':
+ *s1 = 'A';
+ break;
+ case 'b':
+ *s1 = 'B';
+ break;
+ case 'c':
+ *s1 = 'C';
+ break;
+ case 'd':
+ *s1 = 'D';
+ break;
+ case 'e':
+ *s1 = 'E';
+ break;
+ case 'f':
+ *s1 = 'F';
+ break;
+ }
+ s1++;
+ }
+ }
+
+ if (spec.spec == 'b' || spec.prec != 0)
+ /* compute the number of digits in fractional part */
+ {
+ char *ptr;
+ size_t str_len;
+
+ /* the sign has been skipped, skip also the first digit */
+ ++str;
+ str_len = strlen (str);
+ ptr = str + str_len - 1; /* points to the end of str */
+
+ if (spec.prec < 0)
+ /* remove trailing zeros, if any */
+ {
+ while ((*ptr == '0') && (str_len != 0))
+ {
+ --ptr;
+ --str_len;
+ }
+ }
+
+ if (str_len > INT_MAX)
+ /* too many digits in fractional part */
+ return -1;
+
+ if (str_len != 0)
+ /* there are some non-zero digits in fractional part */
+ {
+ np->fp_ptr = str;
+ np->fp_size = str_len;
+ if ((int) str_len < spec.prec)
+ np->fp_trailing_zeros = spec.prec - str_len;
+ }
+ }
+
+ /* decimal point: shown when there is a fractional part, or always with
+ the '#' (alt) flag */
+ if ((np->fp_size != 0) || spec.alt)
+ np->point = MPFR_DECIMAL_POINT;
+
+ /* the exponent part contains the character 'p', or 'P' plus the sign
+ character plus at least one digit and only as many more digits as
+ necessary to represent the exponent.
+ We assume that |EXP| < 10^INT_MAX. */
+ np->exp_size = 3;
+ {
+ mpfr_uexp_t x;
+
+ x = SAFE_ABS (mpfr_uexp_t, exp);
+ while (x > 9)
+ {
+ np->exp_size++;
+ x /= 10;
+ }
+ }
+ str = (char *) (*__gmp_allocate_func) (1 + np->exp_size);
+ np->exp_ptr = register_string (np->sl, str);
+ {
+ char exp_fmt[8]; /* contains at most 7 characters like in "p%+.1i",
+ or "P%+.2li" */
+
+ exp_fmt[0] = uppercase ? 'P' : 'p';
+ exp_fmt[1] = '\0';
+ strcat (exp_fmt, "%+.1" MPFR_EXP_FORMAT_SPEC);
+
+ if (sprintf (str, exp_fmt, exp) < 0)
+ return -1;
+ }
+
+ return 0;
+}
+
+/* Determine the different parts of the string representation of the regular
+ number P when SPEC.SPEC is 'e', 'E', 'g', or 'G' — scientific notation,
+ "d.ddde+dd" style. Fills NP in place; allocated strings are recorded in
+ NP->SL for later bulk release.
+
+ return 0 on success,
+ return -1 if some field > INT_MAX */
+static int
+regular_eg (struct number_parts *np, mpfr_srcptr p,
+ const struct printf_spec spec)
+{
+ char *str;
+ mpfr_exp_t exp;
+
+ const int uppercase = spec.spec == 'E' || spec.spec == 'G';
+ const int spec_g = spec.spec == 'g' || spec.spec == 'G';
+ /* %g drops trailing zeros unless the '#' (alt) flag is present;
+ %e keeps them whenever an explicit positive precision was given */
+ const int keep_trailing_zeros = (spec_g && spec.alt)
+ || (!spec_g && (spec.prec > 0));
+
+ /* sign */
+ if (MPFR_IS_NEG (p))
+ np->sign = '-';
+ else if (spec.showsign || spec.space)
+ np->sign = spec.showsign ? '+' : ' ';
+
+ /* integral part: always one digit before the decimal point */
+ np->ip_size = 1;
+ {
+ size_t nsd;
+
+ /* Number of significant digits:
+ - if no given precision, then let mpfr_get_str determine it,
+ - if a precision is specified, then one digit before decimal point
+ plus SPEC.PREC after it.
+ We use the fact here that mpfr_get_str allows us to ask for only one
+ significant digit when the base is not a power of 2. */
+ nsd = (spec.prec < 0) ? 0 : spec.prec + np->ip_size;
+ str = mpfr_get_str (0, &exp, 10, nsd, p, spec.rnd_mode);
+ }
+ register_string (np->sl, str);
+ np->ip_ptr = MPFR_IS_NEG (p) ? ++str : str; /* skip sign if any */
+
+ if (spec.prec != 0)
+ /* compute the number of digits in fractional part */
+ {
+ char *ptr;
+ size_t str_len;
+
+ /* the sign has been skipped, skip also the first digit */
+ ++str;
+ str_len = strlen (str);
+ ptr = str + str_len - 1; /* points to the end of str */
+
+ if (!keep_trailing_zeros)
+ /* remove trailing zeros, if any */
+ {
+ while ((*ptr == '0') && (str_len != 0))
+ {
+ --ptr;
+ --str_len;
+ }
+ }
+
+ if (str_len > INT_MAX)
+ /* too many digits in fractional part */
+ return -1;
+
+ if (str_len != 0)
+ /* there are some non-zero digits in fractional part */
+ {
+ np->fp_ptr = str;
+ np->fp_size = str_len;
+ if ((!spec_g || spec.alt) && (spec.prec > 0)
+ && ((int)str_len < spec.prec))
+ /* add missing trailing zeros */
+ np->fp_trailing_zeros = spec.prec - str_len;
+ }
+ }
+
+ /* decimal point: shown with a fractional part or with the '#' flag */
+ if (np->fp_size != 0 || spec.alt)
+ np->point = MPFR_DECIMAL_POINT;
+
+ /* EXP is the exponent for decimal point BEFORE the first digit, we want
+ the exponent for decimal point AFTER the first digit.
+ Here, no possible overflow because exp < MPFR_EXP (p) / 3 */
+ exp--;
+
+ /* the exponent part contains the character 'e', or 'E' plus the sign
+ character plus at least two digits and only as many more digits as
+ necessary to represent the exponent.
+ We assume that |EXP| < 10^INT_MAX. */
+ np->exp_size = 3;
+ {
+ mpfr_uexp_t x;
+
+ x = SAFE_ABS (mpfr_uexp_t, exp);
+ while (x > 9)
+ {
+ np->exp_size++;
+ x /= 10;
+ }
+ }
+ /* C99 requires at least two exponent digits for the e/E conversions */
+ if (np->exp_size < 4)
+ np->exp_size = 4;
+
+ str = (char *) (*__gmp_allocate_func) (1 + np->exp_size);
+ np->exp_ptr = register_string (np->sl, str);
+
+ {
+ char exp_fmt[8]; /* e.g. "e%+.2i", or "E%+.2li" */
+
+ exp_fmt[0] = uppercase ? 'E' : 'e';
+ exp_fmt[1] = '\0';
+ strcat (exp_fmt, "%+.2" MPFR_EXP_FORMAT_SPEC);
+
+ if (sprintf (str, exp_fmt, exp) < 0)
+ return -1;
+ }
+
+ return 0;
+}
+
+/* Determine the different parts of the string representation of the regular
+ number P when SPEC.SPEC is 'f', 'F', 'g', or 'G' — fixed-point notation,
+ "ddd.ddd" style. Fills NP in place; allocated strings are recorded in
+ NP->SL for later bulk release.
+
+ return 0 on success,
+ return -1 if some field of number_parts is greater than INT_MAX */
+static int
+regular_fg (struct number_parts *np, mpfr_srcptr p,
+ const struct printf_spec spec)
+{
+ mpfr_exp_t exp;
+ char * str;
+ const int spec_g = (spec.spec == 'g' || spec.spec == 'G');
+ /* %g drops trailing zeros unless the '#' (alt) flag is present */
+ const int keep_trailing_zeros = spec_g && spec.alt;
+
+ /* WARNING: an empty precision field is forbidden (it means precision = 6
+ and it should have been changed to 6 before the function call) */
+ MPFR_ASSERTD (spec.prec >= 0);
+
+ /* sign */
+ if (MPFR_IS_NEG (p))
+ np->sign = '-';
+ else if (spec.showsign || spec.space)
+ np->sign = spec.showsign ? '+' : ' ';
+
+ if (MPFR_GET_EXP (p) <= 0)
+ /* 0 < |p| < 1 */
+ {
+ /* Most of the time, integral part is 0 */
+ np->ip_size = 1;
+ str = (char *) (*__gmp_allocate_func) (1 + np->ip_size);
+ str[0] = '0';
+ str[1] = '\0';
+ np->ip_ptr = register_string (np->sl, str);
+
+ if (spec.prec == 0)
+ /* only two possibilities: either 1 or 0. */
+ {
+ mpfr_t y;
+ /* y = abs(p) */
+ MPFR_ALIAS (y, p, 1, MPFR_EXP (p));
+
+ if (spec.rnd_mode == MPFR_RNDA
+ || (spec.rnd_mode == MPFR_RNDD && MPFR_IS_NEG (p))
+ || (spec.rnd_mode == MPFR_RNDU && MPFR_IS_POS (p))
+ || (spec.rnd_mode == MPFR_RNDN && mpfr_cmp_d (y, 0.5) > 0))
+ /* rounded up to 1: one digit '1' in integral part.
+ note that 0.5 is rounded to 0 with RNDN (round ties to even) */
+ np->ip_ptr[0] = '1';
+ }
+ else
+ {
+ /* exp = position of the most significant decimal digit. */
+ round_to_10_power (&exp, p, 0, MPFR_RNDZ);
+ MPFR_ASSERTD (exp < 0);
+
+ if (exp < -spec.prec)
+ /* only the last digit may be non zero */
+ {
+ int round_away;
+ switch (spec.rnd_mode)
+ {
+ case MPFR_RNDA:
+ round_away = 1;
+ break;
+ case MPFR_RNDD:
+ round_away = MPFR_IS_NEG (p);
+ break;
+ case MPFR_RNDU:
+ round_away = MPFR_IS_POS (p);
+ break;
+ case MPFR_RNDN:
+ {
+ /* compare |p| to y = 0.5*10^(-spec.prec) */
+ mpfr_t y;
+ mpfr_exp_t e = MAX (MPFR_PREC (p), 56);
+ mpfr_init2 (y, e + 8);
+ do
+ {
+ /* find a lower approximation of
+ 0.5*10^(-spec.prec) different from |p| */
+ e += 8;
+ mpfr_set_prec (y, e);
+ mpfr_set_si (y, -spec.prec, MPFR_RNDN);
+ mpfr_exp10 (y, y, MPFR_RNDD);
+ mpfr_div_2ui (y, y, 1, MPFR_RNDN);
+ } while (mpfr_cmpabs (y, p) == 0);
+
+ round_away = mpfr_cmpabs (y, p) < 0;
+ mpfr_clear (y);
+ }
+ break;
+ default:
+ round_away = 0;
+ }
+
+ if (round_away)
+ /* round away from zero: the last output digit is '1' */
+ {
+ np->fp_leading_zeros = spec.prec - 1;
+
+ np->fp_size = 1;
+ str =
+ (char *) (*__gmp_allocate_func) (1 + np->fp_size);
+ str[0] = '1';
+ str[1] = '\0';
+ np->fp_ptr = register_string (np->sl, str);
+ }
+ else
+ /* only zeros in fractional part */
+ {
+ MPFR_ASSERTD (!spec_g);
+ np->fp_leading_zeros = spec.prec;
+ }
+ }
+ else
+ /* the most significant digits are the last
+ spec.prec + exp + 1 digits in fractional part */
+ {
+ char *ptr;
+ size_t str_len;
+ size_t nsd = spec.prec + exp + 1;
+ /* WARNING: nsd may equal 1, but here we use the fact that
+ mpfr_get_str can return one digit with base ten
+ (undocumented feature, see comments in get_str.c) */
+
+ str = mpfr_get_str (NULL, &exp, 10, nsd, p, spec.rnd_mode);
+ register_string (np->sl, str);
+ if (MPFR_IS_NEG (p))
+ ++str;
+ if (exp == 1)
+ /* round up to 1 */
+ {
+ MPFR_ASSERTD (str[0] == '1');
+ np->ip_ptr[0] = '1';
+ if (!spec_g || spec.alt)
+ np->fp_leading_zeros = spec.prec;
+ }
+ else
+ {
+ /* skip sign */
+ np->fp_ptr = str;
+ np->fp_leading_zeros = -exp;
+ MPFR_ASSERTD (exp <= 0);
+
+ str_len = strlen (str); /* the sign has been skipped */
+ ptr = str + str_len - 1; /* points to the end of str */
+
+ if (!keep_trailing_zeros)
+ /* remove trailing zeros, if any */
+ {
+ while ((*ptr == '0') && str_len)
+ {
+ --ptr;
+ --str_len;
+ }
+ }
+
+ if (str_len > INT_MAX)
+ /* too many digits in fractional part */
+ return -1;
+
+ MPFR_ASSERTD (str_len > 0);
+ np->fp_size = str_len;
+
+ if ((!spec_g || spec.alt)
+ && spec.prec > 0
+ && (np->fp_leading_zeros + np->fp_size < spec.prec))
+ /* add missing trailing zeros */
+ np->fp_trailing_zeros = spec.prec - np->fp_leading_zeros
+ - np->fp_size;
+ }
+ }
+ }
+
+ if (spec.alt || np->fp_leading_zeros != 0 || np->fp_size != 0
+ || np->fp_trailing_zeros != 0)
+ np->point = MPFR_DECIMAL_POINT;
+ }
+ else
+ /* 1 <= |p| */
+ {
+ size_t nsd; /* Number of significant digits */
+
+ /* Determine the position of the most significant decimal digit. */
+ round_to_10_power (&exp, p, 0, MPFR_RNDZ);
+
+ MPFR_ASSERTD (exp >= 0);
+ if (exp > INT_MAX)
+ /* P is too large to print all its integral part digits */
+ return -1;
+
+ np->ip_size = exp + 1;
+
+ nsd = spec.prec + np->ip_size;
+ str = mpfr_get_str (NULL, &exp, 10, nsd, p, spec.rnd_mode);
+ register_string (np->sl, str);
+ np->ip_ptr = MPFR_IS_NEG (p) ? ++str : str; /* skip sign */
+
+ if (spec.group)
+ /* thousands separator in integral part */
+ np->thousands_sep = MPFR_THOUSANDS_SEPARATOR;
+
+ if (nsd == 0 || (spec_g && !spec.alt))
+ /* compute how much non-zero digits in integral and fractional
+ parts */
+ {
+ size_t str_len;
+ str_len = strlen (str); /* note: the sign has been skipped */
+
+ /* NOTE(review): exp (signed mpfr_exp_t) is compared with the
+ unsigned str_len; exp >= 0 was asserted above, and mpfr_get_str
+ has just reset it, so the promotion looks safe — confirm. */
+ if (exp > str_len)
+ /* mpfr_get_str doesn't give the trailing zeros when p is a
+ multiple of 10 (p integer, so no fractional part) */
+ {
+ np->ip_trailing_zeros = exp - str_len;
+ np->ip_size = str_len;
+ if (spec.alt)
+ np->point = MPFR_DECIMAL_POINT;
+ }
+ else
+ /* str may contain some digits which are in fractional part */
+ {
+ char *ptr;
+
+ ptr = str + str_len - 1; /* points to the end of str */
+ str_len -= np->ip_size; /* number of digits in fractional
+ part */
+
+ if (!keep_trailing_zeros)
+ /* remove trailing zeros, if any */
+ {
+ while ((*ptr == '0') && (str_len != 0))
+ {
+ --ptr;
+ --str_len;
+ }
+ }
+
+ if (str_len > INT_MAX)
+ /* too many digits in fractional part */
+ return -1;
+
+ if (str_len != 0)
+ /* some digits in fractional part */
+ {
+ np->point = MPFR_DECIMAL_POINT;
+ np->fp_ptr = str + np->ip_size;
+ np->fp_size = str_len;
+ }
+ }
+ }
+ else
+ /* spec.prec digits in fractional part */
+ {
+ if (np->ip_size == exp - 1)
+ /* the absolute value of the number has been rounded up to a power
+ of ten.
+ Insert an additional zero in integral part and put the rest of
+ them in fractional part. */
+ np->ip_trailing_zeros = 1;
+
+ if (spec.prec != 0)
+ {
+ MPFR_ASSERTD (np->ip_size + np->ip_trailing_zeros == exp);
+ MPFR_ASSERTD (np->ip_size + spec.prec == nsd);
+
+ np->point = MPFR_DECIMAL_POINT;
+ np->fp_ptr = str + np->ip_size;
+ np->fp_size = spec.prec;
+ }
+ else if (spec.alt)
+ np->point = MPFR_DECIMAL_POINT;
+ }
+ }
+
+ return 0;
+}
+
+/* partition_number determines the different parts of the string
+ representation of the number p according to the given specification:
+ sign, optional "0x" prefix, integral part, decimal point, fractional
+ part, exponent, and padding. It dispatches to regular_ab / regular_eg /
+ regular_fg for regular non-zero values and handles NaN, infinities and
+ zero inline.
+ partition_number initializes the given structure np, so all previous
+ information in that variable is lost.
+ return the total number of characters to be written.
+ return -1 if an error occured, in that case np's fields are in an undefined
+ state but all string buffers have been freed. */
+static int
+partition_number (struct number_parts *np, mpfr_srcptr p,
+ struct printf_spec spec)
+{
+ char *str;
+ long total;
+ int uppercase;
+
+ /* WARNING: left justification means right space padding */
+ np->pad_type = spec.left ? RIGHT : spec.pad == '0' ? LEADING_ZEROS : LEFT;
+ np->pad_size = 0;
+ np->sign = '\0';
+ np->prefix_ptr = NULL;
+ np->prefix_size = 0;
+ np->thousands_sep = '\0';
+ np->ip_ptr = NULL;
+ np->ip_size = 0;
+ np->ip_trailing_zeros = 0;
+ np->point = '\0';
+ np->fp_leading_zeros = 0;
+ np->fp_ptr = NULL;
+ np->fp_size = 0;
+ np->fp_trailing_zeros = 0;
+ np->exp_ptr = NULL;
+ np->exp_size = 0;
+ np->sl = (struct string_list *)
+ (*__gmp_allocate_func) (sizeof (struct string_list));
+ init_string_list (np->sl);
+
+ uppercase = spec.spec == 'A' || spec.spec == 'E' || spec.spec == 'F'
+ || spec.spec == 'G';
+
+ if (MPFR_UNLIKELY (MPFR_IS_SINGULAR (p)))
+ {
+ if (MPFR_IS_NAN (p))
+ {
+ if (np->pad_type == LEADING_ZEROS)
+ /* don't want "0000nan", change to right justification padding
+ with left spaces instead */
+ np->pad_type = LEFT;
+
+ if (uppercase)
+ {
+ np->ip_size = MPFR_NAN_STRING_LENGTH;
+ str = (char *) (*__gmp_allocate_func) (1 + np->ip_size);
+ strcpy (str, MPFR_NAN_STRING_UC);
+ np->ip_ptr = register_string (np->sl, str);
+ }
+ else
+ {
+ np->ip_size = MPFR_NAN_STRING_LENGTH;
+ str = (char *) (*__gmp_allocate_func) (1 + np->ip_size);
+ strcpy (str, MPFR_NAN_STRING_LC);
+ np->ip_ptr = register_string (np->sl, str);
+ }
+ }
+ else if (MPFR_IS_INF (p))
+ {
+ if (np->pad_type == LEADING_ZEROS)
+ /* don't want "0000inf", change to right justification padding
+ with left spaces instead */
+ np->pad_type = LEFT;
+
+ if (MPFR_IS_NEG (p))
+ np->sign = '-';
+
+ if (uppercase)
+ {
+ np->ip_size = MPFR_INF_STRING_LENGTH;
+ str = (char *) (*__gmp_allocate_func) (1 + np->ip_size);
+ strcpy (str, MPFR_INF_STRING_UC);
+ np->ip_ptr = register_string (np->sl, str);
+ }
+ else
+ {
+ np->ip_size = MPFR_INF_STRING_LENGTH;
+ str = (char *) (*__gmp_allocate_func) (1 + np->ip_size);
+ strcpy (str, MPFR_INF_STRING_LC);
+ np->ip_ptr = register_string (np->sl, str);
+ }
+ }
+ else
+ /* p == 0 */
+ {
+ /* note: for 'g' spec, zero is always displayed with 'f'-style with
+ precision spec.prec - 1 and the trailing zeros are removed unless
+ the flag '#' is used. */
+ if (MPFR_IS_NEG (p))
+ /* signed zero */
+ np->sign = '-';
+ else if (spec.showsign || spec.space)
+ np->sign = spec.showsign ? '+' : ' ';
+
+ if (spec.spec == 'a' || spec.spec == 'A')
+ /* prefix part */
+ {
+ np->prefix_size = 2;
+ str = (char *) (*__gmp_allocate_func) (1 + np->prefix_size);
+ str[0] = '0';
+ str[1] = uppercase ? 'X' : 'x';
+ str[2] = '\0';
+ np->prefix_ptr = register_string (np->sl, str);
+ }
+
+ /* integral part */
+ np->ip_size = 1;
+ str = (char *) (*__gmp_allocate_func) (1 + np->ip_size);
+ str[0] = '0';
+ str[1] = '\0';
+ np->ip_ptr = register_string (np->sl, str);
+
+ if (spec.prec > 0
+ && ((spec.spec != 'g' && spec.spec != 'G') || spec.alt))
+ /* fractional part */
+ {
+ np->point = MPFR_DECIMAL_POINT;
+ /* For 'g'/'G', C99 prints zero in style 'f' with precision
+ SPEC.PREC - 1 (the leading '0' is the one significant
+ digit). The previous test used &&, which is always false
+ (a char cannot equal both 'g' and 'G'), so SPEC.PREC zeros
+ were emitted instead of SPEC.PREC - 1. */
+ np->fp_trailing_zeros = (spec.spec == 'g' || spec.spec == 'G') ?
+ spec.prec - 1 : spec.prec;
+ }
+ else if (spec.alt)
+ np->point = MPFR_DECIMAL_POINT;
+
+ if (spec.spec == 'a' || spec.spec == 'A' || spec.spec == 'b'
+ || spec.spec == 'e' || spec.spec == 'E')
+ /* exponent part */
+ {
+ np->exp_size = (spec.spec == 'e' || spec.spec == 'E') ? 4 : 3;
+ str = (char *) (*__gmp_allocate_func) (1 + np->exp_size);
+ if (spec.spec == 'e' || spec.spec == 'E')
+ strcpy (str, uppercase ? "E+00" : "e+00");
+ else
+ strcpy (str, uppercase ? "P+0" : "p+0");
+ np->exp_ptr = register_string (np->sl, str);
+ }
+ }
+ }
+ else
+ /* regular p, p != 0 */
+ {
+ if (spec.spec == 'a' || spec.spec == 'A' || spec.spec == 'b')
+ {
+ if (regular_ab (np, p, spec) == -1)
+ goto error;
+ }
+ else if (spec.spec == 'f' || spec.spec == 'F')
+ {
+ if (spec.prec == -1)
+ spec.prec = 6; /* C99 default precision for %f */
+ if (regular_fg (np, p, spec) == -1)
+ goto error;
+ }
+ else if (spec.spec == 'e' || spec.spec == 'E')
+ {
+ if (regular_eg (np, p, spec) == -1)
+ goto error;
+ }
+ else
+ /* %g case */
+ {
+ /* Use the C99 rules:
+ if T > X >= -4 then the conversion is with style 'f'/'F' and
+ precision T-(X+1).
+ otherwise, the conversion is with style 'e'/'E' and
+ precision T-1.
+ where T is the threshold computed below and X is the exponent
+ that would be displayed with style 'e' and precision T-1. */
+ int threshold;
+ mpfr_exp_t x;
+
+ threshold = (spec.prec < 0) ? 6 : (spec.prec == 0) ? 1 : spec.prec;
+ round_to_10_power (&x, p, threshold - 1, spec.rnd_mode);
+
+ if (threshold > x && x >= -4)
+ {
+ /* the conversion is with style 'f' */
+ spec.prec = threshold - x - 1;
+
+ if (regular_fg (np, p, spec) == -1)
+ goto error;
+ }
+ else
+ {
+ spec.prec = threshold - 1;
+
+ if (regular_eg (np, p, spec) == -1)
+ goto error;
+ }
+ }
+ }
+
+ /* compute the number of characters to be written verifying it is not too
+ much */
+ total = np->sign ? 1 : 0;
+ total += np->prefix_size;
+ total += np->ip_size;
+ if (MPFR_UNLIKELY (total < 0 || total > INT_MAX))
+ goto error;
+ total += np->ip_trailing_zeros;
+ if (MPFR_UNLIKELY (total < 0 || total > INT_MAX))
+ goto error;
+ if (np->thousands_sep)
+ /* ' flag, style f and the thousands separator in current locale is not
+ reduced to the null character */
+ total += (np->ip_size + np->ip_trailing_zeros) / 3;
+ if (MPFR_UNLIKELY (total < 0 || total > INT_MAX))
+ goto error;
+ if (np->point)
+ ++total;
+ total += np->fp_leading_zeros;
+ if (MPFR_UNLIKELY (total < 0 || total > INT_MAX))
+ goto error;
+ total += np->fp_size;
+ if (MPFR_UNLIKELY (total < 0 || total > INT_MAX))
+ goto error;
+ total += np->fp_trailing_zeros;
+ if (MPFR_UNLIKELY (total < 0 || total > INT_MAX))
+ goto error;
+ total += np->exp_size;
+ if (MPFR_UNLIKELY (total < 0 || total > INT_MAX))
+ goto error;
+
+ if (spec.width > total)
+ /* pad with spaces or zeros depending on np->pad_type */
+ {
+ np->pad_size = spec.width - total;
+ total += np->pad_size; /* here total == spec.width,
+ so 0 < total < INT_MAX */
+ }
+
+ return total;
+
+ error:
+ clear_string_list (np->sl);
+ np->prefix_ptr = NULL;
+ np->ip_ptr = NULL;
+ np->fp_ptr = NULL;
+ np->exp_ptr = NULL;
+ return -1;
+}
+
+/* sprnt_fp prints a mpfr_t according to spec.spec specification.
+ It partitions P into its textual parts, then emits them into BUF in
+ printf order: left padding, sign, prefix, zero padding, integral part
+ (with optional thousands separators), point, fractional part, exponent,
+ right padding.
+
+ return the size of the string (not counting the terminating '\0')
+ return -1 if the built string is too long (i.e. has more than
+ INT_MAX characters). */
+static int
+sprnt_fp (struct string_buffer *buf, mpfr_srcptr p,
+ const struct printf_spec spec)
+{
+ int length;
+ struct number_parts np;
+
+ length = partition_number (&np, p, spec);
+ if (length < 0)
+ return -1; /* np.sl already freed by partition_number on error */
+
+ /* right justification padding with left spaces */
+ if (np.pad_type == LEFT && np.pad_size != 0)
+ buffer_pad (buf, ' ', np.pad_size);
+
+ /* sign character (may be '-', '+', or ' ') */
+ if (np.sign)
+ buffer_pad (buf, np.sign, 1);
+
+ /* prefix part */
+ if (np.prefix_ptr)
+ buffer_cat (buf, np.prefix_ptr, np.prefix_size);
+
+ /* right justification padding with leading zeros */
+ if (np.pad_type == LEADING_ZEROS && np.pad_size != 0)
+ buffer_pad (buf, '0', np.pad_size);
+
+ /* integral part (may also be "nan" or "inf") */
+ MPFR_ASSERTN (np.ip_ptr != NULL); /* never empty */
+ if (MPFR_UNLIKELY (np.thousands_sep))
+ buffer_sandwich (buf, np.ip_ptr, np.ip_size, np.ip_trailing_zeros,
+ np.thousands_sep);
+ else
+ {
+ buffer_cat (buf, np.ip_ptr, np.ip_size);
+
+ /* trailing zeros in integral part */
+ if (np.ip_trailing_zeros != 0)
+ buffer_pad (buf, '0', np.ip_trailing_zeros);
+ }
+
+ /* decimal point */
+ if (np.point)
+ buffer_pad (buf, np.point, 1);
+
+ /* leading zeros in fractional part */
+ if (np.fp_leading_zeros != 0)
+ buffer_pad (buf, '0', np.fp_leading_zeros);
+
+ /* significant digits in fractional part */
+ if (np.fp_ptr)
+ buffer_cat (buf, np.fp_ptr, np.fp_size);
+
+ /* trailing zeros in fractional part */
+ if (np.fp_trailing_zeros != 0)
+ buffer_pad (buf, '0', np.fp_trailing_zeros);
+
+ /* exponent part */
+ if (np.exp_ptr)
+ buffer_cat (buf, np.exp_ptr, np.exp_size);
+
+ /* left justication padding with right spaces */
+ if (np.pad_type == RIGHT && np.pad_size != 0)
+ buffer_pad (buf, ' ', np.pad_size);
+
+ /* release all strings registered by partition_number */
+ clear_string_list (np.sl);
+ return length;
+}
+
+/* mpfr_vasprintf: allocate and build in *PTR the string described by the
+ gmp_printf-style format FMT and the arguments in AP. Plain conversions
+ are delegated to gmp_vasprintf; mpfr_t ('R' type) and mpfr_prec_t ('P'
+ type) conversions are handled here. Returns the number of characters
+ written (excluding '\0'), or -1 on error/overflow with *PTR set to NULL. */
+int
+mpfr_vasprintf (char **ptr, const char *fmt, va_list ap)
+{
+ struct string_buffer buf;
+ size_t nbchar;
+
+ /* informations on the conversion specification filled by the parser */
+ struct printf_spec spec;
+ /* flag raised when previous part of fmt need to be processed by
+ gmp_vsnprintf */
+ int xgmp_fmt_flag;
+ /* beginning and end of the previous unprocessed part of fmt */
+ const char *start, *end;
+ /* pointer to arguments for gmp_vasprintf */
+ va_list ap2;
+
+ MPFR_SAVE_EXPO_DECL (expo);
+ MPFR_SAVE_EXPO_MARK (expo);
+
+ nbchar = 0;
+ buffer_init (&buf, 4096);
+ xgmp_fmt_flag = 0;
+ va_copy (ap2, ap);
+ start = fmt;
+ while (*fmt)
+ {
+ /* Look for the next format specification */
+ while ((*fmt) && (*fmt != '%'))
+ ++fmt;
+
+ if (*fmt == '\0')
+ break;
+
+ if (*++fmt == '%')
+ /* %%: go one step further otherwise the second '%' would be
+ considered as a new conversion specification introducing
+ character */
+ {
+ ++fmt;
+ xgmp_fmt_flag = 1;
+ continue;
+ }
+
+ end = fmt - 1;
+
+ /* format string analysis */
+ specinfo_init (&spec);
+ fmt = parse_flags (fmt, &spec);
+
+ READ_INT (ap, fmt, spec, width, width_analysis);
+ width_analysis:
+ if (spec.width < 0)
+ /* a negative width (from a '*' argument) means left justification */
+ {
+ spec.left = 1;
+ spec.width = -spec.width;
+ MPFR_ASSERTN (spec.width < INT_MAX);
+ }
+ if (*fmt == '.')
+ {
+ const char *f = ++fmt;
+ READ_INT (ap, fmt, spec, prec, prec_analysis);
+ prec_analysis:
+ if (f == fmt)
+ spec.prec = -1;
+ }
+ else
+ spec.prec = -1;
+
+ fmt = parse_arg_type (fmt, &spec);
+ if (spec.arg_type == UNSUPPORTED)
+ /* the current architecture doesn't support this type */
+ {
+ goto error;
+ }
+ else if (spec.arg_type == MPFR_ARG)
+ /* optional rounding-mode character after the 'R' type */
+ {
+ switch (*fmt)
+ {
+ case '\0':
+ break;
+ case '*':
+ ++fmt;
+ spec.rnd_mode = (mpfr_rnd_t) va_arg (ap, int);
+ break;
+ case 'D':
+ ++fmt;
+ spec.rnd_mode = MPFR_RNDD;
+ break;
+ case 'U':
+ ++fmt;
+ spec.rnd_mode = MPFR_RNDU;
+ break;
+ case 'Y':
+ ++fmt;
+ spec.rnd_mode = MPFR_RNDA;
+ break;
+ case 'Z':
+ ++fmt;
+ spec.rnd_mode = MPFR_RNDZ;
+ break;
+ case 'N':
+ ++fmt;
+ /* fallthrough: 'N' means round to nearest, the default */
+ default:
+ spec.rnd_mode = MPFR_RNDN;
+ }
+ }
+
+ spec.spec = *fmt;
+ if (!specinfo_is_valid (spec))
+ goto error;
+
+ if (*fmt)
+ fmt++;
+
+ /* Format processing */
+ if (spec.spec == '\0')
+ /* end of the format string */
+ break;
+ else if (spec.spec == 'n')
+ /* put the number of characters written so far in the location pointed
+ by the next va_list argument; the types of pointer accepted are the
+ same as in GMP (except unsupported quad_t) plus pointer to a mpfr_t
+ so as to be able to accept the same format strings. */
+ {
+ void *p;
+ size_t nchar;
+
+ p = va_arg (ap, void *);
+ FLUSH (xgmp_fmt_flag, start, end, ap2, &buf);
+ va_end (ap2);
+ start = fmt;
+ nchar = buf.curr - buf.start;
+
+ switch (spec.arg_type)
+ {
+ case CHAR_ARG:
+ *(char *) p = (char) nchar;
+ break;
+ case SHORT_ARG:
+ *(short *) p = (short) nchar;
+ break;
+ case LONG_ARG:
+ *(long *) p = (long) nchar;
+ break;
+#ifdef HAVE_LONG_LONG
+ case LONG_LONG_ARG:
+ *(long long *) p = (long long) nchar;
+ break;
+#endif
+#ifdef _MPFR_H_HAVE_INTMAX_T
+ case INTMAX_ARG:
+ *(intmax_t *) p = (intmax_t) nchar;
+ break;
+#endif
+ case SIZE_ARG:
+ *(size_t *) p = nchar;
+ break;
+ case PTRDIFF_ARG:
+ *(ptrdiff_t *) p = (ptrdiff_t) nchar;
+ break;
+ case MPF_ARG:
+ mpf_set_ui ((mpf_ptr) p, (unsigned long) nchar);
+ break;
+ case MPQ_ARG:
+ mpq_set_ui ((mpq_ptr) p, (unsigned long) nchar, 1L);
+ break;
+ case MP_LIMB_ARG:
+ *(mp_limb_t *) p = (mp_limb_t) nchar;
+ break;
+ case MP_LIMB_ARRAY_ARG:
+ {
+ mp_limb_t *q = (mp_limb_t *) p;
+ mp_size_t n;
+ n = va_arg (ap, mp_size_t);
+ if (n < 0)
+ n = -n;
+ else if (n == 0)
+ break;
+
+ /* we assume here that mp_limb_t is wider than int */
+ *q = (mp_limb_t) nchar;
+ while (--n != 0)
+ {
+ q++;
+ *q = (mp_limb_t) 0;
+ }
+ }
+ break;
+ case MPZ_ARG:
+ mpz_set_ui ((mpz_ptr) p, (unsigned long) nchar);
+ break;
+
+ case MPFR_ARG:
+ mpfr_set_ui ((mpfr_ptr) p, (unsigned long) nchar,
+ spec.rnd_mode);
+ break;
+
+ default:
+ *(int *) p = (int) nchar;
+ }
+ va_copy (ap2, ap); /* after the switch, due to MP_LIMB_ARRAY_ARG
+ case */
+ }
+ else if (spec.arg_type == MPFR_PREC_ARG)
+ /* output mpfr_prec_t variable */
+ {
+ char *s;
+ char format[MPFR_PREC_FORMAT_SIZE + 6]; /* see examples below */
+ size_t length;
+ mpfr_prec_t prec;
+ prec = va_arg (ap, mpfr_prec_t);
+
+ FLUSH (xgmp_fmt_flag, start, end, ap2, &buf);
+ va_end (ap2);
+ va_copy (ap2, ap);
+ start = fmt;
+
+ /* construct format string, like "%*.*hu" "%*.*u" or "%*.*lu" */
+ format[0] = '%';
+ format[1] = '*';
+ format[2] = '.';
+ format[3] = '*';
+ format[4] = '\0';
+ strcat (format, MPFR_PREC_FORMAT_TYPE);
+ format[4 + MPFR_PREC_FORMAT_SIZE] = spec.spec;
+ format[5 + MPFR_PREC_FORMAT_SIZE] = '\0';
+ length = gmp_asprintf (&s, format, spec.width, spec.prec, prec);
+ if (buf.size <= INT_MAX - length)
+ {
+ buffer_cat (&buf, s, length);
+ mpfr_free_str (s);
+ }
+ else
+ {
+ mpfr_free_str (s);
+ goto overflow_error;
+ }
+ }
+ else if (spec.arg_type == MPFR_ARG)
+ /* output a mpfr_t variable */
+ {
+ mpfr_srcptr p;
+
+ p = va_arg (ap, mpfr_srcptr);
+
+ FLUSH (xgmp_fmt_flag, start, end, ap2, &buf);
+ va_end (ap2);
+ va_copy (ap2, ap);
+ start = fmt;
+
+ switch (spec.spec)
+ {
+ case 'a':
+ case 'A':
+ case 'b':
+ case 'e':
+ case 'E':
+ case 'f':
+ case 'F':
+ case 'g':
+ case 'G':
+ if (sprnt_fp (&buf, p, spec) < 0)
+ goto overflow_error;
+ break;
+
+ default:
+ /* unsupported specifier */
+ goto error;
+ }
+ }
+ else
+ /* gmp_printf specification, step forward in the va_list */
+ {
+ CONSUME_VA_ARG (spec, ap);
+ xgmp_fmt_flag = 1;
+ }
+ }
+
+ if (start != fmt)
+ FLUSH (xgmp_fmt_flag, start, fmt, ap2, &buf);
+
+ va_end (ap2);
+ nbchar = buf.curr - buf.start;
+ MPFR_ASSERTD (nbchar == strlen (buf.start));
+ /* shrink the buffer to the exact needed size */
+ buf.start =
+ (char *) (*__gmp_reallocate_func) (buf.start, buf.size, nbchar + 1);
+ buf.size = nbchar + 1; /* update needed for __gmp_free_func below when
+ nbchar is too large (overflow_error) */
+ *ptr = buf.start;
+
+ /* If nbchar is larger than INT_MAX, the ISO C99 standard is silent, but
+ POSIX says concerning the snprintf() function:
+ "[EOVERFLOW] The value of n is greater than {INT_MAX} or the
+ number of bytes needed to hold the output excluding the
+ terminating null is greater than {INT_MAX}." See:
+ http://www.opengroup.org/onlinepubs/009695399/functions/fprintf.html
+ But it doesn't say anything concerning the other printf-like functions.
+ A defect report has been submitted to austin-review-l (item 2532).
+ So, for the time being, we return a negative value and set the erange
+ flag, and set errno to EOVERFLOW in POSIX system. */
+ if (nbchar <= INT_MAX)
+ {
+ MPFR_SAVE_EXPO_FREE (expo);
+ return nbchar;
+ }
+
+ overflow_error:
+ MPFR_SAVE_EXPO_UPDATE_FLAGS(expo, MPFR_FLAGS_ERANGE);
+#ifdef EOVERFLOW
+ errno = EOVERFLOW;
+#endif
+
+ error:
+ MPFR_SAVE_EXPO_FREE (expo);
+ *ptr = NULL;
+ (*__gmp_free_func) (buf.start, buf.size);
+
+ return -1;
+}
+
+#endif /* HAVE_STDARG */
diff --git a/src/version.c b/src/version.c
new file mode 100644
index 000000000..60e567acb
--- /dev/null
+++ b/src/version.c
@@ -0,0 +1,29 @@
+/* mpfr_get_version -- MPFR version
+
+Copyright 2004, 2005, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#include "mpfr-impl.h"
+
+/* Return the MPFR version as a static, read-only string.
+ The caller must not modify or free the returned pointer. */
+const char *
+mpfr_get_version (void)
+{
+ static const char mpfr_version_string[] = "3.1.0-dev";
+
+ return mpfr_version_string;
+}
diff --git a/src/volatile.c b/src/volatile.c
new file mode 100644
index 000000000..a48505597
--- /dev/null
+++ b/src/volatile.c
@@ -0,0 +1,36 @@
+/* __gmpfr_longdouble_volatile -- support for LONGDOUBLE_NAN_ACTION.
+
+ THE FUNCTIONS IN THIS FILE ARE FOR INTERNAL USE ONLY. THEY'RE ALMOST
+ CERTAIN TO BE SUBJECT TO INCOMPATIBLE CHANGES OR DISAPPEAR COMPLETELY IN
+ FUTURE MPFR RELEASES.
+
+Copyright 2003, 2004, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#include "mpfr-impl.h"
+
+/* Only needed sometimes. Identity function kept in its own translation
+ unit so the call is opaque to the compiler: per the file header this
+ supports LONGDOUBLE_NAN_ACTION — presumably to force a long double out
+ of an extended-precision register (confirm against mpfr-impl.h). */
+#ifdef WANT_GMPFR_LONGDOUBLE_VOLATILE
+long double
+__gmpfr_longdouble_volatile (long double x)
+{
+ return x;
+}
+#endif
diff --git a/src/yn.c b/src/yn.c
new file mode 100644
index 000000000..5e53b8e02
--- /dev/null
+++ b/src/yn.c
@@ -0,0 +1,420 @@
+/* mpfr_y0, mpfr_y1, mpfr_yn -- Bessel functions of 2nd kind, integer order.
+ http://www.opengroup.org/onlinepubs/009695399/functions/y0.html
+
+Copyright 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#define MPFR_NEED_LONGLONG_H
+#include "mpfr-impl.h"
+
+static int mpfr_yn_asympt (mpfr_ptr, long, mpfr_srcptr, mpfr_rnd_t);
+
+/* Y_0(z): Bessel function of the second kind, order 0 (wrapper around
+ mpfr_yn with n = 0). */
+int
+mpfr_y0 (mpfr_ptr res, mpfr_srcptr z, mpfr_rnd_t r)
+{
+ return mpfr_yn (res, 0, z, r);
+}
+
+/* Y_1(z): Bessel function of the second kind, order 1 (wrapper around
+ mpfr_yn with n = 1). */
+int
+mpfr_y1 (mpfr_ptr res, mpfr_srcptr z, mpfr_rnd_t r)
+{
+ return mpfr_yn (res, 1, z, r);
+}
+
+/* compute in s an approximation of S1 = sum((n-k)!/k!*y^k,k=0..n)
+ return e >= 0 the exponent difference between the maximal value of |s|
+ during the for loop and the final value of |s|.
+*/
+static mpfr_exp_t
+mpfr_yn_s1 (mpfr_ptr s, mpfr_srcptr y, unsigned long n)
+{
+ unsigned long k;
+ mpz_t f;
+ mpfr_exp_t e, emax;
+
+ mpz_init_set_ui (f, 1);
+ /* we compute n!*S1 = sum(a[k]*y^k,k=0..n) where a[k] = n!*(n-k)!/k!,
+ a[0] = (n!)^2, a[1] = n!*(n-1)!, ..., a[n-1] = n, a[n] = 1 */
+ mpfr_set_ui (s, 1, MPFR_RNDN); /* a[n] */
+ /* emax records the largest exponent reached by |s| during the Horner
+ loop below, so the caller can bound the cancellation error */
+ emax = MPFR_EXP(s);
+ for (k = n; k-- > 0;)
+ {
+ /* a[k]/a[k+1] = (n-k)!/k!/(n-(k+1))!*(k+1)! = (k+1)*(n-k) */
+ mpfr_mul (s, s, y, MPFR_RNDN);
+ mpz_mul_ui (f, f, n - k);
+ mpz_mul_ui (f, f, k + 1);
+ /* invariant: f = a[k] */
+ mpfr_add_z (s, s, f, MPFR_RNDN);
+ e = MPFR_EXP(s);
+ if (e > emax)
+ emax = e;
+ }
+ /* now we have f = (n!)^2 */
+ /* the square root is exact since f is the perfect square (n!)^2 */
+ mpz_sqrt (f, f);
+ mpfr_div_z (s, s, f, MPFR_RNDN); /* divide n!*S1 by n! to get S1 */
+ mpz_clear (f);
+ return emax - MPFR_EXP(s);
+}
+
+/* compute in s an approximation of
+ S3 = c*sum((h(k)+h(n+k))*y^k/k!/(n+k)!,k=0..infinity)
+ where h(k) = 1 + 1/2 + ... + 1/k
+ k=0: h(n)
+ k=1: 1+h(n+1)
+ k=2: 3/2+h(n+2)
+ Returns e such that the error is bounded by 2^e ulp(s).
+*/
+static mpfr_exp_t
+mpfr_yn_s3 (mpfr_ptr s, mpfr_srcptr y, mpfr_srcptr c, unsigned long n)
+{
+ unsigned long k, zz;
+ mpfr_t t, u;
+ mpz_t p, q; /* p/q will store h(k)+h(n+k) */
+ mpfr_exp_t exps, expU;
+
+ zz = mpfr_get_ui (y, MPFR_RNDU); /* y = z^2/4 */
+ MPFR_ASSERTN (zz < ULONG_MAX - 2);
+ zz += 2; /* z^2 <= 2^zz */
+ mpz_init_set_ui (p, 0);
+ mpz_init_set_ui (q, 1);
+ /* initialize p/q to h(n) */
+ for (k = 1; k <= n; k++)
+ {
+ /* p/q + 1/k = (k*p+q)/(q*k) */
+ mpz_mul_ui (p, p, k);
+ mpz_add (p, p, q);
+ mpz_mul_ui (q, q, k);
+ }
+ mpfr_init2 (t, MPFR_PREC(s));
+ mpfr_init2 (u, MPFR_PREC(s));
+ mpfr_fac_ui (t, n, MPFR_RNDN);
+ mpfr_div (t, c, t, MPFR_RNDN); /* c/n! */
+ mpfr_mul_z (u, t, p, MPFR_RNDN);
+ mpfr_div_z (s, u, q, MPFR_RNDN); /* k=0 term: c*h(n)/n! */
+ exps = MPFR_EXP (s);
+ /* expU records the largest exponent of any partial quantity, used to
+ bound the final cancellation error */
+ expU = exps;
+ for (k = 1; ;k ++)
+ {
+ /* update t */
+ mpfr_mul (t, t, y, MPFR_RNDN);
+ mpfr_div_ui (t, t, k, MPFR_RNDN);
+ mpfr_div_ui (t, t, n + k, MPFR_RNDN); /* t = c*y^k/k!/(n+k)! */
+ /* update p/q:
+ p/q + 1/k + 1/(n+k) = [p*k*(n+k) + q*(n+k) + q*k]/(q*k*(n+k)) */
+ mpz_mul_ui (p, p, k);
+ mpz_mul_ui (p, p, n + k);
+ mpz_addmul_ui (p, q, n + 2 * k);
+ mpz_mul_ui (q, q, k);
+ mpz_mul_ui (q, q, n + k);
+ mpfr_mul_z (u, t, p, MPFR_RNDN);
+ mpfr_div_z (u, u, q, MPFR_RNDN); /* u = k-th term of the series */
+ exps = MPFR_EXP (u);
+ if (exps > expU)
+ expU = exps;
+ mpfr_add (s, s, u, MPFR_RNDN);
+ exps = MPFR_EXP (s);
+ if (exps > expU)
+ expU = exps;
+ /* stop once the new term is below one ulp of the sum and the
+ remaining terms keep decreasing (test based on z^2 <= 2^zz) */
+ if (MPFR_EXP (u) + (mpfr_exp_t) MPFR_PREC (u) < MPFR_EXP (s) &&
+ zz / (2 * k) < k + n)
+ break;
+ }
+ mpfr_clear (t);
+ mpfr_clear (u);
+ mpz_clear (p);
+ mpz_clear (q);
+ exps = expU - MPFR_EXP (s);
+ /* the error is bounded by (6k^2+33/2k+11) 2^exps ulps
+ <= 8*(k+2)^2 2^exps ulps */
+ return 3 + 2 * MPFR_INT_CEIL_LOG2(k + 2) + exps;
+}
+
+/* Set res to Y_n(z), the Bessel function of the second kind of (signed)
+ integer order n, rounded in direction r. Returns the ternary inexact
+ value (0 iff the result is exact). */
+int
+mpfr_yn (mpfr_ptr res, long n, mpfr_srcptr z, mpfr_rnd_t r)
+{
+ int inex;
+ unsigned long absn;
+
+ MPFR_LOG_FUNC (("x[%#R]=%R n=%d rnd=%d", z, z, n, r),
+ ("y[%#R]=%R", res, res));
+
+ absn = SAFE_ABS (unsigned long, n);
+
+ if (MPFR_UNLIKELY (MPFR_IS_SINGULAR (z)))
+ {
+ if (MPFR_IS_NAN (z))
+ {
+ MPFR_SET_NAN (res); /* y(n,NaN) = NaN */
+ MPFR_RET_NAN;
+ }
+ /* y(n,z) tends to zero when z goes to +Inf, oscillating around
+ 0. We choose to return +0 in that case. */
+ else if (MPFR_IS_INF (z))
+ {
+ if (MPFR_SIGN(z) > 0)
+ return mpfr_set_ui (res, 0, r);
+ else /* y(n,-Inf) = NaN */
+ {
+ MPFR_SET_NAN (res);
+ MPFR_RET_NAN;
+ }
+ }
+ else /* y(n,z) tends to -Inf for n >= 0 or n even, to +Inf otherwise,
+ when z goes to zero */
+ {
+ MPFR_SET_INF(res);
+ if (n >= 0 || (n & 1) == 0)
+ MPFR_SET_NEG(res);
+ else
+ MPFR_SET_POS(res);
+ MPFR_RET(0);
+ }
+ }
+
+ /* for z < 0, y(n,z) is imaginary except when j(n,|z|) = 0, which we
+ assume does not happen for a rational z. */
+ if (MPFR_SIGN(z) < 0)
+ {
+ MPFR_SET_NAN (res);
+ MPFR_RET_NAN;
+ }
+
+ /* now z is not singular, and z > 0 */
+
+ /* Deal with tiny arguments. We have:
+ y0(z) = 2 log(z)/Pi + 2 (euler - log(2))/Pi + O(log(z)*z^2), more
+ precisely for 0 <= z <= 1/2, with g(z) = 2/Pi + 2(euler-log(2))/Pi/log(z),
+ g(z) - 0.41*z^2 < y0(z)/log(z) < g(z)
+ thus since log(z) is negative:
+ g(z)*log(z) < y0(z) < (g(z) - z^2/2)*log(z)
+ and since |g(z)| >= 0.63 for 0 <= z <= 1/2, the relative error on
+ y0(z)/log(z) is bounded by 0.41*z^2/0.63 <= 0.66*z^2.
+ Note: we use both the main term in log(z) and the constant term, because
+ otherwise the relative error would be only in 1/log(|log(z)|).
+ */
+ if (n == 0 && MPFR_EXP(z) < - (mpfr_exp_t) (MPFR_PREC(res) / 2))
+ {
+ mpfr_t l, h, t, logz;
+ mpfr_prec_t prec;
+ int ok, inex2;
+
+ prec = MPFR_PREC(res) + 10;
+ mpfr_init2 (l, prec);
+ mpfr_init2 (h, prec);
+ mpfr_init2 (t, prec);
+ mpfr_init2 (logz, prec);
+ /* first enclose log(z) + euler - log(2) = log(z/2) + euler */
+ mpfr_log (logz, z, MPFR_RNDD); /* lower bound of log(z) */
+ mpfr_set (h, logz, MPFR_RNDU); /* exact */
+ mpfr_nextabove (h); /* upper bound of log(z) */
+ mpfr_const_euler (t, MPFR_RNDD); /* lower bound of euler */
+ mpfr_add (l, logz, t, MPFR_RNDD); /* lower bound of log(z) + euler */
+ mpfr_nextabove (t); /* upper bound of euler */
+ mpfr_add (h, h, t, MPFR_RNDU); /* upper bound of log(z) + euler */
+ mpfr_const_log2 (t, MPFR_RNDU); /* upper bound of log(2) */
+ mpfr_sub (l, l, t, MPFR_RNDD); /* lower bound of log(z/2) + euler */
+ mpfr_nextbelow (t); /* lower bound of log(2) */
+ mpfr_sub (h, h, t, MPFR_RNDU); /* upper bound of log(z/2) + euler */
+ mpfr_const_pi (t, MPFR_RNDU); /* upper bound of Pi */
+ mpfr_div (l, l, t, MPFR_RNDD); /* lower bound of (log(z/2)+euler)/Pi */
+ mpfr_nextbelow (t); /* lower bound of Pi */
+ /* NOTE(review): the comment below claims an upper bound, but h < 0
+ here (log(z/2) dominates) and dividing by a LOWER bound of Pi with
+ MPFR_RNDD makes the quotient more negative, i.e. a lower bound.
+ One would expect MPFR_RNDU here -- confirm against algorithms.tex;
+ the final l == h equality check may hide the issue in practice. */
+ mpfr_div (h, h, t, MPFR_RNDD); /* upper bound of (log(z/2)+euler)/Pi */
+ mpfr_mul_2ui (l, l, 1, MPFR_RNDD); /* lower bound on g(z)*log(z) */
+ mpfr_mul_2ui (h, h, 1, MPFR_RNDU); /* upper bound on g(z)*log(z) */
+ /* we now have l <= g(z)*log(z) <= h, and we need to add -z^2/2*log(z)
+ to h */
+ mpfr_mul (t, z, z, MPFR_RNDU); /* upper bound on z^2 */
+ /* since logz is negative, a lower bound corresponds to an upper bound
+ for its absolute value */
+ mpfr_neg (t, t, MPFR_RNDD);
+ mpfr_div_2ui (t, t, 1, MPFR_RNDD);
+ mpfr_mul (t, t, logz, MPFR_RNDU); /* upper bound on z^2/2*log(z) */
+ /* an underflow may happen in the above instructions, clear flag */
+ mpfr_clear_underflow ();
+ mpfr_add (h, h, t, MPFR_RNDU);
+ inex = mpfr_prec_round (l, MPFR_PREC(res), r);
+ inex2 = mpfr_prec_round (h, MPFR_PREC(res), r);
+ /* we need h=l and inex=inex2 */
+ ok = (inex == inex2) && (mpfr_cmp (l, h) == 0);
+ if (ok)
+ mpfr_set (res, h, r); /* exact */
+ mpfr_clear (l);
+ mpfr_clear (h);
+ mpfr_clear (t);
+ mpfr_clear (logz);
+ if (ok)
+ return inex;
+ /* otherwise fall through to the general algorithm below */
+ }
+
+ /* small argument check for y1(z) = -2/Pi/z + O(log(z)):
+ for 0 <= z <= 1, |y1(z) + 2/Pi/z| <= 0.25 */
+ if (n == 1 && MPFR_EXP(z) + 1 < - (mpfr_exp_t) MPFR_PREC(res))
+ {
+ mpfr_t y;
+ mpfr_prec_t prec;
+ mpfr_exp_t err1;
+ int ok;
+ MPFR_BLOCK_DECL (flags);
+
+ /* since 2/Pi > 0.5, and |y1(z)| >= |2/Pi/z|, if z <= 2^(-emax-1),
+ then |y1(z)| > 2^emax */
+ prec = MPFR_PREC(res) + 10;
+ mpfr_init2 (y, prec);
+ mpfr_const_pi (y, MPFR_RNDU); /* Pi*(1+u)^2, where here and below u
+ represents a quantity <= 1/2^prec */
+ mpfr_mul (y, y, z, MPFR_RNDU); /* Pi*z * (1+u)^4, upper bound */
+ MPFR_BLOCK (flags, mpfr_ui_div (y, 2, y, MPFR_RNDZ));
+ /* 2/Pi/z * (1+u)^6, lower bound, with possible overflow */
+ if (MPFR_OVERFLOW (flags))
+ {
+ mpfr_clear (y);
+ return mpfr_overflow (res, r, -1);
+ }
+ mpfr_neg (y, y, MPFR_RNDN); /* y1(z) is ~ -2/Pi/z, negative */
+ /* (1+u)^6 can be written 1+7u [for another value of u], thus the
+ error on 2/Pi/z is less than 7ulp(y). The truncation error is less
+ than 1/4, thus if ulp(y)>=1/4, the total error is less than 8ulp(y),
+ otherwise it is less than 1/4+7/8 <= 2. */
+ if (MPFR_EXP(y) + 2 >= MPFR_PREC(y)) /* ulp(y) >= 1/4 */
+ err1 = 3;
+ else /* ulp(y) <= 1/8 */
+ err1 = (mpfr_exp_t) MPFR_PREC(y) - MPFR_EXP(y) + 1;
+ ok = MPFR_CAN_ROUND (y, prec - err1, MPFR_PREC(res), r);
+ if (ok)
+ inex = mpfr_set (res, y, r);
+ mpfr_clear (y);
+ if (ok)
+ return inex;
+ /* otherwise fall through to the general algorithm below */
+ }
+
+ /* we can use the asymptotic expansion as soon as z > p log(2)/2,
+ but to get some margin we use it for z > p/2 */
+ if (mpfr_cmp_ui (z, MPFR_PREC(res) / 2 + 3) > 0)
+ {
+ inex = mpfr_yn_asympt (res, n, z, r);
+ if (inex != 0)
+ return inex;
+ }
+
+ /* General case: Y_n(z) = (S1 + S2 - S3) / Pi, computed with a Ziv loop
+ increasing the working precision until rounding is possible */
+ {
+ mpfr_prec_t prec;
+ mpfr_exp_t err1, err2, err3;
+ mpfr_t y, s1, s2, s3;
+ MPFR_ZIV_DECL (loop);
+
+ mpfr_init (y);
+ mpfr_init (s1);
+ mpfr_init (s2);
+ mpfr_init (s3);
+
+ prec = MPFR_PREC(res) + 2 * MPFR_INT_CEIL_LOG2 (MPFR_PREC (res)) + 13;
+ MPFR_ZIV_INIT (loop, prec);
+ for (;;)
+ {
+ mpfr_set_prec (y, prec);
+ mpfr_set_prec (s1, prec);
+ mpfr_set_prec (s2, prec);
+ mpfr_set_prec (s3, prec);
+
+ mpfr_mul (y, z, z, MPFR_RNDN);
+ mpfr_div_2ui (y, y, 2, MPFR_RNDN); /* z^2/4 */
+
+ /* store (z/2)^n temporarily in s2 */
+ mpfr_pow_ui (s2, z, absn, MPFR_RNDN);
+ mpfr_div_2si (s2, s2, absn, MPFR_RNDN);
+
+ /* compute S1 * (z/2)^(-n) */
+ if (n == 0)
+ {
+ mpfr_set_ui (s1, 0, MPFR_RNDN);
+ err1 = 0;
+ }
+ else
+ err1 = mpfr_yn_s1 (s1, y, absn - 1);
+ mpfr_div (s1, s1, s2, MPFR_RNDN); /* (z/2)^(-n) * S1 */
+ /* See algorithms.tex: the relative error on s1 is bounded by
+ (3n+3)*2^(e+1-prec). */
+ err1 = MPFR_INT_CEIL_LOG2 (3 * absn + 3) + err1 + 1;
+ /* rel_err(s1) <= 2^(err1-prec), thus err(s1) <= 2^err1 ulps */
+
+ /* compute (z/2)^n * S3 */
+ mpfr_neg (y, y, MPFR_RNDN); /* -z^2/4 */
+ err3 = mpfr_yn_s3 (s3, y, s2, absn); /* (z/2)^n * S3 */
+ /* the error on s3 is bounded by 2^err3 ulps */
+
+ /* add s1+s3 */
+ err1 += MPFR_EXP(s1);
+ mpfr_add (s1, s1, s3, MPFR_RNDN);
+ /* the error is bounded by 1/2 + 2^err1*2^(- EXP(s1))
+ + 2^err3*2^(EXP(s3) - EXP(s1)) */
+ err3 += MPFR_EXP(s3);
+ err1 = (err3 > err1) ? err3 + 1 : err1 + 1;
+ err1 -= MPFR_EXP(s1);
+ err1 = (err1 >= 0) ? err1 + 1 : 1;
+ /* now the error on s1 is bounded by 2^err1*ulp(s1) */
+
+ /* compute S2 */
+ mpfr_div_2ui (s2, z, 1, MPFR_RNDN); /* z/2 */
+ mpfr_log (s2, s2, MPFR_RNDN); /* log(z/2) */
+ mpfr_const_euler (s3, MPFR_RNDN);
+ err2 = MPFR_EXP(s2) > MPFR_EXP(s3) ? MPFR_EXP(s2) : MPFR_EXP(s3);
+ mpfr_add (s2, s2, s3, MPFR_RNDN); /* log(z/2) + gamma */
+ err2 -= MPFR_EXP(s2);
+ mpfr_mul_2ui (s2, s2, 1, MPFR_RNDN); /* 2*(log(z/2) + gamma) */
+ mpfr_jn (s3, absn, z, MPFR_RNDN); /* Jn(z) */
+ mpfr_mul (s2, s2, s3, MPFR_RNDN); /* 2*(log(z/2) + gamma)*Jn(z) */
+ err2 += 4; /* the error on s2 is bounded by 2^err2 ulps, see
+ algorithms.tex */
+
+ /* add all three sums */
+ err1 += MPFR_EXP(s1); /* the error on s1 is bounded by 2^err1 */
+ err2 += MPFR_EXP(s2); /* the error on s2 is bounded by 2^err2 */
+ mpfr_sub (s2, s2, s1, MPFR_RNDN); /* s2 - (s1+s3) */
+ err2 = (err1 > err2) ? err1 + 1 : err2 + 1;
+ err2 -= MPFR_EXP(s2);
+ err2 = (err2 >= 0) ? err2 + 1 : 1;
+ /* now the error on s2 is bounded by 2^err2*ulp(s2) */
+ mpfr_const_pi (y, MPFR_RNDN); /* error bounded by 1 ulp */
+ mpfr_div (s2, s2, y, MPFR_RNDN); /* error bounded by
+ 2^(err2+1)*ulp(s2) */
+ err2 ++;
+
+ if (MPFR_LIKELY (MPFR_CAN_ROUND (s2, prec - err2, MPFR_PREC(res), r)))
+ break;
+ MPFR_ZIV_NEXT (loop, prec);
+ }
+ MPFR_ZIV_FREE (loop);
+
+ /* for negative odd n, Y_n = -Y_|n| */
+ inex = (n >= 0 || (n & 1) == 0)
+ ? mpfr_set (res, s2, r)
+ : mpfr_neg (res, s2, r);
+
+ mpfr_clear (y);
+ mpfr_clear (s1);
+ mpfr_clear (s2);
+ mpfr_clear (s3);
+ }
+
+ return inex;
+}
+
+#define MPFR_YN
+#include "jyn_asympt.c"
diff --git a/src/zeta.c b/src/zeta.c
new file mode 100644
index 000000000..72c332a91
--- /dev/null
+++ b/src/zeta.c
@@ -0,0 +1,463 @@
+/* mpfr_zeta -- compute the Riemann Zeta function
+
+Copyright 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by Jean-Luc Re'my and the Spaces project, INRIA Lorraine.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#define MPFR_NEED_LONGLONG_H
+#include "mpfr-impl.h"
+
+/*
+ Parameters:
+ s - the input floating-point number
+ n, p - parameters from the algorithm
+ tc - an array of p floating-point numbers tc[1]..tc[p]
+ Output:
+ b is the result, i.e.
+ sum(tc[i]*product((s+2j)*(s+2j-1)/n^2,j=1..i-1), i=1..p)*s*n^(-s-1)
+*/
+static void
+mpfr_zeta_part_b (mpfr_t b, mpfr_srcptr s, int n, int p, mpfr_t *tc)
+{
+ mpfr_t s1, d, u;
+ unsigned long n2;
+ int l, t;
+ MPFR_GROUP_DECL (group);
+
+ /* empty sum: return +0 */
+ if (p == 0)
+ {
+ MPFR_SET_ZERO (b);
+ MPFR_SET_POS (b);
+ return;
+ }
+
+ n2 = n * n;
+ MPFR_GROUP_INIT_3 (group, MPFR_PREC (b), s1, d, u);
+
+ /* evaluate the sum by a Horner-like scheme, starting from tc[p] */
+ /* t equals 2p-2, 2p-3, ... ; s1 equals s+t */
+ t = 2 * p - 2;
+ mpfr_set (d, tc[p], MPFR_RNDN);
+ for (l = 1; l < p; l++)
+ {
+ mpfr_add_ui (s1, s, t, MPFR_RNDN); /* s + (2p-2l) */
+ mpfr_mul (d, d, s1, MPFR_RNDN);
+ t = t - 1;
+ mpfr_add_ui (s1, s, t, MPFR_RNDN); /* s + (2p-2l-1) */
+ mpfr_mul (d, d, s1, MPFR_RNDN);
+ t = t - 1;
+ mpfr_div_ui (d, d, n2, MPFR_RNDN);
+ mpfr_add (d, d, tc[p-l], MPFR_RNDN);
+ /* since s is positive and the tc[i] have alternate signs,
+ the following is unlikely */
+ if (MPFR_UNLIKELY (mpfr_cmpabs (d, tc[p-l]) > 0))
+ mpfr_set (d, tc[p-l], MPFR_RNDN);
+ }
+ /* multiply the accumulated sum by s*n^(-s-1) */
+ mpfr_mul (d, d, s, MPFR_RNDN);
+ mpfr_add (s1, s, __gmpfr_one, MPFR_RNDN);
+ mpfr_neg (s1, s1, MPFR_RNDN);
+ mpfr_ui_pow (u, n, s1, MPFR_RNDN); /* n^(-s-1) */
+ mpfr_mul (b, d, u, MPFR_RNDN);
+
+ MPFR_GROUP_CLEAR (group);
+}
+
+/* Input: p - an integer
+ Output: fills tc[1..p], tc[i] = bernoulli(2i)/(2i)!
+ tc[1]=1/12, tc[2]=-1/720, tc[3]=1/30240, ...
+ (coefficients of the Euler-Maclaurin correction used by
+ mpfr_zeta_part_b; computed by a recurrence on the previous tc[l])
+*/
+static void
+mpfr_zeta_c (int p, mpfr_t *tc)
+{
+ mpfr_t d;
+ int k, l;
+
+ if (p > 0)
+ {
+ /* all tc[i] share the precision of tc[1] */
+ mpfr_init2 (d, MPFR_PREC (tc[1]));
+ mpfr_div_ui (tc[1], __gmpfr_one, 12, MPFR_RNDN);
+ for (k = 2; k <= p; k++)
+ {
+ mpfr_set_ui (d, k-1, MPFR_RNDN);
+ mpfr_div_ui (d, d, 12*k+6, MPFR_RNDN);
+ for (l=2; l < k; l++)
+ {
+ mpfr_div_ui (d, d, 4*(2*k-2*l+3)*(2*k-2*l+2), MPFR_RNDN);
+ mpfr_add (d, d, tc[l], MPFR_RNDN);
+ }
+ mpfr_div_ui (tc[k], d, 24, MPFR_RNDN);
+ /* the tc[k] alternate in sign */
+ MPFR_CHANGE_SIGN (tc[k]);
+ }
+ mpfr_clear (d);
+ }
+}
+
+/* Input: s - a floating-point number
+ n - an integer
+ Output: sum - a floating-point number approximating sum(1/i^s, i=1..n-1)
+ Note: the code actually adds the n-th term with weight 1/2 (the
+ Euler-Maclaurin half term), i.e. sum(1/i^s,i=1..n-1) + 1/2/n^s. */
+static void
+mpfr_zeta_part_a (mpfr_t sum, mpfr_srcptr s, int n)
+{
+ mpfr_t u, s1;
+ int i;
+ MPFR_GROUP_DECL (group);
+
+ MPFR_GROUP_INIT_2 (group, MPFR_PREC (sum), u, s1);
+
+ mpfr_neg (s1, s, MPFR_RNDN);
+ mpfr_ui_pow (u, n, s1, MPFR_RNDN); /* n^(-s) */
+ mpfr_div_2ui (u, u, 1, MPFR_RNDN); /* n^(-s)/2 */
+ mpfr_set (sum, u, MPFR_RNDN);
+ /* add i^(-s) for i = n-1 down to 2 (summing smallest terms first) */
+ for (i=n-1; i>1; i--)
+ {
+ mpfr_ui_pow (u, i, s1, MPFR_RNDN);
+ mpfr_add (sum, sum, u, MPFR_RNDN);
+ }
+ mpfr_add (sum, sum, __gmpfr_one, MPFR_RNDN); /* i=1 term */
+
+ MPFR_GROUP_CLEAR (group);
+}
+
+/* Input: s - a floating-point number >= 1/2.
+ rnd_mode - a rounding mode.
+ Assumes s is neither NaN nor Infinite.
+ Output: z - Zeta(s) rounded to the precision of z with direction rnd_mode
+ Returns the ternary inexact value.
+*/
+static int
+mpfr_zeta_pos (mpfr_t z, mpfr_srcptr s, mpfr_rnd_t rnd_mode)
+{
+ mpfr_t b, c, z_pre, f, s1;
+ double beta, sd, dnep;
+ mpfr_t *tc1;
+ mpfr_prec_t precz, precs, d, dint;
+ int p, n, l, add;
+ int inex;
+ MPFR_GROUP_DECL (group);
+ MPFR_ZIV_DECL (loop);
+
+ MPFR_ASSERTD (MPFR_IS_POS (s) && MPFR_GET_EXP (s) >= 0);
+
+ precz = MPFR_PREC (z);
+ precs = MPFR_PREC (s);
+
+ /* Zeta(x) = 1+1/2^x+1/3^x+1/4^x+1/5^x+O(1/6^x)
+ so with 2^(EXP(x)-1) <= x < 2^EXP(x)
+ So for x > 2^3, k^x > k^8, so 2/k^x < 2/k^8
+ Zeta(x) = 1 + 1/2^x*(1+(2/3)^x+(2/4)^x+...)
+ = 1 + 1/2^x*(1+sum((2/k)^x,k=3..infinity))
+ <= 1 + 1/2^x*(1+sum((2/k)^8,k=3..infinity))
+ And sum((2/k)^8,k=3..infinity) = -257+128*Pi^8/4725 ~= 0.0438035
+ So Zeta(x) <= 1 + 1/2^x*2 for x >= 8
+ The error is < 2^(-x+1) <= 2^(-2^(EXP(x)-1)+1) */
+ if (MPFR_GET_EXP (s) > 3)
+ {
+ mpfr_exp_t err;
+ err = MPFR_GET_EXP (s) - 1;
+ /* guard against shift overflow when computing 2^err */
+ if (err > (mpfr_exp_t) (sizeof (mpfr_exp_t)*CHAR_BIT-2))
+ err = MPFR_EMAX_MAX;
+ else
+ err = ((mpfr_exp_t)1) << err;
+ err = 1 - (-err+1); /* GET_EXP(one) - (-err+1) = err :) */
+ MPFR_FAST_COMPUTE_IF_SMALL_INPUT (z, __gmpfr_one, err, 0, 1,
+ rnd_mode, {});
+ }
+
+ d = precz + MPFR_INT_CEIL_LOG2(precz) + 10;
+
+ /* we want that s1 = s-1 is exact, i.e. we should have PREC(s1) >= EXP(s) */
+ dint = (mpfr_uexp_t) MPFR_GET_EXP (s);
+ mpfr_init2 (s1, MAX (precs, dint));
+ inex = mpfr_sub (s1, s, __gmpfr_one, MPFR_RNDN);
+ MPFR_ASSERTD (inex == 0);
+
+ /* case s=1 */
+ if (MPFR_IS_ZERO (s1))
+ {
+ MPFR_SET_INF (z);
+ MPFR_SET_POS (z);
+ MPFR_ASSERTD (inex == 0);
+ goto clear_and_return;
+ }
+
+ MPFR_GROUP_INIT_4 (group, MPFR_PREC_MIN, b, c, z_pre, f);
+
+ MPFR_ZIV_INIT (loop, d);
+ for (;;)
+ {
+ /* Principal loop: we compute, in z_pre,
+ an approximation of Zeta(s), that we send to can_round */
+ if (MPFR_GET_EXP (s1) <= -(mpfr_exp_t) ((mpfr_prec_t) (d-3)/2))
+ /* Branch 1: when s-1 is very small, one
+ uses the approximation Zeta(s)=1/(s-1)+gamma,
+ where gamma is Euler's constant */
+ {
+ dint = MAX (d + 3, precs);
+ MPFR_TRACE (printf ("branch 1\ninternal precision=%lu\n",
+ (unsigned long) dint));
+ MPFR_GROUP_REPREC_4 (group, dint, b, c, z_pre, f);
+ mpfr_div (z_pre, __gmpfr_one, s1, MPFR_RNDN);
+ mpfr_const_euler (f, MPFR_RNDN);
+ mpfr_add (z_pre, z_pre, f, MPFR_RNDN);
+ }
+ else /* Branch 2: Euler-Maclaurin summation */
+ {
+ size_t size;
+
+ MPFR_TRACE (printf ("branch 2\n"));
+ /* Computation of parameters n, p and working precision */
+ dnep = (double) d * LOG2;
+ sd = mpfr_get_d (s, MPFR_RNDN);
+ /* beta = dnep + 0.61 + sd * log (6.2832 / sd);
+ but a larger value is ok */
+#define LOG6dot2832 1.83787940484160805532
+ beta = dnep + 0.61 + sd * (LOG6dot2832 - LOG2 *
+ __gmpfr_floor_log2 (sd));
+ if (beta <= 0.0)
+ {
+ p = 0;
+ /* n = 1 + (int) (exp ((dnep - LOG2) / sd)); */
+ n = 1 + (int) __gmpfr_ceil_exp2 ((d - 1.0) / sd);
+ }
+ else
+ {
+ p = 1 + (int) beta / 2;
+ n = 1 + (int) ((sd + 2.0 * (double) p - 1.0) / 6.2832);
+ }
+ MPFR_TRACE (printf ("\nn=%d\np=%d\n",n,p));
+ /* add = 4 + floor(1.5 * log(d) / log (2)).
+ We should have add >= 10, which is always fulfilled since
+ d = precz + 11 >= 12, thus ceil(log2(d)) >= 4 */
+ add = 4 + (3 * MPFR_INT_CEIL_LOG2 (d)) / 2;
+ MPFR_ASSERTD(add >= 10);
+ dint = d + add;
+ if (dint < precs)
+ dint = precs;
+
+ MPFR_TRACE (printf ("internal precision=%lu\n",
+ (unsigned long) dint));
+
+ size = (p + 1) * sizeof(mpfr_t);
+ tc1 = (mpfr_t*) (*__gmp_allocate_func) (size);
+ for (l=1; l<=p; l++)
+ mpfr_init2 (tc1[l], dint);
+ MPFR_GROUP_REPREC_4 (group, dint, b, c, z_pre, f);
+
+ MPFR_TRACE (printf ("precision of z = %lu\n",
+ (unsigned long) precz));
+
+ /* Computation of the coefficients c_k */
+ mpfr_zeta_c (p, tc1);
+ /* Computation of the 3 parts of the function Zeta. */
+ mpfr_zeta_part_a (z_pre, s, n);
+ mpfr_zeta_part_b (b, s, n, p, tc1);
+ /* s1 = s-1 is already computed above */
+ mpfr_div (c, __gmpfr_one, s1, MPFR_RNDN);
+ mpfr_ui_pow (f, n, s1, MPFR_RNDN);
+ mpfr_div (c, c, f, MPFR_RNDN); /* n^(1-s)/(s-1) */
+ MPFR_TRACE (MPFR_DUMP (c));
+ mpfr_add (z_pre, z_pre, c, MPFR_RNDN);
+ mpfr_add (z_pre, z_pre, b, MPFR_RNDN);
+ for (l=1; l<=p; l++)
+ mpfr_clear (tc1[l]);
+ (*__gmp_free_func) (tc1, size);
+ /* End branch 2 */
+ }
+
+ MPFR_TRACE (MPFR_DUMP (z_pre));
+ if (MPFR_LIKELY (MPFR_CAN_ROUND (z_pre, d-3, precz, rnd_mode)))
+ break;
+ MPFR_ZIV_NEXT (loop, d);
+ }
+ MPFR_ZIV_FREE (loop);
+
+ inex = mpfr_set (z, z_pre, rnd_mode);
+
+ MPFR_GROUP_CLEAR (group);
+ clear_and_return:
+ mpfr_clear (s1);
+
+ return inex;
+}
+
+/* Set z to Zeta(s), rounded in direction rnd_mode; returns the ternary
+ inexact value. For s >= 1/2 delegates to mpfr_zeta_pos; otherwise uses
+ the reflection formula (see below). */
+int
+mpfr_zeta (mpfr_t z, mpfr_srcptr s, mpfr_rnd_t rnd_mode)
+{
+ mpfr_t z_pre, s1, y, p;
+ double sd, eps, m1, c;
+ long add;
+ mpfr_prec_t precz, prec1, precs, precs1;
+ int inex;
+ MPFR_GROUP_DECL (group);
+ MPFR_ZIV_DECL (loop);
+ MPFR_SAVE_EXPO_DECL (expo);
+
+ MPFR_LOG_FUNC (("s[%#R]=%R rnd=%d", s, s, rnd_mode),
+ ("z[%#R]=%R inexact=%d", z, z, inex));
+
+ /* Zero, Nan or Inf ? */
+ if (MPFR_UNLIKELY (MPFR_IS_SINGULAR (s)))
+ {
+ if (MPFR_IS_NAN (s))
+ {
+ MPFR_SET_NAN (z);
+ MPFR_RET_NAN;
+ }
+ else if (MPFR_IS_INF (s))
+ {
+ if (MPFR_IS_POS (s))
+ return mpfr_set_ui (z, 1, MPFR_RNDN); /* Zeta(+Inf) = 1 */
+ MPFR_SET_NAN (z); /* Zeta(-Inf) = NaN */
+ MPFR_RET_NAN;
+ }
+ else /* s is zero */
+ {
+ MPFR_ASSERTD (MPFR_IS_ZERO (s));
+ /* Zeta(0) = -1/2, always exact */
+ mpfr_set_ui (z, 1, rnd_mode);
+ mpfr_div_2ui (z, z, 1, rnd_mode);
+ MPFR_CHANGE_SIGN (z);
+ MPFR_RET (0);
+ }
+ }
+
+ /* s is neither Nan, nor Inf, nor Zero */
+
+ /* check tiny s: we have zeta(s) = -1/2 - 1/2 log(2 Pi) s + ... around s=0,
+ and for |s| <= 0.074, we have |zeta(s) + 1/2| <= |s|.
+ Thus if |s| <= 1/4*ulp(1/2), we can deduce the correct rounding
+ (the 1/4 covers the case where |zeta(s)| < 1/2 and rounding to nearest).
+ A sufficient condition is that EXP(s) + 1 < -PREC(z). */
+ if (MPFR_EXP(s) + 1 < - (mpfr_exp_t) MPFR_PREC(z))
+ {
+ int signs = MPFR_SIGN(s);
+ mpfr_set_si_2exp (z, -1, -1, rnd_mode); /* -1/2 */
+ if (rnd_mode == MPFR_RNDA)
+ rnd_mode = MPFR_RNDD; /* the result is around -1/2, thus negative */
+ if ((rnd_mode == MPFR_RNDU || rnd_mode == MPFR_RNDZ) && signs < 0)
+ {
+ mpfr_nextabove (z); /* z = -1/2 + epsilon */
+ inex = 1;
+ }
+ else if (rnd_mode == MPFR_RNDD && signs > 0)
+ {
+ mpfr_nextbelow (z); /* z = -1/2 - epsilon */
+ inex = -1;
+ }
+ else
+ {
+ if (rnd_mode == MPFR_RNDU) /* s > 0: z = -1/2 */
+ inex = 1;
+ else if (rnd_mode == MPFR_RNDD)
+ inex = -1; /* s < 0: z = -1/2 */
+ else /* (MPFR_RNDZ and s > 0) or MPFR_RNDN: z = -1/2 */
+ inex = (signs > 0) ? 1 : -1;
+ }
+ return mpfr_check_range (z, inex, rnd_mode);
+ }
+
+ /* Check for case s= -2n: trivial zeros of zeta, result is +0 */
+ if (MPFR_IS_NEG (s))
+ {
+ mpfr_t tmp;
+ /* tmp aliases s with exponent lowered by 1, i.e. tmp = s/2;
+ tmp must not be cleared or modified */
+ tmp[0] = *s;
+ MPFR_EXP (tmp) = MPFR_EXP (s) - 1;
+ if (mpfr_integer_p (tmp))
+ {
+ MPFR_SET_ZERO (z);
+ MPFR_SET_POS (z);
+ MPFR_RET (0);
+ }
+ }
+
+ MPFR_SAVE_EXPO_MARK (expo);
+
+ /* Compute Zeta */
+ if (MPFR_IS_POS (s) && MPFR_GET_EXP (s) >= 0) /* Case s >= 1/2 */
+ inex = mpfr_zeta_pos (z, s, rnd_mode);
+ else /* use reflection formula
+ zeta(s) = 2^s*Pi^(s-1)*sin(Pi*s/2)*gamma(1-s)*zeta(1-s) */
+ {
+ int overflow = 0;
+
+ precz = MPFR_PREC (z);
+ precs = MPFR_PREC (s);
+
+ /* Precision precs1 needed to represent 1 - s, and s + 2,
+ without any truncation */
+ precs1 = precs + 2 + MAX (0, - MPFR_GET_EXP (s));
+ sd = mpfr_get_d (s, MPFR_RNDN) - 1.0;
+ if (sd < 0.0)
+ sd = -sd; /* now sd = abs(s-1.0) */
+ /* Precision prec1 is the precision on elementary computations;
+ it ensures a final precision prec1 - add for zeta(s) */
+ /* eps = pow (2.0, - (double) precz - 14.0); */
+ eps = __gmpfr_ceil_exp2 (- (double) precz - 14.0);
+ m1 = 1.0 + MAX(1.0 / eps, 2.0 * sd) * (1.0 + eps);
+ c = (1.0 + eps) * (1.0 + eps * MAX(8.0, m1));
+ /* add = 1 + floor(log(c*c*c*(13 + m1))/log(2)); */
+ add = __gmpfr_ceil_log2 (c * c * c * (13.0 + m1));
+ prec1 = precz + add;
+ prec1 = MAX (prec1, precs1) + 10;
+
+ MPFR_GROUP_INIT_4 (group, prec1, z_pre, s1, y, p);
+ MPFR_ZIV_INIT (loop, prec1);
+ for (;;)
+ {
+ mpfr_sub (s1, __gmpfr_one, s, MPFR_RNDN);/* s1 = 1-s */
+ mpfr_zeta_pos (z_pre, s1, MPFR_RNDN); /* zeta(1-s) */
+ mpfr_gamma (y, s1, MPFR_RNDN); /* gamma(1-s) */
+ if (MPFR_IS_INF (y)) /* Zeta(s) < 0 for -4k-2 < s < -4k,
+ Zeta(s) > 0 for -4k < s < -4k+2 */
+ {
+ /* gamma(1-s) overflowed: deduce the sign of the overflowed
+ zeta value from the fractional part of s/4 */
+ mpfr_div_2ui (s1, s, 2, MPFR_RNDN); /* s/4, exact */
+ mpfr_frac (s1, s1, MPFR_RNDN); /* exact, -1 < s1 < 0 */
+ overflow = (mpfr_cmp_si_2exp (s1, -1, -1) > 0) ? -1 : 1;
+ break;
+ }
+ mpfr_mul (z_pre, z_pre, y, MPFR_RNDN); /* gamma(1-s)*zeta(1-s) */
+ mpfr_const_pi (p, MPFR_RNDD);
+ mpfr_mul (y, s, p, MPFR_RNDN);
+ mpfr_div_2ui (y, y, 1, MPFR_RNDN); /* s*Pi/2 */
+ mpfr_sin (y, y, MPFR_RNDN); /* sin(Pi*s/2) */
+ mpfr_mul (z_pre, z_pre, y, MPFR_RNDN);
+ mpfr_mul_2ui (y, p, 1, MPFR_RNDN); /* 2*Pi */
+ mpfr_neg (s1, s1, MPFR_RNDN); /* s-1 */
+ mpfr_pow (y, y, s1, MPFR_RNDN); /* (2*Pi)^(s-1) */
+ mpfr_mul (z_pre, z_pre, y, MPFR_RNDN);
+ mpfr_mul_2ui (z_pre, z_pre, 1, MPFR_RNDN);
+
+ if (MPFR_LIKELY (MPFR_CAN_ROUND (z_pre, prec1 - add, precz,
+ rnd_mode)))
+ break;
+
+ MPFR_ZIV_NEXT (loop, prec1);
+ MPFR_GROUP_REPREC_4 (group, prec1, z_pre, s1, y, p);
+ }
+ MPFR_ZIV_FREE (loop);
+ if (overflow != 0)
+ {
+ inex = mpfr_overflow (z, rnd_mode, overflow);
+ MPFR_SAVE_EXPO_UPDATE_FLAGS (expo, MPFR_FLAGS_OVERFLOW);
+ }
+ else
+ inex = mpfr_set (z, z_pre, rnd_mode);
+ MPFR_GROUP_CLEAR (group);
+ }
+
+ MPFR_SAVE_EXPO_FREE (expo);
+ return mpfr_check_range (z, inex, rnd_mode);
+}
diff --git a/src/zeta_ui.c b/src/zeta_ui.c
new file mode 100644
index 000000000..eea589380
--- /dev/null
+++ b/src/zeta_ui.c
@@ -0,0 +1,229 @@
+/* mpfr_zeta_ui -- compute the Riemann Zeta function for integer argument.
+
+Copyright 2005, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
+Contributed by the Arenaire and Caramel projects, INRIA.
+
+This file is part of the GNU MPFR Library.
+
+The GNU MPFR Library is free software; you can redistribute it and/or modify
+it under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 3 of the License, or (at your
+option) any later version.
+
+The GNU MPFR Library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with the GNU MPFR Library; see the file COPYING.LESSER. If not, see
+http://www.gnu.org/licenses/ or write to the Free Software Foundation, Inc.,
+51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. */
+
+#define MPFR_NEED_LONGLONG_H
+#include "mpfr-impl.h"
+
+/* Set z to Zeta(m) for an unsigned integer m, rounded in direction r;
+ returns the ternary inexact value. The general case uses an
+ alternating-series acceleration (the constant log(2)/log(3+sqrt(8))
+ below suggests Borwein's algorithm -- see algorithms.tex). */
+int
+mpfr_zeta_ui (mpfr_ptr z, unsigned long m, mpfr_rnd_t r)
+{
+ MPFR_ZIV_DECL (loop);
+
+ if (m == 0)
+ {
+ /* Zeta(0) = -1/2, exact */
+ mpfr_set_ui (z, 1, r);
+ mpfr_div_2ui (z, z, 1, r);
+ MPFR_CHANGE_SIGN (z);
+ MPFR_RET (0);
+ }
+ else if (m == 1)
+ {
+ /* Zeta has a pole at 1 */
+ MPFR_SET_INF (z);
+ MPFR_SET_POS (z);
+ return 0;
+ }
+ else /* m >= 2 */
+ {
+ mpfr_prec_t p = MPFR_PREC(z);
+ unsigned long n, k, err, kbits;
+ mpz_t d, t, s, q;
+ mpfr_t y;
+ int inex;
+
+ if (r == MPFR_RNDA)
+ r = MPFR_RNDU; /* since the result is always positive */
+
+ if (m >= p) /* 2^(-m) < ulp(1) = 2^(1-p). This means that
+ 2^(-m) <= 1/2*ulp(1). We have 3^(-m)+4^(-m)+... < 2^(-m)
+ i.e. zeta(m) < 1+2*2^(-m) for m >= 3 */
+
+ {
+ if (m == 2) /* necessarily p=2 */
+ return mpfr_set_ui_2exp (z, 13, -3, r);
+ else if (r == MPFR_RNDZ || r == MPFR_RNDD || (r == MPFR_RNDN && m > p))
+ {
+ mpfr_set_ui (z, 1, r);
+ return -1;
+ }
+ else
+ {
+ mpfr_set_ui (z, 1, r);
+ mpfr_nextabove (z);
+ return 1;
+ }
+ }
+
+ /* now treat also the case where zeta(m) - (1+1/2^m) < 1/2*ulp(1),
+ and the result is either 1+2^(-m) or 1+2^(-m)+2^(1-p). */
+ mpfr_init2 (y, 31);
+
+ if (m >= p / 2) /* otherwise 4^(-m) > 2^(-p) */
+ {
+ /* the following is a lower bound for log(3)/log(2) */
+ mpfr_set_str_binary (y, "1.100101011100000000011010001110");
+ mpfr_mul_ui (y, y, m, MPFR_RNDZ); /* lower bound for log2(3^m) */
+ if (mpfr_cmp_ui (y, p + 2) >= 0)
+ {
+ /* 3^(-m) < 1/4*ulp(1): the result is 1+2^(-m), possibly
+ bumped up one ulp for rounding up */
+ mpfr_clear (y);
+ mpfr_set_ui (z, 1, MPFR_RNDZ);
+ mpfr_div_2ui (z, z, m, MPFR_RNDZ);
+ mpfr_add_ui (z, z, 1, MPFR_RNDZ);
+ if (r != MPFR_RNDU)
+ return -1;
+ mpfr_nextabove (z);
+ return 1;
+ }
+ }
+
+ mpz_init (s);
+ mpz_init (d);
+ mpz_init (t);
+ mpz_init (q);
+
+ p += MPFR_INT_CEIL_LOG2(p); /* account of the n term in the error */
+
+ p += MPFR_INT_CEIL_LOG2(p) + 15; /* initial value */
+
+ MPFR_ZIV_INIT (loop, p);
+ for(;;)
+ {
+ /* 0.39321985067869744 = log(2)/log(3+sqrt(8)) */
+ n = 1 + (unsigned long) (0.39321985067869744 * (double) p);
+ err = n + 4;
+
+ mpfr_set_prec (y, p);
+
+ /* computation of the d[k] */
+ mpz_set_ui (s, 0);
+ mpz_set_ui (t, 1);
+ mpz_mul_2exp (t, t, 2 * n - 1); /* t[n] */
+ mpz_set (d, t);
+ for (k = n; k > 0; k--)
+ {
+ count_leading_zeros (kbits, k);
+ kbits = GMP_NUMB_BITS - kbits;
+ /* if k^m is too large, use mpz_tdiv_q */
+ if (m * kbits > 2 * GMP_NUMB_BITS)
+ {
+ /* if we know in advance that k^m > d, then floor(d/k^m) will
+ be zero below, so there is no need to compute k^m */
+ kbits = (kbits - 1) * m + 1;
+ /* k^m has at least kbits bits */
+ if (kbits > mpz_sizeinbase (d, 2))
+ mpz_set_ui (q, 0);
+ else
+ {
+ mpz_ui_pow_ui (q, k, m);
+ mpz_tdiv_q (q, d, q);
+ }
+ }
+ else /* use several mpz_tdiv_q_ui calls */
+ {
+ /* divide by k^m in chunks km that fit an unsigned long */
+ unsigned long km = k, mm = m - 1;
+ while (mm > 0 && km < ULONG_MAX / k)
+ {
+ km *= k;
+ mm --;
+ }
+ mpz_tdiv_q_ui (q, d, km);
+ while (mm > 0)
+ {
+ km = k;
+ mm --;
+ while (mm > 0 && km < ULONG_MAX / k)
+ {
+ km *= k;
+ mm --;
+ }
+ mpz_tdiv_q_ui (q, q, km);
+ }
+ }
+ /* alternating series: add for odd k, subtract for even k */
+ if (k % 2)
+ mpz_add (s, s, q);
+ else
+ mpz_sub (s, s, q);
+
+ /* we have d[k] = sum(t[i], i=k+1..n)
+ with t[i] = n*(n+i-1)!*4^i/(n-i)!/(2i)!
+ t[k-1]/t[k] = k*(2k-1)/(n-k+1)/(n+k-1)/2 */
+#if (GMP_NUMB_BITS == 32)
+#define KMAX 46341 /* max k such that k*(2k-1) < 2^32 */
+#elif (GMP_NUMB_BITS == 64)
+#define KMAX 3037000500
+#endif
+#ifdef KMAX
+ if (k <= KMAX)
+ mpz_mul_ui (t, t, k * (2 * k - 1));
+ else
+#endif
+ {
+ mpz_mul_ui (t, t, k);
+ mpz_mul_ui (t, t, 2 * k - 1);
+ }
+ mpz_fdiv_q_2exp (t, t, 1);
+ /* Warning: the test below assumes that an unsigned long
+ has no padding bits. */
+ if (n < 1UL << ((sizeof(unsigned long) * CHAR_BIT) / 2))
+ /* (n - k + 1) * (n + k - 1) < n^2 */
+ mpz_divexact_ui (t, t, (n - k + 1) * (n + k - 1));
+ else
+ {
+ mpz_divexact_ui (t, t, n - k + 1);
+ mpz_divexact_ui (t, t, n + k - 1);
+ }
+ mpz_add (d, d, t);
+ }
+
+ /* multiply by 1/(1-2^(1-m)) = 1 + 2^(1-m) + 2^(2-m) + ... */
+ mpz_fdiv_q_2exp (t, s, m - 1);
+ do
+ {
+ err ++;
+ mpz_add (s, s, t);
+ mpz_fdiv_q_2exp (t, t, m - 1);
+ }
+ while (mpz_cmp_ui (t, 0) > 0);
+
+ /* divide by d[n] */
+ mpz_mul_2exp (s, s, p);
+ mpz_tdiv_q (s, s, d);
+ mpfr_set_z (y, s, MPFR_RNDN);
+ mpfr_div_2ui (y, y, p, MPFR_RNDN);
+
+ err = MPFR_INT_CEIL_LOG2 (err);
+
+ if (MPFR_LIKELY(MPFR_CAN_ROUND (y, p - err, MPFR_PREC(z), r)))
+ break;
+
+ MPFR_ZIV_NEXT (loop, p);
+ }
+ MPFR_ZIV_FREE (loop);
+
+ mpz_clear (d);
+ mpz_clear (t);
+ mpz_clear (q);
+ mpz_clear (s);
+ inex = mpfr_set (z, y, r);
+ mpfr_clear (y);
+ return inex;
+ }
+}