author     tege <tege@gmplib.org>  2002-12-11 01:00:42 +0100
committer  tege <tege@gmplib.org>  2002-12-11 01:00:42 +0100
commit     a2c7b95ab7d78bcd4de8bcde1ab3d26fe31206b2 (patch)
tree       92072654ab06438f5aa968c139f923f09ab65ae1  /mpf/get_d.c
parent     8719e5ffae45b9be1ec386478ea87c1759cca8d3 (diff)
(limb2dbl): New macro for conversion to `double'.
Define it to something non-trivial for 64-bit hppa.
Diffstat (limited to 'mpf/get_d.c')
-rw-r--r--  mpf/get_d.c  18
1 file changed, 16 insertions(+), 2 deletions(-)
diff --git a/mpf/get_d.c b/mpf/get_d.c
index bb5d6f98f..b5aac18d8 100644
--- a/mpf/get_d.c
+++ b/mpf/get_d.c
@@ -22,6 +22,20 @@ MA 02111-1307, USA. */
#include "gmp.h"
#include "gmp-impl.h"
+/* HPPA 8000, 8200, 8500, and 8600 trap FCNV,UDW,DBL for values >= 2^63.  This
+   makes it slow.  Worse, the Linux kernel apparently uses untested code in its
+   trap handling routines, and gets the sign wrong.  Their compiler port
+   doesn't define __hppa as it should.  Here is a workaround: */
+#if (defined (__hppa) || defined (__hppa__)) && GMP_LIMB_BITS == 64
+#define limb2dbl(limb) \
+ ((limb) >> (GMP_LIMB_BITS - 1) != 0 \
+ ? 2.0 * (double) (mp_limb_signed_t) (((limb) >> 1) | ((limb) & 1)) \
+ : (double) (mp_limb_signed_t) (limb))
+#else
+#define limb2dbl(limb) \
+ (double) (limb)
+#endif
+
double
mpf_get_d (mpf_srcptr src)
{
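The macro added above avoids handing a value with its top bit set to the unsigned 64-bit to double conversion (the FCNV,UDW,DBL instruction that traps on these chips): such a limb is halved, converted through a signed 64-bit integer, and the result doubled, with the discarded low bit OR-ed back in as a sticky bit so rounding matches a direct conversion. A minimal standalone sketch of the same trick; the name limb_to_double and the <stdint.h> types are illustrative, not part of the commit:

#include <stdio.h>
#include <stdint.h>

/* Illustrative stand-in for limb2dbl (name and types are not GMP's):
   a limb with its top bit set is never passed to an unsigned 64-bit ->
   double conversion.  It is halved, converted through a signed 64-bit
   integer, and doubled; OR-ing the dropped low bit back in keeps it as
   a sticky bit so the rounding matches a direct conversion.  */
static double
limb_to_double (uint64_t limb)
{
  if ((limb >> 63) != 0)
    return 2.0 * (double) (int64_t) ((limb >> 1) | (limb & 1));
  return (double) (int64_t) limb;
}

int
main (void)
{
  uint64_t x = 0xFFFFFFFFFFFFFFFFull;  /* >= 2^63, the problematic range */
  printf ("%.17g\n", limb_to_double (x));
  return 0;
}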
@@ -38,10 +52,10 @@ mpf_get_d (mpf_srcptr src)
size = ABS (size);
qp = PTR(src);
- res = qp[size - 1];
+ res = limb2dbl (qp[size - 1]);
n_limbs_to_use = MIN (LIMBS_PER_DOUBLE, size);
for (i = 2; i <= n_limbs_to_use; i++)
- res = res * MP_BASE_AS_DOUBLE + qp[size - i];
+ res = res * MP_BASE_AS_DOUBLE + limb2dbl (qp[size - i]);
res = __gmp_scale2 (res, (EXP(src) - n_limbs_to_use) * GMP_NUMB_BITS);
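For context, mpf_get_d builds its result from the most significant limbs: the top limb seeds the accumulator, each following limb (up to LIMBS_PER_DOUBLE of them) is folded in by multiplying by MP_BASE_AS_DOUBLE, and __gmp_scale2 then shifts the result by the remaining exponent. This commit routes each limb through limb2dbl before it enters that accumulation. A rough standalone sketch of the accumulation, assuming 64-bit limbs, a made-up two-limb value, and ldexp standing in for __gmp_scale2; sign and zero handling are omitted:

#include <stdio.h>
#include <stdint.h>
#include <math.h>

/* Rough sketch of the accumulation in mpf_get_d, not the real code:
   convert the most significant limb, fold in the next limbs by scaling
   with 2^64 (MP_BASE_AS_DOUBLE for 64-bit limbs), then shift by the
   remaining exponent as __gmp_scale2 does.  The commit routes the two
   casts below through limb2dbl.  Limb values and the exponent are made
   up; sign and zero handling are omitted.  */
int
main (void)
{
  uint64_t limbs[2] = { 0x0123456789ABCDEFull, 0x8000000000000001ull };
  int size = 2;                 /* limb count, least significant first */
  int exp = 2;                  /* exponent in limbs, like EXP(src)    */
  int n_limbs_to_use, i;
  double res;

  res = (double) limbs[size - 1];
  n_limbs_to_use = size < 2 ? size : 2;  /* MIN (LIMBS_PER_DOUBLE, size),
                                            assuming LIMBS_PER_DOUBLE is 2 */
  for (i = 2; i <= n_limbs_to_use; i++)
    res = res * 18446744073709551616.0 + (double) limbs[size - i];

  res = ldexp (res, (exp - n_limbs_to_use) * 64);
  printf ("%.17g\n", res);
  return 0;
}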