-rw-r--r--  gcc/emit-rtl.c           |   6
-rw-r--r--  gcc/loop-doloop.c        |   2
-rw-r--r--  gcc/lto-streamer-in.c    |   2
-rw-r--r--  gcc/real.c               |   1
-rw-r--r--  gcc/rtl.h                |   5
-rw-r--r--  gcc/tree-vect-generic.c  |   2
-rw-r--r--  gcc/tree.c               |  55
-rw-r--r--  gcc/tree.h               |  16
-rw-r--r--  gcc/wide-int.cc          | 113
-rw-r--r--  gcc/wide-int.h           | 283
10 files changed, 288 insertions, 197 deletions
diff --git a/gcc/emit-rtl.c b/gcc/emit-rtl.c
index d72ba98b3dd..143f29882c3 100644
--- a/gcc/emit-rtl.c
+++ b/gcc/emit-rtl.c
@@ -579,8 +579,6 @@ immed_wide_int_const (const wide_int &v, enum machine_mode mode)
if (len < 2 || prec <= HOST_BITS_PER_WIDE_INT)
return gen_int_mode (v.elt (0), mode);
- wide_int copy = v;
- wi::clear_undef (copy, SIGNED);
#if TARGET_SUPPORTS_WIDE_INT
{
unsigned int i;
@@ -599,12 +597,12 @@ immed_wide_int_const (const wide_int &v, enum machine_mode mode)
CWI_PUT_NUM_ELEM (value, len);
for (i = 0; i < len; i++)
- CONST_WIDE_INT_ELT (value, i) = copy.elt (i);
+ CONST_WIDE_INT_ELT (value, i) = v.elt (i);
return lookup_const_wide_int (value);
}
#else
- return immed_double_const (copy.elt (0), copy.elt (1), mode);
+ return immed_double_const (v.elt (0), v.elt (1), mode);
#endif
}
diff --git a/gcc/loop-doloop.c b/gcc/loop-doloop.c
index daeb26cb66f..0cd5c98dc99 100644
--- a/gcc/loop-doloop.c
+++ b/gcc/loop-doloop.c
@@ -674,7 +674,7 @@ doloop_optimize (struct loop *loop)
|| !wi::fits_shwi_p (iter))
iterations_max = const0_rtx;
else
- iterations_max = GEN_INT (iter.to_shwi ());
+ iterations_max = immed_wide_int_const (iter, mode);
level = get_loop_level (loop) + 1;
/* Generate looping insn. If the pattern FAILs then give up trying
diff --git a/gcc/lto-streamer-in.c b/gcc/lto-streamer-in.c
index ae418efefdf..d847c6421b4 100644
--- a/gcc/lto-streamer-in.c
+++ b/gcc/lto-streamer-in.c
@@ -1272,7 +1272,7 @@ lto_input_tree_1 (struct lto_input_block *ib, struct data_in *data_in,
for (i = 0; i < len; i++)
a[i] = streamer_read_hwi (ib);
result = wide_int_to_tree (type, wide_int::from_array
- (a, len, TYPE_PRECISION (type), false));
+ (a, len, TYPE_PRECISION (type)));
streamer_tree_cache_append (data_in->reader_cache, result, hash);
}
else if (tag == LTO_tree_scc)
diff --git a/gcc/real.c b/gcc/real.c
index ffb7213aedf..0aba3086dba 100644
--- a/gcc/real.c
+++ b/gcc/real.c
@@ -2248,7 +2248,6 @@ real_from_integer (REAL_VALUE_TYPE *r, enum machine_mode mode,
/* Clear out top bits so elt will work with precisions that aren't
a multiple of HOST_BITS_PER_WIDE_INT. */
val = wide_int::from (val, len, sgn);
- wi::clear_undef (val, sgn);
len = len / HOST_BITS_PER_WIDE_INT;
SET_REAL_EXP (r, len * HOST_BITS_PER_WIDE_INT + e);
diff --git a/gcc/rtl.h b/gcc/rtl.h
index fe78797c6fc..073b6f25b6b 100644
--- a/gcc/rtl.h
+++ b/gcc/rtl.h
@@ -1431,14 +1431,19 @@ wi::int_traits <rtx_mode_t>::decompose (HOST_WIDE_INT *,
switch (GET_CODE (x.first))
{
case CONST_INT:
+ if (precision < HOST_BITS_PER_WIDE_INT)
+ gcc_checking_assert (INTVAL (x.first) == sext_hwi (INTVAL (x.first), precision));
+
return wi::storage_ref (&INTVAL (x.first), 1, precision);
case CONST_WIDE_INT:
return wi::storage_ref (&CONST_WIDE_INT_ELT (x.first, 0),
CONST_WIDE_INT_NUNITS (x.first), precision);
+#if TARGET_SUPPORTS_WIDE_INT == 0
case CONST_DOUBLE:
return wi::storage_ref (&CONST_DOUBLE_LOW (x.first), 2, precision);
+#endif
default:
gcc_unreachable ();
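
The new gcc_checking_assert in the CONST_INT case only holds if CONST_INTs are already stored in canonical, sign-extended form. A standalone sketch (not GCC code) of the invariant being checked, assuming a 64-bit HOST_WIDE_INT; sext64 is a local stand-in for GCC's sext_hwi, and the 8-bit precision is an arbitrary example:

#include <cassert>
#include <cstdint>

/* Sign-extend the low PREC bits of V, assuming 0 < PREC < 64 and an
   arithmetic right shift, as GCC's own sext_hwi does.  */
static int64_t sext64 (int64_t v, unsigned int prec)
{
  return (int64_t) ((uint64_t) v << (64 - prec)) >> (64 - prec);
}

int main ()
{
  unsigned int precision = 8;       /* e.g. a QImode constant */
  int64_t canonical = -1;           /* all eight bits set, stored sign-extended */
  int64_t not_canonical = 255;      /* same bit pattern, but zero-extended */
  assert (canonical == sext64 (canonical, precision));          /* passes the check */
  assert (not_canonical != sext64 (not_canonical, precision));  /* would trip it */
  return 0;
}
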
diff --git a/gcc/tree-vect-generic.c b/gcc/tree-vect-generic.c
index 00489dbbe22..3c2d69d4ed6 100644
--- a/gcc/tree-vect-generic.c
+++ b/gcc/tree-vect-generic.c
@@ -64,7 +64,7 @@ build_replicated_const (tree type, tree inner_type, HOST_WIDE_INT value)
a[i] = low;
return wide_int_to_tree
- (type, wide_int::from_array (a, n, TYPE_PRECISION (type), false));
+ (type, wide_int::from_array (a, n, TYPE_PRECISION (type)));
}
static GTY(()) tree vector_inner_type;
diff --git a/gcc/tree.c b/gcc/tree.c
index 895c58607b7..fc9f0f56848 100644
--- a/gcc/tree.c
+++ b/gcc/tree.c
@@ -1112,7 +1112,6 @@ force_fit_type (tree type, const wide_int_ref &cst,
|| (overflowable > 0 && sign == SIGNED))
{
wide_int tmp = wide_int::from (cst, TYPE_PRECISION (type), sign);
- wi::clear_undef (tmp, sign);
int l = tmp.get_len ();
tree t = make_int_cst (l);
if (l > 1)
@@ -1205,11 +1204,11 @@ wide_int_to_tree (tree type, const wide_int_ref &pcst)
}
wide_int cst = wide_int::from (pcst, prec, sgn);
- /* The following call makes sure that all tree-cst's are canonical.
- i.e. it really does sign or zero extend the top block of the
- value if the precision of the type is not an even multiple of the
- size of an HWI. */
- wi::clear_undef (cst, sgn);
+ int len = int (cst.get_len ());
+ int small_prec = prec & (HOST_BITS_PER_WIDE_INT - 1);
+ bool recanonize = sgn == UNSIGNED
+ && (prec + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT == len
+ && small_prec;
switch (TREE_CODE (type))
{
@@ -1291,18 +1290,31 @@ wide_int_to_tree (tree type, const wide_int_ref &pcst)
t = TREE_VEC_ELT (TYPE_CACHED_VALUES (type), ix);
if (t)
{
- /* Make sure no one is clobbering the shared constant. */
+ /* Make sure no one is clobbering the shared constant. We
+ must be careful here because tree-csts and wide-ints are
+ not canonicalized in the same way. */
gcc_assert (TREE_TYPE (t) == type);
- gcc_assert (TREE_INT_CST_NUNITS (t) == int (cst.get_len ()));
- for (i = 0; i < TREE_INT_CST_NUNITS (t); i++)
+ gcc_assert (TREE_INT_CST_NUNITS (t) == len);
+ if (recanonize)
+ {
+ len--;
+ gcc_assert (sext_hwi (TREE_INT_CST_ELT (t, len), small_prec)
+ == cst.elt (len));
+ }
+ for (i = 0; i < len; i++)
gcc_assert (TREE_INT_CST_ELT (t, i) == cst.elt (i));
}
else
{
/* Create a new shared int. */
t = make_int_cst (cst.get_len ());
- TREE_INT_CST_NUNITS (t) = cst.get_len ();
- for (i = 0; i < TREE_INT_CST_NUNITS (t); i++)
+ TREE_INT_CST_NUNITS (t) = len;
+ if (recanonize)
+ {
+ len--;
+ TREE_INT_CST_ELT (t, len) = zext_hwi (cst.elt (len), small_prec);
+ }
+ for (i = 0; i < len; i++)
TREE_INT_CST_ELT (t, i) = cst.elt (i);
TREE_TYPE (t) = type;
@@ -1316,7 +1328,10 @@ wide_int_to_tree (tree type, const wide_int_ref &pcst)
/* Use the cache of larger shared ints. */
void **slot;
- TREE_INT_CST_ELT (int_cst_node, 0) = cst.elt (0);
+ if (recanonize)
+ TREE_INT_CST_ELT (int_cst_node, 0) = zext_hwi (cst.elt (0), small_prec);
+ else
+ TREE_INT_CST_ELT (int_cst_node, 0) = cst.elt (0);
TREE_TYPE (int_cst_node) = type;
slot = htab_find_slot (int_cst_hash_table, int_cst_node, INSERT);
@@ -1336,8 +1351,14 @@ wide_int_to_tree (tree type, const wide_int_ref &pcst)
for the gc to take care of. There will not be enough of them
to worry about. */
void **slot;
- tree nt = make_int_cst (cst.get_len ());
- for (unsigned int i = 0; i < cst.get_len (); i++)
+ tree nt = make_int_cst (len);
+ TREE_INT_CST_NUNITS (nt) = len;
+ if (recanonize)
+ {
+ len--;
+ TREE_INT_CST_ELT (nt, len) = zext_hwi (cst.elt (len), small_prec);
+ }
+ for (int i = 0; i < len; i++)
TREE_INT_CST_ELT (nt, i) = cst.elt (i);
TREE_TYPE (nt) = type;
@@ -9560,13 +9581,11 @@ build_common_tree_nodes (bool signed_char, bool short_double)
#endif
/* Define a boolean type. This type only represents boolean values but
- may be larger than char depending on the value of BOOL_TYPE_SIZE.
- Front ends which want to override this size (i.e. Java) can redefine
- boolean_type_node before calling build_common_tree_nodes_2. */
+ may be larger than char depending on the value of BOOL_TYPE_SIZE. */
boolean_type_node = make_unsigned_type (BOOL_TYPE_SIZE);
TREE_SET_CODE (boolean_type_node, BOOLEAN_TYPE);
- TYPE_MAX_VALUE (boolean_type_node) = build_int_cst (boolean_type_node, 1);
TYPE_PRECISION (boolean_type_node) = 1;
+ TYPE_MAX_VALUE (boolean_type_node) = build_int_cst (boolean_type_node, 1);
/* Define what type to use for size_t. */
if (strcmp (SIZE_TYPE, "unsigned int") == 0)
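
The recanonize logic above exists because the two representations disagree about the upper bits of the top block: the wide-int rep sign-extends the excess bits of its top block, while an INTEGER_CST of an unsigned type keeps them zero-extended. A standalone sketch (not GCC code) of what wide_int_to_tree now stores, assuming a 64-bit HOST_WIDE_INT and a hypothetical 13-bit unsigned type; sext64/zext64 stand in for GCC's sext_hwi/zext_hwi:

#include <cstdint>
#include <cstdio>

static int64_t sext64 (int64_t v, unsigned int prec)   /* assume 0 < prec < 64 */
{ return (int64_t) ((uint64_t) v << (64 - prec)) >> (64 - prec); }

static uint64_t zext64 (uint64_t v, unsigned int prec)
{ return v & (((uint64_t) 1 << prec) - 1); }

int main ()
{
  unsigned int prec = 13;                     /* small_prec = 13, one block needed */
  int64_t wide_elt = sext64 (0x1fff, prec);   /* canonical wide-int block: -1 */
  uint64_t cst_elt = zext64 ((uint64_t) wide_elt, prec);   /* stored in the tree: 8191 */
  printf ("wide-int elt = %lld, TREE_INT_CST_ELT = %llu\n",
          (long long) wide_elt, (unsigned long long) cst_elt);
  return 0;
}
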
diff --git a/gcc/tree.h b/gcc/tree.h
index 8b0b8d36a96..632b8a46b13 100644
--- a/gcc/tree.h
+++ b/gcc/tree.h
@@ -5172,6 +5172,7 @@ wi::int_traits <const_tree>::get_precision (const_tree tcst)
return TYPE_PRECISION (TREE_TYPE (tcst));
}
+/* Convert the tree_cst X into a wide_int. */
inline wi::storage_ref
wi::int_traits <const_tree>::decompose (HOST_WIDE_INT *scratch,
unsigned int precision, const_tree x)
@@ -5185,9 +5186,20 @@ wi::int_traits <const_tree>::decompose (HOST_WIDE_INT *scratch,
if (len > max_len)
return wi::storage_ref (val, max_len, precision);
- /* Otherwise we can use the constant as-is when not extending. */
if (precision <= xprecision)
- return wi::storage_ref (val, len, precision);
+ {
+ if (precision < HOST_BITS_PER_WIDE_INT
+ && TYPE_SIGN (TREE_TYPE (x)) == UNSIGNED)
+ {
+ /* The rep of wide-int is signed, so if the value comes from
+ an unsigned int_cst, we have to sign extend it to make it
+ correct. */
+ scratch[0] = sext_hwi (val[0], precision);
+ return wi::storage_ref (scratch, 1, precision);
+ }
+ /* Otherwise we can use the constant as-is when not extending. */
+ return wi::storage_ref (val, len, precision);
+ }
/* Widen the constant according to its sign. */
len = wi::force_to_size (scratch, val, len, xprecision, precision,
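
The decompose comment above covers the reading direction of the same mismatch: an unsigned INTEGER_CST whose precision is below HOST_BITS_PER_WIDE_INT stores its block zero-extended, so the block has to be sign-extended into the scratch buffer before the wide-int routines see it. A minimal sketch (not GCC code), with a local sext64 standing in for sext_hwi and an 8-bit unsigned type as an arbitrary example:

#include <cstdint>
#include <cstdio>

static int64_t sext64 (int64_t v, unsigned int prec)   /* assume 0 < prec < 64 */
{ return (int64_t) ((uint64_t) v << (64 - prec)) >> (64 - prec); }

int main ()
{
  unsigned int precision = 8;      /* TYPE_PRECISION of an unsigned char-like type */
  int64_t tree_elt = 200;          /* TREE_INT_CST_ELT (x, 0): zero-extended */
  int64_t scratch0 = sext64 (tree_elt, precision);   /* block handed to wi::: -56 */
  printf ("tree elt = %lld, scratch[0] = %lld\n",
          (long long) tree_elt, (long long) scratch0);
  return 0;
}
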
diff --git a/gcc/wide-int.cc b/gcc/wide-int.cc
index fcf807cd812..0a637bd5198 100644
--- a/gcc/wide-int.cc
+++ b/gcc/wide-int.cc
@@ -48,6 +48,9 @@ static const HOST_WIDE_INT zeros[WIDE_INT_MAX_ELTS] = {};
(PREC ? (((PREC) + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT) : 1)
#define SIGN_MASK(X) (((HOST_WIDE_INT)X) >> (HOST_BITS_PER_WIDE_INT - 1))
+/* Return the value of VAL[I] if I < LEN, otherwise return 0 or -1
+ based on the top existing bit of VAL. */
+
static unsigned HOST_WIDE_INT
safe_uhwi (const HOST_WIDE_INT *val, unsigned int len, unsigned int i)
{
@@ -176,6 +179,9 @@ void
wi::to_mpz (wide_int x, mpz_t result, signop sgn)
{
bool negative = false;
+ int len = x.get_len ();
+ const HOST_WIDE_INT *v = x.get_val ();
+ int small_prec = x.get_precision () & (HOST_BITS_PER_WIDE_INT - 1);
if (wi::neg_p (x, sgn))
{
@@ -185,8 +191,17 @@ wi::to_mpz (wide_int x, mpz_t result, signop sgn)
x = ~x;
}
- mpz_import (result, x.get_len (), -1, sizeof (HOST_WIDE_INT), 0, 0,
- x.get_val ());
+ if (sgn == UNSIGNED && small_prec)
+ {
+ HOST_WIDE_INT t[WIDE_INT_MAX_ELTS];
+
+ for (int i = 0; i < len - 1; i++)
+ t[i] = v[i];
+ t[len-1] = zext_hwi (v[len-1], small_prec);
+ mpz_import (result, len, -1, sizeof (HOST_WIDE_INT), 0, 0, t);
+ }
+ else
+ mpz_import (result, len, -1, sizeof (HOST_WIDE_INT), 0, 0, v);
if (negative)
mpz_com (result, result);
@@ -199,7 +214,9 @@ wide_int
wi::from_mpz (const_tree type, mpz_t x, bool wrap)
{
size_t count, numb;
- wide_int res = wide_int::create (TYPE_PRECISION (type));
+ int prec = TYPE_PRECISION (type);
+ int small_prec = prec & (HOST_BITS_PER_WIDE_INT - 1);
+ wide_int res = wide_int::create (prec);
unsigned int i;
if (!wrap)
@@ -238,6 +255,10 @@ wi::from_mpz (const_tree type, mpz_t x, bool wrap)
mpz_export (val, &count, -1, sizeof (HOST_WIDE_INT), 0, 0, x);
+ /* Canonize for small_prec. */
+ if (small_prec && count == (size_t)BLOCKS_NEEDED (prec))
+ val[count-1] = sext_hwi (val[count-1], small_prec);
+
if (mpz_sgn (x) < 0)
res = -res;
@@ -304,10 +325,10 @@ wi::force_to_size (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
if (precision > xprecision)
{
/* Expanding. */
- unsigned int small_xprecision = xprecision % HOST_BITS_PER_WIDE_INT;
-
if (sgn == UNSIGNED)
{
+ unsigned int small_xprecision = xprecision % HOST_BITS_PER_WIDE_INT;
+
if (small_xprecision && len == BLOCKS_NEEDED (xprecision))
val[len - 1] = zext_hwi (val[len - 1], small_xprecision);
else if (val[len - 1] < 0)
@@ -320,11 +341,6 @@ wi::force_to_size (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
val[len++] = 0;
}
}
- /* We have to do this because we cannot guarantee that there is
- not trash in the top block of an uncompressed value. For a
- compressed value, all the bits are significant. */
- else if (small_xprecision && len == BLOCKS_NEEDED (xprecision))
- val[len - 1] = sext_hwi (val[len - 1], small_xprecision);
}
else if (precision < xprecision)
/* Contracting. */
@@ -352,27 +368,18 @@ selt (const HOST_WIDE_INT *a, unsigned int len,
return 0;
}
- if (small_prec && index == blocks_needed - 1)
- {
- /* The top block is partially outside of the precision. */
- if (sgn == SIGNED)
- return sext_hwi (a[index], small_prec);
- else
- return zext_hwi (a[index], small_prec);
- }
- return a[index];
+ if (sgn == UNSIGNED && small_prec && index == blocks_needed - 1)
+ return zext_hwi (a[index], small_prec);
+ else
+ return a[index];
}
-/* Find the hignest bit represented in a wide int. This will in
+/* Find the highest bit represented in a wide int. This will in
general have the same value as the sign bit. */
static inline HOST_WIDE_INT
-top_bit_of (const HOST_WIDE_INT *a, unsigned int len, unsigned int prec)
+top_bit_of (const HOST_WIDE_INT *a, unsigned int len)
{
- if (len == BLOCKS_NEEDED (prec)
- && (prec & (HOST_BITS_PER_WIDE_INT - 1)))
- return (a[len - 1] >> (prec & (HOST_BITS_PER_WIDE_INT - 1))) & 1;
- else
- return (a[len - 1] >> (HOST_BITS_PER_WIDE_INT - 1)) & 1;
+ return (a[len - 1] >> (HOST_BITS_PER_WIDE_INT - 1)) & 1;
}
/*
@@ -658,7 +665,7 @@ wi::set_bit_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
/* If the bit we just set is at the msb of the block, make sure
that any higher bits are zeros. */
- if (bit + 1 < precision && bit == HOST_BITS_PER_WIDE_INT - 1)
+ if (bit + 1 < precision && subbit == HOST_BITS_PER_WIDE_INT - 1)
val[len++] = 0;
return len;
}
@@ -754,6 +761,8 @@ unsigned int
wi::shifted_mask (HOST_WIDE_INT *val, unsigned int start, unsigned int width,
bool negate, unsigned int prec)
{
+ int small_prec = prec & (HOST_BITS_PER_WIDE_INT - 1);
+
gcc_assert (start < 4 * MAX_BITSIZE_MODE_ANY_INT);
if (start + width > prec)
@@ -780,6 +789,8 @@ wi::shifted_mask (HOST_WIDE_INT *val, unsigned int start, unsigned int width,
/* case 000111000 */
block = (((unsigned HOST_WIDE_INT) 1) << shift) - block - 1;
val[i++] = negate ? ~block : block;
+ if (i == BLOCKS_NEEDED (prec) && small_prec)
+ val[i - 1] = sext_hwi (val[i - 1], small_prec);
return i;
}
else
@@ -801,6 +812,9 @@ wi::shifted_mask (HOST_WIDE_INT *val, unsigned int start, unsigned int width,
else if (end < prec)
val[i++] = negate ? -1 : 0;
+ if (i == BLOCKS_NEEDED (prec) && small_prec)
+ val[i - 1] = sext_hwi (val[i - 1], small_prec);
+
return i;
}
@@ -821,7 +835,7 @@ wi::and_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
unsigned int len = MAX (op0len, op1len);
if (l0 > l1)
{
- HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
+ HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len);
if (op1mask == 0)
{
l0 = l1;
@@ -839,7 +853,7 @@ wi::and_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
}
else if (l1 > l0)
{
- HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
+ HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len);
if (op0mask == 0)
len = l0 + 1;
else
@@ -879,7 +893,7 @@ wi::and_not_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
unsigned int len = MAX (op0len, op1len);
if (l0 > l1)
{
- HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
+ HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len);
if (op1mask != 0)
{
l0 = l1;
@@ -897,7 +911,7 @@ wi::and_not_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
}
else if (l1 > l0)
{
- HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
+ HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len);
if (op0mask == 0)
len = l0 + 1;
else
@@ -937,7 +951,7 @@ wi::or_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
unsigned int len = MAX (op0len, op1len);
if (l0 > l1)
{
- HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
+ HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len);
if (op1mask != 0)
{
l0 = l1;
@@ -955,7 +969,7 @@ wi::or_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
}
else if (l1 > l0)
{
- HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
+ HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len);
if (op0mask != 0)
len = l0 + 1;
else
@@ -995,7 +1009,7 @@ wi::or_not_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
unsigned int len = MAX (op0len, op1len);
if (l0 > l1)
{
- HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
+ HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len);
if (op1mask == 0)
{
l0 = l1;
@@ -1013,7 +1027,7 @@ wi::or_not_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
}
else if (l1 > l0)
{
- HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
+ HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len);
if (op0mask != 0)
len = l0 + 1;
else
@@ -1052,7 +1066,7 @@ wi::xor_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
unsigned int len = MAX (op0len, op1len);
if (l0 > l1)
{
- HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
+ HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len);
while (l0 > l1)
{
val[l0] = op0[l0] ^ op1mask;
@@ -1062,7 +1076,7 @@ wi::xor_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
if (l1 > l0)
{
- HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
+ HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len);
while (l1 > l0)
{
val[l1] = op0mask ^ op1[l1];
@@ -1101,8 +1115,8 @@ wi::add_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
unsigned int i, small_prec;
unsigned int len = MAX (op0len, op1len);
- mask0 = -top_bit_of (op0, op0len, prec);
- mask1 = -top_bit_of (op1, op1len, prec);
+ mask0 = -top_bit_of (op0, op0len);
+ mask1 = -top_bit_of (op1, op1len);
/* Add all of the explicitly defined elements. */
for (i = 0; i < len; i++)
@@ -1142,6 +1156,7 @@ wi::add_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
}
}
+ /* Canonize the top of the top block. */
small_prec = prec & (HOST_BITS_PER_WIDE_INT - 1);
if (small_prec != 0 && BLOCKS_NEEDED (prec) == len)
{
@@ -1211,7 +1226,7 @@ wi_unpack (unsigned HOST_HALF_WIDE_INT *result,
if (sgn == SIGNED)
{
- mask = -top_bit_of ((const HOST_WIDE_INT *) input, in_len, prec);
+ mask = -top_bit_of ((const HOST_WIDE_INT *) input, in_len);
mask &= HALF_INT_MASK;
}
else
@@ -1501,8 +1516,8 @@ wi::sub_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
unsigned int i, small_prec;
unsigned int len = MAX (op0len, op1len);
- mask0 = -top_bit_of (op0, op0len, prec);
- mask1 = -top_bit_of (op1, op1len, prec);
+ mask0 = -top_bit_of (op0, op0len);
+ mask1 = -top_bit_of (op1, op1len);
/* Subtract all of the explicitly defined elements. */
for (i = 0; i < len; i++)
@@ -1541,7 +1556,7 @@ wi::sub_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
}
}
-
+ /* Canonize the top of the top block. */
small_prec = prec & (HOST_BITS_PER_WIDE_INT - 1);
if (small_prec != 0 && BLOCKS_NEEDED (prec) == len)
{
@@ -1775,10 +1790,10 @@ wi::divmod_internal (HOST_WIDE_INT *quotient, unsigned int *remainder_len,
unsigned HOST_WIDE_INT o1 = zext_hwi (divisor[0], divisor_prec);
if (quotient)
- quotient[0] = zext_hwi (o0 / o1, dividend_prec);
+ quotient[0] = sext_hwi (o0 / o1, dividend_prec);
if (remainder)
{
- remainder[0] = zext_hwi (o0 % o1, dividend_prec);
+ remainder[0] = sext_hwi (o0 % o1, dividend_prec);
*remainder_len = 1;
}
}
@@ -1790,14 +1805,14 @@ wi::divmod_internal (HOST_WIDE_INT *quotient, unsigned int *remainder_len,
did. */
if (sgn == SIGNED)
{
- if (top_bit_of (dividend, dividend_len, dividend_prec))
+ if (top_bit_of (dividend, dividend_len))
{
dividend_len = wi::sub_large (u0, zeros, 1, dividend, dividend_len,
dividend_prec, UNSIGNED, 0);
dividend = u0;
dividend_neg = true;
}
- if (top_bit_of (divisor, divisor_len, divisor_prec))
+ if (top_bit_of (divisor, divisor_len))
{
divisor_len = wi::sub_large (u1, zeros, 1, divisor, divisor_len,
divisor_prec, UNSIGNED, 0);
@@ -1811,12 +1826,12 @@ wi::divmod_internal (HOST_WIDE_INT *quotient, unsigned int *remainder_len,
wi_unpack (b_divisor, (const unsigned HOST_WIDE_INT*)divisor,
divisor_len, divisor_blocks_needed, divisor_prec, sgn);
- if (top_bit_of (dividend, dividend_len, dividend_prec) && sgn == SIGNED)
+ if (top_bit_of (dividend, dividend_len) && sgn == SIGNED)
m = dividend_blocks_needed;
else
m = 2 * dividend_len;
- if (top_bit_of (divisor, divisor_len, divisor_prec) && sgn == SIGNED)
+ if (top_bit_of (divisor, divisor_len) && sgn == SIGNED)
n = divisor_blocks_needed;
else
n = 2 * divisor_len;
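
Among the wide-int.cc changes, the wi::to_mpz hunk is the easiest to see with concrete numbers: mpz_import treats the blocks as an unsigned magnitude, so the sign-extended top block of an UNSIGNED value must be zero-extended to its small precision first. A standalone sketch (not GCC code), assuming a 64-bit HOST_WIDE_INT, GMP available, and a 13-bit unsigned value as an arbitrary example; sext64/zext64 stand in for sext_hwi/zext_hwi:

#include <gmp.h>
#include <cstdint>

static int64_t sext64 (int64_t v, unsigned int prec)   /* assume 0 < prec < 64 */
{ return (int64_t) ((uint64_t) v << (64 - prec)) >> (64 - prec); }

static uint64_t zext64 (uint64_t v, unsigned int prec)
{ return v & (((uint64_t) 1 << prec) - 1); }

int main ()
{
  unsigned int prec = 13;
  /* Canonical wide-int block for the unsigned value 8191 (all 13 bits set):
     the excess bits are sign-extended, so the raw HWI is -1.  */
  int64_t block = sext64 (0x1fff, prec);

  mpz_t bad, good;
  mpz_init (bad);
  mpz_init (good);

  /* Importing the raw block would give 2^64 - 1, not 8191 ...  */
  mpz_import (bad, 1, -1, sizeof (block), 0, 0, &block);

  /* ... so wi::to_mpz now zero-extends the top block to the precision first.  */
  uint64_t fixed = zext64 ((uint64_t) block, prec);
  mpz_import (good, 1, -1, sizeof (fixed), 0, 0, &fixed);

  gmp_printf ("raw import: %Zd\nzext first: %Zd\n", bad, good);
  mpz_clear (bad);
  mpz_clear (good);
  return 0;
}
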
diff --git a/gcc/wide-int.h b/gcc/wide-int.h
index d0ec37e8f09..2ff130b2dc9 100644
--- a/gcc/wide-int.h
+++ b/gcc/wide-int.h
@@ -346,10 +346,6 @@ namespace wi
template <typename T1, typename T2>
unsigned int get_binary_precision (const T1 &, const T2 &);
- /* FIXME: should disappear. */
- template <typename T>
- void clear_undef (T &, signop);
-
bool fits_shwi_p (const wide_int_ref &);
bool fits_uhwi_p (const wide_int_ref &);
bool neg_p (const wide_int_ref &, signop = SIGNED);
@@ -567,6 +563,8 @@ public:
HOST_WIDE_INT elt (unsigned int) const;
unsigned HOST_WIDE_INT ulow () const;
unsigned HOST_WIDE_INT uhigh () const;
+ HOST_WIDE_INT slow () const;
+ HOST_WIDE_INT shigh () const;
#define BINARY_PREDICATE(OP, F) \
template <typename T> \
@@ -682,7 +680,26 @@ generic_wide_int <storage>::sign_mask () const
return this->get_val ()[this->get_len () - 1] < 0 ? -1 : 0;
}
-/* Return the value of the least-significant explicitly-encoded block. */
+/* Return the signed value of the least-significant explicitly-encoded
+ block. */
+template <typename storage>
+inline HOST_WIDE_INT
+generic_wide_int <storage>::slow () const
+{
+ return this->get_val ()[0];
+}
+
+/* Return the signed value of the most-significant explicitly-encoded
+ block. */
+template <typename storage>
+inline HOST_WIDE_INT
+generic_wide_int <storage>::shigh () const
+{
+ return this->get_val ()[this->get_len () - 1];
+}
+
+/* Return the unsigned value of the least-significant
+ explicitly-encoded block. */
template <typename storage>
inline unsigned HOST_WIDE_INT
generic_wide_int <storage>::ulow () const
@@ -690,7 +707,8 @@ generic_wide_int <storage>::ulow () const
return this->get_val ()[0];
}
-/* Return the value of the most-significant explicitly-encoded block. */
+/* Return the unsigned value of the most-significant
+ explicitly-encoded block. */
template <typename storage>
inline unsigned HOST_WIDE_INT
generic_wide_int <storage>::uhigh () const
@@ -741,8 +759,8 @@ decompose (HOST_WIDE_INT *, unsigned int precision,
}
/* Provide the storage for a wide_int_ref. This acts like a read-only
- wide_int, with the optimization that VAL is normally a pointer to another
- integer's storage, so that no array copy is needed. */
+ wide_int, with the optimization that VAL is normally a pointer to
+ another integer's storage, so that no array copy is needed. */
struct wide_int_ref_storage : public wi::storage_ref
{
private:
@@ -758,8 +776,9 @@ public:
wide_int_ref_storage (const T &, unsigned int);
};
-/* Create a reference to integer X in its natural precision. Note that
- the natural precision is host-dependent for primitive types. */
+/* Create a reference to integer X in its natural precision. Note
+ that the natural precision is host-dependent for primitive
+ types. */
template <typename T>
inline wide_int_ref_storage::wide_int_ref_storage (const T &x)
: storage_ref (wi::int_traits <T>::decompose (scratch,
@@ -881,9 +900,9 @@ wide_int_storage::from (const wide_int_ref &x, unsigned int precision,
return result;
}
-/* Create a wide_int from the explicit block encoding given by VAL and LEN.
- PRECISION is the precision of the integer. NEED_CANON_P is true if the
- encoding may have redundant trailing blocks. */
+/* Create a wide_int from the explicit block encoding given by VAL and
+ LEN. PRECISION is the precision of the integer. NEED_CANON_P is
+ true if the encoding may have redundant trailing blocks. */
inline wide_int
wide_int_storage::from_array (const HOST_WIDE_INT *val, unsigned int len,
unsigned int precision, bool need_canon_p)
@@ -1062,10 +1081,10 @@ get_binary_result (const T1 &, const T2 &)
return FIXED_WIDE_INT (N) ();
}
-/* Specify the result type for each supported combination of binary inputs.
- Note that CONST_PRECISION and VAR_PRECISION cannot be mixed, in order to
- give stronger type checking. When both inputs are CONST_PRECISION,
- they must have the same precision. */
+/* Specify the result type for each supported combination of binary
+ inputs. Note that CONST_PRECISION and VAR_PRECISION cannot be
+ mixed, in order to give stronger type checking. When both inputs
+ are CONST_PRECISION, they must have the same precision. */
namespace wi
{
template <>
@@ -1281,7 +1300,11 @@ decompose (HOST_WIDE_INT *scratch, unsigned int precision,
{
scratch[0] = x.val;
if (x.sgn == SIGNED || x.val >= 0 || precision <= HOST_BITS_PER_WIDE_INT)
- return wi::storage_ref (scratch, 1, precision);
+ {
+ if (precision < HOST_BITS_PER_WIDE_INT)
+ scratch[0] = sext_hwi (scratch[0], precision);
+ return wi::storage_ref (scratch, 1, precision);
+ }
scratch[1] = 0;
return wi::storage_ref (scratch, 2, precision);
}
@@ -1303,9 +1326,11 @@ namespace wi
const HOST_WIDE_INT *, unsigned int, unsigned int);
int cmpu_large (const HOST_WIDE_INT *, unsigned int, unsigned int,
const HOST_WIDE_INT *, unsigned int, unsigned int);
- unsigned int sext_large (HOST_WIDE_INT *, const HOST_WIDE_INT *, unsigned int,
+ unsigned int sext_large (HOST_WIDE_INT *, const HOST_WIDE_INT *,
+ unsigned int,
unsigned int, unsigned int);
- unsigned int zext_large (HOST_WIDE_INT *, const HOST_WIDE_INT *, unsigned int,
+ unsigned int zext_large (HOST_WIDE_INT *, const HOST_WIDE_INT *,
+ unsigned int,
unsigned int, unsigned int);
unsigned int set_bit_large (HOST_WIDE_INT *, const HOST_WIDE_INT *,
unsigned int, unsigned int, unsigned int);
@@ -1355,8 +1380,8 @@ wi::get_precision (const T &x)
return wi::int_traits <T>::get_precision (x);
}
-/* Return the number of bits that the result of a binary operation can hold
- when the input operands are X and Y. */
+/* Return the number of bits that the result of a binary operation can
+ hold when the input operands are X and Y. */
template <typename T1, typename T2>
inline unsigned int
wi::get_binary_precision (const T1 &x, const T2 &y)
@@ -1365,27 +1390,8 @@ wi::get_binary_precision (const T1 &x, const T2 &y)
get_binary_result (x, y));
}
-/* Extend undefined bits in X according to SGN. */
-template <typename T>
-inline void
-wi::clear_undef (T &x, signop sgn)
-{
- HOST_WIDE_INT *val = x.write_val ();
- unsigned int precision = x.get_precision ();
- unsigned int len = x.get_len ();
- unsigned int small_prec = precision % HOST_BITS_PER_WIDE_INT;
- if (small_prec
- && len == ((precision + HOST_BITS_PER_WIDE_INT - 1)
- / HOST_BITS_PER_WIDE_INT))
- {
- if (sgn == UNSIGNED)
- val[len - 1] = zext_hwi (val[len - 1], small_prec);
- else
- val[len - 1] = sext_hwi (val[len - 1], small_prec);
- }
-}
-
-/* Return true if X fits in a HOST_WIDE_INT with no loss of precision. */
+/* Return true if X fits in a HOST_WIDE_INT with no loss of
+ precision. */
inline bool
wi::fits_shwi_p (const wide_int_ref &x)
{
@@ -1400,9 +1406,7 @@ wi::fits_uhwi_p (const wide_int_ref &x)
if (x.precision <= HOST_BITS_PER_WIDE_INT)
return true;
if (x.len == 1)
- return x.sign_mask () == 0;
- if (x.precision < 2 * HOST_BITS_PER_WIDE_INT)
- return zext_hwi (x.uhigh (), x.precision % HOST_BITS_PER_WIDE_INT) == 0;
+ return x.slow () >= 0;
return x.len == 2 && x.uhigh () == 0;
}
@@ -1413,14 +1417,11 @@ wi::neg_p (const wide_int_ref &x, signop sgn)
{
if (sgn == UNSIGNED)
return false;
- if (x.precision == 0)
- return false;
- if (x.len * HOST_BITS_PER_WIDE_INT > x.precision)
- return (x.uhigh () >> (x.precision % HOST_BITS_PER_WIDE_INT - 1)) & 1;
- return x.sign_mask () < 0;
+ return x.shigh () < 0;
}
-/* Return -1 if the top bit of X is set and 0 if the top bit is clear. */
+/* Return -1 if the top bit of X is set and 0 if the top bit is
+ clear. */
inline HOST_WIDE_INT
wi::sign_mask (const wide_int_ref &x)
{
@@ -1440,7 +1441,10 @@ wi::eq_p (const T1 &x, const T2 &y)
if (precision <= HOST_BITS_PER_WIDE_INT)
{
unsigned HOST_WIDE_INT diff = xi.ulow () ^ yi.ulow ();
- return (diff << (HOST_BITS_PER_WIDE_INT - precision)) == 0;
+ bool result = (diff << (HOST_BITS_PER_WIDE_INT - precision)) == 0;
+ if (result)
+ gcc_assert (xi.ulow () == yi.ulow ());
+ return result;
}
return eq_p_large (xi.val, xi.len, yi.val, yi.len, precision);
}
@@ -1459,13 +1463,10 @@ wi::lts_p (const wide_int_ref &x, const wide_int_ref &y)
{
if (x.precision <= HOST_BITS_PER_WIDE_INT
&& y.precision <= HOST_BITS_PER_WIDE_INT)
- {
- HOST_WIDE_INT xl = sext_hwi (x.ulow (), x.precision);
- HOST_WIDE_INT yl = sext_hwi (y.ulow (), y.precision);
- return xl < yl;
- }
- return lts_p_large (x.val, x.len, x.precision, y.val, y.len,
- y.precision);
+ return x.slow () < y.slow ();
+ else
+ return lts_p_large (x.val, x.len, x.precision, y.val, y.len,
+ y.precision);
}
/* Return true if X < Y when both are treated as unsigned values. */
@@ -1479,7 +1480,9 @@ wi::ltu_p (const wide_int_ref &x, const wide_int_ref &y)
unsigned HOST_WIDE_INT yl = zext_hwi (y.ulow (), y.precision);
return xl < yl;
}
- return ltu_p_large (x.val, x.len, x.precision, y.val, y.len, y.precision);
+ else
+ return ltu_p_large (x.val, x.len, x.precision,
+ y.val, y.len, y.precision);
}
/* Return true if X < Y. Signedness of X and Y is indicated by SGN. */
@@ -1572,8 +1575,8 @@ wi::cmps (const wide_int_ref &x, const wide_int_ref &y)
if (x.precision <= HOST_BITS_PER_WIDE_INT
&& y.precision <= HOST_BITS_PER_WIDE_INT)
{
- HOST_WIDE_INT xl = sext_hwi (x.ulow (), x.precision);
- HOST_WIDE_INT yl = sext_hwi (y.ulow (), y.precision);
+ HOST_WIDE_INT xl = x.slow ();
+ HOST_WIDE_INT yl = y.slow ();
if (xl < yl)
return -1;
else if (xl > yl)
@@ -1654,7 +1657,14 @@ wi::abs (const T &x)
{
if (neg_p (x))
return neg (x);
- return x;
+
+ WI_UNARY_RESULT_VAR (result, val, T, x);
+ wide_int_ref xi (x, get_precision(result));
+ for (unsigned int i = 0; i < xi.len; ++i)
+ val[i] = xi.val[i];
+ result.set_len (xi.len);
+
+ return result;
}
/* Return the result of sign-extending the low OFFSET bits of X. */
@@ -1665,6 +1675,7 @@ wi::sext (const T &x, unsigned int offset)
WI_UNARY_RESULT_VAR (result, val, T, x);
unsigned int precision = get_precision (result);
wide_int_ref xi (x, precision);
+
if (offset <= HOST_BITS_PER_WIDE_INT)
{
val[0] = sext_hwi (xi.ulow (), offset);
@@ -1683,6 +1694,17 @@ wi::zext (const T &x, unsigned int offset)
WI_UNARY_RESULT_VAR (result, val, T, x);
unsigned int precision = get_precision (result);
wide_int_ref xi (x, precision);
+
+ /* This is not just an optimization, it is actually required to
+ maintain canonization. */
+ if (offset >= precision)
+ {
+ for (unsigned int i = 0; i < xi.len; ++i)
+ val[i] = xi.val[i];
+ result.set_len (xi.len);
+ return result;
+ }
+
if (offset < HOST_BITS_PER_WIDE_INT)
{
val[0] = zext_hwi (xi.ulow (), offset);
@@ -1851,13 +1873,14 @@ wi::bit_or (const T1 &x, const T2 &y)
unsigned int precision = get_precision (result);
wide_int_ref xi (x, precision);
wide_int_ref yi (y, precision);
- if (precision <= HOST_BITS_PER_WIDE_INT)
+ if (xi.len + yi.len == 2)
{
val[0] = xi.ulow () | yi.ulow ();
result.set_len (1);
}
else
- result.set_len (or_large (val, xi.val, xi.len, yi.val, yi.len, precision));
+ result.set_len (or_large (val, xi.val, xi.len,
+ yi.val, yi.len, precision));
return result;
}
@@ -1896,7 +1919,8 @@ wi::bit_xor (const T1 &x, const T2 &y)
result.set_len (1);
}
else
- result.set_len (xor_large (val, xi.val, xi.len, yi.val, yi.len, precision));
+ result.set_len (xor_large (val, xi.val, xi.len,
+ yi.val, yi.len, precision));
return result;
}
@@ -1911,11 +1935,12 @@ wi::add (const T1 &x, const T2 &y)
wide_int_ref yi (y, precision);
if (precision <= HOST_BITS_PER_WIDE_INT)
{
- val[0] = xi.ulow () + yi.ulow ();
+ val[0] = sext_hwi (xi.ulow () + yi.ulow (), precision);
result.set_len (1);
}
else
- result.set_len (add_large (val, xi.val, xi.len, yi.val, yi.len, precision,
+ result.set_len (add_large (val, xi.val, xi.len,
+ yi.val, yi.len, precision,
UNSIGNED, 0));
return result;
}
@@ -1938,15 +1963,17 @@ wi::add (const T1 &x, const T2 &y, signop sgn, bool *overflow)
if (precision == 0)
*overflow = false;
else if (sgn == SIGNED)
- *overflow = (((resultl ^ xl) & (resultl ^ yl)) >> (precision - 1)) & 1;
+ *overflow = (((resultl ^ xl) & (resultl ^ yl))
+ >> (precision - 1)) & 1;
else
*overflow = ((resultl << (HOST_BITS_PER_WIDE_INT - precision))
< (xl << (HOST_BITS_PER_WIDE_INT - precision)));
- val[0] = resultl;
+ val[0] = sext_hwi (resultl, precision);
result.set_len (1);
}
else
- result.set_len (add_large (val, xi.val, xi.len, yi.val, yi.len, precision,
+ result.set_len (add_large (val, xi.val, xi.len,
+ yi.val, yi.len, precision,
sgn, overflow));
return result;
}
@@ -1962,11 +1989,12 @@ wi::sub (const T1 &x, const T2 &y)
wide_int_ref yi (y, precision);
if (precision <= HOST_BITS_PER_WIDE_INT)
{
- val[0] = xi.ulow () - yi.ulow ();
+ val[0] = sext_hwi (xi.ulow () - yi.ulow (), precision);
result.set_len (1);
}
else
- result.set_len (sub_large (val, xi.val, xi.len, yi.val, yi.len, precision,
+ result.set_len (sub_large (val, xi.val, xi.len,
+ yi.val, yi.len, precision,
UNSIGNED, 0));
return result;
}
@@ -1993,11 +2021,12 @@ wi::sub (const T1 &x, const T2 &y, signop sgn, bool *overflow)
else
*overflow = ((resultl << (HOST_BITS_PER_WIDE_INT - precision))
> (xl << (HOST_BITS_PER_WIDE_INT - precision)));
- val[0] = resultl;
+ val[0] = sext_hwi (resultl, precision);
result.set_len (1);
}
else
- result.set_len (sub_large (val, xi.val, xi.len, yi.val, yi.len, precision,
+ result.set_len (sub_large (val, xi.val, xi.len,
+ yi.val, yi.len, precision,
sgn, overflow));
return result;
}
@@ -2013,7 +2042,7 @@ wi::mul (const T1 &x, const T2 &y)
wide_int_ref yi (y, precision);
if (precision <= HOST_BITS_PER_WIDE_INT)
{
- val[0] = xi.ulow () * yi.ulow ();
+ val[0] = sext_hwi (xi.ulow () * yi.ulow (), precision);
result.set_len (1);
}
else
@@ -2032,7 +2061,8 @@ wi::mul (const T1 &x, const T2 &y, signop sgn, bool *overflow)
unsigned int precision = get_precision (result);
wide_int_ref xi (x, precision);
wide_int_ref yi (y, precision);
- result.set_len (mul_internal (val, xi.val, xi.len, yi.val, yi.len, precision,
+ result.set_len (mul_internal (val, xi.val, xi.len,
+ yi.val, yi.len, precision,
sgn, overflow, false, false));
return result;
}
@@ -2065,7 +2095,8 @@ wi::mul_high (const T1 &x, const T2 &y, signop sgn)
unsigned int precision = get_precision (result);
wide_int_ref xi (x, precision);
wide_int_ref yi (y, precision);
- result.set_len (mul_internal (val, xi.val, xi.len, yi.val, yi.len, precision,
+ result.set_len (mul_internal (val, xi.val, xi.len,
+ yi.val, yi.len, precision,
sgn, 0, true, false));
return result;
}
@@ -2119,8 +2150,9 @@ wi::div_floor (const T1 &x, const T2 &y, signop sgn, bool *overflow)
wide_int_ref yi (y);
unsigned int remainder_len;
- quotient.set_len (divmod_internal (quotient_val, &remainder_len,
- remainder_val, xi.val, xi.len, precision,
+ quotient.set_len (divmod_internal (quotient_val,
+ &remainder_len, remainder_val,
+ xi.val, xi.len, precision,
yi.val, yi.len, yi.precision, sgn,
overflow));
remainder.set_len (remainder_len);
@@ -2160,8 +2192,9 @@ wi::div_ceil (const T1 &x, const T2 &y, signop sgn, bool *overflow)
wide_int_ref yi (y);
unsigned int remainder_len;
- quotient.set_len (divmod_internal (quotient_val, &remainder_len,
- remainder_val, xi.val, xi.len, precision,
+ quotient.set_len (divmod_internal (quotient_val,
+ &remainder_len, remainder_val,
+ xi.val, xi.len, precision,
yi.val, yi.len, yi.precision, sgn,
overflow));
remainder.set_len (remainder_len);
@@ -2184,8 +2217,9 @@ wi::div_round (const T1 &x, const T2 &y, signop sgn, bool *overflow)
wide_int_ref yi (y);
unsigned int remainder_len;
- quotient.set_len (divmod_internal (quotient_val, &remainder_len,
- remainder_val, xi.val, xi.len, precision,
+ quotient.set_len (divmod_internal (quotient_val,
+ &remainder_len, remainder_val,
+ xi.val, xi.len, precision,
yi.val, yi.len, yi.precision, sgn,
overflow));
remainder.set_len (remainder_len);
@@ -2227,8 +2261,9 @@ wi::divmod_trunc (const T1 &x, const T2 &y, signop sgn,
wide_int_ref yi (y);
unsigned int remainder_len;
- quotient.set_len (divmod_internal (quotient_val, &remainder_len,
- remainder_val, xi.val, xi.len, precision,
+ quotient.set_len (divmod_internal (quotient_val,
+ &remainder_len, remainder_val,
+ xi.val, xi.len, precision,
yi.val, yi.len, yi.precision, sgn, 0));
remainder.set_len (remainder_len);
@@ -2249,7 +2284,8 @@ wi::mod_trunc (const T1 &x, const T2 &y, signop sgn, bool *overflow)
wide_int_ref yi (y);
unsigned int remainder_len;
- divmod_internal (0, &remainder_len, remainder_val, xi.val, xi.len, precision,
+ divmod_internal (0, &remainder_len, remainder_val,
+ xi.val, xi.len, precision,
yi.val, yi.len, yi.precision, sgn, overflow);
remainder.set_len (remainder_len);
@@ -2288,8 +2324,9 @@ wi::mod_floor (const T1 &x, const T2 &y, signop sgn, bool *overflow)
wide_int_ref yi (y);
unsigned int remainder_len;
- quotient.set_len (divmod_internal (quotient_val, &remainder_len,
- remainder_val, xi.val, xi.len, precision,
+ quotient.set_len (divmod_internal (quotient_val,
+ &remainder_len, remainder_val,
+ xi.val, xi.len, precision,
yi.val, yi.len, yi.precision, sgn,
overflow));
remainder.set_len (remainder_len);
@@ -2323,8 +2360,9 @@ wi::mod_ceil (const T1 &x, const T2 &y, signop sgn, bool *overflow)
wide_int_ref yi (y);
unsigned int remainder_len;
- quotient.set_len (divmod_internal (quotient_val, &remainder_len,
- remainder_val, xi.val, xi.len, precision,
+ quotient.set_len (divmod_internal (quotient_val,
+ &remainder_len, remainder_val,
+ xi.val, xi.len, precision,
yi.val, yi.len, yi.precision, sgn,
overflow));
remainder.set_len (remainder_len);
@@ -2348,8 +2386,9 @@ wi::mod_round (const T1 &x, const T2 &y, signop sgn, bool *overflow)
wide_int_ref yi (y);
unsigned int remainder_len;
- quotient.set_len (divmod_internal (quotient_val, &remainder_len,
- remainder_val, xi.val, xi.len, precision,
+ quotient.set_len (divmod_internal (quotient_val,
+ &remainder_len, remainder_val,
+ xi.val, xi.len, precision,
yi.val, yi.len, yi.precision, sgn,
overflow));
remainder.set_len (remainder_len);
@@ -2384,7 +2423,8 @@ wi::multiple_of_p (const T1 &x, const T2 &y, signop sgn,
WI_BINARY_RESULT (T1, T2) *res)
{
WI_BINARY_RESULT (T1, T2) remainder;
- WI_BINARY_RESULT (T1, T2) quotient = divmod_trunc (x, y, sgn, &remainder);
+ WI_BINARY_RESULT (T1, T2) quotient
+ = divmod_trunc (x, y, sgn, &remainder);
if (remainder == 0)
{
*res = quotient;
@@ -2393,8 +2433,9 @@ wi::multiple_of_p (const T1 &x, const T2 &y, signop sgn,
return false;
}
-/* Truncate the value of shift value X so that the value is within BITSIZE.
- PRECISION is the number of bits in the value being shifted. */
+/* Truncate the value of shift value X so that the value is within
+ BITSIZE. PRECISION is the number of bits in the value being
+ shifted. */
inline unsigned int
wi::trunc_shift (const wide_int_ref &x, unsigned int bitsize,
unsigned int precision)
@@ -2412,8 +2453,8 @@ wi::trunc_shift (const wide_int_ref &x, unsigned int bitsize,
return shift & (bitsize - 1);
}
-/* Return X << Y. If BITSIZE is nonzero, only use the low BITSIZE bits
- of Y. */
+/* Return X << Y. If BITSIZE is nonzero, only use the low BITSIZE
+ bits of Y. */
template <typename T>
inline WI_UNARY_RESULT (T)
wi::lshift (const T &x, const wide_int_ref &y, unsigned int bitsize)
@@ -2430,16 +2471,17 @@ wi::lshift (const T &x, const wide_int_ref &y, unsigned int bitsize)
}
else if (precision <= HOST_BITS_PER_WIDE_INT)
{
- val[0] = xi.ulow () << shift;
+ val[0] = sext_hwi (xi.ulow () << shift, precision);
result.set_len (1);
}
else
- result.set_len (lshift_large (val, xi.val, xi.len, precision, shift));
+ result.set_len (lshift_large (val, xi.val, xi.len,
+ precision, shift));
return result;
}
-/* Return X >> Y, using a logical shift. If BITSIZE is nonzero, only use
- the low BITSIZE bits of Y. */
+/* Return X >> Y, using a logical shift. If BITSIZE is nonzero, only
+ use the low BITSIZE bits of Y. */
template <typename T>
inline WI_UNARY_RESULT (T)
wi::lrshift (const T &x, const wide_int_ref &y, unsigned int bitsize)
@@ -2457,7 +2499,8 @@ wi::lrshift (const T &x, const wide_int_ref &y, unsigned int bitsize)
}
else if (xi.precision <= HOST_BITS_PER_WIDE_INT)
{
- val[0] = zext_hwi (xi.ulow (), xi.precision) >> shift;
+ val[0] = sext_hwi (zext_hwi (xi.ulow (), xi.precision) >> shift,
+ xi.precision);
result.set_len (1);
}
else
@@ -2466,8 +2509,8 @@ wi::lrshift (const T &x, const wide_int_ref &y, unsigned int bitsize)
return result;
}
-/* Return X >> Y, using an arithmetic shift. If BITSIZE is nonzero, only use
- the low BITSIZE bits of Y. */
+/* Return X >> Y, using an arithmetic shift. If BITSIZE is nonzero,
+ only use the low BITSIZE bits of Y. */
template <typename T>
inline WI_UNARY_RESULT (T)
wi::arshift (const T &x, const wide_int_ref &y, unsigned int bitsize)
@@ -2485,8 +2528,7 @@ wi::arshift (const T &x, const wide_int_ref &y, unsigned int bitsize)
}
else if (xi.precision <= HOST_BITS_PER_WIDE_INT)
{
- val[0] = sext_hwi (zext_hwi (xi.ulow (), xi.precision) >> shift,
- xi.precision - shift);
+ val[0] = sext_hwi (xi.ulow () >> shift, xi.precision - shift);
result.set_len (1);
}
else
@@ -2495,9 +2537,9 @@ wi::arshift (const T &x, const wide_int_ref &y, unsigned int bitsize)
return result;
}
-/* Return X >> Y, using an arithmetic shift if SGN is SIGNED and a logical
- shift otherwise. If BITSIZE is nonzero, only use the low BITSIZE bits
- of Y. */
+/* Return X >> Y, using an arithmetic shift if SGN is SIGNED and a
+ logical shift otherwise. If BITSIZE is nonzero, only use the low
+ BITSIZE bits of Y. */
template <typename T>
inline WI_UNARY_RESULT (T)
wi::rshift (const T &x, const wide_int_ref &y, signop sgn,
@@ -2509,9 +2551,9 @@ wi::rshift (const T &x, const wide_int_ref &y, signop sgn,
return arshift (x, y, bitsize);
}
-/* Return the result of rotating the low WIDTH bits of X left by Y bits
- and zero-extending the result. Use a full-width rotate if WIDTH is
- zero. */
+/* Return the result of rotating the low WIDTH bits of X left by Y
+ bits and zero-extending the result. Use a full-width rotate if
+ WIDTH is zero. */
template <typename T>
inline WI_UNARY_RESULT (T)
wi::lrotate (const T &x, const wide_int_ref &y, unsigned int width)
@@ -2527,9 +2569,9 @@ wi::lrotate (const T &x, const wide_int_ref &y, unsigned int width)
return left | right;
}
-/* Return the result of rotating the low WIDTH bits of X right by Y bits
- and zero-extending the result. Use a full-width rotate if WIDTH is
- zero. */
+/* Return the result of rotating the low WIDTH bits of X right by Y
+ bits and zero-extending the result. Use a full-width rotate if
+ WIDTH is zero. */
template <typename T>
inline WI_UNARY_RESULT (T)
wi::rrotate (const T &x, const wide_int_ref &y, unsigned int width)
@@ -2704,15 +2746,16 @@ wi::mask (unsigned int width, bool negate_p)
}
/* Return an integer of type T in which the low START bits are clear,
- the next WIDTH bits are set, and the other bits are clear,
- or the inverse if NEGATE_P. */
+ the next WIDTH bits are set, and the other bits are clear, or the
+ inverse if NEGATE_P. */
template <typename T>
inline T
wi::shifted_mask (unsigned int start, unsigned int width, bool negate_p)
{
STATIC_ASSERT (wi::int_traits<T>::precision);
T result;
- result.set_len (shifted_mask (result.write_val (), start, width, negate_p,
+ result.set_len (shifted_mask (result.write_val (), start, width,
+ negate_p,
wi::int_traits <T>::precision));
return result;
}
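
Most of the wide-int.h changes follow one pattern: the single-HWI fast paths now sign-extend their result to the precision (val[0] = sext_hwi (..., precision)), so every stored block is canonical and helpers such as slow ()/shigh () can read it directly. A standalone sketch (not GCC code) of what that buys for an 8-bit addition, with local sext64/zext64 standing in for sext_hwi/zext_hwi:

#include <cstdint>
#include <cstdio>

static int64_t sext64 (int64_t v, unsigned int prec)   /* assume 0 < prec < 64 */
{ return (int64_t) ((uint64_t) v << (64 - prec)) >> (64 - prec); }

static uint64_t zext64 (uint64_t v, unsigned int prec)
{ return v & (((uint64_t) 1 << prec) - 1); }

int main ()
{
  unsigned int prec = 8;                  /* a one-block wide-int */
  int64_t x = 100, y = 100;               /* both canonical at 8 bits */
  int64_t sum = sext64 (x + y, prec);     /* what the wi::add fast path now stores */

  /* The stored block is sign-extended (-56), but read back at the
     precision it is still 200 unsigned and -56 signed, as required.  */
  printf ("stored block = %lld\n", (long long) sum);
  printf ("as unsigned  = %llu\n", (unsigned long long) zext64 ((uint64_t) sum, prec));
  printf ("as signed    = %lld\n", (long long) sum);
  return 0;
}
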