summaryrefslogtreecommitdiff
path: root/gcc/expmed.c
diff options
context:
space:
mode:
authormrs <mrs@138bc75d-0d04-0410-961f-82ee72b054a4>2013-08-13 20:41:07 +0000
committermrs <mrs@138bc75d-0d04-0410-961f-82ee72b054a4>2013-08-13 20:41:07 +0000
commite913b5cd5b6a9bd3a2ad58c65f9e3cd2bb55a28c (patch)
treef52a097017e3dcf89fad6525984e4591489f961e /gcc/expmed.c
parent9a5942c1d4d9116ab74b0741cfe3894a89fd17fb (diff)
downloadgcc-e913b5cd5b6a9bd3a2ad58c65f9e3cd2bb55a28c.tar.gz
Add wide-int branch.
git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/branches/wide-int@201707 138bc75d-0d04-0410-961f-82ee72b054a4
Diffstat (limited to 'gcc/expmed.c')
-rw-r--r--gcc/expmed.c178
1 files changed, 71 insertions, 107 deletions
diff --git a/gcc/expmed.c b/gcc/expmed.c
index 79f3424961d..6d69e4135b2 100644
--- a/gcc/expmed.c
+++ b/gcc/expmed.c
@@ -55,7 +55,6 @@ static void store_split_bit_field (rtx, unsigned HOST_WIDE_INT,
static rtx extract_fixed_bit_field (enum machine_mode, rtx,
unsigned HOST_WIDE_INT,
unsigned HOST_WIDE_INT, rtx, int, bool);
-static rtx mask_rtx (enum machine_mode, int, int, int);
static rtx lshift_value (enum machine_mode, rtx, int, int);
static rtx extract_split_bit_field (rtx, unsigned HOST_WIDE_INT,
unsigned HOST_WIDE_INT, int);
@@ -63,6 +62,19 @@ static void do_cmp_and_jump (rtx, rtx, enum rtx_code, enum machine_mode, rtx);
static rtx expand_smod_pow2 (enum machine_mode, rtx, HOST_WIDE_INT);
static rtx expand_sdiv_pow2 (enum machine_mode, rtx, HOST_WIDE_INT);
+/* Return a constant integer mask value of mode MODE with BITSIZE ones
+ followed by BITPOS zeros, or the complement of that if COMPLEMENT.
+ The mask is truncated if necessary to the width of mode MODE. The
+ mask is zero-extended if BITSIZE+BITPOS is too small for MODE. */
+
+static inline rtx
+mask_rtx (enum machine_mode mode, int bitpos, int bitsize, bool complement)
+{
+ return immed_wide_int_const
+ (wide_int::shifted_mask (bitpos, bitsize, complement,
+ GET_MODE_PRECISION (mode)), mode);
+}
+
/* Test whether a value is zero of a power of two. */
#define EXACT_POWER_OF_2_OR_ZERO_P(x) \
(((x) & ((x) - (unsigned HOST_WIDE_INT) 1)) == 0)
@@ -1840,39 +1852,15 @@ extract_fixed_bit_field (enum machine_mode tmode, rtx op0,
return expand_shift (RSHIFT_EXPR, mode, op0,
GET_MODE_BITSIZE (mode) - bitsize, target, 0);
}
-
-/* Return a constant integer (CONST_INT or CONST_DOUBLE) mask value
- of mode MODE with BITSIZE ones followed by BITPOS zeros, or the
- complement of that if COMPLEMENT. The mask is truncated if
- necessary to the width of mode MODE. The mask is zero-extended if
- BITSIZE+BITPOS is too small for MODE. */
-
-static rtx
-mask_rtx (enum machine_mode mode, int bitpos, int bitsize, int complement)
-{
- double_int mask;
-
- mask = double_int::mask (bitsize);
- mask = mask.llshift (bitpos, HOST_BITS_PER_DOUBLE_INT);
-
- if (complement)
- mask = ~mask;
-
- return immed_double_int_const (mask, mode);
-}
-
-/* Return a constant integer (CONST_INT or CONST_DOUBLE) rtx with the value
- VALUE truncated to BITSIZE bits and then shifted left BITPOS bits. */
+/* Return a constant integer rtx with the value VALUE truncated to
+ BITSIZE bits and then shifted left BITPOS bits. */
static rtx
lshift_value (enum machine_mode mode, rtx value, int bitpos, int bitsize)
{
- double_int val;
-
- val = double_int::from_uhwi (INTVAL (value)).zext (bitsize);
- val = val.llshift (bitpos, HOST_BITS_PER_DOUBLE_INT);
-
- return immed_double_int_const (val, mode);
+ return
+ immed_wide_int_const (wide_int (std::make_pair (value, mode))
+ .zext (bitsize).lshift (bitpos), mode);
}
/* Extract a bit field that is split across two words
@@ -3100,37 +3088,41 @@ expand_mult (enum machine_mode mode, rtx op0, rtx op1, rtx target,
only if the constant value exactly fits in an `unsigned int' without
any truncation. This means that multiplying by negative values does
not work; results are off by 2^32 on a 32 bit machine. */
-
if (CONST_INT_P (scalar_op1))
{
coeff = INTVAL (scalar_op1);
is_neg = coeff < 0;
}
+#if TARGET_SUPPORTS_WIDE_INT
+ else if (CONST_WIDE_INT_P (scalar_op1))
+#else
else if (CONST_DOUBLE_AS_INT_P (scalar_op1))
+#endif
{
- /* If we are multiplying in DImode, it may still be a win
- to try to work with shifts and adds. */
- if (CONST_DOUBLE_HIGH (scalar_op1) == 0
- && (CONST_DOUBLE_LOW (scalar_op1) > 0
- || (CONST_DOUBLE_LOW (scalar_op1) < 0
- && EXACT_POWER_OF_2_OR_ZERO_P
- (CONST_DOUBLE_LOW (scalar_op1)))))
+ int p = GET_MODE_PRECISION (mode);
+ wide_int val = std::make_pair (scalar_op1, mode);
+ int shift = val.exact_log2 ().to_shwi ();
+ /* Perfect power of 2. */
+ is_neg = false;
+ if (shift > 0)
{
- coeff = CONST_DOUBLE_LOW (scalar_op1);
- is_neg = false;
+ /* Do the shift count truncation against the bitsize, not
+ the precision.  See the comment above
+ wide-int.c:trunc_shift for details.  */
+ if (SHIFT_COUNT_TRUNCATED)
+ shift &= GET_MODE_BITSIZE (mode) - 1;
+ /* We could consider adding just a move of 0 to target
+ if the shift >= p.  */
+ if (shift < p)
+ return expand_shift (LSHIFT_EXPR, mode, op0,
+ shift, target, unsignedp);
+ /* Any positive number that fits in a word. */
+ coeff = CONST_WIDE_INT_ELT (scalar_op1, 0);
}
- else if (CONST_DOUBLE_LOW (scalar_op1) == 0)
+ else if (val.sign_mask () == 0)
{
- coeff = CONST_DOUBLE_HIGH (scalar_op1);
- if (EXACT_POWER_OF_2_OR_ZERO_P (coeff))
- {
- int shift = floor_log2 (coeff) + HOST_BITS_PER_WIDE_INT;
- if (shift < HOST_BITS_PER_DOUBLE_INT - 1
- || mode_bitsize <= HOST_BITS_PER_DOUBLE_INT)
- return expand_shift (LSHIFT_EXPR, mode, op0,
- shift, target, unsignedp);
- }
- goto skip_synth;
+ /* Any positive number that fits in a word. */
+ coeff = CONST_WIDE_INT_ELT (scalar_op1, 0);
}
else
goto skip_synth;
@@ -3308,7 +3300,7 @@ choose_multiplier (unsigned HOST_WIDE_INT d, int n, int precision,
unsigned HOST_WIDE_INT *multiplier_ptr,
int *post_shift_ptr, int *lgup_ptr)
{
- double_int mhigh, mlow;
+ wide_int mhigh, mlow;
int lgup, post_shift;
int pow, pow2;
@@ -3320,23 +3312,13 @@ choose_multiplier (unsigned HOST_WIDE_INT d, int n, int precision,
pow = n + lgup;
pow2 = n + lgup - precision;
- /* We could handle this with some effort, but this case is much
- better handled directly with a scc insn, so rely on caller using
- that. */
- gcc_assert (pow != HOST_BITS_PER_DOUBLE_INT);
-
/* mlow = 2^(N + lgup)/d */
- double_int val = double_int_zero.set_bit (pow);
- mlow = val.div (double_int::from_uhwi (d), true, TRUNC_DIV_EXPR);
+ wide_int val = wide_int::set_bit_in_zero (pow, HOST_BITS_PER_DOUBLE_INT);
+ mlow = val.udiv_trunc (d);
/* mhigh = (2^(N + lgup) + 2^(N + lgup - precision))/d */
- val |= double_int_zero.set_bit (pow2);
- mhigh = val.div (double_int::from_uhwi (d), true, TRUNC_DIV_EXPR);
-
- gcc_assert (!mhigh.high || val.high - d < d);
- gcc_assert (mhigh.high <= 1 && mlow.high <= 1);
- /* Assert that mlow < mhigh. */
- gcc_assert (mlow.ult (mhigh));
+ val |= wide_int::set_bit_in_zero (pow2, HOST_BITS_PER_DOUBLE_INT);
+ mhigh = val.udiv_trunc (d);
/* If precision == N, then mlow, mhigh exceed 2^N
(but they do not exceed 2^(N+1)). */
@@ -3344,14 +3326,13 @@ choose_multiplier (unsigned HOST_WIDE_INT d, int n, int precision,
/* Reduce to lowest terms. */
for (post_shift = lgup; post_shift > 0; post_shift--)
{
- int shft = HOST_BITS_PER_WIDE_INT - 1;
- unsigned HOST_WIDE_INT ml_lo = (mlow.high << shft) | (mlow.low >> 1);
- unsigned HOST_WIDE_INT mh_lo = (mhigh.high << shft) | (mhigh.low >> 1);
+ unsigned HOST_WIDE_INT ml_lo = mlow.extract_to_hwi (1, HOST_BITS_PER_WIDE_INT);
+ unsigned HOST_WIDE_INT mh_lo = mhigh.extract_to_hwi (1, HOST_BITS_PER_WIDE_INT);
if (ml_lo >= mh_lo)
break;
- mlow = double_int::from_uhwi (ml_lo);
- mhigh = double_int::from_uhwi (mh_lo);
+ mlow = wide_int::from_uhwi (ml_lo, HOST_BITS_PER_DOUBLE_INT);
+ mhigh = wide_int::from_uhwi (mh_lo, HOST_BITS_PER_DOUBLE_INT);
}
*post_shift_ptr = post_shift;
@@ -3359,13 +3340,13 @@ choose_multiplier (unsigned HOST_WIDE_INT d, int n, int precision,
if (n < HOST_BITS_PER_WIDE_INT)
{
unsigned HOST_WIDE_INT mask = ((unsigned HOST_WIDE_INT) 1 << n) - 1;
- *multiplier_ptr = mhigh.low & mask;
- return mhigh.low >= mask;
+ *multiplier_ptr = mhigh.to_uhwi () & mask;
+ return mhigh.to_uhwi () >= mask;
}
else
{
- *multiplier_ptr = mhigh.low;
- return mhigh.high;
+ *multiplier_ptr = mhigh.to_uhwi ();
+ return mhigh.extract_to_hwi (HOST_BITS_PER_WIDE_INT, 1);
}
}
@@ -3632,9 +3613,10 @@ expmed_mult_highpart (enum machine_mode mode, rtx op0, rtx op1,
static rtx
expand_smod_pow2 (enum machine_mode mode, rtx op0, HOST_WIDE_INT d)
{
- unsigned HOST_WIDE_INT masklow, maskhigh;
rtx result, temp, shift, label;
int logd;
+ wide_int mask;
+ int prec = GET_MODE_PRECISION (mode);
logd = floor_log2 (d);
result = gen_reg_rtx (mode);
@@ -3647,8 +3629,8 @@ expand_smod_pow2 (enum machine_mode mode, rtx op0, HOST_WIDE_INT d)
mode, 0, -1);
if (signmask)
{
+ HOST_WIDE_INT masklow = ((HOST_WIDE_INT) 1 << logd) - 1;
signmask = force_reg (mode, signmask);
- masklow = ((HOST_WIDE_INT) 1 << logd) - 1;
shift = GEN_INT (GET_MODE_BITSIZE (mode) - logd);
/* Use the rtx_cost of a LSHIFTRT instruction to determine
@@ -3693,19 +3675,11 @@ expand_smod_pow2 (enum machine_mode mode, rtx op0, HOST_WIDE_INT d)
modulus. By including the signbit in the operation, many targets
can avoid an explicit compare operation in the following comparison
against zero. */
-
- masklow = ((HOST_WIDE_INT) 1 << logd) - 1;
- if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
- {
- masklow |= (HOST_WIDE_INT) -1 << (GET_MODE_BITSIZE (mode) - 1);
- maskhigh = -1;
- }
- else
- maskhigh = (HOST_WIDE_INT) -1
- << (GET_MODE_BITSIZE (mode) - HOST_BITS_PER_WIDE_INT - 1);
+ mask = wide_int::mask (logd, false, GET_MODE_PRECISION (mode));
+ mask = mask.set_bit (prec - 1);
temp = expand_binop (mode, and_optab, op0,
- immed_double_const (masklow, maskhigh, mode),
+ immed_wide_int_const (mask, mode),
result, 1, OPTAB_LIB_WIDEN);
if (temp != result)
emit_move_insn (result, temp);
@@ -3715,10 +3689,10 @@ expand_smod_pow2 (enum machine_mode mode, rtx op0, HOST_WIDE_INT d)
temp = expand_binop (mode, sub_optab, result, const1_rtx, result,
0, OPTAB_LIB_WIDEN);
- masklow = (HOST_WIDE_INT) -1 << logd;
- maskhigh = -1;
+
+ mask = wide_int::mask (logd, true, GET_MODE_PRECISION (mode));
temp = expand_binop (mode, ior_optab, temp,
- immed_double_const (masklow, maskhigh, mode),
+ immed_wide_int_const (mask, mode),
result, 1, OPTAB_LIB_WIDEN);
temp = expand_binop (mode, add_optab, temp, const1_rtx, result,
0, OPTAB_LIB_WIDEN);
@@ -4957,24 +4931,14 @@ make_tree (tree type, rtx x)
switch (GET_CODE (x))
{
case CONST_INT:
- {
- HOST_WIDE_INT hi = 0;
-
- if (INTVAL (x) < 0
- && !(TYPE_UNSIGNED (type)
- && (GET_MODE_BITSIZE (TYPE_MODE (type))
- < HOST_BITS_PER_WIDE_INT)))
- hi = -1;
-
- t = build_int_cst_wide (type, INTVAL (x), hi);
-
- return t;
- }
+ case CONST_WIDE_INT:
+ t = wide_int_to_tree (type, std::make_pair (x, TYPE_MODE (type)));
+ return t;
case CONST_DOUBLE:
- if (GET_MODE (x) == VOIDmode)
- t = build_int_cst_wide (type,
- CONST_DOUBLE_LOW (x), CONST_DOUBLE_HIGH (x));
+ if (TARGET_SUPPORTS_WIDE_INT == 0 && GET_MODE (x) == VOIDmode)
+ t = wide_int_to_tree (type, wide_int::from_array (&CONST_DOUBLE_LOW (x), 2,
+ HOST_BITS_PER_WIDE_INT * 2));
else
{
REAL_VALUE_TYPE d;