diff options
author | rsandifo <rsandifo@138bc75d-0d04-0410-961f-82ee72b054a4> | 2013-10-24 19:50:10 +0000 |
---|---|---|
committer | rsandifo <rsandifo@138bc75d-0d04-0410-961f-82ee72b054a4> | 2013-10-24 19:50:10 +0000 |
commit | 5de9d3edd3fb87520db0a0ecd247f64ed17d4a7e (patch) | |
tree | d6aa0255859e182cc4803056e6f5be3e322840fb | |
parent | 7acd91bc8e4f55287f385de48490b09d12a2ec12 (diff) | |
download | gcc-5de9d3edd3fb87520db0a0ecd247f64ed17d4a7e.tar.gz |
Rename max_wide_int to widest_int, addr_wide_int to offset_int,
wi::address to wi::to_offset and wi::extend to wi::to_widest.
git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/branches/wide-int@204036 138bc75d-0d04-0410-961f-82ee72b054a4
56 files changed, 412 insertions, 411 deletions
diff --git a/gcc/alias.c b/gcc/alias.c index fff8da4ae25..dce95a25dc3 100644 --- a/gcc/alias.c +++ b/gcc/alias.c @@ -2345,15 +2345,15 @@ adjust_offset_for_component_ref (tree x, bool *known_p, { tree xoffset = component_ref_field_offset (x); tree field = TREE_OPERAND (x, 1); - addr_wide_int woffset; + offset_int woffset; if (TREE_CODE (xoffset) != INTEGER_CST) { *known_p = false; return; } - woffset = (wi::address (xoffset) - + wi::udiv_trunc (wi::address (DECL_FIELD_BIT_OFFSET (field)), + woffset = (wi::to_offset (xoffset) + + wi::udiv_trunc (wi::to_offset (DECL_FIELD_BIT_OFFSET (field)), BITS_PER_UNIT)); if (!wi::fits_uhwi_p (woffset)) diff --git a/gcc/builtins.c b/gcc/builtins.c index 0304ec8faa7..5dfc2a701f0 100644 --- a/gcc/builtins.c +++ b/gcc/builtins.c @@ -8679,7 +8679,7 @@ fold_builtin_memory_op (location_t loc, tree dest, tree src, else if (TREE_CODE (src_base) == MEM_REF && TREE_CODE (dest_base) == MEM_REF) { - addr_wide_int off; + offset_int off; if (! operand_equal_p (TREE_OPERAND (src_base, 0), TREE_OPERAND (dest_base, 0), 0)) return NULL_TREE; diff --git a/gcc/cfgloop.c b/gcc/cfgloop.c index 18e0f52655c..a894793b2f6 100644 --- a/gcc/cfgloop.c +++ b/gcc/cfgloop.c @@ -1788,7 +1788,7 @@ get_loop_location (struct loop *loop) I_BOUND times. */ void -record_niter_bound (struct loop *loop, const max_wide_int &i_bound, +record_niter_bound (struct loop *loop, const widest_int &i_bound, bool realistic, bool upper) { /* Update the bounds only when there is no previous estimation, or when the @@ -1824,7 +1824,7 @@ record_niter_bound (struct loop *loop, const max_wide_int &i_bound, HOST_WIDE_INT get_estimated_loop_iterations_int (struct loop *loop) { - max_wide_int nit; + widest_int nit; HOST_WIDE_INT hwi_nit; if (!get_estimated_loop_iterations (loop, &nit)) @@ -1861,7 +1861,7 @@ max_stmt_executions_int (struct loop *loop) returns true. */ bool -get_estimated_loop_iterations (struct loop *loop, max_wide_int *nit) +get_estimated_loop_iterations (struct loop *loop, widest_int *nit) { /* Even if the bound is not recorded, possibly we can derrive one from profile. */ @@ -1885,7 +1885,7 @@ get_estimated_loop_iterations (struct loop *loop, max_wide_int *nit) false, otherwise returns true. */ bool -get_max_loop_iterations (struct loop *loop, max_wide_int *nit) +get_max_loop_iterations (struct loop *loop, widest_int *nit) { if (!loop->any_upper_bound) return false; @@ -1901,7 +1901,7 @@ get_max_loop_iterations (struct loop *loop, max_wide_int *nit) HOST_WIDE_INT get_max_loop_iterations_int (struct loop *loop) { - max_wide_int nit; + widest_int nit; HOST_WIDE_INT hwi_nit; if (!get_max_loop_iterations (loop, &nit)) diff --git a/gcc/cfgloop.h b/gcc/cfgloop.h index 9dcc55fef5f..3aaa728c0d9 100644 --- a/gcc/cfgloop.h +++ b/gcc/cfgloop.h @@ -63,7 +63,7 @@ struct GTY ((chain_next ("%h.next"))) nb_iter_bound { overflows (as MAX + 1 is sometimes produced as the estimate on number of executions of STMT). b) it is consistent with the result of number_of_iterations_exit. */ - max_wide_int bound; + widest_int bound; /* True if the statement will cause the loop to be leaved the (at most) BOUND + 1-st time it is executed, that is, all the statements after it @@ -147,12 +147,12 @@ struct GTY ((chain_next ("%h.next"))) loop { /* An integer guaranteed to be greater or equal to nb_iterations. Only valid if any_upper_bound is true. */ - max_wide_int nb_iterations_upper_bound; + widest_int nb_iterations_upper_bound; /* An integer giving an estimate on nb_iterations. 
Unlike nb_iterations_upper_bound, there is no guarantee that it is at least nb_iterations. */ - max_wide_int nb_iterations_estimate; + widest_int nb_iterations_estimate; bool any_upper_bound; bool any_estimate; @@ -731,16 +731,16 @@ loop_outermost (struct loop *loop) return (*loop->superloops)[1]; } -extern void record_niter_bound (struct loop *, const max_wide_int &, bool, bool); +extern void record_niter_bound (struct loop *, const widest_int &, bool, bool); extern HOST_WIDE_INT get_estimated_loop_iterations_int (struct loop *); extern HOST_WIDE_INT get_max_loop_iterations_int (struct loop *); -extern bool get_estimated_loop_iterations (struct loop *loop, max_wide_int *nit); -extern bool get_max_loop_iterations (struct loop *loop, max_wide_int *nit); +extern bool get_estimated_loop_iterations (struct loop *loop, widest_int *nit); +extern bool get_max_loop_iterations (struct loop *loop, widest_int *nit); extern int bb_loop_depth (const_basic_block); -/* Converts VAL to max_wide_int. */ +/* Converts VAL to widest_int. */ -static inline max_wide_int +static inline widest_int gcov_type_to_wide_int (gcov_type val) { HOST_WIDE_INT a[2]; @@ -752,6 +752,6 @@ gcov_type_to_wide_int (gcov_type val) val >>= 1; a[1] = (unsigned HOST_WIDE_INT) val; - return max_wide_int::from_array (a, 2); + return widest_int::from_array (a, 2); } #endif /* GCC_CFGLOOP_H */ diff --git a/gcc/cp/decl.c b/gcc/cp/decl.c index be4263fb90a..36211de735e 100644 --- a/gcc/cp/decl.c +++ b/gcc/cp/decl.c @@ -12884,8 +12884,8 @@ build_enumerator (tree name, tree value, tree enumtype, location_t loc) { tree type = TREE_TYPE (prev_value); signop sgn = TYPE_SIGN (type); - max_wide_int wi = wi::add (wi::extend (prev_value), 1, sgn, - &overflowed); + widest_int wi = wi::add (wi::to_widest (prev_value), 1, sgn, + &overflowed); if (!overflowed) { bool pos = !wi::neg_p (wi, sgn); diff --git a/gcc/cp/init.c b/gcc/cp/init.c index 26e88ed0cf8..59a626ceb79 100644 --- a/gcc/cp/init.c +++ b/gcc/cp/init.c @@ -2242,10 +2242,10 @@ build_new_1 (vec<tree, va_gc> **placement, tree type, tree nelts, /* For arrays, a bounds checks on the NELTS parameter. */ tree outer_nelts_check = NULL_TREE; bool outer_nelts_from_type = false; - addr_wide_int inner_nelts_count = 1; + offset_int inner_nelts_count = 1; tree alloc_call, alloc_expr; /* Size of the inner array elements. */ - addr_wide_int inner_size; + offset_int inner_size; /* The address returned by the call to "operator new". This node is a VAR_DECL and is therefore reusable. */ tree alloc_node; @@ -2300,9 +2300,8 @@ build_new_1 (vec<tree, va_gc> **placement, tree type, tree nelts, if (TREE_CODE (inner_nelts_cst) == INTEGER_CST) { bool overflow; - addr_wide_int result = wi::mul (wi::address (inner_nelts_cst), - inner_nelts_count, SIGNED, - &overflow); + offset_int result = wi::mul (wi::to_offset (inner_nelts_cst), + inner_nelts_count, SIGNED, &overflow); if (overflow) { if (complain & tf_error) @@ -2404,10 +2403,10 @@ build_new_1 (vec<tree, va_gc> **placement, tree type, tree nelts, { /* Maximum available size in bytes. Half of the address space minus the cookie size. */ - addr_wide_int max_size - = wi::set_bit_in_zero <addr_wide_int> (TYPE_PRECISION (sizetype) - 1); + offset_int max_size + = wi::set_bit_in_zero <offset_int> (TYPE_PRECISION (sizetype) - 1); /* Maximum number of outer elements which can be allocated. 
*/ - addr_wide_int max_outer_nelts; + offset_int max_outer_nelts; tree max_outer_nelts_tree; gcc_assert (TREE_CODE (size) == INTEGER_CST); @@ -2417,9 +2416,9 @@ build_new_1 (vec<tree, va_gc> **placement, tree type, tree nelts, /* Unconditionally subtract the cookie size. This decreases the maximum object size and is safe even if we choose not to use a cookie after all. */ - max_size -= wi::address (cookie_size); + max_size -= wi::to_offset (cookie_size); bool overflow; - inner_size = wi::mul (wi::address (size), inner_nelts_count, SIGNED, + inner_size = wi::mul (wi::to_offset (size), inner_nelts_count, SIGNED, &overflow); if (overflow || wi::gtu_p (inner_size, max_size)) { diff --git a/gcc/cp/mangle.c b/gcc/cp/mangle.c index 8c1e13ef671..803a0d50dd5 100644 --- a/gcc/cp/mangle.c +++ b/gcc/cp/mangle.c @@ -3223,7 +3223,7 @@ write_array_type (const tree type) { /* The ABI specifies that we should mangle the number of elements in the array, not the largest allowed index. */ - addr_wide_int wmax = wi::address (max) + 1; + offset_int wmax = wi::to_offset (max) + 1; /* Truncate the result - this will mangle [0, SIZE_INT_MAX] number of elements as zero. */ wmax = wi::zext (wmax, TYPE_PRECISION (TREE_TYPE (max))); diff --git a/gcc/cp/tree.c b/gcc/cp/tree.c index 535e4be961a..ccac781c3a8 100644 --- a/gcc/cp/tree.c +++ b/gcc/cp/tree.c @@ -2603,7 +2603,7 @@ cp_tree_equal (tree t1, tree t2) switch (code1) { case INTEGER_CST: - return wi::extend (t1) == wi::extend (t2); + return wi::to_widest (t1) == wi::to_widest (t2); case REAL_CST: return REAL_VALUES_EQUAL (TREE_REAL_CST (t1), TREE_REAL_CST (t2)); diff --git a/gcc/cp/typeck2.c b/gcc/cp/typeck2.c index d4d48a21cfd..3fc046619bc 100644 --- a/gcc/cp/typeck2.c +++ b/gcc/cp/typeck2.c @@ -1119,8 +1119,8 @@ process_init_constructor_array (tree type, tree init, { tree domain = TYPE_DOMAIN (type); if (domain && TREE_CONSTANT (TYPE_MAX_VALUE (domain))) - len = wi::ext (wi::address (TYPE_MAX_VALUE (domain)) - - wi::address (TYPE_MIN_VALUE (domain)) + 1, + len = wi::ext (wi::to_offset (TYPE_MAX_VALUE (domain)) + - wi::to_offset (TYPE_MIN_VALUE (domain)) + 1, TYPE_PRECISION (TREE_TYPE (domain)), TYPE_SIGN (TREE_TYPE (domain))).to_uhwi (); else diff --git a/gcc/dwarf2out.c b/gcc/dwarf2out.c index 31a5ef71022..cf7de1ad246 100644 --- a/gcc/dwarf2out.c +++ b/gcc/dwarf2out.c @@ -10304,7 +10304,7 @@ simple_type_size_in_bits (const_tree type) /* Similarly, but return a wide_int instead of UHWI. */ -static inline addr_wide_int +static inline offset_int wide_int_type_size_in_bits (const_tree type) { if (TREE_CODE (type) == ERROR_MARK) @@ -10312,7 +10312,7 @@ wide_int_type_size_in_bits (const_tree type) else if (TYPE_SIZE (type) == NULL_TREE) return 0; else if (TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST) - return wi::address (TYPE_SIZE (type)); + return wi::to_offset (TYPE_SIZE (type)); else return TYPE_ALIGN (type); } @@ -14687,8 +14687,8 @@ simple_decl_align_in_bits (const_tree decl) /* Return the result of rounding T up to ALIGN. 
*/ -static inline addr_wide_int -round_up_to_align (addr_wide_int t, unsigned int align) +static inline offset_int +round_up_to_align (offset_int t, unsigned int align) { t += align - 1; t = wi::udiv_trunc (t, align); @@ -14706,9 +14706,9 @@ round_up_to_align (addr_wide_int t, unsigned int align) static HOST_WIDE_INT field_byte_offset (const_tree decl) { - addr_wide_int object_offset_in_bits; - addr_wide_int object_offset_in_bytes; - addr_wide_int bitpos_int; + offset_int object_offset_in_bits; + offset_int object_offset_in_bytes; + offset_int bitpos_int; if (TREE_CODE (decl) == ERROR_MARK) return 0; @@ -14721,18 +14721,18 @@ field_byte_offset (const_tree decl) if (TREE_CODE (bit_position (decl)) != INTEGER_CST) return 0; - bitpos_int = wi::address (bit_position (decl)); + bitpos_int = wi::to_offset (bit_position (decl)); #ifdef PCC_BITFIELD_TYPE_MATTERS if (PCC_BITFIELD_TYPE_MATTERS) { tree type; tree field_size_tree; - addr_wide_int deepest_bitpos; - addr_wide_int field_size_in_bits; + offset_int deepest_bitpos; + offset_int field_size_in_bits; unsigned int type_align_in_bits; unsigned int decl_align_in_bits; - addr_wide_int type_size_in_bits; + offset_int type_size_in_bits; type = field_type (decl); type_size_in_bits = wide_int_type_size_in_bits (type); @@ -14747,7 +14747,7 @@ field_byte_offset (const_tree decl) /* If the size of the field is not constant, use the type size. */ if (TREE_CODE (field_size_tree) == INTEGER_CST) - field_size_in_bits = wi::address (field_size_tree); + field_size_in_bits = wi::to_offset (field_size_tree); else field_size_in_bits = type_size_in_bits; diff --git a/gcc/expr.c b/gcc/expr.c index 1dc0aef39bc..8c153594a0c 100644 --- a/gcc/expr.c +++ b/gcc/expr.c @@ -6520,7 +6520,7 @@ get_inner_reference (tree exp, HOST_WIDE_INT *pbitsize, enum machine_mode mode = VOIDmode; bool blkmode_bitfield = false; tree offset = size_zero_node; - addr_wide_int bit_offset = 0; + offset_int bit_offset = 0; /* First get the mode, signedness, and size. We do this from just the outermost expression. */ @@ -6581,7 +6581,7 @@ get_inner_reference (tree exp, HOST_WIDE_INT *pbitsize, switch (TREE_CODE (exp)) { case BIT_FIELD_REF: - bit_offset += wi::address (TREE_OPERAND (exp, 2)); + bit_offset += wi::to_offset (TREE_OPERAND (exp, 2)); break; case COMPONENT_REF: @@ -6596,7 +6596,7 @@ get_inner_reference (tree exp, HOST_WIDE_INT *pbitsize, break; offset = size_binop (PLUS_EXPR, offset, this_offset); - bit_offset += wi::address (DECL_FIELD_BIT_OFFSET (field)); + bit_offset += wi::to_offset (DECL_FIELD_BIT_OFFSET (field)); /* ??? Right now we don't do anything with DECL_OFFSET_ALIGN. */ } @@ -6649,7 +6649,7 @@ get_inner_reference (tree exp, HOST_WIDE_INT *pbitsize, tree off = TREE_OPERAND (exp, 1); if (!integer_zerop (off)) { - addr_wide_int boff, coff = mem_ref_offset (exp); + offset_int boff, coff = mem_ref_offset (exp); boff = wi::lshift (coff, (BITS_PER_UNIT == 8 ? 3 : exact_log2 (BITS_PER_UNIT))); bit_offset += boff; @@ -6675,8 +6675,8 @@ get_inner_reference (tree exp, HOST_WIDE_INT *pbitsize, this conversion. */ if (TREE_CODE (offset) == INTEGER_CST) { - addr_wide_int tem = wi::sext (wi::address (offset), - TYPE_PRECISION (sizetype)); + offset_int tem = wi::sext (wi::to_offset (offset), + TYPE_PRECISION (sizetype)); tem = wi::lshift (tem, (BITS_PER_UNIT == 8 ? 3 : exact_log2 (BITS_PER_UNIT))); tem += bit_offset; @@ -6693,11 +6693,11 @@ get_inner_reference (tree exp, HOST_WIDE_INT *pbitsize, /* Avoid returning a negative bitpos as this may wreak havoc later. 
*/ if (wi::neg_p (bit_offset)) { - addr_wide_int mask - = wi::mask <addr_wide_int> (BITS_PER_UNIT == 8 - ? 3 : exact_log2 (BITS_PER_UNIT), - false); - addr_wide_int tem = bit_offset.and_not (mask); + offset_int mask + = wi::mask <offset_int> (BITS_PER_UNIT == 8 + ? 3 : exact_log2 (BITS_PER_UNIT), + false); + offset_int tem = bit_offset.and_not (mask); /* TEM is the bitpos rounded to BITS_PER_UNIT towards -Inf. Subtract it to BIT_OFFSET and add it (scaled) to OFFSET. */ bit_offset -= tem; diff --git a/gcc/fold-const.c b/gcc/fold-const.c index d7b69f0988b..4be592bb7ac 100644 --- a/gcc/fold-const.c +++ b/gcc/fold-const.c @@ -1579,9 +1579,9 @@ static tree fold_convert_const_int_from_int (tree type, const_tree arg1) { /* Given an integer constant, make new constant with new type, - appropriately sign-extended or truncated. Use max_wide_int + appropriately sign-extended or truncated. Use widest_int so that any extension is done according ARG1's type. */ - return force_fit_type (type, wi::extend (arg1), + return force_fit_type (type, wi::to_widest (arg1), !POINTER_TYPE_P (TREE_TYPE (arg1)), TREE_OVERFLOW (arg1)); } @@ -6611,7 +6611,7 @@ fold_single_bit_test (location_t loc, enum tree_code code, not overflow, adjust BITNUM and INNER. */ if (TREE_CODE (inner) == RSHIFT_EXPR && TREE_CODE (TREE_OPERAND (inner, 1)) == INTEGER_CST - && wi::ltu_p (wi::extend (TREE_OPERAND (inner, 1)) + bitnum, + && wi::ltu_p (wi::to_widest (TREE_OPERAND (inner, 1)) + bitnum, TYPE_PRECISION (type))) { bitnum += tree_to_hwi (TREE_OPERAND (inner, 1)); @@ -7289,7 +7289,7 @@ native_encode_int (const_tree expr, unsigned char *ptr, int len) int bitpos = byte * BITS_PER_UNIT; /* Extend EXPR according to TYPE_SIGN if the precision isn't a whole number of bytes. */ - value = wi::extract_uhwi (wi::extend (expr), bitpos, BITS_PER_UNIT); + value = wi::extract_uhwi (wi::to_widest (expr), bitpos, BITS_PER_UNIT); if (total_bytes > UNITS_PER_WORD) { @@ -10452,7 +10452,7 @@ fold_binary_loc (location_t loc, code11 = TREE_CODE (tree11); if (code01 == INTEGER_CST && code11 == INTEGER_CST - && (wi::extend (tree01) + wi::extend (tree11) + && (wi::to_widest (tree01) + wi::to_widest (tree11) == element_precision (TREE_TYPE (TREE_OPERAND (arg0, 0))))) { tem = build2_loc (loc, LROTATE_EXPR, diff --git a/gcc/fortran/trans-array.c b/gcc/fortran/trans-array.c index 36098806e9b..4f778b913a0 100644 --- a/gcc/fortran/trans-array.c +++ b/gcc/fortran/trans-array.c @@ -5362,7 +5362,7 @@ gfc_conv_array_initializer (tree type, gfc_expr * expr) { gfc_constructor *c; tree tmp; - addr_wide_int wtmp; + offset_int wtmp; gfc_se se; tree index, range; vec<constructor_elt, va_gc> *v = NULL; @@ -5385,7 +5385,7 @@ gfc_conv_array_initializer (tree type, gfc_expr * expr) else gfc_conv_structure (&se, expr, 1); - wtmp = wi::address (TYPE_MAX_VALUE (TYPE_DOMAIN (type))) + 1; + wtmp = wi::to_offset (TYPE_MAX_VALUE (TYPE_DOMAIN (type))) + 1; gcc_assert (wtmp != 0); /* This will probably eat buckets of memory for large arrays. 
*/ while (wtmp != 0) diff --git a/gcc/gengtype.c b/gcc/gengtype.c index 654c35423e1..e2d9dd4f947 100644 --- a/gcc/gengtype.c +++ b/gcc/gengtype.c @@ -5475,8 +5475,8 @@ main (int argc, char **argv) POS_HERE (do_scalar_typedef ("FIXED_VALUE_TYPE", &pos)); POS_HERE (do_scalar_typedef ("double_int", &pos)); POS_HERE (do_scalar_typedef ("double_int_storage", &pos)); - POS_HERE (do_scalar_typedef ("addr_wide_int", &pos)); - POS_HERE (do_scalar_typedef ("max_wide_int", &pos)); + POS_HERE (do_scalar_typedef ("offset_int", &pos)); + POS_HERE (do_scalar_typedef ("widest_int", &pos)); POS_HERE (do_scalar_typedef ("uint64_t", &pos)); POS_HERE (do_scalar_typedef ("uint8", &pos)); POS_HERE (do_scalar_typedef ("uintptr_t", &pos)); diff --git a/gcc/gimple-fold.c b/gcc/gimple-fold.c index d809196be09..f9f141808e5 100644 --- a/gcc/gimple-fold.c +++ b/gcc/gimple-fold.c @@ -2806,10 +2806,10 @@ fold_array_ctor_reference (tree type, tree ctor, { unsigned HOST_WIDE_INT cnt; tree cfield, cval; - addr_wide_int low_bound; - addr_wide_int elt_size; - addr_wide_int index, max_index; - addr_wide_int access_index; + offset_int low_bound; + offset_int elt_size; + offset_int index, max_index; + offset_int access_index; tree domain_type = NULL_TREE, index_type = NULL_TREE; HOST_WIDE_INT inner_offset; @@ -2821,14 +2821,14 @@ fold_array_ctor_reference (tree type, tree ctor, /* Static constructors for variably sized objects makes no sense. */ gcc_assert (TREE_CODE (TYPE_MIN_VALUE (domain_type)) == INTEGER_CST); index_type = TREE_TYPE (TYPE_MIN_VALUE (domain_type)); - low_bound = wi::address (TYPE_MIN_VALUE (domain_type)); + low_bound = wi::to_offset (TYPE_MIN_VALUE (domain_type)); } else low_bound = 0; /* Static constructors for variably sized objects makes no sense. */ gcc_assert (TREE_CODE (TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (ctor)))) == INTEGER_CST); - elt_size = wi::address (TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (ctor)))); + elt_size = wi::to_offset (TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (ctor)))); /* We can handle only constantly sized accesses that are known to not be larger than size of array element. */ @@ -2838,7 +2838,7 @@ fold_array_ctor_reference (tree type, tree ctor, return NULL_TREE; /* Compute the array index we look for. */ - access_index = wi::udiv_trunc (addr_wide_int (offset / BITS_PER_UNIT), + access_index = wi::udiv_trunc (offset_int (offset / BITS_PER_UNIT), elt_size); access_index += low_bound; if (index_type) @@ -2866,12 +2866,12 @@ fold_array_ctor_reference (tree type, tree ctor, if (cfield) { if (TREE_CODE (cfield) == INTEGER_CST) - max_index = index = wi::address (cfield); + max_index = index = wi::to_offset (cfield); else { gcc_assert (TREE_CODE (cfield) == RANGE_EXPR); - index = wi::address (TREE_OPERAND (cfield, 0)); - max_index = wi::address (TREE_OPERAND (cfield, 1)); + index = wi::to_offset (TREE_OPERAND (cfield, 0)); + max_index = wi::to_offset (TREE_OPERAND (cfield, 1)); } } else @@ -2912,9 +2912,9 @@ fold_nonarray_ctor_reference (tree type, tree ctor, tree byte_offset = DECL_FIELD_OFFSET (cfield); tree field_offset = DECL_FIELD_BIT_OFFSET (cfield); tree field_size = DECL_SIZE (cfield); - addr_wide_int bitoffset; - addr_wide_int byte_offset_cst = wi::address (byte_offset); - addr_wide_int bitoffset_end, access_end; + offset_int bitoffset; + offset_int byte_offset_cst = wi::to_offset (byte_offset); + offset_int bitoffset_end, access_end; /* Variable sized objects in static constructors makes no sense, but field_size can be NULL for flexible array members. 
*/ @@ -2925,15 +2925,15 @@ fold_nonarray_ctor_reference (tree type, tree ctor, : TREE_CODE (TREE_TYPE (cfield)) == ARRAY_TYPE)); /* Compute bit offset of the field. */ - bitoffset = (wi::address (field_offset) + bitoffset = (wi::to_offset (field_offset) + byte_offset_cst * BITS_PER_UNIT); /* Compute bit offset where the field ends. */ if (field_size != NULL_TREE) - bitoffset_end = bitoffset + wi::address (field_size); + bitoffset_end = bitoffset + wi::to_offset (field_size); else bitoffset_end = 0; - access_end = addr_wide_int (offset) + size; + access_end = offset_int (offset) + size; /* Is there any overlap between [OFFSET, OFFSET+SIZE) and [BITOFFSET, BITOFFSET_END)? */ @@ -2941,7 +2941,7 @@ fold_nonarray_ctor_reference (tree type, tree ctor, && (field_size == NULL_TREE || wi::lts_p (offset, bitoffset_end))) { - addr_wide_int inner_offset = addr_wide_int (offset) - bitoffset; + offset_int inner_offset = offset_int (offset) - bitoffset; /* We do have overlap. Now see if field is large enough to cover the access. Give up for accesses spanning multiple fields. */ @@ -3044,8 +3044,8 @@ fold_const_aggregate_ref_1 (tree t, tree (*valueize) (tree)) if ((TREE_CODE (low_bound) == INTEGER_CST) && (tree_fits_uhwi_p (unit_size))) { - addr_wide_int woffset - = wi::sext (wi::address (idx) - wi::address (low_bound), + offset_int woffset + = wi::sext (wi::to_offset (idx) - wi::to_offset (low_bound), TYPE_PRECISION (TREE_TYPE (idx))); if (wi::fits_shwi_p (woffset)) diff --git a/gcc/gimple-pretty-print.c b/gcc/gimple-pretty-print.c index fb333122227..a45dd535d95 100644 --- a/gcc/gimple-pretty-print.c +++ b/gcc/gimple-pretty-print.c @@ -1728,7 +1728,7 @@ dump_ssaname_info (pretty_printer *buffer, tree node, int spc) if (!POINTER_TYPE_P (TREE_TYPE (node)) && SSA_NAME_RANGE_INFO (node)) { - max_wide_int min, max; + widest_int min, max; value_range_type range_type = get_range_info (node, &min, &max); if (range_type == VR_VARYING) diff --git a/gcc/gimple-ssa-strength-reduction.c b/gcc/gimple-ssa-strength-reduction.c index e6ad3b9d843..d9a8ed22bd3 100644 --- a/gcc/gimple-ssa-strength-reduction.c +++ b/gcc/gimple-ssa-strength-reduction.c @@ -231,7 +231,7 @@ struct slsr_cand_d tree stride; /* The index constant i. */ - max_wide_int index; + widest_int index; /* The type of the candidate. This is normally the type of base_expr, but casts may have occurred when combining feeding instructions. @@ -306,7 +306,7 @@ typedef const struct cand_chain_d *const_cand_chain_t; struct incr_info_d { /* The increment that relates a candidate to its basis. */ - max_wide_int incr; + widest_int incr; /* How many times the increment occurs in the candidate tree. */ unsigned count; @@ -555,7 +555,7 @@ record_potential_basis (slsr_cand_t c) static slsr_cand_t alloc_cand_and_find_basis (enum cand_kind kind, gimple gs, tree base, - const max_wide_int &index, tree stride, tree ctype, + const widest_int &index, tree stride, tree ctype, unsigned savings) { slsr_cand_t c = (slsr_cand_t) obstack_alloc (&cand_obstack, @@ -764,7 +764,7 @@ slsr_process_phi (gimple phi, bool speed) int (i * S). Otherwise, just return double int zero. */ -static max_wide_int +static widest_int backtrace_base_for_ref (tree *pbase) { tree base_in = *pbase; @@ -792,7 +792,7 @@ backtrace_base_for_ref (tree *pbase) { /* X = B + (1 * S), S is integer constant. 
*/ *pbase = base_cand->base_expr; - return wi::extend (base_cand->stride); + return wi::to_widest (base_cand->stride); } else if (base_cand->kind == CAND_ADD && TREE_CODE (base_cand->stride) == INTEGER_CST @@ -839,13 +839,13 @@ backtrace_base_for_ref (tree *pbase) *PINDEX: C1 + (C2 * C3) + C4 + (C5 * C3) */ static bool -restructure_reference (tree *pbase, tree *poffset, max_wide_int *pindex, +restructure_reference (tree *pbase, tree *poffset, widest_int *pindex, tree *ptype) { tree base = *pbase, offset = *poffset; - max_wide_int index = *pindex; + widest_int index = *pindex; tree mult_op0, t1, t2, type; - max_wide_int c1, c2, c3, c4, c5; + widest_int c1, c2, c3, c4, c5; if (!base || !offset @@ -856,18 +856,18 @@ restructure_reference (tree *pbase, tree *poffset, max_wide_int *pindex, return false; t1 = TREE_OPERAND (base, 0); - c1 = max_wide_int::from (mem_ref_offset (base), SIGNED); + c1 = widest_int::from (mem_ref_offset (base), SIGNED); type = TREE_TYPE (TREE_OPERAND (base, 1)); mult_op0 = TREE_OPERAND (offset, 0); - c3 = wi::extend (TREE_OPERAND (offset, 1)); + c3 = wi::to_widest (TREE_OPERAND (offset, 1)); if (TREE_CODE (mult_op0) == PLUS_EXPR) if (TREE_CODE (TREE_OPERAND (mult_op0, 1)) == INTEGER_CST) { t2 = TREE_OPERAND (mult_op0, 0); - c2 = wi::extend (TREE_OPERAND (mult_op0, 1)); + c2 = wi::to_widest (TREE_OPERAND (mult_op0, 1)); } else return false; @@ -877,7 +877,7 @@ restructure_reference (tree *pbase, tree *poffset, max_wide_int *pindex, if (TREE_CODE (TREE_OPERAND (mult_op0, 1)) == INTEGER_CST) { t2 = TREE_OPERAND (mult_op0, 0); - c2 = -wi::extend (TREE_OPERAND (mult_op0, 1)); + c2 = -wi::to_widest (TREE_OPERAND (mult_op0, 1)); } else return false; @@ -910,7 +910,7 @@ slsr_process_ref (gimple gs) HOST_WIDE_INT bitsize, bitpos; enum machine_mode mode; int unsignedp, volatilep; - max_wide_int index; + widest_int index; slsr_cand_t c; if (gimple_vdef (gs)) @@ -947,7 +947,7 @@ static slsr_cand_t create_mul_ssa_cand (gimple gs, tree base_in, tree stride_in, bool speed) { tree base = NULL_TREE, stride = NULL_TREE, ctype = NULL_TREE; - max_wide_int index; + widest_int index; unsigned savings = 0; slsr_cand_t c; slsr_cand_t base_cand = base_cand_from_table (base_in); @@ -979,7 +979,7 @@ create_mul_ssa_cand (gimple gs, tree base_in, tree stride_in, bool speed) ============================ X = B + ((i' * S) * Z) */ base = base_cand->base_expr; - index = base_cand->index * wi::extend (base_cand->stride); + index = base_cand->index * wi::to_widest (base_cand->stride); stride = stride_in; ctype = base_cand->cand_type; if (has_single_use (base_in)) @@ -1017,7 +1017,7 @@ static slsr_cand_t create_mul_imm_cand (gimple gs, tree base_in, tree stride_in, bool speed) { tree base = NULL_TREE, stride = NULL_TREE, ctype = NULL_TREE; - max_wide_int index, temp; + widest_int index, temp; unsigned savings = 0; slsr_cand_t c; slsr_cand_t base_cand = base_cand_from_table (base_in); @@ -1035,7 +1035,7 @@ create_mul_imm_cand (gimple gs, tree base_in, tree stride_in, bool speed) X = (B + i') * (S * c) */ base = base_cand->base_expr; index = base_cand->index; - temp = wi::extend (base_cand->stride) * wi::extend (stride_in); + temp = wi::to_widest (base_cand->stride) * wi::to_widest (stride_in); stride = wide_int_to_tree (TREE_TYPE (stride_in), temp); ctype = base_cand->cand_type; if (has_single_use (base_in)) @@ -1065,7 +1065,7 @@ create_mul_imm_cand (gimple gs, tree base_in, tree stride_in, bool speed) =========================== X = (B + S) * c */ base = base_cand->base_expr; - index = wi::extend 
(base_cand->stride); + index = wi::to_widest (base_cand->stride); stride = stride_in; ctype = base_cand->cand_type; if (has_single_use (base_in)) @@ -1147,7 +1147,7 @@ create_add_ssa_cand (gimple gs, tree base_in, tree addend_in, bool subtract_p, bool speed) { tree base = NULL_TREE, stride = NULL_TREE, ctype = NULL; - max_wide_int index; + widest_int index; unsigned savings = 0; slsr_cand_t c; slsr_cand_t base_cand = base_cand_from_table (base_in); @@ -1166,7 +1166,7 @@ create_add_ssa_cand (gimple gs, tree base_in, tree addend_in, =========================== X = Y + ((+/-1 * S) * B) */ base = base_in; - index = wi::extend (addend_cand->stride); + index = wi::to_widest (addend_cand->stride); if (subtract_p) index = -index; stride = addend_cand->base_expr; @@ -1216,7 +1216,7 @@ create_add_ssa_cand (gimple gs, tree base_in, tree addend_in, =========================== Value: X = Y + ((-1 * S) * B) */ base = base_in; - index = wi::extend (subtrahend_cand->stride); + index = wi::to_widest (subtrahend_cand->stride); index = -index; stride = subtrahend_cand->base_expr; ctype = TREE_TYPE (base_in); @@ -1258,11 +1258,11 @@ create_add_ssa_cand (gimple gs, tree base_in, tree addend_in, about BASE_IN into the new candidate. Return the new candidate. */ static slsr_cand_t -create_add_imm_cand (gimple gs, tree base_in, max_wide_int index_in, bool speed) +create_add_imm_cand (gimple gs, tree base_in, widest_int index_in, bool speed) { enum cand_kind kind = CAND_ADD; tree base = NULL_TREE, stride = NULL_TREE, ctype = NULL_TREE; - max_wide_int index, multiple; + widest_int index, multiple; unsigned savings = 0; slsr_cand_t c; slsr_cand_t base_cand = base_cand_from_table (base_in); @@ -1272,7 +1272,7 @@ create_add_imm_cand (gimple gs, tree base_in, max_wide_int index_in, bool speed) signop sign = TYPE_SIGN (TREE_TYPE (base_cand->stride)); if (TREE_CODE (base_cand->stride) == INTEGER_CST - && wi::multiple_of_p (index_in, wi::extend (base_cand->stride), + && wi::multiple_of_p (index_in, wi::to_widest (base_cand->stride), sign, &multiple)) { /* Y = (B + i') * S, S constant, c = kS for some integer k @@ -1358,10 +1358,10 @@ slsr_process_add (gimple gs, tree rhs1, tree rhs2, bool speed) } else { - max_wide_int index; + widest_int index; /* Record an interpretation for the add-immediate. */ - index = wi::extend (rhs2); + index = wi::to_widest (rhs2); if (subtract_p) index = -index; @@ -1871,7 +1871,7 @@ phi_dependent_cand_p (slsr_cand_t c) /* Calculate the increment required for candidate C relative to its basis. */ -static max_wide_int +static widest_int cand_increment (slsr_cand_t c) { slsr_cand_t basis; @@ -1894,10 +1894,10 @@ cand_increment (slsr_cand_t c) for this candidate, return the absolute value of that increment instead. */ -static inline max_wide_int +static inline widest_int cand_abs_increment (slsr_cand_t c) { - max_wide_int increment = cand_increment (c); + widest_int increment = cand_increment (c); if (!address_arithmetic_p && wi::neg_p (increment)) increment = -increment; @@ -1918,11 +1918,12 @@ cand_already_replaced (slsr_cand_t c) replace_conditional_candidate. 
*/ static void -replace_mult_candidate (slsr_cand_t c, tree basis_name, const max_wide_int &bump_in) +replace_mult_candidate (slsr_cand_t c, tree basis_name, + const widest_int &bump_in) { tree target_type = TREE_TYPE (gimple_assign_lhs (c->cand_stmt)); enum tree_code cand_code = gimple_assign_rhs_code (c->cand_stmt); - max_wide_int bump = bump_in; + widest_int bump = bump_in; /* It is highly unlikely, but possible, that the resulting bump doesn't fit in a HWI. Abandon the replacement @@ -2022,13 +2023,13 @@ static void replace_unconditional_candidate (slsr_cand_t c) { slsr_cand_t basis; - max_wide_int bump; + widest_int bump; if (cand_already_replaced (c)) return; basis = lookup_cand (c->basis); - bump = cand_increment (c) * wi::extend (c->stride); + bump = cand_increment (c) * wi::to_widest (c->stride); replace_mult_candidate (c, gimple_assign_lhs (basis->cand_stmt), bump); } @@ -2038,7 +2039,7 @@ replace_unconditional_candidate (slsr_cand_t c) MAX_INCR_VEC_LEN increments have been found. */ static inline int -incr_vec_index (max_wide_int increment) +incr_vec_index (widest_int increment) { unsigned i; @@ -2058,7 +2059,7 @@ incr_vec_index (max_wide_int increment) static tree create_add_on_incoming_edge (slsr_cand_t c, tree basis_name, - max_wide_int increment, edge e, location_t loc, + widest_int increment, edge e, location_t loc, bool known_stride) { basic_block insert_bb; @@ -2079,7 +2080,7 @@ create_add_on_incoming_edge (slsr_cand_t c, tree basis_name, { tree bump_tree; enum tree_code code = PLUS_EXPR; - max_wide_int bump = increment * wi::extend (c->stride); + widest_int bump = increment * wi::to_widest (c->stride); if (wi::neg_p (bump)) { code = MINUS_EXPR; @@ -2171,7 +2172,7 @@ create_phi_basis (slsr_cand_t c, gimple from_phi, tree basis_name, feeding_def = gimple_assign_lhs (basis->cand_stmt); else { - max_wide_int incr = -basis->index; + widest_int incr = -basis->index; feeding_def = create_add_on_incoming_edge (c, basis_name, incr, e, loc, known_stride); } @@ -2188,7 +2189,7 @@ create_phi_basis (slsr_cand_t c, gimple from_phi, tree basis_name, else { slsr_cand_t arg_cand = base_cand_from_table (arg); - max_wide_int diff = arg_cand->index - basis->index; + widest_int diff = arg_cand->index - basis->index; feeding_def = create_add_on_incoming_edge (c, basis_name, diff, e, loc, known_stride); } @@ -2234,7 +2235,7 @@ replace_conditional_candidate (slsr_cand_t c) tree basis_name, name; slsr_cand_t basis; location_t loc; - max_wide_int bump; + widest_int bump; /* Look up the LHS SSA name from C's basis. This will be the RHS1 of the adds we will introduce to create new phi arguments. */ @@ -2247,7 +2248,7 @@ replace_conditional_candidate (slsr_cand_t c) name = create_phi_basis (c, lookup_cand (c->def_phi)->cand_stmt, basis_name, loc, KNOWN_STRIDE); /* Replace C with an add of the new basis phi and a constant. */ - bump = c->index * wi::extend (c->stride); + bump = c->index * wi::to_widest (c->stride); replace_mult_candidate (c, name, bump); } @@ -2379,11 +2380,12 @@ count_candidates (slsr_cand_t c) candidates with the same increment, also record T_0 for subsequent use. 
*/ static void -record_increment (slsr_cand_t c, const max_wide_int &increment_in, bool is_phi_adjust) +record_increment (slsr_cand_t c, const widest_int &increment_in, + bool is_phi_adjust) { bool found = false; unsigned i; - max_wide_int increment = increment_in; + widest_int increment = increment_in; /* Treat increments that differ only in sign as identical so as to share initializers, unless we are generating pointer arithmetic. */ @@ -2490,7 +2492,7 @@ record_phi_increments (slsr_cand_t basis, gimple phi) else { slsr_cand_t arg_cand = base_cand_from_table (arg); - max_wide_int diff = arg_cand->index - basis->index; + widest_int diff = arg_cand->index - basis->index; record_increment (arg_cand, diff, PHI_ADJUST); } } @@ -2541,7 +2543,7 @@ record_increments (slsr_cand_t c) uses. */ static int -phi_incr_cost (slsr_cand_t c, const max_wide_int &incr, gimple phi, int *savings) +phi_incr_cost (slsr_cand_t c, const widest_int &incr, gimple phi, int *savings) { unsigned i; int cost = 0; @@ -2566,7 +2568,7 @@ phi_incr_cost (slsr_cand_t c, const max_wide_int &incr, gimple phi, int *savings else { slsr_cand_t arg_cand = base_cand_from_table (arg); - max_wide_int diff = arg_cand->index - basis->index; + widest_int diff = arg_cand->index - basis->index; if (incr == diff) { @@ -2631,10 +2633,10 @@ optimize_cands_for_speed_p (slsr_cand_t c) static int lowest_cost_path (int cost_in, int repl_savings, slsr_cand_t c, - const max_wide_int &incr, bool count_phis) + const widest_int &incr, bool count_phis) { int local_cost, sib_cost, savings = 0; - max_wide_int cand_incr = cand_abs_increment (c); + widest_int cand_incr = cand_abs_increment (c); if (cand_already_replaced (c)) local_cost = cost_in; @@ -2677,11 +2679,11 @@ lowest_cost_path (int cost_in, int repl_savings, slsr_cand_t c, would go dead. */ static int -total_savings (int repl_savings, slsr_cand_t c, const max_wide_int &incr, +total_savings (int repl_savings, slsr_cand_t c, const widest_int &incr, bool count_phis) { int savings = 0; - max_wide_int cand_incr = cand_abs_increment (c); + widest_int cand_incr = cand_abs_increment (c); if (incr == cand_incr && !cand_already_replaced (c)) savings += repl_savings + c->dead_savings; @@ -2885,7 +2887,7 @@ ncd_for_two_cands (basic_block bb1, basic_block bb2, candidates, return the earliest candidate in the block in *WHERE. */ static basic_block -ncd_with_phi (slsr_cand_t c, const max_wide_int &incr, gimple phi, +ncd_with_phi (slsr_cand_t c, const widest_int &incr, gimple phi, basic_block ncd, slsr_cand_t *where) { unsigned i; @@ -2905,7 +2907,7 @@ ncd_with_phi (slsr_cand_t c, const max_wide_int &incr, gimple phi, else { slsr_cand_t arg_cand = base_cand_from_table (arg); - max_wide_int diff = arg_cand->index - basis->index; + widest_int diff = arg_cand->index - basis->index; if ((incr == diff) || (!address_arithmetic_p && incr == -diff)) ncd = ncd_for_two_cands (ncd, gimple_bb (arg_cand->cand_stmt), @@ -2924,7 +2926,7 @@ ncd_with_phi (slsr_cand_t c, const max_wide_int &incr, gimple phi, return the earliest candidate in the block in *WHERE. */ static basic_block -ncd_of_cand_and_phis (slsr_cand_t c, const max_wide_int &incr, slsr_cand_t *where) +ncd_of_cand_and_phis (slsr_cand_t c, const widest_int &incr, slsr_cand_t *where) { basic_block ncd = NULL; @@ -2949,7 +2951,7 @@ ncd_of_cand_and_phis (slsr_cand_t c, const max_wide_int &incr, slsr_cand_t *wher *WHERE. 
*/ static basic_block -nearest_common_dominator_for_cands (slsr_cand_t c, const max_wide_int &incr, +nearest_common_dominator_for_cands (slsr_cand_t c, const widest_int &incr, slsr_cand_t *where) { basic_block sib_ncd = NULL, dep_ncd = NULL, this_ncd = NULL, ncd; @@ -3025,7 +3027,7 @@ insert_initializers (slsr_cand_t c) slsr_cand_t where = NULL; gimple init_stmt; tree stride_type, new_name, incr_tree; - max_wide_int incr = incr_vec[i].incr; + widest_int incr = incr_vec[i].incr; if (!profitable_increment_p (i) || incr == 1 @@ -3117,7 +3119,7 @@ all_phi_incrs_profitable (slsr_cand_t c, gimple phi) { int j; slsr_cand_t arg_cand = base_cand_from_table (arg); - max_wide_int increment = arg_cand->index - basis->index; + widest_int increment = arg_cand->index - basis->index; if (!address_arithmetic_p && wi::neg_p (increment)) increment = -increment; @@ -3225,7 +3227,7 @@ replace_one_candidate (slsr_cand_t c, unsigned i, tree basis_name) tree orig_rhs1, orig_rhs2; tree rhs2; enum tree_code orig_code, repl_code; - max_wide_int cand_incr; + widest_int cand_incr; orig_code = gimple_assign_rhs_code (c->cand_stmt); orig_rhs1 = gimple_assign_rhs1 (c->cand_stmt); @@ -3365,7 +3367,7 @@ replace_profitable_candidates (slsr_cand_t c) { if (!cand_already_replaced (c)) { - max_wide_int increment = cand_abs_increment (c); + widest_int increment = cand_abs_increment (c); enum tree_code orig_code = gimple_assign_rhs_code (c->cand_stmt); int i; diff --git a/gcc/graphite-sese-to-poly.c b/gcc/graphite-sese-to-poly.c index afbd89e9ab2..c82929b8b94 100644 --- a/gcc/graphite-sese-to-poly.c +++ b/gcc/graphite-sese-to-poly.c @@ -1006,7 +1006,7 @@ build_loop_iteration_domains (scop_p scop, struct loop *loop, /* loop_i <= expr_nb_iters */ else if (!chrec_contains_undetermined (nb_iters)) { - max_wide_int nit; + widest_int nit; isl_pw_aff *aff; isl_set *valid; isl_local_space *ls; diff --git a/gcc/ipa-prop.c b/gcc/ipa-prop.c index c1d3380e120..dae58d9bf15 100644 --- a/gcc/ipa-prop.c +++ b/gcc/ipa-prop.c @@ -3640,7 +3640,7 @@ ipa_modify_call_arguments (struct cgraph_edge *cs, gimple stmt, if (TYPE_ALIGN (type) > align) align = TYPE_ALIGN (type); } - misalign += (addr_wide_int::from (off, SIGNED) + misalign += (offset_int::from (off, SIGNED) * BITS_PER_UNIT).to_short_addr (); misalign = misalign & (align - 1); if (misalign != 0) diff --git a/gcc/loop-doloop.c b/gcc/loop-doloop.c index 3fa156165bd..1299a5cce4e 100644 --- a/gcc/loop-doloop.c +++ b/gcc/loop-doloop.c @@ -409,7 +409,7 @@ doloop_modify (struct loop *loop, struct niter_desc *desc, basic_block loop_end = desc->out_edge->src; enum machine_mode mode; rtx true_prob_val; - max_wide_int iterations; + widest_int iterations; jump_insn = BB_END (loop_end); @@ -617,7 +617,7 @@ doloop_optimize (struct loop *loop) struct niter_desc *desc; unsigned word_mode_size; unsigned HOST_WIDE_INT word_mode_max; - max_wide_int iter; + widest_int iter; int entered_at_top; if (dump_file) diff --git a/gcc/loop-unroll.c b/gcc/loop-unroll.c index f216f715522..f0864111842 100644 --- a/gcc/loop-unroll.c +++ b/gcc/loop-unroll.c @@ -646,7 +646,7 @@ decide_unroll_constant_iterations (struct loop *loop, int flags) { unsigned nunroll, nunroll_by_av, best_copies, best_unroll = 0, n_copies, i; struct niter_desc *desc; - max_wide_int iterations; + widest_int iterations; if (!(flags & UAP_UNROLL)) { @@ -939,7 +939,7 @@ decide_unroll_runtime_iterations (struct loop *loop, int flags) { unsigned nunroll, nunroll_by_av, i; struct niter_desc *desc; - max_wide_int iterations; + widest_int iterations; if 
(!(flags & UAP_UNROLL)) { @@ -1336,7 +1336,7 @@ static void decide_peel_simple (struct loop *loop, int flags) { unsigned npeel; - max_wide_int iterations; + widest_int iterations; if (!(flags & UAP_PEEL)) { @@ -1492,7 +1492,7 @@ decide_unroll_stupid (struct loop *loop, int flags) { unsigned nunroll, nunroll_by_av, i; struct niter_desc *desc; - max_wide_int iterations; + widest_int iterations; if (!(flags & UAP_UNROLL_ALL)) { diff --git a/gcc/lto-streamer-in.c b/gcc/lto-streamer-in.c index a58c3446395..8975cd6dd70 100644 --- a/gcc/lto-streamer-in.c +++ b/gcc/lto-streamer-in.c @@ -702,7 +702,7 @@ input_cfg (struct lto_input_block *ib, struct function *fn, for (i = 0; i < len; i++) a[i] = streamer_read_hwi (ib); - loop->nb_iterations_upper_bound = max_wide_int::from_array (a, len); + loop->nb_iterations_upper_bound = widest_int::from_array (a, len); } loop->any_estimate = streamer_read_hwi (ib); if (loop->any_estimate) @@ -714,7 +714,7 @@ input_cfg (struct lto_input_block *ib, struct function *fn, for (i = 0; i < len; i++) a[i] = streamer_read_hwi (ib); - loop->nb_iterations_estimate = max_wide_int::from_array (a, len); + loop->nb_iterations_estimate = widest_int::from_array (a, len); } place_new_loop (fn, loop); diff --git a/gcc/predict.c b/gcc/predict.c index 8b15e09d669..81c6a18d079 100644 --- a/gcc/predict.c +++ b/gcc/predict.c @@ -1298,12 +1298,12 @@ predict_iv_comparison (struct loop *loop, basic_block bb, { int probability; bool overflow, overall_overflow = false; - max_wide_int compare_count, tem, loop_count; + widest_int compare_count, tem, loop_count; - max_wide_int loop_bound = wi::extend (loop_bound_var); - max_wide_int compare_bound = wi::extend (compare_var); - max_wide_int base = wi::extend (compare_base); - max_wide_int compare_step = wi::extend (compare_step_var); + widest_int loop_bound = wi::to_widest (loop_bound_var); + widest_int compare_bound = wi::to_widest (compare_var); + widest_int base = wi::to_widest (compare_base); + widest_int compare_step = wi::to_widest (compare_step_var); /* (loop_bound - base) / compare_step */ tem = wi::sub (loop_bound, base, SIGNED, &overflow); diff --git a/gcc/stor-layout.c b/gcc/stor-layout.c index cad3f5c23dc..eed3b22e8df 100644 --- a/gcc/stor-layout.c +++ b/gcc/stor-layout.c @@ -2199,9 +2199,9 @@ layout_type (tree type) && tree_int_cst_lt (ub, lb)) { lb = wide_int_to_tree (ssizetype, - addr_wide_int::from (lb, SIGNED)); + offset_int::from (lb, SIGNED)); ub = wide_int_to_tree (ssizetype, - addr_wide_int::from (ub, SIGNED)); + offset_int::from (ub, SIGNED)); } length = fold_convert (sizetype, diff --git a/gcc/tree-affine.c b/gcc/tree-affine.c index 1390b0a79c4..9321dcc0928 100644 --- a/gcc/tree-affine.c +++ b/gcc/tree-affine.c @@ -32,8 +32,8 @@ along with GCC; see the file COPYING3. If not see /* Extends CST as appropriate for the affine combinations COMB. */ -max_wide_int -wide_int_ext_for_comb (max_wide_int cst, aff_tree *comb) +widest_int +wide_int_ext_for_comb (widest_int cst, aff_tree *comb) { return wi::sext (cst, TYPE_PRECISION (comb->type)); } @@ -54,7 +54,7 @@ aff_combination_zero (aff_tree *comb, tree type) /* Sets COMB to CST. */ void -aff_combination_const (aff_tree *comb, tree type, const max_wide_int &cst) +aff_combination_const (aff_tree *comb, tree type, const widest_int &cst) { aff_combination_zero (comb, type); comb->offset = wide_int_ext_for_comb (cst, comb);; @@ -75,7 +75,7 @@ aff_combination_elt (aff_tree *comb, tree type, tree elt) /* Scales COMB by SCALE. 
*/ void -aff_combination_scale (aff_tree *comb, max_wide_int scale) +aff_combination_scale (aff_tree *comb, widest_int scale) { unsigned i, j; @@ -92,7 +92,7 @@ aff_combination_scale (aff_tree *comb, max_wide_int scale) comb->offset = wide_int_ext_for_comb (scale * comb->offset, comb); for (i = 0, j = 0; i < comb->n; i++) { - max_wide_int new_coef; + widest_int new_coef; new_coef = wide_int_ext_for_comb (scale * comb->elts[i].coef, comb); /* A coefficient may become zero due to overflow. Remove the zero @@ -126,7 +126,7 @@ aff_combination_scale (aff_tree *comb, max_wide_int scale) /* Adds ELT * SCALE to COMB. */ void -aff_combination_add_elt (aff_tree *comb, tree elt, max_wide_int scale) +aff_combination_add_elt (aff_tree *comb, tree elt, widest_int scale) { unsigned i; tree type; @@ -138,7 +138,7 @@ aff_combination_add_elt (aff_tree *comb, tree elt, max_wide_int scale) for (i = 0; i < comb->n; i++) if (operand_equal_p (comb->elts[i].val, elt, 0)) { - max_wide_int new_coef; + widest_int new_coef; new_coef = wide_int_ext_for_comb (comb->elts[i].coef + scale, comb); if (new_coef != 0) @@ -189,7 +189,7 @@ aff_combination_add_elt (aff_tree *comb, tree elt, max_wide_int scale) /* Adds CST to C. */ static void -aff_combination_add_cst (aff_tree *c, const max_wide_int &cst) +aff_combination_add_cst (aff_tree *c, const widest_int &cst) { c->offset = wide_int_ext_for_comb (c->offset + cst, c); } @@ -233,7 +233,7 @@ aff_combination_convert (aff_tree *comb, tree type) comb->offset = wide_int_ext_for_comb (comb->offset, comb); for (i = j = 0; i < comb->n; i++) { - max_wide_int new_coef = comb->elts[i].coef; + widest_int new_coef = comb->elts[i].coef; if (new_coef == 0) continue; comb->elts[j].coef = new_coef; @@ -269,7 +269,7 @@ tree_to_aff_combination (tree expr, tree type, aff_tree *comb) switch (code) { case INTEGER_CST: - aff_combination_const (comb, type, wi::extend (expr)); + aff_combination_const (comb, type, wi::to_widest (expr)); return; case POINTER_PLUS_EXPR: @@ -292,7 +292,7 @@ tree_to_aff_combination (tree expr, tree type, aff_tree *comb) if (TREE_CODE (cst) != INTEGER_CST) break; tree_to_aff_combination (TREE_OPERAND (expr, 0), type, comb); - aff_combination_scale (comb, wi::extend (cst)); + aff_combination_scale (comb, wi::to_widest (cst)); return; case NEGATE_EXPR: @@ -368,7 +368,7 @@ tree_to_aff_combination (tree expr, tree type, aff_tree *comb) combination COMB. */ static tree -add_elt_to_tree (tree expr, tree type, tree elt, max_wide_int scale, +add_elt_to_tree (tree expr, tree type, tree elt, widest_int scale, aff_tree *comb ATTRIBUTE_UNUSED) { enum tree_code code; @@ -452,7 +452,7 @@ aff_combination_to_tree (aff_tree *comb) tree type = comb->type; tree expr = NULL_TREE; unsigned i; - max_wide_int off, sgn; + widest_int off, sgn; tree type1 = type; if (POINTER_TYPE_P (type)) type1 = sizetype; @@ -517,7 +517,7 @@ aff_combination_remove_elt (aff_tree *comb, unsigned m) static void -aff_combination_add_product (aff_tree *c, const max_wide_int &coef, tree val, +aff_combination_add_product (aff_tree *c, const widest_int &coef, tree val, aff_tree *r) { unsigned i; @@ -615,7 +615,7 @@ aff_combination_expand (aff_tree *comb ATTRIBUTE_UNUSED, aff_tree to_add, current, curre; tree e, rhs; gimple def; - max_wide_int scale; + widest_int scale; void **slot; struct name_expansion *exp; @@ -760,10 +760,10 @@ free_affine_expand_cache (struct pointer_map_t **cache) is set to true. 
*/ static bool -wide_int_constant_multiple_p (max_wide_int val, max_wide_int div, - bool *mult_set, max_wide_int *mult) +wide_int_constant_multiple_p (widest_int val, widest_int div, + bool *mult_set, widest_int *mult) { - max_wide_int rem, cst; + widest_int rem, cst; if (val == 0) { @@ -793,7 +793,7 @@ wide_int_constant_multiple_p (max_wide_int val, max_wide_int div, bool aff_combination_constant_multiple_p (aff_tree *val, aff_tree *div, - max_wide_int *mult) + widest_int *mult) { bool mult_set = false; unsigned i; @@ -877,7 +877,7 @@ debug_aff (aff_tree *val) location is stored to SIZE. */ void -get_inner_reference_aff (tree ref, aff_tree *addr, max_wide_int *size) +get_inner_reference_aff (tree ref, aff_tree *addr, widest_int *size) { HOST_WIDE_INT bitsize, bitpos; tree toff; @@ -908,9 +908,10 @@ get_inner_reference_aff (tree ref, aff_tree *addr, max_wide_int *size) size SIZE2 at position DIFF cannot overlap. */ bool -aff_comb_cannot_overlap_p (aff_tree *diff, const max_wide_int &size1, const max_wide_int &size2) +aff_comb_cannot_overlap_p (aff_tree *diff, const widest_int &size1, + const widest_int &size2) { - max_wide_int d, bound; + widest_int d, bound; /* Unless the difference is a constant, we fail. */ if (diff->n != 0) diff --git a/gcc/tree-affine.h b/gcc/tree-affine.h index be91ff6969e..ad2307beda4 100644 --- a/gcc/tree-affine.h +++ b/gcc/tree-affine.h @@ -32,7 +32,7 @@ struct aff_comb_elt tree val; /* Its coefficient in the combination. */ - max_wide_int coef; + widest_int coef; }; typedef struct affine_tree_combination @@ -41,7 +41,7 @@ typedef struct affine_tree_combination tree type; /* Constant offset. */ - max_wide_int offset; + widest_int offset; /* Number of elements of the combination. */ unsigned n; @@ -60,25 +60,26 @@ typedef struct affine_tree_combination tree rest; } aff_tree; -max_wide_int wide_int_ext_for_comb (max_wide_int, aff_tree *); -void aff_combination_const (aff_tree *, tree, const max_wide_int &); +widest_int wide_int_ext_for_comb (widest_int, aff_tree *); +void aff_combination_const (aff_tree *, tree, const widest_int &); void aff_combination_elt (aff_tree *, tree, tree); -void aff_combination_scale (aff_tree *, max_wide_int); +void aff_combination_scale (aff_tree *, widest_int); void aff_combination_mult (aff_tree *, aff_tree *, aff_tree *); void aff_combination_add (aff_tree *, aff_tree *); -void aff_combination_add_elt (aff_tree *, tree, max_wide_int); +void aff_combination_add_elt (aff_tree *, tree, widest_int); void aff_combination_remove_elt (aff_tree *, unsigned); void aff_combination_convert (aff_tree *, tree); void tree_to_aff_combination (tree, tree, aff_tree *); tree aff_combination_to_tree (aff_tree *); void unshare_aff_combination (aff_tree *); -bool aff_combination_constant_multiple_p (aff_tree *, aff_tree *, max_wide_int *); +bool aff_combination_constant_multiple_p (aff_tree *, aff_tree *, widest_int *); void aff_combination_expand (aff_tree *, struct pointer_map_t **); void tree_to_aff_combination_expand (tree, tree, aff_tree *, struct pointer_map_t **); -void get_inner_reference_aff (tree, aff_tree *, max_wide_int *); +void get_inner_reference_aff (tree, aff_tree *, widest_int *); void free_affine_expand_cache (struct pointer_map_t **); -bool aff_comb_cannot_overlap_p (aff_tree *, const max_wide_int &, const max_wide_int &); +bool aff_comb_cannot_overlap_p (aff_tree *, const widest_int &, + const widest_int &); /* Debugging functions. 
*/ void debug_aff (aff_tree *); diff --git a/gcc/tree-data-ref.c b/gcc/tree-data-ref.c index 21085ae2293..6fa9f3b5abc 100644 --- a/gcc/tree-data-ref.c +++ b/gcc/tree-data-ref.c @@ -773,7 +773,7 @@ dr_analyze_innermost (struct data_reference *dr, struct loop *nest) { if (!integer_zerop (TREE_OPERAND (base, 1))) { - addr_wide_int moff = mem_ref_offset (base); + offset_int moff = mem_ref_offset (base); tree mofft = wide_int_to_tree (sizetype, moff); if (!poffset) poffset = mofft; @@ -1370,7 +1370,7 @@ dr_may_alias_p (const struct data_reference *a, const struct data_reference *b, if (!loop_nest) { aff_tree off1, off2; - max_wide_int size1, size2; + widest_int size1, size2; get_inner_reference_aff (DR_REF (a), &off1, &size1); get_inner_reference_aff (DR_REF (b), &off2, &size2); aff_combination_scale (&off1, -1); @@ -1748,7 +1748,7 @@ analyze_ziv_subscript (tree chrec_a, static tree max_stmt_executions_tree (struct loop *loop) { - max_wide_int nit; + widest_int nit; if (!max_stmt_executions (loop, &nit)) return chrec_dont_know; diff --git a/gcc/tree-dfa.c b/gcc/tree-dfa.c index 0a6bf90807e..688bf7e2041 100644 --- a/gcc/tree-dfa.c +++ b/gcc/tree-dfa.c @@ -383,7 +383,7 @@ get_ref_base_and_extent (tree exp, HOST_WIDE_INT *poffset, HOST_WIDE_INT bitsize = -1; HOST_WIDE_INT maxsize = -1; tree size_tree = NULL_TREE; - addr_wide_int bit_offset = 0; + offset_int bit_offset = 0; HOST_WIDE_INT hbit_offset; bool seen_variable_array_ref = false; tree base_type; @@ -422,7 +422,7 @@ get_ref_base_and_extent (tree exp, HOST_WIDE_INT *poffset, switch (TREE_CODE (exp)) { case BIT_FIELD_REF: - bit_offset += wi::address (TREE_OPERAND (exp, 2)); + bit_offset += wi::to_offset (TREE_OPERAND (exp, 2)); break; case COMPONENT_REF: @@ -432,11 +432,11 @@ get_ref_base_and_extent (tree exp, HOST_WIDE_INT *poffset, if (this_offset && TREE_CODE (this_offset) == INTEGER_CST) { - addr_wide_int woffset = wi::address (this_offset); + offset_int woffset = wi::to_offset (this_offset); woffset = wi::lshift (woffset, (BITS_PER_UNIT == 8 ? 3 : exact_log2 (BITS_PER_UNIT))); - woffset += wi::address (DECL_FIELD_BIT_OFFSET (field)); + woffset += wi::to_offset (DECL_FIELD_BIT_OFFSET (field)); bit_offset += woffset; /* If we had seen a variable array ref already and we just @@ -497,10 +497,10 @@ get_ref_base_and_extent (tree exp, HOST_WIDE_INT *poffset, && (unit_size = array_ref_element_size (exp), TREE_CODE (unit_size) == INTEGER_CST)) { - addr_wide_int woffset - = wi::sext (wi::address (index) - wi::address (low_bound), + offset_int woffset + = wi::sext (wi::to_offset (index) - wi::to_offset (low_bound), TYPE_PRECISION (TREE_TYPE (index))); - woffset *= wi::address (unit_size); + woffset *= wi::to_offset (unit_size); woffset = wi::lshift (woffset, (BITS_PER_UNIT == 8 ? 3 : exact_log2 (BITS_PER_UNIT))); @@ -550,7 +550,7 @@ get_ref_base_and_extent (tree exp, HOST_WIDE_INT *poffset, exp = TREE_OPERAND (TREE_OPERAND (exp, 0), 0); else { - addr_wide_int off = mem_ref_offset (exp); + offset_int off = mem_ref_offset (exp); off = wi::lshift (off, (BITS_PER_UNIT == 8 ? 3 : exact_log2 (BITS_PER_UNIT))); off += bit_offset; @@ -580,7 +580,7 @@ get_ref_base_and_extent (tree exp, HOST_WIDE_INT *poffset, exp = TREE_OPERAND (TMR_BASE (exp), 0); else { - addr_wide_int off = mem_ref_offset (exp); + offset_int off = mem_ref_offset (exp); off = wi::lshift (off, (BITS_PER_UNIT == 8 ? 
3 : exact_log2 (BITS_PER_UNIT))); off += bit_offset; diff --git a/gcc/tree-dfa.h b/gcc/tree-dfa.h index 017cb823dd8..66154d8af50 100644 --- a/gcc/tree-dfa.h +++ b/gcc/tree-dfa.h @@ -135,7 +135,7 @@ get_addr_base_and_unit_offset_1 (tree exp, HOST_WIDE_INT *poffset, { if (!integer_zerop (TREE_OPERAND (exp, 1))) { - addr_wide_int off = mem_ref_offset (exp); + offset_int off = mem_ref_offset (exp); byte_offset += off.to_short_addr (); } exp = TREE_OPERAND (base, 0); @@ -157,7 +157,7 @@ get_addr_base_and_unit_offset_1 (tree exp, HOST_WIDE_INT *poffset, return NULL_TREE; if (!integer_zerop (TMR_OFFSET (exp))) { - addr_wide_int off = mem_ref_offset (exp); + offset_int off = mem_ref_offset (exp); byte_offset += off.to_short_addr (); } exp = TREE_OPERAND (base, 0); diff --git a/gcc/tree-flow-inline.h b/gcc/tree-flow-inline.h index 85f4bc9910e..c2ede6abd62 100644 --- a/gcc/tree-flow-inline.h +++ b/gcc/tree-flow-inline.h @@ -1259,7 +1259,7 @@ get_addr_base_and_unit_offset_1 (tree exp, HOST_WIDE_INT *poffset, { if (!integer_zerop (TREE_OPERAND (exp, 1))) { - addr_wide_int off = mem_ref_offset (exp); + offset_int off = mem_ref_offset (exp); byte_offset += off.to_short_addr (); } exp = TREE_OPERAND (base, 0); @@ -1281,7 +1281,7 @@ get_addr_base_and_unit_offset_1 (tree exp, HOST_WIDE_INT *poffset, return NULL_TREE; if (!integer_zerop (TMR_OFFSET (exp))) { - addr_wide_int off = mem_ref_offset (exp); + offset_int off = mem_ref_offset (exp); byte_offset += off.to_short_addr (); } exp = TREE_OPERAND (base, 0); diff --git a/gcc/tree-flow.h b/gcc/tree-flow.h index 9d6ecb47d7e..37a4626c489 100644 --- a/gcc/tree-flow.h +++ b/gcc/tree-flow.h @@ -485,7 +485,7 @@ struct tree_niter_desc a loop (provided that assumptions == true and may_be_zero == false), more precisely the number of executions of the latch of the loop. */ - max_wide_int max; /* The upper bound on the number of iterations of + widest_int max; /* The upper bound on the number of iterations of the loop. */ /* The simplified shape of the exit condition. The loop exits if diff --git a/gcc/tree-object-size.c b/gcc/tree-object-size.c index 92745538d0d..9c0ee852520 100644 --- a/gcc/tree-object-size.c +++ b/gcc/tree-object-size.c @@ -191,7 +191,7 @@ addr_object_size (struct object_size_info *osi, const_tree ptr, } if (sz != unknown[object_size_type]) { - addr_wide_int dsz = addr_wide_int (sz) - mem_ref_offset (pt_var); + offset_int dsz = offset_int (sz) - mem_ref_offset (pt_var); if (wi::neg_p (dsz)) sz = 0; else if (wi::fits_uhwi_p (dsz)) diff --git a/gcc/tree-predcom.c b/gcc/tree-predcom.c index d6274d5e28d..17f85a62f95 100644 --- a/gcc/tree-predcom.c +++ b/gcc/tree-predcom.c @@ -229,7 +229,7 @@ typedef struct dref_d unsigned distance; /* Number of iterations offset from the first reference in the component. */ - max_wide_int offset; + widest_int offset; /* Number of the reference in a component, in dominance ordering. 
      */
   unsigned pos;
@@ -618,7 +618,7 @@ aff_combination_dr_offset (struct data_reference *dr, aff_tree *offset)
   tree_to_aff_combination_expand (DR_OFFSET (dr), type, offset,
                                   &name_expansions);
-  aff_combination_const (&delta, type, wi::extend (DR_INIT (dr)));
+  aff_combination_const (&delta, type, wi::to_widest (DR_INIT (dr)));
   aff_combination_add (offset, &delta);
 }
@@ -630,7 +630,7 @@ aff_combination_dr_offset (struct data_reference *dr, aff_tree *offset)
 static bool
 determine_offset (struct data_reference *a, struct data_reference *b,
-                 max_wide_int *off)
+                 widest_int *off)
 {
   aff_tree diff, baseb, step;
   tree typea, typeb;
@@ -734,7 +734,7 @@ split_data_refs_to_components (struct loop *loop,
   FOR_EACH_VEC_ELT (depends, i, ddr)
     {
-      max_wide_int dummy_off;
+      widest_int dummy_off;
      if (DDR_ARE_DEPENDENT (ddr) == chrec_known)
        continue;
@@ -919,7 +919,7 @@ static void
 add_ref_to_chain (chain_p chain, dref ref)
 {
   dref root = get_chain_root (chain);
-  max_wide_int dist;
+  widest_int dist;
   gcc_assert (wi::les_p (root->offset, ref->offset));
   dist = ref->offset - root->offset;
@@ -1023,7 +1023,7 @@ valid_initializer_p (struct data_reference *ref,
                     unsigned distance, struct data_reference *root)
 {
   aff_tree diff, base, step;
-  max_wide_int off;
+  widest_int off;
   /* Both REF and ROOT must be accessing the same object.  */
   if (!operand_equal_p (DR_BASE_ADDRESS (ref), DR_BASE_ADDRESS (root), 0))
@@ -1179,7 +1179,7 @@ determine_roots_comp (struct loop *loop,
   unsigned i;
   dref a;
   chain_p chain = NULL;
-  max_wide_int last_ofs = 0;
+  widest_int last_ofs = 0;
   /* Invariants are handled specially.  */
   if (comp->comp_step == RS_INVARIANT)
diff --git a/gcc/tree-pretty-print.c b/gcc/tree-pretty-print.c
index 6109a379274..c88d0731acf 100644
--- a/gcc/tree-pretty-print.c
+++ b/gcc/tree-pretty-print.c
@@ -1494,7 +1494,7 @@ dump_generic_node (pretty_printer *buffer, tree node, int spc, int flags,
        tree field, val;
        bool is_struct_init = false;
        bool is_array_init = false;
-       max_wide_int curidx;
+       widest_int curidx;
        pp_left_brace (buffer);
        if (TREE_CLOBBER_P (node))
          pp_string (buffer, "CLOBBER");
@@ -1509,7 +1509,7 @@ dump_generic_node (pretty_printer *buffer, tree node, int spc, int flags,
          {
            tree minv = TYPE_MIN_VALUE (TYPE_DOMAIN (TREE_TYPE (node)));
            is_array_init = true;
-           curidx = wi::extend (minv);
+           curidx = wi::to_widest (minv);
          }
        FOR_EACH_CONSTRUCTOR_ELT (CONSTRUCTOR_ELTS (node), ix, field, val)
          {
@@ -1523,7 +1523,7 @@ dump_generic_node (pretty_printer *buffer, tree node, int spc, int flags,
              }
            else if (is_array_init
                     && (TREE_CODE (field) != INTEGER_CST
-                        || curidx != wi::extend (field)))
+                        || curidx != wi::to_widest (field)))
              {
                pp_left_bracket (buffer);
                if (TREE_CODE (field) == RANGE_EXPR)
@@ -1534,12 +1534,12 @@ dump_generic_node (pretty_printer *buffer, tree node, int spc, int flags,
                    dump_generic_node (buffer, TREE_OPERAND (field, 1), spc,
                                       flags, false);
                    if (TREE_CODE (TREE_OPERAND (field, 1)) == INTEGER_CST)
-                     curidx = wi::extend (TREE_OPERAND (field, 1));
+                     curidx = wi::to_widest (TREE_OPERAND (field, 1));
                  }
                else
                  dump_generic_node (buffer, field, spc, flags, false);
                if (TREE_CODE (field) == INTEGER_CST)
-                 curidx = wi::extend (field);
+                 curidx = wi::to_widest (field);
                pp_string (buffer, "]=");
              }
          }
diff --git a/gcc/tree-ssa-address.c b/gcc/tree-ssa-address.c
index 7f0c086ab73..cdf8a5a8b57 100644
--- a/gcc/tree-ssa-address.c
+++ b/gcc/tree-ssa-address.c
@@ -203,7 +203,7 @@ addr_for_mem_ref (struct mem_address *addr, addr_space_t as,
   if (addr->offset && !integer_zerop (addr->offset))
     {
-      addr_wide_int dc = addr_wide_int::from (addr->offset, SIGNED);
+      offset_int dc = offset_int::from (addr->offset, SIGNED);
      off = immed_wide_int_const (dc, pointer_mode);
     }
   else
@@ -552,7 +552,7 @@ most_expensive_mult_to_index (tree type, struct mem_address *parts,
   addr_space_t as = TYPE_ADDR_SPACE (type);
   enum machine_mode address_mode = targetm.addr_space.address_mode (as);
   HOST_WIDE_INT coef;
-  addr_wide_int best_mult, amult, amult_neg;
+  offset_int best_mult, amult, amult_neg;
   unsigned best_mult_cost = 0, acost;
   tree mult_elt = NULL_TREE, elt;
   unsigned i, j;
@@ -574,7 +574,7 @@ most_expensive_mult_to_index (tree type, struct mem_address *parts,
      if (acost > best_mult_cost)
        {
          best_mult_cost = acost;
-         best_mult = addr_wide_int::from (addr->elts[i].coef, SIGNED);
+         best_mult = offset_int::from (addr->elts[i].coef, SIGNED);
        }
     }
@@ -584,7 +584,7 @@ most_expensive_mult_to_index (tree type, struct mem_address *parts,
   /* Collect elements multiplied by best_mult.  */
   for (i = j = 0; i < addr->n; i++)
     {
-      amult = addr_wide_int::from (addr->elts[i].coef, SIGNED);
+      amult = offset_int::from (addr->elts[i].coef, SIGNED);
      amult_neg = -wi::sext (amult, TYPE_PRECISION (addr->type));
      if (amult == best_mult)
diff --git a/gcc/tree-ssa-alias.c b/gcc/tree-ssa-alias.c
index c674b3432b5..e8ae191983c 100644
--- a/gcc/tree-ssa-alias.c
+++ b/gcc/tree-ssa-alias.c
@@ -874,7 +874,7 @@ indirect_ref_may_alias_decl_p (tree ref1 ATTRIBUTE_UNUSED, tree base1,
   tree ptrtype1, dbase2;
   HOST_WIDE_INT offset1p = offset1, offset2p = offset2;
   HOST_WIDE_INT doffset1, doffset2;
-  addr_wide_int moff;
+  offset_int moff;
   gcc_checking_assert ((TREE_CODE (base1) == MEM_REF
                        || TREE_CODE (base1) == TARGET_MEM_REF)
@@ -961,7 +961,7 @@ indirect_ref_may_alias_decl_p (tree ref1 ATTRIBUTE_UNUSED, tree base1,
   if (TREE_CODE (dbase2) == MEM_REF
      || TREE_CODE (dbase2) == TARGET_MEM_REF)
     {
-      addr_wide_int moff = mem_ref_offset (dbase2);
+      offset_int moff = mem_ref_offset (dbase2);
      moff = wi::lshift (moff, (BITS_PER_UNIT == 8
                                ? 3 : exact_log2 (BITS_PER_UNIT)));
      if (wi::neg_p (moff))
@@ -1053,7 +1053,7 @@ indirect_refs_may_alias_p (tree ref1 ATTRIBUTE_UNUSED, tree base1,
              && operand_equal_p (TMR_INDEX2 (base1),
                                  TMR_INDEX2 (base2), 0))))))
     {
-      addr_wide_int moff;
+      offset_int moff;
      /* The offset embedded in MEM_REFs can be negative.  Bias them
         so that the resulting offset adjustment is positive.  */
      moff = mem_ref_offset (base1);
@@ -2018,11 +2018,11 @@ stmt_kills_ref_p_1 (gimple stmt, ao_ref *ref)
          if (!tree_int_cst_equal (TREE_OPERAND (base, 1),
                                   TREE_OPERAND (ref->base, 1)))
            {
-             addr_wide_int off1 = mem_ref_offset (base);
+             offset_int off1 = mem_ref_offset (base);
              off1 = wi::lshift (off1, (BITS_PER_UNIT == 8
                                        ? 3 : exact_log2 (BITS_PER_UNIT)));
              off1 += offset;
-             addr_wide_int off2 = mem_ref_offset (ref->base);
+             offset_int off2 = mem_ref_offset (ref->base);
              off2 = wi::lshift (off2, (BITS_PER_UNIT == 8
                                        ? 3 : exact_log2 (BITS_PER_UNIT)));
              off2 += ref_offset;
diff --git a/gcc/tree-ssa-ccp.c b/gcc/tree-ssa-ccp.c
index 6f0ba9bc66c..7c76a345a75 100644
--- a/gcc/tree-ssa-ccp.c
+++ b/gcc/tree-ssa-ccp.c
@@ -160,7 +160,7 @@ struct prop_value_d {
     with a CONSTANT lattice value X & ~mask == value & ~mask.  The
     zero bits in the mask cover constant values.  The ones mean no
     information.
     */
-  max_wide_int mask;
+  widest_int mask;
 };
 typedef struct prop_value_d prop_value_t;
@@ -202,7 +202,7 @@ dump_lattice_value (FILE *outf, const char *prefix, prop_value_t val)
    }
  else
    {
-      wide_int cval = wi::bit_and_not (wi::extend (val.value), val.mask);
+      wide_int cval = wi::bit_and_not (wi::to_widest (val.value), val.mask);
      fprintf (outf, "%sCONSTANT ", prefix);
      print_hex (cval, outf);
      fprintf (outf, " (");
@@ -432,8 +432,8 @@ valid_lattice_transition (prop_value_t old_val, prop_value_t new_val)
  /* Bit-lattices have to agree in the still valid bits.  */
  if (TREE_CODE (old_val.value) == INTEGER_CST
      && TREE_CODE (new_val.value) == INTEGER_CST)
-    return (wi::bit_and_not (wi::extend (old_val.value), new_val.mask)
-           == wi::bit_and_not (wi::extend (new_val.value), new_val.mask));
+    return (wi::bit_and_not (wi::to_widest (old_val.value), new_val.mask)
+           == wi::bit_and_not (wi::to_widest (new_val.value), new_val.mask));
  /* Otherwise constant values have to agree.  */
  return operand_equal_p (old_val.value, new_val.value, 0);
@@ -458,8 +458,8 @@ set_lattice_value (tree var, prop_value_t new_val)
      && TREE_CODE (new_val.value) == INTEGER_CST
      && TREE_CODE (old_val->value) == INTEGER_CST)
    {
-      max_wide_int diff = (wi::extend (new_val.value)
-                          ^ wi::extend (old_val->value));
+      widest_int diff = (wi::to_widest (new_val.value)
+                        ^ wi::to_widest (old_val->value));
      new_val.mask = new_val.mask | old_val->mask | diff;
    }
@@ -494,19 +494,19 @@ set_lattice_value (tree var, prop_value_t new_val)
 static prop_value_t get_value_for_expr (tree, bool);
 static prop_value_t bit_value_binop (enum tree_code, tree, tree, tree);
-static void bit_value_binop_1 (enum tree_code, tree, max_wide_int *, max_wide_int *,
-                              tree, max_wide_int, max_wide_int,
-                              tree, max_wide_int, max_wide_int);
+static void bit_value_binop_1 (enum tree_code, tree, widest_int *, widest_int *,
+                              tree, widest_int, widest_int,
+                              tree, widest_int, widest_int);
-/* Return a max_wide_int that can be used for bitwise simplifications
+/* Return a widest_int that can be used for bitwise simplifications
    from VAL.  */
-static max_wide_int
+static widest_int
 value_to_wide_int (prop_value_t val)
 {
   if (val.value
      && TREE_CODE (val.value) == INTEGER_CST)
-    return wi::extend (val.value);
+    return wi::to_widest (val.value);
   return 0;
 }
@@ -526,7 +526,7 @@ get_value_from_alignment (tree expr)
   get_pointer_alignment_1 (expr, &align, &bitpos);
   val.mask = (POINTER_TYPE_P (type) || TYPE_UNSIGNED (type)
-             ? wi::mask <max_wide_int> (TYPE_PRECISION (type), false)
+             ? wi::mask <widest_int> (TYPE_PRECISION (type), false)
              : -1).and_not (align / BITS_PER_UNIT - 1);
   val.lattice_val = val.mask == -1 ? VARYING : CONSTANT;
   if (val.lattice_val == CONSTANT)
@@ -909,7 +909,8 @@ ccp_lattice_meet (prop_value_t *val1, prop_value_t *val2)
        For INTEGER_CSTs mask unequal bits.  If no equal bits remain,
        drop to varying.
        */
      val1->mask = (val1->mask | val2->mask
-                  | (wi::extend (val1->value) ^ wi::extend (val2->value)));
+                  | (wi::to_widest (val1->value)
+                     ^ wi::to_widest (val2->value)));
      if (val1->mask == -1)
        {
          val1->lattice_val = VARYING;
@@ -1098,8 +1099,8 @@ ccp_fold (gimple stmt)
 static void
 bit_value_unop_1 (enum tree_code code, tree type,
-                 max_wide_int *val, max_wide_int *mask,
-                 tree rtype, const max_wide_int &rval, const max_wide_int &rmask)
+                 widest_int *val, widest_int *mask,
+                 tree rtype, const widest_int &rval, const widest_int &rmask)
 {
   switch (code)
     {
@@ -1110,7 +1111,7 @@ bit_value_unop_1 (enum tree_code code, tree type,
     case NEGATE_EXPR:
      {
-       max_wide_int temv, temm;
+       widest_int temv, temm;
        /* Return ~rval + 1.  */
        bit_value_unop_1 (BIT_NOT_EXPR, type, &temv, &temm, type, rval, rmask);
        bit_value_binop_1 (PLUS_EXPR, type, val, mask,
@@ -1146,9 +1147,9 @@ bit_value_unop_1 (enum tree_code code, tree type,
 static void
 bit_value_binop_1 (enum tree_code code, tree type,
-                  max_wide_int *val, max_wide_int *mask,
-                  tree r1type, max_wide_int r1val, max_wide_int r1mask,
-                  tree r2type, max_wide_int r2val, max_wide_int r2mask)
+                  widest_int *val, widest_int *mask,
+                  tree r1type, widest_int r1val, widest_int r1mask,
+                  tree r2type, widest_int r2val, widest_int r2mask)
 {
   signop sgn = TYPE_SIGN (type);
   int width = TYPE_PRECISION (type);
@@ -1254,7 +1255,7 @@ bit_value_binop_1 (enum tree_code code, tree type,
     case PLUS_EXPR:
     case POINTER_PLUS_EXPR:
      {
-       max_wide_int lo, hi;
+       widest_int lo, hi;
        /* Do the addition with unknown bits set to zero, to give carry-ins of
           zero wherever possible.  */
        lo = r1val.and_not (r1mask) + r2val.and_not (r2mask);
@@ -1276,7 +1277,7 @@ bit_value_binop_1 (enum tree_code code, tree type,
     case MINUS_EXPR:
      {
-       max_wide_int temv, temm;
+       widest_int temv, temm;
        bit_value_unop_1 (NEGATE_EXPR, r2type, &temv, &temm,
                          r2type, r2val, r2mask);
        bit_value_binop_1 (PLUS_EXPR, type, val, mask,
@@ -1298,7 +1299,7 @@ bit_value_binop_1 (enum tree_code code, tree type,
        }
      else if (r1tz + r2tz > 0)
        {
-         *mask = wi::ext (wi::mask <max_wide_int> (r1tz + r2tz, true),
+         *mask = wi::ext (wi::mask <widest_int> (r1tz + r2tz, true),
                           width, sgn);
          *val = 0;
        }
@@ -1308,7 +1309,7 @@ bit_value_binop_1 (enum tree_code code, tree type,
     case EQ_EXPR:
     case NE_EXPR:
      {
-       max_wide_int m = r1mask | r2mask;
+       widest_int m = r1mask | r2mask;
        if (r1val.and_not (m) != r2val.and_not (m))
          {
            *mask = 0;
@@ -1328,7 +1329,7 @@ bit_value_binop_1 (enum tree_code code, tree type,
     case LT_EXPR:
     case LE_EXPR:
      {
-       max_wide_int o1val, o2val, o1mask, o2mask;
+       widest_int o1val, o2val, o1mask, o2mask;
        int minmax, maxmin;
        if ((code == GE_EXPR) || (code == GT_EXPR))
@@ -1395,7 +1396,7 @@ static prop_value_t
 bit_value_unop (enum tree_code code, tree type, tree rhs)
 {
   prop_value_t rval = get_value_for_expr (rhs, true);
-  max_wide_int value, mask;
+  widest_int value, mask;
   prop_value_t val;
   if (rval.lattice_val == UNDEFINED)
@@ -1430,7 +1431,7 @@ bit_value_binop (enum tree_code code, tree type, tree rhs1, tree rhs2)
 {
   prop_value_t r1val = get_value_for_expr (rhs1, true);
   prop_value_t r2val = get_value_for_expr (rhs2, true);
-  max_wide_int value, mask;
+  widest_int value, mask;
   prop_value_t val;
   if (r1val.lattice_val == UNDEFINED
@@ -1478,7 +1479,7 @@ bit_value_assume_aligned (gimple stmt)
   unsigned HOST_WIDE_INT aligni, misaligni = 0;
   prop_value_t ptrval = get_value_for_expr (ptr, true);
   prop_value_t alignval;
-  max_wide_int value, mask;
+  widest_int value, mask;
   prop_value_t val;
   if (ptrval.lattice_val == UNDEFINED)
diff --git a/gcc/tree-ssa-forwprop.c b/gcc/tree-ssa-forwprop.c
index 0ad3293040c..1b5ea19f956 100644
--- a/gcc/tree-ssa-forwprop.c
+++ b/gcc/tree-ssa-forwprop.c
@@ -812,7 +812,7 @@ forward_propagate_addr_expr_1 (tree name, tree def_rhs,
      if ((def_rhs_base = get_addr_base_and_unit_offset (TREE_OPERAND (def_rhs, 0),
                                                         &def_rhs_offset)))
        {
-         addr_wide_int off = mem_ref_offset (lhs);
+         offset_int off = mem_ref_offset (lhs);
          tree new_ptr;
          off += def_rhs_offset;
          if (TREE_CODE (def_rhs_base) == MEM_REF)
@@ -903,7 +903,7 @@ forward_propagate_addr_expr_1 (tree name, tree def_rhs,
      if ((def_rhs_base = get_addr_base_and_unit_offset (TREE_OPERAND (def_rhs, 0),
                                                         &def_rhs_offset)))
        {
-         addr_wide_int off = mem_ref_offset (rhs);
+         offset_int off = mem_ref_offset (rhs);
          tree new_ptr;
          off += def_rhs_offset;
          if (TREE_CODE (def_rhs_base) == MEM_REF)
diff --git a/gcc/tree-ssa-loop-im.c b/gcc/tree-ssa-loop-im.c
index bcbb5e8e71a..bfd2ca7148b 100644
--- a/gcc/tree-ssa-loop-im.c
+++ b/gcc/tree-ssa-loop-im.c
@@ -1633,7 +1633,7 @@ mem_refs_may_alias_p (mem_ref_p mem1, mem_ref_p mem2,
   /* Perform BASE + OFFSET analysis -- if MEM1 and MEM2 are based on the same
     object and their offset differ in such a way that the locations cannot
     overlap, then they cannot alias.  */
-  max_wide_int size1, size2;
+  widest_int size1, size2;
   aff_tree off1, off2;
   /* Perform basic offset and type-based disambiguation.  */
diff --git a/gcc/tree-ssa-loop-ivcanon.c b/gcc/tree-ssa-loop-ivcanon.c
index 27bf34ef33b..6f765381c24 100644
--- a/gcc/tree-ssa-loop-ivcanon.c
+++ b/gcc/tree-ssa-loop-ivcanon.c
@@ -927,7 +927,7 @@ canonicalize_loop_induction_variables (struct loop *loop,
     by find_loop_niter_by_eval.  Be sure to keep it for future.  */
   if (niter && TREE_CODE (niter) == INTEGER_CST)
     {
-      record_niter_bound (loop, wi::extend (niter),
+      record_niter_bound (loop, wi::to_widest (niter),
                          exit == single_likely_exit (loop), true);
     }
diff --git a/gcc/tree-ssa-loop-ivopts.c b/gcc/tree-ssa-loop-ivopts.c
index 5720b655017..8831dfd0f86 100644
--- a/gcc/tree-ssa-loop-ivopts.c
+++ b/gcc/tree-ssa-loop-ivopts.c
@@ -1563,12 +1563,12 @@ idx_record_use (tree base, tree *idx,
   signedness of TOP and BOT.
   */
 static bool
-constant_multiple_of (tree top, tree bot, max_wide_int *mul)
+constant_multiple_of (tree top, tree bot, widest_int *mul)
 {
   tree mby;
   enum tree_code code;
   unsigned precision = TYPE_PRECISION (TREE_TYPE (top));
-  max_wide_int res, p0, p1;
+  widest_int res, p0, p1;
   STRIP_NOPS (top);
   STRIP_NOPS (bot);
@@ -1590,7 +1590,7 @@ constant_multiple_of (tree top, tree bot, widest_int *mul)
      if (!constant_multiple_of (TREE_OPERAND (top, 0), bot, &res))
        return false;
-      *mul = wi::sext (res * wi::extend (mby), precision);
+      *mul = wi::sext (res * wi::to_widest (mby), precision);
      return true;
     case PLUS_EXPR:
@@ -1608,8 +1608,8 @@ constant_multiple_of (tree top, tree bot, widest_int *mul)
      if (TREE_CODE (bot) != INTEGER_CST)
        return false;
-      p0 = max_wide_int::from (top, SIGNED);
-      p1 = max_wide_int::from (bot, SIGNED);
+      p0 = widest_int::from (top, SIGNED);
+      p1 = widest_int::from (bot, SIGNED);
      if (p1 == 0)
        return false;
      *mul = wi::sext (wi::divmod_trunc (p0, p1, SIGNED, &res), precision);
@@ -2969,7 +2969,7 @@ get_computation_aff (struct loop *loop,
   tree common_type, var;
   tree uutype;
   aff_tree cbase_aff, var_aff;
-  max_wide_int rat;
+  widest_int rat;
   if (TYPE_PRECISION (utype) > TYPE_PRECISION (ctype))
     {
@@ -3968,7 +3968,7 @@ get_computation_cost_at (struct ivopts_data *data,
   HOST_WIDE_INT ratio, aratio;
   bool var_present, symbol_present, stmt_is_after_inc;
   comp_cost cost;
-  max_wide_int rat;
+  widest_int rat;
   bool speed = optimize_bb_for_speed_p (gimple_bb (at));
   enum machine_mode mem_mode = (address_p
                                ? TYPE_MODE (TREE_TYPE (*use->op_p))
@@ -4627,12 +4627,12 @@ may_eliminate_iv (struct ivopts_data *data,
     entire loop and compare against that instead.  */
  else
    {
-      max_wide_int period_value, max_niter;
+      widest_int period_value, max_niter;
      max_niter = desc->max;
      if (stmt_after_increment (loop, cand, use->stmt))
        max_niter += 1;
-      period_value = wi::extend (period);
+      period_value = wi::to_widest (period);
      if (wi::gtu_p (max_niter, period_value))
        {
          /* See if we can take advantage of inferred loop bound information.  */
diff --git a/gcc/tree-ssa-loop-niter.c b/gcc/tree-ssa-loop-niter.c
index 1e3da664f33..e926951967c 100644
--- a/gcc/tree-ssa-loop-niter.c
+++ b/gcc/tree-ssa-loop-niter.c
@@ -446,7 +446,7 @@ end:
   difference of two values in TYPE.  */
 static void
-bounds_add (bounds *bnds, max_wide_int delta, tree type)
+bounds_add (bounds *bnds, widest_int delta, tree type)
 {
   mpz_t mdelta, max;
@@ -548,7 +548,7 @@ static void
 number_of_iterations_ne_max (mpz_t bnd, bool no_overflow, tree c, tree s,
                             bounds *bnds, bool exit_must_be_taken)
 {
-  max_wide_int max;
+  widest_int max;
   mpz_t d;
   tree type = TREE_TYPE (c);
   bool bnds_u_valid = ((no_overflow && exit_must_be_taken)
@@ -576,8 +576,7 @@ number_of_iterations_ne_max (mpz_t bnd, bool no_overflow, tree c, tree s,
     the whole # of iterations analysis will fail).  */
   if (!no_overflow)
     {
-      max = wi::mask <max_wide_int> (TYPE_PRECISION (type) - wi::ctz (s),
-                                    false);
+      max = wi::mask <widest_int> (TYPE_PRECISION (type) - wi::ctz (s), false);
      wi::to_mpz (max, bnd, UNSIGNED);
      return;
     }
@@ -650,8 +649,8 @@ number_of_iterations_ne (tree type, affine_iv *iv, tree final,
   mpz_init (max);
   number_of_iterations_ne_max (max, iv->no_overflow, c, s, bnds,
                               exit_must_be_taken);
-  niter->max = max_wide_int::from (wi::from_mpz (niter_type, max, false),
-                                  TYPE_SIGN (niter_type));
+  niter->max = widest_int::from (wi::from_mpz (niter_type, max, false),
+                                TYPE_SIGN (niter_type));
   mpz_clear (max);
   /* First the trivial cases -- when the step is 1.
     */
@@ -806,7 +805,7 @@ number_of_iterations_lt_to_ne (tree type, affine_iv *iv0, affine_iv *iv1,
   niter->may_be_zero = fold_build2 (TRUTH_OR_EXPR, boolean_type_node,
                                    niter->may_be_zero, noloop);
-  bounds_add (bnds, wi::extend (mod), type);
+  bounds_add (bnds, wi::to_widest (mod), type);
   *delta = fold_build2 (PLUS_EXPR, niter_type, *delta, mod);
   ret = true;
@@ -896,7 +895,7 @@ assert_loop_rolls_lt (tree type, affine_iv *iv0, affine_iv *iv1,
   tree assumption = boolean_true_node, bound, diff;
   tree mbz, mbzl, mbzr, type1;
   bool rolls_p, no_overflow_p;
-  max_wide_int dstep;
+  widest_int dstep;
   mpz_t mstep, max;
   /* We are going to compute the number of iterations as
@@ -922,10 +921,10 @@ assert_loop_rolls_lt (tree type, affine_iv *iv0, affine_iv *iv1,
   /* First check whether the answer does not follow from the bounds we gathered
     before.  */
   if (integer_nonzerop (iv0->step))
-    dstep = wi::extend (iv0->step);
+    dstep = wi::to_widest (iv0->step);
   else
     {
-      dstep = wi::sext (wi::extend (iv1->step), TYPE_PRECISION (type));
+      dstep = wi::sext (wi::to_widest (iv1->step), TYPE_PRECISION (type));
      dstep = -dstep;
     }
@@ -1064,9 +1063,8 @@ number_of_iterations_lt (tree type, affine_iv *iv0, affine_iv *iv1,
      niter->may_be_zero = fold_build2 (LT_EXPR, boolean_type_node,
                                        iv1->base, iv0->base);
      niter->niter = delta;
-      niter->max = max_wide_int::from (wi::from_mpz (niter_type, bnds->up,
-                                                    false),
-                                      TYPE_SIGN (niter_type));
+      niter->max = widest_int::from (wi::from_mpz (niter_type, bnds->up, false),
+                                    TYPE_SIGN (niter_type));
      return true;
     }
@@ -1113,8 +1111,8 @@ number_of_iterations_lt (tree type, affine_iv *iv0, affine_iv *iv1,
   mpz_add (tmp, bnds->up, mstep);
   mpz_sub_ui (tmp, tmp, 1);
   mpz_fdiv_q (tmp, tmp, mstep);
-  niter->max = max_wide_int::from (wi::from_mpz (niter_type, tmp, false),
-                                  TYPE_SIGN (niter_type));
+  niter->max = widest_int::from (wi::from_mpz (niter_type, tmp, false),
+                                TYPE_SIGN (niter_type));
   mpz_clear (mstep);
   mpz_clear (tmp);
@@ -1909,7 +1907,7 @@ number_of_iterations_exit (struct loop *loop, edge exit,
   /* If NITER has simplified into a constant, update MAX.  */
   if (TREE_CODE (niter->niter) == INTEGER_CST)
-    niter->max = wi::extend (niter->niter);
+    niter->max = wi::to_widest (niter->niter);
   if (integer_onep (niter->assumptions))
     return true;
@@ -2021,7 +2019,7 @@ find_loop_niter (struct loop *loop, edge *exit)
 bool
 finite_loop_p (struct loop *loop)
 {
-  max_wide_int nit;
+  widest_int nit;
   int flags;
   if (flag_unsafe_loop_optimizations)
@@ -2335,13 +2333,13 @@ find_loop_niter_by_eval (struct loop *loop, edge *exit)
 */
-static max_wide_int derive_constant_upper_bound_ops (tree, tree,
-                                                    enum tree_code, tree);
+static widest_int derive_constant_upper_bound_ops (tree, tree,
+                                                  enum tree_code, tree);
 /* Returns a constant upper bound on the value of the right-hand side of
   an assignment statement STMT.  */
-static max_wide_int
+static widest_int
 derive_constant_upper_bound_assign (gimple stmt)
 {
   enum tree_code code = gimple_assign_rhs_code (stmt);
@@ -2356,7 +2354,7 @@ derive_constant_upper_bound_assign (gimple stmt)
   is considered to be unsigned.  If its type is signed, its value must
   be nonnegative.  */
-static max_wide_int
+static widest_int
 derive_constant_upper_bound (tree val)
 {
   enum tree_code code;
@@ -2370,12 +2368,12 @@ derive_constant_upper_bound (tree val)
   whose type is TYPE.  The expression is considered to be unsigned.  If
   its type is signed, its value must be nonnegative.
   */
-static max_wide_int
+static widest_int
 derive_constant_upper_bound_ops (tree type, tree op0,
                                 enum tree_code code, tree op1)
 {
   tree subtype, maxt;
-  max_wide_int bnd, max, mmax, cst;
+  widest_int bnd, max, mmax, cst;
   gimple stmt;
   if (INTEGRAL_TYPE_P (type))
@@ -2383,12 +2381,12 @@ derive_constant_upper_bound_ops (tree type, tree op0,
   else
     maxt = upper_bound_in_type (type, type);
-  max = wi::extend (maxt);
+  max = wi::to_widest (maxt);
   switch (code)
     {
     case INTEGER_CST:
-      return wi::extend (op0);
+      return wi::to_widest (op0);
     CASE_CONVERT:
      subtype = TREE_TYPE (op0);
@@ -2425,7 +2423,7 @@ derive_constant_upper_bound_ops (tree type, tree op0,
      /* Canonicalize to OP0 - CST.  Consider CST to be signed, in order to
         choose the most logical way how to treat this constant regardless
         of the signedness of the type.  */
-      cst = wi::sext (wi::extend (op1), TYPE_PRECISION (type));
+      cst = wi::sext (wi::to_widest (op1), TYPE_PRECISION (type));
      if (code != MINUS_EXPR)
        cst = -cst;
@@ -2485,13 +2483,13 @@ derive_constant_upper_bound_ops (tree type, tree op0,
        return max;
      bnd = derive_constant_upper_bound (op0);
-      return wi::udiv_floor (bnd, wi::extend (op1));
+      return wi::udiv_floor (bnd, wi::to_widest (op1));
     case BIT_AND_EXPR:
      if (TREE_CODE (op1) != INTEGER_CST
          || tree_int_cst_sign_bit (op1))
        return max;
-      return wi::extend (op1);
+      return wi::to_widest (op1);
     case SSA_NAME:
      stmt = SSA_NAME_DEF_STMT (op0);
@@ -2509,7 +2507,7 @@ derive_constant_upper_bound_ops (tree type, tree op0,
 static void
 do_warn_aggressive_loop_optimizations (struct loop *loop,
-                                      max_wide_int i_bound, gimple stmt)
+                                      widest_int i_bound, gimple stmt)
 {
   /* Don't warn if the loop doesn't have known constant bound.  */
   if (!loop->nb_iterations
@@ -2548,10 +2546,10 @@ do_warn_aggressive_loop_optimizations (struct loop *loop,
   BOUND times.  I_BOUND is an unsigned wide_int upper estimate on BOUND.  */
 static void
-record_estimate (struct loop *loop, tree bound, max_wide_int i_bound,
+record_estimate (struct loop *loop, tree bound, widest_int i_bound,
                 gimple at_stmt, bool is_exit, bool realistic, bool upper)
 {
-  max_wide_int delta;
+  widest_int delta;
   if (dump_file && (dump_flags & TDF_DETAILS))
     {
@@ -2570,7 +2568,7 @@ record_estimate (struct loop *loop, tree bound, widest_int i_bound,
   if (TREE_CODE (bound) != INTEGER_CST)
     realistic = false;
   else
-    gcc_checking_assert (i_bound == wi::extend (bound));
+    gcc_checking_assert (i_bound == wi::to_widest (bound));
   if (!upper && !realistic)
     return;
@@ -2627,7 +2625,7 @@ record_nonwrapping_iv (struct loop *loop, tree base, tree step, gimple stmt,
 {
   tree niter_bound, extreme, delta;
   tree type = TREE_TYPE (base), unsigned_type;
-  max_wide_int max;
+  widest_int max;
   if (TREE_CODE (step) != INTEGER_CST || integer_zerop (step))
     return;
@@ -2977,8 +2975,8 @@ infer_loop_bounds_from_undefined (struct loop *loop)
 static int
 wide_int_cmp (const void *p1, const void *p2)
 {
-  const max_wide_int *d1 = (const max_wide_int *)p1;
-  const max_wide_int *d2 = (const max_wide_int *)p2;
+  const widest_int *d1 = (const widest_int *) p1;
+  const widest_int *d2 = (const widest_int *) p2;
   return wi::cmpu (*d1, *d2);
 }
@@ -2986,7 +2984,7 @@ wide_int_cmp (const void *p1, const void *p2)
   Lookup by binary search.
   */
 static int
-bound_index (vec<max_wide_int> bounds, const max_wide_int &bound)
+bound_index (vec<widest_int> bounds, const widest_int &bound)
 {
   unsigned int end = bounds.length ();
   unsigned int begin = 0;
@@ -2995,7 +2993,7 @@ bound_index (vec<widest_int> bounds, const widest_int &bound)
   while (begin != end)
     {
      unsigned int middle = (begin + end) / 2;
-      max_wide_int index = bounds[middle];
+      widest_int index = bounds[middle];
      if (index == bound)
        return middle;
@@ -3018,7 +3016,7 @@ discover_iteration_bound_by_body_walk (struct loop *loop)
 {
   pointer_map_t *bb_bounds;
   struct nb_iter_bound *elt;
-  vec<max_wide_int> bounds = vNULL;
+  vec<widest_int> bounds = vNULL;
   vec<vec<basic_block> > queues = vNULL;
   vec<basic_block> queue = vNULL;
   ptrdiff_t queue_index;
@@ -3028,7 +3026,7 @@ discover_iteration_bound_by_body_walk (struct loop *loop)
   /* Discover what bounds may interest us.  */
   for (elt = loop->bounds; elt; elt = elt->next)
     {
-      max_wide_int bound = elt->bound;
+      widest_int bound = elt->bound;
      /* Exit terminates loop at given iteration, while non-exits produce undefined
         effect on the next iteration.  */
@@ -3054,7 +3052,7 @@ discover_iteration_bound_by_body_walk (struct loop *loop)
   /* Sort the bounds in decreasing order.  */
   qsort (bounds.address (), bounds.length (),
-        sizeof (max_wide_int), wide_int_cmp);
+        sizeof (widest_int), wide_int_cmp);
   /* For every basic block record the lowest bound that is guaranteed to
     terminate the loop.  */
@@ -3062,7 +3060,7 @@ discover_iteration_bound_by_body_walk (struct loop *loop)
   bb_bounds = pointer_map_create ();
   for (elt = loop->bounds; elt; elt = elt->next)
     {
-      max_wide_int bound = elt->bound;
+      widest_int bound = elt->bound;
      if (!elt->is_exit)
        {
          bound += 1;
@@ -3300,7 +3298,7 @@ estimate_numbers_of_iterations_loop (struct loop *loop)
   unsigned i;
   struct tree_niter_desc niter_desc;
   edge ex;
-  max_wide_int bound;
+  widest_int bound;
   edge likely_exit;
   /* Give up if we already have tried to compute an estimation.  */
@@ -3358,7 +3356,7 @@ estimate_numbers_of_iterations_loop (struct loop *loop)
      && TREE_CODE (loop->nb_iterations) == INTEGER_CST)
    {
      loop->any_upper_bound = true;
-      loop->nb_iterations_upper_bound = wi::extend (loop->nb_iterations);
+      loop->nb_iterations_upper_bound = wi::to_widest (loop->nb_iterations);
    }
 }
@@ -3368,7 +3366,7 @@ estimate_numbers_of_iterations_loop (struct loop *loop)
   the function returns false, otherwise returns true.  */
 bool
-estimated_loop_iterations (struct loop *loop, max_wide_int *nit)
+estimated_loop_iterations (struct loop *loop, widest_int *nit)
 {
   /* When SCEV information is available, try to update loop iterations
     estimate.  Otherwise just return whatever we recorded earlier.  */
@@ -3385,7 +3383,7 @@ estimated_loop_iterations (struct loop *loop, widest_int *nit)
 HOST_WIDE_INT
 estimated_loop_iterations_int (struct loop *loop)
 {
-  max_wide_int nit;
+  widest_int nit;
   HOST_WIDE_INT hwi_nit;
   if (!estimated_loop_iterations (loop, &nit))
@@ -3404,7 +3402,7 @@ estimated_loop_iterations_int (struct loop *loop)
   false, otherwise returns true.  */
 bool
-max_loop_iterations (struct loop *loop, max_wide_int *nit)
+max_loop_iterations (struct loop *loop, widest_int *nit)
 {
   /* When SCEV information is available, try to update loop iterations
     estimate.  Otherwise just return whatever we recorded earlier.
     */
@@ -3421,7 +3419,7 @@ max_loop_iterations (struct loop *loop, widest_int *nit)
 HOST_WIDE_INT
 max_loop_iterations_int (struct loop *loop)
 {
-  max_wide_int nit;
+  widest_int nit;
   HOST_WIDE_INT hwi_nit;
   if (!max_loop_iterations (loop, &nit))
@@ -3458,9 +3456,9 @@ estimated_stmt_executions_int (struct loop *loop)
   false, otherwise returns true.  */
 bool
-max_stmt_executions (struct loop *loop, max_wide_int *nit)
+max_stmt_executions (struct loop *loop, widest_int *nit)
 {
-  max_wide_int nit_minus_one;
+  widest_int nit_minus_one;
   if (!max_loop_iterations (loop, nit))
     return false;
@@ -3477,9 +3475,9 @@ max_stmt_executions (struct loop *loop, widest_int *nit)
   false, otherwise returns true.  */
 bool
-estimated_stmt_executions (struct loop *loop, max_wide_int *nit)
+estimated_stmt_executions (struct loop *loop, widest_int *nit)
 {
-  max_wide_int nit_minus_one;
+  widest_int nit_minus_one;
   if (!estimated_loop_iterations (loop, nit))
     return false;
@@ -3560,7 +3558,7 @@ n_of_executions_at_most (gimple stmt,
                         struct nb_iter_bound *niter_bound,
                         tree niter)
 {
-  max_wide_int bound = niter_bound->bound;
+  widest_int bound = niter_bound->bound;
   tree nit_type = TREE_TYPE (niter), e;
   enum tree_code cmp;
@@ -3658,7 +3656,7 @@ scev_probably_wraps_p (tree base, tree step,
   tree unsigned_type, valid_niter;
   tree type = TREE_TYPE (step);
   tree e;
-  max_wide_int niter;
+  widest_int niter;
   struct nb_iter_bound *bound;
   /* FIXME: We really need something like
diff --git a/gcc/tree-ssa-loop-niter.h b/gcc/tree-ssa-loop-niter.h
index a794843380d..63d0344f974 100644
--- a/gcc/tree-ssa-loop-niter.h
+++ b/gcc/tree-ssa-loop-niter.h
@@ -29,14 +29,14 @@ extern tree find_loop_niter (struct loop *, edge *);
 extern bool finite_loop_p (struct loop *);
 extern tree loop_niter_by_eval (struct loop *, edge);
 extern tree find_loop_niter_by_eval (struct loop *, edge *);
-extern bool estimated_loop_iterations (struct loop *, max_wide_int *);
+extern bool estimated_loop_iterations (struct loop *, widest_int *);
 extern HOST_WIDE_INT estimated_loop_iterations_int (struct loop *);
-extern bool max_loop_iterations (struct loop *, max_wide_int *);
+extern bool max_loop_iterations (struct loop *, widest_int *);
 extern HOST_WIDE_INT max_loop_iterations_int (struct loop *);
 extern HOST_WIDE_INT max_stmt_executions_int (struct loop *);
 extern HOST_WIDE_INT estimated_stmt_executions_int (struct loop *);
-extern bool max_stmt_executions (struct loop *, max_wide_int *);
-extern bool estimated_stmt_executions (struct loop *, max_wide_int *);
+extern bool max_stmt_executions (struct loop *, widest_int *);
+extern bool estimated_stmt_executions (struct loop *, widest_int *);
 extern void estimate_numbers_of_iterations (void);
 extern bool stmt_dominates_stmt_p (gimple, gimple);
 extern bool nowrap_type_p (tree);
diff --git a/gcc/tree-ssa-loop.h b/gcc/tree-ssa-loop.h
index d8fc0eded1f..3f6a6bcc5fa 100644
--- a/gcc/tree-ssa-loop.h
+++ b/gcc/tree-ssa-loop.h
@@ -54,7 +54,7 @@ struct tree_niter_desc
                           a loop (provided that assumptions == true and
                           may_be_zero == false), more precisely the number
                           of executions of the latch of the loop.  */
-  max_wide_int max;    /* The upper bound on the number of iterations of
+  widest_int max;      /* The upper bound on the number of iterations of
                           the loop.  */
   /* The simplified shape of the exit condition.
      The loop exits if
diff --git a/gcc/tree-ssa-pre.c b/gcc/tree-ssa-pre.c
index d21ffa8eb29..ac8d528ef8d 100644
--- a/gcc/tree-ssa-pre.c
+++ b/gcc/tree-ssa-pre.c
@@ -1581,9 +1581,9 @@ phi_translate_1 (pre_expr expr, bitmap_set_t set1, bitmap_set_t set2,
                && TREE_CODE (op[1]) == INTEGER_CST
                && TREE_CODE (op[2]) == INTEGER_CST)
              {
-               addr_wide_int off = ((wi::address (op[0])
-                                     - wi::address (op[1]))
-                                    * wi::address (op[2]));
+               offset_int off = ((wi::to_offset (op[0])
+                                  - wi::to_offset (op[1]))
+                                 * wi::to_offset (op[2]));
                if (wi::fits_shwi_p (off))
                  newop.off = off.to_shwi ();
              }
diff --git a/gcc/tree-ssa-sccvn.c b/gcc/tree-ssa-sccvn.c
index cd379c06dc7..a56c55b622f 100644
--- a/gcc/tree-ssa-sccvn.c
+++ b/gcc/tree-ssa-sccvn.c
@@ -800,9 +800,9 @@ copy_reference_ops_from_ref (tree ref, vec<vn_reference_op_s> *result)
            tree bit_offset = DECL_FIELD_BIT_OFFSET (TREE_OPERAND (ref, 1));
            if (tree_to_hwi (bit_offset) % BITS_PER_UNIT == 0)
              {
-               addr_wide_int off
-                 = (wi::address (this_offset)
-                    + wi::lrshift (wi::address (bit_offset),
+               offset_int off
+                 = (wi::to_offset (this_offset)
+                    + wi::lrshift (wi::to_offset (bit_offset),
                                    BITS_PER_UNIT == 8
                                    ? 3 : exact_log2 (BITS_PER_UNIT)));
                if (wi::fits_shwi_p (off))
@@ -822,9 +822,9 @@ copy_reference_ops_from_ref (tree ref, vec<vn_reference_op_s> *result)
              && TREE_CODE (temp.op1) == INTEGER_CST
              && TREE_CODE (temp.op2) == INTEGER_CST)
            {
-             addr_wide_int off = ((wi::address (temp.op0)
-                                   - wi::address (temp.op1))
-                                  * wi::address (temp.op2));
+             offset_int off = ((wi::to_offset (temp.op0)
+                                - wi::to_offset (temp.op1))
+                               * wi::to_offset (temp.op2));
              if (wi::fits_shwi_p (off))
                temp.off = off.to_shwi();
            }
@@ -1146,7 +1146,7 @@ vn_reference_fold_indirect (vec<vn_reference_op_s> *ops,
   gcc_checking_assert (addr_base && TREE_CODE (addr_base) != MEM_REF);
   if (addr_base != TREE_OPERAND (op->op0, 0))
     {
-      addr_wide_int off = addr_wide_int::from (mem_op->op0, SIGNED);
+      offset_int off = offset_int::from (mem_op->op0, SIGNED);
      off += addr_offset;
      mem_op->op0 = wide_int_to_tree (TREE_TYPE (mem_op->op0), off);
      op->op0 = build_fold_addr_expr (addr_base);
@@ -1168,7 +1168,7 @@ vn_reference_maybe_forwprop_address (vec<vn_reference_op_s> *ops,
   vn_reference_op_t mem_op = &(*ops)[i - 1];
   gimple def_stmt;
   enum tree_code code;
-  addr_wide_int off;
+  offset_int off;
   def_stmt = SSA_NAME_DEF_STMT (op->op0);
   if (!is_gimple_assign (def_stmt))
@@ -1179,7 +1179,7 @@ vn_reference_maybe_forwprop_address (vec<vn_reference_op_s> *ops,
      && code != POINTER_PLUS_EXPR)
    return;
-  off = addr_wide_int::from (mem_op->op0, SIGNED);
+  off = offset_int::from (mem_op->op0, SIGNED);
   /* The only thing we have to do is from &OBJ.foo.bar add the offset
     from .foo.bar to the preceding MEM_REF offset and replace the
@@ -1209,7 +1209,7 @@ vn_reference_maybe_forwprop_address (vec<vn_reference_op_s> *ops,
          || TREE_CODE (ptroff) != INTEGER_CST)
        return;
-      off += wi::address (ptroff);
+      off += wi::to_offset (ptroff);
      op->op0 = ptr;
     }
@@ -1367,9 +1367,9 @@ valueize_refs_1 (vec<vn_reference_op_s> orig, bool *valueized_anything)
          && TREE_CODE (vro->op1) == INTEGER_CST
          && TREE_CODE (vro->op2) == INTEGER_CST)
        {
-         addr_wide_int off = ((wi::address (vro->op0)
-                               - wi::address (vro->op1))
-                              * wi::address (vro->op2));
+         offset_int off = ((wi::to_offset (vro->op0)
+                            - wi::to_offset (vro->op1))
+                           * wi::to_offset (vro->op2));
          if (wi::fits_shwi_p (off))
            vro->off = off.to_shwi ();
        }
diff --git a/gcc/tree-ssa-structalias.c b/gcc/tree-ssa-structalias.c
index 21927d268b6..affc0b69dcc 100644
--- a/gcc/tree-ssa-structalias.c
+++ b/gcc/tree-ssa-structalias.c
@@ -3012,7 +3012,7 @@ get_constraint_for_ptr_offset (tree ptr, tree offset,
   else
     {
      /* Sign-extend the offset.  */
-      addr_wide_int soffset = addr_wide_int::from (offset, SIGNED);
+      offset_int soffset = offset_int::from (offset, SIGNED);
      if (!wi::fits_shwi_p (soffset))
        rhsoffset = UNKNOWN_OFFSET;
      else
diff --git a/gcc/tree-ssanames.c b/gcc/tree-ssanames.c
index b443cc372ca..b1ef37d6879 100644
--- a/gcc/tree-ssanames.c
+++ b/gcc/tree-ssanames.c
@@ -173,7 +173,7 @@ make_ssa_name_fn (struct function *fn, tree var, gimple stmt)
 /* Store range information MIN, and MAX to tree ssa_name NAME.  */
 void
-set_range_info (tree name, max_wide_int min, max_wide_int max)
+set_range_info (tree name, widest_int min, widest_int max)
 {
   gcc_assert (!POINTER_TYPE_P (TREE_TYPE (name)));
   range_info_def *ri = SSA_NAME_RANGE_INFO (name);
@@ -196,7 +196,7 @@ set_range_info (tree name, widest_int min, widest_int max)
   is used to determine if MIN and MAX are valid values.  */
 enum value_range_type
-get_range_info (tree name, max_wide_int *min, max_wide_int *max)
+get_range_info (tree name, widest_int *min, widest_int *max)
 {
   enum value_range_type range_type;
   gcc_assert (!POINTER_TYPE_P (TREE_TYPE (name)));
diff --git a/gcc/tree-ssanames.h b/gcc/tree-ssanames.h
index dc490fb4ab7..2c92d5772d7 100644
--- a/gcc/tree-ssanames.h
+++ b/gcc/tree-ssanames.h
@@ -49,9 +49,9 @@ struct GTY(()) ptr_info_def
 struct GTY (()) range_info_def {
   /* Minimum for value range.  */
-  max_wide_int min;
+  widest_int min;
   /* Maximum for value range.  */
-  max_wide_int max;
+  widest_int max;
 };
@@ -68,10 +68,10 @@ struct GTY (()) range_info_def {
 enum value_range_type { VR_UNDEFINED, VR_RANGE, VR_ANTI_RANGE, VR_VARYING };
 /* Sets the value range to SSA.  */
-extern void set_range_info (tree ssa, max_wide_int min, max_wide_int max);
+extern void set_range_info (tree ssa, widest_int min, widest_int max);
 /* Gets the value range from SSA.  */
-extern enum value_range_type get_range_info (tree name, max_wide_int *min,
-                                            max_wide_int *max);
+extern enum value_range_type get_range_info (tree name, widest_int *min,
+                                            widest_int *max);
 extern void init_ssanames (struct function *, int);
 extern void fini_ssanames (void);
 extern void ssanames_print_statistics (void);
diff --git a/gcc/tree-vect-data-refs.c b/gcc/tree-vect-data-refs.c
index c32ef7ef985..3764cfb98f8 100644
--- a/gcc/tree-vect-data-refs.c
+++ b/gcc/tree-vect-data-refs.c
@@ -2764,7 +2764,7 @@ vect_check_gather (gimple stmt, loop_vec_info loop_vinfo, tree *basep,
    {
      if (off == NULL_TREE)
        {
-         addr_wide_int moff = mem_ref_offset (base);
+         offset_int moff = mem_ref_offset (base);
          off = wide_int_to_tree (sizetype, moff);
        }
      else
diff --git a/gcc/tree-vrp.c b/gcc/tree-vrp.c
index 2c8db19de6d..b88c6e4949f 100644
--- a/gcc/tree-vrp.c
+++ b/gcc/tree-vrp.c
@@ -3838,18 +3838,18 @@ adjust_range_with_scev (value_range_t *vr, struct loop *loop,
      && (TREE_CODE (init) != SSA_NAME
          || get_value_range (init)->type == VR_RANGE))
    {
-      max_wide_int nit;
+      widest_int nit;
      /* We are only entering here for loop header PHI nodes, so using
         the number of latch executions is the correct thing to use.  */
      if (max_loop_iterations (loop, &nit))
        {
          value_range_t maxvr = VR_INITIALIZER;
-         max_wide_int wtmp;
+         widest_int wtmp;
          signop sgn = TYPE_SIGN (TREE_TYPE (step));
          bool overflow;
-         wtmp = wi::mul (wi::extend (step), nit, sgn, &overflow);
+         wtmp = wi::mul (wi::to_widest (step), nit, sgn, &overflow);
          /* If the multiplication overflowed we can't do a meaningful
             adjustment.
             Likewise if the result doesn't fit in the type of the
             induction variable.  For a signed type we have to
@@ -6274,7 +6274,7 @@ search_for_addr_array (tree t, location_t location)
    {
      tree tem = TREE_OPERAND (TREE_OPERAND (t, 0), 0);
      tree low_bound, up_bound, el_sz;
-      addr_wide_int idx;
+      offset_int idx;
      if (TREE_CODE (TREE_TYPE (tem)) != ARRAY_TYPE
          || TREE_CODE (TREE_TYPE (TREE_TYPE (tem))) == ARRAY_TYPE
          || !TYPE_DOMAIN (TREE_TYPE (tem)))
@@ -6292,7 +6292,7 @@ search_for_addr_array (tree t, location_t location)
        return;
      idx = mem_ref_offset (t);
-      idx = wi::sdiv_trunc (idx, wi::address (el_sz));
+      idx = wi::sdiv_trunc (idx, wi::to_offset (el_sz));
      if (wi::lts_p (idx, 0))
        {
          if (dump_file && (dump_flags & TDF_DETAILS))
@@ -6305,8 +6305,8 @@ search_for_addr_array (tree t, location_t location)
                      "array subscript is below array bounds");
          TREE_NO_WARNING (t) = 1;
        }
-      else if (wi::gts_p (idx, (wi::address (up_bound)
-                               - wi::address (low_bound) + 1)))
+      else if (wi::gts_p (idx, (wi::to_offset (up_bound)
+                               - wi::to_offset (low_bound) + 1)))
        {
          if (dump_file && (dump_flags & TDF_DETAILS))
            {
@@ -8698,7 +8698,7 @@ range_fits_type_p (value_range_t *vr, unsigned dest_precision, signop dest_sgn)
 {
   tree src_type;
   unsigned src_precision;
-  max_wide_int tem;
+  widest_int tem;
   signop src_sgn;
   /* We can only handle integral and pointer types.  */
@@ -8732,11 +8732,11 @@ range_fits_type_p (value_range_t *vr, unsigned dest_precision, signop dest_sgn)
   /* Then we can perform the conversion on both ends and compare
     the result for equality.  */
-  tem = wi::ext (wi::extend (vr->min), dest_precision, dest_sgn);
-  if (tem != wi::extend (vr->min))
+  tem = wi::ext (wi::to_widest (vr->min), dest_precision, dest_sgn);
+  if (tem != wi::to_widest (vr->min))
    return false;
-  tem = wi::ext (wi::extend (vr->max), dest_precision, dest_sgn);
-  if (tem != wi::extend (vr->max))
+  tem = wi::ext (wi::to_widest (vr->max), dest_precision, dest_sgn);
+  if (tem != wi::to_widest (vr->max))
    return false;
   return true;
@@ -8998,7 +8998,7 @@ simplify_conversion_using_ranges (gimple stmt)
   value_range_t *innervr;
   signop inner_sgn, middle_sgn, final_sgn;
   unsigned inner_prec, middle_prec, final_prec;
-  max_wide_int innermin, innermed, innermax, middlemin, middlemed, middlemax;
+  widest_int innermin, innermed, innermax, middlemin, middlemed, middlemax;
   finaltype = TREE_TYPE (gimple_assign_lhs (stmt));
   if (!INTEGRAL_TYPE_P (finaltype))
@@ -9022,8 +9022,8 @@ simplify_conversion_using_ranges (gimple stmt)
   /* Simulate the conversion chain to check if the result is equal if
     the middle conversion is removed.  */
-  innermin = wi::extend (innervr->min);
-  innermax = wi::extend (innervr->max);
+  innermin = wi::to_widest (innervr->min);
+  innermax = wi::to_widest (innervr->max);
   inner_prec = TYPE_PRECISION (TREE_TYPE (innerop));
   middle_prec = TYPE_PRECISION (TREE_TYPE (middleop));
@@ -9032,14 +9032,14 @@ simplify_conversion_using_ranges (gimple stmt)
   /* If the first conversion is not injective, the second must not
     be widening.  */
   if (wi::gtu_p (innermax - innermin,
-                wi::mask <max_wide_int> (middle_prec, false))
+                wi::mask <widest_int> (middle_prec, false))
      && middle_prec < final_prec)
    return false;
   /* We also want a medium value so that we can track the effect that
     narrowing conversions with sign change have.
     */
   inner_sgn = TYPE_SIGN (TREE_TYPE (innerop));
   if (inner_sgn == UNSIGNED)
-    innermed = wi::shifted_mask <max_wide_int> (1, inner_prec - 1, false);
+    innermed = wi::shifted_mask <widest_int> (1, inner_prec - 1, false);
   else
     innermed = 0;
   if (wi::cmp (innermin, innermed, inner_sgn) >= 0
@@ -9483,8 +9483,8 @@ vrp_finalize (void)
          && (TREE_CODE (vr_value[i]->max) == INTEGER_CST))
        {
          if (vr_value[i]->type == VR_RANGE)
-           set_range_info (name, wi::extend (vr_value[i]->min),
-                           wi::extend (vr_value[i]->max));
+           set_range_info (name, wi::to_widest (vr_value[i]->min),
+                           wi::to_widest (vr_value[i]->max));
          else if (vr_value[i]->type == VR_ANTI_RANGE)
            {
              /* VR_ANTI_RANGE ~[min, max] is encoded compactly as
@@ -9500,12 +9500,12 @@ vrp_finalize (void)
                {
                  unsigned prec = TYPE_PRECISION (TREE_TYPE (name));
                  set_range_info (name, 1,
-                                 wi::mask <max_wide_int> (prec, false));
+                                 wi::mask <widest_int> (prec, false));
                }
              else
                set_range_info (name,
-                               wi::extend (vr_value[i]->max) + 1,
-                               wi::extend (vr_value[i]->min) - 1);
+                               wi::to_widest (vr_value[i]->max) + 1,
+                               wi::to_widest (vr_value[i]->min) - 1);
            }
        }
     }
diff --git a/gcc/tree.c b/gcc/tree.c
index 6f56479626a..e0d88abea7c 100644
--- a/gcc/tree.c
+++ b/gcc/tree.c
@@ -1116,7 +1116,7 @@ build_int_cst_type (tree type, HOST_WIDE_INT low)
 tree
 double_int_to_tree (tree type, double_int cst)
 {
-  return wide_int_to_tree (type, max_wide_int::from (cst, TYPE_SIGN (type)));
+  return wide_int_to_tree (type, widest_int::from (cst, TYPE_SIGN (type)));
 }
 /* We force the wide_int CST to the range of the type TYPE by sign or
@@ -4314,10 +4314,10 @@ build_simple_mem_ref_loc (location_t loc, tree ptr)
 /* Return the constant offset of a MEM_REF or TARGET_MEM_REF tree T.  */
-addr_wide_int
+offset_int
 mem_ref_offset (const_tree t)
 {
-  return addr_wide_int::from (TREE_OPERAND (t, 1), SIGNED);
+  return offset_int::from (TREE_OPERAND (t, 1), SIGNED);
 }
 /* Return an invariant ADDR_EXPR of type TYPE taking the address of BASE
@@ -6898,7 +6898,7 @@ tree_int_cst_equal (const_tree t1, const_tree t2)
   if (TREE_CODE (t1) == INTEGER_CST
      && TREE_CODE (t2) == INTEGER_CST
-      && wi::extend (t1) == wi::extend (t2))
+      && wi::to_widest (t1) == wi::to_widest (t2))
    return 1;
   return 0;
@@ -7070,7 +7070,7 @@ simple_cst_equal (const_tree t1, const_tree t2)
   switch (code1)
     {
     case INTEGER_CST:
-      return wi::extend (t1) == wi::extend (t2);
+      return wi::to_widest (t1) == wi::to_widest (t2);
     case REAL_CST:
      return REAL_VALUES_IDENTICAL (TREE_REAL_CST (t1), TREE_REAL_CST (t2));
diff --git a/gcc/tree.h b/gcc/tree.h
index 35924a08edc..93be1e580fc 100644
--- a/gcc/tree.h
+++ b/gcc/tree.h
@@ -3701,7 +3701,7 @@ extern tree build_var_debug_value_stat (tree, tree MEM_STAT_DECL);
 extern tree double_int_to_tree (tree, double_int);
-extern addr_wide_int mem_ref_offset (const_tree);
+extern offset_int mem_ref_offset (const_tree);
 extern tree wide_int_to_tree (tree type, const wide_int_ref &cst);
 extern tree force_fit_type (tree, const wide_int_ref &, int, bool);
@@ -5275,9 +5275,9 @@ namespace wi
   };
   generic_wide_int <extended_tree <MAX_BITSIZE_MODE_ANY_INT> >
-  extend (const_tree);
+  to_widest (const_tree);
-  generic_wide_int <extended_tree <ADDR_MAX_PRECISION> > address (const_tree);
+  generic_wide_int <extended_tree <ADDR_MAX_PRECISION> > to_offset (const_tree);
 }
 inline unsigned int
@@ -5312,13 +5312,13 @@ wi::int_traits <const_tree>::decompose (HOST_WIDE_INT *,
 }
 inline generic_wide_int <wi::extended_tree <MAX_BITSIZE_MODE_ANY_INT> >
-wi::extend (const_tree t)
+wi::to_widest (const_tree t)
 {
   return t;
 }
 inline generic_wide_int
 <wi::extended_tree <ADDR_MAX_PRECISION> >
-wi::address (const_tree t)
+wi::to_offset (const_tree t)
 {
   return t;
 }
diff --git a/gcc/varasm.c b/gcc/varasm.c
index 8d0eb00edcf..2ad6b4488ab 100644
--- a/gcc/varasm.c
+++ b/gcc/varasm.c
@@ -4790,7 +4790,7 @@ array_size_for_constructor (tree val)
   tree max_index;
   unsigned HOST_WIDE_INT cnt;
   tree index, value, tmp;
-  addr_wide_int i;
+  offset_int i;
   /* This code used to attempt to handle string constants that are not arrays
     of single-bytes, but nothing else does, so there's no point in
@@ -4812,10 +4812,10 @@ array_size_for_constructor (tree val)
   /* Compute the total number of array elements.  */
   tmp = TYPE_MIN_VALUE (TYPE_DOMAIN (TREE_TYPE (val)));
-  i = wi::address (max_index) - wi::address (tmp) + 1;
+  i = wi::to_offset (max_index) - wi::to_offset (tmp) + 1;
   /* Multiply by the array element unit size to find number of bytes.  */
-  i *= wi::address (TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (val))));
+  i *= wi::to_offset (TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (val))));
   gcc_assert (wi::fits_uhwi_p (i));
   return i.to_uhwi ();
@@ -4898,10 +4898,9 @@ output_constructor_regular_field (oc_local_state *local)
        sign-extend the result because Ada has negative DECL_FIELD_OFFSETs
        but we are using an unsigned sizetype.  */
      unsigned prec = TYPE_PRECISION (sizetype);
-      addr_wide_int idx
-       = wi::sext (wi::address (local->index)
-                   - wi::address (local->min_index), prec);
-      fieldpos = (idx * wi::address (TYPE_SIZE_UNIT (TREE_TYPE (local->val))))
+      offset_int idx = wi::sext (wi::to_offset (local->index)
+                                - wi::to_offset (local->min_index), prec);
+      fieldpos = (idx * wi::to_offset (TYPE_SIZE_UNIT (TREE_TYPE (local->val))))
       .to_shwi ();
    }
   else if (local->field != NULL_TREE)
diff --git a/gcc/wide-int.cc b/gcc/wide-int.cc
index 00670ed8c15..9ef96e5cf54 100644
--- a/gcc/wide-int.cc
+++ b/gcc/wide-int.cc
@@ -2096,9 +2096,9 @@ wi::only_sign_bit_p (const wide_int_ref &x)
 * Private utilities.
 */
-void gt_ggc_mx(max_wide_int*) { }
-void gt_pch_nx(max_wide_int*,void (*)(void*, void*), void*) { }
-void gt_pch_nx(max_wide_int*) { }
+void gt_ggc_mx (widest_int *) { }
+void gt_pch_nx (widest_int *, void (*) (void *, void *), void *) { }
+void gt_pch_nx (widest_int *) { }
 /*
 * Private debug printing routines.
diff --git a/gcc/wide-int.h b/gcc/wide-int.h
index 163615aa02e..678ef2dd6c7 100644
--- a/gcc/wide-int.h
+++ b/gcc/wide-int.h
@@ -53,7 +53,7 @@ along with GCC; see the file COPYING3.  If not see
     multiply, division, shifts, comparisons, and operations that need
     overflow detected), the signedness must be specified separately.
-     2) addr_wide_int.  This is a fixed size representation that is
+     2) offset_int.  This is a fixed size representation that is
     guaranteed to be large enough to compute any bit or byte sized
     address calculation on the target.  Currently the value is 64 + 4
     bits rounded up to the next number even multiple of
@@ -70,17 +70,17 @@ along with GCC; see the file COPYING3.  If not see
     been no effort by the front ends to convert most addressing
     arithmetic to canonical types.
-     In the addr_wide_int, all numbers are represented as signed
-     numbers.  There are enough bits in the internal representation so
-     that no infomation is lost by representing them this way.
+     In the offset_int, all numbers are represented as signed numbers.
+     There are enough bits in the internal representation so that no
+     information is lost by representing them this way.
-     3) max_wide_int.  This representation is an approximation of
+     3) widest_int.  This representation is an approximation of
     infinite precision math.  However, it is not really infinite
     precision math as in the GMP library.  It is really finite
     precision math where the precision is 4 times the size of the
     largest integer that the target port can represent.
-     Like, the addr_wide_ints, all numbers are inherently signed.
+     Like the offset_ints, all numbers are inherently signed.
     There are several places in the GCC where this should/must be used:
@@ -96,14 +96,14 @@ along with GCC; see the file COPYING3.  If not see
     works with induction variables of many different types at the
     same time.  Because of this, it ends up doing many different
     calculations where the operands are not compatible types.  The
-     max_wide_int makes this easy, because it provides a field where
+     widest_int makes this easy, because it provides a field where
     nothing is lost when converting from any variable,
   * There are a small number of passes that currently use the
-     max_wide_int that should use the default.  These should be
+     widest_int that should use the default.  These should be
     changed.
-   There are surprising features of addr_wide_int and max_wide_int
+   There are surprising features of offset_int and widest_int
   that the users should be careful about:
     1) Shifts and rotations are just weird.  You have to specify a
@@ -117,7 +117,7 @@ along with GCC; see the file COPYING3.  If not see
     produce a different answer if the first product is larger than
     what can be represented in the input precision.
-   The addr_wide_int and the max_wide_int flavors are more expensive
+   The offset_int and the widest_int flavors are more expensive
   than the default wide int, so in addition to the caveats with these
   two, the default is the prefered representation.
@@ -130,7 +130,7 @@ along with GCC; see the file COPYING3.  If not see
   A wide_int contains three fields: the vector (VAL), precision and a
   length (LEN).  The length is the number of HWIs needed to
-   represent the value.  For the max_wide_int and the addr_wide_int,
+   represent the value.  For the widest_int and the offset_int,
   the precision is a constant that cannot be changed.  For the default
   wide_int, the precision is set from the constructor.
@@ -285,8 +285,8 @@ struct wide_int_storage;
   generic_wide_int < fixed_wide_int_storage <N> >
 typedef generic_wide_int <wide_int_storage> wide_int;
-typedef FIXED_WIDE_INT (ADDR_MAX_PRECISION) addr_wide_int;
-typedef FIXED_WIDE_INT (MAX_BITSIZE_MODE_ANY_INT) max_wide_int;
+typedef FIXED_WIDE_INT (ADDR_MAX_PRECISION) offset_int;
+typedef FIXED_WIDE_INT (MAX_BITSIZE_MODE_ANY_INT) widest_int;
 template <bool SE>
 struct wide_int_ref_storage;
@@ -365,7 +365,7 @@ namespace wi
   template <typename T1, typename T2>
   struct binary_traits <T1, T2, FLEXIBLE_PRECISION, FLEXIBLE_PRECISION>
   {
-    typedef max_wide_int result_type;
+    typedef widest_int result_type;
   };
   template <>
@@ -755,7 +755,7 @@ generic_wide_int <storage>::to_uhwi (unsigned int precision) const
 }
 /* TODO: The compiler is half converted from using HOST_WIDE_INT to
   represent addresses to using addr_wide_int to represent addresses.
   We use to_short_addr at the interface from new code to old,
   unconverted code.  */
-   represent addresses to using addr_wide_int to represent addresses.
+   represent addresses to using offset_int to represent addresses.
 template <typename storage>
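For readers unfamiliar with the wide-int branch, here is how the renamed API reads in practice.  This is a minimal illustrative sketch, not part of the patch: it only compiles inside the GCC tree where tree.h and wide-int.h are available, and the two helper functions (example_elt_offset, example_bound_le_p) are hypothetical names invented for this example.  Every wi:: call it uses appears in the diff above.

/* Hypothetical helpers illustrating the renamed types; not part of
   this commit.  offset_int (formerly addr_wide_int) is a fixed-size
   type wide enough for any target address or bit-offset computation;
   widest_int (formerly max_wide_int) approximates infinite precision
   and is used for things like loop iteration counts.  */

static HOST_WIDE_INT
example_elt_offset (tree index, tree low_bound, tree unit_size)
{
  /* wi::to_offset (formerly wi::address) converts an INTEGER_CST
     tree to offset_int without loss.  */
  offset_int off = ((wi::to_offset (index) - wi::to_offset (low_bound))
                    * wi::to_offset (unit_size));
  gcc_assert (wi::fits_shwi_p (off));
  return off.to_shwi ();
}

static bool
example_bound_le_p (struct loop *loop, tree bound)
{
  /* wi::to_widest (formerly wi::extend) converts an INTEGER_CST to
     widest_int, here compared against the recorded upper bound on
     the loop's iteration count.  */
  widest_int max_niter;
  if (!max_loop_iterations (loop, &max_niter))
    return false;
  return wi::les_p (wi::to_widest (bound), max_niter);
}

Both conversions are value-preserving by construction: as the wide-int.h comments above note, offset_int and widest_int carry enough internal bits that any INTEGER_CST of pointer or integral type fits without loss, which is why these call sites need no explicit sign or precision arguments.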