diff options
244 files changed, 13418 insertions, 5760 deletions
diff --git a/gcc/Makefile.in b/gcc/Makefile.in index f0b8c5a23f2..add63e3e1cd 100644 --- a/gcc/Makefile.in +++ b/gcc/Makefile.in @@ -868,7 +868,7 @@ RTL_BASE_H = coretypes.h rtl.h rtl.def $(MACHMODE_H) reg-notes.def \ insn-notes.def $(INPUT_H) $(REAL_H) statistics.h $(VEC_H) \ $(FIXED_VALUE_H) alias.h $(HASHTAB_H) FIXED_VALUE_H = fixed-value.h $(MACHMODE_H) double-int.h -RTL_H = $(RTL_BASE_H) $(FLAGS_H) genrtl.h +RTL_H = $(RTL_BASE_H) $(FLAGS_H) genrtl.h wide-int.h READ_MD_H = $(OBSTACK_H) $(HASHTAB_H) read-md.h PARAMS_H = params.h params.def BUILTINS_DEF = builtins.def sync-builtins.def omp-builtins.def \ @@ -898,7 +898,7 @@ FUNCTION_H = function.h $(HASHTAB_H) $(TM_H) hard-reg-set.h \ EXPR_H = expr.h insn-config.h $(FUNCTION_H) $(RTL_H) $(FLAGS_H) $(TREE_H) $(MACHMODE_H) $(EMIT_RTL_H) OPTABS_H = optabs.h insn-codes.h insn-opinit.h REGS_H = regs.h $(MACHMODE_H) hard-reg-set.h -CFGLOOP_H = cfgloop.h $(BASIC_BLOCK_H) double-int.h \ +CFGLOOP_H = cfgloop.h $(BASIC_BLOCK_H) double-int.h wide-int.h \ $(BITMAP_H) sbitmap.h IPA_UTILS_H = ipa-utils.h $(TREE_H) $(CGRAPH_H) IPA_REFERENCE_H = ipa-reference.h $(BITMAP_H) $(TREE_H) @@ -913,7 +913,7 @@ TIMEVAR_H = timevar.h timevar.def INSN_ATTR_H = insn-attr.h insn-attr-common.h $(INSN_ADDR_H) INSN_ADDR_H = $(srcdir)/insn-addr.h C_COMMON_H = c-family/c-common.h c-family/c-common.def $(TREE_H) \ - $(SPLAY_TREE_H) $(CPPLIB_H) $(GGC_H) $(DIAGNOSTIC_CORE_H) + $(SPLAY_TREE_H) $(CPPLIB_H) $(GGC_H) $(DIAGNOSTIC_CORE_H) wide-int.h C_PRAGMA_H = c-family/c-pragma.h $(CPPLIB_H) C_TREE_H = c/c-tree.h $(C_COMMON_H) $(DIAGNOSTIC_H) SYSTEM_H = system.h hwint.h $(srcdir)/../include/libiberty.h \ @@ -931,7 +931,7 @@ TREE_PASS_H = tree-pass.h $(TIMEVAR_H) $(DUMPFILE_H) TREE_FLOW_H = tree-flow.h tree-flow-inline.h tree-ssa-operands.h \ $(BITMAP_H) sbitmap.h $(BASIC_BLOCK_H) $(GIMPLE_H) \ $(HASHTAB_H) $(CGRAPH_H) $(IPA_REFERENCE_H) \ - tree-ssa-alias.h + tree-ssa-alias.h wide-int.h TREE_SSA_H = tree-ssa.h $(TREE_FLOW_H) PRETTY_PRINT_H = 
pretty-print.h $(INPUT_H) $(OBSTACK_H) TREE_PRETTY_PRINT_H = tree-pretty-print.h $(PRETTY_PRINT_H) @@ -941,7 +941,7 @@ DIAGNOSTIC_H = diagnostic.h $(DIAGNOSTIC_CORE_H) $(PRETTY_PRINT_H) C_PRETTY_PRINT_H = c-family/c-pretty-print.h $(PRETTY_PRINT_H) \ $(C_COMMON_H) $(TREE_H) TREE_INLINE_H = tree-inline.h -REAL_H = real.h $(MACHMODE_H) +REAL_H = real.h $(MACHMODE_H) signop.h LTO_STREAMER_H = lto-streamer.h $(LINKER_PLUGIN_API_H) $(TARGET_H) \ $(CGRAPH_H) $(VEC_H) $(HASH_TABLE_H) $(TREE_H) $(GIMPLE_H) \ $(GCOV_IO_H) $(DIAGNOSTIC_H) alloc-pool.h pointer-set.h @@ -1463,6 +1463,8 @@ OBJS = \ vmsdbgout.o \ vtable-verify.o \ web.o \ + wide-int.o \ + wide-int-print.o \ xcoffout.o \ $(out_object_file) \ $(EXTRA_OBJS) \ @@ -2225,7 +2227,7 @@ s-tm-texi: build/genhooks$(build_exeext) $(srcdir)/doc/tm.texi.in GTFILES = $(CPP_ID_DATA_H) $(srcdir)/input.h $(srcdir)/coretypes.h \ $(host_xm_file_list) \ $(tm_file_list) $(HASHTAB_H) $(SPLAY_TREE_H) $(srcdir)/bitmap.h \ - $(srcdir)/alias.h $(srcdir)/coverage.c $(srcdir)/rtl.h \ + $(srcdir)/wide-int.h $(srcdir)/alias.h $(srcdir)/coverage.c $(srcdir)/rtl.h \ $(srcdir)/optabs.h $(srcdir)/tree.h $(srcdir)/tree-core.h \ $(srcdir)/libfuncs.h $(SYMTAB_H) \ $(srcdir)/real.h $(srcdir)/function.h $(srcdir)/insn-addr.h $(srcdir)/hwint.h \ @@ -2236,6 +2238,7 @@ GTFILES = $(CPP_ID_DATA_H) $(srcdir)/input.h $(srcdir)/coretypes.h \ $(srcdir)/alias.c $(srcdir)/bitmap.c $(srcdir)/cselib.c $(srcdir)/cgraph.c \ $(srcdir)/ipa-prop.c $(srcdir)/ipa-cp.c $(srcdir)/ipa-utils.h \ $(srcdir)/dbxout.c \ + $(srcdir)/signop.h \ $(srcdir)/dwarf2out.h \ $(srcdir)/dwarf2asm.c \ $(srcdir)/dwarf2cfi.c \ @@ -2433,15 +2436,16 @@ CFLAGS-gengtype-parse.o += -DGENERATOR_FILE build/gengtype-parse.o: $(BCONFIG_H) gengtype-state.o build/gengtype-state.o: gengtype-state.c $(SYSTEM_H) \ - gengtype.h errors.h double-int.h version.h $(HASHTAB_H) $(OBSTACK_H) \ - $(XREGEX_H) + gengtype.h errors.h double-int.h version.h $(HASHTAB_H) \ + $(OBSTACK_H) $(XREGEX_H) gengtype-state.o: 
$(CONFIG_H) CFLAGS-gengtype-state.o += -DGENERATOR_FILE build/gengtype-state.o: $(BCONFIG_H) - +wide-int.h: $(GTM_H) $(TREE_H) hwint.h $(OPTIONS_H) \ + $(MACHMODE_H) double-int.h dumpfile.h $(REAL_H) signop.h gengtype.o build/gengtype.o : gengtype.c $(SYSTEM_H) gengtype.h \ - rtl.def insn-notes.def errors.h double-int.h version.h $(HASHTAB_H) \ - $(OBSTACK_H) $(XREGEX_H) + rtl.def insn-notes.def errors.h double-int.h version.h \ + $(HASHTAB_H) $(OBSTACK_H) $(XREGEX_H) gengtype.o: $(CONFIG_H) CFLAGS-gengtype.o += -DGENERATOR_FILE build/gengtype.o: $(BCONFIG_H) @@ -3731,7 +3735,7 @@ TAGS: lang.tags incs="$$incs --include $$dir/TAGS.sub"; \ fi; \ done; \ - etags -o TAGS.sub c-family/*.h c-family/*.c *.h *.c; \ + etags -o TAGS.sub c-family/*.h c-family/*.c *.h *.c *.cc; \ etags --include TAGS.sub $$incs) # ----------------------------------------------------- diff --git a/gcc/ada/gcc-interface/cuintp.c b/gcc/ada/gcc-interface/cuintp.c index 9b58b0ecc64..da575c0efc0 100644 --- a/gcc/ada/gcc-interface/cuintp.c +++ b/gcc/ada/gcc-interface/cuintp.c @@ -150,24 +150,24 @@ UI_From_gnu (tree Input) Int_Vector vec; #if HOST_BITS_PER_WIDE_INT == 64 - /* On 64-bit hosts, host_integerp tells whether the input fits in a + /* On 64-bit hosts, tree_fits_shwi_p tells whether the input fits in a signed 64-bit integer. Then a truncation tells whether it fits in a signed 32-bit integer. */ - if (host_integerp (Input, 0)) + if (tree_fits_shwi_p (Input)) { - HOST_WIDE_INT hw_input = TREE_INT_CST_LOW (Input); + HOST_WIDE_INT hw_input = tree_to_shwi (Input); if (hw_input == (int) hw_input) return UI_From_Int (hw_input); } else return No_Uint; #else - /* On 32-bit hosts, host_integerp tells whether the input fits in a + /* On 32-bit hosts, tree_fits_shwi_p tells whether the input fits in a signed 32-bit integer. Then a sign test tells whether it fits in a signed 64-bit integer. 
*/ - if (host_integerp (Input, 0)) - return UI_From_Int (TREE_INT_CST_LOW (Input)); - else if (TREE_INT_CST_HIGH (Input) < 0 && TYPE_UNSIGNED (gnu_type)) + if (tree_fits_shwi_p (Input)) + return UI_From_Int (tree_to_shwi (Input)); + else if (wi::lts_p (Input, 0) && TYPE_UNSIGNED (gnu_type)) return No_Uint; #endif @@ -176,9 +176,9 @@ UI_From_gnu (tree Input) for (i = Max_For_Dint - 1; i >= 0; i--) { - v[i] = tree_low_cst (fold_build1 (ABS_EXPR, gnu_type, - fold_build2 (TRUNC_MOD_EXPR, gnu_type, - gnu_temp, gnu_base)), 0); + v[i] = tree_to_hwi (fold_build1 (ABS_EXPR, gnu_type, + fold_build2 (TRUNC_MOD_EXPR, gnu_type, + gnu_temp, gnu_base))); gnu_temp = fold_build2 (TRUNC_DIV_EXPR, gnu_type, gnu_temp, gnu_base); } diff --git a/gcc/ada/gcc-interface/decl.c b/gcc/ada/gcc-interface/decl.c index 8fa73492667..a0f96036758 100644 --- a/gcc/ada/gcc-interface/decl.c +++ b/gcc/ada/gcc-interface/decl.c @@ -837,13 +837,13 @@ gnat_to_gnu_entity (Entity_Id gnat_entity, tree gnu_expr, int definition) align_cap = get_mode_alignment (ptr_mode); } - if (!host_integerp (TYPE_SIZE (gnu_type), 1) + if (!tree_fits_uhwi_p (TYPE_SIZE (gnu_type)) || compare_tree_int (TYPE_SIZE (gnu_type), size_cap) > 0) align = 0; else if (compare_tree_int (TYPE_SIZE (gnu_type), align_cap) > 0) align = align_cap; else - align = ceil_pow2 (tree_low_cst (TYPE_SIZE (gnu_type), 1)); + align = ceil_pow2 (tree_to_uhwi (TYPE_SIZE (gnu_type))); /* But make sure not to under-align the object. 
*/ if (align <= TYPE_ALIGN (gnu_type)) @@ -1478,10 +1478,10 @@ gnat_to_gnu_entity (Entity_Id gnat_entity, tree gnu_expr, int definition) && const_flag && gnu_expr && TREE_CONSTANT (gnu_expr) && AGGREGATE_TYPE_P (gnu_type) - && host_integerp (TYPE_SIZE_UNIT (gnu_type), 1) + && tree_fits_uhwi_p (TYPE_SIZE_UNIT (gnu_type)) && !(TYPE_IS_PADDING_P (gnu_type) - && !host_integerp (TYPE_SIZE_UNIT - (TREE_TYPE (TYPE_FIELDS (gnu_type))), 1))) + && !tree_fits_uhwi_p (TYPE_SIZE_UNIT + (TREE_TYPE (TYPE_FIELDS (gnu_type)))))) static_p = true; /* Now create the variable or the constant and set various flags. */ @@ -3493,7 +3493,7 @@ gnat_to_gnu_entity (Entity_Id gnat_entity, tree gnu_expr, int definition) gnu_size = DECL_SIZE (gnu_old_field); if (RECORD_OR_UNION_TYPE_P (gnu_field_type) && !TYPE_FAT_POINTER_P (gnu_field_type) - && host_integerp (TYPE_SIZE (gnu_field_type), 1)) + && tree_fits_uhwi_p (TYPE_SIZE (gnu_field_type))) gnu_field_type = make_packable_type (gnu_field_type, true); } @@ -4534,7 +4534,7 @@ gnat_to_gnu_entity (Entity_Id gnat_entity, tree gnu_expr, int definition) NULL_TREE)) { unsigned int size - = TREE_INT_CST_LOW (TYPE_SIZE (gnu_return_type)); + = tree_to_hwi (TYPE_SIZE (gnu_return_type)); unsigned int i = BITS_PER_UNIT; enum machine_mode mode; @@ -4918,22 +4918,22 @@ gnat_to_gnu_entity (Entity_Id gnat_entity, tree gnu_expr, int definition) /* Consider an alignment as suspicious if the alignment/size ratio is greater or equal to the byte/bit ratio. 
*/ - if (host_integerp (size, 1) - && align >= TREE_INT_CST_LOW (size) * BITS_PER_UNIT) + if (tree_fits_uhwi_p (size) + && align >= tree_to_uhwi (size) * BITS_PER_UNIT) post_error_ne ("?suspiciously large alignment specified for&", Expression (Alignment_Clause (gnat_entity)), gnat_entity); } } else if (Is_Atomic (gnat_entity) && !gnu_size - && host_integerp (TYPE_SIZE (gnu_type), 1) + && tree_fits_uhwi_p (TYPE_SIZE (gnu_type)) && integer_pow2p (TYPE_SIZE (gnu_type))) align = MIN (BIGGEST_ALIGNMENT, - tree_low_cst (TYPE_SIZE (gnu_type), 1)); + tree_to_uhwi (TYPE_SIZE (gnu_type))); else if (Is_Atomic (gnat_entity) && gnu_size - && host_integerp (gnu_size, 1) + && tree_fits_uhwi_p (gnu_size) && integer_pow2p (gnu_size)) - align = MIN (BIGGEST_ALIGNMENT, tree_low_cst (gnu_size, 1)); + align = MIN (BIGGEST_ALIGNMENT, tree_to_uhwi (gnu_size)); /* See if we need to pad the type. If we did, and made a record, the name of the new type may be changed. So get it back for @@ -5579,7 +5579,7 @@ gnat_to_gnu_component_type (Entity_Id gnat_array, bool definition, && !Strict_Alignment (gnat_type) && RECORD_OR_UNION_TYPE_P (gnu_type) && !TYPE_FAT_POINTER_P (gnu_type) - && host_integerp (TYPE_SIZE (gnu_type), 1)) + && tree_fits_uhwi_p (TYPE_SIZE (gnu_type))) gnu_type = make_packable_type (gnu_type, false); if (Has_Atomic_Components (gnat_array)) @@ -6503,7 +6503,7 @@ gnat_to_gnu_field (Entity_Id gnat_field, tree gnu_record_type, int packed, if (!needs_strict_alignment && RECORD_OR_UNION_TYPE_P (gnu_field_type) && !TYPE_FAT_POINTER_P (gnu_field_type) - && host_integerp (TYPE_SIZE (gnu_field_type), 1) + && tree_fits_uhwi_p (TYPE_SIZE (gnu_field_type)) && (packed == 1 || (gnu_size && (tree_int_cst_lt (gnu_size, TYPE_SIZE (gnu_field_type)) @@ -7492,11 +7492,11 @@ annotate_value (tree gnu_size) if (TREE_CODE (TREE_OPERAND (gnu_size, 1)) == INTEGER_CST) { tree op1 = TREE_OPERAND (gnu_size, 1); - double_int signed_op1 - = tree_to_double_int (op1).sext (TYPE_PRECISION (sizetype)); - if 
(signed_op1.is_negative ()) + wide_int signed_op1 + = wide_int::from_tree (op1).sforce_to_size (TYPE_PRECISION (sizetype)); + if (signed_op1.neg_p ()) { - op1 = double_int_to_tree (sizetype, -signed_op1); + op1 = wide_int_to_tree (sizetype, -signed_op1); pre_op1 = annotate_value (build1 (NEGATE_EXPR, sizetype, op1)); } } @@ -8363,7 +8363,7 @@ create_field_decl_from (tree old_field, tree field_type, tree record_type, { tree t = TREE_VALUE (purpose_member (old_field, pos_list)); tree pos = TREE_VEC_ELT (t, 0), bitpos = TREE_VEC_ELT (t, 2); - unsigned int offset_align = tree_low_cst (TREE_VEC_ELT (t, 1), 1); + unsigned int offset_align = tree_to_uhwi (TREE_VEC_ELT (t, 1)); tree new_pos, new_field; unsigned int i; subst_pair *s; diff --git a/gcc/ada/gcc-interface/misc.c b/gcc/ada/gcc-interface/misc.c index 3abe57b6bd9..a82f6fd8f82 100644 --- a/gcc/ada/gcc-interface/misc.c +++ b/gcc/ada/gcc-interface/misc.c @@ -591,7 +591,7 @@ gnat_type_max_size (const_tree gnu_type) /* If we don't have a constant, see what we can get from TYPE_ADA_SIZE, which should stay untouched. */ - if (!host_integerp (max_unitsize, 1) + if (!tree_fits_uhwi_p (max_unitsize) && RECORD_OR_UNION_TYPE_P (gnu_type) && !TYPE_FAT_POINTER_P (gnu_type) && TYPE_ADA_SIZE (gnu_type)) @@ -600,7 +600,7 @@ gnat_type_max_size (const_tree gnu_type) /* If we have succeeded in finding a constant, round it up to the type's alignment and return the result in units. 
*/ - if (host_integerp (max_adasize, 1)) + if (tree_fits_uhwi_p (max_adasize)) max_unitsize = size_binop (CEIL_DIV_EXPR, round_up (max_adasize, TYPE_ALIGN (gnu_type)), diff --git a/gcc/ada/gcc-interface/trans.c b/gcc/ada/gcc-interface/trans.c index 7eef8aa5cb0..b283b561c38 100644 --- a/gcc/ada/gcc-interface/trans.c +++ b/gcc/ada/gcc-interface/trans.c @@ -4239,7 +4239,7 @@ Call_to_gnu (Node_Id gnat_node, tree *gnu_result_type_p, tree gnu_target, gnu_actual = unchecked_convert (DECL_ARG_TYPE (gnu_formal), convert (gnat_type_for_size - (TREE_INT_CST_LOW (gnu_size), 1), + (tree_to_hwi (gnu_size), 1), integer_zero_node), false); else diff --git a/gcc/ada/gcc-interface/utils.c b/gcc/ada/gcc-interface/utils.c index 24123907d30..0998c5b4a8c 100644 --- a/gcc/ada/gcc-interface/utils.c +++ b/gcc/ada/gcc-interface/utils.c @@ -771,7 +771,7 @@ make_aligning_type (tree type, unsigned int align, tree size, tree make_packable_type (tree type, bool in_record) { - unsigned HOST_WIDE_INT size = tree_low_cst (TYPE_SIZE (type), 1); + unsigned HOST_WIDE_INT size = tree_to_uhwi (TYPE_SIZE (type)); unsigned HOST_WIDE_INT new_size; tree new_type, old_field, field_list = NULL_TREE; unsigned int align; @@ -806,12 +806,12 @@ make_packable_type (tree type, bool in_record) /* Do not try to shrink the size if the RM size is not constant. */ if (TYPE_CONTAINS_TEMPLATE_P (type) - || !host_integerp (TYPE_ADA_SIZE (type), 1)) + || !tree_fits_uhwi_p (TYPE_ADA_SIZE (type))) return type; /* Round the RM size up to a unit boundary to get the minimal size for a BLKmode record. Give up if it's already the size. 
*/ - new_size = TREE_INT_CST_LOW (TYPE_ADA_SIZE (type)); + new_size = tree_to_uhwi (TYPE_ADA_SIZE (type)); new_size = (new_size + BITS_PER_UNIT - 1) & -BITS_PER_UNIT; if (new_size == size) return type; @@ -832,7 +832,7 @@ make_packable_type (tree type, bool in_record) if (RECORD_OR_UNION_TYPE_P (new_field_type) && !TYPE_FAT_POINTER_P (new_field_type) - && host_integerp (TYPE_SIZE (new_field_type), 1)) + && tree_fits_uhwi_p (TYPE_SIZE (new_field_type))) new_field_type = make_packable_type (new_field_type, true); /* However, for the last field in a not already packed record type @@ -915,10 +915,10 @@ make_type_from_size (tree type, tree size_tree, bool for_biased) /* If size indicates an error, just return TYPE to avoid propagating the error. Likewise if it's too large to represent. */ - if (!size_tree || !host_integerp (size_tree, 1)) + if (!size_tree || !tree_fits_uhwi_p (size_tree)) return type; - size = tree_low_cst (size_tree, 1); + size = tree_to_uhwi (size_tree); switch (TREE_CODE (type)) { @@ -1741,26 +1741,26 @@ rest_of_record_type_compilation (tree record_type) if (!pos && TREE_CODE (curpos) == MULT_EXPR - && host_integerp (TREE_OPERAND (curpos, 1), 1)) + && tree_fits_uhwi_p (TREE_OPERAND (curpos, 1))) { tree offset = TREE_OPERAND (curpos, 0); - align = tree_low_cst (TREE_OPERAND (curpos, 1), 1); + align = tree_to_uhwi (TREE_OPERAND (curpos, 1)); align = scale_by_factor_of (offset, align); last_pos = round_up (last_pos, align); pos = compute_related_constant (curpos, last_pos); } else if (!pos && TREE_CODE (curpos) == PLUS_EXPR - && host_integerp (TREE_OPERAND (curpos, 1), 1) + && tree_fits_uhwi_p (TREE_OPERAND (curpos, 1)) && TREE_CODE (TREE_OPERAND (curpos, 0)) == MULT_EXPR - && host_integerp - (TREE_OPERAND (TREE_OPERAND (curpos, 0), 1), 1)) + && tree_fits_uhwi_p + (TREE_OPERAND (TREE_OPERAND (curpos, 0), 1))) { tree offset = TREE_OPERAND (TREE_OPERAND (curpos, 0), 0); unsigned HOST_WIDE_INT addend - = tree_low_cst (TREE_OPERAND (curpos, 1), 1); + = 
tree_to_uhwi (TREE_OPERAND (curpos, 1)); align - = tree_low_cst (TREE_OPERAND (TREE_OPERAND (curpos, 0), 1), 1); + = tree_to_uhwi (TREE_OPERAND (TREE_OPERAND (curpos, 0), 1)); align = scale_by_factor_of (offset, align); align = MIN (align, addend & -addend); last_pos = round_up (last_pos, align); @@ -2377,8 +2377,8 @@ create_field_decl (tree field_name, tree field_type, tree record_type, that an alignment of 0 is taken as infinite. */ unsigned int known_align; - if (host_integerp (pos, 1)) - known_align = tree_low_cst (pos, 1) & - tree_low_cst (pos, 1); + if (tree_fits_uhwi_p (pos)) + known_align = tree_to_uhwi (pos) & - tree_to_uhwi (pos); else known_align = BITS_PER_UNIT; @@ -2388,7 +2388,7 @@ create_field_decl (tree field_name, tree field_type, tree record_type, layout_decl (field_decl, known_align); SET_DECL_OFFSET_ALIGN (field_decl, - host_integerp (pos, 1) ? BIGGEST_ALIGNMENT + tree_fits_uhwi_p (pos) ? BIGGEST_ALIGNMENT : BITS_PER_UNIT); pos_from_bit (&DECL_FIELD_OFFSET (field_decl), &DECL_FIELD_BIT_OFFSET (field_decl), @@ -2548,8 +2548,8 @@ invalidate_global_renaming_pointers (void) bool value_factor_p (tree value, HOST_WIDE_INT factor) { - if (host_integerp (value, 1)) - return tree_low_cst (value, 1) % factor == 0; + if (tree_fits_uhwi_p (value)) + return tree_to_uhwi (value) % factor == 0; if (TREE_CODE (value) == MULT_EXPR) return (value_factor_p (TREE_OPERAND (value, 0), factor) @@ -2570,7 +2570,7 @@ scale_by_factor_of (tree expr, unsigned int value) if (TREE_CODE (expr) == BIT_AND_EXPR && TREE_CODE (TREE_OPERAND (expr, 1)) == INTEGER_CST) { - unsigned HOST_WIDE_INT mask = TREE_INT_CST_LOW (TREE_OPERAND (expr, 1)); + unsigned HOST_WIDE_INT mask = tree_to_hwi (TREE_OPERAND (expr, 1)); unsigned int i = 0; while ((mask & 1) == 0 && i < HOST_BITS_PER_WIDE_INT) @@ -2608,16 +2608,16 @@ potential_alignment_gap (tree prev_field, tree curr_field, tree offset) /* If the distance between the end of prev_field and the beginning of curr_field is constant, then there 
is a gap if the value of this constant is not null. */ - if (offset && host_integerp (offset, 1)) + if (offset && tree_fits_uhwi_p (offset)) return !integer_zerop (offset); /* If the size and position of the previous field are constant, then check the sum of this size and position. There will be a gap iff it is not multiple of the current field alignment. */ - if (host_integerp (DECL_SIZE (prev_field), 1) - && host_integerp (bit_position (prev_field), 1)) - return ((tree_low_cst (bit_position (prev_field), 1) - + tree_low_cst (DECL_SIZE (prev_field), 1)) + if (tree_fits_uhwi_p (DECL_SIZE (prev_field)) + && tree_fits_uhwi_p (bit_position (prev_field))) + return ((tree_to_uhwi (bit_position (prev_field)) + + tree_to_uhwi (DECL_SIZE (prev_field))) % DECL_ALIGN (curr_field) != 0); /* If both the position and size of the previous field are multiples @@ -3274,7 +3274,7 @@ build_vms_descriptor32 (tree type, Mechanism_Type mech, Entity_Id gnat_entity) case ENUMERAL_TYPE: case BOOLEAN_TYPE: if (TYPE_VAX_FLOATING_POINT_P (type)) - switch (tree_low_cst (TYPE_DIGITS_VALUE (type), 1)) + switch (tree_to_uhwi (TYPE_DIGITS_VALUE (type))) { case 6: dtype = 10; @@ -3314,7 +3314,7 @@ build_vms_descriptor32 (tree type, Mechanism_Type mech, Entity_Id gnat_entity) case COMPLEX_TYPE: if (TREE_CODE (TREE_TYPE (type)) == INTEGER_TYPE && TYPE_VAX_FLOATING_POINT_P (type)) - switch (tree_low_cst (TYPE_DIGITS_VALUE (type), 1)) + switch (tree_to_uhwi (TYPE_DIGITS_VALUE (type))) { case 6: dtype = 12; @@ -3575,7 +3575,7 @@ build_vms_descriptor (tree type, Mechanism_Type mech, Entity_Id gnat_entity) case ENUMERAL_TYPE: case BOOLEAN_TYPE: if (TYPE_VAX_FLOATING_POINT_P (type)) - switch (tree_low_cst (TYPE_DIGITS_VALUE (type), 1)) + switch (tree_to_uhwi (TYPE_DIGITS_VALUE (type))) { case 6: dtype = 10; @@ -3615,7 +3615,7 @@ build_vms_descriptor (tree type, Mechanism_Type mech, Entity_Id gnat_entity) case COMPLEX_TYPE: if (TREE_CODE (TREE_TYPE (type)) == INTEGER_TYPE && TYPE_VAX_FLOATING_POINT_P 
(type)) - switch (tree_low_cst (TYPE_DIGITS_VALUE (type), 1)) + switch (tree_to_uhwi (TYPE_DIGITS_VALUE (type))) { case 6: dtype = 12; @@ -3869,7 +3869,7 @@ convert_vms_descriptor64 (tree gnu_type, tree gnu_expr, Entity_Id gnat_subprog) tree max_field = DECL_CHAIN (TYPE_FIELDS (template_type)); tree template_tree, template_addr, aflags, dimct, t, u; /* See the head comment of build_vms_descriptor. */ - int iklass = TREE_INT_CST_LOW (DECL_INITIAL (klass)); + int iklass = tree_to_hwi (DECL_INITIAL (klass)); tree lfield, ufield; vec<constructor_elt, va_gc> *v; @@ -4023,7 +4023,7 @@ convert_vms_descriptor32 (tree gnu_type, tree gnu_expr, Entity_Id gnat_subprog) tree max_field = DECL_CHAIN (TYPE_FIELDS (template_type)); tree template_tree, template_addr, aflags, dimct, t, u; /* See the head comment of build_vms_descriptor. */ - int iklass = TREE_INT_CST_LOW (DECL_INITIAL (klass)); + int iklass = tree_to_hwi (DECL_INITIAL (klass)); vec<constructor_elt, va_gc> *v; /* Convert POINTER to the pointer-to-array type. */ @@ -5307,7 +5307,7 @@ unchecked_convert (tree type, tree expr, bool notrunc_p) GET_MODE_BITSIZE (TYPE_MODE (type)))) { tree rec_type = make_node (RECORD_TYPE); - unsigned HOST_WIDE_INT prec = TREE_INT_CST_LOW (TYPE_RM_SIZE (type)); + unsigned HOST_WIDE_INT prec = tree_to_hwi (TYPE_RM_SIZE (type)); tree field_type, field; if (TYPE_UNSIGNED (type)) @@ -5336,7 +5336,7 @@ unchecked_convert (tree type, tree expr, bool notrunc_p) GET_MODE_BITSIZE (TYPE_MODE (etype)))) { tree rec_type = make_node (RECORD_TYPE); - unsigned HOST_WIDE_INT prec = TREE_INT_CST_LOW (TYPE_RM_SIZE (etype)); + unsigned HOST_WIDE_INT prec = tree_to_hwi (TYPE_RM_SIZE (etype)); vec<constructor_elt, va_gc> *v; vec_alloc (v, 1); tree field_type, field; @@ -6061,11 +6061,10 @@ static bool get_nonnull_operand (tree arg_num_expr, unsigned HOST_WIDE_INT *valp) { /* Verify the arg number is a constant. 
*/ - if (TREE_CODE (arg_num_expr) != INTEGER_CST - || TREE_INT_CST_HIGH (arg_num_expr) != 0) + if (!cst_fits_uhwi_p (arg_num_expr)) return false; - *valp = TREE_INT_CST_LOW (arg_num_expr); + *valp = tree_to_hwi (arg_num_expr); return true; } @@ -6302,7 +6301,7 @@ handle_vector_size_attribute (tree *node, tree name, tree args, size = TREE_VALUE (args); - if (!host_integerp (size, 1)) + if (!tree_fits_uhwi_p (size)) { warning (OPT_Wattributes, "%qs attribute ignored", IDENTIFIER_POINTER (name)); @@ -6310,7 +6309,7 @@ handle_vector_size_attribute (tree *node, tree name, tree args, } /* Get the vector size (in bytes). */ - vecsize = tree_low_cst (size, 1); + vecsize = tree_to_uhwi (size); /* We need to provide for vector pointers, vector arrays, and functions returning vectors. For example: @@ -6334,7 +6333,7 @@ handle_vector_size_attribute (tree *node, tree name, tree args, || (!SCALAR_FLOAT_MODE_P (orig_mode) && GET_MODE_CLASS (orig_mode) != MODE_INT && !ALL_SCALAR_FIXED_POINT_MODE_P (orig_mode)) - || !host_integerp (TYPE_SIZE_UNIT (type), 1) + || !tree_fits_uhwi_p (TYPE_SIZE_UNIT (type)) || TREE_CODE (type) == BOOLEAN_TYPE) { error ("invalid vector type for attribute %qs", @@ -6342,7 +6341,7 @@ handle_vector_size_attribute (tree *node, tree name, tree args, return NULL_TREE; } - if (vecsize % tree_low_cst (TYPE_SIZE_UNIT (type), 1)) + if (vecsize % tree_to_uhwi (TYPE_SIZE_UNIT (type))) { error ("vector size not an integral multiple of component size"); return NULL; @@ -6355,7 +6354,7 @@ handle_vector_size_attribute (tree *node, tree name, tree args, } /* Calculate how many units fit in the vector. 
*/ - nunits = vecsize / tree_low_cst (TYPE_SIZE_UNIT (type), 1); + nunits = vecsize / tree_to_uhwi (TYPE_SIZE_UNIT (type)); if (nunits & (nunits - 1)) { error ("number of components of the vector not a power of two"); @@ -6403,7 +6402,7 @@ handle_vector_type_attribute (tree *node, tree name, tree ARG_UNUSED (args), bases, and this attribute is for binding implementors, not end-users, so we should never get there from legitimate explicit uses. */ - if (!host_integerp (rep_size, 1)) + if (!tree_fits_uhwi_p (rep_size)) return NULL_TREE; /* Get the element type/mode and check this is something we know @@ -6418,7 +6417,7 @@ handle_vector_type_attribute (tree *node, tree name, tree ARG_UNUSED (args), || (!SCALAR_FLOAT_MODE_P (elem_mode) && GET_MODE_CLASS (elem_mode) != MODE_INT && !ALL_SCALAR_FIXED_POINT_MODE_P (elem_mode)) - || !host_integerp (TYPE_SIZE_UNIT (elem_type), 1)) + || !tree_fits_uhwi_p (TYPE_SIZE_UNIT (elem_type))) { error ("invalid element type for attribute %qs", IDENTIFIER_POINTER (name)); @@ -6427,9 +6426,9 @@ handle_vector_type_attribute (tree *node, tree name, tree ARG_UNUSED (args), /* Sanity check the vector size and element type consistency. 
*/ - vec_bytes = tree_low_cst (rep_size, 1); + vec_bytes = tree_to_uhwi (rep_size); - if (vec_bytes % tree_low_cst (TYPE_SIZE_UNIT (elem_type), 1)) + if (vec_bytes % tree_to_uhwi (TYPE_SIZE_UNIT (elem_type))) { error ("vector size not an integral multiple of component size"); return NULL; @@ -6441,7 +6440,7 @@ handle_vector_type_attribute (tree *node, tree name, tree ARG_UNUSED (args), return NULL; } - vec_units = vec_bytes / tree_low_cst (TYPE_SIZE_UNIT (elem_type), 1); + vec_units = vec_bytes / tree_to_uhwi (TYPE_SIZE_UNIT (elem_type)); if (vec_units & (vec_units - 1)) { error ("number of components of the vector not a power of two"); diff --git a/gcc/ada/gcc-interface/utils2.c b/gcc/ada/gcc-interface/utils2.c index 64f7564a75d..ec986158a7a 100644 --- a/gcc/ada/gcc-interface/utils2.c +++ b/gcc/ada/gcc-interface/utils2.c @@ -119,7 +119,7 @@ known_alignment (tree exp) case INTEGER_CST: { - unsigned HOST_WIDE_INT c = TREE_INT_CST_LOW (exp); + unsigned HOST_WIDE_INT c = tree_to_hwi (exp); /* The first part of this represents the lowest bit in the constant, but it is originally in bytes, not bits. 
*/ this_alignment = MIN (BITS_PER_UNIT * (c & -c), BIGGEST_ALIGNMENT); @@ -626,7 +626,7 @@ nonbinary_modular_operation (enum tree_code op_code, tree type, tree lhs, static unsigned int resolve_atomic_size (tree type) { - unsigned HOST_WIDE_INT size = tree_low_cst (TYPE_SIZE_UNIT (type), 1); + unsigned HOST_WIDE_INT size = tree_to_uhwi (TYPE_SIZE_UNIT (type)); if (size == 1 || size == 2 || size == 4 || size == 8 || size == 16) return size; diff --git a/gcc/alias.c b/gcc/alias.c index a48bb51ed96..ee24a575509 100644 --- a/gcc/alias.c +++ b/gcc/alias.c @@ -338,9 +338,10 @@ ao_ref_from_mem (ao_ref *ref, const_rtx mem) if (MEM_EXPR (mem) != get_spill_slot_decl (false) && (ref->offset < 0 || (DECL_P (ref->base) - && (!host_integerp (DECL_SIZE (ref->base), 1) - || (TREE_INT_CST_LOW (DECL_SIZE ((ref->base))) - < (unsigned HOST_WIDE_INT)(ref->offset + ref->size)))))) + && (DECL_SIZE (ref->base) == NULL_TREE + || TREE_CODE (DECL_SIZE (ref->base)) != INTEGER_CST + || wi::ltu_p (DECL_SIZE (ref->base), + ref->offset + ref->size))))) return false; return true; @@ -1541,9 +1542,7 @@ rtx_equal_for_memref_p (const_rtx x, const_rtx y) case VALUE: CASE_CONST_UNIQUE: - /* There's no need to compare the contents of CONST_DOUBLEs or - CONST_INTs because pointer equality is a good enough - comparison for these nodes. */ + /* Pointer equality guarantees equality for these nodes. */ return 0; default: @@ -2346,15 +2345,23 @@ adjust_offset_for_component_ref (tree x, bool *known_p, { tree xoffset = component_ref_field_offset (x); tree field = TREE_OPERAND (x, 1); + addr_wide_int woffset; + if (TREE_CODE (xoffset) != INTEGER_CST) + { + *known_p = false; + return; + } + + woffset = xoffset; + woffset += wi::udiv_trunc (addr_wide_int (DECL_FIELD_BIT_OFFSET (field)), + BITS_PER_UNIT); - if (! 
host_integerp (xoffset, 1)) + if (!wi::fits_uhwi_p (woffset)) { *known_p = false; return; } - *offset += (tree_low_cst (xoffset, 1) - + (tree_low_cst (DECL_FIELD_BIT_OFFSET (field), 1) - / BITS_PER_UNIT)); + *offset += woffset.to_uhwi (); x = TREE_OPERAND (x, 0); } diff --git a/gcc/asan.c b/gcc/asan.c index c037ebfd62f..84c603ac521 100644 --- a/gcc/asan.c +++ b/gcc/asan.c @@ -1996,7 +1996,7 @@ asan_add_global (tree decl, tree type, vec<constructor_elt, va_gc> *v) CONSTRUCTOR_APPEND_ELT (vinner, NULL_TREE, fold_convert (const_ptr_type_node, build_fold_addr_expr (refdecl))); - size = tree_low_cst (DECL_SIZE_UNIT (decl), 1); + size = tree_to_uhwi (DECL_SIZE_UNIT (decl)); CONSTRUCTOR_APPEND_ELT (vinner, NULL_TREE, build_int_cst (uptr, size)); size += asan_red_zone_size (size); CONSTRUCTOR_APPEND_ELT (vinner, NULL_TREE, build_int_cst (uptr, size)); diff --git a/gcc/builtins.c b/gcc/builtins.c index 5df5c548bd7..0304ec8faa7 100644 --- a/gcc/builtins.c +++ b/gcc/builtins.c @@ -360,8 +360,8 @@ get_object_alignment_2 (tree exp, unsigned int *alignp, if (TREE_CODE (addr) == BIT_AND_EXPR && TREE_CODE (TREE_OPERAND (addr, 1)) == INTEGER_CST) { - align = (TREE_INT_CST_LOW (TREE_OPERAND (addr, 1)) - & -TREE_INT_CST_LOW (TREE_OPERAND (addr, 1))); + align = (tree_to_hwi (TREE_OPERAND (addr, 1)) + & -tree_to_hwi (TREE_OPERAND (addr, 1))); align *= BITS_PER_UNIT; addr = TREE_OPERAND (addr, 0); } @@ -378,7 +378,7 @@ get_object_alignment_2 (tree exp, unsigned int *alignp, { unsigned HOST_WIDE_INT step = 1; if (TMR_STEP (exp)) - step = TREE_INT_CST_LOW (TMR_STEP (exp)); + step = tree_to_hwi (TMR_STEP (exp)); align = MIN (align, (step & -step) * BITS_PER_UNIT); } if (TMR_INDEX2 (exp)) @@ -400,7 +400,7 @@ get_object_alignment_2 (tree exp, unsigned int *alignp, bitpos += ptr_bitpos; if (TREE_CODE (exp) == MEM_REF || TREE_CODE (exp) == TARGET_MEM_REF) - bitpos += mem_ref_offset (exp).low * BITS_PER_UNIT; + bitpos += mem_ref_offset (exp).to_short_addr () * BITS_PER_UNIT; } } else if 
(TREE_CODE (exp) == STRING_CST) @@ -429,23 +429,23 @@ get_object_alignment_2 (tree exp, unsigned int *alignp, } else next_offset = NULL; - if (host_integerp (offset, 1)) + if (tree_fits_uhwi_p (offset)) { /* Any overflow in calculating offset_bits won't change the alignment. */ unsigned offset_bits - = ((unsigned) tree_low_cst (offset, 1) * BITS_PER_UNIT); + = ((unsigned) tree_to_uhwi (offset) * BITS_PER_UNIT); if (offset_bits) inner = MIN (inner, (offset_bits & -offset_bits)); } else if (TREE_CODE (offset) == MULT_EXPR - && host_integerp (TREE_OPERAND (offset, 1), 1)) + && tree_fits_uhwi_p (TREE_OPERAND (offset, 1))) { /* Any overflow in calculating offset_factor won't change the alignment. */ unsigned offset_factor - = ((unsigned) tree_low_cst (TREE_OPERAND (offset, 1), 1) + = ((unsigned) tree_to_uhwi (TREE_OPERAND (offset, 1)) * BITS_PER_UNIT); if (offset_factor) @@ -536,7 +536,7 @@ get_pointer_alignment_1 (tree exp, unsigned int *alignp, else if (TREE_CODE (exp) == INTEGER_CST) { *alignp = BIGGEST_ALIGNMENT; - *bitposp = ((TREE_INT_CST_LOW (exp) * BITS_PER_UNIT) + *bitposp = ((tree_to_hwi (exp) * BITS_PER_UNIT) & (BIGGEST_ALIGNMENT - 1)); return true; } @@ -645,10 +645,10 @@ c_strlen (tree src, int only_value) a null character if we can represent it as a single HOST_WIDE_INT. */ if (offset_node == 0) offset = 0; - else if (! host_integerp (offset_node, 0)) + else if (!tree_fits_shwi_p (offset_node)) offset = -1; else - offset = tree_low_cst (offset_node, 0); + offset = tree_to_shwi (offset_node); /* If the offset is known to be out of bounds, warn, and call strlen at runtime. 
*/ @@ -686,27 +686,31 @@ c_getstr (tree src) if (offset_node == 0) return TREE_STRING_POINTER (src); - else if (!host_integerp (offset_node, 1) + else if (!tree_fits_uhwi_p (offset_node) || compare_tree_int (offset_node, TREE_STRING_LENGTH (src) - 1) > 0) return 0; - return TREE_STRING_POINTER (src) + tree_low_cst (offset_node, 1); + return TREE_STRING_POINTER (src) + tree_to_uhwi (offset_node); } -/* Return a CONST_INT or CONST_DOUBLE corresponding to target reading +/* Return a constant integer corresponding to target reading GET_MODE_BITSIZE (MODE) bits from string constant STR. */ static rtx c_readstr (const char *str, enum machine_mode mode) { - HOST_WIDE_INT c[2]; + wide_int c; HOST_WIDE_INT ch; unsigned int i, j; + HOST_WIDE_INT tmp[MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_WIDE_INT]; + unsigned int len = (GET_MODE_PRECISION (mode) + HOST_BITS_PER_WIDE_INT - 1) + / HOST_BITS_PER_WIDE_INT; + + for (i = 0; i < len; i++) + tmp[i] = 0; gcc_assert (GET_MODE_CLASS (mode) == MODE_INT); - c[0] = 0; - c[1] = 0; ch = 1; for (i = 0; i < GET_MODE_SIZE (mode); i++) { @@ -717,13 +721,14 @@ c_readstr (const char *str, enum machine_mode mode) && GET_MODE_SIZE (mode) >= UNITS_PER_WORD) j = j + UNITS_PER_WORD - 2 * (j % UNITS_PER_WORD) - 1; j *= BITS_PER_UNIT; - gcc_assert (j < HOST_BITS_PER_DOUBLE_INT); if (ch) ch = (unsigned char) str[i]; - c[j / HOST_BITS_PER_WIDE_INT] |= ch << (j % HOST_BITS_PER_WIDE_INT); + tmp[j / HOST_BITS_PER_WIDE_INT] |= ch << (j % HOST_BITS_PER_WIDE_INT); } - return immed_double_const (c[0], c[1], mode); + + c = wide_int::from_array (tmp, len, GET_MODE_PRECISION (mode)); + return immed_wide_int_const (c, mode); } /* Cast a target constant CST to target CHAR and if that value fits into @@ -739,7 +744,9 @@ target_char_cast (tree cst, char *p) || CHAR_TYPE_SIZE > HOST_BITS_PER_WIDE_INT) return 1; - val = TREE_INT_CST_LOW (cst); + /* Do not care if it fits or not right here. 
*/ + val = tree_to_hwi (cst); + if (CHAR_TYPE_SIZE < HOST_BITS_PER_WIDE_INT) val &= (((unsigned HOST_WIDE_INT) 1) << CHAR_TYPE_SIZE) - 1; @@ -3201,7 +3208,7 @@ expand_builtin_mempcpy_args (tree dest, tree src, tree len, return NULL_RTX; /* If LEN is not constant, call the normal function. */ - if (! host_integerp (len, 1)) + if (! tree_fits_uhwi_p (len)) return NULL_RTX; len_rtx = expand_normal (len); @@ -3436,7 +3443,7 @@ expand_builtin_strncpy (tree exp, rtx target) tree slen = c_strlen (src, 1); /* We must be passed a constant len and src parameter. */ - if (!host_integerp (len, 1) || !slen || !host_integerp (slen, 1)) + if (!tree_fits_uhwi_p (len) || !slen || !tree_fits_uhwi_p (slen)) return NULL_RTX; slen = size_binop_loc (loc, PLUS_EXPR, slen, ssize_int (1)); @@ -3450,15 +3457,15 @@ expand_builtin_strncpy (tree exp, rtx target) const char *p = c_getstr (src); rtx dest_mem; - if (!p || dest_align == 0 || !host_integerp (len, 1) - || !can_store_by_pieces (tree_low_cst (len, 1), + if (!p || dest_align == 0 || !tree_fits_uhwi_p (len) + || !can_store_by_pieces (tree_to_uhwi (len), builtin_strncpy_read_str, CONST_CAST (char *, p), dest_align, false)) return NULL_RTX; dest_mem = get_memory_rtx (dest, len); - store_by_pieces (dest_mem, tree_low_cst (len, 1), + store_by_pieces (dest_mem, tree_to_uhwi (len), builtin_strncpy_read_str, CONST_CAST (char *, p), dest_align, false, 0); dest_mem = force_operand (XEXP (dest_mem, 0), target); @@ -3591,13 +3598,13 @@ expand_builtin_memset_args (tree dest, tree val, tree len, * the coefficients by pieces (in the required modes). * We can't pass builtin_memset_gen_str as that emits RTL. 
*/ c = 1; - if (host_integerp (len, 1) - && can_store_by_pieces (tree_low_cst (len, 1), + if (tree_fits_uhwi_p (len) + && can_store_by_pieces (tree_to_uhwi (len), builtin_memset_read_str, &c, dest_align, true)) { val_rtx = force_reg (val_mode, val_rtx); - store_by_pieces (dest_mem, tree_low_cst (len, 1), + store_by_pieces (dest_mem, tree_to_uhwi (len), builtin_memset_gen_str, val_rtx, dest_align, true, 0); } @@ -3616,11 +3623,11 @@ expand_builtin_memset_args (tree dest, tree val, tree len, if (c) { - if (host_integerp (len, 1) - && can_store_by_pieces (tree_low_cst (len, 1), + if (tree_fits_uhwi_p (len) + && can_store_by_pieces (tree_to_uhwi (len), builtin_memset_read_str, &c, dest_align, true)) - store_by_pieces (dest_mem, tree_low_cst (len, 1), + store_by_pieces (dest_mem, tree_to_uhwi (len), builtin_memset_read_str, &c, dest_align, true, 0); else if (!set_storage_via_setmem (dest_mem, len_rtx, gen_int_mode (c, val_mode), @@ -4416,7 +4423,7 @@ expand_builtin_frame_address (tree fndecl, tree exp) if (call_expr_nargs (exp) == 0) /* Warning about missing arg was already issued. */ return const0_rtx; - else if (! host_integerp (CALL_EXPR_ARG (exp, 0), 1)) + else if (! tree_fits_uhwi_p (CALL_EXPR_ARG (exp, 0))) { if (DECL_FUNCTION_CODE (fndecl) == BUILT_IN_FRAME_ADDRESS) error ("invalid argument to %<__builtin_frame_address%>"); @@ -4428,7 +4435,7 @@ expand_builtin_frame_address (tree fndecl, tree exp) { rtx tem = expand_builtin_return_addr (DECL_FUNCTION_CODE (fndecl), - tree_low_cst (CALL_EXPR_ARG (exp, 0), 1)); + tree_to_uhwi (CALL_EXPR_ARG (exp, 0))); /* Some ports cannot access arbitrary stack frames. */ if (tem == NULL) @@ -4482,7 +4489,7 @@ expand_builtin_alloca (tree exp, bool cannot_accumulate) /* Compute the alignment. */ align = (alloca_with_align - ? TREE_INT_CST_LOW (CALL_EXPR_ARG (exp, 1)) + ? tree_to_hwi (CALL_EXPR_ARG (exp, 1)) : BIGGEST_ALIGNMENT); /* Allocate the desired space. 
*/ @@ -4923,12 +4930,12 @@ expand_builtin_signbit (tree exp, rtx target) if (bitpos < GET_MODE_BITSIZE (rmode)) { - double_int mask = double_int_zero.set_bit (bitpos); + wide_int mask = wi::set_bit_in_zero (bitpos, GET_MODE_PRECISION (rmode)); if (GET_MODE_SIZE (imode) > GET_MODE_SIZE (rmode)) temp = gen_lowpart (rmode, temp); temp = expand_binop (rmode, and_optab, temp, - immed_double_int_const (mask, rmode), + immed_wide_int_const (mask, rmode), NULL_RTX, 1, OPTAB_LIB_WIDEN); } else @@ -5314,7 +5321,7 @@ expand_builtin_atomic_compare_exchange (enum machine_mode mode, tree exp, weak = CALL_EXPR_ARG (exp, 3); is_weak = false; - if (host_integerp (weak, 0) && tree_low_cst (weak, 0) != 0) + if (tree_fits_shwi_p (weak) && tree_to_shwi (weak) != 0) is_weak = true; oldval = expect; @@ -7954,8 +7961,9 @@ fold_builtin_int_roundingfn (location_t loc, tree fndecl, tree arg) { tree itype = TREE_TYPE (TREE_TYPE (fndecl)); tree ftype = TREE_TYPE (arg); - double_int val; + wide_int val; REAL_VALUE_TYPE r; + bool fail = false; switch (DECL_FUNCTION_CODE (fndecl)) { @@ -7981,9 +7989,10 @@ fold_builtin_int_roundingfn (location_t loc, tree fndecl, tree arg) gcc_unreachable (); } - real_to_integer2 ((HOST_WIDE_INT *)&val.low, &val.high, &r); - if (double_int_fits_to_tree_p (itype, val)) - return double_int_to_tree (itype, val); + val = real_to_integer (&r, &fail, + TYPE_PRECISION (itype)); + if (!fail) + return wide_int_to_tree (itype, val); } } @@ -8016,94 +8025,33 @@ fold_builtin_bitop (tree fndecl, tree arg) /* Optimize for constant argument. */ if (TREE_CODE (arg) == INTEGER_CST && !TREE_OVERFLOW (arg)) { - HOST_WIDE_INT hi, width, result; - unsigned HOST_WIDE_INT lo; - tree type; - - type = TREE_TYPE (arg); - width = TYPE_PRECISION (type); - lo = TREE_INT_CST_LOW (arg); - - /* Clear all the bits that are beyond the type's precision. 
*/ - if (width > HOST_BITS_PER_WIDE_INT) - { - hi = TREE_INT_CST_HIGH (arg); - if (width < HOST_BITS_PER_DOUBLE_INT) - hi &= ~(HOST_WIDE_INT_M1U << (width - HOST_BITS_PER_WIDE_INT)); - } - else - { - hi = 0; - if (width < HOST_BITS_PER_WIDE_INT) - lo &= ~(HOST_WIDE_INT_M1U << width); - } + wide_int warg = arg; + int result; switch (DECL_FUNCTION_CODE (fndecl)) { CASE_INT_FN (BUILT_IN_FFS): - if (lo != 0) - result = ffs_hwi (lo); - else if (hi != 0) - result = HOST_BITS_PER_WIDE_INT + ffs_hwi (hi); - else - result = 0; + result = wi::ffs (warg); break; CASE_INT_FN (BUILT_IN_CLZ): - if (hi != 0) - result = width - floor_log2 (hi) - 1 - HOST_BITS_PER_WIDE_INT; - else if (lo != 0) - result = width - floor_log2 (lo) - 1; - else if (! CLZ_DEFINED_VALUE_AT_ZERO (TYPE_MODE (type), result)) - result = width; + result = wi::clz (warg); break; CASE_INT_FN (BUILT_IN_CTZ): - if (lo != 0) - result = ctz_hwi (lo); - else if (hi != 0) - result = HOST_BITS_PER_WIDE_INT + ctz_hwi (hi); - else if (! CTZ_DEFINED_VALUE_AT_ZERO (TYPE_MODE (type), result)) - result = width; + result = wi::ctz (warg); break; CASE_INT_FN (BUILT_IN_CLRSB): - if (width > 2 * HOST_BITS_PER_WIDE_INT) - return NULL_TREE; - if (width > HOST_BITS_PER_WIDE_INT - && (hi & ((unsigned HOST_WIDE_INT) 1 - << (width - HOST_BITS_PER_WIDE_INT - 1))) != 0) - { - hi = ~hi & ~(HOST_WIDE_INT_M1U - << (width - HOST_BITS_PER_WIDE_INT - 1)); - lo = ~lo; - } - else if (width <= HOST_BITS_PER_WIDE_INT - && (lo & ((unsigned HOST_WIDE_INT) 1 << (width - 1))) != 0) - lo = ~lo & ~(HOST_WIDE_INT_M1U << (width - 1)); - if (hi != 0) - result = width - floor_log2 (hi) - 2 - HOST_BITS_PER_WIDE_INT; - else if (lo != 0) - result = width - floor_log2 (lo) - 2; - else - result = width - 1; + result = wi::clrsb (warg); break; CASE_INT_FN (BUILT_IN_POPCOUNT): - result = 0; - while (lo) - result++, lo &= lo - 1; - while (hi) - result++, hi &= (unsigned HOST_WIDE_INT) hi - 1; + result = wi::popcount (warg); break; CASE_INT_FN (BUILT_IN_PARITY): - 
result = 0; - while (lo) - result++, lo &= lo - 1; - while (hi) - result++, hi &= (unsigned HOST_WIDE_INT) hi - 1; - result &= 1; + result = wi::parity (warg); break; default: @@ -8127,49 +8075,24 @@ fold_builtin_bswap (tree fndecl, tree arg) /* Optimize constant value. */ if (TREE_CODE (arg) == INTEGER_CST && !TREE_OVERFLOW (arg)) { - HOST_WIDE_INT hi, width, r_hi = 0; - unsigned HOST_WIDE_INT lo, r_lo = 0; tree type = TREE_TYPE (TREE_TYPE (fndecl)); - width = TYPE_PRECISION (type); - lo = TREE_INT_CST_LOW (arg); - hi = TREE_INT_CST_HIGH (arg); - switch (DECL_FUNCTION_CODE (fndecl)) { case BUILT_IN_BSWAP16: case BUILT_IN_BSWAP32: case BUILT_IN_BSWAP64: { - int s; - - for (s = 0; s < width; s += 8) - { - int d = width - s - 8; - unsigned HOST_WIDE_INT byte; - - if (s < HOST_BITS_PER_WIDE_INT) - byte = (lo >> s) & 0xff; - else - byte = (hi >> (s - HOST_BITS_PER_WIDE_INT)) & 0xff; - - if (d < HOST_BITS_PER_WIDE_INT) - r_lo |= byte << d; - else - r_hi |= byte << (d - HOST_BITS_PER_WIDE_INT); - } + signop sgn = TYPE_SIGN (type); + tree result = + wide_int_to_tree (type, + wide_int::from (arg, TYPE_PRECISION (type), + sgn).bswap ()); + return result; } - - break; - default: gcc_unreachable (); } - - if (width < HOST_BITS_PER_WIDE_INT) - return build_int_cst (type, r_lo); - else - return build_int_cst_wide (type, r_lo, r_hi); } return NULL_TREE; @@ -8231,7 +8154,7 @@ fold_builtin_logarithm (location_t loc, tree fndecl, tree arg, /* Prepare to do logN(exp10(exponent) -> exponent*logN(10). */ { REAL_VALUE_TYPE dconst10; - real_from_integer (&dconst10, VOIDmode, 10, 0, 0); + real_from_integer (&dconst10, VOIDmode, 10, SIGNED); x = build_real (type, dconst10); } exponent = CALL_EXPR_ARG (arg, 0); @@ -8384,7 +8307,7 @@ fold_builtin_pow (location_t loc, tree fndecl, tree arg0, tree arg1, tree type) /* Check for an integer exponent. */ n = real_to_integer (&c); - real_from_integer (&cint, VOIDmode, n, n < 0 ? 
-1 : 0, 0); + real_from_integer (&cint, VOIDmode, n, SIGNED); if (real_identical (&c, &cint)) { /* Attempt to evaluate pow at compile-time, unless this should @@ -8482,9 +8405,9 @@ fold_builtin_powi (location_t loc, tree fndecl ATTRIBUTE_UNUSED, if (real_onep (arg0)) return omit_one_operand_loc (loc, type, build_real (type, dconst1), arg1); - if (host_integerp (arg1, 0)) + if (tree_fits_shwi_p (arg1)) { - HOST_WIDE_INT c = TREE_INT_CST_LOW (arg1); + HOST_WIDE_INT c = tree_to_shwi (arg1); /* Evaluate powi at compile-time. */ if (TREE_CODE (arg0) == REAL_CST @@ -8581,7 +8504,7 @@ fold_builtin_memset (location_t loc, tree dest, tree c, tree len, || ! validate_arg (len, INTEGER_TYPE)) return NULL_TREE; - if (! host_integerp (len, 1)) + if (! tree_fits_uhwi_p (len)) return NULL_TREE; /* If the LEN parameter is zero, return DEST. */ @@ -8611,7 +8534,7 @@ fold_builtin_memset (location_t loc, tree dest, tree c, tree len, if (! var_decl_component_p (var)) return NULL_TREE; - length = tree_low_cst (len, 1); + length = tree_to_uhwi (len); if (GET_MODE_SIZE (TYPE_MODE (etype)) != length || get_pointer_alignment (dest) / BITS_PER_UNIT < length) return NULL_TREE; @@ -8626,7 +8549,7 @@ fold_builtin_memset (location_t loc, tree dest, tree c, tree len, if (CHAR_BIT != 8 || BITS_PER_UNIT != 8 || HOST_BITS_PER_WIDE_INT > 64) return NULL_TREE; - cval = TREE_INT_CST_LOW (c); + cval = tree_to_hwi (c); cval &= 0xff; cval |= cval << 8; cval |= cval << 16; @@ -8714,9 +8637,9 @@ fold_builtin_memory_op (location_t loc, tree dest, tree src, if (!dest_align || !src_align) return NULL_TREE; if (readonly_data_expr (src) - || (host_integerp (len, 1) + || (tree_fits_uhwi_p (len) && (MIN (src_align, dest_align) / BITS_PER_UNIT - >= (unsigned HOST_WIDE_INT) tree_low_cst (len, 1)))) + >= (unsigned HOST_WIDE_INT) tree_to_uhwi (len)))) { tree fn = builtin_decl_implicit (BUILT_IN_MEMCPY); if (!fn) @@ -8739,8 +8662,8 @@ fold_builtin_memory_op (location_t loc, tree dest, tree src, destvar = TREE_OPERAND 
(dest, 0); dest_base = get_ref_base_and_extent (destvar, &dest_offset, &size, &maxsize); - if (host_integerp (len, 1)) - maxsize = tree_low_cst (len, 1); + if (tree_fits_uhwi_p (len)) + maxsize = tree_to_uhwi (len); else maxsize = -1; src_offset /= BITS_PER_UNIT; @@ -8756,20 +8679,19 @@ fold_builtin_memory_op (location_t loc, tree dest, tree src, else if (TREE_CODE (src_base) == MEM_REF && TREE_CODE (dest_base) == MEM_REF) { - double_int off; + addr_wide_int off; if (! operand_equal_p (TREE_OPERAND (src_base, 0), TREE_OPERAND (dest_base, 0), 0)) return NULL_TREE; - off = mem_ref_offset (src_base) + - double_int::from_shwi (src_offset); - if (!off.fits_shwi ()) + off = mem_ref_offset (src_base) + src_offset; + if (!wi::fits_shwi_p (off)) return NULL_TREE; - src_offset = off.low; - off = mem_ref_offset (dest_base) + - double_int::from_shwi (dest_offset); - if (!off.fits_shwi ()) + src_offset = off.to_shwi (); + + off = mem_ref_offset (dest_base) + dest_offset; + if (!wi::fits_shwi_p (off)) return NULL_TREE; - dest_offset = off.low; + dest_offset = off.to_shwi (); if (ranges_overlap_p (src_offset, maxsize, dest_offset, maxsize)) return NULL_TREE; @@ -8806,7 +8728,7 @@ fold_builtin_memory_op (location_t loc, tree dest, tree src, return NULL_TREE; } - if (!host_integerp (len, 0)) + if (!tree_fits_shwi_p (len)) return NULL_TREE; /* FIXME: This logic lose for arguments like (type *)malloc (sizeof (type)), @@ -9094,7 +9016,7 @@ fold_builtin_memchr (location_t loc, tree arg1, tree arg2, tree len, tree type) const char *p1; if (TREE_CODE (arg2) != INTEGER_CST - || !host_integerp (len, 1)) + || !tree_fits_uhwi_p (len)) return NULL_TREE; p1 = c_getstr (arg1); @@ -9107,7 +9029,7 @@ fold_builtin_memchr (location_t loc, tree arg1, tree arg2, tree len, tree type) if (target_char_cast (arg2, &c)) return NULL_TREE; - r = (const char *) memchr (p1, c, tree_low_cst (len, 1)); + r = (const char *) memchr (p1, c, tree_to_uhwi (len)); if (r == NULL) return build_int_cst (TREE_TYPE 
(arg1), 0); @@ -9146,11 +9068,11 @@ fold_builtin_memcmp (location_t loc, tree arg1, tree arg2, tree len) /* If all arguments are constant, and the value of len is not greater than the lengths of arg1 and arg2, evaluate at compile-time. */ - if (host_integerp (len, 1) && p1 && p2 + if (tree_fits_uhwi_p (len) && p1 && p2 && compare_tree_int (len, strlen (p1) + 1) <= 0 && compare_tree_int (len, strlen (p2) + 1) <= 0) { - const int r = memcmp (p1, p2, tree_low_cst (len, 1)); + const int r = memcmp (p1, p2, tree_to_uhwi (len)); if (r > 0) return integer_one_node; @@ -9162,7 +9084,7 @@ fold_builtin_memcmp (location_t loc, tree arg1, tree arg2, tree len) /* If len parameter is one, return an expression corresponding to (*(const unsigned char*)arg1 - (const unsigned char*)arg2). */ - if (host_integerp (len, 1) && tree_low_cst (len, 1) == 1) + if (tree_fits_uhwi_p (len) && tree_to_uhwi (len) == 1) { tree cst_uchar_node = build_type_variant (unsigned_char_type_node, 1, 0); tree cst_uchar_ptr_node @@ -9274,9 +9196,9 @@ fold_builtin_strncmp (location_t loc, tree arg1, tree arg2, tree len) p1 = c_getstr (arg1); p2 = c_getstr (arg2); - if (host_integerp (len, 1) && p1 && p2) + if (tree_fits_uhwi_p (len) && p1 && p2) { - const int i = strncmp (p1, p2, tree_low_cst (len, 1)); + const int i = strncmp (p1, p2, tree_to_uhwi (len)); if (i > 0) return integer_one_node; else if (i < 0) @@ -9322,7 +9244,7 @@ fold_builtin_strncmp (location_t loc, tree arg1, tree arg2, tree len) /* If len parameter is one, return an expression corresponding to (*(const unsigned char*)arg1 - (const unsigned char*)arg2). */ - if (host_integerp (len, 1) && tree_low_cst (len, 1) == 1) + if (tree_fits_uhwi_p (len) && tree_to_uhwi (len) == 1) { tree cst_uchar_node = build_type_variant (unsigned_char_type_node, 1, 0); tree cst_uchar_ptr_node @@ -9780,7 +9702,7 @@ fold_builtin_load_exponent (location_t loc, tree arg0, tree arg1, /* If both arguments are constant, then try to evaluate it. 
*/ if ((ldexp || REAL_MODE_FORMAT (TYPE_MODE (type))->b == 2) && TREE_CODE (arg0) == REAL_CST && !TREE_OVERFLOW (arg0) - && host_integerp (arg1, 0)) + && tree_fits_shwi_p (arg1)) { /* Bound the maximum adjustment to twice the range of the mode's valid exponents. Use abs to ensure the range is @@ -9790,7 +9712,7 @@ fold_builtin_load_exponent (location_t loc, tree arg0, tree arg1, - REAL_MODE_FORMAT (TYPE_MODE (type))->emin); /* Get the user-requested adjustment. */ - const HOST_WIDE_INT req_exp_adj = tree_low_cst (arg1, 0); + const HOST_WIDE_INT req_exp_adj = tree_to_shwi (arg1); /* The requested adjustment must be inside this range. This is a preliminary cap to avoid things like overflow, we @@ -12271,7 +12193,7 @@ fold_builtin_snprintf (location_t loc, tree dest, tree destsize, tree fmt, if (orig && !validate_arg (orig, POINTER_TYPE)) return NULL_TREE; - if (!host_integerp (destsize, 1)) + if (!tree_fits_uhwi_p (destsize)) return NULL_TREE; /* Check whether the format is a literal string constant. */ @@ -12285,7 +12207,7 @@ fold_builtin_snprintf (location_t loc, tree dest, tree destsize, tree fmt, if (!init_target_chars ()) return NULL_TREE; - destlen = tree_low_cst (destsize, 1); + destlen = tree_to_uhwi (destsize); /* If the format doesn't contain % args or %%, use strcpy. */ if (strchr (fmt_str, target_percent) == NULL) @@ -12330,10 +12252,10 @@ fold_builtin_snprintf (location_t loc, tree dest, tree destsize, tree fmt, return NULL_TREE; retval = c_strlen (orig, 1); - if (!retval || !host_integerp (retval, 1)) + if (!retval || !tree_fits_uhwi_p (retval)) return NULL_TREE; - origlen = tree_low_cst (retval, 1); + origlen = tree_to_uhwi (retval); /* We could expand this as memcpy (str1, str2, cst - 1); str1[cst - 1] = '\0'; or to @@ -12395,7 +12317,7 @@ expand_builtin_object_size (tree exp) return const0_rtx; } - object_size_type = tree_low_cst (ost, 0); + object_size_type = tree_to_shwi (ost); return object_size_type < 2 ? 
constm1_rtx : const0_rtx; } @@ -12424,10 +12346,10 @@ expand_builtin_memory_chk (tree exp, rtx target, enum machine_mode mode, len = CALL_EXPR_ARG (exp, 2); size = CALL_EXPR_ARG (exp, 3); - if (! host_integerp (size, 1)) + if (! tree_fits_uhwi_p (size)) return NULL_RTX; - if (host_integerp (len, 1) || integer_all_onesp (size)) + if (tree_fits_uhwi_p (len) || integer_all_onesp (size)) { tree fn; @@ -12558,22 +12480,22 @@ maybe_emit_chk_warning (tree exp, enum built_in_function fcode) if (!len || !size) return; - if (! host_integerp (size, 1) || integer_all_onesp (size)) + if (! tree_fits_uhwi_p (size) || integer_all_onesp (size)) return; if (is_strlen) { len = c_strlen (len, 1); - if (! len || ! host_integerp (len, 1) || tree_int_cst_lt (len, size)) + if (! len || ! tree_fits_uhwi_p (len) || tree_int_cst_lt (len, size)) return; } else if (fcode == BUILT_IN_STRNCAT_CHK) { tree src = CALL_EXPR_ARG (exp, 1); - if (! src || ! host_integerp (len, 1) || tree_int_cst_lt (len, size)) + if (! src || ! tree_fits_uhwi_p (len) || tree_int_cst_lt (len, size)) return; src = c_strlen (src, 1); - if (! src || ! host_integerp (src, 1)) + if (! src || ! tree_fits_uhwi_p (src)) { warning_at (loc, 0, "%Kcall to %D might overflow destination buffer", exp, get_callee_fndecl (exp)); @@ -12582,7 +12504,7 @@ maybe_emit_chk_warning (tree exp, enum built_in_function fcode) else if (tree_int_cst_lt (src, size)) return; } - else if (! host_integerp (len, 1) || ! tree_int_cst_lt (size, len)) + else if (! tree_fits_uhwi_p (len) || ! tree_int_cst_lt (size, len)) return; warning_at (loc, 0, "%Kcall to %D will always overflow destination buffer", @@ -12606,7 +12528,7 @@ maybe_emit_sprintf_chk_warning (tree exp, enum built_in_function fcode) size = CALL_EXPR_ARG (exp, 2); fmt = CALL_EXPR_ARG (exp, 3); - if (! host_integerp (size, 1) || integer_all_onesp (size)) + if (! tree_fits_uhwi_p (size) || integer_all_onesp (size)) return; /* Check whether the format is a literal string constant. 
*/ @@ -12634,7 +12556,7 @@ maybe_emit_sprintf_chk_warning (tree exp, enum built_in_function fcode) return; len = c_strlen (arg, 1); - if (!len || ! host_integerp (len, 1)) + if (!len || ! tree_fits_uhwi_p (len)) return; } else @@ -12677,6 +12599,7 @@ fold_builtin_object_size (tree ptr, tree ost) { unsigned HOST_WIDE_INT bytes; int object_size_type; + int precision = TYPE_PRECISION (TREE_TYPE (ptr)); if (!validate_arg (ptr, POINTER_TYPE) || !validate_arg (ost, INTEGER_TYPE)) @@ -12689,7 +12612,7 @@ fold_builtin_object_size (tree ptr, tree ost) || compare_tree_int (ost, 3) > 0) return NULL_TREE; - object_size_type = tree_low_cst (ost, 0); + object_size_type = tree_to_shwi (ost); /* __builtin_object_size doesn't evaluate side-effects in its arguments; if there are any side-effects, it returns (size_t) -1 for types 0 and 1 @@ -12699,21 +12622,24 @@ fold_builtin_object_size (tree ptr, tree ost) if (TREE_CODE (ptr) == ADDR_EXPR) { - bytes = compute_builtin_object_size (ptr, object_size_type); - if (double_int_fits_to_tree_p (size_type_node, - double_int::from_uhwi (bytes))) - return build_int_cstu (size_type_node, bytes); + + wide_int wbytes + = wi::uhwi (compute_builtin_object_size (ptr, object_size_type), + precision); + if (wi::fits_to_tree_p (wbytes, size_type_node)) + return wide_int_to_tree (size_type_node, wbytes); } else if (TREE_CODE (ptr) == SSA_NAME) { /* If object size is not known yet, delay folding until later. Maybe subsequent passes will help determining it. */ + wide_int wbytes; bytes = compute_builtin_object_size (ptr, object_size_type); + wbytes = wi::uhwi (bytes, precision); if (bytes != (unsigned HOST_WIDE_INT) (object_size_type < 2 ? 
-1 : 0) - && double_int_fits_to_tree_p (size_type_node, - double_int::from_uhwi (bytes))) - return build_int_cstu (size_type_node, bytes); + && wi::fits_to_tree_p (wbytes, size_type_node)) + return wide_int_to_tree (size_type_node, wbytes); } return NULL_TREE; @@ -12755,17 +12681,17 @@ fold_builtin_memory_chk (location_t loc, tree fndecl, } } - if (! host_integerp (size, 1)) + if (! tree_fits_uhwi_p (size)) return NULL_TREE; if (! integer_all_onesp (size)) { - if (! host_integerp (len, 1)) + if (! tree_fits_uhwi_p (len)) { /* If LEN is not constant, try MAXLEN too. For MAXLEN only allow optimizing into non-_ocs function if SIZE is >= MAXLEN, never convert to __ocs_fail (). */ - if (maxlen == NULL_TREE || ! host_integerp (maxlen, 1)) + if (maxlen == NULL_TREE || ! tree_fits_uhwi_p (maxlen)) { if (fcode == BUILT_IN_MEMPCPY_CHK && ignore) { @@ -12837,18 +12763,18 @@ fold_builtin_stxcpy_chk (location_t loc, tree fndecl, tree dest, if (fcode == BUILT_IN_STRCPY_CHK && operand_equal_p (src, dest, 0)) return fold_convert_loc (loc, TREE_TYPE (TREE_TYPE (fndecl)), dest); - if (! host_integerp (size, 1)) + if (! tree_fits_uhwi_p (size)) return NULL_TREE; if (! integer_all_onesp (size)) { len = c_strlen (src, 1); - if (! len || ! host_integerp (len, 1)) + if (! len || ! tree_fits_uhwi_p (len)) { /* If LEN is not constant, try MAXLEN too. For MAXLEN only allow optimizing into non-_ocs function if SIZE is >= MAXLEN, never convert to __ocs_fail (). */ - if (maxlen == NULL_TREE || ! host_integerp (maxlen, 1)) + if (maxlen == NULL_TREE || ! tree_fits_uhwi_p (maxlen)) { if (fcode == BUILT_IN_STPCPY_CHK) { @@ -12924,17 +12850,17 @@ fold_builtin_stxncpy_chk (location_t loc, tree dest, tree src, return build_call_expr_loc (loc, fn, 4, dest, src, len, size); } - if (! host_integerp (size, 1)) + if (! tree_fits_uhwi_p (size)) return NULL_TREE; if (! integer_all_onesp (size)) { - if (! host_integerp (len, 1)) + if (! tree_fits_uhwi_p (len)) { /* If LEN is not constant, try MAXLEN too. 
For MAXLEN only allow optimizing into non-_ocs function if SIZE is >= MAXLEN, never convert to __ocs_fail (). */ - if (maxlen == NULL_TREE || ! host_integerp (maxlen, 1)) + if (maxlen == NULL_TREE || ! tree_fits_uhwi_p (maxlen)) return NULL_TREE; } else @@ -12973,7 +12899,7 @@ fold_builtin_strcat_chk (location_t loc, tree fndecl, tree dest, if (p && *p == '\0') return omit_one_operand_loc (loc, TREE_TYPE (TREE_TYPE (fndecl)), dest, src); - if (! host_integerp (size, 1) || ! integer_all_onesp (size)) + if (! tree_fits_uhwi_p (size) || ! integer_all_onesp (size)) return NULL_TREE; /* If __builtin_strcat_chk is used, assume strcat is available. */ @@ -13007,15 +12933,15 @@ fold_builtin_strncat_chk (location_t loc, tree fndecl, else if (integer_zerop (len)) return omit_one_operand_loc (loc, TREE_TYPE (TREE_TYPE (fndecl)), dest, src); - if (! host_integerp (size, 1)) + if (! tree_fits_uhwi_p (size)) return NULL_TREE; if (! integer_all_onesp (size)) { tree src_len = c_strlen (src, 1); if (src_len - && host_integerp (src_len, 1) - && host_integerp (len, 1) + && tree_fits_uhwi_p (src_len) + && tree_fits_uhwi_p (len) && ! tree_int_cst_lt (len, src_len)) { /* If LEN >= strlen (SRC), optimize into __strcat_chk. */ @@ -13064,7 +12990,7 @@ fold_builtin_sprintf_chk_1 (location_t loc, int nargs, tree *args, if (!validate_arg (fmt, POINTER_TYPE)) return NULL_TREE; - if (! host_integerp (size, 1)) + if (! tree_fits_uhwi_p (size)) return NULL_TREE; len = NULL_TREE; @@ -13095,7 +13021,7 @@ fold_builtin_sprintf_chk_1 (location_t loc, int nargs, tree *args, if (validate_arg (arg, POINTER_TYPE)) { len = c_strlen (arg, 1); - if (! len || ! host_integerp (len, 1)) + if (! len || ! tree_fits_uhwi_p (len)) len = NULL_TREE; } } @@ -13172,17 +13098,17 @@ fold_builtin_snprintf_chk_1 (location_t loc, int nargs, tree *args, if (!validate_arg (fmt, POINTER_TYPE)) return NULL_TREE; - if (! host_integerp (size, 1)) + if (! tree_fits_uhwi_p (size)) return NULL_TREE; if (! 
integer_all_onesp (size)) { - if (! host_integerp (len, 1)) + if (! tree_fits_uhwi_p (len)) { /* If LEN is not constant, try MAXLEN too. For MAXLEN only allow optimizing into non-_ocs function if SIZE is >= MAXLEN, never convert to __ocs_fail (). */ - if (maxlen == NULL_TREE || ! host_integerp (maxlen, 1)) + if (maxlen == NULL_TREE || ! tree_fits_uhwi_p (maxlen)) return NULL_TREE; } else @@ -13827,10 +13753,10 @@ do_mpfr_bessel_n (tree arg1, tree arg2, tree type, /* To proceed, MPFR must exactly represent the target floating point format, which only happens when the target base equals two. */ if (REAL_MODE_FORMAT (TYPE_MODE (type))->b == 2 - && host_integerp (arg1, 0) + && tree_fits_shwi_p (arg1) && TREE_CODE (arg2) == REAL_CST && !TREE_OVERFLOW (arg2)) { - const HOST_WIDE_INT n = tree_low_cst (arg1, 0); + const HOST_WIDE_INT n = tree_to_shwi (arg1); const REAL_VALUE_TYPE *const ra = &TREE_REAL_CST (arg2); if (n == (long)n diff --git a/gcc/c-family/c-ada-spec.c b/gcc/c-family/c-ada-spec.c index eac57838752..89379dbe4c8 100644 --- a/gcc/c-family/c-ada-spec.c +++ b/gcc/c-family/c-ada-spec.c @@ -29,21 +29,7 @@ along with GCC; see the file COPYING3. If not see #include "cpplib.h" #include "c-pragma.h" #include "cpp-id-data.h" - -/* Adapted from hwint.h to use the Ada prefix. */ -#if HOST_BITS_PER_WIDE_INT == HOST_BITS_PER_LONG -# if HOST_BITS_PER_WIDE_INT == 64 -# define ADA_HOST_WIDE_INT_PRINT_DOUBLE_HEX \ - "16#%" HOST_LONG_FORMAT "x%016" HOST_LONG_FORMAT "x#" -# else -# define ADA_HOST_WIDE_INT_PRINT_DOUBLE_HEX \ - "16#%" HOST_LONG_FORMAT "x%08" HOST_LONG_FORMAT "x#" -# endif -#else - /* We can assume that 'long long' is at least 64 bits. */ -# define ADA_HOST_WIDE_INT_PRINT_DOUBLE_HEX \ - "16#%" HOST_LONG_LONG_FORMAT "x%016" HOST_LONG_LONG_FORMAT "x#" -#endif /* HOST_BITS_PER_WIDE_INT == HOST_BITS_PER_LONG */ +#include "wide-int.h" /* Local functions, macros and variables. 
*/ static int dump_generic_ada_node (pretty_printer *, tree, tree, @@ -1794,7 +1780,7 @@ dump_ada_template (pretty_printer *buffer, tree t, static bool is_simple_enum (tree node) { - unsigned HOST_WIDE_INT count = 0; + HOST_WIDE_INT count = 0; tree value; for (value = TYPE_VALUES (node); value; value = TREE_CHAIN (value)) @@ -1804,9 +1790,9 @@ is_simple_enum (tree node) if (TREE_CODE (int_val) != INTEGER_CST) int_val = DECL_INITIAL (int_val); - if (!host_integerp (int_val, 0)) + if (!tree_fits_shwi_p (int_val)) return false; - else if (TREE_INT_CST_LOW (int_val) != count) + else if (tree_to_shwi (int_val) != count) return false; count++; @@ -2203,25 +2189,24 @@ dump_generic_ada_node (pretty_printer *buffer, tree node, tree type, to generate the (0 .. -1) range for flexible array members. */ if (TREE_TYPE (node) == sizetype) node = fold_convert (ssizetype, node); - if (host_integerp (node, 0)) - pp_wide_integer (buffer, TREE_INT_CST_LOW (node)); - else if (host_integerp (node, 1)) - pp_unsigned_wide_integer (buffer, TREE_INT_CST_LOW (node)); + if (tree_fits_shwi_p (node)) + pp_wide_integer (buffer, tree_to_shwi (node)); + else if (tree_fits_uhwi_p (node)) + pp_unsigned_wide_integer (buffer, tree_to_uhwi (node)); else { - tree val = node; - unsigned HOST_WIDE_INT low = TREE_INT_CST_LOW (val); - HOST_WIDE_INT high = TREE_INT_CST_HIGH (val); - - if (tree_int_cst_sgn (val) < 0) + wide_int val = node; + int i; + if (wi::neg_p (val)) { pp_minus (buffer); - high = ~high + !low; - low = -low; + val = -val; } sprintf (pp_buffer (buffer)->digit_buffer, - ADA_HOST_WIDE_INT_PRINT_DOUBLE_HEX, - (unsigned HOST_WIDE_INT) high, low); + "16#%" HOST_LONG_FORMAT "x", val.elt (val.get_len () - 1)); + for (i = val.get_len () - 2; i <= 0; i--) + sprintf (pp_buffer (buffer)->digit_buffer, + HOST_WIDE_INT_PRINT_PADDED_HEX, val.elt (i)); pp_string (buffer, pp_buffer (buffer)->digit_buffer); } break; diff --git a/gcc/c-family/c-common.c b/gcc/c-family/c-common.c index 
d830288864e..4b283b3428b 100644 --- a/gcc/c-family/c-common.c +++ b/gcc/c-family/c-common.c @@ -43,6 +43,7 @@ along with GCC; see the file COPYING3. If not see #include "opts.h" #include "cgraph.h" #include "target-def.h" +#include "wide-int-print.h" cpp_reader *parse_in; /* Declared in c-pragma.h. */ @@ -848,7 +849,7 @@ finish_fname_decls (void) for (saved = TREE_PURPOSE (stack); saved; saved = TREE_CHAIN (saved)) { tree decl = TREE_PURPOSE (saved); - unsigned ix = TREE_INT_CST_LOW (TREE_VALUE (saved)); + unsigned ix = tree_to_hwi (TREE_VALUE (saved)); *fname_vars[ix].decl = decl; } @@ -2453,7 +2454,7 @@ shorten_binary_op (tree result_type, tree op0, tree op1, bool bitwise) arg0 = c_common_get_narrower (op0, &unsigned0); arg1 = c_common_get_narrower (op1, &unsigned1); - + /* UNS is 1 if the operation to be done is an unsigned one. */ uns = TYPE_UNSIGNED (result_type); @@ -3486,7 +3487,7 @@ c_common_type_for_mode (enum machine_mode mode, int unsignedp) if (mode == DImode) return unsignedp ? unsigned_intDI_type_node : intDI_type_node; - + #if HOST_BITS_PER_WIDE_INT >= 64 if (mode == TYPE_MODE (intTI_type_node)) return unsignedp ? unsigned_intTI_type_node : intTI_type_node; @@ -4002,7 +4003,7 @@ shorten_compare (tree *op0_ptr, tree *op1_ptr, tree *restype_ptr, /* If one of the operands must be floated, we cannot optimize. */ real1 = TREE_CODE (TREE_TYPE (primop0)) == REAL_TYPE; real2 = TREE_CODE (TREE_TYPE (primop1)) == REAL_TYPE; - + /* If first arg is constant, swap the args (changing operation so value is preserved), for canonicalization. Don't do this if the second arg is 0. */ @@ -4087,9 +4088,12 @@ shorten_compare (tree *op0_ptr, tree *op1_ptr, tree *restype_ptr, { /* Convert primop1 to target type, but do not introduce additional overflow. We know primop1 is an int_cst. 
*/ - primop1 = force_fit_type_double (*restype_ptr, - tree_to_double_int (primop1), - 0, TREE_OVERFLOW (primop1)); + primop1 = force_fit_type (*restype_ptr, + wide_int::from + (primop1, + TYPE_PRECISION (*restype_ptr), + TYPE_SIGN (TREE_TYPE (primop1))), + 0, TREE_OVERFLOW (primop1)); } if (type != *restype_ptr) { @@ -4401,8 +4405,7 @@ pointer_int_sum (location_t loc, enum tree_code resultcode, convert (TREE_TYPE (intop), size_exp), 1); intop = convert (sizetype, t); if (TREE_OVERFLOW_P (intop) && !TREE_OVERFLOW (t)) - intop = build_int_cst_wide (TREE_TYPE (intop), TREE_INT_CST_LOW (intop), - TREE_INT_CST_HIGH (intop)); + intop = wide_int_to_tree (TREE_TYPE (intop), intop); } /* Create the sum or difference. */ @@ -4525,7 +4528,7 @@ c_common_truthvalue_conversion (location_t location, tree expr) case ERROR_MARK: return expr; - + case INTEGER_CST: return integer_zerop (expr) ? truthvalue_false_node : truthvalue_true_node; @@ -4775,7 +4778,7 @@ c_type_hash (const void *p) if (TREE_CODE (TYPE_SIZE (t)) != INTEGER_CST) size = 0; else - size = TREE_INT_CST_LOW (TYPE_SIZE (t)); + size = tree_to_hwi (TYPE_SIZE (t)); return ((size << 24) | (n_elements << shift)); } @@ -4783,7 +4786,7 @@ static GTY((param_is (union tree_node))) htab_t type_hash_table; /* Return the typed-based alias set for T, which may be an expression or a type. Return -1 if we don't do anything special. */ - + alias_set_type c_common_get_alias_set (tree t) { @@ -5460,7 +5463,7 @@ c_common_nodes_and_builtins (void) } /* This node must not be shared. */ - void_zero_node = make_node (INTEGER_CST); + void_zero_node = make_int_cst (1); TREE_TYPE (void_zero_node) = void_type_node; void_list_node = build_void_list_node (); @@ -5649,7 +5652,7 @@ c_common_nodes_and_builtins (void) (build_decl (UNKNOWN_LOCATION, TYPE_DECL, get_identifier (pname), ptype)); - + } } @@ -5671,7 +5674,7 @@ c_common_nodes_and_builtins (void) /* Create the built-in __null node. It is important that this is not shared. 
*/ - null_node = make_node (INTEGER_CST); + null_node = make_int_cst (1); TREE_TYPE (null_node) = c_common_type_for_size (POINTER_SIZE, 0); /* Since builtin_types isn't gc'ed, don't export these nodes. */ @@ -6049,22 +6052,12 @@ c_add_case_label (location_t loc, splay_tree cases, tree cond, tree orig_type, static void match_case_to_enum_1 (tree key, tree type, tree label) { - char buf[2 + 2*HOST_BITS_PER_WIDE_INT/4 + 1]; - - /* ??? Not working too hard to print the double-word value. - Should perhaps be done with %lwd in the diagnostic routines? */ - if (TREE_INT_CST_HIGH (key) == 0) - snprintf (buf, sizeof (buf), HOST_WIDE_INT_PRINT_UNSIGNED, - TREE_INT_CST_LOW (key)); - else if (!TYPE_UNSIGNED (type) - && TREE_INT_CST_HIGH (key) == -1 - && TREE_INT_CST_LOW (key) != 0) - snprintf (buf, sizeof (buf), "-" HOST_WIDE_INT_PRINT_UNSIGNED, - -TREE_INT_CST_LOW (key)); + char buf[WIDE_INT_PRINT_BUFFER_SIZE]; + + if (tree_fits_hwi_p (key, TYPE_SIGN (type))) + print_dec (key, buf, TYPE_SIGN (type)); else - snprintf (buf, sizeof (buf), HOST_WIDE_INT_PRINT_DOUBLE_HEX, - (unsigned HOST_WIDE_INT) TREE_INT_CST_HIGH (key), - (unsigned HOST_WIDE_INT) TREE_INT_CST_LOW (key)); + print_hex (key, buf); if (TYPE_NAME (type) == 0) warning_at (DECL_SOURCE_LOCATION (CASE_LABEL (label)), @@ -6987,11 +6980,11 @@ get_priority (tree args, bool is_destructor) arg = TREE_VALUE (args); arg = default_conversion (arg); - if (!host_integerp (arg, /*pos=*/0) + if (!tree_fits_shwi_p (arg) || !INTEGRAL_TYPE_P (TREE_TYPE (arg))) goto invalid; - pri = tree_low_cst (arg, /*pos=*/0); + pri = tree_to_shwi (arg); if (pri < 0 || pri > MAX_INIT_PRIORITY) goto invalid; @@ -7125,7 +7118,7 @@ handle_mode_attribute (tree *node, tree name, tree args, tree ident = TREE_VALUE (args); *no_add_attrs = true; - + if (TREE_CODE (ident) != IDENTIFIER_NODE) warning (OPT_Wattributes, "%qE attribute ignored", name); else @@ -7988,11 +7981,11 @@ handle_alloc_size_attribute (tree *node, tree ARG_UNUSED (name), tree args, for (; 
args; args = TREE_CHAIN (args)) { tree position = TREE_VALUE (args); + wide_int p; if (TREE_CODE (position) != INTEGER_CST - || TREE_INT_CST_HIGH (position) - || TREE_INT_CST_LOW (position) < 1 - || TREE_INT_CST_LOW (position) > arg_count ) + || wi::ltu_p (p = wide_int (position), 1) + || wi::gtu_p (p, arg_count)) { warning (OPT_Wattributes, "alloc_size parameter outside range"); @@ -8433,14 +8426,14 @@ handle_vector_size_attribute (tree *node, tree name, tree args, size = TREE_VALUE (args); - if (!host_integerp (size, 1)) + if (!tree_fits_uhwi_p (size)) { warning (OPT_Wattributes, "%qE attribute ignored", name); return NULL_TREE; } /* Get the vector size (in bytes). */ - vecsize = tree_low_cst (size, 1); + vecsize = tree_to_uhwi (size); /* We need to provide for vector pointers, vector arrays, and functions returning vectors. For example: @@ -8466,14 +8459,14 @@ handle_vector_size_attribute (tree *node, tree name, tree args, || (!SCALAR_FLOAT_MODE_P (orig_mode) && GET_MODE_CLASS (orig_mode) != MODE_INT && !ALL_SCALAR_FIXED_POINT_MODE_P (orig_mode)) - || !host_integerp (TYPE_SIZE_UNIT (type), 1) + || !tree_fits_uhwi_p (TYPE_SIZE_UNIT (type)) || TREE_CODE (type) == BOOLEAN_TYPE) { error ("invalid vector type for attribute %qE", name); return NULL_TREE; } - if (vecsize % tree_low_cst (TYPE_SIZE_UNIT (type), 1)) + if (vecsize % tree_to_uhwi (TYPE_SIZE_UNIT (type))) { error ("vector size not an integral multiple of component size"); return NULL; @@ -8486,7 +8479,7 @@ handle_vector_size_attribute (tree *node, tree name, tree args, } /* Calculate how many units fit in the vector. 
*/ - nunits = vecsize / tree_low_cst (TYPE_SIZE_UNIT (type), 1); + nunits = vecsize / tree_to_uhwi (TYPE_SIZE_UNIT (type)); if (nunits & (nunits - 1)) { error ("number of components of the vector not a power of two"); @@ -8648,7 +8641,7 @@ check_function_sentinel (const_tree fntype, int nargs, tree *argarray) if (TREE_VALUE (attr)) { tree p = TREE_VALUE (TREE_VALUE (attr)); - pos = TREE_INT_CST_LOW (p); + pos = tree_to_hwi (p); } /* The sentinel must be one of the varargs, i.e. @@ -8721,13 +8714,14 @@ check_nonnull_arg (void * ARG_UNUSED (ctx), tree param, static bool get_nonnull_operand (tree arg_num_expr, unsigned HOST_WIDE_INT *valp) { - /* Verify the arg number is a constant. */ - if (TREE_CODE (arg_num_expr) != INTEGER_CST - || TREE_INT_CST_HIGH (arg_num_expr) != 0) + /* Verify the arg number is a small constant. */ + if (cst_fits_uhwi_p (arg_num_expr)) + { + *valp = tree_to_hwi (arg_num_expr); + return true; + } + else return false; - - *valp = TREE_INT_CST_LOW (arg_num_expr); - return true; } /* Handle a "nothrow" attribute; arguments as in @@ -8925,7 +8919,7 @@ parse_optimize_options (tree args, bool attr_p) if (TREE_CODE (value) == INTEGER_CST) { char buffer[20]; - sprintf (buffer, "-O%ld", (long) TREE_INT_CST_LOW (value)); + sprintf (buffer, "-O%ld", (long) tree_to_hwi (value)); vec_safe_push (optimize_args, ggc_strdup (buffer)); } @@ -9164,11 +9158,10 @@ check_function_arguments_recurse (void (*callback) /* Extract the argument number, which was previously checked to be valid. 
*/ format_num_expr = TREE_VALUE (TREE_VALUE (attrs)); + + gcc_assert (tree_fits_uhwi_p (format_num_expr)); - gcc_assert (TREE_CODE (format_num_expr) == INTEGER_CST - && !TREE_INT_CST_HIGH (format_num_expr)); - - format_num = TREE_INT_CST_LOW (format_num_expr); + format_num = tree_to_uhwi (format_num_expr); for (inner_arg = first_call_expr_arg (param, &iter), i = 1; inner_arg != 0; @@ -9422,7 +9415,7 @@ c_parse_error (const char *gmsgid, enum cpp_ttype token_type, || token_type == CPP_CHAR16 || token_type == CPP_CHAR32) { - unsigned int val = TREE_INT_CST_LOW (value); + unsigned int val = tree_to_hwi (value); const char *prefix; switch (token_type) @@ -9676,8 +9669,7 @@ fold_offsetof_1 (tree expr) return error_mark_node; } off = size_binop_loc (input_location, PLUS_EXPR, DECL_FIELD_OFFSET (t), - size_int (tree_low_cst (DECL_FIELD_BIT_OFFSET (t), - 1) + size_int (tree_to_uhwi (DECL_FIELD_BIT_OFFSET (t)) / BITS_PER_UNIT)); break; @@ -10041,7 +10033,7 @@ complete_array_type (tree *ptype, tree initial_value, bool do_default) { error ("size of array is too large"); /* If we proceed with the array type as it is, we'll eventually - crash in tree_low_cst(). */ + crash in tree_to_uhwi(). */ type = error_mark_node; } @@ -10099,7 +10091,7 @@ sync_resolve_size (tree function, vec<tree, va_gc> *params) if (!INTEGRAL_TYPE_P (type) && !POINTER_TYPE_P (type)) goto incompatible; - size = tree_low_cst (TYPE_SIZE_UNIT (type), 1); + size = tree_to_uhwi (TYPE_SIZE_UNIT (type)); if (size == 1 || size == 2 || size == 4 || size == 8 || size == 16) return size; @@ -10258,7 +10250,7 @@ get_atomic_generic_size (location_t loc, tree function, return 0; } - size_0 = tree_low_cst (TYPE_SIZE_UNIT (TREE_TYPE (type_0)), 1); + size_0 = tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (type_0))); /* Zero size objects are not allowed. 
*/ if (size_0 == 0) @@ -10283,7 +10275,7 @@ get_atomic_generic_size (location_t loc, tree function, function); return 0; } - size = tree_low_cst (TYPE_SIZE_UNIT (TREE_TYPE (type)), 1); + size = tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (type))); if (size != size_0) { error_at (loc, "size mismatch in argument %d of %qE", x + 1, @@ -10298,7 +10290,7 @@ get_atomic_generic_size (location_t loc, tree function, tree p = (*params)[x]; if (TREE_CODE (p) == INTEGER_CST) { - int i = tree_low_cst (p, 1); + int i = tree_to_uhwi (p); if (i < 0 || (i & MEMMODEL_MASK) >= MEMMODEL_LAST) { warning_at (loc, OPT_Winvalid_memory_model, @@ -10467,7 +10459,7 @@ resolve_overloaded_atomic_compare_exchange (location_t loc, tree function, bool fn(T* mem, T* desired, T* return, weak, success, failure) into bool fn ((In *)mem, (In *)expected, (In) *desired, weak, succ, fail) */ - + p0 = (*params)[0]; p1 = (*params)[1]; p2 = (*params)[2]; @@ -11171,24 +11163,24 @@ warn_for_sign_compare (location_t location, if (TREE_CODE (op1) == BIT_NOT_EXPR) op1 = c_common_get_narrower (TREE_OPERAND (op1, 0), &unsignedp1); - if (host_integerp (op0, 0) || host_integerp (op1, 0)) + if (tree_fits_shwi_p (op0) || tree_fits_shwi_p (op1)) { tree primop; HOST_WIDE_INT constant, mask; int unsignedp; unsigned int bits; - if (host_integerp (op0, 0)) + if (tree_fits_shwi_p (op0)) { primop = op1; unsignedp = unsignedp1; - constant = tree_low_cst (op0, 0); + constant = tree_to_shwi (op0); } else { primop = op0; unsignedp = unsignedp0; - constant = tree_low_cst (op1, 0); + constant = tree_to_shwi (op1); } bits = TYPE_PRECISION (TREE_TYPE (primop)); @@ -11266,7 +11258,7 @@ do_warn_double_promotion (tree result_type, tree type1, tree type2, early on, later parts of the compiler can always do the reverse translation and get back the corresponding typedef name. 
For example, given: - + typedef struct S MY_TYPE; MY_TYPE object; @@ -11628,8 +11620,8 @@ convert_vector_to_pointer_for_subscript (location_t loc, tree type1; if (TREE_CODE (index) == INTEGER_CST) - if (!host_integerp (index, 1) - || ((unsigned HOST_WIDE_INT) tree_low_cst (index, 1) + if (!tree_fits_uhwi_p (index) + || ((unsigned HOST_WIDE_INT) tree_to_uhwi (index) >= TYPE_VECTOR_SUBPARTS (type))) warning_at (loc, OPT_Warray_bounds, "index value is out of bound"); diff --git a/gcc/c-family/c-cppbuiltin.c b/gcc/c-family/c-cppbuiltin.c index ed4c82caa46..d0327928326 100644 --- a/gcc/c-family/c-cppbuiltin.c +++ b/gcc/c-family/c-cppbuiltin.c @@ -106,7 +106,7 @@ static void builtin_define_type_sizeof (const char *name, tree type) { builtin_define_with_int_value (name, - tree_low_cst (TYPE_SIZE_UNIT (type), 1)); + tree_to_uhwi (TYPE_SIZE_UNIT (type))); } /* Define the float.h constants for TYPE using NAME_PREFIX, FP_SUFFIX, @@ -648,7 +648,7 @@ cpp_atomic_builtins (cpp_reader *pfile) /* Tell the source code about various types. These map to the C++11 and C11 macros where 2 indicates lock-free always, and 1 indicates sometimes lock free. */ -#define SIZEOF_NODE(T) (tree_low_cst (TYPE_SIZE_UNIT (T), 1)) +#define SIZEOF_NODE(T) (tree_to_uhwi (TYPE_SIZE_UNIT (T))) #define SWAP_INDEX(T) ((SIZEOF_NODE (T) < SWAP_LIMIT) ? SIZEOF_NODE (T) : 0) builtin_define_with_int_value ("__GCC_ATOMIC_BOOL_LOCK_FREE", (have_swap[SWAP_INDEX (boolean_type_node)]? 
2 : 1)); diff --git a/gcc/c-family/c-format.c b/gcc/c-family/c-format.c index c11d93aa89d..e5847021b83 100644 --- a/gcc/c-family/c-format.c +++ b/gcc/c-family/c-format.c @@ -226,13 +226,13 @@ check_format_string (tree fntype, unsigned HOST_WIDE_INT format_num, static bool get_constant (tree expr, unsigned HOST_WIDE_INT *value, int validated_p) { - if (TREE_CODE (expr) != INTEGER_CST || TREE_INT_CST_HIGH (expr) != 0) + if (!cst_fits_uhwi_p (expr)) { gcc_assert (!validated_p); return false; } - *value = TREE_INT_CST_LOW (expr); + *value = tree_to_hwi (expr); return true; } @@ -1459,8 +1459,8 @@ check_format_arg (void *ctx, tree format_tree, res->number_non_literal++; return; } - if (!host_integerp (arg1, 0) - || (offset = tree_low_cst (arg1, 0)) < 0) + if (!tree_fits_shwi_p (arg1) + || (offset = tree_to_shwi (arg1)) < 0) { res->number_non_literal++; return; @@ -1506,8 +1506,8 @@ check_format_arg (void *ctx, tree format_tree, return; } if (TREE_CODE (format_tree) == ARRAY_REF - && host_integerp (TREE_OPERAND (format_tree, 1), 0) - && (offset += tree_low_cst (TREE_OPERAND (format_tree, 1), 0)) >= 0) + && tree_fits_shwi_p (TREE_OPERAND (format_tree, 1)) + && (offset += tree_to_shwi (TREE_OPERAND (format_tree, 1))) >= 0) format_tree = TREE_OPERAND (format_tree, 0); if (TREE_CODE (format_tree) == VAR_DECL && TREE_CODE (TREE_TYPE (format_tree)) == ARRAY_TYPE @@ -1537,9 +1537,9 @@ check_format_arg (void *ctx, tree format_tree, /* Variable length arrays can't be initialized. 
*/ gcc_assert (TREE_CODE (array_size) == INTEGER_CST); - if (host_integerp (array_size, 0)) + if (tree_fits_shwi_p (array_size)) { - HOST_WIDE_INT array_size_value = TREE_INT_CST_LOW (array_size); + HOST_WIDE_INT array_size_value = tree_to_shwi (array_size); if (array_size_value > 0 && array_size_value == (int) array_size_value && format_length > array_size_value) diff --git a/gcc/c-family/c-lex.c b/gcc/c-family/c-lex.c index 819e9d51e10..62f738d7bed 100644 --- a/gcc/c-family/c-lex.c +++ b/gcc/c-family/c-lex.c @@ -33,6 +33,7 @@ along with GCC; see the file COPYING3. If not see #include "splay-tree.h" #include "debug.h" #include "target.h" +#include "wide-int.h" /* We may keep statistics about how long which files took to compile. */ static int header_time, body_time; @@ -47,9 +48,9 @@ static tree interpret_float (const cpp_token *, unsigned int, const char *, enum overflow_type *); static tree interpret_fixed (const cpp_token *, unsigned int); static enum integer_type_kind narrowest_unsigned_type - (unsigned HOST_WIDE_INT, unsigned HOST_WIDE_INT, unsigned int); + (const wide_int &, unsigned int); static enum integer_type_kind narrowest_signed_type - (unsigned HOST_WIDE_INT, unsigned HOST_WIDE_INT, unsigned int); + (const wide_int &, unsigned int); static enum cpp_ttype lex_string (const cpp_token *, tree *, bool, bool); static tree lex_charconst (const cpp_token *); static void update_header_times (const char *); @@ -525,9 +526,7 @@ c_lex_with_flags (tree *value, location_t *loc, unsigned char *cpp_flags, there isn't one. 
*/ static enum integer_type_kind -narrowest_unsigned_type (unsigned HOST_WIDE_INT low, - unsigned HOST_WIDE_INT high, - unsigned int flags) +narrowest_unsigned_type (const wide_int &val, unsigned int flags) { int itk; @@ -546,9 +545,7 @@ narrowest_unsigned_type (unsigned HOST_WIDE_INT low, continue; upper = TYPE_MAX_VALUE (integer_types[itk]); - if ((unsigned HOST_WIDE_INT) TREE_INT_CST_HIGH (upper) > high - || ((unsigned HOST_WIDE_INT) TREE_INT_CST_HIGH (upper) == high - && TREE_INT_CST_LOW (upper) >= low)) + if (wi::geu_p (upper, val)) return (enum integer_type_kind) itk; } @@ -557,8 +554,7 @@ narrowest_unsigned_type (unsigned HOST_WIDE_INT low, /* Ditto, but narrowest signed type. */ static enum integer_type_kind -narrowest_signed_type (unsigned HOST_WIDE_INT low, - unsigned HOST_WIDE_INT high, unsigned int flags) +narrowest_signed_type (const wide_int &val, unsigned int flags) { int itk; @@ -569,7 +565,6 @@ narrowest_signed_type (unsigned HOST_WIDE_INT low, else itk = itk_long_long; - for (; itk < itk_none; itk += 2 /* skip signed types */) { tree upper; @@ -578,9 +573,7 @@ narrowest_signed_type (unsigned HOST_WIDE_INT low, continue; upper = TYPE_MAX_VALUE (integer_types[itk]); - if ((unsigned HOST_WIDE_INT) TREE_INT_CST_HIGH (upper) > high - || ((unsigned HOST_WIDE_INT) TREE_INT_CST_HIGH (upper) == high - && TREE_INT_CST_LOW (upper) >= low)) + if (wi::geu_p (upper, val)) return (enum integer_type_kind) itk; } @@ -596,6 +589,8 @@ interpret_integer (const cpp_token *token, unsigned int flags, enum integer_type_kind itk; cpp_num integer; cpp_options *options = cpp_get_options (parse_in); + HOST_WIDE_INT ival[2]; + wide_int wval; *overflow = OT_NONE; @@ -604,18 +599,22 @@ interpret_integer (const cpp_token *token, unsigned int flags, if (integer.overflow) *overflow = OT_OVERFLOW; + ival[0] = integer.low; + ival[1] = integer.high; + wval = wide_int::from_array (ival, 2, HOST_BITS_PER_WIDE_INT * 2); + /* The type of a constant with a U suffix is straightforward. 
*/ if (flags & CPP_N_UNSIGNED) - itk = narrowest_unsigned_type (integer.low, integer.high, flags); + itk = narrowest_unsigned_type (wval, flags); else { /* The type of a potentially-signed integer constant varies depending on the base it's in, the standard in use, and the length suffixes. */ enum integer_type_kind itk_u - = narrowest_unsigned_type (integer.low, integer.high, flags); + = narrowest_unsigned_type (wval, flags); enum integer_type_kind itk_s - = narrowest_signed_type (integer.low, integer.high, flags); + = narrowest_signed_type (wval, flags); /* In both C89 and C99, octal and hex constants may be signed or unsigned, whichever fits tighter. We do not warn about this @@ -667,7 +666,7 @@ interpret_integer (const cpp_token *token, unsigned int flags, : "integer constant is too large for %<long%> type"); } - value = build_int_cst_wide (type, integer.low, integer.high); + value = wide_int_to_tree (type, wval); /* Convert imaginary to a complex type. */ if (flags & CPP_N_IMAGINARY) @@ -1154,9 +1153,9 @@ lex_charconst (const cpp_token *token) /* Cast to cppchar_signed_t to get correct sign-extension of RESULT before possibly widening to HOST_WIDE_INT for build_int_cst. 
*/ if (unsignedp || (cppchar_signed_t) result >= 0) - value = build_int_cst_wide (type, result, 0); + value = build_int_cst (type, result); else - value = build_int_cst_wide (type, (cppchar_signed_t) result, -1); + value = build_int_cst (type, (cppchar_signed_t) result); return value; } diff --git a/gcc/c-family/c-omp.c b/gcc/c-family/c-omp.c index f001a75cd22..ef2d281de51 100644 --- a/gcc/c-family/c-omp.c +++ b/gcc/c-family/c-omp.c @@ -903,8 +903,8 @@ c_omp_declare_simd_clause_cmp (const void *p, const void *q) && OMP_CLAUSE_CODE (a) != OMP_CLAUSE_INBRANCH && OMP_CLAUSE_CODE (a) != OMP_CLAUSE_NOTINBRANCH) { - int c = tree_low_cst (OMP_CLAUSE_DECL (a), 0); - int d = tree_low_cst (OMP_CLAUSE_DECL (b), 0); + int c = tree_to_shwi (OMP_CLAUSE_DECL (a)); + int d = tree_to_shwi (OMP_CLAUSE_DECL (b)); if (c < d) return 1; if (c > d) @@ -969,7 +969,7 @@ c_omp_declare_simd_clauses_to_decls (tree fndecl, tree clauses) && OMP_CLAUSE_CODE (c) != OMP_CLAUSE_INBRANCH && OMP_CLAUSE_CODE (c) != OMP_CLAUSE_NOTINBRANCH) { - int idx = tree_low_cst (OMP_CLAUSE_DECL (c), 0), i; + int idx = tree_to_shwi (OMP_CLAUSE_DECL (c)), i; tree arg; for (arg = DECL_ARGUMENTS (fndecl), i = 0; arg; arg = TREE_CHAIN (arg), i++) diff --git a/gcc/c-family/c-pragma.c b/gcc/c-family/c-pragma.c index be5748b1c53..abbb0cbaf6e 100644 --- a/gcc/c-family/c-pragma.c +++ b/gcc/c-family/c-pragma.c @@ -151,7 +151,8 @@ handle_pragma_pack (cpp_reader * ARG_UNUSED (dummy)) { if (TREE_CODE (x) != INTEGER_CST) GCC_BAD ("invalid constant in %<#pragma pack%> - ignored"); - align = TREE_INT_CST_LOW (x); + /* Cannot use tree_to_uhwi here or it will ice if above message printed. 
*/ + align = tree_to_hwi (x); action = set; if (pragma_lex (&x) != CPP_CLOSE_PAREN) GCC_BAD ("malformed %<#pragma pack%> - ignored"); @@ -183,7 +184,8 @@ handle_pragma_pack (cpp_reader * ARG_UNUSED (dummy)) { if (TREE_CODE (x) != INTEGER_CST) GCC_BAD ("invalid constant in %<#pragma pack%> - ignored"); - align = TREE_INT_CST_LOW (x); + /* Cannot use tree_to_uhwi here or it will ice if above message printed. */ + align = tree_to_hwi (x); if (align == -1) action = set; } diff --git a/gcc/c-family/c-pretty-print.c b/gcc/c-family/c-pretty-print.c index d0283e8af4d..6f0581dc2ff 100644 --- a/gcc/c-family/c-pretty-print.c +++ b/gcc/c-family/c-pretty-print.c @@ -28,6 +28,7 @@ along with GCC; see the file COPYING3. If not see #include "tree-pretty-print.h" #include "tree-iterator.h" #include "diagnostic.h" +#include "wide-int-print.h" /* The pretty-printer code is primarily designed to closely follow (GNU) C and C++ grammars. That is to be contrasted with spaghetti @@ -577,8 +578,8 @@ c_pretty_printer::direct_abstract_declarator (tree t) tree maxval = TYPE_MAX_VALUE (TYPE_DOMAIN (t)); tree type = TREE_TYPE (maxval); - if (host_integerp (maxval, 0)) - pp_wide_integer (this, tree_low_cst (maxval, 0) + 1); + if (tree_fits_shwi_p (maxval)) + pp_wide_integer (this, tree_to_shwi (maxval) + 1); else expression (fold_build2 (PLUS_EXPR, type, maxval, build_int_cst (type, 1))); @@ -906,22 +907,20 @@ pp_c_integer_constant (c_pretty_printer *pp, tree i) ? 
TYPE_CANONICAL (TREE_TYPE (i)) : TREE_TYPE (i); - if (host_integerp (i, 0)) - pp_wide_integer (pp, TREE_INT_CST_LOW (i)); - else if (host_integerp (i, 1)) - pp_unsigned_wide_integer (pp, TREE_INT_CST_LOW (i)); + if (tree_fits_shwi_p (i)) + pp_wide_integer (pp, tree_to_shwi (i)); + else if (tree_fits_uhwi_p (i)) + pp_unsigned_wide_integer (pp, tree_to_uhwi (i)); else { - unsigned HOST_WIDE_INT low = TREE_INT_CST_LOW (i); - HOST_WIDE_INT high = TREE_INT_CST_HIGH (i); - if (tree_int_cst_sgn (i) < 0) + wide_int wi = i; + + if (wi::lt_p (i, 0, TYPE_SIGN (TREE_TYPE (i)))) { pp_minus (pp); - high = ~high + !low; - low = -low; + wi = -wi; } - sprintf (pp_buffer (pp)->digit_buffer, HOST_WIDE_INT_PRINT_DOUBLE_HEX, - (unsigned HOST_WIDE_INT) high, (unsigned HOST_WIDE_INT) low); + print_hex (wi, pp_buffer (pp)->digit_buffer); pp_string (pp, pp_buffer (pp)->digit_buffer); } if (TYPE_UNSIGNED (type)) @@ -945,10 +944,10 @@ pp_c_character_constant (c_pretty_printer *pp, tree c) if (type == wchar_type_node) pp_character (pp, 'L'); pp_quote (pp); - if (host_integerp (c, TYPE_UNSIGNED (type))) - pp_c_char (pp, tree_low_cst (c, TYPE_UNSIGNED (type))); + if (tree_fits_hwi_p (c, TYPE_SIGN (type))) + pp_c_char (pp, tree_to_hwi (c, TYPE_SIGN (type))); else - pp_scalar (pp, "\\x%x", (unsigned) TREE_INT_CST_LOW (c)); + pp_scalar (pp, "\\x%x", (unsigned) tree_to_hwi (c)); pp_quote (pp); } @@ -1596,8 +1595,8 @@ c_pretty_printer::postfix_expression (tree e) if (type && tree_int_cst_equal (TYPE_SIZE (type), TREE_OPERAND (e, 1))) { - HOST_WIDE_INT bitpos = tree_low_cst (TREE_OPERAND (e, 2), 0); - HOST_WIDE_INT size = tree_low_cst (TYPE_SIZE (type), 0); + HOST_WIDE_INT bitpos = tree_to_shwi (TREE_OPERAND (e, 2)); + HOST_WIDE_INT size = tree_to_shwi (TYPE_SIZE (type)); if ((bitpos % size) == 0) { pp_c_left_paren (this); diff --git a/gcc/c/Make-lang.in b/gcc/c/Make-lang.in index d79fc4f410c..e68000a8246 100644 --- a/gcc/c/Make-lang.in +++ b/gcc/c/Make-lang.in @@ -137,4 +137,3 @@ c.stageprofile: 
stageprofile-start -mv c/*$(objext) stageprofile/c c.stagefeedback: stagefeedback-start -mv c/*$(objext) stagefeedback/c - diff --git a/gcc/c/c-decl.c b/gcc/c/c-decl.c index 0554e72703f..c99db13ff84 100644 --- a/gcc/c/c-decl.c +++ b/gcc/c/c-decl.c @@ -4819,14 +4819,14 @@ check_bitfield_type_and_width (tree *type, tree *width, tree orig_name) *width = build_int_cst (integer_type_node, w); } else - w = tree_low_cst (*width, 1); + w = tree_to_uhwi (*width); if (TREE_CODE (*type) == ENUMERAL_TYPE) { struct lang_type *lt = TYPE_LANG_SPECIFIC (*type); if (!lt - || w < tree_int_cst_min_precision (lt->enum_min, TYPE_UNSIGNED (*type)) - || w < tree_int_cst_min_precision (lt->enum_max, TYPE_UNSIGNED (*type))) + || w < tree_int_cst_min_precision (lt->enum_min, TYPE_SIGN (*type)) + || w < tree_int_cst_min_precision (lt->enum_max, TYPE_SIGN (*type))) warning (0, "%qs is narrower than values of its type", name); } } @@ -5856,7 +5856,7 @@ grokdeclarator (const struct c_declarator *declarator, else error_at (loc, "size of unnamed array is too large"); /* If we proceed with the array type as it is, we'll eventually - crash in tree_low_cst(). */ + crash in tree_to_uhwi (). 
*/ type = error_mark_node; } @@ -7182,7 +7182,7 @@ finish_struct (location_t loc, tree t, tree fieldlist, tree attributes, if (DECL_INITIAL (x)) { - unsigned HOST_WIDE_INT width = tree_low_cst (DECL_INITIAL (x), 1); + unsigned HOST_WIDE_INT width = tree_to_uhwi (DECL_INITIAL (x)); DECL_SIZE (x) = bitsize_int (width); DECL_BIT_FIELD (x) = 1; SET_DECL_C_BIT_FIELD (x); @@ -7253,7 +7253,7 @@ finish_struct (location_t loc, tree t, tree fieldlist, tree attributes, && TREE_TYPE (*fieldlistp) != error_mark_node) { unsigned HOST_WIDE_INT width - = tree_low_cst (DECL_INITIAL (*fieldlistp), 1); + = tree_to_uhwi (DECL_INITIAL (*fieldlistp)); tree type = TREE_TYPE (*fieldlistp); if (width != TYPE_PRECISION (type)) { @@ -7479,7 +7479,8 @@ finish_enum (tree enumtype, tree values, tree attributes) { tree pair, tem; tree minnode = 0, maxnode = 0; - int precision, unsign; + int precision; + signop sign; bool toplevel = (file_scope == current_scope); struct lang_type *lt; @@ -7506,13 +7507,13 @@ finish_enum (tree enumtype, tree values, tree attributes) as one of the integral types - the narrowest one that fits, except that normally we only go as narrow as int - and signed iff any of the values are negative. */ - unsign = (tree_int_cst_sgn (minnode) >= 0); - precision = MAX (tree_int_cst_min_precision (minnode, unsign), - tree_int_cst_min_precision (maxnode, unsign)); + sign = (tree_int_cst_sgn (minnode) >= 0) ? UNSIGNED : SIGNED; + precision = MAX (tree_int_cst_min_precision (minnode, sign), + tree_int_cst_min_precision (maxnode, sign)); if (TYPE_PACKED (enumtype) || precision > TYPE_PRECISION (integer_type_node)) { - tem = c_common_type_for_size (precision, unsign); + tem = c_common_type_for_size (precision, sign == UNSIGNED ? 1 : 0); if (tem == NULL) { warning (0, "enumeration values exceed range of largest integer"); @@ -7520,7 +7521,7 @@ finish_enum (tree enumtype, tree values, tree attributes) } } else - tem = unsign ? 
unsigned_type_node : integer_type_node; + tem = sign == UNSIGNED ? unsigned_type_node : integer_type_node; TYPE_MIN_VALUE (enumtype) = TYPE_MIN_VALUE (tem); TYPE_MAX_VALUE (enumtype) = TYPE_MAX_VALUE (tem); diff --git a/gcc/c/c-parser.c b/gcc/c/c-parser.c index 9b6abe0e95e..f8b737b02fb 100644 --- a/gcc/c/c-parser.c +++ b/gcc/c/c-parser.c @@ -383,7 +383,7 @@ c_lex_one_token (c_parser *parser, c_token *token) break; case CPP_PRAGMA: /* We smuggled the cpp_token->u.pragma value in an INTEGER_CST. */ - token->pragma_kind = (enum pragma_kind) TREE_INT_CST_LOW (token->value); + token->pragma_kind = (enum pragma_kind) tree_to_hwi (token->value); token->value = NULL; break; default: @@ -9442,8 +9442,8 @@ c_parser_omp_clause_collapse (c_parser *parser, tree list) mark_exp_read (num); num = c_fully_fold (num, false, NULL); if (!INTEGRAL_TYPE_P (TREE_TYPE (num)) - || !host_integerp (num, 0) - || (n = tree_low_cst (num, 0)) <= 0 + || !tree_fits_shwi_p (num) + || (n = tree_to_shwi (num)) <= 0 || (int) n != n) { error_at (loc, @@ -11169,7 +11169,7 @@ c_parser_omp_for_loop (location_t loc, c_parser *parser, enum tree_code code, for (cl = clauses; cl; cl = OMP_CLAUSE_CHAIN (cl)) if (OMP_CLAUSE_CODE (cl) == OMP_CLAUSE_COLLAPSE) - collapse = tree_low_cst (OMP_CLAUSE_COLLAPSE_EXPR (cl), 0); + collapse = tree_to_shwi (OMP_CLAUSE_COLLAPSE_EXPR (cl)); gcc_assert (collapse >= 1); diff --git a/gcc/c/c-typeck.c b/gcc/c/c-typeck.c index 1d83137cd3f..90318ba8ae4 100644 --- a/gcc/c/c-typeck.c +++ b/gcc/c/c-typeck.c @@ -42,6 +42,7 @@ along with GCC; see the file COPYING3. If not see #include "c-family/c-objc.h" #include "c-family/c-common.h" #include "c-family/c-ubsan.h" +#include "wide-int.h" /* Possible cases of implicit bad conversions. Used to select diagnostic messages in convert_for_assignment. */ @@ -4748,9 +4749,8 @@ build_c_cast (location_t loc, tree type, tree expr) } else if (TREE_OVERFLOW (value)) /* Reset VALUE's overflow flags, ensuring constant sharing. 
*/ - value = build_int_cst_wide (TREE_TYPE (value), - TREE_INT_CST_LOW (value), - TREE_INT_CST_HIGH (value)); + value = wide_int_to_tree (TREE_TYPE (value), + value); } } @@ -6810,7 +6810,7 @@ push_init_level (int implicit, struct obstack * braced_init_obstack) else if (TREE_CODE (constructor_type) == ARRAY_TYPE) { constructor_type = TREE_TYPE (constructor_type); - push_array_bounds (tree_low_cst (constructor_index, 1)); + push_array_bounds (tree_to_uhwi (constructor_index)); constructor_depth++; } @@ -7609,20 +7609,20 @@ set_nonincremental_init_from_string (tree str, { if (wchar_bytes == 1) { - val[1] = (unsigned char) *p++; - val[0] = 0; + val[0] = (unsigned char) *p++; + val[1] = 0; } else { - val[0] = 0; val[1] = 0; + val[0] = 0; for (byte = 0; byte < wchar_bytes; byte++) { if (BYTES_BIG_ENDIAN) bitpos = (wchar_bytes - byte - 1) * charwidth; else bitpos = byte * charwidth; - val[bitpos < HOST_BITS_PER_WIDE_INT] + val[bitpos % HOST_BITS_PER_WIDE_INT] |= ((unsigned HOST_WIDE_INT) ((unsigned char) *p++)) << (bitpos % HOST_BITS_PER_WIDE_INT); } @@ -7633,24 +7633,26 @@ set_nonincremental_init_from_string (tree str, bitpos = ((wchar_bytes - 1) * charwidth) + HOST_BITS_PER_CHAR; if (bitpos < HOST_BITS_PER_WIDE_INT) { - if (val[1] & (((HOST_WIDE_INT) 1) << (bitpos - 1))) + if (val[0] & (((HOST_WIDE_INT) 1) << (bitpos - 1))) { - val[1] |= ((HOST_WIDE_INT) -1) << bitpos; - val[0] = -1; + val[0] |= ((HOST_WIDE_INT) -1) << bitpos; + val[1] = -1; } } else if (bitpos == HOST_BITS_PER_WIDE_INT) { - if (val[1] < 0) - val[0] = -1; + if (val[0] < 0) + val[1] = -1; } - else if (val[0] & (((HOST_WIDE_INT) 1) + else if (val[1] & (((HOST_WIDE_INT) 1) << (bitpos - 1 - HOST_BITS_PER_WIDE_INT))) - val[0] |= ((HOST_WIDE_INT) -1) + val[1] |= ((HOST_WIDE_INT) -1) << (bitpos - HOST_BITS_PER_WIDE_INT); } - value = build_int_cst_wide (type, val[1], val[0]); + value = wide_int_to_tree (type, + wide_int::from_array (val, 2, + HOST_BITS_PER_WIDE_INT * 2)); add_pending_init (purpose, value, 
NULL_TREE, true, braced_init_obstack); } @@ -8360,7 +8362,7 @@ process_init_element (struct c_expr value, bool implicit, /* Now output the actual element. */ if (value.value) { - push_array_bounds (tree_low_cst (constructor_index, 1)); + push_array_bounds (tree_to_uhwi (constructor_index)); output_init_element (value.value, value.original_type, strict_string, elttype, constructor_index, 1, implicit, @@ -9156,7 +9158,7 @@ c_finish_bc_stmt (location_t loc, tree *label_p, bool is_break) } else if (TREE_CODE (label) == LABEL_DECL) ; - else switch (TREE_INT_CST_LOW (label)) + else switch (tree_to_hwi (label)) { case 0: if (is_break) @@ -11828,8 +11830,7 @@ c_tree_equal (tree t1, tree t2) switch (code1) { case INTEGER_CST: - return TREE_INT_CST_LOW (t1) == TREE_INT_CST_LOW (t2) - && TREE_INT_CST_HIGH (t1) == TREE_INT_CST_HIGH (t2); + return wi::eq_p (t1, t2); case REAL_CST: return REAL_VALUES_EQUAL (TREE_REAL_CST (t1), TREE_REAL_CST (t2)); diff --git a/gcc/cfgexpand.c b/gcc/cfgexpand.c index 5a78bb68b35..0e0c2c5fa17 100644 --- a/gcc/cfgexpand.c +++ b/gcc/cfgexpand.c @@ -271,7 +271,7 @@ add_stack_var (tree decl) * (size_t *)pointer_map_insert (decl_to_stack_part, decl) = stack_vars_num; v->decl = decl; - v->size = tree_low_cst (DECL_SIZE_UNIT (SSAVAR (decl)), 1); + v->size = tree_to_uhwi (DECL_SIZE_UNIT (SSAVAR (decl))); /* Ensure that all variables have size, so that &a != &b for any two variables that are simultaneously live. */ if (v->size == 0) @@ -1035,7 +1035,7 @@ expand_one_stack_var (tree var) HOST_WIDE_INT size, offset; unsigned byte_align; - size = tree_low_cst (DECL_SIZE_UNIT (SSAVAR (var)), 1); + size = tree_to_uhwi (DECL_SIZE_UNIT (SSAVAR (var))); byte_align = align_local_variable (SSAVAR (var)); /* We handle highly aligned variables in expand_stack_vars. */ @@ -1133,7 +1133,7 @@ defer_stack_allocation (tree var, bool toplevel) get completely out of hand. So we avoid adding scalars and "small" aggregates to the list at all. 
*/ if (optimize == 0 - && (tree_low_cst (DECL_SIZE_UNIT (var), 1) + && (tree_to_uhwi (DECL_SIZE_UNIT (var)) < PARAM_VALUE (PARAM_MIN_SIZE_FOR_STACK_SHARING))) return false; @@ -1248,7 +1248,7 @@ expand_one_var (tree var, bool toplevel, bool really_expand) { if (really_expand) expand_one_stack_var (origvar); - return tree_low_cst (DECL_SIZE_UNIT (var), 1); + return tree_to_uhwi (DECL_SIZE_UNIT (var)); } return 0; } @@ -1325,10 +1325,10 @@ stack_protect_classify_type (tree type) unsigned HOST_WIDE_INT len; if (!TYPE_SIZE_UNIT (type) - || !host_integerp (TYPE_SIZE_UNIT (type), 1)) + || !tree_fits_uhwi_p (TYPE_SIZE_UNIT (type))) len = max; else - len = tree_low_cst (TYPE_SIZE_UNIT (type), 1); + len = tree_to_uhwi (TYPE_SIZE_UNIT (type)); if (len < max) ret = SPCT_HAS_SMALL_CHAR_ARRAY | SPCT_HAS_ARRAY; diff --git a/gcc/cfgloop.c b/gcc/cfgloop.c index 3babf777fae..18e0f52655c 100644 --- a/gcc/cfgloop.c +++ b/gcc/cfgloop.c @@ -332,7 +332,8 @@ alloc_loop (void) loop->exits = ggc_alloc_cleared_loop_exit (); loop->exits->next = loop->exits->prev = loop->exits; loop->can_be_parallel = false; - + loop->nb_iterations_upper_bound = 0; + loop->nb_iterations_estimate = 0; return loop; } @@ -1787,21 +1788,21 @@ get_loop_location (struct loop *loop) I_BOUND times. */ void -record_niter_bound (struct loop *loop, double_int i_bound, bool realistic, - bool upper) +record_niter_bound (struct loop *loop, const max_wide_int &i_bound, + bool realistic, bool upper) { /* Update the bounds only when there is no previous estimation, or when the current estimation is smaller. 
*/ if (upper && (!loop->any_upper_bound - || i_bound.ult (loop->nb_iterations_upper_bound))) + || wi::ltu_p (i_bound, loop->nb_iterations_upper_bound))) { loop->any_upper_bound = true; loop->nb_iterations_upper_bound = i_bound; } if (realistic && (!loop->any_estimate - || i_bound.ult (loop->nb_iterations_estimate))) + || wi::ltu_p (i_bound, loop->nb_iterations_estimate))) { loop->any_estimate = true; loop->nb_iterations_estimate = i_bound; @@ -1811,7 +1812,8 @@ record_niter_bound (struct loop *loop, double_int i_bound, bool realistic, number of iterations, use the upper bound instead. */ if (loop->any_upper_bound && loop->any_estimate - && loop->nb_iterations_upper_bound.ult (loop->nb_iterations_estimate)) + && wi::ltu_p (loop->nb_iterations_upper_bound, + loop->nb_iterations_estimate)) loop->nb_iterations_estimate = loop->nb_iterations_upper_bound; } @@ -1822,13 +1824,13 @@ record_niter_bound (struct loop *loop, double_int i_bound, bool realistic, HOST_WIDE_INT get_estimated_loop_iterations_int (struct loop *loop) { - double_int nit; + max_wide_int nit; HOST_WIDE_INT hwi_nit; if (!get_estimated_loop_iterations (loop, &nit)) return -1; - if (!nit.fits_shwi ()) + if (!wi::fits_shwi_p (nit)) return -1; hwi_nit = nit.to_shwi (); @@ -1859,7 +1861,7 @@ max_stmt_executions_int (struct loop *loop) returns true. */ bool -get_estimated_loop_iterations (struct loop *loop, double_int *nit) +get_estimated_loop_iterations (struct loop *loop, max_wide_int *nit) { /* Even if the bound is not recorded, possibly we can derrive one from profile. */ @@ -1867,7 +1869,7 @@ get_estimated_loop_iterations (struct loop *loop, double_int *nit) { if (loop->header->count) { - *nit = gcov_type_to_double_int + *nit = gcov_type_to_wide_int (expected_loop_iterations_unbounded (loop) + 1); return true; } @@ -1883,7 +1885,7 @@ get_estimated_loop_iterations (struct loop *loop, double_int *nit) false, otherwise returns true. 
*/ bool -get_max_loop_iterations (struct loop *loop, double_int *nit) +get_max_loop_iterations (struct loop *loop, max_wide_int *nit) { if (!loop->any_upper_bound) return false; @@ -1899,13 +1901,13 @@ get_max_loop_iterations (struct loop *loop, double_int *nit) HOST_WIDE_INT get_max_loop_iterations_int (struct loop *loop) { - double_int nit; + max_wide_int nit; HOST_WIDE_INT hwi_nit; if (!get_max_loop_iterations (loop, &nit)) return -1; - if (!nit.fits_shwi ()) + if (!wi::fits_shwi_p (nit)) return -1; hwi_nit = nit.to_shwi (); diff --git a/gcc/cfgloop.h b/gcc/cfgloop.h index 87086d49022..9dcc55fef5f 100644 --- a/gcc/cfgloop.h +++ b/gcc/cfgloop.h @@ -21,6 +21,7 @@ along with GCC; see the file COPYING3. If not see #define GCC_CFGLOOP_H #include "double-int.h" +#include "wide-int.h" #include "bitmap.h" #include "sbitmap.h" #include "function.h" @@ -62,7 +63,7 @@ struct GTY ((chain_next ("%h.next"))) nb_iter_bound { overflows (as MAX + 1 is sometimes produced as the estimate on number of executions of STMT). b) it is consistent with the result of number_of_iterations_exit. */ - double_int bound; + max_wide_int bound; /* True if the statement will cause the loop to be leaved the (at most) BOUND + 1-st time it is executed, that is, all the statements after it @@ -146,12 +147,12 @@ struct GTY ((chain_next ("%h.next"))) loop { /* An integer guaranteed to be greater or equal to nb_iterations. Only valid if any_upper_bound is true. */ - double_int nb_iterations_upper_bound; + max_wide_int nb_iterations_upper_bound; /* An integer giving an estimate on nb_iterations. Unlike nb_iterations_upper_bound, there is no guarantee that it is at least nb_iterations. 
*/ - double_int nb_iterations_estimate; + max_wide_int nb_iterations_estimate; bool any_upper_bound; bool any_estimate; @@ -730,27 +731,27 @@ loop_outermost (struct loop *loop) return (*loop->superloops)[1]; } -extern void record_niter_bound (struct loop *, double_int, bool, bool); +extern void record_niter_bound (struct loop *, const max_wide_int &, bool, bool); extern HOST_WIDE_INT get_estimated_loop_iterations_int (struct loop *); extern HOST_WIDE_INT get_max_loop_iterations_int (struct loop *); -extern bool get_estimated_loop_iterations (struct loop *loop, double_int *nit); -extern bool get_max_loop_iterations (struct loop *loop, double_int *nit); +extern bool get_estimated_loop_iterations (struct loop *loop, max_wide_int *nit); +extern bool get_max_loop_iterations (struct loop *loop, max_wide_int *nit); extern int bb_loop_depth (const_basic_block); -/* Converts VAL to double_int. */ +/* Converts VAL to max_wide_int. */ -static inline double_int -gcov_type_to_double_int (gcov_type val) +static inline max_wide_int +gcov_type_to_wide_int (gcov_type val) { - double_int ret; + HOST_WIDE_INT a[2]; - ret.low = (unsigned HOST_WIDE_INT) val; + a[0] = (unsigned HOST_WIDE_INT) val; /* If HOST_BITS_PER_WIDE_INT == HOST_BITS_PER_WIDEST_INT, avoid shifting by the size of type. 
*/ val >>= HOST_BITS_PER_WIDE_INT - 1; val >>= 1; - ret.high = (unsigned HOST_WIDE_INT) val; + a[1] = (unsigned HOST_WIDE_INT) val; - return ret; + return max_wide_int::from_array (a, 2); } #endif /* GCC_CFGLOOP_H */ diff --git a/gcc/cgraph.c b/gcc/cgraph.c index 6ebd0c71e02..8eedaa493aa 100644 --- a/gcc/cgraph.c +++ b/gcc/cgraph.c @@ -624,8 +624,7 @@ cgraph_add_thunk (struct cgraph_node *decl_node ATTRIBUTE_UNUSED, node = cgraph_create_node (alias); gcc_checking_assert (!virtual_offset - || tree_to_double_int (virtual_offset) == - double_int::from_shwi (virtual_value)); + || wi::eq_p (virtual_offset, virtual_value)); node->thunk.fixed_offset = fixed_offset; node->thunk.this_adjusting = this_adjusting; node->thunk.virtual_value = virtual_value; @@ -947,7 +946,7 @@ cgraph_create_indirect_edge (struct cgraph_node *caller, gimple call_stmt, gcc_assert (TREE_CODE (type) == RECORD_TYPE); edge->indirect_info->param_index = -1; edge->indirect_info->otr_token - = tree_low_cst (OBJ_TYPE_REF_TOKEN (target), 1); + = tree_to_uhwi (OBJ_TYPE_REF_TOKEN (target)); edge->indirect_info->otr_type = type; edge->indirect_info->polymorphic = 1; } diff --git a/gcc/cgraphunit.c b/gcc/cgraphunit.c index e01918cfbd4..910d7978aea 100644 --- a/gcc/cgraphunit.c +++ b/gcc/cgraphunit.c @@ -1770,7 +1770,7 @@ expand_function (struct cgraph_node *node) larger_than_size)) { unsigned int size_as_int - = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (ret_type)); + = tree_to_hwi (TYPE_SIZE_UNIT (ret_type)); if (compare_tree_int (TYPE_SIZE_UNIT (ret_type), size_as_int) == 0) warning (OPT_Wlarger_than_, "size of return value of %q+D is %u bytes", diff --git a/gcc/combine.c b/gcc/combine.c index 13f5e29e3d6..05a27a907ec 100644 --- a/gcc/combine.c +++ b/gcc/combine.c @@ -2669,22 +2669,15 @@ try_combine (rtx i3, rtx i2, rtx i1, rtx i0, int *new_direct_jump_p, offset = -1; } - if (offset >= 0 - && (GET_MODE_PRECISION (GET_MODE (SET_DEST (temp))) - <= HOST_BITS_PER_DOUBLE_INT)) + if (offset >= 0) { - double_int m, o, i; + 
wide_int o; rtx inner = SET_SRC (PATTERN (i3)); rtx outer = SET_SRC (temp); - o = rtx_to_double_int (outer); - i = rtx_to_double_int (inner); - - m = double_int::mask (width); - i &= m; - m = m.llshift (offset, HOST_BITS_PER_DOUBLE_INT); - i = i.llshift (offset, HOST_BITS_PER_DOUBLE_INT); - o = o.and_not (m) | i; + o = wi::insert (std::make_pair (outer, GET_MODE (SET_DEST (temp))), + std::make_pair (inner, GET_MODE (dest)), + offset, width); combine_merges++; subst_insn = i3; @@ -2696,8 +2689,8 @@ try_combine (rtx i3, rtx i2, rtx i1, rtx i0, int *new_direct_jump_p, /* Replace the source in I2 with the new constant and make the resulting insn the new pattern for I3. Then skip to where we validate the pattern. Everything was set up above. */ - SUBST (SET_SRC (temp), - immed_double_int_const (o, GET_MODE (SET_DEST (temp)))); + SUBST (SET_SRC (temp), + immed_wide_int_const (o, GET_MODE (SET_DEST (temp)))); newpat = PATTERN (i2); @@ -5126,7 +5119,7 @@ subst (rtx x, rtx from, rtx to, int in_dest, int in_cond, int unique_copy) if (! 
x) x = gen_rtx_CLOBBER (mode, const0_rtx); } - else if (CONST_INT_P (new_rtx) + else if (CONST_SCALAR_INT_P (new_rtx) && GET_CODE (x) == ZERO_EXTEND) { x = simplify_unary_operation (ZERO_EXTEND, GET_MODE (x), diff --git a/gcc/config/aarch64/aarch64.c b/gcc/config/aarch64/aarch64.c index d553af87dc6..a0b532ce81e 100644 --- a/gcc/config/aarch64/aarch64.c +++ b/gcc/config/aarch64/aarch64.c @@ -6020,18 +6020,18 @@ aapcs_vfp_sub_candidate (const_tree type, enum machine_mode *modep) if (count == -1 || !index || !TYPE_MAX_VALUE (index) - || !host_integerp (TYPE_MAX_VALUE (index), 1) + || !tree_fits_uhwi_p (TYPE_MAX_VALUE (index)) || !TYPE_MIN_VALUE (index) - || !host_integerp (TYPE_MIN_VALUE (index), 1) + || !tree_fits_uhwi_p (TYPE_MIN_VALUE (index)) || count < 0) return -1; - count *= (1 + tree_low_cst (TYPE_MAX_VALUE (index), 1) - - tree_low_cst (TYPE_MIN_VALUE (index), 1)); + count *= (1 + tree_to_uhwi (TYPE_MAX_VALUE (index)) + - tree_to_uhwi (TYPE_MIN_VALUE (index))); /* There must be no padding. */ - if (!host_integerp (TYPE_SIZE (type), 1) - || (tree_low_cst (TYPE_SIZE (type), 1) + if (!tree_fits_uhwi_p (TYPE_SIZE (type)) + || (tree_to_uhwi (TYPE_SIZE (type)) != count * GET_MODE_BITSIZE (*modep))) return -1; @@ -6060,8 +6060,8 @@ aapcs_vfp_sub_candidate (const_tree type, enum machine_mode *modep) } /* There must be no padding. */ - if (!host_integerp (TYPE_SIZE (type), 1) - || (tree_low_cst (TYPE_SIZE (type), 1) + if (!tree_fits_uhwi_p (TYPE_SIZE (type)) + || (tree_to_uhwi (TYPE_SIZE (type)) != count * GET_MODE_BITSIZE (*modep))) return -1; @@ -6092,8 +6092,8 @@ aapcs_vfp_sub_candidate (const_tree type, enum machine_mode *modep) } /* There must be no padding. 
 */ - if (!host_integerp (TYPE_SIZE (type), 1) - || (tree_low_cst (TYPE_SIZE (type), 1) + if (!tree_fits_uhwi_p (TYPE_SIZE (type)) + || (tree_to_uhwi (TYPE_SIZE (type)) != count * GET_MODE_BITSIZE (*modep))) return -1; @@ -6853,7 +6853,7 @@ aarch64_simd_attr_length_move (rtx insn) static HOST_WIDE_INT aarch64_simd_vector_alignment (const_tree type) { - HOST_WIDE_INT align = tree_low_cst (TYPE_SIZE (type), 0); + HOST_WIDE_INT align = tree_to_shwi (TYPE_SIZE (type)); return MIN (align, 128); } @@ -7432,8 +7432,9 @@ aarch64_float_const_representable_p (rtx x) int point_pos = 2 * HOST_BITS_PER_WIDE_INT - 1; int exponent; unsigned HOST_WIDE_INT mantissa, mask; - HOST_WIDE_INT m1, m2; REAL_VALUE_TYPE r, m; + bool fail; + wide_int w; if (!CONST_DOUBLE_P (x)) return false; @@ -7457,16 +7457,16 @@ aarch64_float_const_representable_p (rtx x) WARNING: If we ever have a representation using more than 2 * H_W_I - 1 bits for the mantissa, this can fail (low bits will be lost). */ real_ldexp (&m, &r, point_pos - exponent); - REAL_VALUE_TO_INT (&m1, &m2, m); + w = real_to_integer (m, &fail, HOST_BITS_PER_WIDE_INT * 2); /* If the low part of the mantissa has bits set we cannot represent the value. */ - if (m1 != 0) + if (w.elt (0) != 0) return false; /* We have rejected the lower HOST_WIDE_INT, so update our understanding of how many bits lie in the mantissa and look only at the high HOST_WIDE_INT. */ - mantissa = m2; + mantissa = w.elt (1); point_pos -= HOST_BITS_PER_WIDE_INT; /* We can only represent values with a mantissa of the form 1.xxxx. 
*/ diff --git a/gcc/config/alpha/alpha.c b/gcc/config/alpha/alpha.c index e39fc7731e6..8158a4e81ad 100644 --- a/gcc/config/alpha/alpha.c +++ b/gcc/config/alpha/alpha.c @@ -5859,7 +5859,7 @@ va_list_skip_additions (tree lhs) if (!CONVERT_EXPR_CODE_P (code) && ((code != PLUS_EXPR && code != POINTER_PLUS_EXPR) || TREE_CODE (gimple_assign_rhs2 (stmt)) != INTEGER_CST - || !host_integerp (gimple_assign_rhs2 (stmt), 1))) + || !tree_fits_uhwi_p (gimple_assign_rhs2 (stmt)))) return stmt; lhs = gimple_assign_rhs1 (stmt); @@ -5985,10 +5985,10 @@ alpha_stdarg_optimize_hook (struct stdarg_info *si, const_gimple stmt) else goto escapes; - if (!host_integerp (gimple_assign_rhs2 (arg2_stmt), 0)) + if (!tree_fits_shwi_p (gimple_assign_rhs2 (arg2_stmt))) goto escapes; - sub = tree_low_cst (gimple_assign_rhs2 (arg2_stmt), 0); + sub = tree_to_shwi (gimple_assign_rhs2 (arg2_stmt)); if (code2 == MINUS_EXPR) sub = -sub; if (sub < -48 || sub > -32) diff --git a/gcc/config/alpha/predicates.md b/gcc/config/alpha/predicates.md index a63d1254a6f..8a2166c1e03 100644 --- a/gcc/config/alpha/predicates.md +++ b/gcc/config/alpha/predicates.md @@ -357,7 +357,7 @@ && !SYMBOL_REF_TLS_MODEL (op)) { if (SYMBOL_REF_DECL (op)) - max_ofs = tree_low_cst (DECL_SIZE_UNIT (SYMBOL_REF_DECL (op)), 1); + max_ofs = tree_to_uhwi (DECL_SIZE_UNIT (SYMBOL_REF_DECL (op))); } else return false; diff --git a/gcc/config/arm/arm.c b/gcc/config/arm/arm.c index f4ce58bc935..db0d7fa23f4 100644 --- a/gcc/config/arm/arm.c +++ b/gcc/config/arm/arm.c @@ -4668,18 +4668,18 @@ aapcs_vfp_sub_candidate (const_tree type, enum machine_mode *modep) if (count == -1 || !index || !TYPE_MAX_VALUE (index) - || !host_integerp (TYPE_MAX_VALUE (index), 1) + || !tree_fits_uhwi_p (TYPE_MAX_VALUE (index)) || !TYPE_MIN_VALUE (index) - || !host_integerp (TYPE_MIN_VALUE (index), 1) + || !tree_fits_uhwi_p (TYPE_MIN_VALUE (index)) || count < 0) return -1; - count *= (1 + tree_low_cst (TYPE_MAX_VALUE (index), 1) - - tree_low_cst (TYPE_MIN_VALUE (index), 
1)); + count *= (1 + tree_to_uhwi (TYPE_MAX_VALUE (index)) + - tree_to_uhwi (TYPE_MIN_VALUE (index))); /* There must be no padding. */ - if (!host_integerp (TYPE_SIZE (type), 1) - || (tree_low_cst (TYPE_SIZE (type), 1) + if (!tree_fits_uhwi_p (TYPE_SIZE (type)) + || (tree_to_uhwi (TYPE_SIZE (type)) != count * GET_MODE_BITSIZE (*modep))) return -1; @@ -4708,8 +4708,8 @@ aapcs_vfp_sub_candidate (const_tree type, enum machine_mode *modep) } /* There must be no padding. */ - if (!host_integerp (TYPE_SIZE (type), 1) - || (tree_low_cst (TYPE_SIZE (type), 1) + if (!tree_fits_uhwi_p (TYPE_SIZE (type)) + || (tree_to_uhwi (TYPE_SIZE (type)) != count * GET_MODE_BITSIZE (*modep))) return -1; @@ -4740,8 +4740,8 @@ aapcs_vfp_sub_candidate (const_tree type, enum machine_mode *modep) } /* There must be no padding. */ - if (!host_integerp (TYPE_SIZE (type), 1) - || (tree_low_cst (TYPE_SIZE (type), 1) + if (!tree_fits_uhwi_p (TYPE_SIZE (type)) + || (tree_to_uhwi (TYPE_SIZE (type)) != count * GET_MODE_BITSIZE (*modep))) return -1; @@ -11278,8 +11278,9 @@ vfp3_const_double_index (rtx x) int sign, exponent; unsigned HOST_WIDE_INT mantissa, mant_hi; unsigned HOST_WIDE_INT mask; - HOST_WIDE_INT m1, m2; int point_pos = 2 * HOST_BITS_PER_WIDE_INT - 1; + bool fail; + wide_int w; if (!TARGET_VFP3 || !CONST_DOUBLE_P (x)) return -1; @@ -11299,9 +11300,9 @@ vfp3_const_double_index (rtx x) WARNING: If there's ever a VFP version which uses more than 2 * H_W_I - 1 bits for the mantissa, this may fail (low bits would be lost). */ real_ldexp (&m, &r, point_pos - exponent); - REAL_VALUE_TO_INT (&m1, &m2, m); - mantissa = m1; - mant_hi = m2; + w = real_to_integer (m, &fail, HOST_BITS_PER_WIDE_INT * 2); + mantissa = w.elt (0); + mant_hi = w.elt (1); /* If there are bits set in the low part of the mantissa, we can't represent this value. 
*/ @@ -28749,7 +28750,7 @@ arm_builtin_vectorized_function (tree fndecl, tree type_out, tree type_in) static HOST_WIDE_INT arm_vector_alignment (const_tree type) { - HOST_WIDE_INT align = tree_low_cst (TYPE_SIZE (type), 0); + HOST_WIDE_INT align = tree_to_shwi (TYPE_SIZE (type)); if (TARGET_AAPCS_BASED) align = MIN (align, 64); diff --git a/gcc/config/avr/avr-log.c b/gcc/config/avr/avr-log.c index 9e538e60124..18215679b71 100644 --- a/gcc/config/avr/avr-log.c +++ b/gcc/config/avr/avr-log.c @@ -142,28 +142,27 @@ avr_log_set_caller_f (const char *caller) Splits last digit of *CST (taken as unsigned) in BASE and returns it. */ static unsigned -avr_double_int_pop_digit (double_int *cst, unsigned base) +avr_wide_int_pop_digit (wide_int *cst, unsigned base) { - double_int drem; + wide_int wrem; - *cst = cst->udivmod (double_int::from_uhwi (base), (int) FLOOR_DIV_EXPR, - &drem); + *cst = cst->udivmod_floor (base, &wrem); - return (unsigned) drem.to_uhwi(); + return (unsigned) wrem.to_uhwi(); } /* Dump VAL as hex value to FILE. 
*/ static void -avr_dump_double_int_hex (FILE *file, double_int val) +avr_dump_wide_int_hex (FILE *file, wide_int val) { unsigned digit[4]; - digit[0] = avr_double_int_pop_digit (&val, 1 << 16); - digit[1] = avr_double_int_pop_digit (&val, 1 << 16); - digit[2] = avr_double_int_pop_digit (&val, 1 << 16); - digit[3] = avr_double_int_pop_digit (&val, 1 << 16); + digit[0] = avr_wide_int_pop_digit (&val, 1 << 16); + digit[1] = avr_wide_int_pop_digit (&val, 1 << 16); + digit[2] = avr_wide_int_pop_digit (&val, 1 << 16); + digit[3] = avr_wide_int_pop_digit (&val, 1 << 16); fprintf (file, "0x"); @@ -232,7 +231,7 @@ avr_log_vadump (FILE *file, const char *fmt, va_list ap) break; case 'D': - dump_double_int (file, va_arg (ap, double_int), false); + dump_double_int (file, va_arg (ap, double_int), false); break; case 'X': diff --git a/gcc/config/avr/avr.c b/gcc/config/avr/avr.c index f6d88856bec..a3b3c90c8b8 100644 --- a/gcc/config/avr/avr.c +++ b/gcc/config/avr/avr.c @@ -11367,24 +11367,24 @@ avr_expand_delay_cycles (rtx operands0) /* Return VAL * BASE + DIGIT. BASE = 0 is shortcut for BASE = 2^{32} */ -static double_int -avr_double_int_push_digit (double_int val, int base, - unsigned HOST_WIDE_INT digit) +static wide_int +avr_wide_int_push_digit (wide_int val, int base, + unsigned HOST_WIDE_INT digit) { val = 0 == base - ? val.llshift (32, 64) - : val * double_int::from_uhwi (base); + ? val.llshift (32) + : val * base; - return val + double_int::from_uhwi (digit); + return val + digit; } /* Compute the image of x under f, i.e. 
perform x --> f(x) */ static int -avr_map (double_int f, int x) +avr_map (wide_int f, int x) { - return 0xf & f.lrshift (4*x, 64).to_uhwi (); + return 0xf & f.lrshift (4*x).to_uhwi (); } @@ -11409,7 +11409,7 @@ enum }; static unsigned -avr_map_metric (double_int a, int mode) +avr_map_metric (wide_int a, int mode) { unsigned i, metric = 0; @@ -11442,7 +11442,7 @@ avr_map_metric (double_int a, int mode) bool avr_has_nibble_0xf (rtx ival) { - return 0 != avr_map_metric (rtx_to_double_int (ival), MAP_MASK_PREIMAGE_F); + return 0 != avr_map_metric (wide_int::from_rtx (ival), MAP_MASK_PREIMAGE_F); } @@ -11476,7 +11476,7 @@ typedef struct int cost; /* The composition F o G^-1 (*, arg) for some function F */ - double_int map; + wide_int map; /* For debug purpose only */ const char *str; @@ -11507,12 +11507,12 @@ static const avr_map_op_t avr_map_op[] = If result.cost < 0 then such a decomposition does not exist. */ static avr_map_op_t -avr_map_decompose (double_int f, const avr_map_op_t *g, bool val_const_p) +avr_map_decompose (wide_int f, const avr_map_op_t *g, bool val_const_p) { int i; bool val_used_p = 0 != avr_map_metric (f, MAP_MASK_PREIMAGE_F); avr_map_op_t f_ginv = *g; - double_int ginv = double_int::from_uhwi (g->ginv); + wide_int ginv = wide_int::from_uhwi (g->ginv); f_ginv.cost = -1; @@ -11532,7 +11532,7 @@ avr_map_decompose (double_int f, const avr_map_op_t *g, bool val_const_p) return f_ginv; } - f_ginv.map = avr_double_int_push_digit (f_ginv.map, 16, x); + f_ginv.map = avr_wide_int_push_digit (f_ginv.map, 16, x); } /* Step 2: Compute the cost of the operations. @@ -11584,7 +11584,7 @@ avr_map_decompose (double_int f, const avr_map_op_t *g, bool val_const_p) is different to its source position. 
*/ static void -avr_move_bits (rtx *xop, double_int map, bool fixp_p, int *plen) +avr_move_bits (rtx *xop, wide_int map, bool fixp_p, int *plen) { int bit_dest, b; @@ -11637,7 +11637,7 @@ avr_move_bits (rtx *xop, double_int map, bool fixp_p, int *plen) const char* avr_out_insert_bits (rtx *op, int *plen) { - double_int map = rtx_to_double_int (op[1]); + wide_int map = wide_int::from_rtx (op[1]); unsigned mask_fixed; bool fixp_p = true; rtx xop[4]; @@ -12085,7 +12085,7 @@ avr_expand_builtin (tree exp, rtx target, if (TREE_CODE (CALL_EXPR_ARG (exp, 1)) != INTEGER_CST) break; - int rbit = (int) TREE_INT_CST_LOW (CALL_EXPR_ARG (exp, 1)); + int rbit = (int) tree_to_hwi (CALL_EXPR_ARG (exp, 1)); if (rbit >= (int) GET_MODE_FBIT (mode)) { @@ -12228,7 +12228,7 @@ avr_fold_builtin (tree fndecl, int n_args ATTRIBUTE_UNUSED, tree *arg, tree tval = arg[2]; tree tmap; tree map_type = TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (fndecl))); - double_int map; + wide_int map; bool changed = false; unsigned i; avr_map_op_t best_g; @@ -12241,8 +12241,8 @@ avr_fold_builtin (tree fndecl, int n_args ATTRIBUTE_UNUSED, tree *arg, break; } - map = tree_to_double_int (arg[0]); - tmap = double_int_to_tree (map_type, map); + map = wide_int::from_tree (arg[0]); + tmap = wide_int_to_tree (map_type, map); if (TREE_CODE (tval) != INTEGER_CST && 0 == avr_map_metric (map, MAP_MASK_PREIMAGE_F)) @@ -12269,7 +12269,7 @@ avr_fold_builtin (tree fndecl, int n_args ATTRIBUTE_UNUSED, tree *arg, /* Inserting bits known at compile time is easy and can be performed by AND and OR with appropriate masks. */ - int bits = TREE_INT_CST_LOW (tbits); + int bits = tree_to_hwi (tbits); int mask_ior = 0, mask_and = 0xff; for (i = 0; i < 8; i++) @@ -12346,7 +12346,7 @@ avr_fold_builtin (tree fndecl, int n_args ATTRIBUTE_UNUSED, tree *arg, /* Use map o G^-1 instead of original map to undo the effect of G. 
 */ - tmap = double_int_to_tree (map_type, best_g.map); + tmap = wide_int_to_tree (map_type, best_g.map); return build_call_expr (fndecl, 3, tmap, tbits, tval); } /* AVR_BUILTIN_INSERT_BITS */ diff --git a/gcc/config/bfin/bfin.c b/gcc/config/bfin/bfin.c index 558f4c18bef..71d9817fea1 100644 --- a/gcc/config/bfin/bfin.c +++ b/gcc/config/bfin/bfin.c @@ -3286,8 +3286,8 @@ bfin_local_alignment (tree type, unsigned align) memcpy can use 32 bit loads/stores. */ if (TYPE_SIZE (type) && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST - && (TREE_INT_CST_LOW (TYPE_SIZE (type)) > 8 - || TREE_INT_CST_HIGH (TYPE_SIZE (type))) && align < 32) + && wi::gtu_p (TYPE_SIZE (type), 8) + && align < 32) return 32; return align; } diff --git a/gcc/config/c6x/predicates.md b/gcc/config/c6x/predicates.md index 1a2fe8f69f5..fbcbdd02457 100644 --- a/gcc/config/c6x/predicates.md +++ b/gcc/config/c6x/predicates.md @@ -210,9 +210,9 @@ t = DECL_SIZE_UNIT (t); else t = TYPE_SIZE_UNIT (TREE_TYPE (t)); - if (t && host_integerp (t, 0)) + if (t && tree_fits_shwi_p (t)) { - size = tree_low_cst (t, 0); + size = tree_to_shwi (t); if (size < 0) size = 0; } diff --git a/gcc/config/darwin.c b/gcc/config/darwin.c index cb1bc38a3be..5372f4a03f8 100644 --- a/gcc/config/darwin.c +++ b/gcc/config/darwin.c @@ -1284,22 +1284,18 @@ darwin_mergeable_constant_section (tree exp, { tree size = TYPE_SIZE_UNIT (TREE_TYPE (exp)); - if (TREE_CODE (size) == INTEGER_CST - && TREE_INT_CST_LOW (size) == 4 - && TREE_INT_CST_HIGH (size) == 0) - return darwin_sections[literal4_section]; - else if (TREE_CODE (size) == INTEGER_CST - && TREE_INT_CST_LOW (size) == 8 - && TREE_INT_CST_HIGH (size) == 0) - return darwin_sections[literal8_section]; - else if (HAVE_GAS_LITERAL16 - && TARGET_64BIT - && TREE_CODE (size) == INTEGER_CST - && TREE_INT_CST_LOW (size) == 16 - && TREE_INT_CST_HIGH (size) == 0) - return darwin_sections[literal16_section]; - else - return readonly_data_section; + if (TREE_CODE (size) == INTEGER_CST) + { + wide_int 
wsize = size; + if (wsize == 4) + return darwin_sections[literal4_section]; + else if (wsize == 8) + return darwin_sections[literal8_section]; + else if (HAVE_GAS_LITERAL16 + && TARGET_64BIT + && wsize == 16) + return darwin_sections[literal16_section]; + } } return readonly_data_section; @@ -1507,7 +1503,7 @@ machopic_select_section (tree decl, zsize = (DECL_P (decl) && (TREE_CODE (decl) == VAR_DECL || TREE_CODE (decl) == CONST_DECL) - && tree_low_cst (DECL_SIZE_UNIT (decl), 1) == 0); + && tree_to_uhwi (DECL_SIZE_UNIT (decl)) == 0); one = DECL_P (decl) && TREE_CODE (decl) == VAR_DECL @@ -1648,7 +1644,7 @@ machopic_select_section (tree decl, static bool warned_objc_46 = false; /* We shall assert that zero-sized objects are an error in ObjC meta-data. */ - gcc_assert (tree_low_cst (DECL_SIZE_UNIT (decl), 1) != 0); + gcc_assert (tree_to_uhwi (DECL_SIZE_UNIT (decl)) != 0); /* ??? This mechanism for determining the metadata section is broken when LTO is in use, since the frontend that generated @@ -1726,16 +1722,19 @@ machopic_select_rtx_section (enum machine_mode mode, rtx x, { if (GET_MODE_SIZE (mode) == 8 && (GET_CODE (x) == CONST_INT + || GET_CODE (x) == CONST_WIDE_INT || GET_CODE (x) == CONST_DOUBLE)) return darwin_sections[literal8_section]; else if (GET_MODE_SIZE (mode) == 4 && (GET_CODE (x) == CONST_INT + || GET_CODE (x) == CONST_WIDE_INT || GET_CODE (x) == CONST_DOUBLE)) return darwin_sections[literal4_section]; else if (HAVE_GAS_LITERAL16 && TARGET_64BIT && GET_MODE_SIZE (mode) == 16 && (GET_CODE (x) == CONST_INT + || GET_CODE (x) == CONST_WIDE_INT || GET_CODE (x) == CONST_DOUBLE || GET_CODE (x) == CONST_VECTOR)) return darwin_sections[literal16_section]; @@ -2185,7 +2184,7 @@ darwin_asm_declare_object_name (FILE *file, machopic_define_symbol (DECL_RTL (decl)); } - size = tree_low_cst (DECL_SIZE_UNIT (decl), 1); + size = tree_to_uhwi (DECL_SIZE_UNIT (decl)); #ifdef DEBUG_DARWIN_MEM_ALLOCATORS fprintf (file, "# dadon: %s %s (%llu, %u) local %d weak %d" diff 
--git a/gcc/config/epiphany/epiphany.c b/gcc/config/epiphany/epiphany.c index fd4c01c49a4..f07f15e888c 100644 --- a/gcc/config/epiphany/epiphany.c +++ b/gcc/config/epiphany/epiphany.c @@ -2758,11 +2758,11 @@ epiphany_special_round_type_align (tree type, unsigned computed, continue; offset = bit_position (field); size = DECL_SIZE (field); - if (!host_integerp (offset, 1) || !host_integerp (size, 1) - || TREE_INT_CST_LOW (offset) >= try_align - || TREE_INT_CST_LOW (size) >= try_align) + if (!tree_fits_uhwi_p (offset) || !tree_fits_uhwi_p (size) + || tree_to_uhwi (offset) >= try_align + || tree_to_uhwi (size) >= try_align) return try_align; - total = TREE_INT_CST_LOW (offset) + TREE_INT_CST_LOW (size); + total = tree_to_hwi (offset) + tree_to_hwi (size); if (total > max) max = total; } @@ -2785,7 +2785,7 @@ epiphany_adjust_field_align (tree field, unsigned computed) { tree elmsz = TYPE_SIZE (TREE_TYPE (TREE_TYPE (field))); - if (!host_integerp (elmsz, 1) || tree_low_cst (elmsz, 1) >= 32) + if (!tree_fits_uhwi_p (elmsz) || tree_to_uhwi (elmsz) >= 32) return 64; } return computed; diff --git a/gcc/config/i386/i386.c b/gcc/config/i386/i386.c index 54bd5f21d2c..4b69d4bf026 100644 --- a/gcc/config/i386/i386.c +++ b/gcc/config/i386/i386.c @@ -61,6 +61,7 @@ along with GCC; see the file COPYING3. 
If not see #include "diagnostic.h" #include "dumpfile.h" #include "tree-pass.h" +#include "wide-int.h" #include "context.h" #include "pass_manager.h" @@ -5360,7 +5361,7 @@ ix86_function_regparm (const_tree type, const_tree decl) attr = lookup_attribute ("regparm", TYPE_ATTRIBUTES (type)); if (attr) { - regparm = TREE_INT_CST_LOW (TREE_VALUE (TREE_VALUE (attr))); + regparm = tree_to_hwi (TREE_VALUE (TREE_VALUE (attr))); return regparm; } } @@ -5487,7 +5488,7 @@ ix86_keep_aggregate_return_pointer (tree fntype) attr = lookup_attribute ("callee_pop_aggregate_return", TYPE_ATTRIBUTES (fntype)); if (attr) - return (TREE_INT_CST_LOW (TREE_VALUE (TREE_VALUE (attr))) == 0); + return (tree_to_hwi (TREE_VALUE (TREE_VALUE (attr))) == 0); /* For 32-bit MS-ABI the default is to keep aggregate return pointer. */ @@ -6156,7 +6157,7 @@ classify_argument (enum machine_mode mode, const_tree type, for (i = (int_bit_position (field) + (bit_offset % 64)) / 8 / 8; i < ((int_bit_position (field) + (bit_offset % 64)) - + tree_low_cst (DECL_SIZE (field), 0) + + tree_to_shwi (DECL_SIZE (field)) + 63) / 8 / 8; i++) classes[i] = merge_classes (X86_64_INTEGER_CLASS, @@ -25595,8 +25596,7 @@ ix86_data_alignment (tree type, int align, bool opt) && AGGREGATE_TYPE_P (type) && TYPE_SIZE (type) && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST - && (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= (unsigned) max_align - || TREE_INT_CST_HIGH (TYPE_SIZE (type))) + && wi::geu_p (TYPE_SIZE (type), max_align) && align < max_align) align = max_align; @@ -25607,8 +25607,8 @@ ix86_data_alignment (tree type, int align, bool opt) if ((opt ? 
AGGREGATE_TYPE_P (type) : TREE_CODE (type) == ARRAY_TYPE) && TYPE_SIZE (type) && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST - && (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= 128 - || TREE_INT_CST_HIGH (TYPE_SIZE (type))) && align < 128) + && wi::geu_p (TYPE_SIZE (type), 128) + && align < 128) return 128; } @@ -25717,13 +25717,13 @@ ix86_local_alignment (tree exp, enum machine_mode mode, && TARGET_SSE) { if (AGGREGATE_TYPE_P (type) - && (va_list_type_node == NULL_TREE - || (TYPE_MAIN_VARIANT (type) - != TYPE_MAIN_VARIANT (va_list_type_node))) - && TYPE_SIZE (type) - && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST - && (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= 16 - || TREE_INT_CST_HIGH (TYPE_SIZE (type))) && align < 128) + && (va_list_type_node == NULL_TREE + || (TYPE_MAIN_VARIANT (type) + != TYPE_MAIN_VARIANT (va_list_type_node))) + && TYPE_SIZE (type) + && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST + && wi::geu_p (TYPE_SIZE (type), 16) + && align < 128) return 128; } if (TREE_CODE (type) == ARRAY_TYPE) @@ -28743,7 +28743,7 @@ ix86_builtin_tm_load (tree type) { if (TREE_CODE (type) == VECTOR_TYPE) { - switch (tree_low_cst (TYPE_SIZE (type), 1)) + switch (tree_to_uhwi (TYPE_SIZE (type))) { case 64: return builtin_decl_explicit (BUILT_IN_TM_LOAD_M64); @@ -28763,7 +28763,7 @@ ix86_builtin_tm_store (tree type) { if (TREE_CODE (type) == VECTOR_TYPE) { - switch (tree_low_cst (TYPE_SIZE (type), 1)) + switch (tree_to_uhwi (TYPE_SIZE (type))) { case 64: return builtin_decl_explicit (BUILT_IN_TM_STORE_M64); @@ -32077,8 +32077,8 @@ get_element_number (tree vec_type, tree arg) { unsigned HOST_WIDE_INT elt, max = TYPE_VECTOR_SUBPARTS (vec_type) - 1; - if (!host_integerp (arg, 1) - || (elt = tree_low_cst (arg, 1), elt > max)) + if (!tree_fits_uhwi_p (arg) + || (elt = tree_to_uhwi (arg), elt > max)) { error ("selector must be an integer constant in the range 0..%wi", max); return 0; @@ -38109,7 +38109,7 @@ void ix86_emit_swsqrtsf (rtx res, rtx a, enum machine_mode mode, e2 = gen_reg_rtx 
(mode); e3 = gen_reg_rtx (mode); - real_from_integer (&r, VOIDmode, -3, -1, 0); + real_from_integer (&r, VOIDmode, -3, SIGNED); mthree = CONST_DOUBLE_FROM_REAL_VALUE (r, SFmode); real_arithmetic (&r, NEGATE_EXPR, &dconsthalf, NULL); diff --git a/gcc/config/ia64/predicates.md b/gcc/config/ia64/predicates.md index af7bc8ee4c2..31530be906d 100644 --- a/gcc/config/ia64/predicates.md +++ b/gcc/config/ia64/predicates.md @@ -72,9 +72,9 @@ t = DECL_SIZE_UNIT (t); else t = TYPE_SIZE_UNIT (TREE_TYPE (t)); - if (t && host_integerp (t, 0)) + if (t && tree_fits_shwi_p (t)) { - size = tree_low_cst (t, 0); + size = tree_to_shwi (t); if (size < 0) size = 0; } diff --git a/gcc/config/iq2000/iq2000.c b/gcc/config/iq2000/iq2000.c index da6f757d6f9..e65d0ccdc03 100644 --- a/gcc/config/iq2000/iq2000.c +++ b/gcc/config/iq2000/iq2000.c @@ -1279,7 +1279,7 @@ iq2000_function_arg (cumulative_args_t cum_v, enum machine_mode mode, if (! type || TREE_CODE (type) != RECORD_TYPE || ! named || ! TYPE_SIZE_UNIT (type) - || ! host_integerp (TYPE_SIZE_UNIT (type), 1)) + || ! tree_fits_uhwi_p (TYPE_SIZE_UNIT (type))) ret = gen_rtx_REG (mode, regbase + *arg_words + bias); else { @@ -1289,7 +1289,7 @@ iq2000_function_arg (cumulative_args_t cum_v, enum machine_mode mode, if (TREE_CODE (field) == FIELD_DECL && TREE_CODE (TREE_TYPE (field)) == REAL_TYPE && TYPE_PRECISION (TREE_TYPE (field)) == BITS_PER_WORD - && host_integerp (bit_position (field), 0) + && tree_fits_shwi_p (bit_position (field)) && int_bit_position (field) % BITS_PER_WORD == 0) break; @@ -1307,7 +1307,7 @@ iq2000_function_arg (cumulative_args_t cum_v, enum machine_mode mode, /* ??? If this is a packed structure, then the last hunk won't be 64 bits. 
*/ chunks - = tree_low_cst (TYPE_SIZE_UNIT (type), 1) / UNITS_PER_WORD; + = tree_to_uhwi (TYPE_SIZE_UNIT (type)) / UNITS_PER_WORD; if (chunks + *arg_words + bias > (unsigned) MAX_ARGS_IN_REGISTERS) chunks = MAX_ARGS_IN_REGISTERS - *arg_words - bias; diff --git a/gcc/config/m32c/m32c-pragma.c b/gcc/config/m32c/m32c-pragma.c index 6b0d05a8aaf..aa16a30453d 100644 --- a/gcc/config/m32c/m32c-pragma.c +++ b/gcc/config/m32c/m32c-pragma.c @@ -46,9 +46,9 @@ m32c_pragma_memregs (cpp_reader * reader ATTRIBUTE_UNUSED) type = pragma_lex (&val); if (type == CPP_NUMBER) { - if (host_integerp (val, 1)) + if (tree_fits_uhwi_p (val)) { - i = tree_low_cst (val, 1); + i = tree_to_uhwi (val); type = pragma_lex (&val); if (type != CPP_EOF) @@ -95,7 +95,7 @@ m32c_pragma_address (cpp_reader * reader ATTRIBUTE_UNUSED) { if (var != error_mark_node) { - unsigned uaddr = tree_low_cst (addr, 1); + unsigned uaddr = tree_to_uhwi (addr); m32c_note_pragma_address (IDENTIFIER_POINTER (var), uaddr); } diff --git a/gcc/config/m32c/m32c.c b/gcc/config/m32c/m32c.c index deac40c228f..f78037446fd 100644 --- a/gcc/config/m32c/m32c.c +++ b/gcc/config/m32c/m32c.c @@ -2935,8 +2935,8 @@ function_vector_handler (tree * node ATTRIBUTE_UNUSED, name); *no_add_attrs = true; } - else if (TREE_INT_CST_LOW (TREE_VALUE (args)) < 18 - || TREE_INT_CST_LOW (TREE_VALUE (args)) > 255) + else if (tree_to_hwi (TREE_VALUE (args)) < 18 + || tree_to_hwi (TREE_VALUE (args)) > 255) { /* The argument value must be between 18 to 255. 
*/ warning (OPT_Wattributes, @@ -2968,7 +2968,7 @@ current_function_special_page_vector (rtx x) { if (is_attribute_p ("function_vector", TREE_PURPOSE (list))) { - num = TREE_INT_CST_LOW (TREE_VALUE (TREE_VALUE (list))); + num = tree_to_hwi (TREE_VALUE (TREE_VALUE (list))); return num; } diff --git a/gcc/config/mep/mep-pragma.c b/gcc/config/mep/mep-pragma.c index 8a9c577f5a9..45a4b4496a4 100644 --- a/gcc/config/mep/mep-pragma.c +++ b/gcc/config/mep/mep-pragma.c @@ -232,9 +232,9 @@ mep_pragma_coprocessor_width (void) switch (type) { case CPP_NUMBER: - if (! host_integerp (val, 1)) + if (! tree_fits_uhwi_p (val)) break; - i = tree_low_cst (val, 1); + i = tree_to_uhwi (val); /* This pragma no longer has any effect. */ #if 0 if (i == 32) @@ -273,7 +273,7 @@ mep_pragma_coprocessor_subclass (void) type = mep_pragma_lex (&val); if (type != CPP_CHAR) goto syntax_error; - class_letter = tree_low_cst (val, 1); + class_letter = tree_to_uhwi (val); if (class_letter >= 'A' && class_letter <= 'D') switch (class_letter) { diff --git a/gcc/config/mep/mep.c b/gcc/config/mep/mep.c index d082070eef6..16de72681fa 100644 --- a/gcc/config/mep/mep.c +++ b/gcc/config/mep/mep.c @@ -4209,7 +4209,7 @@ mep_attrlist_to_encoding (tree list, tree decl) && TREE_VALUE (TREE_VALUE (list)) && TREE_CODE (TREE_VALUE (TREE_VALUE (list))) == INTEGER_CST) { - int location = TREE_INT_CST_LOW (TREE_VALUE (TREE_VALUE(list))); + int location = tree_to_hwi (TREE_VALUE (TREE_VALUE(list))); if (location >= 0 && location <= 0x1000000) return 'i'; @@ -4298,7 +4298,7 @@ mep_insert_attributes (tree decl, tree *attributes) && TREE_VALUE (attr) && TREE_VALUE (TREE_VALUE(attr))) { - int location = TREE_INT_CST_LOW (TREE_VALUE (TREE_VALUE(attr))); + int location = tree_to_hwi (TREE_VALUE (TREE_VALUE(attr))); static tree previous_value = 0; static int previous_location = 0; static tree previous_name = 0; @@ -4714,7 +4714,7 @@ mep_output_aligned_common (FILE *stream, tree decl, const char *name, if (attr && TREE_VALUE 
(attr) && TREE_VALUE (TREE_VALUE(attr))) - location = TREE_INT_CST_LOW (TREE_VALUE (TREE_VALUE(attr))); + location = tree_to_hwi (TREE_VALUE (TREE_VALUE(attr))); if (location == -1) return; if (global) diff --git a/gcc/config/mips/mips.c b/gcc/config/mips/mips.c index 5993aabe578..5040b40def5 100644 --- a/gcc/config/mips/mips.c +++ b/gcc/config/mips/mips.c @@ -5136,7 +5136,7 @@ mips_function_arg (cumulative_args_t cum_v, enum machine_mode mode, && type != 0 && TREE_CODE (type) == RECORD_TYPE && TYPE_SIZE_UNIT (type) - && host_integerp (TYPE_SIZE_UNIT (type), 1)) + && tree_fits_uhwi_p (TYPE_SIZE_UNIT (type))) { tree field; @@ -5145,7 +5145,7 @@ mips_function_arg (cumulative_args_t cum_v, enum machine_mode mode, if (TREE_CODE (field) == FIELD_DECL && SCALAR_FLOAT_TYPE_P (TREE_TYPE (field)) && TYPE_PRECISION (TREE_TYPE (field)) == BITS_PER_WORD - && host_integerp (bit_position (field), 0) + && tree_fits_shwi_p (bit_position (field)) && int_bit_position (field) % BITS_PER_WORD == 0) break; @@ -14905,7 +14905,7 @@ r10k_safe_address_p (rtx x, rtx insn) a link-time-constant address. 
*/ static bool -r10k_safe_mem_expr_p (tree expr, HOST_WIDE_INT offset) +r10k_safe_mem_expr_p (tree expr, unsigned HOST_WIDE_INT offset) { HOST_WIDE_INT bitoffset, bitsize; tree inner, var_offset; @@ -14918,7 +14918,7 @@ r10k_safe_mem_expr_p (tree expr, HOST_WIDE_INT offset) return false; offset += bitoffset / BITS_PER_UNIT; - return offset >= 0 && offset < tree_low_cst (DECL_SIZE_UNIT (inner), 1); + return offset < tree_to_uhwi (DECL_SIZE_UNIT (inner)); } /* A for_each_rtx callback for which DATA points to the instruction diff --git a/gcc/config/picochip/picochip.c b/gcc/config/picochip/picochip.c index e8575ace160..2e4d0e49758 100644 --- a/gcc/config/picochip/picochip.c +++ b/gcc/config/picochip/picochip.c @@ -809,7 +809,7 @@ picochip_compute_arg_size (const_tree type, enum machine_mode mode) int type_size_in_units = 0; if (type) - type_size_in_units = tree_low_cst (TYPE_SIZE_UNIT (type), 1); + type_size_in_units = tree_to_uhwi (TYPE_SIZE_UNIT (type)); else type_size_in_units = GET_MODE_SIZE (mode); diff --git a/gcc/config/rs6000/darwin.h b/gcc/config/rs6000/darwin.h index d5919c4c71d..5a248a5b085 100644 --- a/gcc/config/rs6000/darwin.h +++ b/gcc/config/rs6000/darwin.h @@ -421,3 +421,4 @@ do { \ /* So far, there is no rs6000_fold_builtin, if one is introduced, then this will need to be modified similar to the x86 case. */ #define TARGET_FOLD_BUILTIN SUBTARGET_FOLD_BUILTIN + diff --git a/gcc/config/rs6000/predicates.md b/gcc/config/rs6000/predicates.md index b5bff044779..97aa1553775 100644 --- a/gcc/config/rs6000/predicates.md +++ b/gcc/config/rs6000/predicates.md @@ -19,7 +19,7 @@ ;; Return 1 for anything except PARALLEL. (define_predicate "any_operand" - (match_code "const_int,const_double,const,symbol_ref,label_ref,subreg,reg,mem")) + (match_code "const_int,const_double,const_wide_int,const,symbol_ref,label_ref,subreg,reg,mem")) ;; Return 1 for any PARALLEL. 
(define_predicate "any_parallel_operand" @@ -596,7 +596,7 @@ ;; Return 1 if operand is constant zero (scalars and vectors). (define_predicate "zero_constant" - (and (match_code "const_int,const_double,const_vector") + (and (match_code "const_int,const_double,const_wide_int,const_vector") (match_test "op == CONST0_RTX (mode)"))) ;; Return 1 if operand is 0.0. @@ -790,7 +790,7 @@ ;; Return 1 if op is a constant that is not a logical operand, but could ;; be split into one. (define_predicate "non_logical_cint_operand" - (and (match_code "const_int,const_double") + (and (match_code "const_int,const_wide_int") (and (not (match_operand 0 "logical_operand")) (match_operand 0 "reg_or_logical_cint_operand")))) @@ -1058,7 +1058,7 @@ ;; Return 1 if this operand is a valid input for a move insn. (define_predicate "input_operand" (match_code "symbol_ref,const,reg,subreg,mem, - const_double,const_vector,const_int") + const_double,const_wide_int,const_vector,const_int") { /* Memory is always valid. */ if (memory_operand (op, mode)) @@ -1071,8 +1071,7 @@ /* Allow any integer constant. */ if (GET_MODE_CLASS (mode) == MODE_INT - && (GET_CODE (op) == CONST_INT - || GET_CODE (op) == CONST_DOUBLE)) + && CONST_SCALAR_INT_P (op)) return 1; /* Allow easy vector constants. */ @@ -1111,7 +1110,7 @@ ;; Return 1 if this operand is a valid input for a vsx_splat insn. 
(define_predicate "splat_input_operand" (match_code "symbol_ref,const,reg,subreg,mem, - const_double,const_vector,const_int") + const_double,const_wide_int,const_vector,const_int") { if (MEM_P (op)) { diff --git a/gcc/config/rs6000/rs6000-c.c b/gcc/config/rs6000/rs6000-c.c index d58e6865193..4cf85dbab36 100644 --- a/gcc/config/rs6000/rs6000-c.c +++ b/gcc/config/rs6000/rs6000-c.c @@ -26,6 +26,7 @@ #include "tm.h" #include "cpplib.h" #include "tree.h" +#include "wide-int.h" #include "c-family/c-common.h" #include "c-family/c-pragma.h" #include "diagnostic-core.h" @@ -4195,8 +4196,7 @@ altivec_resolve_overloaded_builtin (location_t loc, tree fndecl, mode = TYPE_MODE (arg1_type); if ((mode == V2DFmode || mode == V2DImode) && VECTOR_MEM_VSX_P (mode) && TREE_CODE (arg2) == INTEGER_CST - && TREE_INT_CST_HIGH (arg2) == 0 - && (TREE_INT_CST_LOW (arg2) == 0 || TREE_INT_CST_LOW (arg2) == 1)) + && wi::ltu_p (arg2, 2)) { tree call = NULL_TREE; @@ -4280,9 +4280,8 @@ altivec_resolve_overloaded_builtin (location_t loc, tree fndecl, /* If we can use the VSX xxpermdi instruction, use that for insert. */ mode = TYPE_MODE (arg1_type); if ((mode == V2DFmode || mode == V2DImode) && VECTOR_UNIT_VSX_P (mode) - && TREE_CODE (arg2) == INTEGER_CST - && TREE_INT_CST_HIGH (arg2) == 0 - && (TREE_INT_CST_LOW (arg2) == 0 || TREE_INT_CST_LOW (arg2) == 1)) + && tree_fits_uhwi_p (arg2) + && wi::ltu_p (arg2, 2)) { tree call = NULL_TREE; diff --git a/gcc/config/rs6000/rs6000.c b/gcc/config/rs6000/rs6000.c index e0545856543..674ff6027ca 100644 --- a/gcc/config/rs6000/rs6000.c +++ b/gcc/config/rs6000/rs6000.c @@ -4024,7 +4024,7 @@ rs6000_builtin_support_vector_misalignment (enum machine_mode mode, it's word aligned. 
*/ if (rs6000_vector_alignment_reachable (type, is_packed)) { - int element_size = TREE_INT_CST_LOW (TYPE_SIZE (type)); + int element_size = tree_to_hwi (TYPE_SIZE (type)); if (element_size == 64 || element_size == 32) return true; @@ -4844,6 +4844,15 @@ num_insns_constant (rtx op, enum machine_mode mode) else return num_insns_constant_wide (INTVAL (op)); + case CONST_WIDE_INT: + { + int i; + int ins = CONST_WIDE_INT_NUNITS (op) - 1; + for (i = 0; i < CONST_WIDE_INT_NUNITS (op); i++) + ins += num_insns_constant_wide (CONST_WIDE_INT_ELT (op, i)); + return ins; + } + case CONST_DOUBLE: if (mode == SFmode || mode == SDmode) { @@ -5018,8 +5027,8 @@ easy_altivec_constant (rtx op, enum machine_mode mode) if (mode == V2DImode) { - /* In case the compiler is built 32-bit, CONST_DOUBLE constants are not - easy. */ + /* In case the compiler is built 32-bit, CONST_WIDE_INT + constants are not easy. */ if (GET_CODE (CONST_VECTOR_ELT (op, 0)) != CONST_INT || GET_CODE (CONST_VECTOR_ELT (op, 1)) != CONST_INT) return false; @@ -5180,9 +5189,7 @@ paired_expand_vector_init (rtx target, rtx vals) for (i = 0; i < n_elts; ++i) { x = XVECEXP (vals, 0, i); - if (!(CONST_INT_P (x) - || GET_CODE (x) == CONST_DOUBLE - || GET_CODE (x) == CONST_FIXED)) + if (!CONSTANT_P (x)) ++n_var; } if (n_var == 0) @@ -5334,9 +5341,7 @@ rs6000_expand_vector_init (rtx target, rtx vals) for (i = 0; i < n_elts; ++i) { x = XVECEXP (vals, 0, i); - if (!(CONST_INT_P (x) - || GET_CODE (x) == CONST_DOUBLE - || GET_CODE (x) == CONST_FIXED)) + if (!CONSTANT_P (x)) ++n_var, one_var = i; else if (x != CONST0_RTX (inner_mode)) all_const_zero = false; @@ -6092,13 +6097,13 @@ offsettable_ok_by_alignment (rtx op, HOST_WIDE_INT offset, if (!DECL_SIZE_UNIT (decl)) return false; - if (!host_integerp (DECL_SIZE_UNIT (decl), 1)) + if (!tree_fits_uhwi_p (DECL_SIZE_UNIT (decl))) return false; - - dsize = tree_low_cst (DECL_SIZE_UNIT (decl), 1); + + dsize = tree_to_uhwi (DECL_SIZE_UNIT (decl)); if (dsize > 32768) return false; - 
+ return dalign / BITS_PER_UNIT >= dsize; } } @@ -6118,8 +6123,8 @@ offsettable_ok_by_alignment (rtx op, HOST_WIDE_INT offset, if (TREE_CODE (decl) == STRING_CST) dsize = TREE_STRING_LENGTH (decl); else if (TYPE_SIZE_UNIT (type) - && host_integerp (TYPE_SIZE_UNIT (type), 1)) - dsize = tree_low_cst (TYPE_SIZE_UNIT (type), 1); + && tree_fits_uhwi_p (TYPE_SIZE_UNIT (type))) + dsize = tree_to_uhwi (TYPE_SIZE_UNIT (type)); else return false; if (dsize > 32768) @@ -6532,6 +6537,7 @@ rs6000_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED, && TARGET_NO_TOC && ! flag_pic && GET_CODE (x) != CONST_INT + && GET_CODE (x) != CONST_WIDE_INT && GET_CODE (x) != CONST_DOUBLE && CONSTANT_P (x) && GET_MODE_NUNITS (mode) == 1 @@ -7963,21 +7969,12 @@ rs6000_emit_move (rtx dest, rtx source, enum machine_mode mode) } /* Sanity checks. Check that we get CONST_DOUBLE only when we should. */ - if (GET_CODE (operands[1]) == CONST_DOUBLE - && ! FLOAT_MODE_P (mode) + if (CONST_WIDE_INT_P (operands[1]) && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT) { - /* FIXME. This should never happen. */ - /* Since it seems that it does, do the safe thing and convert - to a CONST_INT. */ - operands[1] = gen_int_mode (CONST_DOUBLE_LOW (operands[1]), mode); + /* This should be fixed with the introduction of CONST_WIDE_INT. */ + gcc_unreachable (); } - gcc_assert (GET_CODE (operands[1]) != CONST_DOUBLE - || FLOAT_MODE_P (mode) - || ((CONST_DOUBLE_HIGH (operands[1]) != 0 - || CONST_DOUBLE_LOW (operands[1]) < 0) - && (CONST_DOUBLE_HIGH (operands[1]) != -1 - || CONST_DOUBLE_LOW (operands[1]) >= 0))); /* Check if GCC is setting up a block move that will end up using FP registers as temporaries. We must make sure this is acceptable. */ @@ -8790,7 +8787,7 @@ rs6000_darwin64_record_arg_advance_recurse (CUMULATIVE_ARGS *cum, mode = TYPE_MODE (ftype); if (DECL_SIZE (f) != 0 - && host_integerp (bit_position (f), 1)) + && tree_fits_uhwi_p (bit_position (f))) bitpos += int_bit_position (f); /* ??? 
FIXME: else assume zero offset. */ @@ -9267,7 +9264,7 @@ rs6000_darwin64_record_arg_recurse (CUMULATIVE_ARGS *cum, const_tree type, mode = TYPE_MODE (ftype); if (DECL_SIZE (f) != 0 - && host_integerp (bit_position (f), 1)) + && tree_fits_uhwi_p (bit_position (f))) bitpos += int_bit_position (f); /* ??? FIXME: else assume zero offset. */ @@ -11006,7 +11003,7 @@ rs6000_expand_binop_builtin (enum insn_code icode, tree exp, rtx target) /* Only allow 5-bit unsigned literals. */ STRIP_NOPS (arg1); if (TREE_CODE (arg1) != INTEGER_CST - || TREE_INT_CST_LOW (arg1) & ~0x1f) + || tree_to_hwi (arg1) & ~0x1f) { error ("argument 2 must be a 5-bit unsigned literal"); return const0_rtx; @@ -11051,7 +11048,7 @@ altivec_expand_predicate_builtin (enum insn_code icode, tree exp, rtx target) return const0_rtx; } else - cr6_form_int = TREE_INT_CST_LOW (cr6_form); + cr6_form_int = tree_to_hwi (cr6_form); gcc_assert (mode0 == mode1); @@ -11542,7 +11539,7 @@ rs6000_expand_ternop_builtin (enum insn_code icode, tree exp, rtx target) /* Only allow 4-bit unsigned literals. */ STRIP_NOPS (arg2); if (TREE_CODE (arg2) != INTEGER_CST - || TREE_INT_CST_LOW (arg2) & ~0xf) + || tree_to_hwi (arg2) & ~0xf) { error ("argument 3 must be a 4-bit unsigned literal"); return const0_rtx; @@ -11560,7 +11557,7 @@ rs6000_expand_ternop_builtin (enum insn_code icode, tree exp, rtx target) /* Only allow 2-bit unsigned literals. */ STRIP_NOPS (arg2); if (TREE_CODE (arg2) != INTEGER_CST - || TREE_INT_CST_LOW (arg2) & ~0x3) + || tree_to_hwi (arg2) & ~0x3) { error ("argument 3 must be a 2-bit unsigned literal"); return const0_rtx; @@ -11572,7 +11569,7 @@ rs6000_expand_ternop_builtin (enum insn_code icode, tree exp, rtx target) /* Only allow 1-bit unsigned literals. 
*/ STRIP_NOPS (arg2); if (TREE_CODE (arg2) != INTEGER_CST - || TREE_INT_CST_LOW (arg2) & ~0x1) + || tree_to_hwi (arg2) & ~0x1) { error ("argument 3 must be a 1-bit unsigned literal"); return const0_rtx; @@ -11585,7 +11582,7 @@ rs6000_expand_ternop_builtin (enum insn_code icode, tree exp, rtx target) range and prepare arguments. */ STRIP_NOPS (arg1); if (TREE_CODE (arg1) != INTEGER_CST - || !IN_RANGE (TREE_INT_CST_LOW (arg1), 0, 1)) + || !IN_RANGE (TREE_INT_CST_ELT (arg1, 0), 0, 1)) { error ("argument 2 must be 0 or 1"); return const0_rtx; @@ -11593,7 +11590,7 @@ rs6000_expand_ternop_builtin (enum insn_code icode, tree exp, rtx target) STRIP_NOPS (arg2); if (TREE_CODE (arg2) != INTEGER_CST - || !IN_RANGE (TREE_INT_CST_LOW (arg2), 0, 15)) + || !IN_RANGE (TREE_INT_CST_ELT (arg2, 0), 0, 15)) { error ("argument 3 must be in the range 0..15"); return const0_rtx; @@ -11776,7 +11773,7 @@ altivec_expand_dst_builtin (tree exp, rtx target ATTRIBUTE_UNUSED, *expandedp = true; STRIP_NOPS (arg2); if (TREE_CODE (arg2) != INTEGER_CST - || TREE_INT_CST_LOW (arg2) & ~0x3) + || tree_to_hwi (arg2) & ~0x3) { error ("argument to %qs must be a 2-bit unsigned literal", d->name); return const0_rtx; @@ -11830,8 +11827,8 @@ get_element_number (tree vec_type, tree arg) { unsigned HOST_WIDE_INT elt, max = TYPE_VECTOR_SUBPARTS (vec_type) - 1; - if (!host_integerp (arg, 1) - || (elt = tree_low_cst (arg, 1), elt > max)) + if (!tree_fits_uhwi_p (arg) + || (elt = tree_to_uhwi (arg), elt > max)) { error ("selector must be an integer constant in the range 0..%wi", max); return 0; @@ -12023,7 +12020,7 @@ altivec_expand_builtin (tree exp, rtx target, bool *expandedp) return const0_rtx; if (TREE_CODE (arg0) != INTEGER_CST - || TREE_INT_CST_LOW (arg0) & ~0x3) + || tree_to_hwi (arg0) & ~0x3) { error ("argument to dss must be a 2-bit unsigned literal"); return const0_rtx; @@ -12232,7 +12229,7 @@ spe_expand_builtin (tree exp, rtx target, bool *expandedp) case SPE_BUILTIN_EVSTWWO: arg1 = CALL_EXPR_ARG (exp, 
2); if (TREE_CODE (arg1) != INTEGER_CST - || TREE_INT_CST_LOW (arg1) & ~0x1f) + || tree_to_hwi (arg1) & ~0x1f) { error ("argument 2 must be a 5-bit unsigned literal"); return const0_rtx; @@ -12358,7 +12355,7 @@ paired_expand_predicate_builtin (enum insn_code icode, tree exp, rtx target) return const0_rtx; } else - form_int = TREE_INT_CST_LOW (form); + form_int = tree_to_hwi (form); gcc_assert (mode0 == mode1); @@ -12430,7 +12427,7 @@ spe_expand_predicate_builtin (enum insn_code icode, tree exp, rtx target) return const0_rtx; } else - form_int = TREE_INT_CST_LOW (form); + form_int = tree_to_hwi (form); gcc_assert (mode0 == mode1); @@ -16293,6 +16290,7 @@ rs6000_output_move_128bit (rtx operands[]) /* Constants. */ else if (dest_regno >= 0 && (GET_CODE (src) == CONST_INT + || GET_CODE (src) == CONST_WIDE_INT || GET_CODE (src) == CONST_DOUBLE || GET_CODE (src) == CONST_VECTOR)) { @@ -17306,8 +17304,7 @@ rs6000_assemble_integer (rtx x, unsigned int size, int aligned_p) if (TARGET_RELOCATABLE && in_section != toc_section && !recurse - && GET_CODE (x) != CONST_INT - && GET_CODE (x) != CONST_DOUBLE + && !CONST_SCALAR_INT_P (x) && CONSTANT_P (x)) { char buf[256]; @@ -23695,6 +23692,15 @@ rs6000_hash_constant (rtx k) case LABEL_REF: return result * 1231 + (unsigned) INSN_UID (XEXP (k, 0)); + case CONST_WIDE_INT: + { + int i; + flen = CONST_WIDE_INT_NUNITS (k); + for (i = 0; i < flen; i++) + result = result * 613 + CONST_WIDE_INT_ELT (k, i); + return result; + } + case CONST_DOUBLE: if (mode != VOIDmode) return real_hash (CONST_DOUBLE_REAL_VALUE (k)) * result; @@ -23899,7 +23905,7 @@ output_toc (FILE *file, rtx x, int labelno, enum machine_mode mode) /* If we're going to put a double constant in the TOC, make sure it's aligned properly when strict alignment is on. */ - if (GET_CODE (x) == CONST_DOUBLE + if ((CONST_DOUBLE_P (x) || CONST_WIDE_INT_P (x)) && STRICT_ALIGNMENT && GET_MODE_BITSIZE (mode) >= 64 && ! (TARGET_NO_FP_IN_TOC && ! 
TARGET_MINIMAL_TOC)) { @@ -27901,6 +27907,7 @@ rs6000_rtx_costs (rtx x, int code, int outer_code, int opno ATTRIBUTE_UNUSED, /* FALLTHRU */ case CONST_DOUBLE: + case CONST_WIDE_INT: case CONST: case HIGH: case SYMBOL_REF: @@ -28540,7 +28547,7 @@ rs6000_emit_swrsqrt (rtx dst, rtx src) gcc_assert (code != CODE_FOR_nothing); /* Load up the constant 1.5 either as a scalar, or as a vector. */ - real_from_integer (&dconst3_2, VOIDmode, 3, 0, 0); + real_from_integer (&dconst3_2, VOIDmode, 3, SIGNED); SET_REAL_EXP (&dconst3_2, REAL_EXP (&dconst3_2) - 1); halfthree = rs6000_load_constant_and_splat (mode, dconst3_2); diff --git a/gcc/config/rs6000/rs6000.h b/gcc/config/rs6000/rs6000.h index 745437edb81..387d8c99ac1 100644 --- a/gcc/config/rs6000/rs6000.h +++ b/gcc/config/rs6000/rs6000.h @@ -2645,3 +2645,4 @@ enum rs6000_builtin_type_index extern GTY(()) tree rs6000_builtin_types[RS6000_BTI_MAX]; extern GTY(()) tree rs6000_builtin_decls[RS6000_BUILTIN_COUNT]; +#define TARGET_SUPPORTS_WIDE_INT 1 diff --git a/gcc/config/rs6000/rs6000.md b/gcc/config/rs6000/rs6000.md index 567961a42e5..9961c1e9c23 100644 --- a/gcc/config/rs6000/rs6000.md +++ b/gcc/config/rs6000/rs6000.md @@ -10259,7 +10259,7 @@ (define_split [(set (match_operand:DI 0 "gpc_reg_operand" "") - (match_operand:DI 1 "const_double_operand" ""))] + (match_operand:DI 1 "const_scalar_int_operand" ""))] "TARGET_POWERPC64 && num_insns_constant (operands[1], DImode) > 1" [(set (match_dup 0) (match_dup 2)) (set (match_dup 0) (plus:DI (match_dup 0) (match_dup 3)))] @@ -10325,7 +10325,7 @@ (define_split [(set (match_operand:TI2 0 "int_reg_operand" "") - (match_operand:TI2 1 "const_double_operand" ""))] + (match_operand:TI2 1 "const_scalar_int_operand" ""))] "TARGET_POWERPC64 && (VECTOR_MEM_NONE_P (<MODE>mode) || (reload_completed && INT_REGNO_P (REGNO (operands[0]))))" @@ -10337,12 +10337,12 @@ <MODE>mode); operands[3] = operand_subword_force (operands[0], WORDS_BIG_ENDIAN != 0, <MODE>mode); - if (GET_CODE (operands[1]) == 
CONST_DOUBLE) + if (CONST_WIDE_INT_P (operands[1])) { - operands[4] = GEN_INT (CONST_DOUBLE_HIGH (operands[1])); - operands[5] = GEN_INT (CONST_DOUBLE_LOW (operands[1])); + operands[4] = GEN_INT (CONST_WIDE_INT_ELT (operands[1], 1)); + operands[5] = GEN_INT (CONST_WIDE_INT_ELT (operands[1], 0)); } - else if (GET_CODE (operands[1]) == CONST_INT) + else if (CONST_INT_P (operands[1])) { operands[4] = GEN_INT (- (INTVAL (operands[1]) < 0)); operands[5] = operands[1]; diff --git a/gcc/config/s390/s390.c b/gcc/config/s390/s390.c index f0d6a59e978..681e179eccf 100644 --- a/gcc/config/s390/s390.c +++ b/gcc/config/s390/s390.c @@ -10192,9 +10192,9 @@ s390_encode_section_info (tree decl, rtx rtl, int first) SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= SYMBOL_FLAG_ALIGN1; if (!DECL_SIZE (decl) || !DECL_ALIGN (decl) - || !host_integerp (DECL_SIZE (decl), 0) + || !tree_fits_shwi_p (DECL_SIZE (decl)) || (DECL_ALIGN (decl) <= 64 - && DECL_ALIGN (decl) != tree_low_cst (DECL_SIZE (decl), 0))) + && DECL_ALIGN (decl) != tree_to_shwi (DECL_SIZE (decl)))) SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= SYMBOL_FLAG_NOT_NATURALLY_ALIGNED; } diff --git a/gcc/config/sh/sh.c b/gcc/config/sh/sh.c index 31d162597b8..5a8a236d25f 100644 --- a/gcc/config/sh/sh.c +++ b/gcc/config/sh/sh.c @@ -1164,7 +1164,7 @@ sh_print_operand (FILE *stream, rtx x, int code) DECL_ATTRIBUTES (current_function_decl)); if (trapa_attr) fprintf (stream, "trapa #%ld", - (long) TREE_INT_CST_LOW (TREE_VALUE (TREE_VALUE (trapa_attr)))); + (long) tree_to_hwi (TREE_VALUE (TREE_VALUE (trapa_attr)))); else if (sh_cfun_interrupt_handler_p ()) { if (sh_cfun_resbank_handler_p ()) @@ -9623,7 +9623,7 @@ sh2a_handle_function_vector_handler_attribute (tree * node, tree name, name); *no_add_attrs = true; } - else if (TREE_INT_CST_LOW (TREE_VALUE (args)) > 255) + else if (tree_to_hwi (TREE_VALUE (args)) > 255) { /* The argument value must be between 0 to 255. 
*/ warning (OPT_Wattributes, @@ -9672,7 +9672,7 @@ sh2a_get_function_vector_number (rtx x) { if (is_attribute_p ("function_vector", TREE_PURPOSE (list))) { - num = TREE_INT_CST_LOW (TREE_VALUE (TREE_VALUE (list))); + num = tree_to_hwi (TREE_VALUE (TREE_VALUE (list))); return num; } diff --git a/gcc/config/sol2-c.c b/gcc/config/sol2-c.c index ee44621591e..86a8d907521 100644 --- a/gcc/config/sol2-c.c +++ b/gcc/config/sol2-c.c @@ -93,8 +93,8 @@ solaris_pragma_align (cpp_reader *pfile ATTRIBUTE_UNUSED) return; } - low = TREE_INT_CST_LOW (x); - if (TREE_INT_CST_HIGH (x) != 0 + low = tree_to_hwi (x); + if (!cst_fits_uhwi_p (x) || (low != 1 && low != 2 && low != 4 && low != 8 && low != 16 && low != 32 && low != 64 && low != 128)) { diff --git a/gcc/config/sparc/sparc.c b/gcc/config/sparc/sparc.c index 8ef634133c5..36757c7ac5c 100644 --- a/gcc/config/sparc/sparc.c +++ b/gcc/config/sparc/sparc.c @@ -54,6 +54,7 @@ along with GCC; see the file COPYING3. If not see #include "opts.h" #include "tree-pass.h" #include "context.h" +#include "wide-int.h" /* Processor costs */ @@ -6313,7 +6314,7 @@ function_arg_record_value_1 (const_tree type, HOST_WIDE_INT startbitpos, if (integer_zerop (DECL_SIZE (field))) continue; - if (host_integerp (bit_position (field), 1)) + if (tree_fits_uhwi_p (bit_position (field))) bitpos += int_bit_position (field); } @@ -6461,7 +6462,7 @@ function_arg_record_value_2 (const_tree type, HOST_WIDE_INT startbitpos, if (integer_zerop (DECL_SIZE (field))) continue; - if (host_integerp (bit_position (field), 1)) + if (tree_fits_uhwi_p (bit_position (field))) bitpos += int_bit_position (field); } @@ -7128,10 +7129,10 @@ sparc_struct_value_rtx (tree fndecl, int incoming) /* Calculate the return object size */ tree size = TYPE_SIZE_UNIT (TREE_TYPE (fndecl)); - rtx size_rtx = GEN_INT (TREE_INT_CST_LOW (size) & 0xfff); + rtx size_rtx = GEN_INT (tree_to_hwi (size) & 0xfff); /* Construct a temporary return value */ rtx temp_val - = assign_stack_local (Pmode, 
TREE_INT_CST_LOW (size), 0); + = assign_stack_local (Pmode, tree_to_hwi (size), 0); /* Implement SPARC 32-bit psABI callee return struct checking: @@ -10480,31 +10481,31 @@ sparc_handle_vis_mul8x16 (tree *n_elts, int fncode, tree inner_type, for (i = 0; i < num; ++i) { int val - = sparc_vis_mul8x16 (TREE_INT_CST_LOW (VECTOR_CST_ELT (cst0, i)), - TREE_INT_CST_LOW (VECTOR_CST_ELT (cst1, i))); + = sparc_vis_mul8x16 (tree_to_hwi (VECTOR_CST_ELT (cst0, i)), + tree_to_hwi (VECTOR_CST_ELT (cst1, i))); n_elts[i] = build_int_cst (inner_type, val); } break; case CODE_FOR_fmul8x16au_vis: - scale = TREE_INT_CST_LOW (VECTOR_CST_ELT (cst1, 0)); + scale = tree_to_hwi (VECTOR_CST_ELT (cst1, 0)); for (i = 0; i < num; ++i) { int val - = sparc_vis_mul8x16 (TREE_INT_CST_LOW (VECTOR_CST_ELT (cst0, i)), + = sparc_vis_mul8x16 (tree_to_hwi (VECTOR_CST_ELT (cst0, i)), scale); n_elts[i] = build_int_cst (inner_type, val); } break; case CODE_FOR_fmul8x16al_vis: - scale = TREE_INT_CST_LOW (VECTOR_CST_ELT (cst1, 1)); + scale = tree_to_hwi (VECTOR_CST_ELT (cst1, 1)); for (i = 0; i < num; ++i) { int val - = sparc_vis_mul8x16 (TREE_INT_CST_LOW (VECTOR_CST_ELT (cst0, i)), + = sparc_vis_mul8x16 (tree_to_hwi (VECTOR_CST_ELT (cst0, i)), scale); n_elts[i] = build_int_cst (inner_type, val); } @@ -10564,7 +10565,7 @@ sparc_fold_builtin (tree fndecl, int n_args ATTRIBUTE_UNUSED, n_elts = XALLOCAVEC (tree, VECTOR_CST_NELTS (arg0)); for (i = 0; i < VECTOR_CST_NELTS (arg0); ++i) n_elts[i] = build_int_cst (inner_type, - TREE_INT_CST_LOW + tree_to_hwi (VECTOR_CST_ELT (arg0, i)) << 4); return build_vector (rtype, n_elts); } @@ -10619,30 +10620,33 @@ sparc_fold_builtin (tree fndecl, int n_args ATTRIBUTE_UNUSED, && TREE_CODE (arg1) == VECTOR_CST && TREE_CODE (arg2) == INTEGER_CST) { - bool overflow = false; - double_int result = TREE_INT_CST (arg2); - double_int tmp; + bool overflow, overall_overflow = false; + wide_int result = wide_int::from_tree (arg2); + wide_int tmp; unsigned i; for (i = 0; i < 
VECTOR_CST_NELTS (arg0); ++i) { - double_int e0 = TREE_INT_CST (VECTOR_CST_ELT (arg0, i)); - double_int e1 = TREE_INT_CST (VECTOR_CST_ELT (arg1, i)); - - bool neg1_ovf, neg2_ovf, add1_ovf, add2_ovf; - - tmp = e1.neg_with_overflow (&neg1_ovf); - tmp = e0.add_with_sign (tmp, false, &add1_ovf); - if (tmp.is_negative ()) - tmp = tmp.neg_with_overflow (&neg2_ovf); + wide_int e0 = wide_int::from_tree (VECTOR_CST_ELT (arg0, i)); + wide_int e1 = wide_int::from_tree (VECTOR_CST_ELT (arg1, i)); + + tmp = e1.neg (&overflow); + overall_overflow |= overall_overflow; + tmp = e0.add (tmp, SIGNED, &overflow); + overall_overflow |= overall_overflow; + if (tmp.neg_p ()) + { + tmp = tmp.neg (&overflow); + overall_overflow |= overall_overflow; + } - result = result.add_with_sign (tmp, false, &add2_ovf); - overflow |= neg1_ovf | neg2_ovf | add1_ovf | add2_ovf; + result = result.add (tmp, SIGNED, &overflow); + overall_overflow |= overall_overflow; } - gcc_assert (!overflow); + gcc_assert (!overall_overflow); - return build_int_cst_wide (rtype, result.low, result.high); + return wide_int_to_tree (rtype, result); } default: diff --git a/gcc/config/vms/vms-c.c b/gcc/config/vms/vms-c.c index d56ac1b8a70..5e4ed7cca65 100644 --- a/gcc/config/vms/vms-c.c +++ b/gcc/config/vms/vms-c.c @@ -316,7 +316,7 @@ handle_pragma_pointer_size (const char *pragma_name) int val; if (TREE_CODE (x) == INTEGER_CST) - val = TREE_INT_CST_LOW (x); + val = tree_to_hwi (x); else val = -1; diff --git a/gcc/coretypes.h b/gcc/coretypes.h index f7ef8d777c1..07d0885923b 100644 --- a/gcc/coretypes.h +++ b/gcc/coretypes.h @@ -58,6 +58,9 @@ typedef const struct rtx_def *const_rtx; struct rtvec_def; typedef struct rtvec_def *rtvec; typedef const struct rtvec_def *const_rtvec; +struct hwivec_def; +typedef struct hwivec_def *hwivec; +typedef const struct hwivec_def *const_hwivec; union tree_node; typedef union tree_node *tree; typedef const union tree_node *const_tree; diff --git a/gcc/coverage.c b/gcc/coverage.c index 
e450151ac3e..9c91f641953 100644 --- a/gcc/coverage.c +++ b/gcc/coverage.c @@ -830,7 +830,7 @@ build_fn_info (const struct coverage_data *data, tree type, tree key) if (var) count - = tree_low_cst (TYPE_MAX_VALUE (TYPE_DOMAIN (TREE_TYPE (var))), 0) + = tree_to_shwi (TYPE_MAX_VALUE (TYPE_DOMAIN (TREE_TYPE (var)))) + 1; CONSTRUCTOR_APPEND_ELT (ctr, TYPE_FIELDS (ctr_type), diff --git a/gcc/cp/call.c b/gcc/cp/call.c index 3ed73b80374..8753923348f 100644 --- a/gcc/cp/call.c +++ b/gcc/cp/call.c @@ -38,6 +38,7 @@ along with GCC; see the file COPYING3. If not see #include "c-family/c-objc.h" #include "timevar.h" #include "cgraph.h" +#include "wide-int.h" /* The various kinds of conversion. */ @@ -941,7 +942,7 @@ build_array_conv (tree type, tree ctor, int flags, tsubst_flags_t complain) if (TYPE_DOMAIN (type)) { - unsigned HOST_WIDE_INT alen = tree_low_cst (array_type_nelts_top (type), 1); + unsigned HOST_WIDE_INT alen = tree_to_uhwi (array_type_nelts_top (type)); if (alen < len) return NULL; } diff --git a/gcc/cp/class.c b/gcc/cp/class.c index c587e55ac68..0cc882c2600 100644 --- a/gcc/cp/class.c +++ b/gcc/cp/class.c @@ -36,6 +36,7 @@ along with GCC; see the file COPYING3. If not see #include "splay-tree.h" #include "pointer-set.h" #include "hash-table.h" +#include "wide-int.h" /* The number of nested classes being processed. If we are not in the scope of any class, this is zero. 
*/ @@ -6131,7 +6132,7 @@ layout_class_type (tree t, tree *virtuals_p) { unsigned HOST_WIDE_INT width; tree ftype = TREE_TYPE (field); - width = tree_low_cst (DECL_SIZE (field), /*unsignedp=*/1); + width = tree_to_uhwi (DECL_SIZE (field)); if (width != TYPE_PRECISION (ftype)) { TREE_TYPE (field) @@ -8003,7 +8004,7 @@ dump_class_hierarchy_r (FILE *stream, igo = TREE_CHAIN (binfo); fprintf (stream, HOST_WIDE_INT_PRINT_DEC, - tree_low_cst (BINFO_OFFSET (binfo), 0)); + tree_to_shwi (BINFO_OFFSET (binfo))); if (is_empty_class (BINFO_TYPE (binfo))) fprintf (stream, " empty"); else if (CLASSTYPE_NEARLY_EMPTY_P (BINFO_TYPE (binfo))) @@ -8079,10 +8080,10 @@ dump_class_hierarchy_1 (FILE *stream, int flags, tree t) { fprintf (stream, "Class %s\n", type_as_string (t, TFF_PLAIN_IDENTIFIER)); fprintf (stream, " size=%lu align=%lu\n", - (unsigned long)(tree_low_cst (TYPE_SIZE (t), 0) / BITS_PER_UNIT), + (unsigned long)(tree_to_shwi (TYPE_SIZE (t)) / BITS_PER_UNIT), (unsigned long)(TYPE_ALIGN (t) / BITS_PER_UNIT)); fprintf (stream, " base size=%lu base align=%lu\n", - (unsigned long)(tree_low_cst (TYPE_SIZE (CLASSTYPE_AS_BASE (t)), 0) + (unsigned long)(tree_to_shwi (TYPE_SIZE (CLASSTYPE_AS_BASE (t))) / BITS_PER_UNIT), (unsigned long)(TYPE_ALIGN (CLASSTYPE_AS_BASE (t)) / BITS_PER_UNIT)); @@ -8119,7 +8120,7 @@ dump_array (FILE * stream, tree decl) HOST_WIDE_INT elt; tree size = TYPE_MAX_VALUE (TYPE_DOMAIN (TREE_TYPE (decl))); - elt = (tree_low_cst (TYPE_SIZE (TREE_TYPE (TREE_TYPE (decl))), 0) + elt = (tree_to_shwi (TYPE_SIZE (TREE_TYPE (TREE_TYPE (decl)))) / BITS_PER_UNIT); fprintf (stream, "%s:", decl_as_string (decl, TFF_PLAIN_IDENTIFIER)); fprintf (stream, " %s entries", @@ -8208,10 +8209,10 @@ dump_thunk (FILE *stream, int indent, tree thunk) /*NOP*/; else if (DECL_THIS_THUNK_P (thunk)) fprintf (stream, " vcall=" HOST_WIDE_INT_PRINT_DEC, - tree_low_cst (virtual_adjust, 0)); + tree_to_shwi (virtual_adjust)); else fprintf (stream, " vbase=" HOST_WIDE_INT_PRINT_DEC "(%s)", - 
tree_low_cst (BINFO_VPTR_FIELD (virtual_adjust), 0), + tree_to_shwi (BINFO_VPTR_FIELD (virtual_adjust)), type_as_string (BINFO_TYPE (virtual_adjust), TFF_SCOPE)); if (THUNK_ALIAS (thunk)) fprintf (stream, " alias to %p", (void *)THUNK_ALIAS (thunk)); diff --git a/gcc/cp/cp-tree.h b/gcc/cp/cp-tree.h index b1347e20dd6..713e001730c 100644 --- a/gcc/cp/cp-tree.h +++ b/gcc/cp/cp-tree.h @@ -2804,7 +2804,7 @@ extern void decl_shadowed_for_var_insert (tree, tree); /* The number of levels of template parameters given by NODE. */ #define TMPL_PARMS_DEPTH(NODE) \ - ((HOST_WIDE_INT) TREE_INT_CST_LOW (TREE_PURPOSE (NODE))) + ((HOST_WIDE_INT) tree_to_hwi (TREE_PURPOSE (NODE))) /* The TEMPLATE_DECL instantiated or specialized by NODE. This TEMPLATE_DECL will be the immediate parent, not the most general @@ -3688,7 +3688,7 @@ more_aggr_init_expr_args_p (const aggr_init_expr_arg_iterator *iter) /* Accessor macros for C++ template decl nodes. */ /* The DECL_TEMPLATE_PARMS are a list. The TREE_PURPOSE of each node - is a INT_CST whose TREE_INT_CST_LOW indicates the level of the + is a INT_CST whose tree_to_hwi indicates the level of the template parameters, with 1 being the outermost set of template parameters. The TREE_VALUE is a vector, whose elements are the template parameters at each level. Each element in the vector is a diff --git a/gcc/cp/cvt.c b/gcc/cp/cvt.c index 08c026da178..0efb5548528 100644 --- a/gcc/cp/cvt.c +++ b/gcc/cp/cvt.c @@ -35,6 +35,7 @@ along with GCC; see the file COPYING3. If not see #include "convert.h" #include "decl.h" #include "target.h" +#include "wide-int.h" static tree cp_convert_to_pointer (tree, tree, tsubst_flags_t); static tree convert_to_pointer_force (tree, tree, tsubst_flags_t); @@ -581,9 +582,7 @@ ignore_overflows (tree expr, tree orig) { gcc_assert (!TREE_OVERFLOW (orig)); /* Ensure constant sharing. 
*/ - expr = build_int_cst_wide (TREE_TYPE (expr), - TREE_INT_CST_LOW (expr), - TREE_INT_CST_HIGH (expr)); + expr = wide_int_to_tree (TREE_TYPE (expr), expr); } return expr; } diff --git a/gcc/cp/decl.c b/gcc/cp/decl.c index fe8fe4ee52c..c15eca4d8f3 100644 --- a/gcc/cp/decl.c +++ b/gcc/cp/decl.c @@ -53,6 +53,7 @@ along with GCC; see the file COPYING3. If not see #include "splay-tree.h" #include "plugin.h" #include "cgraph.h" +#include "wide-int.h" /* Possible cases of bad specifiers type used by bad_specifiers. */ enum bad_spec_place { @@ -4803,7 +4804,8 @@ check_array_designated_initializer (constructor_elt *ce, if (TREE_CODE (ce->index) == INTEGER_CST) { /* A C99 designator is OK if it matches the current index. */ - if (TREE_INT_CST_LOW (ce->index) == index) + if (tree_fits_uhwi_p (ce->index) + && tree_to_uhwi (ce->index) == index) return true; else sorry ("non-trivial designated initializers not supported"); @@ -5090,12 +5092,11 @@ reshape_init_array_1 (tree elt_type, tree max_index, reshape_iter *d, if (integer_all_onesp (max_index)) return new_init; - if (host_integerp (max_index, 1)) - max_index_cst = tree_low_cst (max_index, 1); + if (tree_fits_uhwi_p (max_index)) + max_index_cst = tree_to_uhwi (max_index); /* sizetype is sign extended, not zero extended. */ else - max_index_cst = tree_low_cst (fold_convert (size_type_node, max_index), - 1); + max_index_cst = tree_to_uhwi (fold_convert (size_type_node, max_index)); } /* Loop until there are no more initializers. */ @@ -10025,7 +10026,7 @@ grokdeclarator (const cp_declarator *declarator, { error ("size of array %qs is too large", name); /* If we proceed with the array type as it is, we'll eventually - crash in tree_low_cst(). */ + crash in tree_to_uhwi (). */ type = error_mark_node; } @@ -12671,9 +12672,9 @@ finish_enum_value_list (tree enumtype) enumeration. 
We must do this before the type of MINNODE and MAXNODE are transformed, since tree_int_cst_min_precision relies on the TREE_TYPE of the value it is passed. */ - bool unsignedp = tree_int_cst_sgn (minnode) >= 0; - int lowprec = tree_int_cst_min_precision (minnode, unsignedp); - int highprec = tree_int_cst_min_precision (maxnode, unsignedp); + signop sgn = tree_int_cst_sgn (minnode) >= 0 ? UNSIGNED : SIGNED; + int lowprec = tree_int_cst_min_precision (minnode, sgn); + int highprec = tree_int_cst_min_precision (maxnode, sgn); int precision = MAX (lowprec, highprec); unsigned int itk; bool use_short_enum; @@ -12705,7 +12706,7 @@ finish_enum_value_list (tree enumtype) underlying_type = integer_types[itk]; if (underlying_type != NULL_TREE && TYPE_PRECISION (underlying_type) >= precision - && TYPE_UNSIGNED (underlying_type) == unsignedp) + && TYPE_SIGN (underlying_type) == sgn) break; } if (itk == itk_none) @@ -12752,12 +12753,12 @@ finish_enum_value_list (tree enumtype) = build_distinct_type_copy (underlying_type); TYPE_PRECISION (ENUM_UNDERLYING_TYPE (enumtype)) = precision; set_min_and_max_values_for_integral_type - (ENUM_UNDERLYING_TYPE (enumtype), precision, unsignedp); + (ENUM_UNDERLYING_TYPE (enumtype), precision, sgn); /* If -fstrict-enums, still constrain TYPE_MIN/MAX_VALUE. 
*/ if (flag_strict_enums) set_min_and_max_values_for_integral_type (enumtype, precision, - unsignedp); + sgn); } else underlying_type = ENUM_UNDERLYING_TYPE (enumtype); @@ -12881,14 +12882,13 @@ build_enumerator (tree name, tree value, tree enumtype, location_t loc) value = error_mark_node; else { - double_int di = TREE_INT_CST (prev_value) - .add_with_sign (double_int_one, - false, &overflowed); + tree type = TREE_TYPE (prev_value); + signop sgn = TYPE_SIGN (type); + wide_int wi = wi::add (prev_value, 1, sgn, &overflowed); if (!overflowed) { - tree type = TREE_TYPE (prev_value); - bool pos = TYPE_UNSIGNED (type) || !di.is_negative (); - if (!double_int_fits_to_tree_p (type, di)) + bool pos = !wi::neg_p (wi, sgn); + if (!wi::fits_to_tree_p (wi, type)) { unsigned int itk; for (itk = itk_int; itk != itk_none; itk++) @@ -12896,7 +12896,7 @@ build_enumerator (tree name, tree value, tree enumtype, location_t loc) type = integer_types[itk]; if (type != NULL_TREE && (pos || !TYPE_UNSIGNED (type)) - && double_int_fits_to_tree_p (type, di)) + && wi::fits_to_tree_p (wi, type)) break; } if (type && cxx_dialect < cxx11 @@ -12908,7 +12908,7 @@ incremented enumerator value is too large for %<long%>"); if (type == NULL_TREE) overflowed = true; else - value = double_int_to_tree (type, di); + value = wide_int_to_tree (type, wi); } if (overflowed) diff --git a/gcc/cp/dump.c b/gcc/cp/dump.c index 4aa3935a7d2..cc888339af9 100644 --- a/gcc/cp/dump.c +++ b/gcc/cp/dump.c @@ -346,7 +346,7 @@ cp_dump_tree (void* dump_info, tree t) } dump_int (di, "fixd", THUNK_FIXED_OFFSET (t)); if (virt) - dump_int (di, "virt", tree_low_cst (virt, 0)); + dump_int (di, "virt", tree_to_shwi (virt)); dump_child ("fn", DECL_INITIAL (t)); } break; diff --git a/gcc/cp/error.c b/gcc/cp/error.c index 3f6f5948a66..1627afab95d 100644 --- a/gcc/cp/error.c +++ b/gcc/cp/error.c @@ -851,8 +851,8 @@ dump_type_suffix (cxx_pretty_printer *pp, tree t, int flags) tree max = TYPE_MAX_VALUE (dtype); if (integer_all_onesp 
(max)) pp_character (pp, '0'); - else if (host_integerp (max, 0)) - pp_wide_integer (pp, tree_low_cst (max, 0) + 1); + else if (tree_fits_shwi_p (max)) + pp_wide_integer (pp, tree_to_shwi (max) + 1); else { STRIP_NOPS (max); @@ -1853,7 +1853,7 @@ static tree resolve_virtual_fun_from_obj_type_ref (tree ref) { tree obj_type = TREE_TYPE (OBJ_TYPE_REF_OBJECT (ref)); - HOST_WIDE_INT index = tree_low_cst (OBJ_TYPE_REF_TOKEN (ref), 1); + HOST_WIDE_INT index = tree_to_uhwi (OBJ_TYPE_REF_TOKEN (ref)); tree fun = BINFO_VIRTUALS (TYPE_BINFO (TREE_TYPE (obj_type))); while (index) { @@ -2285,7 +2285,7 @@ dump_expr (cxx_pretty_printer *pp, tree t, int flags) pp_cxx_right_paren (pp); break; } - else if (host_integerp (idx, 0)) + else if (tree_fits_shwi_p (idx)) { tree virtuals; unsigned HOST_WIDE_INT n; @@ -2294,7 +2294,7 @@ dump_expr (cxx_pretty_printer *pp, tree t, int flags) t = TYPE_METHOD_BASETYPE (t); virtuals = BINFO_VIRTUALS (TYPE_BINFO (TYPE_MAIN_VARIANT (t))); - n = tree_low_cst (idx, 0); + n = tree_to_shwi (idx); /* Map vtable index back one, to allow for the null pointer to member. */ diff --git a/gcc/cp/init.c b/gcc/cp/init.c index d18dc5e8deb..450ff05bca3 100644 --- a/gcc/cp/init.c +++ b/gcc/cp/init.c @@ -28,6 +28,7 @@ along with GCC; see the file COPYING3. If not see #include "cp-tree.h" #include "flags.h" #include "target.h" +#include "wide-int.h" static bool begin_init_stmts (tree *, tree *); static tree finish_init_stmts (bool, tree, tree); @@ -2241,10 +2242,10 @@ build_new_1 (vec<tree, va_gc> **placement, tree type, tree nelts, /* For arrays, a bounds checks on the NELTS parameter. */ tree outer_nelts_check = NULL_TREE; bool outer_nelts_from_type = false; - double_int inner_nelts_count = double_int_one; + addr_wide_int inner_nelts_count = 1; tree alloc_call, alloc_expr; /* Size of the inner array elements. */ - double_int inner_size; + addr_wide_int inner_size; /* The address returned by the call to "operator new". 
This node is a VAR_DECL and is therefore reusable. */ tree alloc_node; @@ -2299,9 +2300,9 @@ build_new_1 (vec<tree, va_gc> **placement, tree type, tree nelts, if (TREE_CODE (inner_nelts_cst) == INTEGER_CST) { bool overflow; - double_int result = TREE_INT_CST (inner_nelts_cst) - .mul_with_sign (inner_nelts_count, - false, &overflow); + addr_wide_int result = wi::mul (addr_wide_int (inner_nelts_cst), + inner_nelts_count, SIGNED, + &overflow); if (overflow) { if (complain & tf_error) @@ -2403,42 +2404,40 @@ build_new_1 (vec<tree, va_gc> **placement, tree type, tree nelts, { /* Maximum available size in bytes. Half of the address space minus the cookie size. */ - double_int max_size - = double_int_one.llshift (TYPE_PRECISION (sizetype) - 1, - HOST_BITS_PER_DOUBLE_INT); + addr_wide_int max_size + = wi::set_bit_in_zero <addr_wide_int> (TYPE_PRECISION (sizetype) - 1); /* Maximum number of outer elements which can be allocated. */ - double_int max_outer_nelts; + addr_wide_int max_outer_nelts; tree max_outer_nelts_tree; gcc_assert (TREE_CODE (size) == INTEGER_CST); cookie_size = targetm.cxx.get_cookie_size (elt_type); gcc_assert (TREE_CODE (cookie_size) == INTEGER_CST); - gcc_checking_assert (TREE_INT_CST (cookie_size).ult (max_size)); + gcc_checking_assert (wi::ltu_p (cookie_size, max_size)); /* Unconditionally subtract the cookie size. This decreases the maximum object size and is safe even if we choose not to use a cookie after all. 
*/ - max_size -= TREE_INT_CST (cookie_size); + max_size -= cookie_size; bool overflow; - inner_size = TREE_INT_CST (size) - .mul_with_sign (inner_nelts_count, false, &overflow); - if (overflow || inner_size.ugt (max_size)) + inner_size = wi::mul (addr_wide_int (size), inner_nelts_count, SIGNED, + &overflow); + if (overflow || wi::gtu_p (inner_size, max_size)) { if (complain & tf_error) error ("size of array is too large"); return error_mark_node; } - max_outer_nelts = max_size.udiv (inner_size, TRUNC_DIV_EXPR); + + max_outer_nelts = wi::udiv_trunc (max_size, inner_size); /* Only keep the top-most seven bits, to simplify encoding the constant in the instruction stream. */ { - unsigned shift = HOST_BITS_PER_DOUBLE_INT - 7 - - (max_outer_nelts.high ? clz_hwi (max_outer_nelts.high) - : (HOST_BITS_PER_WIDE_INT + clz_hwi (max_outer_nelts.low))); - max_outer_nelts - = max_outer_nelts.lrshift (shift, HOST_BITS_PER_DOUBLE_INT) - .llshift (shift, HOST_BITS_PER_DOUBLE_INT); + unsigned shift = (max_outer_nelts.get_precision ()) - 7 + - wi::clz (max_outer_nelts); + max_outer_nelts = wi::lshift (wi::lrshift (max_outer_nelts, shift), + shift); } - max_outer_nelts_tree = double_int_to_tree (sizetype, max_outer_nelts); + max_outer_nelts_tree = wide_int_to_tree (sizetype, max_outer_nelts); size = size_binop (MULT_EXPR, size, convert (sizetype, nelts)); outer_nelts_check = fold_build2 (LE_EXPR, boolean_type_node, @@ -2512,7 +2511,7 @@ build_new_1 (vec<tree, va_gc> **placement, tree type, tree nelts, cookie_size = NULL_TREE; /* No size arithmetic necessary, so the size check is not needed. */ - if (outer_nelts_check != NULL && inner_size.is_one ()) + if (outer_nelts_check != NULL && inner_size == 1) outer_nelts_check = NULL_TREE; } /* Perform the overflow check. */ @@ -2557,7 +2556,7 @@ build_new_1 (vec<tree, va_gc> **placement, tree type, tree nelts, cookie_size = NULL_TREE; /* No size arithmetic necessary, so the size check is not needed. 
*/ - if (outer_nelts_check != NULL && inner_size.is_one ()) + if (outer_nelts_check != NULL && inner_size == 1) outer_nelts_check = NULL_TREE; } @@ -3639,9 +3638,9 @@ build_vec_init (tree base, tree maxindex, tree init, if (from_array || ((type_build_ctor_call (type) || init || explicit_value_init_p) - && ! (host_integerp (maxindex, 0) + && ! (tree_fits_shwi_p (maxindex) && (num_initialized_elts - == tree_low_cst (maxindex, 0) + 1)))) + == tree_to_shwi (maxindex) + 1)))) { /* If the ITERATOR is equal to -1, then we don't have to loop; we've already initialized all the elements. */ diff --git a/gcc/cp/mangle.c b/gcc/cp/mangle.c index 202fafceb03..49bbedf0923 100644 --- a/gcc/cp/mangle.c +++ b/gcc/cp/mangle.c @@ -55,6 +55,7 @@ along with GCC; see the file COPYING3. If not see #include "flags.h" #include "target.h" #include "cgraph.h" +#include "wide-int.h" /* Debugging support. */ @@ -1503,7 +1504,7 @@ write_integer_cst (const tree cst) { int sign = tree_int_cst_sgn (cst); - if (TREE_INT_CST_HIGH (cst) + (sign < 0)) + if (!cst_fits_shwi_p (cst)) { /* A bignum. We do this in chunks, each of which fits in a HOST_WIDE_INT. */ @@ -1529,8 +1530,7 @@ write_integer_cst (const tree cst) type = c_common_signed_or_unsigned_type (1, TREE_TYPE (cst)); base = build_int_cstu (type, chunk); - n = build_int_cst_wide (type, - TREE_INT_CST_LOW (cst), TREE_INT_CST_HIGH (cst)); + n = wide_int_to_tree (type, cst); if (sign < 0) { @@ -1545,7 +1545,7 @@ write_integer_cst (const tree cst) done = integer_zerop (d); tmp = fold_build2_loc (input_location, MINUS_EXPR, type, n, tmp); - c = hwint_to_ascii (TREE_INT_CST_LOW (tmp), 10, ptr, + c = hwint_to_ascii (tree_to_hwi (tmp), 10, ptr, done ? 1 : chunk_digits); ptr -= c; count += c; @@ -1557,7 +1557,7 @@ write_integer_cst (const tree cst) else { /* A small num. 
*/ - unsigned HOST_WIDE_INT low = TREE_INT_CST_LOW (cst); + unsigned HOST_WIDE_INT low = tree_to_hwi (cst); if (sign < 0) { @@ -3223,12 +3223,12 @@ write_array_type (const tree type) { /* The ABI specifies that we should mangle the number of elements in the array, not the largest allowed index. */ - double_int dmax = tree_to_double_int (max) + double_int_one; + addr_wide_int wmax = addr_wide_int (max) + 1; /* Truncate the result - this will mangle [0, SIZE_INT_MAX] number of elements as zero. */ - dmax = dmax.zext (TYPE_PRECISION (TREE_TYPE (max))); - gcc_assert (dmax.fits_uhwi ()); - write_unsigned_number (dmax.low); + wmax = wi::zext (wmax, TYPE_PRECISION (TREE_TYPE (max))); + gcc_assert (wi::fits_uhwi_p (wmax)); + write_unsigned_number (wmax.to_uhwi ()); } else { diff --git a/gcc/cp/method.c b/gcc/cp/method.c index 4ac533eacf7..adef81c576e 100644 --- a/gcc/cp/method.c +++ b/gcc/cp/method.c @@ -95,7 +95,7 @@ make_thunk (tree function, bool this_adjusting, convert (ssizetype, TYPE_SIZE_UNIT (vtable_entry_type))); - d = tree_low_cst (fixed_offset, 0); + d = tree_to_shwi (fixed_offset); /* See if we already have the thunk in question. For this_adjusting thunks VIRTUAL_OFFSET will be an INTEGER_CST, for covariant thunks it @@ -323,7 +323,7 @@ use_thunk (tree thunk_fndecl, bool emit_p) { if (!this_adjusting) virtual_offset = BINFO_VPTR_FIELD (virtual_offset); - virtual_value = tree_low_cst (virtual_offset, /*pos=*/0); + virtual_value = tree_to_shwi (virtual_offset); gcc_assert (virtual_value); } else diff --git a/gcc/cp/parser.c b/gcc/cp/parser.c index 10a7b962d05..383ea46b7b5 100644 --- a/gcc/cp/parser.c +++ b/gcc/cp/parser.c @@ -819,7 +819,7 @@ cp_lexer_get_preprocessor_token (cp_lexer *lexer, cp_token *token) { /* We smuggled the cpp_token->u.pragma value in an INTEGER_CST. 
*/ token->pragma_kind = ((enum pragma_kind) - TREE_INT_CST_LOW (token->u.value)); + tree_to_hwi (token->u.value)); token->u.value = NULL_TREE; } } @@ -3912,7 +3912,7 @@ cp_parser_userdef_string_literal (cp_token *token) tree name = cp_literal_operator_id (IDENTIFIER_POINTER (suffix_id)); tree value = USERDEF_LITERAL_VALUE (literal); int len = TREE_STRING_LENGTH (value) - / TREE_INT_CST_LOW (TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (value)))) - 1; + / tree_to_hwi (TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (value)))) - 1; tree decl, result; vec<tree, va_gc> *args; @@ -26796,8 +26796,8 @@ cp_parser_omp_clause_collapse (cp_parser *parser, tree list, location_t location return list; num = fold_non_dependent_expr (num); if (!INTEGRAL_TYPE_P (TREE_TYPE (num)) - || !host_integerp (num, 0) - || (n = tree_low_cst (num, 0)) <= 0 + || !tree_fits_shwi_p (num) + || (n = tree_to_shwi (num)) <= 0 || (int) n != n) { error_at (loc, "collapse argument needs positive constant integer expression"); @@ -28579,7 +28579,7 @@ cp_parser_omp_for_loop (cp_parser *parser, enum tree_code code, tree clauses, for (cl = clauses; cl; cl = OMP_CLAUSE_CHAIN (cl)) if (OMP_CLAUSE_CODE (cl) == OMP_CLAUSE_COLLAPSE) - collapse = tree_low_cst (OMP_CLAUSE_COLLAPSE_EXPR (cl), 0); + collapse = tree_to_shwi (OMP_CLAUSE_COLLAPSE_EXPR (cl)); gcc_assert (collapse >= 1); diff --git a/gcc/cp/semantics.c b/gcc/cp/semantics.c index e56052d4fda..b30794c183e 100644 --- a/gcc/cp/semantics.c +++ b/gcc/cp/semantics.c @@ -8560,7 +8560,7 @@ cxx_eval_array_reference (const constexpr_call *call, tree t, *non_constant_p = true; return t; } - i = tree_low_cst (index, 0); + i = tree_to_shwi (index); if (TREE_CODE (ary) == CONSTRUCTOR) return (*CONSTRUCTOR_ELTS (ary))[i].value; else if (elem_nchars == 1) @@ -8675,8 +8675,8 @@ cxx_eval_bit_field_ref (const constexpr_call *call, tree t, TREE_OPERAND (t, 1), TREE_OPERAND (t, 2)); start = TREE_OPERAND (t, 2); - istart = tree_low_cst (start, 0); - isize = tree_low_cst (TREE_OPERAND (t, 1), 0); 
+ istart = tree_to_shwi (start); + isize = tree_to_shwi (TREE_OPERAND (t, 1)); utype = TREE_TYPE (t); if (!TYPE_UNSIGNED (utype)) utype = build_nonstandard_integer_type (TYPE_PRECISION (utype), 1); @@ -8688,11 +8688,11 @@ cxx_eval_bit_field_ref (const constexpr_call *call, tree t, return value; if (TREE_CODE (TREE_TYPE (field)) == INTEGER_TYPE && TREE_CODE (value) == INTEGER_CST - && host_integerp (bitpos, 0) - && host_integerp (DECL_SIZE (field), 0)) + && tree_fits_shwi_p (bitpos) + && tree_fits_shwi_p (DECL_SIZE (field))) { - HOST_WIDE_INT bit = tree_low_cst (bitpos, 0); - HOST_WIDE_INT sz = tree_low_cst (DECL_SIZE (field), 0); + HOST_WIDE_INT bit = tree_to_shwi (bitpos); + HOST_WIDE_INT sz = tree_to_shwi (DECL_SIZE (field)); HOST_WIDE_INT shift; if (bit >= istart && bit + sz <= istart + isize) { @@ -8849,7 +8849,7 @@ cxx_eval_vec_init_1 (const constexpr_call *call, tree atype, tree init, bool *non_constant_p, bool *overflow_p) { tree elttype = TREE_TYPE (atype); - int max = tree_low_cst (array_type_nelts (atype), 0); + int max = tree_to_shwi (array_type_nelts (atype)); vec<constructor_elt, va_gc> *n; vec_alloc (n, max + 1); bool pre_init = false; @@ -9068,9 +9068,9 @@ cxx_fold_indirect_ref (location_t loc, tree type, tree op0, bool *empty_base) && (same_type_ignoring_top_level_qualifiers_p (type, TREE_TYPE (op00type)))) { - HOST_WIDE_INT offset = tree_low_cst (op01, 0); + HOST_WIDE_INT offset = tree_to_shwi (op01); tree part_width = TYPE_SIZE (type); - unsigned HOST_WIDE_INT part_widthi = tree_low_cst (part_width, 0)/BITS_PER_UNIT; + unsigned HOST_WIDE_INT part_widthi = tree_to_shwi (part_width)/BITS_PER_UNIT; unsigned HOST_WIDE_INT indexi = offset * BITS_PER_UNIT; tree index = bitsize_int (indexi); diff --git a/gcc/cp/tree.c b/gcc/cp/tree.c index 63ec7fa7266..1ffade6758b 100644 --- a/gcc/cp/tree.c +++ b/gcc/cp/tree.c @@ -32,6 +32,8 @@ along with GCC; see the file COPYING3. 
If not see #include "splay-tree.h" #include "gimple.h" /* gimple_has_body_p */ #include "hash-table.h" +#include "wide-int.h" + static tree bot_manip (tree *, int *, void *); static tree bot_replace (tree *, int *, void *); @@ -1692,7 +1694,7 @@ debug_binfo (tree elem) fprintf (stderr, "type \"%s\", offset = " HOST_WIDE_INT_PRINT_DEC "\nvtable type:\n", TYPE_NAME_STRING (BINFO_TYPE (elem)), - TREE_INT_CST_LOW (BINFO_OFFSET (elem))); + tree_to_hwi (BINFO_OFFSET (elem))); debug_tree (BINFO_TYPE (elem)); if (BINFO_VTABLE (elem)) fprintf (stderr, "vtable decl \"%s\"\n", @@ -1708,7 +1710,7 @@ debug_binfo (tree elem) tree fndecl = TREE_VALUE (virtuals); fprintf (stderr, "%s [%ld =? %ld]\n", IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (fndecl)), - (long) n, (long) TREE_INT_CST_LOW (DECL_VINDEX (fndecl))); + (long) n, (long) tree_to_hwi (DECL_VINDEX (fndecl))); ++n; virtuals = TREE_CHAIN (virtuals); } @@ -2601,8 +2603,7 @@ cp_tree_equal (tree t1, tree t2) switch (code1) { case INTEGER_CST: - return TREE_INT_CST_LOW (t1) == TREE_INT_CST_LOW (t2) - && TREE_INT_CST_HIGH (t1) == TREE_INT_CST_HIGH (t2); + return max_wide_int (t1) == max_wide_int (t2); case REAL_CST: return REAL_VALUES_EQUAL (TREE_REAL_CST (t1), TREE_REAL_CST (t2)); @@ -3240,7 +3241,7 @@ handle_init_priority_attribute (tree* node, return NULL_TREE; } - pri = TREE_INT_CST_LOW (initp_expr); + pri = tree_to_hwi (initp_expr); type = strip_array_types (type); diff --git a/gcc/cp/typeck2.c b/gcc/cp/typeck2.c index 011406ca3ab..c42e6097522 100644 --- a/gcc/cp/typeck2.c +++ b/gcc/cp/typeck2.c @@ -34,6 +34,7 @@ along with GCC; see the file COPYING3. 
If not see #include "cp-tree.h" #include "flags.h" #include "diagnostic-core.h" +#include "wide-int.h" static tree process_init_constructor (tree type, tree init, tsubst_flags_t complain); @@ -979,7 +980,7 @@ digest_init_r (tree type, tree init, bool nested, int flags, } if (TYPE_DOMAIN (type) != 0 && TREE_CONSTANT (TYPE_SIZE (type))) { - int size = TREE_INT_CST_LOW (TYPE_SIZE (type)); + int size = tree_to_hwi (TYPE_SIZE (type)); size = (size + BITS_PER_UNIT - 1) / BITS_PER_UNIT; /* In C it is ok to subtract 1 from the length of the string because it's ok to ignore the terminating null char that is @@ -1118,12 +1119,10 @@ process_init_constructor_array (tree type, tree init, { tree domain = TYPE_DOMAIN (type); if (domain && TREE_CONSTANT (TYPE_MAX_VALUE (domain))) - len = (tree_to_double_int (TYPE_MAX_VALUE (domain)) - - tree_to_double_int (TYPE_MIN_VALUE (domain)) - + double_int_one) - .ext (TYPE_PRECISION (TREE_TYPE (domain)), - TYPE_UNSIGNED (TREE_TYPE (domain))) - .low; + len = wi::ext (addr_wide_int (TYPE_MAX_VALUE (domain)) + - TYPE_MIN_VALUE (domain) + 1, + TYPE_PRECISION (TREE_TYPE (domain)), + TYPE_SIGN (TREE_TYPE (domain))).to_uhwi (); else unbounded = true; /* Take as many as there are. 
*/ } diff --git a/gcc/cp/vtable-class-hierarchy.c b/gcc/cp/vtable-class-hierarchy.c index 78611a83264..38ea8beeb89 100644 --- a/gcc/cp/vtable-class-hierarchy.c +++ b/gcc/cp/vtable-class-hierarchy.c @@ -450,7 +450,7 @@ check_and_record_registered_pairs (tree vtable_decl, tree vptr_address, vptr_address = TREE_OPERAND (vptr_address, 0); if (TREE_OPERAND_LENGTH (vptr_address) > 1) - offset = TREE_INT_CST_LOW (TREE_OPERAND (vptr_address, 1)); + offset = tree_to_uhwi (TREE_OPERAND (vptr_address, 1)); else offset = 0; @@ -873,7 +873,7 @@ output_set_info (tree record_type, vec<tree> vtbl_ptr_array) vptr_name = IDENTIFIER_POINTER (DECL_NAME (arg0)); if (TREE_CODE (arg1) == INTEGER_CST) - vptr_offset = TREE_INT_CST_LOW (arg1); + vptr_offset = tree_to_uhwi (arg1); } snprintf (buffer, sizeof (buffer), "%s %s %s + %d\n", diff --git a/gcc/cppbuiltin.c b/gcc/cppbuiltin.c index 2ceccdcce2b..86ef5cbd2ca 100644 --- a/gcc/cppbuiltin.c +++ b/gcc/cppbuiltin.c @@ -128,7 +128,7 @@ define_builtin_macros_for_type_sizes (cpp_reader *pfile) { #define define_type_sizeof(NAME, TYPE) \ cpp_define_formatted (pfile, NAME"="HOST_WIDE_INT_PRINT_DEC, \ - tree_low_cst (TYPE_SIZE_UNIT (TYPE), 1)) + tree_to_uhwi (TYPE_SIZE_UNIT (TYPE))) define_type_sizeof ("__SIZEOF_INT__", integer_type_node); define_type_sizeof ("__SIZEOF_LONG__", long_integer_type_node); diff --git a/gcc/cse.c b/gcc/cse.c index 43fa1e8191f..4b3226c2411 100644 --- a/gcc/cse.c +++ b/gcc/cse.c @@ -2336,15 +2336,23 @@ hash_rtx_cb (const_rtx x, enum machine_mode mode, + (unsigned int) INTVAL (x)); return hash; + case CONST_WIDE_INT: + { + int i; + for (i = 0; i < CONST_WIDE_INT_NUNITS (x); i++) + hash += CONST_WIDE_INT_ELT (x, i); + } + return hash; + case CONST_DOUBLE: /* This is like the general case, except that it only counts the integers representing the constant. 
*/ hash += (unsigned int) code + (unsigned int) GET_MODE (x); - if (GET_MODE (x) != VOIDmode) - hash += real_hash (CONST_DOUBLE_REAL_VALUE (x)); - else + if (TARGET_SUPPORTS_WIDE_INT == 0 && GET_MODE (x) == VOIDmode) hash += ((unsigned int) CONST_DOUBLE_LOW (x) + (unsigned int) CONST_DOUBLE_HIGH (x)); + else + hash += real_hash (CONST_DOUBLE_REAL_VALUE (x)); return hash; case CONST_FIXED: @@ -3288,8 +3296,8 @@ fold_rtx (rtx x, rtx insn) break; new_rtx = simplify_unary_operation (code, mode, - const_arg0 ? const_arg0 : folded_arg0, - mode_arg0); + const_arg0 ? const_arg0 : folded_arg0, + mode_arg0); } break; @@ -3761,6 +3769,7 @@ equiv_constant (rtx x) /* See if we previously assigned a constant value to this SUBREG. */ if ((new_rtx = lookup_as_function (x, CONST_INT)) != 0 + || (new_rtx = lookup_as_function (x, CONST_WIDE_INT)) != 0 || (new_rtx = lookup_as_function (x, CONST_DOUBLE)) != 0 || (new_rtx = lookup_as_function (x, CONST_FIXED)) != 0) return new_rtx; diff --git a/gcc/cselib.c b/gcc/cselib.c index e201f5e7c49..5ebfebd15e8 100644 --- a/gcc/cselib.c +++ b/gcc/cselib.c @@ -926,8 +926,7 @@ rtx_equal_for_cselib_1 (rtx x, rtx y, enum machine_mode memmode) /* These won't be handled correctly by the code below. */ switch (GET_CODE (x)) { - case CONST_DOUBLE: - case CONST_FIXED: + CASE_CONST_UNIQUE: case DEBUG_EXPR: return 0; @@ -1121,15 +1120,23 @@ cselib_hash_rtx (rtx x, int create, enum machine_mode memmode) hash += ((unsigned) CONST_INT << 7) + INTVAL (x); return hash ? hash : (unsigned int) CONST_INT; + case CONST_WIDE_INT: + { + int i; + for (i = 0; i < CONST_WIDE_INT_NUNITS (x); i++) + hash += CONST_WIDE_INT_ELT (x, i); + } + return hash; + case CONST_DOUBLE: /* This is like the general case, except that it only counts the integers representing the constant. 
*/ hash += (unsigned) code + (unsigned) GET_MODE (x); - if (GET_MODE (x) != VOIDmode) - hash += real_hash (CONST_DOUBLE_REAL_VALUE (x)); - else + if (TARGET_SUPPORTS_WIDE_INT == 0 && GET_MODE (x) == VOIDmode) hash += ((unsigned) CONST_DOUBLE_LOW (x) + (unsigned) CONST_DOUBLE_HIGH (x)); + else + hash += real_hash (CONST_DOUBLE_REAL_VALUE (x)); return hash ? hash : (unsigned int) CONST_DOUBLE; case CONST_FIXED: diff --git a/gcc/dbxout.c b/gcc/dbxout.c index 9b5e23f6d71..65149814cc8 100644 --- a/gcc/dbxout.c +++ b/gcc/dbxout.c @@ -690,88 +690,40 @@ stabstr_U (unsigned HOST_WIDE_INT num) static void stabstr_O (tree cst) { - unsigned HOST_WIDE_INT high = TREE_INT_CST_HIGH (cst); - unsigned HOST_WIDE_INT low = TREE_INT_CST_LOW (cst); - - char buf[128]; - char *p = buf + sizeof buf; - - /* GDB wants constants with no extra leading "1" bits, so - we need to remove any sign-extension that might be - present. */ - { - const unsigned int width = TYPE_PRECISION (TREE_TYPE (cst)); - if (width == HOST_BITS_PER_DOUBLE_INT) - ; - else if (width > HOST_BITS_PER_WIDE_INT) - high &= (((HOST_WIDE_INT) 1 << (width - HOST_BITS_PER_WIDE_INT)) - 1); - else if (width == HOST_BITS_PER_WIDE_INT) - high = 0; - else - high = 0, low &= (((HOST_WIDE_INT) 1 << width) - 1); - } + wide_int wcst = cst; + int prec = wcst.get_precision (); + int res_pres = prec % 3; + int i; + unsigned int digit; /* Leading zero for base indicator. */ stabstr_C ('0'); /* If the value is zero, the base indicator will serve as the value all by itself. */ - if (high == 0 && low == 0) + if (wcst == 0) return; - /* If the high half is zero, we need only print the low half normally. */ - if (high == 0) - NUMBER_FMT_LOOP (p, low, 8); - else + /* GDB wants constants with no extra leading "1" bits, so + we need to remove any sign-extension that might be + present. */ + if (res_pres == 1) { - /* When high != 0, we need to print enough zeroes from low to - give the digits from high their proper place-values. 
Hence - NUMBER_FMT_LOOP cannot be used. */ - const int n_digits = HOST_BITS_PER_WIDE_INT / 3; - int i; - - for (i = 1; i <= n_digits; i++) - { - unsigned int digit = low % 8; - low /= 8; - *--p = '0' + digit; - } - - /* Octal digits carry exactly three bits of information. The - width of a HOST_WIDE_INT is not normally a multiple of three. - Therefore, the next digit printed probably needs to carry - information from both low and high. */ - if (HOST_BITS_PER_WIDE_INT % 3 != 0) - { - const int n_leftover_bits = HOST_BITS_PER_WIDE_INT % 3; - const int n_bits_from_high = 3 - n_leftover_bits; - - const unsigned HOST_WIDE_INT - low_mask = (((unsigned HOST_WIDE_INT)1) << n_leftover_bits) - 1; - const unsigned HOST_WIDE_INT - high_mask = (((unsigned HOST_WIDE_INT)1) << n_bits_from_high) - 1; - - unsigned int digit; - - /* At this point, only the bottom n_leftover_bits bits of low - should be set. */ - gcc_assert (!(low & ~low_mask)); - - digit = (low | ((high & high_mask) << n_leftover_bits)); - high >>= n_bits_from_high; - - *--p = '0' + digit; - } - - /* Now we can format high in the normal manner. However, if - the only bits of high that were set were handled by the - digit split between low and high, high will now be zero, and - we don't want to print extra digits in that case. */ - if (high) - NUMBER_FMT_LOOP (p, high, 8); + digit = wi::extract_uhwi (wcst, prec - 1, 1); + stabstr_C ('0' + digit); + } + else if (res_pres == 2) + { + digit = wi::extract_uhwi (wcst, prec - 2, 2); + stabstr_C ('0' + digit); } - obstack_grow (&stabstr_ob, p, (buf + sizeof buf) - p); + prec -= res_pres; + for (i = prec - 3; i <= 0; i = i - 3) + { + digit = wi::extract_uhwi (wcst, i, 3); + stabstr_C ('0' + digit); + } } /* Called whenever it is safe to break a stabs string into multiple @@ -1519,9 +1471,9 @@ dbxout_type_fields (tree type) /* Omit fields whose position or size are variable or too large to represent. */ || (TREE_CODE (tem) == FIELD_DECL - && (! 
host_integerp (bit_position (tem), 0) + && (! tree_fits_shwi_p (bit_position (tem)) || ! DECL_SIZE (tem) - || ! host_integerp (DECL_SIZE (tem), 1)))) + || ! tree_fits_uhwi_p (DECL_SIZE (tem))))) continue; else if (TREE_CODE (tem) != CONST_DECL) @@ -1566,7 +1518,7 @@ dbxout_type_fields (tree type) stabstr_C (','); stabstr_D (int_bit_position (tem)); stabstr_C (','); - stabstr_D (tree_low_cst (DECL_SIZE (tem), 1)); + stabstr_D (tree_to_uhwi (DECL_SIZE (tem))); stabstr_C (';'); } } @@ -1610,9 +1562,9 @@ dbxout_type_method_1 (tree decl) stabstr_C (c1); stabstr_C (c2); - if (DECL_VINDEX (decl) && host_integerp (DECL_VINDEX (decl), 0)) + if (DECL_VINDEX (decl) && tree_fits_shwi_p (DECL_VINDEX (decl))) { - stabstr_D (tree_low_cst (DECL_VINDEX (decl), 0)); + stabstr_D (tree_to_shwi (DECL_VINDEX (decl))); stabstr_C (';'); dbxout_type (DECL_CONTEXT (decl), 0); stabstr_C (';'); @@ -1718,23 +1670,23 @@ dbxout_range_type (tree type, tree low, tree high) } stabstr_C (';'); - if (low && host_integerp (low, 0)) + if (low && tree_fits_shwi_p (low)) { if (print_int_cst_bounds_in_octal_p (type, low, high)) stabstr_O (low); else - stabstr_D (tree_low_cst (low, 0)); + stabstr_D (tree_to_shwi (low)); } else stabstr_C ('0'); stabstr_C (';'); - if (high && host_integerp (high, 0)) + if (high && tree_fits_shwi_p (high)) { if (print_int_cst_bounds_in_octal_p (type, low, high)) stabstr_O (high); else - stabstr_D (tree_low_cst (high, 0)); + stabstr_D (tree_to_shwi (high)); stabstr_C (';'); } else @@ -1864,7 +1816,7 @@ dbxout_type (tree type, int full) Sun dbx crashes if we do. */ if (! full || !COMPLETE_TYPE_P (type) /* No way in DBX fmt to describe a variable size. */ - || ! host_integerp (TYPE_SIZE (type), 1)) + || ! tree_fits_uhwi_p (TYPE_SIZE (type))) return; break; case TYPE_DEFINED: @@ -1889,7 +1841,7 @@ dbxout_type (tree type, int full) && !full) || !COMPLETE_TYPE_P (type) /* No way in DBX fmt to describe a variable size. */ - || ! host_integerp (TYPE_SIZE (type), 1)) + || ! 
tree_fits_uhwi_p (TYPE_SIZE (type))) { typevec[TYPE_SYMTAB_ADDRESS (type)].status = TYPE_XREF; return; @@ -2147,7 +2099,7 @@ dbxout_type (tree type, int full) && !full) || !COMPLETE_TYPE_P (type) /* No way in DBX fmt to describe a variable size. */ - || ! host_integerp (TYPE_SIZE (type), 1)) + || ! tree_fits_uhwi_p (TYPE_SIZE (type))) { /* If the type is just a cross reference, output one and mark the type as partially described. @@ -2210,10 +2162,10 @@ dbxout_type (tree type, int full) offset within the vtable where we must look to find the necessary adjustment. */ stabstr_D - (tree_low_cst (BINFO_VPTR_FIELD (child), 0) + (tree_to_shwi (BINFO_VPTR_FIELD (child)) * BITS_PER_UNIT); else - stabstr_D (tree_low_cst (BINFO_OFFSET (child), 0) + stabstr_D (tree_to_shwi (BINFO_OFFSET (child)) * BITS_PER_UNIT); stabstr_C (','); dbxout_type (BINFO_TYPE (child), 0); @@ -2228,11 +2180,11 @@ dbxout_type (tree type, int full) stabstr_C (':'); dbxout_type (BINFO_TYPE (child), full); stabstr_C (','); - stabstr_D (tree_low_cst (BINFO_OFFSET (child), 0) + stabstr_D (tree_to_shwi (BINFO_OFFSET (child)) * BITS_PER_UNIT); stabstr_C (','); stabstr_D - (tree_low_cst (TYPE_SIZE (BINFO_TYPE (child)), 0) + (tree_to_shwi (TYPE_SIZE (BINFO_TYPE (child))) * BITS_PER_UNIT); stabstr_C (';'); } @@ -2299,11 +2251,8 @@ dbxout_type (tree type, int full) if (TREE_CODE (value) == CONST_DECL) value = DECL_INITIAL (value); - if (TREE_INT_CST_HIGH (value) == 0) - stabstr_D (TREE_INT_CST_LOW (value)); - else if (TREE_INT_CST_HIGH (value) == -1 - && (HOST_WIDE_INT) TREE_INT_CST_LOW (value) < 0) - stabstr_D (TREE_INT_CST_LOW (value)); + if (cst_fits_shwi_p (value)) + stabstr_D (tree_to_hwi (value)); else stabstr_O (value); @@ -2516,9 +2465,9 @@ dbxout_expand_expr (tree expr) return NULL; if (offset != NULL) { - if (!host_integerp (offset, 0)) + if (!tree_fits_shwi_p (offset)) return NULL; - x = adjust_address_nv (x, mode, tree_low_cst (offset, 0)); + x = adjust_address_nv (x, mode, tree_to_shwi (offset)); } 
if (bitpos != 0) x = adjust_address_nv (x, mode, bitpos / BITS_PER_UNIT); @@ -2796,7 +2745,7 @@ dbxout_symbol (tree decl, int local ATTRIBUTE_UNUSED) /* Do not generate a tag for records of variable size, since this type can not be properly described in the DBX format, and it confuses some tools such as objdump. */ - && host_integerp (TYPE_SIZE (type), 1)) + && tree_fits_uhwi_p (TYPE_SIZE (type))) { tree name = TYPE_NAME (type); if (TREE_CODE (name) == TYPE_DECL) @@ -2912,7 +2861,7 @@ dbxout_symbol (tree decl, int local ATTRIBUTE_UNUSED) ??? Why do we skip emitting the type and location in this case? */ if (TREE_STATIC (decl) && TREE_READONLY (decl) && DECL_INITIAL (decl) != 0 - && host_integerp (DECL_INITIAL (decl), 0) + && tree_fits_shwi_p (DECL_INITIAL (decl)) && ! TREE_ASM_WRITTEN (decl) && (DECL_FILE_SCOPE_P (decl) || TREE_CODE (DECL_CONTEXT (decl)) == BLOCK @@ -2924,7 +2873,7 @@ dbxout_symbol (tree decl, int local ATTRIBUTE_UNUSED) if (TREE_CODE (TREE_TYPE (decl)) == INTEGER_TYPE || TREE_CODE (TREE_TYPE (decl)) == ENUMERAL_TYPE) { - HOST_WIDE_INT ival = TREE_INT_CST_LOW (DECL_INITIAL (decl)); + HOST_WIDE_INT ival = tree_to_hwi (DECL_INITIAL (decl)); dbxout_begin_complex_stabs (); dbxout_symbol_name (decl, NULL, 'c'); diff --git a/gcc/defaults.h b/gcc/defaults.h index 37e8a08ac5a..0c6991868c2 100644 --- a/gcc/defaults.h +++ b/gcc/defaults.h @@ -1392,6 +1392,14 @@ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see #define SWITCHABLE_TARGET 0 #endif +/* If the target supports integers that are wider than two + HOST_WIDE_INTs on the host compiler, then the target should define + TARGET_SUPPORTS_WIDE_INT and make the appropriate fixups. + Otherwise the compiler really is not robust. */ +#ifndef TARGET_SUPPORTS_WIDE_INT +#define TARGET_SUPPORTS_WIDE_INT 0 +#endif + #endif /* GCC_INSN_FLAGS_H */ #endif /* ! 
GCC_DEFAULTS_H */ diff --git a/gcc/dfp.c b/gcc/dfp.c index d15ee8f8848..3988ac9f5fb 100644 --- a/gcc/dfp.c +++ b/gcc/dfp.c @@ -24,6 +24,7 @@ along with GCC; see the file COPYING3. If not see #include "tree.h" #include "tm_p.h" #include "dfp.h" +#include "wide-int.h" /* The order of the following headers is important for making sure decNumber structure is large enough to hold decimal128 digits. */ @@ -604,11 +605,11 @@ decimal_real_to_integer (const REAL_VALUE_TYPE *r) return real_to_integer (&to); } -/* Likewise, but to an integer pair, HI+LOW. */ +/* Likewise, but returns a wide_int with PRECISION. Fail + is set if the value does not fit. */ -void -decimal_real_to_integer2 (HOST_WIDE_INT *plow, HOST_WIDE_INT *phigh, - const REAL_VALUE_TYPE *r) +wide_int +decimal_real_to_integer (const REAL_VALUE_TYPE *r, bool *fail, int precision) { decContext set; decNumber dn, dn2, dn3; @@ -628,7 +629,7 @@ decimal_real_to_integer2 (HOST_WIDE_INT *plow, HOST_WIDE_INT *phigh, function. */ decNumberToString (&dn, string); real_from_string (&to, string); - real_to_integer2 (plow, phigh, &to); + return real_to_integer (&to, fail, precision); } /* Perform the decimal floating point operation described by CODE. 
diff --git a/gcc/dfp.h b/gcc/dfp.h index 3b9bb8dd889..dcf3d833a9c 100644 --- a/gcc/dfp.h +++ b/gcc/dfp.h @@ -38,7 +38,6 @@ void decimal_real_convert (REAL_VALUE_TYPE *, enum machine_mode, const REAL_VALU void decimal_real_to_decimal (char *, const REAL_VALUE_TYPE *, size_t, size_t, int); void decimal_do_fix_trunc (REAL_VALUE_TYPE *, const REAL_VALUE_TYPE *); void decimal_real_maxval (REAL_VALUE_TYPE *, int, enum machine_mode); -void decimal_real_to_integer2 (HOST_WIDE_INT *, HOST_WIDE_INT *, const REAL_VALUE_TYPE *); HOST_WIDE_INT decimal_real_to_integer (const REAL_VALUE_TYPE *); #ifdef TREE_CODE diff --git a/gcc/doc/generic.texi b/gcc/doc/generic.texi index ccecd6e842f..a78c19d3781 100644 --- a/gcc/doc/generic.texi +++ b/gcc/doc/generic.texi @@ -1022,10 +1022,12 @@ As this example indicates, the operands are zero-indexed. @node Constant expressions @subsection Constant expressions @tindex INTEGER_CST -@findex TREE_INT_CST_HIGH -@findex TREE_INT_CST_LOW -@findex tree_int_cst_lt -@findex tree_int_cst_equal +@tindex tree_fits_uhwi_p +@tindex tree_fits_shwi_p +@tindex tree_fits_hwi_p +@tindex tree_to_uhwi +@tindex tree_to_shwi +@tindex tree_to_hwi @tindex REAL_CST @tindex FIXED_CST @tindex COMPLEX_CST @@ -1044,36 +1046,20 @@ These nodes represent integer constants. Note that the type of these constants is obtained with @code{TREE_TYPE}; they are not always of type @code{int}. In particular, @code{char} constants are represented with @code{INTEGER_CST} nodes. The value of the integer constant @code{e} is -given by -@smallexample -((TREE_INT_CST_HIGH (e) << HOST_BITS_PER_WIDE_INT) -+ TREE_INST_CST_LOW (e)) -@end smallexample -@noindent -HOST_BITS_PER_WIDE_INT is at least thirty-two on all platforms. Both -@code{TREE_INT_CST_HIGH} and @code{TREE_INT_CST_LOW} return a -@code{HOST_WIDE_INT}. The value of an @code{INTEGER_CST} is interpreted -as a signed or unsigned quantity depending on the type of the constant. 
-In general, the expression given above will overflow, so it should not -be used to calculate the value of the constant. - -The variable @code{integer_zero_node} is an integer constant with value -zero. Similarly, @code{integer_one_node} is an integer constant with -value one. The @code{size_zero_node} and @code{size_one_node} variables -are analogous, but have type @code{size_t} rather than @code{int}. - -The function @code{tree_int_cst_lt} is a predicate which holds if its -first argument is less than its second. Both constants are assumed to -have the same signedness (i.e., either both should be signed or both -should be unsigned.) The full width of the constant is used when doing -the comparison; the usual rules about promotions and conversions are -ignored. Similarly, @code{tree_int_cst_equal} holds if the two -constants are equal. The @code{tree_int_cst_sgn} function returns the -sign of a constant. The value is @code{1}, @code{0}, or @code{-1} -according on whether the constant is greater than, equal to, or less -than zero. Again, the signedness of the constant's type is taken into -account; an unsigned constant is never less than zero, no matter what -its bit-pattern. +represented in an array of HOST_WIDE_INT. There are enough elements +in the array to represent the value without taking extra elements for +redundant 0s or -1. + +The functions @code{tree_fits_uhwi_p}, @code{tree_fits_shwi_p}, and +@code{tree_fits_hwi_p} can be used to tell if the value is small +enough to fit in a HOST_WIDE_INT, as either a signed value, an unsigned +value or a value whose sign is given as a parameter. The value can +then be extracted using the @code{tree_to_uhwi}, @code{tree_to_shwi}, +or @code{tree_to_hwi}. The @code{tree_to_hwi} comes in both checked +and unchecked flavors. 
However, when the value is used in a context +where it may represent a value that is larger than can be represented +in HOST_BITS_PER_WIDE_INT bits, the wide_int class should be used to +manipulate the constant. @item REAL_CST diff --git a/gcc/doc/rtl.texi b/gcc/doc/rtl.texi index 84c0444ad9c..03732f1a2d4 100644 --- a/gcc/doc/rtl.texi +++ b/gcc/doc/rtl.texi @@ -1529,17 +1529,22 @@ Similarly, there is only one object for the integer whose value is @findex const_double @item (const_double:@var{m} @var{i0} @var{i1} @dots{}) -Represents either a floating-point constant of mode @var{m} or an -integer constant too large to fit into @code{HOST_BITS_PER_WIDE_INT} -bits but small enough to fit within twice that number of bits (GCC -does not provide a mechanism to represent even larger constants). In -the latter case, @var{m} will be @code{VOIDmode}. For integral values -constants for modes with more bits than twice the number in -@code{HOST_WIDE_INT} the implied high order bits of that constant are -copies of the top bit of @code{CONST_DOUBLE_HIGH}. Note however that -integral values are neither inherently signed nor inherently unsigned; -where necessary, signedness is determined by the rtl operation -instead. +This represents either a floating-point constant of mode @var{m} or +(on older ports that do not define +@code{TARGET_SUPPORTS_WIDE_INT}) an integer constant too large to fit +into @code{HOST_BITS_PER_WIDE_INT} bits but small enough to fit within +twice that number of bits (GCC does not provide a mechanism to +represent even larger constants). In the latter case, @var{m} will be +@code{VOIDmode}. For integral values constants for modes with more +bits than twice the number in @code{HOST_WIDE_INT} the implied high +order bits of that constant are copies of the top bit of +@code{CONST_DOUBLE_HIGH}. Note however that integral values are +neither inherently signed nor inherently unsigned; where necessary, +signedness is determined by the rtl operation instead. 
+ +On more modern ports, @code{CONST_DOUBLE} only represents floating +point values. New ports define @code{TARGET_SUPPORTS_WIDE_INT} to +make this designation. @findex CONST_DOUBLE_LOW If @var{m} is @code{VOIDmode}, the bits of the value are stored in @@ -1554,6 +1559,37 @@ machine's or host machine's floating point format. To convert them to the precise bit pattern used by the target machine, use the macro @code{REAL_VALUE_TO_TARGET_DOUBLE} and friends (@pxref{Data Output}). +@findex CONST_WIDE_INT +@item (const_wide_int:@var{m} @var{nunits} @var{elt0} @dots{}) +This contains an array of @code{HOST_WIDE_INTS} that is large enough +to hold any constant that can be represented on the target. This form +of rtl is only used on targets that define +@code{TARGET_SUPPORTS_WIDE_INT} to be non zero and then +@code{CONST_DOUBLES} are only used to hold floating point values. If +the target leaves @code{TARGET_SUPPORTS_WIDE_INT} defined as 0, +@code{CONST_WIDE_INT}s are not used and @code{CONST_DOUBLE}s are as +they were before. + +The values are stored in a compressed format. The higher order +0s or -1s are not represented if they are just the logical sign +extension of the number that is represented. + +@findex CONST_WIDE_INT_VEC +@item CONST_WIDE_INT_VEC (@var{code}) +Returns the entire array of @code{HOST_WIDE_INT}s that are used to +store the value. This macro should be rarely used. + +@findex CONST_WIDE_INT_NUNITS +@item CONST_WIDE_INT_NUNITS (@var{code}) +The number of @code{HOST_WIDE_INT}s used to represent the number. +Note that this will generally be smaller than the number of +@code{HOST_WIDE_INT}s implied by the mode size. + +@findex CONST_WIDE_INT_ELT +@item CONST_WIDE_INT_ELT (@var{code},@var{i}) +Returns the @code{i}th element of the array. Element 0 contains +the low order bits of the constant. + @findex const_fixed @item (const_fixed:@var{m} @dots{}) Represents a fixed-point constant of mode @var{m}. 
diff --git a/gcc/doc/tm.texi b/gcc/doc/tm.texi index f96764dbb66..ae99c12ecad 100644 --- a/gcc/doc/tm.texi +++ b/gcc/doc/tm.texi @@ -9641,14 +9641,8 @@ Returns the negative of the floating point value @var{x}. Returns the absolute value of @var{x}. @end deftypefn -@deftypefn Macro void REAL_VALUE_TO_INT (HOST_WIDE_INT @var{low}, HOST_WIDE_INT @var{high}, REAL_VALUE_TYPE @var{x}) -Converts a floating point value @var{x} into a double-precision integer -which is then stored into @var{low} and @var{high}. If the value is not -integral, it is truncated. -@end deftypefn - -@deftypefn Macro void REAL_VALUE_FROM_INT (REAL_VALUE_TYPE @var{x}, HOST_WIDE_INT @var{low}, HOST_WIDE_INT @var{high}, enum machine_mode @var{mode}) -Converts a double-precision integer found in @var{low} and @var{high}, +@deftypefn Macro void REAL_VALUE_FROM_INT (REAL_VALUE_TYPE @var{x}, HOST_WIDE_INT @var{val}, enum machine_mode @var{mode}) +Converts a double-precision integer found in @var{val}, into a floating point value which is then stored into @var{x}. The value is truncated to fit in mode @var{mode}. @end deftypefn @@ -11353,3 +11347,50 @@ It returns true if the target supports GNU indirect functions. The support includes the assembler, linker and dynamic linker. The default value of this hook is based on target's libc. @end deftypefn + +@defmac TARGET_SUPPORTS_WIDE_INT + +On older ports, large integers are stored in @code{CONST_DOUBLE} rtl +objects. Newer ports define @code{TARGET_SUPPORTS_WIDE_INT} to be non +zero to indicate that large integers are stored in +@code{CONST_WIDE_INT} rtl objects. The @code{CONST_WIDE_INT} allows +very large integer constants to be represented. @code{CONST_DOUBLE} +are limited to twice the size of host's @code{HOST_WIDE_INT} +representation. + +Converting a port mostly requires looking for the places where +@code{CONST_DOUBLES} are used with @code{VOIDmode} and replacing that +code with code that accesses @code{CONST_WIDE_INT}s. 
@samp{"grep -i +const_double"} at the port level gets you to 95% of the changes that +need to be made. There are a few places that require a deeper look. + +@itemize @bullet +@item +There is no equivalent to @code{hval} and @code{lval} for +@code{CONST_WIDE_INT}s. This would be difficult to express in the md +language since there are a variable number of elements. + +Most ports only check that @code{hval} is either 0 or -1 to see if the +value is small. As mentioned above, this will no longer be necessary +since small constants are always @code{CONST_INT}. Of course there +are still a few exceptions, the alpha's constraint used by the zap +instruction certainly requires careful examination by C code. +However, all the current code does is pass the hval and lval to C +code, so evolving the c code to look at the @code{CONST_WIDE_INT} is +not really a large change. + +@item +Because there is no standard template that ports use to materialize +constants, there is likely to be some futzing that is unique to each +port in this code. + +@item +The rtx costs may have to be adjusted to properly account for larger +constants that are represented as @code{CONST_WIDE_INT}. +@end itemize + +All and all it does not takes long to convert ports that the +maintainer is familiar with. + +@end defmac + diff --git a/gcc/doc/tm.texi.in b/gcc/doc/tm.texi.in index 863e843af3d..368afd6b1e7 100644 --- a/gcc/doc/tm.texi.in +++ b/gcc/doc/tm.texi.in @@ -7333,14 +7333,8 @@ Returns the negative of the floating point value @var{x}. Returns the absolute value of @var{x}. @end deftypefn -@deftypefn Macro void REAL_VALUE_TO_INT (HOST_WIDE_INT @var{low}, HOST_WIDE_INT @var{high}, REAL_VALUE_TYPE @var{x}) -Converts a floating point value @var{x} into a double-precision integer -which is then stored into @var{low} and @var{high}. If the value is not -integral, it is truncated. 
-@end deftypefn - -@deftypefn Macro void REAL_VALUE_FROM_INT (REAL_VALUE_TYPE @var{x}, HOST_WIDE_INT @var{low}, HOST_WIDE_INT @var{high}, enum machine_mode @var{mode}) -Converts a double-precision integer found in @var{low} and @var{high}, +@deftypefn Macro void REAL_VALUE_FROM_INT (REAL_VALUE_TYPE @var{x}, HOST_WIDE_INT @var{val}, enum machine_mode @var{mode}) +Converts a double-precision integer found in @var{val}, into a floating point value which is then stored into @var{x}. The value is truncated to fit in mode @var{mode}. @end deftypefn @@ -8388,3 +8382,50 @@ and the associated definitions of those functions. @hook TARGET_ATOMIC_TEST_AND_SET_TRUEVAL @hook TARGET_HAS_IFUNC_P + +@defmac TARGET_SUPPORTS_WIDE_INT + +On older ports, large integers are stored in @code{CONST_DOUBLE} rtl +objects. Newer ports define @code{TARGET_SUPPORTS_WIDE_INT} to be non +zero to indicate that large integers are stored in +@code{CONST_WIDE_INT} rtl objects. The @code{CONST_WIDE_INT} allows +very large integer constants to be represented. @code{CONST_DOUBLE} +are limited to twice the size of host's @code{HOST_WIDE_INT} +representation. + +Converting a port mostly requires looking for the places where +@code{CONST_DOUBLES} are used with @code{VOIDmode} and replacing that +code with code that accesses @code{CONST_WIDE_INT}s. @samp{"grep -i +const_double"} at the port level gets you to 95% of the changes that +need to be made. There are a few places that require a deeper look. + +@itemize @bullet +@item +There is no equivalent to @code{hval} and @code{lval} for +@code{CONST_WIDE_INT}s. This would be difficult to express in the md +language since there are a variable number of elements. + +Most ports only check that @code{hval} is either 0 or -1 to see if the +value is small. As mentioned above, this will no longer be necessary +since small constants are always @code{CONST_INT}. 
Of course there +are still a few exceptions, the alpha's constraint used by the zap +instruction certainly requires careful examination by C code. +However, all the current code does is pass the hval and lval to C +code, so evolving the C code to look at the @code{CONST_WIDE_INT} is +not really a large change. + +@item +Because there is no standard template that ports use to materialize +constants, there is likely to be some futzing that is unique to each +port in this code. + +@item +The rtx costs may have to be adjusted to properly account for larger +constants that are represented as @code{CONST_WIDE_INT}. +@end itemize + +All in all it does not take long to convert ports that the +maintainer is familiar with. + +@end defmac + diff --git a/gcc/dojump.c b/gcc/dojump.c index ee12d761eee..b7b78050217 100644 --- a/gcc/dojump.c +++ b/gcc/dojump.c @@ -142,6 +142,7 @@ static bool prefer_and_bit_test (enum machine_mode mode, int bitnum) { bool speed_p; + wide_int mask = wi::set_bit_in_zero (bitnum, GET_MODE_PRECISION (mode)); if (and_test == 0) { @@ -162,8 +163,7 @@ prefer_and_bit_test (enum machine_mode mode, int bitnum) } /* Fill in the integers. 
*/ - XEXP (and_test, 1) - = immed_double_int_const (double_int_zero.set_bit (bitnum), mode); + XEXP (and_test, 1) = immed_wide_int_const (mask, mode); XEXP (XEXP (shift_test, 0), 1) = GEN_INT (bitnum); speed_p = optimize_insn_for_speed_p (); @@ -541,10 +541,10 @@ do_jump (tree exp, rtx if_false_label, rtx if_true_label, int prob) && compare_tree_int (shift, 0) >= 0 && compare_tree_int (shift, HOST_BITS_PER_WIDE_INT) < 0 && prefer_and_bit_test (TYPE_MODE (argtype), - TREE_INT_CST_LOW (shift))) + tree_to_hwi (shift))) { unsigned HOST_WIDE_INT mask - = (unsigned HOST_WIDE_INT) 1 << TREE_INT_CST_LOW (shift); + = (unsigned HOST_WIDE_INT) 1 << tree_to_hwi (shift); do_jump (build2 (BIT_AND_EXPR, argtype, arg, build_int_cstu (argtype, mask)), clr_label, set_label, setclr_prob); diff --git a/gcc/double-int.h b/gcc/double-int.h index 650520ba052..50ca182b83c 100644 --- a/gcc/double-int.h +++ b/gcc/double-int.h @@ -20,6 +20,8 @@ along with GCC; see the file COPYING3. If not see #ifndef DOUBLE_INT_H #define DOUBLE_INT_H +#include "wide-int.h" + /* A large integer is currently represented as a pair of HOST_WIDE_INTs. 
It therefore represents a number with precision of 2 * HOST_BITS_PER_WIDE_INT bits (it is however possible that the @@ -435,4 +437,36 @@ void mpz_set_double_int (mpz_t, double_int, bool); double_int mpz_get_double_int (const_tree, mpz_t, bool); #endif +namespace wi +{ + template <> + struct int_traits <double_int> + { + static const enum precision_type precision_type = CONST_PRECISION; + static const bool host_dependent_precision = true; + static const unsigned int precision = HOST_BITS_PER_DOUBLE_INT; + static unsigned int get_precision (const double_int &); + static wi::storage_ref decompose (HOST_WIDE_INT *, unsigned int, + const double_int &); + }; +} + +inline unsigned int +wi::int_traits <double_int>::get_precision (const double_int &) +{ + return precision; +} + +inline wi::storage_ref +wi::int_traits <double_int>::decompose (HOST_WIDE_INT *scratch, unsigned int p, + const double_int &x) +{ + gcc_checking_assert (precision == p); + scratch[0] = x.low; + if ((x.high == 0 && scratch[0] >= 0) || (x.high == -1 && scratch[0] < 0)) + return wi::storage_ref (scratch, 1, precision); + scratch[1] = x.high; + return wi::storage_ref (scratch, 2, precision); +} + #endif /* DOUBLE_INT_H */ diff --git a/gcc/dwarf2out.c b/gcc/dwarf2out.c index 3f4e314e1d4..10112701f27 100644 --- a/gcc/dwarf2out.c +++ b/gcc/dwarf2out.c @@ -347,6 +347,17 @@ dump_struct_debug (tree type, enum debug_info_usage usage, #endif + +/* Get the number of host wide ints needed to represent the precision + of the number. 
*/ + +static unsigned int +get_full_len (const wide_int &op) +{ + return ((op.get_precision () + HOST_BITS_PER_WIDE_INT - 1) + / HOST_BITS_PER_WIDE_INT); +} + static bool should_emit_struct_debug (tree type, enum debug_info_usage usage) { @@ -1382,6 +1393,9 @@ dw_val_equal_p (dw_val_node *a, dw_val_node *b) return (a->v.val_double.high == b->v.val_double.high && a->v.val_double.low == b->v.val_double.low); + case dw_val_class_wide_int: + return *a->v.val_wide == *b->v.val_wide; + case dw_val_class_vec: { size_t a_len = a->v.val_vec.elt_size * a->v.val_vec.length; @@ -1638,6 +1652,10 @@ size_of_loc_descr (dw_loc_descr_ref loc) case dw_val_class_const_double: size += HOST_BITS_PER_DOUBLE_INT / BITS_PER_UNIT; break; + case dw_val_class_wide_int: + size += (get_full_len (*loc->dw_loc_oprnd2.v.val_wide) + * HOST_BITS_PER_WIDE_INT / BITS_PER_UNIT); + break; default: gcc_unreachable (); } @@ -1815,6 +1833,20 @@ output_loc_operands (dw_loc_descr_ref loc, int for_eh_or_skip) second, NULL); } break; + case dw_val_class_wide_int: + { + int i; + int len = get_full_len (*val2->v.val_wide); + if (WORDS_BIG_ENDIAN) + for (i = len; i >= 0; --i) + dw2_asm_output_data (HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR, + val2->v.val_wide->elt (i), NULL); + else + for (i = 0; i < len; ++i) + dw2_asm_output_data (HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR, + val2->v.val_wide->elt (i), NULL); + } + break; case dw_val_class_addr: gcc_assert (val1->v.val_unsigned == DWARF2_ADDR_SIZE); dw2_asm_output_addr_rtx (DWARF2_ADDR_SIZE, val2->v.val_addr, NULL); @@ -2024,6 +2056,21 @@ output_loc_operands (dw_loc_descr_ref loc, int for_eh_or_skip) dw2_asm_output_data (l, second, NULL); } break; + case dw_val_class_wide_int: + { + int i; + int len = get_full_len (*val2->v.val_wide); + l = HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR; + + dw2_asm_output_data (1, len * l, NULL); + if (WORDS_BIG_ENDIAN) + for (i = len; i >= 0; --i) + dw2_asm_output_data (l, val2->v.val_wide->elt (i), NULL); + else + for (i = 
0; i < len; ++i) + dw2_asm_output_data (l, val2->v.val_wide->elt (i), NULL); + } + break; default: gcc_unreachable (); } @@ -3116,7 +3163,7 @@ static void add_AT_location_description (dw_die_ref, enum dwarf_attribute, static void add_data_member_location_attribute (dw_die_ref, tree); static bool add_const_value_attribute (dw_die_ref, rtx); static void insert_int (HOST_WIDE_INT, unsigned, unsigned char *); -static void insert_double (double_int, unsigned char *); +static void insert_wide_int (const wide_int &, unsigned char *); static void insert_float (const_rtx, unsigned char *); static rtx rtl_for_decl_location (tree); static bool add_location_or_const_value_attribute (dw_die_ref, tree, bool, @@ -3743,6 +3790,21 @@ AT_unsigned (dw_attr_ref a) return a->dw_attr_val.v.val_unsigned; } +/* Add an unsigned wide integer attribute value to a DIE. */ + +static inline void +add_AT_wide (dw_die_ref die, enum dwarf_attribute attr_kind, + const wide_int& w) +{ + dw_attr_node attr; + + attr.dw_attr = attr_kind; + attr.dw_attr_val.val_class = dw_val_class_wide_int; + attr.dw_attr_val.v.val_wide = ggc_alloc_cleared_wide_int (); + *attr.dw_attr_val.v.val_wide = w; + add_dwarf_attr (die, &attr); +} + /* Add an unsigned double integer attribute value to a DIE. 
*/ static inline void @@ -5307,6 +5369,19 @@ print_die (dw_die_ref die, FILE *outfile) a->dw_attr_val.v.val_double.high, a->dw_attr_val.v.val_double.low); break; + case dw_val_class_wide_int: + { + int i = a->dw_attr_val.v.val_wide->get_len (); + fprintf (outfile, "constant ("); + gcc_assert (i > 0); + if (a->dw_attr_val.v.val_wide->elt (i) == 0) + fprintf (outfile, "0x"); + fprintf (outfile, HOST_WIDE_INT_PRINT_HEX, a->dw_attr_val.v.val_wide->elt (--i)); + while (-- i >= 0) + fprintf (outfile, HOST_WIDE_INT_PRINT_PADDED_HEX, a->dw_attr_val.v.val_wide->elt (i)); + fprintf (outfile, ")"); + break; + } case dw_val_class_vec: fprintf (outfile, "floating-point or vector constant"); break; @@ -5480,6 +5555,9 @@ attr_checksum (dw_attr_ref at, struct md5_ctx *ctx, int *mark) case dw_val_class_const_double: CHECKSUM (at->dw_attr_val.v.val_double); break; + case dw_val_class_wide_int: + CHECKSUM (*at->dw_attr_val.v.val_wide); + break; case dw_val_class_vec: CHECKSUM_BLOCK (at->dw_attr_val.v.val_vec.array, (at->dw_attr_val.v.val_vec.length @@ -5757,6 +5835,12 @@ attr_checksum_ordered (enum dwarf_tag tag, dw_attr_ref at, CHECKSUM (at->dw_attr_val.v.val_double); break; + case dw_val_class_wide_int: + CHECKSUM_ULEB128 (DW_FORM_block); + CHECKSUM_ULEB128 (sizeof (*at->dw_attr_val.v.val_wide)); + CHECKSUM (*at->dw_attr_val.v.val_wide); + break; + case dw_val_class_vec: CHECKSUM_ULEB128 (DW_FORM_block); CHECKSUM_ULEB128 (at->dw_attr_val.v.val_vec.length @@ -6239,6 +6323,8 @@ same_dw_val_p (const dw_val_node *v1, const dw_val_node *v2, int *mark) case dw_val_class_const_double: return v1->v.val_double.high == v2->v.val_double.high && v1->v.val_double.low == v2->v.val_double.low; + case dw_val_class_wide_int: + return *v1->v.val_wide == *v2->v.val_wide; case dw_val_class_vec: if (v1->v.val_vec.length != v2->v.val_vec.length || v1->v.val_vec.elt_size != v2->v.val_vec.elt_size) @@ -7772,6 +7858,13 @@ size_of_die (dw_die_ref die) if (HOST_BITS_PER_WIDE_INT >= 64) size++; /* block */ 
break; + case dw_val_class_wide_int: + size += (get_full_len (*a->dw_attr_val.v.val_wide) + * HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR); + if (get_full_len (*a->dw_attr_val.v.val_wide) * HOST_BITS_PER_WIDE_INT + > 64) + size++; /* block */ + break; case dw_val_class_vec: size += constant_size (a->dw_attr_val.v.val_vec.length * a->dw_attr_val.v.val_vec.elt_size) @@ -8134,6 +8227,20 @@ value_format (dw_attr_ref a) default: return DW_FORM_block1; } + case dw_val_class_wide_int: + switch (get_full_len (*a->dw_attr_val.v.val_wide) * HOST_BITS_PER_WIDE_INT) + { + case 8: + return DW_FORM_data1; + case 16: + return DW_FORM_data2; + case 32: + return DW_FORM_data4; + case 64: + return DW_FORM_data8; + default: + return DW_FORM_block1; + } case dw_val_class_vec: switch (constant_size (a->dw_attr_val.v.val_vec.length * a->dw_attr_val.v.val_vec.elt_size)) @@ -8573,6 +8680,32 @@ output_die (dw_die_ref die) } break; + case dw_val_class_wide_int: + { + int i; + int len = get_full_len (*a->dw_attr_val.v.val_wide); + int l = HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR; + if (len * HOST_BITS_PER_WIDE_INT > 64) + dw2_asm_output_data (1, get_full_len (*a->dw_attr_val.v.val_wide) * l, + NULL); + + if (WORDS_BIG_ENDIAN) + for (i = len; i >= 0; --i) + { + dw2_asm_output_data (l, a->dw_attr_val.v.val_wide->elt (i), + name); + name = NULL; + } + else + for (i = 0; i < len; ++i) + { + dw2_asm_output_data (l, a->dw_attr_val.v.val_wide->elt (i), + name); + name = NULL; + } + } + break; + case dw_val_class_vec: { unsigned int elt_size = a->dw_attr_val.v.val_vec.elt_size; @@ -10163,25 +10296,25 @@ simple_type_size_in_bits (const_tree type) return BITS_PER_WORD; else if (TYPE_SIZE (type) == NULL_TREE) return 0; - else if (host_integerp (TYPE_SIZE (type), 1)) - return tree_low_cst (TYPE_SIZE (type), 1); + else if (tree_fits_uhwi_p (TYPE_SIZE (type))) + return tree_to_uhwi (TYPE_SIZE (type)); else return TYPE_ALIGN (type); } -/* Similarly, but return a double_int instead of UHWI. 
*/ +/* Similarly, but return a wide_int instead of UHWI. */ -static inline double_int -double_int_type_size_in_bits (const_tree type) +static inline addr_wide_int +wide_int_type_size_in_bits (const_tree type) { if (TREE_CODE (type) == ERROR_MARK) - return double_int::from_uhwi (BITS_PER_WORD); + return BITS_PER_WORD; else if (TYPE_SIZE (type) == NULL_TREE) - return double_int_zero; + return 0; else if (TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST) - return tree_to_double_int (TYPE_SIZE (type)); + return TYPE_SIZE (type); else - return double_int::from_uhwi (TYPE_ALIGN (type)); + return TYPE_ALIGN (type); } /* Given a pointer to a tree node for a subrange type, return a pointer @@ -11668,9 +11801,7 @@ clz_loc_descriptor (rtx rtl, enum machine_mode mode, rtx msb; if (GET_MODE_CLASS (mode) != MODE_INT - || GET_MODE (XEXP (rtl, 0)) != mode - || (GET_CODE (rtl) == CLZ - && GET_MODE_BITSIZE (mode) > HOST_BITS_PER_DOUBLE_INT)) + || GET_MODE (XEXP (rtl, 0)) != mode) return NULL; op0 = mem_loc_descriptor (XEXP (rtl, 0), mode, mem_mode, @@ -11714,9 +11845,9 @@ clz_loc_descriptor (rtx rtl, enum machine_mode mode, msb = GEN_INT ((unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1)); else - msb = immed_double_const (0, (unsigned HOST_WIDE_INT) 1 - << (GET_MODE_BITSIZE (mode) - - HOST_BITS_PER_WIDE_INT - 1), mode); + msb = immed_wide_int_const + (wi::set_bit_in_zero (GET_MODE_PRECISION (mode) - 1, + GET_MODE_PRECISION (mode)), mode); if (GET_CODE (msb) == CONST_INT && INTVAL (msb) < 0) tmp = new_loc_descr (HOST_BITS_PER_WIDE_INT == 32 ? 
DW_OP_const4u : HOST_BITS_PER_WIDE_INT == 64 @@ -12658,7 +12789,16 @@ mem_loc_descriptor (rtx rtl, enum machine_mode mode, mem_loc_result->dw_loc_oprnd1.val_class = dw_val_class_die_ref; mem_loc_result->dw_loc_oprnd1.v.val_die_ref.die = type_die; mem_loc_result->dw_loc_oprnd1.v.val_die_ref.external = 0; - if (SCALAR_FLOAT_MODE_P (mode)) +#if TARGET_SUPPORTS_WIDE_INT == 0 + if (!SCALAR_FLOAT_MODE_P (mode)) + { + mem_loc_result->dw_loc_oprnd2.val_class + = dw_val_class_const_double; + mem_loc_result->dw_loc_oprnd2.v.val_double + = rtx_to_double_int (rtl); + } + else +#endif { unsigned int length = GET_MODE_SIZE (mode); unsigned char *array @@ -12670,13 +12810,26 @@ mem_loc_descriptor (rtx rtl, enum machine_mode mode, mem_loc_result->dw_loc_oprnd2.v.val_vec.elt_size = 4; mem_loc_result->dw_loc_oprnd2.v.val_vec.array = array; } - else - { - mem_loc_result->dw_loc_oprnd2.val_class - = dw_val_class_const_double; - mem_loc_result->dw_loc_oprnd2.v.val_double - = rtx_to_double_int (rtl); - } + } + break; + + case CONST_WIDE_INT: + if (!dwarf_strict) + { + dw_die_ref type_die; + + type_die = base_type_for_mode (mode, + GET_MODE_CLASS (mode) == MODE_INT); + if (type_die == NULL) + return NULL; + mem_loc_result = new_loc_descr (DW_OP_GNU_const_type, 0, 0); + mem_loc_result->dw_loc_oprnd1.val_class = dw_val_class_die_ref; + mem_loc_result->dw_loc_oprnd1.v.val_die_ref.die = type_die; + mem_loc_result->dw_loc_oprnd1.v.val_die_ref.external = 0; + mem_loc_result->dw_loc_oprnd2.val_class + = dw_val_class_wide_int; + mem_loc_result->dw_loc_oprnd2.v.val_wide = ggc_alloc_cleared_wide_int (); + *mem_loc_result->dw_loc_oprnd2.v.val_wide = std::make_pair (rtl, mode); } break; @@ -13147,7 +13300,15 @@ loc_descriptor (rtx rtl, enum machine_mode mode, adequately represented. We output CONST_DOUBLEs as blocks. 
*/ loc_result = new_loc_descr (DW_OP_implicit_value, GET_MODE_SIZE (mode), 0); - if (SCALAR_FLOAT_MODE_P (mode)) +#if TARGET_SUPPORTS_WIDE_INT == 0 + if (!SCALAR_FLOAT_MODE_P (mode)) + { + loc_result->dw_loc_oprnd2.val_class = dw_val_class_const_double; + loc_result->dw_loc_oprnd2.v.val_double + = rtx_to_double_int (rtl); + } + else +#endif { unsigned int length = GET_MODE_SIZE (mode); unsigned char *array @@ -13159,12 +13320,26 @@ loc_descriptor (rtx rtl, enum machine_mode mode, loc_result->dw_loc_oprnd2.v.val_vec.elt_size = 4; loc_result->dw_loc_oprnd2.v.val_vec.array = array; } - else - { - loc_result->dw_loc_oprnd2.val_class = dw_val_class_const_double; - loc_result->dw_loc_oprnd2.v.val_double - = rtx_to_double_int (rtl); - } + } + break; + + case CONST_WIDE_INT: + if (mode == VOIDmode) + mode = GET_MODE (rtl); + + if (mode != VOIDmode && (dwarf_version >= 4 || !dwarf_strict)) + { + gcc_assert (mode == GET_MODE (rtl) || VOIDmode == GET_MODE (rtl)); + + /* Note that a CONST_DOUBLE rtx could represent either an integer + or a floating-point constant. A CONST_DOUBLE is used whenever + the constant requires more than one word in order to be + adequately represented. We output CONST_DOUBLEs as blocks. 
*/ + loc_result = new_loc_descr (DW_OP_implicit_value, + GET_MODE_SIZE (mode), 0); + loc_result->dw_loc_oprnd2.val_class = dw_val_class_wide_int; + loc_result->dw_loc_oprnd2.v.val_wide = ggc_alloc_cleared_wide_int (); + *loc_result->dw_loc_oprnd2.v.val_wide = std::make_pair (rtl, mode); } break; @@ -13180,6 +13355,7 @@ loc_descriptor (rtx rtl, enum machine_mode mode, ggc_alloc_atomic (length * elt_size); unsigned int i; unsigned char *p; + enum machine_mode imode = GET_MODE_INNER (mode); gcc_assert (mode == GET_MODE (rtl) || VOIDmode == GET_MODE (rtl)); switch (GET_MODE_CLASS (mode)) @@ -13188,15 +13364,8 @@ loc_descriptor (rtx rtl, enum machine_mode mode, for (i = 0, p = array; i < length; i++, p += elt_size) { rtx elt = CONST_VECTOR_ELT (rtl, i); - double_int val = rtx_to_double_int (elt); - - if (elt_size <= sizeof (HOST_WIDE_INT)) - insert_int (val.to_shwi (), elt_size, p); - else - { - gcc_assert (elt_size == 2 * sizeof (HOST_WIDE_INT)); - insert_double (val, p); - } + wide_int val = std::make_pair (elt, imode); + insert_wide_int (val, p); } break; @@ -13442,10 +13611,10 @@ dw_sra_loc_expr (tree decl, rtx loc) enum var_init_status initialized; if (DECL_SIZE (decl) == NULL - || !host_integerp (DECL_SIZE (decl), 1)) + || !tree_fits_uhwi_p (DECL_SIZE (decl))) return NULL; - decl_size = tree_low_cst (DECL_SIZE (decl), 1); + decl_size = tree_to_uhwi (DECL_SIZE (decl)); descr = NULL; descr_tail = &descr; @@ -14148,17 +14317,17 @@ loc_list_from_tree (tree loc, int want_address) } case INTEGER_CST: - if ((want_address || !host_integerp (loc, 0)) + if ((want_address || !tree_fits_shwi_p (loc)) && (ret = cst_pool_loc_descr (loc))) have_address = 1; else if (want_address == 2 - && host_integerp (loc, 0) + && tree_fits_shwi_p (loc) && (ret = address_of_int_loc_descriptor (int_size_in_bytes (TREE_TYPE (loc)), - tree_low_cst (loc, 0)))) + tree_to_shwi (loc)))) have_address = 1; - else if (host_integerp (loc, 0)) - ret = int_loc_descriptor (tree_low_cst (loc, 0)); + else if 
(tree_fits_shwi_p (loc)) + ret = int_loc_descriptor (tree_to_shwi (loc)); else { expansion_failed (loc, NULL_RTX, @@ -14247,13 +14416,13 @@ loc_list_from_tree (tree loc, int want_address) case POINTER_PLUS_EXPR: case PLUS_EXPR: - if (host_integerp (TREE_OPERAND (loc, 1), 0)) + if (tree_fits_shwi_p (TREE_OPERAND (loc, 1))) { list_ret = loc_list_from_tree (TREE_OPERAND (loc, 0), 0); if (list_ret == 0) return 0; - loc_list_plus_const (list_ret, tree_low_cst (TREE_OPERAND (loc, 1), 0)); + loc_list_plus_const (list_ret, tree_to_shwi (TREE_OPERAND (loc, 1))); break; } @@ -14518,14 +14687,12 @@ simple_decl_align_in_bits (const_tree decl) /* Return the result of rounding T up to ALIGN. */ -static inline double_int -round_up_to_align (double_int t, unsigned int align) +static inline addr_wide_int +round_up_to_align (addr_wide_int t, unsigned int align) { - double_int alignd = double_int::from_uhwi (align); - t += alignd; - t += double_int_minus_one; - t = t.div (alignd, true, TRUNC_DIV_EXPR); - t *= alignd; + t += align - 1; + t = wi::udiv_trunc (t, align); + t *= align; return t; } @@ -14539,9 +14706,9 @@ round_up_to_align (double_int t, unsigned int align) static HOST_WIDE_INT field_byte_offset (const_tree decl) { - double_int object_offset_in_bits; - double_int object_offset_in_bytes; - double_int bitpos_int; + addr_wide_int object_offset_in_bits; + addr_wide_int object_offset_in_bytes; + addr_wide_int bitpos_int; if (TREE_CODE (decl) == ERROR_MARK) return 0; @@ -14554,21 +14721,21 @@ field_byte_offset (const_tree decl) if (TREE_CODE (bit_position (decl)) != INTEGER_CST) return 0; - bitpos_int = tree_to_double_int (bit_position (decl)); + bitpos_int = bit_position (decl); #ifdef PCC_BITFIELD_TYPE_MATTERS if (PCC_BITFIELD_TYPE_MATTERS) { tree type; tree field_size_tree; - double_int deepest_bitpos; - double_int field_size_in_bits; + addr_wide_int deepest_bitpos; + addr_wide_int field_size_in_bits; unsigned int type_align_in_bits; unsigned int decl_align_in_bits; - 
double_int type_size_in_bits; + addr_wide_int type_size_in_bits; type = field_type (decl); - type_size_in_bits = double_int_type_size_in_bits (type); + type_size_in_bits = wide_int_type_size_in_bits (type); type_align_in_bits = simple_type_align_in_bits (type); field_size_tree = DECL_SIZE (decl); @@ -14580,7 +14747,7 @@ field_byte_offset (const_tree decl) /* If the size of the field is not constant, use the type size. */ if (TREE_CODE (field_size_tree) == INTEGER_CST) - field_size_in_bits = tree_to_double_int (field_size_tree); + field_size_in_bits = field_size_tree; else field_size_in_bits = type_size_in_bits; @@ -14644,7 +14811,7 @@ field_byte_offset (const_tree decl) object_offset_in_bits = round_up_to_align (object_offset_in_bits, type_align_in_bits); - if (object_offset_in_bits.ugt (bitpos_int)) + if (wi::gtu_p (object_offset_in_bits, bitpos_int)) { object_offset_in_bits = deepest_bitpos - type_size_in_bits; @@ -14658,8 +14825,7 @@ field_byte_offset (const_tree decl) object_offset_in_bits = bitpos_int; object_offset_in_bytes - = object_offset_in_bits.div (double_int::from_uhwi (BITS_PER_UNIT), - true, TRUNC_DIV_EXPR); + = wi::udiv_trunc (object_offset_in_bits, BITS_PER_UNIT); return object_offset_in_bytes.to_shwi (); } @@ -14757,7 +14923,7 @@ add_data_member_location_attribute (dw_die_ref die, tree decl) add_loc_descr (&loc_descr, tmp); /* Calculate the address of the offset. */ - offset = tree_low_cst (BINFO_VPTR_FIELD (decl), 0); + offset = tree_to_shwi (BINFO_VPTR_FIELD (decl)); gcc_assert (offset < 0); tmp = int_loc_descriptor (-offset); @@ -14774,7 +14940,7 @@ add_data_member_location_attribute (dw_die_ref die, tree decl) add_loc_descr (&loc_descr, tmp); } else - offset = tree_low_cst (BINFO_OFFSET (decl), 0); + offset = tree_to_shwi (BINFO_OFFSET (decl)); } else offset = field_byte_offset (decl); @@ -14835,22 +15001,27 @@ extract_int (const unsigned char *src, unsigned int size) return val; } -/* Writes double_int values to dw_vec_const array. 
*/ +/* Writes wide_int values to dw_vec_const array. */ static void -insert_double (double_int val, unsigned char *dest) +insert_wide_int (const wide_int &val, unsigned char *dest) { - unsigned char *p0 = dest; - unsigned char *p1 = dest + sizeof (HOST_WIDE_INT); + int i; if (WORDS_BIG_ENDIAN) - { - p0 = p1; - p1 = dest; - } - - insert_int ((HOST_WIDE_INT) val.low, sizeof (HOST_WIDE_INT), p0); - insert_int ((HOST_WIDE_INT) val.high, sizeof (HOST_WIDE_INT), p1); + for (i = (int)get_full_len (val) - 1; i >= 0; i--) + { + insert_int ((HOST_WIDE_INT) val.elt (i), + sizeof (HOST_WIDE_INT), dest); + dest += sizeof (HOST_WIDE_INT); + } + else + for (i = 0; i < (int)get_full_len (val); i++) + { + insert_int ((HOST_WIDE_INT) val.elt (i), + sizeof (HOST_WIDE_INT), dest); + dest += sizeof (HOST_WIDE_INT); + } } /* Writes floating point values to dw_vec_const array. */ @@ -14895,6 +15066,11 @@ add_const_value_attribute (dw_die_ref die, rtx rtl) } return true; + case CONST_WIDE_INT: + add_AT_wide (die, DW_AT_const_value, + std::make_pair (rtl, GET_MODE (rtl))); + return true; + case CONST_DOUBLE: /* Note that a CONST_DOUBLE rtx could represent either an integer or a floating-point constant. 
A CONST_DOUBLE is used whenever the @@ -14903,7 +15079,10 @@ add_const_value_attribute (dw_die_ref die, rtx rtl) { enum machine_mode mode = GET_MODE (rtl); - if (SCALAR_FLOAT_MODE_P (mode)) + if (TARGET_SUPPORTS_WIDE_INT == 0 && !SCALAR_FLOAT_MODE_P (mode)) + add_AT_double (die, DW_AT_const_value, + CONST_DOUBLE_HIGH (rtl), CONST_DOUBLE_LOW (rtl)); + else { unsigned int length = GET_MODE_SIZE (mode); unsigned char *array = (unsigned char *) ggc_alloc_atomic (length); @@ -14911,9 +15090,6 @@ add_const_value_attribute (dw_die_ref die, rtx rtl) insert_float (rtl, array); add_AT_vec (die, DW_AT_const_value, length / 4, 4, array); } - else - add_AT_double (die, DW_AT_const_value, - CONST_DOUBLE_HIGH (rtl), CONST_DOUBLE_LOW (rtl)); } return true; @@ -14926,6 +15102,7 @@ add_const_value_attribute (dw_die_ref die, rtx rtl) (length * elt_size); unsigned int i; unsigned char *p; + enum machine_mode imode = GET_MODE_INNER (mode); switch (GET_MODE_CLASS (mode)) { @@ -14933,15 +15110,8 @@ add_const_value_attribute (dw_die_ref die, rtx rtl) for (i = 0, p = array; i < length; i++, p += elt_size) { rtx elt = CONST_VECTOR_ELT (rtl, i); - double_int val = rtx_to_double_int (elt); - - if (elt_size <= sizeof (HOST_WIDE_INT)) - insert_int (val.to_shwi (), elt_size, p); - else - { - gcc_assert (elt_size == 2 * sizeof (HOST_WIDE_INT)); - insert_double (val, p); - } + wide_int val = std::make_pair (elt, imode); + insert_wide_int (val, p); } break; @@ -15428,9 +15598,9 @@ fortran_common (tree decl, HOST_WIDE_INT *value) *value = 0; if (offset != NULL) { - if (!host_integerp (offset, 0)) + if (!tree_fits_shwi_p (offset)) return NULL_TREE; - *value = tree_low_cst (offset, 0); + *value = tree_to_shwi (offset); } if (bitpos != 0) *value += bitpos / BITS_PER_UNIT; @@ -15596,14 +15766,14 @@ native_encode_initializer (tree init, unsigned char *array, int size) constructor_elt *ce; if (TYPE_DOMAIN (type) == NULL_TREE - || !host_integerp (TYPE_MIN_VALUE (TYPE_DOMAIN (type)), 0)) + || 
!tree_fits_shwi_p (TYPE_MIN_VALUE (TYPE_DOMAIN (type)))) return false; fieldsize = int_size_in_bytes (TREE_TYPE (type)); if (fieldsize <= 0) return false; - min_index = tree_low_cst (TYPE_MIN_VALUE (TYPE_DOMAIN (type)), 0); + min_index = tree_to_shwi (TYPE_MIN_VALUE (TYPE_DOMAIN (type))); memset (array, '\0', size); FOR_EACH_VEC_SAFE_ELT (CONSTRUCTOR_ELTS (init), cnt, ce) { @@ -15611,10 +15781,10 @@ native_encode_initializer (tree init, unsigned char *array, int size) tree index = ce->index; int pos = curpos; if (index && TREE_CODE (index) == RANGE_EXPR) - pos = (tree_low_cst (TREE_OPERAND (index, 0), 0) - min_index) + pos = (tree_to_shwi (TREE_OPERAND (index, 0)) - min_index) * fieldsize; else if (index) - pos = (tree_low_cst (index, 0) - min_index) * fieldsize; + pos = (tree_to_shwi (index) - min_index) * fieldsize; if (val) { @@ -15625,8 +15795,8 @@ native_encode_initializer (tree init, unsigned char *array, int size) curpos = pos + fieldsize; if (index && TREE_CODE (index) == RANGE_EXPR) { - int count = tree_low_cst (TREE_OPERAND (index, 1), 0) - - tree_low_cst (TREE_OPERAND (index, 0), 0); + int count = tree_to_shwi (TREE_OPERAND (index, 1)) + - tree_to_shwi (TREE_OPERAND (index, 0)); while (count-- > 0) { if (val) @@ -15670,9 +15840,9 @@ native_encode_initializer (tree init, unsigned char *array, int size) && ! TYPE_MAX_VALUE (TYPE_DOMAIN (TREE_TYPE (field)))) return false; else if (DECL_SIZE_UNIT (field) == NULL_TREE - || !host_integerp (DECL_SIZE_UNIT (field), 0)) + || !tree_fits_shwi_p (DECL_SIZE_UNIT (field))) return false; - fieldsize = tree_low_cst (DECL_SIZE_UNIT (field), 0); + fieldsize = tree_to_shwi (DECL_SIZE_UNIT (field)); pos = int_byte_position (field); gcc_assert (pos + fieldsize <= size); if (val @@ -16062,9 +16232,9 @@ add_bound_info (dw_die_ref subrange_die, enum dwarf_attribute bound_attr, tree b /* Use the default if possible. 
*/ if (bound_attr == DW_AT_lower_bound - && host_integerp (bound, 0) + && tree_fits_shwi_p (bound) && (dflt = lower_bound_default ()) != -1 - && tree_low_cst (bound, 0) == dflt) + && tree_to_shwi (bound) == dflt) ; /* Otherwise represent the bound as an unsigned value with the @@ -16072,18 +16242,14 @@ add_bound_info (dw_die_ref subrange_die, enum dwarf_attribute bound_attr, tree b type will be necessary to re-interpret it unambiguously. */ else if (prec < HOST_BITS_PER_WIDE_INT) { - unsigned HOST_WIDE_INT mask - = ((unsigned HOST_WIDE_INT) 1 << prec) - 1; add_AT_unsigned (subrange_die, bound_attr, - TREE_INT_CST_LOW (bound) & mask); + zext_hwi (tree_to_hwi (bound), prec)); } - else if (prec == HOST_BITS_PER_WIDE_INT - || TREE_INT_CST_HIGH (bound) == 0) - add_AT_unsigned (subrange_die, bound_attr, - TREE_INT_CST_LOW (bound)); + else if (prec == HOST_BITS_PER_WIDE_INT + || (cst_fits_uhwi_p (bound) && wi::ges_p (bound, 0))) + add_AT_unsigned (subrange_die, bound_attr, tree_to_hwi (bound)); else - add_AT_double (subrange_die, bound_attr, TREE_INT_CST_HIGH (bound), - TREE_INT_CST_LOW (bound)); + add_AT_wide (subrange_die, bound_attr, wide_int (bound)); } break; @@ -16293,8 +16459,8 @@ add_bit_offset_attribute (dw_die_ref die, tree decl) /* We can't yet handle bit-fields whose offsets are variable, so if we encounter such things, just return without generating any attribute whatsoever. Likewise for variable or too large size. */ - if (! host_integerp (bit_position (decl), 0) - || ! host_integerp (DECL_SIZE (decl), 1)) + if (! tree_fits_shwi_p (bit_position (decl)) + || ! tree_fits_uhwi_p (DECL_SIZE (decl))) return; bitpos_int = int_bit_position (decl); @@ -16309,7 +16475,7 @@ add_bit_offset_attribute (dw_die_ref die, tree decl) if (! 
BYTES_BIG_ENDIAN) { - highest_order_field_bit_offset += tree_low_cst (DECL_SIZE (decl), 0); + highest_order_field_bit_offset += tree_to_shwi (DECL_SIZE (decl)); highest_order_object_bit_offset += simple_type_size_in_bits (type); } @@ -16334,8 +16500,8 @@ add_bit_size_attribute (dw_die_ref die, tree decl) gcc_assert (TREE_CODE (decl) == FIELD_DECL && DECL_BIT_FIELD_TYPE (decl)); - if (host_integerp (DECL_SIZE (decl), 1)) - add_AT_unsigned (die, DW_AT_bit_size, tree_low_cst (DECL_SIZE (decl), 1)); + if (tree_fits_uhwi_p (DECL_SIZE (decl))) + add_AT_unsigned (die, DW_AT_bit_size, tree_to_uhwi (DECL_SIZE (decl))); } /* If the compiled language is ANSI C, then add a 'prototyped' @@ -16404,10 +16570,10 @@ add_pure_or_virtual_attribute (dw_die_ref die, tree func_decl) { add_AT_unsigned (die, DW_AT_virtuality, DW_VIRTUALITY_virtual); - if (host_integerp (DECL_VINDEX (func_decl), 0)) + if (tree_fits_shwi_p (DECL_VINDEX (func_decl))) add_AT_loc (die, DW_AT_vtable_elem_location, new_loc_descr (DW_OP_constu, - tree_low_cst (DECL_VINDEX (func_decl), 0), + tree_to_shwi (DECL_VINDEX (func_decl)), 0)); /* GNU extension: Record what type this method came from originally. 
*/ @@ -16954,8 +17120,8 @@ descr_info_loc (tree val, tree base_decl) case VAR_DECL: return loc_descriptor_from_tree (val, 0); case INTEGER_CST: - if (host_integerp (val, 0)) - return int_loc_descriptor (tree_low_cst (val, 0)); + if (tree_fits_shwi_p (val)) + return int_loc_descriptor (tree_to_shwi (val)); break; case INDIRECT_REF: size = int_size_in_bytes (TREE_TYPE (val)); @@ -16971,14 +17137,14 @@ descr_info_loc (tree val, tree base_decl) return loc; case POINTER_PLUS_EXPR: case PLUS_EXPR: - if (host_integerp (TREE_OPERAND (val, 1), 1) - && (unsigned HOST_WIDE_INT) tree_low_cst (TREE_OPERAND (val, 1), 1) + if (tree_fits_uhwi_p (TREE_OPERAND (val, 1)) + && (unsigned HOST_WIDE_INT) tree_to_uhwi (TREE_OPERAND (val, 1)) < 16384) { loc = descr_info_loc (TREE_OPERAND (val, 0), base_decl); if (!loc) break; - loc_descr_plus_const (&loc, tree_low_cst (TREE_OPERAND (val, 1), 0)); + loc_descr_plus_const (&loc, tree_to_shwi (TREE_OPERAND (val, 1))); } else { @@ -17018,9 +17184,9 @@ add_descr_info_field (dw_die_ref die, enum dwarf_attribute attr, { dw_loc_descr_ref loc; - if (host_integerp (val, 0)) + if (tree_fits_shwi_p (val)) { - add_AT_unsigned (die, attr, tree_low_cst (val, 0)); + add_AT_unsigned (die, attr, tree_to_shwi (val)); return; } @@ -17071,9 +17237,9 @@ gen_descr_array_type_die (tree type, struct array_descr_info *info, /* If it is the default value, omit it. 
*/ int dflt; - if (host_integerp (info->dimen[dim].lower_bound, 0) + if (tree_fits_shwi_p (info->dimen[dim].lower_bound) && (dflt = lower_bound_default ()) != -1 - && tree_low_cst (info->dimen[dim].lower_bound, 0) == dflt) + && tree_to_shwi (info->dimen[dim].lower_bound) == dflt) ; else add_descr_info_field (subrange_die, DW_AT_lower_bound, @@ -17220,9 +17386,9 @@ gen_enumeration_type_die (tree type, dw_die_ref context_die) if (TREE_CODE (value) == CONST_DECL) value = DECL_INITIAL (value); - if (host_integerp (value, TYPE_UNSIGNED (TREE_TYPE (value))) + if (tree_fits_hwi_p (value) && (simple_type_size_in_bits (TREE_TYPE (value)) - <= HOST_BITS_PER_WIDE_INT || host_integerp (value, 0))) + <= HOST_BITS_PER_WIDE_INT || tree_fits_shwi_p (value))) /* DWARF2 does not provide a way of indicating whether or not enumeration constants are signed or unsigned. GDB always assumes the values are signed, so we output all @@ -17235,12 +17401,11 @@ gen_enumeration_type_die (tree type, dw_die_ref context_die) This should be re-worked to use correct signed/unsigned int/double tags for all cases, instead of always treating as signed. */ - add_AT_int (enum_die, DW_AT_const_value, TREE_INT_CST_LOW (value)); + add_AT_int (enum_die, DW_AT_const_value, tree_to_hwi (value)); else /* Enumeration constants may be wider than HOST_WIDE_INT. Handle that here. */ - add_AT_double (enum_die, DW_AT_const_value, - TREE_INT_CST_HIGH (value), TREE_INT_CST_LOW (value)); + add_AT_wide (enum_die, DW_AT_const_value, wide_int (value)); } add_gnat_descriptive_type_attribute (type_die, type, context_die); @@ -23012,9 +23177,9 @@ optimize_location_into_implicit_ptr (dw_die_ref die, tree decl) we can add DW_OP_GNU_implicit_pointer. 
*/ STRIP_NOPS (init); if (TREE_CODE (init) == POINTER_PLUS_EXPR - && host_integerp (TREE_OPERAND (init, 1), 0)) + && tree_fits_shwi_p (TREE_OPERAND (init, 1))) { - offset = tree_low_cst (TREE_OPERAND (init, 1), 0); + offset = tree_to_shwi (TREE_OPERAND (init, 1)); init = TREE_OPERAND (init, 0); STRIP_NOPS (init); } @@ -23302,6 +23467,9 @@ hash_loc_operands (dw_loc_descr_ref loc, hashval_t hash) hash = iterative_hash_object (val2->v.val_double.low, hash); hash = iterative_hash_object (val2->v.val_double.high, hash); break; + case dw_val_class_wide_int: + hash = iterative_hash_object (*val2->v.val_wide, hash); + break; case dw_val_class_addr: hash = iterative_hash_rtx (val2->v.val_addr, hash); break; @@ -23391,6 +23559,9 @@ hash_loc_operands (dw_loc_descr_ref loc, hashval_t hash) hash = iterative_hash_object (val2->v.val_double.low, hash); hash = iterative_hash_object (val2->v.val_double.high, hash); break; + case dw_val_class_wide_int: + hash = iterative_hash_object (*val2->v.val_wide, hash); + break; default: gcc_unreachable (); } @@ -23539,6 +23710,8 @@ compare_loc_operands (dw_loc_descr_ref x, dw_loc_descr_ref y) case dw_val_class_const_double: return valx2->v.val_double.low == valy2->v.val_double.low && valx2->v.val_double.high == valy2->v.val_double.high; + case dw_val_class_wide_int: + return *valx2->v.val_wide == *valy2->v.val_wide; case dw_val_class_addr: return rtx_equal_p (valx2->v.val_addr, valy2->v.val_addr); default: @@ -23582,6 +23755,8 @@ compare_loc_operands (dw_loc_descr_ref x, dw_loc_descr_ref y) case dw_val_class_const_double: return valx2->v.val_double.low == valy2->v.val_double.low && valx2->v.val_double.high == valy2->v.val_double.high; + case dw_val_class_wide_int: + return *valx2->v.val_wide == *valy2->v.val_wide; default: gcc_unreachable (); } diff --git a/gcc/dwarf2out.h b/gcc/dwarf2out.h index ad03a34021d..78d8cc0a80c 100644 --- a/gcc/dwarf2out.h +++ b/gcc/dwarf2out.h @@ -21,6 +21,7 @@ along with GCC; see the file COPYING3. 
If not see #define GCC_DWARF2OUT_H 1 #include "dwarf2.h" /* ??? Remove this once only used by dwarf2foo.c. */ +#include "wide-int.h" typedef struct die_struct *dw_die_ref; typedef const struct die_struct *const_dw_die_ref; @@ -29,6 +30,7 @@ typedef struct dw_val_struct *dw_val_ref; typedef struct dw_cfi_struct *dw_cfi_ref; typedef struct dw_loc_descr_struct *dw_loc_descr_ref; typedef struct dw_loc_list_struct *dw_loc_list_ref; +typedef wide_int *wide_int_ptr; /* Call frames are described using a sequence of Call Frame @@ -139,6 +141,7 @@ enum dw_val_class dw_val_class_const, dw_val_class_unsigned_const, dw_val_class_const_double, + dw_val_class_wide_int, dw_val_class_vec, dw_val_class_flag, dw_val_class_die_ref, @@ -180,6 +183,7 @@ typedef struct GTY(()) dw_val_struct { HOST_WIDE_INT GTY ((default)) val_int; unsigned HOST_WIDE_INT GTY ((tag ("dw_val_class_unsigned_const"))) val_unsigned; double_int GTY ((tag ("dw_val_class_const_double"))) val_double; + wide_int_ptr GTY ((tag ("dw_val_class_wide_int"))) val_wide; dw_vec_const GTY ((tag ("dw_val_class_vec"))) val_vec; struct dw_val_die_union { diff --git a/gcc/emit-rtl.c b/gcc/emit-rtl.c index b0fc8462268..0311fff192e 100644 --- a/gcc/emit-rtl.c +++ b/gcc/emit-rtl.c @@ -125,6 +125,9 @@ rtx cc0_rtx; static GTY ((if_marked ("ggc_marked_p"), param_is (struct rtx_def))) htab_t const_int_htab; +static GTY ((if_marked ("ggc_marked_p"), param_is (struct rtx_def))) + htab_t const_wide_int_htab; + /* A hash table storing memory attribute structures. 
*/ static GTY ((if_marked ("ggc_marked_p"), param_is (struct mem_attrs))) htab_t mem_attrs_htab; @@ -150,6 +153,11 @@ static void set_used_decls (tree); static void mark_label_nuses (rtx); static hashval_t const_int_htab_hash (const void *); static int const_int_htab_eq (const void *, const void *); +#if TARGET_SUPPORTS_WIDE_INT +static hashval_t const_wide_int_htab_hash (const void *); +static int const_wide_int_htab_eq (const void *, const void *); +static rtx lookup_const_wide_int (rtx); +#endif static hashval_t const_double_htab_hash (const void *); static int const_double_htab_eq (const void *, const void *); static rtx lookup_const_double (rtx); @@ -186,6 +194,43 @@ const_int_htab_eq (const void *x, const void *y) return (INTVAL ((const_rtx) x) == *((const HOST_WIDE_INT *) y)); } +#if TARGET_SUPPORTS_WIDE_INT +/* Returns a hash code for X (which is a really a CONST_WIDE_INT). */ + +static hashval_t +const_wide_int_htab_hash (const void *x) +{ + int i; + HOST_WIDE_INT hash = 0; + const_rtx xr = (const_rtx) x; + + for (i = 0; i < CONST_WIDE_INT_NUNITS (xr); i++) + hash += CONST_WIDE_INT_ELT (xr, i); + + return (hashval_t) hash; +} + +/* Returns nonzero if the value represented by X (which is really a + CONST_WIDE_INT) is the same as that given by Y (which is really a + CONST_WIDE_INT). */ + +static int +const_wide_int_htab_eq (const void *x, const void *y) +{ + int i; + const_rtx xr = (const_rtx)x; + const_rtx yr = (const_rtx)y; + if (CONST_WIDE_INT_NUNITS (xr) != CONST_WIDE_INT_NUNITS (yr)) + return 0; + + for (i = 0; i < CONST_WIDE_INT_NUNITS (xr); i++) + if (CONST_WIDE_INT_ELT (xr, i) != CONST_WIDE_INT_ELT (yr, i)) + return 0; + + return 1; +} +#endif + /* Returns a hash code for X (which is really a CONST_DOUBLE). 
*/ static hashval_t const_double_htab_hash (const void *x) @@ -193,7 +238,7 @@ const_double_htab_hash (const void *x) const_rtx const value = (const_rtx) x; hashval_t h; - if (GET_MODE (value) == VOIDmode) + if (TARGET_SUPPORTS_WIDE_INT == 0 && GET_MODE (value) == VOIDmode) h = CONST_DOUBLE_LOW (value) ^ CONST_DOUBLE_HIGH (value); else { @@ -213,7 +258,7 @@ const_double_htab_eq (const void *x, const void *y) if (GET_MODE (a) != GET_MODE (b)) return 0; - if (GET_MODE (a) == VOIDmode) + if (TARGET_SUPPORTS_WIDE_INT == 0 && GET_MODE (a) == VOIDmode) return (CONST_DOUBLE_LOW (a) == CONST_DOUBLE_LOW (b) && CONST_DOUBLE_HIGH (a) == CONST_DOUBLE_HIGH (b)); else @@ -479,6 +524,7 @@ const_fixed_from_fixed_value (FIXED_VALUE_TYPE value, enum machine_mode mode) return lookup_const_fixed (fixed); } +#if TARGET_SUPPORTS_WIDE_INT == 0 /* Constructs double_int from rtx CST. */ double_int @@ -498,17 +544,70 @@ rtx_to_double_int (const_rtx cst) return r; } +#endif + +#if TARGET_SUPPORTS_WIDE_INT +/* Determine whether WIDE_INT, already exists in the hash table. If + so, return its counterpart; otherwise add it to the hash table and + return it. */ +static rtx +lookup_const_wide_int (rtx wint) +{ + void **slot = htab_find_slot (const_wide_int_htab, wint, INSERT); + if (*slot == 0) + *slot = wint; -/* Return a CONST_DOUBLE or CONST_INT for a value specified as - a double_int. */ + return (rtx) *slot; +} +#endif +/* V contains a wide_int. A CONST_INT or CONST_WIDE_INT (if + TARGET_SUPPORTS_WIDE_INT is defined) or CONST_DOUBLE if + TARGET_SUPPORTS_WIDE_INT is not defined is produced based on the + number of HOST_WIDE_INTs that are necessary to represent the value + in compact form. 
*/ rtx -immed_double_int_const (double_int i, enum machine_mode mode) +immed_wide_int_const (const wide_int &v, enum machine_mode mode) { - return immed_double_const (i.low, i.high, mode); + unsigned int len = v.get_len (); + unsigned int prec = GET_MODE_PRECISION (mode); + + /* Allow truncation but not extension since we do not know if the + number is signed or unsigned. */ + gcc_assert (prec <= v.get_precision ()); + + if (len < 2 || prec <= HOST_BITS_PER_WIDE_INT) + return gen_int_mode (v.elt (0), mode); + +#if TARGET_SUPPORTS_WIDE_INT + { + unsigned int i; + rtx value; + unsigned int blocks_needed + = (prec + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT; + + if (len > blocks_needed) + len = blocks_needed; + + value = const_wide_int_alloc (len); + + /* It is so tempting to just put the mode in here. Must control + myself ... */ + PUT_MODE (value, VOIDmode); + CWI_PUT_NUM_ELEM (value, len); + + for (i = 0; i < len; i++) + CONST_WIDE_INT_ELT (value, i) = v.elt (i); + + return lookup_const_wide_int (value); + } +#else + return immed_double_const (v.elt (0), v.elt (1), mode); +#endif } +#if TARGET_SUPPORTS_WIDE_INT == 0 /* Return a CONST_DOUBLE or CONST_INT for a value specified as a pair of ints: I0 is the low-order word and I1 is the high-order word. 
For values that are larger than HOST_BITS_PER_DOUBLE_INT, the @@ -560,6 +659,7 @@ immed_double_const (HOST_WIDE_INT i0, HOST_WIDE_INT i1, enum machine_mode mode) return lookup_const_double (value); } +#endif rtx gen_rtx_REG (enum machine_mode mode, unsigned int regno) @@ -1541,12 +1641,12 @@ get_mem_align_offset (rtx mem, unsigned int align) tree bit_offset = DECL_FIELD_BIT_OFFSET (field); if (!byte_offset - || !host_integerp (byte_offset, 1) - || !host_integerp (bit_offset, 1)) + || !tree_fits_uhwi_p (byte_offset) + || !tree_fits_uhwi_p (bit_offset)) return -1; - offset += tree_low_cst (byte_offset, 1); - offset += tree_low_cst (bit_offset, 1) / BITS_PER_UNIT; + offset += tree_to_uhwi (byte_offset); + offset += tree_to_uhwi (bit_offset) / BITS_PER_UNIT; if (inner == NULL_TREE) { @@ -1770,10 +1870,10 @@ set_mem_attributes_minus_bitpos (rtx ref, tree t, int objectp, { attrs.expr = t2; attrs.offset_known_p = false; - if (host_integerp (off_tree, 1)) + if (tree_fits_uhwi_p (off_tree)) { attrs.offset_known_p = true; - attrs.offset = tree_low_cst (off_tree, 1); + attrs.offset = tree_to_uhwi (off_tree); apply_bitpos = bitpos; } } @@ -1800,10 +1900,10 @@ set_mem_attributes_minus_bitpos (rtx ref, tree t, int objectp, attrs.align = MAX (attrs.align, obj_align); } - if (host_integerp (new_size, 1)) + if (tree_fits_uhwi_p (new_size)) { attrs.size_known_p = true; - attrs.size = tree_low_cst (new_size, 1); + attrs.size = tree_to_uhwi (new_size); } /* If we modified OFFSET based on T, then subtract the outstanding @@ -2273,15 +2373,15 @@ widen_memory_access (rtx memref, enum machine_mode mode, HOST_WIDE_INT offset) && attrs.offset >= 0) break; - if (! host_integerp (offset, 1)) + if (! 
tree_fits_uhwi_p (offset)) { attrs.expr = NULL_TREE; break; } attrs.expr = TREE_OPERAND (attrs.expr, 0); - attrs.offset += tree_low_cst (offset, 1); - attrs.offset += (tree_low_cst (DECL_FIELD_BIT_OFFSET (field), 1) + attrs.offset += tree_to_uhwi (offset); + attrs.offset += (tree_to_uhwi (DECL_FIELD_BIT_OFFSET (field)) / BITS_PER_UNIT); } /* Similarly for the decl. */ @@ -5648,11 +5748,15 @@ init_emit_once (void) enum machine_mode mode; enum machine_mode double_mode; - /* Initialize the CONST_INT, CONST_DOUBLE, CONST_FIXED, and memory attribute - hash tables. */ + /* Initialize the CONST_INT, CONST_WIDE_INT, CONST_DOUBLE, + CONST_FIXED, and memory attribute hash tables. */ const_int_htab = htab_create_ggc (37, const_int_htab_hash, const_int_htab_eq, NULL); +#if TARGET_SUPPORTS_WIDE_INT + const_wide_int_htab = htab_create_ggc (37, const_wide_int_htab_hash, + const_wide_int_htab_eq, NULL); +#endif const_double_htab = htab_create_ggc (37, const_double_htab_hash, const_double_htab_eq, NULL); @@ -5716,9 +5820,9 @@ init_emit_once (void) else const_true_rtx = gen_rtx_CONST_INT (VOIDmode, STORE_FLAG_VALUE); - REAL_VALUE_FROM_INT (dconst0, 0, 0, double_mode); - REAL_VALUE_FROM_INT (dconst1, 1, 0, double_mode); - REAL_VALUE_FROM_INT (dconst2, 2, 0, double_mode); + REAL_VALUE_FROM_INT (dconst0, 0, double_mode); + REAL_VALUE_FROM_INT (dconst1, 1, double_mode); + REAL_VALUE_FROM_INT (dconst2, 2, double_mode); dconstm1 = dconst1; dconstm1.sign = 1; diff --git a/gcc/except.c b/gcc/except.c index fb47fbb06cb..a3076056aea 100644 --- a/gcc/except.c +++ b/gcc/except.c @@ -313,20 +313,20 @@ init_eh (void) /* Cache the interesting field offsets so that we have easy access from rtl. 
*/ sjlj_fc_call_site_ofs - = (tree_low_cst (DECL_FIELD_OFFSET (f_cs), 1) - + tree_low_cst (DECL_FIELD_BIT_OFFSET (f_cs), 1) / BITS_PER_UNIT); + = (tree_to_uhwi (DECL_FIELD_OFFSET (f_cs)) + + tree_to_uhwi (DECL_FIELD_BIT_OFFSET (f_cs)) / BITS_PER_UNIT); sjlj_fc_data_ofs - = (tree_low_cst (DECL_FIELD_OFFSET (f_data), 1) - + tree_low_cst (DECL_FIELD_BIT_OFFSET (f_data), 1) / BITS_PER_UNIT); + = (tree_to_uhwi (DECL_FIELD_OFFSET (f_data)) + + tree_to_uhwi (DECL_FIELD_BIT_OFFSET (f_data)) / BITS_PER_UNIT); sjlj_fc_personality_ofs - = (tree_low_cst (DECL_FIELD_OFFSET (f_per), 1) - + tree_low_cst (DECL_FIELD_BIT_OFFSET (f_per), 1) / BITS_PER_UNIT); + = (tree_to_uhwi (DECL_FIELD_OFFSET (f_per)) + + tree_to_uhwi (DECL_FIELD_BIT_OFFSET (f_per)) / BITS_PER_UNIT); sjlj_fc_lsda_ofs - = (tree_low_cst (DECL_FIELD_OFFSET (f_lsda), 1) - + tree_low_cst (DECL_FIELD_BIT_OFFSET (f_lsda), 1) / BITS_PER_UNIT); + = (tree_to_uhwi (DECL_FIELD_OFFSET (f_lsda)) + + tree_to_uhwi (DECL_FIELD_BIT_OFFSET (f_lsda)) / BITS_PER_UNIT); sjlj_fc_jbuf_ofs - = (tree_low_cst (DECL_FIELD_OFFSET (f_jbuf), 1) - + tree_low_cst (DECL_FIELD_BIT_OFFSET (f_jbuf), 1) / BITS_PER_UNIT); + = (tree_to_uhwi (DECL_FIELD_OFFSET (f_jbuf)) + + tree_to_uhwi (DECL_FIELD_BIT_OFFSET (f_jbuf)) / BITS_PER_UNIT); } } @@ -2050,8 +2050,8 @@ expand_builtin_eh_common (tree region_nr_t) HOST_WIDE_INT region_nr; eh_region region; - gcc_assert (host_integerp (region_nr_t, 0)); - region_nr = tree_low_cst (region_nr_t, 0); + gcc_assert (tree_fits_shwi_p (region_nr_t)); + region_nr = tree_to_shwi (region_nr_t); region = (*cfun->eh->region_array)[region_nr]; @@ -2145,7 +2145,7 @@ expand_builtin_eh_return_data_regno (tree exp) return constm1_rtx; } - iwhich = tree_low_cst (which, 1); + iwhich = tree_to_uhwi (which); iwhich = EH_RETURN_DATA_REGNO (iwhich); if (iwhich == INVALID_REGNUM) return constm1_rtx; @@ -2381,7 +2381,7 @@ collect_one_action_chain (action_hash_type ar_hash, eh_region region) { /* Retrieve the filter from the head of the 
filter list where we have stored it (see assign_filter_values). */ - int filter = TREE_INT_CST_LOW (TREE_VALUE (c->filter_list)); + int filter = tree_to_hwi (TREE_VALUE (c->filter_list)); next = add_action_record (ar_hash, filter, 0); } else @@ -2408,7 +2408,7 @@ collect_one_action_chain (action_hash_type ar_hash, eh_region region) flt_node = c->filter_list; for (; flt_node; flt_node = TREE_CHAIN (flt_node)) { - int filter = TREE_INT_CST_LOW (TREE_VALUE (flt_node)); + int filter = tree_to_hwi (TREE_VALUE (flt_node)); next = add_action_record (ar_hash, filter, next); } } diff --git a/gcc/explow.c b/gcc/explow.c index f278e29b78e..360e541d973 100644 --- a/gcc/explow.c +++ b/gcc/explow.c @@ -95,38 +95,9 @@ plus_constant (enum machine_mode mode, rtx x, HOST_WIDE_INT c) switch (code) { - case CONST_INT: - if (GET_MODE_BITSIZE (mode) > HOST_BITS_PER_WIDE_INT) - { - double_int di_x = double_int::from_shwi (INTVAL (x)); - double_int di_c = double_int::from_shwi (c); - - bool overflow; - double_int v = di_x.add_with_sign (di_c, false, &overflow); - if (overflow) - gcc_unreachable (); - - return immed_double_int_const (v, mode); - } - - return gen_int_mode (INTVAL (x) + c, mode); - - case CONST_DOUBLE: - { - double_int di_x = double_int::from_pair (CONST_DOUBLE_HIGH (x), - CONST_DOUBLE_LOW (x)); - double_int di_c = double_int::from_shwi (c); - - bool overflow; - double_int v = di_x.add_with_sign (di_c, false, &overflow); - if (overflow) - /* Sorry, we have no way to represent overflows this wide. - To fix, add constant support wider than CONST_DOUBLE. */ - gcc_assert (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_DOUBLE_INT); - - return immed_double_int_const (v, mode); - } - + CASE_CONST_SCALAR_INT: + return immed_wide_int_const (wi::add (std::make_pair (x, mode), c), + mode); case MEM: /* If this is a reference to the constant pool, try replacing it with a reference to a new constant. 
If the resulting address isn't @@ -270,10 +241,10 @@ int_expr_size (tree exp) gcc_assert (size); } - if (size == 0 || !host_integerp (size, 0)) + if (size == 0 || !tree_fits_shwi_p (size)) return -1; - return tree_low_cst (size, 0); + return tree_to_shwi (size); } /* Return a copy of X in which all memory references diff --git a/gcc/expmed.c b/gcc/expmed.c index 92c293879d5..d4227722955 100644 --- a/gcc/expmed.c +++ b/gcc/expmed.c @@ -55,7 +55,6 @@ static void store_split_bit_field (rtx, unsigned HOST_WIDE_INT, static rtx extract_fixed_bit_field (enum machine_mode, rtx, unsigned HOST_WIDE_INT, unsigned HOST_WIDE_INT, rtx, int); -static rtx mask_rtx (enum machine_mode, int, int, int); static rtx lshift_value (enum machine_mode, unsigned HOST_WIDE_INT, int); static rtx extract_split_bit_field (rtx, unsigned HOST_WIDE_INT, unsigned HOST_WIDE_INT, int); @@ -63,6 +62,19 @@ static void do_cmp_and_jump (rtx, rtx, enum rtx_code, enum machine_mode, rtx); static rtx expand_smod_pow2 (enum machine_mode, rtx, HOST_WIDE_INT); static rtx expand_sdiv_pow2 (enum machine_mode, rtx, HOST_WIDE_INT); +/* Return a constant integer mask value of mode MODE with BITSIZE ones + followed by BITPOS zeros, or the complement of that if COMPLEMENT. + The mask is truncated if necessary to the width of mode MODE. The + mask is zero-extended if BITSIZE+BITPOS is too small for MODE. */ + +static inline rtx +mask_rtx (enum machine_mode mode, int bitpos, int bitsize, bool complement) +{ + return immed_wide_int_const + (wi::shifted_mask (bitpos, bitsize, complement, + GET_MODE_PRECISION (mode)), mode); +} + /* Test whether a value is zero of a power of two. 
*/ #define EXACT_POWER_OF_2_OR_ZERO_P(x) \ (((x) & ((x) - (unsigned HOST_WIDE_INT) 1)) == 0) @@ -1801,26 +1813,6 @@ extract_fixed_bit_field (enum machine_mode tmode, rtx op0, return expand_shift (RSHIFT_EXPR, mode, op0, GET_MODE_BITSIZE (mode) - bitsize, target, 0); } - -/* Return a constant integer (CONST_INT or CONST_DOUBLE) mask value - of mode MODE with BITSIZE ones followed by BITPOS zeros, or the - complement of that if COMPLEMENT. The mask is truncated if - necessary to the width of mode MODE. The mask is zero-extended if - BITSIZE+BITPOS is too small for MODE. */ - -static rtx -mask_rtx (enum machine_mode mode, int bitpos, int bitsize, int complement) -{ - double_int mask; - - mask = double_int::mask (bitsize); - mask = mask.llshift (bitpos, HOST_BITS_PER_DOUBLE_INT); - - if (complement) - mask = ~mask; - - return immed_double_int_const (mask, mode); -} /* Return a constant integer (CONST_INT or CONST_DOUBLE) rtx with the value VALUE << BITPOS. */ @@ -1829,12 +1821,9 @@ static rtx lshift_value (enum machine_mode mode, unsigned HOST_WIDE_INT value, int bitpos) { - double_int val; - - val = double_int::from_uhwi (value); - val = val.llshift (bitpos, HOST_BITS_PER_DOUBLE_INT); - - return immed_double_int_const (val, mode); + return + immed_wide_int_const (wi::lshift (max_wide_int (value), + bitpos), mode); } /* Extract a bit field that is split across two words @@ -3062,37 +3051,41 @@ expand_mult (enum machine_mode mode, rtx op0, rtx op1, rtx target, only if the constant value exactly fits in an `unsigned int' without any truncation. This means that multiplying by negative values does not work; results are off by 2^32 on a 32 bit machine. */ - if (CONST_INT_P (scalar_op1)) { coeff = INTVAL (scalar_op1); is_neg = coeff < 0; } +#if TARGET_SUPPORTS_WIDE_INT + else if (CONST_WIDE_INT_P (scalar_op1)) +#else else if (CONST_DOUBLE_AS_INT_P (scalar_op1)) +#endif { - /* If we are multiplying in DImode, it may still be a win - to try to work with shifts and adds. 
*/ - if (CONST_DOUBLE_HIGH (scalar_op1) == 0 - && (CONST_DOUBLE_LOW (scalar_op1) > 0 - || (CONST_DOUBLE_LOW (scalar_op1) < 0 - && EXACT_POWER_OF_2_OR_ZERO_P - (CONST_DOUBLE_LOW (scalar_op1))))) + int p = GET_MODE_PRECISION (mode); + wide_int val = std::make_pair (scalar_op1, mode); + int shift = wi::exact_log2 (val); + /* Perfect power of 2. */ + is_neg = false; + if (shift > 0) { - coeff = CONST_DOUBLE_LOW (scalar_op1); - is_neg = false; + /* Do the shift count trucation against the bitsize, not + the precision. See the comment above + wide-int.c:trunc_shift for details. */ + if (SHIFT_COUNT_TRUNCATED) + shift &= GET_MODE_BITSIZE (mode) - 1; + /* We could consider adding just a move of 0 to target + if the shift >= p */ + if (shift < p) + return expand_shift (LSHIFT_EXPR, mode, op0, + shift, target, unsignedp); + /* Any positive number that fits in a word. */ + coeff = CONST_WIDE_INT_ELT (scalar_op1, 0); } - else if (CONST_DOUBLE_LOW (scalar_op1) == 0) + else if (wi::sign_mask (val) == 0) { - coeff = CONST_DOUBLE_HIGH (scalar_op1); - if (EXACT_POWER_OF_2_OR_ZERO_P (coeff)) - { - int shift = floor_log2 (coeff) + HOST_BITS_PER_WIDE_INT; - if (shift < HOST_BITS_PER_DOUBLE_INT - 1 - || mode_bitsize <= HOST_BITS_PER_DOUBLE_INT) - return expand_shift (LSHIFT_EXPR, mode, op0, - shift, target, unsignedp); - } - goto skip_synth; + /* Any positive number that fits in a word. */ + coeff = CONST_WIDE_INT_ELT (scalar_op1, 0); } else goto skip_synth; @@ -3270,7 +3263,7 @@ choose_multiplier (unsigned HOST_WIDE_INT d, int n, int precision, unsigned HOST_WIDE_INT *multiplier_ptr, int *post_shift_ptr, int *lgup_ptr) { - double_int mhigh, mlow; + wide_int mhigh, mlow; int lgup, post_shift; int pow, pow2; @@ -3282,23 +3275,13 @@ choose_multiplier (unsigned HOST_WIDE_INT d, int n, int precision, pow = n + lgup; pow2 = n + lgup - precision; - /* We could handle this with some effort, but this case is much - better handled directly with a scc insn, so rely on caller using - that. 
*/ - gcc_assert (pow != HOST_BITS_PER_DOUBLE_INT); - /* mlow = 2^(N + lgup)/d */ - double_int val = double_int_zero.set_bit (pow); - mlow = val.div (double_int::from_uhwi (d), true, TRUNC_DIV_EXPR); + wide_int val = wi::set_bit_in_zero (pow, HOST_BITS_PER_DOUBLE_INT); + mlow = wi::udiv_trunc (val, d); /* mhigh = (2^(N + lgup) + 2^(N + lgup - precision))/d */ - val |= double_int_zero.set_bit (pow2); - mhigh = val.div (double_int::from_uhwi (d), true, TRUNC_DIV_EXPR); - - gcc_assert (!mhigh.high || val.high - d < d); - gcc_assert (mhigh.high <= 1 && mlow.high <= 1); - /* Assert that mlow < mhigh. */ - gcc_assert (mlow.ult (mhigh)); + val |= wi::set_bit_in_zero (pow2, HOST_BITS_PER_DOUBLE_INT); + mhigh = wi::udiv_trunc (val, d); /* If precision == N, then mlow, mhigh exceed 2^N (but they do not exceed 2^(N+1)). */ @@ -3306,14 +3289,15 @@ choose_multiplier (unsigned HOST_WIDE_INT d, int n, int precision, /* Reduce to lowest terms. */ for (post_shift = lgup; post_shift > 0; post_shift--) { - int shft = HOST_BITS_PER_WIDE_INT - 1; - unsigned HOST_WIDE_INT ml_lo = (mlow.high << shft) | (mlow.low >> 1); - unsigned HOST_WIDE_INT mh_lo = (mhigh.high << shft) | (mhigh.low >> 1); + unsigned HOST_WIDE_INT ml_lo = wi::extract_uhwi (mlow, 1, + HOST_BITS_PER_WIDE_INT); + unsigned HOST_WIDE_INT mh_lo = wi::extract_uhwi (mhigh, 1, + HOST_BITS_PER_WIDE_INT); if (ml_lo >= mh_lo) break; - mlow = double_int::from_uhwi (ml_lo); - mhigh = double_int::from_uhwi (mh_lo); + mlow = wi::uhwi (ml_lo, HOST_BITS_PER_DOUBLE_INT); + mhigh = wi::uhwi (mh_lo, HOST_BITS_PER_DOUBLE_INT); } *post_shift_ptr = post_shift; @@ -3321,13 +3305,13 @@ choose_multiplier (unsigned HOST_WIDE_INT d, int n, int precision, if (n < HOST_BITS_PER_WIDE_INT) { unsigned HOST_WIDE_INT mask = ((unsigned HOST_WIDE_INT) 1 << n) - 1; - *multiplier_ptr = mhigh.low & mask; - return mhigh.low >= mask; + *multiplier_ptr = mhigh.to_uhwi () & mask; + return mhigh.to_uhwi () >= mask; } else { - *multiplier_ptr = mhigh.low; - return 
mhigh.high; + *multiplier_ptr = mhigh.to_uhwi (); + return wi::extract_uhwi (mhigh, HOST_BITS_PER_WIDE_INT, 1); } } @@ -3594,9 +3578,10 @@ expmed_mult_highpart (enum machine_mode mode, rtx op0, rtx op1, static rtx expand_smod_pow2 (enum machine_mode mode, rtx op0, HOST_WIDE_INT d) { - unsigned HOST_WIDE_INT masklow, maskhigh; rtx result, temp, shift, label; int logd; + wide_int mask; + int prec = GET_MODE_PRECISION (mode); logd = floor_log2 (d); result = gen_reg_rtx (mode); @@ -3609,8 +3594,8 @@ expand_smod_pow2 (enum machine_mode mode, rtx op0, HOST_WIDE_INT d) mode, 0, -1); if (signmask) { + HOST_WIDE_INT masklow = ((HOST_WIDE_INT) 1 << logd) - 1; signmask = force_reg (mode, signmask); - masklow = ((HOST_WIDE_INT) 1 << logd) - 1; shift = GEN_INT (GET_MODE_BITSIZE (mode) - logd); /* Use the rtx_cost of a LSHIFTRT instruction to determine @@ -3657,19 +3642,11 @@ expand_smod_pow2 (enum machine_mode mode, rtx op0, HOST_WIDE_INT d) modulus. By including the signbit in the operation, many targets can avoid an explicit compare operation in the following comparison against zero. 
*/ - - masklow = ((HOST_WIDE_INT) 1 << logd) - 1; - if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT) - { - masklow |= HOST_WIDE_INT_M1U << (GET_MODE_BITSIZE (mode) - 1); - maskhigh = -1; - } - else - maskhigh = HOST_WIDE_INT_M1U - << (GET_MODE_BITSIZE (mode) - HOST_BITS_PER_WIDE_INT - 1); + mask = wi::mask (logd, false, GET_MODE_PRECISION (mode)); + mask = wi::set_bit (mask, prec - 1); temp = expand_binop (mode, and_optab, op0, - immed_double_const (masklow, maskhigh, mode), + immed_wide_int_const (mask, mode), result, 1, OPTAB_LIB_WIDEN); if (temp != result) emit_move_insn (result, temp); @@ -3679,10 +3656,10 @@ expand_smod_pow2 (enum machine_mode mode, rtx op0, HOST_WIDE_INT d) temp = expand_binop (mode, sub_optab, result, const1_rtx, result, 0, OPTAB_LIB_WIDEN); - masklow = HOST_WIDE_INT_M1U << logd; - maskhigh = -1; + + mask = wi::mask (logd, true, GET_MODE_PRECISION (mode)); temp = expand_binop (mode, ior_optab, temp, - immed_double_const (masklow, maskhigh, mode), + immed_wide_int_const (mask, mode), result, 1, OPTAB_LIB_WIDEN); temp = expand_binop (mode, add_optab, temp, const1_rtx, result, 0, OPTAB_LIB_WIDEN); @@ -4926,24 +4903,15 @@ make_tree (tree type, rtx x) switch (GET_CODE (x)) { case CONST_INT: - { - HOST_WIDE_INT hi = 0; - - if (INTVAL (x) < 0 - && !(TYPE_UNSIGNED (type) - && (GET_MODE_BITSIZE (TYPE_MODE (type)) - < HOST_BITS_PER_WIDE_INT))) - hi = -1; - - t = build_int_cst_wide (type, INTVAL (x), hi); - - return t; - } + case CONST_WIDE_INT: + t = wide_int_to_tree (type, std::make_pair (x, TYPE_MODE (type))); + return t; case CONST_DOUBLE: - if (GET_MODE (x) == VOIDmode) - t = build_int_cst_wide (type, - CONST_DOUBLE_LOW (x), CONST_DOUBLE_HIGH (x)); + if (TARGET_SUPPORTS_WIDE_INT == 0 && GET_MODE (x) == VOIDmode) + t = wide_int_to_tree (type, + wide_int::from_array (&CONST_DOUBLE_LOW (x), 2, + HOST_BITS_PER_WIDE_INT * 2)); else { REAL_VALUE_TYPE d; diff --git a/gcc/expr.c b/gcc/expr.c index 76ee9e0ab7f..f71435dd571 100644 --- a/gcc/expr.c +++ 
b/gcc/expr.c @@ -711,64 +711,34 @@ convert_modes (enum machine_mode mode, enum machine_mode oldmode, rtx x, int uns if (mode == oldmode) return x; - /* There is one case that we must handle specially: If we are converting - a CONST_INT into a mode whose size is twice HOST_BITS_PER_WIDE_INT and - we are to interpret the constant as unsigned, gen_lowpart will do - the wrong if the constant appears negative. What we want to do is - make the high-order word of the constant zero, not all ones. */ - - if (unsignedp && GET_MODE_CLASS (mode) == MODE_INT - && GET_MODE_BITSIZE (mode) == HOST_BITS_PER_DOUBLE_INT - && CONST_INT_P (x) && INTVAL (x) < 0) + if (CONST_SCALAR_INT_P (x) + && GET_MODE_CLASS (mode) == MODE_INT + && (oldmode == VOIDmode || GET_MODE_CLASS (oldmode) == MODE_INT)) { - double_int val = double_int::from_uhwi (INTVAL (x)); - - /* We need to zero extend VAL. */ - if (oldmode != VOIDmode) - val = val.zext (GET_MODE_BITSIZE (oldmode)); - - return immed_double_int_const (val, mode); + /* If the caller did not tell us the old mode, then there is + not much to do with respect to canonization. We have to assume + that all the bits are significant. */ + if (oldmode == VOIDmode) + oldmode = MAX_MODE_INT; + wide_int w = wide_int::from (std::make_pair (x, oldmode), + GET_MODE_PRECISION (mode), + unsignedp ? UNSIGNED : SIGNED); + return immed_wide_int_const (w, mode); } /* We can do this with a gen_lowpart if both desired and current modes are integer, and this is either a constant integer, a register, or a - non-volatile MEM. Except for the constant case where MODE is no - wider than HOST_BITS_PER_WIDE_INT, we must be narrowing the operand. */ - - if ((CONST_INT_P (x) - && GET_MODE_PRECISION (mode) <= HOST_BITS_PER_WIDE_INT) - || (GET_MODE_CLASS (mode) == MODE_INT - && GET_MODE_CLASS (oldmode) == MODE_INT - && (CONST_DOUBLE_AS_INT_P (x) - || (GET_MODE_PRECISION (mode) <= GET_MODE_PRECISION (oldmode) - && ((MEM_P (x) && ! 
MEM_VOLATILE_P (x) - && direct_load[(int) mode]) - || (REG_P (x) - && (! HARD_REGISTER_P (x) - || HARD_REGNO_MODE_OK (REGNO (x), mode)) - && TRULY_NOOP_TRUNCATION_MODES_P (mode, - GET_MODE (x)))))))) - { - /* ?? If we don't know OLDMODE, we have to assume here that - X does not need sign- or zero-extension. This may not be - the case, but it's the best we can do. */ - if (CONST_INT_P (x) && oldmode != VOIDmode - && GET_MODE_PRECISION (mode) > GET_MODE_PRECISION (oldmode)) - { - HOST_WIDE_INT val = INTVAL (x); - - /* We must sign or zero-extend in this case. Start by - zero-extending, then sign extend if we need to. */ - val &= GET_MODE_MASK (oldmode); - if (! unsignedp - && val_signbit_known_set_p (oldmode, val)) - val |= ~GET_MODE_MASK (oldmode); - - return gen_int_mode (val, mode); - } - - return gen_lowpart (mode, x); - } + non-volatile MEM. */ + if (GET_MODE_CLASS (mode) == MODE_INT + && GET_MODE_CLASS (oldmode) == MODE_INT + && GET_MODE_PRECISION (mode) <= GET_MODE_PRECISION (oldmode) + && ((MEM_P (x) && !MEM_VOLATILE_P (x) && direct_load[(int) mode]) + || (REG_P (x) + && (!HARD_REGISTER_P (x) + || HARD_REGNO_MODE_OK (REGNO (x), mode)) + && TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (x))))) + + return gen_lowpart (mode, x); /* Converting from integer constant into mode is always equivalent to an subreg operation. */ @@ -1750,6 +1720,7 @@ emit_group_load_1 (rtx *tmps, rtx dst, rtx orig_src, tree type, int ssize) { rtx first, second; + /* TODO: const_wide_int can have sizes other than this... */ gcc_assert (2 * len == ssize); split_double (src, &first, &second); if (i) @@ -4560,14 +4531,14 @@ get_bit_range (unsigned HOST_WIDE_INT *bitstart, relative to the representative. DECL_FIELD_OFFSET of field and repr are the same by construction if they are not constants, see finish_bitfield_layout. 
*/ - if (host_integerp (DECL_FIELD_OFFSET (field), 1) - && host_integerp (DECL_FIELD_OFFSET (repr), 1)) - bitoffset = (tree_low_cst (DECL_FIELD_OFFSET (field), 1) - - tree_low_cst (DECL_FIELD_OFFSET (repr), 1)) * BITS_PER_UNIT; + if (tree_fits_uhwi_p (DECL_FIELD_OFFSET (field)) + && tree_fits_uhwi_p (DECL_FIELD_OFFSET (repr))) + bitoffset = (tree_to_uhwi (DECL_FIELD_OFFSET (field)) + - tree_to_uhwi (DECL_FIELD_OFFSET (repr))) * BITS_PER_UNIT; else bitoffset = 0; - bitoffset += (tree_low_cst (DECL_FIELD_BIT_OFFSET (field), 1) - - tree_low_cst (DECL_FIELD_BIT_OFFSET (repr), 1)); + bitoffset += (tree_to_uhwi (DECL_FIELD_BIT_OFFSET (field)) + - tree_to_uhwi (DECL_FIELD_BIT_OFFSET (repr))); /* If the adjustment is larger than bitpos, we would have a negative bit position for the lower bound and this may wreak havoc later. This can @@ -4588,7 +4559,7 @@ get_bit_range (unsigned HOST_WIDE_INT *bitstart, else *bitstart = *bitpos - bitoffset; - *bitend = *bitstart + tree_low_cst (DECL_SIZE (repr), 1) - 1; + *bitend = *bitstart + tree_to_uhwi (DECL_SIZE (repr)) - 1; } /* Returns true if ADDR is an ADDR_EXPR of a DECL that does not reside @@ -4794,12 +4765,14 @@ expand_assignment (tree to, tree from, bool nontemporal) && (bitpos == 0 || bitpos == mode_bitsize / 2)) result = store_expr (from, XEXP (to_rtx, bitpos != 0), false, nontemporal); - else if (bitpos + bitsize <= mode_bitsize / 2) + else if (bitpos + bitsize <= mode_bitsize / 2 + && bitpos+bitsize <= mode_bitsize) result = store_field (XEXP (to_rtx, 0), bitsize, bitpos, bitregion_start, bitregion_end, mode1, from, get_alias_set (to), nontemporal); - else if (bitpos >= mode_bitsize / 2) + else if (bitpos >= mode_bitsize / 2 + && bitpos+bitsize <= mode_bitsize) result = store_field (XEXP (to_rtx, 1), bitsize, bitpos - mode_bitsize / 2, bitregion_start, bitregion_end, @@ -4818,8 +4791,12 @@ expand_assignment (tree to, tree from, bool nontemporal) } else { + HOST_WIDE_INT extra = 0; + if (bitpos+bitsize > mode_bitsize) + 
extra = bitpos+bitsize - mode_bitsize; rtx temp = assign_stack_temp (GET_MODE (to_rtx), - GET_MODE_SIZE (GET_MODE (to_rtx))); + GET_MODE_SIZE (GET_MODE (to_rtx)) + + extra); write_complex_part (temp, XEXP (to_rtx, 0), false); write_complex_part (temp, XEXP (to_rtx, 1), true); result = store_field (temp, bitsize, bitpos, @@ -5208,10 +5185,10 @@ store_expr (tree exp, rtx target, int call_param_p, bool nontemporal) &alt_rtl); } - /* If TEMP is a VOIDmode constant and the mode of the type of EXP is not - the same as that of TARGET, adjust the constant. This is needed, for - example, in case it is a CONST_DOUBLE and we want only a word-sized - value. */ + /* If TEMP is a VOIDmode constant and the mode of the type of EXP is + not the same as that of TARGET, adjust the constant. This is + needed, for example, in case it is a CONST_DOUBLE or + CONST_WIDE_INT and we want only a word-sized value. */ if (CONSTANT_P (temp) && GET_MODE (temp) == VOIDmode && TREE_CODE (exp) != ERROR_MARK && GET_MODE (target) != TYPE_MODE (TREE_TYPE (exp))) @@ -5404,11 +5381,11 @@ count_type_elements (const_tree type, bool for_ctor_p) tree nelts; nelts = array_type_nelts (type); - if (nelts && host_integerp (nelts, 1)) + if (nelts && tree_fits_uhwi_p (nelts)) { unsigned HOST_WIDE_INT n; - n = tree_low_cst (nelts, 1) + 1; + n = tree_to_uhwi (nelts) + 1; if (n == 0 || for_ctor_p) return n; else @@ -5523,9 +5500,9 @@ categorize_ctor_elements_1 (const_tree ctor, HOST_WIDE_INT *p_nz_elts, tree lo_index = TREE_OPERAND (purpose, 0); tree hi_index = TREE_OPERAND (purpose, 1); - if (host_integerp (lo_index, 1) && host_integerp (hi_index, 1)) - mult = (tree_low_cst (hi_index, 1) - - tree_low_cst (lo_index, 1) + 1); + if (tree_fits_uhwi_p (lo_index) && tree_fits_uhwi_p (hi_index)) + mult = (tree_to_uhwi (hi_index) + - tree_to_uhwi (lo_index) + 1); } num_fields += mult; elt_type = TREE_TYPE (value); @@ -5825,8 +5802,8 @@ store_constructor (tree exp, rtx target, int cleared, HOST_WIDE_INT size) if (cleared && 
initializer_zerop (value)) continue; - if (host_integerp (DECL_SIZE (field), 1)) - bitsize = tree_low_cst (DECL_SIZE (field), 1); + if (tree_fits_uhwi_p (DECL_SIZE (field))) + bitsize = tree_to_uhwi (DECL_SIZE (field)); else bitsize = -1; @@ -5835,14 +5812,14 @@ store_constructor (tree exp, rtx target, int cleared, HOST_WIDE_INT size) mode = VOIDmode; offset = DECL_FIELD_OFFSET (field); - if (host_integerp (offset, 0) - && host_integerp (bit_position (field), 0)) + if (tree_fits_shwi_p (offset) + && tree_fits_shwi_p (bit_position (field))) { bitpos = int_bit_position (field); offset = 0; } else - bitpos = tree_low_cst (DECL_FIELD_BIT_OFFSET (field), 0); + bitpos = tree_to_shwi (DECL_FIELD_BIT_OFFSET (field)); if (offset) { @@ -5925,14 +5902,14 @@ store_constructor (tree exp, rtx target, int cleared, HOST_WIDE_INT size) domain = TYPE_DOMAIN (type); const_bounds_p = (TYPE_MIN_VALUE (domain) && TYPE_MAX_VALUE (domain) - && host_integerp (TYPE_MIN_VALUE (domain), 0) - && host_integerp (TYPE_MAX_VALUE (domain), 0)); + && tree_fits_shwi_p (TYPE_MIN_VALUE (domain)) + && tree_fits_shwi_p (TYPE_MAX_VALUE (domain))); /* If we have constant bounds for the range of the type, get them. */ if (const_bounds_p) { - minelt = tree_low_cst (TYPE_MIN_VALUE (domain), 0); - maxelt = tree_low_cst (TYPE_MAX_VALUE (domain), 0); + minelt = tree_to_shwi (TYPE_MIN_VALUE (domain)); + maxelt = tree_to_shwi (TYPE_MAX_VALUE (domain)); } /* If the constructor has fewer elements than the array, clear @@ -5964,15 +5941,15 @@ store_constructor (tree exp, rtx target, int cleared, HOST_WIDE_INT size) tree lo_index = TREE_OPERAND (index, 0); tree hi_index = TREE_OPERAND (index, 1); - if (! host_integerp (lo_index, 1) - || ! host_integerp (hi_index, 1)) + if (! tree_fits_uhwi_p (lo_index) + || ! 
tree_fits_uhwi_p (hi_index)) { need_to_clear = 1; break; } - this_node_count = (tree_low_cst (hi_index, 1) - - tree_low_cst (lo_index, 1) + 1); + this_node_count = (tree_to_uhwi (hi_index) + - tree_to_uhwi (lo_index) + 1); } else this_node_count = 1; @@ -6019,8 +5996,8 @@ store_constructor (tree exp, rtx target, int cleared, HOST_WIDE_INT size) mode = TYPE_MODE (elttype); if (mode == BLKmode) - bitsize = (host_integerp (TYPE_SIZE (elttype), 1) - ? tree_low_cst (TYPE_SIZE (elttype), 1) + bitsize = (tree_fits_uhwi_p (TYPE_SIZE (elttype)) + ? tree_to_uhwi (TYPE_SIZE (elttype)) : -1); else bitsize = GET_MODE_BITSIZE (mode); @@ -6035,21 +6012,21 @@ store_constructor (tree exp, rtx target, int cleared, HOST_WIDE_INT size) /* If the range is constant and "small", unroll the loop. */ if (const_bounds_p - && host_integerp (lo_index, 0) - && host_integerp (hi_index, 0) - && (lo = tree_low_cst (lo_index, 0), - hi = tree_low_cst (hi_index, 0), + && tree_fits_shwi_p (lo_index) + && tree_fits_shwi_p (hi_index) + && (lo = tree_to_shwi (lo_index), + hi = tree_to_shwi (hi_index), count = hi - lo + 1, (!MEM_P (target) || count <= 2 - || (host_integerp (TYPE_SIZE (elttype), 1) - && (tree_low_cst (TYPE_SIZE (elttype), 1) * count + || (tree_fits_uhwi_p (TYPE_SIZE (elttype)) + && (tree_to_uhwi (TYPE_SIZE (elttype)) * count <= 40 * 8))))) { lo -= minelt; hi -= minelt; for (; lo <= hi; lo++) { - bitpos = lo * tree_low_cst (TYPE_SIZE (elttype), 0); + bitpos = lo * tree_to_shwi (TYPE_SIZE (elttype)); if (MEM_P (target) && !MEM_KEEP_ALIAS_SET_P (target) @@ -6124,8 +6101,8 @@ store_constructor (tree exp, rtx target, int cleared, HOST_WIDE_INT size) emit_label (loop_end); } } - else if ((index != 0 && ! host_integerp (index, 0)) - || ! host_integerp (TYPE_SIZE (elttype), 1)) + else if ((index != 0 && ! tree_fits_shwi_p (index)) + || ! 
tree_fits_uhwi_p (TYPE_SIZE (elttype))) { tree position; @@ -6152,10 +6129,10 @@ store_constructor (tree exp, rtx target, int cleared, HOST_WIDE_INT size) else { if (index != 0) - bitpos = ((tree_low_cst (index, 0) - minelt) - * tree_low_cst (TYPE_SIZE (elttype), 1)); + bitpos = ((tree_to_shwi (index) - minelt) + * tree_to_uhwi (TYPE_SIZE (elttype))); else - bitpos = (i * tree_low_cst (TYPE_SIZE (elttype), 1)); + bitpos = (i * tree_to_uhwi (TYPE_SIZE (elttype))); if (MEM_P (target) && !MEM_KEEP_ALIAS_SET_P (target) && TREE_CODE (type) == ARRAY_TYPE @@ -6179,7 +6156,7 @@ store_constructor (tree exp, rtx target, int cleared, HOST_WIDE_INT size) int need_to_clear; int icode = CODE_FOR_nothing; tree elttype = TREE_TYPE (type); - int elt_size = tree_low_cst (TYPE_SIZE (elttype), 1); + int elt_size = tree_to_uhwi (TYPE_SIZE (elttype)); enum machine_mode eltmode = TYPE_MODE (elttype); HOST_WIDE_INT bitsize; HOST_WIDE_INT bitpos; @@ -6219,10 +6196,10 @@ store_constructor (tree exp, rtx target, int cleared, HOST_WIDE_INT size) FOR_EACH_CONSTRUCTOR_VALUE (CONSTRUCTOR_ELTS (exp), idx, value) { - int n_elts_here = tree_low_cst + int n_elts_here = tree_to_uhwi (int_const_binop (TRUNC_DIV_EXPR, TYPE_SIZE (TREE_TYPE (value)), - TYPE_SIZE (elttype)), 1); + TYPE_SIZE (elttype))); count += n_elts_here; if (mostly_zeros_p (value)) @@ -6261,12 +6238,12 @@ store_constructor (tree exp, rtx target, int cleared, HOST_WIDE_INT size) HOST_WIDE_INT eltpos; tree value = ce->value; - bitsize = tree_low_cst (TYPE_SIZE (TREE_TYPE (value)), 1); + bitsize = tree_to_uhwi (TYPE_SIZE (TREE_TYPE (value))); if (cleared && initializer_zerop (value)) continue; if (ce->index) - eltpos = tree_low_cst (ce->index, 1); + eltpos = tree_to_uhwi (ce->index); else eltpos = i; @@ -6543,7 +6520,7 @@ get_inner_reference (tree exp, HOST_WIDE_INT *pbitsize, enum machine_mode mode = VOIDmode; bool blkmode_bitfield = false; tree offset = size_zero_node; - double_int bit_offset = double_int_zero; + addr_wide_int 
bit_offset = 0; /* First get the mode, signedness, and size. We do this from just the outermost expression. */ @@ -6591,10 +6568,10 @@ get_inner_reference (tree exp, HOST_WIDE_INT *pbitsize, if (size_tree != 0) { - if (! host_integerp (size_tree, 1)) + if (! tree_fits_uhwi_p (size_tree)) mode = BLKmode, *pbitsize = -1; else - *pbitsize = tree_low_cst (size_tree, 1); + *pbitsize = tree_to_uhwi (size_tree); } /* Compute cumulative bit-offset for nested component-refs and array-refs, @@ -6604,7 +6581,7 @@ get_inner_reference (tree exp, HOST_WIDE_INT *pbitsize, switch (TREE_CODE (exp)) { case BIT_FIELD_REF: - bit_offset += tree_to_double_int (TREE_OPERAND (exp, 2)); + bit_offset += TREE_OPERAND (exp, 2); break; case COMPONENT_REF: @@ -6619,7 +6596,7 @@ get_inner_reference (tree exp, HOST_WIDE_INT *pbitsize, break; offset = size_binop (PLUS_EXPR, offset, this_offset); - bit_offset += tree_to_double_int (DECL_FIELD_BIT_OFFSET (field)); + bit_offset += DECL_FIELD_BIT_OFFSET (field); /* ??? Right now we don't do anything with DECL_OFFSET_ALIGN. */ } @@ -6651,7 +6628,7 @@ get_inner_reference (tree exp, HOST_WIDE_INT *pbitsize, break; case IMAGPART_EXPR: - bit_offset += double_int::from_uhwi (*pbitsize); + bit_offset += *pbitsize; break; case VIEW_CONVERT_EXPR: @@ -6672,9 +6649,9 @@ get_inner_reference (tree exp, HOST_WIDE_INT *pbitsize, tree off = TREE_OPERAND (exp, 1); if (!integer_zerop (off)) { - double_int boff, coff = mem_ref_offset (exp); - boff = coff.lshift (BITS_PER_UNIT == 8 - ? 3 : exact_log2 (BITS_PER_UNIT)); + addr_wide_int boff, coff = mem_ref_offset (exp); + boff = wi::lshift (coff, (BITS_PER_UNIT == 8 + ? 3 : exact_log2 (BITS_PER_UNIT))); bit_offset += boff; } exp = TREE_OPERAND (TREE_OPERAND (exp, 0), 0); @@ -6698,11 +6675,12 @@ get_inner_reference (tree exp, HOST_WIDE_INT *pbitsize, this conversion. 
*/ if (TREE_CODE (offset) == INTEGER_CST) { - double_int tem = tree_to_double_int (offset); - tem = tem.sext (TYPE_PRECISION (sizetype)); - tem = tem.lshift (BITS_PER_UNIT == 8 ? 3 : exact_log2 (BITS_PER_UNIT)); + addr_wide_int tem = wi::sext (addr_wide_int (offset), + TYPE_PRECISION (sizetype)); + tem = wi::lshift (tem, (BITS_PER_UNIT == 8 + ? 3 : exact_log2 (BITS_PER_UNIT))); tem += bit_offset; - if (tem.fits_shwi ()) + if (wi::fits_shwi_p (tem)) { *pbitpos = tem.to_shwi (); *poffset = offset = NULL_TREE; @@ -6713,20 +6691,20 @@ get_inner_reference (tree exp, HOST_WIDE_INT *pbitsize, if (offset) { /* Avoid returning a negative bitpos as this may wreak havoc later. */ - if (bit_offset.is_negative ()) + if (wi::neg_p (bit_offset)) { - double_int mask - = double_int::mask (BITS_PER_UNIT == 8 - ? 3 : exact_log2 (BITS_PER_UNIT)); - double_int tem = bit_offset.and_not (mask); + addr_wide_int mask + = wi::mask <addr_wide_int> (BITS_PER_UNIT == 8 + ? 3 : exact_log2 (BITS_PER_UNIT), + false); + addr_wide_int tem = bit_offset.and_not (mask); /* TEM is the bitpos rounded to BITS_PER_UNIT towards -Inf. Subtract it to BIT_OFFSET and add it (scaled) to OFFSET. */ bit_offset -= tem; - tem = tem.arshift (BITS_PER_UNIT == 8 - ? 3 : exact_log2 (BITS_PER_UNIT), - HOST_BITS_PER_DOUBLE_INT); + tem = wi::arshift (tem, (BITS_PER_UNIT == 8 + ? 3 : exact_log2 (BITS_PER_UNIT))); offset = size_binop (PLUS_EXPR, offset, - double_int_to_tree (sizetype, tem)); + wide_int_to_tree (sizetype, tem)); } *pbitpos = bit_offset.to_shwi (); @@ -7292,9 +7270,7 @@ highest_pow2_factor (const_tree exp) return BIGGEST_ALIGNMENT; else { - /* Note: tree_low_cst is intentionally not used here, - we don't care about the upper bits. */ - c0 = TREE_INT_CST_LOW (exp); + c0 = tree_to_hwi (exp); c0 &= -c0; return c0 ? 
c0 : BIGGEST_ALIGNMENT; } @@ -7313,10 +7289,10 @@ highest_pow2_factor (const_tree exp) case ROUND_DIV_EXPR: case TRUNC_DIV_EXPR: case FLOOR_DIV_EXPR: case CEIL_DIV_EXPR: if (integer_pow2p (TREE_OPERAND (exp, 1)) - && host_integerp (TREE_OPERAND (exp, 1), 1)) + && tree_fits_uhwi_p (TREE_OPERAND (exp, 1))) { c0 = highest_pow2_factor (TREE_OPERAND (exp, 0)); - c1 = tree_low_cst (TREE_OPERAND (exp, 1), 1); + c1 = tree_to_uhwi (TREE_OPERAND (exp, 1)); return MAX (1, c0 / c1); } break; @@ -7717,11 +7693,12 @@ expand_constructor (tree exp, rtx target, enum expand_modifier modifier, /* All elts simple constants => refer to a constant in memory. But if this is a non-BLKmode mode, let it store a field at a time - since that should make a CONST_INT or CONST_DOUBLE when we - fold. Likewise, if we have a target we can use, it is best to - store directly into the target unless the type is large enough - that memcpy will be used. If we are making an initializer and - all operands are constant, put it in memory as well. + since that should make a CONST_INT, CONST_WIDE_INT or + CONST_DOUBLE when we fold. Likewise, if we have a target we can + use, it is best to store directly into the target unless the type + is large enough that memcpy will be used. If we are making an + initializer and all operands are constant, put it in memory as + well. FIXME: Avoid trying to fill vector constructors piece-meal. Output them with output_constant_def below unless we're sure @@ -7731,9 +7708,9 @@ expand_constructor (tree exp, rtx target, enum expand_modifier modifier, && ((mode == BLKmode && ! (target != 0 && safe_from_p (target, exp, 1))) || TREE_ADDRESSABLE (exp) - || (host_integerp (TYPE_SIZE_UNIT (type), 1) + || (tree_fits_uhwi_p (TYPE_SIZE_UNIT (type)) && (! MOVE_BY_PIECES_P - (tree_low_cst (TYPE_SIZE_UNIT (type), 1), + (tree_to_uhwi (TYPE_SIZE_UNIT (type)), TYPE_ALIGN (type))) && ! 
mostly_zeros_p (exp)))) || ((modifier == EXPAND_INITIALIZER || modifier == EXPAND_CONST_ADDRESS) @@ -8191,17 +8168,18 @@ expand_expr_real_2 (sepops ops, rtx target, enum machine_mode tmode, && TREE_CONSTANT (treeop1)) { rtx constant_part; + HOST_WIDE_INT wc; + enum machine_mode wmode = TYPE_MODE (TREE_TYPE (treeop1)); op1 = expand_expr (treeop1, subtarget, VOIDmode, EXPAND_SUM); - /* Use immed_double_const to ensure that the constant is + /* Use wi::shwi to ensure that the constant is truncated according to the mode of OP1, then sign extended to a HOST_WIDE_INT. Using the constant directly can result in non-canonical RTL in a 64x32 cross compile. */ - constant_part - = immed_double_const (TREE_INT_CST_LOW (treeop0), - (HOST_WIDE_INT) 0, - TYPE_MODE (TREE_TYPE (treeop1))); + wc = tree_to_hwi (treeop0); + constant_part = + immed_wide_int_const (wi::shwi (wc, wmode), wmode); op1 = plus_constant (mode, op1, INTVAL (constant_part)); if (modifier != EXPAND_SUM && modifier != EXPAND_INITIALIZER) op1 = force_operand (op1, target); @@ -8213,6 +8191,8 @@ expand_expr_real_2 (sepops ops, rtx target, enum machine_mode tmode, && TREE_CONSTANT (treeop0)) { rtx constant_part; + HOST_WIDE_INT wc; + enum machine_mode wmode = TYPE_MODE (TREE_TYPE (treeop0)); op0 = expand_expr (treeop0, subtarget, VOIDmode, (modifier == EXPAND_INITIALIZER @@ -8227,14 +8207,13 @@ expand_expr_real_2 (sepops ops, rtx target, enum machine_mode tmode, return simplify_gen_binary (PLUS, mode, op0, op1); goto binop2; } - /* Use immed_double_const to ensure that the constant is + /* Use wi::shwi to ensure that the constant is truncated according to the mode of OP1, then sign extended to a HOST_WIDE_INT. Using the constant directly can result in non-canonical RTL in a 64x32 cross compile. 
*/ + wc = tree_to_hwi (treeop1); constant_part - = immed_double_const (TREE_INT_CST_LOW (treeop1), - (HOST_WIDE_INT) 0, - TYPE_MODE (TREE_TYPE (treeop0))); + = immed_wide_int_const (wi::shwi (wc, wmode), wmode); op0 = plus_constant (mode, op0, INTVAL (constant_part)); if (modifier != EXPAND_SUM && modifier != EXPAND_INITIALIZER) op0 = force_operand (op0, target); @@ -8513,7 +8492,7 @@ expand_expr_real_2 (sepops ops, rtx target, enum machine_mode tmode, indexed address, for machines that support that. */ if (modifier == EXPAND_SUM && mode == ptr_mode - && host_integerp (treeop1, 0)) + && tree_fits_shwi_p (treeop1)) { tree exp1 = treeop1; @@ -8526,8 +8505,7 @@ expand_expr_real_2 (sepops ops, rtx target, enum machine_mode tmode, op0 = copy_to_mode_reg (mode, op0); return REDUCE_BIT_FIELD (gen_rtx_MULT (mode, op0, - gen_int_mode (tree_low_cst (exp1, 0), - TYPE_MODE (TREE_TYPE (exp1))))); + gen_int_mode (tree_to_shwi (exp1), TYPE_MODE (TREE_TYPE (exp1))))); } if (modifier == EXPAND_STACK_PARM) @@ -8763,10 +8741,14 @@ expand_expr_real_2 (sepops ops, rtx target, enum machine_mode tmode, for unsigned bitfield expand this as XOR with a proper constant instead. 
*/ if (reduce_bit_field && TYPE_UNSIGNED (type)) - temp = expand_binop (mode, xor_optab, op0, - immed_double_int_const - (double_int::mask (TYPE_PRECISION (type)), mode), - target, 1, OPTAB_LIB_WIDEN); + { + wide_int mask = wi::mask (TYPE_PRECISION (type), + false, GET_MODE_PRECISION (mode)); + + temp = expand_binop (mode, xor_optab, op0, + immed_wide_int_const (mask, mode), + target, 1, OPTAB_LIB_WIDEN); + } else temp = expand_unop (mode, one_cmpl_optab, op0, target, 1); gcc_assert (temp); @@ -9417,11 +9399,19 @@ expand_expr_real_1 (tree exp, rtx target, enum machine_mode tmode, return decl_rtl; case INTEGER_CST: - temp = immed_double_const (TREE_INT_CST_LOW (exp), - TREE_INT_CST_HIGH (exp), mode); - - return temp; - + { + tree type = TREE_TYPE (exp); + /* One could argue that GET_MODE_PRECISION (TYPE_MODE (type)) + should always be the same as TYPE_PRECISION (type). + However, it is not. Since we are converting from tree to + rtl, we have to expose this ugly truth here. */ + temp = immed_wide_int_const (wide_int::from + (exp, + GET_MODE_PRECISION (TYPE_MODE (type)), + TYPE_SIGN (type)), + TYPE_MODE (type)); + return temp; + } case VECTOR_CST: { tree tmp = NULL_TREE; @@ -9605,12 +9595,12 @@ expand_expr_real_1 (tree exp, rtx target, enum machine_mode tmode, might end up in a register. 
*/ if (mem_ref_refers_to_non_mem_p (exp)) { - HOST_WIDE_INT offset = mem_ref_offset (exp).low; + HOST_WIDE_INT offset = mem_ref_offset (exp).to_short_addr (); base = TREE_OPERAND (base, 0); if (offset == 0 - && host_integerp (TYPE_SIZE (type), 1) + && tree_fits_uhwi_p (TYPE_SIZE (type)) && (GET_MODE_BITSIZE (DECL_MODE (base)) - == TREE_INT_CST_LOW (TYPE_SIZE (type)))) + == tree_to_uhwi (TYPE_SIZE (type)))) return expand_expr (build1 (VIEW_CONVERT_EXPR, type, base), target, tmode, modifier); if (TYPE_MODE (type) == BLKmode) @@ -9640,8 +9630,7 @@ expand_expr_real_1 (tree exp, rtx target, enum machine_mode tmode, op0 = memory_address_addr_space (address_mode, op0, as); if (!integer_zerop (TREE_OPERAND (exp, 1))) { - rtx off - = immed_double_int_const (mem_ref_offset (exp), address_mode); + rtx off = immed_wide_int_const (mem_ref_offset (exp), address_mode); op0 = simplify_gen_binary (PLUS, address_mode, op0, off); } op0 = memory_address_addr_space (mode, op0, as); @@ -9789,11 +9778,11 @@ expand_expr_real_1 (tree exp, rtx target, enum machine_mode tmode, { tree type = TREE_TYPE (TREE_TYPE (init)); enum machine_mode mode = TYPE_MODE (type); - + if (GET_MODE_CLASS (mode) == MODE_INT && GET_MODE_SIZE (mode) == 1) return gen_int_mode (TREE_STRING_POINTER (init) - [TREE_INT_CST_LOW (index1)], + [tree_to_hwi (index1)], mode); } } @@ -9830,7 +9819,7 @@ expand_expr_real_1 (tree exp, rtx target, enum machine_mode tmode, op0 = expand_expr (value, target, tmode, modifier); if (DECL_BIT_FIELD (field)) { - HOST_WIDE_INT bitsize = TREE_INT_CST_LOW (DECL_SIZE (field)); + HOST_WIDE_INT bitsize = tree_to_hwi (DECL_SIZE (field)); enum machine_mode imode = TYPE_MODE (TREE_TYPE (field)); if (TYPE_UNSIGNED (TREE_TYPE (field))) @@ -10505,9 +10494,10 @@ reduce_to_bit_field_precision (rtx exp, rtx target, tree type) } else if (TYPE_UNSIGNED (type)) { - rtx mask = immed_double_int_const (double_int::mask (prec), - GET_MODE (exp)); - return expand_and (GET_MODE (exp), exp, mask, target); + enum 
machine_mode mode = GET_MODE (exp); + rtx mask = immed_wide_int_const + (wi::mask (prec, false, GET_MODE_PRECISION (mode)), mode); + return expand_and (mode, exp, mask, target); } else { @@ -10533,10 +10523,10 @@ is_aligning_offset (const_tree offset, const_tree exp) /* We must now have a BIT_AND_EXPR with a constant that is one less than power of 2 and which is larger than BIGGEST_ALIGNMENT. */ if (TREE_CODE (offset) != BIT_AND_EXPR - || !host_integerp (TREE_OPERAND (offset, 1), 1) + || !tree_fits_uhwi_p (TREE_OPERAND (offset, 1)) || compare_tree_int (TREE_OPERAND (offset, 1), BIGGEST_ALIGNMENT / BITS_PER_UNIT) <= 0 - || !exact_log2 (tree_low_cst (TREE_OPERAND (offset, 1), 1) + 1) < 0) + || !exact_log2 (tree_to_uhwi (TREE_OPERAND (offset, 1)) + 1) < 0) return 0; /* Look at the first operand of BIT_AND_EXPR and strip any conversion. @@ -10671,7 +10661,7 @@ string_constant (tree arg, tree *ptr_offset) and inside of the bounds of the string literal. */ offset = fold_convert (sizetype, offset); if (compare_tree_int (DECL_SIZE_UNIT (array), length) > 0 - && (! host_integerp (offset, 1) + && (! tree_fits_uhwi_p (offset) || compare_tree_int (offset, length) >= 0)) return 0; @@ -11081,8 +11071,8 @@ const_vector_from_tree (tree exp) RTVEC_ELT (v, i) = CONST_FIXED_FROM_FIXED_VALUE (TREE_FIXED_CST (elt), inner); else - RTVEC_ELT (v, i) = immed_double_int_const (tree_to_double_int (elt), - inner); + RTVEC_ELT (v, i) + = immed_wide_int_const (elt, TYPE_MODE (TREE_TYPE (elt))); } return gen_rtx_CONST_VECTOR (mode, v); diff --git a/gcc/expr.h b/gcc/expr.h index 218984402c5..f0bbb0ed5d6 100644 --- a/gcc/expr.h +++ b/gcc/expr.h @@ -26,7 +26,7 @@ along with GCC; see the file COPYING3. 
If not see #include "rtl.h" /* For optimize_size */ #include "flags.h" -/* For host_integerp, tree_low_cst, fold_convert, size_binop, ssize_int, +/* For tree_fits_uhwi_p, tree_to_uhwi, fold_convert, size_binop, ssize_int, TREE_CODE, TYPE_SIZE, int_size_in_bytes, */ #include "tree.h" /* For GET_MODE_BITSIZE, word_mode */ @@ -94,8 +94,8 @@ struct locate_and_pad_arg_data #define ADD_PARM_SIZE(TO, INC) \ do { \ tree inc = (INC); \ - if (host_integerp (inc, 0)) \ - (TO).constant += tree_low_cst (inc, 0); \ + if (tree_fits_shwi_p (inc)) \ + (TO).constant += tree_to_shwi (inc); \ else if ((TO).var == 0) \ (TO).var = fold_convert (ssizetype, inc); \ else \ @@ -106,8 +106,8 @@ do { \ #define SUB_PARM_SIZE(TO, DEC) \ do { \ tree dec = (DEC); \ - if (host_integerp (dec, 0)) \ - (TO).constant -= tree_low_cst (dec, 0); \ + if (tree_fits_shwi_p (dec)) \ + (TO).constant -= tree_to_shwi (dec); \ else if ((TO).var == 0) \ (TO).var = size_binop (MINUS_EXPR, ssize_int (0), \ fold_convert (ssizetype, dec)); \ diff --git a/gcc/final.c b/gcc/final.c index 641ebe48eee..8a9b48f631b 100644 --- a/gcc/final.c +++ b/gcc/final.c @@ -78,6 +78,7 @@ along with GCC; see the file COPYING3. If not see #include "cfgloop.h" #include "params.h" #include "tree-pretty-print.h" /* for dump_function_header */ +#include "wide-int-print.h" #ifdef XCOFF_DEBUGGING_INFO #include "xcoffout.h" /* Needed for external data @@ -3881,8 +3882,21 @@ output_addr_const (FILE *file, rtx x) output_addr_const (file, XEXP (x, 0)); break; + case CONST_WIDE_INT: + /* We do not know the mode here so we have to use a round about + way to build a wide-int to get it printed properly. */ + { + wide_int w = wide_int::from_array (&CONST_WIDE_INT_ELT (x, 0), + CONST_WIDE_INT_NUNITS (x), + CONST_WIDE_INT_NUNITS (x) + * HOST_BITS_PER_WIDE_INT, + false); + print_decs (w, file); + } + break; + case CONST_DOUBLE: - if (GET_MODE (x) == VOIDmode) + if (CONST_DOUBLE_AS_INT_P (x)) { /* We can use %d if the number is one word and positive. 
*/ if (CONST_DOUBLE_HIGH (x)) diff --git a/gcc/fixed-value.c b/gcc/fixed-value.c index 91ec5880119..cc238bd479e 100644 --- a/gcc/fixed-value.c +++ b/gcc/fixed-value.c @@ -23,6 +23,7 @@ along with GCC; see the file COPYING3. If not see #include "tm.h" #include "tree.h" #include "diagnostic-core.h" +#include "wide-int.h" /* Compare two fixed objects for bitwise identity. */ @@ -113,6 +114,8 @@ fixed_from_string (FIXED_VALUE_TYPE *f, const char *str, enum machine_mode mode) REAL_VALUE_TYPE real_value, fixed_value, base_value; unsigned int fbit; enum fixed_value_range_code temp; + bool fail; + wide_int w; f->mode = mode; fbit = GET_MODE_FBIT (mode); @@ -127,8 +130,9 @@ fixed_from_string (FIXED_VALUE_TYPE *f, const char *str, enum machine_mode mode) "large fixed-point constant implicitly truncated to fixed-point type"); real_2expN (&base_value, fbit, mode); real_arithmetic (&fixed_value, MULT_EXPR, &real_value, &base_value); - real_to_integer2 ((HOST_WIDE_INT *)&f->data.low, &f->data.high, - &fixed_value); + w = real_to_integer (&fixed_value, &fail, GET_MODE_PRECISION (mode)); + f->data.low = w.elt (0); + f->data.high = w.elt (1); if (temp == FIXED_MAX_EPS && ALL_FRACT_MODE_P (f->mode)) { @@ -153,9 +157,12 @@ fixed_to_decimal (char *str, const FIXED_VALUE_TYPE *f_orig, { REAL_VALUE_TYPE real_value, base_value, fixed_value; + signop sgn = UNSIGNED_FIXED_POINT_MODE_P (f_orig->mode) ? 
UNSIGNED : SIGNED; real_2expN (&base_value, GET_MODE_FBIT (f_orig->mode), f_orig->mode); - real_from_integer (&real_value, VOIDmode, f_orig->data.low, f_orig->data.high, - UNSIGNED_FIXED_POINT_MODE_P (f_orig->mode)); + real_from_integer (&real_value, VOIDmode, + wide_int::from (f_orig->data, + GET_MODE_PRECISION (f_orig->mode), sgn), + sgn); real_arithmetic (&fixed_value, RDIV_EXPR, &real_value, &base_value); real_to_decimal (str, &fixed_value, buf_size, 0, 1); } @@ -1041,12 +1048,17 @@ fixed_convert_from_real (FIXED_VALUE_TYPE *f, enum machine_mode mode, int i_f_bits = GET_MODE_IBIT (mode) + GET_MODE_FBIT (mode); unsigned int fbit = GET_MODE_FBIT (mode); enum fixed_value_range_code temp; + bool fail; + wide_int w; real_value = *a; f->mode = mode; real_2expN (&base_value, fbit, mode); real_arithmetic (&fixed_value, MULT_EXPR, &real_value, &base_value); - real_to_integer2 ((HOST_WIDE_INT *)&f->data.low, &f->data.high, &fixed_value); + + w = real_to_integer (&fixed_value, &fail, GET_MODE_PRECISION (mode)); + f->data.low = w.elt (0); + f->data.high = w.elt (1); temp = check_real_for_fixed_mode (&real_value, mode); if (temp == FIXED_UNDERFLOW) /* Minimum. */ { @@ -1091,9 +1103,11 @@ real_convert_from_fixed (REAL_VALUE_TYPE *r, enum machine_mode mode, { REAL_VALUE_TYPE base_value, fixed_value, real_value; + signop sgn = UNSIGNED_FIXED_POINT_MODE_P (f->mode) ? 
UNSIGNED : SIGNED; real_2expN (&base_value, GET_MODE_FBIT (f->mode), f->mode); - real_from_integer (&fixed_value, VOIDmode, f->data.low, f->data.high, - UNSIGNED_FIXED_POINT_MODE_P (f->mode)); + real_from_integer (&fixed_value, VOIDmode, + wide_int::from (f->data, GET_MODE_PRECISION (f->mode), + sgn), sgn); real_arithmetic (&real_value, RDIV_EXPR, &fixed_value, &base_value); real_convert (r, mode, &real_value); } diff --git a/gcc/fold-const.c b/gcc/fold-const.c index c4c09b65eb1..9bb0ff8ca25 100644 --- a/gcc/fold-const.c +++ b/gcc/fold-const.c @@ -107,7 +107,6 @@ static tree decode_field_reference (location_t, tree, HOST_WIDE_INT *, HOST_WIDE_INT *, enum machine_mode *, int *, int *, tree *, tree *); -static int all_ones_mask_p (const_tree, int); static tree sign_bit_p (tree, const_tree); static int simple_operand_p (const_tree); static bool simple_operand_p_2 (tree); @@ -164,26 +163,39 @@ protected_set_expr_location_unshare (tree x, location_t loc) return x; } -/* If ARG2 divides ARG1 with zero remainder, carries out the division - of type CODE and returns the quotient. - Otherwise returns NULL_TREE. */ +/* If ARG2 divides ARG1 with zero remainder, carries out the exact + division and returns the quotient. Otherwise returns + NULL_TREE. */ tree -div_if_zero_remainder (enum tree_code code, const_tree arg1, const_tree arg2) +div_if_zero_remainder (const_tree arg1, const_tree arg2) { - double_int quo, rem; - int uns; + wide_int quo; + wide_int warg1 = arg1; + wide_int warg2 = arg2; + signop sgn = TYPE_SIGN (TREE_TYPE (arg1)); + signop sgn2 = TYPE_SIGN (TREE_TYPE (arg2)); - /* The sign of the division is according to operand two, that - does the correct thing for POINTER_PLUS_EXPR where we want - a signed division. */ - uns = TYPE_UNSIGNED (TREE_TYPE (arg2)); - - quo = tree_to_double_int (arg1).divmod (tree_to_double_int (arg2), - uns, code, &rem); + if (sgn != sgn2) + { + /* When signedness mismatches, we promote the unsigned value to + a signed value. 
We preserve the value by extending the + precision by 1 bit, iff the top bit is set. */ + if (sgn == UNSIGNED) + { + if (wi::neg_p (warg1)) + warg1 = wide_int::from (warg1, warg1.get_precision () + 1, sgn); + sgn = SIGNED; + } + else + { + if (wi::neg_p (warg2)) + warg2 = wide_int::from (warg2, warg2.get_precision () + 1, sgn2); + } + } - if (rem.is_zero ()) - return build_int_cst_wide (TREE_TYPE (arg1), quo.low, quo.high); + if (wi::multiple_of_p (warg1, warg2, sgn, &quo)) + return wide_int_to_tree (TREE_TYPE (arg1), quo); return NULL_TREE; } @@ -357,8 +369,6 @@ negate_mathfn_p (enum built_in_function code) bool may_negate_without_overflow_p (const_tree t) { - unsigned HOST_WIDE_INT val; - unsigned int prec; tree type; gcc_assert (TREE_CODE (t) == INTEGER_CST); @@ -367,19 +377,7 @@ may_negate_without_overflow_p (const_tree t) if (TYPE_UNSIGNED (type)) return false; - prec = TYPE_PRECISION (type); - if (prec > HOST_BITS_PER_WIDE_INT) - { - if (TREE_INT_CST_LOW (t) != 0) - return true; - prec -= HOST_BITS_PER_WIDE_INT; - val = TREE_INT_CST_HIGH (t); - } - else - val = TREE_INT_CST_LOW (t); - if (prec < HOST_BITS_PER_WIDE_INT) - val &= ((unsigned HOST_WIDE_INT) 1 << prec) - 1; - return val != ((unsigned HOST_WIDE_INT) 1 << (prec - 1)); + return !wi::only_sign_bit_p (t); } /* Determine whether an expression T can be cheaply negated using @@ -519,13 +517,11 @@ negate_expr_p (tree t) break; case RSHIFT_EXPR: - /* Optimize -((int)x >> 31) into (unsigned)x >> 31. */ + /* Optimize -((int)x >> 31) into (unsigned)x >> 31 for int. */ if (TREE_CODE (TREE_OPERAND (t, 1)) == INTEGER_CST) { tree op1 = TREE_OPERAND (t, 1); - if (TREE_INT_CST_HIGH (op1) == 0 - && (unsigned HOST_WIDE_INT) (TYPE_PRECISION (type) - 1) - == TREE_INT_CST_LOW (op1)) + if (wi::eq_p (op1, TYPE_PRECISION (type) - 1)) return true; } break; @@ -736,13 +732,11 @@ fold_negate_expr (location_t loc, tree t) break; case RSHIFT_EXPR: - /* Optimize -((int)x >> 31) into (unsigned)x >> 31. 
*/ + /* Optimize -((int)x >> 31) into (unsigned)x >> 31 for int. */ if (TREE_CODE (TREE_OPERAND (t, 1)) == INTEGER_CST) { tree op1 = TREE_OPERAND (t, 1); - if (TREE_INT_CST_HIGH (op1) == 0 - && (unsigned HOST_WIDE_INT) (TYPE_PRECISION (type) - 1) - == TREE_INT_CST_LOW (op1)) + if (wi::eq_p (op1, TYPE_PRECISION (type) - 1)) { tree ntype = TYPE_UNSIGNED (type) ? signed_type_for (type) @@ -972,153 +966,151 @@ int_binop_types_match_p (enum tree_code code, const_tree type1, const_tree type2 to evaluate CODE at compile-time. */ static tree -int_const_binop_1 (enum tree_code code, const_tree arg1, const_tree arg2, +int_const_binop_1 (enum tree_code code, const_tree arg1, const_tree parg2, int overflowable) { - double_int op1, op2, res, tmp; + wide_int op1, arg2, res; tree t; tree type = TREE_TYPE (arg1); - bool uns = TYPE_UNSIGNED (type); + signop sign = TYPE_SIGN (type); bool overflow = false; - op1 = tree_to_double_int (arg1); - op2 = tree_to_double_int (arg2); + op1 = arg1; + arg2 = wide_int::from (parg2, TYPE_PRECISION (type), + TYPE_SIGN (TREE_TYPE (parg2))); switch (code) { case BIT_IOR_EXPR: - res = op1 | op2; + res = op1 | arg2; break; case BIT_XOR_EXPR: - res = op1 ^ op2; + res = op1 ^ arg2; break; case BIT_AND_EXPR: - res = op1 & op2; + res = op1 & arg2; break; case RSHIFT_EXPR: - res = op1.rshift (op2.to_shwi (), TYPE_PRECISION (type), !uns); - break; - case LSHIFT_EXPR: - /* It's unclear from the C standard whether shifts can overflow. - The following code ignores overflow; perhaps a C standard - interpretation ruling is needed. */ - res = op1.lshift (op2.to_shwi (), TYPE_PRECISION (type), !uns); + if (wi::neg_p (arg2)) + { + arg2 = -arg2; + if (code == RSHIFT_EXPR) + code = LSHIFT_EXPR; + else + code = RSHIFT_EXPR; + } + + if (code == RSHIFT_EXPR) + /* It's unclear from the C standard whether shifts can overflow. + The following code ignores overflow; perhaps a C standard + interpretation ruling is needed. 
*/ + res = wi::rshift (op1, arg2, sign, GET_MODE_BITSIZE (TYPE_MODE (type))); + else + res = wi::lshift (op1, arg2, GET_MODE_BITSIZE (TYPE_MODE (type))); break; - + case RROTATE_EXPR: - res = op1.rrotate (op2.to_shwi (), TYPE_PRECISION (type)); - break; - case LROTATE_EXPR: - res = op1.lrotate (op2.to_shwi (), TYPE_PRECISION (type)); + if (wi::neg_p (arg2)) + { + arg2 = -arg2; + if (code == RROTATE_EXPR) + code = LROTATE_EXPR; + else + code = RROTATE_EXPR; + } + + if (code == RROTATE_EXPR) + res = wi::rrotate (op1, arg2); + else + res = wi::lrotate (op1, arg2); break; case PLUS_EXPR: - res = op1.add_with_sign (op2, false, &overflow); + res = wi::add (op1, arg2, sign, &overflow); break; case MINUS_EXPR: - res = op1.sub_with_overflow (op2, &overflow); + res = wi::sub (op1, arg2, sign, &overflow); break; - + case MULT_EXPR: - res = op1.mul_with_sign (op2, false, &overflow); + res = wi::mul (op1, arg2, sign, &overflow); break; case MULT_HIGHPART_EXPR: - if (TYPE_PRECISION (type) > HOST_BITS_PER_WIDE_INT) - { - bool dummy_overflow; - if (TYPE_PRECISION (type) != 2 * HOST_BITS_PER_WIDE_INT) - return NULL_TREE; - op1.wide_mul_with_sign (op2, uns, &res, &dummy_overflow); - } - else - { - bool dummy_overflow; - /* MULT_HIGHPART_EXPR can't ever oveflow, as the multiplication - is performed in twice the precision of arguments. */ - tmp = op1.mul_with_sign (op2, false, &dummy_overflow); - res = tmp.rshift (TYPE_PRECISION (type), - 2 * TYPE_PRECISION (type), !uns); - } + res = wi::mul_high (op1, arg2, sign); break; case TRUNC_DIV_EXPR: - case FLOOR_DIV_EXPR: case CEIL_DIV_EXPR: case EXACT_DIV_EXPR: - /* This is a shortcut for a common special case. 
*/ - if (op2.high == 0 && (HOST_WIDE_INT) op2.low > 0 - && !TREE_OVERFLOW (arg1) - && !TREE_OVERFLOW (arg2) - && op1.high == 0 && (HOST_WIDE_INT) op1.low >= 0) - { - if (code == CEIL_DIV_EXPR) - op1.low += op2.low - 1; + res = wi::div_trunc (op1, arg2, sign, &overflow); + if (overflow) + return NULL_TREE; + break; - res.low = op1.low / op2.low, res.high = 0; - break; - } + case FLOOR_DIV_EXPR: + res = wi::div_floor (op1, arg2, sign, &overflow); + if (overflow) + return NULL_TREE; + break; - /* ... fall through ... */ + case CEIL_DIV_EXPR: + res = wi::div_ceil (op1, arg2, sign, &overflow); + if (overflow) + return NULL_TREE; + break; case ROUND_DIV_EXPR: - if (op2.is_zero ()) + res = wi::div_round (op1, arg2, sign, &overflow); + if (overflow) return NULL_TREE; - if (op2.is_one ()) - { - res = op1; - break; - } - if (op1 == op2 && !op1.is_zero ()) - { - res = double_int_one; - break; - } - res = op1.divmod_with_overflow (op2, uns, code, &tmp, &overflow); break; case TRUNC_MOD_EXPR: - case FLOOR_MOD_EXPR: case CEIL_MOD_EXPR: - /* This is a shortcut for a common special case. */ - if (op2.high == 0 && (HOST_WIDE_INT) op2.low > 0 - && !TREE_OVERFLOW (arg1) - && !TREE_OVERFLOW (arg2) - && op1.high == 0 && (HOST_WIDE_INT) op1.low >= 0) - { - if (code == CEIL_MOD_EXPR) - op1.low += op2.low - 1; - res.low = op1.low % op2.low, res.high = 0; - break; - } + res = wi::mod_trunc (op1, arg2, sign, &overflow); + if (overflow) + return NULL_TREE; + break; - /* ... fall through ... 
*/ + case FLOOR_MOD_EXPR: + res = wi::mod_floor (op1, arg2, sign, &overflow); + if (overflow) + return NULL_TREE; + break; + + case CEIL_MOD_EXPR: + res = wi::mod_ceil (op1, arg2, sign, &overflow); + if (overflow) + return NULL_TREE; + break; case ROUND_MOD_EXPR: - if (op2.is_zero ()) + res = wi::mod_round (op1, arg2, sign, &overflow); + if (overflow) return NULL_TREE; - tmp = op1.divmod_with_overflow (op2, uns, code, &res, &overflow); break; case MIN_EXPR: - res = op1.min (op2, uns); + res = wi::min (op1, arg2, sign); break; case MAX_EXPR: - res = op1.max (op2, uns); + res = wi::max (op1, arg2, sign); break; default: return NULL_TREE; } - t = force_fit_type_double (TREE_TYPE (arg1), res, overflowable, - (!uns && overflow) - | TREE_OVERFLOW (arg1) | TREE_OVERFLOW (arg2)); + t = force_fit_type (type, res, overflowable, + (((sign == SIGNED || overflowable == -1) + && overflow) + | TREE_OVERFLOW (arg1) | TREE_OVERFLOW (parg2))); return t; } @@ -1246,9 +1238,12 @@ const_binop (enum tree_code code, tree arg1, tree arg2) case LSHIFT_EXPR: case RSHIFT_EXPR: - f2.data.high = TREE_INT_CST_HIGH (arg2); - f2.data.low = TREE_INT_CST_LOW (arg2); - f2.mode = SImode; + { + wide_int w2 = arg2; + f2.data.high = w2.elt (1); + f2.data.low = w2.elt (0); + f2.mode = SImode; + } break; default: @@ -1429,13 +1424,13 @@ const_binop (enum tree_code code, tree arg1, tree arg2) if (code == VEC_LSHIFT_EXPR || code == VEC_RSHIFT_EXPR) { - if (!host_integerp (arg2, 1)) + if (!tree_fits_uhwi_p (arg2)) return NULL_TREE; - unsigned HOST_WIDE_INT shiftc = tree_low_cst (arg2, 1); - unsigned HOST_WIDE_INT outerc = tree_low_cst (TYPE_SIZE (type), 1); + unsigned HOST_WIDE_INT shiftc = tree_to_uhwi (arg2); + unsigned HOST_WIDE_INT outerc = tree_to_uhwi (TYPE_SIZE (type)); unsigned HOST_WIDE_INT innerc - = tree_low_cst (TYPE_SIZE (TREE_TYPE (type)), 1); + = tree_to_uhwi (TYPE_SIZE (TREE_TYPE (type))); if (shiftc >= outerc || (shiftc % innerc) != 0) return NULL_TREE; int offset = shiftc / innerc; @@ 
-1583,18 +1578,12 @@ size_diffop_loc (location_t loc, tree arg0, tree arg1) static tree fold_convert_const_int_from_int (tree type, const_tree arg1) { - tree t; - /* Given an integer constant, make new constant with new type, - appropriately sign-extended or truncated. */ - t = force_fit_type_double (type, tree_to_double_int (arg1), - !POINTER_TYPE_P (TREE_TYPE (arg1)), - (TREE_INT_CST_HIGH (arg1) < 0 - && (TYPE_UNSIGNED (type) - < TYPE_UNSIGNED (TREE_TYPE (arg1)))) - | TREE_OVERFLOW (arg1)); - - return t; + appropriately sign-extended or truncated. Use max_wide_int + so that any extension is done according ARG1's type. */ + return force_fit_type (type, max_wide_int (arg1), + !POINTER_TYPE_P (TREE_TYPE (arg1)), + TREE_OVERFLOW (arg1)); } /* A subroutine of fold_convert_const handling conversions a REAL_CST @@ -1603,7 +1592,7 @@ fold_convert_const_int_from_int (tree type, const_tree arg1) static tree fold_convert_const_int_from_real (enum tree_code code, tree type, const_tree arg1) { - int overflow = 0; + bool overflow = false; tree t; /* The following code implements the floating point to integer @@ -1615,7 +1604,7 @@ fold_convert_const_int_from_real (enum tree_code code, tree type, const_tree arg C and C++ standards that simply state that the behavior of FP-to-integer conversion is unspecified upon overflow. */ - double_int val; + wide_int val; REAL_VALUE_TYPE r; REAL_VALUE_TYPE x = TREE_REAL_CST (arg1); @@ -1632,8 +1621,8 @@ fold_convert_const_int_from_real (enum tree_code code, tree type, const_tree arg /* If R is NaN, return zero and show we have an overflow. 
*/ if (REAL_VALUE_ISNAN (r)) { - overflow = 1; - val = double_int_zero; + overflow = true; + val = max_wide_int (0); } /* See if R is less than the lower bound or greater than the @@ -1645,8 +1634,8 @@ fold_convert_const_int_from_real (enum tree_code code, tree type, const_tree arg REAL_VALUE_TYPE l = real_value_from_int_cst (NULL_TREE, lt); if (REAL_VALUES_LESS (r, l)) { - overflow = 1; - val = tree_to_double_int (lt); + overflow = true; + val = max_wide_int (lt); } } @@ -1658,16 +1647,16 @@ fold_convert_const_int_from_real (enum tree_code code, tree type, const_tree arg REAL_VALUE_TYPE u = real_value_from_int_cst (NULL_TREE, ut); if (REAL_VALUES_LESS (u, r)) { - overflow = 1; - val = tree_to_double_int (ut); + overflow = true; + val = max_wide_int (ut); } } } if (! overflow) - real_to_integer2 ((HOST_WIDE_INT *) &val.low, &val.high, &r); + val = real_to_integer (&r, &overflow, TYPE_PRECISION (type)); - t = force_fit_type_double (type, val, -1, overflow | TREE_OVERFLOW (arg1)); + t = force_fit_type (type, val, -1, overflow | TREE_OVERFLOW (arg1)); return t; } @@ -1710,12 +1699,12 @@ fold_convert_const_int_from_fixed (tree type, const_tree arg1) /* Given a fixed-point constant, make new constant with new type, appropriately sign-extended or truncated. */ - t = force_fit_type_double (type, temp, -1, - (temp.is_negative () - && (TYPE_UNSIGNED (type) - < TYPE_UNSIGNED (TREE_TYPE (arg1)))) - | TREE_OVERFLOW (arg1)); - + t = force_fit_type (type, temp, -1, + (temp.is_negative () + && (TYPE_UNSIGNED (type) + < TYPE_UNSIGNED (TREE_TYPE (arg1)))) + | TREE_OVERFLOW (arg1)); + return t; } @@ -1797,9 +1786,18 @@ fold_convert_const_fixed_from_int (tree type, const_tree arg1) FIXED_VALUE_TYPE value; tree t; bool overflow_p; + double_int di; + + gcc_assert (TREE_INT_CST_NUNITS (arg1) <= 2); + + di.low = TREE_INT_CST_ELT (arg1, 0); + if (TREE_INT_CST_NUNITS (arg1) == 1) + di.high = (HOST_WIDE_INT)di.low < 0 ? 
(HOST_WIDE_INT)-1 : 0; + else + di.high = TREE_INT_CST_ELT (arg1, 1); overflow_p = fixed_convert_from_int (&value, TYPE_MODE (type), - TREE_INT_CST (arg1), + di, TYPE_UNSIGNED (TREE_TYPE (arg1)), TYPE_SATURATING (type)); t = build_fixed (type, value); @@ -3418,8 +3416,8 @@ make_bit_field_ref (location_t loc, tree inner, tree type, tree size = TYPE_SIZE (TREE_TYPE (inner)); if ((INTEGRAL_TYPE_P (TREE_TYPE (inner)) || POINTER_TYPE_P (TREE_TYPE (inner))) - && host_integerp (size, 0) - && tree_low_cst (size, 0) == bitsize) + && tree_fits_shwi_p (size) + && tree_to_shwi (size) == bitsize) return fold_convert_loc (loc, type, inner); } @@ -3709,23 +3707,24 @@ decode_field_reference (location_t loc, tree exp, HOST_WIDE_INT *pbitsize, } /* Return nonzero if MASK represents a mask of SIZE ones in the low-order - bit positions. */ + bit positions and MASK is SIGNED. */ static int -all_ones_mask_p (const_tree mask, int size) +all_ones_mask_p (const_tree mask, unsigned int size) { tree type = TREE_TYPE (mask); unsigned int precision = TYPE_PRECISION (type); - tree tmask; - tmask = build_int_cst_type (signed_type_for (type), -1); + /* If this function returns true when the type of the mask is + UNSIGNED, then there will be errors. In particular see + gcc.c-torture/execute/990326-1.c. There does not appear to be + any documentation paper trail as to why this is so. But the pre + wide-int worked with that restriction and it has been preserved + here. 
*/ + if (size > precision || TYPE_SIGN (type) == UNSIGNED) + return false; - return - tree_int_cst_equal (mask, - const_binop (RSHIFT_EXPR, - const_binop (LSHIFT_EXPR, tmask, - size_int (precision - size)), - size_int (precision - size))); + return wi::mask (size, false, precision) == mask; } /* Subroutine for fold: determine if VAL is the INTEGER_CONST that @@ -3737,8 +3736,6 @@ all_ones_mask_p (const_tree mask, int size) static tree sign_bit_p (tree exp, const_tree val) { - unsigned HOST_WIDE_INT mask_lo, lo; - HOST_WIDE_INT mask_hi, hi; int width; tree t; @@ -3753,27 +3750,7 @@ sign_bit_p (tree exp, const_tree val) return NULL_TREE; width = TYPE_PRECISION (t); - if (width > HOST_BITS_PER_WIDE_INT) - { - hi = (unsigned HOST_WIDE_INT) 1 << (width - HOST_BITS_PER_WIDE_INT - 1); - lo = 0; - - mask_hi = (HOST_WIDE_INT_M1U >> (HOST_BITS_PER_DOUBLE_INT - width)); - mask_lo = -1; - } - else - { - hi = 0; - lo = (unsigned HOST_WIDE_INT) 1 << (width - 1); - - mask_hi = 0; - mask_lo = (HOST_WIDE_INT_M1U >> (HOST_BITS_PER_WIDE_INT - width)); - } - - /* We mask off those bits beyond TREE_TYPE (exp) so that we can - treat VAL as if it were unsigned. */ - if ((TREE_INT_CST_HIGH (val) & mask_hi) == hi - && (TREE_INT_CST_LOW (val) & mask_lo) == lo) + if (wi::only_sign_bit_p (val, width)) return exp; /* Handle extension from a narrower type. */ @@ -4018,7 +3995,7 @@ make_range_step (location_t loc, enum tree_code code, tree arg0, tree arg1, { in_p = ! 
in_p; high = range_binop (MINUS_EXPR, NULL_TREE, low, 0, - integer_one_node, 0); + build_int_cst (TREE_TYPE (low), 1), 0); low = build_int_cst (arg0_type, 0); } } @@ -4088,9 +4065,9 @@ make_range_step (location_t loc, enum tree_code code, tree arg0, tree arg1, if (n_low && n_high && tree_int_cst_lt (n_high, n_low)) { low = range_binop (PLUS_EXPR, arg0_type, n_high, 0, - integer_one_node, 0); + build_int_cst (TREE_TYPE (n_high), 1), 0); high = range_binop (MINUS_EXPR, arg0_type, n_low, 0, - integer_one_node, 0); + build_int_cst (TREE_TYPE (n_low), 1), 0); /* If the range is of the form +/- [ x+1, x ], we won't be able to normalize it. But then, it represents the @@ -4328,23 +4305,10 @@ build_range_check (location_t loc, tree type, tree exp, int in_p, /* Optimize (c>=1) && (c<=127) into (signed char)c > 0. */ if (integer_onep (low) && TREE_CODE (high) == INTEGER_CST) { - unsigned HOST_WIDE_INT lo; - HOST_WIDE_INT hi; - int prec; + int prec = TYPE_PRECISION (etype); + wide_int osb = wi::set_bit_in_zero (prec - 1, prec) - 1; - prec = TYPE_PRECISION (etype); - if (prec <= HOST_BITS_PER_WIDE_INT) - { - hi = 0; - lo = ((unsigned HOST_WIDE_INT) 1 << (prec - 1)) - 1; - } - else - { - hi = ((HOST_WIDE_INT) 1 << (prec - HOST_BITS_PER_WIDE_INT - 1)) - 1; - lo = HOST_WIDE_INT_M1U; - } - - if (TREE_INT_CST_HIGH (high) == hi && TREE_INT_CST_LOW (high) == lo) + if (osb == high) { if (TYPE_UNSIGNED (etype)) { @@ -4378,7 +4342,7 @@ build_range_check (location_t loc, tree type, tree exp, int in_p, utype = unsigned_type_for (etype); maxv = fold_convert_loc (loc, utype, TYPE_MAX_VALUE (etype)); maxv = range_binop (PLUS_EXPR, NULL_TREE, maxv, 1, - integer_one_node, 1); + build_int_cst (TREE_TYPE (maxv), 1), 1); minv = fold_convert_loc (loc, utype, TYPE_MIN_VALUE (etype)); if (integer_zerop (range_binop (NE_EXPR, integer_type_node, @@ -4426,7 +4390,8 @@ range_predecessor (tree val) && operand_equal_p (val, TYPE_MIN_VALUE (type), 0)) return 0; else - return range_binop (MINUS_EXPR, 
NULL_TREE, val, 0, integer_one_node, 0); + return range_binop (MINUS_EXPR, NULL_TREE, val, 0, + build_int_cst (TREE_TYPE (val), 1), 0); } /* Return the successor of VAL in its type, handling the infinite case. */ @@ -4440,7 +4405,8 @@ range_successor (tree val) && operand_equal_p (val, TYPE_MAX_VALUE (type), 0)) return 0; else - return range_binop (PLUS_EXPR, NULL_TREE, val, 0, integer_one_node, 0); + return range_binop (PLUS_EXPR, NULL_TREE, val, 0, + build_int_cst (TREE_TYPE (val), 1), 0); } /* Given two ranges, see if we can merge them into one. Return 1 if we @@ -4620,7 +4586,8 @@ merge_ranges (int *pin_p, tree *plow, tree *phigh, int in0_p, tree low0, if (TYPE_UNSIGNED (TREE_TYPE (high1)) && integer_zerop (range_binop (PLUS_EXPR, NULL_TREE, high1, 1, - integer_one_node, 1))) + build_int_cst (TREE_TYPE (high1), 1), + 1))) high1 = 0; break; default: @@ -5072,8 +5039,7 @@ unextend (tree c, int p, int unsignedp, tree mask) /* We work by getting just the sign bit into the low-order bit, then into the high-order bit, then sign-extend. We then XOR that value with C. */ - temp = const_binop (RSHIFT_EXPR, c, size_int (p - 1)); - temp = const_binop (BIT_AND_EXPR, temp, size_int (1)); + temp = build_int_cst (TREE_TYPE (c), wi::extract_uhwi (c, p - 1, 1)); /* We must use a signed type in order to get an arithmetic right shift. However, we must also avoid introducing accidental overflows, so that @@ -5879,8 +5845,7 @@ extract_muldiv_1 (tree t, tree c, enum tree_code code, tree wide_type, && (tcode == RSHIFT_EXPR || TYPE_UNSIGNED (TREE_TYPE (op0))) /* const_binop may not detect overflow correctly, so check for it explicitly here. 
*/ - && TYPE_PRECISION (TREE_TYPE (size_one_node)) > TREE_INT_CST_LOW (op1) - && TREE_INT_CST_HIGH (op1) == 0 + && wi::gtu_p (TYPE_PRECISION (TREE_TYPE (size_one_node)), op1) && 0 != (t1 = fold_convert (ctype, const_binop (LSHIFT_EXPR, size_one_node, @@ -6026,21 +5991,17 @@ extract_muldiv_1 (tree t, tree c, enum tree_code code, tree wide_type, assuming no overflow. */ if (tcode == code) { - double_int mul; + wide_int mul; bool overflow_p; - unsigned prec = TYPE_PRECISION (ctype); - bool uns = TYPE_UNSIGNED (ctype); - double_int diop1 = tree_to_double_int (op1).ext (prec, uns); - double_int dic = tree_to_double_int (c).ext (prec, uns); - mul = diop1.mul_with_sign (dic, false, &overflow_p); - overflow_p = ((!uns && overflow_p) - | TREE_OVERFLOW (c) | TREE_OVERFLOW (op1)); - if (!double_int_fits_to_tree_p (ctype, mul) - && ((uns && tcode != MULT_EXPR) || !uns)) - overflow_p = 1; + signop sign = TYPE_SIGN (ctype); + mul = wi::mul_full (op1, c, sign); + overflow_p = TREE_OVERFLOW (c) | TREE_OVERFLOW (op1); + if (!wi::fits_to_tree_p (mul, ctype) + && ((sign == UNSIGNED && tcode != MULT_EXPR) || sign == SIGNED)) + overflow_p = true; if (!overflow_p) return fold_build2 (tcode, ctype, fold_convert (ctype, op0), - double_int_to_tree (ctype, mul)); + wide_int_to_tree (ctype, mul)); } /* If these operations "cancel" each other, we have the main @@ -6439,29 +6400,27 @@ fold_div_compare (location_t loc, tree prod, tmp, hi, lo; tree arg00 = TREE_OPERAND (arg0, 0); tree arg01 = TREE_OPERAND (arg0, 1); - double_int val; - bool unsigned_p = TYPE_UNSIGNED (TREE_TYPE (arg0)); - bool neg_overflow; + wide_int val; + signop sign = TYPE_SIGN (TREE_TYPE (arg0)); + bool neg_overflow = false; bool overflow; /* We have to do this the hard way to detect unsigned overflow. 
prod = int_const_binop (MULT_EXPR, arg01, arg1); */ - val = TREE_INT_CST (arg01) - .mul_with_sign (TREE_INT_CST (arg1), unsigned_p, &overflow); - prod = force_fit_type_double (TREE_TYPE (arg00), val, -1, overflow); + val = wi::mul (arg01, arg1, sign, &overflow); + prod = force_fit_type (TREE_TYPE (arg00), val, -1, overflow); neg_overflow = false; - if (unsigned_p) + if (sign == UNSIGNED) { tmp = int_const_binop (MINUS_EXPR, arg01, build_int_cst (TREE_TYPE (arg01), 1)); lo = prod; /* Likewise hi = int_const_binop (PLUS_EXPR, prod, tmp). */ - val = TREE_INT_CST (prod) - .add_with_sign (TREE_INT_CST (tmp), unsigned_p, &overflow); - hi = force_fit_type_double (TREE_TYPE (arg00), val, - -1, overflow | TREE_OVERFLOW (prod)); + val = wi::add (prod, tmp, sign, &overflow); + hi = force_fit_type (TREE_TYPE (arg00), val, + -1, overflow | TREE_OVERFLOW (prod)); } else if (tree_int_cst_sgn (arg01) >= 0) { @@ -6652,12 +6611,10 @@ fold_single_bit_test (location_t loc, enum tree_code code, not overflow, adjust BITNUM and INNER. 
*/ if (TREE_CODE (inner) == RSHIFT_EXPR && TREE_CODE (TREE_OPERAND (inner, 1)) == INTEGER_CST - && host_integerp (TREE_OPERAND (inner, 1), 1) - && bitnum < TYPE_PRECISION (type) - && (TREE_INT_CST_LOW (TREE_OPERAND (inner, 1)) - < (unsigned) (TYPE_PRECISION (type) - bitnum))) + && wi::ltu_p (wi::add (TREE_OPERAND (inner, 1), bitnum), + TYPE_PRECISION (type))) { - bitnum += TREE_INT_CST_LOW (TREE_OPERAND (inner, 1)); + bitnum += tree_to_hwi (TREE_OPERAND (inner, 1)); inner = TREE_OPERAND (inner, 0); } @@ -6915,8 +6872,7 @@ fold_sign_changed_comparison (location_t loc, enum tree_code code, tree type, return NULL_TREE; if (TREE_CODE (arg1) == INTEGER_CST) - arg1 = force_fit_type_double (inner_type, tree_to_double_int (arg1), - 0, TREE_OVERFLOW (arg1)); + arg1 = force_fit_type (inner_type, arg1, 0, TREE_OVERFLOW (arg1)); else arg1 = fold_convert_loc (loc, inner_type, arg1); @@ -7004,7 +6960,7 @@ try_move_mult_to_index (location_t loc, tree addr, tree op1) else { /* Try if delta is a multiple of step. */ - tree tmp = div_if_zero_remainder (EXACT_DIV_EXPR, op1, step); + tree tmp = div_if_zero_remainder (op1, step); if (! tmp) goto cont; delta = tmp; @@ -7076,7 +7032,7 @@ cont: else { /* Try if delta is a multiple of step. */ - tree tmp = div_if_zero_remainder (EXACT_DIV_EXPR, op1, step); + tree tmp = div_if_zero_remainder (op1, step); if (! tmp) continue; delta = tmp; @@ -7232,7 +7188,8 @@ fold_plusminus_mult_expr (location_t loc, enum tree_code code, tree type, arg10 = build_one_cst (type); /* As we canonicalize A - 2 to A + -2 get rid of that sign for the purpose of this canonicalization. */ - if (TREE_INT_CST_HIGH (arg1) == -1 + if (TYPE_SIGN (TREE_TYPE (arg1)) == SIGNED + && wi::neg_p (arg1) && negate_expr_p (arg1) && code == PLUS_EXPR) { @@ -7264,14 +7221,14 @@ fold_plusminus_mult_expr (location_t loc, enum tree_code code, tree type, /* No identical multiplicands; see if we can find a common power-of-two factor in non-power-of-two multiplies. 
This can help in multi-dimensional array access. */ - else if (host_integerp (arg01, 0) - && host_integerp (arg11, 0)) + else if (tree_fits_shwi_p (arg01) + && tree_fits_shwi_p (arg11)) { HOST_WIDE_INT int01, int11, tmp; bool swap = false; tree maybe_same; - int01 = TREE_INT_CST_LOW (arg01); - int11 = TREE_INT_CST_LOW (arg11); + int01 = tree_to_shwi (arg01); + int11 = tree_to_shwi (arg11); /* Move min of absolute values to int11. */ if (absu_hwi (int01) < absu_hwi (int11)) @@ -7330,11 +7287,7 @@ native_encode_int (const_tree expr, unsigned char *ptr, int len) for (byte = 0; byte < total_bytes; byte++) { int bitpos = byte * BITS_PER_UNIT; - if (bitpos < HOST_BITS_PER_WIDE_INT) - value = (unsigned char) (TREE_INT_CST_LOW (expr) >> bitpos); - else - value = (unsigned char) (TREE_INT_CST_HIGH (expr) - >> (bitpos - HOST_BITS_PER_WIDE_INT)); + value = wi::extract_uhwi (expr, bitpos, BITS_PER_UNIT); if (total_bytes > UNITS_PER_WORD) { @@ -7497,9 +7450,9 @@ native_encode_string (const_tree expr, unsigned char *ptr, int len) if (TREE_CODE (type) != ARRAY_TYPE || TREE_CODE (TREE_TYPE (type)) != INTEGER_TYPE || GET_MODE_BITSIZE (TYPE_MODE (TREE_TYPE (type))) != BITS_PER_UNIT - || !host_integerp (TYPE_SIZE_UNIT (type), 0)) + || !tree_fits_shwi_p (TYPE_SIZE_UNIT (type))) return 0; - total_bytes = tree_low_cst (TYPE_SIZE_UNIT (type), 0); + total_bytes = tree_to_shwi (TYPE_SIZE_UNIT (type)); if (total_bytes > len) return 0; if (TREE_STRING_LENGTH (expr) < total_bytes) @@ -7556,15 +7509,15 @@ static tree native_interpret_int (tree type, const unsigned char *ptr, int len) { int total_bytes = GET_MODE_SIZE (TYPE_MODE (type)); - double_int result; + wide_int result; if (total_bytes > len || total_bytes * BITS_PER_UNIT > HOST_BITS_PER_DOUBLE_INT) return NULL_TREE; - result = double_int::from_buffer (ptr, total_bytes); + result = wi::from_buffer (ptr, total_bytes); - return double_int_to_tree (type, result); + return wide_int_to_tree (type, result); } @@ -8107,11 +8060,11 @@ 
fold_unary_loc (location_t loc, enum tree_code code, tree type, tree op0) change = 1; else if (TYPE_PRECISION (TREE_TYPE (and1)) <= HOST_BITS_PER_WIDE_INT - && host_integerp (and1, 1)) + && tree_fits_uhwi_p (and1)) { unsigned HOST_WIDE_INT cst; - cst = tree_low_cst (and1, 1); + cst = tree_to_uhwi (and1); cst &= HOST_WIDE_INT_M1U << (TYPE_PRECISION (TREE_TYPE (and1)) - 1); change = (cst == 0); @@ -8129,10 +8082,9 @@ fold_unary_loc (location_t loc, enum tree_code code, tree type, tree op0) } if (change) { - tem = force_fit_type_double (type, tree_to_double_int (and1), - 0, TREE_OVERFLOW (and1)); + tem = force_fit_type (type, and1, 0, TREE_OVERFLOW (and1)); return fold_build2_loc (loc, BIT_AND_EXPR, type, - fold_convert_loc (loc, type, and0), tem); + fold_convert_loc (loc, type, and0), tem); } } @@ -8912,7 +8864,7 @@ maybe_canonicalize_comparison (location_t loc, enum tree_code code, tree type, static bool pointer_may_wrap_p (tree base, tree offset, HOST_WIDE_INT bitpos) { - double_int di_offset, total; + wide_int wi_offset, total; if (!POINTER_TYPE_P (TREE_TYPE (base))) return true; @@ -8920,20 +8872,21 @@ pointer_may_wrap_p (tree base, tree offset, HOST_WIDE_INT bitpos) if (bitpos < 0) return true; + int precision = TYPE_PRECISION (TREE_TYPE (base)); if (offset == NULL_TREE) - di_offset = double_int_zero; + wi_offset = wi::zero (precision); else if (TREE_CODE (offset) != INTEGER_CST || TREE_OVERFLOW (offset)) return true; else - di_offset = TREE_INT_CST (offset); + wi_offset = offset; bool overflow; - double_int units = double_int::from_uhwi (bitpos / BITS_PER_UNIT); - total = di_offset.add_with_sign (units, true, &overflow); + wide_int units = wi::shwi (bitpos / BITS_PER_UNIT, precision); + total = wi::add (wi_offset, units, UNSIGNED, &overflow); if (overflow) return true; - if (total.high != 0) + if (!wi::fits_uhwi_p (total)) return true; HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (TREE_TYPE (base))); @@ -8951,7 +8904,7 @@ pointer_may_wrap_p (tree base, 
tree offset, HOST_WIDE_INT bitpos) size = base_size; } - return total.low > (unsigned HOST_WIDE_INT) size; + return total.to_uhwi () > (unsigned HOST_WIDE_INT) size; } /* Subroutine of fold_binary. This routine performs all of the @@ -9087,7 +9040,7 @@ fold_comparison (location_t loc, enum tree_code code, tree type, indirect_base0 = true; } offset0 = TREE_OPERAND (arg0, 1); - if (host_integerp (offset0, 0)) + if (tree_fits_shwi_p (offset0)) { HOST_WIDE_INT off = size_low_cst (offset0); if ((HOST_WIDE_INT) (((unsigned HOST_WIDE_INT) off) @@ -9121,7 +9074,7 @@ fold_comparison (location_t loc, enum tree_code code, tree type, indirect_base1 = true; } offset1 = TREE_OPERAND (arg1, 1); - if (host_integerp (offset1, 0)) + if (tree_fits_shwi_p (offset1)) { HOST_WIDE_INT off = size_low_cst (offset1); if ((HOST_WIDE_INT) (((unsigned HOST_WIDE_INT) off) @@ -9759,7 +9712,7 @@ get_pointer_modulus_and_residue (tree expr, unsigned HOST_WIDE_INT *residue, inner_code = TREE_CODE (op1); if (inner_code == INTEGER_CST) { - *residue += TREE_INT_CST_LOW (op1); + *residue += tree_to_hwi (op1); return modulus; } else if (inner_code == MULT_EXPR) @@ -9770,7 +9723,7 @@ get_pointer_modulus_and_residue (tree expr, unsigned HOST_WIDE_INT *residue, unsigned HOST_WIDE_INT align; /* Compute the greatest power-of-2 divisor of op1. */ - align = TREE_INT_CST_LOW (op1); + align = tree_to_hwi (op1); align &= -align; /* If align is non-zero and less than *modulus, replace @@ -9945,17 +9898,15 @@ exact_inverse (tree type, tree cst) /* Mask out the tz least significant bits of X of type TYPE where tz is the number of trailing zeroes in Y. 
*/ -static double_int -mask_with_tz (tree type, double_int x, double_int y) +static wide_int +mask_with_tz (tree type, wide_int x, wide_int y) { - int tz = y.trailing_zeros (); - + int tz = wi::ctz (y); if (tz > 0) { - double_int mask; + wide_int mask; - mask = ~double_int::mask (tz); - mask = mask.ext (TYPE_PRECISION (type), TYPE_UNSIGNED (type)); + mask = wi::mask (tz, true, TYPE_PRECISION (type)); return mask & x; } return x; @@ -10499,9 +10450,7 @@ fold_binary_loc (location_t loc, code11 = TREE_CODE (tree11); if (code01 == INTEGER_CST && code11 == INTEGER_CST - && TREE_INT_CST_HIGH (tree01) == 0 - && TREE_INT_CST_HIGH (tree11) == 0 - && ((TREE_INT_CST_LOW (tree01) + TREE_INT_CST_LOW (tree11)) + && (wi::add (tree01, tree11) == element_precision (TREE_TYPE (TREE_OPERAND (arg0, 0))))) { tem = build2_loc (loc, LROTATE_EXPR, @@ -11283,22 +11232,21 @@ fold_binary_loc (location_t loc, && TREE_CODE (arg1) == INTEGER_CST && TREE_CODE (TREE_OPERAND (arg0, 1)) == INTEGER_CST) { - double_int c1, c2, c3, msk; + wide_int c1, c2, c3, msk; int width = TYPE_PRECISION (type), w; bool try_simplify = true; - - c1 = tree_to_double_int (TREE_OPERAND (arg0, 1)); - c2 = tree_to_double_int (arg1); + c1 = TREE_OPERAND (arg0, 1); + c2 = arg1; /* If (C1&C2) == C1, then (X&C1)|C2 becomes (X,C2). */ if ((c1 & c2) == c1) return omit_one_operand_loc (loc, type, arg1, TREE_OPERAND (arg0, 0)); - msk = double_int::mask (width); + msk = wi::mask (width, false, TYPE_PRECISION (TREE_TYPE (arg1))); /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2. 
*/ - if (msk.and_not (c1 | c2).is_zero ()) + if (msk.and_not (c1 | c2) == 0) return fold_build2_loc (loc, BIT_IOR_EXPR, type, TREE_OPERAND (arg0, 0), arg1); @@ -11308,16 +11256,13 @@ fold_binary_loc (location_t loc, c1 &= msk; c2 &= msk; c3 = c1.and_not (c2); - for (w = BITS_PER_UNIT; - w <= width && w <= HOST_BITS_PER_WIDE_INT; - w <<= 1) + for (w = BITS_PER_UNIT; w <= width; w <<= 1) { - unsigned HOST_WIDE_INT mask - = HOST_WIDE_INT_M1U >> (HOST_BITS_PER_WIDE_INT - w); - if (((c1.low | c2.low) & mask) == mask - && (c1.low & ~mask) == 0 && c1.high == 0) + wide_int mask = wi::mask (width - w, false, + TYPE_PRECISION (type)); + if (((c1 | c2) & mask) == mask && c1.and_not (mask) == 0) { - c3 = double_int::from_uhwi (mask); + c3 = mask; break; } } @@ -11330,7 +11275,7 @@ fold_binary_loc (location_t loc, == INTEGER_CST) { tree t = TREE_OPERAND (TREE_OPERAND (arg0, 0), 1); - double_int masked = mask_with_tz (type, c3, tree_to_double_int (t)); + wide_int masked = mask_with_tz (type, c3, t); try_simplify = (masked != c1); } @@ -11339,8 +11284,8 @@ fold_binary_loc (location_t loc, return fold_build2_loc (loc, BIT_IOR_EXPR, type, fold_build2_loc (loc, BIT_AND_EXPR, type, TREE_OPERAND (arg0, 0), - double_int_to_tree (type, - c3)), + wide_int_to_tree (type, + c3)), arg1); } @@ -11710,12 +11655,11 @@ fold_binary_loc (location_t loc, multiple of 1 << CST. 
*/ if (TREE_CODE (arg1) == INTEGER_CST) { - double_int cst1 = tree_to_double_int (arg1); - double_int ncst1 = (-cst1).ext (TYPE_PRECISION (TREE_TYPE (arg1)), - TYPE_UNSIGNED (TREE_TYPE (arg1))); + wide_int cst1 = arg1; + wide_int ncst1 = -cst1; if ((cst1 & ncst1) == ncst1 && multiple_of_p (type, arg0, - double_int_to_tree (TREE_TYPE (arg1), ncst1))) + wide_int_to_tree (TREE_TYPE (arg1), ncst1))) return fold_convert_loc (loc, type, arg0); } @@ -11725,16 +11669,14 @@ fold_binary_loc (location_t loc, && TREE_CODE (arg0) == MULT_EXPR && TREE_CODE (TREE_OPERAND (arg0, 1)) == INTEGER_CST) { - double_int masked - = mask_with_tz (type, tree_to_double_int (arg1), - tree_to_double_int (TREE_OPERAND (arg0, 1))); + wide_int masked = mask_with_tz (type, arg1, TREE_OPERAND (arg0, 1)); - if (masked.is_zero ()) + if (masked == 0) return omit_two_operands_loc (loc, type, build_zero_cst (type), arg0, arg1); - else if (masked != tree_to_double_int (arg1)) + else if (masked != arg1) return fold_build2_loc (loc, code, type, op0, - double_int_to_tree (type, masked)); + wide_int_to_tree (type, masked)); } /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M, @@ -11744,10 +11686,10 @@ fold_binary_loc (location_t loc, and for - instead of + (or unary - instead of +) and/or ^ instead of |. If B is constant and (B & M) == 0, fold into A & M. */ - if (host_integerp (arg1, 1)) + if (TREE_CODE (arg1) == INTEGER_CST) { - unsigned HOST_WIDE_INT cst1 = tree_low_cst (arg1, 1); - if (~cst1 && (cst1 & (cst1 + 1)) == 0 + wide_int cst1 = arg1; + if ((~cst1 != 0) && (cst1 & (cst1 + 1)) == 0 && INTEGRAL_TYPE_P (TREE_TYPE (arg0)) && (TREE_CODE (arg0) == PLUS_EXPR || TREE_CODE (arg0) == MINUS_EXPR @@ -11757,8 +11699,8 @@ fold_binary_loc (location_t loc, { tree pmop[2]; int which = 0; - unsigned HOST_WIDE_INT cst0; - + wide_int cst0; + /* Now we know that arg0 is (C + D) or (C - D) or -C and arg1 (M) is == (1LL << cst) - 1. Store C into PMOP[0] and D into PMOP[1]. 
*/ @@ -11769,12 +11711,10 @@ fold_binary_loc (location_t loc, pmop[1] = TREE_OPERAND (arg0, 1); which = 1; } - - if (!host_integerp (TYPE_MAX_VALUE (TREE_TYPE (arg0)), 1) - || (tree_low_cst (TYPE_MAX_VALUE (TREE_TYPE (arg0)), 1) - & cst1) != cst1) + + if ((wi::max_value (TREE_TYPE (arg0)) & cst1) != cst1) which = -1; - + for (; which >= 0; which--) switch (TREE_CODE (pmop[which])) { @@ -11784,9 +11724,7 @@ fold_binary_loc (location_t loc, if (TREE_CODE (TREE_OPERAND (pmop[which], 1)) != INTEGER_CST) break; - /* tree_low_cst not used, because we don't care about - the upper bits. */ - cst0 = TREE_INT_CST_LOW (TREE_OPERAND (pmop[which], 1)); + cst0 = TREE_OPERAND (pmop[which], 1); cst0 &= cst1; if (TREE_CODE (pmop[which]) == BIT_AND_EXPR) { @@ -11805,13 +11743,13 @@ fold_binary_loc (location_t loc, omitted (assumed 0). */ if ((TREE_CODE (arg0) == PLUS_EXPR || (TREE_CODE (arg0) == MINUS_EXPR && which == 0)) - && (TREE_INT_CST_LOW (pmop[which]) & cst1) == 0) + && (cst1 & pmop[which]) == 0) pmop[which] = NULL; break; default: break; } - + /* Only build anything new if we optimized one or both arguments above. 
*/ if (pmop[0] != TREE_OPERAND (arg0, 0) @@ -11829,7 +11767,7 @@ fold_binary_loc (location_t loc, if (pmop[1] != NULL) pmop[1] = fold_convert_loc (loc, utype, pmop[1]); } - + if (TREE_CODE (arg0) == NEGATE_EXPR) tem = fold_build1_loc (loc, NEGATE_EXPR, utype, pmop[0]); else if (TREE_CODE (arg0) == PLUS_EXPR) @@ -11856,7 +11794,7 @@ fold_binary_loc (location_t loc, } } } - + t1 = distribute_bit_expr (loc, code, type, arg0, arg1); if (t1 != NULL_TREE) return t1; @@ -11864,11 +11802,11 @@ fold_binary_loc (location_t loc, if (TREE_CODE (arg1) == INTEGER_CST && TREE_CODE (arg0) == NOP_EXPR && TYPE_UNSIGNED (TREE_TYPE (TREE_OPERAND (arg0, 0)))) { + wide_int mask; prec = TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (arg0, 0))); - if (prec < BITS_PER_WORD && prec < HOST_BITS_PER_WIDE_INT - && (~TREE_INT_CST_LOW (arg1) - & (((HOST_WIDE_INT) 1 << prec) - 1)) == 0) + mask = wide_int::from (arg1, prec, UNSIGNED); + if (mask == -1) return fold_convert_loc (loc, type, TREE_OPERAND (arg0, 0)); } @@ -11893,10 +11831,10 @@ fold_binary_loc (location_t loc, /* If arg0 is derived from the address of an object or function, we may be able to fold this expression using the object or function's alignment. */ - if (POINTER_TYPE_P (TREE_TYPE (arg0)) && host_integerp (arg1, 1)) + if (POINTER_TYPE_P (TREE_TYPE (arg0)) && tree_fits_uhwi_p (arg1)) { unsigned HOST_WIDE_INT modulus, residue; - unsigned HOST_WIDE_INT low = TREE_INT_CST_LOW (arg1); + unsigned HOST_WIDE_INT low = tree_to_uhwi (arg1); modulus = get_pointer_modulus_and_residue (arg0, &residue, integer_onep (arg1)); @@ -11913,16 +11851,16 @@ fold_binary_loc (location_t loc, if the new mask might be further optimized. 
*/ if ((TREE_CODE (arg0) == LSHIFT_EXPR || TREE_CODE (arg0) == RSHIFT_EXPR) - && host_integerp (TREE_OPERAND (arg0, 1), 1) - && host_integerp (arg1, TYPE_UNSIGNED (TREE_TYPE (arg1))) - && tree_low_cst (TREE_OPERAND (arg0, 1), 1) + && tree_fits_uhwi_p (TREE_OPERAND (arg0, 1)) + && tree_fits_hwi_p (arg1) + && tree_to_uhwi (TREE_OPERAND (arg0, 1)) < TYPE_PRECISION (TREE_TYPE (arg0)) && TYPE_PRECISION (TREE_TYPE (arg0)) <= HOST_BITS_PER_WIDE_INT - && tree_low_cst (TREE_OPERAND (arg0, 1), 1) > 0) + && tree_to_uhwi (TREE_OPERAND (arg0, 1)) > 0) { - unsigned int shiftc = tree_low_cst (TREE_OPERAND (arg0, 1), 1); + unsigned int shiftc = tree_to_uhwi (TREE_OPERAND (arg0, 1)); unsigned HOST_WIDE_INT mask - = tree_low_cst (arg1, TYPE_UNSIGNED (TREE_TYPE (arg1))); + = tree_to_hwi (arg1, TYPE_SIGN (TREE_TYPE (arg1))); unsigned HOST_WIDE_INT newmask, zerobits = 0; tree shift_type = TREE_TYPE (arg0); @@ -12272,17 +12210,10 @@ fold_binary_loc (location_t loc, tree sum = fold_binary_loc (loc, PLUS_EXPR, TREE_TYPE (arg1), arg1, TREE_OPERAND (arg0, 1)); if (sum && integer_zerop (sum)) { - unsigned long pow2; - - if (TREE_INT_CST_LOW (arg1)) - pow2 = exact_log2 (TREE_INT_CST_LOW (arg1)); - else - pow2 = exact_log2 (TREE_INT_CST_HIGH (arg1)) - + HOST_BITS_PER_WIDE_INT; - + tree pow2 = build_int_cst (integer_type_node, + wi::exact_log2 (arg1)); return fold_build2_loc (loc, RSHIFT_EXPR, type, - TREE_OPERAND (arg0, 0), - build_int_cst (integer_type_node, pow2)); + TREE_OPERAND (arg0, 0), pow2); } } @@ -12300,13 +12231,8 @@ fold_binary_loc (location_t loc, if (integer_pow2p (sval) && tree_int_cst_sgn (sval) > 0) { tree sh_cnt = TREE_OPERAND (arg1, 1); - unsigned long pow2; - - if (TREE_INT_CST_LOW (sval)) - pow2 = exact_log2 (TREE_INT_CST_LOW (sval)); - else - pow2 = exact_log2 (TREE_INT_CST_HIGH (sval)) - + HOST_BITS_PER_WIDE_INT; + tree pow2 = build_int_cst (TREE_TYPE (sh_cnt), + wi::exact_log2 (sval)); if (strict_overflow_p) fold_overflow_warning (("assuming signed overflow does not " 
@@ -12314,11 +12240,9 @@ fold_binary_loc (location_t loc, WARN_STRICT_OVERFLOW_MISC); sh_cnt = fold_build2_loc (loc, PLUS_EXPR, TREE_TYPE (sh_cnt), - sh_cnt, - build_int_cst (TREE_TYPE (sh_cnt), - pow2)); + sh_cnt, pow2); return fold_build2_loc (loc, RSHIFT_EXPR, type, - fold_convert_loc (loc, type, arg0), sh_cnt); + fold_convert_loc (loc, type, arg0), sh_cnt); } } @@ -12341,8 +12265,7 @@ fold_binary_loc (location_t loc, /* X / -1 is -X. */ if (!TYPE_UNSIGNED (type) && TREE_CODE (arg1) == INTEGER_CST - && TREE_INT_CST_LOW (arg1) == HOST_WIDE_INT_M1U - && TREE_INT_CST_HIGH (arg1) == -1) + && wi::eq_p (arg1, -1)) return fold_convert_loc (loc, type, negate_expr (arg0)); /* Convert -A / -B to A / B when the type is signed and overflow is @@ -12424,16 +12347,15 @@ fold_binary_loc (location_t loc, /* X % -1 is zero. */ if (!TYPE_UNSIGNED (type) && TREE_CODE (arg1) == INTEGER_CST - && TREE_INT_CST_LOW (arg1) == HOST_WIDE_INT_M1U - && TREE_INT_CST_HIGH (arg1) == -1) + && wi::eq_p (arg1, -1)) return omit_one_operand_loc (loc, type, integer_zero_node, arg0); /* X % -C is the same as X % C. */ if (code == TRUNC_MOD_EXPR - && !TYPE_UNSIGNED (type) + && TYPE_SIGN (type) == SIGNED && TREE_CODE (arg1) == INTEGER_CST && !TREE_OVERFLOW (arg1) - && TREE_INT_CST_HIGH (arg1) < 0 + && wi::neg_p (arg1) && !TYPE_OVERFLOW_TRAPS (type) /* Avoid this transformation if C is INT_MIN, i.e. C == -C. */ && !sign_bit_p (arg1, arg1)) @@ -12527,13 +12449,13 @@ fold_binary_loc (location_t loc, prec = element_precision (type); /* Turn (a OP c1) OP c2 into a OP (c1+c2). 
*/ - if (TREE_CODE (op0) == code && host_integerp (arg1, true) - && TREE_INT_CST_LOW (arg1) < prec - && host_integerp (TREE_OPERAND (arg0, 1), true) - && TREE_INT_CST_LOW (TREE_OPERAND (arg0, 1)) < prec) + if (TREE_CODE (op0) == code && tree_fits_uhwi_p (arg1) + && tree_to_uhwi (arg1) < prec + && tree_fits_uhwi_p (TREE_OPERAND (arg0, 1)) + && tree_to_uhwi (TREE_OPERAND (arg0, 1)) < prec) { - unsigned int low = (TREE_INT_CST_LOW (TREE_OPERAND (arg0, 1)) - + TREE_INT_CST_LOW (arg1)); + HOST_WIDE_INT low = (tree_to_shwi (TREE_OPERAND (arg0, 1)) + + tree_to_shwi (arg1)); /* Deal with a OP (c1 + c2) being undefined but (a OP c1) OP c2 being well defined. */ @@ -12557,13 +12479,13 @@ fold_binary_loc (location_t loc, if (((code == LSHIFT_EXPR && TREE_CODE (arg0) == RSHIFT_EXPR) || (TYPE_UNSIGNED (type) && code == RSHIFT_EXPR && TREE_CODE (arg0) == LSHIFT_EXPR)) - && host_integerp (arg1, false) - && TREE_INT_CST_LOW (arg1) < prec - && host_integerp (TREE_OPERAND (arg0, 1), false) - && TREE_INT_CST_LOW (TREE_OPERAND (arg0, 1)) < prec) + && tree_fits_shwi_p (arg1) + && tree_to_shwi (arg1) < prec + && tree_fits_shwi_p (TREE_OPERAND (arg0, 1)) + && tree_to_shwi (TREE_OPERAND (arg0, 1)) < prec) { - HOST_WIDE_INT low0 = TREE_INT_CST_LOW (TREE_OPERAND (arg0, 1)); - HOST_WIDE_INT low1 = TREE_INT_CST_LOW (arg1); + HOST_WIDE_INT low0 = tree_to_shwi (TREE_OPERAND (arg0, 1)); + HOST_WIDE_INT low1 = tree_to_shwi (arg1); tree lshift; tree arg00; @@ -12601,16 +12523,13 @@ fold_binary_loc (location_t loc, fold_build2_loc (loc, code, type, TREE_OPERAND (arg0, 1), arg1)); - /* Two consecutive rotates adding up to the precision of the - type can be ignored. */ + /* Two consecutive rotates adding up to the some integer + multiple of the precision of the type can be ignored. 
*/ if (code == RROTATE_EXPR && TREE_CODE (arg1) == INTEGER_CST && TREE_CODE (arg0) == RROTATE_EXPR && TREE_CODE (TREE_OPERAND (arg0, 1)) == INTEGER_CST - && TREE_INT_CST_HIGH (arg1) == 0 - && TREE_INT_CST_HIGH (TREE_OPERAND (arg0, 1)) == 0 - && ((TREE_INT_CST_LOW (arg1) - + TREE_INT_CST_LOW (TREE_OPERAND (arg0, 1))) - == prec)) + && wi::umod_trunc (wi::add (arg1, TREE_OPERAND (arg0, 1)), + prec) == 0) return TREE_OPERAND (arg0, 0); /* Fold (X & C2) << C1 into (X << C1) & (C2 << C1) @@ -12932,7 +12851,7 @@ fold_binary_loc (location_t loc, && operand_equal_p (tree_strip_nop_conversions (TREE_OPERAND (arg0, 1)), arg1, 0) - && (TREE_INT_CST_LOW (TREE_OPERAND (arg0, 0)) & 1) == 1) + && wi::bit_and (TREE_OPERAND (arg0, 0), 1) == 1) { return omit_two_operands_loc (loc, type, code == NE_EXPR @@ -13023,15 +12942,14 @@ fold_binary_loc (location_t loc, prec = TYPE_PRECISION (itype); /* Check for a valid shift count. */ - if (TREE_INT_CST_HIGH (arg001) == 0 - && TREE_INT_CST_LOW (arg001) < prec) + if (wi::ltu_p (arg001, prec)) { tree arg01 = TREE_OPERAND (arg0, 1); tree arg000 = TREE_OPERAND (TREE_OPERAND (arg0, 0), 0); unsigned HOST_WIDE_INT log2 = tree_log2 (arg01); /* If (C2 << C1) doesn't overflow, then ((X >> C1) & C2) != 0 can be rewritten as (X & (C2 << C1)) != 0. */ - if ((log2 + TREE_INT_CST_LOW (arg001)) < prec) + if ((log2 + tree_to_uhwi (arg001)) < prec) { tem = fold_build2_loc (loc, LSHIFT_EXPR, itype, arg01, arg001); tem = fold_build2_loc (loc, BIT_AND_EXPR, itype, arg000, tem); @@ -13149,9 +13067,7 @@ fold_binary_loc (location_t loc, tree arg00 = TREE_OPERAND (arg0, 0); tree arg01 = TREE_OPERAND (arg0, 1); tree itype = TREE_TYPE (arg00); - if (TREE_INT_CST_HIGH (arg01) == 0 - && TREE_INT_CST_LOW (arg01) - == (unsigned HOST_WIDE_INT) (TYPE_PRECISION (itype) - 1)) + if (wi::eq_p (arg01, TYPE_PRECISION (itype) - 1)) { if (TYPE_UNSIGNED (itype)) { @@ -13553,59 +13469,17 @@ fold_binary_loc (location_t loc, the specified precision will have known values. 
*/ { tree arg1_type = TREE_TYPE (arg1); - unsigned int width = TYPE_PRECISION (arg1_type); + unsigned int prec = TYPE_PRECISION (arg1_type); if (TREE_CODE (arg1) == INTEGER_CST - && width <= HOST_BITS_PER_DOUBLE_INT && (INTEGRAL_TYPE_P (arg1_type) || POINTER_TYPE_P (arg1_type))) { - HOST_WIDE_INT signed_max_hi; - unsigned HOST_WIDE_INT signed_max_lo; - unsigned HOST_WIDE_INT max_hi, max_lo, min_hi, min_lo; - - if (width <= HOST_BITS_PER_WIDE_INT) - { - signed_max_lo = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - - 1; - signed_max_hi = 0; - max_hi = 0; - - if (TYPE_UNSIGNED (arg1_type)) - { - max_lo = ((unsigned HOST_WIDE_INT) 2 << (width - 1)) - 1; - min_lo = 0; - min_hi = 0; - } - else - { - max_lo = signed_max_lo; - min_lo = (HOST_WIDE_INT_M1U << (width - 1)); - min_hi = -1; - } - } - else - { - width -= HOST_BITS_PER_WIDE_INT; - signed_max_lo = -1; - signed_max_hi = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - - 1; - max_lo = -1; - min_lo = 0; - - if (TYPE_UNSIGNED (arg1_type)) - { - max_hi = ((unsigned HOST_WIDE_INT) 2 << (width - 1)) - 1; - min_hi = 0; - } - else - { - max_hi = signed_max_hi; - min_hi = (HOST_WIDE_INT_M1U << (width - 1)); - } - } + wide_int max = wi::max_value (arg1_type); + wide_int signed_max = wi::max_value (prec, SIGNED); + wide_int min = wi::min_value (arg1_type); + wide_int wi_arg1 = arg1; - if ((unsigned HOST_WIDE_INT) TREE_INT_CST_HIGH (arg1) == max_hi - && TREE_INT_CST_LOW (arg1) == max_lo) + if (wi_arg1 == max) switch (code) { case GT_EXPR: @@ -13626,9 +13500,7 @@ fold_binary_loc (location_t loc, default: break; } - else if ((unsigned HOST_WIDE_INT) TREE_INT_CST_HIGH (arg1) - == max_hi - && TREE_INT_CST_LOW (arg1) == max_lo - 1) + else if (wi_arg1 == (max - 1)) switch (code) { case GT_EXPR: @@ -13648,9 +13520,7 @@ fold_binary_loc (location_t loc, default: break; } - else if ((unsigned HOST_WIDE_INT) TREE_INT_CST_HIGH (arg1) - == min_hi - && TREE_INT_CST_LOW (arg1) == min_lo) + else if (wi_arg1 == min) switch (code) { case LT_EXPR: 
@@ -13668,19 +13538,19 @@ fold_binary_loc (location_t loc, default: break; } - else if ((unsigned HOST_WIDE_INT) TREE_INT_CST_HIGH (arg1) - == min_hi - && TREE_INT_CST_LOW (arg1) == min_lo + 1) + else if (wi_arg1 == (min + 1)) switch (code) { case GE_EXPR: - arg1 = const_binop (MINUS_EXPR, arg1, integer_one_node); + arg1 = const_binop (MINUS_EXPR, arg1, + build_int_cst (TREE_TYPE (arg1), 1)); return fold_build2_loc (loc, NE_EXPR, type, fold_convert_loc (loc, TREE_TYPE (arg1), arg0), arg1); case LT_EXPR: - arg1 = const_binop (MINUS_EXPR, arg1, integer_one_node); + arg1 = const_binop (MINUS_EXPR, arg1, + build_int_cst (TREE_TYPE (arg1), 1)); return fold_build2_loc (loc, EQ_EXPR, type, fold_convert_loc (loc, TREE_TYPE (arg1), arg0), @@ -13689,14 +13559,17 @@ fold_binary_loc (location_t loc, break; } - else if (TREE_INT_CST_HIGH (arg1) == signed_max_hi - && TREE_INT_CST_LOW (arg1) == signed_max_lo + else if (wi_arg1 == signed_max && TYPE_UNSIGNED (arg1_type) + /* KENNY QUESTIONS THE CHECKING OF THE BITSIZE + HERE. HE FEELS THAT THE PRECISION SHOULD BE + CHECKED */ + /* We will flip the signedness of the comparison operator associated with the mode of arg1, so the sign bit is specified by this mode. Check that arg1 is the signed max associated with this sign bit. */ - && width == GET_MODE_BITSIZE (TYPE_MODE (arg1_type)) + && prec == GET_MODE_BITSIZE (TYPE_MODE (arg1_type)) /* signed_type does not work on pointer types. 
*/ && INTEGRAL_TYPE_P (arg1_type)) { @@ -14222,8 +14095,8 @@ fold_ternary_loc (location_t loc, enum tree_code code, tree type, && TYPE_PRECISION (TREE_TYPE (tem)) < TYPE_PRECISION (type)) { - unsigned HOST_WIDE_INT mask_lo; - HOST_WIDE_INT mask_hi; + wide_int mask; + wide_int wi_arg1 = arg1; int inner_width, outer_width; tree tem_type; @@ -14232,36 +14105,16 @@ fold_ternary_loc (location_t loc, enum tree_code code, tree type, if (outer_width > TYPE_PRECISION (type)) outer_width = TYPE_PRECISION (type); - if (outer_width > HOST_BITS_PER_WIDE_INT) - { - mask_hi = (HOST_WIDE_INT_M1U - >> (HOST_BITS_PER_DOUBLE_INT - outer_width)); - mask_lo = -1; - } - else - { - mask_hi = 0; - mask_lo = (HOST_WIDE_INT_M1U - >> (HOST_BITS_PER_WIDE_INT - outer_width)); - } - if (inner_width > HOST_BITS_PER_WIDE_INT) - { - mask_hi &= ~(HOST_WIDE_INT_M1U - >> (HOST_BITS_PER_WIDE_INT - inner_width)); - mask_lo = 0; - } - else - mask_lo &= ~(HOST_WIDE_INT_M1U - >> (HOST_BITS_PER_WIDE_INT - inner_width)); + mask = wi::shifted_mask + (inner_width, outer_width - inner_width, false, + TYPE_PRECISION (TREE_TYPE (arg1))); - if ((TREE_INT_CST_HIGH (arg1) & mask_hi) == mask_hi - && (TREE_INT_CST_LOW (arg1) & mask_lo) == mask_lo) + if (wi_arg1 == mask) { tem_type = signed_type_for (TREE_TYPE (tem)); tem = fold_convert_loc (loc, tem_type, tem); } - else if ((TREE_INT_CST_HIGH (arg1) & mask_hi) == 0 - && (TREE_INT_CST_LOW (arg1) & mask_lo) == 0) + else if ((wi_arg1 & mask) == 0) { tem_type = unsigned_type_for (TREE_TYPE (tem)); tem = fold_convert_loc (loc, tem_type, tem); @@ -14290,9 +14143,9 @@ fold_ternary_loc (location_t loc, enum tree_code code, tree type, tree tem = TREE_OPERAND (arg0, 0); STRIP_NOPS (tem); if (TREE_CODE (tem) == RSHIFT_EXPR - && TREE_CODE (TREE_OPERAND (tem, 1)) == INTEGER_CST + && tree_fits_uhwi_p (TREE_OPERAND (tem, 1)) && (unsigned HOST_WIDE_INT) tree_log2 (arg1) == - TREE_INT_CST_LOW (TREE_OPERAND (tem, 1))) + tree_to_uhwi (TREE_OPERAND (tem, 1))) return fold_build2_loc 
(loc, BIT_AND_EXPR, type, TREE_OPERAND (tem, 0), arg1); } @@ -14385,9 +14238,9 @@ fold_ternary_loc (location_t loc, enum tree_code code, tree type, && TREE_TYPE (type) == TREE_TYPE (TREE_TYPE (arg0))))) { tree eltype = TREE_TYPE (TREE_TYPE (arg0)); - unsigned HOST_WIDE_INT width = tree_low_cst (TYPE_SIZE (eltype), 1); - unsigned HOST_WIDE_INT n = tree_low_cst (arg1, 1); - unsigned HOST_WIDE_INT idx = tree_low_cst (op2, 1); + unsigned HOST_WIDE_INT width = tree_to_uhwi (TYPE_SIZE (eltype)); + unsigned HOST_WIDE_INT n = tree_to_uhwi (arg1); + unsigned HOST_WIDE_INT idx = tree_to_uhwi (op2); if (n != 0 && (idx % width) == 0 @@ -14458,7 +14311,7 @@ fold_ternary_loc (location_t loc, enum tree_code code, tree type, /* A bit-field-ref that referenced the full argument can be stripped. */ if (INTEGRAL_TYPE_P (TREE_TYPE (arg0)) - && TYPE_PRECISION (TREE_TYPE (arg0)) == tree_low_cst (arg1, 1) + && TYPE_PRECISION (TREE_TYPE (arg0)) == tree_to_uhwi (arg1) && integer_zerop (op2)) return fold_convert_loc (loc, type, arg0); @@ -14466,17 +14319,17 @@ fold_ternary_loc (location_t loc, enum tree_code code, tree type, fold (nearly) all BIT_FIELD_REFs. */ if (CONSTANT_CLASS_P (arg0) && can_native_interpret_type_p (type) - && host_integerp (TYPE_SIZE_UNIT (TREE_TYPE (arg0)), 1) + && tree_fits_uhwi_p (TYPE_SIZE_UNIT (TREE_TYPE (arg0))) /* This limitation should not be necessary, we just need to round this up to mode size. */ - && tree_low_cst (op1, 1) % BITS_PER_UNIT == 0 + && tree_to_uhwi (op1) % BITS_PER_UNIT == 0 /* Need bit-shifting of the buffer to relax the following. 
*/ - && tree_low_cst (op2, 1) % BITS_PER_UNIT == 0) + && tree_to_uhwi (op2) % BITS_PER_UNIT == 0) { - unsigned HOST_WIDE_INT bitpos = tree_low_cst (op2, 1); - unsigned HOST_WIDE_INT bitsize = tree_low_cst (op1, 1); + unsigned HOST_WIDE_INT bitpos = tree_to_uhwi (op2); + unsigned HOST_WIDE_INT bitsize = tree_to_uhwi (op1); unsigned HOST_WIDE_INT clen; - clen = tree_low_cst (TYPE_SIZE_UNIT (TREE_TYPE (arg0)), 1); + clen = tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (arg0))); /* ??? We cannot tell native_encode_expr to start at some random byte only. So limit us to a reasonable amount of work. */ @@ -14512,29 +14365,35 @@ fold_ternary_loc (location_t loc, enum tree_code code, tree type, case VEC_PERM_EXPR: if (TREE_CODE (arg2) == VECTOR_CST) { - unsigned int nelts = TYPE_VECTOR_SUBPARTS (type), i, mask; + unsigned int nelts = TYPE_VECTOR_SUBPARTS (type), i; unsigned char *sel = XALLOCAVEC (unsigned char, nelts); - tree t; bool need_mask_canon = false; bool all_in_vec0 = true; bool all_in_vec1 = true; bool maybe_identity = true; bool single_arg = (op0 == op1); bool changed = false; + int nelts_cnt = single_arg ? nelts : nelts * 2; - mask = single_arg ? (nelts - 1) : (2 * nelts - 1); gcc_assert (nelts == VECTOR_CST_NELTS (arg2)); for (i = 0; i < nelts; i++) { tree val = VECTOR_CST_ELT (arg2, i); + wide_int t; + if (TREE_CODE (val) != INTEGER_CST) return NULL_TREE; - sel[i] = TREE_INT_CST_LOW (val) & mask; - if (TREE_INT_CST_HIGH (val) - || ((unsigned HOST_WIDE_INT) - TREE_INT_CST_LOW (val) != sel[i])) - need_mask_canon = true; + /* Make sure that the perm value is in an acceptable + range. 
*/ + t = val; + if (wi::gtu_p (t, nelts_cnt)) + { + need_mask_canon = true; + sel[i] = t.to_uhwi () & (nelts_cnt - 1); + } + else + sel[i] = t.to_uhwi (); if (sel[i] < nelts) all_in_vec1 = false; @@ -14568,7 +14427,7 @@ fold_ternary_loc (location_t loc, enum tree_code code, tree type, && (TREE_CODE (op1) == VECTOR_CST || TREE_CODE (op1) == CONSTRUCTOR)) { - t = fold_vec_perm (type, op0, op1, sel); + tree t = fold_vec_perm (type, op0, op1, sel); if (t != NULL_TREE) return t; } @@ -15350,9 +15209,7 @@ multiple_of_p (tree type, const_tree top, const_tree bottom) op1 = TREE_OPERAND (top, 1); /* const_binop may not detect overflow correctly, so check for it explicitly here. */ - if (TYPE_PRECISION (TREE_TYPE (size_one_node)) - > TREE_INT_CST_LOW (op1) - && TREE_INT_CST_HIGH (op1) == 0 + if (wi::gtu_p (TYPE_PRECISION (TREE_TYPE (size_one_node)), op1) && 0 != (t1 = fold_convert (type, const_binop (LSHIFT_EXPR, size_one_node, @@ -15557,11 +15414,11 @@ tree_binary_nonnegative_warnv_p (enum tree_code code, tree type, tree op0, && TREE_CODE (inner1) == INTEGER_TYPE && unsigned1) { unsigned int precision0 = (TREE_CODE (op0) == INTEGER_CST) - ? tree_int_cst_min_precision (op0, /*unsignedp=*/true) + ? tree_int_cst_min_precision (op0, UNSIGNED) : TYPE_PRECISION (inner0); unsigned int precision1 = (TREE_CODE (op1) == INTEGER_CST) - ? tree_int_cst_min_precision (op1, /*unsignedp=*/true) + ? tree_int_cst_min_precision (op1, UNSIGNED) : TYPE_PRECISION (inner1); return precision0 + precision1 < TYPE_PRECISION (type); @@ -15741,7 +15598,7 @@ tree_call_nonnegative_warnv_p (tree type, tree fndecl, /* True if the 1st argument is nonnegative or the second argument is an even integer. 
*/ if (TREE_CODE (arg1) == INTEGER_CST - && (TREE_INT_CST_LOW (arg1) & 1) == 0) + && (tree_to_hwi (arg1) & 1) == 0) return true; return tree_expr_nonnegative_warnv_p (arg0, strict_overflow_p); @@ -15759,8 +15616,7 @@ tree_call_nonnegative_warnv_p (tree type, tree fndecl, if ((n & 1) == 0) { REAL_VALUE_TYPE cint; - real_from_integer (&cint, VOIDmode, n, - n < 0 ? -1 : 0, 0); + real_from_integer (&cint, VOIDmode, n, SIGNED); if (real_identical (&c, &cint)) return true; } @@ -16337,7 +16193,7 @@ fold_read_from_constant_string (tree exp) && (GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (TREE_TYPE (string)))) == 1)) return build_int_cst_type (TREE_TYPE (exp), (TREE_STRING_POINTER (string) - [TREE_INT_CST_LOW (index)])); + [tree_to_uhwi (index)])); } return NULL; } @@ -16356,12 +16212,12 @@ fold_negate_const (tree arg0, tree type) { case INTEGER_CST: { - double_int val = tree_to_double_int (arg0); + wide_int val = arg0; bool overflow; - val = val.neg_with_overflow (&overflow); - t = force_fit_type_double (type, val, 1, - (overflow | TREE_OVERFLOW (arg0)) - && !TYPE_UNSIGNED (type)); + val = wi::neg (val, &overflow); + t = force_fit_type (type, val, 1, + (overflow | TREE_OVERFLOW (arg0)) + && !TYPE_UNSIGNED (type)); break; } @@ -16403,12 +16259,11 @@ fold_abs_const (tree arg0, tree type) { case INTEGER_CST: { - double_int val = tree_to_double_int (arg0); + wide_int val = arg0; /* If the value is unsigned or non-negative, then the absolute value is the same as the ordinary value. 
*/ - if (TYPE_UNSIGNED (type) - || !val.is_negative ()) + if (!wi::neg_p (val, TYPE_SIGN (type))) t = arg0; /* If the value is negative, then the absolute value is @@ -16416,9 +16271,9 @@ fold_abs_const (tree arg0, tree type) else { bool overflow; - val = val.neg_with_overflow (&overflow); - t = force_fit_type_double (type, val, -1, - overflow | TREE_OVERFLOW (arg0)); + val = wi::neg (val, &overflow); + t = force_fit_type (type, val, -1, + overflow | TREE_OVERFLOW (arg0)); } } break; @@ -16443,12 +16298,12 @@ fold_abs_const (tree arg0, tree type) static tree fold_not_const (const_tree arg0, tree type) { - double_int val; + wide_int val; gcc_assert (TREE_CODE (arg0) == INTEGER_CST); - val = ~tree_to_double_int (arg0); - return force_fit_type_double (type, val, 0, TREE_OVERFLOW (arg0)); + val = wi::bit_not (arg0); + return force_fit_type (type, val, 0, TREE_OVERFLOW (arg0)); } /* Given CODE, a relational operator, the target type, TYPE and two @@ -16712,9 +16567,10 @@ fold_indirect_ref_1 (location_t loc, tree type, tree op0) if (TREE_CODE (op00type) == VECTOR_TYPE && type == TREE_TYPE (op00type)) { - HOST_WIDE_INT offset = tree_low_cst (op01, 0); + HOST_WIDE_INT offset = tree_to_shwi (op01); tree part_width = TYPE_SIZE (type); - unsigned HOST_WIDE_INT part_widthi = tree_low_cst (part_width, 0)/BITS_PER_UNIT; + unsigned HOST_WIDE_INT part_widthi + = tree_to_shwi (part_width) / BITS_PER_UNIT; unsigned HOST_WIDE_INT indexi = offset * BITS_PER_UNIT; tree index = bitsize_int (indexi); @@ -16852,8 +16708,7 @@ fold_ignored_result (tree t) } } -/* Return the value of VALUE, rounded up to a multiple of DIVISOR. - This can only be applied to objects of a sizetype. */ +/* Return the value of VALUE, rounded up to a multiple of DIVISOR. 
*/ tree round_up_loc (location_t loc, tree value, int divisor) @@ -16881,24 +16736,19 @@ round_up_loc (location_t loc, tree value, int divisor) { if (TREE_CODE (value) == INTEGER_CST) { - double_int val = tree_to_double_int (value); + wide_int val = value; bool overflow_p; - if ((val.low & (divisor - 1)) == 0) + if ((val & (divisor - 1)) == 0) return value; overflow_p = TREE_OVERFLOW (value); - val.low &= ~(divisor - 1); - val.low += divisor; - if (val.low == 0) - { - val.high++; - if (val.high == 0) - overflow_p = true; - } + val &= ~(divisor - 1); + val += divisor; + if (val == 0) + overflow_p = true; - return force_fit_type_double (TREE_TYPE (value), val, - -1, overflow_p); + return force_fit_type (TREE_TYPE (value), val, -1, overflow_p); } else { @@ -17019,7 +16869,7 @@ ptr_difference_const (tree e1, tree e2, HOST_WIDE_INT *diff) toffset2 = fold_convert (type, toffset2); tdiff = fold_build2 (MINUS_EXPR, type, toffset1, toffset2); - if (!cst_and_fits_in_hwi (tdiff)) + if (!cst_fits_shwi_p (tdiff)) return false; *diff = int_cst_value (tdiff); diff --git a/gcc/fortran/target-memory.c b/gcc/fortran/target-memory.c index 21b44ae482f..937fda529b8 100644 --- a/gcc/fortran/target-memory.c +++ b/gcc/fortran/target-memory.c @@ -31,6 +31,7 @@ along with GCC; see the file COPYING3. If not see #include "trans-const.h" #include "trans-types.h" #include "target-memory.h" +#include "wide-int.h" /* --------------------------------------------------------------- */ /* Calculate the size of an expression. 
*/ @@ -251,8 +252,8 @@ encode_derived (gfc_expr *source, unsigned char *buffer, size_t buffer_size) gcc_assert (cmp); if (!c->expr) continue; - ptr = TREE_INT_CST_LOW(DECL_FIELD_OFFSET(cmp->backend_decl)) - + TREE_INT_CST_LOW(DECL_FIELD_BIT_OFFSET(cmp->backend_decl))/8; + ptr = tree_to_hwi (DECL_FIELD_OFFSET(cmp->backend_decl)) + + tree_to_hwi (DECL_FIELD_BIT_OFFSET(cmp->backend_decl))/8; if (c->expr->expr_type == EXPR_NULL) { @@ -427,7 +428,7 @@ gfc_interpret_logical (int kind, unsigned char *buffer, size_t buffer_size, { tree t = native_interpret_expr (gfc_get_logical_type (kind), buffer, buffer_size); - *logical = tree_to_double_int (t).is_zero () ? 0 : 1; + *logical = wi::eq_p (t, 0) ? 0 : 1; return size_logical (kind); } @@ -545,9 +546,9 @@ gfc_interpret_derived (unsigned char *buffer, size_t buffer_size, gfc_expr *resu i.e. there are, e.g., no bit fields. */ gcc_assert (cmp->backend_decl); - ptr = TREE_INT_CST_LOW (DECL_FIELD_BIT_OFFSET (cmp->backend_decl)); + ptr = tree_to_hwi (DECL_FIELD_BIT_OFFSET (cmp->backend_decl)); gcc_assert (ptr % 8 == 0); - ptr = ptr/8 + TREE_INT_CST_LOW (DECL_FIELD_OFFSET (cmp->backend_decl)); + ptr = ptr/8 + tree_to_hwi (DECL_FIELD_OFFSET (cmp->backend_decl)); gfc_target_interpret_expr (&buffer[ptr], buffer_size - ptr, e, true); } @@ -659,8 +660,8 @@ expr_to_char (gfc_expr *e, unsigned char *data, unsigned char *chk, size_t len) gcc_assert (cmp && cmp->backend_decl); if (!c->expr) continue; - ptr = TREE_INT_CST_LOW(DECL_FIELD_OFFSET(cmp->backend_decl)) - + TREE_INT_CST_LOW(DECL_FIELD_BIT_OFFSET(cmp->backend_decl))/8; + ptr = tree_to_hwi (DECL_FIELD_OFFSET(cmp->backend_decl)) + + tree_to_hwi (DECL_FIELD_BIT_OFFSET(cmp->backend_decl))/8; expr_to_char (c->expr, &data[ptr], &chk[ptr], len); } return len; diff --git a/gcc/fortran/trans-array.c b/gcc/fortran/trans-array.c index 5a3cf80f9f6..5c71e176b32 100644 --- a/gcc/fortran/trans-array.c +++ b/gcc/fortran/trans-array.c @@ -90,6 +90,7 @@ along with GCC; see the file COPYING3. 
If not see #include "trans-array.h" #include "trans-const.h" #include "dependency.h" +#include "wide-int.h" static bool gfc_get_array_constructor_size (mpz_t *, gfc_constructor_base); @@ -1684,7 +1685,7 @@ gfc_trans_array_constructor_value (stmtblock_t * pblock, tree type, tmp = gfc_build_addr_expr (NULL_TREE, tmp); init = gfc_build_addr_expr (NULL_TREE, init); - size = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (type)); + size = tree_to_hwi (TYPE_SIZE_UNIT (type)); bound = build_int_cst (size_type_node, n * size); tmp = build_call_expr_loc (input_location, builtin_decl_explicit (BUILT_IN_MEMCPY), @@ -5361,9 +5362,8 @@ gfc_conv_array_initializer (tree type, gfc_expr * expr) { gfc_constructor *c; tree tmp; + addr_wide_int wtmp; gfc_se se; - HOST_WIDE_INT hi; - unsigned HOST_WIDE_INT lo; tree index, range; vec<constructor_elt, va_gc> *v = NULL; @@ -5385,20 +5385,13 @@ gfc_conv_array_initializer (tree type, gfc_expr * expr) else gfc_conv_structure (&se, expr, 1); - tmp = TYPE_MAX_VALUE (TYPE_DOMAIN (type)); - gcc_assert (tmp && INTEGER_CST_P (tmp)); - hi = TREE_INT_CST_HIGH (tmp); - lo = TREE_INT_CST_LOW (tmp); - lo++; - if (lo == 0) - hi++; + wtmp = addr_wide_int (TYPE_MAX_VALUE (TYPE_DOMAIN (type))) + 1; + gcc_assert (wtmp != 0); /* This will probably eat buckets of memory for large arrays. 
*/ - while (hi != 0 || lo != 0) + while (wtmp != 0) { CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, se.expr); - if (lo == 0) - hi--; - lo--; + wtmp -= 1; } break; diff --git a/gcc/fortran/trans-common.c b/gcc/fortran/trans-common.c index e2234b1ae0c..ffe48925ace 100644 --- a/gcc/fortran/trans-common.c +++ b/gcc/fortran/trans-common.c @@ -397,8 +397,8 @@ build_common_decl (gfc_common_head *com, tree union_type, bool is_init) gfc_warning ("Named COMMON block '%s' at %L shall be of the " "same size as elsewhere (%lu vs %lu bytes)", com->name, &com->where, - (unsigned long) TREE_INT_CST_LOW (size), - (unsigned long) TREE_INT_CST_LOW (DECL_SIZE_UNIT (decl))); + (unsigned long) tree_to_uhwi (size), + (unsigned long) tree_to_uhwi (DECL_SIZE_UNIT (decl))); if (tree_int_cst_lt (DECL_SIZE_UNIT (decl), size)) { diff --git a/gcc/fortran/trans-const.c b/gcc/fortran/trans-const.c index a217c471411..1ff33e5c92a 100644 --- a/gcc/fortran/trans-const.c +++ b/gcc/fortran/trans-const.c @@ -32,6 +32,7 @@ along with GCC; see the file COPYING3. If not see #include "trans-const.h" #include "trans-types.h" #include "target-memory.h" +#include "wide-int.h" tree gfc_rank_cst[GFC_MAX_DIMENSIONS + 1]; @@ -144,10 +145,9 @@ gfc_conv_string_init (tree length, gfc_expr * expr) gcc_assert (expr->expr_type == EXPR_CONSTANT); gcc_assert (expr->ts.type == BT_CHARACTER); - gcc_assert (INTEGER_CST_P (length)); - gcc_assert (TREE_INT_CST_HIGH (length) == 0); + gcc_assert (cst_fits_uhwi_p (length)); - len = TREE_INT_CST_LOW (length); + len = tree_to_hwi (length); slen = expr->value.character.length; if (len > slen) @@ -200,8 +200,8 @@ gfc_init_constants (void) tree gfc_conv_mpz_to_tree (mpz_t i, int kind) { - double_int val = mpz_get_double_int (gfc_get_int_type (kind), i, true); - return double_int_to_tree (gfc_get_int_type (kind), val); + wide_int val = wi::from_mpz (gfc_get_int_type (kind), i, true); + return wide_int_to_tree (gfc_get_int_type (kind), val); } /* Converts a backend tree into a GMP integer. 
*/ @@ -209,8 +209,7 @@ gfc_conv_mpz_to_tree (mpz_t i, int kind) void gfc_conv_tree_to_mpz (mpz_t i, tree source) { - double_int val = tree_to_double_int (source); - mpz_set_double_int (i, val, TYPE_UNSIGNED (TREE_TYPE (source))); + wi::to_mpz (source, i, TYPE_SIGN (TREE_TYPE (source))); } /* Converts a real constant into backend form. */ diff --git a/gcc/fortran/trans-decl.c b/gcc/fortran/trans-decl.c index c2c736e1c66..eea05814504 100644 --- a/gcc/fortran/trans-decl.c +++ b/gcc/fortran/trans-decl.c @@ -405,10 +405,10 @@ gfc_can_put_var_on_stack (tree size) if (gfc_option.flag_max_stack_var_size < 0) return 1; - if (TREE_INT_CST_HIGH (size) != 0) + if (!cst_fits_uhwi_p (size)) return 0; - low = TREE_INT_CST_LOW (size); + low = tree_to_hwi (size); if (low > (unsigned HOST_WIDE_INT) gfc_option.flag_max_stack_var_size) return 0; diff --git a/gcc/fortran/trans-expr.c b/gcc/fortran/trans-expr.c index a590ca18c80..c38a79ada77 100644 --- a/gcc/fortran/trans-expr.c +++ b/gcc/fortran/trans-expr.c @@ -38,7 +38,7 @@ along with GCC; see the file COPYING3. If not see /* Only for gfc_trans_assign and gfc_trans_pointer_assign. */ #include "trans-stmt.h" #include "dependency.h" - +#include "wide-int.h" /* Convert a scalar to an array descriptor. To be used for assumed-rank arrays. */ @@ -2081,13 +2081,14 @@ gfc_conv_cst_int_power (gfc_se * se, tree lhs, tree rhs) HOST_WIDE_INT m; unsigned HOST_WIDE_INT n; int sgn; + wide_int wrhs = rhs; /* If exponent is too large, we won't expand it anyway, so don't bother with large integer values. */ - if (!TREE_INT_CST (rhs).fits_shwi ()) + if (!wi::fits_shwi_p (wrhs)) return 0; - m = TREE_INT_CST (rhs).to_shwi (); + m = wrhs.to_shwi (); /* There's no ABS for HOST_WIDE_INT, so here we go. It also takes care of the asymmetric range of the integer type. */ n = (unsigned HOST_WIDE_INT) (m < 0 ? 
-m : m); @@ -2626,11 +2627,11 @@ gfc_string_to_single_character (tree len, tree str, int kind) { if (len == NULL - || !INTEGER_CST_P (len) || TREE_INT_CST_HIGH (len) != 0 + || !cst_fits_uhwi_p (len) || !POINTER_TYPE_P (TREE_TYPE (str))) return NULL_TREE; - if (TREE_INT_CST_LOW (len) == 1) + if (tree_to_hwi (len) == 1) { str = fold_convert (gfc_get_pchar_type (kind), str); return build_fold_indirect_ref_loc (input_location, str); @@ -2642,8 +2643,8 @@ gfc_string_to_single_character (tree len, tree str, int kind) && TREE_CODE (TREE_OPERAND (TREE_OPERAND (str, 0), 0)) == STRING_CST && array_ref_low_bound (TREE_OPERAND (str, 0)) == TREE_OPERAND (TREE_OPERAND (str, 0), 1) - && TREE_INT_CST_LOW (len) > 1 - && TREE_INT_CST_LOW (len) + && tree_to_uhwi (len) > 1 + && tree_to_uhwi (len) == (unsigned HOST_WIDE_INT) TREE_STRING_LENGTH (TREE_OPERAND (TREE_OPERAND (str, 0), 0))) { @@ -2740,8 +2741,9 @@ gfc_optimize_len_trim (tree len, tree str, int kind) && TREE_CODE (TREE_OPERAND (TREE_OPERAND (str, 0), 0)) == STRING_CST && array_ref_low_bound (TREE_OPERAND (str, 0)) == TREE_OPERAND (TREE_OPERAND (str, 0), 1) - && TREE_INT_CST_LOW (len) >= 1 - && TREE_INT_CST_LOW (len) + && tree_fits_uhwi_p (len) + && tree_to_uhwi (len) >= 1 + && tree_to_uhwi (len) == (unsigned HOST_WIDE_INT) TREE_STRING_LENGTH (TREE_OPERAND (TREE_OPERAND (str, 0), 0))) { diff --git a/gcc/fortran/trans-intrinsic.c b/gcc/fortran/trans-intrinsic.c index 7e2bb36ca6d..37c8a1cec36 100644 --- a/gcc/fortran/trans-intrinsic.c +++ b/gcc/fortran/trans-intrinsic.c @@ -39,6 +39,7 @@ along with GCC; see the file COPYING3. If not see #include "trans-array.h" /* Only for gfc_trans_assign and gfc_trans_pointer_assign. */ #include "trans-stmt.h" +#include "wide-int.h" /* This maps Fortran intrinsic math functions to external library or GCC builtin functions. 
*/ @@ -983,12 +984,11 @@ trans_this_image (gfc_se * se, gfc_expr *expr) if (INTEGER_CST_P (dim_arg)) { - int hi, co_dim; + wide_int wdim_arg = dim_arg; - hi = TREE_INT_CST_HIGH (dim_arg); - co_dim = TREE_INT_CST_LOW (dim_arg); - if (hi || co_dim < 1 - || co_dim > GFC_TYPE_ARRAY_CORANK (TREE_TYPE (desc))) + if (wi::ltu_p (wdim_arg, 1) + || wi::gtu_p (wdim_arg, + GFC_TYPE_ARRAY_CORANK (TREE_TYPE (desc)))) gfc_error ("'dim' argument of %s intrinsic at %L is not a valid " "dimension index", expr->value.function.isym->name, &expr->where); @@ -1345,14 +1345,10 @@ gfc_conv_intrinsic_bound (gfc_se * se, gfc_expr * expr, int upper) if (INTEGER_CST_P (bound)) { - int hi, low; - - hi = TREE_INT_CST_HIGH (bound); - low = TREE_INT_CST_LOW (bound); - if (hi || low < 0 - || ((!as || as->type != AS_ASSUMED_RANK) - && low >= GFC_TYPE_ARRAY_RANK (TREE_TYPE (desc))) - || low > GFC_MAX_DIMENSIONS) + wide_int wbound = bound; + if (((!as || as->type != AS_ASSUMED_RANK) + && wi::geu_p (wbound, GFC_TYPE_ARRAY_RANK (TREE_TYPE (desc)))) + || wi::gtu_p (wbound, GFC_MAX_DIMENSIONS)) gfc_error ("'dim' argument of %s intrinsic at %L is not a valid " "dimension index", upper ? 
"UBOUND" : "LBOUND", &expr->where); @@ -1547,11 +1543,9 @@ conv_intrinsic_cobound (gfc_se * se, gfc_expr * expr) if (INTEGER_CST_P (bound)) { - int hi, low; - - hi = TREE_INT_CST_HIGH (bound); - low = TREE_INT_CST_LOW (bound); - if (hi || low < 1 || low > GFC_TYPE_ARRAY_CORANK (TREE_TYPE (desc))) + wide_int wbound = bound; + if (wi::ltu_p (wbound, 1) + || wi::gtu_p (wbound, GFC_TYPE_ARRAY_CORANK (TREE_TYPE (desc)))) gfc_error ("'dim' argument of %s intrinsic at %L is not a valid " "dimension index", expr->value.function.isym->name, &expr->where); diff --git a/gcc/fortran/trans-io.c b/gcc/fortran/trans-io.c index ec17dc97c21..fd5642209d2 100644 --- a/gcc/fortran/trans-io.c +++ b/gcc/fortran/trans-io.c @@ -292,8 +292,8 @@ gfc_build_io_library_fndecls (void) = build_pointer_type (gfc_intio_type_node); types[IOPARM_type_parray] = pchar_type_node; types[IOPARM_type_pchar] = pchar_type_node; - pad_size = 16 * TREE_INT_CST_LOW (TYPE_SIZE_UNIT (pchar_type_node)); - pad_size += 32 * TREE_INT_CST_LOW (TYPE_SIZE_UNIT (integer_type_node)); + pad_size = 16 * tree_to_hwi (TYPE_SIZE_UNIT (pchar_type_node)); + pad_size += 32 * tree_to_hwi (TYPE_SIZE_UNIT (integer_type_node)); pad_idx = build_index_type (size_int (pad_size - 1)); types[IOPARM_type_pad] = build_array_type (char_type_node, pad_idx); diff --git a/gcc/fortran/trans-types.c b/gcc/fortran/trans-types.c index 806accc7015..48ab6545071 100644 --- a/gcc/fortran/trans-types.c +++ b/gcc/fortran/trans-types.c @@ -861,8 +861,6 @@ gfc_init_types (void) int index; tree type; unsigned n; - unsigned HOST_WIDE_INT hi; - unsigned HOST_WIDE_INT lo; /* Create and name the types. */ #define PUSH_TYPE(name, node) \ @@ -954,13 +952,10 @@ gfc_init_types (void) descriptor. 
*/ n = TYPE_PRECISION (gfc_array_index_type) - GFC_DTYPE_SIZE_SHIFT; - lo = ~ (unsigned HOST_WIDE_INT) 0; - if (n > HOST_BITS_PER_WIDE_INT) - hi = lo >> (2*HOST_BITS_PER_WIDE_INT - n); - else - hi = 0, lo >>= HOST_BITS_PER_WIDE_INT - n; - gfc_max_array_element_size - = build_int_cst_wide (long_unsigned_type_node, lo, hi); + gfc_max_array_element_size + = wide_int_to_tree (long_unsigned_type_node, + wi::mask (n, UNSIGNED, + TYPE_PRECISION (long_unsigned_type_node))); boolean_type_node = gfc_get_logical_type (gfc_default_logical_kind); boolean_true_node = build_int_cst (boolean_type_node, 1); @@ -1449,7 +1444,7 @@ gfc_get_dtype (tree type) if (tree_int_cst_lt (gfc_max_array_element_size, size)) gfc_fatal_error ("Array element size too big at %C"); - i += TREE_INT_CST_LOW (size) << GFC_DTYPE_SIZE_SHIFT; + i += tree_to_hwi (size) << GFC_DTYPE_SIZE_SHIFT; } dtype = build_int_cst (gfc_array_index_type, i); @@ -1887,7 +1882,7 @@ gfc_get_array_type_bounds (tree etype, int dimen, int codimen, tree * lbound, if (stride) rtype = build_range_type (gfc_array_index_type, gfc_index_zero_node, int_const_binop (MINUS_EXPR, stride, - integer_one_node)); + build_int_cst (TREE_TYPE (stride), 1))); else rtype = gfc_array_range_type; arraytype = build_array_type (etype, rtype); diff --git a/gcc/function.c b/gcc/function.c index eb8aca91aaa..ae56aedcdae 100644 --- a/gcc/function.c +++ b/gcc/function.c @@ -3825,8 +3825,8 @@ locate_and_pad_parm (enum machine_mode passed_mode, tree type, int in_regs, { tree s2 = sizetree; if (where_pad != none - && (!host_integerp (sizetree, 1) - || (tree_low_cst (sizetree, 1) * BITS_PER_UNIT) % round_boundary)) + && (!tree_fits_uhwi_p (sizetree) + || (tree_to_uhwi (sizetree) * BITS_PER_UNIT) % round_boundary)) s2 = round_up (s2, round_boundary / BITS_PER_UNIT); SUB_PARM_SIZE (locate->slot_offset, s2); } @@ -3868,7 +3868,7 @@ locate_and_pad_parm (enum machine_mode passed_mode, tree type, int in_regs, #ifdef PUSH_ROUNDING if (passed_mode != BLKmode) - 
sizetree = size_int (PUSH_ROUNDING (TREE_INT_CST_LOW (sizetree))); + sizetree = size_int (PUSH_ROUNDING (tree_to_hwi (sizetree))); #endif /* Pad_below needs the pre-rounded size to know how much to pad below @@ -3878,8 +3878,8 @@ locate_and_pad_parm (enum machine_mode passed_mode, tree type, int in_regs, pad_below (&locate->offset, passed_mode, sizetree); if (where_pad != none - && (!host_integerp (sizetree, 1) - || (tree_low_cst (sizetree, 1) * BITS_PER_UNIT) % round_boundary)) + && (!tree_fits_uhwi_p (sizetree) + || (tree_to_uhwi (sizetree) * BITS_PER_UNIT) % round_boundary)) sizetree = round_up (sizetree, round_boundary / BITS_PER_UNIT); ADD_PARM_SIZE (locate->size, sizetree); @@ -3970,7 +3970,7 @@ pad_below (struct args_size *offset_ptr, enum machine_mode passed_mode, tree siz else { if (TREE_CODE (sizetree) != INTEGER_CST - || (TREE_INT_CST_LOW (sizetree) * BITS_PER_UNIT) % PARM_BOUNDARY) + || (tree_to_hwi (sizetree) * BITS_PER_UNIT) % PARM_BOUNDARY) { /* Round the size up to multiple of PARM_BOUNDARY bits. */ tree s2 = round_up (sizetree, PARM_BOUNDARY / BITS_PER_UNIT); diff --git a/gcc/gcse.c b/gcc/gcse.c index bb9ba15ea8b..0d02d0a0312 100644 --- a/gcc/gcse.c +++ b/gcc/gcse.c @@ -1997,6 +1997,13 @@ prune_insertions_deletions (int n_elems) bitmap_clear_bit (pre_delete_map[i], j); } + if (dump_file) + { + dump_bitmap_vector (dump_file, "pre_insert_map", "", pre_insert_map, n_edges); + dump_bitmap_vector (dump_file, "pre_delete_map", "", pre_delete_map, + last_basic_block); + } + sbitmap_free (prune_exprs); free (insertions); free (deletions); diff --git a/gcc/gdbinit.in b/gcc/gdbinit.in index 503ef24e301..73cabfebc36 100644 --- a/gcc/gdbinit.in +++ b/gcc/gdbinit.in @@ -94,6 +94,15 @@ Print the expression that is $ in C syntax. Works only when an inferior is executing. end +define pmz +set mpz_out_str(stderr, 10, $) +end + +document pmz +Print the mpz value that is $ +Works only when an inferior is executing. 
+end + define ptc output (enum tree_code) $.common.code echo \n diff --git a/gcc/genemit.c b/gcc/genemit.c index d4bb301320d..223999ac165 100644 --- a/gcc/genemit.c +++ b/gcc/genemit.c @@ -204,6 +204,7 @@ gen_exp (rtx x, enum rtx_code subroutine_type, char *used) case CONST_DOUBLE: case CONST_FIXED: + case CONST_WIDE_INT: /* These shouldn't be written in MD files. Instead, the appropriate routines in varasm.c should be called. */ gcc_unreachable (); diff --git a/gcc/gengenrtl.c b/gcc/gengenrtl.c index 4a35683c896..ce2049dfd9c 100644 --- a/gcc/gengenrtl.c +++ b/gcc/gengenrtl.c @@ -142,6 +142,7 @@ static int excluded_rtx (int idx) { return ((strcmp (defs[idx].enumname, "CONST_DOUBLE") == 0) + || (strcmp (defs[idx].enumname, "CONST_WIDE_INT") == 0) || (strcmp (defs[idx].enumname, "CONST_FIXED") == 0)); } diff --git a/gcc/gengtype-lex.l b/gcc/gengtype-lex.l index f46cd17586c..7ece2ab8e60 100644 --- a/gcc/gengtype-lex.l +++ b/gcc/gengtype-lex.l @@ -57,7 +57,7 @@ ITYPE {IWORD}({WS}{IWORD})* /* Include '::' in identifiers to capture C++ scope qualifiers. */ ID {CID}({HWS}::{HWS}{CID})* EOID [^[:alnum:]_] -CXX_KEYWORD inline|public:|private:|protected:|template|operator|friend +CXX_KEYWORD inline|public:|private:|protected:|template|operator|friend|static %x in_struct in_struct_comment in_comment %option warn noyywrap nounput nodefault perf-report @@ -110,6 +110,7 @@ CXX_KEYWORD inline|public:|private:|protected:|template|operator|friend "const"/{EOID} /* don't care */ {CXX_KEYWORD}/{EOID} | "~" | +"^" | "&" { *yylval = XDUPVAR (const char, yytext, yyleng, yyleng + 1); return IGNORABLE_CXX_KEYWORD; diff --git a/gcc/gengtype-parse.c b/gcc/gengtype-parse.c index e5204c1a71d..244ddcc85a1 100644 --- a/gcc/gengtype-parse.c +++ b/gcc/gengtype-parse.c @@ -182,6 +182,23 @@ require2 (int t1, int t2) return v; } +/* If the next token does not have one of the codes T1, T2 or T3, report a + parse error; otherwise return the token's value. 
*/ +static const char * +require3 (int t1, int t2, int t3) +{ + int u = token (); + const char *v = advance (); + if (u != t1 && u != t2 && u != t3) + { + parse_error ("expected %s, %s or %s, have %s", + print_token (t1, 0), print_token (t2, 0), + print_token (t3, 0), print_token (u, v)); + return 0; + } + return v; +} + /* Near-terminals. */ /* C-style string constant concatenation: STRING+ @@ -228,18 +245,45 @@ require_template_declaration (const char *tmpl_name) str = concat (tmpl_name, "<", (char *) 0); /* Read the comma-separated list of identifiers. */ - while (token () != '>') + int depth = 1; + while (depth > 0) { - const char *id = require2 (ID, ','); + if (token () == ENUM) + { + advance (); + str = concat (str, "enum ", (char *) 0); + continue; + } + if (token () == NUM) + { + str = concat (str, advance (), (char *) 0); + continue; + } + if (token () == ':') + { + advance (); + str = concat (str, ":", (char *) 0); + continue; + } + if (token () == '<') + { + advance (); + str = concat (str, "<", (char *) 0); + depth += 1; + continue; + } + if (token () == '>') + { + advance (); + str = concat (str, ">", (char *) 0); + depth -= 1; + continue; + } + const char *id = require3 (SCALAR, ID, ','); if (id == NULL) id = ","; str = concat (str, id, (char *) 0); } - - /* Recognize the closing '>'. */ - require ('>'); - str = concat (str, ">", (char *) 0); - return str; } diff --git a/gcc/gengtype-state.c b/gcc/gengtype-state.c index 96ecc7cdaf8..0b5cf8f0866 100644 --- a/gcc/gengtype-state.c +++ b/gcc/gengtype-state.c @@ -30,7 +30,6 @@ #endif #include "system.h" #include "errors.h" /* For fatal. */ -#include "double-int.h" #include "hashtab.h" #include "version.h" /* For version_string & pkgversion_string. 
*/ #include "obstack.h" diff --git a/gcc/gengtype.c b/gcc/gengtype.c index b9bef4df76c..654c35423e1 100644 --- a/gcc/gengtype.c +++ b/gcc/gengtype.c @@ -25,7 +25,6 @@ #include "system.h" #include "errors.h" /* for fatal */ #include "getopt.h" -#include "double-int.h" #include "version.h" /* for version_string & pkgversion_string. */ #include "hashtab.h" #include "xregex.h" @@ -525,7 +524,7 @@ do_typedef (const char *s, type_p t, struct fileloc *pos) for (p = typedefs; p != NULL; p = p->next) if (strcmp (p->name, s) == 0) { - if (p->type != t) + if (p->type != t && strcmp (s, "result_type") != 0) { error_at_line (pos, "type `%s' previously defined", s); error_at_line (&p->line, "previously defined here"); @@ -1733,7 +1732,7 @@ open_base_files (void) static const char *const ifiles[] = { "config.h", "system.h", "coretypes.h", "tm.h", "hashtab.h", "splay-tree.h", "obstack.h", "bitmap.h", "input.h", - "tree.h", "rtl.h", "function.h", "insn-config.h", "expr.h", + "tree.h", "rtl.h", "wide-int.h", "function.h", "insn-config.h", "expr.h", "hard-reg-set.h", "basic-block.h", "cselib.h", "insn-addr.h", "optabs.h", "libfuncs.h", "debug.h", "ggc.h", "cgraph.h", "tree-ssa.h", "reload.h", "cpp-id-data.h", "tree-chrec.h", @@ -5475,6 +5474,9 @@ main (int argc, char **argv) POS_HERE (do_scalar_typedef ("REAL_VALUE_TYPE", &pos)); POS_HERE (do_scalar_typedef ("FIXED_VALUE_TYPE", &pos)); POS_HERE (do_scalar_typedef ("double_int", &pos)); + POS_HERE (do_scalar_typedef ("double_int_storage", &pos)); + POS_HERE (do_scalar_typedef ("addr_wide_int", &pos)); + POS_HERE (do_scalar_typedef ("max_wide_int", &pos)); POS_HERE (do_scalar_typedef ("uint64_t", &pos)); POS_HERE (do_scalar_typedef ("uint8", &pos)); POS_HERE (do_scalar_typedef ("uintptr_t", &pos)); diff --git a/gcc/genmodes.c b/gcc/genmodes.c index a0b2f21f11b..8122a3ff924 100644 --- a/gcc/genmodes.c +++ b/gcc/genmodes.c @@ -869,14 +869,14 @@ emit_max_int (void) max = i->bytesize; if (max > mmax) mmax = max; - printf ("#define 
MAX_BITSIZE_MODE_ANY_INT %d*BITS_PER_UNIT\n", mmax); + printf ("#define MAX_BITSIZE_MODE_ANY_INT %d\n", mmax * MAX_BITS_PER_UNIT); mmax = 0; for (j = 0; j < MAX_MODE_CLASS; j++) for (i = modes[j]; i; i = i->next) if (mmax < i->bytesize) mmax = i->bytesize; - printf ("#define MAX_BITSIZE_MODE_ANY_MODE %d*BITS_PER_UNIT\n", mmax); + printf ("#define MAX_BITSIZE_MODE_ANY_MODE %d\n", mmax * MAX_BITS_PER_UNIT); } static void diff --git a/gcc/genpreds.c b/gcc/genpreds.c index f3d9dac9c55..9114d2d60d9 100644 --- a/gcc/genpreds.c +++ b/gcc/genpreds.c @@ -612,7 +612,7 @@ write_one_predicate_function (struct pred_data *p) add_mode_tests (p); /* A normal predicate can legitimately not look at enum machine_mode - if it accepts only CONST_INTs and/or CONST_DOUBLEs. */ + if it accepts only CONST_INTs and/or CONST_WIDE_INT and/or CONST_DOUBLEs. */ printf ("int\n%s (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)\n{\n", p->name); write_predicate_stmts (p->exp); @@ -810,7 +810,6 @@ add_constraint (const char *name, const char *regclass, { enum rtx_code appropriate_code = is_const_int ? CONST_INT : CONST_DOUBLE; - /* Consider relaxing this requirement in the future. 
*/ if (regclass || GET_CODE (exp) != AND @@ -1075,12 +1074,17 @@ write_tm_constrs_h (void) if (needs_ival) puts (" if (CONST_INT_P (op))\n" " ival = INTVAL (op);"); +#if TARGET_SUPPORTS_WIDE_INT + if (needs_lval || needs_hval) + error ("you can't use lval or hval"); +#else if (needs_hval) puts (" if (GET_CODE (op) == CONST_DOUBLE && mode == VOIDmode)" " hval = CONST_DOUBLE_HIGH (op);"); if (needs_lval) puts (" if (GET_CODE (op) == CONST_DOUBLE && mode == VOIDmode)" " lval = CONST_DOUBLE_LOW (op);"); +#endif if (needs_rval) puts (" if (GET_CODE (op) == CONST_DOUBLE && mode != VOIDmode)" " rval = CONST_DOUBLE_REAL_VALUE (op);"); diff --git a/gcc/genrecog.c b/gcc/genrecog.c index 814be7d17e3..14a7e1561ee 100644 --- a/gcc/genrecog.c +++ b/gcc/genrecog.c @@ -588,6 +588,7 @@ validate_pattern (rtx pattern, rtx insn, rtx set, int set_code) && GET_CODE (src) != PC && GET_CODE (src) != CC0 && !CONST_INT_P (src) + && !CONST_WIDE_INT_P (src) && GET_CODE (src) != CALL) { const char *which; @@ -772,13 +773,14 @@ add_to_sequence (rtx pattern, struct decision_head *last, We can optimize the generated code a little if either (a) the predicate only accepts one code, or (b) the - predicate does not allow CONST_INT, in which case it - can match only if the modes match. */ + predicate does not allow CONST_INT or CONST_WIDE_INT, + in which case it can match only if the modes match. 
*/ pred = lookup_predicate (pred_name); if (pred) { test->u.pred.data = pred; - allows_const_int = pred->codes[CONST_INT]; + allows_const_int = (pred->codes[CONST_INT] + || pred->codes[CONST_WIDE_INT]); if (was_code == MATCH_PARALLEL && pred->singleton != PARALLEL) message_with_line (pattern_lineno, diff --git a/gcc/gensupport.c b/gcc/gensupport.c index e6c5c23a161..1b80f956d1d 100644 --- a/gcc/gensupport.c +++ b/gcc/gensupport.c @@ -2806,7 +2806,13 @@ static const struct std_pred_table std_preds[] = { {"scratch_operand", false, false, {SCRATCH, REG}}, {"immediate_operand", false, true, {UNKNOWN}}, {"const_int_operand", false, false, {CONST_INT}}, +#if TARGET_SUPPORTS_WIDE_INT + {"const_wide_int_operand", false, false, {CONST_WIDE_INT}}, + {"const_scalar_int_operand", false, false, {CONST_INT, CONST_WIDE_INT}}, + {"const_double_operand", false, false, {CONST_DOUBLE}}, +#else {"const_double_operand", false, false, {CONST_INT, CONST_DOUBLE}}, +#endif {"nonimmediate_operand", false, false, {SUBREG, REG, MEM}}, {"nonmemory_operand", false, true, {SUBREG, REG}}, {"push_operand", false, false, {MEM}}, diff --git a/gcc/gimple-fold.c b/gcc/gimple-fold.c index d1349588fbd..0f35e3ddb39 100644 --- a/gcc/gimple-fold.c +++ b/gcc/gimple-fold.c @@ -1065,7 +1065,7 @@ gimple_extract_devirt_binfo_from_cst (tree cst, tree expected_type) continue; pos = int_bit_position (fld); - size = tree_low_cst (DECL_SIZE (fld), 1); + size = tree_to_uhwi (DECL_SIZE (fld)); if (pos <= offset && (pos + size) > offset) break; } @@ -1142,7 +1142,7 @@ gimple_fold_call (gimple_stmt_iterator *gsi, bool inplace) if (binfo) { HOST_WIDE_INT token - = TREE_INT_CST_LOW (OBJ_TYPE_REF_TOKEN (callee)); + = tree_to_hwi (OBJ_TYPE_REF_TOKEN (callee)); tree fndecl = gimple_get_virt_method_for_binfo (token, binfo); if (fndecl) { @@ -2709,9 +2709,9 @@ get_base_constructor (tree base, HOST_WIDE_INT *bit_offset, { if (!integer_zerop (TREE_OPERAND (base, 1))) { - if (!host_integerp (TREE_OPERAND (base, 1), 0)) + if 
(!tree_fits_shwi_p (TREE_OPERAND (base, 1))) return NULL_TREE; - *bit_offset += (mem_ref_offset (base).low + *bit_offset += (mem_ref_offset (base).to_short_addr () * BITS_PER_UNIT); } @@ -2806,9 +2806,10 @@ fold_array_ctor_reference (tree type, tree ctor, { unsigned HOST_WIDE_INT cnt; tree cfield, cval; - double_int low_bound, elt_size; - double_int index, max_index; - double_int access_index; + addr_wide_int low_bound; + addr_wide_int elt_size; + addr_wide_int index, max_index; + addr_wide_int access_index; tree domain_type = NULL_TREE, index_type = NULL_TREE; HOST_WIDE_INT inner_offset; @@ -2820,31 +2821,29 @@ fold_array_ctor_reference (tree type, tree ctor, /* Static constructors for variably sized objects makes no sense. */ gcc_assert (TREE_CODE (TYPE_MIN_VALUE (domain_type)) == INTEGER_CST); index_type = TREE_TYPE (TYPE_MIN_VALUE (domain_type)); - low_bound = tree_to_double_int (TYPE_MIN_VALUE (domain_type)); + low_bound = TYPE_MIN_VALUE (domain_type); } else - low_bound = double_int_zero; + low_bound = 0; /* Static constructors for variably sized objects makes no sense. */ gcc_assert (TREE_CODE (TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (ctor)))) == INTEGER_CST); - elt_size = - tree_to_double_int (TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (ctor)))); - + elt_size = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (ctor))); /* We can handle only constantly sized accesses that are known to not be larger than size of array element. */ if (!TYPE_SIZE_UNIT (type) || TREE_CODE (TYPE_SIZE_UNIT (type)) != INTEGER_CST - || elt_size.slt (tree_to_double_int (TYPE_SIZE_UNIT (type)))) + || wi::lts_p (elt_size, TYPE_SIZE_UNIT (type))) return NULL_TREE; /* Compute the array index we look for. 
*/ - access_index = double_int::from_uhwi (offset / BITS_PER_UNIT) - .udiv (elt_size, TRUNC_DIV_EXPR); + access_index = wi::udiv_trunc (addr_wide_int (offset / BITS_PER_UNIT), + elt_size); access_index += low_bound; if (index_type) - access_index = access_index.ext (TYPE_PRECISION (index_type), - TYPE_UNSIGNED (index_type)); + access_index = wi::ext (access_index, TYPE_PRECISION (index_type), + TYPE_SIGN (index_type)); /* And offset within the access. */ inner_offset = offset % (elt_size.to_uhwi () * BITS_PER_UNIT); @@ -2854,9 +2853,10 @@ fold_array_ctor_reference (tree type, tree ctor, if (inner_offset + size > elt_size.to_uhwi () * BITS_PER_UNIT) return NULL_TREE; - index = low_bound - double_int_one; + index = low_bound - 1; if (index_type) - index = index.ext (TYPE_PRECISION (index_type), TYPE_UNSIGNED (index_type)); + index = wi::ext (index, TYPE_PRECISION (index_type), + TYPE_SIGN (index_type)); FOR_EACH_CONSTRUCTOR_ELT (CONSTRUCTOR_ELTS (ctor), cnt, cfield, cval) { @@ -2866,26 +2866,26 @@ fold_array_ctor_reference (tree type, tree ctor, if (cfield) { if (TREE_CODE (cfield) == INTEGER_CST) - max_index = index = tree_to_double_int (cfield); + max_index = index = cfield; else { gcc_assert (TREE_CODE (cfield) == RANGE_EXPR); - index = tree_to_double_int (TREE_OPERAND (cfield, 0)); - max_index = tree_to_double_int (TREE_OPERAND (cfield, 1)); + index = TREE_OPERAND (cfield, 0); + max_index = TREE_OPERAND (cfield, 1); } } else { - index += double_int_one; + index += 1; if (index_type) - index = index.ext (TYPE_PRECISION (index_type), - TYPE_UNSIGNED (index_type)); + index = wi::ext (index, TYPE_PRECISION (index_type), + TYPE_SIGN (index_type)); max_index = index; } /* Do we have match? 
*/ - if (access_index.cmp (index, 1) >= 0 - && access_index.cmp (max_index, 1) <= 0) + if (wi::cmpu (access_index, index) >= 0 + && wi::cmpu (access_index, max_index) <= 0) return fold_ctor_reference (type, cval, inner_offset, size, from_decl); } @@ -2912,10 +2912,9 @@ fold_nonarray_ctor_reference (tree type, tree ctor, tree byte_offset = DECL_FIELD_OFFSET (cfield); tree field_offset = DECL_FIELD_BIT_OFFSET (cfield); tree field_size = DECL_SIZE (cfield); - double_int bitoffset; - double_int byte_offset_cst = tree_to_double_int (byte_offset); - double_int bits_per_unit_cst = double_int::from_uhwi (BITS_PER_UNIT); - double_int bitoffset_end, access_end; + addr_wide_int bitoffset; + addr_wide_int byte_offset_cst = byte_offset; + addr_wide_int bitoffset_end, access_end; /* Variable sized objects in static constructors makes no sense, but field_size can be NULL for flexible array members. */ @@ -2926,30 +2925,28 @@ fold_nonarray_ctor_reference (tree type, tree ctor, : TREE_CODE (TREE_TYPE (cfield)) == ARRAY_TYPE)); /* Compute bit offset of the field. */ - bitoffset = tree_to_double_int (field_offset) - + byte_offset_cst * bits_per_unit_cst; + bitoffset = wi::add (field_offset, byte_offset_cst * BITS_PER_UNIT); /* Compute bit offset where the field ends. */ if (field_size != NULL_TREE) - bitoffset_end = bitoffset + tree_to_double_int (field_size); + bitoffset_end = bitoffset + field_size; else - bitoffset_end = double_int_zero; + bitoffset_end = 0; - access_end = double_int::from_uhwi (offset) - + double_int::from_uhwi (size); + access_end = addr_wide_int (offset) + size; /* Is there any overlap between [OFFSET, OFFSET+SIZE) and [BITOFFSET, BITOFFSET_END)? 
*/ - if (access_end.cmp (bitoffset, 0) > 0 + if (wi::cmps (access_end, bitoffset) > 0 && (field_size == NULL_TREE - || double_int::from_uhwi (offset).slt (bitoffset_end))) + || wi::lts_p (offset, bitoffset_end))) { - double_int inner_offset = double_int::from_uhwi (offset) - bitoffset; + addr_wide_int inner_offset = addr_wide_int (offset) - bitoffset; /* We do have overlap. Now see if field is large enough to cover the access. Give up for accesses spanning multiple fields. */ - if (access_end.cmp (bitoffset_end, 0) > 0) + if (wi::cmps (access_end, bitoffset_end) > 0) return NULL_TREE; - if (double_int::from_uhwi (offset).slt (bitoffset)) + if (wi::lts_p (offset, bitoffset)) return NULL_TREE; return fold_ctor_reference (type, cval, inner_offset.to_uhwi (), size, @@ -3039,38 +3036,42 @@ fold_const_aggregate_ref_1 (tree t, tree (*valueize) (tree)) && (idx = (*valueize) (TREE_OPERAND (t, 1))) && TREE_CODE (idx) == INTEGER_CST) { - tree low_bound, unit_size; - double_int doffset; + tree low_bound = array_ref_low_bound (t); + tree unit_size = array_ref_element_size (t); /* If the resulting bit-offset is constant, track it. */ - if ((low_bound = array_ref_low_bound (t), - TREE_CODE (low_bound) == INTEGER_CST) - && (unit_size = array_ref_element_size (t), - host_integerp (unit_size, 1)) - && (doffset = (TREE_INT_CST (idx) - TREE_INT_CST (low_bound)) - .sext (TYPE_PRECISION (TREE_TYPE (idx))), - doffset.fits_shwi ())) + if ((TREE_CODE (low_bound) == INTEGER_CST) + && (tree_fits_uhwi_p (unit_size))) { - offset = doffset.to_shwi (); - offset *= TREE_INT_CST_LOW (unit_size); - offset *= BITS_PER_UNIT; - - base = TREE_OPERAND (t, 0); - ctor = get_base_constructor (base, &offset, valueize); - /* Empty constructor. Always fold to 0. */ - if (ctor == error_mark_node) - return build_zero_cst (TREE_TYPE (t)); - /* Out of bound array access. Value is undefined, - but don't fold. */ - if (offset < 0) - return NULL_TREE; - /* We can not determine ctor. 
*/ - if (!ctor) - return NULL_TREE; - return fold_ctor_reference (TREE_TYPE (t), ctor, offset, - TREE_INT_CST_LOW (unit_size) - * BITS_PER_UNIT, - base); + addr_wide_int woffset + = wi::sext (addr_wide_int (idx) - low_bound, + TYPE_PRECISION (TREE_TYPE (idx))); + + if (wi::fits_shwi_p (woffset)) + { + offset = woffset.to_shwi (); + /* TODO: This code seems wrong, multiply then check + to see if it fits. */ + offset *= tree_to_hwi (unit_size); + offset *= BITS_PER_UNIT; + + base = TREE_OPERAND (t, 0); + ctor = get_base_constructor (base, &offset, valueize); + /* Empty constructor. Always fold to 0. */ + if (ctor == error_mark_node) + return build_zero_cst (TREE_TYPE (t)); + /* Out of bound array access. Value is undefined, + but don't fold. */ + if (offset < 0) + return NULL_TREE; + /* We can not determine ctor. */ + if (!ctor) + return NULL_TREE; + return fold_ctor_reference (TREE_TYPE (t), ctor, offset, + tree_to_uhwi (unit_size) + * BITS_PER_UNIT, + base); + } } } /* Fallthru. */ @@ -3140,7 +3141,7 @@ gimple_get_virt_method_for_binfo (HOST_WIDE_INT token, tree known_binfo) if (TREE_CODE (v) == POINTER_PLUS_EXPR) { - offset = tree_low_cst (TREE_OPERAND (v, 1), 1) * BITS_PER_UNIT; + offset = tree_to_uhwi (TREE_OPERAND (v, 1)) * BITS_PER_UNIT; v = TREE_OPERAND (v, 0); } else @@ -3166,7 +3167,7 @@ gimple_get_virt_method_for_binfo (HOST_WIDE_INT token, tree known_binfo) return NULL_TREE; } gcc_checking_assert (TREE_CODE (TREE_TYPE (v)) == ARRAY_TYPE); - size = tree_low_cst (TYPE_SIZE (TREE_TYPE (TREE_TYPE (v))), 1); + size = tree_to_uhwi (TYPE_SIZE (TREE_TYPE (TREE_TYPE (v)))); offset += token * size; fn = fold_ctor_reference (TREE_TYPE (TREE_TYPE (v)), init, offset, size, v); @@ -3285,7 +3286,7 @@ gimple_val_nonnegative_real_p (tree val) arg1 = gimple_call_arg (def_stmt, 1); if (TREE_CODE (arg1) == INTEGER_CST - && (TREE_INT_CST_LOW (arg1) & 1) == 0) + && (tree_to_hwi (arg1) & 1) == 0) return true; break; @@ -3306,7 +3307,7 @@ gimple_val_nonnegative_real_p (tree val) 
if ((n & 1) == 0) { REAL_VALUE_TYPE cint; - real_from_integer (&cint, VOIDmode, n, n < 0 ? -1 : 0, 0); + real_from_integer (&cint, VOIDmode, n, SIGNED); if (real_identical (&c, &cint)) return true; } @@ -3390,12 +3391,12 @@ gimple_fold_indirect_ref (tree t) if (TREE_CODE (addr) == ADDR_EXPR && TREE_CODE (TREE_TYPE (addrtype)) == VECTOR_TYPE && useless_type_conversion_p (type, TREE_TYPE (TREE_TYPE (addrtype))) - && host_integerp (off, 1)) + && tree_fits_uhwi_p (off)) { - unsigned HOST_WIDE_INT offset = tree_low_cst (off, 1); + unsigned HOST_WIDE_INT offset = tree_to_uhwi (off); tree part_width = TYPE_SIZE (type); unsigned HOST_WIDE_INT part_widthi - = tree_low_cst (part_width, 0) / BITS_PER_UNIT; + = tree_to_shwi (part_width) / BITS_PER_UNIT; unsigned HOST_WIDE_INT indexi = offset * BITS_PER_UNIT; tree index = bitsize_int (indexi); if (offset / part_widthi @@ -3419,9 +3420,7 @@ gimple_fold_indirect_ref (tree t) || DECL_P (TREE_OPERAND (addr, 0))) return fold_build2 (MEM_REF, type, addr, - build_int_cst_wide (ptype, - TREE_INT_CST_LOW (off), - TREE_INT_CST_HIGH (off))); + wide_int_to_tree (ptype, off)); } /* *(foo *)fooarrptr => (*fooarrptr)[0] */ diff --git a/gcc/gimple-pretty-print.c b/gcc/gimple-pretty-print.c index 14fb1c8c26f..fb333122227 100644 --- a/gcc/gimple-pretty-print.c +++ b/gcc/gimple-pretty-print.c @@ -713,7 +713,7 @@ dump_gimple_call (pretty_printer *buffer, gimple gs, int spc, int flags) pp_string (buffer, " [ "); /* Get the transaction code properties. 
*/ - props = TREE_INT_CST_LOW (t); + props = tree_to_hwi (t); if (props & PR_INSTRUMENTEDCODE) pp_string (buffer, "instrumentedCode "); @@ -1728,7 +1728,7 @@ dump_ssaname_info (pretty_printer *buffer, tree node, int spc) if (!POINTER_TYPE_P (TREE_TYPE (node)) && SSA_NAME_RANGE_INFO (node)) { - double_int min, max; + max_wide_int min, max; value_range_type range_type = get_range_info (node, &min, &max); if (range_type == VR_VARYING) @@ -1737,9 +1737,9 @@ dump_ssaname_info (pretty_printer *buffer, tree node, int spc) { pp_printf (buffer, "# RANGE "); pp_printf (buffer, "%s[", range_type == VR_RANGE ? "" : "~"); - pp_double_int (buffer, min, TYPE_UNSIGNED (TREE_TYPE (node))); + pp_wide_int (buffer, min, TYPE_SIGN (TREE_TYPE (node))); pp_printf (buffer, ", "); - pp_double_int (buffer, max, TYPE_UNSIGNED (TREE_TYPE (node))); + pp_wide_int (buffer, max, TYPE_SIGN (TREE_TYPE (node))); pp_printf (buffer, "]"); newline_and_indent (buffer, spc); } diff --git a/gcc/gimple-ssa-strength-reduction.c b/gcc/gimple-ssa-strength-reduction.c index 2b5e69f7725..f6955bbc189 100644 --- a/gcc/gimple-ssa-strength-reduction.c +++ b/gcc/gimple-ssa-strength-reduction.c @@ -49,6 +49,7 @@ along with GCC; see the file COPYING3. If not see #include "params.h" #include "hash-table.h" #include "tree-ssa-address.h" +#include "wide-int-print.h" /* Information about a strength reduction candidate. Each statement in the candidate table represents an expression of one of the @@ -230,7 +231,7 @@ struct slsr_cand_d tree stride; /* The index constant i. */ - double_int index; + max_wide_int index; /* The type of the candidate. This is normally the type of base_expr, but casts may have occurred when combining feeding instructions. @@ -305,7 +306,7 @@ typedef const struct cand_chain_d *const_cand_chain_t; struct incr_info_d { /* The increment that relates a candidate to its basis. */ - double_int incr; + max_wide_int incr; /* How many times the increment occurs in the candidate tree. 
*/ unsigned count; @@ -554,7 +555,7 @@ record_potential_basis (slsr_cand_t c) static slsr_cand_t alloc_cand_and_find_basis (enum cand_kind kind, gimple gs, tree base, - double_int index, tree stride, tree ctype, + const max_wide_int &index, tree stride, tree ctype, unsigned savings) { slsr_cand_t c = (slsr_cand_t) obstack_alloc (&cand_obstack, @@ -603,8 +604,8 @@ stmt_cost (gimple gs, bool speed) case MULT_EXPR: rhs2 = gimple_assign_rhs2 (gs); - if (host_integerp (rhs2, 0)) - return mult_by_coeff_cost (TREE_INT_CST_LOW (rhs2), lhs_mode, speed); + if (tree_fits_shwi_p (rhs2)) + return mult_by_coeff_cost (tree_to_shwi (rhs2), lhs_mode, speed); gcc_assert (TREE_CODE (rhs1) != INTEGER_CST); return mul_cost (speed, lhs_mode); @@ -745,8 +746,8 @@ slsr_process_phi (gimple phi, bool speed) CAND_PHI. */ base_type = TREE_TYPE (arg0_base); - c = alloc_cand_and_find_basis (CAND_PHI, phi, arg0_base, double_int_zero, - integer_one_node, base_type, savings); + c = alloc_cand_and_find_basis (CAND_PHI, phi, arg0_base, + 0, integer_one_node, base_type, savings); /* Add the candidate to the statement-candidate mapping. */ add_cand_for_stmt (phi, c); @@ -763,7 +764,7 @@ slsr_process_phi (gimple phi, bool speed) int (i * S). Otherwise, just return double int zero. */ -static double_int +static max_wide_int backtrace_base_for_ref (tree *pbase) { tree base_in = *pbase; @@ -779,19 +780,19 @@ backtrace_base_for_ref (tree *pbase) base_in = get_unwidened (base_in, NULL_TREE); if (TREE_CODE (base_in) != SSA_NAME) - return tree_to_double_int (integer_zero_node); + return 0; base_cand = base_cand_from_table (base_in); while (base_cand && base_cand->kind != CAND_PHI) { if (base_cand->kind == CAND_ADD - && base_cand->index.is_one () + && base_cand->index == 1 && TREE_CODE (base_cand->stride) == INTEGER_CST) { /* X = B + (1 * S), S is integer constant. 
*/ *pbase = base_cand->base_expr; - return tree_to_double_int (base_cand->stride); + return base_cand->stride; } else if (base_cand->kind == CAND_ADD && TREE_CODE (base_cand->stride) == INTEGER_CST @@ -808,7 +809,7 @@ backtrace_base_for_ref (tree *pbase) base_cand = NULL; } - return tree_to_double_int (integer_zero_node); + return 0; } /* Look for the following pattern: @@ -838,38 +839,35 @@ backtrace_base_for_ref (tree *pbase) *PINDEX: C1 + (C2 * C3) + C4 + (C5 * C3) */ static bool -restructure_reference (tree *pbase, tree *poffset, double_int *pindex, +restructure_reference (tree *pbase, tree *poffset, max_wide_int *pindex, tree *ptype) { tree base = *pbase, offset = *poffset; - double_int index = *pindex; - double_int bpu = double_int::from_uhwi (BITS_PER_UNIT); - tree mult_op0, mult_op1, t1, t2, type; - double_int c1, c2, c3, c4, c5; + max_wide_int index = *pindex; + tree mult_op0, t1, t2, type; + max_wide_int c1, c2, c3, c4, c5; if (!base || !offset || TREE_CODE (base) != MEM_REF || TREE_CODE (offset) != MULT_EXPR || TREE_CODE (TREE_OPERAND (offset, 1)) != INTEGER_CST - || !index.umod (bpu, FLOOR_MOD_EXPR).is_zero ()) + || wi::umod_floor (index, BITS_PER_UNIT) != 0) return false; t1 = TREE_OPERAND (base, 0); - c1 = mem_ref_offset (base); + c1 = max_wide_int::from (mem_ref_offset (base), SIGNED); type = TREE_TYPE (TREE_OPERAND (base, 1)); mult_op0 = TREE_OPERAND (offset, 0); - mult_op1 = TREE_OPERAND (offset, 1); - - c3 = tree_to_double_int (mult_op1); + c3 = TREE_OPERAND (offset, 1); if (TREE_CODE (mult_op0) == PLUS_EXPR) if (TREE_CODE (TREE_OPERAND (mult_op0, 1)) == INTEGER_CST) { t2 = TREE_OPERAND (mult_op0, 0); - c2 = tree_to_double_int (TREE_OPERAND (mult_op0, 1)); + c2 = TREE_OPERAND (mult_op0, 1); } else return false; @@ -879,7 +877,7 @@ restructure_reference (tree *pbase, tree *poffset, double_int *pindex, if (TREE_CODE (TREE_OPERAND (mult_op0, 1)) == INTEGER_CST) { t2 = TREE_OPERAND (mult_op0, 0); - c2 = -tree_to_double_int (TREE_OPERAND (mult_op0, 
1)); + c2 = -(max_wide_int)TREE_OPERAND (mult_op0, 1); } else return false; @@ -887,15 +885,15 @@ restructure_reference (tree *pbase, tree *poffset, double_int *pindex, else { t2 = mult_op0; - c2 = double_int_zero; + c2 = 0; } - c4 = index.udiv (bpu, FLOOR_DIV_EXPR); + c4 = wi::udiv_floor (index, BITS_PER_UNIT); c5 = backtrace_base_for_ref (&t2); *pbase = t1; *poffset = fold_build2 (MULT_EXPR, sizetype, fold_convert (sizetype, t2), - double_int_to_tree (sizetype, c3)); + wide_int_to_tree (sizetype, c3)); *pindex = c1 + c2 * c3 + c4 + c5 * c3; *ptype = type; @@ -912,7 +910,7 @@ slsr_process_ref (gimple gs) HOST_WIDE_INT bitsize, bitpos; enum machine_mode mode; int unsignedp, volatilep; - double_int index; + max_wide_int index; slsr_cand_t c; if (gimple_vdef (gs)) @@ -928,7 +926,7 @@ slsr_process_ref (gimple gs) base = get_inner_reference (ref_expr, &bitsize, &bitpos, &offset, &mode, &unsignedp, &volatilep, false); - index = double_int::from_uhwi (bitpos); + index = bitpos; if (!restructure_reference (&base, &offset, &index, &type)) return; @@ -949,7 +947,7 @@ static slsr_cand_t create_mul_ssa_cand (gimple gs, tree base_in, tree stride_in, bool speed) { tree base = NULL_TREE, stride = NULL_TREE, ctype = NULL_TREE; - double_int index; + max_wide_int index; unsigned savings = 0; slsr_cand_t c; slsr_cand_t base_cand = base_cand_from_table (base_in); @@ -981,7 +979,7 @@ create_mul_ssa_cand (gimple gs, tree base_in, tree stride_in, bool speed) ============================ X = B + ((i' * S) * Z) */ base = base_cand->base_expr; - index = base_cand->index * tree_to_double_int (base_cand->stride); + index = base_cand->index * base_cand->stride; stride = stride_in; ctype = base_cand->cand_type; if (has_single_use (base_in)) @@ -1000,7 +998,7 @@ create_mul_ssa_cand (gimple gs, tree base_in, tree stride_in, bool speed) /* No interpretations had anything useful to propagate, so produce X = (Y + 0) * Z. 
*/ base = base_in; - index = double_int_zero; + index = 0; stride = stride_in; ctype = TREE_TYPE (base_in); } @@ -1019,7 +1017,7 @@ static slsr_cand_t create_mul_imm_cand (gimple gs, tree base_in, tree stride_in, bool speed) { tree base = NULL_TREE, stride = NULL_TREE, ctype = NULL_TREE; - double_int index, temp; + max_wide_int index, temp; unsigned savings = 0; slsr_cand_t c; slsr_cand_t base_cand = base_cand_from_table (base_in); @@ -1037,9 +1035,8 @@ create_mul_imm_cand (gimple gs, tree base_in, tree stride_in, bool speed) X = (B + i') * (S * c) */ base = base_cand->base_expr; index = base_cand->index; - temp = tree_to_double_int (base_cand->stride) - * tree_to_double_int (stride_in); - stride = double_int_to_tree (TREE_TYPE (stride_in), temp); + temp = wi::mul (base_cand->stride, stride_in); + stride = wide_int_to_tree (TREE_TYPE (stride_in), temp); ctype = base_cand->cand_type; if (has_single_use (base_in)) savings = (base_cand->dead_savings @@ -1060,7 +1057,7 @@ create_mul_imm_cand (gimple gs, tree base_in, tree stride_in, bool speed) + stmt_cost (base_cand->cand_stmt, speed)); } else if (base_cand->kind == CAND_ADD - && base_cand->index.is_one () + && base_cand->index == 1 && TREE_CODE (base_cand->stride) == INTEGER_CST) { /* Y = B + (1 * S), S constant @@ -1068,7 +1065,7 @@ create_mul_imm_cand (gimple gs, tree base_in, tree stride_in, bool speed) =========================== X = (B + S) * c */ base = base_cand->base_expr; - index = tree_to_double_int (base_cand->stride); + index = base_cand->stride; stride = stride_in; ctype = base_cand->cand_type; if (has_single_use (base_in)) @@ -1087,7 +1084,7 @@ create_mul_imm_cand (gimple gs, tree base_in, tree stride_in, bool speed) /* No interpretations had anything useful to propagate, so produce X = (Y + 0) * c. 
*/ base = base_in; - index = double_int_zero; + index = 0; stride = stride_in; ctype = TREE_TYPE (base_in); } @@ -1150,7 +1147,7 @@ create_add_ssa_cand (gimple gs, tree base_in, tree addend_in, bool subtract_p, bool speed) { tree base = NULL_TREE, stride = NULL_TREE, ctype = NULL; - double_int index; + max_wide_int index; unsigned savings = 0; slsr_cand_t c; slsr_cand_t base_cand = base_cand_from_table (base_in); @@ -1161,7 +1158,7 @@ create_add_ssa_cand (gimple gs, tree base_in, tree addend_in, while (addend_cand && !base && addend_cand->kind != CAND_PHI) { if (addend_cand->kind == CAND_MULT - && addend_cand->index.is_zero () + && addend_cand->index == 0 && TREE_CODE (addend_cand->stride) == INTEGER_CST) { /* Z = (B + 0) * S, S constant @@ -1169,7 +1166,7 @@ create_add_ssa_cand (gimple gs, tree base_in, tree addend_in, =========================== X = Y + ((+/-1 * S) * B) */ base = base_in; - index = tree_to_double_int (addend_cand->stride); + index = addend_cand->stride; if (subtract_p) index = -index; stride = addend_cand->base_expr; @@ -1188,7 +1185,7 @@ create_add_ssa_cand (gimple gs, tree base_in, tree addend_in, while (base_cand && !base && base_cand->kind != CAND_PHI) { if (base_cand->kind == CAND_ADD - && (base_cand->index.is_zero () + && (base_cand->index == 0 || operand_equal_p (base_cand->stride, integer_zero_node, 0))) { @@ -1197,7 +1194,7 @@ create_add_ssa_cand (gimple gs, tree base_in, tree addend_in, ============================ X = B + (+/-1 * Z) */ base = base_cand->base_expr; - index = subtract_p ? double_int_minus_one : double_int_one; + index = subtract_p ? 
-1 : 1; stride = addend_in; ctype = base_cand->cand_type; if (has_single_use (base_in)) @@ -1211,7 +1208,7 @@ create_add_ssa_cand (gimple gs, tree base_in, tree addend_in, while (subtrahend_cand && !base && subtrahend_cand->kind != CAND_PHI) { if (subtrahend_cand->kind == CAND_MULT - && subtrahend_cand->index.is_zero () + && subtrahend_cand->index == 0 && TREE_CODE (subtrahend_cand->stride) == INTEGER_CST) { /* Z = (B + 0) * S, S constant @@ -1219,7 +1216,7 @@ create_add_ssa_cand (gimple gs, tree base_in, tree addend_in, =========================== Value: X = Y + ((-1 * S) * B) */ base = base_in; - index = tree_to_double_int (subtrahend_cand->stride); + index = subtrahend_cand->stride; index = -index; stride = subtrahend_cand->base_expr; ctype = TREE_TYPE (base_in); @@ -1246,7 +1243,7 @@ create_add_ssa_cand (gimple gs, tree base_in, tree addend_in, /* No interpretations had anything useful to propagate, so produce X = Y + (1 * Z). */ base = base_in; - index = subtract_p ? double_int_minus_one : double_int_one; + index = subtract_p ? -1 : 1; stride = addend_in; ctype = TREE_TYPE (base_in); } @@ -1261,22 +1258,21 @@ create_add_ssa_cand (gimple gs, tree base_in, tree addend_in, about BASE_IN into the new candidate. Return the new candidate. 
*/ static slsr_cand_t -create_add_imm_cand (gimple gs, tree base_in, double_int index_in, bool speed) +create_add_imm_cand (gimple gs, tree base_in, max_wide_int index_in, bool speed) { enum cand_kind kind = CAND_ADD; tree base = NULL_TREE, stride = NULL_TREE, ctype = NULL_TREE; - double_int index, multiple; + max_wide_int index, multiple; unsigned savings = 0; slsr_cand_t c; slsr_cand_t base_cand = base_cand_from_table (base_in); while (base_cand && !base && base_cand->kind != CAND_PHI) { - bool unsigned_p = TYPE_UNSIGNED (TREE_TYPE (base_cand->stride)); + signop sign = TYPE_SIGN (TREE_TYPE (base_cand->stride)); if (TREE_CODE (base_cand->stride) == INTEGER_CST - && index_in.multiple_of (tree_to_double_int (base_cand->stride), - unsigned_p, &multiple)) + && wi::multiple_of_p (index_in, base_cand->stride, sign, &multiple)) { /* Y = (B + i') * S, S constant, c = kS for some integer k X = Y + c @@ -1361,10 +1357,10 @@ slsr_process_add (gimple gs, tree rhs1, tree rhs2, bool speed) } else { - double_int index; + max_wide_int index; /* Record an interpretation for the add-immediate. */ - index = tree_to_double_int (rhs2); + index = rhs2; if (subtract_p) index = -index; @@ -1512,10 +1508,10 @@ slsr_process_cast (gimple gs, tree rhs1, bool speed) The first of these is somewhat arbitrary, but the choice of 1 for the stride simplifies the logic for propagating casts into their uses. 
*/ - c = alloc_cand_and_find_basis (CAND_ADD, gs, rhs1, double_int_zero, - integer_one_node, ctype, 0); - c2 = alloc_cand_and_find_basis (CAND_MULT, gs, rhs1, double_int_zero, - integer_one_node, ctype, 0); + c = alloc_cand_and_find_basis (CAND_ADD, gs, rhs1, + 0, integer_one_node, ctype, 0); + c2 = alloc_cand_and_find_basis (CAND_MULT, gs, rhs1, + 0, integer_one_node, ctype, 0); c->next_interp = c2->cand_num; } @@ -1569,10 +1565,10 @@ slsr_process_copy (gimple gs, tree rhs1, bool speed) The first of these is somewhat arbitrary, but the choice of 1 for the stride simplifies the logic for propagating casts into their uses. */ - c = alloc_cand_and_find_basis (CAND_ADD, gs, rhs1, double_int_zero, - integer_one_node, TREE_TYPE (rhs1), 0); - c2 = alloc_cand_and_find_basis (CAND_MULT, gs, rhs1, double_int_zero, - integer_one_node, TREE_TYPE (rhs1), 0); + c = alloc_cand_and_find_basis (CAND_ADD, gs, rhs1, + 0, integer_one_node, TREE_TYPE (rhs1), 0); + c2 = alloc_cand_and_find_basis (CAND_MULT, gs, rhs1, + 0, integer_one_node, TREE_TYPE (rhs1), 0); c->next_interp = c2->cand_num; } @@ -1689,7 +1685,7 @@ dump_candidate (slsr_cand_t c) fputs (" MULT : (", dump_file); print_generic_expr (dump_file, c->base_expr, 0); fputs (" + ", dump_file); - dump_double_int (dump_file, c->index, false); + print_decs (c->index, dump_file); fputs (") * ", dump_file); print_generic_expr (dump_file, c->stride, 0); fputs (" : ", dump_file); @@ -1698,7 +1694,7 @@ dump_candidate (slsr_cand_t c) fputs (" ADD : ", dump_file); print_generic_expr (dump_file, c->base_expr, 0); fputs (" + (", dump_file); - dump_double_int (dump_file, c->index, false); + print_decs (c->index, dump_file); fputs (" * ", dump_file); print_generic_expr (dump_file, c->stride, 0); fputs (") : ", dump_file); @@ -1709,7 +1705,7 @@ dump_candidate (slsr_cand_t c) fputs (" + (", dump_file); print_generic_expr (dump_file, c->stride, 0); fputs (") + ", dump_file); - dump_double_int (dump_file, c->index, false); + print_decs (c->index, 
dump_file); fputs (" : ", dump_file); break; case CAND_PHI: @@ -1788,7 +1784,7 @@ dump_incr_vec (void) for (i = 0; i < incr_vec_len; i++) { fprintf (dump_file, "%3d increment: ", i); - dump_double_int (dump_file, incr_vec[i].incr, false); + print_decs (incr_vec[i].incr, dump_file); fprintf (dump_file, "\n count: %d", incr_vec[i].count); fprintf (dump_file, "\n cost: %d", incr_vec[i].cost); fputs ("\n initializer: ", dump_file); @@ -1819,7 +1815,7 @@ replace_ref (tree *expr, slsr_cand_t c) add_expr = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (c->base_expr), c->base_expr, c->stride); mem_ref = fold_build2 (MEM_REF, acc_type, add_expr, - double_int_to_tree (c->cand_type, c->index)); + wide_int_to_tree (c->cand_type, c->index)); /* Gimplify the base addressing expression for the new MEM_REF tree. */ gimple_stmt_iterator gsi = gsi_for_stmt (c->cand_stmt); @@ -1874,7 +1870,7 @@ phi_dependent_cand_p (slsr_cand_t c) /* Calculate the increment required for candidate C relative to its basis. */ -static double_int +static max_wide_int cand_increment (slsr_cand_t c) { slsr_cand_t basis; @@ -1897,12 +1893,12 @@ cand_increment (slsr_cand_t c) for this candidate, return the absolute value of that increment instead. */ -static inline double_int +static inline max_wide_int cand_abs_increment (slsr_cand_t c) { - double_int increment = cand_increment (c); + max_wide_int increment = cand_increment (c); - if (!address_arithmetic_p && increment.is_negative ()) + if (!address_arithmetic_p && wi::neg_p (increment)) increment = -increment; return increment; @@ -1921,17 +1917,18 @@ cand_already_replaced (slsr_cand_t c) replace_conditional_candidate. 
*/ static void -replace_mult_candidate (slsr_cand_t c, tree basis_name, double_int bump) +replace_mult_candidate (slsr_cand_t c, tree basis_name, const max_wide_int &bump_in) { tree target_type = TREE_TYPE (gimple_assign_lhs (c->cand_stmt)); enum tree_code cand_code = gimple_assign_rhs_code (c->cand_stmt); + max_wide_int bump = bump_in; /* It is highly unlikely, but possible, that the resulting bump doesn't fit in a HWI. Abandon the replacement in this case. This does not affect siblings or dependents of C. Restriction to signed HWI is conservative for unsigned types but allows for safe negation without twisted logic. */ - if (bump.fits_shwi () + if (wi::fits_shwi_p (bump) && bump.to_shwi () != HOST_WIDE_INT_MIN /* It is not useful to replace casts, copies, or adds of an SSA name and a constant. */ @@ -1949,13 +1946,13 @@ replace_mult_candidate (slsr_cand_t c, tree basis_name, double_int bump) types, introduce a cast. */ if (!useless_type_conversion_p (target_type, TREE_TYPE (basis_name))) basis_name = introduce_cast_before_cand (c, target_type, basis_name); - if (bump.is_negative ()) + if (wi::neg_p (bump)) { code = MINUS_EXPR; bump = -bump; } - bump_tree = double_int_to_tree (target_type, bump); + bump_tree = wide_int_to_tree (target_type, bump); if (dump_file && (dump_flags & TDF_DETAILS)) { @@ -1963,7 +1960,7 @@ replace_mult_candidate (slsr_cand_t c, tree basis_name, double_int bump) print_gimple_stmt (dump_file, c->cand_stmt, 0, 0); } - if (bump.is_zero ()) + if (bump == 0) { tree lhs = gimple_assign_lhs (c->cand_stmt); gimple copy_stmt = gimple_build_assign (lhs, basis_name); @@ -2024,14 +2021,13 @@ static void replace_unconditional_candidate (slsr_cand_t c) { slsr_cand_t basis; - double_int stride, bump; + max_wide_int bump; if (cand_already_replaced (c)) return; basis = lookup_cand (c->basis); - stride = tree_to_double_int (c->stride); - bump = cand_increment (c) * stride; + bump = cand_increment (c) * c->stride; replace_mult_candidate (c, gimple_assign_lhs 
(basis->cand_stmt), bump); } @@ -2041,7 +2037,7 @@ replace_unconditional_candidate (slsr_cand_t c) MAX_INCR_VEC_LEN increments have been found. */ static inline int -incr_vec_index (double_int increment) +incr_vec_index (max_wide_int increment) { unsigned i; @@ -2061,7 +2057,7 @@ incr_vec_index (double_int increment) static tree create_add_on_incoming_edge (slsr_cand_t c, tree basis_name, - double_int increment, edge e, location_t loc, + max_wide_int increment, edge e, location_t loc, bool known_stride) { basic_block insert_bb; @@ -2072,7 +2068,7 @@ create_add_on_incoming_edge (slsr_cand_t c, tree basis_name, /* If the add candidate along this incoming edge has the same index as C's hidden basis, the hidden basis represents this edge correctly. */ - if (increment.is_zero ()) + if (increment == 0) return basis_name; basis_type = TREE_TYPE (basis_name); @@ -2082,21 +2078,21 @@ create_add_on_incoming_edge (slsr_cand_t c, tree basis_name, { tree bump_tree; enum tree_code code = PLUS_EXPR; - double_int bump = increment * tree_to_double_int (c->stride); - if (bump.is_negative ()) + max_wide_int bump = increment * c->stride; + if (wi::neg_p (bump)) { code = MINUS_EXPR; bump = -bump; } - bump_tree = double_int_to_tree (basis_type, bump); + bump_tree = wide_int_to_tree (basis_type, bump); new_stmt = gimple_build_assign_with_ops (code, lhs, basis_name, bump_tree); } else { int i; - bool negate_incr = (!address_arithmetic_p && increment.is_negative ()); + bool negate_incr = (!address_arithmetic_p && wi::neg_p (increment)); i = incr_vec_index (negate_incr ? 
-increment : increment); gcc_assert (i >= 0); @@ -2106,10 +2102,10 @@ create_add_on_incoming_edge (slsr_cand_t c, tree basis_name, new_stmt = gimple_build_assign_with_ops (code, lhs, basis_name, incr_vec[i].initializer); } - else if (increment.is_one ()) + else if (increment == 1) new_stmt = gimple_build_assign_with_ops (PLUS_EXPR, lhs, basis_name, c->stride); - else if (increment.is_minus_one ()) + else if (increment == -1) new_stmt = gimple_build_assign_with_ops (MINUS_EXPR, lhs, basis_name, c->stride); else @@ -2170,11 +2166,11 @@ create_phi_basis (slsr_cand_t c, gimple from_phi, tree basis_name, /* If the phi argument is the base name of the CAND_PHI, then this incoming arc should use the hidden basis. */ if (operand_equal_p (arg, phi_cand->base_expr, 0)) - if (basis->index.is_zero ()) + if (basis->index == 0) feeding_def = gimple_assign_lhs (basis->cand_stmt); else { - double_int incr = -basis->index; + max_wide_int incr = -basis->index; feeding_def = create_add_on_incoming_edge (c, basis_name, incr, e, loc, known_stride); } @@ -2191,7 +2187,7 @@ create_phi_basis (slsr_cand_t c, gimple from_phi, tree basis_name, else { slsr_cand_t arg_cand = base_cand_from_table (arg); - double_int diff = arg_cand->index - basis->index; + max_wide_int diff = arg_cand->index - basis->index; feeding_def = create_add_on_incoming_edge (c, basis_name, diff, e, loc, known_stride); } @@ -2237,7 +2233,7 @@ replace_conditional_candidate (slsr_cand_t c) tree basis_name, name; slsr_cand_t basis; location_t loc; - double_int stride, bump; + max_wide_int bump; /* Look up the LHS SSA name from C's basis. This will be the RHS1 of the adds we will introduce to create new phi arguments. */ @@ -2250,8 +2246,7 @@ replace_conditional_candidate (slsr_cand_t c) name = create_phi_basis (c, lookup_cand (c->def_phi)->cand_stmt, basis_name, loc, KNOWN_STRIDE); /* Replace C with an add of the new basis phi and a constant. 
*/ - stride = tree_to_double_int (c->stride); - bump = c->index * stride; + bump = c->index * c->stride; replace_mult_candidate (c, name, bump); } @@ -2383,14 +2378,15 @@ count_candidates (slsr_cand_t c) candidates with the same increment, also record T_0 for subsequent use. */ static void -record_increment (slsr_cand_t c, double_int increment, bool is_phi_adjust) +record_increment (slsr_cand_t c, const max_wide_int &increment_in, bool is_phi_adjust) { bool found = false; unsigned i; + max_wide_int increment = increment_in; /* Treat increments that differ only in sign as identical so as to share initializers, unless we are generating pointer arithmetic. */ - if (!address_arithmetic_p && increment.is_negative ()) + if (!address_arithmetic_p && wi::neg_p (increment)) increment = -increment; for (i = 0; i < incr_vec_len; i++) @@ -2434,8 +2430,8 @@ record_increment (slsr_cand_t c, double_int increment, bool is_phi_adjust) if (c->kind == CAND_ADD && !is_phi_adjust && c->index == increment - && (increment.sgt (double_int_one) - || increment.slt (double_int_minus_one)) + && (wi::gts_p (increment, 1) + || wi::lts_p (increment, -1)) && (gimple_assign_rhs_code (c->cand_stmt) == PLUS_EXPR || gimple_assign_rhs_code (c->cand_stmt) == POINTER_PLUS_EXPR)) { @@ -2493,7 +2489,7 @@ record_phi_increments (slsr_cand_t basis, gimple phi) else { slsr_cand_t arg_cand = base_cand_from_table (arg); - double_int diff = arg_cand->index - basis->index; + max_wide_int diff = arg_cand->index - basis->index; record_increment (arg_cand, diff, PHI_ADJUST); } } @@ -2544,7 +2540,7 @@ record_increments (slsr_cand_t c) uses. 
*/ static int -phi_incr_cost (slsr_cand_t c, double_int incr, gimple phi, int *savings) +phi_incr_cost (slsr_cand_t c, const max_wide_int &incr, gimple phi, int *savings) { unsigned i; int cost = 0; @@ -2569,7 +2565,7 @@ phi_incr_cost (slsr_cand_t c, double_int incr, gimple phi, int *savings) else { slsr_cand_t arg_cand = base_cand_from_table (arg); - double_int diff = arg_cand->index - basis->index; + max_wide_int diff = arg_cand->index - basis->index; if (incr == diff) { @@ -2634,10 +2630,10 @@ optimize_cands_for_speed_p (slsr_cand_t c) static int lowest_cost_path (int cost_in, int repl_savings, slsr_cand_t c, - double_int incr, bool count_phis) + const max_wide_int &incr, bool count_phis) { int local_cost, sib_cost, savings = 0; - double_int cand_incr = cand_abs_increment (c); + max_wide_int cand_incr = cand_abs_increment (c); if (cand_already_replaced (c)) local_cost = cost_in; @@ -2680,11 +2676,11 @@ lowest_cost_path (int cost_in, int repl_savings, slsr_cand_t c, would go dead. */ static int -total_savings (int repl_savings, slsr_cand_t c, double_int incr, +total_savings (int repl_savings, slsr_cand_t c, const max_wide_int &incr, bool count_phis) { int savings = 0; - double_int cand_incr = cand_abs_increment (c); + max_wide_int cand_incr = cand_abs_increment (c); if (incr == cand_incr && !cand_already_replaced (c)) savings += repl_savings + c->dead_savings; @@ -2734,7 +2730,7 @@ analyze_increments (slsr_cand_t first_dep, enum machine_mode mode, bool speed) /* If somehow this increment is bigger than a HWI, we won't be optimizing candidates that use it. And if the increment has a count of zero, nothing will be done with it. 
*/ - if (!incr_vec[i].incr.fits_shwi () || !incr_vec[i].count) + if (!wi::fits_shwi_p (incr_vec[i].incr) || !incr_vec[i].count) incr_vec[i].cost = COST_INFINITE; /* Increments of 0, 1, and -1 are always profitable to replace, @@ -2888,7 +2884,7 @@ ncd_for_two_cands (basic_block bb1, basic_block bb2, candidates, return the earliest candidate in the block in *WHERE. */ static basic_block -ncd_with_phi (slsr_cand_t c, double_int incr, gimple phi, +ncd_with_phi (slsr_cand_t c, const max_wide_int &incr, gimple phi, basic_block ncd, slsr_cand_t *where) { unsigned i; @@ -2908,7 +2904,7 @@ ncd_with_phi (slsr_cand_t c, double_int incr, gimple phi, else { slsr_cand_t arg_cand = base_cand_from_table (arg); - double_int diff = arg_cand->index - basis->index; + max_wide_int diff = arg_cand->index - basis->index; if ((incr == diff) || (!address_arithmetic_p && incr == -diff)) ncd = ncd_for_two_cands (ncd, gimple_bb (arg_cand->cand_stmt), @@ -2927,7 +2923,7 @@ ncd_with_phi (slsr_cand_t c, double_int incr, gimple phi, return the earliest candidate in the block in *WHERE. */ static basic_block -ncd_of_cand_and_phis (slsr_cand_t c, double_int incr, slsr_cand_t *where) +ncd_of_cand_and_phis (slsr_cand_t c, const max_wide_int &incr, slsr_cand_t *where) { basic_block ncd = NULL; @@ -2952,7 +2948,7 @@ ncd_of_cand_and_phis (slsr_cand_t c, double_int incr, slsr_cand_t *where) *WHERE. 
*/ static basic_block -nearest_common_dominator_for_cands (slsr_cand_t c, double_int incr, +nearest_common_dominator_for_cands (slsr_cand_t c, const max_wide_int &incr, slsr_cand_t *where) { basic_block sib_ncd = NULL, dep_ncd = NULL, this_ncd = NULL, ncd; @@ -3028,13 +3024,13 @@ insert_initializers (slsr_cand_t c) slsr_cand_t where = NULL; gimple init_stmt; tree stride_type, new_name, incr_tree; - double_int incr = incr_vec[i].incr; + max_wide_int incr = incr_vec[i].incr; if (!profitable_increment_p (i) - || incr.is_one () - || (incr.is_minus_one () + || incr == 1 + || (incr == -1 && gimple_assign_rhs_code (c->cand_stmt) != POINTER_PLUS_EXPR) - || incr.is_zero ()) + || incr == 0) continue; /* We may have already identified an existing initializer that @@ -3063,7 +3059,7 @@ insert_initializers (slsr_cand_t c) /* Create the initializer and insert it in the latest possible dominating position. */ - incr_tree = double_int_to_tree (stride_type, incr); + incr_tree = wide_int_to_tree (stride_type, incr); init_stmt = gimple_build_assign_with_ops (MULT_EXPR, new_name, c->stride, incr_tree); if (where) @@ -3120,9 +3116,9 @@ all_phi_incrs_profitable (slsr_cand_t c, gimple phi) { int j; slsr_cand_t arg_cand = base_cand_from_table (arg); - double_int increment = arg_cand->index - basis->index; + max_wide_int increment = arg_cand->index - basis->index; - if (!address_arithmetic_p && increment.is_negative ()) + if (!address_arithmetic_p && wi::neg_p (increment)) increment = -increment; j = incr_vec_index (increment); @@ -3133,7 +3129,7 @@ all_phi_incrs_profitable (slsr_cand_t c, gimple phi) c->cand_num); print_gimple_stmt (dump_file, phi, 0, 0); fputs (" increment: ", dump_file); - dump_double_int (dump_file, increment, false); + print_decs (increment, dump_file); if (j < 0) fprintf (dump_file, "\n Not replaced; incr_vec overflow.\n"); @@ -3228,7 +3224,7 @@ replace_one_candidate (slsr_cand_t c, unsigned i, tree basis_name) tree orig_rhs1, orig_rhs2; tree rhs2; enum tree_code 
orig_code, repl_code; - double_int cand_incr; + max_wide_int cand_incr; orig_code = gimple_assign_rhs_code (c->cand_stmt); orig_rhs1 = gimple_assign_rhs1 (c->cand_stmt); @@ -3276,7 +3272,7 @@ replace_one_candidate (slsr_cand_t c, unsigned i, tree basis_name) from the basis name, or an add of the stride to the basis name, respectively. It may be necessary to introduce a cast (or reuse an existing cast). */ - else if (cand_incr.is_one ()) + else if (cand_incr == 1) { tree stride_type = TREE_TYPE (c->stride); tree orig_type = TREE_TYPE (orig_rhs2); @@ -3291,7 +3287,7 @@ replace_one_candidate (slsr_cand_t c, unsigned i, tree basis_name) c); } - else if (cand_incr.is_minus_one ()) + else if (cand_incr == -1) { tree stride_type = TREE_TYPE (c->stride); tree orig_type = TREE_TYPE (orig_rhs2); @@ -3318,7 +3314,7 @@ replace_one_candidate (slsr_cand_t c, unsigned i, tree basis_name) fputs (" (duplicate, not actually replacing)\n", dump_file); } - else if (cand_incr.is_zero ()) + else if (cand_incr == 0) { tree lhs = gimple_assign_lhs (c->cand_stmt); tree lhs_type = TREE_TYPE (lhs); @@ -3368,7 +3364,7 @@ replace_profitable_candidates (slsr_cand_t c) { if (!cand_already_replaced (c)) { - double_int increment = cand_abs_increment (c); + max_wide_int increment = cand_abs_increment (c); enum tree_code orig_code = gimple_assign_rhs_code (c->cand_stmt); int i; diff --git a/gcc/gimple.c b/gcc/gimple.c index 3ddceb95cf3..f6ed803857b 100644 --- a/gcc/gimple.c +++ b/gcc/gimple.c @@ -3094,16 +3094,16 @@ gimple_compare_field_offset (tree f1, tree f2) /* Fortran and C do not always agree on what DECL_OFFSET_ALIGN should be, so handle differing ones specially by decomposing the offset into a byte and bit offset manually. 
*/ - if (host_integerp (DECL_FIELD_OFFSET (f1), 0) - && host_integerp (DECL_FIELD_OFFSET (f2), 0)) + if (tree_fits_shwi_p (DECL_FIELD_OFFSET (f1)) + && tree_fits_shwi_p (DECL_FIELD_OFFSET (f2))) { unsigned HOST_WIDE_INT byte_offset1, byte_offset2; unsigned HOST_WIDE_INT bit_offset1, bit_offset2; - bit_offset1 = TREE_INT_CST_LOW (DECL_FIELD_BIT_OFFSET (f1)); - byte_offset1 = (TREE_INT_CST_LOW (DECL_FIELD_OFFSET (f1)) + bit_offset1 = tree_to_hwi (DECL_FIELD_BIT_OFFSET (f1)); + byte_offset1 = (tree_to_hwi (DECL_FIELD_OFFSET (f1)) + bit_offset1 / BITS_PER_UNIT); - bit_offset2 = TREE_INT_CST_LOW (DECL_FIELD_BIT_OFFSET (f2)); - byte_offset2 = (TREE_INT_CST_LOW (DECL_FIELD_OFFSET (f2)) + bit_offset2 = tree_to_hwi (DECL_FIELD_BIT_OFFSET (f2)); + byte_offset2 = (tree_to_hwi (DECL_FIELD_OFFSET (f2)) + bit_offset2 / BITS_PER_UNIT); if (byte_offset1 != byte_offset2) return false; diff --git a/gcc/gimplify.c b/gcc/gimplify.c index 193e0170178..4de126ebcd7 100644 --- a/gcc/gimplify.c +++ b/gcc/gimplify.c @@ -710,7 +710,7 @@ gimple_add_tmp_var (tree tmp) /* Later processing assumes that the object size is constant, which might not be true at this point. Force the use of a constant upper bound in this case. 
*/ - if (!host_integerp (DECL_SIZE_UNIT (tmp), 1)) + if (!tree_fits_uhwi_p (DECL_SIZE_UNIT (tmp))) force_constant_size (tmp); DECL_CONTEXT (tmp) = current_function_decl; @@ -1728,11 +1728,7 @@ preprocess_case_label_vec_for_gimple (vec<tree> labels, low = CASE_HIGH (labels[i - 1]); if (!low) low = CASE_LOW (labels[i - 1]); - if ((TREE_INT_CST_LOW (low) + 1 - != TREE_INT_CST_LOW (high)) - || (TREE_INT_CST_HIGH (low) - + (TREE_INT_CST_LOW (high) == 0) - != TREE_INT_CST_HIGH (high))) + if ((wide_int (low) + 1) != high) break; } if (i == len) diff --git a/gcc/go/go-gcc.cc b/gcc/go/go-gcc.cc index 5ff529fc62b..f543644a7ac 100644 --- a/gcc/go/go-gcc.cc +++ b/gcc/go/go-gcc.cc @@ -827,9 +827,8 @@ Gcc_backend::type_size(Btype* btype) if (t == error_mark_node) return 1; t = TYPE_SIZE_UNIT(t); - gcc_assert(TREE_CODE(t) == INTEGER_CST); - gcc_assert(TREE_INT_CST_HIGH(t) == 0); - unsigned HOST_WIDE_INT val_wide = TREE_INT_CST_LOW(t); + gcc_assert(cst_fits_uhwi_p (t)); + unsigned HOST_WIDE_INT val_wide = tree_to_hwi (t); size_t ret = static_cast<size_t>(val_wide); gcc_assert(ret == val_wide); return ret; diff --git a/gcc/go/gofrontend/expressions.cc b/gcc/go/gofrontend/expressions.cc index f46c37fd1f7..45220b640b2 100644 --- a/gcc/go/gofrontend/expressions.cc +++ b/gcc/go/gofrontend/expressions.cc @@ -3342,9 +3342,9 @@ Type_conversion_expression::do_get_tree(Translate_context* context) tree int_type_tree = type_to_tree(int_type->get_backend(gogo)); expr_tree = fold_convert(int_type_tree, expr_tree); - if (host_integerp(expr_tree, 0)) + if (tree_fits_shwi_p(expr_tree)) { - HOST_WIDE_INT intval = tree_low_cst(expr_tree, 0); + HOST_WIDE_INT intval = tree_to_shwi(expr_tree); std::string s; Lex::append_char(intval, true, &s, this->location()); Expression* se = Expression::make_string(s, this->location()); diff --git a/gcc/godump.c b/gcc/godump.c index 0303e4f0628..4a088f94658 100644 --- a/gcc/godump.c +++ b/gcc/godump.c @@ -36,6 +36,7 @@ along with GCC; see the file COPYING3. 
If not see #include "pointer-set.h" #include "obstack.h" #include "debug.h" +#include "wide-int-print.h" /* We dump this information from the debug hooks. This gives us a stable and maintainable API to hook into. In order to work @@ -728,12 +729,12 @@ go_format_type (struct godump_container *container, tree type, && tree_int_cst_sgn (TYPE_MIN_VALUE (TYPE_DOMAIN (type))) == 0 && TYPE_MAX_VALUE (TYPE_DOMAIN (type)) != NULL_TREE && TREE_CODE (TYPE_MAX_VALUE (TYPE_DOMAIN (type))) == INTEGER_CST - && host_integerp (TYPE_MAX_VALUE (TYPE_DOMAIN (type)), 0)) + && tree_fits_shwi_p (TYPE_MAX_VALUE (TYPE_DOMAIN (type)))) { char buf[100]; snprintf (buf, sizeof buf, HOST_WIDE_INT_PRINT_DEC "+1", - tree_low_cst (TYPE_MAX_VALUE (TYPE_DOMAIN (type)), 0)); + tree_to_shwi (TYPE_MAX_VALUE (TYPE_DOMAIN (type)))); obstack_grow (ob, buf, strlen (buf)); } obstack_1grow (ob, ']'); @@ -967,7 +968,7 @@ go_output_typedef (struct godump_container *container, tree decl) const char *name; struct macro_hash_value *mhval; void **slot; - char buf[100]; + char buf[WIDE_INT_PRINT_BUFFER_SIZE]; name = IDENTIFIER_POINTER (TREE_PURPOSE (element)); @@ -981,18 +982,15 @@ go_output_typedef (struct godump_container *container, tree decl) if (*slot != NULL) macro_hash_del (*slot); - if (host_integerp (TREE_VALUE (element), 0)) + if (tree_fits_shwi_p (TREE_VALUE (element))) snprintf (buf, sizeof buf, HOST_WIDE_INT_PRINT_DEC, - tree_low_cst (TREE_VALUE (element), 0)); - else if (host_integerp (TREE_VALUE (element), 1)) + tree_to_shwi (TREE_VALUE (element))); + else if (tree_fits_uhwi_p (TREE_VALUE (element))) snprintf (buf, sizeof buf, HOST_WIDE_INT_PRINT_UNSIGNED, ((unsigned HOST_WIDE_INT) - tree_low_cst (TREE_VALUE (element), 1))); + tree_to_uhwi (TREE_VALUE (element)))); else - snprintf (buf, sizeof buf, HOST_WIDE_INT_PRINT_DOUBLE_HEX, - ((unsigned HOST_WIDE_INT) - TREE_INT_CST_HIGH (TREE_VALUE (element))), - TREE_INT_CST_LOW (TREE_VALUE (element))); + print_hex (wide_int (element), buf); mhval->value = 
xstrdup (buf); *slot = mhval; diff --git a/gcc/graphite-clast-to-gimple.c b/gcc/graphite-clast-to-gimple.c index d0e23021f5d..5de11ba24d7 100644 --- a/gcc/graphite-clast-to-gimple.c +++ b/gcc/graphite-clast-to-gimple.c @@ -63,14 +63,14 @@ gmp_cst_to_tree (tree type, mpz_t val) { tree t = type ? type : integer_type_node; mpz_t tmp; - double_int di; + wide_int wi; mpz_init (tmp); mpz_set (tmp, val); - di = mpz_get_double_int (t, tmp, true); + wi = wi::from_mpz (t, tmp, true); mpz_clear (tmp); - return double_int_to_tree (t, di); + return wide_int_to_tree (t, wi); } /* Sets RES to the min of V1 and V2. */ diff --git a/gcc/graphite-scop-detection.c b/gcc/graphite-scop-detection.c index 91d3d85ae0c..e56aa871773 100644 --- a/gcc/graphite-scop-detection.c +++ b/gcc/graphite-scop-detection.c @@ -160,10 +160,10 @@ graphite_can_represent_init (tree e) case MULT_EXPR: if (chrec_contains_symbols (TREE_OPERAND (e, 0))) return graphite_can_represent_init (TREE_OPERAND (e, 0)) - && host_integerp (TREE_OPERAND (e, 1), 0); + && tree_fits_shwi_p (TREE_OPERAND (e, 1)); else return graphite_can_represent_init (TREE_OPERAND (e, 1)) - && host_integerp (TREE_OPERAND (e, 0), 0); + && tree_fits_shwi_p (TREE_OPERAND (e, 0)); case PLUS_EXPR: case POINTER_PLUS_EXPR: diff --git a/gcc/graphite-sese-to-poly.c b/gcc/graphite-sese-to-poly.c index 3e5541ab834..afbd89e9ab2 100644 --- a/gcc/graphite-sese-to-poly.c +++ b/gcc/graphite-sese-to-poly.c @@ -53,8 +53,8 @@ along with GCC; see the file COPYING3. 
If not see static inline void tree_int_to_gmp (tree t, mpz_t res) { - double_int di = tree_to_double_int (t); - mpz_set_double_int (res, di, TYPE_UNSIGNED (TREE_TYPE (t))); + wide_int wi = t; + wi::to_mpz (wi, res, TYPE_SIGN (TREE_TYPE (t))); } /* Returns the index of the PHI argument defined in the outermost @@ -1006,7 +1006,7 @@ build_loop_iteration_domains (scop_p scop, struct loop *loop, /* loop_i <= expr_nb_iters */ else if (!chrec_contains_undetermined (nb_iters)) { - double_int nit; + max_wide_int nit; isl_pw_aff *aff; isl_set *valid; isl_local_space *ls; @@ -1042,7 +1042,7 @@ build_loop_iteration_domains (scop_p scop, struct loop *loop, isl_constraint *c; mpz_init (g); - mpz_set_double_int (g, nit, false); + wi::to_mpz (nit, g, SIGNED); mpz_sub_ui (g, g, 1); approx = extract_affine_gmp (g, isl_set_get_space (inner)); x = isl_pw_aff_ge_set (approx, aff); @@ -1505,9 +1505,9 @@ pdr_add_data_dimensions (isl_set *extent, scop_p scop, data_reference_p dr) subscript - low >= 0 and high - subscript >= 0 in case one of the two bounds isn't known. Do the same here? */ - if (host_integerp (low, 0) + if (tree_fits_shwi_p (low) && high - && host_integerp (high, 0) + && tree_fits_shwi_p (high) /* 1-element arrays at end of structures may extend over their declared size. 
*/ && !(array_at_struct_end_p (ref) diff --git a/gcc/ipa-cp.c b/gcc/ipa-cp.c index 54afbfc22e1..f6f49dadc94 100644 --- a/gcc/ipa-cp.c +++ b/gcc/ipa-cp.c @@ -1372,7 +1372,7 @@ propagate_aggs_accross_jump_function (struct cgraph_edge *cs, if (item->offset < 0) continue; gcc_checking_assert (is_gimple_ip_invariant (item->value)); - val_size = tree_low_cst (TYPE_SIZE (TREE_TYPE (item->value)), 1); + val_size = tree_to_uhwi (TYPE_SIZE (TREE_TYPE (item->value))); if (merge_agg_lats_step (dest_plats, item->offset, val_size, &aglat, pre_existing, &ret)) diff --git a/gcc/ipa-devirt.c b/gcc/ipa-devirt.c index 10499e1cfc0..5f071f10645 100644 --- a/gcc/ipa-devirt.c +++ b/gcc/ipa-devirt.c @@ -199,7 +199,7 @@ hash_type_name (tree t) if (TREE_CODE (v) == POINTER_PLUS_EXPR) { - hash = TREE_INT_CST_LOW (TREE_OPERAND (v, 1)); + hash = TREE_INT_CST_ELT (TREE_OPERAND (v, 1), 0); v = TREE_OPERAND (TREE_OPERAND (v, 0), 0); } diff --git a/gcc/ipa-prop.c b/gcc/ipa-prop.c index 7ec3c49e42f..300b39b5627 100644 --- a/gcc/ipa-prop.c +++ b/gcc/ipa-prop.c @@ -288,7 +288,7 @@ ipa_print_node_jump_functions_for_edge (FILE *f, struct cgraph_edge *cs) item->offset); if (TYPE_P (item->value)) fprintf (f, "clobber of " HOST_WIDE_INT_PRINT_DEC " bits", - tree_low_cst (TYPE_SIZE (item->value), 1)); + tree_to_uhwi (TYPE_SIZE (item->value))); else { fprintf (f, "cst: "); @@ -1046,7 +1046,7 @@ compute_complex_assign_jump_func (struct ipa_node_params *info, || max_size == -1 || max_size != size) return; - offset += mem_ref_offset (base).low * BITS_PER_UNIT; + offset += mem_ref_offset (base).to_short_addr () * BITS_PER_UNIT; ssa = TREE_OPERAND (base, 0); if (TREE_CODE (ssa) != SSA_NAME || !SSA_NAME_IS_DEFAULT_DEF (ssa) @@ -1105,7 +1105,7 @@ get_ancestor_addr_info (gimple assign, tree *obj_p, HOST_WIDE_INT *offset) || TREE_CODE (SSA_NAME_VAR (parm)) != PARM_DECL) return NULL_TREE; - *offset += mem_ref_offset (expr).low * BITS_PER_UNIT; + *offset += mem_ref_offset (expr).to_short_addr () * BITS_PER_UNIT; 
*obj_p = obj; return expr; } @@ -1251,7 +1251,7 @@ type_like_member_ptr_p (tree type, tree *method_ptr, tree *delta) fld = TYPE_FIELDS (type); if (!fld || !POINTER_TYPE_P (TREE_TYPE (fld)) || TREE_CODE (TREE_TYPE (TREE_TYPE (fld))) != METHOD_TYPE - || !host_integerp (DECL_FIELD_OFFSET (fld), 1)) + || !tree_fits_uhwi_p (DECL_FIELD_OFFSET (fld))) return false; if (method_ptr) @@ -1259,7 +1259,7 @@ type_like_member_ptr_p (tree type, tree *method_ptr, tree *delta) fld = DECL_CHAIN (fld); if (!fld || INTEGRAL_TYPE_P (fld) - || !host_integerp (DECL_FIELD_OFFSET (fld), 1)) + || !tree_fits_uhwi_p (DECL_FIELD_OFFSET (fld))) return false; if (delta) *delta = fld; @@ -1329,13 +1329,13 @@ determine_known_aggregate_parts (gimple call, tree arg, if (TREE_CODE (arg) == SSA_NAME) { tree type_size; - if (!host_integerp (TYPE_SIZE (TREE_TYPE (TREE_TYPE (arg))), 1)) + if (!tree_fits_uhwi_p (TYPE_SIZE (TREE_TYPE (TREE_TYPE (arg))))) return; check_ref = true; arg_base = arg; arg_offset = 0; type_size = TYPE_SIZE (TREE_TYPE (TREE_TYPE (arg))); - arg_size = tree_low_cst (type_size, 1); + arg_size = tree_to_uhwi (type_size); ao_ref_init_from_ptr_and_size (&r, arg_base, NULL_TREE); } else if (TREE_CODE (arg) == ADDR_EXPR) @@ -1982,7 +1982,7 @@ ipa_analyze_virtual_call_uses (struct cgraph_node *node, cs = ipa_note_param_call (node, index, call); ii = cs->indirect_info; ii->offset = anc_offset; - ii->otr_token = tree_low_cst (OBJ_TYPE_REF_TOKEN (target), 1); + ii->otr_token = tree_to_uhwi (OBJ_TYPE_REF_TOKEN (target)); ii->otr_type = obj_type_ref_class (target); ii->polymorphic = 1; } @@ -2194,7 +2194,7 @@ ipa_intraprocedural_devirtualization (gimple call) if (!binfo) return NULL_TREE; token = OBJ_TYPE_REF_TOKEN (otr); - fndecl = gimple_get_virt_method_for_binfo (tree_low_cst (token, 1), + fndecl = gimple_get_virt_method_for_binfo (tree_to_uhwi (token), binfo); #ifdef ENABLE_CHECKING if (fndecl) @@ -3640,9 +3640,9 @@ ipa_modify_call_arguments (struct cgraph_edge *cs, gimple stmt, if 
(TYPE_ALIGN (type) > align) align = TYPE_ALIGN (type); } - misalign += (tree_to_double_int (off) - .sext (TYPE_PRECISION (TREE_TYPE (off))).low - * BITS_PER_UNIT); + misalign += (wi::sext (addr_wide_int (off), + TYPE_PRECISION (TREE_TYPE (off))) + * BITS_PER_UNIT).to_short_addr (); misalign = misalign & (align - 1); if (misalign != 0) align = (misalign & -misalign); diff --git a/gcc/ipa-utils.h b/gcc/ipa-utils.h index 374ac2a3eff..15f312e6065 100644 --- a/gcc/ipa-utils.h +++ b/gcc/ipa-utils.h @@ -118,8 +118,8 @@ possible_polymorphic_call_target_p (tree call, struct cgraph_node *n) { return possible_polymorphic_call_target_p (obj_type_ref_class (call), - tree_low_cst - (OBJ_TYPE_REF_TOKEN (call), 1), + tree_to_uhwi + (OBJ_TYPE_REF_TOKEN (call)), n); } #endif /* GCC_IPA_UTILS_H */ diff --git a/gcc/java/boehm.c b/gcc/java/boehm.c index 5910f0322dc..158c8ebd274 100644 --- a/gcc/java/boehm.c +++ b/gcc/java/boehm.c @@ -32,8 +32,9 @@ The Free Software Foundation is independent of Sun Microsystems, Inc. */ #include "java-tree.h" #include "parse.h" #include "diagnostic-core.h" +#include "wide-int.h" -static void mark_reference_fields (tree, double_int *, unsigned int, +static void mark_reference_fields (tree, wide_int *, unsigned int, int *, int *, int *, HOST_WIDE_INT *); /* A procedure-based object descriptor. We know that our @@ -47,7 +48,7 @@ static void mark_reference_fields (tree, double_int *, unsigned int, /* Recursively mark reference fields. */ static void mark_reference_fields (tree field, - double_int *mask, + wide_int *mask, unsigned int ubit, int *pointer_after_end, int *all_bits_set, @@ -107,7 +108,7 @@ mark_reference_fields (tree field, bits for all words in the record. This is conservative, but the size_words != 1 case is impossible in regular java code. 
*/ for (i = 0; i < size_words; ++i) - *mask = (*mask).set_bit (ubit - count - i - 1); + *mask = wi::set_bit (*mask, ubit - count - i - 1); if (count >= ubit - 2) *pointer_after_end = 1; @@ -136,16 +137,17 @@ get_boehm_type_descriptor (tree type) int last_set_index = 0; HOST_WIDE_INT last_view_index = -1; int pointer_after_end = 0; - double_int mask; + wide_int mask; tree field, value, value_type; - mask = double_int_zero; - /* If the GC wasn't requested, just use a null pointer. */ if (! flag_use_boehm_gc) return null_pointer_node; value_type = java_type_for_mode (ptr_mode, 1); + + mask = wi::zero (TYPE_PRECISION (value_type)); + /* If we have a type of unknown size, use a proc. */ if (int_size_in_bytes (type) == -1) goto procedure_object_descriptor; @@ -194,22 +196,22 @@ get_boehm_type_descriptor (tree type) that we don't have to emit reflection data for run time marking. */ count = 0; - mask = double_int_zero; + mask = wi::zero (TYPE_PRECISION (value_type)); ++last_set_index; while (last_set_index) { if ((last_set_index & 1)) - mask = mask.set_bit (log2_size + count); + mask = wi::set_bit (mask, log2_size + count); last_set_index >>= 1; ++count; } - value = double_int_to_tree (value_type, mask); + value = wide_int_to_tree (value_type, mask); } else if (! pointer_after_end) { /* Bottom two bits for bitmap mark type are 01. */ - mask = mask.set_bit (0); - value = double_int_to_tree (value_type, mask); + mask = wi::set_bit (mask, 0); + value = wide_int_to_tree (value_type, mask); } else { @@ -233,5 +235,5 @@ uses_jv_markobj_p (tree dtable) point in asserting unless we hit the bad case. 
*/ gcc_assert (!flag_reduced_reflection || TARGET_VTABLE_USES_DESCRIPTORS == 0); v = (*CONSTRUCTOR_ELTS (dtable))[3].value; - return (PROCEDURE_OBJECT_DESCRIPTOR == TREE_INT_CST_LOW (v)); + return (PROCEDURE_OBJECT_DESCRIPTOR == tree_to_hwi (v)); } diff --git a/gcc/java/class.c b/gcc/java/class.c index cb6789643d3..251873e0811 100644 --- a/gcc/java/class.c +++ b/gcc/java/class.c @@ -1576,14 +1576,14 @@ get_dispatch_vector (tree type) HOST_WIDE_INT i; tree method; tree super = CLASSTYPE_SUPER (type); - HOST_WIDE_INT nvirtuals = tree_low_cst (TYPE_NVIRTUALS (type), 0); + HOST_WIDE_INT nvirtuals = tree_to_shwi (TYPE_NVIRTUALS (type)); vtable = make_tree_vec (nvirtuals); TYPE_VTABLE (type) = vtable; if (super != NULL_TREE) { tree super_vtable = get_dispatch_vector (super); - for (i = tree_low_cst (TYPE_NVIRTUALS (super), 0); --i >= 0; ) + for (i = tree_to_shwi (TYPE_NVIRTUALS (super)); --i >= 0; ) TREE_VEC_ELT (vtable, i) = TREE_VEC_ELT (super_vtable, i); } @@ -1592,8 +1592,8 @@ get_dispatch_vector (tree type) { tree method_index = get_method_index (method); if (method_index != NULL_TREE - && host_integerp (method_index, 0)) - TREE_VEC_ELT (vtable, tree_low_cst (method_index, 0)) = method; + && tree_fits_shwi_p (method_index)) + TREE_VEC_ELT (vtable, tree_to_shwi (method_index)) = method; } } diff --git a/gcc/java/expr.c b/gcc/java/expr.c index a434913d475..068ac29a9fa 100644 --- a/gcc/java/expr.c +++ b/gcc/java/expr.c @@ -44,6 +44,7 @@ The Free Software Foundation is independent of Sun Microsystems, Inc. */ #include "ggc.h" #include "tree-iterator.h" #include "target.h" +#include "wide-int.h" static void flush_quick_stack (void); static void push_value (tree); @@ -1049,8 +1050,8 @@ build_newarray (int atype_value, tree length) tree prim_type = decode_newarray_type (atype_value); tree type = build_java_array_type (prim_type, - host_integerp (length, 0) == INTEGER_CST - ? tree_low_cst (length, 0) : -1); + tree_fits_shwi_p (length) + ? 
tree_to_shwi (length) : -1); /* Pass a reference to the primitive type class and save the runtime some work. */ @@ -1069,8 +1070,8 @@ build_anewarray (tree class_type, tree length) { tree type = build_java_array_type (class_type, - host_integerp (length, 0) - ? tree_low_cst (length, 0) : -1); + tree_fits_shwi_p (length) + ? tree_to_shwi (length) : -1); return build_call_nary (promote_type (type), build_address_of (soft_anewarray_node), @@ -1258,7 +1259,7 @@ expand_java_pushc (int ival, tree type) else if (type == float_type_node || type == double_type_node) { REAL_VALUE_TYPE x; - REAL_VALUE_FROM_INT (x, ival, 0, TYPE_MODE (type)); + REAL_VALUE_FROM_INT (x, ival, TYPE_MODE (type)); value = build_real (type, x); } else @@ -2672,7 +2673,7 @@ build_jni_stub (tree method) special way, we would do that here. */ for (tem = method_args; tem != NULL_TREE; tem = DECL_CHAIN (tem)) { - int arg_bits = TREE_INT_CST_LOW (TYPE_SIZE (TREE_TYPE (tem))); + int arg_bits = tree_to_hwi (TYPE_SIZE (TREE_TYPE (tem))); #ifdef PARM_BOUNDARY arg_bits = (((arg_bits + PARM_BOUNDARY - 1) / PARM_BOUNDARY) * PARM_BOUNDARY); diff --git a/gcc/java/jcf-parse.c b/gcc/java/jcf-parse.c index fbd4e00e029..d9025ced492 100644 --- a/gcc/java/jcf-parse.c +++ b/gcc/java/jcf-parse.c @@ -40,6 +40,7 @@ The Free Software Foundation is independent of Sun Microsystems, Inc. 
*/ #include "cgraph.h" #include "bitmap.h" #include "target.h" +#include "wide-int.h" #ifdef HAVE_LOCALE_H #include <locale.h> @@ -1039,14 +1040,14 @@ get_constant (JCF *jcf, int index) case CONSTANT_Long: { unsigned HOST_WIDE_INT num; - double_int val; + wide_int val; num = JPOOL_UINT (jcf, index); - val = double_int::from_uhwi (num).llshift (32, 64); + val = wi::lshift (wide_int::from (num, 64, SIGNED), 32); num = JPOOL_UINT (jcf, index + 1); - val |= double_int::from_uhwi (num); + val |= num; - value = double_int_to_tree (long_type_node, val); + value = wide_int_to_tree (long_type_node, val); break; } diff --git a/gcc/java/typeck.c b/gcc/java/typeck.c index 9dbb3f0291b..004ebf151b7 100644 --- a/gcc/java/typeck.c +++ b/gcc/java/typeck.c @@ -217,7 +217,7 @@ java_array_type_length (tree array_type) { tree high = TYPE_MAX_VALUE (index_type); if (TREE_CODE (high) == INTEGER_CST) - return TREE_INT_CST_LOW (high) + 1; + return tree_to_uhwi (high) + 1; } } return -1; diff --git a/gcc/lcm.c b/gcc/lcm.c index c13d2a6aa51..7471b0e4c38 100644 --- a/gcc/lcm.c +++ b/gcc/lcm.c @@ -64,6 +64,7 @@ along with GCC; see the file COPYING3. If not see #include "sbitmap.h" #include "dumpfile.h" +#define LCM_DEBUG_INFO 1 /* Edge based LCM routines. */ static void compute_antinout_edge (sbitmap *, sbitmap *, sbitmap *, sbitmap *); static void compute_earliest (struct edge_list *, int, sbitmap *, sbitmap *, @@ -106,6 +107,7 @@ compute_antinout_edge (sbitmap *antloc, sbitmap *transp, sbitmap *antin, /* We want a maximal solution, so make an optimistic initialization of ANTIN. */ bitmap_vector_ones (antin, last_basic_block); + bitmap_vector_clear (antout, last_basic_block); /* Put every block on the worklist; this is necessary because of the optimistic initialization of ANTIN above. */ @@ -432,6 +434,7 @@ pre_edge_lcm (int n_exprs, sbitmap *transp, /* Allocate an extra element for the exit block in the laterin vector. 
*/ laterin = sbitmap_vector_alloc (last_basic_block + 1, n_exprs); + bitmap_vector_clear (laterin, last_basic_block); compute_laterin (edge_list, earliest, antloc, later, laterin); #ifdef LCM_DEBUG_INFO diff --git a/gcc/loop-doloop.c b/gcc/loop-doloop.c index 1bc9aa392a2..3fa156165bd 100644 --- a/gcc/loop-doloop.c +++ b/gcc/loop-doloop.c @@ -409,7 +409,7 @@ doloop_modify (struct loop *loop, struct niter_desc *desc, basic_block loop_end = desc->out_edge->src; enum machine_mode mode; rtx true_prob_val; - double_int iterations; + max_wide_int iterations; jump_insn = BB_END (loop_end); @@ -461,9 +461,9 @@ doloop_modify (struct loop *loop, struct niter_desc *desc, /* Determine if the iteration counter will be non-negative. Note that the maximum value loaded is iterations_max - 1. */ if (get_max_loop_iterations (loop, &iterations) - && (iterations.ule (double_int_one.llshift - (GET_MODE_PRECISION (mode) - 1, - GET_MODE_PRECISION (mode))))) + && wi::leu_p (iterations, + wi::set_bit_in_zero (GET_MODE_PRECISION (mode) - 1, + GET_MODE_PRECISION (mode)))) nonneg = 1; break; @@ -549,11 +549,11 @@ doloop_modify (struct loop *loop, struct niter_desc *desc, { rtx init; unsigned level = get_loop_level (loop) + 1; - double_int iter; + wide_int iter; rtx iter_rtx; if (!get_max_loop_iterations (loop, &iter) - || !iter.fits_shwi ()) + || !wi::fits_shwi_p (iter)) iter_rtx = const0_rtx; else iter_rtx = GEN_INT (iter.to_shwi ()); @@ -617,7 +617,7 @@ doloop_optimize (struct loop *loop) struct niter_desc *desc; unsigned word_mode_size; unsigned HOST_WIDE_INT word_mode_max; - double_int iter; + max_wide_int iter; int entered_at_top; if (dump_file) @@ -670,10 +670,10 @@ doloop_optimize (struct loop *loop) count = copy_rtx (desc->niter_expr); iterations = desc->const_iter ? 
desc->niter_expr : const0_rtx; if (!get_max_loop_iterations (loop, &iter) - || !iter.fits_shwi ()) + || !wi::fits_shwi_p (iter)) iterations_max = const0_rtx; else - iterations_max = GEN_INT (iter.to_shwi ()); + iterations_max = immed_wide_int_const (iter, mode); level = get_loop_level (loop) + 1; /* Generate looping insn. If the pattern FAILs then give up trying @@ -696,7 +696,7 @@ doloop_optimize (struct loop *loop) computed, we must be sure that the number of iterations fits into the new mode. */ && (word_mode_size >= GET_MODE_PRECISION (mode) - || iter.ule (double_int::from_shwi (word_mode_max)))) + || wi::leu_p (iter, word_mode_max))) { if (word_mode_size > GET_MODE_PRECISION (mode)) { diff --git a/gcc/loop-iv.c b/gcc/loop-iv.c index b9bc3348733..60d5043d67a 100644 --- a/gcc/loop-iv.c +++ b/gcc/loop-iv.c @@ -2614,8 +2614,7 @@ iv_number_of_iterations (struct loop *loop, rtx insn, rtx condition, max = (up - down) / inc + 1; if (!desc->infinite && !desc->assumptions) - record_niter_bound (loop, double_int::from_uhwi (max), - false, true); + record_niter_bound (loop, max, false, true); if (iv0.step == const0_rtx) { @@ -2654,8 +2653,8 @@ iv_number_of_iterations (struct loop *loop, rtx insn, rtx condition, iv1.step = const0_rtx; if (INTVAL (iv0.step) < 0) { - iv0.step = simplify_gen_unary (NEG, comp_mode, iv0.step, mode); - iv1.base = simplify_gen_unary (NEG, comp_mode, iv1.base, mode); + iv0.step = simplify_gen_unary (NEG, comp_mode, iv0.step, comp_mode); + iv1.base = simplify_gen_unary (NEG, comp_mode, iv1.base, comp_mode); } iv0.step = lowpart_subreg (mode, iv0.step, comp_mode); @@ -2829,8 +2828,7 @@ iv_number_of_iterations (struct loop *loop, rtx insn, rtx condition, desc->niter = val & GET_MODE_MASK (desc->mode); if (!desc->infinite && !desc->assumptions) - record_niter_bound (loop, double_int::from_uhwi (desc->niter), - false, true); + record_niter_bound (loop, desc->niter, false, true); } else { @@ -2839,8 +2837,7 @@ iv_number_of_iterations (struct loop *loop, 
rtx insn, rtx condition, goto zero_iter_simplify; if (!desc->infinite && !desc->assumptions) - record_niter_bound (loop, double_int::from_uhwi (max), - false, true); + record_niter_bound (loop, max, false, true); /* simplify_using_initial_values does a copy propagation on the registers in the expression for the number of iterations. This prolongs life @@ -2865,8 +2862,7 @@ zero_iter_simplify: zero_iter: desc->const_iter = true; desc->niter = 0; - record_niter_bound (loop, double_int_zero, - true, true); + record_niter_bound (loop, 0, true, true); desc->noloop_assumptions = NULL_RTX; desc->niter_expr = const0_rtx; return; diff --git a/gcc/loop-unroll.c b/gcc/loop-unroll.c index 568fd7bdab4..f216f715522 100644 --- a/gcc/loop-unroll.c +++ b/gcc/loop-unroll.c @@ -646,7 +646,7 @@ decide_unroll_constant_iterations (struct loop *loop, int flags) { unsigned nunroll, nunroll_by_av, best_copies, best_unroll = 0, n_copies, i; struct niter_desc *desc; - double_int iterations; + max_wide_int iterations; if (!(flags & UAP_UNROLL)) { @@ -696,7 +696,7 @@ decide_unroll_constant_iterations (struct loop *loop, int flags) if (desc->niter < 2 * nunroll || ((get_estimated_loop_iterations (loop, &iterations) || get_max_loop_iterations (loop, &iterations)) - && iterations.ult (double_int::from_shwi (2 * nunroll)))) + && wi::ltu_p (iterations, 2 * nunroll))) { if (dump_file) fprintf (dump_file, ";; Not unrolling loop, doesn't roll\n"); @@ -817,11 +817,10 @@ unroll_loop_constant_iterations (struct loop *loop) desc->noloop_assumptions = NULL_RTX; desc->niter -= exit_mod; - loop->nb_iterations_upper_bound -= double_int::from_uhwi (exit_mod); + loop->nb_iterations_upper_bound -= exit_mod; if (loop->any_estimate - && double_int::from_uhwi (exit_mod).ule - (loop->nb_iterations_estimate)) - loop->nb_iterations_estimate -= double_int::from_uhwi (exit_mod); + && wi::leu_p (exit_mod, loop->nb_iterations_estimate)) + loop->nb_iterations_estimate -= exit_mod; else loop->any_estimate = false; } @@ 
-861,11 +860,10 @@ unroll_loop_constant_iterations (struct loop *loop) apply_opt_in_copies (opt_info, exit_mod + 1, false, false); desc->niter -= exit_mod + 1; - loop->nb_iterations_upper_bound -= double_int::from_uhwi (exit_mod + 1); + loop->nb_iterations_upper_bound -= exit_mod + 1; if (loop->any_estimate - && double_int::from_uhwi (exit_mod + 1).ule - (loop->nb_iterations_estimate)) - loop->nb_iterations_estimate -= double_int::from_uhwi (exit_mod + 1); + && wi::leu_p (exit_mod + 1, loop->nb_iterations_estimate)) + loop->nb_iterations_estimate -= exit_mod + 1; else loop->any_estimate = false; desc->noloop_assumptions = NULL_RTX; @@ -917,14 +915,10 @@ unroll_loop_constant_iterations (struct loop *loop) desc->niter /= max_unroll + 1; loop->nb_iterations_upper_bound - = loop->nb_iterations_upper_bound.udiv (double_int::from_uhwi (max_unroll - + 1), - TRUNC_DIV_EXPR); + = wi::udiv_trunc (loop->nb_iterations_upper_bound, max_unroll + 1); if (loop->any_estimate) loop->nb_iterations_estimate - = loop->nb_iterations_estimate.udiv (double_int::from_uhwi (max_unroll - + 1), - TRUNC_DIV_EXPR); + = wi::udiv_trunc (loop->nb_iterations_estimate, max_unroll + 1); desc->niter_expr = GEN_INT (desc->niter); /* Remove the edges. */ @@ -945,7 +939,7 @@ decide_unroll_runtime_iterations (struct loop *loop, int flags) { unsigned nunroll, nunroll_by_av, i; struct niter_desc *desc; - double_int iterations; + max_wide_int iterations; if (!(flags & UAP_UNROLL)) { @@ -1001,7 +995,7 @@ decide_unroll_runtime_iterations (struct loop *loop, int flags) /* Check whether the loop rolls. 
*/ if ((get_estimated_loop_iterations (loop, &iterations) || get_max_loop_iterations (loop, &iterations)) - && iterations.ult (double_int::from_shwi (2 * nunroll))) + && wi::ltu_p (iterations, 2 * nunroll)) { if (dump_file) fprintf (dump_file, ";; Not unrolling loop, doesn't roll\n"); @@ -1311,14 +1305,10 @@ unroll_loop_runtime_iterations (struct loop *loop) simplify_gen_binary (UDIV, desc->mode, old_niter, gen_int_mode (max_unroll + 1, desc->mode)); loop->nb_iterations_upper_bound - = loop->nb_iterations_upper_bound.udiv (double_int::from_uhwi (max_unroll - + 1), - TRUNC_DIV_EXPR); + = wi::udiv_trunc (loop->nb_iterations_upper_bound, max_unroll + 1); if (loop->any_estimate) loop->nb_iterations_estimate - = loop->nb_iterations_estimate.udiv (double_int::from_uhwi (max_unroll - + 1), - TRUNC_DIV_EXPR); + = wi::udiv_trunc (loop->nb_iterations_estimate, max_unroll + 1); if (exit_at_end) { desc->niter_expr = @@ -1326,7 +1316,7 @@ unroll_loop_runtime_iterations (struct loop *loop) desc->noloop_assumptions = NULL_RTX; --loop->nb_iterations_upper_bound; if (loop->any_estimate - && loop->nb_iterations_estimate != double_int_zero) + && loop->nb_iterations_estimate != 0) --loop->nb_iterations_estimate; else loop->any_estimate = false; @@ -1346,7 +1336,7 @@ static void decide_peel_simple (struct loop *loop, int flags) { unsigned npeel; - double_int iterations; + max_wide_int iterations; if (!(flags & UAP_PEEL)) { @@ -1390,7 +1380,8 @@ decide_peel_simple (struct loop *loop, int flags) /* If we have realistic estimate on number of iterations, use it. */ if (get_estimated_loop_iterations (loop, &iterations)) { - if (double_int::from_shwi (npeel).ule (iterations)) + /* TODO: unsigned/signed confusion */ + if (wi::leu_p (npeel, iterations)) { if (dump_file) { @@ -1407,7 +1398,7 @@ decide_peel_simple (struct loop *loop, int flags) /* If we have small enough bound on iterations, we can still peel (completely unroll). 
*/ else if (get_max_loop_iterations (loop, &iterations) - && iterations.ult (double_int::from_shwi (npeel))) + && wi::ltu_p (iterations, npeel)) npeel = iterations.to_shwi () + 1; else { @@ -1501,7 +1492,7 @@ decide_unroll_stupid (struct loop *loop, int flags) { unsigned nunroll, nunroll_by_av, i; struct niter_desc *desc; - double_int iterations; + max_wide_int iterations; if (!(flags & UAP_UNROLL_ALL)) { @@ -1558,7 +1549,7 @@ decide_unroll_stupid (struct loop *loop, int flags) /* Check whether the loop rolls. */ if ((get_estimated_loop_iterations (loop, &iterations) || get_max_loop_iterations (loop, &iterations)) - && iterations.ult (double_int::from_shwi (2 * nunroll))) + && wi::ltu_p (iterations, 2 * nunroll)) { if (dump_file) fprintf (dump_file, ";; Not unrolling loop, doesn't roll\n"); diff --git a/gcc/lto-streamer-in.c b/gcc/lto-streamer-in.c index 72d450ca9d9..a58c3446395 100644 --- a/gcc/lto-streamer-in.c +++ b/gcc/lto-streamer-in.c @@ -695,14 +695,26 @@ input_cfg (struct lto_input_block *ib, struct function *fn, loop->any_upper_bound = streamer_read_hwi (ib); if (loop->any_upper_bound) { - loop->nb_iterations_upper_bound.low = streamer_read_uhwi (ib); - loop->nb_iterations_upper_bound.high = streamer_read_hwi (ib); + HOST_WIDE_INT a[WIDE_INT_MAX_ELTS]; + int i; + int prec ATTRIBUTE_UNUSED = streamer_read_uhwi (ib); + int len = streamer_read_uhwi (ib); + for (i = 0; i < len; i++) + a[i] = streamer_read_hwi (ib); + + loop->nb_iterations_upper_bound = max_wide_int::from_array (a, len); } loop->any_estimate = streamer_read_hwi (ib); if (loop->any_estimate) { - loop->nb_iterations_estimate.low = streamer_read_uhwi (ib); - loop->nb_iterations_estimate.high = streamer_read_hwi (ib); + HOST_WIDE_INT a[WIDE_INT_MAX_ELTS]; + int i; + int prec ATTRIBUTE_UNUSED = streamer_read_uhwi (ib); + int len = streamer_read_uhwi (ib); + for (i = 0; i < len; i++) + a[i] = streamer_read_hwi (ib); + + loop->nb_iterations_estimate = max_wide_int::from_array (a, len); } 
place_new_loop (fn, loop); @@ -1250,12 +1262,17 @@ lto_input_tree_1 (struct lto_input_block *ib, struct data_in *data_in, } else if (tag == LTO_integer_cst) { - /* For shared integer constants in singletons we can use the existing - tree integer constant merging code. */ + /* For shared integer constants in singletons we can use the + existing tree integer constant merging code. */ tree type = stream_read_tree (ib, data_in); - unsigned HOST_WIDE_INT low = streamer_read_uhwi (ib); - HOST_WIDE_INT high = streamer_read_hwi (ib); - result = build_int_cst_wide (type, low, high); + unsigned HOST_WIDE_INT len = streamer_read_uhwi (ib); + unsigned HOST_WIDE_INT i; + HOST_WIDE_INT a[WIDE_INT_MAX_ELTS]; + + for (i = 0; i < len; i++) + a[i] = streamer_read_hwi (ib); + result = wide_int_to_tree (type, wide_int::from_array + (a, len, TYPE_PRECISION (type))); streamer_tree_cache_append (data_in->reader_cache, result, hash); } else if (tag == LTO_tree_scc) diff --git a/gcc/lto-streamer-out.c b/gcc/lto-streamer-out.c index 35ad1cf0c80..1cb7faf0433 100644 --- a/gcc/lto-streamer-out.c +++ b/gcc/lto-streamer-out.c @@ -710,8 +710,10 @@ hash_tree (struct streamer_tree_cache_d *cache, tree t) if (CODE_CONTAINS_STRUCT (code, TS_INT_CST)) { - v = iterative_hash_host_wide_int (TREE_INT_CST_LOW (t), v); - v = iterative_hash_host_wide_int (TREE_INT_CST_HIGH (t), v); + int i; + v = iterative_hash_host_wide_int (TREE_INT_CST_NUNITS (t), v); + for (i = 0; i < TREE_INT_CST_NUNITS (t); i++) + v = iterative_hash_host_wide_int (TREE_INT_CST_ELT (t, i), v); } if (CODE_CONTAINS_STRUCT (code, TS_REAL_CST)) @@ -1627,14 +1629,24 @@ output_cfg (struct output_block *ob, struct function *fn) streamer_write_hwi (ob, loop->any_upper_bound); if (loop->any_upper_bound) { - streamer_write_uhwi (ob, loop->nb_iterations_upper_bound.low); - streamer_write_hwi (ob, loop->nb_iterations_upper_bound.high); + int len = loop->nb_iterations_upper_bound.get_len (); + int i; + + streamer_write_uhwi (ob, 
loop->nb_iterations_upper_bound.get_precision ()); + streamer_write_uhwi (ob, len); + for (i = 0; i < len; i++) + streamer_write_hwi (ob, loop->nb_iterations_upper_bound.elt (i)); } streamer_write_hwi (ob, loop->any_estimate); if (loop->any_estimate) { - streamer_write_uhwi (ob, loop->nb_iterations_estimate.low); - streamer_write_hwi (ob, loop->nb_iterations_estimate.high); + int len = loop->nb_iterations_estimate.get_len (); + int i; + + streamer_write_uhwi (ob, loop->nb_iterations_estimate.get_precision ()); + streamer_write_uhwi (ob, len); + for (i = 0; i < len; i++) + streamer_write_hwi (ob, loop->nb_iterations_estimate.elt (i)); } } @@ -2267,7 +2279,7 @@ write_symbol (struct streamer_tree_cache_d *cache, if (kind == GCCPK_COMMON && DECL_SIZE_UNIT (t) && TREE_CODE (DECL_SIZE_UNIT (t)) == INTEGER_CST) - size = TREE_INT_CST_LOW (DECL_SIZE_UNIT (t)); + size = tree_to_hwi (DECL_SIZE_UNIT (t)); else size = 0; diff --git a/gcc/lto/lto-lang.c b/gcc/lto/lto-lang.c index 0fa0fc9a438..de9bda9ba67 100644 --- a/gcc/lto/lto-lang.c +++ b/gcc/lto/lto-lang.c @@ -315,11 +315,10 @@ static bool get_nonnull_operand (tree arg_num_expr, unsigned HOST_WIDE_INT *valp) { /* Verify the arg number is a constant. 
*/ - if (TREE_CODE (arg_num_expr) != INTEGER_CST - || TREE_INT_CST_HIGH (arg_num_expr) != 0) + if (!cst_fits_uhwi_p (arg_num_expr)) return false; - *valp = TREE_INT_CST_LOW (arg_num_expr); + *valp = tree_to_hwi (arg_num_expr); return true; } diff --git a/gcc/lto/lto.c b/gcc/lto/lto.c index bca1dd12032..1168d7b4268 100644 --- a/gcc/lto/lto.c +++ b/gcc/lto/lto.c @@ -1214,8 +1214,8 @@ compare_tree_sccs_1 (tree t1, tree t2, tree **map) if (CODE_CONTAINS_STRUCT (code, TS_INT_CST)) { - compare_values (TREE_INT_CST_LOW); - compare_values (TREE_INT_CST_HIGH); + if (!wi::eq_p (t1, t2)) + return false; } if (CODE_CONTAINS_STRUCT (code, TS_REAL_CST)) diff --git a/gcc/machmode.def b/gcc/machmode.def index 1062f186e8a..9ca8b799e97 100644 --- a/gcc/machmode.def +++ b/gcc/machmode.def @@ -229,6 +229,9 @@ UACCUM_MODE (USA, 4, 16, 16); /* 16.16 */ UACCUM_MODE (UDA, 8, 32, 32); /* 32.32 */ UACCUM_MODE (UTA, 16, 64, 64); /* 64.64 */ +/* Should be overridden by EXTRA_MODES_FILE if wrong. */ +#define MAX_BITS_PER_UNIT 8 + /* Allow the target to specify additional modes of various kinds. */ #if HAVE_EXTRA_MODES # include EXTRA_MODES_FILE diff --git a/gcc/objc/objc-act.c b/gcc/objc/objc-act.c index 95ec4ecd40f..3839f5e8ed6 100644 --- a/gcc/objc/objc-act.c +++ b/gcc/objc/objc-act.c @@ -50,6 +50,7 @@ along with GCC; see the file COPYING3. If not see #include "cgraph.h" #include "tree-iterator.h" #include "hash-table.h" +#include "wide-int.h" #include "langhooks-def.h" /* Different initialization, code gen and meta data generation for each runtime. 
*/ @@ -3021,8 +3022,8 @@ check_string_class_template (void) #define AT_LEAST_AS_LARGE_AS(F, T) \ (F && TREE_CODE (F) == FIELD_DECL \ - && (TREE_INT_CST_LOW (TYPE_SIZE (TREE_TYPE (F))) \ - >= TREE_INT_CST_LOW (TYPE_SIZE (T)))) + && (tree_to_hwi (TYPE_SIZE (TREE_TYPE (F))) \ + >= tree_to_hwi (TYPE_SIZE (T)))) if (!AT_LEAST_AS_LARGE_AS (field_decl, ptr_type_node)) return 0; @@ -4878,14 +4879,9 @@ objc_decl_method_attributes (tree *node, tree attributes, int flags) which specifies the index of the format string argument. Add 2. */ number = TREE_VALUE (second_argument); - if (number - && TREE_CODE (number) == INTEGER_CST - && TREE_INT_CST_HIGH (number) == 0) - { - TREE_VALUE (second_argument) - = build_int_cst (integer_type_node, - TREE_INT_CST_LOW (number) + 2); - } + if (number && TREE_CODE (number) == INTEGER_CST) + TREE_VALUE (second_argument) + = wide_int_to_tree (TREE_TYPE (number), wide_int (number) + 2); /* This is the third argument, the "first-to-check", which specifies the index of the first argument to @@ -4893,15 +4889,9 @@ objc_decl_method_attributes (tree *node, tree attributes, int flags) in which case we don't need to add 2. Add 2 if not 0. */ number = TREE_VALUE (third_argument); - if (number - && TREE_CODE (number) == INTEGER_CST - && TREE_INT_CST_HIGH (number) == 0 - && TREE_INT_CST_LOW (number) != 0) - { - TREE_VALUE (third_argument) - = build_int_cst (integer_type_node, - TREE_INT_CST_LOW (number) + 2); - } + if (number && TREE_CODE (number) == INTEGER_CST) + TREE_VALUE (third_argument) + = wide_int_to_tree (TREE_TYPE (number), wide_int (number) + 2); } filtered_attributes = chainon (filtered_attributes, new_attribute); @@ -4933,15 +4923,10 @@ objc_decl_method_attributes (tree *node, tree attributes, int flags) { /* Get the value of the argument and add 2. 
*/ tree number = TREE_VALUE (argument); - if (number - && TREE_CODE (number) == INTEGER_CST - && TREE_INT_CST_HIGH (number) == 0 - && TREE_INT_CST_LOW (number) != 0) - { - TREE_VALUE (argument) - = build_int_cst (integer_type_node, - TREE_INT_CST_LOW (number) + 2); - } + if (number && TREE_CODE (number) == INTEGER_CST + && !wi::eq_p (number, 0)) + TREE_VALUE (argument) + = wide_int_to_tree (TREE_TYPE (number), wide_int (number) + 2); argument = TREE_CHAIN (argument); } @@ -8893,7 +8878,7 @@ gen_declaration (tree decl) if (DECL_INITIAL (decl) && TREE_CODE (DECL_INITIAL (decl)) == INTEGER_CST) sprintf (errbuf + strlen (errbuf), ": " HOST_WIDE_INT_PRINT_DEC, - TREE_INT_CST_LOW (DECL_INITIAL (decl))); + tree_to_hwi (DECL_INITIAL (decl))); } return errbuf; @@ -8933,7 +8918,7 @@ gen_type_name_0 (tree type) char sz[20]; sprintf (sz, HOST_WIDE_INT_PRINT_DEC, - (TREE_INT_CST_LOW + (tree_to_hwi (TYPE_MAX_VALUE (TYPE_DOMAIN (type))) + 1)); strcat (errbuf, sz); } diff --git a/gcc/objc/objc-encoding.c b/gcc/objc/objc-encoding.c index c2f7444c448..1c7ba105fa8 100644 --- a/gcc/objc/objc-encoding.c +++ b/gcc/objc/objc-encoding.c @@ -393,12 +393,12 @@ encode_array (tree type, int curtype, int format) array. */ sprintf (buffer, "[" HOST_WIDE_INT_PRINT_DEC, (HOST_WIDE_INT)0); } - else if (TREE_INT_CST_LOW (TYPE_SIZE (array_of)) == 0) + else if (tree_to_hwi (TYPE_SIZE (array_of)) == 0) sprintf (buffer, "[" HOST_WIDE_INT_PRINT_DEC, (HOST_WIDE_INT)0); else sprintf (buffer, "[" HOST_WIDE_INT_PRINT_DEC, - TREE_INT_CST_LOW (an_int_cst) - / TREE_INT_CST_LOW (TYPE_SIZE (array_of))); + tree_to_hwi (an_int_cst) + / tree_to_hwi (TYPE_SIZE (array_of))); obstack_grow (&util_obstack, buffer, strlen (buffer)); encode_type (array_of, curtype, format); @@ -425,7 +425,7 @@ encode_vector (tree type, int curtype, int format) sprintf (buffer, "![" HOST_WIDE_INT_PRINT_DEC ",%d", /* We want to compute the equivalent of sizeof (<vector>). Code inspired by c_sizeof_or_alignof_type. 
*/ - ((TREE_INT_CST_LOW (TYPE_SIZE_UNIT (type)) + ((tree_to_hwi (TYPE_SIZE_UNIT (type)) / (TYPE_PRECISION (char_type_node) / BITS_PER_UNIT))), /* We want to compute the equivalent of __alignof__ (<vector>). Code inspired by @@ -820,7 +820,7 @@ encode_field (tree field_decl, int curtype, int format) between GNU and NeXT runtimes. */ if (DECL_BIT_FIELD_TYPE (field_decl)) { - int size = tree_low_cst (DECL_SIZE (field_decl), 1); + int size = tree_to_uhwi (DECL_SIZE (field_decl)); if (flag_next_runtime) encode_next_bitfield (size); diff --git a/gcc/objc/objc-next-runtime-abi-01.c b/gcc/objc/objc-next-runtime-abi-01.c index 000256d8311..5e896eebc19 100644 --- a/gcc/objc/objc-next-runtime-abi-01.c +++ b/gcc/objc/objc-next-runtime-abi-01.c @@ -1199,7 +1199,7 @@ generate_v1_objc_protocol_extension (tree proto_interface, build_v1_objc_protocol_extension_template (); /* uint32_t size */ - size = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (objc_protocol_extension_template)); + size = tree_to_hwi (TYPE_SIZE_UNIT (objc_protocol_extension_template)); CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, build_int_cst (NULL_TREE, size)); /* Try for meaningful diagnostics. */ @@ -1343,7 +1343,7 @@ generate_v1_property_table (tree context, tree klass_ctxt) is_proto ? 
context : klass_ctxt); - init_val = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (objc_v1_property_template)); + init_val = tree_to_hwi (TYPE_SIZE_UNIT (objc_v1_property_template)); if (is_proto) snprintf (buf, BUFSIZE, "_OBJC_ProtocolPropList_%s", IDENTIFIER_POINTER (PROTOCOL_NAME (context))); @@ -1723,7 +1723,7 @@ build_v1_category_initializer (tree type, tree cat_name, tree class_name, if (flag_objc_abi >= 1) { - int val = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (objc_category_template)); + int val = tree_to_hwi (TYPE_SIZE_UNIT (objc_category_template)); expr = build_int_cst (NULL_TREE, val); CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, expr); ltyp = objc_prop_list_ptr; @@ -1825,7 +1825,7 @@ generate_objc_class_ext (tree property_list, tree context) build_objc_class_ext_template (); /* uint32_t size */ - size = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (objc_class_ext_template)); + size = tree_to_hwi (TYPE_SIZE_UNIT (objc_class_ext_template)); CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, build_int_cst (NULL_TREE, size)); ltyp = const_string_type_node; diff --git a/gcc/objc/objc-next-runtime-abi-02.c b/gcc/objc/objc-next-runtime-abi-02.c index 09600068ca6..0f158e13f0b 100644 --- a/gcc/objc/objc-next-runtime-abi-02.c +++ b/gcc/objc/objc-next-runtime-abi-02.c @@ -2318,7 +2318,7 @@ generate_v2_meth_descriptor_table (tree chain, tree protocol, decl = start_var_decl (method_list_template, buf); - entsize = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (objc_method_template)); + entsize = tree_to_hwi (TYPE_SIZE_UNIT (objc_method_template)); CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, build_int_cst (NULL_TREE, entsize)); CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, build_int_cst (NULL_TREE, size)); initlist = @@ -2432,7 +2432,7 @@ generate_v2_property_table (tree context, tree klass_ctxt) is_proto ? 
context : klass_ctxt); - init_val = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (objc_v2_property_template)); + init_val = tree_to_hwi (TYPE_SIZE_UNIT (objc_v2_property_template)); if (is_proto) snprintf (buf, BUFSIZE, "_OBJC_ProtocolPropList_%s", IDENTIFIER_POINTER (PROTOCOL_NAME (context))); @@ -2507,7 +2507,7 @@ build_v2_protocol_initializer (tree type, tree protocol_name, tree protocol_list /* const uint32_t size; = sizeof(struct protocol_t) */ expr = build_int_cst (integer_type_node, - TREE_INT_CST_LOW (TYPE_SIZE_UNIT (objc_v2_protocol_template))); + tree_to_hwi (TYPE_SIZE_UNIT (objc_v2_protocol_template))); CONSTRUCTOR_APPEND_ELT (inits, NULL_TREE, expr); /* const uint32_t flags; = 0 */ CONSTRUCTOR_APPEND_ELT (inits, NULL_TREE, integer_zero_node); @@ -2621,7 +2621,7 @@ generate_v2_dispatch_table (tree chain, const char *name, tree attr) decl = start_var_decl (method_list_template, name); - init_val = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (objc_method_template)); + init_val = tree_to_hwi (TYPE_SIZE_UNIT (objc_method_template)); CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, build_int_cst (integer_type_node, init_val)); CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, @@ -2848,7 +2848,7 @@ build_v2_ivar_list_initializer (tree class_name, tree type, tree field_decl) build_int_cst (integer_type_node, val)); /* Set size. 
*/ - val = TREE_INT_CST_LOW (DECL_SIZE_UNIT (field_decl)); + val = tree_to_hwi (DECL_SIZE_UNIT (field_decl)); CONSTRUCTOR_APPEND_ELT (ivar, NULL_TREE, build_int_cst (integer_type_node, val)); @@ -2917,7 +2917,7 @@ generate_v2_ivars_list (tree chain, const char *name, tree attr, tree templ) initlist = build_v2_ivar_list_initializer (CLASS_NAME (templ), objc_v2_ivar_template, chain); - ivar_t_size = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (objc_v2_ivar_template)); + ivar_t_size = tree_to_hwi (TYPE_SIZE_UNIT (objc_v2_ivar_template)); decl = start_var_decl (ivar_list_template, name); CONSTRUCTOR_APPEND_ELT (inits, NULL_TREE, @@ -3175,7 +3175,7 @@ generate_v2_class_structs (struct imp_entry *impent) buf, meta_clac_meth); } - instanceStart = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (objc_v2_class_template)); + instanceStart = tree_to_hwi (TYPE_SIZE_UNIT (objc_v2_class_template)); /* Currently there are no class ivars and generation of class variables for the root of the inheritance has been removed. It @@ -3185,7 +3185,7 @@ generate_v2_class_structs (struct imp_entry *impent) class_ivars = NULL_TREE; /* TODO: Add total size of class variables when implemented. */ - instanceSize = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (objc_v2_class_template)); + instanceSize = tree_to_hwi (TYPE_SIZE_UNIT (objc_v2_class_template)); /* So now build the META CLASS structs. */ /* static struct class_ro_t _OBJC_METACLASS_Foo = { ... 
}; */ @@ -3267,7 +3267,7 @@ generate_v2_class_structs (struct imp_entry *impent) if (field && TREE_CODE (field) == FIELD_DECL) instanceSize = int_byte_position (field) * BITS_PER_UNIT - + tree_low_cst (DECL_SIZE (field), 0); + + tree_to_shwi (DECL_SIZE (field)); else instanceSize = 0; instanceSize /= BITS_PER_UNIT; diff --git a/gcc/omp-low.c b/gcc/omp-low.c index 3326e9ecf0c..6c24d3eeaab 100644 --- a/gcc/omp-low.c +++ b/gcc/omp-low.c @@ -2273,8 +2273,8 @@ check_omp_nesting_restrictions (gimple stmt, omp_context *ctx) : "#pragma omp cancellation point"); return false; } - switch (host_integerp (gimple_call_arg (stmt, 0), 0) - ? tree_low_cst (gimple_call_arg (stmt, 0), 0) + switch (tree_fits_shwi_p (gimple_call_arg (stmt, 0)) + ? tree_to_shwi (gimple_call_arg (stmt, 0)) : 0) { case 1: @@ -2510,9 +2510,7 @@ scan_omp_1_op (tree *tp, int *walk_subtrees, void *data) if (tem != TREE_TYPE (t)) { if (TREE_CODE (t) == INTEGER_CST) - *tp = build_int_cst_wide (tem, - TREE_INT_CST_LOW (t), - TREE_INT_CST_HIGH (t)); + *tp = wide_int_to_tree (tem, t); else TREE_TYPE (t) = tem; } @@ -2909,7 +2907,7 @@ lower_rec_simd_input_clauses (tree new_var, omp_context *ctx, int &max_vf, OMP_CLAUSE_SAFELEN); if (c && compare_tree_int (OMP_CLAUSE_SAFELEN_EXPR (c), max_vf) == -1) - max_vf = tree_low_cst (OMP_CLAUSE_SAFELEN_EXPR (c), 0); + max_vf = tree_to_shwi (OMP_CLAUSE_SAFELEN_EXPR (c)); } if (max_vf > 1) { @@ -6724,12 +6722,11 @@ expand_omp_simd (struct omp_region *region, struct omp_for_data *fd) else { safelen = OMP_CLAUSE_SAFELEN_EXPR (safelen); - if (!host_integerp (safelen, 1) - || (unsigned HOST_WIDE_INT) tree_low_cst (safelen, 1) - > INT_MAX) + if (!tree_fits_uhwi_p (safelen) + || tree_to_uhwi (safelen) > INT_MAX) loop->safelen = INT_MAX; else - loop->safelen = tree_low_cst (safelen, 1); + loop->safelen = tree_to_uhwi (safelen); if (loop->safelen == 1) loop->safelen = 0; } @@ -7633,7 +7630,7 @@ expand_omp_atomic (struct omp_region *region) HOST_WIDE_INT index; /* Make sure the type is 
one of the supported sizes. */ - index = tree_low_cst (TYPE_SIZE_UNIT (type), 1); + index = tree_to_uhwi (TYPE_SIZE_UNIT (type)); index = exact_log2 (index); if (index >= 0 && index <= 4) { @@ -8786,9 +8783,9 @@ lower_omp_for_lastprivate (struct omp_for_data *fd, gimple_seq *body_p, /* When possible, use a strict equality expression. This can let VRP type optimizations deduce the value and remove a copy. */ - if (host_integerp (fd->loop.step, 0)) + if (tree_fits_shwi_p (fd->loop.step)) { - HOST_WIDE_INT step = TREE_INT_CST_LOW (fd->loop.step); + HOST_WIDE_INT step = tree_to_shwi (fd->loop.step); if (step == 1 || step == -1) cond_code = EQ_EXPR; } @@ -8806,7 +8803,7 @@ lower_omp_for_lastprivate (struct omp_for_data *fd, gimple_seq *body_p, /* Optimize: v = 0; is usually cheaper than v = some_other_constant. */ vinit = fd->loop.n1; if (cond_code == EQ_EXPR - && host_integerp (fd->loop.n2, 0) + && tree_fits_shwi_p (fd->loop.n2) && ! integer_zerop (fd->loop.n2)) vinit = build_int_cst (TREE_TYPE (fd->loop.v), 0); else diff --git a/gcc/optabs.c b/gcc/optabs.c index 06a626c839b..96d10cb069d 100644 --- a/gcc/optabs.c +++ b/gcc/optabs.c @@ -851,7 +851,8 @@ expand_subword_shift (enum machine_mode op1_mode, optab binoptab, if (CONSTANT_P (op1) || shift_mask >= BITS_PER_WORD) { carries = outof_input; - tmp = immed_double_const (BITS_PER_WORD, 0, op1_mode); + tmp = immed_wide_int_const (wi::shwi (BITS_PER_WORD, + op1_mode), op1_mode); tmp = simplify_expand_binop (op1_mode, sub_optab, tmp, op1, 0, true, methods); } @@ -866,13 +867,15 @@ expand_subword_shift (enum machine_mode op1_mode, optab binoptab, outof_input, const1_rtx, 0, unsignedp, methods); if (shift_mask == BITS_PER_WORD - 1) { - tmp = immed_double_const (-1, -1, op1_mode); + tmp = immed_wide_int_const + (wi::minus_one (GET_MODE_PRECISION (op1_mode)), op1_mode); tmp = simplify_expand_binop (op1_mode, xor_optab, op1, tmp, 0, true, methods); } else { - tmp = immed_double_const (BITS_PER_WORD - 1, 0, op1_mode); + tmp = 
immed_wide_int_const (wi::shwi (BITS_PER_WORD - 1, + op1_mode), op1_mode); tmp = simplify_expand_binop (op1_mode, sub_optab, tmp, op1, 0, true, methods); } @@ -1035,7 +1038,7 @@ expand_doubleword_shift (enum machine_mode op1_mode, optab binoptab, is true when the effective shift value is less than BITS_PER_WORD. Set SUPERWORD_OP1 to the shift count that should be used to shift OUTOF_INPUT into INTO_TARGET when the condition is false. */ - tmp = immed_double_const (BITS_PER_WORD, 0, op1_mode); + tmp = immed_wide_int_const (wi::shwi (BITS_PER_WORD, op1_mode), op1_mode); if (!CONSTANT_P (op1) && shift_mask == BITS_PER_WORD - 1) { /* Set CMP1 to OP1 & BITS_PER_WORD. The result is zero iff OP1 @@ -2888,7 +2891,7 @@ expand_absneg_bit (enum rtx_code code, enum machine_mode mode, const struct real_format *fmt; int bitpos, word, nwords, i; enum machine_mode imode; - double_int mask; + wide_int mask; rtx temp, insns; /* The format has to have a simple sign bit. */ @@ -2924,7 +2927,7 @@ expand_absneg_bit (enum rtx_code code, enum machine_mode mode, nwords = (GET_MODE_BITSIZE (mode) + BITS_PER_WORD - 1) / BITS_PER_WORD; } - mask = double_int_zero.set_bit (bitpos); + mask = wi::set_bit_in_zero (bitpos, GET_MODE_PRECISION (imode)); if (code == ABS) mask = ~mask; @@ -2946,7 +2949,7 @@ expand_absneg_bit (enum rtx_code code, enum machine_mode mode, { temp = expand_binop (imode, code == ABS ? and_optab : xor_optab, op0_piece, - immed_double_int_const (mask, imode), + immed_wide_int_const (mask, imode), targ_piece, 1, OPTAB_LIB_WIDEN); if (temp != targ_piece) emit_move_insn (targ_piece, temp); @@ -2964,7 +2967,7 @@ expand_absneg_bit (enum rtx_code code, enum machine_mode mode, { temp = expand_binop (imode, code == ABS ? 
and_optab : xor_optab, gen_lowpart (imode, op0), - immed_double_int_const (mask, imode), + immed_wide_int_const (mask, imode), gen_lowpart (imode, target), 1, OPTAB_LIB_WIDEN); target = lowpart_subreg_maybe_copy (mode, temp, imode); @@ -3565,7 +3568,7 @@ expand_copysign_absneg (enum machine_mode mode, rtx op0, rtx op1, rtx target, } else { - double_int mask; + wide_int mask; if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD) { @@ -3587,10 +3590,9 @@ expand_copysign_absneg (enum machine_mode mode, rtx op0, rtx op1, rtx target, op1 = operand_subword_force (op1, word, mode); } - mask = double_int_zero.set_bit (bitpos); - + mask = wi::set_bit_in_zero (bitpos, GET_MODE_PRECISION (imode)); sign = expand_binop (imode, and_optab, op1, - immed_double_int_const (mask, imode), + immed_wide_int_const (mask, imode), NULL_RTX, 1, OPTAB_LIB_WIDEN); } @@ -3634,7 +3636,7 @@ expand_copysign_bit (enum machine_mode mode, rtx op0, rtx op1, rtx target, int bitpos, bool op0_is_abs) { enum machine_mode imode; - double_int mask; + wide_int mask, nmask; int word, nwords, i; rtx temp, insns; @@ -3658,7 +3660,7 @@ expand_copysign_bit (enum machine_mode mode, rtx op0, rtx op1, rtx target, nwords = (GET_MODE_BITSIZE (mode) + BITS_PER_WORD - 1) / BITS_PER_WORD; } - mask = double_int_zero.set_bit (bitpos); + mask = wi::set_bit_in_zero (bitpos, GET_MODE_PRECISION (imode)); if (target == 0 || target == op0 @@ -3678,14 +3680,16 @@ expand_copysign_bit (enum machine_mode mode, rtx op0, rtx op1, rtx target, if (i == word) { if (!op0_is_abs) - op0_piece - = expand_binop (imode, and_optab, op0_piece, - immed_double_int_const (~mask, imode), - NULL_RTX, 1, OPTAB_LIB_WIDEN); - + { + nmask = ~mask; + op0_piece + = expand_binop (imode, and_optab, op0_piece, + immed_wide_int_const (nmask, imode), + NULL_RTX, 1, OPTAB_LIB_WIDEN); + } op1 = expand_binop (imode, and_optab, operand_subword_force (op1, i, mode), - immed_double_int_const (mask, imode), + immed_wide_int_const (mask, imode), NULL_RTX, 1, OPTAB_LIB_WIDEN); 
temp = expand_binop (imode, ior_optab, op0_piece, op1, @@ -3705,15 +3709,17 @@ expand_copysign_bit (enum machine_mode mode, rtx op0, rtx op1, rtx target, else { op1 = expand_binop (imode, and_optab, gen_lowpart (imode, op1), - immed_double_int_const (mask, imode), + immed_wide_int_const (mask, imode), NULL_RTX, 1, OPTAB_LIB_WIDEN); op0 = gen_lowpart (imode, op0); if (!op0_is_abs) - op0 = expand_binop (imode, and_optab, op0, - immed_double_int_const (~mask, imode), - NULL_RTX, 1, OPTAB_LIB_WIDEN); - + { + nmask = ~mask; + op0 = expand_binop (imode, and_optab, op0, + immed_wide_int_const (nmask, imode), + NULL_RTX, 1, OPTAB_LIB_WIDEN); + } temp = expand_binop (imode, ior_optab, op0, op1, gen_lowpart (imode, target), 1, OPTAB_LIB_WIDEN); target = lowpart_subreg_maybe_copy (mode, temp, imode); diff --git a/gcc/postreload.c b/gcc/postreload.c index b0c63422357..cdfe7b7ddfa 100644 --- a/gcc/postreload.c +++ b/gcc/postreload.c @@ -295,27 +295,27 @@ reload_cse_simplify_set (rtx set, rtx insn) #ifdef LOAD_EXTEND_OP if (extend_op != UNKNOWN) { - HOST_WIDE_INT this_val; + wide_int result; - /* ??? I'm lazy and don't wish to handle CONST_DOUBLE. Other - constants, such as SYMBOL_REF, cannot be extended. */ - if (!CONST_INT_P (this_rtx)) + if (!CONST_SCALAR_INT_P (this_rtx)) continue; - this_val = INTVAL (this_rtx); switch (extend_op) { case ZERO_EXTEND: - this_val &= GET_MODE_MASK (GET_MODE (src)); + result = wide_int (std::make_pair (this_rtx, GET_MODE (src))); + if (GET_MODE_PRECISION (GET_MODE (src)) > GET_MODE_PRECISION (word_mode)) + result = wi::zext (result, GET_MODE_PRECISION (word_mode)); break; case SIGN_EXTEND: - /* ??? In theory we're already extended. 
*/ - if (this_val == trunc_int_for_mode (this_val, GET_MODE (src))) - break; + result = wide_int (std::make_pair (this_rtx, GET_MODE (src))); + if (GET_MODE_PRECISION (GET_MODE (src)) > GET_MODE_PRECISION (word_mode)) + result = wi::sext (result, GET_MODE_PRECISION (word_mode)); + break; default: gcc_unreachable (); } - this_rtx = GEN_INT (this_val); + this_rtx = immed_wide_int_const (result, GET_MODE (src)); } #endif this_cost = set_src_cost (this_rtx, speed); diff --git a/gcc/predict.c b/gcc/predict.c index 2f1cb89c4ab..3262868c530 100644 --- a/gcc/predict.c +++ b/gcc/predict.c @@ -1045,15 +1045,15 @@ strips_small_constant (tree t1, tree t2) return NULL; else if (TREE_CODE (t1) == SSA_NAME) ret = t1; - else if (host_integerp (t1, 0)) - value = tree_low_cst (t1, 0); + else if (tree_fits_shwi_p (t1)) + value = tree_to_shwi (t1); else return NULL; if (!t2) return ret; - else if (host_integerp (t2, 0)) - value = tree_low_cst (t2, 0); + else if (tree_fits_shwi_p (t2)) + value = tree_to_shwi (t2); else if (TREE_CODE (t2) == SSA_NAME) { if (ret) @@ -1149,7 +1149,7 @@ is_comparison_with_loop_invariant_p (gimple stmt, struct loop *loop, code = invert_tree_comparison (code, false); bound = iv0.base; base = iv1.base; - if (host_integerp (iv1.step, 0)) + if (tree_fits_shwi_p (iv1.step)) step = iv1.step; else return false; @@ -1158,7 +1158,7 @@ is_comparison_with_loop_invariant_p (gimple stmt, struct loop *loop, { bound = iv1.base; base = iv0.base; - if (host_integerp (iv0.step, 0)) + if (tree_fits_shwi_p (iv0.step)) step = iv0.step; else return false; @@ -1292,81 +1292,62 @@ predict_iv_comparison (struct loop *loop, basic_block bb, /* If loop bound, base and compare bound are all constants, we can calculate the probability directly. 
*/ - if (host_integerp (loop_bound_var, 0) - && host_integerp (compare_var, 0) - && host_integerp (compare_base, 0)) + if (tree_fits_shwi_p (loop_bound_var) + && tree_fits_shwi_p (compare_var) + && tree_fits_shwi_p (compare_base)) { int probability; - bool of, overflow = false; - double_int mod, compare_count, tem, loop_count; + bool overflow, overall_overflow = false; + max_wide_int compare_count, tem, loop_count; - double_int loop_bound = tree_to_double_int (loop_bound_var); - double_int compare_bound = tree_to_double_int (compare_var); - double_int base = tree_to_double_int (compare_base); - double_int compare_step = tree_to_double_int (compare_step_var); + max_wide_int loop_bound = loop_bound_var; + max_wide_int compare_bound = compare_var; + max_wide_int base = compare_base; + max_wide_int compare_step = compare_step_var; /* (loop_bound - base) / compare_step */ - tem = loop_bound.sub_with_overflow (base, &of); - overflow |= of; - loop_count = tem.divmod_with_overflow (compare_step, - 0, TRUNC_DIV_EXPR, - &mod, &of); - overflow |= of; - - if ((!compare_step.is_negative ()) + tem = wi::sub (loop_bound, base, SIGNED, &overflow); + overall_overflow |= overflow; + loop_count = wi::div_trunc (tem, compare_step, SIGNED, &overflow); + overall_overflow |= overflow; + + if (!wi::neg_p (compare_step) ^ (compare_code == LT_EXPR || compare_code == LE_EXPR)) { /* (loop_bound - compare_bound) / compare_step */ - tem = loop_bound.sub_with_overflow (compare_bound, &of); - overflow |= of; - compare_count = tem.divmod_with_overflow (compare_step, - 0, TRUNC_DIV_EXPR, - &mod, &of); - overflow |= of; + tem = wi::sub (loop_bound, compare_bound, SIGNED, &overflow); + overall_overflow |= overflow; + compare_count = wi::div_trunc (tem, compare_step, SIGNED, &overflow); + overall_overflow |= overflow; } else { /* (compare_bound - base) / compare_step */ - tem = compare_bound.sub_with_overflow (base, &of); - overflow |= of; - compare_count = tem.divmod_with_overflow (compare_step, - 0, 
TRUNC_DIV_EXPR, - &mod, &of); - overflow |= of; + tem = wi::sub (compare_bound, base, SIGNED, &overflow); + overall_overflow |= overflow; + compare_count = wi::div_trunc (tem, compare_step, SIGNED, &overflow); + overall_overflow |= overflow; } if (compare_code == LE_EXPR || compare_code == GE_EXPR) ++compare_count; if (loop_bound_code == LE_EXPR || loop_bound_code == GE_EXPR) ++loop_count; - if (compare_count.is_negative ()) - compare_count = double_int_zero; - if (loop_count.is_negative ()) - loop_count = double_int_zero; - if (loop_count.is_zero ()) + if (wi::neg_p (compare_count)) + compare_count = 0; + if (wi::neg_p (loop_count)) + loop_count = 0; + if (loop_count == 0) probability = 0; - else if (compare_count.scmp (loop_count) == 1) + else if (wi::cmps (compare_count, loop_count) == 1) probability = REG_BR_PROB_BASE; else { - /* If loop_count is too big, such that REG_BR_PROB_BASE * loop_count - could overflow, shift both loop_count and compare_count right - a bit so that it doesn't overflow. Note both counts are known not - to be negative at this point. 
*/ - int clz_bits = clz_hwi (loop_count.high); - gcc_assert (REG_BR_PROB_BASE < 32768); - if (clz_bits < 16) - { - loop_count.arshift (16 - clz_bits, HOST_BITS_PER_DOUBLE_INT); - compare_count.arshift (16 - clz_bits, HOST_BITS_PER_DOUBLE_INT); - } - tem = compare_count.mul_with_sign (double_int::from_shwi - (REG_BR_PROB_BASE), true, &of); - gcc_assert (!of); - tem = tem.divmod (loop_count, true, TRUNC_DIV_EXPR, &mod); + tem = compare_count * REG_BR_PROB_BASE; + tem = wi::udiv_trunc (tem, loop_count); probability = tem.to_uhwi (); } - if (!overflow) + if (!overall_overflow) predict_edge (then_edge, PRED_LOOP_IV_COMPARE, probability); return; @@ -1549,10 +1530,10 @@ predict_loops (void) if (TREE_CODE (niter) == INTEGER_CST) { - if (host_integerp (niter, 1) + if (tree_fits_uhwi_p (niter) && max && compare_tree_int (niter, max - 1) == -1) - nitercst = tree_low_cst (niter, 1) + 1; + nitercst = tree_to_uhwi (niter) + 1; else nitercst = max; predictor = PRED_LOOP_ITERATIONS; @@ -1666,7 +1647,7 @@ predict_loops (void) if (loop_bound_var) predict_iv_comparison (loop, bb, loop_bound_var, loop_iv_base, loop_bound_code, - tree_low_cst (loop_bound_step, 0)); + tree_to_shwi (loop_bound_step)); } /* Free basic blocks from get_loop_body. */ diff --git a/gcc/pretty-print.h b/gcc/pretty-print.h index a60be3285ea..0003f285b41 100644 --- a/gcc/pretty-print.h +++ b/gcc/pretty-print.h @@ -23,6 +23,7 @@ along with GCC; see the file COPYING3. If not see #include "obstack.h" #include "input.h" +#include "wide-int-print.h" /* Maximum number of format string arguments. 
*/ #define PP_NL_ARGMAX 30 @@ -261,6 +262,13 @@ pp_get_prefix (const pretty_printer *pp) { return pp->prefix; } #define pp_decimal_int(PP, I) pp_scalar (PP, "%d", I) #define pp_unsigned_wide_integer(PP, I) \ pp_scalar (PP, HOST_WIDE_INT_PRINT_UNSIGNED, (unsigned HOST_WIDE_INT) I) +#define pp_wide_int(PP, W, SGN) \ + do \ + { \ + print_dec (W, pp_buffer (PP)->digit_buffer, SGN); \ + pp_string (PP, pp_buffer (PP)->digit_buffer); \ + } \ + while (0) #define pp_wide_integer(PP, I) \ pp_scalar (PP, HOST_WIDE_INT_PRINT_DEC, (HOST_WIDE_INT) I) #define pp_widest_integer(PP, I) \ diff --git a/gcc/print-rtl.c b/gcc/print-rtl.c index 7d712c1562d..043f0f797f5 100644 --- a/gcc/print-rtl.c +++ b/gcc/print-rtl.c @@ -614,6 +614,12 @@ print_rtx (const_rtx in_rtx) fprintf (outfile, " [%s]", s); } break; + + case CONST_WIDE_INT: + if (! flag_simple) + fprintf (outfile, " "); + cwi_output_hex (outfile, in_rtx); + break; #endif case CODE_LABEL: diff --git a/gcc/print-tree.c b/gcc/print-tree.c index 2b9aa704bec..c464166f408 100644 --- a/gcc/print-tree.c +++ b/gcc/print-tree.c @@ -31,6 +31,7 @@ along with GCC; see the file COPYING3. If not see #include "tree-ssa.h" #include "tree-dump.h" #include "dumpfile.h" +#include "wide-int-print.h" /* Define the hash table of nodes already seen. Such nodes are not repeated; brief cross-references are used. 
*/ @@ -121,16 +122,7 @@ print_node_brief (FILE *file, const char *prefix, const_tree node, int indent) fprintf (file, " overflow"); fprintf (file, " "); - if (TREE_INT_CST_HIGH (node) == 0) - fprintf (file, HOST_WIDE_INT_PRINT_UNSIGNED, TREE_INT_CST_LOW (node)); - else if (TREE_INT_CST_HIGH (node) == -1 - && TREE_INT_CST_LOW (node) != 0) - fprintf (file, "-" HOST_WIDE_INT_PRINT_UNSIGNED, - -TREE_INT_CST_LOW (node)); - else - fprintf (file, HOST_WIDE_INT_PRINT_DOUBLE_HEX, - (unsigned HOST_WIDE_INT) TREE_INT_CST_HIGH (node), - (unsigned HOST_WIDE_INT) TREE_INT_CST_LOW (node)); + print_dec (wide_int (node), file, TYPE_SIGN (TREE_TYPE (node))); } if (TREE_CODE (node) == REAL_CST) { @@ -335,7 +327,7 @@ print_node (FILE *file, const char *prefix, tree node, int indent) if (TREE_VISITED (node)) fputs (" visited", file); - if (code != TREE_VEC && code != SSA_NAME) + if (code != TREE_VEC && code != INTEGER_CST && code != SSA_NAME) { if (TREE_LANG_FLAG_0 (node)) fputs (" tree_0", file); @@ -741,17 +733,7 @@ print_node (FILE *file, const char *prefix, tree node, int indent) fprintf (file, " overflow"); fprintf (file, " "); - if (TREE_INT_CST_HIGH (node) == 0) - fprintf (file, HOST_WIDE_INT_PRINT_UNSIGNED, - TREE_INT_CST_LOW (node)); - else if (TREE_INT_CST_HIGH (node) == -1 - && TREE_INT_CST_LOW (node) != 0) - fprintf (file, "-" HOST_WIDE_INT_PRINT_UNSIGNED, - -TREE_INT_CST_LOW (node)); - else - fprintf (file, HOST_WIDE_INT_PRINT_DOUBLE_HEX, - (unsigned HOST_WIDE_INT) TREE_INT_CST_HIGH (node), - (unsigned HOST_WIDE_INT) TREE_INT_CST_LOW (node)); + print_dec (wide_int (node), file, TYPE_SIGN (TREE_TYPE (node))); break; case REAL_CST: diff --git a/gcc/read-rtl.c b/gcc/read-rtl.c index 10adf472a08..c198b5b6963 100644 --- a/gcc/read-rtl.c +++ b/gcc/read-rtl.c @@ -811,6 +811,29 @@ validate_const_int (const char *string) fatal_with_file_and_line ("invalid decimal constant \"%s\"\n", string); } +static void +validate_const_wide_int (const char *string) +{ + const char *cp; + int 
valid = 1; + + cp = string; + while (*cp && ISSPACE (*cp)) + cp++; + /* Skip the leading 0x. */ + if (cp[0] == '0' || cp[1] == 'x') + cp += 2; + else + valid = 0; + if (*cp == 0) + valid = 0; + for (; *cp; cp++) + if (! ISXDIGIT (*cp)) + valid = 0; + if (!valid) + fatal_with_file_and_line ("invalid hex constant \"%s\"\n", string); +} + /* Record that PTR uses iterator ITERATOR. */ static void @@ -1324,6 +1347,54 @@ read_rtx_code (const char *code_name) gcc_unreachable (); } + if (CONST_WIDE_INT_P (return_rtx)) + { + read_name (&name); + validate_const_wide_int (name.string); + { + const char *s = name.string; + int len; + int index = 0; + int gs = HOST_BITS_PER_WIDE_INT/4; + int pos; + char * buf = XALLOCAVEC (char, gs + 1); + unsigned HOST_WIDE_INT wi; + int wlen; + + /* Skip the leading spaces. */ + while (*s && ISSPACE (*s)) + s++; + + /* Skip the leading 0x. */ + gcc_assert (s[0] == '0'); + gcc_assert (s[1] == 'x'); + s += 2; + + len = strlen (s); + pos = len - gs; + wlen = (len + gs - 1) / gs; /* Number of words needed */ + + return_rtx = const_wide_int_alloc (wlen); + + while (pos > 0) + { +#if HOST_BITS_PER_WIDE_INT == 64 + sscanf (s + pos, "%16" HOST_WIDE_INT_PRINT "x", &wi); +#else + sscanf (s + pos, "%8" HOST_WIDE_INT_PRINT "x", &wi); +#endif + CWI_ELT (return_rtx, index++) = wi; + pos -= gs; + } + strncpy (buf, s, gs - pos); + buf [gs - pos] = 0; + sscanf (buf, "%" HOST_WIDE_INT_PRINT "x", &wi); + CWI_ELT (return_rtx, index++) = wi; + /* TODO: After reading, do we want to canonicalize with: + value = lookup_const_wide_int (value); ? */ + } + } + c = read_skip_spaces (); /* Syntactic sugar for AND and IOR, allowing Lisp-like arbitrary number of arguments for them. 
*/ diff --git a/gcc/real.c b/gcc/real.c index b59be66af94..f9651163aaf 100644 --- a/gcc/real.c +++ b/gcc/real.c @@ -29,6 +29,7 @@ #include "realmpfr.h" #include "tm_p.h" #include "dfp.h" +#include "wide-int.h" /* The floating point model used internally is not exactly IEEE 754 compliant, and close to the description in the ISO C99 standard, @@ -1377,42 +1378,38 @@ real_to_integer (const REAL_VALUE_TYPE *r) } } -/* Likewise, but to an integer pair, HI+LOW. */ +/* Likewise, but producing a wide-int of PRECISION. If + the value cannot be represented in precision, FAIL is set to + TRUE. */ -void -real_to_integer2 (HOST_WIDE_INT *plow, HOST_WIDE_INT *phigh, - const REAL_VALUE_TYPE *r) +wide_int +real_to_integer (const REAL_VALUE_TYPE *r, bool *fail, int precision) { - REAL_VALUE_TYPE t; - HOST_WIDE_INT low, high; + HOST_WIDE_INT val[2 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_WIDE_INT]; int exp; + int words; + wide_int result; + int w; switch (r->cl) { case rvc_zero: underflow: - low = high = 0; - break; + return wi::zero (precision); case rvc_inf: case rvc_nan: overflow: - high = (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1); + *fail = true; + if (r->sign) - low = 0; + return wi::set_bit_in_zero (precision - 1, precision); else - { - high--; - low = -1; - } - break; + return ~wi::set_bit_in_zero (precision - 1, precision); case rvc_normal: if (r->decimal) - { - decimal_real_to_integer2 (plow, phigh, r); - return; - } + return decimal_real_to_integer (r, fail, precision); exp = REAL_EXP (r); if (exp <= 0) @@ -1421,42 +1418,49 @@ real_to_integer2 (HOST_WIDE_INT *plow, HOST_WIDE_INT *phigh, undefined, so it doesn't matter what we return, and some callers expect to be able to use this routine for both signed and unsigned conversions. 
*/ - if (exp > HOST_BITS_PER_DOUBLE_INT) + if (exp > precision) goto overflow; - rshift_significand (&t, r, HOST_BITS_PER_DOUBLE_INT - exp); - if (HOST_BITS_PER_WIDE_INT == HOST_BITS_PER_LONG) + words = (precision + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT; + + for (int i = 0; i < 2 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_WIDE_INT; i++) + val[i] = 0; + +#if (HOST_BITS_PER_WIDE_INT == HOST_BITS_PER_LONG) + for (int i = 0; i < words; i++) { - high = t.sig[SIGSZ-1]; - low = t.sig[SIGSZ-2]; + int j = SIGSZ - words + i; + val[i] = (j < 0) ? 0 : r->sig[j]; } - else +#else + gcc_assert (HOST_BITS_PER_WIDE_INT == 2*HOST_BITS_PER_LONG); + for (int i = 0; i < words; i++) { - gcc_assert (HOST_BITS_PER_WIDE_INT == 2*HOST_BITS_PER_LONG); - high = t.sig[SIGSZ-1]; - high = high << (HOST_BITS_PER_LONG - 1) << 1; - high |= t.sig[SIGSZ-2]; - - low = t.sig[SIGSZ-3]; - low = low << (HOST_BITS_PER_LONG - 1) << 1; - low |= t.sig[SIGSZ-4]; + int j = SIGSZ - (words * 2) + (i + 2) + 1; + if (j < 0) + val[i] = 0; + else + { + val[i] = r->sig[j]; + val[i] <<= HOST_BITS_PER_LONG; + val[i] |= r->sig[j - 1]; + } } +#endif + w = SIGSZ * HOST_BITS_PER_LONG + words * HOST_BITS_PER_WIDE_INT; + result = wide_int::from_array (val, + (w + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT, w, w); + result = wi::lrshift (result, (words * HOST_BITS_PER_WIDE_INT) - exp); + result = wide_int::from (result, precision, UNSIGNED); if (r->sign) - { - if (low == 0) - high = -high; - else - low = -low, high = ~high; - } - break; + return -result; + else + return result; default: gcc_unreachable (); } - - *plow = low; - *phigh = high; } /* A subroutine of real_to_decimal. Compute the quotient and remainder @@ -2144,43 +2148,131 @@ real_from_string3 (REAL_VALUE_TYPE *r, const char *s, enum machine_mode mode) real_convert (r, mode, r); } -/* Initialize R from the integer pair HIGH+LOW. */ +/* Initialize R from a HOST_WIDE_INT. 
*/ void real_from_integer (REAL_VALUE_TYPE *r, enum machine_mode mode, - unsigned HOST_WIDE_INT low, HOST_WIDE_INT high, - int unsigned_p) + HOST_WIDE_INT val, + signop sgn) { - if (low == 0 && high == 0) + if (val == 0) get_zero (r, 0); else { memset (r, 0, sizeof (*r)); r->cl = rvc_normal; - r->sign = high < 0 && !unsigned_p; - SET_REAL_EXP (r, HOST_BITS_PER_DOUBLE_INT); + r->sign = val < 0 && sgn == SIGNED; + SET_REAL_EXP (r, HOST_BITS_PER_WIDE_INT); + /* TODO: This fails for -MAXHOSTWIDEINT, wide_int version would + have worked. */ if (r->sign) + val = -val; + + if (HOST_BITS_PER_LONG == HOST_BITS_PER_WIDE_INT) + r->sig[SIGSZ-1] = val; + else { - high = ~high; - if (low == 0) - high += 1; - else - low = -low; + gcc_assert (HOST_BITS_PER_LONG*2 == HOST_BITS_PER_WIDE_INT); + r->sig[SIGSZ-1] = val >> (HOST_BITS_PER_LONG - 1) >> 1; + r->sig[SIGSZ-2] = val; } - if (HOST_BITS_PER_LONG == HOST_BITS_PER_WIDE_INT) + normalize (r); + } + + if (DECIMAL_FLOAT_MODE_P (mode)) + decimal_from_integer (r); + else if (mode != VOIDmode) + real_convert (r, mode, r); +} + +/* Initialize R from the integer pair HIGH+LOW. */ + +void +real_from_integer (REAL_VALUE_TYPE *r, enum machine_mode mode, + const wide_int_ref &val_in, signop sgn) +{ + if (val_in == 0) + get_zero (r, 0); + else + { + unsigned int len = val_in.get_precision (); + int i, j, e=0; + int maxbitlen = MAX_BITSIZE_MODE_ANY_INT + HOST_BITS_PER_WIDE_INT; + const unsigned int realmax = SIGNIFICAND_BITS/HOST_BITS_PER_WIDE_INT * HOST_BITS_PER_WIDE_INT; + + memset (r, 0, sizeof (*r)); + r->cl = rvc_normal; + r->sign = wi::neg_p (val_in, sgn); + + if (len == 0) + len = 1; + + /* We have to ensure we can negate the largest negative number. */ + wide_int val = wide_int::from (val_in, maxbitlen, sgn); + + if (r->sign) + val = -val; + else + val = val; + + /* Ensure a multiple of HOST_BITS_PER_WIDE_INT, ceiling, as elt + won't work with precisions that are not a multiple of + HOST_BITS_PER_WIDE_INT. 
*/ + len += HOST_BITS_PER_WIDE_INT - 1; + + /* Ensure we can represent the largest negative number. */ + len += 1; + + len = len/HOST_BITS_PER_WIDE_INT * HOST_BITS_PER_WIDE_INT; + + /* Cap the size to the size allowed by real.h. */ + if (len > realmax) { - r->sig[SIGSZ-1] = high; - r->sig[SIGSZ-2] = low; + HOST_WIDE_INT cnt_l_z; + cnt_l_z = wi::clz (val); + + if (maxbitlen - cnt_l_z > realmax) + { + e = maxbitlen - cnt_l_z - realmax; + + /* This value is too large, we must shift it right to + preserve all the bits we can, and then bump the + exponent up by that amount. */ + val = wi::lrshift (val, e); + } + len = realmax; } + + /* Clear out top bits so elt will work with precisions that aren't + a multiple of HOST_BITS_PER_WIDE_INT. */ + val = wide_int::from (val, len, sgn); + len = len / HOST_BITS_PER_WIDE_INT; + + SET_REAL_EXP (r, len * HOST_BITS_PER_WIDE_INT + e); + + j = SIGSZ - 1; + if (HOST_BITS_PER_LONG == HOST_BITS_PER_WIDE_INT) + for (i = len - 1; i >= 0; i--) + { + r->sig[j--] = val.elt (i); + if (j < 0) + break; + } else { gcc_assert (HOST_BITS_PER_LONG*2 == HOST_BITS_PER_WIDE_INT); - r->sig[SIGSZ-1] = high >> (HOST_BITS_PER_LONG - 1) >> 1; - r->sig[SIGSZ-2] = high; - r->sig[SIGSZ-3] = low >> (HOST_BITS_PER_LONG - 1) >> 1; - r->sig[SIGSZ-4] = low; + for (i = len - 1; i >= 0; i--) + { + HOST_WIDE_INT e = val.elt (i); + r->sig[j--] = e >> (HOST_BITS_PER_LONG - 1) >> 1; + if (j < 0) + break; + r->sig[j--] = e; + if (j < 0) + break; + } } normalize (r); @@ -2270,7 +2362,7 @@ ten_to_ptwo (int n) for (i = 0; i < n; ++i) t *= t; - real_from_integer (&tens[n], VOIDmode, t, 0, 1); + real_from_integer (&tens[n], VOIDmode, t, UNSIGNED); } else { @@ -2309,7 +2401,7 @@ real_digit (int n) gcc_assert (n <= 9); if (n > 0 && num[n].cl == rvc_zero) - real_from_integer (&num[n], VOIDmode, n, 0, 1); + real_from_integer (&num[n], VOIDmode, n, UNSIGNED); return &num[n]; } diff --git a/gcc/real.h b/gcc/real.h index b92453411d7..54b6ed7ac42 100644 --- a/gcc/real.h +++ 
b/gcc/real.h @@ -21,6 +21,9 @@ #define GCC_REAL_H #include "machmode.h" +#include "signop.h" +#include "wide-int.h" +#include "insn-modes.h" /* An expanded form of the represented number. */ @@ -267,8 +270,6 @@ extern void real_to_hexadecimal (char *, const REAL_VALUE_TYPE *, /* Render R as an integer. */ extern HOST_WIDE_INT real_to_integer (const REAL_VALUE_TYPE *); -extern void real_to_integer2 (HOST_WIDE_INT *, HOST_WIDE_INT *, - const REAL_VALUE_TYPE *); /* Initialize R from a decimal or hexadecimal string. Return -1 if the value underflows, +1 if overflows, and 0 otherwise. */ @@ -276,9 +277,9 @@ extern int real_from_string (REAL_VALUE_TYPE *, const char *); /* Wrapper to allow different internal representation for decimal floats. */ extern void real_from_string3 (REAL_VALUE_TYPE *, const char *, enum machine_mode); -/* Initialize R from an integer pair HIGH/LOW. */ +/* Initialize R from an integer. */ extern void real_from_integer (REAL_VALUE_TYPE *, enum machine_mode, - unsigned HOST_WIDE_INT, HOST_WIDE_INT, int); + HOST_WIDE_INT, signop); extern long real_to_target_fmt (long *, const REAL_VALUE_TYPE *, const struct real_format *); @@ -361,11 +362,8 @@ extern const struct real_format arm_half_format; #define REAL_VALUE_TO_TARGET_SINGLE(IN, OUT) \ ((OUT) = real_to_target (NULL, &(IN), mode_for_size (32, MODE_FLOAT, 0))) -#define REAL_VALUE_FROM_INT(r, lo, hi, mode) \ - real_from_integer (&(r), mode, lo, hi, 0) - -#define REAL_VALUE_FROM_UNSIGNED_INT(r, lo, hi, mode) \ - real_from_integer (&(r), mode, lo, hi, 1) +#define REAL_VALUE_FROM_INT(r, val, mode) \ + real_from_integer (&(r), mode, val, SIGNED) /* Real values to IEEE 754 decimal floats. 
*/ @@ -383,9 +381,6 @@ extern const struct real_format arm_half_format; extern REAL_VALUE_TYPE real_value_truncate (enum machine_mode, REAL_VALUE_TYPE); -#define REAL_VALUE_TO_INT(plow, phigh, r) \ - real_to_integer2 (plow, phigh, &(r)) - extern REAL_VALUE_TYPE real_value_negate (const REAL_VALUE_TYPE *); extern REAL_VALUE_TYPE real_value_abs (const REAL_VALUE_TYPE *); @@ -489,4 +484,13 @@ extern bool real_isinteger (const REAL_VALUE_TYPE *c, enum machine_mode mode); number, (1 - b**-p) * b**emax for a given FP format FMT as a hex float string. BUF must be large enough to contain the result. */ extern void get_max_float (const struct real_format *, char *, size_t); + +#ifndef GENERATOR_FILE +/* real related routines. */ +extern wide_int real_to_integer (const REAL_VALUE_TYPE *, bool *, int); +extern void real_from_integer (REAL_VALUE_TYPE *, enum machine_mode, + const wide_int_ref &, signop); +extern wide_int decimal_real_to_integer (const REAL_VALUE_TYPE *, bool *, int); +#endif + #endif /* ! GCC_REAL_H */ diff --git a/gcc/recog.c b/gcc/recog.c index a3ca98c62fd..e4f4fadfd86 100644 --- a/gcc/recog.c +++ b/gcc/recog.c @@ -1148,7 +1148,7 @@ immediate_operand (rtx op, enum machine_mode mode) : mode, op)); } -/* Returns 1 if OP is an operand that is a CONST_INT. */ +/* Returns 1 if OP is an operand that is a CONST_INT of mode MODE. */ int const_int_operand (rtx op, enum machine_mode mode) @@ -1163,8 +1163,64 @@ const_int_operand (rtx op, enum machine_mode mode) return 1; } +#if TARGET_SUPPORTS_WIDE_INT +/* Returns 1 if OP is an operand that is a CONST_INT or CONST_WIDE_INT + of mode MODE. 
*/ +int +const_scalar_int_operand (rtx op, enum machine_mode mode) +{ + if (!CONST_SCALAR_INT_P (op)) + return 0; + + if (CONST_INT_P (op)) + return const_int_operand (op, mode); + + if (mode != VOIDmode) + { + int prec = GET_MODE_PRECISION (mode); + int bitsize = GET_MODE_BITSIZE (mode); + + if (CONST_WIDE_INT_NUNITS (op) * HOST_BITS_PER_WIDE_INT > bitsize) + return 0; + + if (prec == bitsize) + return 1; + else + { + /* Multiword partial int. */ + HOST_WIDE_INT x + = CONST_WIDE_INT_ELT (op, CONST_WIDE_INT_NUNITS (op) - 1); + return (sext_hwi (x, prec & (HOST_BITS_PER_WIDE_INT - 1)) == x); + } + } + return 1; +} + +/* Returns 1 if OP is an operand that is a CONST_WIDE_INT of mode + MODE. This most likely is not as useful as + const_scalar_int_operand since it does not accept CONST_INTs, but + is here for consistancy. */ +int +const_wide_int_operand (rtx op, enum machine_mode mode) +{ + if (!CONST_WIDE_INT_P (op)) + return 0; + + return const_scalar_int_operand (op, mode); +} + /* Returns 1 if OP is an operand that is a constant integer or constant - floating-point number. */ + floating-point number of MODE. */ + +int +const_double_operand (rtx op, enum machine_mode mode) +{ + return (GET_CODE (op) == CONST_DOUBLE) + && (GET_MODE (op) == mode || mode == VOIDmode); +} +#else +/* Returns 1 if OP is an operand that is a constant integer or constant + floating-point number of MODE. */ int const_double_operand (rtx op, enum machine_mode mode) @@ -1180,8 +1236,9 @@ const_double_operand (rtx op, enum machine_mode mode) && (mode == VOIDmode || GET_MODE (op) == mode || GET_MODE (op) == VOIDmode)); } - -/* Return 1 if OP is a general operand that is not an immediate operand. */ +#endif +/* Return 1 if OP is a general operand that is not an immediate + operand of mode MODE. */ int nonimmediate_operand (rtx op, enum machine_mode mode) @@ -1189,7 +1246,8 @@ nonimmediate_operand (rtx op, enum machine_mode mode) return (general_operand (op, mode) && ! 
CONSTANT_P (op)); } -/* Return 1 if OP is a register reference or immediate value of mode MODE. */ +/* Return 1 if OP is a register reference or immediate value of mode + MODE. */ int nonmemory_operand (rtx op, enum machine_mode mode) diff --git a/gcc/rtl.c b/gcc/rtl.c index 52b7747b693..5c61e87dcd7 100644 --- a/gcc/rtl.c +++ b/gcc/rtl.c @@ -109,7 +109,7 @@ const enum rtx_class rtx_class[NUM_RTX_CODE] = { const unsigned char rtx_code_size[NUM_RTX_CODE] = { #define DEF_RTL_EXPR(ENUM, NAME, FORMAT, CLASS) \ (((ENUM) == CONST_INT || (ENUM) == CONST_DOUBLE \ - || (ENUM) == CONST_FIXED) \ + || (ENUM) == CONST_FIXED || (ENUM) == CONST_WIDE_INT) \ ? RTX_HDR_SIZE + (sizeof FORMAT - 1) * sizeof (HOST_WIDE_INT) \ : RTX_HDR_SIZE + (sizeof FORMAT - 1) * sizeof (rtunion)), @@ -181,18 +181,24 @@ shallow_copy_rtvec (rtvec vec) unsigned int rtx_size (const_rtx x) { + if (CONST_WIDE_INT_P (x)) + return (RTX_HDR_SIZE + + sizeof (struct hwivec_def) + + ((CONST_WIDE_INT_NUNITS (x) - 1) + * sizeof (HOST_WIDE_INT))); if (GET_CODE (x) == SYMBOL_REF && SYMBOL_REF_HAS_BLOCK_INFO_P (x)) return RTX_HDR_SIZE + sizeof (struct block_symbol); return RTX_CODE_SIZE (GET_CODE (x)); } -/* Allocate an rtx of code CODE. The CODE is stored in the rtx; - all the rest is initialized to zero. */ +/* Allocate an rtx of code CODE with EXTRA bytes in it. The CODE is + stored in the rtx; all the rest is initialized to zero. */ rtx -rtx_alloc_stat (RTX_CODE code MEM_STAT_DECL) +rtx_alloc_stat_v (RTX_CODE code MEM_STAT_DECL, int extra) { - rtx rt = ggc_alloc_rtx_def_stat (RTX_CODE_SIZE (code) PASS_MEM_STAT); + rtx rt = ggc_alloc_rtx_def_stat (RTX_CODE_SIZE (code) + extra + PASS_MEM_STAT); /* We want to clear everything up to the FLD array. Normally, this is one int, but we don't want to assume that and it isn't very @@ -210,6 +216,29 @@ rtx_alloc_stat (RTX_CODE code MEM_STAT_DECL) return rt; } +/* Allocate an rtx of code CODE. The CODE is stored in the rtx; + all the rest is initialized to zero. 
*/ + +rtx +rtx_alloc_stat (RTX_CODE code MEM_STAT_DECL) +{ + return rtx_alloc_stat_v (code PASS_MEM_STAT, 0); +} + +/* Write the wide constant X to OUTFILE. */ + +void +cwi_output_hex (FILE *outfile, const_rtx x) +{ + int i = CWI_GET_NUM_ELEM (x); + gcc_assert (i > 0); + if (CWI_ELT (x, i-1) == 0) + fprintf (outfile, "0x"); + fprintf (outfile, HOST_WIDE_INT_PRINT_HEX, CWI_ELT (x, --i)); + while (--i >= 0) + fprintf (outfile, HOST_WIDE_INT_PRINT_PADDED_HEX, CWI_ELT (x, i)); +} + /* Return true if ORIG is a sharable CONST. */ @@ -428,7 +457,6 @@ rtx_equal_p_cb (const_rtx x, const_rtx y, rtx_equal_p_callback_function cb) if (XWINT (x, i) != XWINT (y, i)) return 0; break; - case 'n': case 'i': if (XINT (x, i) != XINT (y, i)) @@ -646,6 +674,10 @@ iterative_hash_rtx (const_rtx x, hashval_t hash) return iterative_hash_object (i, hash); case CONST_INT: return iterative_hash_object (INTVAL (x), hash); + case CONST_WIDE_INT: + for (i = 0; i < CONST_WIDE_INT_NUNITS (x); i++) + hash = iterative_hash_object (CONST_WIDE_INT_ELT (x, i), hash); + return hash; case SYMBOL_REF: if (XSTR (x, 0)) return iterative_hash (XSTR (x, 0), strlen (XSTR (x, 0)) + 1, @@ -811,6 +843,16 @@ rtl_check_failed_block_symbol (const char *file, int line, const char *func) /* XXX Maybe print the vector? */ void +cwi_check_failed_bounds (const_rtx x, int n, const char *file, int line, + const char *func) +{ + internal_error + ("RTL check: access of hwi elt %d of vector with last elt %d in %s, at %s:%d", + n, CWI_GET_NUM_ELEM (x) - 1, func, trim_filename (file), line); +} + +/* XXX Maybe print the vector? 
*/ +void rtvec_check_failed_bounds (const_rtvec r, int n, const char *file, int line, const char *func) { diff --git a/gcc/rtl.def b/gcc/rtl.def index 15a997b8c25..a76b28b66b3 100644 --- a/gcc/rtl.def +++ b/gcc/rtl.def @@ -345,6 +345,9 @@ DEF_RTL_EXPR(TRAP_IF, "trap_if", "ee", RTX_EXTRA) /* numeric integer constant */ DEF_RTL_EXPR(CONST_INT, "const_int", "w", RTX_CONST_OBJ) +/* numeric integer constant */ +DEF_RTL_EXPR(CONST_WIDE_INT, "const_wide_int", "", RTX_CONST_OBJ) + /* fixed-point constant */ DEF_RTL_EXPR(CONST_FIXED, "const_fixed", "www", RTX_CONST_OBJ) diff --git a/gcc/rtl.h b/gcc/rtl.h index 247a0d06701..afd731f93cd 100644 --- a/gcc/rtl.h +++ b/gcc/rtl.h @@ -20,6 +20,7 @@ along with GCC; see the file COPYING3. If not see #ifndef GCC_RTL_H #define GCC_RTL_H +#include <utility> #include "statistics.h" #include "machmode.h" #include "input.h" @@ -28,6 +29,7 @@ along with GCC; see the file COPYING3. If not see #include "fixed-value.h" #include "alias.h" #include "hashtab.h" +#include "wide-int.h" #include "flags.h" /* Value used by some passes to "recognize" noop moves as valid @@ -249,6 +251,16 @@ struct GTY(()) object_block { vec<rtx, va_gc> *anchors; }; +struct GTY((variable_size)) hwivec_def { + HOST_WIDE_INT elem[1]; +}; + +/* Number of elements of the HWIVEC if RTX is a CONST_WIDE_INT. */ +#define CWI_GET_NUM_ELEM(RTX) \ + ((int)RTL_FLAG_CHECK1("CWI_GET_NUM_ELEM", (RTX), CONST_WIDE_INT)->u2.num_elem) +#define CWI_PUT_NUM_ELEM(RTX, NUM) \ + (RTL_FLAG_CHECK1("CWI_PUT_NUM_ELEM", (RTX), CONST_WIDE_INT)->u2.num_elem = (NUM)) + /* RTL expression ("rtx"). */ struct GTY((chain_next ("RTX_NEXT (&%h)"), @@ -335,6 +347,14 @@ struct GTY((chain_next ("RTX_NEXT (&%h)"), 1 in a VALUE or DEBUG_EXPR is NO_LOC_P in var-tracking.c. */ unsigned return_val : 1; + union { + /* RTXs are free to use up to 32 bit from here. */ + + /* In a CONST_WIDE_INT (aka hwivec_def), this is the number of HOST_WIDE_INTs + in the hwivec_def. 
*/ + unsigned GTY ((tag ("CONST_WIDE_INT"))) num_elem:32; + } GTY ((desc ("GET_CODE (&%0)"))) u2; + /* The first element of the operands of this rtx. The number of operands and their types are controlled by the `code' field, according to rtl.def. */ @@ -344,6 +364,7 @@ struct GTY((chain_next ("RTX_NEXT (&%h)"), struct block_symbol block_sym; struct real_value rv; struct fixed_value fv; + struct hwivec_def hwiv; } GTY ((special ("rtx_def"), desc ("GET_CODE (&%0)"))) u; }; @@ -383,13 +404,13 @@ struct GTY((chain_next ("RTX_NEXT (&%h)"), for a variable number of things. The principle use is inside PARALLEL expressions. */ +#define NULL_RTVEC (rtvec) 0 + struct GTY((variable_size)) rtvec_def { int num_elem; /* number of elements */ rtx GTY ((length ("%h.num_elem"))) elem[1]; }; -#define NULL_RTVEC (rtvec) 0 - #define GET_NUM_ELEM(RTVEC) ((RTVEC)->num_elem) #define PUT_NUM_ELEM(RTVEC, NUM) ((RTVEC)->num_elem = (NUM)) @@ -399,12 +420,38 @@ struct GTY((variable_size)) rtvec_def { /* Predicate yielding nonzero iff X is an rtx for a memory location. */ #define MEM_P(X) (GET_CODE (X) == MEM) +#if TARGET_SUPPORTS_WIDE_INT + +/* Match CONST_*s that can represent compile-time constant integers. */ +#define CASE_CONST_SCALAR_INT \ + case CONST_INT: \ + case CONST_WIDE_INT + +/* Match CONST_*s for which pointer equality corresponds to value + equality. */ +#define CASE_CONST_UNIQUE \ + case CONST_INT: \ + case CONST_WIDE_INT: \ + case CONST_DOUBLE: \ + case CONST_FIXED + +/* Match all CONST_* rtxes. */ +#define CASE_CONST_ANY \ + case CONST_INT: \ + case CONST_WIDE_INT: \ + case CONST_DOUBLE: \ + case CONST_FIXED: \ + case CONST_VECTOR + +#else + /* Match CONST_*s that can represent compile-time constant integers. */ #define CASE_CONST_SCALAR_INT \ case CONST_INT: \ case CONST_DOUBLE -/* Match CONST_*s for which pointer equality corresponds to value equality. */ +/* Match CONST_*s for which pointer equality corresponds to value +equality. 
*/ #define CASE_CONST_UNIQUE \ case CONST_INT: \ case CONST_DOUBLE: \ @@ -416,10 +463,17 @@ struct GTY((variable_size)) rtvec_def { case CONST_DOUBLE: \ case CONST_FIXED: \ case CONST_VECTOR +#endif + + + /* Predicate yielding nonzero iff X is an rtx for a constant integer. */ #define CONST_INT_P(X) (GET_CODE (X) == CONST_INT) +/* Predicate yielding nonzero iff X is an rtx for a constant integer. */ +#define CONST_WIDE_INT_P(X) (GET_CODE (X) == CONST_WIDE_INT) + /* Predicate yielding nonzero iff X is an rtx for a constant fixed-point. */ #define CONST_FIXED_P(X) (GET_CODE (X) == CONST_FIXED) @@ -432,8 +486,13 @@ struct GTY((variable_size)) rtvec_def { (GET_CODE (X) == CONST_DOUBLE && GET_MODE (X) == VOIDmode) /* Predicate yielding true iff X is an rtx for a integer const. */ +#if TARGET_SUPPORTS_WIDE_INT +#define CONST_SCALAR_INT_P(X) \ + (CONST_INT_P (X) || CONST_WIDE_INT_P (X)) +#else #define CONST_SCALAR_INT_P(X) \ (CONST_INT_P (X) || CONST_DOUBLE_AS_INT_P (X)) +#endif /* Predicate yielding true iff X is an rtx for a double-int. 
*/ #define CONST_DOUBLE_AS_FLOAT_P(X) \ @@ -594,6 +653,15 @@ struct GTY((variable_size)) rtvec_def { __FUNCTION__); \ &_rtx->u.hwint[_n]; })) +#define CWI_ELT(RTX, I) __extension__ \ +(*({ __typeof (RTX) const _cwi = (RTX); \ + int _max = CWI_GET_NUM_ELEM (_cwi); \ + const int _i = (I); \ + if (_i < 0 || _i >= _max) \ + cwi_check_failed_bounds (_cwi, _i, __FILE__, __LINE__, \ + __FUNCTION__); \ + &_cwi->u.hwiv.elem[_i]; })) + #define XCWINT(RTX, N, C) __extension__ \ (*({ __typeof (RTX) const _rtx = (RTX); \ if (GET_CODE (_rtx) != (C)) \ @@ -630,6 +698,11 @@ struct GTY((variable_size)) rtvec_def { __FUNCTION__); \ &_symbol->u.block_sym; }) +#define HWIVEC_CHECK(RTX,C) __extension__ \ +({ __typeof (RTX) const _symbol = (RTX); \ + RTL_CHECKC1 (_symbol, 0, C); \ + &_symbol->u.hwiv; }) + extern void rtl_check_failed_bounds (const_rtx, int, const char *, int, const char *) ATTRIBUTE_NORETURN; @@ -650,6 +723,9 @@ extern void rtl_check_failed_code_mode (const_rtx, enum rtx_code, enum machine_m ATTRIBUTE_NORETURN; extern void rtl_check_failed_block_symbol (const char *, int, const char *) ATTRIBUTE_NORETURN; +extern void cwi_check_failed_bounds (const_rtx, int, const char *, int, + const char *) + ATTRIBUTE_NORETURN; extern void rtvec_check_failed_bounds (const_rtvec, int, const char *, int, const char *) ATTRIBUTE_NORETURN; @@ -662,12 +738,14 @@ extern void rtvec_check_failed_bounds (const_rtvec, int, const char *, int, #define RTL_CHECKC2(RTX, N, C1, C2) ((RTX)->u.fld[N]) #define RTVEC_ELT(RTVEC, I) ((RTVEC)->elem[I]) #define XWINT(RTX, N) ((RTX)->u.hwint[N]) +#define CWI_ELT(RTX, I) ((RTX)->u.hwiv.elem[I]) #define XCWINT(RTX, N, C) ((RTX)->u.hwint[N]) #define XCMWINT(RTX, N, C, M) ((RTX)->u.hwint[N]) #define XCNMWINT(RTX, N, C, M) ((RTX)->u.hwint[N]) #define XCNMPRV(RTX, C, M) (&(RTX)->u.rv) #define XCNMPFV(RTX, C, M) (&(RTX)->u.fv) #define BLOCK_SYMBOL_CHECK(RTX) (&(RTX)->u.block_sym) +#define HWIVEC_CHECK(RTX,C) (&(RTX)->u.hwiv) #endif @@ -810,8 +888,8 @@ extern void 
rtl_check_failed_flag (const char *, const_rtx, const char *, #define XCCFI(RTX, N, C) (RTL_CHECKC1 (RTX, N, C).rt_cfi) #define XCCSELIB(RTX, N, C) (RTL_CHECKC1 (RTX, N, C).rt_cselib) -#define XCVECEXP(RTX, N, M, C) RTVEC_ELT (XCVEC (RTX, N, C), M) -#define XCVECLEN(RTX, N, C) GET_NUM_ELEM (XCVEC (RTX, N, C)) +#define XCVECEXP(RTX, N, M, C) RTVEC_ELT (XCVEC (RTX, N, C), M) +#define XCVECLEN(RTX, N, C) GET_NUM_ELEM (XCVEC (RTX, N, C)) #define XC2EXP(RTX, N, C1, C2) (RTL_CHECKC2 (RTX, N, C1, C2).rt_rtx) @@ -1154,9 +1232,19 @@ rhs_regno (const_rtx x) #define INTVAL(RTX) XCWINT (RTX, 0, CONST_INT) #define UINTVAL(RTX) ((unsigned HOST_WIDE_INT) INTVAL (RTX)) +/* For a CONST_WIDE_INT, CONST_WIDE_INT_NUNITS is the number of + elements actually needed to represent the constant. + CONST_WIDE_INT_ELT gets one of the elements. 0 is the least + significant HOST_WIDE_INT. */ +#define CONST_WIDE_INT_VEC(RTX) HWIVEC_CHECK (RTX, CONST_WIDE_INT) +#define CONST_WIDE_INT_NUNITS(RTX) CWI_GET_NUM_ELEM (RTX) +#define CONST_WIDE_INT_ELT(RTX, N) CWI_ELT (RTX, N) + /* For a CONST_DOUBLE: +#if TARGET_SUPPORTS_WIDE_INT == 0 For a VOIDmode, there are two integers CONST_DOUBLE_LOW is the low-order word and ..._HIGH the high-order. +#endif For a float, there is a REAL_VALUE_TYPE structure, and CONST_DOUBLE_REAL_VALUE(r) is a pointer to it. */ #define CONST_DOUBLE_LOW(r) XCMWINT (r, 0, CONST_DOUBLE, VOIDmode) @@ -1311,6 +1399,88 @@ struct address_info { bool autoinc_p; }; +/* This is used to bundle an rtx and a mode together so that the pair + can be used as the second operand of a wide int expression. If we + ever put modes into rtx integer constants, this should go away and + then just pass an rtx in. 
*/ +typedef std::pair <rtx, enum machine_mode> rtx_mode_t; + +namespace wi +{ + template <> + struct int_traits <rtx_mode_t> + { + static const enum precision_type precision_type = VAR_PRECISION; + static const bool host_dependent_precision = false; + static unsigned int get_precision (const rtx_mode_t &); + static wi::storage_ref decompose (HOST_WIDE_INT *, unsigned int, + const rtx_mode_t &); + }; +} + +inline unsigned int +wi::int_traits <rtx_mode_t>::get_precision (const rtx_mode_t &x) +{ + return GET_MODE_PRECISION (x.second); +} + +inline wi::storage_ref +wi::int_traits <rtx_mode_t>::decompose (HOST_WIDE_INT *, + unsigned int precision, + const rtx_mode_t &x) +{ + gcc_checking_assert (precision == get_precision (x)); + switch (GET_CODE (x.first)) + { + case CONST_INT: + if (precision < HOST_BITS_PER_WIDE_INT) + gcc_checking_assert (INTVAL (x.first) == sext_hwi (INTVAL (x.first), precision)); + + return wi::storage_ref (&INTVAL (x.first), 1, precision); + + case CONST_WIDE_INT: + return wi::storage_ref (&CONST_WIDE_INT_ELT (x.first, 0), + CONST_WIDE_INT_NUNITS (x.first), precision); + +#if TARGET_SUPPORTS_WIDE_INT == 0 + case CONST_DOUBLE: + return wi::storage_ref (&CONST_DOUBLE_LOW (x.first), 2, precision); +#endif + + default: + gcc_unreachable (); + } +} + +namespace wi +{ + hwi_with_prec shwi (HOST_WIDE_INT, enum machine_mode mode); + wide_int min_value (enum machine_mode, signop); + wide_int max_value (enum machine_mode, signop); +} + +inline wi::hwi_with_prec +wi::shwi (HOST_WIDE_INT val, enum machine_mode mode) +{ + return shwi (val, GET_MODE_PRECISION (mode)); +} + +/* Produce the smallest number that is represented in MODE. The precision + is taken from MODE and the sign from SGN. */ +inline wide_int +wi::min_value (enum machine_mode mode, signop sgn) +{ + return min_value (GET_MODE_PRECISION (mode), sgn); +} + +/* Produce the largest number that is represented in MODE. The precision + is taken from MODE and the sign from SGN. 
*/ +inline wide_int +wi::max_value (enum machine_mode mode, signop sgn) +{ + return max_value (GET_MODE_PRECISION (mode), sgn); +} + extern void init_rtlanal (void); extern int rtx_cost (rtx, enum rtx_code, int, bool); extern int address_cost (rtx, enum machine_mode, addr_space_t, bool); @@ -1766,6 +1936,12 @@ extern rtx plus_constant (enum machine_mode, rtx, HOST_WIDE_INT); /* In rtl.c */ extern rtx rtx_alloc_stat (RTX_CODE MEM_STAT_DECL); #define rtx_alloc(c) rtx_alloc_stat (c MEM_STAT_INFO) +extern rtx rtx_alloc_stat_v (RTX_CODE MEM_STAT_DECL, int); +#define rtx_alloc_v(c, SZ) rtx_alloc_stat_v (c MEM_STAT_INFO, SZ) +#define const_wide_int_alloc(NWORDS) \ + rtx_alloc_v (CONST_WIDE_INT, \ + (sizeof (struct hwivec_def) \ + + ((NWORDS)-1) * sizeof (HOST_WIDE_INT))) \ extern rtvec rtvec_alloc (int); extern rtvec shallow_copy_rtvec (rtvec); @@ -1822,10 +1998,17 @@ extern void start_sequence (void); extern void push_to_sequence (rtx); extern void push_to_sequence2 (rtx, rtx); extern void end_sequence (void); +#if TARGET_SUPPORTS_WIDE_INT == 0 extern double_int rtx_to_double_int (const_rtx); -extern rtx immed_double_int_const (double_int, enum machine_mode); +#endif +extern void cwi_output_hex (FILE *, const_rtx); +#ifndef GENERATOR_FILE +extern rtx immed_wide_int_const (const wide_int &cst, enum machine_mode mode); +#endif +#if TARGET_SUPPORTS_WIDE_INT == 0 extern rtx immed_double_const (HOST_WIDE_INT, HOST_WIDE_INT, enum machine_mode); +#endif /* In loop-iv.c */ diff --git a/gcc/rtlanal.c b/gcc/rtlanal.c index 9769b69bdb5..12a5ce71553 100644 --- a/gcc/rtlanal.c +++ b/gcc/rtlanal.c @@ -279,8 +279,8 @@ rtx_addr_can_trap_p_1 (const_rtx x, HOST_WIDE_INT offset, HOST_WIDE_INT size, if (!decl) decl_size = -1; else if (DECL_P (decl) && DECL_SIZE_UNIT (decl)) - decl_size = (host_integerp (DECL_SIZE_UNIT (decl), 0) - ? tree_low_cst (DECL_SIZE_UNIT (decl), 0) + decl_size = (tree_fits_shwi_p (DECL_SIZE_UNIT (decl)) + ? 
tree_to_shwi (DECL_SIZE_UNIT (decl)) : -1); else if (TREE_CODE (decl) == STRING_CST) decl_size = TREE_STRING_LENGTH (decl); @@ -3129,6 +3129,8 @@ commutative_operand_precedence (rtx op) /* Constants always come the second operand. Prefer "nice" constants. */ if (code == CONST_INT) return -8; + if (code == CONST_WIDE_INT) + return -8; if (code == CONST_DOUBLE) return -7; if (code == CONST_FIXED) @@ -3141,6 +3143,8 @@ commutative_operand_precedence (rtx op) case RTX_CONST_OBJ: if (code == CONST_INT) return -6; + if (code == CONST_WIDE_INT) + return -6; if (code == CONST_DOUBLE) return -5; if (code == CONST_FIXED) @@ -5327,7 +5331,10 @@ get_address_mode (rtx mem) /* Split up a CONST_DOUBLE or integer constant rtx into two rtx's for single words, storing in *FIRST the word that comes first in memory in the target - and in *SECOND the other. */ + and in *SECOND the other. + + TODO: This function needs to be rewritten to work on any size + integer. */ void split_double (rtx value, rtx *first, rtx *second) @@ -5404,6 +5411,22 @@ split_double (rtx value, rtx *first, rtx *second) } } } + else if (GET_CODE (value) == CONST_WIDE_INT) + { + /* All of this is scary code and needs to be converted to + properly work with any size integer. 
*/ + gcc_assert (CONST_WIDE_INT_NUNITS (value) == 2); + if (WORDS_BIG_ENDIAN) + { + *first = GEN_INT (CONST_WIDE_INT_ELT (value, 1)); + *second = GEN_INT (CONST_WIDE_INT_ELT (value, 0)); + } + else + { + *first = GEN_INT (CONST_WIDE_INT_ELT (value, 0)); + *second = GEN_INT (CONST_WIDE_INT_ELT (value, 1)); + } + } else if (!CONST_DOUBLE_P (value)) { if (WORDS_BIG_ENDIAN) diff --git a/gcc/sched-vis.c b/gcc/sched-vis.c index a965c4d54d6..8fa29bfa046 100644 --- a/gcc/sched-vis.c +++ b/gcc/sched-vis.c @@ -428,6 +428,23 @@ print_value (pretty_printer *pp, const_rtx x, int verbose) pp_scalar (pp, HOST_WIDE_INT_PRINT_HEX, (unsigned HOST_WIDE_INT) INTVAL (x)); break; + + case CONST_WIDE_INT: + { + const char *sep = "<"; + int i; + for (i = CONST_WIDE_INT_NUNITS (x) - 1; i >= 0; i--) + { + pp_string (pp, sep); + sep = ","; + sprintf (tmp, HOST_WIDE_INT_PRINT_HEX, + (unsigned HOST_WIDE_INT) CONST_WIDE_INT_ELT (x, i)); + pp_string (pp, tmp); + } + pp_greater (pp); + } + break; + case CONST_DOUBLE: if (FLOAT_MODE_P (GET_MODE (x))) { diff --git a/gcc/sdbout.c b/gcc/sdbout.c index 1eedde3f520..e97d29948dc 100644 --- a/gcc/sdbout.c +++ b/gcc/sdbout.c @@ -535,10 +535,10 @@ plain_type_1 (tree type, int level) = (TYPE_DOMAIN (type) && TYPE_MIN_VALUE (TYPE_DOMAIN (type)) != 0 && TYPE_MAX_VALUE (TYPE_DOMAIN (type)) != 0 - && host_integerp (TYPE_MAX_VALUE (TYPE_DOMAIN (type)), 0) - && host_integerp (TYPE_MIN_VALUE (TYPE_DOMAIN (type)), 0) - ? (tree_low_cst (TYPE_MAX_VALUE (TYPE_DOMAIN (type)), 0) - - tree_low_cst (TYPE_MIN_VALUE (TYPE_DOMAIN (type)), 0) + 1) + && tree_fits_shwi_p (TYPE_MAX_VALUE (TYPE_DOMAIN (type))) + && tree_fits_shwi_p (TYPE_MIN_VALUE (TYPE_DOMAIN (type))) + ? 
(tree_to_shwi (TYPE_MAX_VALUE (TYPE_DOMAIN (type))) + - tree_to_shwi (TYPE_MIN_VALUE (TYPE_DOMAIN (type))) + 1) : 0); return PUSH_DERIVED_LEVEL (DT_ARY, m); @@ -994,8 +994,8 @@ sdbout_field_types (tree type) if (TREE_CODE (tail) == FIELD_DECL && DECL_NAME (tail) && DECL_SIZE (tail) - && host_integerp (DECL_SIZE (tail), 1) - && host_integerp (bit_position (tail), 0)) + && tree_fits_uhwi_p (DECL_SIZE (tail)) + && tree_fits_shwi_p (bit_position (tail))) { if (POINTER_TYPE_P (TREE_TYPE (tail))) sdbout_one_type (TREE_TYPE (TREE_TYPE (tail))); @@ -1134,7 +1134,7 @@ sdbout_one_type (tree type) continue; PUT_SDB_DEF (IDENTIFIER_POINTER (child_type_name)); - PUT_SDB_INT_VAL (tree_low_cst (BINFO_OFFSET (child), 0)); + PUT_SDB_INT_VAL (tree_to_shwi (BINFO_OFFSET (child))); PUT_SDB_SCL (member_scl); sdbout_type (BINFO_TYPE (child)); PUT_SDB_ENDEF; @@ -1152,10 +1152,10 @@ sdbout_one_type (tree type) if (TREE_CODE (value) == CONST_DECL) value = DECL_INITIAL (value); - if (host_integerp (value, 0)) + if (tree_fits_hwi_p (value)) { PUT_SDB_DEF (IDENTIFIER_POINTER (TREE_PURPOSE (tem))); - PUT_SDB_INT_VAL (tree_low_cst (value, 0)); + PUT_SDB_INT_VAL (tree_to_shwi (value)); PUT_SDB_SCL (C_MOE); PUT_SDB_TYPE (T_MOE); PUT_SDB_ENDEF; @@ -1173,8 +1173,8 @@ sdbout_one_type (tree type) if (TREE_CODE (tem) == FIELD_DECL && DECL_NAME (tem) && DECL_SIZE (tem) - && host_integerp (DECL_SIZE (tem), 1) - && host_integerp (bit_position (tem), 0)) + && tree_fits_uhwi_p (DECL_SIZE (tem)) + && tree_fits_shwi_p (bit_position (tem))) { const char *name; @@ -1185,7 +1185,7 @@ sdbout_one_type (tree type) PUT_SDB_INT_VAL (int_bit_position (tem)); PUT_SDB_SCL (C_FIELD); sdbout_type (DECL_BIT_FIELD_TYPE (tem)); - PUT_SDB_SIZE (tree_low_cst (DECL_SIZE (tem), 1)); + PUT_SDB_SIZE (tree_to_uhwi (DECL_SIZE (tem))); } else { diff --git a/gcc/sel-sched-ir.c b/gcc/sel-sched-ir.c index 4eb27c5da5d..7d436986084 100644 --- a/gcc/sel-sched-ir.c +++ b/gcc/sel-sched-ir.c @@ -1141,10 +1141,10 @@ lhs_and_rhs_separable_p 
(rtx lhs, rtx rhs) if (lhs == NULL || rhs == NULL) return false; - /* Do not schedule CONST, CONST_INT and CONST_DOUBLE etc as rhs: no point - to use reg, if const can be used. Moreover, scheduling const as rhs may - lead to mode mismatch cause consts don't have modes but they could be - merged from branches where the same const used in different modes. */ + /* Do not schedule constants as rhs: no point to use reg, if const + can be used. Moreover, scheduling const as rhs may lead to mode + mismatch cause consts don't have modes but they could be merged + from branches where the same const used in different modes. */ if (CONSTANT_P (rhs)) return false; diff --git a/gcc/signop.h b/gcc/signop.h new file mode 100644 index 00000000000..05dac902df5 --- /dev/null +++ b/gcc/signop.h @@ -0,0 +1,35 @@ +/* Operations with SIGNED and UNSIGNED. -*- C++ -*- + Copyright (C) 2012-2013 Free Software Foundation, Inc. + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify it +under the terms of the GNU General Public License as published by the +Free Software Foundation; either version 3, or (at your option) any +later version. + +GCC is distributed in the hope that it will be useful, but WITHOUT +ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING3. If not see +<http://www.gnu.org/licenses/>. */ + +#ifndef SIGNOP_H +#define SIGNOP_H + +/* This type is used for the large number of functions that produce + different results depending on if the operands are signed types or + unsigned types. The signedness of a tree type can be found by + using the TYPE_SIGN macro. 
*/ + +enum signop_e { + SIGNED, + UNSIGNED +}; + +typedef enum signop_e signop; + +#endif diff --git a/gcc/simplify-rtx.c b/gcc/simplify-rtx.c index 432842ecf0b..eda11e6085e 100644 --- a/gcc/simplify-rtx.c +++ b/gcc/simplify-rtx.c @@ -86,6 +86,22 @@ mode_signbit_p (enum machine_mode mode, const_rtx x) if (width <= HOST_BITS_PER_WIDE_INT && CONST_INT_P (x)) val = INTVAL (x); +#if TARGET_SUPPORTS_WIDE_INT + else if (CONST_WIDE_INT_P (x)) + { + unsigned int i; + unsigned int elts = CONST_WIDE_INT_NUNITS (x); + if (elts != (width + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT) + return false; + for (i = 0; i < elts - 1; i++) + if (CONST_WIDE_INT_ELT (x, i) != 0) + return false; + val = CONST_WIDE_INT_ELT (x, elts - 1); + width %= HOST_BITS_PER_WIDE_INT; + if (width == 0) + width = HOST_BITS_PER_WIDE_INT; + } +#else else if (width <= HOST_BITS_PER_DOUBLE_INT && CONST_DOUBLE_AS_INT_P (x) && CONST_DOUBLE_LOW (x) == 0) @@ -93,8 +109,9 @@ mode_signbit_p (enum machine_mode mode, const_rtx x) val = CONST_DOUBLE_HIGH (x); width -= HOST_BITS_PER_WIDE_INT; } +#endif else - /* FIXME: We don't yet have a representation for wider modes. */ + /* X is not an integer constant. 
*/ return false; if (width < HOST_BITS_PER_WIDE_INT) @@ -298,13 +315,13 @@ delegitimize_mem_from_attrs (rtx x) &mode, &unsignedp, &volatilep, false); if (bitsize != GET_MODE_BITSIZE (mode) || (bitpos % BITS_PER_UNIT) - || (toffset && !host_integerp (toffset, 0))) + || (toffset && !tree_fits_shwi_p (toffset))) decl = NULL; else { offset += bitpos / BITS_PER_UNIT; if (toffset) - offset += TREE_INT_CST_LOW (toffset); + offset += tree_to_hwi (toffset); } break; } @@ -1526,7 +1543,6 @@ simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode, rtx op, enum machine_mode op_mode) { unsigned int width = GET_MODE_PRECISION (mode); - unsigned int op_width = GET_MODE_PRECISION (op_mode); if (code == VEC_DUPLICATE) { @@ -1594,336 +1610,114 @@ simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode, if (code == FLOAT && CONST_SCALAR_INT_P (op)) { - HOST_WIDE_INT hv, lv; REAL_VALUE_TYPE d; - if (CONST_INT_P (op)) - lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv); - else - lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op); + if (op_mode == VOIDmode) + { + /* CONST_INT have VOIDmode as the mode. We assume that all + the bits of the constant are significant, though, this is + a dangerous assumption as many times CONST_INTs are + created and used with garbage in the bits outside of the + precision of the implied mode of the const_int. 
*/ + op_mode = mode_for_size (MAX_BITSIZE_MODE_ANY_INT, MODE_INT, 0); + } - REAL_VALUE_FROM_INT (d, lv, hv, mode); + real_from_integer (&d, mode, std::make_pair (op, op_mode), SIGNED); d = real_value_truncate (mode, d); return CONST_DOUBLE_FROM_REAL_VALUE (d, mode); } else if (code == UNSIGNED_FLOAT && CONST_SCALAR_INT_P (op)) { - HOST_WIDE_INT hv, lv; REAL_VALUE_TYPE d; - if (CONST_INT_P (op)) - lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv); - else - lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op); - - if (op_mode == VOIDmode - || GET_MODE_PRECISION (op_mode) > HOST_BITS_PER_DOUBLE_INT) - /* We should never get a negative number. */ - gcc_assert (hv >= 0); - else if (GET_MODE_PRECISION (op_mode) <= HOST_BITS_PER_WIDE_INT) - hv = 0, lv &= GET_MODE_MASK (op_mode); + if (op_mode == VOIDmode) + { + /* CONST_INT have VOIDmode as the mode. We assume that all + the bits of the constant are significant, though, this is + a dangerous assumption as many times CONST_INTs are + created and used with garbage in the bits outside of the + precision of the implied mode of the const_int. */ + op_mode = mode_for_size (MAX_BITSIZE_MODE_ANY_INT, MODE_INT, 0); + } - REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode); + real_from_integer (&d, mode, std::make_pair (op, op_mode), UNSIGNED); d = real_value_truncate (mode, d); return CONST_DOUBLE_FROM_REAL_VALUE (d, mode); } - if (CONST_INT_P (op) - && width <= HOST_BITS_PER_WIDE_INT && width > 0) + if (CONST_SCALAR_INT_P (op) && width > 0) { - HOST_WIDE_INT arg0 = INTVAL (op); - HOST_WIDE_INT val; + wide_int result; + enum machine_mode imode = op_mode == VOIDmode ? 
mode : op_mode; + wide_int op0 = std::make_pair (op, imode); + +#if TARGET_SUPPORTS_WIDE_INT == 0 + /* This assert keeps the simplification from producing a result + that cannot be represented in a CONST_DOUBLE but a lot of + upstream callers expect that this function never fails to + simplify something and so you if you added this to the test + above the code would die later anyway. If this assert + happens, you just need to make the port support wide int. */ + gcc_assert (width <= HOST_BITS_PER_DOUBLE_INT); +#endif switch (code) { case NOT: - val = ~ arg0; + result = ~op0; break; case NEG: - val = - arg0; + result = -op0; break; case ABS: - val = (arg0 >= 0 ? arg0 : - arg0); + result = wi::abs (op0); break; case FFS: - arg0 &= GET_MODE_MASK (mode); - val = ffs_hwi (arg0); + result = wi::shwi (wi::ffs (op0), mode); break; case CLZ: - arg0 &= GET_MODE_MASK (mode); - if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val)) - ; - else - val = GET_MODE_PRECISION (mode) - floor_log2 (arg0) - 1; + result = wi::shwi (wi::clz (op0), mode); break; case CLRSB: - arg0 &= GET_MODE_MASK (mode); - if (arg0 == 0) - val = GET_MODE_PRECISION (mode) - 1; - else if (arg0 >= 0) - val = GET_MODE_PRECISION (mode) - floor_log2 (arg0) - 2; - else if (arg0 < 0) - val = GET_MODE_PRECISION (mode) - floor_log2 (~arg0) - 2; + result = wi::shwi (wi::clrsb (op0), mode); break; - + case CTZ: - arg0 &= GET_MODE_MASK (mode); - if (arg0 == 0) - { - /* Even if the value at zero is undefined, we have to come - up with some replacement. Seems good enough. */ - if (! 
CTZ_DEFINED_VALUE_AT_ZERO (mode, val)) - val = GET_MODE_PRECISION (mode); - } - else - val = ctz_hwi (arg0); + result = wi::shwi (wi::ctz (op0), mode); break; case POPCOUNT: - arg0 &= GET_MODE_MASK (mode); - val = 0; - while (arg0) - val++, arg0 &= arg0 - 1; + result = wi::shwi (wi::popcount (op0), mode); break; case PARITY: - arg0 &= GET_MODE_MASK (mode); - val = 0; - while (arg0) - val++, arg0 &= arg0 - 1; - val &= 1; + result = wi::shwi (wi::parity (op0), mode); break; case BSWAP: - { - unsigned int s; - - val = 0; - for (s = 0; s < width; s += 8) - { - unsigned int d = width - s - 8; - unsigned HOST_WIDE_INT byte; - byte = (arg0 >> s) & 0xff; - val |= byte << d; - } - } + result = op0.bswap (); break; case TRUNCATE: - val = arg0; - break; - case ZERO_EXTEND: - /* When zero-extending a CONST_INT, we need to know its - original mode. */ - gcc_assert (op_mode != VOIDmode); - if (op_width == HOST_BITS_PER_WIDE_INT) - { - /* If we were really extending the mode, - we would have to distinguish between zero-extension - and sign-extension. */ - gcc_assert (width == op_width); - val = arg0; - } - else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT) - val = arg0 & GET_MODE_MASK (op_mode); - else - return 0; + result = wide_int::from (op0, width, UNSIGNED); break; case SIGN_EXTEND: - if (op_mode == VOIDmode) - op_mode = mode; - op_width = GET_MODE_PRECISION (op_mode); - if (op_width == HOST_BITS_PER_WIDE_INT) - { - /* If we were really extending the mode, - we would have to distinguish between zero-extension - and sign-extension. 
*/ - gcc_assert (width == op_width); - val = arg0; - } - else if (op_width < HOST_BITS_PER_WIDE_INT) - { - val = arg0 & GET_MODE_MASK (op_mode); - if (val_signbit_known_set_p (op_mode, val)) - val |= ~GET_MODE_MASK (op_mode); - } - else - return 0; + result = wide_int::from (op0, width, SIGNED); break; case SQRT: - case FLOAT_EXTEND: - case FLOAT_TRUNCATE: - case SS_TRUNCATE: - case US_TRUNCATE: - case SS_NEG: - case US_NEG: - case SS_ABS: - return 0; - - default: - gcc_unreachable (); - } - - return gen_int_mode (val, mode); - } - - /* We can do some operations on integer CONST_DOUBLEs. Also allow - for a DImode operation on a CONST_INT. */ - else if (width <= HOST_BITS_PER_DOUBLE_INT - && (CONST_DOUBLE_AS_INT_P (op) || CONST_INT_P (op))) - { - double_int first, value; - - if (CONST_DOUBLE_AS_INT_P (op)) - first = double_int::from_pair (CONST_DOUBLE_HIGH (op), - CONST_DOUBLE_LOW (op)); - else - first = double_int::from_shwi (INTVAL (op)); - - switch (code) - { - case NOT: - value = ~first; - break; - - case NEG: - value = -first; - break; - - case ABS: - if (first.is_negative ()) - value = -first; - else - value = first; - break; - - case FFS: - value.high = 0; - if (first.low != 0) - value.low = ffs_hwi (first.low); - else if (first.high != 0) - value.low = HOST_BITS_PER_WIDE_INT + ffs_hwi (first.high); - else - value.low = 0; - break; - - case CLZ: - value.high = 0; - if (first.high != 0) - value.low = GET_MODE_PRECISION (mode) - floor_log2 (first.high) - 1 - - HOST_BITS_PER_WIDE_INT; - else if (first.low != 0) - value.low = GET_MODE_PRECISION (mode) - floor_log2 (first.low) - 1; - else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, value.low)) - value.low = GET_MODE_PRECISION (mode); - break; - - case CTZ: - value.high = 0; - if (first.low != 0) - value.low = ctz_hwi (first.low); - else if (first.high != 0) - value.low = HOST_BITS_PER_WIDE_INT + ctz_hwi (first.high); - else if (! 
CTZ_DEFINED_VALUE_AT_ZERO (mode, value.low)) - value.low = GET_MODE_PRECISION (mode); - break; - - case POPCOUNT: - value = double_int_zero; - while (first.low) - { - value.low++; - first.low &= first.low - 1; - } - while (first.high) - { - value.low++; - first.high &= first.high - 1; - } - break; - - case PARITY: - value = double_int_zero; - while (first.low) - { - value.low++; - first.low &= first.low - 1; - } - while (first.high) - { - value.low++; - first.high &= first.high - 1; - } - value.low &= 1; - break; - - case BSWAP: - { - unsigned int s; - - value = double_int_zero; - for (s = 0; s < width; s += 8) - { - unsigned int d = width - s - 8; - unsigned HOST_WIDE_INT byte; - - if (s < HOST_BITS_PER_WIDE_INT) - byte = (first.low >> s) & 0xff; - else - byte = (first.high >> (s - HOST_BITS_PER_WIDE_INT)) & 0xff; - - if (d < HOST_BITS_PER_WIDE_INT) - value.low |= byte << d; - else - value.high |= byte << (d - HOST_BITS_PER_WIDE_INT); - } - } - break; - - case TRUNCATE: - /* This is just a change-of-mode, so do nothing. 
*/ - value = first; - break; - - case ZERO_EXTEND: - gcc_assert (op_mode != VOIDmode); - - if (op_width > HOST_BITS_PER_WIDE_INT) - return 0; - - value = double_int::from_uhwi (first.low & GET_MODE_MASK (op_mode)); - break; - - case SIGN_EXTEND: - if (op_mode == VOIDmode - || op_width > HOST_BITS_PER_WIDE_INT) - return 0; - else - { - value.low = first.low & GET_MODE_MASK (op_mode); - if (val_signbit_known_set_p (op_mode, value.low)) - value.low |= ~GET_MODE_MASK (op_mode); - - value.high = HWI_SIGN_EXTEND (value.low); - } - break; - - case SQRT: - return 0; - default: return 0; } - return immed_double_int_const (value, mode); + return immed_wide_int_const (result, mode); } else if (CONST_DOUBLE_AS_FLOAT_P (op) @@ -1975,11 +1769,10 @@ simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode, } return CONST_DOUBLE_FROM_REAL_VALUE (d, mode); } - else if (CONST_DOUBLE_AS_FLOAT_P (op) && SCALAR_FLOAT_MODE_P (GET_MODE (op)) && GET_MODE_CLASS (mode) == MODE_INT - && width <= HOST_BITS_PER_DOUBLE_INT && width > 0) + && width > 0) { /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX operators are intentionally left unspecified (to ease implementation @@ -1988,9 +1781,13 @@ simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode, /* This was formerly used only for non-IEEE float. eggert@twinsun.com says it is safe for IEEE also. */ - HOST_WIDE_INT xh, xl, th, tl; REAL_VALUE_TYPE x, t; REAL_VALUE_FROM_CONST_DOUBLE (x, op); + wide_int wmax, wmin; + /* This is part of the abi to real_to_integer, but we check + things before making this call. */ + bool fail; + switch (code) { case FIX: @@ -1998,44 +1795,18 @@ simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode, return const0_rtx; /* Test against the signed upper bound. 
*/ - if (width > HOST_BITS_PER_WIDE_INT) - { - th = ((unsigned HOST_WIDE_INT) 1 - << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1; - tl = -1; - } - else - { - th = 0; - tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1; - } - real_from_integer (&t, VOIDmode, tl, th, 0); + wmax = wi::max_value (width, SIGNED); + real_from_integer (&t, VOIDmode, wmax, SIGNED); if (REAL_VALUES_LESS (t, x)) - { - xh = th; - xl = tl; - break; - } + return immed_wide_int_const (wmax, mode); /* Test against the signed lower bound. */ - if (width > HOST_BITS_PER_WIDE_INT) - { - th = HOST_WIDE_INT_M1U << (width - HOST_BITS_PER_WIDE_INT - 1); - tl = 0; - } - else - { - th = -1; - tl = HOST_WIDE_INT_M1U << (width - 1); - } - real_from_integer (&t, VOIDmode, tl, th, 0); + wmin = wi::min_value (width, SIGNED); + real_from_integer (&t, VOIDmode, wmin, SIGNED); if (REAL_VALUES_LESS (x, t)) - { - xh = th; - xl = tl; - break; - } - REAL_VALUE_TO_INT (&xl, &xh, x); + return immed_wide_int_const (wmin, mode); + + return immed_wide_int_const (real_to_integer (&x, &fail, width), mode); break; case UNSIGNED_FIX: @@ -2043,37 +1814,17 @@ simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode, return const0_rtx; /* Test against the unsigned upper bound. 
*/ - if (width == HOST_BITS_PER_DOUBLE_INT) - { - th = -1; - tl = -1; - } - else if (width >= HOST_BITS_PER_WIDE_INT) - { - th = ((unsigned HOST_WIDE_INT) 1 - << (width - HOST_BITS_PER_WIDE_INT)) - 1; - tl = -1; - } - else - { - th = 0; - tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1; - } - real_from_integer (&t, VOIDmode, tl, th, 1); + wmax = wi::max_value (width, UNSIGNED); + real_from_integer (&t, VOIDmode, wmax, UNSIGNED); if (REAL_VALUES_LESS (t, x)) - { - xh = th; - xl = tl; - break; - } + return immed_wide_int_const (wmax, mode); - REAL_VALUE_TO_INT (&xl, &xh, x); + return immed_wide_int_const (real_to_integer (&t, &fail, width), mode); break; default: gcc_unreachable (); } - return immed_double_const (xl, xh, mode); } return NULL_RTX; @@ -2262,49 +2013,52 @@ simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode, if (SCALAR_INT_MODE_P (mode)) { - double_int coeff0, coeff1; + wide_int coeff0; + wide_int coeff1; rtx lhs = op0, rhs = op1; - coeff0 = double_int_one; - coeff1 = double_int_one; + coeff0 = wi::one (GET_MODE_PRECISION (mode)); + coeff1 = wi::one (GET_MODE_PRECISION (mode)); if (GET_CODE (lhs) == NEG) { - coeff0 = double_int_minus_one; + coeff0 = wi::minus_one (GET_MODE_PRECISION (mode)); lhs = XEXP (lhs, 0); } else if (GET_CODE (lhs) == MULT - && CONST_INT_P (XEXP (lhs, 1))) + && CONST_SCALAR_INT_P (XEXP (lhs, 1))) { - coeff0 = double_int::from_shwi (INTVAL (XEXP (lhs, 1))); + coeff0 = std::make_pair (XEXP (lhs, 1), mode); lhs = XEXP (lhs, 0); } else if (GET_CODE (lhs) == ASHIFT && CONST_INT_P (XEXP (lhs, 1)) && INTVAL (XEXP (lhs, 1)) >= 0 - && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT) + && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (mode)) { - coeff0 = double_int_zero.set_bit (INTVAL (XEXP (lhs, 1))); + coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)), + GET_MODE_PRECISION (mode)); lhs = XEXP (lhs, 0); } if (GET_CODE (rhs) == NEG) { - coeff1 = double_int_minus_one; + coeff1 = wi::minus_one (GET_MODE_PRECISION (mode)); 
rhs = XEXP (rhs, 0); } else if (GET_CODE (rhs) == MULT && CONST_INT_P (XEXP (rhs, 1))) { - coeff1 = double_int::from_shwi (INTVAL (XEXP (rhs, 1))); + coeff1 = std::make_pair (XEXP (rhs, 1), mode); rhs = XEXP (rhs, 0); } else if (GET_CODE (rhs) == ASHIFT && CONST_INT_P (XEXP (rhs, 1)) && INTVAL (XEXP (rhs, 1)) >= 0 - && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT) + && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (mode)) { - coeff1 = double_int_zero.set_bit (INTVAL (XEXP (rhs, 1))); + coeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)), + GET_MODE_PRECISION (mode)); rhs = XEXP (rhs, 0); } @@ -2312,11 +2066,9 @@ simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode, { rtx orig = gen_rtx_PLUS (mode, op0, op1); rtx coeff; - double_int val; bool speed = optimize_function_for_speed_p (cfun); - val = coeff0 + coeff1; - coeff = immed_double_int_const (val, mode); + coeff = immed_wide_int_const (coeff0 + coeff1, mode); tem = simplify_gen_binary (MULT, mode, lhs, coeff); return set_src_cost (tem, speed) <= set_src_cost (orig, speed) @@ -2438,49 +2190,52 @@ simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode, if (SCALAR_INT_MODE_P (mode)) { - double_int coeff0, negcoeff1; + wide_int coeff0; + wide_int negcoeff1; rtx lhs = op0, rhs = op1; - coeff0 = double_int_one; - negcoeff1 = double_int_minus_one; + coeff0 = wi::one (GET_MODE_PRECISION (mode)); + negcoeff1 = wi::minus_one (GET_MODE_PRECISION (mode)); if (GET_CODE (lhs) == NEG) { - coeff0 = double_int_minus_one; + coeff0 = wi::minus_one (GET_MODE_PRECISION (mode)); lhs = XEXP (lhs, 0); } else if (GET_CODE (lhs) == MULT - && CONST_INT_P (XEXP (lhs, 1))) + && CONST_SCALAR_INT_P (XEXP (lhs, 1))) { - coeff0 = double_int::from_shwi (INTVAL (XEXP (lhs, 1))); + coeff0 = std::make_pair (XEXP (lhs, 1), mode); lhs = XEXP (lhs, 0); } else if (GET_CODE (lhs) == ASHIFT && CONST_INT_P (XEXP (lhs, 1)) && INTVAL (XEXP (lhs, 1)) >= 0 - && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT) + && INTVAL 
(XEXP (lhs, 1)) < GET_MODE_PRECISION (mode)) { - coeff0 = double_int_zero.set_bit (INTVAL (XEXP (lhs, 1))); + coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)), + GET_MODE_PRECISION (mode)); lhs = XEXP (lhs, 0); } if (GET_CODE (rhs) == NEG) { - negcoeff1 = double_int_one; + negcoeff1 = wi::one (GET_MODE_PRECISION (mode)); rhs = XEXP (rhs, 0); } else if (GET_CODE (rhs) == MULT && CONST_INT_P (XEXP (rhs, 1))) { - negcoeff1 = double_int::from_shwi (-INTVAL (XEXP (rhs, 1))); + negcoeff1 = -wide_int (std::make_pair (XEXP (rhs, 1), mode)); rhs = XEXP (rhs, 0); } else if (GET_CODE (rhs) == ASHIFT && CONST_INT_P (XEXP (rhs, 1)) && INTVAL (XEXP (rhs, 1)) >= 0 - && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT) + && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (mode)) { - negcoeff1 = double_int_zero.set_bit (INTVAL (XEXP (rhs, 1))); + negcoeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)), + GET_MODE_PRECISION (mode)); negcoeff1 = -negcoeff1; rhs = XEXP (rhs, 0); } @@ -2489,11 +2244,9 @@ simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode, { rtx orig = gen_rtx_MINUS (mode, op0, op1); rtx coeff; - double_int val; bool speed = optimize_function_for_speed_p (cfun); - val = coeff0 + negcoeff1; - coeff = immed_double_int_const (val, mode); + coeff = immed_wide_int_const (coeff0 + negcoeff1, mode); tem = simplify_gen_binary (MULT, mode, lhs, coeff); return set_src_cost (tem, speed) <= set_src_cost (orig, speed) @@ -2645,26 +2398,13 @@ simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode, && trueop1 == CONST1_RTX (mode)) return op0; - /* Convert multiply by constant power of two into shift unless - we are still generating RTL. This test is a kludge. */ - if (CONST_INT_P (trueop1) - && (val = exact_log2 (UINTVAL (trueop1))) >= 0 - /* If the mode is larger than the host word size, and the - uppermost bit is set, then this isn't a power of two due - to implicit sign extension. 
*/ - && (width <= HOST_BITS_PER_WIDE_INT - || val != HOST_BITS_PER_WIDE_INT - 1)) - return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val)); - - /* Likewise for multipliers wider than a word. */ - if (CONST_DOUBLE_AS_INT_P (trueop1) - && GET_MODE (op0) == mode - && CONST_DOUBLE_LOW (trueop1) == 0 - && (val = exact_log2 (CONST_DOUBLE_HIGH (trueop1))) >= 0 - && (val < HOST_BITS_PER_DOUBLE_INT - 1 - || GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_DOUBLE_INT)) - return simplify_gen_binary (ASHIFT, mode, op0, - GEN_INT (val + HOST_BITS_PER_WIDE_INT)); + /* Convert multiply by constant power of two into shift. */ + if (CONST_SCALAR_INT_P (trueop1)) + { + val = wi::exact_log2 (std::make_pair (trueop1, mode)); + if (val >= 0 && val < GET_MODE_BITSIZE (mode)) + return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val)); + } /* x*2 is x+x and x*(-1) is -x */ if (CONST_DOUBLE_AS_FLOAT_P (trueop1) @@ -3768,8 +3508,6 @@ rtx simplify_const_binary_operation (enum rtx_code code, enum machine_mode mode, rtx op0, rtx op1) { - HOST_WIDE_INT arg0, arg1, arg0s, arg1s; - HOST_WIDE_INT val; unsigned int width = GET_MODE_PRECISION (mode); if (VECTOR_MODE_P (mode) @@ -3963,299 +3701,134 @@ simplify_const_binary_operation (enum rtx_code code, enum machine_mode mode, /* We can fold some multi-word operations. 
*/ if (GET_MODE_CLASS (mode) == MODE_INT - && width == HOST_BITS_PER_DOUBLE_INT - && (CONST_DOUBLE_AS_INT_P (op0) || CONST_INT_P (op0)) - && (CONST_DOUBLE_AS_INT_P (op1) || CONST_INT_P (op1))) + && CONST_SCALAR_INT_P (op0) + && CONST_SCALAR_INT_P (op1)) { - double_int o0, o1, res, tmp; + wide_int result; + wide_int wop0 = std::make_pair (op0, mode); bool overflow; - - o0 = rtx_to_double_int (op0); - o1 = rtx_to_double_int (op1); - + unsigned int bitsize = GET_MODE_BITSIZE (mode); + rtx_mode_t pop1 = std::make_pair (op1, mode); + +#if TARGET_SUPPORTS_WIDE_INT == 0 + /* This assert keeps the simplification from producing a result + that cannot be represented in a CONST_DOUBLE but a lot of + upstream callers expect that this function never fails to + simplify something and so you if you added this to the test + above the code would die later anyway. If this assert + happens, you just need to make the port support wide int. */ + gcc_assert (width <= HOST_BITS_PER_DOUBLE_INT); +#endif switch (code) { case MINUS: - /* A - B == A + (-B). */ - o1 = -o1; - - /* Fall through.... 
*/ + result = wop0 - pop1; + break; case PLUS: - res = o0 + o1; + result = wop0 + pop1; break; case MULT: - res = o0 * o1; + result = wop0 * pop1; break; case DIV: - res = o0.divmod_with_overflow (o1, false, TRUNC_DIV_EXPR, - &tmp, &overflow); + result = wi::div_trunc (wop0, pop1, SIGNED, &overflow); if (overflow) - return 0; + return NULL_RTX; break; - + case MOD: - tmp = o0.divmod_with_overflow (o1, false, TRUNC_DIV_EXPR, - &res, &overflow); + result = wi::mod_trunc (wop0, pop1, SIGNED, &overflow); if (overflow) - return 0; + return NULL_RTX; break; case UDIV: - res = o0.divmod_with_overflow (o1, true, TRUNC_DIV_EXPR, - &tmp, &overflow); + result = wi::div_trunc (wop0, pop1, UNSIGNED, &overflow); if (overflow) - return 0; + return NULL_RTX; break; case UMOD: - tmp = o0.divmod_with_overflow (o1, true, TRUNC_DIV_EXPR, - &res, &overflow); + result = wi::mod_trunc (wop0, pop1, UNSIGNED, &overflow); if (overflow) - return 0; + return NULL_RTX; break; case AND: - res = o0 & o1; + result = wop0 & pop1; break; case IOR: - res = o0 | o1; + result = wop0 | pop1; break; case XOR: - res = o0 ^ o1; + result = wop0 ^ pop1; break; case SMIN: - res = o0.smin (o1); + result = wi::smin (wop0, pop1); break; case SMAX: - res = o0.smax (o1); + result = wi::smax (wop0, pop1); break; case UMIN: - res = o0.umin (o1); + result = wi::umin (wop0, pop1); break; case UMAX: - res = o0.umax (o1); + result = wi::umax (wop0, pop1); break; - case LSHIFTRT: case ASHIFTRT: + case LSHIFTRT: + case ASHIFTRT: case ASHIFT: - case ROTATE: case ROTATERT: + case ROTATE: + case ROTATERT: { - unsigned HOST_WIDE_INT cnt; + wide_int wop1 = pop1; + if (wi::neg_p (wop1)) + return NULL_RTX; if (SHIFT_COUNT_TRUNCATED) + wop1 = wi::umod_trunc (wop1, width); + + switch (code) { - o1.high = 0; - o1.low &= GET_MODE_PRECISION (mode) - 1; - } + case LSHIFTRT: + result = wi::lrshift (wop0, wop1, bitsize); + break; + + case ASHIFTRT: + result = wi::arshift (wop0, wop1, bitsize); + break; + + case ASHIFT: + result = 
wi::lshift (wop0, wop1, bitsize); + break; + + case ROTATE: + result = wi::lrotate (wop0, wop1); + break; + + case ROTATERT: + result = wi::rrotate (wop0, wop1); + break; - if (!o1.fits_uhwi () - || o1.to_uhwi () >= GET_MODE_PRECISION (mode)) - return 0; - - cnt = o1.to_uhwi (); - unsigned short prec = GET_MODE_PRECISION (mode); - - if (code == LSHIFTRT || code == ASHIFTRT) - res = o0.rshift (cnt, prec, code == ASHIFTRT); - else if (code == ASHIFT) - res = o0.alshift (cnt, prec); - else if (code == ROTATE) - res = o0.lrotate (cnt, prec); - else /* code == ROTATERT */ - res = o0.rrotate (cnt, prec); + default: + gcc_unreachable (); + } + break; } - break; - - default: - return 0; - } - - return immed_double_int_const (res, mode); - } - - if (CONST_INT_P (op0) && CONST_INT_P (op1) - && width <= HOST_BITS_PER_WIDE_INT && width != 0) - { - /* Get the integer argument values in two forms: - zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */ - - arg0 = INTVAL (op0); - arg1 = INTVAL (op1); - - if (width < HOST_BITS_PER_WIDE_INT) - { - arg0 &= GET_MODE_MASK (mode); - arg1 &= GET_MODE_MASK (mode); - - arg0s = arg0; - if (val_signbit_known_set_p (mode, arg0s)) - arg0s |= ~GET_MODE_MASK (mode); - - arg1s = arg1; - if (val_signbit_known_set_p (mode, arg1s)) - arg1s |= ~GET_MODE_MASK (mode); - } - else - { - arg0s = arg0; - arg1s = arg1; - } - - /* Compute the value of the arithmetic. 
*/ - - switch (code) - { - case PLUS: - val = arg0s + arg1s; - break; - - case MINUS: - val = arg0s - arg1s; - break; - - case MULT: - val = arg0s * arg1s; - break; - - case DIV: - if (arg1s == 0 - || ((unsigned HOST_WIDE_INT) arg0s - == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1) - && arg1s == -1)) - return 0; - val = arg0s / arg1s; - break; - - case MOD: - if (arg1s == 0 - || ((unsigned HOST_WIDE_INT) arg0s - == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1) - && arg1s == -1)) - return 0; - val = arg0s % arg1s; - break; - - case UDIV: - if (arg1 == 0 - || ((unsigned HOST_WIDE_INT) arg0s - == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1) - && arg1s == -1)) - return 0; - val = (unsigned HOST_WIDE_INT) arg0 / arg1; - break; - - case UMOD: - if (arg1 == 0 - || ((unsigned HOST_WIDE_INT) arg0s - == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1) - && arg1s == -1)) - return 0; - val = (unsigned HOST_WIDE_INT) arg0 % arg1; - break; - - case AND: - val = arg0 & arg1; - break; - - case IOR: - val = arg0 | arg1; - break; - - case XOR: - val = arg0 ^ arg1; - break; - - case LSHIFTRT: - case ASHIFT: - case ASHIFTRT: - /* Truncate the shift if SHIFT_COUNT_TRUNCATED, otherwise make sure - the value is in range. We can't return any old value for - out-of-range arguments because either the middle-end (via - shift_truncation_mask) or the back-end might be relying on - target-specific knowledge. Nor can we rely on - shift_truncation_mask, since the shift might not be part of an - ashlM3, lshrM3 or ashrM3 instruction. */ - if (SHIFT_COUNT_TRUNCATED) - arg1 = (unsigned HOST_WIDE_INT) arg1 % width; - else if (arg1 < 0 || arg1 >= GET_MODE_BITSIZE (mode)) - return 0; - - val = (code == ASHIFT - ? ((unsigned HOST_WIDE_INT) arg0) << arg1 - : ((unsigned HOST_WIDE_INT) arg0) >> arg1); - - /* Sign-extend the result for arithmetic right shifts. 
*/ - if (code == ASHIFTRT && arg0s < 0 && arg1 > 0) - val |= HOST_WIDE_INT_M1U << (width - arg1); - break; - - case ROTATERT: - if (arg1 < 0) - return 0; - - arg1 %= width; - val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1)) - | (((unsigned HOST_WIDE_INT) arg0) >> arg1)); - break; - - case ROTATE: - if (arg1 < 0) - return 0; - - arg1 %= width; - val = ((((unsigned HOST_WIDE_INT) arg0) << arg1) - | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1))); - break; - - case COMPARE: - /* Do nothing here. */ - return 0; - - case SMIN: - val = arg0s <= arg1s ? arg0s : arg1s; - break; - - case UMIN: - val = ((unsigned HOST_WIDE_INT) arg0 - <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1); - break; - - case SMAX: - val = arg0s > arg1s ? arg0s : arg1s; - break; - - case UMAX: - val = ((unsigned HOST_WIDE_INT) arg0 - > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1); - break; - - case SS_PLUS: - case US_PLUS: - case SS_MINUS: - case US_MINUS: - case SS_MULT: - case US_MULT: - case SS_DIV: - case US_DIV: - case SS_ASHIFT: - case US_ASHIFT: - /* ??? There are simplifications that can be done. */ - return 0; - default: - gcc_unreachable (); + return NULL_RTX; } - - return gen_int_mode (val, mode); + return immed_wide_int_const (result, mode); } return NULL_RTX; @@ -4938,10 +4511,11 @@ comparison_result (enum rtx_code code, int known_results) } } -/* Check if the given comparison (done in the given MODE) is actually a - tautology or a contradiction. - If no simplification is possible, this function returns zero. - Otherwise, it returns either const_true_rtx or const0_rtx. */ +/* Check if the given comparison (done in the given MODE) is actually + a tautology or a contradiction. If the mode is VOID_mode, the + comparison is done in "infinite precision". If no simplification + is possible, this function returns zero. Otherwise, it returns + either const_true_rtx or const0_rtx. 
*/ rtx simplify_const_relational_operation (enum rtx_code code, @@ -5065,59 +4639,22 @@ simplify_const_relational_operation (enum rtx_code code, /* Otherwise, see if the operands are both integers. */ if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode) - && (CONST_DOUBLE_AS_INT_P (trueop0) || CONST_INT_P (trueop0)) - && (CONST_DOUBLE_AS_INT_P (trueop1) || CONST_INT_P (trueop1))) + && CONST_SCALAR_INT_P (trueop0) && CONST_SCALAR_INT_P (trueop1)) { - int width = GET_MODE_PRECISION (mode); - HOST_WIDE_INT l0s, h0s, l1s, h1s; - unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u; - - /* Get the two words comprising each integer constant. */ - if (CONST_DOUBLE_AS_INT_P (trueop0)) - { - l0u = l0s = CONST_DOUBLE_LOW (trueop0); - h0u = h0s = CONST_DOUBLE_HIGH (trueop0); - } - else - { - l0u = l0s = INTVAL (trueop0); - h0u = h0s = HWI_SIGN_EXTEND (l0s); - } - - if (CONST_DOUBLE_AS_INT_P (trueop1)) - { - l1u = l1s = CONST_DOUBLE_LOW (trueop1); - h1u = h1s = CONST_DOUBLE_HIGH (trueop1); - } - else - { - l1u = l1s = INTVAL (trueop1); - h1u = h1s = HWI_SIGN_EXTEND (l1s); - } - - /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT, - we have to sign or zero-extend the values. */ - if (width != 0 && width < HOST_BITS_PER_WIDE_INT) - { - l0u &= GET_MODE_MASK (mode); - l1u &= GET_MODE_MASK (mode); - - if (val_signbit_known_set_p (mode, l0s)) - l0s |= ~GET_MODE_MASK (mode); - - if (val_signbit_known_set_p (mode, l1s)) - l1s |= ~GET_MODE_MASK (mode); - } - if (width != 0 && width <= HOST_BITS_PER_WIDE_INT) - h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s); - - if (h0u == h1u && l0u == l1u) + /* It would be nice if we really had a mode here. However, the + largest int representable on the target is as good as + infinite. */ + enum machine_mode cmode = (mode == VOIDmode) ? 
MAX_MODE_INT : mode; + wide_int wo0; + rtx_mode_t ptrueop1 = std::make_pair (trueop1, cmode); + + wo0 = std::make_pair (trueop0, cmode); + if (wo0 == ptrueop1) return comparison_result (code, CMP_EQ); else { - int cr; - cr = (h0s < h1s || (h0s == h1s && l0u < l1u)) ? CMP_LT : CMP_GT; - cr |= (h0u < h1u || (h0u == h1u && l0u < l1u)) ? CMP_LTU : CMP_GTU; + int cr = wi::lts_p (wo0, ptrueop1) ? CMP_LT : CMP_GT; + cr |= wi::ltu_p (wo0, ptrueop1) ? CMP_LTU : CMP_GTU; return comparison_result (code, cr); } } @@ -5573,9 +5110,9 @@ simplify_ternary_operation (enum rtx_code code, enum machine_mode mode, return 0; } -/* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_FIXED - or CONST_VECTOR, - returning another CONST_INT or CONST_DOUBLE or CONST_FIXED or CONST_VECTOR. +/* Evaluate a SUBREG of a CONST_INT or CONST_WIDE_INT or CONST_DOUBLE + or CONST_FIXED or CONST_VECTOR, returning another CONST_INT or + CONST_WIDE_INT or CONST_DOUBLE or CONST_FIXED or CONST_VECTOR. Works by unpacking OP into a collection of 8-bit values represented as a little-endian array of 'unsigned char', selecting by BYTE, @@ -5585,13 +5122,11 @@ static rtx simplify_immed_subreg (enum machine_mode outermode, rtx op, enum machine_mode innermode, unsigned int byte) { - /* We support up to 512-bit values (for V8DFmode). */ enum { - max_bitsize = 512, value_bit = 8, value_mask = (1 << value_bit) - 1 }; - unsigned char value[max_bitsize / value_bit]; + unsigned char value[MAX_BITSIZE_MODE_ANY_MODE/value_bit]; int value_start; int i; int elem; @@ -5603,6 +5138,7 @@ simplify_immed_subreg (enum machine_mode outermode, rtx op, rtvec result_v = NULL; enum mode_class outer_class; enum machine_mode outer_submode; + int max_bitsize; /* Some ports misuse CCmode. */ if (GET_MODE_CLASS (outermode) == MODE_CC && CONST_INT_P (op)) @@ -5612,6 +5148,10 @@ simplify_immed_subreg (enum machine_mode outermode, rtx op, if (COMPLEX_MODE_P (outermode)) return NULL_RTX; + /* We support any size mode. 
*/ + max_bitsize = MAX (GET_MODE_BITSIZE (outermode), + GET_MODE_BITSIZE (innermode)); + /* Unpack the value. */ if (GET_CODE (op) == CONST_VECTOR) @@ -5661,8 +5201,20 @@ simplify_immed_subreg (enum machine_mode outermode, rtx op, *vp++ = INTVAL (el) < 0 ? -1 : 0; break; + case CONST_WIDE_INT: + { + wide_int val = std::make_pair (el, innermode); + unsigned char extend = wi::sign_mask (val); + + for (i = 0; i < elem_bitsize; i += value_bit) + *vp++ = wi::extract_uhwi (val, i, value_bit); + for (; i < elem_bitsize; i += value_bit) + *vp++ = extend; + } + break; + case CONST_DOUBLE: - if (GET_MODE (el) == VOIDmode) + if (TARGET_SUPPORTS_WIDE_INT == 0 && GET_MODE (el) == VOIDmode) { unsigned char extend = 0; /* If this triggers, someone should have generated a @@ -5685,7 +5237,8 @@ simplify_immed_subreg (enum machine_mode outermode, rtx op, } else { - long tmp[max_bitsize / 32]; + /* This is big enough for anything on the platform. */ + long tmp[MAX_BITSIZE_MODE_ANY_MODE / 32]; int bitsize = GET_MODE_BITSIZE (GET_MODE (el)); gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el))); @@ -5805,24 +5358,28 @@ simplify_immed_subreg (enum machine_mode outermode, rtx op, case MODE_INT: case MODE_PARTIAL_INT: { - unsigned HOST_WIDE_INT hi = 0, lo = 0; - - for (i = 0; - i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize; - i += value_bit) - lo |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i; - for (; i < elem_bitsize; i += value_bit) - hi |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) - << (i - HOST_BITS_PER_WIDE_INT); - - /* immed_double_const doesn't call trunc_int_for_mode. I don't - know why. 
*/ - if (elem_bitsize <= HOST_BITS_PER_WIDE_INT) - elems[elem] = gen_int_mode (lo, outer_submode); - else if (elem_bitsize <= HOST_BITS_PER_DOUBLE_INT) - elems[elem] = immed_double_const (lo, hi, outer_submode); - else - return NULL_RTX; + int u; + int base = 0; + int units + = (GET_MODE_BITSIZE (outer_submode) + HOST_BITS_PER_WIDE_INT - 1) + / HOST_BITS_PER_WIDE_INT; + HOST_WIDE_INT tmp[MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_WIDE_INT]; + wide_int r; + + for (u = 0; u < units; u++) + { + unsigned HOST_WIDE_INT buf = 0; + for (i = 0; + i < HOST_BITS_PER_WIDE_INT && base + i < elem_bitsize; + i += value_bit) + buf |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i; + + tmp[u] = buf; + base += HOST_BITS_PER_WIDE_INT; + } + r = wide_int::from_array (tmp, units, + GET_MODE_PRECISION (outer_submode)); + elems[elem] = immed_wide_int_const (r, outer_submode); } break; @@ -5830,7 +5387,7 @@ simplify_immed_subreg (enum machine_mode outermode, rtx op, case MODE_DECIMAL_FLOAT: { REAL_VALUE_TYPE r; - long tmp[max_bitsize / 32]; + long tmp[MAX_BITSIZE_MODE_ANY_INT / 32]; /* real_from_target wants its input in words affected by FLOAT_WORDS_BIG_ENDIAN. However, we ignore this, diff --git a/gcc/stmt.c b/gcc/stmt.c index b3fd25510ab..4241d5ddb68 100644 --- a/gcc/stmt.c +++ b/gcc/stmt.c @@ -1662,8 +1662,8 @@ dump_case_nodes (FILE *f, struct case_node *root, dump_case_nodes (f, root->left, indent_step, indent_level); - low = tree_low_cst (root->low, 0); - high = tree_low_cst (root->high, 0); + low = tree_to_shwi (root->low); + high = tree_to_shwi (root->high); fputs (";; ", f); if (high == low) @@ -1740,7 +1740,7 @@ expand_switch_as_decision_tree_p (tree range, who knows... */ max_ratio = optimize_insn_for_size_p () ? 3 : 10; if (count < case_values_threshold () - || ! host_integerp (range, /*pos=*/1) + || ! 
tree_fits_uhwi_p (range) || compare_tree_int (range, max_ratio * count) > 0) return true; @@ -1905,7 +1905,7 @@ emit_case_dispatch_table (tree index_expr, tree index_type, /* Get table of labels to jump to, in order of case index. */ - ncases = tree_low_cst (range, 0) + 1; + ncases = tree_to_shwi (range) + 1; labelvec = XALLOCAVEC (rtx, ncases); memset (labelvec, 0, ncases * sizeof (rtx)); @@ -1915,11 +1915,11 @@ emit_case_dispatch_table (tree index_expr, tree index_type, value since that should fit in a HOST_WIDE_INT while the actual values may not. */ HOST_WIDE_INT i_low - = tree_low_cst (fold_build2 (MINUS_EXPR, index_type, - n->low, minval), 1); + = tree_to_uhwi (fold_build2 (MINUS_EXPR, index_type, + n->low, minval)); HOST_WIDE_INT i_high - = tree_low_cst (fold_build2 (MINUS_EXPR, index_type, - n->high, minval), 1); + = tree_to_uhwi (fold_build2 (MINUS_EXPR, index_type, + n->high, minval)); HOST_WIDE_INT i; for (i = i_low; i <= i_high; i ++) @@ -2117,9 +2117,7 @@ expand_case (gimple stmt) original type. Make sure to drop overflow flags. */ low = fold_convert (index_type, low); if (TREE_OVERFLOW (low)) - low = build_int_cst_wide (index_type, - TREE_INT_CST_LOW (low), - TREE_INT_CST_HIGH (low)); + low = wide_int_to_tree (index_type, low); /* The canonical from of a case label in GIMPLE is that a simple case has an empty CASE_HIGH. 
For the casesi and tablejump expanders, @@ -2128,9 +2126,7 @@ expand_case (gimple stmt) high = low; high = fold_convert (index_type, high); if (TREE_OVERFLOW (high)) - high = build_int_cst_wide (index_type, - TREE_INT_CST_LOW (high), - TREE_INT_CST_HIGH (high)); + high = wide_int_to_tree (index_type, high); basic_block case_bb = label_to_block_fn (cfun, lab); edge case_edge = find_edge (bb, case_bb); diff --git a/gcc/stor-layout.c b/gcc/stor-layout.c index 20e577d8482..1057bbbbabd 100644 --- a/gcc/stor-layout.c +++ b/gcc/stor-layout.c @@ -331,9 +331,9 @@ mode_for_size_tree (const_tree size, enum mode_class mclass, int limit) unsigned HOST_WIDE_INT uhwi; unsigned int ui; - if (!host_integerp (size, 1)) + if (!tree_fits_uhwi_p (size)) return BLKmode; - uhwi = tree_low_cst (size, 1); + uhwi = tree_to_uhwi (size); ui = uhwi; if (uhwi != ui) return BLKmode; @@ -481,10 +481,10 @@ mode_for_array (tree elem_type, tree size) return TYPE_MODE (elem_type); limit_p = true; - if (host_integerp (size, 1) && host_integerp (elem_size, 1)) + if (tree_fits_uhwi_p (size) && tree_fits_uhwi_p (elem_size)) { - int_size = tree_low_cst (size, 1); - int_elem_size = tree_low_cst (elem_size, 1); + int_size = tree_to_uhwi (size); + int_elem_size = tree_to_uhwi (elem_size); if (int_elem_size > 0 && int_size % int_elem_size == 0 && targetm.array_mode_supported_p (TYPE_MODE (elem_type), @@ -690,7 +690,7 @@ layout_decl (tree decl, unsigned int known_align) if (size != 0 && TREE_CODE (size) == INTEGER_CST && compare_tree_int (size, larger_than_size) > 0) { - int size_as_int = TREE_INT_CST_LOW (size); + int size_as_int = tree_to_hwi (size); if (compare_tree_int (size, size_as_int) == 0) warning (OPT_Wlarger_than_, "size of %q+D is %d bytes", decl, size_as_int); @@ -1055,7 +1055,7 @@ excess_unit_span (HOST_WIDE_INT byte_offset, HOST_WIDE_INT bit_offset, offset = offset % align; return ((offset + size + align - 1) / align - > ((unsigned HOST_WIDE_INT) tree_low_cst (TYPE_SIZE (type), 1) + > ((unsigned 
HOST_WIDE_INT) tree_to_uhwi (TYPE_SIZE (type)) / align)); } #endif @@ -1115,14 +1115,14 @@ place_field (record_layout_info rli, tree field) /* Work out the known alignment so far. Note that A & (-A) is the value of the least-significant bit in A that is one. */ if (! integer_zerop (rli->bitpos)) - known_align = (tree_low_cst (rli->bitpos, 1) - & - tree_low_cst (rli->bitpos, 1)); + known_align = (tree_to_uhwi (rli->bitpos) + & - tree_to_uhwi (rli->bitpos)); else if (integer_zerop (rli->offset)) known_align = 0; - else if (host_integerp (rli->offset, 1)) + else if (tree_fits_uhwi_p (rli->offset)) known_align = (BITS_PER_UNIT - * (tree_low_cst (rli->offset, 1) - & - tree_low_cst (rli->offset, 1))); + * (tree_to_uhwi (rli->offset) + & - tree_to_uhwi (rli->offset))); else known_align = rli->offset_align; @@ -1196,15 +1196,16 @@ place_field (record_layout_info rli, tree field) || TYPE_ALIGN (type) <= BITS_PER_UNIT) && maximum_field_alignment == 0 && ! integer_zerop (DECL_SIZE (field)) - && host_integerp (DECL_SIZE (field), 1) - && host_integerp (rli->offset, 1) - && host_integerp (TYPE_SIZE (type), 1)) + && tree_fits_uhwi_p (DECL_SIZE (field)) + /* BUG!!! rli->offset is checked as unsigned but used as signed. */ + && tree_fits_uhwi_p (rli->offset) + && tree_fits_uhwi_p (TYPE_SIZE (type))) { unsigned int type_align = TYPE_ALIGN (type); tree dsize = DECL_SIZE (field); - HOST_WIDE_INT field_size = tree_low_cst (dsize, 1); - HOST_WIDE_INT offset = tree_low_cst (rli->offset, 0); - HOST_WIDE_INT bit_offset = tree_low_cst (rli->bitpos, 0); + HOST_WIDE_INT field_size = tree_to_uhwi (dsize); + HOST_WIDE_INT offset = tree_to_shwi (rli->offset); + HOST_WIDE_INT bit_offset = tree_to_shwi (rli->bitpos); #ifdef ADJUST_FIELD_ALIGN if (! TYPE_USER_ALIGN (type)) @@ -1240,15 +1241,16 @@ place_field (record_layout_info rli, tree field) && DECL_BIT_FIELD_TYPE (field) && ! DECL_PACKED (field) && ! 
integer_zerop (DECL_SIZE (field)) - && host_integerp (DECL_SIZE (field), 1) - && host_integerp (rli->offset, 1) - && host_integerp (TYPE_SIZE (type), 1)) + && tree_fits_uhwi_p (DECL_SIZE (field)) + /* BUG!!! rli->offset is checked as unsigned but used as signed. */ + && tree_fits_shwi_p (rli->offset) + && tree_fits_uhwi_p (TYPE_SIZE (type))) { unsigned int type_align = TYPE_ALIGN (type); tree dsize = DECL_SIZE (field); - HOST_WIDE_INT field_size = tree_low_cst (dsize, 1); - HOST_WIDE_INT offset = tree_low_cst (rli->offset, 0); - HOST_WIDE_INT bit_offset = tree_low_cst (rli->bitpos, 0); + HOST_WIDE_INT field_size = tree_to_uhwi (dsize); + HOST_WIDE_INT offset = tree_to_shwi (rli->offset); + HOST_WIDE_INT bit_offset = tree_to_shwi (rli->bitpos); #ifdef ADJUST_FIELD_ALIGN if (! TYPE_USER_ALIGN (type)) @@ -1302,18 +1304,19 @@ place_field (record_layout_info rli, tree field) if (DECL_BIT_FIELD_TYPE (field) && !integer_zerop (DECL_SIZE (field)) && !integer_zerop (DECL_SIZE (rli->prev_field)) - && host_integerp (DECL_SIZE (rli->prev_field), 0) - && host_integerp (TYPE_SIZE (type), 0) + && tree_fits_shwi_p (DECL_SIZE (rli->prev_field)) + /* BUG!!! TYPE_SIZE (type) is checked as unsigned but used as signed. */ + && tree_fits_shwi_p (TYPE_SIZE (type)) && simple_cst_equal (TYPE_SIZE (type), TYPE_SIZE (prev_type))) { /* We're in the middle of a run of equal type size fields; make sure we realign if we run out of bits. (Not decl size, type size!) */ - HOST_WIDE_INT bitsize = tree_low_cst (DECL_SIZE (field), 1); + HOST_WIDE_INT bitsize = tree_to_uhwi (DECL_SIZE (field)); if (rli->remaining_in_alignment < bitsize) { - HOST_WIDE_INT typesize = tree_low_cst (TYPE_SIZE (type), 1); + HOST_WIDE_INT typesize = tree_to_uhwi (TYPE_SIZE (type)); /* out of bits; bump up to next 'word'. */ rli->bitpos @@ -1385,13 +1388,13 @@ place_field (record_layout_info rli, tree field) until we see a bitfield (and come by here again) we just skip calculating it. 
*/ if (DECL_SIZE (field) != NULL - && host_integerp (TYPE_SIZE (TREE_TYPE (field)), 1) - && host_integerp (DECL_SIZE (field), 1)) + && tree_fits_uhwi_p (TYPE_SIZE (TREE_TYPE (field))) + && tree_fits_uhwi_p (DECL_SIZE (field))) { unsigned HOST_WIDE_INT bitsize - = tree_low_cst (DECL_SIZE (field), 1); + = tree_to_uhwi (DECL_SIZE (field)); unsigned HOST_WIDE_INT typesize - = tree_low_cst (TYPE_SIZE (TREE_TYPE (field)), 1); + = tree_to_uhwi (TYPE_SIZE (TREE_TYPE (field))); if (typesize < bitsize) rli->remaining_in_alignment = 0; @@ -1423,14 +1426,14 @@ place_field (record_layout_info rli, tree field) approximate this by seeing if its position changed), lay out the field again; perhaps we can use an integral mode for it now. */ if (! integer_zerop (DECL_FIELD_BIT_OFFSET (field))) - actual_align = (tree_low_cst (DECL_FIELD_BIT_OFFSET (field), 1) - & - tree_low_cst (DECL_FIELD_BIT_OFFSET (field), 1)); + actual_align = (tree_to_uhwi (DECL_FIELD_BIT_OFFSET (field)) + & - tree_to_uhwi (DECL_FIELD_BIT_OFFSET (field))); else if (integer_zerop (DECL_FIELD_OFFSET (field))) actual_align = MAX (BIGGEST_ALIGNMENT, rli->record_align); - else if (host_integerp (DECL_FIELD_OFFSET (field), 1)) + else if (tree_fits_uhwi_p (DECL_FIELD_OFFSET (field))) actual_align = (BITS_PER_UNIT - * (tree_low_cst (DECL_FIELD_OFFSET (field), 1) - & - tree_low_cst (DECL_FIELD_OFFSET (field), 1))); + * (tree_to_uhwi (DECL_FIELD_OFFSET (field)) + & - tree_to_uhwi (DECL_FIELD_OFFSET (field)))); else actual_align = DECL_OFFSET_ALIGN (field); /* ACTUAL_ALIGN is still the actual alignment *within the record* . @@ -1586,7 +1589,7 @@ compute_record_mode (tree type) line. */ SET_TYPE_MODE (type, BLKmode); - if (! host_integerp (TYPE_SIZE (type), 1)) + if (! tree_fits_uhwi_p (TYPE_SIZE (type))) return; /* A record which has any BLKmode members must itself be @@ -1602,9 +1605,9 @@ compute_record_mode (tree type) && ! 
TYPE_NO_FORCE_BLK (TREE_TYPE (field)) && !(TYPE_SIZE (TREE_TYPE (field)) != 0 && integer_zerop (TYPE_SIZE (TREE_TYPE (field))))) - || ! host_integerp (bit_position (field), 1) + || ! tree_fits_uhwi_p (bit_position (field)) || DECL_SIZE (field) == 0 - || ! host_integerp (DECL_SIZE (field), 1)) + || ! tree_fits_uhwi_p (DECL_SIZE (field))) return; /* If this field is the whole struct, remember its mode so @@ -1623,8 +1626,8 @@ compute_record_mode (tree type) matches the type's size. This only applies to RECORD_TYPE. This does not apply to unions. */ if (TREE_CODE (type) == RECORD_TYPE && mode != VOIDmode - && host_integerp (TYPE_SIZE (type), 1) - && GET_MODE_BITSIZE (mode) == TREE_INT_CST_LOW (TYPE_SIZE (type))) + && tree_fits_uhwi_p (TYPE_SIZE (type)) + && GET_MODE_BITSIZE (mode) == tree_to_uhwi (TYPE_SIZE (type))) SET_TYPE_MODE (type, mode); else SET_TYPE_MODE (type, mode_for_size_tree (TYPE_SIZE (type), MODE_INT, 1)); @@ -1765,11 +1768,11 @@ finish_bitfield_representative (tree repr, tree field) size = size_diffop (DECL_FIELD_OFFSET (field), DECL_FIELD_OFFSET (repr)); - gcc_assert (host_integerp (size, 1)); - bitsize = (tree_low_cst (size, 1) * BITS_PER_UNIT - + tree_low_cst (DECL_FIELD_BIT_OFFSET (field), 1) - - tree_low_cst (DECL_FIELD_BIT_OFFSET (repr), 1) - + tree_low_cst (DECL_SIZE (field), 1)); + gcc_assert (tree_fits_uhwi_p (size)); + bitsize = (tree_to_uhwi (size) * BITS_PER_UNIT + + tree_to_uhwi (DECL_FIELD_BIT_OFFSET (field)) + - tree_to_uhwi (DECL_FIELD_BIT_OFFSET (repr)) + + tree_to_uhwi (DECL_SIZE (field))); /* Round up bitsize to multiples of BITS_PER_UNIT. 
*/ bitsize = (bitsize + BITS_PER_UNIT - 1) & ~(BITS_PER_UNIT - 1); @@ -1787,11 +1790,11 @@ finish_bitfield_representative (tree repr, tree field) return; maxsize = size_diffop (DECL_FIELD_OFFSET (nextf), DECL_FIELD_OFFSET (repr)); - if (host_integerp (maxsize, 1)) + if (tree_fits_uhwi_p (maxsize)) { - maxbitsize = (tree_low_cst (maxsize, 1) * BITS_PER_UNIT - + tree_low_cst (DECL_FIELD_BIT_OFFSET (nextf), 1) - - tree_low_cst (DECL_FIELD_BIT_OFFSET (repr), 1)); + maxbitsize = (tree_to_uhwi (maxsize) * BITS_PER_UNIT + + tree_to_uhwi (DECL_FIELD_BIT_OFFSET (nextf)) + - tree_to_uhwi (DECL_FIELD_BIT_OFFSET (repr))); /* If the group ends within a bitfield nextf does not need to be aligned to BITS_PER_UNIT. Thus round up. */ maxbitsize = (maxbitsize + BITS_PER_UNIT - 1) & ~(BITS_PER_UNIT - 1); @@ -1808,9 +1811,9 @@ finish_bitfield_representative (tree repr, tree field) use bitsize as fallback for this case. */ tree maxsize = size_diffop (TYPE_SIZE_UNIT (DECL_CONTEXT (field)), DECL_FIELD_OFFSET (repr)); - if (host_integerp (maxsize, 1)) - maxbitsize = (tree_low_cst (maxsize, 1) * BITS_PER_UNIT - - tree_low_cst (DECL_FIELD_BIT_OFFSET (repr), 1)); + if (tree_fits_uhwi_p (maxsize)) + maxbitsize = (tree_to_uhwi (maxsize) * BITS_PER_UNIT + - tree_to_uhwi (DECL_FIELD_BIT_OFFSET (repr))); else maxbitsize = bitsize; } @@ -1921,8 +1924,8 @@ finish_bitfield_layout (record_layout_info rli) representative to be generated. That will at most generate worse code but still maintain correctness with respect to the C++ memory model. 
*/ - else if (!((host_integerp (DECL_FIELD_OFFSET (repr), 1) - && host_integerp (DECL_FIELD_OFFSET (field), 1)) + else if (!((tree_fits_uhwi_p (DECL_FIELD_OFFSET (repr)) + && tree_fits_uhwi_p (DECL_FIELD_OFFSET (field))) || operand_equal_p (DECL_FIELD_OFFSET (repr), DECL_FIELD_OFFSET (field), 0))) { @@ -2196,12 +2199,10 @@ layout_type (tree type) && tree_int_cst_lt (ub, lb)) { unsigned prec = TYPE_PRECISION (TREE_TYPE (lb)); - lb = double_int_to_tree - (ssizetype, - tree_to_double_int (lb).sext (prec)); - ub = double_int_to_tree - (ssizetype, - tree_to_double_int (ub).sext (prec)); + lb = wide_int_to_tree (ssizetype, + wi::sext (addr_wide_int (lb), prec)); + ub = wide_int_to_tree (ssizetype, + wi::sext (addr_wide_int (ub), prec)); } length = fold_convert (sizetype, @@ -2477,16 +2478,14 @@ initialize_sizetypes (void) TYPE_ALIGN (sizetype) = GET_MODE_ALIGNMENT (TYPE_MODE (sizetype)); TYPE_SIZE (sizetype) = bitsize_int (precision); TYPE_SIZE_UNIT (sizetype) = size_int (GET_MODE_SIZE (TYPE_MODE (sizetype))); - set_min_and_max_values_for_integral_type (sizetype, precision, - /*is_unsigned=*/true); + set_min_and_max_values_for_integral_type (sizetype, precision, UNSIGNED); SET_TYPE_MODE (bitsizetype, smallest_mode_for_size (bprecision, MODE_INT)); TYPE_ALIGN (bitsizetype) = GET_MODE_ALIGNMENT (TYPE_MODE (bitsizetype)); TYPE_SIZE (bitsizetype) = bitsize_int (bprecision); TYPE_SIZE_UNIT (bitsizetype) = size_int (GET_MODE_SIZE (TYPE_MODE (bitsizetype))); - set_min_and_max_values_for_integral_type (bitsizetype, bprecision, - /*is_unsigned=*/true); + set_min_and_max_values_for_integral_type (bitsizetype, bprecision, UNSIGNED); /* Create the signed variants of *sizetype. 
*/ ssizetype = make_signed_type (TYPE_PRECISION (sizetype)); @@ -2506,58 +2505,18 @@ initialize_sizetypes (void) void set_min_and_max_values_for_integral_type (tree type, int precision, - bool is_unsigned) + signop sgn) { - tree min_value; - tree max_value; - /* For bitfields with zero width we end up creating integer types with zero precision. Don't assign any minimum/maximum values to those types, they don't have any valid value. */ if (precision < 1) return; - if (is_unsigned) - { - min_value = build_int_cst (type, 0); - max_value - = build_int_cst_wide (type, precision - HOST_BITS_PER_WIDE_INT >= 0 - ? -1 - : ((HOST_WIDE_INT) 1 << precision) - 1, - precision - HOST_BITS_PER_WIDE_INT > 0 - ? ((unsigned HOST_WIDE_INT) ~0 - >> (HOST_BITS_PER_WIDE_INT - - (precision - HOST_BITS_PER_WIDE_INT))) - : 0); - } - else - { - min_value - = build_int_cst_wide (type, - (precision - HOST_BITS_PER_WIDE_INT > 0 - ? 0 - : (HOST_WIDE_INT) (-1) << (precision - 1)), - (((HOST_WIDE_INT) (-1) - << (precision - HOST_BITS_PER_WIDE_INT - 1 > 0 - ? precision - HOST_BITS_PER_WIDE_INT - 1 - : 0)))); - max_value - = build_int_cst_wide (type, - (precision - HOST_BITS_PER_WIDE_INT > 0 - ? -1 - : (HOST_WIDE_INT) - (((unsigned HOST_WIDE_INT) 1 - << (precision - 1)) - 1)), - (precision - HOST_BITS_PER_WIDE_INT - 1 > 0 - ? 
(HOST_WIDE_INT) - ((((unsigned HOST_WIDE_INT) 1 - << (precision - HOST_BITS_PER_WIDE_INT - - 1))) - 1) - : 0)); - } - - TYPE_MIN_VALUE (type) = min_value; - TYPE_MAX_VALUE (type) = max_value; + TYPE_MIN_VALUE (type) + = wide_int_to_tree (type, wi::min_value (precision, sgn)); + TYPE_MAX_VALUE (type) + = wide_int_to_tree (type, wi::max_value (precision, sgn)); } /* Set the extreme values of TYPE based on its precision in bits, @@ -2570,14 +2529,7 @@ fixup_signed_type (tree type) { int precision = TYPE_PRECISION (type); - /* We can not represent properly constants greater then - HOST_BITS_PER_DOUBLE_INT, still we need the types - as they are used by i386 vector extensions and friends. */ - if (precision > HOST_BITS_PER_DOUBLE_INT) - precision = HOST_BITS_PER_DOUBLE_INT; - - set_min_and_max_values_for_integral_type (type, precision, - /*is_unsigned=*/false); + set_min_and_max_values_for_integral_type (type, precision, SIGNED); /* Lay out the type: set its alignment, size, etc. */ layout_type (type); @@ -2592,16 +2544,9 @@ fixup_unsigned_type (tree type) { int precision = TYPE_PRECISION (type); - /* We can not represent properly constants greater then - HOST_BITS_PER_DOUBLE_INT, still we need the types - as they are used by i386 vector extensions and friends. */ - if (precision > HOST_BITS_PER_DOUBLE_INT) - precision = HOST_BITS_PER_DOUBLE_INT; - TYPE_UNSIGNED (type) = 1; - set_min_and_max_values_for_integral_type (type, precision, - /*is_unsigned=*/true); + set_min_and_max_values_for_integral_type (type, precision, UNSIGNED); /* Lay out the type: set its alignment, size, etc. */ layout_type (type); diff --git a/gcc/system.h b/gcc/system.h index ce817d4ec96..a706312248b 100644 --- a/gcc/system.h +++ b/gcc/system.h @@ -714,6 +714,10 @@ extern void fancy_abort (const char *, int, const char *) ATTRIBUTE_NORETURN; #define gcc_unreachable() (fancy_abort (__FILE__, __LINE__, __FUNCTION__)) #endif +/* Until we can use STATIC_ASSERT. 
*/ +#define STATIC_ASSERT(X) \ + typedef int assertion1[(X) ? 1 : -1] ATTRIBUTE_UNUSED + /* Provide a fake boolean type. We make no attempt to use the C99 _Bool, as it may not be available in the bootstrap compiler, and even if it is, it is liable to be buggy. diff --git a/gcc/targhooks.c b/gcc/targhooks.c index ec73a6406d2..3fbd7190e83 100644 --- a/gcc/targhooks.c +++ b/gcc/targhooks.c @@ -979,7 +979,7 @@ tree default_mangle_decl_assembler_name (tree decl ATTRIBUTE_UNUSED, HOST_WIDE_INT default_vector_alignment (const_tree type) { - return tree_low_cst (TYPE_SIZE (type), 0); + return tree_to_shwi (TYPE_SIZE (type)); } bool diff --git a/gcc/testsuite/gcc.dg/20020219-1.c b/gcc/testsuite/gcc.dg/20020219-1.c index d2ba755f50f..e3d22a76af1 100644 --- a/gcc/testsuite/gcc.dg/20020219-1.c +++ b/gcc/testsuite/gcc.dg/20020219-1.c @@ -1,5 +1,5 @@ /* PR c/4389 - This testcase failed because host_integerp (x, 0) was returning + This testcase failed because tree_fits_shwi_p (x) was returning 1 even for constants bigger than 2^31. It fails under under hppa hpux without -mdisable-indexing because the pointer x - 1 is used as the base address of an indexed load. 
Because the struct A is not diff --git a/gcc/testsuite/gcc.dg/tree-ssa/pr45427.c b/gcc/testsuite/gcc.dg/tree-ssa/pr45427.c index 0952b5a04f8..0bd1a188278 100644 --- a/gcc/testsuite/gcc.dg/tree-ssa/pr45427.c +++ b/gcc/testsuite/gcc.dg/tree-ssa/pr45427.c @@ -25,5 +25,5 @@ int main() return 0; } -/* { dg-final { scan-tree-dump-times "bounded by 0" 0 "cunrolli"} } */ +/* { dg-final { scan-tree-dump-times "bounded by 0x0\[^0-9a-f\]" 0 "cunrolli"} } */ /* { dg-final { cleanup-tree-dump "cunrolli" } } */ diff --git a/gcc/trans-mem.c b/gcc/trans-mem.c index 037cbcef743..3371c7ea26e 100644 --- a/gcc/trans-mem.c +++ b/gcc/trans-mem.c @@ -1073,8 +1073,8 @@ tm_log_add (basic_block entry_block, tree addr, gimple stmt) if (entry_block && transaction_invariant_address_p (lp->addr, entry_block) && TYPE_SIZE_UNIT (type) != NULL - && host_integerp (TYPE_SIZE_UNIT (type), 1) - && (tree_low_cst (TYPE_SIZE_UNIT (type), 1) + && tree_fits_uhwi_p (TYPE_SIZE_UNIT (type)) + && ((HOST_WIDE_INT)(tree_to_uhwi (TYPE_SIZE_UNIT (type))) < PARAM_VALUE (PARAM_TM_MAX_AGGREGATE_SIZE)) /* We must be able to copy this type normally. I.e., no special constructors and the like. 
*/ @@ -1157,9 +1157,9 @@ tm_log_emit_stmt (tree addr, gimple stmt) code = BUILT_IN_TM_LOG_DOUBLE; else if (type == long_double_type_node) code = BUILT_IN_TM_LOG_LDOUBLE; - else if (host_integerp (size, 1)) + else if (tree_fits_uhwi_p (size)) { - unsigned int n = tree_low_cst (size, 1); + unsigned int n = tree_to_uhwi (size); switch (n) { case 1: @@ -2075,9 +2075,9 @@ build_tm_load (location_t loc, tree lhs, tree rhs, gimple_stmt_iterator *gsi) else if (type == long_double_type_node) code = BUILT_IN_TM_LOAD_LDOUBLE; else if (TYPE_SIZE_UNIT (type) != NULL - && host_integerp (TYPE_SIZE_UNIT (type), 1)) + && tree_fits_uhwi_p (TYPE_SIZE_UNIT (type))) { - switch (tree_low_cst (TYPE_SIZE_UNIT (type), 1)) + switch (tree_to_uhwi (TYPE_SIZE_UNIT (type))) { case 1: code = BUILT_IN_TM_LOAD_1; @@ -2147,9 +2147,9 @@ build_tm_store (location_t loc, tree lhs, tree rhs, gimple_stmt_iterator *gsi) else if (type == long_double_type_node) code = BUILT_IN_TM_STORE_LDOUBLE; else if (TYPE_SIZE_UNIT (type) != NULL - && host_integerp (TYPE_SIZE_UNIT (type), 1)) + && tree_fits_uhwi_p (TYPE_SIZE_UNIT (type))) { - switch (tree_low_cst (TYPE_SIZE_UNIT (type), 1)) + switch (tree_to_uhwi (TYPE_SIZE_UNIT (type))) { case 1: code = BUILT_IN_TM_STORE_1; @@ -3088,7 +3088,7 @@ expand_block_edges (struct tm_region *const region, basic_block bb) // TM_ABORT directly get what they deserve. tree arg = gimple_call_arg (stmt, 0); if (TREE_CODE (arg) == INTEGER_CST - && (TREE_INT_CST_LOW (arg) & AR_OUTERABORT) != 0 + && (tree_to_hwi (arg) & AR_OUTERABORT) != 0 && !decl_is_tm_clone (current_function_decl)) { // Find the GTMA_IS_OUTER transaction. diff --git a/gcc/tree-affine.c b/gcc/tree-affine.c index 81da521277f..3e83344d8f0 100644 --- a/gcc/tree-affine.c +++ b/gcc/tree-affine.c @@ -27,33 +27,37 @@ along with GCC; see the file COPYING3. If not see #include "gimple.h" #include "flags.h" #include "dumpfile.h" +#include "wide-int-print.h" + /* Extends CST as appropriate for the affine combinations COMB. 
*/ -double_int -double_int_ext_for_comb (double_int cst, aff_tree *comb) +max_wide_int +wide_int_ext_for_comb (max_wide_int cst, aff_tree *comb) { - return cst.sext (TYPE_PRECISION (comb->type)); + return wi::sext (cst, TYPE_PRECISION (comb->type)); } - /* Initializes affine combination COMB so that its value is zero in TYPE. */ static void aff_combination_zero (aff_tree *comb, tree type) { + int i; comb->type = type; - comb->offset = double_int_zero; + comb->offset = 0; comb->n = 0; + for (i = 0; i < MAX_AFF_ELTS; i++) + comb->elts[i].coef = 0; comb->rest = NULL_TREE; } /* Sets COMB to CST. */ void -aff_combination_const (aff_tree *comb, tree type, double_int cst) +aff_combination_const (aff_tree *comb, tree type, const max_wide_int &cst) { aff_combination_zero (comb, type); - comb->offset = double_int_ext_for_comb (cst, comb); + comb->offset = wide_int_ext_for_comb (cst, comb); } /* Sets COMB to single element ELT. */ @@ -65,37 +69,35 @@ aff_combination_elt (aff_tree *comb, tree type, tree elt) comb->n = 1; comb->elts[0].val = elt; - comb->elts[0].coef = double_int_one; + comb->elts[0].coef = 1; } /* Scales COMB by SCALE. */ void -aff_combination_scale (aff_tree *comb, double_int scale) +aff_combination_scale (aff_tree *comb, max_wide_int scale) { unsigned i, j; - scale = double_int_ext_for_comb (scale, comb); - if (scale.is_one ()) + scale = wide_int_ext_for_comb (scale, comb); + if (scale == 1) return; - if (scale.is_zero ()) + if (scale == 0) { aff_combination_zero (comb, comb->type); return; } - comb->offset - = double_int_ext_for_comb (scale * comb->offset, comb); + comb->offset = wide_int_ext_for_comb (scale * comb->offset, comb); for (i = 0, j = 0; i < comb->n; i++) { - double_int new_coef; + max_wide_int new_coef; - new_coef - = double_int_ext_for_comb (scale * comb->elts[i].coef, comb); + new_coef = wide_int_ext_for_comb (scale * comb->elts[i].coef, comb); /* A coefficient may become zero due to overflow. Remove the zero elements.
*/ - if (new_coef.is_zero ()) + if (new_coef == 0) continue; comb->elts[j].coef = new_coef; comb->elts[j].val = comb->elts[i].val; @@ -117,30 +119,29 @@ aff_combination_scale (aff_tree *comb, double_int scale) } else comb->rest = fold_build2 (MULT_EXPR, type, comb->rest, - double_int_to_tree (type, scale)); + wide_int_to_tree (type, scale)); } } /* Adds ELT * SCALE to COMB. */ void -aff_combination_add_elt (aff_tree *comb, tree elt, double_int scale) +aff_combination_add_elt (aff_tree *comb, tree elt, max_wide_int scale) { unsigned i; tree type; - scale = double_int_ext_for_comb (scale, comb); - if (scale.is_zero ()) + scale = wide_int_ext_for_comb (scale, comb); + if (scale == 0) return; for (i = 0; i < comb->n; i++) if (operand_equal_p (comb->elts[i].val, elt, 0)) { - double_int new_coef; + max_wide_int new_coef; - new_coef = comb->elts[i].coef + scale; - new_coef = double_int_ext_for_comb (new_coef, comb); - if (!new_coef.is_zero ()) + new_coef = wide_int_ext_for_comb (comb->elts[i].coef + scale, comb); + if (new_coef != 0) { comb->elts[i].coef = new_coef; return; @@ -152,7 +153,7 @@ aff_combination_add_elt (aff_tree *comb, tree elt, double_int scale) if (comb->rest) { gcc_assert (comb->n == MAX_AFF_ELTS - 1); - comb->elts[comb->n].coef = double_int_one; + comb->elts[comb->n].coef = 1; comb->elts[comb->n].val = comb->rest; comb->rest = NULL_TREE; comb->n++; @@ -171,12 +172,12 @@ aff_combination_add_elt (aff_tree *comb, tree elt, double_int scale) if (POINTER_TYPE_P (type)) type = sizetype; - if (scale.is_one ()) + if (scale == 1) elt = fold_convert (type, elt); else elt = fold_build2 (MULT_EXPR, type, fold_convert (type, elt), - double_int_to_tree (type, scale)); + wide_int_to_tree (type, scale)); if (comb->rest) comb->rest = fold_build2 (PLUS_EXPR, type, comb->rest, @@ -188,9 +189,9 @@ aff_combination_add_elt (aff_tree *comb, tree elt, double_int scale) /* Adds CST to C. 
*/ static void -aff_combination_add_cst (aff_tree *c, double_int cst) +aff_combination_add_cst (aff_tree *c, const max_wide_int &cst) { - c->offset = double_int_ext_for_comb (c->offset + cst, c); + c->offset = wide_int_ext_for_comb (c->offset + cst, c); } /* Adds COMB2 to COMB1. */ @@ -204,7 +205,7 @@ aff_combination_add (aff_tree *comb1, aff_tree *comb2) for (i = 0; i < comb2->n; i++) aff_combination_add_elt (comb1, comb2->elts[i].val, comb2->elts[i].coef); if (comb2->rest) - aff_combination_add_elt (comb1, comb2->rest, double_int_one); + aff_combination_add_elt (comb1, comb2->rest, 1); } /* Converts affine combination COMB to TYPE. */ @@ -229,11 +230,11 @@ aff_combination_convert (aff_tree *comb, tree type) if (TYPE_PRECISION (type) == TYPE_PRECISION (comb_type)) return; - comb->offset = double_int_ext_for_comb (comb->offset, comb); + comb->offset = wide_int_ext_for_comb (comb->offset, comb); for (i = j = 0; i < comb->n; i++) { - double_int new_coef = double_int_ext_for_comb (comb->elts[i].coef, comb); - if (new_coef.is_zero ()) + max_wide_int new_coef = comb->elts[i].coef; + if (new_coef == 0) continue; comb->elts[j].coef = new_coef; comb->elts[j].val = fold_convert (type, comb->elts[i].val); @@ -243,7 +244,7 @@ aff_combination_convert (aff_tree *comb, tree type) comb->n = j; if (comb->n < MAX_AFF_ELTS && comb->rest) { - comb->elts[comb->n].coef = double_int_one; + comb->elts[comb->n].coef = 1; comb->elts[comb->n].val = comb->rest; comb->rest = NULL_TREE; comb->n++; @@ -268,7 +269,7 @@ tree_to_aff_combination (tree expr, tree type, aff_tree *comb) switch (code) { case INTEGER_CST: - aff_combination_const (comb, type, tree_to_double_int (expr)); + aff_combination_const (comb, type, expr); return; case POINTER_PLUS_EXPR: @@ -282,7 +283,7 @@ tree_to_aff_combination (tree expr, tree type, aff_tree *comb) tree_to_aff_combination (TREE_OPERAND (expr, 0), type, comb); tree_to_aff_combination (TREE_OPERAND (expr, 1), type, &tmp); if (code == MINUS_EXPR) - 
aff_combination_scale (&tmp, double_int_minus_one); + aff_combination_scale (&tmp, -1); aff_combination_add (comb, &tmp); return; @@ -291,19 +292,19 @@ tree_to_aff_combination (tree expr, tree type, aff_tree *comb) if (TREE_CODE (cst) != INTEGER_CST) break; tree_to_aff_combination (TREE_OPERAND (expr, 0), type, comb); - aff_combination_scale (comb, tree_to_double_int (cst)); + aff_combination_scale (comb, cst); return; case NEGATE_EXPR: tree_to_aff_combination (TREE_OPERAND (expr, 0), type, comb); - aff_combination_scale (comb, double_int_minus_one); + aff_combination_scale (comb, -1); return; case BIT_NOT_EXPR: /* ~x = -x - 1 */ tree_to_aff_combination (TREE_OPERAND (expr, 0), type, comb); - aff_combination_scale (comb, double_int_minus_one); - aff_combination_add_cst (comb, double_int_minus_one); + aff_combination_scale (comb, -1); + aff_combination_add_cst (comb, -1); return; case ADDR_EXPR: @@ -321,11 +322,10 @@ tree_to_aff_combination (tree expr, tree type, aff_tree *comb) false); if (bitpos % BITS_PER_UNIT != 0) break; - aff_combination_const (comb, type, - double_int::from_uhwi (bitpos / BITS_PER_UNIT)); + aff_combination_const (comb, type, bitpos / BITS_PER_UNIT); core = build_fold_addr_expr (core); if (TREE_CODE (core) == ADDR_EXPR) - aff_combination_add_elt (comb, core, double_int_one); + aff_combination_add_elt (comb, core, 1); else { tree_to_aff_combination (core, type, &tmp); @@ -368,25 +368,25 @@ tree_to_aff_combination (tree expr, tree type, aff_tree *comb) combination COMB. 
*/ static tree -add_elt_to_tree (tree expr, tree type, tree elt, double_int scale, - aff_tree *comb) +add_elt_to_tree (tree expr, tree type, tree elt, max_wide_int scale, + aff_tree *comb ATTRIBUTE_UNUSED) { enum tree_code code; tree type1 = type; if (POINTER_TYPE_P (type)) type1 = sizetype; - scale = double_int_ext_for_comb (scale, comb); + scale = wide_int_ext_for_comb (scale, comb); - if (scale.is_minus_one () + if (scale == -1 && POINTER_TYPE_P (TREE_TYPE (elt))) { elt = convert_to_ptrofftype (elt); elt = fold_build1 (NEGATE_EXPR, TREE_TYPE (elt), elt); - scale = double_int_one; + scale = max_wide_int (1); } - if (scale.is_one ()) + if (scale == 1) { if (!expr) { @@ -404,7 +404,7 @@ add_elt_to_tree (tree expr, tree type, tree elt, double_int scale, expr, fold_convert (type1, elt)); } - if (scale.is_minus_one ()) + if (scale == -1) { if (!expr) return fold_build1 (NEGATE_EXPR, type1, @@ -423,9 +423,9 @@ add_elt_to_tree (tree expr, tree type, tree elt, double_int scale, elt = fold_convert (type1, elt); if (!expr) return fold_build2 (MULT_EXPR, type1, elt, - double_int_to_tree (type1, scale)); + wide_int_to_tree (type1, scale)); - if (scale.is_negative ()) + if (wi::neg_p (scale)) { code = MINUS_EXPR; scale = -scale; @@ -434,7 +434,7 @@ add_elt_to_tree (tree expr, tree type, tree elt, double_int scale, code = PLUS_EXPR; elt = fold_build2 (MULT_EXPR, type1, elt, - double_int_to_tree (type1, scale)); + wide_int_to_tree (type1, scale)); if (POINTER_TYPE_P (TREE_TYPE (expr))) { if (code == MINUS_EXPR) @@ -452,7 +452,7 @@ aff_combination_to_tree (aff_tree *comb) tree type = comb->type; tree expr = NULL_TREE; unsigned i; - double_int off, sgn; + max_wide_int off, sgn; tree type1 = type; if (POINTER_TYPE_P (type)) type1 = sizetype; @@ -464,21 +464,21 @@ aff_combination_to_tree (aff_tree *comb) comb); if (comb->rest) - expr = add_elt_to_tree (expr, type, comb->rest, double_int_one, comb); + expr = add_elt_to_tree (expr, type, comb->rest, 1, comb); /* Ensure that we get x 
- 1, not x + (-1) or x + 0xff..f if x is unsigned. */ - if (comb->offset.is_negative ()) + if (wi::neg_p (comb->offset)) { off = -comb->offset; - sgn = double_int_minus_one; + sgn = -1; } else { off = comb->offset; - sgn = double_int_one; + sgn = 1; } - return add_elt_to_tree (expr, type, double_int_to_tree (type1, off), sgn, + return add_elt_to_tree (expr, type, wide_int_to_tree (type1, off), sgn, comb); } @@ -505,7 +505,7 @@ aff_combination_remove_elt (aff_tree *comb, unsigned m) comb->elts[m] = comb->elts[comb->n]; if (comb->rest) { - comb->elts[comb->n].coef = double_int_one; + comb->elts[comb->n].coef = 1; comb->elts[comb->n].val = comb->rest; comb->rest = NULL_TREE; comb->n++; @@ -517,7 +517,7 @@ aff_combination_remove_elt (aff_tree *comb, unsigned m) static void -aff_combination_add_product (aff_tree *c, double_int coef, tree val, +aff_combination_add_product (aff_tree *c, const max_wide_int &coef, tree val, aff_tree *r) { unsigned i; @@ -568,7 +568,7 @@ aff_combination_mult (aff_tree *c1, aff_tree *c2, aff_tree *r) for (i = 0; i < c2->n; i++) aff_combination_add_product (c1, c2->elts[i].coef, c2->elts[i].val, r); if (c2->rest) - aff_combination_add_product (c1, double_int_one, c2->rest, r); + aff_combination_add_product (c1, 1, c2->rest, r); aff_combination_add_product (c1, c2->offset, NULL, r); } @@ -615,7 +615,7 @@ aff_combination_expand (aff_tree *comb ATTRIBUTE_UNUSED, aff_tree to_add, current, curre; tree e, rhs; gimple def; - double_int scale; + max_wide_int scale; void **slot; struct name_expansion *exp; @@ -760,25 +760,24 @@ free_affine_expand_cache (struct pointer_map_t **cache) is set to true. 
*/ static bool -double_int_constant_multiple_p (double_int val, double_int div, - bool *mult_set, double_int *mult) +wide_int_constant_multiple_p (max_wide_int val, max_wide_int div, + bool *mult_set, max_wide_int *mult) { - double_int rem, cst; + max_wide_int rem, cst; - if (val.is_zero ()) + if (val == 0) { - if (*mult_set && !mult->is_zero ()) + if (*mult_set && mult != 0) return false; *mult_set = true; - *mult = double_int_zero; + *mult = 0; return true; } - if (div.is_zero ()) + if (div == 0) return false; - cst = val.sdivmod (div, FLOOR_DIV_EXPR, &rem); - if (!rem.is_zero ()) + if (!wi::multiple_of_p (val, div, SIGNED, &cst)) return false; if (*mult_set && *mult != cst) @@ -794,14 +793,14 @@ double_int_constant_multiple_p (double_int val, double_int div, bool aff_combination_constant_multiple_p (aff_tree *val, aff_tree *div, - double_int *mult) + max_wide_int *mult) { bool mult_set = false; unsigned i; - if (val->n == 0 && val->offset.is_zero ()) + if (val->n == 0 && val->offset == 0) { - *mult = double_int_zero; + *mult = 0; return true; } if (val->n != div->n) @@ -810,8 +809,8 @@ aff_combination_constant_multiple_p (aff_tree *val, aff_tree *div, if (val->rest || div->rest) return false; - if (!double_int_constant_multiple_p (val->offset, div->offset, - &mult_set, mult)) + if (!wide_int_constant_multiple_p (val->offset, div->offset, + &mult_set, mult)) return false; for (i = 0; i < div->n; i++) @@ -820,8 +819,8 @@ aff_combination_constant_multiple_p (aff_tree *val, aff_tree *div, = aff_combination_find_elt (val, div->elts[i].val, NULL); if (!elt) return false; - if (!double_int_constant_multiple_p (elt->coef, div->elts[i].coef, - &mult_set, mult)) + if (!wide_int_constant_multiple_p (elt->coef, div->elts[i].coef, + &mult_set, mult)) return false; } @@ -835,13 +834,13 @@ static void print_aff (FILE *file, aff_tree *val) { unsigned i; - bool uns = TYPE_UNSIGNED (val->type); + signop sgn = TYPE_SIGN (val->type); if (POINTER_TYPE_P (val->type)) - uns = false; + 
sgn = SIGNED; fprintf (file, "{\n type = "); print_generic_expr (file, val->type, TDF_VOPS|TDF_MEMSYMS); fprintf (file, "\n offset = "); - dump_double_int (file, val->offset, uns); + print_dec (val->offset, file, sgn); if (val->n > 0) { fprintf (file, "\n elements = {\n"); @@ -851,7 +850,7 @@ print_aff (FILE *file, aff_tree *val) print_generic_expr (file, val->elts[i].val, TDF_VOPS|TDF_MEMSYMS); fprintf (file, " * "); - dump_double_int (file, val->elts[i].coef, uns); + print_dec (val->elts[i].coef, file, sgn); if (i != val->n - 1) fprintf (file, ", \n"); } @@ -878,7 +877,7 @@ debug_aff (aff_tree *val) location is stored to SIZE. */ void -get_inner_reference_aff (tree ref, aff_tree *addr, double_int *size) +get_inner_reference_aff (tree ref, aff_tree *addr, max_wide_int *size) { HOST_WIDE_INT bitsize, bitpos; tree toff; @@ -899,37 +898,36 @@ get_inner_reference_aff (tree ref, aff_tree *addr, double_int *size) aff_combination_add (addr, &tmp); } - aff_combination_const (&tmp, sizetype, - double_int::from_shwi (bitpos / BITS_PER_UNIT)); + aff_combination_const (&tmp, sizetype, bitpos / BITS_PER_UNIT); aff_combination_add (addr, &tmp); - *size = double_int::from_shwi ((bitsize + BITS_PER_UNIT - 1) / BITS_PER_UNIT); + *size = (bitsize + BITS_PER_UNIT - 1) / BITS_PER_UNIT; } /* Returns true if a region of size SIZE1 at position 0 and a region of size SIZE2 at position DIFF cannot overlap. */ bool -aff_comb_cannot_overlap_p (aff_tree *diff, double_int size1, double_int size2) +aff_comb_cannot_overlap_p (aff_tree *diff, const max_wide_int &size1, const max_wide_int &size2) { - double_int d, bound; + max_wide_int d, bound; /* Unless the difference is a constant, we fail. */ if (diff->n != 0) return false; d = diff->offset; - if (d.is_negative ()) + if (wi::neg_p (d)) { /* The second object is before the first one, we succeed if the last element of the second object is before the start of the first one. 
*/ - bound = d + size2 + double_int_minus_one; - return bound.is_negative (); + bound = d + size2 - 1; + return wi::neg_p (bound); } else { /* We succeed if the second object starts after the first one ends. */ - return size1.sle (d); + return wi::les_p (size1, d); } } diff --git a/gcc/tree-affine.h b/gcc/tree-affine.h index b2558f7a4bd..be91ff6969e 100644 --- a/gcc/tree-affine.h +++ b/gcc/tree-affine.h @@ -20,6 +20,8 @@ along with GCC; see the file COPYING3. If not see /* Affine combination of trees. We keep track of at most MAX_AFF_ELTS elements to make things simpler; this is sufficient in most cases. */ +#include <wide-int.h> + #define MAX_AFF_ELTS 8 /* Element of an affine combination. */ @@ -30,7 +32,7 @@ struct aff_comb_elt tree val; /* Its coefficient in the combination. */ - double_int coef; + max_wide_int coef; }; typedef struct affine_tree_combination @@ -39,7 +41,7 @@ typedef struct affine_tree_combination tree type; /* Constant offset. */ - double_int offset; + max_wide_int offset; /* Number of elements of the combination. 
*/ unsigned n; @@ -58,25 +60,25 @@ typedef struct affine_tree_combination tree rest; } aff_tree; -double_int double_int_ext_for_comb (double_int, aff_tree *); -void aff_combination_const (aff_tree *, tree, double_int); +max_wide_int wide_int_ext_for_comb (max_wide_int, aff_tree *); +void aff_combination_const (aff_tree *, tree, const max_wide_int &); void aff_combination_elt (aff_tree *, tree, tree); -void aff_combination_scale (aff_tree *, double_int); +void aff_combination_scale (aff_tree *, max_wide_int); void aff_combination_mult (aff_tree *, aff_tree *, aff_tree *); void aff_combination_add (aff_tree *, aff_tree *); -void aff_combination_add_elt (aff_tree *, tree, double_int); +void aff_combination_add_elt (aff_tree *, tree, max_wide_int); void aff_combination_remove_elt (aff_tree *, unsigned); void aff_combination_convert (aff_tree *, tree); void tree_to_aff_combination (tree, tree, aff_tree *); tree aff_combination_to_tree (aff_tree *); void unshare_aff_combination (aff_tree *); -bool aff_combination_constant_multiple_p (aff_tree *, aff_tree *, double_int *); +bool aff_combination_constant_multiple_p (aff_tree *, aff_tree *, max_wide_int *); void aff_combination_expand (aff_tree *, struct pointer_map_t **); void tree_to_aff_combination_expand (tree, tree, aff_tree *, struct pointer_map_t **); -void get_inner_reference_aff (tree, aff_tree *, double_int *); +void get_inner_reference_aff (tree, aff_tree *, max_wide_int *); void free_affine_expand_cache (struct pointer_map_t **); -bool aff_comb_cannot_overlap_p (aff_tree *, double_int, double_int); +bool aff_comb_cannot_overlap_p (aff_tree *, const max_wide_int &, const max_wide_int &); /* Debugging functions. 
*/ void debug_aff (aff_tree *); diff --git a/gcc/tree-call-cdce.c b/gcc/tree-call-cdce.c index c38f694a5db..fa41e297e1b 100644 --- a/gcc/tree-call-cdce.c +++ b/gcc/tree-call-cdce.c @@ -195,7 +195,7 @@ check_pow (gimple pow_call) return false; if (REAL_VALUES_LESS (bcv, dconst1)) return false; - real_from_integer (&mv, TYPE_MODE (TREE_TYPE (base)), 256, 0, 1); + real_from_integer (&mv, TYPE_MODE (TREE_TYPE (base)), 256, UNSIGNED); if (REAL_VALUES_LESS (mv, bcv)) return false; return true; @@ -412,7 +412,7 @@ gen_conditions_for_pow_cst_base (tree base, tree expn, REAL_VALUE_TYPE bcv = TREE_REAL_CST (base); gcc_assert (!REAL_VALUES_EQUAL (bcv, dconst1) && !REAL_VALUES_LESS (bcv, dconst1)); - real_from_integer (&mv, TYPE_MODE (TREE_TYPE (base)), 256, 0, 1); + real_from_integer (&mv, TYPE_MODE (TREE_TYPE (base)), 256, UNSIGNED); gcc_assert (!REAL_VALUES_LESS (mv, bcv)); exp_domain = get_domain (0, false, false, diff --git a/gcc/tree-cfg.c b/gcc/tree-cfg.c index 3a1319132d5..a08c998b961 100644 --- a/gcc/tree-cfg.c +++ b/gcc/tree-cfg.c @@ -44,6 +44,8 @@ along with GCC; see the file COPYING3. If not see #include "tree-ssa-live.h" #include "omp-low.h" #include "tree-cfgcleanup.h" +#include "wide-int.h" +#include "wide-int-print.h" /* This file contains functions for building the Control Flow Graph (CFG) for a function tree. */ @@ -1304,12 +1306,12 @@ group_case_labels_stmt (gimple stmt) { tree merge_case = gimple_switch_label (stmt, i); basic_block merge_bb = label_to_block (CASE_LABEL (merge_case)); - double_int bhp1 = tree_to_double_int (base_high) + double_int_one; + wide_int bhp1 = wide_int (base_high) + 1; /* Merge the cases if they jump to the same place, and their ranges are consecutive. */ if (merge_bb == base_bb - && tree_to_double_int (CASE_LOW (merge_case)) == bhp1) + && wide_int (CASE_LOW (merge_case)) == bhp1) { base_high = CASE_HIGH (merge_case) ? 
CASE_HIGH (merge_case) : CASE_LOW (merge_case); @@ -2576,24 +2578,25 @@ verify_expr (tree *tp, int *walk_subtrees, void *data ATTRIBUTE_UNUSED) if (TREE_CODE (t) == BIT_FIELD_REF) { - if (!host_integerp (TREE_OPERAND (t, 1), 1) - || !host_integerp (TREE_OPERAND (t, 2), 1)) + if (!tree_fits_uhwi_p (TREE_OPERAND (t, 1)) + || !tree_fits_uhwi_p (TREE_OPERAND (t, 2))) { error ("invalid position or size operand to BIT_FIELD_REF"); return t; } if (INTEGRAL_TYPE_P (TREE_TYPE (t)) && (TYPE_PRECISION (TREE_TYPE (t)) - != TREE_INT_CST_LOW (TREE_OPERAND (t, 1)))) + != tree_to_uhwi (TREE_OPERAND (t, 1)))) { error ("integral result type precision does not match " "field size of BIT_FIELD_REF"); return t; } else if (!INTEGRAL_TYPE_P (TREE_TYPE (t)) + && !AGGREGATE_TYPE_P (TREE_TYPE (t)) && TYPE_MODE (TREE_TYPE (t)) != BLKmode && (GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (t))) - != TREE_INT_CST_LOW (TREE_OPERAND (t, 1)))) + != tree_to_uhwi (TREE_OPERAND (t, 1)))) { error ("mode precision of non-integral result does not " "match field size of BIT_FIELD_REF"); @@ -3402,7 +3405,7 @@ verify_gimple_assign_binary (gimple stmt) only allow shifting by a constant multiple of the element size. 
*/ if (!INTEGRAL_TYPE_P (TREE_TYPE (rhs1_type)) && (TREE_CODE (rhs2) != INTEGER_CST - || !div_if_zero_remainder (EXACT_DIV_EXPR, rhs2, + || !div_if_zero_remainder (rhs2, TYPE_SIZE (TREE_TYPE (rhs1_type))))) { error ("non-element sized vector shift of floating point vector"); @@ -6148,7 +6151,7 @@ move_stmt_eh_region_tree_nr (tree old_t_nr, struct move_stmt_d *p) { int old_nr, new_nr; - old_nr = tree_low_cst (old_t_nr, 0); + old_nr = tree_to_shwi (old_t_nr); new_nr = move_stmt_eh_region_nr (old_nr, p); return build_int_cst (integer_type_node, new_nr); @@ -7072,13 +7075,13 @@ print_loop (FILE *file, struct loop *loop, int indent, int verbosity) if (loop->any_upper_bound) { fprintf (file, ", upper_bound = "); - dump_double_int (file, loop->nb_iterations_upper_bound, true); + print_decu (loop->nb_iterations_upper_bound, file); } if (loop->any_estimate) { fprintf (file, ", estimate = "); - dump_double_int (file, loop->nb_iterations_estimate, true); + print_decu (loop->nb_iterations_estimate, file); } fprintf (file, ")\n"); diff --git a/gcc/tree-chrec.c b/gcc/tree-chrec.c index 64f2dcb13bf..b31566d8182 100644 --- a/gcc/tree-chrec.c +++ b/gcc/tree-chrec.c @@ -477,7 +477,7 @@ chrec_fold_multiply (tree type, static tree tree_fold_binomial (tree type, tree n, unsigned int k) { - double_int num, denom, idx, di_res; + wide_int num, denom, idx, di_res; bool overflow; unsigned int i; tree res; @@ -489,20 +489,20 @@ tree_fold_binomial (tree type, tree n, unsigned int k) return fold_convert (type, n); /* Numerator = n. */ - num = TREE_INT_CST (n); + num = n; /* Check that k <= n. */ - if (num.ult (double_int::from_uhwi (k))) + if (wi::ltu_p (num, k)) return NULL_TREE; /* Denominator = 2. */ - denom = double_int::from_uhwi (2); + denom = wi::two (TYPE_PRECISION (TREE_TYPE (n))); /* Index = Numerator-1. */ - idx = num - double_int_one; + idx = num - 1; /* Numerator = Numerator*Index = n*(n-1). 
*/ - num = num.mul_with_sign (idx, false, &overflow); + num = wi::smul (num, idx, &overflow); if (overflow) return NULL_TREE; @@ -512,17 +512,17 @@ tree_fold_binomial (tree type, tree n, unsigned int k) --idx; /* Numerator *= Index. */ - num = num.mul_with_sign (idx, false, &overflow); + num = wi::smul (num, idx, &overflow); if (overflow) return NULL_TREE; /* Denominator *= i. */ - denom *= double_int::from_uhwi (i); + denom *= i; } /* Result = Numerator / Denominator. */ - di_res = num.div (denom, true, EXACT_DIV_EXPR); - res = build_int_cst_wide (type, di_res.low, di_res.high); + di_res = wi::udiv_trunc (num, denom); + res = wide_int_to_tree (type, di_res); return int_fits_type_p (res, type) ? res : NULL_TREE; } diff --git a/gcc/tree-core.h b/gcc/tree-core.h index c7fc84f87ad..9e33bdd2e64 100644 --- a/gcc/tree-core.h +++ b/gcc/tree-core.h @@ -744,7 +744,8 @@ struct GTY(()) tree_base { /* The following fields are present in tree_base to save space. The nodes using them do not require any of the flags above and so can make better use of the 4-byte sized word. */ - /* VEC length. This field is only used with TREE_VEC. */ + /* VEC length. This field is only used with TREE_VEC and + TREE_INT_CST. */ int length; /* SSA version number. This field is only used with SSA_NAME. 
*/ unsigned int version; @@ -1022,7 +1023,7 @@ struct GTY(()) tree_common { struct GTY(()) tree_int_cst { struct tree_typed typed; - double_int int_cst; + HOST_WIDE_INT val[1]; }; diff --git a/gcc/tree-data-ref.c b/gcc/tree-data-ref.c index 9133df4a2b7..21085ae2293 100644 --- a/gcc/tree-data-ref.c +++ b/gcc/tree-data-ref.c @@ -773,8 +773,8 @@ dr_analyze_innermost (struct data_reference *dr, struct loop *nest) { if (!integer_zerop (TREE_OPERAND (base, 1))) { - double_int moff = mem_ref_offset (base); - tree mofft = double_int_to_tree (sizetype, moff); + addr_wide_int moff = mem_ref_offset (base); + tree mofft = wide_int_to_tree (sizetype, moff); if (!poffset) poffset = mofft; else @@ -1370,10 +1370,10 @@ dr_may_alias_p (const struct data_reference *a, const struct data_reference *b, if (!loop_nest) { aff_tree off1, off2; - double_int size1, size2; + max_wide_int size1, size2; get_inner_reference_aff (DR_REF (a), &off1, &size1); get_inner_reference_aff (DR_REF (b), &off2, &size2); - aff_combination_scale (&off1, double_int_minus_one); + aff_combination_scale (&off1, -1); aff_combination_add (&off2, &off1); if (aff_comb_cannot_overlap_p (&off2, size1, size2)) return false; @@ -1748,15 +1748,15 @@ analyze_ziv_subscript (tree chrec_a, static tree max_stmt_executions_tree (struct loop *loop) { - double_int nit; + max_wide_int nit; if (!max_stmt_executions (loop, &nit)) return chrec_dont_know; - if (!double_int_fits_to_tree_p (unsigned_type_node, nit)) + if (!wi::fits_to_tree_p (nit, unsigned_type_node)) return chrec_dont_know; - return double_int_to_tree (unsigned_type_node, nit); + return wide_int_to_tree (unsigned_type_node, nit); } /* Determine whether the CHREC is always positive/negative. 
If the expression @@ -2834,16 +2834,16 @@ gcd_of_steps_may_divide_p (const_tree chrec, const_tree cst) HOST_WIDE_INT cd = 0, val; tree step; - if (!host_integerp (cst, 0)) + if (!tree_fits_shwi_p (cst)) return true; - val = tree_low_cst (cst, 0); + val = tree_to_shwi (cst); while (TREE_CODE (chrec) == POLYNOMIAL_CHREC) { step = CHREC_RIGHT (chrec); - if (!host_integerp (step, 0)) + if (!tree_fits_shwi_p (step)) return true; - cd = gcd (cd, tree_low_cst (step, 0)); + cd = gcd (cd, tree_to_shwi (step)); chrec = CHREC_LEFT (chrec); } diff --git a/gcc/tree-dfa.c b/gcc/tree-dfa.c index bc825be8232..9df1958d8a9 100644 --- a/gcc/tree-dfa.c +++ b/gcc/tree-dfa.c @@ -383,7 +383,7 @@ get_ref_base_and_extent (tree exp, HOST_WIDE_INT *poffset, HOST_WIDE_INT bitsize = -1; HOST_WIDE_INT maxsize = -1; tree size_tree = NULL_TREE; - double_int bit_offset = double_int_zero; + addr_wide_int bit_offset = 0; HOST_WIDE_INT hbit_offset; bool seen_variable_array_ref = false; tree base_type; @@ -403,10 +403,10 @@ get_ref_base_and_extent (tree exp, HOST_WIDE_INT *poffset, } if (size_tree != NULL_TREE) { - if (! host_integerp (size_tree, 1)) + if (! tree_fits_uhwi_p (size_tree)) bitsize = -1; else - bitsize = TREE_INT_CST_LOW (size_tree); + bitsize = tree_to_uhwi (size_tree); } /* Initially, maxsize is the same as the accessed element size. @@ -422,7 +422,7 @@ get_ref_base_and_extent (tree exp, HOST_WIDE_INT *poffset, switch (TREE_CODE (exp)) { case BIT_FIELD_REF: - bit_offset += tree_to_double_int (TREE_OPERAND (exp, 2)); + bit_offset += TREE_OPERAND (exp, 2); break; case COMPONENT_REF: @@ -432,11 +432,12 @@ get_ref_base_and_extent (tree exp, HOST_WIDE_INT *poffset, if (this_offset && TREE_CODE (this_offset) == INTEGER_CST) { - double_int doffset = tree_to_double_int (this_offset); - doffset = doffset.lshift (BITS_PER_UNIT == 8 - ? 
3 : exact_log2 (BITS_PER_UNIT)); - doffset += tree_to_double_int (DECL_FIELD_BIT_OFFSET (field)); - bit_offset = bit_offset + doffset; + addr_wide_int woffset = this_offset; + woffset = wi::lshift (woffset, + (BITS_PER_UNIT == 8 + ? 3 : exact_log2 (BITS_PER_UNIT))); + woffset += DECL_FIELD_BIT_OFFSET (field); + bit_offset += woffset; /* If we had seen a variable array ref already and we just referenced the last field of a struct or a union member @@ -453,13 +454,13 @@ get_ref_base_and_extent (tree exp, HOST_WIDE_INT *poffset, { tree fsize = DECL_SIZE_UNIT (field); tree ssize = TYPE_SIZE_UNIT (stype); - if (host_integerp (fsize, 0) - && host_integerp (ssize, 0) - && doffset.fits_shwi ()) - maxsize += ((TREE_INT_CST_LOW (ssize) - - TREE_INT_CST_LOW (fsize)) + if (tree_fits_shwi_p (fsize) + && tree_fits_shwi_p (ssize) + && wi::fits_shwi_p (woffset)) + maxsize += ((tree_to_shwi (ssize) + - tree_to_shwi (fsize)) * BITS_PER_UNIT - - doffset.to_shwi ()); + - woffset.to_shwi ()); else maxsize = -1; } @@ -473,9 +474,9 @@ get_ref_base_and_extent (tree exp, HOST_WIDE_INT *poffset, because that would get us out of the structure otherwise. */ if (maxsize != -1 && csize - && host_integerp (csize, 1) - && bit_offset.fits_shwi ()) - maxsize = TREE_INT_CST_LOW (csize) + && tree_fits_uhwi_p (csize) + && wi::fits_shwi_p (bit_offset)) + maxsize = tree_to_shwi (csize) - bit_offset.to_shwi (); else maxsize = -1; @@ -496,13 +497,14 @@ get_ref_base_and_extent (tree exp, HOST_WIDE_INT *poffset, && (unit_size = array_ref_element_size (exp), TREE_CODE (unit_size) == INTEGER_CST)) { - double_int doffset - = (TREE_INT_CST (index) - TREE_INT_CST (low_bound)) - .sext (TYPE_PRECISION (TREE_TYPE (index))); - doffset *= tree_to_double_int (unit_size); - doffset = doffset.lshift (BITS_PER_UNIT == 8 - ? 
3 : exact_log2 (BITS_PER_UNIT)); - bit_offset = bit_offset + doffset; + addr_wide_int woffset + = wi::sext (addr_wide_int (index) - low_bound, + TYPE_PRECISION (TREE_TYPE (index))); + woffset *= addr_wide_int (unit_size); + woffset = wi::lshift (woffset, + (BITS_PER_UNIT == 8 + ? 3 : exact_log2 (BITS_PER_UNIT))); + bit_offset += woffset; /* An array ref with a constant index up in the structure hierarchy will constrain the size of any variable array ref @@ -517,10 +519,9 @@ get_ref_base_and_extent (tree exp, HOST_WIDE_INT *poffset, because that would get us outside of the array otherwise. */ if (maxsize != -1 && asize - && host_integerp (asize, 1) - && bit_offset.fits_shwi ()) - maxsize = TREE_INT_CST_LOW (asize) - - bit_offset.to_shwi (); + && tree_fits_uhwi_p (asize) + && wi::fits_shwi_p (bit_offset)) + maxsize = tree_to_uhwi (asize) - bit_offset.to_shwi (); else maxsize = -1; @@ -535,7 +536,7 @@ get_ref_base_and_extent (tree exp, HOST_WIDE_INT *poffset, break; case IMAGPART_EXPR: - bit_offset += double_int::from_uhwi (bitsize); + bit_offset += bitsize; break; case VIEW_CONVERT_EXPR: @@ -549,11 +550,11 @@ get_ref_base_and_extent (tree exp, HOST_WIDE_INT *poffset, exp = TREE_OPERAND (TREE_OPERAND (exp, 0), 0); else { - double_int off = mem_ref_offset (exp); - off = off.lshift (BITS_PER_UNIT == 8 - ? 3 : exact_log2 (BITS_PER_UNIT)); - off = off + bit_offset; - if (off.fits_shwi ()) + addr_wide_int off = mem_ref_offset (exp); + off = wi::lshift (off, (BITS_PER_UNIT == 8 + ? 
3 : exact_log2 (BITS_PER_UNIT))); + off += bit_offset; + if (wi::fits_shwi_p (off)) { bit_offset = off; exp = TREE_OPERAND (TREE_OPERAND (exp, 0), 0); @@ -571,7 +572,7 @@ get_ref_base_and_extent (tree exp, HOST_WIDE_INT *poffset, if (TMR_INDEX (exp) || TMR_INDEX2 (exp)) { exp = TREE_OPERAND (TMR_BASE (exp), 0); - bit_offset = double_int_zero; + bit_offset = 0; maxsize = -1; goto done; } @@ -579,11 +580,11 @@ get_ref_base_and_extent (tree exp, HOST_WIDE_INT *poffset, exp = TREE_OPERAND (TMR_BASE (exp), 0); else { - double_int off = mem_ref_offset (exp); - off = off.lshift (BITS_PER_UNIT == 8 - ? 3 : exact_log2 (BITS_PER_UNIT)); + addr_wide_int off = mem_ref_offset (exp); + off = wi::lshift (off, (BITS_PER_UNIT == 8 + ? 3 : exact_log2 (BITS_PER_UNIT))); off += bit_offset; - if (off.fits_shwi ()) + if (wi::fits_shwi_p (off)) { bit_offset = off; exp = TREE_OPERAND (TMR_BASE (exp), 0); @@ -600,7 +601,7 @@ get_ref_base_and_extent (tree exp, HOST_WIDE_INT *poffset, } done: - if (!bit_offset.fits_shwi ()) + if (!wi::fits_shwi_p (bit_offset)) { *poffset = 0; *psize = bitsize; @@ -624,9 +625,9 @@ get_ref_base_and_extent (tree exp, HOST_WIDE_INT *poffset, if (seen_variable_array_ref && maxsize != -1 - && (!host_integerp (TYPE_SIZE (base_type), 1) + && (!tree_fits_uhwi_p (TYPE_SIZE (base_type)) || (hbit_offset + maxsize - == (signed) TREE_INT_CST_LOW (TYPE_SIZE (base_type))))) + == (signed) tree_to_uhwi (TYPE_SIZE (base_type))))) maxsize = -1; /* In case of a decl or constant base object we can do better. */ @@ -636,16 +637,16 @@ get_ref_base_and_extent (tree exp, HOST_WIDE_INT *poffset, /* If maxsize is unknown adjust it according to the size of the base decl. 
*/ if (maxsize == -1 - && host_integerp (DECL_SIZE (exp), 1)) - maxsize = TREE_INT_CST_LOW (DECL_SIZE (exp)) - hbit_offset; + && tree_fits_uhwi_p (DECL_SIZE (exp))) + maxsize = tree_to_uhwi (DECL_SIZE (exp)) - hbit_offset; } else if (CONSTANT_CLASS_P (exp)) { /* If maxsize is unknown adjust it according to the size of the base type constant. */ if (maxsize == -1 - && host_integerp (TYPE_SIZE (TREE_TYPE (exp)), 1)) - maxsize = TREE_INT_CST_LOW (TYPE_SIZE (TREE_TYPE (exp))) - hbit_offset; + && tree_fits_uhwi_p (TYPE_SIZE (TREE_TYPE (exp)))) + maxsize = tree_to_uhwi (TYPE_SIZE (TREE_TYPE (exp))) - hbit_offset; } /* ??? Due to negative offsets in ARRAY_REF we can end up with diff --git a/gcc/tree-dfa.h b/gcc/tree-dfa.h index 7d0a47009f6..017cb823dd8 100644 --- a/gcc/tree-dfa.h +++ b/gcc/tree-dfa.h @@ -59,7 +59,7 @@ get_addr_base_and_unit_offset_1 (tree exp, HOST_WIDE_INT *poffset, { case BIT_FIELD_REF: { - HOST_WIDE_INT this_off = TREE_INT_CST_LOW (TREE_OPERAND (exp, 2)); + HOST_WIDE_INT this_off = tree_to_hwi (TREE_OPERAND (exp, 2)); if (this_off % BITS_PER_UNIT) return NULL_TREE; byte_offset += this_off / BITS_PER_UNIT; @@ -74,12 +74,12 @@ get_addr_base_and_unit_offset_1 (tree exp, HOST_WIDE_INT *poffset, if (!this_offset || TREE_CODE (this_offset) != INTEGER_CST - || (TREE_INT_CST_LOW (DECL_FIELD_BIT_OFFSET (field)) + || (tree_to_hwi (DECL_FIELD_BIT_OFFSET (field)) % BITS_PER_UNIT)) return NULL_TREE; - hthis_offset = TREE_INT_CST_LOW (this_offset); - hthis_offset += (TREE_INT_CST_LOW (DECL_FIELD_BIT_OFFSET (field)) + hthis_offset = tree_to_hwi (this_offset); + hthis_offset += (tree_to_hwi (DECL_FIELD_BIT_OFFSET (field)) / BITS_PER_UNIT); byte_offset += hthis_offset; } @@ -102,10 +102,10 @@ get_addr_base_and_unit_offset_1 (tree exp, HOST_WIDE_INT *poffset, && (unit_size = array_ref_element_size (exp), TREE_CODE (unit_size) == INTEGER_CST)) { - HOST_WIDE_INT hindex = TREE_INT_CST_LOW (index); + HOST_WIDE_INT hindex = tree_to_hwi (index); - hindex -= TREE_INT_CST_LOW 
(low_bound); - hindex *= TREE_INT_CST_LOW (unit_size); + hindex -= tree_to_hwi (low_bound); + hindex *= tree_to_hwi (unit_size); byte_offset += hindex; } else @@ -117,7 +117,7 @@ get_addr_base_and_unit_offset_1 (tree exp, HOST_WIDE_INT *poffset, break; case IMAGPART_EXPR: - byte_offset += TREE_INT_CST_LOW (TYPE_SIZE_UNIT (TREE_TYPE (exp))); + byte_offset += tree_to_hwi (TYPE_SIZE_UNIT (TREE_TYPE (exp))); break; case VIEW_CONVERT_EXPR: @@ -135,9 +135,8 @@ get_addr_base_and_unit_offset_1 (tree exp, HOST_WIDE_INT *poffset, { if (!integer_zerop (TREE_OPERAND (exp, 1))) { - double_int off = mem_ref_offset (exp); - gcc_assert (off.high == -1 || off.high == 0); - byte_offset += off.to_shwi (); + addr_wide_int off = mem_ref_offset (exp); + byte_offset += off.to_short_addr (); } exp = TREE_OPERAND (base, 0); } @@ -158,9 +157,8 @@ get_addr_base_and_unit_offset_1 (tree exp, HOST_WIDE_INT *poffset, return NULL_TREE; if (!integer_zerop (TMR_OFFSET (exp))) { - double_int off = mem_ref_offset (exp); - gcc_assert (off.high == -1 || off.high == 0); - byte_offset += off.to_shwi (); + addr_wide_int off = mem_ref_offset (exp); + byte_offset += off.to_short_addr (); } exp = TREE_OPERAND (base, 0); } diff --git a/gcc/tree-dump.c b/gcc/tree-dump.c index 17db244da64..08245d1982f 100644 --- a/gcc/tree-dump.c +++ b/gcc/tree-dump.c @@ -30,6 +30,8 @@ along with GCC; see the file COPYING3. 
If not see #include "tree-iterator.h" #include "tree-pretty-print.h" #include "tree-cfg.h" +#include "wide-int.h" +#include "wide-int-print.h" static unsigned int queue (dump_info_p, const_tree, int); static void dump_index (dump_info_p, unsigned int); @@ -561,9 +563,8 @@ dequeue_and_dump (dump_info_p di) break; case INTEGER_CST: - if (TREE_INT_CST_HIGH (t)) - dump_int (di, "high", TREE_INT_CST_HIGH (t)); - dump_int (di, "low", TREE_INT_CST_LOW (t)); + fprintf (di->stream, "int: "); + print_decs (wide_int (t), di->stream); break; case STRING_CST: diff --git a/gcc/tree-flow-inline.h b/gcc/tree-flow-inline.h new file mode 100644 index 00000000000..85f4bc9910e --- /dev/null +++ b/gcc/tree-flow-inline.h @@ -0,0 +1,1304 @@ +/* Inline functions for tree-flow.h + Copyright (C) 2001-2013 Free Software Foundation, Inc. + Contributed by Diego Novillo <dnovillo@redhat.com> + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 3, or (at your option) +any later version. + +GCC is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING3. If not see +<http://www.gnu.org/licenses/>. */ + +#ifndef _TREE_FLOW_INLINE_H +#define _TREE_FLOW_INLINE_H 1 + +/* Inline functions for manipulating various data structures defined in + tree-flow.h. See tree-flow.h for documentation. */ + +/* Return true when gimple SSA form was built. + gimple_in_ssa_p is queried by gimplifier in various early stages before SSA + infrastructure is initialized. Check for presence of the datastructures + at first place. 
*/ +static inline bool +gimple_in_ssa_p (const struct function *fun) +{ + return fun && fun->gimple_df && fun->gimple_df->in_ssa_p; +} + +/* Artificial variable used for the virtual operand FUD chain. */ +static inline tree +gimple_vop (const struct function *fun) +{ + gcc_checking_assert (fun && fun->gimple_df); + return fun->gimple_df->vop; +} + +/* Initialize the hashtable iterator HTI to point to hashtable TABLE */ + +static inline void * +first_htab_element (htab_iterator *hti, htab_t table) +{ + hti->htab = table; + hti->slot = table->entries; + hti->limit = hti->slot + htab_size (table); + do + { + PTR x = *(hti->slot); + if (x != HTAB_EMPTY_ENTRY && x != HTAB_DELETED_ENTRY) + break; + } while (++(hti->slot) < hti->limit); + + if (hti->slot < hti->limit) + return *(hti->slot); + return NULL; +} + +/* Return current non-empty/deleted slot of the hashtable pointed to by HTI, + or NULL if we have reached the end. */ + +static inline bool +end_htab_p (const htab_iterator *hti) +{ + if (hti->slot >= hti->limit) + return true; + return false; +} + +/* Advance the hashtable iterator pointed to by HTI to the next element of the + hashtable. */ + +static inline void * +next_htab_element (htab_iterator *hti) +{ + while (++(hti->slot) < hti->limit) + { + PTR x = *(hti->slot); + if (x != HTAB_EMPTY_ENTRY && x != HTAB_DELETED_ENTRY) + return x; + }; + return NULL; +} + +/* Get the number of the next statement uid to be allocated. */ +static inline unsigned int +gimple_stmt_max_uid (struct function *fn) +{ + return fn->last_stmt_uid; +} + +/* Set the number of the next statement uid to be allocated. */ +static inline void +set_gimple_stmt_max_uid (struct function *fn, unsigned int maxid) +{ + fn->last_stmt_uid = maxid; +} + +/* Set the number of the next statement uid to be allocated. 
*/ +static inline unsigned int +inc_gimple_stmt_max_uid (struct function *fn) +{ + return fn->last_stmt_uid++; +} + +/* Return the line number for EXPR, or return -1 if we have no line + number information for it. */ +static inline int +get_lineno (const_gimple stmt) +{ + location_t loc; + + if (!stmt) + return -1; + + loc = gimple_location (stmt); + if (loc == UNKNOWN_LOCATION) + return -1; + + return LOCATION_LINE (loc); +} + +/* Delink an immediate_uses node from its chain. */ +static inline void +delink_imm_use (ssa_use_operand_t *linknode) +{ + /* Return if this node is not in a list. */ + if (linknode->prev == NULL) + return; + + linknode->prev->next = linknode->next; + linknode->next->prev = linknode->prev; + linknode->prev = NULL; + linknode->next = NULL; +} + +/* Link ssa_imm_use node LINKNODE into the chain for LIST. */ +static inline void +link_imm_use_to_list (ssa_use_operand_t *linknode, ssa_use_operand_t *list) +{ + /* Link the new node at the head of the list. If we are in the process of + traversing the list, we won't visit any new nodes added to it. */ + linknode->prev = list; + linknode->next = list->next; + list->next->prev = linknode; + list->next = linknode; +} + +/* Link ssa_imm_use node LINKNODE into the chain for DEF. */ +static inline void +link_imm_use (ssa_use_operand_t *linknode, tree def) +{ + ssa_use_operand_t *root; + + if (!def || TREE_CODE (def) != SSA_NAME) + linknode->prev = NULL; + else + { + root = &(SSA_NAME_IMM_USE_NODE (def)); + if (linknode->use) + gcc_checking_assert (*(linknode->use) == def); + link_imm_use_to_list (linknode, root); + } +} + +/* Set the value of a use pointed to by USE to VAL. */ +static inline void +set_ssa_use_from_ptr (use_operand_p use, tree val) +{ + delink_imm_use (use); + *(use->use) = val; + link_imm_use (use, val); +} + +/* Link ssa_imm_use node LINKNODE into the chain for DEF, with use occurring + in STMT. 
*/ +static inline void +link_imm_use_stmt (ssa_use_operand_t *linknode, tree def, gimple stmt) +{ + if (stmt) + link_imm_use (linknode, def); + else + link_imm_use (linknode, NULL); + linknode->loc.stmt = stmt; +} + +/* Relink a new node in place of an old node in the list. */ +static inline void +relink_imm_use (ssa_use_operand_t *node, ssa_use_operand_t *old) +{ + /* The node one had better be in the same list. */ + gcc_checking_assert (*(old->use) == *(node->use)); + node->prev = old->prev; + node->next = old->next; + if (old->prev) + { + old->prev->next = node; + old->next->prev = node; + /* Remove the old node from the list. */ + old->prev = NULL; + } +} + +/* Relink ssa_imm_use node LINKNODE into the chain for OLD, with use occurring + in STMT. */ +static inline void +relink_imm_use_stmt (ssa_use_operand_t *linknode, ssa_use_operand_t *old, + gimple stmt) +{ + if (stmt) + relink_imm_use (linknode, old); + else + link_imm_use (linknode, NULL); + linknode->loc.stmt = stmt; +} + + +/* Return true is IMM has reached the end of the immediate use list. */ +static inline bool +end_readonly_imm_use_p (const imm_use_iterator *imm) +{ + return (imm->imm_use == imm->end_p); +} + +/* Initialize iterator IMM to process the list for VAR. */ +static inline use_operand_p +first_readonly_imm_use (imm_use_iterator *imm, tree var) +{ + imm->end_p = &(SSA_NAME_IMM_USE_NODE (var)); + imm->imm_use = imm->end_p->next; +#ifdef ENABLE_CHECKING + imm->iter_node.next = imm->imm_use->next; +#endif + if (end_readonly_imm_use_p (imm)) + return NULL_USE_OPERAND_P; + return imm->imm_use; +} + +/* Bump IMM to the next use in the list. */ +static inline use_operand_p +next_readonly_imm_use (imm_use_iterator *imm) +{ + use_operand_p old = imm->imm_use; + +#ifdef ENABLE_CHECKING + /* If this assertion fails, it indicates the 'next' pointer has changed + since the last bump. 
This indicates that the list is being modified + via stmt changes, or SET_USE, or somesuch thing, and you need to be + using the SAFE version of the iterator. */ + gcc_assert (imm->iter_node.next == old->next); + imm->iter_node.next = old->next->next; +#endif + + imm->imm_use = old->next; + if (end_readonly_imm_use_p (imm)) + return NULL_USE_OPERAND_P; + return imm->imm_use; +} + +/* tree-cfg.c */ +extern bool has_zero_uses_1 (const ssa_use_operand_t *head); +extern bool single_imm_use_1 (const ssa_use_operand_t *head, + use_operand_p *use_p, gimple *stmt); + +/* Return true if VAR has no nondebug uses. */ +static inline bool +has_zero_uses (const_tree var) +{ + const ssa_use_operand_t *const ptr = &(SSA_NAME_IMM_USE_NODE (var)); + + /* A single use_operand means there is no items in the list. */ + if (ptr == ptr->next) + return true; + + /* If there are debug stmts, we have to look at each use and see + whether there are any nondebug uses. */ + if (!MAY_HAVE_DEBUG_STMTS) + return false; + + return has_zero_uses_1 (ptr); +} + +/* Return true if VAR has a single nondebug use. */ +static inline bool +has_single_use (const_tree var) +{ + const ssa_use_operand_t *const ptr = &(SSA_NAME_IMM_USE_NODE (var)); + + /* If there aren't any uses whatsoever, we're done. */ + if (ptr == ptr->next) + return false; + + /* If there's a single use, check that it's not a debug stmt. */ + if (ptr == ptr->next->next) + return !is_gimple_debug (USE_STMT (ptr->next)); + + /* If there are debug stmts, we have to look at each of them. */ + if (!MAY_HAVE_DEBUG_STMTS) + return false; + + return single_imm_use_1 (ptr, NULL, NULL); +} + + +/* If VAR has only a single immediate nondebug use, return true, and + set USE_P and STMT to the use pointer and stmt of occurrence. */ +static inline bool +single_imm_use (const_tree var, use_operand_p *use_p, gimple *stmt) +{ + const ssa_use_operand_t *const ptr = &(SSA_NAME_IMM_USE_NODE (var)); + + /* If there aren't any uses whatsoever, we're done. 
*/ + if (ptr == ptr->next) + { + return_false: + *use_p = NULL_USE_OPERAND_P; + *stmt = NULL; + return false; + } + + /* If there's a single use, check that it's not a debug stmt. */ + if (ptr == ptr->next->next) + { + if (!is_gimple_debug (USE_STMT (ptr->next))) + { + *use_p = ptr->next; + *stmt = ptr->next->loc.stmt; + return true; + } + else + goto return_false; + } + + /* If there are debug stmts, we have to look at each of them. */ + if (!MAY_HAVE_DEBUG_STMTS) + goto return_false; + + return single_imm_use_1 (ptr, use_p, stmt); +} + +/* Return the number of nondebug immediate uses of VAR. */ +static inline unsigned int +num_imm_uses (const_tree var) +{ + const ssa_use_operand_t *const start = &(SSA_NAME_IMM_USE_NODE (var)); + const ssa_use_operand_t *ptr; + unsigned int num = 0; + + if (!MAY_HAVE_DEBUG_STMTS) + for (ptr = start->next; ptr != start; ptr = ptr->next) + num++; + else + for (ptr = start->next; ptr != start; ptr = ptr->next) + if (!is_gimple_debug (USE_STMT (ptr))) + num++; + + return num; +} + +/* Return the tree pointed-to by USE. */ +static inline tree +get_use_from_ptr (use_operand_p use) +{ + return *(use->use); +} + +/* Return the tree pointed-to by DEF. */ +static inline tree +get_def_from_ptr (def_operand_p def) +{ + return *def; +} + +/* Return a use_operand_p pointer for argument I of PHI node GS. */ + +static inline use_operand_p +gimple_phi_arg_imm_use_ptr (gimple gs, int i) +{ + return &gimple_phi_arg (gs, i)->imm_use; +} + +/* Return the tree operand for argument I of PHI node GS. */ + +static inline tree +gimple_phi_arg_def (gimple gs, size_t index) +{ + struct phi_arg_d *pd = gimple_phi_arg (gs, index); + return get_use_from_ptr (&pd->imm_use); +} + +/* Return a pointer to the tree operand for argument I of PHI node GS. */ + +static inline tree * +gimple_phi_arg_def_ptr (gimple gs, size_t index) +{ + return &gimple_phi_arg (gs, index)->def; +} + +/* Return the edge associated with argument I of phi node GS. 
*/ + +static inline edge +gimple_phi_arg_edge (gimple gs, size_t i) +{ + return EDGE_PRED (gimple_bb (gs), i); +} + +/* Return the source location of gimple argument I of phi node GS. */ + +static inline source_location +gimple_phi_arg_location (gimple gs, size_t i) +{ + return gimple_phi_arg (gs, i)->locus; +} + +/* Return the source location of the argument on edge E of phi node GS. */ + +static inline source_location +gimple_phi_arg_location_from_edge (gimple gs, edge e) +{ + return gimple_phi_arg (gs, e->dest_idx)->locus; +} + +/* Set the source location of gimple argument I of phi node GS to LOC. */ + +static inline void +gimple_phi_arg_set_location (gimple gs, size_t i, source_location loc) +{ + gimple_phi_arg (gs, i)->locus = loc; +} + +/* Return TRUE if argument I of phi node GS has a location record. */ + +static inline bool +gimple_phi_arg_has_location (gimple gs, size_t i) +{ + return gimple_phi_arg_location (gs, i) != UNKNOWN_LOCATION; +} + + +/* Return the PHI nodes for basic block BB, or NULL if there are no + PHI nodes. */ +static inline gimple_seq +phi_nodes (const_basic_block bb) +{ + gcc_checking_assert (!(bb->flags & BB_RTL)); + return bb->il.gimple.phi_nodes; +} + +static inline gimple_seq * +phi_nodes_ptr (basic_block bb) +{ + gcc_checking_assert (!(bb->flags & BB_RTL)); + return &bb->il.gimple.phi_nodes; +} + +/* Set PHI nodes of a basic block BB to SEQ. */ + +static inline void +set_phi_nodes (basic_block bb, gimple_seq seq) +{ + gimple_stmt_iterator i; + + gcc_checking_assert (!(bb->flags & BB_RTL)); + bb->il.gimple.phi_nodes = seq; + if (seq) + for (i = gsi_start (seq); !gsi_end_p (i); gsi_next (&i)) + gimple_set_bb (gsi_stmt (i), bb); +} + +/* Return the phi argument which contains the specified use. 
*/ + +static inline int +phi_arg_index_from_use (use_operand_p use) +{ + struct phi_arg_d *element, *root; + size_t index; + gimple phi; + + /* Since the use is the first thing in a PHI argument element, we can + calculate its index based on casting it to an argument, and performing + pointer arithmetic. */ + + phi = USE_STMT (use); + + element = (struct phi_arg_d *)use; + root = gimple_phi_arg (phi, 0); + index = element - root; + + /* Make sure the calculation doesn't have any leftover bytes. If it does, + then imm_use is likely not the first element in phi_arg_d. */ + gcc_checking_assert ((((char *)element - (char *)root) + % sizeof (struct phi_arg_d)) == 0 + && index < gimple_phi_capacity (phi)); + + return index; +} + +/* Return true if T (assumed to be a DECL) is a global variable. + A variable is considered global if its storage is not automatic. */ + +static inline bool +is_global_var (const_tree t) +{ + return (TREE_STATIC (t) || DECL_EXTERNAL (t)); +} + + +/* Return true if VAR may be aliased. A variable is considered as + maybe aliased if it has its address taken by the local TU + or possibly by another TU and might be modified through a pointer. */ + +static inline bool +may_be_aliased (const_tree var) +{ + return (TREE_CODE (var) != CONST_DECL + && !((TREE_STATIC (var) || TREE_PUBLIC (var) || DECL_EXTERNAL (var)) + && TREE_READONLY (var) + && !TYPE_NEEDS_CONSTRUCTING (TREE_TYPE (var))) + && (TREE_PUBLIC (var) + || DECL_EXTERNAL (var) + || TREE_ADDRESSABLE (var))); +} + + +/* PHI nodes should contain only ssa_names and invariants. A test + for ssa_name is definitely simpler; don't let invalid contents + slip in in the meantime. */ + +static inline bool +phi_ssa_name_p (const_tree t) +{ + if (TREE_CODE (t) == SSA_NAME) + return true; + gcc_checking_assert (is_gimple_min_invariant (t)); + return false; +} + + +/* Returns the loop of the statement STMT. 
*/ + +static inline struct loop * +loop_containing_stmt (gimple stmt) +{ + basic_block bb = gimple_bb (stmt); + if (!bb) + return NULL; + + return bb->loop_father; +} + + +/* ----------------------------------------------------------------------- */ + +/* The following set of routines are used to iterator over various type of + SSA operands. */ + +/* Return true if PTR is finished iterating. */ +static inline bool +op_iter_done (const ssa_op_iter *ptr) +{ + return ptr->done; +} + +/* Get the next iterator use value for PTR. */ +static inline use_operand_p +op_iter_next_use (ssa_op_iter *ptr) +{ + use_operand_p use_p; + gcc_checking_assert (ptr->iter_type == ssa_op_iter_use); + if (ptr->uses) + { + use_p = USE_OP_PTR (ptr->uses); + ptr->uses = ptr->uses->next; + return use_p; + } + if (ptr->i < ptr->numops) + { + return PHI_ARG_DEF_PTR (ptr->stmt, (ptr->i)++); + } + ptr->done = true; + return NULL_USE_OPERAND_P; +} + +/* Get the next iterator def value for PTR. */ +static inline def_operand_p +op_iter_next_def (ssa_op_iter *ptr) +{ + gcc_checking_assert (ptr->iter_type == ssa_op_iter_def); + if (ptr->flags & SSA_OP_VDEF) + { + tree *p; + ptr->flags &= ~SSA_OP_VDEF; + p = gimple_vdef_ptr (ptr->stmt); + if (p && *p) + return p; + } + if (ptr->flags & SSA_OP_DEF) + { + while (ptr->i < ptr->numops) + { + tree *val = gimple_op_ptr (ptr->stmt, ptr->i); + ptr->i++; + if (*val) + { + if (TREE_CODE (*val) == TREE_LIST) + val = &TREE_VALUE (*val); + if (TREE_CODE (*val) == SSA_NAME + || is_gimple_reg (*val)) + return val; + } + } + ptr->flags &= ~SSA_OP_DEF; + } + + ptr->done = true; + return NULL_DEF_OPERAND_P; +} + +/* Get the next iterator tree value for PTR. 
*/ +static inline tree +op_iter_next_tree (ssa_op_iter *ptr) +{ + tree val; + gcc_checking_assert (ptr->iter_type == ssa_op_iter_tree); + if (ptr->uses) + { + val = USE_OP (ptr->uses); + ptr->uses = ptr->uses->next; + return val; + } + if (ptr->flags & SSA_OP_VDEF) + { + ptr->flags &= ~SSA_OP_VDEF; + if ((val = gimple_vdef (ptr->stmt))) + return val; + } + if (ptr->flags & SSA_OP_DEF) + { + while (ptr->i < ptr->numops) + { + val = gimple_op (ptr->stmt, ptr->i); + ptr->i++; + if (val) + { + if (TREE_CODE (val) == TREE_LIST) + val = TREE_VALUE (val); + if (TREE_CODE (val) == SSA_NAME + || is_gimple_reg (val)) + return val; + } + } + ptr->flags &= ~SSA_OP_DEF; + } + + ptr->done = true; + return NULL_TREE; +} + + +/* This functions clears the iterator PTR, and marks it done. This is normally + used to prevent warnings in the compile about might be uninitialized + components. */ + +static inline void +clear_and_done_ssa_iter (ssa_op_iter *ptr) +{ + ptr->i = 0; + ptr->numops = 0; + ptr->uses = NULL; + ptr->iter_type = ssa_op_iter_none; + ptr->stmt = NULL; + ptr->done = true; + ptr->flags = 0; +} + +/* Initialize the iterator PTR to the virtual defs in STMT. */ +static inline void +op_iter_init (ssa_op_iter *ptr, gimple stmt, int flags) +{ + /* PHI nodes require a different iterator initialization path. We + do not support iterating over virtual defs or uses without + iterating over defs or uses at the same time. */ + gcc_checking_assert (gimple_code (stmt) != GIMPLE_PHI + && (!(flags & SSA_OP_VDEF) || (flags & SSA_OP_DEF)) + && (!(flags & SSA_OP_VUSE) || (flags & SSA_OP_USE))); + ptr->numops = 0; + if (flags & (SSA_OP_DEF | SSA_OP_VDEF)) + { + switch (gimple_code (stmt)) + { + case GIMPLE_ASSIGN: + case GIMPLE_CALL: + ptr->numops = 1; + break; + case GIMPLE_ASM: + ptr->numops = gimple_asm_noutputs (stmt); + break; + default: + ptr->numops = 0; + flags &= ~(SSA_OP_DEF | SSA_OP_VDEF); + break; + } + } + ptr->uses = (flags & (SSA_OP_USE|SSA_OP_VUSE)) ? 
gimple_use_ops (stmt) : NULL; + if (!(flags & SSA_OP_VUSE) + && ptr->uses + && gimple_vuse (stmt) != NULL_TREE) + ptr->uses = ptr->uses->next; + ptr->done = false; + ptr->i = 0; + + ptr->stmt = stmt; + ptr->flags = flags; +} + +/* Initialize iterator PTR to the use operands in STMT based on FLAGS. Return + the first use. */ +static inline use_operand_p +op_iter_init_use (ssa_op_iter *ptr, gimple stmt, int flags) +{ + gcc_checking_assert ((flags & SSA_OP_ALL_DEFS) == 0 + && (flags & SSA_OP_USE)); + op_iter_init (ptr, stmt, flags); + ptr->iter_type = ssa_op_iter_use; + return op_iter_next_use (ptr); +} + +/* Initialize iterator PTR to the def operands in STMT based on FLAGS. Return + the first def. */ +static inline def_operand_p +op_iter_init_def (ssa_op_iter *ptr, gimple stmt, int flags) +{ + gcc_checking_assert ((flags & SSA_OP_ALL_USES) == 0 + && (flags & SSA_OP_DEF)); + op_iter_init (ptr, stmt, flags); + ptr->iter_type = ssa_op_iter_def; + return op_iter_next_def (ptr); +} + +/* Initialize iterator PTR to the operands in STMT based on FLAGS. Return + the first operand as a tree. */ +static inline tree +op_iter_init_tree (ssa_op_iter *ptr, gimple stmt, int flags) +{ + op_iter_init (ptr, stmt, flags); + ptr->iter_type = ssa_op_iter_tree; + return op_iter_next_tree (ptr); +} + + +/* If there is a single operand in STMT matching FLAGS, return it. Otherwise + return NULL. */ +static inline tree +single_ssa_tree_operand (gimple stmt, int flags) +{ + tree var; + ssa_op_iter iter; + + var = op_iter_init_tree (&iter, stmt, flags); + if (op_iter_done (&iter)) + return NULL_TREE; + op_iter_next_tree (&iter); + if (op_iter_done (&iter)) + return var; + return NULL_TREE; +} + + +/* If there is a single operand in STMT matching FLAGS, return it. Otherwise + return NULL. 
*/ +static inline use_operand_p +single_ssa_use_operand (gimple stmt, int flags) +{ + use_operand_p var; + ssa_op_iter iter; + + var = op_iter_init_use (&iter, stmt, flags); + if (op_iter_done (&iter)) + return NULL_USE_OPERAND_P; + op_iter_next_use (&iter); + if (op_iter_done (&iter)) + return var; + return NULL_USE_OPERAND_P; +} + + + +/* If there is a single operand in STMT matching FLAGS, return it. Otherwise + return NULL. */ +static inline def_operand_p +single_ssa_def_operand (gimple stmt, int flags) +{ + def_operand_p var; + ssa_op_iter iter; + + var = op_iter_init_def (&iter, stmt, flags); + if (op_iter_done (&iter)) + return NULL_DEF_OPERAND_P; + op_iter_next_def (&iter); + if (op_iter_done (&iter)) + return var; + return NULL_DEF_OPERAND_P; +} + + +/* Return true if there are zero operands in STMT matching the type + given in FLAGS. */ +static inline bool +zero_ssa_operands (gimple stmt, int flags) +{ + ssa_op_iter iter; + + op_iter_init_tree (&iter, stmt, flags); + return op_iter_done (&iter); +} + + +/* Return the number of operands matching FLAGS in STMT. */ +static inline int +num_ssa_operands (gimple stmt, int flags) +{ + ssa_op_iter iter; + tree t; + int num = 0; + + gcc_checking_assert (gimple_code (stmt) != GIMPLE_PHI); + FOR_EACH_SSA_TREE_OPERAND (t, stmt, iter, flags) + num++; + return num; +} + +static inline use_operand_p +op_iter_init_phiuse (ssa_op_iter *ptr, gimple phi, int flags); + +/* Delink all immediate_use information for STMT. */ +static inline void +delink_stmt_imm_use (gimple stmt) +{ + ssa_op_iter iter; + use_operand_p use_p; + + if (ssa_operands_active (cfun)) + FOR_EACH_PHI_OR_STMT_USE (use_p, stmt, iter, SSA_OP_ALL_USES) + delink_imm_use (use_p); +} + + +/* If there is a single DEF in the PHI node which matches FLAG, return it. + Otherwise return NULL_DEF_OPERAND_P. 
*/ +static inline tree +single_phi_def (gimple stmt, int flags) +{ + tree def = PHI_RESULT (stmt); + if ((flags & SSA_OP_DEF) && is_gimple_reg (def)) + return def; + if ((flags & SSA_OP_VIRTUAL_DEFS) && !is_gimple_reg (def)) + return def; + return NULL_TREE; +} + +/* Initialize the iterator PTR for uses matching FLAGS in PHI. FLAGS should + be either SSA_OP_USES or SSA_OP_VIRTUAL_USES. */ +static inline use_operand_p +op_iter_init_phiuse (ssa_op_iter *ptr, gimple phi, int flags) +{ + tree phi_def = gimple_phi_result (phi); + int comp; + + clear_and_done_ssa_iter (ptr); + ptr->done = false; + + gcc_checking_assert ((flags & (SSA_OP_USE | SSA_OP_VIRTUAL_USES)) != 0); + + comp = (is_gimple_reg (phi_def) ? SSA_OP_USE : SSA_OP_VIRTUAL_USES); + + /* If the PHI node doesn't the operand type we care about, we're done. */ + if ((flags & comp) == 0) + { + ptr->done = true; + return NULL_USE_OPERAND_P; + } + + ptr->stmt = phi; + ptr->numops = gimple_phi_num_args (phi); + ptr->iter_type = ssa_op_iter_use; + ptr->flags = flags; + return op_iter_next_use (ptr); +} + + +/* Start an iterator for a PHI definition. */ + +static inline def_operand_p +op_iter_init_phidef (ssa_op_iter *ptr, gimple phi, int flags) +{ + tree phi_def = PHI_RESULT (phi); + int comp; + + clear_and_done_ssa_iter (ptr); + ptr->done = false; + + gcc_checking_assert ((flags & (SSA_OP_DEF | SSA_OP_VIRTUAL_DEFS)) != 0); + + comp = (is_gimple_reg (phi_def) ? SSA_OP_DEF : SSA_OP_VIRTUAL_DEFS); + + /* If the PHI node doesn't have the operand type we care about, + we're done. */ + if ((flags & comp) == 0) + { + ptr->done = true; + return NULL_DEF_OPERAND_P; + } + + ptr->iter_type = ssa_op_iter_def; + /* The first call to op_iter_next_def will terminate the iterator since + all the fields are NULL. Simply return the result here as the first and + therefore only result. */ + return PHI_RESULT_PTR (phi); +} + +/* Return true is IMM has reached the end of the immediate use stmt list. 
*/ + +static inline bool +end_imm_use_stmt_p (const imm_use_iterator *imm) +{ + return (imm->imm_use == imm->end_p); +} + +/* Finished the traverse of an immediate use stmt list IMM by removing the + placeholder node from the list. */ + +static inline void +end_imm_use_stmt_traverse (imm_use_iterator *imm) +{ + delink_imm_use (&(imm->iter_node)); +} + +/* Immediate use traversal of uses within a stmt require that all the + uses on a stmt be sequentially listed. This routine is used to build up + this sequential list by adding USE_P to the end of the current list + currently delimited by HEAD and LAST_P. The new LAST_P value is + returned. */ + +static inline use_operand_p +move_use_after_head (use_operand_p use_p, use_operand_p head, + use_operand_p last_p) +{ + gcc_checking_assert (USE_FROM_PTR (use_p) == USE_FROM_PTR (head)); + /* Skip head when we find it. */ + if (use_p != head) + { + /* If use_p is already linked in after last_p, continue. */ + if (last_p->next == use_p) + last_p = use_p; + else + { + /* Delink from current location, and link in at last_p. */ + delink_imm_use (use_p); + link_imm_use_to_list (use_p, last_p); + last_p = use_p; + } + } + return last_p; +} + + +/* This routine will relink all uses with the same stmt as HEAD into the list + immediately following HEAD for iterator IMM. */ + +static inline void +link_use_stmts_after (use_operand_p head, imm_use_iterator *imm) +{ + use_operand_p use_p; + use_operand_p last_p = head; + gimple head_stmt = USE_STMT (head); + tree use = USE_FROM_PTR (head); + ssa_op_iter op_iter; + int flag; + + /* Only look at virtual or real uses, depending on the type of HEAD. */ + flag = (is_gimple_reg (use) ? 
SSA_OP_USE : SSA_OP_VIRTUAL_USES); + + if (gimple_code (head_stmt) == GIMPLE_PHI) + { + FOR_EACH_PHI_ARG (use_p, head_stmt, op_iter, flag) + if (USE_FROM_PTR (use_p) == use) + last_p = move_use_after_head (use_p, head, last_p); + } + else + { + if (flag == SSA_OP_USE) + { + FOR_EACH_SSA_USE_OPERAND (use_p, head_stmt, op_iter, flag) + if (USE_FROM_PTR (use_p) == use) + last_p = move_use_after_head (use_p, head, last_p); + } + else if ((use_p = gimple_vuse_op (head_stmt)) != NULL_USE_OPERAND_P) + { + if (USE_FROM_PTR (use_p) == use) + last_p = move_use_after_head (use_p, head, last_p); + } + } + /* Link iter node in after last_p. */ + if (imm->iter_node.prev != NULL) + delink_imm_use (&imm->iter_node); + link_imm_use_to_list (&(imm->iter_node), last_p); +} + +/* Initialize IMM to traverse over uses of VAR. Return the first statement. */ +static inline gimple +first_imm_use_stmt (imm_use_iterator *imm, tree var) +{ + imm->end_p = &(SSA_NAME_IMM_USE_NODE (var)); + imm->imm_use = imm->end_p->next; + imm->next_imm_name = NULL_USE_OPERAND_P; + + /* iter_node is used as a marker within the immediate use list to indicate + where the end of the current stmt's uses are. Initialize it to NULL + stmt and use, which indicates a marker node. */ + imm->iter_node.prev = NULL_USE_OPERAND_P; + imm->iter_node.next = NULL_USE_OPERAND_P; + imm->iter_node.loc.stmt = NULL; + imm->iter_node.use = NULL; + + if (end_imm_use_stmt_p (imm)) + return NULL; + + link_use_stmts_after (imm->imm_use, imm); + + return USE_STMT (imm->imm_use); +} + +/* Bump IMM to the next stmt which has a use of var. */ + +static inline gimple +next_imm_use_stmt (imm_use_iterator *imm) +{ + imm->imm_use = imm->iter_node.next; + if (end_imm_use_stmt_p (imm)) + { + if (imm->iter_node.prev != NULL) + delink_imm_use (&imm->iter_node); + return NULL; + } + + link_use_stmts_after (imm->imm_use, imm); + return USE_STMT (imm->imm_use); +} + +/* This routine will return the first use on the stmt IMM currently refers + to. 
*/ + +static inline use_operand_p +first_imm_use_on_stmt (imm_use_iterator *imm) +{ + imm->next_imm_name = imm->imm_use->next; + return imm->imm_use; +} + +/* Return TRUE if the last use on the stmt IMM refers to has been visited. */ + +static inline bool +end_imm_use_on_stmt_p (const imm_use_iterator *imm) +{ + return (imm->imm_use == &(imm->iter_node)); +} + +/* Bump to the next use on the stmt IMM refers to, return NULL if done. */ + +static inline use_operand_p +next_imm_use_on_stmt (imm_use_iterator *imm) +{ + imm->imm_use = imm->next_imm_name; + if (end_imm_use_on_stmt_p (imm)) + return NULL_USE_OPERAND_P; + else + { + imm->next_imm_name = imm->imm_use->next; + return imm->imm_use; + } +} + +/* Return true if VAR cannot be modified by the program. */ + +static inline bool +unmodifiable_var_p (const_tree var) +{ + if (TREE_CODE (var) == SSA_NAME) + var = SSA_NAME_VAR (var); + + return TREE_READONLY (var) && (TREE_STATIC (var) || DECL_EXTERNAL (var)); +} + +/* Return true if REF, a handled component reference, has an ARRAY_REF + somewhere in it. */ + +static inline bool +ref_contains_array_ref (const_tree ref) +{ + gcc_checking_assert (handled_component_p (ref)); + + do { + if (TREE_CODE (ref) == ARRAY_REF) + return true; + ref = TREE_OPERAND (ref, 0); + } while (handled_component_p (ref)); + + return false; +} + +/* Return true if REF has a VIEW_CONVERT_EXPR somewhere in it. */ + +static inline bool +contains_view_convert_expr_p (const_tree ref) +{ + while (handled_component_p (ref)) + { + if (TREE_CODE (ref) == VIEW_CONVERT_EXPR) + return true; + ref = TREE_OPERAND (ref, 0); + } + + return false; +} + +/* Return true, if the two ranges [POS1, SIZE1] and [POS2, SIZE2] + overlap. SIZE1 and/or SIZE2 can be (unsigned)-1 in which case the + range is open-ended. Otherwise return false. 
*/ + +static inline bool +ranges_overlap_p (unsigned HOST_WIDE_INT pos1, + unsigned HOST_WIDE_INT size1, + unsigned HOST_WIDE_INT pos2, + unsigned HOST_WIDE_INT size2) +{ + if (pos1 >= pos2 + && (size2 == (unsigned HOST_WIDE_INT)-1 + || pos1 < (pos2 + size2))) + return true; + if (pos2 >= pos1 + && (size1 == (unsigned HOST_WIDE_INT)-1 + || pos2 < (pos1 + size1))) + return true; + + return false; +} + +/* Accessor to tree-ssa-operands.c caches. */ +static inline struct ssa_operands * +gimple_ssa_operands (const struct function *fun) +{ + return &fun->gimple_df->ssa_operands; +} + + +/* Returns the base object and a constant BITS_PER_UNIT offset in *POFFSET that + denotes the starting address of the memory access EXP. + Returns NULL_TREE if the offset is not constant or any component + is not BITS_PER_UNIT-aligned. + VALUEIZE if non-NULL is used to valueize SSA names. It should return + its argument or a constant if the argument is known to be constant. */ +/* ??? This is a static inline here to avoid the overhead of the indirect calls + to VALUEIZE. But is this overhead really that significant? And should we + perhaps just rely on WHOPR to specialize the function? */ + +static inline tree +get_addr_base_and_unit_offset_1 (tree exp, HOST_WIDE_INT *poffset, + tree (*valueize) (tree)) +{ + HOST_WIDE_INT byte_offset = 0; + + /* Compute cumulative byte-offset for nested component-refs and array-refs, + and find the ultimate containing object. 
*/ + while (1) + { + switch (TREE_CODE (exp)) + { + case BIT_FIELD_REF: + { + HOST_WIDE_INT this_off = tree_to_hwi (TREE_OPERAND (exp, 2)); + if (this_off % BITS_PER_UNIT) + return NULL_TREE; + byte_offset += this_off / BITS_PER_UNIT; + } + break; + + case COMPONENT_REF: + { + tree field = TREE_OPERAND (exp, 1); + tree this_offset = component_ref_field_offset (exp); + HOST_WIDE_INT hthis_offset; + + if (!this_offset + || TREE_CODE (this_offset) != INTEGER_CST + || (tree_to_hwi (DECL_FIELD_BIT_OFFSET (field)) + % BITS_PER_UNIT)) + return NULL_TREE; + + hthis_offset = tree_to_hwi (this_offset); + hthis_offset += (tree_to_hwi (DECL_FIELD_BIT_OFFSET (field)) + / BITS_PER_UNIT); + byte_offset += hthis_offset; + } + break; + + case ARRAY_REF: + case ARRAY_RANGE_REF: + { + tree index = TREE_OPERAND (exp, 1); + tree low_bound, unit_size; + + if (valueize + && TREE_CODE (index) == SSA_NAME) + index = (*valueize) (index); + + /* If the resulting bit-offset is constant, track it. */ + if (TREE_CODE (index) == INTEGER_CST + && (low_bound = array_ref_low_bound (exp), + TREE_CODE (low_bound) == INTEGER_CST) + && (unit_size = array_ref_element_size (exp), + TREE_CODE (unit_size) == INTEGER_CST)) + { + HOST_WIDE_INT hindex = tree_to_hwi (index); + + hindex -= tree_to_hwi (low_bound); + hindex *= tree_to_hwi (unit_size); + byte_offset += hindex; + } + else + return NULL_TREE; + } + break; + + case REALPART_EXPR: + break; + + case IMAGPART_EXPR: + byte_offset += tree_to_hwi (TYPE_SIZE_UNIT (TREE_TYPE (exp))); + break; + + case VIEW_CONVERT_EXPR: + break; + + case MEM_REF: + { + tree base = TREE_OPERAND (exp, 0); + if (valueize + && TREE_CODE (base) == SSA_NAME) + base = (*valueize) (base); + + /* Hand back the decl for MEM[&decl, off]. 
*/ + if (TREE_CODE (base) == ADDR_EXPR) + { + if (!integer_zerop (TREE_OPERAND (exp, 1))) + { + addr_wide_int off = mem_ref_offset (exp); + byte_offset += off.to_short_addr (); + } + exp = TREE_OPERAND (base, 0); + } + goto done; + } + + case TARGET_MEM_REF: + { + tree base = TREE_OPERAND (exp, 0); + if (valueize + && TREE_CODE (base) == SSA_NAME) + base = (*valueize) (base); + + /* Hand back the decl for MEM[&decl, off]. */ + if (TREE_CODE (base) == ADDR_EXPR) + { + if (TMR_INDEX (exp) || TMR_INDEX2 (exp)) + return NULL_TREE; + if (!integer_zerop (TMR_OFFSET (exp))) + { + addr_wide_int off = mem_ref_offset (exp); + byte_offset += off.to_short_addr (); + } + exp = TREE_OPERAND (base, 0); + } + goto done; + } + + default: + goto done; + } + + exp = TREE_OPERAND (exp, 0); + } +done: + + *poffset = byte_offset; + return exp; +} + +#endif /* _TREE_FLOW_INLINE_H */ diff --git a/gcc/tree-flow.h b/gcc/tree-flow.h new file mode 100644 index 00000000000..9d6ecb47d7e --- /dev/null +++ b/gcc/tree-flow.h @@ -0,0 +1,697 @@ +/* Data and Control Flow Analysis for Trees. + Copyright (C) 2001-2013 Free Software Foundation, Inc. + Contributed by Diego Novillo <dnovillo@redhat.com> + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 3, or (at your option) +any later version. + +GCC is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING3. If not see +<http://www.gnu.org/licenses/>. 
*/ + +#ifndef _TREE_FLOW_H +#define _TREE_FLOW_H 1 + +#include "bitmap.h" +#include "sbitmap.h" +#include "basic-block.h" +#include "hashtab.h" +#include "gimple.h" +#include "tree-ssa-operands.h" +#include "cgraph.h" +#include "ipa-reference.h" +#include "tree-ssa-alias.h" +#include "wide-int.h" + + +/* This structure is used to map a gimple statement to a label, + or list of labels to represent transaction restart. */ + +struct GTY(()) tm_restart_node { + gimple stmt; + tree label_or_list; +}; + +/* Gimple dataflow datastructure. All publicly available fields shall have + gimple_ accessor defined in tree-flow-inline.h, all publicly modifiable + fields should have gimple_set accessor. */ +struct GTY(()) gimple_df { + /* A vector of all the noreturn calls passed to modify_stmt. + cleanup_control_flow uses it to detect cases where a mid-block + indirect call has been turned into a noreturn call. When this + happens, all the instructions after the call are no longer + reachable and must be deleted as dead. */ + vec<gimple, va_gc> *modified_noreturn_calls; + + /* Array of all SSA_NAMEs used in the function. */ + vec<tree, va_gc> *ssa_names; + + /* Artificial variable used for the virtual operand FUD chain. */ + tree vop; + + /* The PTA solution for the ESCAPED artificial variable. */ + struct pt_solution escaped; + + /* A map of decls to artificial ssa-names that point to the partition + of the decl. */ + struct pointer_map_t * GTY((skip(""))) decls_to_pointers; + + /* Free list of SSA_NAMEs. */ + vec<tree, va_gc> *free_ssanames; + + /* Hashtable holding definition for symbol. If this field is not NULL, it + means that the first reference to this variable in the function is a + USE or a VUSE. In those cases, the SSA renamer creates an SSA name + for this variable with an empty defining statement. */ + htab_t GTY((param_is (union tree_node))) default_defs; + + /* True if there are any symbols that need to be renamed. 
*/ + unsigned int ssa_renaming_needed : 1; + + /* True if all virtual operands need to be renamed. */ + unsigned int rename_vops : 1; + + /* True if the code is in ssa form. */ + unsigned int in_ssa_p : 1; + + /* True if IPA points-to information was computed for this function. */ + unsigned int ipa_pta : 1; + + struct ssa_operands ssa_operands; + + /* Map gimple stmt to tree label (or list of labels) for transaction + restart and abort. */ + htab_t GTY ((param_is (struct tm_restart_node))) tm_restart; +}; + + +typedef struct +{ + htab_t htab; + PTR *slot; + PTR *limit; +} htab_iterator; + +/* Iterate through the elements of hashtable HTAB, using htab_iterator ITER, + storing each element in RESULT, which is of type TYPE. */ +#define FOR_EACH_HTAB_ELEMENT(HTAB, RESULT, TYPE, ITER) \ + for (RESULT = (TYPE) first_htab_element (&(ITER), (HTAB)); \ + !end_htab_p (&(ITER)); \ + RESULT = (TYPE) next_htab_element (&(ITER))) + +/* It is advantageous to avoid things like life analysis for variables which + do not need PHI nodes. This enum describes whether or not a particular + variable may need a PHI node. */ + +enum need_phi_state { + /* This is the default. If we are still in this state after finding + all the definition and use sites, then we will assume the variable + needs PHI nodes. This is probably an overly conservative assumption. */ + NEED_PHI_STATE_UNKNOWN, + + /* This state indicates that we have seen one or more sets of the + variable in a single basic block and that the sets dominate all + uses seen so far. If after finding all definition and use sites + we are still in this state, then the variable does not need any + PHI nodes. */ + NEED_PHI_STATE_NO, + + /* This state indicates that we have either seen multiple definitions of + the variable in multiple blocks, or that we encountered a use in a + block that was not dominated by the block containing the set(s) of + this variable. This variable is assumed to need PHI nodes. 
*/ + NEED_PHI_STATE_MAYBE +}; + + +/* Immediate use lists are used to directly access all uses for an SSA + name and get pointers to the statement for each use. + + The structure ssa_use_operand_d consists of PREV and NEXT pointers + to maintain the list. A USE pointer, which points to address where + the use is located and a LOC pointer which can point to the + statement where the use is located, or, in the case of the root + node, it points to the SSA name itself. + + The list is anchored by an occurrence of ssa_operand_d *in* the + ssa_name node itself (named 'imm_uses'). This node is uniquely + identified by having a NULL USE pointer. and the LOC pointer + pointing back to the ssa_name node itself. This node forms the + base for a circular list, and initially this is the only node in + the list. + + Fast iteration allows each use to be examined, but does not allow + any modifications to the uses or stmts. + + Normal iteration allows insertion, deletion, and modification. the + iterator manages this by inserting a marker node into the list + immediately before the node currently being examined in the list. + this marker node is uniquely identified by having null stmt *and* a + null use pointer. + + When iterating to the next use, the iteration routines check to see + if the node after the marker has changed. if it has, then the node + following the marker is now the next one to be visited. if not, the + marker node is moved past that node in the list (visualize it as + bumping the marker node through the list). this continues until + the marker node is moved to the original anchor position. the + marker node is then removed from the list. + + If iteration is halted early, the marker node must be removed from + the list before continuing. */ +typedef struct immediate_use_iterator_d +{ + /* This is the current use the iterator is processing. 
*/ + ssa_use_operand_t *imm_use; + /* This marks the last use in the list (use node from SSA_NAME) */ + ssa_use_operand_t *end_p; + /* This node is inserted and used to mark the end of the uses for a stmt. */ + ssa_use_operand_t iter_node; + /* This is the next ssa_name to visit. IMM_USE may get removed before + the next one is traversed to, so it must be cached early. */ + ssa_use_operand_t *next_imm_name; +} imm_use_iterator; + + +/* Use this iterator when simply looking at stmts. Adding, deleting or + modifying stmts will cause this iterator to malfunction. */ + +#define FOR_EACH_IMM_USE_FAST(DEST, ITER, SSAVAR) \ + for ((DEST) = first_readonly_imm_use (&(ITER), (SSAVAR)); \ + !end_readonly_imm_use_p (&(ITER)); \ + (void) ((DEST) = next_readonly_imm_use (&(ITER)))) + +/* Use this iterator to visit each stmt which has a use of SSAVAR. */ + +#define FOR_EACH_IMM_USE_STMT(STMT, ITER, SSAVAR) \ + for ((STMT) = first_imm_use_stmt (&(ITER), (SSAVAR)); \ + !end_imm_use_stmt_p (&(ITER)); \ + (void) ((STMT) = next_imm_use_stmt (&(ITER)))) + +/* Use this to terminate the FOR_EACH_IMM_USE_STMT loop early. Failure to + do so will result in leaving an iterator marker node in the immediate + use list, and nothing good will come from that. */ +#define BREAK_FROM_IMM_USE_STMT(ITER) \ + { \ + end_imm_use_stmt_traverse (&(ITER)); \ + break; \ + } + + +/* Use this iterator in combination with FOR_EACH_IMM_USE_STMT to + get access to each occurrence of ssavar on the stmt returned by + that iterator.. 
for instance: + + FOR_EACH_IMM_USE_STMT (stmt, iter, var) + { + FOR_EACH_IMM_USE_ON_STMT (use_p, iter) + { + SET_USE (use_p, blah); + } + update_stmt (stmt); + } */ + +#define FOR_EACH_IMM_USE_ON_STMT(DEST, ITER) \ + for ((DEST) = first_imm_use_on_stmt (&(ITER)); \ + !end_imm_use_on_stmt_p (&(ITER)); \ + (void) ((DEST) = next_imm_use_on_stmt (&(ITER)))) + + + +static inline void update_stmt (gimple); +static inline int get_lineno (const_gimple); + +/* Accessors for basic block annotations. */ +static inline gimple_seq phi_nodes (const_basic_block); +static inline void set_phi_nodes (basic_block, gimple_seq); + +/*--------------------------------------------------------------------------- + Global declarations +---------------------------------------------------------------------------*/ +struct int_tree_map { + unsigned int uid; + tree to; +}; + +/* Macros for showing usage statistics. */ +#define SCALE(x) ((unsigned long) ((x) < 1024*10 \ + ? (x) \ + : ((x) < 1024*1024*10 \ + ? (x) / 1024 \ + : (x) / (1024*1024)))) + +#define LABEL(x) ((x) < 1024*10 ? 'b' : ((x) < 1024*1024*10 ? 'k' : 'M')) + +#define PERCENT(x,y) ((float)(x) * 100.0 / (float)(y)) + +/*--------------------------------------------------------------------------- + OpenMP Region Tree +---------------------------------------------------------------------------*/ + +/* Parallel region information. Every parallel and workshare + directive is enclosed between two markers, the OMP_* directive + and a corresponding OMP_RETURN statement. */ + +struct omp_region +{ + /* The enclosing region. */ + struct omp_region *outer; + + /* First child region. */ + struct omp_region *inner; + + /* Next peer region. */ + struct omp_region *next; + + /* Block containing the omp directive as its last stmt. */ + basic_block entry; + + /* Block containing the OMP_RETURN as its last stmt. */ + basic_block exit; + + /* Block containing the OMP_CONTINUE as its last stmt. 
*/ + basic_block cont; + + /* If this is a combined parallel+workshare region, this is a list + of additional arguments needed by the combined parallel+workshare + library call. */ + vec<tree, va_gc> *ws_args; + + /* The code for the omp directive of this region. */ + enum gimple_code type; + + /* Schedule kind, only used for OMP_FOR type regions. */ + enum omp_clause_schedule_kind sched_kind; + + /* True if this is a combined parallel+workshare region. */ + bool is_combined_parallel; +}; + +extern struct omp_region *root_omp_region; +extern struct omp_region *new_omp_region (basic_block, enum gimple_code, + struct omp_region *); +extern void free_omp_regions (void); +void omp_expand_local (basic_block); +tree copy_var_decl (tree, tree, tree); + +/*--------------------------------------------------------------------------- + Function prototypes +---------------------------------------------------------------------------*/ +/* In tree-cfg.c */ + +/* Location to track pending stmt for edge insertion. 
*/ +#define PENDING_STMT(e) ((e)->insns.g) + +extern void delete_tree_cfg_annotations (void); +extern bool stmt_ends_bb_p (gimple); +extern bool is_ctrl_stmt (gimple); +extern bool is_ctrl_altering_stmt (gimple); +extern bool simple_goto_p (gimple); +extern bool stmt_can_make_abnormal_goto (gimple); +extern basic_block single_noncomplex_succ (basic_block bb); +extern void gimple_dump_bb (FILE *, basic_block, int, int); +extern void gimple_debug_bb (basic_block); +extern basic_block gimple_debug_bb_n (int); +extern void gimple_dump_cfg (FILE *, int); +extern void gimple_debug_cfg (int); +extern void dump_cfg_stats (FILE *); +extern void dot_cfg (void); +extern void debug_cfg_stats (void); +extern void debug_loops (int); +extern void debug_loop (struct loop *, int); +extern void debug (struct loop &ref); +extern void debug (struct loop *ptr); +extern void debug_verbose (struct loop &ref); +extern void debug_verbose (struct loop *ptr); +extern void debug_loop_num (unsigned, int); +extern void print_loops (FILE *, int); +extern void print_loops_bb (FILE *, basic_block, int, int); +extern void cleanup_dead_labels (void); +extern void group_case_labels_stmt (gimple); +extern void group_case_labels (void); +extern gimple first_stmt (basic_block); +extern gimple last_stmt (basic_block); +extern gimple last_and_only_stmt (basic_block); +extern edge find_taken_edge (basic_block, tree); +extern basic_block label_to_block_fn (struct function *, tree); +#define label_to_block(t) (label_to_block_fn (cfun, t)) +extern void notice_special_calls (gimple); +extern void clear_special_calls (void); +extern void verify_gimple_in_seq (gimple_seq); +extern void verify_gimple_in_cfg (struct function *); +extern tree gimple_block_label (basic_block); +extern void extract_true_false_edges_from_block (basic_block, edge *, edge *); +extern bool gimple_duplicate_sese_region (edge, edge, basic_block *, unsigned, + basic_block *, bool); +extern bool gimple_duplicate_sese_tail (edge, edge, 
basic_block *, unsigned, + basic_block *); +extern void gather_blocks_in_sese_region (basic_block entry, basic_block exit, + vec<basic_block> *bbs_p); +extern void add_phi_args_after_copy_bb (basic_block); +extern void add_phi_args_after_copy (basic_block *, unsigned, edge); +extern bool gimple_purge_dead_eh_edges (basic_block); +extern bool gimple_purge_all_dead_eh_edges (const_bitmap); +extern bool gimple_purge_dead_abnormal_call_edges (basic_block); +extern bool gimple_purge_all_dead_abnormal_call_edges (const_bitmap); +extern tree gimplify_build1 (gimple_stmt_iterator *, enum tree_code, + tree, tree); +extern tree gimplify_build2 (gimple_stmt_iterator *, enum tree_code, + tree, tree, tree); +extern tree gimplify_build3 (gimple_stmt_iterator *, enum tree_code, + tree, tree, tree, tree); +extern void init_empty_tree_cfg (void); +extern void init_empty_tree_cfg_for_function (struct function *); +extern void fold_cond_expr_cond (void); +extern void make_abnormal_goto_edges (basic_block, bool); +extern void replace_uses_by (tree, tree); +extern void start_recording_case_labels (void); +extern void end_recording_case_labels (void); +extern basic_block move_sese_region_to_fn (struct function *, basic_block, + basic_block, tree); +void remove_edge_and_dominated_blocks (edge); +bool tree_node_can_be_shared (tree); + +/* In tree-cfgcleanup.c */ +extern bitmap cfgcleanup_altered_bbs; +extern bool cleanup_tree_cfg (void); + +/* In tree-pretty-print.c. 
*/ +extern void dump_generic_bb (FILE *, basic_block, int, int); +extern int op_code_prio (enum tree_code); +extern int op_prio (const_tree); +extern const char *op_symbol_code (enum tree_code); + +/* In tree-dfa.c */ +extern void renumber_gimple_stmt_uids (void); +extern void renumber_gimple_stmt_uids_in_blocks (basic_block *, int); +extern void dump_dfa_stats (FILE *); +extern void debug_dfa_stats (void); +extern void dump_variable (FILE *, tree); +extern void debug_variable (tree); +extern void set_ssa_default_def (struct function *, tree, tree); +extern tree ssa_default_def (struct function *, tree); +extern tree get_or_create_ssa_default_def (struct function *, tree); +extern bool stmt_references_abnormal_ssa_name (gimple); +extern tree get_addr_base_and_unit_offset (tree, HOST_WIDE_INT *); +extern void dump_enumerated_decls (FILE *, int); + +/* In tree-phinodes.c */ +extern void reserve_phi_args_for_new_edge (basic_block); +extern void add_phi_node_to_bb (gimple phi, basic_block bb); +extern gimple create_phi_node (tree, basic_block); +extern void add_phi_arg (gimple, tree, edge, source_location); +extern void remove_phi_args (edge); +extern void remove_phi_node (gimple_stmt_iterator *, bool); +extern void remove_phi_nodes (basic_block); +extern void release_phi_node (gimple); +extern void phinodes_print_statistics (void); + +/* In gimple-low.c */ +extern void record_vars_into (tree, tree); +extern void record_vars (tree); +extern bool gimple_seq_may_fallthru (gimple_seq); +extern bool gimple_stmt_may_fallthru (gimple); +extern bool gimple_check_call_matching_types (gimple, tree, bool); + +/* In tree-into-ssa.c */ +void update_ssa (unsigned); +void delete_update_ssa (void); +tree create_new_def_for (tree, gimple, def_operand_p); +bool need_ssa_update_p (struct function *); +bool name_registered_for_update_p (tree); +void release_ssa_name_after_update_ssa (tree); +void mark_virtual_operands_for_renaming (struct function *); +tree get_current_def (tree); +void 
set_current_def (tree, tree); + +/* In tree-ssa-ccp.c */ +tree fold_const_aggregate_ref (tree); +tree gimple_fold_stmt_to_constant (gimple, tree (*)(tree)); + +/* In tree-ssa-dom.c */ +extern void dump_dominator_optimization_stats (FILE *); +extern void debug_dominator_optimization_stats (void); +int loop_depth_of_name (tree); +tree degenerate_phi_result (gimple); +bool simple_iv_increment_p (gimple); + +/* In tree-ssa-copy.c */ +extern void propagate_value (use_operand_p, tree); +extern void propagate_tree_value (tree *, tree); +extern void propagate_tree_value_into_stmt (gimple_stmt_iterator *, tree); +extern void replace_exp (use_operand_p, tree); +extern bool may_propagate_copy (tree, tree); +extern bool may_propagate_copy_into_stmt (gimple, tree); +extern bool may_propagate_copy_into_asm (tree); + +/* In tree-ssa-loop-ch.c */ +bool do_while_loop_p (struct loop *); + +/* Affine iv. */ + +typedef struct +{ + /* Iv = BASE + STEP * i. */ + tree base, step; + + /* True if this iv does not overflow. */ + bool no_overflow; +} affine_iv; + +/* Description of number of iterations of a loop. All the expressions inside + the structure can be evaluated at the end of the loop's preheader + (and due to ssa form, also anywhere inside the body of the loop). */ + +struct tree_niter_desc +{ + tree assumptions; /* The boolean expression. If this expression evaluates + to false, then the other fields in this structure + should not be used; there is no guarantee that they + will be correct. */ + tree may_be_zero; /* The boolean expression. If it evaluates to true, + the loop will exit in the first iteration (i.e. + its latch will not be executed), even if the niter + field says otherwise. */ + tree niter; /* The expression giving the number of iterations of + a loop (provided that assumptions == true and + may_be_zero == false), more precisely the number + of executions of the latch of the loop. */ + max_wide_int max; /* The upper bound on the number of iterations of + the loop. 
*/ + + /* The simplified shape of the exit condition. The loop exits if + CONTROL CMP BOUND is false, where CMP is one of NE_EXPR, + LT_EXPR, or GT_EXPR, and step of CONTROL is positive if CMP is + LE_EXPR and negative if CMP is GE_EXPR. This information is used + by loop unrolling. */ + affine_iv control; + tree bound; + enum tree_code cmp; +}; + +/* In tree-ssa-phiopt.c */ +bool empty_block_p (basic_block); +basic_block *blocks_in_phiopt_order (void); +bool nonfreeing_call_p (gimple); + +/* In tree-ssa-loop*.c */ + +unsigned int tree_ssa_lim (void); +unsigned int tree_ssa_unswitch_loops (void); +unsigned int canonicalize_induction_variables (void); +unsigned int tree_unroll_loops_completely (bool, bool); +unsigned int tree_ssa_prefetch_arrays (void); +void tree_ssa_iv_optimize (void); +unsigned tree_predictive_commoning (void); +tree canonicalize_loop_ivs (struct loop *, tree *, bool); +bool parallelize_loops (void); + +bool loop_only_exit_p (const struct loop *, const_edge); +bool number_of_iterations_exit (struct loop *, edge, + struct tree_niter_desc *niter, bool, + bool every_iteration = true); +tree find_loop_niter (struct loop *, edge *); +tree loop_niter_by_eval (struct loop *, edge); +tree find_loop_niter_by_eval (struct loop *, edge *); +void estimate_numbers_of_iterations (void); +bool scev_probably_wraps_p (tree, tree, gimple, struct loop *, bool); +bool convert_affine_scev (struct loop *, tree, tree *, tree *, gimple, bool); + +bool nowrap_type_p (tree); +enum ev_direction {EV_DIR_GROWS, EV_DIR_DECREASES, EV_DIR_UNKNOWN}; +enum ev_direction scev_direction (const_tree); + +void free_numbers_of_iterations_estimates (void); +void free_numbers_of_iterations_estimates_loop (struct loop *); +void rewrite_into_loop_closed_ssa (bitmap, unsigned); +void verify_loop_closed_ssa (bool); +bool for_each_index (tree *, bool (*) (tree, tree *, void *), void *); +void create_iv (tree, tree, tree, struct loop *, gimple_stmt_iterator *, bool, + tree *, tree *); 
+basic_block split_loop_exit_edge (edge); +void standard_iv_increment_position (struct loop *, gimple_stmt_iterator *, + bool *); +basic_block ip_end_pos (struct loop *); +basic_block ip_normal_pos (struct loop *); +bool gimple_duplicate_loop_to_header_edge (struct loop *, edge, + unsigned int, sbitmap, + edge, vec<edge> *, + int); +struct loop *slpeel_tree_duplicate_loop_to_edge_cfg (struct loop *, edge); +tree expand_simple_operations (tree); +void substitute_in_loop_info (struct loop *, tree, tree); +edge single_dom_exit (struct loop *); +bool can_unroll_loop_p (struct loop *loop, unsigned factor, + struct tree_niter_desc *niter); +void tree_unroll_loop (struct loop *, unsigned, + edge, struct tree_niter_desc *); +typedef void (*transform_callback)(struct loop *, void *); +void tree_transform_and_unroll_loop (struct loop *, unsigned, + edge, struct tree_niter_desc *, + transform_callback, void *); +bool contains_abnormal_ssa_name_p (tree); +bool stmt_dominates_stmt_p (gimple, gimple); + +/* In tree-ssa-dce.c */ +void mark_virtual_operand_for_renaming (tree); +void mark_virtual_phi_result_for_renaming (gimple); + +/* In tree-ssa-threadedge.c */ +extern void threadedge_initialize_values (void); +extern void threadedge_finalize_values (void); +extern vec<tree> ssa_name_values; +#define SSA_NAME_VALUE(x) \ + (SSA_NAME_VERSION(x) < ssa_name_values.length () \ + ? ssa_name_values[SSA_NAME_VERSION(x)] \ + : NULL_TREE) +extern void set_ssa_name_value (tree, tree); +extern bool potentially_threadable_block (basic_block); +extern void thread_across_edge (gimple, edge, bool, + vec<tree> *, tree (*) (gimple, gimple)); +extern void propagate_threaded_block_debug_into (basic_block, basic_block); + +/* In tree-ssa-loop-im.c */ +/* The possibilities of statement movement. */ + +enum move_pos + { + MOVE_IMPOSSIBLE, /* No movement -- side effect expression. */ + MOVE_PRESERVE_EXECUTION, /* Must not cause the non-executed statement + become executed -- memory accesses, ... 
*/ + MOVE_POSSIBLE /* Unlimited movement. */ + }; +extern enum move_pos movement_possibility (gimple); +char *get_lsm_tmp_name (tree, unsigned); + +/* In tree-flow-inline.h */ +static inline bool unmodifiable_var_p (const_tree); +static inline bool ref_contains_array_ref (const_tree); + +/* In tree-eh.c */ +extern void make_eh_edges (gimple); +extern bool make_eh_dispatch_edges (gimple); +extern edge redirect_eh_edge (edge, basic_block); +extern void redirect_eh_dispatch_edge (gimple, edge, basic_block); +extern bool stmt_could_throw_p (gimple); +extern bool stmt_can_throw_internal (gimple); +extern bool stmt_can_throw_external (gimple); +extern void add_stmt_to_eh_lp_fn (struct function *, gimple, int); +extern void add_stmt_to_eh_lp (gimple, int); +extern bool remove_stmt_from_eh_lp (gimple); +extern bool remove_stmt_from_eh_lp_fn (struct function *, gimple); +extern int lookup_stmt_eh_lp_fn (struct function *, gimple); +extern int lookup_stmt_eh_lp (gimple); +extern bool maybe_clean_eh_stmt_fn (struct function *, gimple); +extern bool maybe_clean_eh_stmt (gimple); +extern bool maybe_clean_or_replace_eh_stmt (gimple, gimple); +extern bool maybe_duplicate_eh_stmt_fn (struct function *, gimple, + struct function *, gimple, + struct pointer_map_t *, int); +extern bool maybe_duplicate_eh_stmt (gimple, gimple); +extern bool verify_eh_edges (gimple); +extern bool verify_eh_dispatch_edge (gimple); +extern void maybe_remove_unreachable_handlers (void); + +/* In tree-ssa-pre.c */ +void debug_value_expressions (unsigned int); + +/* In tree-loop-linear.c */ +extern void linear_transform_loops (void); +extern unsigned perfect_loop_nest_depth (struct loop *); + +/* In graphite.c */ +extern void graphite_transform_loops (void); + +/* In tree-data-ref.c */ +extern void tree_check_data_deps (void); + +/* In tree-ssa-loop-ivopts.c */ +bool expr_invariant_in_loop_p (struct loop *, tree); +bool stmt_invariant_in_loop_p (struct loop *, gimple); +struct loop 
*outermost_invariant_loop_for_expr (struct loop *, tree); +bool multiplier_allowed_in_address_p (HOST_WIDE_INT, enum machine_mode, + addr_space_t); +bool may_be_nonaddressable_p (tree expr); + +/* In tree-ssa-threadupdate.c. */ +extern bool thread_through_all_blocks (bool); +extern void register_jump_thread (vec<edge>, bool); + +/* In gimplify.c */ +tree force_gimple_operand_1 (tree, gimple_seq *, gimple_predicate, tree); +tree force_gimple_operand (tree, gimple_seq *, bool, tree); +tree force_gimple_operand_gsi_1 (gimple_stmt_iterator *, tree, + gimple_predicate, tree, + bool, enum gsi_iterator_update); +tree force_gimple_operand_gsi (gimple_stmt_iterator *, tree, bool, tree, + bool, enum gsi_iterator_update); +tree gimple_fold_indirect_ref (tree); + +/* In tree-ssa-live.c */ +extern void remove_unused_locals (void); +extern void dump_scope_blocks (FILE *, int); +extern void debug_scope_blocks (int); +extern void debug_scope_block (tree, int); + +/* In tree-ssa-address.c */ + +/* Description of a memory address. 
*/ + +struct mem_address +{ + tree symbol, base, index, step, offset; +}; + +struct affine_tree_combination; +tree create_mem_ref (gimple_stmt_iterator *, tree, + struct affine_tree_combination *, tree, tree, tree, bool); +rtx addr_for_mem_ref (struct mem_address *, addr_space_t, bool); +void get_address_description (tree, struct mem_address *); +tree maybe_fold_tmr (tree); + +unsigned int execute_fixup_cfg (void); +bool fixup_noreturn_call (gimple stmt); + +/* In ipa-pure-const.c */ +void warn_function_noreturn (tree); + +/* In tree-ssa-ter.c */ +bool stmt_is_replaceable_p (gimple); + +/* In tree-parloops.c */ +bool parallelized_function_p (tree); + +#include "tree-flow-inline.h" + +void swap_tree_operands (gimple, tree *, tree *); + +#endif /* _TREE_FLOW_H */ diff --git a/gcc/tree-inline.c b/gcc/tree-inline.c index 2221b9c5486..4543b270e5d 100644 --- a/gcc/tree-inline.c +++ b/gcc/tree-inline.c @@ -839,8 +839,7 @@ remap_gimple_op_r (tree *tp, int *walk_subtrees, void *data) *walk_subtrees = 0; else if (TREE_CODE (*tp) == INTEGER_CST) - *tp = build_int_cst_wide (new_type, TREE_INT_CST_LOW (*tp), - TREE_INT_CST_HIGH (*tp)); + *tp = wide_int_to_tree (new_type, *tp); else { *tp = copy_node (*tp); @@ -1018,8 +1017,7 @@ copy_tree_body_r (tree *tp, int *walk_subtrees, void *data) *walk_subtrees = 0; else if (TREE_CODE (*tp) == INTEGER_CST) - *tp = build_int_cst_wide (new_type, TREE_INT_CST_LOW (*tp), - TREE_INT_CST_HIGH (*tp)); + *tp = wide_int_to_tree (new_type, *tp); else { *tp = copy_node (*tp); @@ -1213,7 +1211,7 @@ remap_eh_region_tree_nr (tree old_t_nr, copy_body_data *id) { int old_nr, new_nr; - old_nr = tree_low_cst (old_t_nr, 0); + old_nr = tree_to_shwi (old_t_nr); new_nr = remap_eh_region_nr (old_nr, id); return build_int_cst (integer_type_node, new_nr); diff --git a/gcc/tree-mudflap.c b/gcc/tree-mudflap.c index 4c0d71d757b..9971c1bc994 100644 --- a/gcc/tree-mudflap.c +++ b/gcc/tree-mudflap.c @@ -854,10 +854,10 @@ mf_xform_derefs_1 (gimple_stmt_iterator *iter, 
tree *tp, addr = build1 (ADDR_EXPR, build_pointer_type (type), t); limit = fold_build2_loc (location, MINUS_EXPR, mf_uintptr_type, - fold_build2_loc (location, PLUS_EXPR, mf_uintptr_type, - fold_convert (mf_uintptr_type, addr), - size), - integer_one_node); + fold_build2_loc (location, PLUS_EXPR, mf_uintptr_type, + fold_convert (mf_uintptr_type, addr), + size), + integer_one_node); } break; diff --git a/gcc/tree-object-size.c b/gcc/tree-object-size.c index 0bdbc489f8c..92745538d0d 100644 --- a/gcc/tree-object-size.c +++ b/gcc/tree-object-size.c @@ -77,8 +77,8 @@ static unsigned HOST_WIDE_INT offset_limit; static void init_offset_limit (void) { - if (host_integerp (TYPE_MAX_VALUE (sizetype), 1)) - offset_limit = tree_low_cst (TYPE_MAX_VALUE (sizetype), 1); + if (tree_fits_uhwi_p (TYPE_MAX_VALUE (sizetype))) + offset_limit = tree_to_uhwi (TYPE_MAX_VALUE (sizetype)); else offset_limit = -1; offset_limit /= 2; @@ -106,7 +106,7 @@ compute_object_offset (const_tree expr, const_tree var) t = TREE_OPERAND (expr, 1); off = size_binop (PLUS_EXPR, DECL_FIELD_OFFSET (t), - size_int (tree_low_cst (DECL_FIELD_BIT_OFFSET (t), 1) + size_int (tree_to_uhwi (DECL_FIELD_BIT_OFFSET (t)) / BITS_PER_UNIT)); break; @@ -141,7 +141,7 @@ compute_object_offset (const_tree expr, const_tree var) case MEM_REF: gcc_assert (TREE_CODE (TREE_OPERAND (expr, 0)) == ADDR_EXPR); - return double_int_to_tree (sizetype, mem_ref_offset (expr)); + return wide_int_to_tree (sizetype, mem_ref_offset (expr)); default: return error_mark_node; @@ -191,10 +191,10 @@ addr_object_size (struct object_size_info *osi, const_tree ptr, } if (sz != unknown[object_size_type]) { - double_int dsz = double_int::from_uhwi (sz) - mem_ref_offset (pt_var); - if (dsz.is_negative ()) + addr_wide_int dsz = addr_wide_int (sz) - mem_ref_offset (pt_var); + if (wi::neg_p (dsz)) sz = 0; - else if (dsz.fits_uhwi ()) + else if (wi::fits_uhwi_p (dsz)) sz = dsz.to_uhwi (); else sz = unknown[object_size_type]; @@ -205,16 +205,16 @@ 
addr_object_size (struct object_size_info *osi, const_tree ptr, } else if (pt_var && DECL_P (pt_var) - && host_integerp (DECL_SIZE_UNIT (pt_var), 1) + && tree_fits_uhwi_p (DECL_SIZE_UNIT (pt_var)) && (unsigned HOST_WIDE_INT) - tree_low_cst (DECL_SIZE_UNIT (pt_var), 1) < offset_limit) + tree_to_uhwi (DECL_SIZE_UNIT (pt_var)) < offset_limit) pt_var_size = DECL_SIZE_UNIT (pt_var); else if (pt_var && TREE_CODE (pt_var) == STRING_CST && TYPE_SIZE_UNIT (TREE_TYPE (pt_var)) - && host_integerp (TYPE_SIZE_UNIT (TREE_TYPE (pt_var)), 1) + && tree_fits_uhwi_p (TYPE_SIZE_UNIT (TREE_TYPE (pt_var))) && (unsigned HOST_WIDE_INT) - tree_low_cst (TYPE_SIZE_UNIT (TREE_TYPE (pt_var)), 1) + tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (pt_var))) < offset_limit) pt_var_size = TYPE_SIZE_UNIT (TREE_TYPE (pt_var)); else @@ -239,7 +239,7 @@ addr_object_size (struct object_size_info *osi, const_tree ptr, if (var != pt_var && TREE_CODE (var) == ARRAY_REF) var = TREE_OPERAND (var, 0); if (! TYPE_SIZE_UNIT (TREE_TYPE (var)) - || ! host_integerp (TYPE_SIZE_UNIT (TREE_TYPE (var)), 1) + || ! 
tree_fits_uhwi_p (TYPE_SIZE_UNIT (TREE_TYPE (var))) || (pt_var_size && tree_int_cst_lt (pt_var_size, TYPE_SIZE_UNIT (TREE_TYPE (var))))) @@ -367,8 +367,8 @@ addr_object_size (struct object_size_info *osi, const_tree ptr, else bytes = pt_var_size; - if (host_integerp (bytes, 1)) - return tree_low_cst (bytes, 1); + if (tree_fits_uhwi_p (bytes)) + return tree_to_uhwi (bytes); return unknown[object_size_type]; } @@ -398,9 +398,9 @@ alloc_object_size (const_gimple call, int object_size_type) { tree p = TREE_VALUE (alloc_size); - arg1 = TREE_INT_CST_LOW (TREE_VALUE (p))-1; + arg1 = tree_to_hwi (TREE_VALUE (p))-1; if (TREE_CHAIN (p)) - arg2 = TREE_INT_CST_LOW (TREE_VALUE (TREE_CHAIN (p)))-1; + arg2 = tree_to_hwi (TREE_VALUE (TREE_CHAIN (p)))-1; } if (DECL_BUILT_IN_CLASS (callee) == BUILT_IN_NORMAL) @@ -431,8 +431,8 @@ alloc_object_size (const_gimple call, int object_size_type) else if (arg1 >= 0) bytes = fold_convert (sizetype, gimple_call_arg (call, arg1)); - if (bytes && host_integerp (bytes, 1)) - return tree_low_cst (bytes, 1); + if (bytes && tree_fits_uhwi_p (bytes)) + return tree_to_uhwi (bytes); return unknown[object_size_type]; } @@ -792,13 +792,13 @@ plus_stmt_object_size (struct object_size_info *osi, tree var, gimple stmt) && (TREE_CODE (op0) == SSA_NAME || TREE_CODE (op0) == ADDR_EXPR)) { - if (! host_integerp (op1, 1)) + if (! tree_fits_uhwi_p (op1)) bytes = unknown[object_size_type]; else if (TREE_CODE (op0) == SSA_NAME) - return merge_object_sizes (osi, var, op0, tree_low_cst (op1, 1)); + return merge_object_sizes (osi, var, op0, tree_to_uhwi (op1)); else { - unsigned HOST_WIDE_INT off = tree_low_cst (op1, 1); + unsigned HOST_WIDE_INT off = tree_to_uhwi (op1); /* op0 will be ADDR_EXPR here. 
*/ bytes = addr_object_size (osi, op0, object_size_type); @@ -1224,10 +1224,10 @@ compute_object_sizes (void) { tree ost = gimple_call_arg (call, 1); - if (host_integerp (ost, 1)) + if (tree_fits_uhwi_p (ost)) { unsigned HOST_WIDE_INT object_size_type - = tree_low_cst (ost, 1); + = tree_to_uhwi (ost); if (object_size_type < 2) result = fold_convert (size_type_node, diff --git a/gcc/tree-predcom.c b/gcc/tree-predcom.c index ba3e1318e7b..58d2ede6507 100644 --- a/gcc/tree-predcom.c +++ b/gcc/tree-predcom.c @@ -201,6 +201,7 @@ along with GCC; see the file COPYING3. If not see #include "tree-pass.h" #include "tree-affine.h" #include "tree-inline.h" +#include "wide-int-print.h" /* The maximum number of iterations between the considered memory references. */ @@ -228,7 +229,7 @@ typedef struct dref_d unsigned distance; /* Number of iterations offset from the first reference in the component. */ - double_int offset; + max_wide_int offset; /* Number of the reference in a component, in dominance ordering. */ unsigned pos; @@ -344,7 +345,7 @@ dump_dref (FILE *file, dref ref) DR_IS_READ (ref->ref) ? 
"" : ", write"); fprintf (file, " offset "); - dump_double_int (file, ref->offset, false); + print_decs (ref->offset, file); fprintf (file, "\n"); fprintf (file, " distance %u\n", ref->distance); @@ -617,7 +618,7 @@ aff_combination_dr_offset (struct data_reference *dr, aff_tree *offset) tree_to_aff_combination_expand (DR_OFFSET (dr), type, offset, &name_expansions); - aff_combination_const (&delta, type, tree_to_double_int (DR_INIT (dr))); + aff_combination_const (&delta, type, DR_INIT (dr)); aff_combination_add (offset, &delta); } @@ -629,7 +630,7 @@ aff_combination_dr_offset (struct data_reference *dr, aff_tree *offset) static bool determine_offset (struct data_reference *a, struct data_reference *b, - double_int *off) + max_wide_int *off) { aff_tree diff, baseb, step; tree typea, typeb; @@ -650,7 +651,7 @@ determine_offset (struct data_reference *a, struct data_reference *b, { /* If the references have loop invariant address, check that they access exactly the same location. */ - *off = double_int_zero; + *off = 0; return (operand_equal_p (DR_OFFSET (a), DR_OFFSET (b), 0) && operand_equal_p (DR_INIT (a), DR_INIT (b), 0)); } @@ -659,7 +660,7 @@ determine_offset (struct data_reference *a, struct data_reference *b, is a multiple of step. 
*/ aff_combination_dr_offset (a, &diff); aff_combination_dr_offset (b, &baseb); - aff_combination_scale (&baseb, double_int_minus_one); + aff_combination_scale (&baseb, -1); aff_combination_add (&diff, &baseb); tree_to_aff_combination_expand (DR_STEP (a), TREE_TYPE (DR_STEP (a)), @@ -733,7 +734,7 @@ split_data_refs_to_components (struct loop *loop, FOR_EACH_VEC_ELT (depends, i, ddr) { - double_int dummy_off; + max_wide_int dummy_off; if (DDR_ARE_DEPENDENT (ddr) == chrec_known) continue; @@ -776,7 +777,7 @@ split_data_refs_to_components (struct loop *loop, dataref = XCNEW (struct dref_d); dataref->ref = dr; dataref->stmt = DR_STMT (dr); - dataref->offset = double_int_zero; + dataref->offset = 0; dataref->distance = 0; dataref->always_accessed @@ -832,7 +833,7 @@ suitable_component_p (struct loop *loop, struct component *comp) first = comp->refs[0]; ok = suitable_reference_p (first->ref, &comp->comp_step); gcc_assert (ok); - first->offset = double_int_zero; + first->offset = 0; for (i = 1; comp->refs.iterate (i, &a); i++) { @@ -896,7 +897,7 @@ order_drefs (const void *a, const void *b) { const dref *const da = (const dref *) a; const dref *const db = (const dref *) b; - int offcmp = (*da)->offset.scmp ((*db)->offset); + int offcmp = wi::cmps ((*da)->offset, (*db)->offset); if (offcmp != 0) return offcmp; @@ -918,16 +919,16 @@ static void add_ref_to_chain (chain_p chain, dref ref) { dref root = get_chain_root (chain); - double_int dist; + max_wide_int dist; - gcc_assert (root->offset.sle (ref->offset)); + gcc_assert (wi::les_p (root->offset, ref->offset)); dist = ref->offset - root->offset; - if (double_int::from_uhwi (MAX_DISTANCE).ule (dist)) + if (wi::leu_p (MAX_DISTANCE, dist)) { free (ref); return; } - gcc_assert (dist.fits_uhwi ()); + gcc_assert (wi::fits_uhwi_p (dist)); chain->refs.safe_push (ref); @@ -1022,7 +1023,7 @@ valid_initializer_p (struct data_reference *ref, unsigned distance, struct data_reference *root) { aff_tree diff, base, step; - double_int off; 
+ max_wide_int off; /* Both REF and ROOT must be accessing the same object. */ if (!operand_equal_p (DR_BASE_ADDRESS (ref), DR_BASE_ADDRESS (root), 0)) @@ -1042,7 +1043,7 @@ valid_initializer_p (struct data_reference *ref, -DISTANCE-th iteration. */ aff_combination_dr_offset (root, &diff); aff_combination_dr_offset (ref, &base); - aff_combination_scale (&base, double_int_minus_one); + aff_combination_scale (&base, -1); aff_combination_add (&diff, &base); tree_to_aff_combination_expand (DR_STEP (root), TREE_TYPE (DR_STEP (root)), @@ -1050,7 +1051,7 @@ valid_initializer_p (struct data_reference *ref, if (!aff_combination_constant_multiple_p (&diff, &step, &off)) return false; - if (off != double_int::from_uhwi (distance)) + if (off != distance) return false; return true; @@ -1178,7 +1179,7 @@ determine_roots_comp (struct loop *loop, unsigned i; dref a; chain_p chain = NULL; - double_int last_ofs = double_int_zero; + max_wide_int last_ofs = 0; /* Invariants are handled specially. */ if (comp->comp_step == RS_INVARIANT) @@ -1193,7 +1194,7 @@ determine_roots_comp (struct loop *loop, FOR_EACH_VEC_ELT (comp->refs, i, a) { if (!chain || DR_IS_WRITE (a->ref) - || double_int::from_uhwi (MAX_DISTANCE).ule (a->offset - last_ofs)) + || wi::leu_p (MAX_DISTANCE, a->offset - last_ofs)) { if (nontrivial_chain_p (chain)) { diff --git a/gcc/tree-pretty-print.c b/gcc/tree-pretty-print.c index f8a0342f944..54779bbdd0a 100644 --- a/gcc/tree-pretty-print.c +++ b/gcc/tree-pretty-print.c @@ -32,6 +32,7 @@ along with GCC; see the file COPYING3. If not see #include "dumpfile.h" #include "value-prof.h" #include "predict.h" +#include "wide-int-print.h" #include <new> // For placement-new. 
@@ -271,8 +272,8 @@ dump_array_domain (pretty_printer *buffer, tree domain, int spc, int flags) if (min && max && integer_zerop (min) - && host_integerp (max, 0)) - pp_wide_integer (buffer, TREE_INT_CST_LOW (max) + 1); + && tree_fits_shwi_p (max)) + pp_wide_integer (buffer, tree_to_shwi (max) + 1); else { if (min) @@ -1225,14 +1226,27 @@ dump_generic_node (pretty_printer *buffer, tree node, int spc, int flags, NB: Neither of the following divisors can be trivially used to recover the original literal: - TREE_INT_CST_LOW (TYPE_SIZE_UNIT (TREE_TYPE (node))) + tree_to_hwi (TYPE_SIZE_UNIT (TREE_TYPE (node))) TYPE_PRECISION (TREE_TYPE (TREE_TYPE (node))) */ - pp_wide_integer (buffer, TREE_INT_CST_LOW (node)); + pp_wide_integer (buffer, tree_to_hwi (node)); pp_string (buffer, "B"); /* pseudo-unit */ } + else if (tree_fits_shwi_p (node)) + pp_wide_integer (buffer, tree_to_shwi (node)); + else if (tree_fits_uhwi_p (node)) + pp_unsigned_wide_integer (buffer, tree_to_uhwi (node)); else - pp_double_int (buffer, tree_to_double_int (node), - TYPE_UNSIGNED (TREE_TYPE (node))); + { + wide_int val = node; + + if (wi::neg_p (val, TYPE_SIGN (TREE_TYPE (node)))) + { + pp_minus (buffer); + val = -val; + } + print_hex (val, pp_buffer (buffer)->digit_buffer); + pp_string (buffer, pp_buffer (buffer)->digit_buffer); + } if (TREE_OVERFLOW (node)) pp_string (buffer, "(OVF)"); break; @@ -1480,7 +1494,7 @@ dump_generic_node (pretty_printer *buffer, tree node, int spc, int flags, tree field, val; bool is_struct_init = false; bool is_array_init = false; - double_int curidx = double_int_zero; + max_wide_int curidx; pp_left_brace (buffer); if (TREE_CLOBBER_P (node)) pp_string (buffer, "CLOBBER"); @@ -1495,7 +1509,7 @@ dump_generic_node (pretty_printer *buffer, tree node, int spc, int flags, { tree minv = TYPE_MIN_VALUE (TYPE_DOMAIN (TREE_TYPE (node))); is_array_init = true; - curidx = tree_to_double_int (minv); + curidx = minv; } FOR_EACH_CONSTRUCTOR_ELT (CONSTRUCTOR_ELTS (node), ix, field, val) 
{ @@ -1509,7 +1523,7 @@ dump_generic_node (pretty_printer *buffer, tree node, int spc, int flags, } else if (is_array_init && (TREE_CODE (field) != INTEGER_CST - || tree_to_double_int (field) != curidx)) + || curidx != field)) { pp_left_bracket (buffer); if (TREE_CODE (field) == RANGE_EXPR) @@ -1520,17 +1534,17 @@ dump_generic_node (pretty_printer *buffer, tree node, int spc, int flags, dump_generic_node (buffer, TREE_OPERAND (field, 1), spc, flags, false); if (TREE_CODE (TREE_OPERAND (field, 1)) == INTEGER_CST) - curidx = tree_to_double_int (TREE_OPERAND (field, 1)); + curidx = TREE_OPERAND (field, 1); } else dump_generic_node (buffer, field, spc, flags, false); if (TREE_CODE (field) == INTEGER_CST) - curidx = tree_to_double_int (field); + curidx = field; pp_string (buffer, "]="); } } if (is_array_init) - curidx += double_int_one; + curidx += 1; if (val && TREE_CODE (val) == ADDR_EXPR) if (TREE_CODE (TREE_OPERAND (val, 0)) == FUNCTION_DECL) val = TREE_OPERAND (val, 0); diff --git a/gcc/tree-sra.c b/gcc/tree-sra.c index 82520bafcdb..a4e6f629392 100644 --- a/gcc/tree-sra.c +++ b/gcc/tree-sra.c @@ -732,17 +732,17 @@ type_internals_preclude_sra_p (tree type, const char **msg) *msg = "zero structure field size"; return true; } - if (!host_integerp (DECL_FIELD_OFFSET (fld), 1)) + if (!tree_fits_uhwi_p (DECL_FIELD_OFFSET (fld))) { *msg = "structure field offset not fixed"; return true; } - if (!host_integerp (DECL_SIZE (fld), 1)) + if (!tree_fits_uhwi_p (DECL_SIZE (fld))) { *msg = "structure field size not fixed"; return true; } - if (!host_integerp (bit_position (fld), 0)) + if (!tree_fits_shwi_p (bit_position (fld))) { *msg = "structure field size too big"; return true; @@ -979,7 +979,7 @@ completely_scalarize_record (tree base, tree decl, HOST_WIDE_INT offset, struct access *access; HOST_WIDE_INT size; - size = tree_low_cst (DECL_SIZE (fld), 1); + size = tree_to_uhwi (DECL_SIZE (fld)); access = create_access_1 (base, pos, size); access->expr = nref; access->type = ft; 
@@ -998,7 +998,7 @@ completely_scalarize_record (tree base, tree decl, HOST_WIDE_INT offset, static void completely_scalarize_var (tree var) { - HOST_WIDE_INT size = tree_low_cst (DECL_SIZE (var), 1); + HOST_WIDE_INT size = tree_to_uhwi (DECL_SIZE (var)); struct access *access; access = create_access_1 (var, 0, size); @@ -1360,11 +1360,11 @@ compare_access_positions (const void *a, const void *b) return TYPE_PRECISION (f2->type) - TYPE_PRECISION (f1->type); /* Put any integral type with non-full precision last. */ else if (INTEGRAL_TYPE_P (f1->type) - && (TREE_INT_CST_LOW (TYPE_SIZE (f1->type)) + && (tree_to_hwi (TYPE_SIZE (f1->type)) != TYPE_PRECISION (f1->type))) return 1; else if (INTEGRAL_TYPE_P (f2->type) - && (TREE_INT_CST_LOW (TYPE_SIZE (f2->type)) + && (tree_to_hwi (TYPE_SIZE (f2->type)) != TYPE_PRECISION (f2->type))) return -1; /* Stabilize the sort. */ @@ -1426,7 +1426,7 @@ make_fancy_name_1 (tree expr) index = TREE_OPERAND (expr, 1); if (TREE_CODE (index) != INTEGER_CST) break; - sprintf (buffer, HOST_WIDE_INT_PRINT_DEC, TREE_INT_CST_LOW (index)); + sprintf (buffer, HOST_WIDE_INT_PRINT_DEC, tree_to_hwi (index)); obstack_grow (&name_obstack, buffer, strlen (buffer)); break; @@ -1440,7 +1440,7 @@ make_fancy_name_1 (tree expr) { obstack_1grow (&name_obstack, '$'); sprintf (buffer, HOST_WIDE_INT_PRINT_DEC, - TREE_INT_CST_LOW (TREE_OPERAND (expr, 1))); + tree_to_hwi (TREE_OPERAND (expr, 1))); obstack_grow (&name_obstack, buffer, strlen (buffer)); } break; @@ -1641,14 +1641,14 @@ build_user_friendly_ref_for_offset (tree *res, tree type, HOST_WIDE_INT offset, continue; tr_pos = bit_position (fld); - if (!tr_pos || !host_integerp (tr_pos, 1)) + if (!tr_pos || !tree_fits_uhwi_p (tr_pos)) continue; - pos = TREE_INT_CST_LOW (tr_pos); + pos = tree_to_uhwi (tr_pos); gcc_assert (TREE_CODE (type) == RECORD_TYPE || pos == 0); tr_size = DECL_SIZE (fld); - if (!tr_size || !host_integerp (tr_size, 1)) + if (!tr_size || !tree_fits_uhwi_p (tr_size)) continue; - size = 
TREE_INT_CST_LOW (tr_size); + size = tree_to_uhwi (tr_size); if (size == 0) { if (pos != offset) @@ -1671,9 +1671,9 @@ build_user_friendly_ref_for_offset (tree *res, tree type, HOST_WIDE_INT offset, case ARRAY_TYPE: tr_size = TYPE_SIZE (TREE_TYPE (type)); - if (!tr_size || !host_integerp (tr_size, 1)) + if (!tr_size || !tree_fits_uhwi_p (tr_size)) return false; - el_size = tree_low_cst (tr_size, 1); + el_size = tree_to_uhwi (tr_size); minidx = TYPE_MIN_VALUE (TYPE_DOMAIN (type)); if (TREE_CODE (minidx) != INTEGER_CST || el_size == 0) @@ -1749,12 +1749,12 @@ maybe_add_sra_candidate (tree var) reject (var, "has incomplete type"); return false; } - if (!host_integerp (TYPE_SIZE (type), 1)) + if (!tree_fits_uhwi_p (TYPE_SIZE (type))) { reject (var, "type size not fixed"); return false; } - if (tree_low_cst (TYPE_SIZE (type), 1) == 0) + if (tree_to_uhwi (TYPE_SIZE (type)) == 0) { reject (var, "type size is zero"); return false; @@ -2109,7 +2109,7 @@ expr_with_var_bounded_array_refs_p (tree expr) while (handled_component_p (expr)) { if (TREE_CODE (expr) == ARRAY_REF - && !host_integerp (array_ref_low_bound (expr), 0)) + && !tree_fits_shwi_p (array_ref_low_bound (expr))) return true; expr = TREE_OPERAND (expr, 0); } @@ -2478,7 +2478,7 @@ analyze_all_variable_accesses (void) if (TREE_CODE (var) == VAR_DECL && type_consists_of_records_p (TREE_TYPE (var))) { - if ((unsigned) tree_low_cst (TYPE_SIZE (TREE_TYPE (var)), 1) + if ((unsigned) tree_to_uhwi (TYPE_SIZE (TREE_TYPE (var))) <= max_total_scalarization_size) { completely_scalarize_var (var); @@ -2789,12 +2789,12 @@ sra_modify_expr (tree *expr, gimple_stmt_iterator *gsi, bool write) { HOST_WIDE_INT start_offset, chunk_size; if (bfr - && host_integerp (TREE_OPERAND (bfr, 1), 1) - && host_integerp (TREE_OPERAND (bfr, 2), 1)) + && tree_fits_uhwi_p (TREE_OPERAND (bfr, 1)) + && tree_fits_uhwi_p (TREE_OPERAND (bfr, 2))) { - chunk_size = tree_low_cst (TREE_OPERAND (bfr, 1), 1); + chunk_size = tree_to_uhwi (TREE_OPERAND (bfr, 1)); 
start_offset = access->offset - + tree_low_cst (TREE_OPERAND (bfr, 2), 1); + + tree_to_uhwi (TREE_OPERAND (bfr, 2)); } else start_offset = chunk_size = 0; @@ -3683,8 +3683,8 @@ find_param_candidates (void) continue; if (!COMPLETE_TYPE_P (type) - || !host_integerp (TYPE_SIZE (type), 1) - || tree_low_cst (TYPE_SIZE (type), 1) == 0 + || !tree_fits_uhwi_p (TYPE_SIZE (type)) + || tree_to_uhwi (TYPE_SIZE (type)) == 0 || (AGGREGATE_TYPE_P (type) && type_internals_preclude_sra_p (type, &msg))) continue; @@ -4057,9 +4057,9 @@ splice_param_accesses (tree parm, bool *ro_grp) } if (POINTER_TYPE_P (TREE_TYPE (parm))) - agg_size = tree_low_cst (TYPE_SIZE (TREE_TYPE (TREE_TYPE (parm))), 1); + agg_size = tree_to_uhwi (TYPE_SIZE (TREE_TYPE (TREE_TYPE (parm)))); else - agg_size = tree_low_cst (TYPE_SIZE (TREE_TYPE (parm)), 1); + agg_size = tree_to_uhwi (TYPE_SIZE (TREE_TYPE (parm))); if (total_size >= agg_size) return NULL; @@ -4078,13 +4078,13 @@ decide_one_param_reduction (struct access *repr) tree parm; parm = repr->base; - cur_parm_size = tree_low_cst (TYPE_SIZE (TREE_TYPE (parm)), 1); + cur_parm_size = tree_to_uhwi (TYPE_SIZE (TREE_TYPE (parm))); gcc_assert (cur_parm_size > 0); if (POINTER_TYPE_P (TREE_TYPE (parm))) { by_ref = true; - agg_size = tree_low_cst (TYPE_SIZE (TREE_TYPE (TREE_TYPE (parm))), 1); + agg_size = tree_to_uhwi (TYPE_SIZE (TREE_TYPE (TREE_TYPE (parm)))); } else { @@ -4511,7 +4511,7 @@ sra_ipa_modify_expr (tree *expr, bool convert, if (TREE_CODE (base) == MEM_REF) { - offset += mem_ref_offset (base).low * BITS_PER_UNIT; + offset += mem_ref_offset (base).to_short_addr () * BITS_PER_UNIT; base = TREE_OPERAND (base, 0); } diff --git a/gcc/tree-ssa-address.c b/gcc/tree-ssa-address.c index 49b69b82da3..af7c75c693b 100644 --- a/gcc/tree-ssa-address.c +++ b/gcc/tree-ssa-address.c @@ -197,15 +197,16 @@ addr_for_mem_ref (struct mem_address *addr, addr_space_t as, struct mem_addr_template *templ; if (addr->step && !integer_onep (addr->step)) - st = 
immed_double_int_const (tree_to_double_int (addr->step), pointer_mode); + st = immed_wide_int_const (wide_int (addr->step), pointer_mode); else st = NULL_RTX; if (addr->offset && !integer_zerop (addr->offset)) - off = immed_double_int_const - (tree_to_double_int (addr->offset) - .sext (TYPE_PRECISION (TREE_TYPE (addr->offset))), - pointer_mode); + { + addr_wide_int dc = wi::sext (addr_wide_int (addr->offset), + TYPE_PRECISION (TREE_TYPE (addr->offset))); + off = immed_wide_int_const (dc, pointer_mode); + } else off = NULL_RTX; @@ -413,7 +414,7 @@ move_fixed_address_to_symbol (struct mem_address *parts, aff_tree *addr) for (i = 0; i < addr->n; i++) { - if (!addr->elts[i].coef.is_one ()) + if (addr->elts[i].coef != 1) continue; val = addr->elts[i].val; @@ -441,7 +442,7 @@ move_hint_to_base (tree type, struct mem_address *parts, tree base_hint, for (i = 0; i < addr->n; i++) { - if (!addr->elts[i].coef.is_one ()) + if (addr->elts[i].coef != 1) continue; val = addr->elts[i].val; @@ -473,7 +474,7 @@ move_pointer_to_base (struct mem_address *parts, aff_tree *addr) for (i = 0; i < addr->n; i++) { - if (!addr->elts[i].coef.is_one ()) + if (addr->elts[i].coef != 1) continue; val = addr->elts[i].val; @@ -509,7 +510,7 @@ move_variant_to_index (struct mem_address *parts, aff_tree *addr, tree v) return; parts->index = fold_convert (sizetype, val); - parts->step = double_int_to_tree (sizetype, addr->elts[i].coef); + parts->step = wide_int_to_tree (sizetype, addr->elts[i].coef); aff_combination_remove_elt (addr, i); } @@ -552,16 +553,16 @@ most_expensive_mult_to_index (tree type, struct mem_address *parts, addr_space_t as = TYPE_ADDR_SPACE (type); enum machine_mode address_mode = targetm.addr_space.address_mode (as); HOST_WIDE_INT coef; - double_int best_mult, amult, amult_neg; + addr_wide_int best_mult, amult, amult_neg; unsigned best_mult_cost = 0, acost; tree mult_elt = NULL_TREE, elt; unsigned i, j; enum tree_code op_code; - best_mult = double_int_zero; + best_mult = 0; for (i 
= 0; i < addr->n; i++) { - if (!addr->elts[i].coef.fits_shwi ()) + if (!wi::fits_shwi_p (addr->elts[i].coef)) continue; coef = addr->elts[i].coef.to_shwi (); @@ -574,7 +575,7 @@ most_expensive_mult_to_index (tree type, struct mem_address *parts, if (acost > best_mult_cost) { best_mult_cost = acost; - best_mult = addr->elts[i].coef; + best_mult = addr_wide_int::from (addr->elts[i].coef, SIGNED); } } @@ -584,8 +585,8 @@ most_expensive_mult_to_index (tree type, struct mem_address *parts, /* Collect elements multiplied by best_mult. */ for (i = j = 0; i < addr->n; i++) { - amult = addr->elts[i].coef; - amult_neg = double_int_ext_for_comb (-amult, addr); + amult = addr_wide_int::from (addr->elts[i].coef, SIGNED); + amult_neg = -wi::sext (amult, TYPE_PRECISION (addr->type)); if (amult == best_mult) op_code = PLUS_EXPR; @@ -609,7 +610,7 @@ most_expensive_mult_to_index (tree type, struct mem_address *parts, addr->n = j; parts->index = mult_elt; - parts->step = double_int_to_tree (sizetype, best_mult); + parts->step = wide_int_to_tree (sizetype, best_mult); } /* Splits address ADDR for a memory access of type TYPE into PARTS. 
@@ -637,8 +638,8 @@ addr_to_parts (tree type, aff_tree *addr, tree iv_cand, parts->index = NULL_TREE; parts->step = NULL_TREE; - if (!addr->offset.is_zero ()) - parts->offset = double_int_to_tree (sizetype, addr->offset); + if (addr->offset != 0) + parts->offset = wide_int_to_tree (sizetype, addr->offset); else parts->offset = NULL_TREE; @@ -669,9 +670,9 @@ addr_to_parts (tree type, aff_tree *addr, tree iv_cand, for (i = 0; i < addr->n; i++) { part = fold_convert (sizetype, addr->elts[i].val); - if (!addr->elts[i].coef.is_one ()) + if (addr->elts[i].coef != 1) part = fold_build2 (MULT_EXPR, sizetype, part, - double_int_to_tree (sizetype, addr->elts[i].coef)); + wide_int_to_tree (sizetype, addr->elts[i].coef)); add_to_parts (parts, part); } if (addr->rest) @@ -876,11 +877,11 @@ copy_ref_info (tree new_ref, tree old_ref) && !(TREE_CODE (new_ref) == TARGET_MEM_REF && (TMR_INDEX2 (new_ref) || (TMR_STEP (new_ref) - && (TREE_INT_CST_LOW (TMR_STEP (new_ref)) + && (tree_to_hwi (TMR_STEP (new_ref)) < align))))) { - unsigned int inc = (mem_ref_offset (old_ref) - - mem_ref_offset (new_ref)).low; + unsigned int inc = mem_ref_offset (old_ref).to_uhwi () + - mem_ref_offset (new_ref).to_uhwi (); adjust_ptr_info_misalignment (new_pi, inc); } else diff --git a/gcc/tree-ssa-alias.c b/gcc/tree-ssa-alias.c index 605377e6feb..c674b3432b5 100644 --- a/gcc/tree-ssa-alias.c +++ b/gcc/tree-ssa-alias.c @@ -577,9 +577,9 @@ ao_ref_init_from_ptr_and_size (ao_ref *ref, tree ptr, tree size) ref->offset = 0; } if (size - && host_integerp (size, 0) - && TREE_INT_CST_LOW (size) * 8 / 8 == TREE_INT_CST_LOW (size)) - ref->max_size = ref->size = TREE_INT_CST_LOW (size) * 8; + && tree_fits_shwi_p (size) + && tree_to_shwi (size) * 8 / 8 == tree_to_shwi (size)) + ref->max_size = ref->size = tree_to_shwi (size) * 8; else ref->max_size = ref->size = -1; ref->ref_alias_set = 0; @@ -874,7 +874,7 @@ indirect_ref_may_alias_decl_p (tree ref1 ATTRIBUTE_UNUSED, tree base1, tree ptrtype1, dbase2; HOST_WIDE_INT 
offset1p = offset1, offset2p = offset2; HOST_WIDE_INT doffset1, doffset2; - double_int moff; + addr_wide_int moff; gcc_checking_assert ((TREE_CODE (base1) == MEM_REF || TREE_CODE (base1) == TARGET_MEM_REF) @@ -885,11 +885,12 @@ indirect_ref_may_alias_decl_p (tree ref1 ATTRIBUTE_UNUSED, tree base1, /* The offset embedded in MEM_REFs can be negative. Bias them so that the resulting offset adjustment is positive. */ moff = mem_ref_offset (base1); - moff = moff.lshift (BITS_PER_UNIT == 8 ? 3 : exact_log2 (BITS_PER_UNIT)); - if (moff.is_negative ()) - offset2p += (-moff).low; + moff = wi::lshift (moff, (BITS_PER_UNIT == 8 + ? 3 : exact_log2 (BITS_PER_UNIT))); + if (wi::neg_p (moff)) + offset2p += (-moff).to_short_addr (); else - offset1p += moff.low; + offset1p += moff.to_short_addr (); /* If only one reference is based on a variable, they cannot alias if the pointer access is beyond the extent of the variable access. @@ -960,12 +961,13 @@ indirect_ref_may_alias_decl_p (tree ref1 ATTRIBUTE_UNUSED, tree base1, if (TREE_CODE (dbase2) == MEM_REF || TREE_CODE (dbase2) == TARGET_MEM_REF) { - double_int moff = mem_ref_offset (dbase2); - moff = moff.lshift (BITS_PER_UNIT == 8 ? 3 : exact_log2 (BITS_PER_UNIT)); - if (moff.is_negative ()) - doffset1 -= (-moff).low; + addr_wide_int moff = mem_ref_offset (dbase2); + moff = wi::lshift (moff, (BITS_PER_UNIT == 8 + ? 3 : exact_log2 (BITS_PER_UNIT))); + if (wi::neg_p (moff)) + doffset1 -= (-moff).to_short_addr (); else - doffset2 -= moff.low; + doffset2 -= moff.to_short_addr (); } /* If either reference is view-converted, give up now. */ @@ -1051,21 +1053,23 @@ indirect_refs_may_alias_p (tree ref1 ATTRIBUTE_UNUSED, tree base1, && operand_equal_p (TMR_INDEX2 (base1), TMR_INDEX2 (base2), 0)))))) { - double_int moff; + addr_wide_int moff; /* The offset embedded in MEM_REFs can be negative. Bias them so that the resulting offset adjustment is positive. */ moff = mem_ref_offset (base1); - moff = moff.lshift (BITS_PER_UNIT == 8 ? 
3 : exact_log2 (BITS_PER_UNIT)); - if (moff.is_negative ()) - offset2 += (-moff).low; + moff = wi::lshift (moff, (BITS_PER_UNIT == 8 + ? 3 : exact_log2 (BITS_PER_UNIT))); + if (wi::neg_p (moff)) + offset2 += (-moff).to_short_addr (); else - offset1 += moff.low; + offset1 += moff.to_shwi (); moff = mem_ref_offset (base2); - moff = moff.lshift (BITS_PER_UNIT == 8 ? 3 : exact_log2 (BITS_PER_UNIT)); - if (moff.is_negative ()) - offset1 += (-moff).low; + moff = wi::lshift (moff, (BITS_PER_UNIT == 8 + ? 3 : exact_log2 (BITS_PER_UNIT))); + if (wi::neg_p (moff)) + offset1 += (-moff).to_short_addr (); else - offset2 += moff.low; + offset2 += moff.to_short_addr (); return ranges_overlap_p (offset1, max_size1, offset2, max_size2); } if (!ptr_derefs_may_alias_p (ptr1, ptr2)) @@ -2014,15 +2018,15 @@ stmt_kills_ref_p_1 (gimple stmt, ao_ref *ref) if (!tree_int_cst_equal (TREE_OPERAND (base, 1), TREE_OPERAND (ref->base, 1))) { - double_int off1 = mem_ref_offset (base); - off1 = off1.lshift (BITS_PER_UNIT == 8 - ? 3 : exact_log2 (BITS_PER_UNIT)); - off1 = off1 + double_int::from_shwi (offset); - double_int off2 = mem_ref_offset (ref->base); - off2 = off2.lshift (BITS_PER_UNIT == 8 - ? 3 : exact_log2 (BITS_PER_UNIT)); - off2 = off2 + double_int::from_shwi (ref_offset); - if (off1.fits_shwi () && off2.fits_shwi ()) + addr_wide_int off1 = mem_ref_offset (base); + off1 = wi::lshift (off1, (BITS_PER_UNIT == 8 + ? 3 : exact_log2 (BITS_PER_UNIT))); + off1 += offset; + addr_wide_int off2 = mem_ref_offset (ref->base); + off2 = wi::lshift (off2, (BITS_PER_UNIT == 8 + ? 
3 : exact_log2 (BITS_PER_UNIT))); + off2 += ref_offset; + if (wi::fits_shwi_p (off1) && wi::fits_shwi_p (off2)) { offset = off1.to_shwi (); ref_offset = off2.to_shwi (); @@ -2064,7 +2068,7 @@ stmt_kills_ref_p_1 (gimple stmt, ao_ref *ref) tree len = gimple_call_arg (stmt, 2); tree base = NULL_TREE; HOST_WIDE_INT offset = 0; - if (!host_integerp (len, 0)) + if (!tree_fits_shwi_p (len)) return false; if (TREE_CODE (dest) == ADDR_EXPR) base = get_addr_base_and_unit_offset (TREE_OPERAND (dest, 0), @@ -2074,7 +2078,7 @@ stmt_kills_ref_p_1 (gimple stmt, ao_ref *ref) if (base && base == ao_ref_base (ref)) { - HOST_WIDE_INT size = TREE_INT_CST_LOW (len); + HOST_WIDE_INT size = tree_to_hwi (len); if (offset <= ref->offset / BITS_PER_UNIT && (offset + size >= ((ref->offset + ref->max_size + BITS_PER_UNIT - 1) diff --git a/gcc/tree-ssa-ccp.c b/gcc/tree-ssa-ccp.c index 15df8da7a3f..d6a4ef34c83 100644 --- a/gcc/tree-ssa-ccp.c +++ b/gcc/tree-ssa-ccp.c @@ -98,6 +98,15 @@ along with GCC; see the file COPYING3. If not see array CONST_VAL[i].VALUE. That is fed into substitute_and_fold for final substitution and folding. + This algorithm uses wide-ints at the max precision of the target. + This means that, with one uninteresting exception, variables with + UNSIGNED types never go to VARYING because the bits above the + precision of the type of the variable are always zero. The + uninteresting case is a variable of UNSIGNED type that has the + maximum precision of the target. Such variables can go to VARYING, + but this causes no loss of infomation since these variables will + never be extended. + References: Constant propagation with conditional branches, @@ -129,7 +138,7 @@ along with GCC; see the file COPYING3. If not see #include "dbgcnt.h" #include "params.h" #include "hash-table.h" - +#include "wide-int-print.h" /* Possible lattice values. */ typedef enum @@ -147,9 +156,11 @@ struct prop_value_d { /* Propagated value. 
*/ tree value; - /* Mask that applies to the propagated value during CCP. For - X with a CONSTANT lattice value X & ~mask == value & ~mask. */ - double_int mask; + /* Mask that applies to the propagated value during CCP. For X + with a CONSTANT lattice value X & ~mask == value & ~mask. The + zero bits in the mask cover constant values. The ones mean no + information. */ + max_wide_int mask; }; typedef struct prop_value_d prop_value_t; @@ -184,18 +195,19 @@ dump_lattice_value (FILE *outf, const char *prefix, prop_value_t val) break; case CONSTANT: if (TREE_CODE (val.value) != INTEGER_CST - || val.mask.is_zero ()) + || val.mask == 0) { fprintf (outf, "%sCONSTANT ", prefix); print_generic_expr (outf, val.value, dump_flags); } else { - double_int cval = tree_to_double_int (val.value).and_not (val.mask); - fprintf (outf, "%sCONSTANT " HOST_WIDE_INT_PRINT_DOUBLE_HEX, - prefix, cval.high, cval.low); - fprintf (outf, " (" HOST_WIDE_INT_PRINT_DOUBLE_HEX ")", - val.mask.high, val.mask.low); + wide_int cval = wi::bit_and_not (val.value, val.mask); + fprintf (outf, "%sCONSTANT ", prefix); + print_hex (cval, outf); + fprintf (outf, " ("); + print_hex (val.mask, outf); + fprintf (outf, ")"); } break; default: @@ -237,7 +249,7 @@ debug_lattice_value (prop_value_t val) static prop_value_t get_default_value (tree var) { - prop_value_t val = { UNINITIALIZED, NULL_TREE, { 0, 0 } }; + prop_value_t val = { UNINITIALIZED, NULL_TREE, 0 }; gimple stmt; stmt = SSA_NAME_DEF_STMT (var); @@ -254,7 +266,7 @@ get_default_value (tree var) else { val.lattice_val = VARYING; - val.mask = double_int_minus_one; + val.mask = -1; } } else if (is_gimple_assign (stmt)) @@ -286,7 +298,7 @@ get_default_value (tree var) { /* Otherwise, VAR will never take on a constant value. 
*/ val.lattice_val = VARYING; - val.mask = double_int_minus_one; + val.mask = -1; } return val; @@ -329,7 +341,7 @@ get_constant_value (tree var) if (val && val->lattice_val == CONSTANT && (TREE_CODE (val->value) != INTEGER_CST - || val->mask.is_zero ())) + || val->mask == 0)) return val->value; return NULL_TREE; } @@ -343,7 +355,7 @@ set_value_varying (tree var) val->lattice_val = VARYING; val->value = NULL_TREE; - val->mask = double_int_minus_one; + val->mask = -1; } /* For float types, modify the value of VAL to make ccp work correctly @@ -420,8 +432,8 @@ valid_lattice_transition (prop_value_t old_val, prop_value_t new_val) /* Bit-lattices have to agree in the still valid bits. */ if (TREE_CODE (old_val.value) == INTEGER_CST && TREE_CODE (new_val.value) == INTEGER_CST) - return tree_to_double_int (old_val.value).and_not (new_val.mask) - == tree_to_double_int (new_val.value).and_not (new_val.mask); + return (wi::bit_and_not (old_val.value, new_val.mask) + == wi::bit_and_not (new_val.value, new_val.mask)); /* Otherwise constant values have to agree. */ return operand_equal_p (old_val.value, new_val.value, 0); @@ -446,9 +458,7 @@ set_lattice_value (tree var, prop_value_t new_val) && TREE_CODE (new_val.value) == INTEGER_CST && TREE_CODE (old_val->value) == INTEGER_CST) { - double_int diff; - diff = tree_to_double_int (new_val.value) - ^ tree_to_double_int (old_val->value); + max_wide_int diff = wi::bit_xor (new_val.value, old_val->value); new_val.mask = new_val.mask | old_val->mask | diff; } @@ -460,7 +470,8 @@ set_lattice_value (tree var, prop_value_t new_val) || (new_val.lattice_val == CONSTANT && TREE_CODE (new_val.value) == INTEGER_CST && (TREE_CODE (old_val->value) != INTEGER_CST - || new_val.mask != old_val->mask))) + || new_val.mask + != old_val->mask))) { /* ??? We would like to delay creation of INTEGER_CSTs from partially constants here. 
*/ @@ -482,21 +493,21 @@ set_lattice_value (tree var, prop_value_t new_val) static prop_value_t get_value_for_expr (tree, bool); static prop_value_t bit_value_binop (enum tree_code, tree, tree, tree); -static void bit_value_binop_1 (enum tree_code, tree, double_int *, double_int *, - tree, double_int, double_int, - tree, double_int, double_int); +static void bit_value_binop_1 (enum tree_code, tree, max_wide_int *, max_wide_int *, + tree, max_wide_int, max_wide_int, + tree, max_wide_int, max_wide_int); -/* Return a double_int that can be used for bitwise simplifications +/* Return a max_wide_int that can be used for bitwise simplifications from VAL. */ -static double_int -value_to_double_int (prop_value_t val) +static max_wide_int +value_to_wide_int (prop_value_t val) { if (val.value && TREE_CODE (val.value) == INTEGER_CST) - return tree_to_double_int (val.value); - else - return double_int_zero; + return val.value; + + return 0; } /* Return the value for the address expression EXPR based on alignment @@ -514,14 +525,11 @@ get_value_from_alignment (tree expr) get_pointer_alignment_1 (expr, &align, &bitpos); val.mask = (POINTER_TYPE_P (type) || TYPE_UNSIGNED (type) - ? double_int::mask (TYPE_PRECISION (type)) - : double_int_minus_one) - .and_not (double_int::from_uhwi (align / BITS_PER_UNIT - 1)); - val.lattice_val = val.mask.is_minus_one () ? VARYING : CONSTANT; + ? wi::mask <max_wide_int> (TYPE_PRECISION (type), false) + : -1).and_not (align / BITS_PER_UNIT - 1); + val.lattice_val = val.mask == -1 ? 
VARYING : CONSTANT; if (val.lattice_val == CONSTANT) - val.value - = double_int_to_tree (type, - double_int::from_uhwi (bitpos / BITS_PER_UNIT)); + val.value = build_int_cstu (type, bitpos / BITS_PER_UNIT); else val.value = NULL_TREE; @@ -550,7 +558,7 @@ get_value_for_expr (tree expr, bool for_bits_p) { val.lattice_val = CONSTANT; val.value = expr; - val.mask = double_int_zero; + val.mask = 0; canonicalize_float_value (&val); } else if (TREE_CODE (expr) == ADDR_EXPR) @@ -558,7 +566,7 @@ get_value_for_expr (tree expr, bool for_bits_p) else { val.lattice_val = VARYING; - val.mask = double_int_minus_one; + val.mask = 1; val.value = NULL_TREE; } return val; @@ -802,7 +810,7 @@ do_dbg_cnt (void) if (!dbg_cnt (ccp)) { const_val[i].lattice_val = VARYING; - const_val[i].mask = double_int_minus_one; + const_val[i].mask = -1; const_val[i].value = NULL_TREE; } } @@ -841,11 +849,11 @@ ccp_finalize (void) /* Trailing constant bits specify the alignment, trailing value bits the misalignment. */ - tem = val->mask.low; + tem = val->mask.to_uhwi (); align = (tem & -tem); if (align > 1) set_ptr_info_alignment (get_ptr_info (name), align, - TREE_INT_CST_LOW (val->value) & (align - 1)); + tree_to_hwi (val->value) & (align - 1)); } /* Perform substitutions based on the known constant values. */ @@ -886,7 +894,7 @@ ccp_lattice_meet (prop_value_t *val1, prop_value_t *val2) { /* any M VARYING = VARYING. */ val1->lattice_val = VARYING; - val1->mask = double_int_minus_one; + val1->mask = -1; val1->value = NULL_TREE; } else if (val1->lattice_val == CONSTANT @@ -899,10 +907,9 @@ ccp_lattice_meet (prop_value_t *val1, prop_value_t *val2) For INTEGER_CSTs mask unequal bits. If no equal bits remain, drop to varying. 
*/ - val1->mask = val1->mask | val2->mask - | (tree_to_double_int (val1->value) - ^ tree_to_double_int (val2->value)); - if (val1->mask.is_minus_one ()) + val1->mask = (val1->mask | val2->mask + | (wi::bit_xor (val1->value, val2->value))); + if (val1->mask == -1) { val1->lattice_val = VARYING; val1->value = NULL_TREE; @@ -935,7 +942,7 @@ ccp_lattice_meet (prop_value_t *val1, prop_value_t *val2) { /* Any other combination is VARYING. */ val1->lattice_val = VARYING; - val1->mask = double_int_minus_one; + val1->mask = -1; val1->value = NULL_TREE; } } @@ -1090,8 +1097,8 @@ ccp_fold (gimple stmt) static void bit_value_unop_1 (enum tree_code code, tree type, - double_int *val, double_int *mask, - tree rtype, double_int rval, double_int rmask) + max_wide_int *val, max_wide_int *mask, + tree rtype, const max_wide_int &rval, const max_wide_int &rmask) { switch (code) { @@ -1102,33 +1109,32 @@ bit_value_unop_1 (enum tree_code code, tree type, case NEGATE_EXPR: { - double_int temv, temm; + max_wide_int temv, temm; /* Return ~rval + 1. */ bit_value_unop_1 (BIT_NOT_EXPR, type, &temv, &temm, type, rval, rmask); bit_value_binop_1 (PLUS_EXPR, type, val, mask, - type, temv, temm, - type, double_int_one, double_int_zero); + type, temv, temm, type, 1, 0); break; } CASE_CONVERT: { - bool uns; + signop sgn; /* First extend mask and value according to the original type. */ - uns = TYPE_UNSIGNED (rtype); - *mask = rmask.ext (TYPE_PRECISION (rtype), uns); - *val = rval.ext (TYPE_PRECISION (rtype), uns); + sgn = TYPE_SIGN (rtype); + *mask = wi::ext (rmask, TYPE_PRECISION (rtype), sgn); + *val = wi::ext (rval, TYPE_PRECISION (rtype), sgn); /* Then extend mask and value according to the target type. 
*/ - uns = TYPE_UNSIGNED (type); - *mask = (*mask).ext (TYPE_PRECISION (type), uns); - *val = (*val).ext (TYPE_PRECISION (type), uns); + sgn = TYPE_SIGN (type); + *mask = wi::ext (*mask, TYPE_PRECISION (type), sgn); + *val = wi::ext (*val, TYPE_PRECISION (type), sgn); break; } default: - *mask = double_int_minus_one; + *mask = -1; break; } } @@ -1139,14 +1145,17 @@ bit_value_unop_1 (enum tree_code code, tree type, static void bit_value_binop_1 (enum tree_code code, tree type, - double_int *val, double_int *mask, - tree r1type, double_int r1val, double_int r1mask, - tree r2type, double_int r2val, double_int r2mask) + max_wide_int *val, max_wide_int *mask, + tree r1type, max_wide_int r1val, max_wide_int r1mask, + tree r2type, max_wide_int r2val, max_wide_int r2mask) { - bool uns = TYPE_UNSIGNED (type); - /* Assume we'll get a constant result. Use an initial varying value, - we fall back to varying in the end if necessary. */ - *mask = double_int_minus_one; + signop sgn = TYPE_SIGN (type); + int width = TYPE_PRECISION (type); + + /* Assume we'll get a constant result. Use an initial non varying + value, we fall back to varying in the end if necessary. 
*/ + *mask = -1; + switch (code) { case BIT_AND_EXPR: @@ -1172,13 +1181,35 @@ bit_value_binop_1 (enum tree_code code, tree type, case LROTATE_EXPR: case RROTATE_EXPR: - if (r2mask.is_zero ()) + if (r2mask == 0) { - HOST_WIDE_INT shift = r2val.low; - if (code == RROTATE_EXPR) - shift = -shift; - *mask = r1mask.lrotate (shift, TYPE_PRECISION (type)); - *val = r1val.lrotate (shift, TYPE_PRECISION (type)); + wide_int shift = r2val; + if (shift == 0) + { + *mask = r1mask; + *val = r1val; + } + else + { + if (wi::neg_p (shift)) + { + shift = -shift; + if (code == RROTATE_EXPR) + code = LROTATE_EXPR; + else + code = RROTATE_EXPR; + } + if (code == RROTATE_EXPR) + { + *mask = wi::rrotate (r1mask, shift, width); + *val = wi::rrotate (r1val, shift, width); + } + else + { + *mask = wi::lrotate (r1mask, shift, width); + *val = wi::lrotate (r1val, shift, width); + } + } } break; @@ -1187,53 +1218,56 @@ bit_value_binop_1 (enum tree_code code, tree type, /* ??? We can handle partially known shift counts if we know its sign. That way we can tell that (x << (y | 8)) & 255 is zero. */ - if (r2mask.is_zero ()) + if (r2mask == 0) { - HOST_WIDE_INT shift = r2val.low; - if (code == RSHIFT_EXPR) - shift = -shift; - /* We need to know if we are doing a left or a right shift - to properly shift in zeros for left shift and unsigned - right shifts and the sign bit for signed right shifts. - For signed right shifts we shift in varying in case - the sign bit was varying. 
*/ - if (shift > 0) - { - *mask = r1mask.llshift (shift, TYPE_PRECISION (type)); - *val = r1val.llshift (shift, TYPE_PRECISION (type)); - } - else if (shift < 0) - { - shift = -shift; - *mask = r1mask.rshift (shift, TYPE_PRECISION (type), !uns); - *val = r1val.rshift (shift, TYPE_PRECISION (type), !uns); - } - else + wide_int shift = r2val; + if (shift == 0) { *mask = r1mask; *val = r1val; } + else + { + if (wi::neg_p (shift)) + { + shift = -shift; + if (code == RSHIFT_EXPR) + code = LSHIFT_EXPR; + else + code = RSHIFT_EXPR; + } + if (code == RSHIFT_EXPR) + { + *mask = wi::rshift (wi::ext (r1mask, width, sgn), shift, sgn); + *val = wi::rshift (wi::ext (r1val, width, sgn), shift, sgn); + } + else + { + *mask = wi::sext (wi::lshift (r1mask, shift), width); + *val = wi::sext (wi::lshift (r1val, shift), width); + } + } } break; case PLUS_EXPR: case POINTER_PLUS_EXPR: { - double_int lo, hi; + max_wide_int lo, hi; /* Do the addition with unknown bits set to zero, to give carry-ins of zero wherever possible. */ lo = r1val.and_not (r1mask) + r2val.and_not (r2mask); - lo = lo.ext (TYPE_PRECISION (type), uns); + lo = wi::ext (lo, width, sgn); /* Do the addition with unknown bits set to one, to give carry-ins of one wherever possible. */ hi = (r1val | r1mask) + (r2val | r2mask); - hi = hi.ext (TYPE_PRECISION (type), uns); + hi = wi::ext (hi, width, sgn); /* Each bit in the result is known if (a) the corresponding bits in both inputs are known, and (b) the carry-in to that bit position is known. We can check condition (b) by seeing if we got the same result with minimised carries as with maximised carries. */ *mask = r1mask | r2mask | (lo ^ hi); - *mask = (*mask).ext (TYPE_PRECISION (type), uns); + *mask = wi::ext (*mask, width, sgn); /* It shouldn't matter whether we choose lo or hi here. 
*/ *val = lo; break; @@ -1241,7 +1275,7 @@ bit_value_binop_1 (enum tree_code code, tree type, case MINUS_EXPR: { - double_int temv, temm; + max_wide_int temv, temm; bit_value_unop_1 (NEGATE_EXPR, r2type, &temv, &temm, r2type, r2val, r2mask); bit_value_binop_1 (PLUS_EXPR, type, val, mask, @@ -1254,18 +1288,18 @@ bit_value_binop_1 (enum tree_code code, tree type, { /* Just track trailing zeros in both operands and transfer them to the other. */ - int r1tz = (r1val | r1mask).trailing_zeros (); - int r2tz = (r2val | r2mask).trailing_zeros (); - if (r1tz + r2tz >= HOST_BITS_PER_DOUBLE_INT) + int r1tz = wi::ctz (r1val | r1mask); + int r2tz = wi::ctz (r2val | r2mask); + if (r1tz + r2tz >= width) { - *mask = double_int_zero; - *val = double_int_zero; + *mask = 0; + *val = 0; } else if (r1tz + r2tz > 0) { - *mask = ~double_int::mask (r1tz + r2tz); - *mask = (*mask).ext (TYPE_PRECISION (type), uns); - *val = double_int_zero; + *mask = wi::ext (wi::mask <max_wide_int> (r1tz + r2tz, true), + width, sgn); + *val = 0; } break; } @@ -1273,71 +1307,78 @@ bit_value_binop_1 (enum tree_code code, tree type, case EQ_EXPR: case NE_EXPR: { - double_int m = r1mask | r2mask; + max_wide_int m = r1mask | r2mask; if (r1val.and_not (m) != r2val.and_not (m)) { - *mask = double_int_zero; - *val = ((code == EQ_EXPR) ? double_int_zero : double_int_one); + *mask = 0; + *val = ((code == EQ_EXPR) ? 0 : 1); } else { /* We know the result of a comparison is always one or zero. */ - *mask = double_int_one; - *val = double_int_zero; + *mask = 1; + *val = 0; } break; } case GE_EXPR: case GT_EXPR: - { - double_int tem = r1val; - r1val = r2val; - r2val = tem; - tem = r1mask; - r1mask = r2mask; - r2mask = tem; - code = swap_tree_comparison (code); - } - /* Fallthru. 
*/ case LT_EXPR: case LE_EXPR: { + max_wide_int o1val, o2val, o1mask, o2mask; int minmax, maxmin; + + if ((code == GE_EXPR) || (code == GT_EXPR)) + { + o1val = r2val; + o1mask = r2mask; + o2val = r1val; + o2mask = r1mask; + code = swap_tree_comparison (code); + } + else + { + o1val = r1val; + o1mask = r1mask; + o2val = r2val; + o2mask = r2mask; + } /* If the most significant bits are not known we know nothing. */ - if (r1mask.is_negative () || r2mask.is_negative ()) + if (wi::neg_p (o1mask) || wi::neg_p (o2mask)) break; /* For comparisons the signedness is in the comparison operands. */ - uns = TYPE_UNSIGNED (r1type); + sgn = TYPE_SIGN (r1type); /* If we know the most significant bits we know the values value ranges by means of treating varying bits as zero or one. Do a cross comparison of the max/min pairs. */ - maxmin = (r1val | r1mask).cmp (r2val.and_not (r2mask), uns); - minmax = r1val.and_not (r1mask).cmp (r2val | r2mask, uns); - if (maxmin < 0) /* r1 is less than r2. */ + maxmin = wi::cmp (o1val | o1mask, o2val.and_not (o2mask), sgn); + minmax = wi::cmp (o1val.and_not (o1mask), o2val | o2mask, sgn); + if (maxmin < 0) /* o1 is less than o2. */ { - *mask = double_int_zero; - *val = double_int_one; + *mask = 0; + *val = 1; } - else if (minmax > 0) /* r1 is not less or equal to r2. */ + else if (minmax > 0) /* o1 is not less or equal to o2. */ { - *mask = double_int_zero; - *val = double_int_zero; + *mask = 0; + *val = 0; } - else if (maxmin == minmax) /* r1 and r2 are equal. */ + else if (maxmin == minmax) /* o1 and o2 are equal. */ { /* This probably should never happen as we'd have folded the thing during fully constant value folding. */ - *mask = double_int_zero; - *val = (code == LE_EXPR ? double_int_one : double_int_zero); + *mask = 0; + *val = (code == LE_EXPR ? 1 : 0); } else { /* We know the result of a comparison is always one or zero. 
*/ - *mask = double_int_one; - *val = double_int_zero; + *mask = 1; + *val = 0; } break; } @@ -1353,7 +1394,7 @@ static prop_value_t bit_value_unop (enum tree_code code, tree type, tree rhs) { prop_value_t rval = get_value_for_expr (rhs, true); - double_int value, mask; + max_wide_int value, mask; prop_value_t val; if (rval.lattice_val == UNDEFINED) @@ -1361,21 +1402,21 @@ bit_value_unop (enum tree_code code, tree type, tree rhs) gcc_assert ((rval.lattice_val == CONSTANT && TREE_CODE (rval.value) == INTEGER_CST) - || rval.mask.is_minus_one ()); + || rval.mask == -1); bit_value_unop_1 (code, type, &value, &mask, - TREE_TYPE (rhs), value_to_double_int (rval), rval.mask); - if (!mask.is_minus_one ()) + TREE_TYPE (rhs), value_to_wide_int (rval), rval.mask); + if (mask != -1) { val.lattice_val = CONSTANT; val.mask = mask; /* ??? Delay building trees here. */ - val.value = double_int_to_tree (type, value); + val.value = wide_int_to_tree (type, value); } else { val.lattice_val = VARYING; val.value = NULL_TREE; - val.mask = double_int_minus_one; + val.mask = -1; } return val; } @@ -1388,7 +1429,7 @@ bit_value_binop (enum tree_code code, tree type, tree rhs1, tree rhs2) { prop_value_t r1val = get_value_for_expr (rhs1, true); prop_value_t r2val = get_value_for_expr (rhs2, true); - double_int value, mask; + max_wide_int value, mask; prop_value_t val; if (r1val.lattice_val == UNDEFINED @@ -1396,31 +1437,31 @@ bit_value_binop (enum tree_code code, tree type, tree rhs1, tree rhs2) { val.lattice_val = VARYING; val.value = NULL_TREE; - val.mask = double_int_minus_one; + val.mask = -1; return val; } gcc_assert ((r1val.lattice_val == CONSTANT && TREE_CODE (r1val.value) == INTEGER_CST) - || r1val.mask.is_minus_one ()); + || r1val.mask == -1); gcc_assert ((r2val.lattice_val == CONSTANT && TREE_CODE (r2val.value) == INTEGER_CST) - || r2val.mask.is_minus_one ()); + || r2val.mask == -1); bit_value_binop_1 (code, type, &value, &mask, - TREE_TYPE (rhs1), value_to_double_int (r1val), 
r1val.mask, - TREE_TYPE (rhs2), value_to_double_int (r2val), r2val.mask); - if (!mask.is_minus_one ()) + TREE_TYPE (rhs1), value_to_wide_int (r1val), r1val.mask, + TREE_TYPE (rhs2), value_to_wide_int (r2val), r2val.mask); + if (mask != -1) { val.lattice_val = CONSTANT; val.mask = mask; /* ??? Delay building trees here. */ - val.value = double_int_to_tree (type, value); + val.value = wide_int_to_tree (type, value); } else { val.lattice_val = VARYING; val.value = NULL_TREE; - val.mask = double_int_minus_one; + val.mask = -1; } return val; } @@ -1436,49 +1477,50 @@ bit_value_assume_aligned (gimple stmt) unsigned HOST_WIDE_INT aligni, misaligni = 0; prop_value_t ptrval = get_value_for_expr (ptr, true); prop_value_t alignval; - double_int value, mask; + max_wide_int value, mask; prop_value_t val; + if (ptrval.lattice_val == UNDEFINED) return ptrval; gcc_assert ((ptrval.lattice_val == CONSTANT && TREE_CODE (ptrval.value) == INTEGER_CST) - || ptrval.mask.is_minus_one ()); + || ptrval.mask == -1); align = gimple_call_arg (stmt, 1); - if (!host_integerp (align, 1)) + if (!tree_fits_uhwi_p (align)) return ptrval; - aligni = tree_low_cst (align, 1); + aligni = tree_to_uhwi (align); if (aligni <= 1 || (aligni & (aligni - 1)) != 0) return ptrval; if (gimple_call_num_args (stmt) > 2) { misalign = gimple_call_arg (stmt, 2); - if (!host_integerp (misalign, 1)) + if (!tree_fits_uhwi_p (misalign)) return ptrval; - misaligni = tree_low_cst (misalign, 1); + misaligni = tree_to_uhwi (misalign); if (misaligni >= aligni) return ptrval; } align = build_int_cst_type (type, -aligni); alignval = get_value_for_expr (align, true); bit_value_binop_1 (BIT_AND_EXPR, type, &value, &mask, - type, value_to_double_int (ptrval), ptrval.mask, - type, value_to_double_int (alignval), alignval.mask); - if (!mask.is_minus_one ()) + type, value_to_wide_int (ptrval), ptrval.mask, + type, value_to_wide_int (alignval), alignval.mask); + if (mask != -1) { val.lattice_val = CONSTANT; val.mask = mask; - 
gcc_assert ((mask.low & (aligni - 1)) == 0); - gcc_assert ((value.low & (aligni - 1)) == 0); - value.low |= misaligni; + gcc_assert ((mask.to_uhwi () & (aligni - 1)) == 0); + gcc_assert ((value.to_uhwi () & (aligni - 1)) == 0); + value |= misaligni; /* ??? Delay building trees here. */ - val.value = double_int_to_tree (type, value); + val.value = wide_int_to_tree (type, value); } else { val.lattice_val = VARYING; val.value = NULL_TREE; - val.mask = double_int_minus_one; + val.mask = -1; } return val; } @@ -1530,7 +1572,7 @@ evaluate_stmt (gimple stmt) /* The statement produced a constant value. */ val.lattice_val = CONSTANT; val.value = simplified; - val.mask = double_int_zero; + val.mask = 0; } } /* If the statement is likely to have a VARYING result, then do not @@ -1558,7 +1600,7 @@ evaluate_stmt (gimple stmt) /* The statement produced a constant value. */ val.lattice_val = CONSTANT; val.value = simplified; - val.mask = double_int_zero; + val.mask = 0; } } @@ -1570,7 +1612,7 @@ evaluate_stmt (gimple stmt) enum gimple_code code = gimple_code (stmt); val.lattice_val = VARYING; val.value = NULL_TREE; - val.mask = double_int_minus_one; + val.mask = -1; if (code == GIMPLE_ASSIGN) { enum tree_code subcode = gimple_assign_rhs_code (stmt); @@ -1626,20 +1668,18 @@ evaluate_stmt (gimple stmt) case BUILT_IN_STRNDUP: val.lattice_val = CONSTANT; val.value = build_int_cst (TREE_TYPE (gimple_get_lhs (stmt)), 0); - val.mask = double_int::from_shwi - (~(((HOST_WIDE_INT) MALLOC_ABI_ALIGNMENT) - / BITS_PER_UNIT - 1)); + val.mask = ~((HOST_WIDE_INT) MALLOC_ABI_ALIGNMENT + / BITS_PER_UNIT - 1); break; case BUILT_IN_ALLOCA: case BUILT_IN_ALLOCA_WITH_ALIGN: align = (DECL_FUNCTION_CODE (fndecl) == BUILT_IN_ALLOCA_WITH_ALIGN - ? TREE_INT_CST_LOW (gimple_call_arg (stmt, 1)) + ? 
tree_to_hwi (gimple_call_arg (stmt, 1)) : BIGGEST_ALIGNMENT); val.lattice_val = CONSTANT; val.value = build_int_cst (TREE_TYPE (gimple_get_lhs (stmt)), 0); - val.mask = double_int::from_shwi (~(((HOST_WIDE_INT) align) - / BITS_PER_UNIT - 1)); + val.mask = ~((HOST_WIDE_INT) align / BITS_PER_UNIT - 1); break; /* These builtins return their first argument, unmodified. */ @@ -1674,12 +1714,12 @@ evaluate_stmt (gimple stmt) if (likelyvalue == UNDEFINED) { val.lattice_val = likelyvalue; - val.mask = double_int_zero; + val.mask = 0; } else { val.lattice_val = VARYING; - val.mask = double_int_minus_one; + val.mask = -1; } val.value = NULL_TREE; @@ -1805,10 +1845,10 @@ fold_builtin_alloca_with_align (gimple stmt) arg = get_constant_value (gimple_call_arg (stmt, 0)); if (arg == NULL_TREE || TREE_CODE (arg) != INTEGER_CST - || !host_integerp (arg, 1)) + || !tree_fits_uhwi_p (arg)) return NULL_TREE; - size = TREE_INT_CST_LOW (arg); + size = tree_to_hwi (arg); /* Heuristic: don't fold large allocas. */ threshold = (unsigned HOST_WIDE_INT)PARAM_VALUE (PARAM_LARGE_STACK_FRAME); @@ -1826,7 +1866,7 @@ fold_builtin_alloca_with_align (gimple stmt) n_elem = size * 8 / BITS_PER_UNIT; array_type = build_array_type_nelts (elem_type, n_elem); var = create_tmp_var (array_type, NULL); - DECL_ALIGN (var) = TREE_INT_CST_LOW (gimple_call_arg (stmt, 1)); + DECL_ALIGN (var) = tree_to_hwi (gimple_call_arg (stmt, 1)); { struct ptr_info_def *pi = SSA_NAME_PTR_INFO (lhs); if (pi != NULL && !pi->pt.anything) @@ -1861,7 +1901,7 @@ ccp_fold_stmt (gimple_stmt_iterator *gsi) fold more conditionals here. 
*/ val = evaluate_stmt (stmt); if (val.lattice_val != CONSTANT - || !val.mask.is_zero ()) + || val.mask != 0) return false; if (dump_file) @@ -2041,7 +2081,7 @@ visit_cond_stmt (gimple stmt, edge *taken_edge_p) block = gimple_bb (stmt); val = evaluate_stmt (stmt); if (val.lattice_val != CONSTANT - || !val.mask.is_zero ()) + || val.mask != 0) return SSA_PROP_VARYING; /* Find which edge out of the conditional block will be taken and add it @@ -2113,7 +2153,7 @@ ccp_visit_stmt (gimple stmt, edge *taken_edge_p, tree *output_p) Mark them VARYING. */ FOR_EACH_SSA_TREE_OPERAND (def, stmt, iter, SSA_OP_ALL_DEFS) { - prop_value_t v = { VARYING, NULL_TREE, { -1, (HOST_WIDE_INT) -1 } }; + prop_value_t v = { VARYING, NULL_TREE, -1 }; set_lattice_value (def, v); } diff --git a/gcc/tree-ssa-forwprop.c b/gcc/tree-ssa-forwprop.c index 456725d3e3b..0ad3293040c 100644 --- a/gcc/tree-ssa-forwprop.c +++ b/gcc/tree-ssa-forwprop.c @@ -812,9 +812,9 @@ forward_propagate_addr_expr_1 (tree name, tree def_rhs, if ((def_rhs_base = get_addr_base_and_unit_offset (TREE_OPERAND (def_rhs, 0), &def_rhs_offset))) { - double_int off = mem_ref_offset (lhs); + addr_wide_int off = mem_ref_offset (lhs); tree new_ptr; - off += double_int::from_shwi (def_rhs_offset); + off += def_rhs_offset; if (TREE_CODE (def_rhs_base) == MEM_REF) { off += mem_ref_offset (def_rhs_base); @@ -824,7 +824,7 @@ forward_propagate_addr_expr_1 (tree name, tree def_rhs, new_ptr = build_fold_addr_expr (def_rhs_base); TREE_OPERAND (lhs, 0) = new_ptr; TREE_OPERAND (lhs, 1) - = double_int_to_tree (TREE_TYPE (TREE_OPERAND (lhs, 1)), off); + = wide_int_to_tree (TREE_TYPE (TREE_OPERAND (lhs, 1)), off); tidy_after_forward_propagate_addr (use_stmt); /* Continue propagating into the RHS if this was not the only use. 
*/ if (single_use_p) @@ -903,9 +903,9 @@ forward_propagate_addr_expr_1 (tree name, tree def_rhs, if ((def_rhs_base = get_addr_base_and_unit_offset (TREE_OPERAND (def_rhs, 0), &def_rhs_offset))) { - double_int off = mem_ref_offset (rhs); + addr_wide_int off = mem_ref_offset (rhs); tree new_ptr; - off += double_int::from_shwi (def_rhs_offset); + off += def_rhs_offset; if (TREE_CODE (def_rhs_base) == MEM_REF) { off += mem_ref_offset (def_rhs_base); @@ -915,7 +915,7 @@ forward_propagate_addr_expr_1 (tree name, tree def_rhs, new_ptr = build_fold_addr_expr (def_rhs_base); TREE_OPERAND (rhs, 0) = new_ptr; TREE_OPERAND (rhs, 1) - = double_int_to_tree (TREE_TYPE (TREE_OPERAND (rhs, 1)), off); + = wide_int_to_tree (TREE_TYPE (TREE_OPERAND (rhs, 1)), off); fold_stmt_inplace (use_stmt_gsi); tidy_after_forward_propagate_addr (use_stmt); return res; @@ -1436,8 +1436,8 @@ constant_pointer_difference (tree p1, tree p2) { p = TREE_OPERAND (q, 0); off = size_binop (PLUS_EXPR, off, - double_int_to_tree (sizetype, - mem_ref_offset (q))); + wide_int_to_tree (sizetype, + mem_ref_offset (q))); } else { @@ -1519,8 +1519,8 @@ simplify_builtin_call (gimple_stmt_iterator *gsi_p, tree callee2) char *src_buf; use_operand_p use_p; - if (!host_integerp (val2, 0) - || !host_integerp (len2, 1)) + if (!tree_fits_shwi_p (val2) + || !tree_fits_uhwi_p (len2)) break; if (is_gimple_call (stmt1)) { @@ -1539,15 +1539,15 @@ simplify_builtin_call (gimple_stmt_iterator *gsi_p, tree callee2) src1 = gimple_call_arg (stmt1, 1); len1 = gimple_call_arg (stmt1, 2); lhs1 = gimple_call_lhs (stmt1); - if (!host_integerp (len1, 1)) + if (!tree_fits_uhwi_p (len1)) break; str1 = string_constant (src1, &off1); if (str1 == NULL_TREE) break; - if (!host_integerp (off1, 1) + if (!tree_fits_uhwi_p (off1) || compare_tree_int (off1, TREE_STRING_LENGTH (str1) - 1) > 0 || compare_tree_int (len1, TREE_STRING_LENGTH (str1) - - tree_low_cst (off1, 1)) > 0 + - tree_to_uhwi (off1)) > 0 || TREE_CODE (TREE_TYPE (str1)) != ARRAY_TYPE || 
TYPE_MODE (TREE_TYPE (TREE_TYPE (str1))) != TYPE_MODE (char_type_node)) @@ -1561,7 +1561,7 @@ simplify_builtin_call (gimple_stmt_iterator *gsi_p, tree callee2) src1 = gimple_assign_rhs1 (stmt1); if (TREE_CODE (ptr1) != MEM_REF || TYPE_MODE (TREE_TYPE (ptr1)) != TYPE_MODE (char_type_node) - || !host_integerp (src1, 0)) + || !tree_fits_shwi_p (src1)) break; ptr1 = build_fold_addr_expr (ptr1); callee1 = NULL_TREE; @@ -1585,16 +1585,16 @@ simplify_builtin_call (gimple_stmt_iterator *gsi_p, tree callee2) /* If the difference between the second and first destination pointer is not constant, or is bigger than memcpy length, bail out. */ if (diff == NULL - || !host_integerp (diff, 1) + || !tree_fits_uhwi_p (diff) || tree_int_cst_lt (len1, diff)) break; /* Use maximum of difference plus memset length and memcpy length as the new memcpy length, if it is too big, bail out. */ - src_len = tree_low_cst (diff, 1); - src_len += tree_low_cst (len2, 1); - if (src_len < (unsigned HOST_WIDE_INT) tree_low_cst (len1, 1)) - src_len = tree_low_cst (len1, 1); + src_len = tree_to_uhwi (diff); + src_len += tree_to_uhwi (len2); + if (src_len < (unsigned HOST_WIDE_INT) tree_to_uhwi (len1)) + src_len = tree_to_uhwi (len1); if (src_len > 1024) break; @@ -1620,12 +1620,12 @@ simplify_builtin_call (gimple_stmt_iterator *gsi_p, tree callee2) src_buf = XALLOCAVEC (char, src_len + 1); if (callee1) memcpy (src_buf, - TREE_STRING_POINTER (str1) + tree_low_cst (off1, 1), - tree_low_cst (len1, 1)); + TREE_STRING_POINTER (str1) + tree_to_uhwi (off1), + tree_to_uhwi (len1)); else - src_buf[0] = tree_low_cst (src1, 0); - memset (src_buf + tree_low_cst (diff, 1), - tree_low_cst (val2, 0), tree_low_cst (len2, 1)); + src_buf[0] = tree_to_shwi (src1); + memset (src_buf + tree_to_uhwi (diff), + tree_to_shwi (val2), tree_to_uhwi (len2)); src_buf[src_len] = '\0'; /* Neither builtin_strncpy_read_str nor builtin_memcpy_read_str handle embedded '\0's. 
*/ @@ -2309,10 +2309,10 @@ simplify_rotate (gimple_stmt_iterator *gsi) return false; /* CNT1 + CNT2 == B case above. */ - if (host_integerp (def_arg2[0], 1) - && host_integerp (def_arg2[1], 1) - && (unsigned HOST_WIDE_INT) tree_low_cst (def_arg2[0], 1) - + tree_low_cst (def_arg2[1], 1) == TYPE_PRECISION (rtype)) + if (tree_fits_uhwi_p (def_arg2[0]) + && tree_fits_uhwi_p (def_arg2[1]) + && (unsigned HOST_WIDE_INT) tree_to_uhwi (def_arg2[0]) + + tree_to_uhwi (def_arg2[1]) == TYPE_PRECISION (rtype)) rotcnt = def_arg2[0]; else if (TREE_CODE (def_arg2[0]) != SSA_NAME || TREE_CODE (def_arg2[1]) != SSA_NAME) @@ -2346,8 +2346,8 @@ simplify_rotate (gimple_stmt_iterator *gsi) /* Check for one shift count being Y and the other B - Y, with optional casts. */ if (cdef_code[i] == MINUS_EXPR - && host_integerp (cdef_arg1[i], 0) - && tree_low_cst (cdef_arg1[i], 0) == TYPE_PRECISION (rtype) + && tree_fits_shwi_p (cdef_arg1[i]) + && tree_to_shwi (cdef_arg1[i]) == TYPE_PRECISION (rtype) && TREE_CODE (cdef_arg2[i]) == SSA_NAME) { tree tem; @@ -2378,8 +2378,8 @@ simplify_rotate (gimple_stmt_iterator *gsi) This alternative is safe even for rotation count of 0. One shift count is Y and the other (-Y) & (B - 1). 
*/ else if (cdef_code[i] == BIT_AND_EXPR - && host_integerp (cdef_arg2[i], 0) - && tree_low_cst (cdef_arg2[i], 0) + && tree_fits_shwi_p (cdef_arg2[i]) + && tree_to_shwi (cdef_arg2[i]) == TYPE_PRECISION (rtype) - 1 && TREE_CODE (cdef_arg1[i]) == SSA_NAME && gimple_assign_rhs_code (stmt) == BIT_IOR_EXPR) @@ -2780,7 +2780,7 @@ associate_pointerplus (gimple_stmt_iterator *gsi) if (gimple_assign_rhs1 (def_stmt) != ptr) return false; - algn = double_int_to_tree (TREE_TYPE (ptr), ~tree_to_double_int (algn)); + algn = wide_int_to_tree (TREE_TYPE (ptr), ~wide_int (algn)); gimple_assign_set_rhs_with_ops (gsi, BIT_AND_EXPR, ptr, algn); fold_stmt_inplace (gsi); update_stmt (stmt); @@ -2942,8 +2942,10 @@ combine_conversions (gimple_stmt_iterator *gsi) tree tem; tem = fold_build2 (BIT_AND_EXPR, inside_type, defop0, - double_int_to_tree - (inside_type, double_int::mask (inter_prec))); + wide_int_to_tree + (inside_type, + wi::mask (inter_prec, false, + TYPE_PRECISION (inside_type)))); if (!useless_type_conversion_p (type, inside_type)) { tem = force_gimple_operand_gsi (gsi, tem, true, NULL_TREE, true, @@ -3027,11 +3029,11 @@ simplify_bitfield_ref (gimple_stmt_iterator *gsi) if (TREE_TYPE (op) != elem_type) return false; - size = TREE_INT_CST_LOW (TYPE_SIZE (elem_type)); - n = TREE_INT_CST_LOW (op1) / size; + size = tree_to_hwi (TYPE_SIZE (elem_type)); + n = tree_to_hwi (op1) / size; if (n != 1) return false; - idx = TREE_INT_CST_LOW (op2) / size; + idx = tree_to_hwi (op2) / size; if (code == VEC_PERM_EXPR) { @@ -3041,7 +3043,7 @@ simplify_bitfield_ref (gimple_stmt_iterator *gsi) if (TREE_CODE (m) != VECTOR_CST) return false; nelts = VECTOR_CST_NELTS (m); - idx = TREE_INT_CST_LOW (VECTOR_CST_ELT (m, idx)); + idx = tree_to_hwi (VECTOR_CST_ELT (m, idx)); idx %= 2 * nelts; if (idx < nelts) { @@ -3085,7 +3087,7 @@ is_combined_permutation_identity (tree mask1, tree mask2) { tree val = VECTOR_CST_ELT (mask, i); gcc_assert (TREE_CODE (val) == INTEGER_CST); - j = TREE_INT_CST_LOW (val) & 
(2 * nelts - 1); + j = tree_to_hwi (val) & (2 * nelts - 1); if (j == i) maybe_identity2 = false; else if (j == i + nelts) @@ -3230,7 +3232,7 @@ simplify_vector_constructor (gimple_stmt_iterator *gsi) nelts = TYPE_VECTOR_SUBPARTS (type); elem_type = TREE_TYPE (type); - elem_size = TREE_INT_CST_LOW (TYPE_SIZE (elem_type)); + elem_size = tree_to_hwi (TYPE_SIZE (elem_type)); sel = XALLOCAVEC (unsigned char, nelts); orig = NULL; @@ -3265,9 +3267,9 @@ simplify_vector_constructor (gimple_stmt_iterator *gsi) return false; orig = ref; } - if (TREE_INT_CST_LOW (TREE_OPERAND (op1, 1)) != elem_size) + if (tree_to_hwi (TREE_OPERAND (op1, 1)) != elem_size) return false; - sel[i] = TREE_INT_CST_LOW (TREE_OPERAND (op1, 2)) / elem_size; + sel[i] = tree_to_hwi (TREE_OPERAND (op1, 2)) / elem_size; if (sel[i] != i) maybe_ident = false; } if (i < nelts) diff --git a/gcc/tree-ssa-loop-im.c b/gcc/tree-ssa-loop-im.c index 15af428f51f..bcbb5e8e71a 100644 --- a/gcc/tree-ssa-loop-im.c +++ b/gcc/tree-ssa-loop-im.c @@ -1633,7 +1633,7 @@ mem_refs_may_alias_p (mem_ref_p mem1, mem_ref_p mem2, /* Perform BASE + OFFSET analysis -- if MEM1 and MEM2 are based on the same object and their offset differ in such a way that the locations cannot overlap, then they cannot alias. */ - double_int size1, size2; + max_wide_int size1, size2; aff_tree off1, off2; /* Perform basic offset and type-based disambiguation. 
*/ @@ -1649,7 +1649,7 @@ mem_refs_may_alias_p (mem_ref_p mem1, mem_ref_p mem2, get_inner_reference_aff (mem2->mem.ref, &off2, &size2); aff_combination_expand (&off1, ttae_cache); aff_combination_expand (&off2, ttae_cache); - aff_combination_scale (&off1, double_int_minus_one); + aff_combination_scale (&off1, -1); aff_combination_add (&off2, &off1); if (aff_comb_cannot_overlap_p (&off2, size1, size2)) diff --git a/gcc/tree-ssa-loop-ivcanon.c b/gcc/tree-ssa-loop-ivcanon.c index a506706b6e8..b2e2c34f39c 100644 --- a/gcc/tree-ssa-loop-ivcanon.c +++ b/gcc/tree-ssa-loop-ivcanon.c @@ -472,7 +472,7 @@ remove_exits_and_undefined_stmts (struct loop *loop, unsigned int npeeled) into unreachable (or trap when debugging experience is supposed to be good). */ if (!elt->is_exit - && elt->bound.ult (double_int::from_uhwi (npeeled))) + && wi::ltu_p (elt->bound, npeeled)) { gimple_stmt_iterator gsi = gsi_for_stmt (elt->stmt); gimple stmt = gimple_build_call @@ -489,7 +489,7 @@ remove_exits_and_undefined_stmts (struct loop *loop, unsigned int npeeled) } /* If we know the exit will be taken after peeling, update. */ else if (elt->is_exit - && elt->bound.ule (double_int::from_uhwi (npeeled))) + && wi::leu_p (elt->bound, npeeled)) { basic_block bb = gimple_bb (elt->stmt); edge exit_edge = EDGE_SUCC (bb, 0); @@ -529,7 +529,7 @@ remove_redundant_iv_tests (struct loop *loop) /* Exit is pointless if it won't be taken before loop reaches upper bound. 
*/ if (elt->is_exit && loop->any_upper_bound - && loop->nb_iterations_upper_bound.ult (elt->bound)) + && wi::ltu_p (loop->nb_iterations_upper_bound, elt->bound)) { basic_block bb = gimple_bb (elt->stmt); edge exit_edge = EDGE_SUCC (bb, 0); @@ -546,8 +546,7 @@ remove_redundant_iv_tests (struct loop *loop) || !integer_zerop (niter.may_be_zero) || !niter.niter || TREE_CODE (niter.niter) != INTEGER_CST - || !loop->nb_iterations_upper_bound.ult - (tree_to_double_int (niter.niter))) + || !wi::ltu_p (loop->nb_iterations_upper_bound, niter.niter)) continue; if (dump_file && (dump_flags & TDF_DETAILS)) @@ -656,9 +655,9 @@ try_unroll_loop_completely (struct loop *loop, If the number of execution of loop is determined by standard induction variable test, then EXIT and EDGE_TO_CANCEL are the two edges leaving from the iv test. */ - if (host_integerp (niter, 1)) + if (tree_fits_uhwi_p (niter)) { - n_unroll = tree_low_cst (niter, 1); + n_unroll = tree_to_uhwi (niter); n_unroll_found = true; edge_to_cancel = EDGE_SUCC (exit->src, 0); if (edge_to_cancel == exit) @@ -928,7 +927,7 @@ canonicalize_loop_induction_variables (struct loop *loop, by find_loop_niter_by_eval. Be sure to keep it for future. */ if (niter && TREE_CODE (niter) == INTEGER_CST) { - record_niter_bound (loop, tree_to_double_int (niter), + record_niter_bound (loop, niter, exit == single_likely_exit (loop), true); } diff --git a/gcc/tree-ssa-loop-ivopts.c b/gcc/tree-ssa-loop-ivopts.c index 798f57f6c28..b16b9b52dc4 100644 --- a/gcc/tree-ssa-loop-ivopts.c +++ b/gcc/tree-ssa-loop-ivopts.c @@ -1563,19 +1563,19 @@ idx_record_use (tree base, tree *idx, signedness of TOP and BOT. 
*/ static bool -constant_multiple_of (tree top, tree bot, double_int *mul) +constant_multiple_of (tree top, tree bot, max_wide_int *mul) { tree mby; enum tree_code code; - double_int res, p0, p1; unsigned precision = TYPE_PRECISION (TREE_TYPE (top)); + max_wide_int res, p0, p1; STRIP_NOPS (top); STRIP_NOPS (bot); if (operand_equal_p (top, bot, 0)) { - *mul = double_int_one; + *mul = 1; return true; } @@ -1590,7 +1590,7 @@ constant_multiple_of (tree top, tree bot, double_int *mul) if (!constant_multiple_of (TREE_OPERAND (top, 0), bot, &res)) return false; - *mul = (res * tree_to_double_int (mby)).sext (precision); + *mul = wi::sext (res * mby, precision); return true; case PLUS_EXPR: @@ -1601,19 +1601,19 @@ constant_multiple_of (tree top, tree bot, double_int *mul) if (code == MINUS_EXPR) p1 = -p1; - *mul = (p0 + p1).sext (precision); + *mul = wi::sext (p0 + p1, precision); return true; case INTEGER_CST: if (TREE_CODE (bot) != INTEGER_CST) return false; - p0 = tree_to_double_int (top).sext (precision); - p1 = tree_to_double_int (bot).sext (precision); - if (p1.is_zero ()) + p0 = wi::sext (top, precision); + p1 = wi::sext (bot, precision); + if (p1 == 0) return false; - *mul = p0.sdivmod (p1, FLOOR_DIV_EXPR, &res).sext (precision); - return res.is_zero (); + *mul = wi::sext (wi::divmod_trunc (p0, p1, SIGNED, &res), precision); + return res == 0; default: return false; @@ -2028,7 +2028,7 @@ strip_offset_1 (tree expr, bool inside_addr, bool top_compref, switch (code) { case INTEGER_CST: - if (!cst_and_fits_in_hwi (expr) + if (!cst_fits_shwi_p (expr) || integer_zerop (expr)) return orig_expr; @@ -2065,7 +2065,7 @@ strip_offset_1 (tree expr, bool inside_addr, bool top_compref, case MULT_EXPR: op1 = TREE_OPERAND (expr, 1); - if (!cst_and_fits_in_hwi (op1)) + if (!cst_fits_shwi_p (op1)) return orig_expr; op0 = TREE_OPERAND (expr, 0); @@ -2087,7 +2087,7 @@ strip_offset_1 (tree expr, bool inside_addr, bool top_compref, return orig_expr; step = array_ref_element_size (expr); 
- if (!cst_and_fits_in_hwi (step)) + if (!cst_fits_shwi_p (step)) break; st = int_cst_value (step); @@ -2112,7 +2112,7 @@ strip_offset_1 (tree expr, bool inside_addr, bool top_compref, tmp = component_ref_field_offset (expr); if (top_compref - && cst_and_fits_in_hwi (tmp)) + && cst_fits_shwi_p (tmp)) { /* Strip the component reference completely. */ op0 = TREE_OPERAND (expr, 0); @@ -2364,7 +2364,7 @@ add_autoinc_candidates (struct ivopts_data *data, tree base, tree step, if (use_bb->loop_father != data->current_loop || !dominated_by_p (CDI_DOMINATORS, data->current_loop->latch, use_bb) || stmt_could_throw_p (use->stmt) - || !cst_and_fits_in_hwi (step)) + || !cst_fits_shwi_p (step)) return; cstepi = int_cst_value (step); @@ -2627,6 +2627,9 @@ new_cost (unsigned runtime, unsigned complexity) { comp_cost cost; + static int ct = 0; + ct++; + cost.cost = runtime; cost.complexity = complexity; @@ -2966,7 +2969,7 @@ get_computation_aff (struct loop *loop, tree common_type, var; tree uutype; aff_tree cbase_aff, var_aff; - double_int rat; + max_wide_int rat; if (TYPE_PRECISION (utype) > TYPE_PRECISION (ctype)) { @@ -3467,6 +3470,7 @@ get_shiftadd_cost (tree expr, enum machine_mode mode, comp_cost cost0, res = add_costs (res, force_expr_to_var_cost (multop, speed)); *cost = res; + return true; } @@ -3581,6 +3585,7 @@ force_expr_to_var_cost (tree expr, bool speed) break; default: + /* Just an arbitrary value, FIXME. 
*/ return new_cost (target_spill_cost[speed], 0); } @@ -3603,7 +3608,7 @@ force_expr_to_var_cost (tree expr, bool speed) mult = op0; if (mult != NULL_TREE - && cst_and_fits_in_hwi (TREE_OPERAND (mult, 1)) + && cst_fits_shwi_p (TREE_OPERAND (mult, 1)) && get_shiftadd_cost (expr, mode, cost0, cost1, mult, speed, &sa_cost)) return sa_cost; @@ -3611,10 +3616,10 @@ force_expr_to_var_cost (tree expr, bool speed) break; case MULT_EXPR: - if (cst_and_fits_in_hwi (op0)) + if (cst_fits_shwi_p (op0)) cost = new_cost (mult_by_coeff_cost (int_cst_value (op0), mode, speed), 0); - else if (cst_and_fits_in_hwi (op1)) + else if (cst_fits_shwi_p (op1)) cost = new_cost (mult_by_coeff_cost (int_cst_value (op1), mode, speed), 0); else @@ -3734,7 +3739,7 @@ ptr_difference_cost (struct ivopts_data *data, type = signed_type_for (TREE_TYPE (e1)); tree_to_aff_combination (e1, type, &aff_e1); tree_to_aff_combination (e2, type, &aff_e2); - aff_combination_scale (&aff_e2, double_int_minus_one); + aff_combination_scale (&aff_e2, -1); aff_combination_add (&aff_e1, &aff_e2); return force_var_cost (data, aff_combination_to_tree (&aff_e1), depends_on); @@ -3789,7 +3794,7 @@ difference_cost (struct ivopts_data *data, type = signed_type_for (TREE_TYPE (e1)); tree_to_aff_combination (e1, type, &aff_e1); tree_to_aff_combination (e2, type, &aff_e2); - aff_combination_scale (&aff_e2, double_int_minus_one); + aff_combination_scale (&aff_e2, -1); aff_combination_add (&aff_e1, &aff_e2); return force_var_cost (data, aff_combination_to_tree (&aff_e1), depends_on); @@ -3908,16 +3913,16 @@ get_loop_invariant_expr_id (struct ivopts_data *data, tree ubase, { tree ind = TREE_OPERAND (usym, 1); if (TREE_CODE (ind) == INTEGER_CST - && host_integerp (ind, 0) - && TREE_INT_CST_LOW (ind) == 0) + && tree_fits_shwi_p (ind) + && tree_to_shwi (ind) == 0) usym = TREE_OPERAND (usym, 0); } if (TREE_CODE (csym) == ARRAY_REF) { tree ind = TREE_OPERAND (csym, 1); if (TREE_CODE (ind) == INTEGER_CST - && host_integerp (ind, 0) - 
&& TREE_INT_CST_LOW (ind) == 0) + && tree_fits_shwi_p (ind) + && tree_to_shwi (ind) == 0) csym = TREE_OPERAND (csym, 0); } if (operand_equal_p (usym, csym, 0)) @@ -3933,7 +3938,7 @@ get_loop_invariant_expr_id (struct ivopts_data *data, tree ubase, tree_to_aff_combination (ub, TREE_TYPE (ub), &ubase_aff); tree_to_aff_combination (cb, TREE_TYPE (cb), &cbase_aff); - aff_combination_scale (&cbase_aff, double_int::from_shwi (-1 * ratio)); + aff_combination_scale (&cbase_aff, -1 * ratio); aff_combination_add (&ubase_aff, &cbase_aff); expr = aff_combination_to_tree (&ubase_aff); return get_expr_id (data, expr); @@ -3963,7 +3968,7 @@ get_computation_cost_at (struct ivopts_data *data, HOST_WIDE_INT ratio, aratio; bool var_present, symbol_present, stmt_is_after_inc; comp_cost cost; - double_int rat; + max_wide_int rat; bool speed = optimize_bb_for_speed_p (gimple_bb (at)); enum machine_mode mem_mode = (address_p ? TYPE_MODE (TREE_TYPE (*use->op_p)) @@ -4014,7 +4019,7 @@ get_computation_cost_at (struct ivopts_data *data, redundancy elimination is likely to transform the code so that it uses value of the variable before increment anyway, so it is not that much unrealistic. */ - if (cst_and_fits_in_hwi (cstep)) + if (cst_fits_shwi_p (cstep)) cstepi = int_cst_value (cstep); else cstepi = 0; @@ -4022,7 +4027,7 @@ get_computation_cost_at (struct ivopts_data *data, if (!constant_multiple_of (ustep, cstep, &rat)) return infinite_cost; - if (rat.fits_shwi ()) + if (wi::fits_shwi_p (rat)) ratio = rat.to_shwi (); else return infinite_cost; @@ -4039,7 +4044,7 @@ get_computation_cost_at (struct ivopts_data *data, (also holds in the case ratio == -1, TODO. 
*/ - if (cst_and_fits_in_hwi (cbase)) + if (cst_fits_shwi_p (cbase)) { offset = - ratio * int_cst_value (cbase); cost = difference_cost (data, @@ -4294,7 +4299,7 @@ iv_period (struct iv *iv) period = build_low_bits_mask (type, (TYPE_PRECISION (type) - - tree_low_cst (pow2div, 1))); + - tree_to_uhwi (pow2div))); return period; } @@ -4492,7 +4497,7 @@ iv_elimination_compare_lt (struct ivopts_data *data, /* We need to be able to decide whether candidate is increasing or decreasing in order to choose the right comparison operator. */ - if (!cst_and_fits_in_hwi (cand->iv->step)) + if (!cst_fits_shwi_p (cand->iv->step)) return false; step = int_cst_value (cand->iv->step); @@ -4532,11 +4537,11 @@ iv_elimination_compare_lt (struct ivopts_data *data, tree_to_aff_combination (niter->niter, nit_type, &nit); tree_to_aff_combination (fold_convert (nit_type, a), nit_type, &tmpa); tree_to_aff_combination (fold_convert (nit_type, b), nit_type, &tmpb); - aff_combination_scale (&nit, double_int_minus_one); - aff_combination_scale (&tmpa, double_int_minus_one); + aff_combination_scale (&nit, -1); + aff_combination_scale (&tmpa, -1); aff_combination_add (&tmpb, &tmpa); aff_combination_add (&tmpb, &nit); - if (tmpb.n != 0 || tmpb.offset != double_int_one) + if (tmpb.n != 0 || tmpb.offset != 1) return false; /* Finally, check that CAND->IV->BASE - CAND->IV->STEP * A does not @@ -4622,13 +4627,13 @@ may_eliminate_iv (struct ivopts_data *data, entire loop and compare against that instead. */ else { - double_int period_value, max_niter; + max_wide_int period_value, max_niter; max_niter = desc->max; if (stmt_after_increment (loop, cand, use->stmt)) - max_niter += double_int_one; - period_value = tree_to_double_int (period); - if (max_niter.ugt (period_value)) + max_niter += 1; + period_value = period; + if (wi::gtu_p (max_niter, period_value)) { /* See if we can take advantage of inferred loop bound information. 
*/ if (data->loop_single_exit_p) @@ -4636,7 +4641,7 @@ may_eliminate_iv (struct ivopts_data *data, if (!max_loop_iterations (loop, &max_niter)) return false; /* The loop bound is already adjusted by adding 1. */ - if (max_niter.ugt (period_value)) + if (wi::gtu_p (max_niter, period_value)) return false; } else @@ -4644,6 +4649,8 @@ may_eliminate_iv (struct ivopts_data *data, } } + static int cnt = 0; + cnt++; cand_value_at (loop, cand, use->stmt, desc->niter, &bnd); *bound = aff_combination_to_tree (&bnd); diff --git a/gcc/tree-ssa-loop-niter.c b/gcc/tree-ssa-loop-niter.c index 113c7d103be..02fe2868618 100644 --- a/gcc/tree-ssa-loop-niter.c +++ b/gcc/tree-ssa-loop-niter.c @@ -38,6 +38,7 @@ along with GCC; see the file COPYING3. If not see #include "diagnostic-core.h" #include "tree-inline.h" #include "tree-pass.h" +#include "wide-int-print.h" #define SWAP(X, Y) do { affine_iv *tmp = (X); (X) = (Y); (Y) = tmp; } while (0) @@ -68,7 +69,7 @@ split_to_var_and_offset (tree expr, tree *var, mpz_t offset) { tree type = TREE_TYPE (expr); tree op0, op1; - double_int off; + max_wide_int off; bool negate = false; *var = expr; @@ -89,18 +90,18 @@ split_to_var_and_offset (tree expr, tree *var, mpz_t offset) break; *var = op0; + off = op1; /* Always sign extend the offset. 
*/ - off = tree_to_double_int (op1); - off = off.sext (TYPE_PRECISION (type)); - mpz_set_double_int (offset, off, false); + off = wi::sext (off, TYPE_PRECISION (type)); + wi::to_mpz (off, offset, SIGNED); if (negate) mpz_neg (offset, offset); break; case INTEGER_CST: *var = build_int_cst_type (type, 0); - off = tree_to_double_int (expr); - mpz_set_double_int (offset, off, TYPE_UNSIGNED (type)); + off = expr; + wi::to_mpz (off, offset, TYPE_SIGN (type)); break; default: @@ -170,7 +171,7 @@ bound_difference_of_offsetted_base (tree type, mpz_t x, mpz_t y, } mpz_init (m); - mpz_set_double_int (m, double_int::mask (TYPE_PRECISION (type)), true); + wi::to_mpz (wi::minus_one (TYPE_PRECISION (type)), m, UNSIGNED); mpz_add_ui (m, m, 1); mpz_sub (bnds->up, x, y); mpz_set (bnds->below, bnds->up); @@ -449,15 +450,15 @@ end: difference of two values in TYPE. */ static void -bounds_add (bounds *bnds, double_int delta, tree type) +bounds_add (bounds *bnds, max_wide_int delta, tree type) { mpz_t mdelta, max; mpz_init (mdelta); - mpz_set_double_int (mdelta, delta, false); + wi::to_mpz (delta, mdelta, SIGNED); mpz_init (max); - mpz_set_double_int (max, double_int::mask (TYPE_PRECISION (type)), true); + wi::to_mpz (wi::minus_one (TYPE_PRECISION (type)), max, UNSIGNED); mpz_add (bnds->up, bnds->up, mdelta); mpz_add (bnds->below, bnds->below, mdelta); @@ -502,8 +503,8 @@ inverse (tree x, tree mask) unsigned HOST_WIDE_INT imask; unsigned HOST_WIDE_INT irslt = 1; - gcc_assert (cst_and_fits_in_hwi (x)); - gcc_assert (cst_and_fits_in_hwi (mask)); + gcc_assert (cst_fits_shwi_p (x)); + gcc_assert (cst_fits_shwi_p (mask)); ix = int_cst_value (x); imask = int_cst_value (mask); @@ -551,7 +552,7 @@ static void number_of_iterations_ne_max (mpz_t bnd, bool no_overflow, tree c, tree s, bounds *bnds, bool exit_must_be_taken) { - double_int max; + max_wide_int max; mpz_t d; tree type = TREE_TYPE (c); bool bnds_u_valid = ((no_overflow && exit_must_be_taken) @@ -560,10 +561,8 @@ 
number_of_iterations_ne_max (mpz_t bnd, bool no_overflow, tree c, tree s, if (integer_onep (s) || (TREE_CODE (c) == INTEGER_CST && TREE_CODE (s) == INTEGER_CST - && tree_to_double_int (c).mod (tree_to_double_int (s), - TYPE_UNSIGNED (type), - EXACT_DIV_EXPR).is_zero ()) - || (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (c)) + && wi::mod_trunc (c, s, TYPE_SIGN (type)) == 0) + || (TYPE_OVERFLOW_UNDEFINED (type) && multiple_of_p (type, c, s))) { /* If C is an exact multiple of S, then its value will be reached before @@ -581,15 +580,15 @@ number_of_iterations_ne_max (mpz_t bnd, bool no_overflow, tree c, tree s, the whole # of iterations analysis will fail). */ if (!no_overflow) { - max = double_int::mask (TYPE_PRECISION (type) - - tree_low_cst (num_ending_zeros (s), 1)); - mpz_set_double_int (bnd, max, true); + max = wi::mask <max_wide_int> (TYPE_PRECISION (type) - wi::ctz (s), + false); + wi::to_mpz (max, bnd, UNSIGNED); return; } /* Now we know that the induction variable does not overflow, so the loop iterates at most (range of type / S) times. */ - mpz_set_double_int (bnd, double_int::mask (TYPE_PRECISION (type)), true); + wi::to_mpz (wi::minus_one (TYPE_PRECISION (type)), bnd, UNSIGNED); /* If the induction variable is guaranteed to reach the value of C before overflow, ... */ @@ -598,13 +597,13 @@ number_of_iterations_ne_max (mpz_t bnd, bool no_overflow, tree c, tree s, /* ... then we can strengthen this to C / S, and possibly we can use the upper bound on C given by BNDS. 
*/ if (TREE_CODE (c) == INTEGER_CST) - mpz_set_double_int (bnd, tree_to_double_int (c), true); + wi::to_mpz (c, bnd, UNSIGNED); else if (bnds_u_valid) mpz_set (bnd, bnds->up); } mpz_init (d); - mpz_set_double_int (d, tree_to_double_int (s), true); + wi::to_mpz (s, d, UNSIGNED); mpz_fdiv_q (bnd, bnd, d); mpz_clear (d); } @@ -655,7 +654,8 @@ number_of_iterations_ne (tree type, affine_iv *iv, tree final, mpz_init (max); number_of_iterations_ne_max (max, iv->no_overflow, c, s, bnds, exit_must_be_taken); - niter->max = mpz_get_double_int (niter_type, max, false); + niter->max = max_wide_int::from (wi::from_mpz (niter_type, max, false), + TYPE_SIGN (niter_type)); mpz_clear (max); /* First the trivial cases -- when the step is 1. */ @@ -671,7 +671,7 @@ number_of_iterations_ne (tree type, affine_iv *iv, tree final, bits = num_ending_zeros (s); bound = build_low_bits_mask (niter_type, (TYPE_PRECISION (niter_type) - - tree_low_cst (bits, 1))); + - tree_to_uhwi (bits))); d = fold_binary_to_constant (LSHIFT_EXPR, niter_type, build_int_cst (niter_type, 1), bits); @@ -728,7 +728,7 @@ number_of_iterations_lt_to_ne (tree type, affine_iv *iv0, affine_iv *iv1, tmod = fold_convert (type1, mod); mpz_init (mmod); - mpz_set_double_int (mmod, tree_to_double_int (mod), true); + wi::to_mpz (mod, mmod, UNSIGNED); mpz_neg (mmod, mmod); /* If the induction variable does not overflow and the exit is taken, @@ -810,7 +810,7 @@ number_of_iterations_lt_to_ne (tree type, affine_iv *iv0, affine_iv *iv1, niter->may_be_zero = fold_build2 (TRUTH_OR_EXPR, boolean_type_node, niter->may_be_zero, noloop); - bounds_add (bnds, tree_to_double_int (mod), type); + bounds_add (bnds, mod, type); *delta = fold_build2 (PLUS_EXPR, niter_type, *delta, mod); ret = true; @@ -900,7 +900,7 @@ assert_loop_rolls_lt (tree type, affine_iv *iv0, affine_iv *iv1, tree assumption = boolean_true_node, bound, diff; tree mbz, mbzl, mbzr, type1; bool rolls_p, no_overflow_p; - double_int dstep; + max_wide_int dstep; mpz_t mstep, 
max; /* We are going to compute the number of iterations as @@ -926,22 +926,22 @@ assert_loop_rolls_lt (tree type, affine_iv *iv0, affine_iv *iv1, /* First check whether the answer does not follow from the bounds we gathered before. */ if (integer_nonzerop (iv0->step)) - dstep = tree_to_double_int (iv0->step); + dstep = iv0->step; else { - dstep = tree_to_double_int (iv1->step).sext (TYPE_PRECISION (type)); + dstep = wi::sext (iv1->step, TYPE_PRECISION (type)); dstep = -dstep; } mpz_init (mstep); - mpz_set_double_int (mstep, dstep, true); + wi::to_mpz (dstep, mstep, UNSIGNED); mpz_neg (mstep, mstep); mpz_add_ui (mstep, mstep, 1); rolls_p = mpz_cmp (mstep, bnds->below) <= 0; mpz_init (max); - mpz_set_double_int (max, double_int::mask (TYPE_PRECISION (type)), true); + wi::to_mpz (wi::minus_one (TYPE_PRECISION (type)), max, UNSIGNED); mpz_add (max, max, mstep); no_overflow_p = (mpz_cmp (bnds->up, max) <= 0 /* For pointers, only values lying inside a single object @@ -1068,7 +1068,9 @@ number_of_iterations_lt (tree type, affine_iv *iv0, affine_iv *iv1, niter->may_be_zero = fold_build2 (LT_EXPR, boolean_type_node, iv1->base, iv0->base); niter->niter = delta; - niter->max = mpz_get_double_int (niter_type, bnds->up, false); + niter->max = max_wide_int::from (wi::from_mpz (niter_type, bnds->up, + false), + TYPE_SIGN (niter_type)); return true; } @@ -1111,11 +1113,12 @@ number_of_iterations_lt (tree type, affine_iv *iv0, affine_iv *iv1, mpz_init (mstep); mpz_init (tmp); - mpz_set_double_int (mstep, tree_to_double_int (step), true); + wi::to_mpz (step, mstep, UNSIGNED); mpz_add (tmp, bnds->up, mstep); mpz_sub_ui (tmp, tmp, 1); mpz_fdiv_q (tmp, tmp, mstep); - niter->max = mpz_get_double_int (niter_type, tmp, false); + niter->max = max_wide_int::from (wi::from_mpz (niter_type, tmp, false), + TYPE_SIGN (niter_type)); mpz_clear (mstep); mpz_clear (tmp); @@ -1178,7 +1181,7 @@ number_of_iterations_le (tree type, affine_iv *iv0, affine_iv *iv1, iv0->base = fold_build2 (MINUS_EXPR, 
type1, iv0->base, build_int_cst (type1, 1)); - bounds_add (bnds, double_int_one, type1); + bounds_add (bnds, 1, type1); return number_of_iterations_lt (type, iv0, iv1, niter, exit_must_be_taken, bnds); @@ -1250,8 +1253,7 @@ number_of_iterations_cond (struct loop *loop, niter->assumptions = boolean_true_node; niter->may_be_zero = boolean_false_node; niter->niter = NULL_TREE; - niter->max = double_int_zero; - + niter->max = 0; niter->bound = NULL_TREE; niter->cmp = ERROR_MARK; @@ -1323,7 +1325,7 @@ number_of_iterations_cond (struct loop *loop, if (tem && integer_zerop (tem)) { niter->niter = build_int_cst (unsigned_type_for (type), 0); - niter->max = double_int_zero; + niter->max = 0; return true; } @@ -1399,7 +1401,7 @@ number_of_iterations_cond (struct loop *loop, fprintf (dump_file, " # of iterations "); print_generic_expr (dump_file, niter->niter, TDF_SLIM); fprintf (dump_file, ", bounded by "); - dump_double_int (dump_file, niter->max, true); + print_decu (niter->max, dump_file); fprintf (dump_file, "\n"); } else @@ -1911,7 +1913,7 @@ number_of_iterations_exit (struct loop *loop, edge exit, /* If NITER has simplified into a constant, update MAX. */ if (TREE_CODE (niter->niter) == INTEGER_CST) - niter->max = tree_to_double_int (niter->niter); + niter->max = niter->niter; if (integer_onep (niter->assumptions)) return true; @@ -2023,7 +2025,7 @@ find_loop_niter (struct loop *loop, edge *exit) bool finite_loop_p (struct loop *loop) { - double_int nit; + max_wide_int nit; int flags; if (flag_unsafe_loop_optimizations) @@ -2337,13 +2339,13 @@ find_loop_niter_by_eval (struct loop *loop, edge *exit) */ -static double_int derive_constant_upper_bound_ops (tree, tree, - enum tree_code, tree); +static max_wide_int derive_constant_upper_bound_ops (tree, tree, + enum tree_code, tree); /* Returns a constant upper bound on the value of the right-hand side of an assignment statement STMT. 
*/ -static double_int +static max_wide_int derive_constant_upper_bound_assign (gimple stmt) { enum tree_code code = gimple_assign_rhs_code (stmt); @@ -2358,7 +2360,7 @@ derive_constant_upper_bound_assign (gimple stmt) is considered to be unsigned. If its type is signed, its value must be nonnegative. */ -static double_int +static max_wide_int derive_constant_upper_bound (tree val) { enum tree_code code; @@ -2372,12 +2374,12 @@ derive_constant_upper_bound (tree val) whose type is TYPE. The expression is considered to be unsigned. If its type is signed, its value must be nonnegative. */ -static double_int +static max_wide_int derive_constant_upper_bound_ops (tree type, tree op0, enum tree_code code, tree op1) { tree subtype, maxt; - double_int bnd, max, mmax, cst; + max_wide_int bnd, max, mmax, cst; gimple stmt; if (INTEGRAL_TYPE_P (type)) @@ -2385,12 +2387,12 @@ derive_constant_upper_bound_ops (tree type, tree op0, else maxt = upper_bound_in_type (type, type); - max = tree_to_double_int (maxt); + max = maxt; switch (code) { case INTEGER_CST: - return tree_to_double_int (op0); + return op0; CASE_CONVERT: subtype = TREE_TYPE (op0); @@ -2412,7 +2414,7 @@ derive_constant_upper_bound_ops (tree type, tree op0, /* If the bound does not fit in TYPE, max. value of TYPE could be attained. */ - if (max.ult (bnd)) + if (wi::ltu_p (max, bnd)) return max; return bnd; @@ -2427,25 +2429,25 @@ derive_constant_upper_bound_ops (tree type, tree op0, /* Canonicalize to OP0 - CST. Consider CST to be signed, in order to choose the most logical way how to treat this constant regardless of the signedness of the type. */ - cst = tree_to_double_int (op1); - cst = cst.sext (TYPE_PRECISION (type)); + cst = op1; + cst = wi::sext (cst, TYPE_PRECISION (type)); if (code != MINUS_EXPR) cst = -cst; bnd = derive_constant_upper_bound (op0); - if (cst.is_negative ()) + if (wi::neg_p (cst)) { cst = -cst; /* Avoid CST == 0x80000... 
*/ - if (cst.is_negative ()) + if (wi::neg_p (cst)) return max;; /* OP0 + CST. We need to check that BND <= MAX (type) - CST. */ mmax -= cst; - if (bnd.ugt (mmax)) + if (wi::ltu_p (bnd, max)) return max; return bnd + cst; @@ -2465,13 +2467,13 @@ derive_constant_upper_bound_ops (tree type, tree op0, /* This should only happen if the type is unsigned; however, for buggy programs that use overflowing signed arithmetics even with -fno-wrapv, this condition may also be true for signed values. */ - if (bnd.ult (cst)) + if (wi::ltu_p (bnd, cst)) return max; if (TYPE_UNSIGNED (type)) { tree tem = fold_binary (GE_EXPR, boolean_type_node, op0, - double_int_to_tree (type, cst)); + wide_int_to_tree (type, cst)); if (!tem || integer_nonzerop (tem)) return max; } @@ -2488,13 +2490,13 @@ derive_constant_upper_bound_ops (tree type, tree op0, return max; bnd = derive_constant_upper_bound (op0); - return bnd.udiv (tree_to_double_int (op1), FLOOR_DIV_EXPR); + return wi::udiv_floor (bnd, op1); case BIT_AND_EXPR: if (TREE_CODE (op1) != INTEGER_CST || tree_int_cst_sign_bit (op1)) return max; - return tree_to_double_int (op1); + return op1; case SSA_NAME: stmt = SSA_NAME_DEF_STMT (op0); @@ -2512,7 +2514,7 @@ derive_constant_upper_bound_ops (tree type, tree op0, static void do_warn_aggressive_loop_optimizations (struct loop *loop, - double_int i_bound, gimple stmt) + max_wide_int i_bound, gimple stmt) { /* Don't warn if the loop doesn't have known constant bound. */ if (!loop->nb_iterations @@ -2525,7 +2527,7 @@ do_warn_aggressive_loop_optimizations (struct loop *loop, || loop->warned_aggressive_loop_optimizations /* Only warn if undefined behavior gives us lower estimate than the known constant bound. */ - || i_bound.ucmp (tree_to_double_int (loop->nb_iterations)) >= 0 + || wi::cmpu (i_bound, loop->nb_iterations) >= 0 /* And undefined behavior happens unconditionally. 
*/ || !dominated_by_p (CDI_DOMINATORS, loop->latch, gimple_bb (stmt))) return; @@ -2537,8 +2539,8 @@ do_warn_aggressive_loop_optimizations (struct loop *loop, gimple estmt = last_stmt (e->src); if (warning_at (gimple_location (stmt), OPT_Waggressive_loop_optimizations, "iteration %E invokes undefined behavior", - double_int_to_tree (TREE_TYPE (loop->nb_iterations), - i_bound))) + wide_int_to_tree (TREE_TYPE (loop->nb_iterations), + i_bound))) inform (gimple_location (estmt), "containing loop"); loop->warned_aggressive_loop_optimizations = true; } @@ -2548,13 +2550,13 @@ do_warn_aggressive_loop_optimizations (struct loop *loop, is taken at last when the STMT is executed BOUND + 1 times. REALISTIC is true if BOUND is expected to be close to the real number of iterations. UPPER is true if we are sure the loop iterates at most - BOUND times. I_BOUND is an unsigned double_int upper estimate on BOUND. */ + BOUND times. I_BOUND is an unsigned wide_int upper estimate on BOUND. */ static void -record_estimate (struct loop *loop, tree bound, double_int i_bound, +record_estimate (struct loop *loop, tree bound, max_wide_int i_bound, gimple at_stmt, bool is_exit, bool realistic, bool upper) { - double_int delta; + max_wide_int delta; if (dump_file && (dump_flags & TDF_DETAILS)) { @@ -2564,7 +2566,7 @@ record_estimate (struct loop *loop, tree bound, double_int i_bound, upper ? 
"" : "probably "); print_generic_expr (dump_file, bound, TDF_SLIM); fprintf (dump_file, " (bounded by "); - dump_double_int (dump_file, i_bound, true); + print_decu (i_bound, dump_file); fprintf (dump_file, ") + 1 times in loop %d.\n", loop->num); } @@ -2573,7 +2575,7 @@ record_estimate (struct loop *loop, tree bound, double_int i_bound, if (TREE_CODE (bound) != INTEGER_CST) realistic = false; else - gcc_checking_assert (i_bound == tree_to_double_int (bound)); + gcc_checking_assert (i_bound == bound); if (!upper && !realistic) return; @@ -2604,13 +2606,13 @@ record_estimate (struct loop *loop, tree bound, double_int i_bound, otherwise it can be executed BOUND + 1 times. We will lower the estimate later if such statement must be executed on last iteration */ if (is_exit) - delta = double_int_zero; + delta = 0; else - delta = double_int_one; + delta = 1; i_bound += delta; /* If an overflow occurred, ignore the result. */ - if (i_bound.ult (delta)) + if (wi::ltu_p (i_bound, delta)) return; if (upper && !is_exit) @@ -2630,7 +2632,7 @@ record_nonwrapping_iv (struct loop *loop, tree base, tree step, gimple stmt, { tree niter_bound, extreme, delta; tree type = TREE_TYPE (base), unsigned_type; - double_int max; + max_wide_int max; if (TREE_CODE (step) != INTEGER_CST || integer_zerop (step)) return; @@ -2975,27 +2977,21 @@ infer_loop_bounds_from_undefined (struct loop *loop) free (bbs); } - - -/* Compare double ints, callback for qsort. */ +/* Compare wide ints, callback for qsort. */ static int -double_int_cmp (const void *p1, const void *p2) +wide_int_cmp (const void *p1, const void *p2) { - const double_int *d1 = (const double_int *)p1; - const double_int *d2 = (const double_int *)p2; - if (*d1 == *d2) - return 0; - if (d1->ult (*d2)) - return -1; - return 1; + const max_wide_int *d1 = (const max_wide_int *)p1; + const max_wide_int *d2 = (const max_wide_int *)p2; + return wi::cmpu (*d1, *d2); } /* Return index of BOUND in BOUNDS array sorted in increasing order. 
Lookup by binary search. */ static int -bound_index (vec<double_int> bounds, double_int bound) +bound_index (vec<max_wide_int> bounds, const max_wide_int &bound) { unsigned int end = bounds.length (); unsigned int begin = 0; @@ -3004,11 +3000,11 @@ bound_index (vec<double_int> bounds, double_int bound) while (begin != end) { unsigned int middle = (begin + end) / 2; - double_int index = bounds[middle]; + max_wide_int index = bounds[middle]; if (index == bound) return middle; - else if (index.ult (bound)) + else if (wi::ltu_p (index, bound)) begin = middle + 1; else end = middle; @@ -3027,7 +3023,7 @@ discover_iteration_bound_by_body_walk (struct loop *loop) { pointer_map_t *bb_bounds; struct nb_iter_bound *elt; - vec<double_int> bounds = vNULL; + vec<max_wide_int> bounds = vNULL; vec<vec<basic_block> > queues = vNULL; vec<basic_block> queue = vNULL; ptrdiff_t queue_index; @@ -3037,20 +3033,20 @@ discover_iteration_bound_by_body_walk (struct loop *loop) /* Discover what bounds may interest us. */ for (elt = loop->bounds; elt; elt = elt->next) { - double_int bound = elt->bound; + max_wide_int bound = elt->bound; /* Exit terminates loop at given iteration, while non-exits produce undefined effect on the next iteration. */ if (!elt->is_exit) { - bound += double_int_one; + bound += 1; /* If an overflow occurred, ignore the result. */ - if (bound.is_zero ()) + if (bound == 0) continue; } if (!loop->any_upper_bound - || bound.ult (loop->nb_iterations_upper_bound)) + || wi::ltu_p (bound, loop->nb_iterations_upper_bound)) bounds.safe_push (bound); } @@ -3063,7 +3059,7 @@ discover_iteration_bound_by_body_walk (struct loop *loop) /* Sort the bounds in decreasing order. */ qsort (bounds.address (), bounds.length (), - sizeof (double_int), double_int_cmp); + sizeof (max_wide_int), wide_int_cmp); /* For every basic block record the lowest bound that is guaranteed to terminate the loop. 
*/ @@ -3071,17 +3067,17 @@ discover_iteration_bound_by_body_walk (struct loop *loop) bb_bounds = pointer_map_create (); for (elt = loop->bounds; elt; elt = elt->next) { - double_int bound = elt->bound; + max_wide_int bound = elt->bound; if (!elt->is_exit) { - bound += double_int_one; + bound += 1; /* If an overflow occurred, ignore the result. */ - if (bound.is_zero ()) + if (bound == 0) continue; } if (!loop->any_upper_bound - || bound.ult (loop->nb_iterations_upper_bound)) + || wi::ltu_p (bound, loop->nb_iterations_upper_bound)) { ptrdiff_t index = bound_index (bounds, bound); void **entry = pointer_map_contains (bb_bounds, @@ -3181,7 +3177,7 @@ discover_iteration_bound_by_body_walk (struct loop *loop) if (dump_file && (dump_flags & TDF_DETAILS)) { fprintf (dump_file, "Found better loop bound "); - dump_double_int (dump_file, bounds[latch_index], true); + print_decu (bounds[latch_index], dump_file); fprintf (dump_file, "\n"); } record_niter_bound (loop, bounds[latch_index], false, true); @@ -3216,7 +3212,7 @@ maybe_lower_iteration_bound (struct loop *loop) for (elt = loop->bounds; elt; elt = elt->next) { if (!elt->is_exit - && elt->bound.ult (loop->nb_iterations_upper_bound)) + && wi::ltu_p (elt->bound, loop->nb_iterations_upper_bound)) { if (!not_executed_last_iteration) not_executed_last_iteration = pointer_set_create (); @@ -3290,7 +3286,7 @@ maybe_lower_iteration_bound (struct loop *loop) if (dump_file && (dump_flags & TDF_DETAILS)) fprintf (dump_file, "Reducing loop iteration estimate by 1; " "undefined statement must be executed at the last iteration.\n"); - record_niter_bound (loop, loop->nb_iterations_upper_bound - double_int_one, + record_niter_bound (loop, loop->nb_iterations_upper_bound - 1, false, true); } BITMAP_FREE (visited); @@ -3309,7 +3305,7 @@ estimate_numbers_of_iterations_loop (struct loop *loop) unsigned i; struct tree_niter_desc niter_desc; edge ex; - double_int bound; + max_wide_int bound; edge likely_exit; /* Give up if we already have 
tried to compute an estimation. */ @@ -3356,7 +3352,7 @@ estimate_numbers_of_iterations_loop (struct loop *loop) if (loop->header->count != 0) { gcov_type nit = expected_loop_iterations_unbounded (loop) + 1; - bound = gcov_type_to_double_int (nit); + bound = gcov_type_to_wide_int (nit); record_niter_bound (loop, bound, true, false); } @@ -3367,8 +3363,7 @@ estimate_numbers_of_iterations_loop (struct loop *loop) && TREE_CODE (loop->nb_iterations) == INTEGER_CST) { loop->any_upper_bound = true; - loop->nb_iterations_upper_bound - = tree_to_double_int (loop->nb_iterations); + loop->nb_iterations_upper_bound = loop->nb_iterations; } } @@ -3378,7 +3373,7 @@ estimate_numbers_of_iterations_loop (struct loop *loop) the function returns false, otherwise returns true. */ bool -estimated_loop_iterations (struct loop *loop, double_int *nit) +estimated_loop_iterations (struct loop *loop, max_wide_int *nit) { /* When SCEV information is available, try to update loop iterations estimate. Otherwise just return whatever we recorded earlier. */ @@ -3395,13 +3390,13 @@ estimated_loop_iterations (struct loop *loop, double_int *nit) HOST_WIDE_INT estimated_loop_iterations_int (struct loop *loop) { - double_int nit; + max_wide_int nit; HOST_WIDE_INT hwi_nit; if (!estimated_loop_iterations (loop, &nit)) return -1; - if (!nit.fits_shwi ()) + if (!wi::fits_shwi_p (nit)) return -1; hwi_nit = nit.to_shwi (); @@ -3414,7 +3409,7 @@ estimated_loop_iterations_int (struct loop *loop) false, otherwise returns true. */ bool -max_loop_iterations (struct loop *loop, double_int *nit) +max_loop_iterations (struct loop *loop, max_wide_int *nit) { /* When SCEV information is available, try to update loop iterations estimate. Otherwise just return whatever we recorded earlier. 
*/ @@ -3431,13 +3426,13 @@ max_loop_iterations (struct loop *loop, double_int *nit) HOST_WIDE_INT max_loop_iterations_int (struct loop *loop) { - double_int nit; + max_wide_int nit; HOST_WIDE_INT hwi_nit; if (!max_loop_iterations (loop, &nit)) return -1; - if (!nit.fits_shwi ()) + if (!wi::fits_shwi_p (nit)) return -1; hwi_nit = nit.to_shwi (); @@ -3468,18 +3463,18 @@ estimated_stmt_executions_int (struct loop *loop) false, otherwise returns true. */ bool -max_stmt_executions (struct loop *loop, double_int *nit) +max_stmt_executions (struct loop *loop, max_wide_int *nit) { - double_int nit_minus_one; + max_wide_int nit_minus_one; if (!max_loop_iterations (loop, nit)) return false; nit_minus_one = *nit; - *nit += double_int_one; + *nit += 1; - return (*nit).ugt (nit_minus_one); + return wi::gtu_p (*nit, nit_minus_one); } /* Sets NIT to the estimated number of executions of the latch of the @@ -3487,18 +3482,18 @@ max_stmt_executions (struct loop *loop, double_int *nit) false, otherwise returns true. */ bool -estimated_stmt_executions (struct loop *loop, double_int *nit) +estimated_stmt_executions (struct loop *loop, max_wide_int *nit) { - double_int nit_minus_one; + max_wide_int nit_minus_one; if (!estimated_loop_iterations (loop, nit)) return false; nit_minus_one = *nit; - *nit += double_int_one; + *nit += 1; - return (*nit).ugt (nit_minus_one); + return wi::gtu_p (*nit, nit_minus_one); } /* Records estimates on numbers of iterations of loops. */ @@ -3570,7 +3565,7 @@ n_of_executions_at_most (gimple stmt, struct nb_iter_bound *niter_bound, tree niter) { - double_int bound = niter_bound->bound; + max_wide_int bound = niter_bound->bound; tree nit_type = TREE_TYPE (niter), e; enum tree_code cmp; @@ -3578,7 +3573,7 @@ n_of_executions_at_most (gimple stmt, /* If the bound does not even fit into NIT_TYPE, it cannot tell us that the number of iterations is small. 
*/ - if (!double_int_fits_to_tree_p (nit_type, bound)) + if (!wi::fits_to_tree_p (bound, nit_type)) return false; /* We know that NITER_BOUND->stmt is executed at most NITER_BOUND->bound + 1 @@ -3621,16 +3616,16 @@ n_of_executions_at_most (gimple stmt, gsi_next (&bsi)) if (gimple_has_side_effects (gsi_stmt (bsi))) return false; - bound += double_int_one; - if (bound.is_zero () - || !double_int_fits_to_tree_p (nit_type, bound)) + bound += 1; + if (bound == 0 + || !wi::fits_to_tree_p (bound, nit_type)) return false; } cmp = GT_EXPR; } e = fold_binary (cmp, boolean_type_node, - niter, double_int_to_tree (nit_type, bound)); + niter, wide_int_to_tree (nit_type, bound)); return e && integer_nonzerop (e); } @@ -3668,7 +3663,7 @@ scev_probably_wraps_p (tree base, tree step, tree unsigned_type, valid_niter; tree type = TREE_TYPE (step); tree e; - double_int niter; + max_wide_int niter; struct nb_iter_bound *bound; /* FIXME: We really need something like @@ -3734,10 +3729,10 @@ scev_probably_wraps_p (tree base, tree step, estimate_numbers_of_iterations_loop (loop); if (max_loop_iterations (loop, &niter) - && double_int_fits_to_tree_p (TREE_TYPE (valid_niter), niter) + && wi::fits_to_tree_p (niter, TREE_TYPE (valid_niter)) && (e = fold_binary (GT_EXPR, boolean_type_node, valid_niter, - double_int_to_tree (TREE_TYPE (valid_niter), - niter))) != NULL + wide_int_to_tree (TREE_TYPE (valid_niter), + niter))) != NULL && integer_nonzerop (e)) { fold_undefer_and_ignore_overflow_warnings (); diff --git a/gcc/tree-ssa-loop-niter.h b/gcc/tree-ssa-loop-niter.h index 194550762d8..a794843380d 100644 --- a/gcc/tree-ssa-loop-niter.h +++ b/gcc/tree-ssa-loop-niter.h @@ -29,14 +29,14 @@ extern tree find_loop_niter (struct loop *, edge *); extern bool finite_loop_p (struct loop *); extern tree loop_niter_by_eval (struct loop *, edge); extern tree find_loop_niter_by_eval (struct loop *, edge *); -extern bool estimated_loop_iterations (struct loop *, double_int *); +extern bool 
estimated_loop_iterations (struct loop *, max_wide_int *); extern HOST_WIDE_INT estimated_loop_iterations_int (struct loop *); -extern bool max_loop_iterations (struct loop *, double_int *); +extern bool max_loop_iterations (struct loop *, max_wide_int *); extern HOST_WIDE_INT max_loop_iterations_int (struct loop *); extern HOST_WIDE_INT max_stmt_executions_int (struct loop *); extern HOST_WIDE_INT estimated_stmt_executions_int (struct loop *); -extern bool max_stmt_executions (struct loop *, double_int *); -extern bool estimated_stmt_executions (struct loop *, double_int *); +extern bool max_stmt_executions (struct loop *, max_wide_int *); +extern bool estimated_stmt_executions (struct loop *, max_wide_int *); extern void estimate_numbers_of_iterations (void); extern bool stmt_dominates_stmt_p (gimple, gimple); extern bool nowrap_type_p (tree); diff --git a/gcc/tree-ssa-loop-prefetch.c b/gcc/tree-ssa-loop-prefetch.c index 5a51ba66c49..b05a99c68df 100644 --- a/gcc/tree-ssa-loop-prefetch.c +++ b/gcc/tree-ssa-loop-prefetch.c @@ -285,7 +285,7 @@ dump_mem_details (FILE *file, tree base, tree step, fprintf (file, "(base "); print_generic_expr (file, base, TDF_SLIM); fprintf (file, ", step "); - if (cst_and_fits_in_hwi (step)) + if (cst_fits_shwi_p (step)) fprintf (file, HOST_WIDE_INT_PRINT_DEC, int_cst_value (step)); else print_generic_expr (file, step, TDF_TREE); @@ -326,7 +326,7 @@ find_or_create_group (struct mem_ref_group **groups, tree base, tree step) /* If step is an integer constant, keep the list of groups sorted by decreasing step. 
*/ - if (cst_and_fits_in_hwi ((*groups)->step) && cst_and_fits_in_hwi (step) + if (cst_fits_shwi_p ((*groups)->step) && cst_fits_shwi_p (step) && int_cst_value ((*groups)->step) < int_cst_value (step)) break; } @@ -434,12 +434,12 @@ idx_analyze_ref (tree base, tree *index, void *data) step = iv.step; if (TREE_CODE (ibase) == POINTER_PLUS_EXPR - && cst_and_fits_in_hwi (TREE_OPERAND (ibase, 1))) + && cst_fits_shwi_p (TREE_OPERAND (ibase, 1))) { idelta = int_cst_value (TREE_OPERAND (ibase, 1)); ibase = TREE_OPERAND (ibase, 0); } - if (cst_and_fits_in_hwi (ibase)) + if (cst_fits_shwi_p (ibase)) { idelta += int_cst_value (ibase); ibase = build_int_cst (TREE_TYPE (ibase), 0); @@ -448,7 +448,7 @@ idx_analyze_ref (tree base, tree *index, void *data) if (TREE_CODE (base) == ARRAY_REF) { stepsize = array_ref_element_size (base); - if (!cst_and_fits_in_hwi (stepsize)) + if (!cst_fits_shwi_p (stepsize)) return false; imult = int_cst_value (stepsize); step = fold_build2 (MULT_EXPR, sizetype, @@ -505,7 +505,7 @@ analyze_ref (struct loop *loop, tree *ref_p, tree *base, for (; TREE_CODE (ref) == COMPONENT_REF; ref = TREE_OPERAND (ref, 0)) { off = DECL_FIELD_BIT_OFFSET (TREE_OPERAND (ref, 1)); - bit_offset = TREE_INT_CST_LOW (off); + bit_offset = tree_to_hwi (off); gcc_assert (bit_offset % BITS_PER_UNIT == 0); *delta += bit_offset / BITS_PER_UNIT; @@ -546,7 +546,7 @@ gather_memory_references_ref (struct loop *loop, struct mem_ref_group **refs, /* Limit non-constant step prefetching only to the innermost loops and only when the step is loop invariant in the entire loop nest. */ - if (!cst_and_fits_in_hwi (step)) + if (!cst_fits_shwi_p (step)) { if (loop->inner != NULL) { @@ -660,7 +660,7 @@ prune_ref_by_self_reuse (struct mem_ref *ref) bool backward; /* If the step size is non constant, we cannot calculate prefetch_mod. 
*/ - if (!cst_and_fits_in_hwi (ref->group->step)) + if (!cst_fits_shwi_p (ref->group->step)) return; step = int_cst_value (ref->group->step); @@ -770,7 +770,7 @@ prune_ref_by_group_reuse (struct mem_ref *ref, struct mem_ref *by, int align_unit; /* If the step is non constant we cannot calculate prefetch_before. */ - if (!cst_and_fits_in_hwi (ref->group->step)) { + if (!cst_fits_shwi_p (ref->group->step)) { return; } @@ -1135,7 +1135,7 @@ issue_prefetch_ref (struct mem_ref *ref, unsigned unroll_factor, unsigned ahead) for (ap = 0; ap < n_prefetches; ap++) { - if (cst_and_fits_in_hwi (ref->group->step)) + if (cst_fits_shwi_p (ref->group->step)) { /* Determine the address to prefetch. */ delta = (ahead + ap * ref->prefetch_mod) * @@ -1449,8 +1449,8 @@ add_subscript_strides (tree access_fn, unsigned stride, if ((unsigned) loop_depth (aloop) <= min_depth) continue; - if (host_integerp (step, 0)) - astep = tree_low_cst (step, 0); + if (tree_fits_shwi_p (step)) + astep = tree_to_shwi (step); else astep = L1_CACHE_LINE_SIZE; @@ -1499,8 +1499,8 @@ self_reuse_distance (data_reference_p dr, unsigned *loop_sizes, unsigned n, if (TREE_CODE (ref) == ARRAY_REF) { stride = TYPE_SIZE_UNIT (TREE_TYPE (ref)); - if (host_integerp (stride, 1)) - astride = tree_low_cst (stride, 1); + if (tree_fits_uhwi_p (stride)) + astride = tree_to_uhwi (stride); else astride = L1_CACHE_LINE_SIZE; diff --git a/gcc/tree-ssa-loop.h b/gcc/tree-ssa-loop.h index 1c96d9cee5e..d8fc0eded1f 100644 --- a/gcc/tree-ssa-loop.h +++ b/gcc/tree-ssa-loop.h @@ -23,6 +23,7 @@ along with GCC; see the file COPYING3. If not see #include "tree-ssa-loop-ivopts.h" #include "tree-ssa-loop-manip.h" #include "tree-ssa-loop-niter.h" +#include "wide-int.h" /* Affine iv. */ @@ -53,7 +54,7 @@ struct tree_niter_desc a loop (provided that assumptions == true and may_be_zero == false), more precisely the number of executions of the latch of the loop. 
*/ - double_int max; /* The upper bound on the number of iterations of + max_wide_int max; /* The upper bound on the number of iterations of the loop. */ /* The simplified shape of the exit condition. The loop exits if diff --git a/gcc/tree-ssa-math-opts.c b/gcc/tree-ssa-math-opts.c index f7f8ec91fce..e7a489d6e77 100644 --- a/gcc/tree-ssa-math-opts.c +++ b/gcc/tree-ssa-math-opts.c @@ -1139,7 +1139,7 @@ gimple_expand_builtin_pow (gimple_stmt_iterator *gsi, location_t loc, multiplication sequence when profitable. */ c = TREE_REAL_CST (arg1); n = real_to_integer (&c); - real_from_integer (&cint, VOIDmode, n, n < 0 ? -1 : 0, 0); + real_from_integer (&cint, VOIDmode, n, SIGNED); c_is_int = real_identical (&c, &cint); if (c_is_int @@ -1185,7 +1185,7 @@ gimple_expand_builtin_pow (gimple_stmt_iterator *gsi, location_t loc, /* Optimize pow(x,0.75) = sqrt(x) * sqrt(sqrt(x)) unless we are optimizing for space. Don't do this optimization if we don't have a hardware sqrt insn. */ - real_from_integer (&dconst3_4, VOIDmode, 3, 0, 0); + real_from_integer (&dconst3_4, VOIDmode, 3, SIGNED); SET_REAL_EXP (&dconst3_4, REAL_EXP (&dconst3_4) - 2); if (flag_unsafe_math_optimizations @@ -1249,7 +1249,7 @@ gimple_expand_builtin_pow (gimple_stmt_iterator *gsi, location_t loc, Do not calculate the powi factor when n/2 = 0. */ real_arithmetic (&c2, MULT_EXPR, &c, &dconst2); n = real_to_integer (&c2); - real_from_integer (&cint, VOIDmode, n, n < 0 ? -1 : 0, 0); + real_from_integer (&cint, VOIDmode, n, SIGNED); c2_is_int = real_identical (&c2, &cint); if (flag_unsafe_math_optimizations @@ -1297,11 +1297,11 @@ gimple_expand_builtin_pow (gimple_stmt_iterator *gsi, location_t loc, different from pow(x, 1./3.) due to rounding and behavior with negative x, we need to constrain this transformation to unsafe math and positive x or finite math. 
*/ - real_from_integer (&dconst3, VOIDmode, 3, 0, 0); + real_from_integer (&dconst3, VOIDmode, 3, SIGNED); real_arithmetic (&c2, MULT_EXPR, &c, &dconst3); real_round (&c2, mode, &c2); n = real_to_integer (&c2); - real_from_integer (&cint, VOIDmode, n, n < 0 ? -1 : 0, 0); + real_from_integer (&cint, VOIDmode, n, SIGNED); real_arithmetic (&c2, RDIV_EXPR, &cint, &dconst3); real_convert (&c2, mode, &c2); @@ -1493,10 +1493,10 @@ execute_cse_sincos (void) } else { - if (!host_integerp (arg1, 0)) + if (!tree_fits_shwi_p (arg1)) break; - - n = TREE_INT_CST_LOW (arg1); + + n = tree_to_shwi (arg1); result = gimple_expand_builtin_powi (&gsi, loc, arg0, n); } @@ -1754,7 +1754,7 @@ find_bswap_1 (gimple stmt, struct symbolic_number *n, int limit) case RSHIFT_EXPR: case LROTATE_EXPR: case RROTATE_EXPR: - if (!do_shift_rotate (code, n, (int)TREE_INT_CST_LOW (rhs2))) + if (!do_shift_rotate (code, n, (int)tree_to_hwi (rhs2))) return NULL_TREE; break; CASE_CONVERT: @@ -1847,7 +1847,7 @@ find_bswap (gimple stmt) increase that number by three here in order to also cover signed -> unsigned converions of the src operand as can be seen in libgcc, and for initial shift/and operation of the src operand. 
*/ - limit = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (gimple_expr_type (stmt))); + limit = tree_to_hwi (TYPE_SIZE_UNIT (gimple_expr_type (stmt))); limit += 1 + (int) ceil_log2 ((unsigned HOST_WIDE_INT) limit); source_expr = find_bswap_1 (stmt, &n, limit); diff --git a/gcc/tree-ssa-phiopt.c b/gcc/tree-ssa-phiopt.c index adf8a280063..9435cc1b10c 100644 --- a/gcc/tree-ssa-phiopt.c +++ b/gcc/tree-ssa-phiopt.c @@ -679,7 +679,7 @@ jump_function_from_stmt (tree *arg, gimple stmt) &offset); if (tem && TREE_CODE (tem) == MEM_REF - && (mem_ref_offset (tem) + double_int::from_shwi (offset)).is_zero ()) + && (mem_ref_offset (tem) + offset) == 0) { *arg = TREE_OPERAND (tem, 0); return true; @@ -1351,7 +1351,7 @@ add_or_mark_expr (basic_block bb, tree exp, if (TREE_CODE (exp) == MEM_REF && TREE_CODE (TREE_OPERAND (exp, 0)) == SSA_NAME - && host_integerp (TREE_OPERAND (exp, 1), 0) + && tree_fits_shwi_p (TREE_OPERAND (exp, 1)) && (size = int_size_in_bytes (TREE_TYPE (exp))) > 0) { tree name = TREE_OPERAND (exp, 0); @@ -1366,7 +1366,7 @@ add_or_mark_expr (basic_block bb, tree exp, map.phase = 0; map.bb = 0; map.store = store; - map.offset = tree_low_cst (TREE_OPERAND (exp, 1), 0); + map.offset = tree_to_shwi (TREE_OPERAND (exp, 1)); map.size = size; slot = seen_ssa_names.find_slot (&map, INSERT); @@ -1978,14 +1978,14 @@ hoist_adjacent_loads (basic_block bb0, basic_block bb1, tree_offset2 = bit_position (field2); tree_size2 = DECL_SIZE (field2); - if (!host_integerp (tree_offset1, 1) - || !host_integerp (tree_offset2, 1) - || !host_integerp (tree_size2, 1)) + if (!tree_fits_uhwi_p (tree_offset1) + || !tree_fits_uhwi_p (tree_offset2) + || !tree_fits_uhwi_p (tree_size2)) continue; - offset1 = TREE_INT_CST_LOW (tree_offset1); - offset2 = TREE_INT_CST_LOW (tree_offset2); - size2 = TREE_INT_CST_LOW (tree_size2); + offset1 = tree_to_uhwi (tree_offset1); + offset2 = tree_to_uhwi (tree_offset2); + size2 = tree_to_uhwi (tree_size2); align1 = DECL_ALIGN (field1) % param_align_bits; if (offset1 % 
BITS_PER_UNIT != 0) diff --git a/gcc/tree-ssa-pre.c b/gcc/tree-ssa-pre.c index 4774f39ae7c..6a3ac8471a8 100644 --- a/gcc/tree-ssa-pre.c +++ b/gcc/tree-ssa-pre.c @@ -1581,11 +1581,11 @@ phi_translate_1 (pre_expr expr, bitmap_set_t set1, bitmap_set_t set2, && TREE_CODE (op[1]) == INTEGER_CST && TREE_CODE (op[2]) == INTEGER_CST) { - double_int off = tree_to_double_int (op[0]); - off += -tree_to_double_int (op[1]); - off *= tree_to_double_int (op[2]); - if (off.fits_shwi ()) - newop.off = off.low; + addr_wide_int off = op[0]; + off += -addr_wide_int (op[1]); + off *= addr_wide_int (op[2]); + if (wi::fits_shwi_p (off)) + newop.off = off.to_shwi (); } newoperands[j] = newop; /* If it transforms from an SSA_NAME to an address, fold with diff --git a/gcc/tree-ssa-reassoc.c b/gcc/tree-ssa-reassoc.c index 17541c643d2..2533aedfae2 100644 --- a/gcc/tree-ssa-reassoc.c +++ b/gcc/tree-ssa-reassoc.c @@ -1038,13 +1038,13 @@ decrement_power (gimple stmt) arg1 = gimple_call_arg (stmt, 1); c = TREE_REAL_CST (arg1); power = real_to_integer (&c) - 1; - real_from_integer (&cint, VOIDmode, power, 0, 0); + real_from_integer (&cint, VOIDmode, power, SIGNED); gimple_call_set_arg (stmt, 1, build_real (TREE_TYPE (arg1), cint)); return power; CASE_FLT_FN (BUILT_IN_POWI): arg1 = gimple_call_arg (stmt, 1); - power = TREE_INT_CST_LOW (arg1) - 1; + power = tree_to_hwi (arg1) - 1; gimple_call_set_arg (stmt, 1, build_int_cst (TREE_TYPE (arg1), power)); return power; @@ -3552,8 +3552,7 @@ acceptable_pow_call (gimple stmt, tree *base, HOST_WIDE_INT *exponent) return false; *exponent = real_to_integer (&c); - real_from_integer (&cint, VOIDmode, *exponent, - *exponent < 0 ? 
-1 : 0, 0); + real_from_integer (&cint, VOIDmode, *exponent, SIGNED); if (!real_identical (&c, &cint)) return false; @@ -3563,10 +3562,10 @@ acceptable_pow_call (gimple stmt, tree *base, HOST_WIDE_INT *exponent) *base = gimple_call_arg (stmt, 0); arg1 = gimple_call_arg (stmt, 1); - if (!host_integerp (arg1, 0)) + if (!tree_fits_shwi_p (arg1)) return false; - *exponent = TREE_INT_CST_LOW (arg1); + *exponent = tree_to_shwi (arg1); break; default: diff --git a/gcc/tree-ssa-sccvn.c b/gcc/tree-ssa-sccvn.c index 91604e2a54e..5847d123d46 100644 --- a/gcc/tree-ssa-sccvn.c +++ b/gcc/tree-ssa-sccvn.c @@ -653,11 +653,11 @@ vn_reference_eq (const_vn_reference_t const vr1, const_vn_reference_t const vr2) } else if (INTEGRAL_TYPE_P (vr1->type) && (TYPE_PRECISION (vr1->type) - != TREE_INT_CST_LOW (TYPE_SIZE (vr1->type)))) + != tree_to_hwi (TYPE_SIZE (vr1->type)))) return false; else if (INTEGRAL_TYPE_P (vr2->type) && (TYPE_PRECISION (vr2->type) - != TREE_INT_CST_LOW (TYPE_SIZE (vr2->type)))) + != tree_to_hwi (TYPE_SIZE (vr2->type)))) return false; i = 0; @@ -777,8 +777,8 @@ copy_reference_ops_from_ref (tree ref, vec<vn_reference_op_s> *result) case MEM_REF: /* The base address gets its own vn_reference_op_s structure. */ temp.op0 = TREE_OPERAND (ref, 1); - if (host_integerp (TREE_OPERAND (ref, 1), 0)) - temp.off = TREE_INT_CST_LOW (TREE_OPERAND (ref, 1)); + if (tree_fits_shwi_p (TREE_OPERAND (ref, 1))) + temp.off = tree_to_shwi (TREE_OPERAND (ref, 1)); break; case BIT_FIELD_REF: /* Record bits and position. */ @@ -798,15 +798,15 @@ copy_reference_ops_from_ref (tree ref, vec<vn_reference_op_s> *result) && TREE_CODE (this_offset) == INTEGER_CST) { tree bit_offset = DECL_FIELD_BIT_OFFSET (TREE_OPERAND (ref, 1)); - if (TREE_INT_CST_LOW (bit_offset) % BITS_PER_UNIT == 0) + if (tree_to_hwi (bit_offset) % BITS_PER_UNIT == 0) { - double_int off - = tree_to_double_int (this_offset) - + tree_to_double_int (bit_offset) - .rshift (BITS_PER_UNIT == 8 - ? 
3 : exact_log2 (BITS_PER_UNIT)); - if (off.fits_shwi ()) - temp.off = off.low; + addr_wide_int off + = (addr_wide_int (this_offset) + + wi::lrshift (addr_wide_int (bit_offset), + BITS_PER_UNIT == 8 + ? 3 : exact_log2 (BITS_PER_UNIT))); + if (wi::fits_shwi_p (off)) + temp.off = off.to_shwi (); } } } @@ -822,11 +822,11 @@ copy_reference_ops_from_ref (tree ref, vec<vn_reference_op_s> *result) && TREE_CODE (temp.op1) == INTEGER_CST && TREE_CODE (temp.op2) == INTEGER_CST) { - double_int off = tree_to_double_int (temp.op0); - off += -tree_to_double_int (temp.op1); - off *= tree_to_double_int (temp.op2); - if (off.fits_shwi ()) - temp.off = off.low; + addr_wide_int off = temp.op0; + off += -addr_wide_int (temp.op1); + off *= addr_wide_int (temp.op2); + if (wi::fits_shwi_p (off)) + temp.off = off.to_shwi(); } break; case VAR_DECL: @@ -878,7 +878,7 @@ copy_reference_ops_from_ref (tree ref, vec<vn_reference_op_s> *result) break; case IMAGPART_EXPR: /* This is only interesting for its constant offset. */ - temp.off = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (TREE_TYPE (ref))); + temp.off = tree_to_hwi (TYPE_SIZE_UNIT (TREE_TYPE (ref))); break; default: gcc_unreachable (); @@ -931,10 +931,10 @@ ao_ref_init_from_vn_reference (ao_ref *ref, } if (size_tree != NULL_TREE) { - if (!host_integerp (size_tree, 1)) + if (!tree_fits_uhwi_p (size_tree)) size = -1; else - size = TREE_INT_CST_LOW (size_tree); + size = tree_to_uhwi (size_tree); } /* Initially, maxsize is the same as the accessed element size. @@ -990,7 +990,7 @@ ao_ref_init_from_vn_reference (ao_ref *ref, /* And now the usual component-reference style ops. */ case BIT_FIELD_REF: - offset += tree_low_cst (op->op1, 0); + offset += tree_to_shwi (op->op1); break; case COMPONENT_REF: @@ -1001,13 +1001,13 @@ ao_ref_init_from_vn_reference (ao_ref *ref, parts manually. 
*/ if (op->op1 - || !host_integerp (DECL_FIELD_OFFSET (field), 1)) + || !tree_fits_uhwi_p (DECL_FIELD_OFFSET (field))) max_size = -1; else { - offset += (TREE_INT_CST_LOW (DECL_FIELD_OFFSET (field)) + offset += (tree_to_uhwi (DECL_FIELD_OFFSET (field)) * BITS_PER_UNIT); - offset += TREE_INT_CST_LOW (DECL_FIELD_BIT_OFFSET (field)); + offset += tree_to_hwi (DECL_FIELD_BIT_OFFSET (field)); } break; } @@ -1015,15 +1015,15 @@ ao_ref_init_from_vn_reference (ao_ref *ref, case ARRAY_RANGE_REF: case ARRAY_REF: /* We recorded the lower bound and the element size. */ - if (!host_integerp (op->op0, 0) - || !host_integerp (op->op1, 0) - || !host_integerp (op->op2, 0)) + if (!tree_fits_shwi_p (op->op0) + || !tree_fits_shwi_p (op->op1) + || !tree_fits_shwi_p (op->op2)) max_size = -1; else { - HOST_WIDE_INT hindex = TREE_INT_CST_LOW (op->op0); - hindex -= TREE_INT_CST_LOW (op->op1); - hindex *= TREE_INT_CST_LOW (op->op2); + HOST_WIDE_INT hindex = tree_to_shwi (op->op0); + hindex -= tree_to_shwi (op->op1); + hindex *= tree_to_shwi (op->op2); hindex *= BITS_PER_UNIT; offset += hindex; } @@ -1146,13 +1146,13 @@ vn_reference_fold_indirect (vec<vn_reference_op_s> *ops, gcc_checking_assert (addr_base && TREE_CODE (addr_base) != MEM_REF); if (addr_base != TREE_OPERAND (op->op0, 0)) { - double_int off = tree_to_double_int (mem_op->op0); - off = off.sext (TYPE_PRECISION (TREE_TYPE (mem_op->op0))); - off += double_int::from_shwi (addr_offset); - mem_op->op0 = double_int_to_tree (TREE_TYPE (mem_op->op0), off); + addr_wide_int off = wi::sext (addr_wide_int (mem_op->op0), + TYPE_PRECISION (TREE_TYPE (mem_op->op0))); + off += addr_offset; + mem_op->op0 = wide_int_to_tree (TREE_TYPE (mem_op->op0), off); op->op0 = build_fold_addr_expr (addr_base); - if (host_integerp (mem_op->op0, 0)) - mem_op->off = TREE_INT_CST_LOW (mem_op->op0); + if (tree_fits_shwi_p (mem_op->op0)) + mem_op->off = tree_to_shwi (mem_op->op0); else mem_op->off = -1; } @@ -1169,7 +1169,7 @@ vn_reference_maybe_forwprop_address 
(vec<vn_reference_op_s> *ops, vn_reference_op_t mem_op = &(*ops)[i - 1]; gimple def_stmt; enum tree_code code; - double_int off; + addr_wide_int off; def_stmt = SSA_NAME_DEF_STMT (op->op0); if (!is_gimple_assign (def_stmt)) @@ -1180,8 +1180,8 @@ vn_reference_maybe_forwprop_address (vec<vn_reference_op_s> *ops, && code != POINTER_PLUS_EXPR) return; - off = tree_to_double_int (mem_op->op0); - off = off.sext (TYPE_PRECISION (TREE_TYPE (mem_op->op0))); + off = wi::sext (addr_wide_int (mem_op->op0), + TYPE_PRECISION (TREE_TYPE (mem_op->op0))); /* The only thing we have to do is from &OBJ.foo.bar add the offset from .foo.bar to the preceding MEM_REF offset and replace the @@ -1198,7 +1198,7 @@ vn_reference_maybe_forwprop_address (vec<vn_reference_op_s> *ops, || TREE_CODE (addr_base) != MEM_REF) return; - off += double_int::from_shwi (addr_offset); + off += addr_offset; off += mem_ref_offset (addr_base); op->op0 = TREE_OPERAND (addr_base, 0); } @@ -1211,13 +1211,13 @@ vn_reference_maybe_forwprop_address (vec<vn_reference_op_s> *ops, || TREE_CODE (ptroff) != INTEGER_CST) return; - off += tree_to_double_int (ptroff); + off += ptroff; op->op0 = ptr; } - mem_op->op0 = double_int_to_tree (TREE_TYPE (mem_op->op0), off); - if (host_integerp (mem_op->op0, 0)) - mem_op->off = TREE_INT_CST_LOW (mem_op->op0); + mem_op->op0 = wide_int_to_tree (TREE_TYPE (mem_op->op0), off); + if (tree_fits_shwi_p (mem_op->op0)) + mem_op->off = tree_to_shwi (mem_op->op0); else mem_op->off = -1; if (TREE_CODE (op->op0) == SSA_NAME) @@ -1297,7 +1297,7 @@ fully_constant_vn_reference_p (vn_reference_t ref) && compare_tree_int (op->op0, TREE_STRING_LENGTH (arg0->op0)) < 0) return build_int_cst_type (op->type, (TREE_STRING_POINTER (arg0->op0) - [TREE_INT_CST_LOW (op->op0)])); + [tree_to_hwi (op->op0)])); } return NULL_TREE; @@ -1369,11 +1369,11 @@ valueize_refs_1 (vec<vn_reference_op_s> orig, bool *valueized_anything) && TREE_CODE (vro->op1) == INTEGER_CST && TREE_CODE (vro->op2) == INTEGER_CST) { - 
double_int off = tree_to_double_int (vro->op0); - off += -tree_to_double_int (vro->op1); - off *= tree_to_double_int (vro->op2); - if (off.fits_shwi ()) - vro->off = off.low; + addr_wide_int off = vro->op0; + off += -addr_wide_int (vro->op1); + off *= addr_wide_int (vro->op2); + if (wi::fits_shwi_p (off)) + vro->off = off.to_shwi (); } } @@ -1581,16 +1581,16 @@ vn_reference_lookup_3 (ao_ref *ref, tree vuse, void *vr_) if (is_gimple_reg_type (vr->type) && gimple_call_builtin_p (def_stmt, BUILT_IN_MEMSET) && integer_zerop (gimple_call_arg (def_stmt, 1)) - && host_integerp (gimple_call_arg (def_stmt, 2), 1) + && tree_fits_uhwi_p (gimple_call_arg (def_stmt, 2)) && TREE_CODE (gimple_call_arg (def_stmt, 0)) == ADDR_EXPR) { tree ref2 = TREE_OPERAND (gimple_call_arg (def_stmt, 0), 0); tree base2; HOST_WIDE_INT offset2, size2, maxsize2; base2 = get_ref_base_and_extent (ref2, &offset2, &size2, &maxsize2); - size2 = TREE_INT_CST_LOW (gimple_call_arg (def_stmt, 2)) * 8; + size2 = tree_to_uhwi (gimple_call_arg (def_stmt, 2)) * 8; if ((unsigned HOST_WIDE_INT)size2 / 8 - == TREE_INT_CST_LOW (gimple_call_arg (def_stmt, 2)) + == tree_to_uhwi (gimple_call_arg (def_stmt, 2)) && maxsize2 != -1 && operand_equal_p (base, base2, 0) && offset2 <= offset @@ -1693,7 +1693,7 @@ vn_reference_lookup_3 (ao_ref *ref, tree vuse, void *vr_) { tree val = NULL_TREE; HOST_WIDE_INT elsz - = TREE_INT_CST_LOW (TYPE_SIZE (TREE_TYPE (TREE_TYPE (rhs1)))); + = tree_to_hwi (TYPE_SIZE (TREE_TYPE (TREE_TYPE (rhs1)))); if (gimple_assign_rhs_code (def_stmt2) == COMPLEX_EXPR) { if (off == 0) @@ -1830,7 +1830,7 @@ vn_reference_lookup_3 (ao_ref *ref, tree vuse, void *vr_) || TREE_CODE (gimple_call_arg (def_stmt, 0)) == SSA_NAME) && (TREE_CODE (gimple_call_arg (def_stmt, 1)) == ADDR_EXPR || TREE_CODE (gimple_call_arg (def_stmt, 1)) == SSA_NAME) - && host_integerp (gimple_call_arg (def_stmt, 2), 1)) + && tree_fits_uhwi_p (gimple_call_arg (def_stmt, 2))) { tree lhs, rhs; ao_ref r; @@ -1857,10 +1857,10 @@ 
vn_reference_lookup_3 (ao_ref *ref, tree vuse, void *vr_) if (!tem) return (void *)-1; if (TREE_CODE (tem) == MEM_REF - && host_integerp (TREE_OPERAND (tem, 1), 1)) + && tree_fits_uhwi_p (TREE_OPERAND (tem, 1))) { lhs = TREE_OPERAND (tem, 0); - lhs_offset += TREE_INT_CST_LOW (TREE_OPERAND (tem, 1)); + lhs_offset += tree_to_uhwi (TREE_OPERAND (tem, 1)); } else if (DECL_P (tem)) lhs = build_fold_addr_expr (tem); @@ -1883,10 +1883,10 @@ vn_reference_lookup_3 (ao_ref *ref, tree vuse, void *vr_) if (!tem) return (void *)-1; if (TREE_CODE (tem) == MEM_REF - && host_integerp (TREE_OPERAND (tem, 1), 1)) + && tree_fits_uhwi_p (TREE_OPERAND (tem, 1))) { rhs = TREE_OPERAND (tem, 0); - rhs_offset += TREE_INT_CST_LOW (TREE_OPERAND (tem, 1)); + rhs_offset += tree_to_uhwi (TREE_OPERAND (tem, 1)); } else if (DECL_P (tem)) rhs = build_fold_addr_expr (tem); @@ -1897,14 +1897,14 @@ vn_reference_lookup_3 (ao_ref *ref, tree vuse, void *vr_) && TREE_CODE (rhs) != ADDR_EXPR) return (void *)-1; - copy_size = TREE_INT_CST_LOW (gimple_call_arg (def_stmt, 2)); + copy_size = tree_to_hwi (gimple_call_arg (def_stmt, 2)); /* The bases of the destination and the references have to agree. */ if ((TREE_CODE (base) != MEM_REF && !DECL_P (base)) || (TREE_CODE (base) == MEM_REF && (TREE_OPERAND (base, 0) != lhs - || !host_integerp (TREE_OPERAND (base, 1), 1))) + || !tree_fits_uhwi_p (TREE_OPERAND (base, 1)))) || (DECL_P (base) && (TREE_CODE (lhs) != ADDR_EXPR || TREE_OPERAND (lhs, 0) != base))) @@ -1913,7 +1913,7 @@ vn_reference_lookup_3 (ao_ref *ref, tree vuse, void *vr_) /* And the access has to be contained within the memcpy destination. 
*/ at = offset / BITS_PER_UNIT; if (TREE_CODE (base) == MEM_REF) - at += TREE_INT_CST_LOW (TREE_OPERAND (base, 1)); + at += tree_to_hwi (TREE_OPERAND (base, 1)); if (lhs_offset > at || lhs_offset + copy_size < at + maxsize / BITS_PER_UNIT) return (void *)-1; @@ -3216,12 +3216,12 @@ simplify_binary_expression (gimple stmt) /* Pointer plus constant can be represented as invariant address. Do so to allow further propatation, see also tree forwprop. */ if (code == POINTER_PLUS_EXPR - && host_integerp (op1, 1) + && tree_fits_uhwi_p (op1) && TREE_CODE (op0) == ADDR_EXPR && is_gimple_min_invariant (op0)) return build_invariant_address (TREE_TYPE (op0), TREE_OPERAND (op0, 0), - TREE_INT_CST_LOW (op1)); + tree_to_uhwi (op1)); /* Avoid folding if nothing changed. */ if (op0 == gimple_assign_rhs1 (stmt) diff --git a/gcc/tree-ssa-strlen.c b/gcc/tree-ssa-strlen.c index 5df1ddf59bd..d2899539ca5 100644 --- a/gcc/tree-ssa-strlen.c +++ b/gcc/tree-ssa-strlen.c @@ -205,10 +205,10 @@ get_stridx (tree exp) s = string_constant (exp, &o); if (s != NULL_TREE - && (o == NULL_TREE || host_integerp (o, 0)) + && (o == NULL_TREE || tree_fits_shwi_p (o)) && TREE_STRING_LENGTH (s) > 0) { - HOST_WIDE_INT offset = o ? tree_low_cst (o, 0) : 0; + HOST_WIDE_INT offset = o ? tree_to_shwi (o) : 0; const char *p = TREE_STRING_POINTER (s); int max = TREE_STRING_LENGTH (s) - 1; @@ -836,16 +836,16 @@ adjust_last_stmt (strinfo si, gimple stmt, bool is_strcat) } len = gimple_call_arg (last.stmt, 2); - if (host_integerp (len, 1)) + if (tree_fits_uhwi_p (len)) { - if (!host_integerp (last.len, 1) + if (!tree_fits_uhwi_p (last.len) || integer_zerop (len) - || (unsigned HOST_WIDE_INT) tree_low_cst (len, 1) - != (unsigned HOST_WIDE_INT) tree_low_cst (last.len, 1) + 1) + || (unsigned HOST_WIDE_INT) tree_to_uhwi (len) + != (unsigned HOST_WIDE_INT) tree_to_uhwi (last.len) + 1) return; /* Don't adjust the length if it is divisible by 4, it is more efficient to store the extra '\0' in that case. 
*/ - if ((((unsigned HOST_WIDE_INT) tree_low_cst (len, 1)) & 3) == 0) + if ((((unsigned HOST_WIDE_INT) tree_to_uhwi (len)) & 3) == 0) return; } else if (TREE_CODE (len) == SSA_NAME) @@ -1300,7 +1300,7 @@ handle_builtin_memcpy (enum built_in_function bcode, gimple_stmt_iterator *gsi) return; if (olddsi != NULL - && host_integerp (len, 1) + && tree_fits_uhwi_p (len) && !integer_zerop (len)) adjust_last_stmt (olddsi, stmt, false); @@ -1326,8 +1326,8 @@ handle_builtin_memcpy (enum built_in_function bcode, gimple_stmt_iterator *gsi) si = NULL; /* Handle memcpy (x, "abcd", 5) or memcpy (x, "abc\0uvw", 7). */ - if (!host_integerp (len, 1) - || (unsigned HOST_WIDE_INT) tree_low_cst (len, 1) + if (!tree_fits_uhwi_p (len) + || (unsigned HOST_WIDE_INT) tree_to_uhwi (len) <= (unsigned HOST_WIDE_INT) ~idx) return; } @@ -1616,11 +1616,11 @@ handle_pointer_plus (gimple_stmt_iterator *gsi) if (idx < 0) { tree off = gimple_assign_rhs2 (stmt); - if (host_integerp (off, 1) - && (unsigned HOST_WIDE_INT) tree_low_cst (off, 1) + if (tree_fits_uhwi_p (off) + && (unsigned HOST_WIDE_INT) tree_to_uhwi (off) <= (unsigned HOST_WIDE_INT) ~idx) ssa_ver_to_stridx[SSA_NAME_VERSION (lhs)] - = ~(~idx - (int) tree_low_cst (off, 1)); + = ~(~idx - (int) tree_to_uhwi (off)); return; } diff --git a/gcc/tree-ssa-structalias.c b/gcc/tree-ssa-structalias.c index b8cfebd3110..ed76900608e 100644 --- a/gcc/tree-ssa-structalias.c +++ b/gcc/tree-ssa-structalias.c @@ -2974,12 +2974,12 @@ process_constraint (constraint_t t) static HOST_WIDE_INT bitpos_of_field (const tree fdecl) { - if (!host_integerp (DECL_FIELD_OFFSET (fdecl), 0) - || !host_integerp (DECL_FIELD_BIT_OFFSET (fdecl), 0)) + if (!tree_fits_shwi_p (DECL_FIELD_OFFSET (fdecl)) + || !tree_fits_shwi_p (DECL_FIELD_BIT_OFFSET (fdecl))) return -1; - return (TREE_INT_CST_LOW (DECL_FIELD_OFFSET (fdecl)) * BITS_PER_UNIT - + TREE_INT_CST_LOW (DECL_FIELD_BIT_OFFSET (fdecl))); + return (tree_to_shwi (DECL_FIELD_OFFSET (fdecl)) * BITS_PER_UNIT + + tree_to_shwi 
(DECL_FIELD_BIT_OFFSET (fdecl))); } @@ -3012,14 +3012,14 @@ get_constraint_for_ptr_offset (tree ptr, tree offset, else { /* Sign-extend the offset. */ - double_int soffset = tree_to_double_int (offset) - .sext (TYPE_PRECISION (TREE_TYPE (offset))); - if (!soffset.fits_shwi ()) + addr_wide_int soffset = wi::sext (addr_wide_int (offset), + TYPE_PRECISION (TREE_TYPE (offset))); + if (!wi::fits_shwi_p (soffset)) rhsoffset = UNKNOWN_OFFSET; else { /* Make sure the bit-offset also fits. */ - HOST_WIDE_INT rhsunitoffset = soffset.low; + HOST_WIDE_INT rhsunitoffset = soffset.to_shwi (); rhsoffset = rhsunitoffset * BITS_PER_UNIT; if (rhsunitoffset != rhsoffset / BITS_PER_UNIT) rhsoffset = UNKNOWN_OFFSET; @@ -3409,8 +3409,8 @@ get_constraint_for_1 (tree t, vec<ce_s> *results, bool address_p, && curr) { unsigned HOST_WIDE_INT size; - if (host_integerp (TYPE_SIZE (TREE_TYPE (t)), 1)) - size = TREE_INT_CST_LOW (TYPE_SIZE (TREE_TYPE (t))); + if (tree_fits_uhwi_p (TYPE_SIZE (TREE_TYPE (t)))) + size = tree_to_uhwi (TYPE_SIZE (TREE_TYPE (t))); else size = -1; for (; curr; curr = vi_next (curr)) @@ -5328,7 +5328,7 @@ push_fields_onto_fieldstack (tree type, vec<fieldoff_s> *fieldstack, } if (!DECL_SIZE (field) - || !host_integerp (DECL_SIZE (field), 1)) + || !tree_fits_uhwi_p (DECL_SIZE (field))) has_unknown_size = true; /* If adjacent fields do not contain pointers merge them. 
*/ @@ -5340,7 +5340,7 @@ push_fields_onto_fieldstack (tree type, vec<fieldoff_s> *fieldstack, && !pair->has_unknown_size && pair->offset + (HOST_WIDE_INT)pair->size == offset + foff) { - pair->size += TREE_INT_CST_LOW (DECL_SIZE (field)); + pair->size += tree_to_hwi (DECL_SIZE (field)); } else { @@ -5348,7 +5348,7 @@ push_fields_onto_fieldstack (tree type, vec<fieldoff_s> *fieldstack, e.offset = offset + foff; e.has_unknown_size = has_unknown_size; if (!has_unknown_size) - e.size = TREE_INT_CST_LOW (DECL_SIZE (field)); + e.size = tree_to_hwi (DECL_SIZE (field)); else e.size = -1; e.must_have_pointers = must_have_pointers_p; @@ -5604,7 +5604,7 @@ create_variable_info_for_1 (tree decl, const char *name) unsigned int i; if (!declsize - || !host_integerp (declsize, 1)) + || !tree_fits_uhwi_p (declsize)) { vi = new_var_info (decl, name); vi->offset = 0; @@ -5665,7 +5665,7 @@ create_variable_info_for_1 (tree decl, const char *name) vi = new_var_info (decl, name); vi->offset = 0; vi->may_have_pointers = true; - vi->fullsize = TREE_INT_CST_LOW (declsize); + vi->fullsize = tree_to_hwi (declsize); vi->size = vi->fullsize; vi->is_full_var = true; fieldstack.release (); @@ -5673,7 +5673,7 @@ create_variable_info_for_1 (tree decl, const char *name) } vi = new_var_info (decl, name); - vi->fullsize = TREE_INT_CST_LOW (declsize); + vi->fullsize = tree_to_hwi (declsize); for (i = 0, newvi = vi; fieldstack.iterate (i, &fo); ++i, newvi = vi_next (newvi)) diff --git a/gcc/tree-ssa.c b/gcc/tree-ssa.c index 421e200da9f..b77c8b3bc97 100644 --- a/gcc/tree-ssa.c +++ b/gcc/tree-ssa.c @@ -1421,9 +1421,9 @@ non_rewritable_mem_ref_base (tree ref) || TREE_CODE (TREE_TYPE (decl)) == COMPLEX_TYPE) && useless_type_conversion_p (TREE_TYPE (base), TREE_TYPE (TREE_TYPE (decl))) - && mem_ref_offset (base).fits_uhwi () - && tree_to_double_int (TYPE_SIZE_UNIT (TREE_TYPE (decl))) - .ugt (mem_ref_offset (base)) + && wi::fits_uhwi_p (mem_ref_offset (base)) + && wi::gtu_p (TYPE_SIZE_UNIT (TREE_TYPE (decl)), 
+ mem_ref_offset (base)) && multiple_of_p (sizetype, TREE_OPERAND (base, 1), TYPE_SIZE_UNIT (TREE_TYPE (base)))) return NULL_TREE; diff --git a/gcc/tree-ssanames.c b/gcc/tree-ssanames.c index a51f60307ec..b443cc372ca 100644 --- a/gcc/tree-ssanames.c +++ b/gcc/tree-ssanames.c @@ -173,7 +173,7 @@ make_ssa_name_fn (struct function *fn, tree var, gimple stmt) /* Store range information MIN, and MAX to tree ssa_name NAME. */ void -set_range_info (tree name, double_int min, double_int max) +set_range_info (tree name, max_wide_int min, max_wide_int max) { gcc_assert (!POINTER_TYPE_P (TREE_TYPE (name))); range_info_def *ri = SSA_NAME_RANGE_INFO (name); @@ -196,7 +196,7 @@ set_range_info (tree name, double_int min, double_int max) is used to determine if MIN and MAX are valid values. */ enum value_range_type -get_range_info (tree name, double_int *min, double_int *max) +get_range_info (tree name, max_wide_int *min, max_wide_int *max) { enum value_range_type range_type; gcc_assert (!POINTER_TYPE_P (TREE_TYPE (name))); @@ -210,12 +210,12 @@ get_range_info (tree name, double_int *min, double_int *max) return VR_VARYING; /* If min > max, it is VR_ANTI_RANGE. */ - if (ri->min.cmp (ri->max, TYPE_UNSIGNED (TREE_TYPE (name))) == 1) + if (wi::cmp (ri->min, ri->max, TYPE_SIGN (TREE_TYPE (name))) == 1) { /* VR_ANTI_RANGE ~[min, max] is encoded as [max + 1, min - 1]. */ range_type = VR_ANTI_RANGE; - *min = ri->max + double_int_one; - *max = ri->min - double_int_one; + *min = ri->max + 1; + *max = ri->min - 1; } else { diff --git a/gcc/tree-ssanames.h b/gcc/tree-ssanames.h index f80e0b2ce33..dc490fb4ab7 100644 --- a/gcc/tree-ssanames.h +++ b/gcc/tree-ssanames.h @@ -49,9 +49,9 @@ struct GTY(()) ptr_info_def struct GTY (()) range_info_def { /* Minimum for value range. */ - double_int min; + max_wide_int min; /* Maximum for value range. 
*/ - double_int max; + max_wide_int max; }; @@ -68,10 +68,10 @@ struct GTY (()) range_info_def { enum value_range_type { VR_UNDEFINED, VR_RANGE, VR_ANTI_RANGE, VR_VARYING }; /* Sets the value range to SSA. */ -extern void set_range_info (tree ssa, double_int min, double_int max); +extern void set_range_info (tree ssa, max_wide_int min, max_wide_int max); /* Gets the value range from SSA. */ -extern enum value_range_type get_range_info (tree name, double_int *min, - double_int *max); +extern enum value_range_type get_range_info (tree name, max_wide_int *min, + max_wide_int *max); extern void init_ssanames (struct function *, int); extern void fini_ssanames (void); extern void ssanames_print_statistics (void); diff --git a/gcc/tree-stdarg.c b/gcc/tree-stdarg.c index 3314fbaf56e..58c2e871ebd 100644 --- a/gcc/tree-stdarg.c +++ b/gcc/tree-stdarg.c @@ -165,9 +165,9 @@ va_list_counter_bump (struct stdarg_info *si, tree counter, tree rhs, if ((rhs_code == POINTER_PLUS_EXPR || rhs_code == PLUS_EXPR) && TREE_CODE (rhs1) == SSA_NAME - && host_integerp (gimple_assign_rhs2 (stmt), 1)) + && tree_fits_uhwi_p (gimple_assign_rhs2 (stmt))) { - ret += tree_low_cst (gimple_assign_rhs2 (stmt), 1); + ret += tree_to_uhwi (gimple_assign_rhs2 (stmt)); lhs = rhs1; continue; } @@ -175,9 +175,9 @@ va_list_counter_bump (struct stdarg_info *si, tree counter, tree rhs, if (rhs_code == ADDR_EXPR && TREE_CODE (TREE_OPERAND (rhs1, 0)) == MEM_REF && TREE_CODE (TREE_OPERAND (TREE_OPERAND (rhs1, 0), 0)) == SSA_NAME - && host_integerp (TREE_OPERAND (TREE_OPERAND (rhs1, 0), 1), 1)) + && tree_fits_uhwi_p (TREE_OPERAND (TREE_OPERAND (rhs1, 0), 1))) { - ret += tree_low_cst (TREE_OPERAND (TREE_OPERAND (rhs1, 0), 1), 1); + ret += tree_to_uhwi (TREE_OPERAND (TREE_OPERAND (rhs1, 0), 1)); lhs = TREE_OPERAND (TREE_OPERAND (rhs1, 0), 0); continue; } @@ -232,9 +232,9 @@ va_list_counter_bump (struct stdarg_info *si, tree counter, tree rhs, if ((rhs_code == POINTER_PLUS_EXPR || rhs_code == PLUS_EXPR) && TREE_CODE 
(rhs1) == SSA_NAME - && host_integerp (gimple_assign_rhs2 (stmt), 1)) + && tree_fits_uhwi_p (gimple_assign_rhs2 (stmt))) { - val -= tree_low_cst (gimple_assign_rhs2 (stmt), 1); + val -= tree_to_uhwi (gimple_assign_rhs2 (stmt)); lhs = rhs1; continue; } @@ -242,9 +242,9 @@ va_list_counter_bump (struct stdarg_info *si, tree counter, tree rhs, if (rhs_code == ADDR_EXPR && TREE_CODE (TREE_OPERAND (rhs1, 0)) == MEM_REF && TREE_CODE (TREE_OPERAND (TREE_OPERAND (rhs1, 0), 0)) == SSA_NAME - && host_integerp (TREE_OPERAND (TREE_OPERAND (rhs1, 0), 1), 1)) + && tree_fits_uhwi_p (TREE_OPERAND (TREE_OPERAND (rhs1, 0), 1))) { - val -= tree_low_cst (TREE_OPERAND (TREE_OPERAND (rhs1, 0), 1), 1); + val -= tree_to_uhwi (TREE_OPERAND (TREE_OPERAND (rhs1, 0), 1)); lhs = TREE_OPERAND (TREE_OPERAND (rhs1, 0), 0); continue; } @@ -582,15 +582,15 @@ check_all_va_list_escapes (struct stdarg_info *si) if (rhs_code == MEM_REF && TREE_OPERAND (rhs, 0) == use && TYPE_SIZE_UNIT (TREE_TYPE (rhs)) - && host_integerp (TYPE_SIZE_UNIT (TREE_TYPE (rhs)), 1) + && tree_fits_uhwi_p (TYPE_SIZE_UNIT (TREE_TYPE (rhs))) && si->offsets[SSA_NAME_VERSION (use)] != -1) { unsigned HOST_WIDE_INT gpr_size; tree access_size = TYPE_SIZE_UNIT (TREE_TYPE (rhs)); gpr_size = si->offsets[SSA_NAME_VERSION (use)] - + tree_low_cst (TREE_OPERAND (rhs, 1), 0) - + tree_low_cst (access_size, 1); + + tree_to_shwi (TREE_OPERAND (rhs, 1)) + + tree_to_uhwi (access_size); if (gpr_size >= VA_LIST_MAX_GPR_SIZE) cfun->va_list_gpr_size = VA_LIST_MAX_GPR_SIZE; else if (gpr_size > cfun->va_list_gpr_size) diff --git a/gcc/tree-streamer-in.c b/gcc/tree-streamer-in.c index c36d4f6368c..35f3abd51e7 100644 --- a/gcc/tree-streamer-in.c +++ b/gcc/tree-streamer-in.c @@ -146,8 +146,9 @@ unpack_ts_base_value_fields (struct bitpack_d *bp, tree expr) static void unpack_ts_int_cst_value_fields (struct bitpack_d *bp, tree expr) { - TREE_INT_CST_LOW (expr) = bp_unpack_var_len_unsigned (bp); - TREE_INT_CST_HIGH (expr) = bp_unpack_var_len_int (bp); + int i; 
+ for (i = 0; i < TREE_INT_CST_NUNITS (expr); i++) + TREE_INT_CST_ELT (expr, i) = bp_unpack_var_len_int (bp); } @@ -567,6 +568,12 @@ streamer_alloc_tree (struct lto_input_block *ib, struct data_in *data_in, unsigned HOST_WIDE_INT len = streamer_read_uhwi (ib); result = make_tree_binfo (len); } + else if (CODE_CONTAINS_STRUCT (code, TS_INT_CST)) + { + unsigned HOST_WIDE_INT len = streamer_read_uhwi (ib); + result = make_int_cst (len); + TREE_INT_CST_NUNITS (result) = len; + } else if (code == CALL_EXPR) { unsigned HOST_WIDE_INT nargs = streamer_read_uhwi (ib); diff --git a/gcc/tree-streamer-out.c b/gcc/tree-streamer-out.c index 942ba1ee44f..f678ae7b269 100644 --- a/gcc/tree-streamer-out.c +++ b/gcc/tree-streamer-out.c @@ -118,10 +118,14 @@ pack_ts_base_value_fields (struct bitpack_d *bp, tree expr) expression EXPR into bitpack BP. */ static void -pack_ts_int_cst_value_fields (struct bitpack_d *bp, tree expr) +pack_ts_int_cst_value_fields (struct bitpack_d *bp, + tree expr ATTRIBUTE_UNUSED) { - bp_pack_var_len_unsigned (bp, TREE_INT_CST_LOW (expr)); - bp_pack_var_len_int (bp, TREE_INT_CST_HIGH (expr)); + int i; + /* Note that the number of elements has already been written out in + streamer_write_tree_header. 
*/ + for (i = 0; i < TREE_INT_CST_NUNITS (expr); i++) + bp_pack_var_len_int (bp, TREE_INT_CST_ELT (expr, i)); } @@ -956,6 +960,11 @@ streamer_write_tree_header (struct output_block *ob, tree expr) streamer_write_uhwi (ob, BINFO_N_BASE_BINFOS (expr)); else if (TREE_CODE (expr) == CALL_EXPR) streamer_write_uhwi (ob, call_expr_nargs (expr)); + else if (CODE_CONTAINS_STRUCT (code, TS_INT_CST)) + { + gcc_assert (TREE_INT_CST_NUNITS (expr)); + streamer_write_uhwi (ob, TREE_INT_CST_NUNITS (expr)); + } } @@ -965,9 +974,12 @@ streamer_write_tree_header (struct output_block *ob, tree expr) void streamer_write_integer_cst (struct output_block *ob, tree cst, bool ref_p) { + int i; + int len = TREE_INT_CST_NUNITS (cst); gcc_assert (!TREE_OVERFLOW (cst)); streamer_write_record_start (ob, LTO_integer_cst); stream_write_tree (ob, TREE_TYPE (cst), ref_p); - streamer_write_uhwi (ob, TREE_INT_CST_LOW (cst)); - streamer_write_hwi (ob, TREE_INT_CST_HIGH (cst)); + streamer_write_uhwi (ob, len); + for (i = 0; i < len; i++) + streamer_write_hwi (ob, TREE_INT_CST_ELT (cst, i)); } diff --git a/gcc/tree-switch-conversion.c b/gcc/tree-switch-conversion.c index 862cb3870c8..776b8c13306 100644 --- a/gcc/tree-switch-conversion.c +++ b/gcc/tree-switch-conversion.c @@ -346,15 +346,13 @@ emit_case_bit_tests (gimple swtch, tree index_expr, else test[k].bits++; - lo = tree_low_cst (int_const_binop (MINUS_EXPR, - CASE_LOW (cs), minval), - 1); + lo = tree_to_uhwi (int_const_binop (MINUS_EXPR, + CASE_LOW (cs), minval)); if (CASE_HIGH (cs) == NULL_TREE) hi = lo; else - hi = tree_low_cst (int_const_binop (MINUS_EXPR, - CASE_HIGH (cs), minval), - 1); + hi = tree_to_uhwi (int_const_binop (MINUS_EXPR, + CASE_HIGH (cs), minval)); for (j = lo; j <= hi; j++) if (j >= HOST_BITS_PER_WIDE_INT) @@ -436,7 +434,13 @@ emit_case_bit_tests (gimple swtch, tree index_expr, if (const & csui) goto target */ for (k = 0; k < count; k++) { - tmp = build_int_cst_wide (word_type_node, test[k].lo, test[k].hi); + HOST_WIDE_INT 
a[2]; + + a[0] = test[k].lo; + a[1] = test[k].hi; + tmp = wide_int_to_tree (word_type_node, + wide_int::from_array (a, 2, + TYPE_PRECISION (word_type_node))); tmp = fold_build2 (BIT_AND_EXPR, word_type_node, csui, tmp); tmp = force_gimple_operand_gsi (&gsi, tmp, /*simple=*/true, NULL_TREE, @@ -689,13 +693,13 @@ static bool check_range (struct switch_conv_info *info) { gcc_assert (info->range_size); - if (!host_integerp (info->range_size, 1)) + if (!tree_fits_uhwi_p (info->range_size)) { info->reason = "index range way too large or otherwise unusable"; return false; } - if ((unsigned HOST_WIDE_INT) tree_low_cst (info->range_size, 1) + if ((unsigned HOST_WIDE_INT) tree_to_uhwi (info->range_size) > ((unsigned) info->count * SWITCH_CONVERSION_BRANCH_RATIO)) { info->reason = "the maximum range-branch ratio exceeded"; @@ -797,7 +801,7 @@ create_temp_arrays (struct switch_conv_info *info) info->target_inbound_names = info->default_values + info->phi_count; info->target_outbound_names = info->target_inbound_names + info->phi_count; for (i = 0; i < info->phi_count; i++) - vec_alloc (info->constructors[i], tree_low_cst (info->range_size, 1) + 1); + vec_alloc (info->constructors[i], tree_to_uhwi (info->range_size) + 1); } /* Free the arrays created by create_temp_arrays(). 
The vectors that are @@ -876,7 +880,7 @@ build_constructors (gimple swtch, struct switch_conv_info *info) info->constructors[k]->quick_push (elt); } - pos = int_const_binop (PLUS_EXPR, pos, integer_one_node); + pos = int_const_binop (PLUS_EXPR, pos, build_int_cst (TREE_TYPE (pos), 1)); } gcc_assert (tree_int_cst_equal (pos, CASE_LOW (cs))); @@ -901,7 +905,7 @@ build_constructors (gimple swtch, struct switch_conv_info *info) elt.value = unshare_expr_without_location (val); info->constructors[j]->quick_push (elt); - pos = int_const_binop (PLUS_EXPR, pos, integer_one_node); + pos = int_const_binop (PLUS_EXPR, pos, build_int_cst (TREE_TYPE (pos), 1)); } while (!tree_int_cst_lt (high, pos) && tree_int_cst_lt (low, pos)); j++; @@ -956,26 +960,26 @@ array_value_type (gimple swtch, tree type, int num, FOR_EACH_VEC_SAFE_ELT (info->constructors[num], i, elt) { - double_int cst; + wide_int cst; if (TREE_CODE (elt->value) != INTEGER_CST) return type; - cst = TREE_INT_CST (elt->value); + cst = elt->value; while (1) { unsigned int prec = GET_MODE_BITSIZE (mode); if (prec > HOST_BITS_PER_WIDE_INT) return type; - if (sign >= 0 && cst == cst.zext (prec)) + if (sign >= 0 && cst == wi::zext (cst, prec)) { - if (sign == 0 && cst == cst.sext (prec)) + if (sign == 0 && cst == wi::sext (cst, prec)) break; sign = 1; break; } - if (sign <= 0 && cst == cst.sext (prec)) + if (sign <= 0 && cst == wi::sext (cst, prec)) { sign = -1; break; diff --git a/gcc/tree-vect-data-refs.c b/gcc/tree-vect-data-refs.c index e87e774e5e4..c32ef7ef985 100644 --- a/gcc/tree-vect-data-refs.c +++ b/gcc/tree-vect-data-refs.c @@ -107,7 +107,7 @@ vect_get_smallest_scalar_type (gimple stmt, HOST_WIDE_INT *lhs_size_unit, tree scalar_type = gimple_expr_type (stmt); HOST_WIDE_INT lhs, rhs; - lhs = rhs = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (scalar_type)); + lhs = rhs = tree_to_hwi (TYPE_SIZE_UNIT (scalar_type)); if (is_gimple_assign (stmt) && (gimple_assign_cast_p (stmt) @@ -117,7 +117,7 @@ vect_get_smallest_scalar_type 
(gimple stmt, HOST_WIDE_INT *lhs_size_unit, { tree rhs_type = TREE_TYPE (gimple_assign_rhs1 (stmt)); - rhs = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (rhs_type)); + rhs = tree_to_hwi (TYPE_SIZE_UNIT (rhs_type)); if (rhs < lhs) scalar_type = rhs_type; } @@ -569,16 +569,16 @@ vect_slp_analyze_data_ref_dependence (struct data_dependence_relation *ddr) return true; /* Check the types. */ - type_size_a = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (dra)))); - type_size_b = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (drb)))); + type_size_a = tree_to_hwi (TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (dra)))); + type_size_b = tree_to_hwi (TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (drb)))); if (type_size_a != type_size_b || !types_compatible_p (TREE_TYPE (DR_REF (dra)), TREE_TYPE (DR_REF (drb)))) return true; - init_a = TREE_INT_CST_LOW (DR_INIT (dra)); - init_b = TREE_INT_CST_LOW (DR_INIT (drb)); + init_a = tree_to_hwi (DR_INIT (dra)); + init_b = tree_to_hwi (DR_INIT (drb)); /* Two different locations - no dependence. */ if (init_a != init_b) @@ -681,7 +681,7 @@ vect_compute_data_ref_alignment (struct data_reference *dr) if (loop && nested_in_vect_loop_p (loop, stmt)) { tree step = DR_STEP (dr); - HOST_WIDE_INT dr_step = TREE_INT_CST_LOW (step); + HOST_WIDE_INT dr_step = tree_to_hwi (step); if (dr_step % GET_MODE_SIZE (TYPE_MODE (vectype)) == 0) { @@ -709,7 +709,7 @@ vect_compute_data_ref_alignment (struct data_reference *dr) if (!loop) { tree step = DR_STEP (dr); - HOST_WIDE_INT dr_step = TREE_INT_CST_LOW (step); + HOST_WIDE_INT dr_step = tree_to_hwi (step); if (dr_step % GET_MODE_SIZE (TYPE_MODE (vectype)) != 0) { @@ -797,7 +797,7 @@ vect_compute_data_ref_alignment (struct data_reference *dr) /* Modulo alignment. */ misalign = size_binop (FLOOR_MOD_EXPR, misalign, alignment); - if (!host_integerp (misalign, 1)) + if (!tree_fits_uhwi_p (misalign)) { /* Negative or overflowed misalignment value. 
*/ if (dump_enabled_p ()) @@ -806,7 +806,7 @@ vect_compute_data_ref_alignment (struct data_reference *dr) return false; } - SET_DR_MISALIGNMENT (dr, TREE_INT_CST_LOW (misalign)); + SET_DR_MISALIGNMENT (dr, tree_to_hwi (misalign)); if (dump_enabled_p ()) { @@ -985,10 +985,10 @@ vect_verify_datarefs_alignment (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo) static bool not_size_aligned (tree exp) { - if (!host_integerp (TYPE_SIZE (TREE_TYPE (exp)), 1)) + if (!tree_fits_uhwi_p (TYPE_SIZE (TREE_TYPE (exp)))) return true; - return (TREE_INT_CST_LOW (TYPE_SIZE (TREE_TYPE (exp))) + return (tree_to_uhwi (TYPE_SIZE (TREE_TYPE (exp))) > get_object_alignment (exp)); } @@ -2030,12 +2030,12 @@ vect_analyze_group_access (struct data_reference *dr) { tree step = DR_STEP (dr); tree scalar_type = TREE_TYPE (DR_REF (dr)); - HOST_WIDE_INT type_size = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (scalar_type)); + HOST_WIDE_INT type_size = tree_to_hwi (TYPE_SIZE_UNIT (scalar_type)); gimple stmt = DR_STMT (dr); stmt_vec_info stmt_info = vinfo_for_stmt (stmt); loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info); - HOST_WIDE_INT dr_step = TREE_INT_CST_LOW (step); + HOST_WIDE_INT dr_step = tree_to_hwi (step); HOST_WIDE_INT groupsize, last_accessed_element = 1; bool slp_impossible = false; struct loop *loop = NULL; @@ -2156,8 +2156,8 @@ vect_analyze_group_access (struct data_reference *dr) /* Check that the distance between two accesses is equal to the type size. Otherwise, we have gaps. */ - diff = (TREE_INT_CST_LOW (DR_INIT (data_ref)) - - TREE_INT_CST_LOW (prev_init)) / type_size; + diff = (tree_to_hwi (DR_INIT (data_ref)) + - tree_to_hwi (prev_init)) / type_size; if (diff != 1) { /* FORNOW: SLP of accesses with gaps is not supported. */ @@ -2347,7 +2347,7 @@ vect_analyze_data_ref_access (struct data_reference *dr) /* Consecutive? 
*/ if (TREE_CODE (step) == INTEGER_CST) { - HOST_WIDE_INT dr_step = TREE_INT_CST_LOW (step); + HOST_WIDE_INT dr_step = tree_to_hwi (step); if (!tree_int_cst_compare (step, TYPE_SIZE_UNIT (scalar_type)) || (dr_step < 0 && !compare_tree_int (TYPE_SIZE_UNIT (scalar_type), -dr_step))) @@ -2569,11 +2569,11 @@ vect_analyze_data_ref_accesses (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo) /* Check that the data-refs have the same constant size and step. */ tree sza = TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (dra))); tree szb = TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (drb))); - if (!host_integerp (sza, 1) - || !host_integerp (szb, 1) + if (!tree_fits_uhwi_p (sza) + || !tree_fits_uhwi_p (szb) || !tree_int_cst_equal (sza, szb) - || !host_integerp (DR_STEP (dra), 0) - || !host_integerp (DR_STEP (drb), 0) + || !tree_fits_shwi_p (DR_STEP (dra)) + || !tree_fits_shwi_p (DR_STEP (drb)) || !tree_int_cst_equal (DR_STEP (dra), DR_STEP (drb))) break; @@ -2588,19 +2588,19 @@ vect_analyze_data_ref_accesses (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo) break; /* Sorting has ensured that DR_INIT (dra) <= DR_INIT (drb). */ - HOST_WIDE_INT init_a = TREE_INT_CST_LOW (DR_INIT (dra)); - HOST_WIDE_INT init_b = TREE_INT_CST_LOW (DR_INIT (drb)); + HOST_WIDE_INT init_a = tree_to_hwi (DR_INIT (dra)); + HOST_WIDE_INT init_b = tree_to_hwi (DR_INIT (drb)); gcc_assert (init_a < init_b); /* If init_b == init_a + the size of the type * k, we have an interleaving, and DRA is accessed before DRB. */ - HOST_WIDE_INT type_size_a = TREE_INT_CST_LOW (sza); + HOST_WIDE_INT type_size_a = tree_to_hwi (sza); if ((init_b - init_a) % type_size_a != 0) break; /* The step (if not zero) is greater than the difference between data-refs' inits. This splits groups into suitable sizes. 
*/ - HOST_WIDE_INT step = TREE_INT_CST_LOW (DR_STEP (dra)); + HOST_WIDE_INT step = tree_to_hwi (DR_STEP (dra)); if (step != 0 && step <= (init_b - init_a)) break; @@ -2764,8 +2764,8 @@ vect_check_gather (gimple stmt, loop_vec_info loop_vinfo, tree *basep, { if (off == NULL_TREE) { - double_int moff = mem_ref_offset (base); - off = double_int_to_tree (sizetype, moff); + addr_wide_int moff = mem_ref_offset (base); + off = wide_int_to_tree (sizetype, moff); } else off = size_binop (PLUS_EXPR, off, @@ -2861,9 +2861,9 @@ vect_check_gather (gimple stmt, loop_vec_info loop_vinfo, tree *basep, } break; case MULT_EXPR: - if (scale == 1 && host_integerp (op1, 0)) + if (scale == 1 && tree_fits_shwi_p (op1)) { - scale = tree_low_cst (op1, 0); + scale = tree_to_shwi (op1); off = op0; continue; } @@ -3060,7 +3060,7 @@ again: STRIP_NOPS (off); if (TREE_CODE (DR_INIT (newdr)) == INTEGER_CST && TREE_CODE (off) == MULT_EXPR - && host_integerp (TREE_OPERAND (off, 1), 1)) + && tree_fits_uhwi_p (TREE_OPERAND (off, 1))) { tree step = TREE_OPERAND (off, 1); off = TREE_OPERAND (off, 0); @@ -4931,7 +4931,7 @@ vect_supportable_dr_alignment (struct data_reference *dr, { tree vectype = STMT_VINFO_VECTYPE (stmt_info); if ((nested_in_vect_loop - && (TREE_INT_CST_LOW (DR_STEP (dr)) + && (tree_to_hwi (DR_STEP (dr)) != GET_MODE_SIZE (TYPE_MODE (vectype)))) || !loop_vinfo) return dr_explicit_realign; diff --git a/gcc/tree-vect-generic.c b/gcc/tree-vect-generic.c index df2f894e0f4..d242959b131 100644 --- a/gcc/tree-vect-generic.c +++ b/gcc/tree-vect-generic.c @@ -45,11 +45,11 @@ static void expand_vector_operations_1 (gimple_stmt_iterator *); static tree build_replicated_const (tree type, tree inner_type, HOST_WIDE_INT value) { - int width = tree_low_cst (TYPE_SIZE (inner_type), 1); - int n = HOST_BITS_PER_WIDE_INT / width; - unsigned HOST_WIDE_INT low, high, mask; - tree ret; - + int width = tree_to_uhwi (TYPE_SIZE (inner_type)); + int n = TYPE_PRECISION (type) / width; + unsigned HOST_WIDE_INT 
low, mask; + HOST_WIDE_INT a[WIDE_INT_MAX_ELTS]; + int i; gcc_assert (n); if (width == HOST_BITS_PER_WIDE_INT) @@ -60,17 +60,11 @@ build_replicated_const (tree type, tree inner_type, HOST_WIDE_INT value) low = (unsigned HOST_WIDE_INT) ~0 / mask * (value & mask); } - if (TYPE_PRECISION (type) < HOST_BITS_PER_WIDE_INT) - low &= ((HOST_WIDE_INT)1 << TYPE_PRECISION (type)) - 1, high = 0; - else if (TYPE_PRECISION (type) == HOST_BITS_PER_WIDE_INT) - high = 0; - else if (TYPE_PRECISION (type) == HOST_BITS_PER_DOUBLE_INT) - high = low; - else - gcc_unreachable (); + for (i = 0; i < n; i++) + a[i] = low; - ret = build_int_cst_wide (type, low, high); - return ret; + return wide_int_to_tree + (type, wide_int::from_array (a, n, TYPE_PRECISION (type))); } static GTY(()) tree vector_inner_type; @@ -234,8 +228,8 @@ expand_vector_piecewise (gimple_stmt_iterator *gsi, elem_op_func f, tree part_width = TYPE_SIZE (inner_type); tree index = bitsize_int (0); int nunits = TYPE_VECTOR_SUBPARTS (type); - int delta = tree_low_cst (part_width, 1) - / tree_low_cst (TYPE_SIZE (TREE_TYPE (type)), 1); + int delta = tree_to_uhwi (part_width) + / tree_to_uhwi (TYPE_SIZE (TREE_TYPE (type))); int i; location_t loc = gimple_location (gsi_stmt (*gsi)); @@ -268,7 +262,7 @@ expand_vector_parallel (gimple_stmt_iterator *gsi, elem_op_func f, tree type, { tree result, compute_type; enum machine_mode mode; - int n_words = tree_low_cst (TYPE_SIZE_UNIT (type), 1) / UNITS_PER_WORD; + int n_words = tree_to_uhwi (TYPE_SIZE_UNIT (type)) / UNITS_PER_WORD; location_t loc = gimple_location (gsi_stmt (*gsi)); /* We have three strategies. If the type is already correct, just do @@ -291,7 +285,7 @@ expand_vector_parallel (gimple_stmt_iterator *gsi, elem_op_func f, tree type, else { /* Use a single scalar operation with a mode no wider than word_mode. 
*/ - mode = mode_for_size (tree_low_cst (TYPE_SIZE (type), 1), MODE_INT, 0); + mode = mode_for_size (tree_to_uhwi (TYPE_SIZE (type)), MODE_INT, 0); compute_type = lang_hooks.types.type_for_mode (mode, 1); result = f (gsi, compute_type, a, b, NULL_TREE, NULL_TREE, code); warning_at (loc, OPT_Wvector_operation_performance, @@ -313,7 +307,7 @@ expand_vector_addition (gimple_stmt_iterator *gsi, tree type, tree a, tree b, enum tree_code code) { int parts_per_word = UNITS_PER_WORD - / tree_low_cst (TYPE_SIZE_UNIT (TREE_TYPE (type)), 1); + / tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (type))); if (INTEGRAL_TYPE_P (TREE_TYPE (type)) && parts_per_word >= 4 @@ -404,7 +398,8 @@ expand_vector_divmod (gimple_stmt_iterator *gsi, tree type, tree op0, unsigned HOST_WIDE_INT *mulc = XALLOCAVEC (unsigned HOST_WIDE_INT, nunits); int prec = TYPE_PRECISION (TREE_TYPE (type)); int dummy_int; - unsigned int i, unsignedp = TYPE_UNSIGNED (TREE_TYPE (type)); + unsigned int i; + signop sign_p = TYPE_SIGN (TREE_TYPE (type)); unsigned HOST_WIDE_INT mask = GET_MODE_MASK (TYPE_MODE (TREE_TYPE (type))); tree *vec; tree cur_op, mulcst, tem; @@ -428,7 +423,7 @@ expand_vector_divmod (gimple_stmt_iterator *gsi, tree type, tree op0, tree cst = VECTOR_CST_ELT (op1, i); unsigned HOST_WIDE_INT ml; - if (!host_integerp (cst, unsignedp) || integer_zerop (cst)) + if (!tree_fits_hwi_p (cst, sign_p) || integer_zerop (cst)) return NULL_TREE; pre_shifts[i] = 0; post_shifts[i] = 0; @@ -446,10 +441,10 @@ expand_vector_divmod (gimple_stmt_iterator *gsi, tree type, tree op0, } if (mode == -2) continue; - if (unsignedp) + if (sign_p == UNSIGNED) { unsigned HOST_WIDE_INT mh; - unsigned HOST_WIDE_INT d = tree_low_cst (cst, 1) & mask; + unsigned HOST_WIDE_INT d = tree_to_uhwi (cst) & mask; if (d >= ((unsigned HOST_WIDE_INT) 1 << (prec - 1))) /* FIXME: Can transform this into op0 >= op1 ? 1 : 0. 
*/ @@ -481,9 +476,9 @@ expand_vector_divmod (gimple_stmt_iterator *gsi, tree type, tree op0, unsigned HOST_WIDE_INT d2; int this_pre_shift; - if (!host_integerp (cst2, 1)) + if (!tree_fits_uhwi_p (cst2)) return NULL_TREE; - d2 = tree_low_cst (cst2, 1) & mask; + d2 = tree_to_uhwi (cst2) & mask; if (d2 == 0) return NULL_TREE; this_pre_shift = floor_log2 (d2 & -d2); @@ -519,7 +514,7 @@ expand_vector_divmod (gimple_stmt_iterator *gsi, tree type, tree op0, } else { - HOST_WIDE_INT d = tree_low_cst (cst, 0); + HOST_WIDE_INT d = tree_to_shwi (cst); unsigned HOST_WIDE_INT abs_d; if (d == -1) @@ -575,7 +570,7 @@ expand_vector_divmod (gimple_stmt_iterator *gsi, tree type, tree op0, if (use_pow2) { tree addend = NULL_TREE; - if (!unsignedp) + if (sign_p == SIGNED) { tree uns_type; @@ -627,7 +622,7 @@ expand_vector_divmod (gimple_stmt_iterator *gsi, tree type, tree op0, } if (code == TRUNC_DIV_EXPR) { - if (unsignedp) + if (sign_p == UNSIGNED) { /* q = op0 >> shift; */ cur_op = add_rshift (gsi, type, op0, shifts); @@ -661,7 +656,7 @@ expand_vector_divmod (gimple_stmt_iterator *gsi, tree type, tree op0, if (op != unknown_optab && optab_handler (op, TYPE_MODE (type)) != CODE_FOR_nothing) { - if (unsignedp) + if (sign_p == UNSIGNED) /* r = op0 & mask; */ return gimplify_build2 (gsi, BIT_AND_EXPR, type, op0, mask); else if (addend != NULL_TREE) @@ -702,7 +697,7 @@ expand_vector_divmod (gimple_stmt_iterator *gsi, tree type, tree op0, switch (mode) { case 0: - gcc_assert (unsignedp); + gcc_assert (sign_p == UNSIGNED); /* t1 = oprnd0 >> pre_shift; t2 = t1 h* ml; q = t2 >> post_shift; */ @@ -711,7 +706,7 @@ expand_vector_divmod (gimple_stmt_iterator *gsi, tree type, tree op0, return NULL_TREE; break; case 1: - gcc_assert (unsignedp); + gcc_assert (sign_p == UNSIGNED); for (i = 0; i < nunits; i++) { shift_temps[i] = 1; @@ -722,7 +717,7 @@ expand_vector_divmod (gimple_stmt_iterator *gsi, tree type, tree op0, case 3: case 4: case 5: - gcc_assert (!unsignedp); + gcc_assert (sign_p == 
SIGNED); for (i = 0; i < nunits; i++) shift_temps[i] = prec - 1; break; @@ -1049,8 +1044,8 @@ vector_element (gimple_stmt_iterator *gsi, tree vect, tree idx, tree *ptmpvec) /* Given that we're about to compute a binary modulus, we don't care about the high bits of the value. */ - index = TREE_INT_CST_LOW (idx); - if (!host_integerp (idx, 1) || index >= elements) + index = tree_to_hwi (idx); + if (!tree_fits_uhwi_p (idx) || index >= elements) { index &= elements - 1; idx = build_int_cst (TREE_TYPE (idx), index); @@ -1155,7 +1150,7 @@ lower_vec_perm (gimple_stmt_iterator *gsi) unsigned char *sel_int = XALLOCAVEC (unsigned char, elements); for (i = 0; i < elements; ++i) - sel_int[i] = (TREE_INT_CST_LOW (VECTOR_CST_ELT (mask, i)) + sel_int[i] = (tree_to_hwi (VECTOR_CST_ELT (mask, i)) & (2 * elements - 1)); if (can_vec_perm_p (TYPE_MODE (vect_type), false, sel_int)) @@ -1181,8 +1176,8 @@ lower_vec_perm (gimple_stmt_iterator *gsi) { unsigned HOST_WIDE_INT index; - index = TREE_INT_CST_LOW (i_val); - if (!host_integerp (i_val, 1) || index >= elements) + index = tree_to_hwi (i_val); + if (!tree_fits_uhwi_p (i_val) || index >= elements) i_val = build_int_cst (mask_elt_type, index & (elements - 1)); if (two_operand_p && (index & elements) != 0) diff --git a/gcc/tree-vect-loop-manip.c b/gcc/tree-vect-loop-manip.c index 1cc563c3cbb..1fe20d91cb4 100644 --- a/gcc/tree-vect-loop-manip.c +++ b/gcc/tree-vect-loop-manip.c @@ -1817,7 +1817,7 @@ vect_do_peeling_for_loop_bound (loop_vec_info loop_vinfo, tree *ratio, : LOOP_VINFO_VECT_FACTOR (loop_vinfo)) - 2; if (check_profitability) max_iter = MAX (max_iter, (int) th - 1); - record_niter_bound (new_loop, double_int::from_shwi (max_iter), false, true); + record_niter_bound (new_loop, max_iter, false, true); dump_printf (MSG_NOTE, "Setting upper bound of nb iterations for epilogue " "loop to %d\n", max_iter); @@ -2053,7 +2053,7 @@ vect_do_peeling_for_alignment (loop_vec_info loop_vinfo, max_iter = LOOP_VINFO_VECT_FACTOR (loop_vinfo) - 
2; if (check_profitability) max_iter = MAX (max_iter, (int) th - 1); - record_niter_bound (new_loop, double_int::from_shwi (max_iter), false, true); + record_niter_bound (new_loop, max_iter, false, true); dump_printf (MSG_NOTE, "Setting upper bound of nb iterations for prologue " "loop to %d\n", max_iter); diff --git a/gcc/tree-vect-loop.c b/gcc/tree-vect-loop.c index d9125f690d2..bb700d0e60c 100644 --- a/gcc/tree-vect-loop.c +++ b/gcc/tree-vect-loop.c @@ -1265,7 +1265,7 @@ vect_analyze_loop_form (struct loop *loop) dump_printf (MSG_NOTE, "\n"); } } - else if (TREE_INT_CST_LOW (number_of_iterations) == 0) + else if (tree_to_hwi (number_of_iterations) == 0) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, @@ -3092,10 +3092,10 @@ vect_model_reduction_cost (stmt_vec_info stmt_info, enum tree_code reduc_code, } else { - int vec_size_in_bits = tree_low_cst (TYPE_SIZE (vectype), 1); + int vec_size_in_bits = tree_to_uhwi (TYPE_SIZE (vectype)); tree bitsize = TYPE_SIZE (TREE_TYPE (gimple_assign_lhs (orig_stmt))); - int element_bitsize = tree_low_cst (bitsize, 1); + int element_bitsize = tree_to_uhwi (bitsize); int nelements = vec_size_in_bits / element_bitsize; optab = optab_for_tree_code (code, vectype, optab_default); @@ -3607,7 +3607,7 @@ get_initial_def_for_reduction (gimple stmt, tree init_val, if (SCALAR_FLOAT_TYPE_P (scalar_type)) init_value = build_real (scalar_type, TREE_REAL_CST (init_val)); else - init_value = build_int_cst (scalar_type, TREE_INT_CST_LOW (init_val)); + init_value = build_int_cst (scalar_type, tree_to_hwi (init_val)); } else init_value = init_val; @@ -4108,8 +4108,8 @@ vect_create_epilog_for_reduction (vec<tree> vect_defs, gimple stmt, enum tree_code shift_code = ERROR_MARK; bool have_whole_vector_shift = true; int bit_offset; - int element_bitsize = tree_low_cst (bitsize, 1); - int vec_size_in_bits = tree_low_cst (TYPE_SIZE (vectype), 1); + int element_bitsize = tree_to_uhwi (bitsize); + int vec_size_in_bits = 
tree_to_uhwi (TYPE_SIZE (vectype)); tree vec_temp; if (optab_handler (vec_shr_optab, mode) != CODE_FOR_nothing) @@ -4186,7 +4186,7 @@ vect_create_epilog_for_reduction (vec<tree> vect_defs, gimple stmt, dump_printf_loc (MSG_NOTE, vect_location, "Reduce using scalar code.\n"); - vec_size_in_bits = tree_low_cst (TYPE_SIZE (vectype), 1); + vec_size_in_bits = tree_to_uhwi (TYPE_SIZE (vectype)); FOR_EACH_VEC_ELT (new_phis, i, new_phi) { if (gimple_code (new_phi) == GIMPLE_PHI) @@ -5908,19 +5908,17 @@ vect_transform_loop (loop_vec_info loop_vinfo) scale_loop_profile (loop, GCOV_COMPUTE_SCALE (1, vectorization_factor), expected_iterations / vectorization_factor); loop->nb_iterations_upper_bound - = loop->nb_iterations_upper_bound.udiv (double_int::from_uhwi (vectorization_factor), - FLOOR_DIV_EXPR); + = wi::udiv_floor (loop->nb_iterations_upper_bound, vectorization_factor); if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo) - && loop->nb_iterations_upper_bound != double_int_zero) - loop->nb_iterations_upper_bound = loop->nb_iterations_upper_bound - double_int_one; + && loop->nb_iterations_upper_bound != 0) + loop->nb_iterations_upper_bound = loop->nb_iterations_upper_bound - 1; if (loop->any_estimate) { loop->nb_iterations_estimate - = loop->nb_iterations_estimate.udiv (double_int::from_uhwi (vectorization_factor), - FLOOR_DIV_EXPR); + = wi::udiv_floor (loop->nb_iterations_estimate, vectorization_factor); if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo) - && loop->nb_iterations_estimate != double_int_zero) - loop->nb_iterations_estimate = loop->nb_iterations_estimate - double_int_one; + && loop->nb_iterations_estimate != 0) + loop->nb_iterations_estimate = loop->nb_iterations_estimate - 1; } if (dump_enabled_p ()) diff --git a/gcc/tree-vect-patterns.c b/gcc/tree-vect-patterns.c index 0a4e812fd82..e9222232479 100644 --- a/gcc/tree-vect-patterns.c +++ b/gcc/tree-vect-patterns.c @@ -777,8 +777,8 @@ vect_recog_pow_pattern (vec<gimple> *stmts, tree *type_in, *type_out = NULL_TREE; /* 
Catch squaring. */ - if ((host_integerp (exp, 0) - && tree_low_cst (exp, 0) == 2) + if ((tree_fits_shwi_p (exp) + && tree_to_shwi (exp) == 2) || (TREE_CODE (exp) == REAL_CST && REAL_VALUES_EQUAL (TREE_REAL_CST (exp), dconst2))) { @@ -1629,14 +1629,14 @@ vect_recog_rotate_pattern (vec<gimple> *stmts, tree *type_in, tree *type_out) if (TREE_CODE (def) == INTEGER_CST) { - if (!host_integerp (def, 1) - || (unsigned HOST_WIDE_INT) tree_low_cst (def, 1) + if (!tree_fits_uhwi_p (def) + || (unsigned HOST_WIDE_INT) tree_to_uhwi (def) >= GET_MODE_PRECISION (TYPE_MODE (type)) || integer_zerop (def)) return NULL; def2 = build_int_cst (stype, GET_MODE_PRECISION (TYPE_MODE (type)) - - tree_low_cst (def, 1)); + - tree_to_uhwi (def)); } else { @@ -2059,7 +2059,7 @@ vect_recog_divmod_pattern (vec<gimple> *stmts, return pattern_stmt; } - if (!host_integerp (oprnd1, TYPE_UNSIGNED (itype)) + if (!tree_fits_hwi_p (oprnd1, TYPE_SIGN (itype)) || integer_zerop (oprnd1) || prec > HOST_BITS_PER_WIDE_INT) return NULL; @@ -2073,7 +2073,7 @@ vect_recog_divmod_pattern (vec<gimple> *stmts, { unsigned HOST_WIDE_INT mh, ml; int pre_shift, post_shift; - unsigned HOST_WIDE_INT d = tree_low_cst (oprnd1, 1) + unsigned HOST_WIDE_INT d = tree_to_uhwi (oprnd1) & GET_MODE_MASK (TYPE_MODE (itype)); tree t1, t2, t3, t4; @@ -2190,7 +2190,7 @@ vect_recog_divmod_pattern (vec<gimple> *stmts, { unsigned HOST_WIDE_INT ml; int post_shift; - HOST_WIDE_INT d = tree_low_cst (oprnd1, 0); + HOST_WIDE_INT d = tree_to_shwi (oprnd1); unsigned HOST_WIDE_INT abs_d; bool add = false; tree t1, t2, t3, t4; diff --git a/gcc/tree-vect-stmts.c b/gcc/tree-vect-stmts.c index 7d9c9ed8d7d..ae2abba530d 100644 --- a/gcc/tree-vect-stmts.c +++ b/gcc/tree-vect-stmts.c @@ -5026,7 +5026,7 @@ vectorizable_load (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt, nested within an outer-loop that is being vectorized. 
*/ if (nested_in_vect_loop - && (TREE_INT_CST_LOW (DR_STEP (dr)) + && (tree_to_hwi (DR_STEP (dr)) % GET_MODE_SIZE (TYPE_MODE (vectype)) != 0)) { gcc_assert (alignment_support_scheme != dr_explicit_realign_optimized); diff --git a/gcc/tree-vectorizer.h b/gcc/tree-vectorizer.h index 8b7b3451509..2dc0c702fa8 100644 --- a/gcc/tree-vectorizer.h +++ b/gcc/tree-vectorizer.h @@ -330,7 +330,7 @@ typedef struct _loop_vec_info { #define LOOP_VINFO_LOOP_NEST(L) (L)->loop_nest #define LOOP_VINFO_DATAREFS(L) (L)->datarefs #define LOOP_VINFO_DDRS(L) (L)->ddrs -#define LOOP_VINFO_INT_NITERS(L) (TREE_INT_CST_LOW ((L)->num_iters)) +#define LOOP_VINFO_INT_NITERS(L) (tree_to_hwi ((L)->num_iters)) #define LOOP_PEELING_FOR_ALIGNMENT(L) (L)->peeling_for_alignment #define LOOP_VINFO_UNALIGNED_DR(L) (L)->unaligned_dr #define LOOP_VINFO_MAY_MISALIGN_STMTS(L) (L)->may_misalign_stmts @@ -352,8 +352,8 @@ typedef struct _loop_vec_info { (L)->may_alias_ddrs.length () > 0 #define NITERS_KNOWN_P(n) \ -(host_integerp ((n),0) \ -&& TREE_INT_CST_LOW ((n)) > 0) +(tree_fits_shwi_p ((n)) \ +&& tree_to_shwi ((n)) > 0) #define LOOP_VINFO_NITERS_KNOWN_P(L) \ NITERS_KNOWN_P ((L)->num_iters) diff --git a/gcc/tree-vrp.c b/gcc/tree-vrp.c index 0a059645c50..094b7eb899a 100644 --- a/gcc/tree-vrp.c +++ b/gcc/tree-vrp.c @@ -40,6 +40,7 @@ along with GCC; see the file COPYING3. If not see #include "expr.h" #include "optabs.h" #include "tree-ssa-threadedge.h" +#include "wide-int.h" @@ -1624,10 +1625,16 @@ extract_range_from_assert (value_range_t *vr_p, tree expr) /* Make sure to not set TREE_OVERFLOW on the final type conversion. We are willingly interpreting large positive unsigned values as negative singed values here. 
*/ - min = force_fit_type_double (TREE_TYPE (var), tree_to_double_int (min), - 0, false); - max = force_fit_type_double (TREE_TYPE (var), tree_to_double_int (max), - 0, false); + min = force_fit_type (TREE_TYPE (var), + wide_int::from (min, + TYPE_PRECISION (TREE_TYPE (var)), + TYPE_SIGN (TREE_TYPE (min))), + 0, false); + max = force_fit_type (TREE_TYPE (var), + wide_int::from (max, + TYPE_PRECISION (TREE_TYPE (var)), + TYPE_SIGN (TREE_TYPE (max))), + 0, false); /* We can transform a max, min range to an anti-range or vice-versa. Use set_and_canonicalize_value_range which does @@ -1883,6 +1890,10 @@ vrp_int_const_binop (enum tree_code code, tree val1, tree val2) /* If the singed operation wraps then int_const_binop has done everything we want. */ ; + /* Signed division of -1/0 overflows and by the time it gets here + returns NULL_TREE. */ + else if (!res) + return NULL_TREE; else if ((TREE_OVERFLOW (res) && !TREE_OVERFLOW (val1) && !TREE_OVERFLOW (val2)) @@ -1974,19 +1985,20 @@ vrp_int_const_binop (enum tree_code code, tree val1, tree val2) } -/* For range VR compute two double_int bitmasks. In *MAY_BE_NONZERO +/* For range VR compute two wide_int bitmasks. In *MAY_BE_NONZERO bitmask if some bit is unset, it means for all numbers in the range the bit is 0, otherwise it might be 0 or 1. In *MUST_BE_NONZERO bitmask if some bit is set, it means for all numbers in the range the bit is 1, otherwise it might be 0 or 1. 
*/ static bool -zero_nonzero_bits_from_vr (value_range_t *vr, - double_int *may_be_nonzero, - double_int *must_be_nonzero) +zero_nonzero_bits_from_vr (const tree expr_type, + value_range_t *vr, + wide_int *may_be_nonzero, + wide_int *must_be_nonzero) { - *may_be_nonzero = double_int_minus_one; - *must_be_nonzero = double_int_zero; + *may_be_nonzero = wi::minus_one (TYPE_PRECISION (expr_type)); + *must_be_nonzero = wi::zero (TYPE_PRECISION (expr_type)); if (!range_int_cst_p (vr) || TREE_OVERFLOW (vr->min) || TREE_OVERFLOW (vr->max)) @@ -1994,34 +2006,23 @@ zero_nonzero_bits_from_vr (value_range_t *vr, if (range_int_cst_singleton_p (vr)) { - *may_be_nonzero = tree_to_double_int (vr->min); + *may_be_nonzero = vr->min; *must_be_nonzero = *may_be_nonzero; } else if (tree_int_cst_sgn (vr->min) >= 0 || tree_int_cst_sgn (vr->max) < 0) { - double_int dmin = tree_to_double_int (vr->min); - double_int dmax = tree_to_double_int (vr->max); - double_int xor_mask = dmin ^ dmax; - *may_be_nonzero = dmin | dmax; - *must_be_nonzero = dmin & dmax; - if (xor_mask.high != 0) - { - unsigned HOST_WIDE_INT mask - = ((unsigned HOST_WIDE_INT) 1 - << floor_log2 (xor_mask.high)) - 1; - may_be_nonzero->low = ALL_ONES; - may_be_nonzero->high |= mask; - must_be_nonzero->low = 0; - must_be_nonzero->high &= ~mask; - } - else if (xor_mask.low != 0) + wide_int wmin = vr->min; + wide_int wmax = vr->max; + wide_int xor_mask = wmin ^ wmax; + *may_be_nonzero = wmin | wmax; + *must_be_nonzero = wmin & wmax; + if (xor_mask != 0) { - unsigned HOST_WIDE_INT mask - = ((unsigned HOST_WIDE_INT) 1 - << floor_log2 (xor_mask.low)) - 1; - may_be_nonzero->low |= mask; - must_be_nonzero->low &= ~mask; + wide_int mask = wi::mask (wi::floor_log2 (xor_mask), false, + (*may_be_nonzero).get_precision ()); + *may_be_nonzero = (*may_be_nonzero) | mask; + *must_be_nonzero = (*must_be_nonzero).and_not (mask); } } @@ -2054,15 +2055,15 @@ ranges_from_anti_range (value_range_t *ar, vr0->type = VR_RANGE; vr0->min = vrp_val_min 
(type); vr0->max - = double_int_to_tree (type, - tree_to_double_int (ar->min) - double_int_one); + = wide_int_to_tree (type, + wide_int (ar->min) - 1); } if (!vrp_val_is_max (ar->max)) { vr1->type = VR_RANGE; vr1->min - = double_int_to_tree (type, - tree_to_double_int (ar->max) + double_int_one); + = wide_int_to_tree (type, + wide_int (ar->max) + 1); vr1->max = vrp_val_max (type); } if (vr0->type == VR_UNDEFINED) @@ -2228,28 +2229,6 @@ extract_range_from_multiplicative_op_1 (value_range_t *vr, set_value_range (vr, type, min, max, NULL); } -/* Some quadruple precision helpers. */ -static int -quad_int_cmp (double_int l0, double_int h0, - double_int l1, double_int h1, bool uns) -{ - int c = h0.cmp (h1, uns); - if (c != 0) return c; - return l0.ucmp (l1); -} - -static void -quad_int_pair_sort (double_int *l0, double_int *h0, - double_int *l1, double_int *h1, bool uns) -{ - if (quad_int_cmp (*l0, *h0, *l1, *h1, uns) > 0) - { - double_int tmp; - tmp = *l0; *l0 = *l1; *l1 = tmp; - tmp = *h0; *h0 = *h1; *h1 = tmp; - } -} - /* Extract range information from a binary operation CODE based on the ranges of each of its operands, *VR0 and *VR1 with resulting type EXPR_TYPE. The resulting range is stored in *VR. */ @@ -2421,43 +2400,40 @@ extract_range_from_binary_expr_1 (value_range_t *vr, /* If we have a PLUS_EXPR with two VR_RANGE integer constant ranges compute the precise range for such case if possible. */ if (range_int_cst_p (&vr0) - && range_int_cst_p (&vr1) - /* We need as many bits as the possibly unsigned inputs. 
*/ - && TYPE_PRECISION (expr_type) <= HOST_BITS_PER_DOUBLE_INT) - { - double_int min0 = tree_to_double_int (vr0.min); - double_int max0 = tree_to_double_int (vr0.max); - double_int min1 = tree_to_double_int (vr1.min); - double_int max1 = tree_to_double_int (vr1.max); - bool uns = TYPE_UNSIGNED (expr_type); - double_int type_min - = double_int::min_value (TYPE_PRECISION (expr_type), uns); - double_int type_max - = double_int::max_value (TYPE_PRECISION (expr_type), uns); - double_int dmin, dmax; + && range_int_cst_p (&vr1)) + { + signop sgn = TYPE_SIGN (expr_type); + unsigned int prec = TYPE_PRECISION (expr_type); + wide_int min0 = wide_int (vr0.min); + wide_int max0 = wide_int (vr0.max); + wide_int min1 = wide_int (vr1.min); + wide_int max1 = wide_int (vr1.max); + wide_int type_min = wi::min_value (TYPE_PRECISION (expr_type), sgn); + wide_int type_max = wi::max_value (TYPE_PRECISION (expr_type), sgn); + wide_int wmin, wmax; int min_ovf = 0; int max_ovf = 0; if (code == PLUS_EXPR) { - dmin = min0 + min1; - dmax = max0 + max1; - - /* Check for overflow in double_int. */ - if (min1.cmp (double_int_zero, uns) != dmin.cmp (min0, uns)) - min_ovf = min0.cmp (dmin, uns); - if (max1.cmp (double_int_zero, uns) != dmax.cmp (max0, uns)) - max_ovf = max0.cmp (dmax, uns); + wmin = min0 + min1; + wmax = max0 + max1; + + /* Check for overflow. 
*/ + if (wi::cmp (min1, 0, sgn) != wi::cmp (wmin, min0, sgn)) + min_ovf = wi::cmp (min0, wmin, sgn); + if (wi::cmp (max1, 0, sgn) != wi::cmp (wmax, max0, sgn)) + max_ovf = wi::cmp (max0, wmax, sgn); } else /* if (code == MINUS_EXPR) */ { - dmin = min0 - max1; - dmax = max0 - min1; + wmin = min0 - max1; + wmax = max0 - min1; - if (double_int_zero.cmp (max1, uns) != dmin.cmp (min0, uns)) - min_ovf = min0.cmp (max1, uns); - if (double_int_zero.cmp (min1, uns) != dmax.cmp (max0, uns)) - max_ovf = max0.cmp (min1, uns); + if (wi::cmp (0, max1, sgn) != wi::cmp (wmin, min0, sgn)) + min_ovf = wi::cmp (min0, max1, sgn); + if (wi::cmp (0, min1, sgn) != wi::cmp (wmax, max0, sgn)) + max_ovf = wi::cmp (max0, min1, sgn); } /* For non-wrapping arithmetic look at possibly smaller @@ -2465,24 +2441,24 @@ extract_range_from_binary_expr_1 (value_range_t *vr, if (!TYPE_OVERFLOW_WRAPS (expr_type)) { if (vrp_val_min (expr_type)) - type_min = tree_to_double_int (vrp_val_min (expr_type)); + type_min = wide_int (vrp_val_min (expr_type)); if (vrp_val_max (expr_type)) - type_max = tree_to_double_int (vrp_val_max (expr_type)); + type_max = wide_int (vrp_val_max (expr_type)); } /* Check for type overflow. */ if (min_ovf == 0) { - if (dmin.cmp (type_min, uns) == -1) + if (wi::cmp (wmin, type_min, sgn) == -1) min_ovf = -1; - else if (dmin.cmp (type_max, uns) == 1) + else if (wi::cmp (wmin, type_max, sgn) == 1) min_ovf = 1; } if (max_ovf == 0) { - if (dmax.cmp (type_min, uns) == -1) + if (wi::cmp (wmax, type_min, sgn) == -1) max_ovf = -1; - else if (dmax.cmp (type_max, uns) == 1) + else if (wi::cmp (wmax, type_max, sgn) == 1) max_ovf = 1; } @@ -2490,16 +2466,14 @@ extract_range_from_binary_expr_1 (value_range_t *vr, { /* If overflow wraps, truncate the values and adjust the range kind and bounds appropriately. 
*/ - double_int tmin - = dmin.ext (TYPE_PRECISION (expr_type), uns); - double_int tmax - = dmax.ext (TYPE_PRECISION (expr_type), uns); + wide_int tmin = wide_int::from (wmin, prec, sgn); + wide_int tmax = wide_int::from (wmax, prec, sgn); if (min_ovf == max_ovf) { /* No overflow or both overflow or underflow. The range kind stays VR_RANGE. */ - min = double_int_to_tree (expr_type, tmin); - max = double_int_to_tree (expr_type, tmax); + min = wide_int_to_tree (expr_type, tmin); + max = wide_int_to_tree (expr_type, tmax); } else if (min_ovf == -1 && max_ovf == 1) @@ -2513,26 +2487,26 @@ extract_range_from_binary_expr_1 (value_range_t *vr, /* Min underflow or max overflow. The range kind changes to VR_ANTI_RANGE. */ bool covers = false; - double_int tem = tmin; + wide_int tem = tmin; gcc_assert ((min_ovf == -1 && max_ovf == 0) || (max_ovf == 1 && min_ovf == 0)); type = VR_ANTI_RANGE; - tmin = tmax + double_int_one; - if (tmin.cmp (tmax, uns) < 0) + tmin = tmax + 1; + if (wi::cmp (tmin, tmax, sgn) < 0) covers = true; - tmax = tem + double_int_minus_one; - if (tmax.cmp (tem, uns) > 0) + tmax = tem - 1; + if (wi::cmp (tmax, tem, sgn) > 0) covers = true; /* If the anti-range would cover nothing, drop to varying. Likewise if the anti-range bounds are outside of the types values. 
*/ - if (covers || tmin.cmp (tmax, uns) > 0) + if (covers || wi::cmp (tmin, tmax, sgn) > 0) { set_value_range_to_varying (vr); return; } - min = double_int_to_tree (expr_type, tmin); - max = double_int_to_tree (expr_type, tmax); + min = wide_int_to_tree (expr_type, tmin); + max = wide_int_to_tree (expr_type, tmax); } } else @@ -2545,7 +2519,7 @@ extract_range_from_binary_expr_1 (value_range_t *vr, && supports_overflow_infinity (expr_type)) min = negative_overflow_infinity (expr_type); else - min = double_int_to_tree (expr_type, type_min); + min = wide_int_to_tree (expr_type, type_min); } else if (min_ovf == 1) { @@ -2553,10 +2527,10 @@ extract_range_from_binary_expr_1 (value_range_t *vr, && supports_overflow_infinity (expr_type)) min = positive_overflow_infinity (expr_type); else - min = double_int_to_tree (expr_type, type_max); + min = wide_int_to_tree (expr_type, type_max); } else - min = double_int_to_tree (expr_type, dmin); + min = wide_int_to_tree (expr_type, wmin); if (max_ovf == -1) { @@ -2564,7 +2538,7 @@ extract_range_from_binary_expr_1 (value_range_t *vr, && supports_overflow_infinity (expr_type)) max = negative_overflow_infinity (expr_type); else - max = double_int_to_tree (expr_type, type_min); + max = wide_int_to_tree (expr_type, type_min); } else if (max_ovf == 1) { @@ -2572,10 +2546,10 @@ extract_range_from_binary_expr_1 (value_range_t *vr, && supports_overflow_infinity (expr_type)) max = positive_overflow_infinity (expr_type); else - max = double_int_to_tree (expr_type, type_max); + max = wide_int_to_tree (expr_type, type_max); } else - max = double_int_to_tree (expr_type, dmax); + max = wide_int_to_tree (expr_type, wmax); } if (needs_overflow_infinity (expr_type) && supports_overflow_infinity (expr_type)) @@ -2661,97 +2635,86 @@ extract_range_from_binary_expr_1 (value_range_t *vr, else if (code == MULT_EXPR) { /* Fancy code so that with unsigned, [-3,-1]*[-3,-1] does not - drop to varying. */ + drop to varying. 
This test requires 2*prec bits if both + operands are signed and 2*prec + 2 bits if either is not. */ + + signop sign = TYPE_SIGN (expr_type); + unsigned int prec = TYPE_PRECISION (expr_type); + unsigned int prec2 = (prec * 2) + (sign == UNSIGNED ? 2 : 0); + if (range_int_cst_p (&vr0) && range_int_cst_p (&vr1) && TYPE_OVERFLOW_WRAPS (expr_type)) { - double_int min0, max0, min1, max1, sizem1, size; - double_int prod0l, prod0h, prod1l, prod1h, - prod2l, prod2h, prod3l, prod3h; - bool uns0, uns1, uns; + wide_int min0, max0, min1, max1; + wide_int prod0, prod1, prod2, prod3; + wide_int sizem1 = wi::mask (prec, false, prec2); + wide_int size = sizem1 + 1; - sizem1 = double_int::max_value (TYPE_PRECISION (expr_type), true); - size = sizem1 + double_int_one; - - min0 = tree_to_double_int (vr0.min); - max0 = tree_to_double_int (vr0.max); - min1 = tree_to_double_int (vr1.min); - max1 = tree_to_double_int (vr1.max); - - uns0 = TYPE_UNSIGNED (expr_type); - uns1 = uns0; + /* Extend the values using the sign of the result to PREC2. + From here on out, everthing is just signed math no matter + what the input types were. */ + min0 = wide_int::from (vr0.min, prec2, sign); + max0 = wide_int::from (vr0.max, prec2, sign); + min1 = wide_int::from (vr1.min, prec2, sign); + max1 = wide_int::from (vr1.max, prec2, sign); /* Canonicalize the intervals. 
*/ - if (TYPE_UNSIGNED (expr_type)) + if (sign == UNSIGNED) { - double_int min2 = size - min0; - if (!min2.is_zero () && min2.cmp (max0, true) < 0) + if (wi::ltu_p (size, min0 + max0)) { - min0 = -min2; + min0 -= size; max0 -= size; - uns0 = false; } - min2 = size - min1; - if (!min2.is_zero () && min2.cmp (max1, true) < 0) + if (wi::ltu_p (size, min1 + max1)) { - min1 = -min2; + min1 -= size; max1 -= size; - uns1 = false; } } - uns = uns0 & uns1; - bool overflow; - prod0l = min0.wide_mul_with_sign (min1, true, &prod0h, &overflow); - if (!uns0 && min0.is_negative ()) - prod0h -= min1; - if (!uns1 && min1.is_negative ()) - prod0h -= min0; - - prod1l = min0.wide_mul_with_sign (max1, true, &prod1h, &overflow); - if (!uns0 && min0.is_negative ()) - prod1h -= max1; - if (!uns1 && max1.is_negative ()) - prod1h -= min0; - - prod2l = max0.wide_mul_with_sign (min1, true, &prod2h, &overflow); - if (!uns0 && max0.is_negative ()) - prod2h -= min1; - if (!uns1 && min1.is_negative ()) - prod2h -= max0; - - prod3l = max0.wide_mul_with_sign (max1, true, &prod3h, &overflow); - if (!uns0 && max0.is_negative ()) - prod3h -= max1; - if (!uns1 && max1.is_negative ()) - prod3h -= max0; - - /* Sort the 4 products. */ - quad_int_pair_sort (&prod0l, &prod0h, &prod3l, &prod3h, uns); - quad_int_pair_sort (&prod1l, &prod1h, &prod2l, &prod2h, uns); - quad_int_pair_sort (&prod0l, &prod0h, &prod1l, &prod1h, uns); - quad_int_pair_sort (&prod2l, &prod2h, &prod3l, &prod3h, uns); - - /* Max - min. */ - if (prod0l.is_zero ()) + prod0 = min0 * min1; + prod1 = min0 * max1; + prod2 = max0 * min1; + prod3 = max0 * max1; + + /* Sort the 4 products so that min is in prod0 and max is in + prod3. 
*/ + /* min0min1 > max0max1 */ + if (wi::gts_p (prod0, prod3)) { - prod1l = double_int_zero; - prod1h = -prod0h; + wide_int tmp = prod3; + prod3 = prod0; + prod0 = tmp; } - else + + /* min0max1 > max0min1 */ + if (wi::gts_p (prod1, prod2)) + { + wide_int tmp = prod2; + prod2 = prod1; + prod1 = tmp; + } + + if (wi::gts_p (prod0, prod1)) { - prod1l = -prod0l; - prod1h = ~prod0h; + wide_int tmp = prod1; + prod1 = prod0; + prod0 = tmp; } - prod2l = prod3l + prod1l; - prod2h = prod3h + prod1h; - if (prod2l.ult (prod3l)) - prod2h += double_int_one; /* carry */ - if (!prod2h.is_zero () - || prod2l.cmp (sizem1, true) >= 0) + if (wi::gts_p (prod2, prod3)) + { + wide_int tmp = prod3; + prod3 = prod2; + prod2 = tmp; + } + + /* diff = max - min. */ + prod2 = prod3 - prod0; + if (wi::geu_p (prod2, sizem1)) { /* the range covers all values. */ set_value_range_to_varying (vr); @@ -2760,8 +2723,8 @@ extract_range_from_binary_expr_1 (value_range_t *vr, /* The following should handle the wrapping and selecting VR_ANTI_RANGE for us. */ - min = double_int_to_tree (expr_type, prod0l); - max = double_int_to_tree (expr_type, prod3l); + min = wide_int_to_tree (expr_type, prod0); + max = wide_int_to_tree (expr_type, prod3); set_and_canonicalize_value_range (vr, VR_RANGE, min, max, NULL); return; } @@ -2808,11 +2771,10 @@ extract_range_from_binary_expr_1 (value_range_t *vr, bool saved_flag_wrapv; value_range_t vr1p = VR_INITIALIZER; vr1p.type = VR_RANGE; - vr1p.min - = double_int_to_tree (expr_type, - double_int_one - .llshift (TREE_INT_CST_LOW (vr1.min), - TYPE_PRECISION (expr_type))); + vr1p.min = (wide_int_to_tree + (expr_type, + wi::set_bit_in_zero (tree_to_shwi (vr1.min), + TYPE_PRECISION (expr_type)))); vr1p.max = vr1p.min; /* We have to use a wrapping multiply though as signed overflow on lshifts is implementation defined in C89. 
*/ @@ -2829,34 +2791,34 @@ extract_range_from_binary_expr_1 (value_range_t *vr, int prec = TYPE_PRECISION (expr_type); int overflow_pos = prec; int bound_shift; - double_int bound, complement, low_bound, high_bound; + wide_int bound, complement, low_bound, high_bound; bool uns = TYPE_UNSIGNED (expr_type); bool in_bounds = false; if (!uns) overflow_pos -= 1; - bound_shift = overflow_pos - TREE_INT_CST_LOW (vr1.max); - /* If bound_shift == HOST_BITS_PER_DOUBLE_INT, the llshift can + bound_shift = overflow_pos - tree_to_shwi (vr1.max); + /* If bound_shift == HOST_BITS_PER_WIDE_INT, the llshift can overflow. However, for that to happen, vr1.max needs to be zero, which means vr1 is a singleton range of zero, which means it should be handled by the previous LSHIFT_EXPR if-clause. */ - bound = double_int_one.llshift (bound_shift, prec); - complement = ~(bound - double_int_one); + bound = wi::set_bit_in_zero (bound_shift, prec); + complement = ~(bound - 1); if (uns) { - low_bound = bound.zext (prec); - high_bound = complement.zext (prec); - if (tree_to_double_int (vr0.max).ult (low_bound)) + low_bound = bound; + high_bound = complement; + if (wi::ltu_p (vr0.max, low_bound)) { /* [5, 6] << [1, 2] == [10, 24]. */ /* We're shifting out only zeroes, the value increases monotonically. */ in_bounds = true; } - else if (high_bound.ult (tree_to_double_int (vr0.min))) + else if (wi::ltu_p (high_bound, vr0.min)) { /* [0xffffff00, 0xffffffff] << [1, 2] == [0xfffffc00, 0xfffffffe]. */ @@ -2868,10 +2830,10 @@ extract_range_from_binary_expr_1 (value_range_t *vr, else { /* [-1, 1] << [1, 2] == [-4, 4]. */ - low_bound = complement.sext (prec); + low_bound = complement; high_bound = bound; - if (tree_to_double_int (vr0.max).slt (high_bound) - && low_bound.slt (tree_to_double_int (vr0.min))) + if (wi::lts_p (vr0.max, high_bound) + && wi::lts_p (low_bound, vr0.min)) { /* For non-negative numbers, we're shifting out only zeroes, the value increases monotonically. 
@@ -2995,7 +2957,7 @@ extract_range_from_binary_expr_1 (value_range_t *vr, max = fold_unary_to_constant (ABS_EXPR, expr_type, vr1.min); if (tree_int_cst_lt (max, vr1.max)) max = vr1.max; - max = int_const_binop (MINUS_EXPR, max, integer_one_node); + max = int_const_binop (MINUS_EXPR, max, build_int_cst (TREE_TYPE (max), 1)); /* If the dividend is non-negative the modulus will be non-negative as well. */ if (TYPE_UNSIGNED (expr_type) @@ -3007,21 +2969,21 @@ extract_range_from_binary_expr_1 (value_range_t *vr, else if (code == BIT_AND_EXPR || code == BIT_IOR_EXPR || code == BIT_XOR_EXPR) { bool int_cst_range0, int_cst_range1; - double_int may_be_nonzero0, may_be_nonzero1; - double_int must_be_nonzero0, must_be_nonzero1; + wide_int may_be_nonzero0, may_be_nonzero1; + wide_int must_be_nonzero0, must_be_nonzero1; - int_cst_range0 = zero_nonzero_bits_from_vr (&vr0, &may_be_nonzero0, + int_cst_range0 = zero_nonzero_bits_from_vr (expr_type, &vr0, &may_be_nonzero0, &must_be_nonzero0); - int_cst_range1 = zero_nonzero_bits_from_vr (&vr1, &may_be_nonzero1, + int_cst_range1 = zero_nonzero_bits_from_vr (expr_type, &vr1, &may_be_nonzero1, &must_be_nonzero1); type = VR_RANGE; if (code == BIT_AND_EXPR) { - double_int dmax; - min = double_int_to_tree (expr_type, - must_be_nonzero0 & must_be_nonzero1); - dmax = may_be_nonzero0 & may_be_nonzero1; + wide_int wmax; + min = wide_int_to_tree (expr_type, + must_be_nonzero0 & must_be_nonzero1); + wmax = may_be_nonzero0 & may_be_nonzero1; /* If both input ranges contain only negative values we can truncate the result range maximum to the minimum of the input range maxima. 
*/ @@ -3029,28 +2991,24 @@ extract_range_from_binary_expr_1 (value_range_t *vr, && tree_int_cst_sgn (vr0.max) < 0 && tree_int_cst_sgn (vr1.max) < 0) { - dmax = dmax.min (tree_to_double_int (vr0.max), - TYPE_UNSIGNED (expr_type)); - dmax = dmax.min (tree_to_double_int (vr1.max), - TYPE_UNSIGNED (expr_type)); + wmax = wi::min (wmax, vr0.max, TYPE_SIGN (expr_type)); + wmax = wi::min (wmax, vr1.max, TYPE_SIGN (expr_type)); } /* If either input range contains only non-negative values we can truncate the result range maximum to the respective maximum of the input range. */ if (int_cst_range0 && tree_int_cst_sgn (vr0.min) >= 0) - dmax = dmax.min (tree_to_double_int (vr0.max), - TYPE_UNSIGNED (expr_type)); + wmax = wi::min (wmax, vr0.max, TYPE_SIGN (expr_type)); if (int_cst_range1 && tree_int_cst_sgn (vr1.min) >= 0) - dmax = dmax.min (tree_to_double_int (vr1.max), - TYPE_UNSIGNED (expr_type)); - max = double_int_to_tree (expr_type, dmax); + wmax = wi::min (wmax, vr1.max, TYPE_SIGN (expr_type)); + max = wide_int_to_tree (expr_type, wmax); } else if (code == BIT_IOR_EXPR) { - double_int dmin; - max = double_int_to_tree (expr_type, - may_be_nonzero0 | may_be_nonzero1); - dmin = must_be_nonzero0 | must_be_nonzero1; + wide_int wmin; + max = wide_int_to_tree (expr_type, + may_be_nonzero0 | may_be_nonzero1); + wmin = must_be_nonzero0 | must_be_nonzero1; /* If the input ranges contain only positive values we can truncate the minimum of the result range to the maximum of the input range minima. 
*/ @@ -3058,31 +3016,27 @@ extract_range_from_binary_expr_1 (value_range_t *vr, && tree_int_cst_sgn (vr0.min) >= 0 && tree_int_cst_sgn (vr1.min) >= 0) { - dmin = dmin.max (tree_to_double_int (vr0.min), - TYPE_UNSIGNED (expr_type)); - dmin = dmin.max (tree_to_double_int (vr1.min), - TYPE_UNSIGNED (expr_type)); + wmin = wi::max (wmin, vr0.min, TYPE_SIGN (expr_type)); + wmin = wi::max (wmin, vr1.min, TYPE_SIGN (expr_type)); } /* If either input range contains only negative values we can truncate the minimum of the result range to the respective minimum range. */ if (int_cst_range0 && tree_int_cst_sgn (vr0.max) < 0) - dmin = dmin.max (tree_to_double_int (vr0.min), - TYPE_UNSIGNED (expr_type)); + wmin = wi::max (wmin, vr0.min, TYPE_SIGN (expr_type)); if (int_cst_range1 && tree_int_cst_sgn (vr1.max) < 0) - dmin = dmin.max (tree_to_double_int (vr1.min), - TYPE_UNSIGNED (expr_type)); - min = double_int_to_tree (expr_type, dmin); + wmin = wi::max (wmin, vr1.min, TYPE_SIGN (expr_type)); + min = wide_int_to_tree (expr_type, wmin); } else if (code == BIT_XOR_EXPR) { - double_int result_zero_bits, result_one_bits; + wide_int result_zero_bits, result_one_bits; result_zero_bits = (must_be_nonzero0 & must_be_nonzero1) | ~(may_be_nonzero0 | may_be_nonzero1); result_one_bits = must_be_nonzero0.and_not (may_be_nonzero1) | must_be_nonzero1.and_not (may_be_nonzero0); - max = double_int_to_tree (expr_type, ~result_zero_bits); - min = double_int_to_tree (expr_type, result_one_bits); + max = wide_int_to_tree (expr_type, ~result_zero_bits); + min = wide_int_to_tree (expr_type, result_one_bits); /* If the range has all positive or all negative values the result is better than VARYING. 
*/ if (tree_int_cst_sgn (min) < 0 @@ -3297,15 +3251,21 @@ extract_range_from_unary_expr_1 (value_range_t *vr, if (is_overflow_infinity (vr0.min)) new_min = negative_overflow_infinity (outer_type); else - new_min = force_fit_type_double (outer_type, - tree_to_double_int (vr0.min), - 0, false); + new_min = force_fit_type (outer_type, + wide_int::from + (vr0.min, + TYPE_PRECISION (outer_type), + TYPE_SIGN (TREE_TYPE (vr0.min))), + 0, false); if (is_overflow_infinity (vr0.max)) new_max = positive_overflow_infinity (outer_type); else - new_max = force_fit_type_double (outer_type, - tree_to_double_int (vr0.max), - 0, false); + new_max = force_fit_type (outer_type, + wide_int::from + (vr0.max, + TYPE_PRECISION (outer_type), + TYPE_SIGN (TREE_TYPE (vr0.max))), + 0, false); set_and_canonicalize_value_range (vr, vr0.type, new_min, new_max, NULL); return; @@ -3403,7 +3363,7 @@ extract_range_from_unary_expr_1 (value_range_t *vr, min = (vr0.min != type_min_value ? int_const_binop (PLUS_EXPR, type_min_value, - integer_one_node) + build_int_cst (TREE_TYPE (type_min_value), 1)) : type_min_value); } else @@ -3878,30 +3838,29 @@ adjust_range_with_scev (value_range_t *vr, struct loop *loop, && (TREE_CODE (init) != SSA_NAME || get_value_range (init)->type == VR_RANGE)) { - double_int nit; + max_wide_int nit; /* We are only entering here for loop header PHI nodes, so using the number of latch executions is the correct thing to use. */ if (max_loop_iterations (loop, &nit)) { value_range_t maxvr = VR_INITIALIZER; - double_int dtmp; - bool unsigned_p = TYPE_UNSIGNED (TREE_TYPE (step)); - bool overflow = false; - - dtmp = tree_to_double_int (step) - .mul_with_sign (nit, unsigned_p, &overflow); + max_wide_int wtmp; + signop sgn = TYPE_SIGN (TREE_TYPE (step)); + bool overflow; + + wtmp = wi::mul (step, nit, sgn, &overflow); /* If the multiplication overflowed we can't do a meaningful adjustment. Likewise if the result doesn't fit in the type of the induction variable. 
For a signed type we have to check whether the result has the expected signedness which is that of the step as number of iterations is unsigned. */ if (!overflow - && double_int_fits_to_tree_p (TREE_TYPE (init), dtmp) - && (unsigned_p - || ((dtmp.high ^ TREE_INT_CST_HIGH (step)) >= 0))) + && wi::fits_to_tree_p (wtmp, TREE_TYPE (init)) + && (sgn == UNSIGNED + || wi::gts_p (wtmp, 0) == wi::gts_p (step, 0))) { - tem = double_int_to_tree (TREE_TYPE (init), dtmp); + tem = wide_int_to_tree (TREE_TYPE (init), wtmp); extract_range_from_binary_expr (&maxvr, PLUS_EXPR, TREE_TYPE (init), init, tem); /* Likewise if the addition did. */ @@ -4506,7 +4465,7 @@ infer_nonnull_range (gimple stmt, tree op) /* Now see if op appears in the nonnull list. */ for (tree t = TREE_VALUE (attrs); t; t = TREE_CHAIN (t)) { - int idx = TREE_INT_CST_LOW (TREE_VALUE (t)) - 1; + int idx = tree_to_shwi (TREE_VALUE (t)) - 1; tree arg = gimple_call_arg (stmt, idx); if (op == arg) return true; @@ -4662,8 +4621,7 @@ register_new_assert_for (tree name, tree expr, machinery. */ if (TREE_CODE (val) == INTEGER_CST && TREE_OVERFLOW (val)) - val = build_int_cst_wide (TREE_TYPE (val), - TREE_INT_CST_LOW (val), TREE_INT_CST_HIGH (val)); + val = wide_int_to_tree (TREE_TYPE (val), val); /* The new assertion A will be inserted at BB or E. We need to determine if the new location is dominated by a previously @@ -4816,23 +4774,23 @@ extract_code_and_val_from_cond_with_ops (tree name, enum tree_code cond_code, (to transform signed values into unsigned) and at the end xor SGNBIT back. 
*/ -static double_int -masked_increment (double_int val, double_int mask, double_int sgnbit, +static wide_int +masked_increment (wide_int val, wide_int mask, wide_int sgnbit, unsigned int prec) { - double_int bit = double_int_one, res; + wide_int bit = wi::one (prec), res; unsigned int i; val ^= sgnbit; for (i = 0; i < prec; i++, bit += bit) { res = mask; - if ((res & bit).is_zero ()) + if ((res & bit) == 0) continue; - res = bit - double_int_one; + res = bit - 1; res = (val + bit).and_not (res); res &= mask; - if (res.ugt (val)) + if (wi::gtu_p (res, val)) return res ^ sgnbit; } return val ^ sgnbit; @@ -5007,8 +4965,8 @@ register_edge_assert_for_2 (tree name, edge e, gimple_stmt_iterator bsi, gimple def_stmt = SSA_NAME_DEF_STMT (name); tree name2 = NULL_TREE, names[2], cst2 = NULL_TREE; tree val2 = NULL_TREE; - double_int mask = double_int_zero; unsigned int prec = TYPE_PRECISION (TREE_TYPE (val)); + wide_int mask = wi::zero (prec); unsigned int nprec = prec; enum tree_code rhs_code = ERROR_MARK; @@ -5074,15 +5032,14 @@ register_edge_assert_for_2 (tree name, edge e, gimple_stmt_iterator bsi, name2 = gimple_assign_rhs1 (def_stmt); cst2 = gimple_assign_rhs2 (def_stmt); if (TREE_CODE (name2) == SSA_NAME - && host_integerp (cst2, 1) + && tree_fits_uhwi_p (cst2) && INTEGRAL_TYPE_P (TREE_TYPE (name2)) - && IN_RANGE (tree_low_cst (cst2, 1), 1, prec - 1) - && prec <= HOST_BITS_PER_DOUBLE_INT + && IN_RANGE (tree_to_uhwi (cst2), 1, prec - 1) && prec == GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (val))) && live_on_edge (e, name2) && !has_single_use (name2)) { - mask = double_int::mask (tree_low_cst (cst2, 1)); + mask = wi::mask (tree_to_uhwi (cst2), false, prec); val2 = fold_binary (LSHIFT_EXPR, TREE_TYPE (val), val, cst2); } } @@ -5105,26 +5062,26 @@ register_edge_assert_for_2 (tree name, edge e, gimple_stmt_iterator bsi, val2 = fold_convert (type, val2); } tmp = fold_build2 (MINUS_EXPR, TREE_TYPE (tmp), tmp, val2); - new_val = double_int_to_tree (TREE_TYPE (tmp), mask); + 
new_val = wide_int_to_tree (TREE_TYPE (tmp), mask); new_comp_code = comp_code == EQ_EXPR ? LE_EXPR : GT_EXPR; } else if (comp_code == LT_EXPR || comp_code == GE_EXPR) { - double_int minval - = double_int::min_value (prec, TYPE_UNSIGNED (TREE_TYPE (val))); + wide_int minval + = wi::min_value (prec, TYPE_SIGN (TREE_TYPE (val))); new_val = val2; - if (minval == tree_to_double_int (new_val)) + if (minval == wide_int (new_val)) new_val = NULL_TREE; } else { - double_int maxval - = double_int::max_value (prec, TYPE_UNSIGNED (TREE_TYPE (val))); - mask |= tree_to_double_int (val2); + wide_int maxval + = wi::max_value (prec, TYPE_SIGN (TREE_TYPE (val))); + mask |= wide_int (val2); if (mask == maxval) new_val = NULL_TREE; else - new_val = double_int_to_tree (TREE_TYPE (val2), mask); + new_val = wide_int_to_tree (TREE_TYPE (val2), mask); } if (new_val) @@ -5176,7 +5133,6 @@ register_edge_assert_for_2 (tree name, edge e, gimple_stmt_iterator bsi, && INTEGRAL_TYPE_P (TREE_TYPE (name2)) && TREE_CODE (cst2) == INTEGER_CST && !integer_zerop (cst2) - && nprec <= HOST_BITS_PER_DOUBLE_INT && (nprec > 1 || TYPE_UNSIGNED (TREE_TYPE (val)))) { @@ -5199,17 +5155,17 @@ register_edge_assert_for_2 (tree name, edge e, gimple_stmt_iterator bsi, } if (names[0] || names[1]) { - double_int minv, maxv = double_int_zero, valv, cst2v; - double_int tem, sgnbit; + wide_int minv, maxv, valv, cst2v; + wide_int tem, sgnbit; bool valid_p = false, valn = false, cst2n = false; enum tree_code ccode = comp_code; - valv = tree_to_double_int (val).zext (nprec); - cst2v = tree_to_double_int (cst2).zext (nprec); - if (!TYPE_UNSIGNED (TREE_TYPE (val))) + valv = wide_int::from (val, nprec, UNSIGNED); + cst2v = wide_int::from (cst2, nprec, UNSIGNED); + if (TYPE_SIGN (TREE_TYPE (val)) == SIGNED) { - valn = valv.sext (nprec).is_negative (); - cst2n = cst2v.sext (nprec).is_negative (); + valn = wi::neg_p (wi::sext (valv, nprec)); + cst2n = wi::neg_p (wi::sext (cst2v, nprec)); } /* If CST2 doesn't have most significant 
bit set, but VAL is negative, we have comparison like @@ -5217,9 +5173,9 @@ register_edge_assert_for_2 (tree name, edge e, gimple_stmt_iterator bsi, if (!cst2n && valn) ccode = ERROR_MARK; if (cst2n) - sgnbit = double_int_one.llshift (nprec - 1, nprec).zext (nprec); + sgnbit = wi::set_bit_in_zero (nprec - 1, nprec); else - sgnbit = double_int_zero; + sgnbit = wi::zero (nprec); minv = valv & cst2v; switch (ccode) { @@ -5229,33 +5185,32 @@ register_edge_assert_for_2 (tree name, edge e, gimple_stmt_iterator bsi, have folded the comparison into false) and maximum unsigned value is VAL | ~CST2. */ maxv = valv | ~cst2v; - maxv = maxv.zext (nprec); + maxv = wi::zext (maxv, nprec); valid_p = true; break; + case NE_EXPR: tem = valv | ~cst2v; - tem = tem.zext (nprec); + tem = wi::zext (tem, nprec); /* If VAL is 0, handle (X & CST2) != 0 as (X & CST2) > 0U. */ - if (valv.is_zero ()) + if (valv == 0) { cst2n = false; - sgnbit = double_int_zero; + sgnbit = wi::zero (nprec); goto gt_expr; } /* If (VAL | ~CST2) is all ones, handle it as (X & CST2) < VAL. */ - if (tem == double_int::mask (nprec)) + if (tem == -1) { cst2n = false; valn = false; - sgnbit = double_int_zero; + sgnbit = wi::zero (nprec); goto lt_expr; } - if (!cst2n - && cst2v.sext (nprec).is_negative ()) - sgnbit - = double_int_one.llshift (nprec - 1, nprec).zext (nprec); - if (!sgnbit.is_zero ()) + if (!cst2n && wi::neg_p (wi::sext (cst2v, nprec))) + sgnbit = wi::set_bit_in_zero (nprec - 1, nprec); + if (sgnbit != 0) { if (valv == sgnbit) { @@ -5263,15 +5218,16 @@ register_edge_assert_for_2 (tree name, edge e, gimple_stmt_iterator bsi, valn = true; goto gt_expr; } - if (tem == double_int::mask (nprec - 1)) + if (tem == wi::mask (nprec - 1, false, nprec)) { cst2n = true; goto lt_expr; } if (!cst2n) - sgnbit = double_int_zero; + sgnbit = wi::zero (nprec); } break; + case GE_EXPR: /* Minimum unsigned value for >= if (VAL & CST2) == VAL is VAL and maximum unsigned value is ~0. 
For signed @@ -5286,9 +5242,10 @@ register_edge_assert_for_2 (tree name, edge e, gimple_stmt_iterator bsi, if (minv == valv) break; } - maxv = double_int::mask (nprec - (cst2n ? 1 : 0)); + maxv = wi::mask (nprec - (cst2n ? 1 : 0), false, nprec); valid_p = true; break; + case GT_EXPR: gt_expr: /* Find out smallest MINV where MINV > VAL @@ -5297,9 +5254,10 @@ register_edge_assert_for_2 (tree name, edge e, gimple_stmt_iterator bsi, minv = masked_increment (valv, cst2v, sgnbit, nprec); if (minv == valv) break; - maxv = double_int::mask (nprec - (cst2n ? 1 : 0)); + maxv = wi::mask (nprec - (cst2n ? 1 : 0), false, nprec); valid_p = true; break; + case LE_EXPR: /* Minimum unsigned value for <= is 0 and maximum unsigned value is VAL | ~CST2 if (VAL & CST2) == VAL. @@ -5316,13 +5274,14 @@ register_edge_assert_for_2 (tree name, edge e, gimple_stmt_iterator bsi, maxv = masked_increment (valv, cst2v, sgnbit, nprec); if (maxv == valv) break; - maxv -= double_int_one; + maxv -= 1; } maxv |= ~cst2v; - maxv = maxv.zext (nprec); + maxv = wi::zext (maxv, nprec); minv = sgnbit; valid_p = true; break; + case LT_EXPR: lt_expr: /* Minimum unsigned value for < is 0 and maximum @@ -5345,17 +5304,18 @@ register_edge_assert_for_2 (tree name, edge e, gimple_stmt_iterator bsi, if (maxv == valv) break; } - maxv -= double_int_one; + maxv -= 1; maxv |= ~cst2v; - maxv = maxv.zext (nprec); + maxv = wi::zext (maxv, nprec); minv = sgnbit; valid_p = true; break; + default: break; } if (valid_p - && (maxv - minv).zext (nprec) != double_int::mask (nprec)) + && wi::zext (maxv - minv, nprec) != wi::minus_one (nprec)) { tree tmp, new_val, type; int i; @@ -5363,7 +5323,7 @@ register_edge_assert_for_2 (tree name, edge e, gimple_stmt_iterator bsi, for (i = 0; i < 2; i++) if (names[i]) { - double_int maxv2 = maxv; + wide_int maxv2 = maxv; tmp = names[i]; type = TREE_TYPE (names[i]); if (!TYPE_UNSIGNED (type)) @@ -5371,13 +5331,13 @@ register_edge_assert_for_2 (tree name, edge e, gimple_stmt_iterator bsi, type 
= build_nonstandard_integer_type (nprec, 1); tmp = build1 (NOP_EXPR, type, names[i]); } - if (!minv.is_zero ()) + if (minv != 0) { tmp = build2 (PLUS_EXPR, type, tmp, - double_int_to_tree (type, -minv)); + wide_int_to_tree (type, -minv)); maxv2 = maxv - minv; } - new_val = double_int_to_tree (type, maxv2); + new_val = wide_int_to_tree (type, maxv2); if (dump_file) { @@ -6216,7 +6176,8 @@ check_array_ref (location_t location, tree ref, bool ignore_off_by_one) } low_bound = array_ref_low_bound (ref); - up_bound_p1 = int_const_binop (PLUS_EXPR, up_bound, integer_one_node); + up_bound_p1 = int_const_binop (PLUS_EXPR, up_bound, + build_int_cst (TREE_TYPE (up_bound), 1)); if (TREE_CODE (low_sub) == SSA_NAME) { @@ -6313,7 +6274,7 @@ search_for_addr_array (tree t, location_t location) { tree tem = TREE_OPERAND (TREE_OPERAND (t, 0), 0); tree low_bound, up_bound, el_sz; - double_int idx; + addr_wide_int idx; if (TREE_CODE (TREE_TYPE (tem)) != ARRAY_TYPE || TREE_CODE (TREE_TYPE (TREE_TYPE (tem))) == ARRAY_TYPE || !TYPE_DOMAIN (TREE_TYPE (tem))) @@ -6331,8 +6292,8 @@ search_for_addr_array (tree t, location_t location) return; idx = mem_ref_offset (t); - idx = idx.sdiv (tree_to_double_int (el_sz), TRUNC_DIV_EXPR); - if (idx.slt (double_int_zero)) + idx = wi::sdiv_trunc (idx, el_sz); + if (wi::lts_p (idx, 0)) { if (dump_file && (dump_flags & TDF_DETAILS)) { @@ -6344,9 +6305,7 @@ search_for_addr_array (tree t, location_t location) "array subscript is below array bounds"); TREE_NO_WARNING (t) = 1; } - else if (idx.sgt (tree_to_double_int (up_bound) - - tree_to_double_int (low_bound) - + double_int_one)) + else if (wi::gts_p (idx, addr_wide_int (up_bound) - low_bound + 1)) { if (dump_file && (dump_flags & TDF_DETAILS)) { @@ -7541,9 +7500,11 @@ union_ranges (enum value_range_type *vr0type, && vrp_val_is_max (vr1max)) { tree min = int_const_binop (PLUS_EXPR, - *vr0max, integer_one_node); + *vr0max, + build_int_cst (TREE_TYPE (*vr0max), 1)); tree max = int_const_binop (MINUS_EXPR, - 
vr1min, integer_one_node); + vr1min, + build_int_cst (TREE_TYPE (vr1min), 1)); if (!operand_less_p (max, min)) { *vr0type = VR_ANTI_RANGE; @@ -7565,9 +7526,11 @@ union_ranges (enum value_range_type *vr0type, && vrp_val_is_max (*vr0max)) { tree min = int_const_binop (PLUS_EXPR, - vr1max, integer_one_node); + vr1max, + build_int_cst (TREE_TYPE (vr1max), 1)); tree max = int_const_binop (MINUS_EXPR, - *vr0min, integer_one_node); + *vr0min, + build_int_cst (TREE_TYPE (*vr0min), 1)); if (!operand_less_p (max, min)) { *vr0type = VR_ANTI_RANGE; @@ -7603,9 +7566,11 @@ union_ranges (enum value_range_type *vr0type, { /* Arbitrarily choose the right or left gap. */ if (!mineq && TREE_CODE (vr1min) == INTEGER_CST) - *vr0max = int_const_binop (MINUS_EXPR, vr1min, integer_one_node); + *vr0max = int_const_binop (MINUS_EXPR, vr1min, + build_int_cst (TREE_TYPE (vr1min), 1)); else if (!maxeq && TREE_CODE (vr1max) == INTEGER_CST) - *vr0min = int_const_binop (PLUS_EXPR, vr1max, integer_one_node); + *vr0min = int_const_binop (PLUS_EXPR, vr1max, + build_int_cst (TREE_TYPE (vr1max), 1)); else goto give_up; } @@ -7636,12 +7601,14 @@ union_ranges (enum value_range_type *vr0type, *vr0type = VR_ANTI_RANGE; if (!mineq && TREE_CODE (*vr0min) == INTEGER_CST) { - *vr0max = int_const_binop (MINUS_EXPR, *vr0min, integer_one_node); + *vr0max = int_const_binop (MINUS_EXPR, *vr0min, + build_int_cst (TREE_TYPE (*vr0min), 1)); *vr0min = vr1min; } else if (!maxeq && TREE_CODE (*vr0max) == INTEGER_CST) { - *vr0min = int_const_binop (PLUS_EXPR, *vr0max, integer_one_node); + *vr0min = int_const_binop (PLUS_EXPR, *vr0max, + build_int_cst (TREE_TYPE (*vr0max), 1)); *vr0max = vr1max; } else @@ -7669,7 +7636,8 @@ union_ranges (enum value_range_type *vr0type, && vr1type == VR_RANGE) { if (TREE_CODE (vr1min) == INTEGER_CST) - *vr0max = int_const_binop (MINUS_EXPR, vr1min, integer_one_node); + *vr0max = int_const_binop (MINUS_EXPR, vr1min, + build_int_cst (TREE_TYPE (vr1min), 1)); else goto give_up; } @@ -7679,7 
+7647,8 @@ union_ranges (enum value_range_type *vr0type, if (TREE_CODE (*vr0max) == INTEGER_CST) { *vr0type = vr1type; - *vr0min = int_const_binop (PLUS_EXPR, *vr0max, integer_one_node); + *vr0min = int_const_binop (PLUS_EXPR, *vr0max, + build_int_cst (TREE_TYPE (*vr0max), 1)); *vr0max = vr1max; } else @@ -7703,7 +7672,8 @@ union_ranges (enum value_range_type *vr0type, && vr1type == VR_RANGE) { if (TREE_CODE (vr1max) == INTEGER_CST) - *vr0min = int_const_binop (PLUS_EXPR, vr1max, integer_one_node); + *vr0min = int_const_binop (PLUS_EXPR, vr1max, + build_int_cst (TREE_TYPE (vr1max), 1)); else goto give_up; } @@ -7714,7 +7684,8 @@ union_ranges (enum value_range_type *vr0type, { *vr0type = vr1type; *vr0min = vr1min; - *vr0max = int_const_binop (MINUS_EXPR, *vr0min, integer_one_node); + *vr0max = int_const_binop (MINUS_EXPR, *vr0min, + build_int_cst (TREE_TYPE (*vr0min), 1)); } else goto give_up; @@ -7829,7 +7800,8 @@ intersect_ranges (enum value_range_type *vr0type, if (mineq) { if (TREE_CODE (vr1max) == INTEGER_CST) - *vr0min = int_const_binop (PLUS_EXPR, vr1max, integer_one_node); + *vr0min = int_const_binop (PLUS_EXPR, vr1max, + build_int_cst (TREE_TYPE (vr1max), 1)); else *vr0min = vr1max; } @@ -7838,7 +7810,7 @@ intersect_ranges (enum value_range_type *vr0type, { if (TREE_CODE (vr1min) == INTEGER_CST) *vr0max = int_const_binop (MINUS_EXPR, vr1min, - integer_one_node); + build_int_cst (TREE_TYPE (vr1min), 1)); else *vr0max = vr1min; } @@ -7884,7 +7856,7 @@ intersect_ranges (enum value_range_type *vr0type, *vr0type = VR_RANGE; if (TREE_CODE (*vr0max) == INTEGER_CST) *vr0min = int_const_binop (PLUS_EXPR, *vr0max, - integer_one_node); + build_int_cst (TREE_TYPE (*vr0max), 1)); else *vr0min = *vr0max; *vr0max = vr1max; @@ -7895,7 +7867,7 @@ intersect_ranges (enum value_range_type *vr0type, *vr0type = VR_RANGE; if (TREE_CODE (*vr0min) == INTEGER_CST) *vr0max = int_const_binop (MINUS_EXPR, *vr0min, - integer_one_node); + build_int_cst (TREE_TYPE (*vr0min), 1)); else 
*vr0max = *vr0min; *vr0min = vr1min; @@ -7947,7 +7919,7 @@ intersect_ranges (enum value_range_type *vr0type, { if (TREE_CODE (vr1min) == INTEGER_CST) *vr0max = int_const_binop (MINUS_EXPR, vr1min, - integer_one_node); + build_int_cst (TREE_TYPE (vr1min), 1)); else *vr0max = vr1min; } @@ -7957,7 +7929,7 @@ intersect_ranges (enum value_range_type *vr0type, *vr0type = VR_RANGE; if (TREE_CODE (*vr0max) == INTEGER_CST) *vr0min = int_const_binop (PLUS_EXPR, *vr0max, - integer_one_node); + build_int_cst (TREE_TYPE (*vr0max), 1)); else *vr0min = *vr0max; *vr0max = vr1max; @@ -7981,7 +7953,7 @@ intersect_ranges (enum value_range_type *vr0type, { if (TREE_CODE (vr1max) == INTEGER_CST) *vr0min = int_const_binop (PLUS_EXPR, vr1max, - integer_one_node); + build_int_cst (TREE_TYPE (vr1max), 1)); else *vr0min = vr1max; } @@ -7991,7 +7963,7 @@ intersect_ranges (enum value_range_type *vr0type, *vr0type = VR_RANGE; if (TREE_CODE (*vr0min) == INTEGER_CST) *vr0max = int_const_binop (MINUS_EXPR, *vr0min, - integer_one_node); + build_int_cst (TREE_TYPE (*vr0min), 1)); else *vr0max = *vr0min; *vr0min = vr1min; @@ -8401,7 +8373,8 @@ simplify_truth_ops_using_ranges (gimple_stmt_iterator *gsi, gimple stmt) if (rhs_code == EQ_EXPR) { if (TREE_CODE (op1) == INTEGER_CST) - op1 = int_const_binop (BIT_XOR_EXPR, op1, integer_one_node); + op1 = int_const_binop (BIT_XOR_EXPR, op1, + build_int_cst (TREE_TYPE (op1), 1)); else return false; } @@ -8587,9 +8560,9 @@ simplify_bit_ops_using_ranges (gimple_stmt_iterator *gsi, gimple stmt) tree op = NULL_TREE; value_range_t vr0 = VR_INITIALIZER; value_range_t vr1 = VR_INITIALIZER; - double_int may_be_nonzero0, may_be_nonzero1; - double_int must_be_nonzero0, must_be_nonzero1; - double_int mask; + wide_int may_be_nonzero0, may_be_nonzero1; + wide_int must_be_nonzero0, must_be_nonzero1; + wide_int mask; if (TREE_CODE (op0) == SSA_NAME) vr0 = *(get_value_range (op0)); @@ -8605,22 +8578,22 @@ simplify_bit_ops_using_ranges (gimple_stmt_iterator *gsi, gimple stmt) 
else return false; - if (!zero_nonzero_bits_from_vr (&vr0, &may_be_nonzero0, &must_be_nonzero0)) + if (!zero_nonzero_bits_from_vr (TREE_TYPE (op0), &vr0, &may_be_nonzero0, &must_be_nonzero0)) return false; - if (!zero_nonzero_bits_from_vr (&vr1, &may_be_nonzero1, &must_be_nonzero1)) + if (!zero_nonzero_bits_from_vr (TREE_TYPE (op1), &vr1, &may_be_nonzero1, &must_be_nonzero1)) return false; switch (gimple_assign_rhs_code (stmt)) { case BIT_AND_EXPR: mask = may_be_nonzero0.and_not (must_be_nonzero1); - if (mask.is_zero ()) + if (mask == 0) { op = op0; break; } mask = may_be_nonzero1.and_not (must_be_nonzero0); - if (mask.is_zero ()) + if (mask == 0) { op = op1; break; @@ -8628,13 +8601,13 @@ simplify_bit_ops_using_ranges (gimple_stmt_iterator *gsi, gimple stmt) break; case BIT_IOR_EXPR: mask = may_be_nonzero0.and_not (must_be_nonzero1); - if (mask.is_zero ()) + if (mask == 0) { op = op1; break; } mask = may_be_nonzero1.and_not (must_be_nonzero0); - if (mask.is_zero ()) + if (mask == 0) { op = op0; break; @@ -8720,11 +8693,12 @@ test_for_singularity (enum tree_code cond_code, tree op0, by PRECISION and UNSIGNED_P. */ static bool -range_fits_type_p (value_range_t *vr, unsigned precision, bool unsigned_p) +range_fits_type_p (value_range_t *vr, unsigned dest_precision, signop dest_sgn) { tree src_type; unsigned src_precision; - double_int tem; + max_wide_int tem; + signop src_sgn; /* We can only handle integral and pointer types. */ src_type = TREE_TYPE (vr->min); @@ -8732,13 +8706,13 @@ range_fits_type_p (value_range_t *vr, unsigned precision, bool unsigned_p) && !POINTER_TYPE_P (src_type)) return false; - /* An extension is fine unless VR is signed and unsigned_p, + /* An extension is fine unless VR is SIGNED and dest_sgn is UNSIGNED, and so is an identity transform. 
*/ src_precision = TYPE_PRECISION (TREE_TYPE (vr->min)); - if ((src_precision < precision - && !(unsigned_p && !TYPE_UNSIGNED (src_type))) - || (src_precision == precision - && TYPE_UNSIGNED (src_type) == unsigned_p)) + src_sgn = TYPE_SIGN (src_type); + if ((src_precision < dest_precision + && !(dest_sgn == UNSIGNED && src_sgn == SIGNED)) + || (src_precision == dest_precision && src_sgn == dest_sgn)) return true; /* Now we can only handle ranges with constant bounds. */ @@ -8747,21 +8721,21 @@ range_fits_type_p (value_range_t *vr, unsigned precision, bool unsigned_p) || TREE_CODE (vr->max) != INTEGER_CST) return false; - /* For sign changes, the MSB of the double_int has to be clear. + /* For sign changes, the MSB of the wide_int has to be clear. An unsigned value with its MSB set cannot be represented by - a signed double_int, while a negative value cannot be represented - by an unsigned double_int. */ - if (TYPE_UNSIGNED (src_type) != unsigned_p - && (TREE_INT_CST_HIGH (vr->min) | TREE_INT_CST_HIGH (vr->max)) < 0) + a signed wide_int, while a negative value cannot be represented + by an unsigned wide_int. */ + if (src_sgn != dest_sgn + && (wi::lts_p (vr->min, 0) || wi::lts_p (vr->max, 0))) return false; /* Then we can perform the conversion on both ends and compare the result for equality. 
*/ - tem = tree_to_double_int (vr->min).ext (precision, unsigned_p); - if (tree_to_double_int (vr->min) != tem) + tem = wi::ext (vr->min, dest_precision, dest_sgn); + if (tem != vr->min) return false; - tem = tree_to_double_int (vr->max).ext (precision, unsigned_p); - if (tree_to_double_int (vr->max) != tem) + tem = wi::ext (vr->max, dest_precision, dest_sgn); + if (tem != vr->max) return false; return true; @@ -8876,7 +8850,7 @@ simplify_cond_using_ranges (gimple stmt) if (range_int_cst_p (vr) && range_fits_type_p (vr, TYPE_PRECISION (TREE_TYPE (op0)), - TYPE_UNSIGNED (TREE_TYPE (op0))) + TYPE_SIGN (TREE_TYPE (op0))) && int_fits_type_p (op1, TREE_TYPE (innerop)) /* The range must not have overflowed, or if it did overflow we must not be wrapping/trapping overflow and optimizing @@ -9021,9 +8995,9 @@ simplify_conversion_using_ranges (gimple stmt) tree innerop, middleop, finaltype; gimple def_stmt; value_range_t *innervr; - bool inner_unsigned_p, middle_unsigned_p, final_unsigned_p; + signop inner_sgn, middle_sgn, final_sgn; unsigned inner_prec, middle_prec, final_prec; - double_int innermin, innermed, innermax, middlemin, middlemed, middlemax; + max_wide_int innermin, innermed, innermax, middlemin, middlemed, middlemax; finaltype = TREE_TYPE (gimple_assign_lhs (stmt)); if (!INTEGRAL_TYPE_P (finaltype)) @@ -9047,8 +9021,8 @@ simplify_conversion_using_ranges (gimple stmt) /* Simulate the conversion chain to check if the result is equal if the middle conversion is removed. */ - innermin = tree_to_double_int (innervr->min); - innermax = tree_to_double_int (innervr->max); + innermin = innervr->min; + innermax = innervr->max; inner_prec = TYPE_PRECISION (TREE_TYPE (innerop)); middle_prec = TYPE_PRECISION (TREE_TYPE (middleop)); @@ -9056,34 +9030,35 @@ simplify_conversion_using_ranges (gimple stmt) /* If the first conversion is not injective, the second must not be widening. 
*/ - if ((innermax - innermin).ugt (double_int::mask (middle_prec)) + if (wi::gtu_p (innermax - innermin, + wi::mask <max_wide_int> (middle_prec, false)) && middle_prec < final_prec) return false; /* We also want a medium value so that we can track the effect that narrowing conversions with sign change have. */ - inner_unsigned_p = TYPE_UNSIGNED (TREE_TYPE (innerop)); - if (inner_unsigned_p) - innermed = double_int::mask (inner_prec).lrshift (1, inner_prec); + inner_sgn = TYPE_SIGN (TREE_TYPE (innerop)); + if (inner_sgn == UNSIGNED) + innermed = wi::shifted_mask <max_wide_int> (1, inner_prec - 1, false); else - innermed = double_int_zero; - if (innermin.cmp (innermed, inner_unsigned_p) >= 0 - || innermed.cmp (innermax, inner_unsigned_p) >= 0) + innermed = 0; + if (wi::cmp (innermin, innermed, inner_sgn) >= 0 + || wi::cmp (innermed, innermax, inner_sgn) >= 0) innermed = innermin; - middle_unsigned_p = TYPE_UNSIGNED (TREE_TYPE (middleop)); - middlemin = innermin.ext (middle_prec, middle_unsigned_p); - middlemed = innermed.ext (middle_prec, middle_unsigned_p); - middlemax = innermax.ext (middle_prec, middle_unsigned_p); + middle_sgn = TYPE_SIGN (TREE_TYPE (middleop)); + middlemin = wi::ext (innermin, middle_prec, middle_sgn); + middlemed = wi::ext (innermed, middle_prec, middle_sgn); + middlemax = wi::ext (innermax, middle_prec, middle_sgn); /* Require that the final conversion applied to both the original and the intermediate range produces the same result. 
*/ - final_unsigned_p = TYPE_UNSIGNED (finaltype); - if (middlemin.ext (final_prec, final_unsigned_p) - != innermin.ext (final_prec, final_unsigned_p) - || middlemed.ext (final_prec, final_unsigned_p) - != innermed.ext (final_prec, final_unsigned_p) - || middlemax.ext (final_prec, final_unsigned_p) - != innermax.ext (final_prec, final_unsigned_p)) + final_sgn = TYPE_SIGN (finaltype); + if (wi::ext (middlemin, final_prec, final_sgn) + != wi::ext (innermin, final_prec, final_sgn) + || wi::ext (middlemed, final_prec, final_sgn) + != wi::ext (innermed, final_prec, final_sgn) + || wi::ext (middlemax, final_prec, final_sgn) + != wi::ext (innermax, final_prec, final_sgn)) return false; gimple_assign_set_rhs1 (stmt, innerop); @@ -9113,8 +9088,7 @@ simplify_float_conversion_using_ranges (gimple_stmt_iterator *gsi, gimple stmt) if (TYPE_UNSIGNED (TREE_TYPE (rhs1)) && (can_float_p (fltmode, TYPE_MODE (TREE_TYPE (rhs1)), 0) != CODE_FOR_nothing) - && range_fits_type_p (vr, GET_MODE_PRECISION - (TYPE_MODE (TREE_TYPE (rhs1))), 0)) + && range_fits_type_p (vr, TYPE_PRECISION (TREE_TYPE (rhs1)), SIGNED)) mode = TYPE_MODE (TREE_TYPE (rhs1)); /* If we can do the conversion in the current input mode do nothing. */ else if (can_float_p (fltmode, TYPE_MODE (TREE_TYPE (rhs1)), @@ -9131,7 +9105,7 @@ simplify_float_conversion_using_ranges (gimple_stmt_iterator *gsi, gimple stmt) or if the value-range does not fit in the signed type try with a wider mode. 
*/ if (can_float_p (fltmode, mode, 0) != CODE_FOR_nothing - && range_fits_type_p (vr, GET_MODE_PRECISION (mode), 0)) + && range_fits_type_p (vr, GET_MODE_PRECISION (mode), SIGNED)) break; mode = GET_MODE_WIDER_MODE (mode); @@ -9163,6 +9137,7 @@ static bool simplify_stmt_using_ranges (gimple_stmt_iterator *gsi) { gimple stmt = gsi_stmt (*gsi); + if (is_gimple_assign (stmt)) { enum tree_code rhs_code = gimple_assign_rhs_code (stmt); @@ -9507,9 +9482,7 @@ vrp_finalize (void) && (TREE_CODE (vr_value[i]->max) == INTEGER_CST)) { if (vr_value[i]->type == VR_RANGE) - set_range_info (name, - tree_to_double_int (vr_value[i]->min), - tree_to_double_int (vr_value[i]->max)); + set_range_info (name, vr_value[i]->min, vr_value[i]->max); else if (vr_value[i]->type == VR_ANTI_RANGE) { /* VR_ANTI_RANGE ~[min, max] is encoded compactly as @@ -9522,16 +9495,17 @@ vrp_finalize (void) if (TYPE_UNSIGNED (TREE_TYPE (name)) && integer_zerop (vr_value[i]->min) && integer_zerop (vr_value[i]->max)) - set_range_info (name, - double_int_one, - double_int::max_value - (TYPE_PRECISION (TREE_TYPE (name)), true)); + { + max_wide_int tmmwi + = max_wide_int::from (wi::max_value (TYPE_PRECISION (TREE_TYPE (name)), + UNSIGNED), + UNSIGNED); + set_range_info (name, 1, tmmwi); + } else set_range_info (name, - tree_to_double_int (vr_value[i]->max) - + double_int_one, - tree_to_double_int (vr_value[i]->min) - - double_int_one); + vr_value[i]->max + 1, + vr_value[i]->min - 1); } } } diff --git a/gcc/tree.c b/gcc/tree.c index f1dddb75de9..f298d385536 100644 --- a/gcc/tree.c +++ b/gcc/tree.c @@ -59,6 +59,7 @@ along with GCC; see the file COPYING3. If not see #include "except.h" #include "debug.h" #include "intl.h" +#include "wide-int.h" /* Tree code classes. 
*/ @@ -558,7 +559,7 @@ init_ttree (void) int_cst_hash_table = htab_create_ggc (1024, int_cst_hash_hash, int_cst_hash_eq, NULL); - int_cst_node = make_node (INTEGER_CST); + int_cst_node = make_int_cst (1); cl_option_hash_table = htab_create_ggc (64, cl_option_hash_hash, cl_option_hash_eq, NULL); @@ -661,7 +662,7 @@ decl_assembler_name_hash (const_tree asmname) /* Compute the number of bytes occupied by a tree with code CODE. This function cannot be used for nodes that have variable sizes, - including TREE_VEC, STRING_CST, and CALL_EXPR. */ + including TREE_VEC, INTEGER_CST, STRING_CST, and CALL_EXPR. */ size_t tree_code_size (enum tree_code code) { @@ -709,7 +710,7 @@ tree_code_size (enum tree_code code) case tcc_constant: /* a constant */ switch (code) { - case INTEGER_CST: return sizeof (struct tree_int_cst); + case INTEGER_CST: gcc_unreachable (); case REAL_CST: return sizeof (struct tree_real_cst); case FIXED_CST: return sizeof (struct tree_fixed_cst); case COMPLEX_CST: return sizeof (struct tree_complex); @@ -756,6 +757,10 @@ tree_size (const_tree node) const enum tree_code code = TREE_CODE (node); switch (code) { + case INTEGER_CST: + return (sizeof (struct tree_int_cst) + + (TREE_INT_CST_NUNITS (node) - 1) * sizeof (HOST_WIDE_INT)); + case TREE_BINFO: return (offsetof (struct tree_binfo, base_binfos) + vec<tree, va_gc> @@ -888,8 +893,9 @@ allocate_decl_uid (void) /* Return a newly allocated node of code CODE. For decl and type nodes, some other fields are initialized. The rest of the node is - initialized to zero. This function cannot be used for TREE_VEC or - OMP_CLAUSE nodes, which is enforced by asserts in tree_code_size. + initialized to zero. This function cannot be used for TREE_VEC, + INTEGER_CST or OMP_CLAUSE nodes, which is enforced by asserts in + tree_code_size. Achoo! I got a code in the node. 
*/ @@ -1086,7 +1092,13 @@ build_int_cst (tree type, HOST_WIDE_INT low) if (!type) type = integer_type_node; - return double_int_to_tree (type, double_int::from_shwi (low)); + return wide_int_to_tree (type, wi::shwi (low, TYPE_PRECISION (type))); +} + +tree +build_int_cstu (tree type, unsigned HOST_WIDE_INT cst) +{ + return wide_int_to_tree (type, wi::uhwi (cst, TYPE_PRECISION (type))); } /* Create an INT_CST node with a LOW value sign extended to TYPE. */ @@ -1095,8 +1107,7 @@ tree build_int_cst_type (tree type, HOST_WIDE_INT low) { gcc_assert (type); - - return double_int_to_tree (type, double_int::from_shwi (low)); + return wide_int_to_tree (type, wi::shwi (low, TYPE_PRECISION (type))); } /* Constructs tree in type TYPE from with value given by CST. Signedness @@ -1105,28 +1116,10 @@ build_int_cst_type (tree type, HOST_WIDE_INT low) tree double_int_to_tree (tree type, double_int cst) { - bool sign_extended_type = !TYPE_UNSIGNED (type); - - cst = cst.ext (TYPE_PRECISION (type), !sign_extended_type); - - return build_int_cst_wide (type, cst.low, cst.high); -} - -/* Returns true if CST fits into range of TYPE. Signedness of CST is assumed - to be the same as the signedness of TYPE. */ - -bool -double_int_fits_to_tree_p (const_tree type, double_int cst) -{ - bool sign_extended_type = !TYPE_UNSIGNED (type); - - double_int ext - = cst.ext (TYPE_PRECISION (type), !sign_extended_type); - - return cst == ext; + return wide_int_to_tree (type, max_wide_int::from (cst, TYPE_SIGN (type))); } -/* We force the double_int CST to the range of the type TYPE by sign or +/* We force the wide_int CST to the range of the type TYPE by sign or zero extending it. OVERFLOWABLE indicates if we are interested in overflow of the value, when >0 we are only interested in signed overflow, for <0 we are interested in any overflow. 
OVERFLOWED @@ -1137,26 +1130,37 @@ double_int_fits_to_tree_p (const_tree type, double_int cst) OVERFLOWED is nonzero, or OVERFLOWABLE is >0 and signed overflow occurs or OVERFLOWABLE is <0 and any overflow occurs - We return a new tree node for the extended double_int. The node + We return a new tree node for the extended wide_int. The node is shared if no overflow flags are set. */ tree -force_fit_type_double (tree type, double_int cst, int overflowable, - bool overflowed) +force_fit_type (tree type, const wide_int_ref &cst, + int overflowable, bool overflowed) { - bool sign_extended_type = !TYPE_UNSIGNED (type); + signop sign = TYPE_SIGN (type); /* If we need to set overflow flags, return a new unshared node. */ - if (overflowed || !double_int_fits_to_tree_p (type, cst)) + if (overflowed || !wi::fits_to_tree_p (cst, type)) { if (overflowed || overflowable < 0 - || (overflowable > 0 && sign_extended_type)) + || (overflowable > 0 && sign == SIGNED)) { - tree t = make_node (INTEGER_CST); - TREE_INT_CST (t) - = cst.ext (TYPE_PRECISION (type), !sign_extended_type); + wide_int tmp = wide_int::from (cst, TYPE_PRECISION (type), sign); + int l = tmp.get_len (); + tree t = make_int_cst (l); + if (l > 1) + { + if (tmp.elt (l - 1) == 0) + gcc_assert (tmp.elt (l - 2) < 0); + if (tmp.elt (l - 1) == (HOST_WIDE_INT) -1) + gcc_assert (tmp.elt (l - 2) >= 0); + } + + for (int i = 0; i < l; i++) + TREE_INT_CST_ELT (t, i) = tmp.elt (i); + TREE_TYPE (t) = type; TREE_OVERFLOW (t) = 1; return t; @@ -1164,7 +1168,7 @@ force_fit_type_double (tree type, double_int cst, int overflowable, } /* Else build a shared node. 
*/ - return double_int_to_tree (type, cst); + return wide_int_to_tree (type, cst); } /* These are the hash table functions for the hash table of INTEGER_CST @@ -1176,9 +1180,13 @@ static hashval_t int_cst_hash_hash (const void *x) { const_tree const t = (const_tree) x; + hashval_t code = htab_hash_pointer (TREE_TYPE (t)); + int i; - return (TREE_INT_CST_HIGH (t) ^ TREE_INT_CST_LOW (t) - ^ htab_hash_pointer (TREE_TYPE (t))); + for (i = 0; i < TREE_INT_CST_NUNITS (t); i++) + code ^= TREE_INT_CST_ELT (t, i); + + return code; } /* Return nonzero if the value represented by *X (an INTEGER_CST tree node) @@ -1190,34 +1198,64 @@ int_cst_hash_eq (const void *x, const void *y) const_tree const xt = (const_tree) x; const_tree const yt = (const_tree) y; - return (TREE_TYPE (xt) == TREE_TYPE (yt) - && TREE_INT_CST_HIGH (xt) == TREE_INT_CST_HIGH (yt) - && TREE_INT_CST_LOW (xt) == TREE_INT_CST_LOW (yt)); + if (TREE_TYPE (xt) != TREE_TYPE (yt) + || TREE_INT_CST_NUNITS (xt) != TREE_INT_CST_NUNITS (yt)) + return false; + + for (int i = 0; i < TREE_INT_CST_NUNITS (xt); i++) + if (TREE_INT_CST_ELT (xt, i) != TREE_INT_CST_ELT (yt, i)) + return false; + + return true; } -/* Create an INT_CST node of TYPE and value HI:LOW. +/* Create an INT_CST node of TYPE and value CST. The returned node is always shared. For small integers we use a - per-type vector cache, for larger ones we use a single hash table. */ + per-type vector cache, for larger ones we use a single hash table. + The value is extended from it's precision according to the sign of + the type to be a multiple of HOST_BITS_PER_WIDE_INT. This defines + the upper bits and ensures that hashing and value equality based + upon the underlying HOST_WIDE_INTs works without masking. 
*/ tree -build_int_cst_wide (tree type, unsigned HOST_WIDE_INT low, HOST_WIDE_INT hi) +wide_int_to_tree (tree type, const wide_int_ref &pcst) { tree t; int ix = -1; int limit = 0; + unsigned int i; gcc_assert (type); + unsigned int prec = TYPE_PRECISION (type); + signop sgn = TYPE_SIGN (type); + + /* Verify that everything is canonical. */ + int l = pcst.get_len (); + if (l > 1) + { + if (pcst.elt (l - 1) == 0) + gcc_assert (pcst.elt (l - 2) < 0); + if (pcst.elt (l - 1) == (HOST_WIDE_INT) -1) + gcc_assert (pcst.elt (l - 2) >= 0); + } + + wide_int cst = wide_int::from (pcst, prec, sgn); + unsigned int len = cst.get_len (); + unsigned int small_prec = prec & (HOST_BITS_PER_WIDE_INT - 1); + bool recanonize = sgn == UNSIGNED + && small_prec + && (prec + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT == len; switch (TREE_CODE (type)) { case NULLPTR_TYPE: - gcc_assert (hi == 0 && low == 0); + gcc_assert (cst == 0); /* Fallthru. */ case POINTER_TYPE: case REFERENCE_TYPE: /* Cache NULL pointer. */ - if (!hi && !low) + if (cst == 0) { limit = 1; ix = 0; @@ -1227,27 +1265,45 @@ build_int_cst_wide (tree type, unsigned HOST_WIDE_INT low, HOST_WIDE_INT hi) case BOOLEAN_TYPE: /* Cache false or true. */ limit = 2; - if (!hi && low < 2) - ix = low; + if (wi::leu_p (cst, 1)) + ix = cst.to_uhwi (); break; case INTEGER_TYPE: case OFFSET_TYPE: - if (TYPE_UNSIGNED (type)) + if (TYPE_SIGN (type) == UNSIGNED) { /* Cache 0..N */ limit = INTEGER_SHARE_LIMIT; - if (!hi && low < (unsigned HOST_WIDE_INT)INTEGER_SHARE_LIMIT) - ix = low; + + /* This is a little hokie, but if the prec is smaller than + what is necessary to hold INTEGER_SHARE_LIMIT, then the + obvious test will not get the correct answer. 
*/ + if (prec < HOST_BITS_PER_WIDE_INT) + { + if (cst.to_uhwi () < (unsigned HOST_WIDE_INT) INTEGER_SHARE_LIMIT) + ix = cst.to_uhwi (); + } + else if (wi::ltu_p (cst, INTEGER_SHARE_LIMIT)) + ix = cst.to_uhwi (); } else { /* Cache -1..N */ limit = INTEGER_SHARE_LIMIT + 1; - if (!hi && low < (unsigned HOST_WIDE_INT)INTEGER_SHARE_LIMIT) - ix = low + 1; - else if (hi == -1 && low == -(unsigned HOST_WIDE_INT)1) + + if (cst == -1) ix = 0; + else if (!wi::neg_p (cst)) + { + if (prec < HOST_BITS_PER_WIDE_INT) + { + if (cst.to_shwi () < INTEGER_SHARE_LIMIT) + ix = cst.to_shwi () + 1; + } + else if (wi::lts_p (cst, INTEGER_SHARE_LIMIT)) + ix = cst.to_shwi () + 1; + } } break; @@ -1270,30 +1326,51 @@ build_int_cst_wide (tree type, unsigned HOST_WIDE_INT low, HOST_WIDE_INT hi) t = TREE_VEC_ELT (TYPE_CACHED_VALUES (type), ix); if (t) { - /* Make sure no one is clobbering the shared constant. */ + /* Make sure no one is clobbering the shared constant. We + must be careful here because tree-csts and wide-ints are + not canonicalized in the same way. */ gcc_assert (TREE_TYPE (t) == type); - gcc_assert (TREE_INT_CST_LOW (t) == low); - gcc_assert (TREE_INT_CST_HIGH (t) == hi); + gcc_assert (TREE_INT_CST_NUNITS (t) == (int)len); + if (recanonize) + { + len--; + gcc_assert (sext_hwi (TREE_INT_CST_ELT (t, len), small_prec) + == cst.elt (len)); + } + for (i = 0; i < len; i++) + gcc_assert (TREE_INT_CST_ELT (t, i) == cst.elt (i)); } else { /* Create a new shared int. 
*/ - t = make_node (INTEGER_CST); - - TREE_INT_CST_LOW (t) = low; - TREE_INT_CST_HIGH (t) = hi; + t = make_int_cst (cst.get_len ()); + TREE_INT_CST_NUNITS (t) = len; + if (recanonize) + { + len--; + TREE_INT_CST_ELT (t, len) = zext_hwi (cst.elt (len), small_prec); + } + for (i = 0; i < len; i++) + TREE_INT_CST_ELT (t, i) = cst.elt (i); TREE_TYPE (t) = type; - + TREE_VEC_ELT (TYPE_CACHED_VALUES (type), ix) = t; } } - else + else if (cst.get_len () == 1 + && (TYPE_SIGN (type) == SIGNED + || recanonize + || cst.elt (0) >= 0)) { - /* Use the cache of larger shared ints. */ + /* 99.99% of all int csts will fit in a single HWI. Do that one + efficiently. */ + /* Use the cache of larger shared ints. */ void **slot; - TREE_INT_CST_LOW (int_cst_node) = low; - TREE_INT_CST_HIGH (int_cst_node) = hi; + if (recanonize) + TREE_INT_CST_ELT (int_cst_node, 0) = zext_hwi (cst.elt (0), small_prec); + else + TREE_INT_CST_ELT (int_cst_node, 0) = cst.elt (0); TREE_TYPE (int_cst_node) = type; slot = htab_find_slot (int_cst_hash_table, int_cst_node, INSERT); @@ -1304,7 +1381,48 @@ build_int_cst_wide (tree type, unsigned HOST_WIDE_INT low, HOST_WIDE_INT hi) t = int_cst_node; *slot = t; /* Make a new node for next time round. */ - int_cst_node = make_node (INTEGER_CST); + int_cst_node = make_int_cst (1); + } + } + else + { + /* The value either hashes properly or we drop it on the floor + for the gc to take care of. There will not be enough of them + to worry about. 
*/ + void **slot; + tree nt; + if (!recanonize + && TYPE_SIGN (type) == UNSIGNED + && cst.elt (len - 1) < 0) + { + unsigned int blocks_needed + = (prec + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT; + + nt = make_int_cst (blocks_needed + 1); + for (i = len; i < blocks_needed; i++) + TREE_INT_CST_ELT (nt, i) = (HOST_WIDE_INT)-1; + + TREE_INT_CST_ELT (nt, blocks_needed) = 0; + } + else + nt = make_int_cst (len); + if (recanonize) + { + len--; + TREE_INT_CST_ELT (nt, len) = zext_hwi (cst.elt (len), small_prec); + } + + for (i = 0; i < len; i++) + TREE_INT_CST_ELT (nt, i) = cst.elt (i); + TREE_TYPE (nt) = type; + + slot = htab_find_slot (int_cst_hash_table, nt, INSERT); + t = (tree) *slot; + if (!t) + { + /* Insert this one into the hash table. */ + t = nt; + *slot = t; } } @@ -1315,23 +1433,22 @@ void cache_integer_cst (tree t) { tree type = TREE_TYPE (t); - HOST_WIDE_INT hi = TREE_INT_CST_HIGH (t); - unsigned HOST_WIDE_INT low = TREE_INT_CST_LOW (t); int ix = -1; int limit = 0; + int prec = TYPE_PRECISION (type); gcc_assert (!TREE_OVERFLOW (t)); switch (TREE_CODE (type)) { case NULLPTR_TYPE: - gcc_assert (hi == 0 && low == 0); + gcc_assert (integer_zerop (t)); /* Fallthru. */ case POINTER_TYPE: case REFERENCE_TYPE: /* Cache NULL pointer. */ - if (!hi && !low) + if (integer_zerop (t)) { limit = 1; ix = 0; @@ -1341,8 +1458,8 @@ cache_integer_cst (tree t) case BOOLEAN_TYPE: /* Cache false or true. */ limit = 2; - if (!hi && low < 2) - ix = low; + if (wi::ltu_p (t, 2)) + ix = TREE_INT_CST_ELT (t, 0); break; case INTEGER_TYPE: @@ -1351,17 +1468,35 @@ cache_integer_cst (tree t) { /* Cache 0..N */ limit = INTEGER_SHARE_LIMIT; - if (!hi && low < (unsigned HOST_WIDE_INT)INTEGER_SHARE_LIMIT) - ix = low; + + /* This is a little hokie, but if the prec is smaller than + what is necessary to hold INTEGER_SHARE_LIMIT, then the + obvious test will not get the correct answer. 
*/ + if (prec < HOST_BITS_PER_WIDE_INT) + { + if (tree_to_uhwi (t) < (unsigned HOST_WIDE_INT) INTEGER_SHARE_LIMIT) + ix = tree_to_uhwi (t); + } + else if (wi::ltu_p (t, INTEGER_SHARE_LIMIT)) + ix = tree_to_uhwi (t); } else { /* Cache -1..N */ limit = INTEGER_SHARE_LIMIT + 1; - if (!hi && low < (unsigned HOST_WIDE_INT)INTEGER_SHARE_LIMIT) - ix = low + 1; - else if (hi == -1 && low == -(unsigned HOST_WIDE_INT)1) + + if (integer_minus_onep (t)) ix = 0; + else if (!wi::neg_p (t)) + { + if (prec < HOST_BITS_PER_WIDE_INT) + { + if (tree_to_shwi (t) < INTEGER_SHARE_LIMIT) + ix = tree_to_shwi (t) + 1; + } + else if (wi::ltu_p (t, INTEGER_SHARE_LIMIT)) + ix = tree_to_shwi (t) + 1; + } } break; @@ -1393,13 +1528,10 @@ cache_integer_cst (tree t) /* If there is already an entry for the number verify it's the same. */ if (*slot) - { - gcc_assert (TREE_INT_CST_LOW ((tree)*slot) == low - && TREE_INT_CST_HIGH ((tree)*slot) == hi); - return; - } - /* Otherwise insert this one into the hash table. */ - *slot = t; + gcc_assert (wi::eq_p (tree (*slot), t)); + else + /* Otherwise insert this one into the hash table. */ + *slot = t; } } @@ -1410,34 +1542,10 @@ cache_integer_cst (tree t) tree build_low_bits_mask (tree type, unsigned bits) { - double_int mask; - gcc_assert (bits <= TYPE_PRECISION (type)); - if (bits == TYPE_PRECISION (type) - && !TYPE_UNSIGNED (type)) - /* Sign extended all-ones mask. */ - mask = double_int_minus_one; - else - mask = double_int::mask (bits); - - return build_int_cst_wide (type, mask.low, mask.high); -} - -/* Checks that X is integer constant that can be expressed in (unsigned) - HOST_WIDE_INT without loss of precision. 
*/ - -bool -cst_and_fits_in_hwi (const_tree x) -{ - if (TREE_CODE (x) != INTEGER_CST) - return false; - - if (TYPE_PRECISION (TREE_TYPE (x)) > HOST_BITS_PER_WIDE_INT) - return false; - - return (TREE_INT_CST_HIGH (x) == 0 - || TREE_INT_CST_HIGH (x) == -1); + return wide_int_to_tree (type, wi::mask (bits, false, + TYPE_PRECISION (type))); } /* Build a newly constructed TREE_VEC node of length LEN. */ @@ -1678,8 +1786,8 @@ real_value_from_int_cst (const_tree type, const_tree i) memset (&d, 0, sizeof d); real_from_integer (&d, type ? TYPE_MODE (type) : VOIDmode, - TREE_INT_CST_LOW (i), TREE_INT_CST_HIGH (i), - TYPE_UNSIGNED (TREE_TYPE (i))); + wide_int (i), + TYPE_SIGN (TREE_TYPE (i))); return d; } @@ -1917,6 +2025,27 @@ build_case_label (tree low_value, tree high_value, tree label_decl) return t; } +/* Build a newly constructed INETEGER_CST node of length LEN. */ + +tree +make_int_cst_stat (int len MEM_STAT_DECL) +{ + tree t; + int length = (len - 1) * sizeof (tree) + sizeof (struct tree_int_cst); + + gcc_assert (len); + record_node_allocation_statistics (INTEGER_CST, length); + + t = ggc_alloc_cleared_tree_node_stat (length PASS_MEM_STAT); + + TREE_SET_CODE (t, INTEGER_CST); + TREE_INT_CST_NUNITS (t) = len; + + TREE_CONSTANT (t) = 1; + + return t; +} + /* Build a newly constructed TREE_VEC node of length LEN. 
*/ tree @@ -1946,8 +2075,7 @@ integer_zerop (const_tree expr) switch (TREE_CODE (expr)) { case INTEGER_CST: - return (TREE_INT_CST_LOW (expr) == 0 - && TREE_INT_CST_HIGH (expr) == 0); + return wi::eq_p (expr, 0); case COMPLEX_CST: return (integer_zerop (TREE_REALPART (expr)) && integer_zerop (TREE_IMAGPART (expr))); @@ -1975,8 +2103,7 @@ integer_onep (const_tree expr) switch (TREE_CODE (expr)) { case INTEGER_CST: - return (TREE_INT_CST_LOW (expr) == 1 - && TREE_INT_CST_HIGH (expr) == 0); + return wi::eq_p (expr, 1); case COMPLEX_CST: return (integer_onep (TREE_REALPART (expr)) && integer_zerop (TREE_IMAGPART (expr))); @@ -1999,9 +2126,6 @@ integer_onep (const_tree expr) int integer_all_onesp (const_tree expr) { - int prec; - int uns; - STRIP_NOPS (expr); if (TREE_CODE (expr) == COMPLEX_CST @@ -2021,35 +2145,7 @@ integer_all_onesp (const_tree expr) else if (TREE_CODE (expr) != INTEGER_CST) return 0; - uns = TYPE_UNSIGNED (TREE_TYPE (expr)); - if (TREE_INT_CST_LOW (expr) == ~(unsigned HOST_WIDE_INT) 0 - && TREE_INT_CST_HIGH (expr) == -1) - return 1; - if (!uns) - return 0; - - prec = TYPE_PRECISION (TREE_TYPE (expr)); - if (prec >= HOST_BITS_PER_WIDE_INT) - { - HOST_WIDE_INT high_value; - int shift_amount; - - shift_amount = prec - HOST_BITS_PER_WIDE_INT; - - /* Can not handle precisions greater than twice the host int size. */ - gcc_assert (shift_amount <= HOST_BITS_PER_WIDE_INT); - if (shift_amount == HOST_BITS_PER_WIDE_INT) - /* Shifting by the host word size is undefined according to the ANSI - standard, so we must handle this as a special case. */ - high_value = -1; - else - high_value = ((HOST_WIDE_INT) 1 << shift_amount) - 1; - - return (TREE_INT_CST_LOW (expr) == ~(unsigned HOST_WIDE_INT) 0 - && TREE_INT_CST_HIGH (expr) == high_value); - } - else - return TREE_INT_CST_LOW (expr) == ((unsigned HOST_WIDE_INT) 1 << prec) - 1; + return wi::max_value (TYPE_PRECISION (TREE_TYPE (expr)), UNSIGNED) == expr; } /* Return 1 if EXPR is the integer constant minus one. 
*/ @@ -2072,9 +2168,6 @@ integer_minus_onep (const_tree expr) int integer_pow2p (const_tree expr) { - int prec; - unsigned HOST_WIDE_INT high, low; - STRIP_NOPS (expr); if (TREE_CODE (expr) == COMPLEX_CST @@ -2085,29 +2178,7 @@ integer_pow2p (const_tree expr) if (TREE_CODE (expr) != INTEGER_CST) return 0; - prec = TYPE_PRECISION (TREE_TYPE (expr)); - high = TREE_INT_CST_HIGH (expr); - low = TREE_INT_CST_LOW (expr); - - /* First clear all bits that are beyond the type's precision in case - we've been sign extended. */ - - if (prec == HOST_BITS_PER_DOUBLE_INT) - ; - else if (prec > HOST_BITS_PER_WIDE_INT) - high &= ~(HOST_WIDE_INT_M1U << (prec - HOST_BITS_PER_WIDE_INT)); - else - { - high = 0; - if (prec < HOST_BITS_PER_WIDE_INT) - low &= ~(HOST_WIDE_INT_M1U << prec); - } - - if (high == 0 && low == 0) - return 0; - - return ((high == 0 && (low & (low - 1)) == 0) - || (low == 0 && (high & (high - 1)) == 0)); + return wi::popcount (expr) == 1; } /* Return 1 if EXPR is an integer constant other than zero or a @@ -2119,8 +2190,7 @@ integer_nonzerop (const_tree expr) STRIP_NOPS (expr); return ((TREE_CODE (expr) == INTEGER_CST - && (TREE_INT_CST_LOW (expr) != 0 - || TREE_INT_CST_HIGH (expr) != 0)) + && !wi::eq_p (expr, 0)) || (TREE_CODE (expr) == COMPLEX_CST && (integer_nonzerop (TREE_REALPART (expr)) || integer_nonzerop (TREE_IMAGPART (expr))))); @@ -2141,34 +2211,12 @@ fixed_zerop (const_tree expr) int tree_log2 (const_tree expr) { - int prec; - HOST_WIDE_INT high, low; - STRIP_NOPS (expr); if (TREE_CODE (expr) == COMPLEX_CST) return tree_log2 (TREE_REALPART (expr)); - prec = TYPE_PRECISION (TREE_TYPE (expr)); - high = TREE_INT_CST_HIGH (expr); - low = TREE_INT_CST_LOW (expr); - - /* First clear all bits that are beyond the type's precision in case - we've been sign extended. 
*/ - - if (prec == HOST_BITS_PER_DOUBLE_INT) - ; - else if (prec > HOST_BITS_PER_WIDE_INT) - high &= ~(HOST_WIDE_INT_M1U << (prec - HOST_BITS_PER_WIDE_INT)); - else - { - high = 0; - if (prec < HOST_BITS_PER_WIDE_INT) - low &= ~(HOST_WIDE_INT_M1U << prec); - } - - return (high != 0 ? HOST_BITS_PER_WIDE_INT + exact_log2 (high) - : exact_log2 (low)); + return wi::exact_log2 (expr); } /* Similar, but return the largest integer Y such that 2 ** Y is less @@ -2177,35 +2225,12 @@ tree_log2 (const_tree expr) int tree_floor_log2 (const_tree expr) { - int prec; - HOST_WIDE_INT high, low; - STRIP_NOPS (expr); if (TREE_CODE (expr) == COMPLEX_CST) return tree_log2 (TREE_REALPART (expr)); - prec = TYPE_PRECISION (TREE_TYPE (expr)); - high = TREE_INT_CST_HIGH (expr); - low = TREE_INT_CST_LOW (expr); - - /* First clear all bits that are beyond the type's precision in case - we've been sign extended. Ignore if type's precision hasn't been set - since what we are doing is setting it. */ - - if (prec == HOST_BITS_PER_DOUBLE_INT || prec == 0) - ; - else if (prec > HOST_BITS_PER_WIDE_INT) - high &= ~(HOST_WIDE_INT_M1U << (prec - HOST_BITS_PER_WIDE_INT)); - else - { - high = 0; - if (prec < HOST_BITS_PER_WIDE_INT) - low &= ~(HOST_WIDE_INT_M1U << prec); - } - - return (high != 0 ? HOST_BITS_PER_WIDE_INT + floor_log2 (high) - : floor_log2 (low)); + return wi::floor_log2 (expr); } /* Return 1 if EXPR is the real constant zero. Trailing zeroes matter for @@ -2626,14 +2651,11 @@ int_size_in_bytes (const_tree type) type = TYPE_MAIN_VARIANT (type); t = TYPE_SIZE_UNIT (type); - if (t == 0 - || TREE_CODE (t) != INTEGER_CST - || TREE_INT_CST_HIGH (t) != 0 - /* If the result would appear negative, it's too big to represent. 
*/ - || (HOST_WIDE_INT) TREE_INT_CST_LOW (t) < 0) - return -1; - return TREE_INT_CST_LOW (t); + if (t && cst_fits_uhwi_p (t)) + return tree_to_hwi (t); + else + return -1; } /* Return the maximum size of TYPE (in bytes) as a wide integer @@ -2651,8 +2673,8 @@ max_int_size_in_bytes (const_tree type) { size_tree = TYPE_ARRAY_MAX_SIZE (type); - if (size_tree && host_integerp (size_tree, 1)) - size = tree_low_cst (size_tree, 1); + if (size_tree && tree_fits_uhwi_p (size_tree)) + size = tree_to_uhwi (size_tree); } /* If we still haven't been able to get a size, see if the language @@ -2662,8 +2684,8 @@ max_int_size_in_bytes (const_tree type) { size_tree = lang_hooks.types.max_size (type); - if (size_tree && host_integerp (size_tree, 1)) - size = tree_low_cst (size_tree, 1); + if (size_tree && tree_fits_uhwi_p (size_tree)) + size = tree_to_uhwi (size_tree); } return size; @@ -2698,7 +2720,7 @@ bit_position (const_tree field) HOST_WIDE_INT int_bit_position (const_tree field) { - return tree_low_cst (bit_position (field), 0); + return tree_to_shwi (bit_position (field)); } /* Return the byte position of FIELD, in bytes from the start of the record. @@ -2718,7 +2740,7 @@ byte_position (const_tree field) HOST_WIDE_INT int_byte_position (const_tree field) { - return tree_low_cst (byte_position (field), 0); + return tree_to_shwi (byte_position (field)); } /* Return the strictest alignment, in bits, that T is known to have. */ @@ -4292,11 +4314,11 @@ build_simple_mem_ref_loc (location_t loc, tree ptr) /* Return the constant offset of a MEM_REF or TARGET_MEM_REF tree T. 
*/ -double_int +addr_wide_int mem_ref_offset (const_tree t) { tree toff = TREE_OPERAND (t, 1); - return tree_to_double_int (toff).sext (TYPE_PRECISION (TREE_TYPE (toff))); + return wi::sext (addr_wide_int (toff), TYPE_PRECISION (TREE_TYPE (toff))); } /* Return an invariant ADDR_EXPR of type TYPE taking the address of BASE @@ -4520,6 +4542,8 @@ build_type_attribute_qual_variant (tree ttype, tree attribute, int quals) { hashval_t hashcode = 0; tree ntype; + int i; + tree t; enum tree_code code = TREE_CODE (ttype); /* Building a distinct copy of a tagged type is inappropriate; it @@ -4561,10 +4585,9 @@ build_type_attribute_qual_variant (tree ttype, tree attribute, int quals) hashcode); break; case INTEGER_TYPE: - hashcode = iterative_hash_object - (TREE_INT_CST_LOW (TYPE_MAX_VALUE (ntype)), hashcode); - hashcode = iterative_hash_object - (TREE_INT_CST_HIGH (TYPE_MAX_VALUE (ntype)), hashcode); + t = TYPE_MAX_VALUE (ntype); + for (i = 0; i < TREE_INT_CST_NUNITS (t); i++) + hashcode = iterative_hash_object (TREE_INT_CST_ELT (t, i), hashcode); break; case REAL_TYPE: case FIXED_POINT_TYPE: @@ -5048,7 +5071,7 @@ free_lang_data_in_decl (tree decl) DECL_VINDEX referring to itself into a vtable slot number as it should. Happens with functions that are copied and then forgotten about. Just clear it, it won't matter anymore. 
*/ - if (DECL_VINDEX (decl) && !host_integerp (DECL_VINDEX (decl), 0)) + if (DECL_VINDEX (decl) && !tree_fits_shwi_p (DECL_VINDEX (decl))) DECL_VINDEX (decl) = NULL_TREE; } else if (TREE_CODE (decl) == VAR_DECL) @@ -6576,6 +6599,8 @@ type_hash_eq (const void *va, const void *vb) case INTEGER_TYPE: case REAL_TYPE: case BOOLEAN_TYPE: + if (TYPE_PRECISION (a->type) != TYPE_PRECISION (b->type)) + return false; return ((TYPE_MAX_VALUE (a->type) == TYPE_MAX_VALUE (b->type) || tree_int_cst_equal (TYPE_MAX_VALUE (a->type), TYPE_MAX_VALUE (b->type))) @@ -6866,18 +6891,26 @@ type_num_arguments (const_tree type) int tree_int_cst_equal (const_tree t1, const_tree t2) { + unsigned int prec1, prec2; if (t1 == t2) return 1; if (t1 == 0 || t2 == 0) return 0; - if (TREE_CODE (t1) == INTEGER_CST - && TREE_CODE (t2) == INTEGER_CST - && TREE_INT_CST_LOW (t1) == TREE_INT_CST_LOW (t2) - && TREE_INT_CST_HIGH (t1) == TREE_INT_CST_HIGH (t2)) - return 1; + if (TREE_CODE (t1) != INTEGER_CST + || TREE_CODE (t2) != INTEGER_CST) + return 0; + + prec1 = TYPE_PRECISION (TREE_TYPE (t1)); + prec2 = TYPE_PRECISION (TREE_TYPE (t2)); + if (prec1 == prec2) + return wi::eq_p (t1, t2); + else if (prec1 < prec2) + return wide_int::from (t1, prec2, TYPE_SIGN (TREE_TYPE (t1))) == t2; + else + return wide_int::from (t2, prec1, TYPE_SIGN (TREE_TYPE (t2))) == t1; return 0; } @@ -6922,37 +6955,6 @@ tree_int_cst_compare (const_tree t1, const_tree t2) return 0; } -/* Return 1 if T is an INTEGER_CST that can be manipulated efficiently on - the host. If POS is zero, the value can be represented in a single - HOST_WIDE_INT. If POS is nonzero, the value must be non-negative and can - be represented in a single unsigned HOST_WIDE_INT. */ - -int -host_integerp (const_tree t, int pos) -{ - if (t == NULL_TREE) - return 0; - - return (TREE_CODE (t) == INTEGER_CST - && ((TREE_INT_CST_HIGH (t) == 0 - && (HOST_WIDE_INT) TREE_INT_CST_LOW (t) >= 0) - || (! 
pos && TREE_INT_CST_HIGH (t) == -1 - && (HOST_WIDE_INT) TREE_INT_CST_LOW (t) < 0 - && !TYPE_UNSIGNED (TREE_TYPE (t))) - || (pos && TREE_INT_CST_HIGH (t) == 0))); -} - -/* Return the HOST_WIDE_INT least significant bits of T if it is an - INTEGER_CST and there is no overflow. POS is nonzero if the result must - be non-negative. We must be able to satisfy the above conditions. */ - -HOST_WIDE_INT -tree_low_cst (const_tree t, int pos) -{ - gcc_assert (host_integerp (t, pos)); - return TREE_INT_CST_LOW (t); -} - /* Return the HOST_WIDE_INT least significant bits of T, a sizetype kind INTEGER_CST. This makes sure to properly sign-extend the constant. */ @@ -6960,8 +6962,11 @@ tree_low_cst (const_tree t, int pos) HOST_WIDE_INT size_low_cst (const_tree t) { - double_int d = tree_to_double_int (t); - return d.sext (TYPE_PRECISION (TREE_TYPE (t))).low; + HOST_WIDE_INT w = TREE_INT_CST_ELT (t, 0); + int prec = TYPE_PRECISION (TREE_TYPE (t)); + if (prec < HOST_BITS_PER_WIDE_INT) + return sext_hwi (w, prec); + return w; } /* Return the most significant (sign) bit of T. */ @@ -6970,17 +6975,8 @@ int tree_int_cst_sign_bit (const_tree t) { unsigned bitno = TYPE_PRECISION (TREE_TYPE (t)) - 1; - unsigned HOST_WIDE_INT w; - if (bitno < HOST_BITS_PER_WIDE_INT) - w = TREE_INT_CST_LOW (t); - else - { - w = TREE_INT_CST_HIGH (t); - bitno -= HOST_BITS_PER_WIDE_INT; - } - - return (w >> bitno) & 1; + return wi::extract_uhwi (t, bitno, 1); } /* Return an indication of the sign of the integer constant T. @@ -6990,11 +6986,12 @@ tree_int_cst_sign_bit (const_tree t) int tree_int_cst_sgn (const_tree t) { - if (TREE_INT_CST_LOW (t) == 0 && TREE_INT_CST_HIGH (t) == 0) + wide_int w = t; + if (w == 0) return 0; else if (TYPE_UNSIGNED (TREE_TYPE (t))) return 1; - else if (TREE_INT_CST_HIGH (t) < 0) + else if (wi::neg_p (w)) return -1; else return 1; @@ -7004,7 +7001,7 @@ tree_int_cst_sgn (const_tree t) signed or unsigned type, UNSIGNEDP says which. 
*/ unsigned int -tree_int_cst_min_precision (tree value, bool unsignedp) +tree_int_cst_min_precision (tree value, signop sgn) { /* If the value is negative, compute its negative minus 1. The latter adjustment is because the absolute value of the largest negative value @@ -7022,7 +7019,7 @@ tree_int_cst_min_precision (tree value, bool unsignedp) if (integer_zerop (value)) return 1; else - return tree_floor_log2 (value) + 1 + !unsignedp; + return tree_floor_log2 (value) + 1 + (sgn == SIGNED ? 1 : 0) ; } /* Compare two constructor-element-type constants. Return 1 if the lists @@ -7083,8 +7080,7 @@ simple_cst_equal (const_tree t1, const_tree t2) switch (code1) { case INTEGER_CST: - return (TREE_INT_CST_LOW (t1) == TREE_INT_CST_LOW (t2) - && TREE_INT_CST_HIGH (t1) == TREE_INT_CST_HIGH (t2)); + return wi::eq_p (t1, t2); case REAL_CST: return REAL_VALUES_IDENTICAL (TREE_REAL_CST (t1), TREE_REAL_CST (t2)); @@ -7220,11 +7216,11 @@ compare_tree_int (const_tree t, unsigned HOST_WIDE_INT u) { if (tree_int_cst_sgn (t) < 0) return -1; - else if (TREE_INT_CST_HIGH (t) != 0) + else if (!cst_fits_uhwi_p (t)) return 1; - else if (TREE_INT_CST_LOW (t) == u) + else if ((unsigned HOST_WIDE_INT) tree_to_hwi (t) == u) return 0; - else if (TREE_INT_CST_LOW (t) < u) + else if ((unsigned HOST_WIDE_INT) tree_to_hwi (t) < u) return -1; else return 1; @@ -7237,7 +7233,7 @@ compare_tree_int (const_tree t, unsigned HOST_WIDE_INT u) bool valid_constant_size_p (const_tree size) { - if (! host_integerp (size, 1) + if (! tree_fits_uhwi_p (size) || TREE_OVERFLOW (size) || tree_int_cst_sign_bit (size) != 0) return false; @@ -7356,8 +7352,9 @@ iterative_hash_expr (const_tree t, hashval_t val) /* Alas, constants aren't shared, so we can't rely on pointer identity. 
*/ case INTEGER_CST: - val = iterative_hash_host_wide_int (TREE_INT_CST_LOW (t), val); - return iterative_hash_host_wide_int (TREE_INT_CST_HIGH (t), val); + for (i = 0; i < TREE_INT_CST_NUNITS (t); i++) + val = iterative_hash_host_wide_int (TREE_INT_CST_ELT (t, i), val); + return val; case REAL_CST: { unsigned int val2 = real_hash (TREE_REAL_CST_PTR (t)); @@ -7688,8 +7685,8 @@ build_nonstandard_integer_type (unsigned HOST_WIDE_INT precision, fixup_signed_type (itype); ret = itype; - if (host_integerp (TYPE_MAX_VALUE (itype), 1)) - ret = type_hash_canon (tree_low_cst (TYPE_MAX_VALUE (itype), 1), itype); + if (tree_fits_uhwi_p (TYPE_MAX_VALUE (itype))) + ret = type_hash_canon (tree_to_uhwi (TYPE_MAX_VALUE (itype)), itype); if (precision <= MAX_INT_CACHED_PREC) nonstandard_integer_type_cache[precision + unsignedp] = ret; @@ -8630,10 +8627,10 @@ get_narrower (tree op, int *unsignedp_ptr) && TREE_CODE (TREE_TYPE (op)) != FIXED_POINT_TYPE /* Ensure field is laid out already. */ && DECL_SIZE (TREE_OPERAND (op, 1)) != 0 - && host_integerp (DECL_SIZE (TREE_OPERAND (op, 1)), 1)) + && tree_fits_uhwi_p (DECL_SIZE (TREE_OPERAND (op, 1)))) { unsigned HOST_WIDE_INT innerprec - = tree_low_cst (DECL_SIZE (TREE_OPERAND (op, 1)), 1); + = tree_to_uhwi (DECL_SIZE (TREE_OPERAND (op, 1))); int unsignedp = (DECL_UNSIGNED (TREE_OPERAND (op, 1)) || TYPE_UNSIGNED (TREE_TYPE (TREE_OPERAND (op, 1)))); tree type = lang_hooks.types.type_for_size (innerprec, unsignedp); @@ -8668,11 +8665,11 @@ bool int_fits_type_p (const_tree c, const_tree type) { tree type_low_bound, type_high_bound; - bool ok_for_low_bound, ok_for_high_bound, unsc; - double_int dc, dd; + bool ok_for_low_bound, ok_for_high_bound; + wide_int wc, wd; + signop sgn_c = TYPE_SIGN (TREE_TYPE (c)); - dc = tree_to_double_int (c); - unsc = TYPE_UNSIGNED (TREE_TYPE (c)); + wc = c; retry: type_low_bound = TYPE_MIN_VALUE (type); @@ -8681,7 +8678,7 @@ retry: /* If at least one bound of the type is a constant integer, we can check ourselves 
and maybe make a decision. If no such decision is possible, but this type is a subtype, try checking against that. Otherwise, use - double_int_fits_to_tree_p, which checks against the precision. + fits_to_tree_p, which checks against the precision. Compute the status for each possibly constant bound, and return if we see one does not match. Use ok_for_xxx_bound for this purpose, assigning -1 @@ -8691,18 +8688,18 @@ retry: /* Check if c >= type_low_bound. */ if (type_low_bound && TREE_CODE (type_low_bound) == INTEGER_CST) { - dd = tree_to_double_int (type_low_bound); - if (unsc != TYPE_UNSIGNED (TREE_TYPE (type_low_bound))) + wd = type_low_bound; + if (sgn_c != TYPE_SIGN (TREE_TYPE (type_low_bound))) { - int c_neg = (!unsc && dc.is_negative ()); - int t_neg = (unsc && dd.is_negative ()); + int c_neg = (sgn_c == SIGNED && wi::neg_p (wc)); + int t_neg = (sgn_c == UNSIGNED && wi::neg_p (wd)); if (c_neg && !t_neg) return false; - if ((c_neg || !t_neg) && dc.ult (dd)) + if ((c_neg || !t_neg) && wi::ltu_p (wc, wd)) return false; } - else if (dc.cmp (dd, unsc) < 0) + else if (wi::lt_p (wc, wd, sgn_c)) return false; ok_for_low_bound = true; } @@ -8712,18 +8709,18 @@ retry: /* Check if c <= type_high_bound. 
*/ if (type_high_bound && TREE_CODE (type_high_bound) == INTEGER_CST) { - dd = tree_to_double_int (type_high_bound); - if (unsc != TYPE_UNSIGNED (TREE_TYPE (type_high_bound))) + wd = type_high_bound; + if (sgn_c != TYPE_SIGN (TREE_TYPE (type_high_bound))) { - int c_neg = (!unsc && dc.is_negative ()); - int t_neg = (unsc && dd.is_negative ()); + int c_neg = (sgn_c == SIGNED && wi::neg_p (wc)); + int t_neg = (sgn_c == UNSIGNED && wi::neg_p (wd)); if (t_neg && !c_neg) return false; - if ((t_neg || !c_neg) && dc.ugt (dd)) + if ((t_neg || !c_neg) && wi::gtu_p (wc, wd)) return false; } - else if (dc.cmp (dd, unsc) > 0) + else if (wi::gt_p (wc, wd, sgn_c)) return false; ok_for_high_bound = true; } @@ -8737,7 +8734,7 @@ retry: /* Perform some generic filtering which may allow making a decision even if the bounds are not constant. First, negative integers never fit in unsigned types, */ - if (TYPE_UNSIGNED (type) && !unsc && dc.is_negative ()) + if (TYPE_UNSIGNED (type) && sgn_c == SIGNED && wi::neg_p (wc)) return false; /* Second, narrower types always fit in wider ones. */ @@ -8745,18 +8742,8 @@ retry: return true; /* Third, unsigned integers with top bit set never fit signed types. */ - if (! TYPE_UNSIGNED (type) && unsc) - { - int prec = GET_MODE_BITSIZE (TYPE_MODE (TREE_TYPE (c))) - 1; - if (prec < HOST_BITS_PER_WIDE_INT) - { - if (((((unsigned HOST_WIDE_INT) 1) << prec) & dc.low) != 0) - return false; - } - else if (((((unsigned HOST_WIDE_INT) 1) - << (prec - HOST_BITS_PER_WIDE_INT)) & dc.high) != 0) - return false; - } + if (!TYPE_UNSIGNED (type) && sgn_c == UNSIGNED && wi::neg_p (wc)) + return false; /* If we haven't been able to decide at this point, there nothing more we can check ourselves here. Look at the base type if we have one and it @@ -8769,8 +8756,8 @@ retry: goto retry; } - /* Or to double_int_fits_to_tree_p, if nothing else. */ - return double_int_fits_to_tree_p (type, dc); + /* Or to fits_to_tree_p, if nothing else. 
*/ + return wi::fits_to_tree_p (wc, type); } /* Stores bounds of an integer TYPE in MIN and MAX. If TYPE has non-constant @@ -8783,33 +8770,25 @@ get_type_static_bounds (const_tree type, mpz_t min, mpz_t max) { if (!POINTER_TYPE_P (type) && TYPE_MIN_VALUE (type) && TREE_CODE (TYPE_MIN_VALUE (type)) == INTEGER_CST) - mpz_set_double_int (min, tree_to_double_int (TYPE_MIN_VALUE (type)), - TYPE_UNSIGNED (type)); + wi::to_mpz (TYPE_MIN_VALUE (type), min, TYPE_SIGN (type)); else { if (TYPE_UNSIGNED (type)) mpz_set_ui (min, 0); else { - double_int mn; - mn = double_int::mask (TYPE_PRECISION (type) - 1); - mn = (mn + double_int_one).sext (TYPE_PRECISION (type)); - mpz_set_double_int (min, mn, false); + wide_int mn = wi::min_value (TYPE_PRECISION (type), SIGNED); + wi::to_mpz (mn, min, SIGNED); } } if (!POINTER_TYPE_P (type) && TYPE_MAX_VALUE (type) && TREE_CODE (TYPE_MAX_VALUE (type)) == INTEGER_CST) - mpz_set_double_int (max, tree_to_double_int (TYPE_MAX_VALUE (type)), - TYPE_UNSIGNED (type)); + wi::to_mpz (TYPE_MAX_VALUE (type), max, TYPE_SIGN (type)); else { - if (TYPE_UNSIGNED (type)) - mpz_set_double_int (max, double_int::mask (TYPE_PRECISION (type)), - true); - else - mpz_set_double_int (max, double_int::mask (TYPE_PRECISION (type) - 1), - true); + wide_int mn = wi::max_value (TYPE_PRECISION (type), TYPE_SIGN (type)); + wi::to_mpz (mn, max, TYPE_SIGN (type)); } } @@ -9478,6 +9457,18 @@ tree_contains_struct_check_failed (const_tree node, (dynamically sized) vector. */ void +tree_int_cst_elt_check_failed (int idx, int len, const char *file, int line, + const char *function) +{ + internal_error + ("tree check: accessed elt %d of tree_int_cst with %d elts in %s, at %s:%d", + idx + 1, len, function, trim_filename (file), line); +} + +/* Similar to above, except that the check is for the bounds of a TREE_VEC's + (dynamically sized) vector. 
*/ + +void tree_vec_elt_check_failed (int idx, int len, const char *file, int line, const char *function) { @@ -9702,13 +9693,11 @@ build_common_tree_nodes (bool signed_char, bool short_double) #endif /* Define a boolean type. This type only represents boolean values but - may be larger than char depending on the value of BOOL_TYPE_SIZE. - Front ends which want to override this size (i.e. Java) can redefine - boolean_type_node before calling build_common_tree_nodes_2. */ + may be larger than char depending on the value of BOOL_TYPE_SIZE. */ boolean_type_node = make_unsigned_type (BOOL_TYPE_SIZE); TREE_SET_CODE (boolean_type_node, BOOLEAN_TYPE); - TYPE_MAX_VALUE (boolean_type_node) = build_int_cst (boolean_type_node, 1); TYPE_PRECISION (boolean_type_node) = 1; + TYPE_MAX_VALUE (boolean_type_node) = build_int_cst (boolean_type_node, 1); /* Define what type to use for size_t. */ if (strcmp (SIZE_TYPE, "unsigned int") == 0) @@ -10264,10 +10253,10 @@ build_vector_type_for_mode (tree innertype, enum machine_mode mode) case MODE_INT: /* Check that there are no leftover bits. */ gcc_assert (GET_MODE_BITSIZE (mode) - % TREE_INT_CST_LOW (TYPE_SIZE (innertype)) == 0); + % tree_to_hwi (TYPE_SIZE (innertype)) == 0); nunits = GET_MODE_BITSIZE (mode) - / TREE_INT_CST_LOW (TYPE_SIZE (innertype)); + / tree_to_hwi (TYPE_SIZE (innertype)); break; default: @@ -10652,11 +10641,10 @@ HOST_WIDE_INT int_cst_value (const_tree x) { unsigned bits = TYPE_PRECISION (TREE_TYPE (x)); - unsigned HOST_WIDE_INT val = TREE_INT_CST_LOW (x); + unsigned HOST_WIDE_INT val = tree_to_hwi (x); /* Make sure the sign-extended value will fit in a HOST_WIDE_INT. 
*/ - gcc_assert (TREE_INT_CST_HIGH (x) == 0 - || TREE_INT_CST_HIGH (x) == -1); + gcc_assert (cst_fits_shwi_p (x)); if (bits < HOST_BITS_PER_WIDE_INT) { @@ -10676,16 +10664,22 @@ HOST_WIDEST_INT widest_int_cst_value (const_tree x) { unsigned bits = TYPE_PRECISION (TREE_TYPE (x)); - unsigned HOST_WIDEST_INT val = TREE_INT_CST_LOW (x); + unsigned HOST_WIDEST_INT val = tree_to_hwi (x); #if HOST_BITS_PER_WIDEST_INT > HOST_BITS_PER_WIDE_INT gcc_assert (HOST_BITS_PER_WIDEST_INT >= HOST_BITS_PER_DOUBLE_INT); - val |= (((unsigned HOST_WIDEST_INT) TREE_INT_CST_HIGH (x)) - << HOST_BITS_PER_WIDE_INT); + gcc_assert (TREE_INT_CST_NUNITS (x) <= 2 + || (TREE_INT_CST_NUNITS (x) == 3 && TREE_INT_CST_ELT (x, 2) == 0)); + + if (TREE_INT_CST_NUNITS (x) == 1) + val = ((HOST_WIDEST_INT)val << HOST_BITS_PER_WIDE_INT) >> HOST_BITS_PER_WIDE_INT; + else + val |= (((unsigned HOST_WIDEST_INT) TREE_INT_CST_ELT (x, 1)) + << HOST_BITS_PER_WIDE_INT); #else /* Make sure the sign-extended value will fit in a HOST_WIDE_INT. */ - gcc_assert (TREE_INT_CST_HIGH (x) == 0 - || TREE_INT_CST_HIGH (x) == -1); + gcc_assert (TREE_INT_CST_NUNITS (x) == 1 + || (TREE_INT_CST_NUNITS (x) == 2 && TREE_INT_CST_ELT (x, 1) == 0)); #endif if (bits < HOST_BITS_PER_WIDEST_INT) @@ -10770,7 +10764,6 @@ truth_type_for (tree type) tree upper_bound_in_type (tree outer, tree inner) { - double_int high; unsigned int det = 0; unsigned oprec = TYPE_PRECISION (outer); unsigned iprec = TYPE_PRECISION (inner); @@ -10814,21 +10807,8 @@ upper_bound_in_type (tree outer, tree inner) gcc_unreachable (); } - /* Compute 2^^prec - 1. 
*/ - if (prec <= HOST_BITS_PER_WIDE_INT) - { - high.high = 0; - high.low = ((~(unsigned HOST_WIDE_INT) 0) - >> (HOST_BITS_PER_WIDE_INT - prec)); - } - else - { - high.high = ((~(unsigned HOST_WIDE_INT) 0) - >> (HOST_BITS_PER_DOUBLE_INT - prec)); - high.low = ~(unsigned HOST_WIDE_INT) 0; - } - - return double_int_to_tree (outer, high); + return wide_int_to_tree (outer, + wi::mask (prec, false, TYPE_PRECISION (outer))); } /* Returns the smallest value obtainable by casting something in INNER type to @@ -10837,7 +10817,6 @@ upper_bound_in_type (tree outer, tree inner) tree lower_bound_in_type (tree outer, tree inner) { - double_int low; unsigned oprec = TYPE_PRECISION (outer); unsigned iprec = TYPE_PRECISION (inner); @@ -10848,7 +10827,7 @@ lower_bound_in_type (tree outer, tree inner) contains all values of INNER type. In particular, both INNER and OUTER types have zero in common. */ || (oprec > iprec && TYPE_UNSIGNED (inner))) - low.low = low.high = 0; + return build_int_cst (outer, 0); else { /* If we are widening a signed type to another signed type, we @@ -10856,21 +10835,10 @@ lower_bound_in_type (tree outer, tree inner) precision or narrowing to a signed type, we want to obtain -2^(oprec-1). */ unsigned prec = oprec > iprec ? iprec : oprec; - - if (prec <= HOST_BITS_PER_WIDE_INT) - { - low.high = ~(unsigned HOST_WIDE_INT) 0; - low.low = (~(unsigned HOST_WIDE_INT) 0) << (prec - 1); - } - else - { - low.high = ((~(unsigned HOST_WIDE_INT) 0) - << (prec - HOST_BITS_PER_WIDE_INT - 1)); - low.low = 0; - } + return wide_int_to_tree (outer, + wi::mask (prec - 1, true, + TYPE_PRECISION (outer))); } - - return double_int_to_tree (outer, low); } /* Return nonzero if two operands that are suitable for PHI nodes are @@ -10889,42 +10857,12 @@ operand_equal_for_phi_arg_p (const_tree arg0, const_tree arg1) return operand_equal_p (arg0, arg1, 0); } -/* Returns number of zeros at the end of binary representation of X. - - ??? Use ffs if available? 
*/ +/* Returns number of zeros at the end of binary representation of X. */ tree num_ending_zeros (const_tree x) { - unsigned HOST_WIDE_INT fr, nfr; - unsigned num, abits; - tree type = TREE_TYPE (x); - - if (TREE_INT_CST_LOW (x) == 0) - { - num = HOST_BITS_PER_WIDE_INT; - fr = TREE_INT_CST_HIGH (x); - } - else - { - num = 0; - fr = TREE_INT_CST_LOW (x); - } - - for (abits = HOST_BITS_PER_WIDE_INT / 2; abits; abits /= 2) - { - nfr = fr >> abits; - if (nfr << abits == fr) - { - num += abits; - fr = nfr; - } - } - - if (num > TYPE_PRECISION (type)) - num = TYPE_PRECISION (type); - - return build_int_cst_type (type, num); + return build_int_cst (TREE_TYPE (x), wi::ctz (x)); } @@ -12095,7 +12033,7 @@ get_binfo_at_offset (tree binfo, HOST_WIDE_INT offset, tree expected_type) continue; pos = int_bit_position (fld); - size = tree_low_cst (DECL_SIZE (fld), 1); + size = tree_to_uhwi (DECL_SIZE (fld)); if (pos <= offset && (pos + size) > offset) break; } diff --git a/gcc/tree.def b/gcc/tree.def index 88c850af120..5779e3bf8cf 100644 --- a/gcc/tree.def +++ b/gcc/tree.def @@ -257,13 +257,16 @@ DEFTREECODE (LANG_TYPE, "lang_type", tcc_type, 0) /* First, the constants. */ -/* Contents are in TREE_INT_CST_LOW and TREE_INT_CST_HIGH fields, - 32 bits each, giving us a 64 bit constant capability. INTEGER_CST - nodes can be shared, and therefore should be considered read only. - They should be copied, before setting a flag such as TREE_OVERFLOW. - If an INTEGER_CST has TREE_OVERFLOW already set, it is known to be unique. - INTEGER_CST nodes are created for the integral types, for pointer - types and for vector and float types in some circumstances. */ +/* Contents are in an array of HOST_WIDE_INTS. The array may be as + wide as the precision requires but may be shorter when all of the + upper bits are sign bits. The length of the array is given in + TREE_INT_CST_NUNITS and each element can be obtained using + TREE_INT_CST_ELT. 
INTEGER_CST nodes can be shared, and therefore + should be considered read only. They should be copied, before + setting a flag such as TREE_OVERFLOW. If an INTEGER_CST has + TREE_OVERFLOW already set, it is known to be unique. INTEGER_CST + nodes are created for the integral types, for pointer types and for + vector and float types in some circumstances. */ DEFTREECODE (INTEGER_CST, "integer_cst", tcc_constant, 0) /* Contents are in TREE_REAL_CST field. */ diff --git a/gcc/tree.h b/gcc/tree.h index 2f4514d6a74..dc5db617bc5 100644 --- a/gcc/tree.h +++ b/gcc/tree.h @@ -21,6 +21,7 @@ along with GCC; see the file COPYING3. If not see #define GCC_TREE_H #include "tree-core.h" +#include "wide-int.h" /* Macros for initializing `tree_contains_struct'. */ #define MARK_TS_BASE(C) \ @@ -201,6 +202,8 @@ along with GCC; see the file COPYING3. If not see #define CASE_FLT_FN_REENT(FN) case FN##_R: case FN##F_R: case FN##L_R #define CASE_INT_FN(FN) case FN: case FN##L: case FN##LL: case FN##IMAX +#define NULL_TREE (tree) NULL + /* Define accessors for the fields that all tree nodes have (though some fields are not used for all kinds of nodes). */ @@ -272,6 +275,9 @@ along with GCC; see the file COPYING3. 
If not see #define NON_TYPE_CHECK(T) \ (non_type_check ((T), __FILE__, __LINE__, __FUNCTION__)) +#define TREE_INT_CST_ELT_CHECK(T, I) \ +(*tree_int_cst_elt_check ((T), (I), __FILE__, __LINE__, __FUNCTION__)) + #define TREE_VEC_ELT_CHECK(T, I) \ (*(CONST_CAST2 (tree *, typeof (T)*, \ tree_vec_elt_check ((T), (I), __FILE__, __LINE__, __FUNCTION__)))) @@ -327,6 +333,9 @@ extern void tree_not_class_check_failed (const_tree, const enum tree_code_class, const char *, int, const char *) ATTRIBUTE_NORETURN; +extern void tree_int_cst_elt_check_failed (int, int, const char *, + int, const char *) + ATTRIBUTE_NORETURN; extern void tree_vec_elt_check_failed (int, int, const char *, int, const char *) ATTRIBUTE_NORETURN; @@ -364,6 +373,7 @@ extern void omp_clause_range_check_failed (const_tree, const char *, int, #define TREE_RANGE_CHECK(T, CODE1, CODE2) (T) #define EXPR_CHECK(T) (T) #define NON_TYPE_CHECK(T) (T) +#define TREE_INT_CST_ELT_CHECK(T, I) ((T)->int_cst.val[I]) #define TREE_VEC_ELT_CHECK(T, I) ((T)->vec.a[I]) #define TREE_OPERAND_CHECK(T, I) ((T)->exp.operands[I]) #define TREE_OPERAND_CHECK_CODE(T, CODE, I) ((T)->exp.operands[I]) @@ -589,7 +599,7 @@ extern void omp_clause_range_check_failed (const_tree, const char *, int, #define SET_PREDICT_EXPR_OUTCOME(NODE, OUTCOME) \ (PREDICT_EXPR_CHECK (NODE)->base.addressable_flag = (int) OUTCOME) #define PREDICT_EXPR_PREDICTOR(NODE) \ - ((enum br_predictor)tree_low_cst (TREE_OPERAND (PREDICT_EXPR_CHECK (NODE), 0), 0)) + ((enum br_predictor)tree_to_shwi (TREE_OPERAND (PREDICT_EXPR_CHECK (NODE), 0))) /* In a VAR_DECL, nonzero means allocate static storage. In a FUNCTION_DECL, nonzero if function has been defined. @@ -733,6 +743,9 @@ extern void omp_clause_range_check_failed (const_tree, const char *, int, /* In integral and pointer types, means an unsigned type. */ #define TYPE_UNSIGNED(NODE) (TYPE_CHECK (NODE)->base.u.bits.unsigned_flag) +/* Same as TYPE_UNSIGNED but converted to SIGNOP. 
*/ +#define TYPE_SIGN(NODE) ((signop)TYPE_UNSIGNED(NODE)) + /* True if overflow wraps around for the given integral type. That is, TYPE_MAX + 1 == TYPE_MIN. */ #define TYPE_OVERFLOW_WRAPS(TYPE) \ @@ -864,25 +877,14 @@ extern void omp_clause_range_check_failed (const_tree, const char *, int, /* Define additional fields and accessors for nodes representing constants. */ -/* In an INTEGER_CST node. These two together make a 2-word integer. - If the data type is signed, the value is sign-extended to 2 words - even though not all of them may really be in use. - In an unsigned constant shorter than 2 words, the extra bits are 0. */ -#define TREE_INT_CST(NODE) (INTEGER_CST_CHECK (NODE)->int_cst.int_cst) -#define TREE_INT_CST_LOW(NODE) (TREE_INT_CST (NODE).low) -#define TREE_INT_CST_HIGH(NODE) (TREE_INT_CST (NODE).high) - #define INT_CST_LT(A, B) \ - (TREE_INT_CST_HIGH (A) < TREE_INT_CST_HIGH (B) \ - || (TREE_INT_CST_HIGH (A) == TREE_INT_CST_HIGH (B) \ - && TREE_INT_CST_LOW (A) < TREE_INT_CST_LOW (B))) + (wi::lts_p (A, B)) -#define INT_CST_LT_UNSIGNED(A, B) \ - (((unsigned HOST_WIDE_INT) TREE_INT_CST_HIGH (A) \ - < (unsigned HOST_WIDE_INT) TREE_INT_CST_HIGH (B)) \ - || (((unsigned HOST_WIDE_INT) TREE_INT_CST_HIGH (A) \ - == (unsigned HOST_WIDE_INT) TREE_INT_CST_HIGH (B)) \ - && TREE_INT_CST_LOW (A) < TREE_INT_CST_LOW (B))) +#define INT_CST_LT_UNSIGNED(A, B) \ + (wi::ltu_p (A, B)) + +#define TREE_INT_CST_NUNITS(NODE) (INTEGER_CST_CHECK (NODE)->base.u.length) +#define TREE_INT_CST_ELT(NODE, I) TREE_INT_CST_ELT_CHECK (NODE, I) #define TREE_REAL_CST_PTR(NODE) (REAL_CST_CHECK (NODE)->real_cst.real_cst_ptr) #define TREE_REAL_CST(NODE) (*TREE_REAL_CST_PTR (NODE)) @@ -998,7 +1000,7 @@ extern void omp_clause_range_check_failed (const_tree, const char *, int, Note that we have to bypass the use of TREE_OPERAND to access that field to avoid infinite recursion in expanding the macros. 
*/ #define VL_EXP_OPERAND_LENGTH(NODE) \ - ((int)TREE_INT_CST_LOW (VL_EXP_CHECK (NODE)->exp.operands[0])) + ((int)tree_to_hwi (VL_EXP_CHECK (NODE)->exp.operands[0])) /* Nonzero if is_gimple_debug() may possibly hold. */ #define MAY_HAVE_DEBUG_STMTS (flag_var_tracking_assignments) @@ -1100,7 +1102,7 @@ extern void protected_set_expr_location (tree, location_t); #define CHREC_VAR(NODE) TREE_OPERAND (POLYNOMIAL_CHREC_CHECK (NODE), 0) #define CHREC_LEFT(NODE) TREE_OPERAND (POLYNOMIAL_CHREC_CHECK (NODE), 1) #define CHREC_RIGHT(NODE) TREE_OPERAND (POLYNOMIAL_CHREC_CHECK (NODE), 2) -#define CHREC_VARIABLE(NODE) TREE_INT_CST_LOW (CHREC_VAR (NODE)) +#define CHREC_VARIABLE(NODE) tree_to_hwi (CHREC_VAR (NODE)) /* LABEL_EXPR accessor. This gives access to the label associated with the given label expression. */ @@ -2854,6 +2856,28 @@ non_type_check (tree __t, const char *__f, int __l, const char *__g) return __t; } +inline const HOST_WIDE_INT * +tree_int_cst_elt_check (const_tree __t, int __i, + const char *__f, int __l, const char *__g) +{ + if (TREE_CODE (__t) != INTEGER_CST) + tree_check_failed (__t, __f, __l, __g, INTEGER_CST, 0); + if (__i < 0 || __i >= __t->base.u.length) + tree_int_cst_elt_check_failed (__i, __t->base.u.length, __f, __l, __g); + return &CONST_CAST_TREE (__t)->int_cst.val[__i]; +} + +inline HOST_WIDE_INT * +tree_int_cst_elt_check (tree __t, int __i, + const char *__f, int __l, const char *__g) +{ + if (TREE_CODE (__t) != INTEGER_CST) + tree_check_failed (__t, __f, __l, __g, INTEGER_CST, 0); + if (__i < 0 || __i >= __t->base.u.length) + tree_int_cst_elt_check_failed (__i, __t->base.u.length, __f, __l, __g); + return &CONST_CAST_TREE (__t)->int_cst.val[__i]; +} + inline tree * tree_vec_elt_check (tree __t, int __i, const char *__f, int __l, const char *__g) @@ -3085,6 +3109,175 @@ omp_clause_elt_check (const_tree __t, int __i, #endif +/* Checks that X is integer constant that can be expressed in signed + HOST_WIDE_INT without loss of precision. 
This function differs + from the tree_fits_* versions in that the type of signedness of the + type of X is not considered. */ + +static inline bool +cst_fits_shwi_p (const_tree x) +{ + if (TREE_CODE (x) != INTEGER_CST) + return false; + + return TREE_INT_CST_NUNITS (x) == 1 + || (TREE_INT_CST_NUNITS (x) == 2 && TREE_INT_CST_ELT (x, 1) == 0); +} + +/* Checks that X is integer constant that can be expressed in signed + HOST_WIDE_INT without loss of precision. This function differs + from the tree_fits_* versions in that the type of signedness of the + type of X is not considered. */ + +static inline bool +cst_fits_uhwi_p (const_tree x) +{ + if (TREE_CODE (x) != INTEGER_CST) + return false; + + return TREE_INT_CST_NUNITS (x) == 1 && TREE_INT_CST_ELT (x, 0) >= 0; +} + +/* Return true if T is an INTEGER_CST whose value must be non-negative + and can be represented in a single unsigned HOST_WIDE_INT. This + function differs from the cst_fits versions in that the signedness + of the type of cst is considered. */ + +static inline bool +tree_fits_uhwi_p (const_tree cst) +{ + tree type; + if (cst == NULL_TREE) + return false; + + type = TREE_TYPE (cst); + + if (TREE_CODE (cst) != INTEGER_CST) + return false; + + if (TREE_INT_CST_NUNITS (cst) == 1) + { + if ((TYPE_SIGN (type) == UNSIGNED) + && (TYPE_PRECISION (type) <= HOST_BITS_PER_WIDE_INT)) + return true; + + /* For numbers of unsigned type that are longer than a HWI, if + the top bit of the bottom word is set, and there is not + another element, then this is too large to fit in a single + hwi. For signed numbers, negative values are not allowed. */ + if (TREE_INT_CST_ELT (cst, 0) >= 0) + return true; + } + else if (TREE_INT_CST_NUNITS (cst) == 2) + { + if (TREE_INT_CST_ELT (cst, 1) == 0) + return true; + } + return false; +} + +/* Return true if CST is an INTEGER_CST whose value can be represented + in a single HOST_WIDE_INT. 
This function differs from the cst_fits + versions in that the signedness of the type of cst is + considered. */ + +static inline bool +tree_fits_shwi_p (const_tree cst) +{ + if (cst == NULL_TREE) + return false; + + if (TREE_CODE (cst) != INTEGER_CST) + return false; + + if (TREE_INT_CST_NUNITS (cst) != 1) + return false; + + if (TYPE_SIGN (TREE_TYPE (cst)) == SIGNED) + return true; + + if (TREE_INT_CST_ELT (cst, 0) >= 0) + return true; + + return false; +} + +/* Return true if T is an INTEGER_CST that can be manipulated + efficiently on the host. If SIGN is SIGNED, the value can be + represented in a single HOST_WIDE_INT. If SIGN is UNSIGNED, the + value must be non-negative and can be represented in a single + unsigned HOST_WIDE_INT. */ + +static inline bool +tree_fits_hwi_p (const_tree cst, signop sign) +{ + return sign ? tree_fits_uhwi_p (cst) : tree_fits_shwi_p (cst); +} + +/* Return true if T is an INTEGER_CST that can be manipulated + efficiently on the host. If the sign of CST is SIGNED, the value + can be represented in a single HOST_WIDE_INT. If the sign of CST + is UNSIGNED, the value must be non-negative and can be represented + in a single unsigned HOST_WIDE_INT. */ + +static inline bool +tree_fits_hwi_p (const_tree cst) +{ + if (cst == NULL_TREE) + return false; + + if (TREE_CODE (cst) != INTEGER_CST) + return false; + + return TYPE_UNSIGNED (TREE_TYPE (cst)) + ? tree_fits_uhwi_p (cst) : tree_fits_shwi_p (cst); +} + +/* Return the unsigned HOST_WIDE_INT least significant bits of CST. + If checking is enabled, this ices if the value does not fit. */ + +static inline unsigned HOST_WIDE_INT +tree_to_uhwi (const_tree cst) +{ + gcc_checking_assert (tree_fits_uhwi_p (cst)); + + return (unsigned HOST_WIDE_INT)TREE_INT_CST_ELT (cst, 0); +} + +/* Return the HOST_WIDE_INT least significant bits of CST. If + checking is enabled, this ices if the value does not fit. 
*/ + +static inline HOST_WIDE_INT +tree_to_shwi (const_tree cst) +{ + gcc_checking_assert (tree_fits_shwi_p (cst)); + + return (HOST_WIDE_INT)TREE_INT_CST_ELT (cst, 0); +} + +/* Return the HOST_WIDE_INT least significant bits of CST. No + checking is done to assure that it fits. It is assumed that one of + tree_fits_uhwi_p or tree_fits_shwi_p was done before this call. */ + +static inline HOST_WIDE_INT +tree_to_hwi (const_tree cst) +{ + return TREE_INT_CST_ELT (cst, 0); +} + +/* Return the HOST_WIDE_INT least significant bits of CST. The sign + of the checking is based on SIGNOP. */ + +static inline HOST_WIDE_INT +tree_to_hwi (const_tree cst, signop sgn) +{ + if (sgn == SIGNED) + return tree_to_shwi (cst); + else + return tree_to_uhwi (cst); +} + + /* Compute the number of operands in an expression node NODE. For tcc_vl_exp nodes like CALL_EXPRs, this is stored in the node itself, otherwise it is looked up from the node's code. */ @@ -3338,8 +3531,6 @@ tree_operand_check_code (const_tree __t, enum tree_code __code, int __i, #define int128_integer_type_node integer_types[itk_int128] #define int128_unsigned_type_node integer_types[itk_unsigned_int128] -#define NULL_TREE (tree) NULL - /* True if NODE is an erroneous expression. */ #define error_operand_p(NODE) \ @@ -3355,9 +3546,9 @@ extern hashval_t decl_assembler_name_hash (const_tree asmname); extern size_t tree_size (const_tree); -/* Compute the number of bytes occupied by a tree with code CODE. This - function cannot be used for TREE_VEC codes, which are of variable - length. */ +/* Compute the number of bytes occupied by a tree with code CODE. + This function cannot be used for TREE_VEC or INTEGER_CST nodes, + which are of variable length. */ extern size_t tree_code_size (enum tree_code); /* Allocate and return a new UID from the DECL_UID namespace. 
*/ @@ -3387,6 +3578,11 @@ extern tree build_case_label (tree, tree, tree); extern tree make_tree_binfo_stat (unsigned MEM_STAT_DECL); #define make_tree_binfo(t) make_tree_binfo_stat (t MEM_STAT_INFO) +/* Make a INTEGER_CST. */ + +extern tree make_int_cst_stat (int MEM_STAT_DECL); +#define make_int_cst(t) make_int_cst_stat (t MEM_STAT_INFO) + /* Make a TREE_VEC. */ extern tree make_tree_vec_stat (int MEM_STAT_DECL); @@ -3503,27 +3699,18 @@ extern tree build_var_debug_value_stat (tree, tree MEM_STAT_DECL); /* Constructs double_int from tree CST. */ -static inline double_int -tree_to_double_int (const_tree cst) -{ - return TREE_INT_CST (cst); -} - extern tree double_int_to_tree (tree, double_int); -extern bool double_int_fits_to_tree_p (const_tree, double_int); -extern tree force_fit_type_double (tree, double_int, int, bool); -/* Create an INT_CST node with a CST value zero extended. */ +extern addr_wide_int mem_ref_offset (const_tree); +extern tree wide_int_to_tree (tree type, const wide_int_ref &cst); +extern tree force_fit_type (tree, const wide_int_ref &, int, bool); -static inline tree -build_int_cstu (tree type, unsigned HOST_WIDE_INT cst) -{ - return double_int_to_tree (type, double_int::from_uhwi (cst)); -} +/* Create an INT_CST node with a CST value zero extended. 
*/ +/* static inline */ extern tree build_int_cst (tree, HOST_WIDE_INT); +extern tree build_int_cstu (tree type, unsigned HOST_WIDE_INT cst); extern tree build_int_cst_type (tree, HOST_WIDE_INT); -extern tree build_int_cst_wide (tree, unsigned HOST_WIDE_INT, HOST_WIDE_INT); extern tree make_vector_stat (unsigned MEM_STAT_DECL); #define make_vector(n) make_vector_stat (n MEM_STAT_INFO) extern tree build_vector_stat (tree, tree * MEM_STAT_DECL); @@ -3615,24 +3802,10 @@ extern int attribute_list_contained (const_tree, const_tree); extern int tree_int_cst_equal (const_tree, const_tree); extern int tree_int_cst_lt (const_tree, const_tree); extern int tree_int_cst_compare (const_tree, const_tree); -extern int host_integerp (const_tree, int) -#ifndef ENABLE_TREE_CHECKING - ATTRIBUTE_PURE /* host_integerp is pure only when checking is disabled. */ -#endif - ; -extern HOST_WIDE_INT tree_low_cst (const_tree, int); -#if !defined ENABLE_TREE_CHECKING && (GCC_VERSION >= 4003) -extern inline __attribute__ ((__gnu_inline__)) HOST_WIDE_INT -tree_low_cst (const_tree t, int pos) -{ - gcc_assert (host_integerp (t, pos)); - return TREE_INT_CST_LOW (t); -} -#endif extern HOST_WIDE_INT size_low_cst (const_tree); extern int tree_int_cst_sgn (const_tree); extern int tree_int_cst_sign_bit (const_tree); -extern unsigned int tree_int_cst_min_precision (tree, bool); +extern unsigned int tree_int_cst_min_precision (tree, signop); extern bool tree_expr_nonnegative_p (tree); extern bool tree_expr_nonnegative_warnv_p (tree, bool *); extern bool may_negate_without_overflow_p (const_tree); @@ -3980,7 +4153,6 @@ extern int integer_pow2p (const_tree); extern int integer_nonzerop (const_tree); -extern bool cst_and_fits_in_hwi (const_tree); extern tree num_ending_zeros (const_tree); /* fixed_zerop (tree x) is nonzero if X is a fixed-point constant of @@ -4403,10 +4575,9 @@ extern tree fold_indirect_ref_loc (location_t, tree); extern tree build_simple_mem_ref_loc (location_t, tree); #define 
build_simple_mem_ref(T)\ build_simple_mem_ref_loc (UNKNOWN_LOCATION, T) -extern double_int mem_ref_offset (const_tree); extern tree build_invariant_address (tree, tree, HOST_WIDE_INT); extern tree constant_boolean_node (bool, tree); -extern tree div_if_zero_remainder (enum tree_code, const_tree, const_tree); +extern tree div_if_zero_remainder (const_tree, const_tree); extern bool tree_swap_operands_p (const_tree, const_tree, bool); extern enum tree_code swap_tree_comparison (enum tree_code); @@ -4694,7 +4865,7 @@ extern tree get_attribute_namespace (const_tree); extern void apply_tm_attr (tree, tree); /* In stor-layout.c */ -extern void set_min_and_max_values_for_integral_type (tree, int, bool); +extern void set_min_and_max_values_for_integral_type (tree, int, signop); extern void fixup_signed_type (tree); extern void internal_reference_types (void); extern unsigned int update_alignment_for_field (record_layout_info, tree, @@ -5062,5 +5233,107 @@ may_be_aliased (const_tree var) #endif /* NO_DOLLAR_IN_LABEL */ #endif /* NO_DOT_IN_LABEL */ +/* The tree and const_tree overload templates. */ +namespace wi +{ + template <> + struct int_traits <const_tree> + { + static const enum precision_type precision_type = FLEXIBLE_PRECISION; + static const bool host_dependent_precision = false; + static unsigned int get_precision (const_tree); + static wi::storage_ref decompose (HOST_WIDE_INT *, unsigned int, + const_tree); + }; + + template <> + struct int_traits <tree> : public int_traits <const_tree> {}; +} + +inline unsigned int +wi::int_traits <const_tree>::get_precision (const_tree tcst) +{ + return TYPE_PRECISION (TREE_TYPE (tcst)); +} + +/* Convert the tree_cst X into a wide_int of PRECISION. 
*/ +inline wi::storage_ref +wi::int_traits <const_tree>::decompose (HOST_WIDE_INT *scratch, + unsigned int precision, const_tree x) +{ + unsigned int len = TREE_INT_CST_NUNITS (x); + const HOST_WIDE_INT *val = (const HOST_WIDE_INT *) &TREE_INT_CST_ELT (x, 0); + unsigned int max_len = ((precision + HOST_BITS_PER_WIDE_INT - 1) + / HOST_BITS_PER_WIDE_INT); + unsigned int xprecision = get_precision (x); + + gcc_assert (precision >= xprecision); + + /* Got to be careful of precision 0 values. */ + if (precision) + len = MIN (len, max_len); + if (TYPE_SIGN (TREE_TYPE (x)) == UNSIGNED) + { + unsigned int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1); + if (small_prec) + { + /* We have to futz with this because the canonization for + short unsigned numbers in wide-int is different from the + canonized short unsigned numbers in the tree-cst. */ + if (len == max_len) + { + for (unsigned int i = 0; i < len - 1; i++) + scratch[i] = val[i]; + scratch[len - 1] = sext_hwi (val[len - 1], precision); + return wi::storage_ref (scratch, len, precision); + } + } + /* We have to futz here because a large unsigned int with + precision 128 may look (0x0 0xFFFFFFFFFFFFFFFF 0xF...) as a + tree-cst and as (0xF...) as a wide-int. */ + else if (precision == xprecision && len == max_len) + while (len > 1 && val[len - 1] == (HOST_WIDE_INT)-1) + len--; + } + + /* Signed and the rest of the unsigned cases are easy. */ + return wi::storage_ref (val, len, precision); +} + +namespace wi +{ + template <typename T> + bool fits_to_tree_p (const T &x, const_tree); + + wide_int min_value (const_tree); + wide_int max_value (const_tree); + wide_int from_mpz (const_tree, mpz_t, bool); +} + +template <typename T> +bool +wi::fits_to_tree_p (const T &x, const_tree type) +{ + if (TYPE_SIGN (type) == UNSIGNED) + return x == zext (x, TYPE_PRECISION (type)); + else + return x == sext (x, TYPE_PRECISION (type)); +} + +/* Produce the smallest number that is represented in TYPE. 
The precision + and sign are taken from TYPE. */ +inline wide_int +wi::min_value (const_tree type) +{ + return min_value (TYPE_PRECISION (type), TYPE_SIGN (type)); +} + +/* Produce the largest number that is represented in TYPE. The precision + and sign are taken from TYPE. */ +inline wide_int +wi::max_value (const_tree type) +{ + return max_value (TYPE_PRECISION (type), TYPE_SIGN (type)); +} #endif /* GCC_TREE_H */ diff --git a/gcc/tsan.c b/gcc/tsan.c index 3619f25045d..6ddacfe3c94 100644 --- a/gcc/tsan.c +++ b/gcc/tsan.c @@ -443,8 +443,8 @@ instrument_builtin_call (gimple_stmt_iterator *gsi) case check_last: case fetch_op: last_arg = gimple_call_arg (stmt, num - 1); - if (!host_integerp (last_arg, 1) - || (unsigned HOST_WIDE_INT) tree_low_cst (last_arg, 1) + if (!tree_fits_uhwi_p (last_arg) + || (unsigned HOST_WIDE_INT) tree_to_uhwi (last_arg) > MEMMODEL_SEQ_CST) return; gimple_call_set_fndecl (stmt, decl); @@ -515,12 +515,12 @@ instrument_builtin_call (gimple_stmt_iterator *gsi) gcc_assert (num == 6); for (j = 0; j < 6; j++) args[j] = gimple_call_arg (stmt, j); - if (!host_integerp (args[4], 1) - || (unsigned HOST_WIDE_INT) tree_low_cst (args[4], 1) + if (!tree_fits_uhwi_p (args[4]) + || (unsigned HOST_WIDE_INT) tree_to_uhwi (args[4]) > MEMMODEL_SEQ_CST) return; - if (!host_integerp (args[5], 1) - || (unsigned HOST_WIDE_INT) tree_low_cst (args[5], 1) + if (!tree_fits_uhwi_p (args[5]) + || (unsigned HOST_WIDE_INT) tree_to_uhwi (args[5]) > MEMMODEL_SEQ_CST) return; update_gimple_call (gsi, decl, 5, args[0], args[1], args[2], diff --git a/gcc/ubsan.c b/gcc/ubsan.c index 9dc19c9d29a..9e9b94da12a 100644 --- a/gcc/ubsan.c +++ b/gcc/ubsan.c @@ -233,8 +233,8 @@ ubsan_source_location (location_t loc) static unsigned short get_ubsan_type_info_for_type (tree type) { - gcc_assert (TYPE_SIZE (type) && host_integerp (TYPE_SIZE (type), 1)); - int prec = exact_log2 (tree_low_cst (TYPE_SIZE (type), 1)); + gcc_assert (TYPE_SIZE (type) && tree_fits_uhwi_p (TYPE_SIZE (type))); + 
int prec = exact_log2 (tree_to_uhwi (TYPE_SIZE (type))); gcc_assert (prec != -1); return (prec << 1) | !TYPE_UNSIGNED (type); } diff --git a/gcc/value-prof.c b/gcc/value-prof.c index b19aefbfdd8..29f51e7457a 100644 --- a/gcc/value-prof.c +++ b/gcc/value-prof.c @@ -806,9 +806,17 @@ gimple_divmod_fixed_value_transform (gimple_stmt_iterator *si) else prob = 0; - tree_val = build_int_cst_wide (get_gcov_type (), - (unsigned HOST_WIDE_INT) val, - val >> (HOST_BITS_PER_WIDE_INT - 1) >> 1); + if (sizeof (gcov_type) == sizeof (HOST_WIDE_INT)) + tree_val = build_int_cst (get_gcov_type (), val); + else + { + HOST_WIDE_INT a[2]; + a[0] = (unsigned HOST_WIDE_INT) val; + a[1] = val >> (HOST_BITS_PER_WIDE_INT - 1) >> 1; + + tree_val = wide_int_to_tree (get_gcov_type (), wide_int::from_array (a, 2, + TYPE_PRECISION (get_gcov_type ()), false)); + } result = gimple_divmod_fixed_value (stmt, tree_val, prob, count, all); if (dump_file) @@ -1717,9 +1725,18 @@ gimple_stringops_transform (gimple_stmt_iterator *gsi) default: gcc_unreachable (); } - tree_val = build_int_cst_wide (get_gcov_type (), - (unsigned HOST_WIDE_INT) val, - val >> (HOST_BITS_PER_WIDE_INT - 1) >> 1); + if (sizeof (gcov_type) == sizeof (HOST_WIDE_INT)) + tree_val = build_int_cst (get_gcov_type (), val); + else + { + HOST_WIDE_INT a[2]; + a[0] = (unsigned HOST_WIDE_INT) val; + a[1] = val >> (HOST_BITS_PER_WIDE_INT - 1) >> 1; + + tree_val = wide_int_to_tree (get_gcov_type (), wide_int::from_array (a, 2, + TYPE_PRECISION (get_gcov_type ()), false)); + } + if (dump_file) { fprintf (dump_file, "Single value %i stringop transformation on ", diff --git a/gcc/var-tracking.c b/gcc/var-tracking.c index 24c61ccd2a2..89c53efc95a 100644 --- a/gcc/var-tracking.c +++ b/gcc/var-tracking.c @@ -3525,6 +3525,23 @@ loc_cmp (rtx x, rtx y) default: gcc_unreachable (); } + if (CONST_WIDE_INT_P (x)) + { + /* Compare the vector length first. 
*/ + if (CONST_WIDE_INT_NUNITS (x) >= CONST_WIDE_INT_NUNITS (y)) + return 1; + else if (CONST_WIDE_INT_NUNITS (x) < CONST_WIDE_INT_NUNITS (y)) + return -1; + + /* Compare the vectors elements. */; + for (j = CONST_WIDE_INT_NUNITS (x) - 1; j >= 0 ; j--) + { + if (CONST_WIDE_INT_ELT (x, j) < CONST_WIDE_INT_ELT (y, j)) + return -1; + if (CONST_WIDE_INT_ELT (x, j) > CONST_WIDE_INT_ELT (y, j)) + return 1; + } + } return 0; } @@ -6288,9 +6305,9 @@ prepare_call_arguments (basic_block bb, rtx insn) && DECL_INITIAL (SYMBOL_REF_DECL (l->loc))) { initial = DECL_INITIAL (SYMBOL_REF_DECL (l->loc)); - if (host_integerp (initial, 0)) + if (tree_fits_shwi_p (initial)) { - item = GEN_INT (tree_low_cst (initial, 0)); + item = GEN_INT (tree_to_shwi (initial)); item = gen_rtx_CONCAT (indmode, mem, item); call_arguments = gen_rtx_EXPR_LIST (VOIDmode, item, @@ -6369,7 +6386,7 @@ prepare_call_arguments (basic_block bb, rtx insn) = TYPE_MODE (TREE_TYPE (OBJ_TYPE_REF_EXPR (obj_type_ref))); rtx clobbered = gen_rtx_MEM (mode, this_arg); HOST_WIDE_INT token - = tree_low_cst (OBJ_TYPE_REF_TOKEN (obj_type_ref), 0); + = tree_to_shwi (OBJ_TYPE_REF_TOKEN (obj_type_ref)); if (token) clobbered = plus_constant (mode, clobbered, token * GET_MODE_SIZE (mode)); @@ -8667,7 +8684,7 @@ emit_note_insn_var_location (variable_def **varp, emit_note_data *data) ++n_var_parts; } type_size_unit = TYPE_SIZE_UNIT (TREE_TYPE (decl)); - if ((unsigned HOST_WIDE_INT) last_limit < TREE_INT_CST_LOW (type_size_unit)) + if ((unsigned HOST_WIDE_INT) last_limit < tree_to_uhwi (type_size_unit)) complete = false; if (! flag_var_tracking_uninit) diff --git a/gcc/varasm.c b/gcc/varasm.c index acf8af083d4..8d157be612b 100644 --- a/gcc/varasm.c +++ b/gcc/varasm.c @@ -1137,7 +1137,7 @@ get_block_for_decl (tree decl) constant size. 
*/ if (DECL_SIZE_UNIT (decl) == NULL) return NULL; - if (!host_integerp (DECL_SIZE_UNIT (decl), 1)) + if (!tree_fits_uhwi_p (DECL_SIZE_UNIT (decl))) return NULL; /* Find out which section should contain DECL. We cannot put it into @@ -1903,7 +1903,7 @@ assemble_noswitch_variable (tree decl, const char *name, section *sect, { unsigned HOST_WIDE_INT size, rounded; - size = tree_low_cst (DECL_SIZE_UNIT (decl), 1); + size = tree_to_uhwi (DECL_SIZE_UNIT (decl)); rounded = size; if ((flag_sanitize & SANITIZE_ADDRESS) && asan_protect_global (decl)) @@ -1950,11 +1950,11 @@ assemble_variable_contents (tree decl, const char *name, && !initializer_zerop (DECL_INITIAL (decl))) /* Output the actual data. */ output_constant (DECL_INITIAL (decl), - tree_low_cst (DECL_SIZE_UNIT (decl), 1), + tree_to_uhwi (DECL_SIZE_UNIT (decl)), get_variable_align (decl)); else /* Leave space for it. */ - assemble_zeros (tree_low_cst (DECL_SIZE_UNIT (decl), 1)); + assemble_zeros (tree_to_uhwi (DECL_SIZE_UNIT (decl))); } } @@ -2140,7 +2140,7 @@ assemble_variable (tree decl, int top_level ATTRIBUTE_UNUSED, if (asan_protected) { unsigned HOST_WIDE_INT int size - = tree_low_cst (DECL_SIZE_UNIT (decl), 1); + = tree_to_uhwi (DECL_SIZE_UNIT (decl)); assemble_zeros (asan_red_zone_size (size)); } } @@ -2723,7 +2723,7 @@ decode_addr_const (tree exp, struct addr_const *value) while (1) { if (TREE_CODE (target) == COMPONENT_REF - && host_integerp (byte_position (TREE_OPERAND (target, 1)), 0)) + && tree_fits_shwi_p (byte_position (TREE_OPERAND (target, 1)))) { offset += int_byte_position (TREE_OPERAND (target, 1)); target = TREE_OPERAND (target, 0); @@ -2731,14 +2731,14 @@ decode_addr_const (tree exp, struct addr_const *value) else if (TREE_CODE (target) == ARRAY_REF || TREE_CODE (target) == ARRAY_RANGE_REF) { - offset += (tree_low_cst (TYPE_SIZE_UNIT (TREE_TYPE (target)), 1) - * tree_low_cst (TREE_OPERAND (target, 1), 0)); + offset += (tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (target))) + * tree_to_shwi 
(TREE_OPERAND (target, 1))); target = TREE_OPERAND (target, 0); } else if (TREE_CODE (target) == MEM_REF && TREE_CODE (TREE_OPERAND (target, 0)) == ADDR_EXPR) { - offset += mem_ref_offset (target).low; + offset += mem_ref_offset (target).to_short_addr (); target = TREE_OPERAND (TREE_OPERAND (target, 0), 0); } else if (TREE_CODE (target) == INDIRECT_REF @@ -2818,8 +2818,8 @@ const_hash_1 (const tree exp) switch (code) { case INTEGER_CST: - p = (char *) &TREE_INT_CST (exp); - len = sizeof TREE_INT_CST (exp); + p = (char *) &TREE_INT_CST_ELT (exp, 0); + len = TREE_INT_CST_NUNITS (exp) * sizeof (HOST_WIDE_INT); break; case REAL_CST: @@ -3526,6 +3526,7 @@ const_rtx_hash_1 (rtx *xp, void *data) enum rtx_code code; hashval_t h, *hp; rtx x; + int i; x = *xp; code = GET_CODE (x); @@ -3536,12 +3537,12 @@ const_rtx_hash_1 (rtx *xp, void *data) { case CONST_INT: hwi = INTVAL (x); + fold_hwi: { int shift = sizeof (hashval_t) * CHAR_BIT; const int n = sizeof (HOST_WIDE_INT) / sizeof (hashval_t); - int i; - + h ^= (hashval_t) hwi; for (i = 1; i < n; ++i) { @@ -3551,8 +3552,16 @@ const_rtx_hash_1 (rtx *xp, void *data) } break; + case CONST_WIDE_INT: + hwi = GET_MODE_PRECISION (mode); + { + for (i = 0; i < CONST_WIDE_INT_NUNITS (x); i++) + hwi ^= CONST_WIDE_INT_ELT (x, i); + goto fold_hwi; + } + case CONST_DOUBLE: - if (mode == VOIDmode) + if (TARGET_SUPPORTS_WIDE_INT == 0 && mode == VOIDmode) { hwi = CONST_DOUBLE_LOW (x) ^ CONST_DOUBLE_HIGH (x); goto fold_hwi; @@ -4643,8 +4652,7 @@ output_constant (tree exp, unsigned HOST_WIDE_INT size, unsigned int align) exp = build1 (ADDR_EXPR, saved_type, TREE_OPERAND (exp, 0)); /* Likewise for constant ints. 
*/ else if (TREE_CODE (exp) == INTEGER_CST) - exp = build_int_cst_wide (saved_type, TREE_INT_CST_LOW (exp), - TREE_INT_CST_HIGH (exp)); + exp = wide_int_to_tree (saved_type, exp); } @@ -4684,7 +4692,7 @@ output_constant (tree exp, unsigned HOST_WIDE_INT size, unsigned int align) if (TREE_CODE (exp) == FDESC_EXPR) { #ifdef ASM_OUTPUT_FDESC - HOST_WIDE_INT part = tree_low_cst (TREE_OPERAND (exp, 1), 0); + HOST_WIDE_INT part = tree_to_shwi (TREE_OPERAND (exp, 1)); tree decl = TREE_OPERAND (exp, 0); ASM_OUTPUT_FDESC (asm_out_file, decl, part); #else @@ -4782,7 +4790,7 @@ array_size_for_constructor (tree val) tree max_index; unsigned HOST_WIDE_INT cnt; tree index, value, tmp; - double_int i; + addr_wide_int i; /* This code used to attempt to handle string constants that are not arrays of single-bytes, but nothing else does, so there's no point in @@ -4804,14 +4812,13 @@ array_size_for_constructor (tree val) /* Compute the total number of array elements. */ tmp = TYPE_MIN_VALUE (TYPE_DOMAIN (TREE_TYPE (val))); - i = tree_to_double_int (max_index) - tree_to_double_int (tmp); - i += double_int_one; + i = addr_wide_int (max_index) - tmp + 1; /* Multiply by the array element unit size to find number of bytes. */ - i *= tree_to_double_int (TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (val)))); + i *= addr_wide_int (TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (val)))); - gcc_assert (i.fits_uhwi ()); - return i.low; + gcc_assert (wi::fits_uhwi_p (i)); + return i.to_uhwi (); } /* Other datastructures + helpers for output_constructor. 
*/ @@ -4851,9 +4858,9 @@ output_constructor_array_range (oc_local_state *local) = int_size_in_bytes (TREE_TYPE (local->type)); HOST_WIDE_INT lo_index - = tree_low_cst (TREE_OPERAND (local->index, 0), 0); + = tree_to_shwi (TREE_OPERAND (local->index, 0)); HOST_WIDE_INT hi_index - = tree_low_cst (TREE_OPERAND (local->index, 1), 0); + = tree_to_shwi (TREE_OPERAND (local->index, 1)); HOST_WIDE_INT index; unsigned int align2 @@ -4891,11 +4898,9 @@ output_constructor_regular_field (oc_local_state *local) sign-extend the result because Ada has negative DECL_FIELD_OFFSETs but we are using an unsigned sizetype. */ unsigned prec = TYPE_PRECISION (sizetype); - double_int idx = tree_to_double_int (local->index) - - tree_to_double_int (local->min_index); - idx = idx.sext (prec); - fieldpos = (tree_low_cst (TYPE_SIZE_UNIT (TREE_TYPE (local->val)), 1) - * idx.low); + addr_wide_int idx + = wi::sext (addr_wide_int (local->index) - local->min_index, prec); + fieldpos = (idx * TYPE_SIZE_UNIT (TREE_TYPE (local->val))).to_shwi (); } else if (local->field != NULL_TREE) fieldpos = int_byte_position (local->field); @@ -4944,7 +4949,7 @@ output_constructor_regular_field (oc_local_state *local) gcc_assert (!fieldsize || !DECL_CHAIN (local->field)); } else - fieldsize = tree_low_cst (DECL_SIZE_UNIT (local->field), 1); + fieldsize = tree_to_uhwi (DECL_SIZE_UNIT (local->field)); } else fieldsize = int_size_in_bytes (TREE_TYPE (local->type)); @@ -4969,15 +4974,15 @@ output_constructor_bitfield (oc_local_state *local, unsigned int bit_offset) /* Bit size of this element. */ HOST_WIDE_INT ebitsize = (local->field - ? tree_low_cst (DECL_SIZE (local->field), 1) - : tree_low_cst (TYPE_SIZE (TREE_TYPE (local->type)), 1)); + ? tree_to_uhwi (DECL_SIZE (local->field)) + : tree_to_uhwi (TYPE_SIZE (TREE_TYPE (local->type)))); /* Relative index of this element if this is an array component. */ HOST_WIDE_INT relative_index = (!local->field ? (local->index - ? 
(tree_low_cst (local->index, 0) - - tree_low_cst (local->min_index, 0)) + ? (tree_to_shwi (local->index) + - tree_to_shwi (local->min_index)) : local->last_relative_index + 1) : 0); @@ -5088,22 +5093,13 @@ output_constructor_bitfield (oc_local_state *local, unsigned int bit_offset) the word boundary in the INTEGER_CST. We can only select bits from the LOW or HIGH part not from both. */ - if (shift < HOST_BITS_PER_WIDE_INT - && shift + this_time > HOST_BITS_PER_WIDE_INT) - { - this_time = shift + this_time - HOST_BITS_PER_WIDE_INT; - shift = HOST_BITS_PER_WIDE_INT; - } + if ((shift / HOST_BITS_PER_WIDE_INT) + != ((shift + this_time) / HOST_BITS_PER_WIDE_INT)) + this_time = (shift + this_time) & (HOST_BITS_PER_WIDE_INT - 1); /* Now get the bits from the appropriate constant word. */ - if (shift < HOST_BITS_PER_WIDE_INT) - value = TREE_INT_CST_LOW (local->val); - else - { - gcc_assert (shift < HOST_BITS_PER_DOUBLE_INT); - value = TREE_INT_CST_HIGH (local->val); - shift -= HOST_BITS_PER_WIDE_INT; - } + value = TREE_INT_CST_ELT (local->val, shift / HOST_BITS_PER_WIDE_INT); + shift = shift & (HOST_BITS_PER_WIDE_INT - 1); /* Get the result. This works only when: 1 <= this_time <= HOST_BITS_PER_WIDE_INT. */ @@ -5123,19 +5119,13 @@ output_constructor_bitfield (oc_local_state *local, unsigned int bit_offset) the word boundary in the INTEGER_CST. We can only select bits from the LOW or HIGH part not from both. */ - if (shift < HOST_BITS_PER_WIDE_INT - && shift + this_time > HOST_BITS_PER_WIDE_INT) + if ((shift / HOST_BITS_PER_WIDE_INT) + != ((shift + this_time) / HOST_BITS_PER_WIDE_INT)) this_time = (HOST_BITS_PER_WIDE_INT - shift); /* Now get the bits from the appropriate constant word. 
*/ - if (shift < HOST_BITS_PER_WIDE_INT) - value = TREE_INT_CST_LOW (local->val); - else - { - gcc_assert (shift < HOST_BITS_PER_DOUBLE_INT); - value = TREE_INT_CST_HIGH (local->val); - shift -= HOST_BITS_PER_WIDE_INT; - } + value = TREE_INT_CST_ELT (local->val, shift / HOST_BITS_PER_WIDE_INT); + shift = shift & (HOST_BITS_PER_WIDE_INT - 1); /* Get the result. This works only when: 1 <= this_time <= HOST_BITS_PER_WIDE_INT. */ @@ -7089,7 +7079,7 @@ place_block_symbol (rtx symbol) { decl = SYMBOL_REF_DECL (symbol); alignment = get_variable_align (decl); - size = tree_low_cst (DECL_SIZE_UNIT (decl), 1); + size = tree_to_uhwi (DECL_SIZE_UNIT (decl)); if ((flag_sanitize & SANITIZE_ADDRESS) && asan_protect_global (decl)) { @@ -7255,7 +7245,7 @@ output_object_block (struct object_block *block) HOST_WIDE_INT size; decl = SYMBOL_REF_DECL (symbol); assemble_variable_contents (decl, XSTR (symbol, 0), false); - size = tree_low_cst (DECL_SIZE_UNIT (decl), 1); + size = tree_to_uhwi (DECL_SIZE_UNIT (decl)); offset += size; if ((flag_sanitize & SANITIZE_ADDRESS) && asan_protect_global (decl)) diff --git a/gcc/wide-int-print.cc b/gcc/wide-int-print.cc new file mode 100644 index 00000000000..c83d9e44a0d --- /dev/null +++ b/gcc/wide-int-print.cc @@ -0,0 +1,145 @@ +/* Printing operations with very long integers. + Copyright (C) 2012-2013 Free Software Foundation, Inc. + Contributed by Kenneth Zadeck <zadeck@naturalbridge.com> + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify it +under the terms of the GNU General Public License as published by the +Free Software Foundation; either version 3, or (at your option) any +later version. + +GCC is distributed in the hope that it will be useful, but WITHOUT +ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +for more details. 
+ +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING3. If not see +<http://www.gnu.org/licenses/>. */ + +#include "config.h" +#include "system.h" +#include "coretypes.h" +#include "tm.h" +#include "hwint.h" +#include "wide-int.h" +#include "wide-int-print.h" + +/* + * public printing routines. + */ + +#define BLOCKS_NEEDED(PREC) \ + (((PREC) + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT) + +void +print_dec (const wide_int &wi, char *buf, signop sgn) +{ + if (sgn == SIGNED) + print_decs (wi, buf); + else + print_decu (wi, buf); +} + +void +print_dec (const wide_int &wi, FILE *file, signop sgn) +{ + if (sgn == SIGNED) + print_decs (wi, file); + else + print_decu (wi, file); +} + + +/* Try to print the signed self in decimal to BUF if the number fits + in a HWI. Other print in hex. */ + +void +print_decs (const wide_int &wi, char *buf) +{ + if ((wi.get_precision () <= HOST_BITS_PER_WIDE_INT) + || (wi.get_len () == 1)) + { + if (wi::neg_p (wi)) + sprintf (buf, "-" HOST_WIDE_INT_PRINT_UNSIGNED, -wi.to_shwi ()); + else + sprintf (buf, HOST_WIDE_INT_PRINT_DEC, wi.to_shwi ()); + } + else + print_hex (wi, buf); +} + +/* Try to print the signed self in decimal to FILE if the number fits + in a HWI. Other print in hex. */ + +void +print_decs (const wide_int &wi, FILE *file) +{ + char buf[WIDE_INT_PRINT_BUFFER_SIZE]; + print_decs (wi, buf); + fputs (buf, file); +} + +/* Try to print the unsigned self in decimal to BUF if the number fits + in a HWI. Other print in hex. */ + +void +print_decu (const wide_int &wi, char *buf) +{ + if ((wi.get_precision () <= HOST_BITS_PER_WIDE_INT) + || (wi.get_len () == 1 && !wi::neg_p (wi))) + sprintf (buf, HOST_WIDE_INT_PRINT_UNSIGNED, wi.to_uhwi ()); + else + print_hex (wi, buf); +} + +/* Try to print the signed self in decimal to FILE if the number fits + in a HWI. Other print in hex. 
*/ + +void +print_decu (const wide_int &wi, FILE *file) +{ + char buf[WIDE_INT_PRINT_BUFFER_SIZE]; + print_decu (wi, buf); + fputs (buf, file); +} + +void +print_hex (const wide_int &wi, char *buf) +{ + int i = wi.get_len (); + + if (wi == 0) + buf += sprintf (buf, "0x0"); + else + { + if (wi::neg_p (wi)) + { + int j; + /* If the number is negative, we may need to pad value with + 0xFFF... because the leading elements may be missing and + we do not print a '-' with hex. */ + buf += sprintf (buf, "0x"); + for (j = BLOCKS_NEEDED (wi.get_precision ()); j > i; j--) + buf += sprintf (buf, HOST_WIDE_INT_PRINT_PADDED_HEX, (HOST_WIDE_INT) -1); + + } + else + buf += sprintf (buf, "0x"HOST_WIDE_INT_PRINT_HEX_PURE, wi.elt (--i)); + + while (--i >= 0) + buf += sprintf (buf, HOST_WIDE_INT_PRINT_PADDED_HEX, wi.elt (i)); + } +} + +/* Print one big hex number to FILE. Note that some assemblers may not + accept this for large modes. */ +void +print_hex (const wide_int &wi, FILE *file) +{ + char buf[WIDE_INT_PRINT_BUFFER_SIZE]; + print_hex (wi, buf); + fputs (buf, file); +} + diff --git a/gcc/wide-int-print.h b/gcc/wide-int-print.h new file mode 100644 index 00000000000..be93cd1ecf5 --- /dev/null +++ b/gcc/wide-int-print.h @@ -0,0 +1,38 @@ +/* Print wide integers. + Copyright (C) 2013 Free Software Foundation, Inc. + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify it +under the terms of the GNU General Public License as published by the +Free Software Foundation; either version 3, or (at your option) any +later version. + +GCC is distributed in the hope that it will be useful, but WITHOUT +ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING3. If not see +<http://www.gnu.org/licenses/>. 
*/ + +#ifndef WIDE_INT_PRINT_H +#define WIDE_INT_PRINT_H + +#include <stdio.h> +#include "wide-int.h" + +#define WIDE_INT_PRINT_BUFFER_SIZE (MAX_BITSIZE_MODE_ANY_INT / 4 + 4) +/* Printing functions. */ + +extern void print_dec (const wide_int &wi, char *buf, signop sgn); +extern void print_dec (const wide_int &wi, FILE *file, signop sgn); +extern void print_decs (const wide_int &wi, char *buf); +extern void print_decs (const wide_int &wi, FILE *file); +extern void print_decu (const wide_int &wi, char *buf); +extern void print_decu (const wide_int &wi, FILE *file); +extern void print_hex (const wide_int &wi, char *buf); +extern void print_hex (const wide_int &wi, FILE *file); + +#endif /* WIDE_INT_PRINT_H */ diff --git a/gcc/wide-int.cc b/gcc/wide-int.cc new file mode 100644 index 00000000000..460c36ca693 --- /dev/null +++ b/gcc/wide-int.cc @@ -0,0 +1,2206 @@ +/* Operations with very long integers. + Copyright (C) 2012-2013 Free Software Foundation, Inc. + Contributed by Kenneth Zadeck <zadeck@naturalbridge.com> + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify it +under the terms of the GNU General Public License as published by the +Free Software Foundation; either version 3, or (at your option) any +later version. + +GCC is distributed in the hope that it will be useful, but WITHOUT +ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING3. If not see +<http://www.gnu.org/licenses/>. */ + +#include "config.h" +#include "system.h" +#include "coretypes.h" +#include "tm.h" +#include "hwint.h" +#include "wide-int.h" +#include "tree.h" +#include "dumpfile.h" + +/* This is the maximal size of the buffer needed for dump. 
*/ +const unsigned int MAX_SIZE = (4 * (MAX_BITSIZE_MODE_ANY_INT / 4 + + (MAX_BITSIZE_MODE_ANY_INT + / HOST_BITS_PER_WIDE_INT) + + 32)); + +static const HOST_WIDE_INT zeros[WIDE_INT_MAX_ELTS] = {}; + +/* + * Internal utilities. + */ + +/* Quantities to deal with values that hold half of a wide int. Used + in multiply and divide. */ +#define HALF_INT_MASK (((HOST_WIDE_INT)1 << HOST_BITS_PER_HALF_WIDE_INT) - 1) + +#define BLOCK_OF(TARGET) ((TARGET) / HOST_BITS_PER_WIDE_INT) +#define BLOCKS_NEEDED(PREC) \ + (PREC ? (((PREC) + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT) : 1) +#define SIGN_MASK(X) (((HOST_WIDE_INT)X) >> (HOST_BITS_PER_WIDE_INT - 1)) + +/* Return the value a VAL[I] if I < LEN, otherwise, return 0 or -1 + based on the top existing bit of VAL. */ + +static unsigned HOST_WIDE_INT +safe_uhwi (const HOST_WIDE_INT *val, unsigned int len, unsigned int i) +{ + return i < len ? val[i] : val[len - 1] < 0 ? (HOST_WIDE_INT) -1 : 0; +} + +/* Convert the integer in VAL to canonical form, returning its new length. + LEN is the number of blocks currently in VAL and PRECISION is the number + of bits in the integer it represents. + + This function only changes the representation, not the value. */ +static unsigned int +canonize (HOST_WIDE_INT *val, unsigned int len, unsigned int precision) +{ + unsigned int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1); + unsigned int blocks_needed = BLOCKS_NEEDED (precision); + HOST_WIDE_INT top; + int i; + + if (len > blocks_needed) + len = blocks_needed; + + /* Clean up the top bits for any mode that is not a multiple of a + HWI and is not compressed. */ + if (len == blocks_needed && small_prec) + val[len - 1] = sext_hwi (val[len - 1], small_prec); + + if (len == 1) + return len; + + top = val[len - 1]; + if (top != 0 && top != (HOST_WIDE_INT)-1) + return len; + + /* At this point we know that the top is either 0 or -1. Find the + first block that is not a copy of this. 
*/ + for (i = len - 2; i >= 0; i--) + { + HOST_WIDE_INT x = val[i]; + if (x != top) + { + if (SIGN_MASK (x) == top) + return i + 1; + + /* We need an extra block because the top bit block i does + not match the extension. */ + return i + 2; + } + } + + /* The number is 0 or -1. */ + return 1; +} + +/* + * Conversion routines in and out of wide_int. + */ + +/* Copy XLEN elements from XVAL to VAL. If NEED_CANON, canonize the + result for an integer with precision PRECISION. Return the length + of VAL (after any canonization. */ +unsigned int +wi::from_array (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval, + unsigned int xlen, unsigned int precision, bool need_canon) +{ + for (unsigned i = 0; i < xlen; i++) + val[i] = xval[i]; + return need_canon ? canonize (val, xlen, precision) : xlen; +} + +/* Construct a wide int from a buffer of length LEN. BUFFER will be + read according to byte endianess and word endianess of the target. + Only the lower LEN bytes of the result are set; the remaining high + bytes are cleared. */ +wide_int +wi::from_buffer (const unsigned char *buffer, unsigned int buffer_len) +{ + unsigned int precision = buffer_len * BITS_PER_UNIT; + wide_int result = wide_int::create (precision); + unsigned int words = buffer_len / UNITS_PER_WORD; + + /* We have to clear all the bits ourself, as we merely or in values + below. 
*/ + unsigned int len = BLOCKS_NEEDED (precision); + HOST_WIDE_INT *val = result.write_val (); + for (unsigned int i = 0; i < len; ++i) + val[i] = 0; + + for (unsigned int byte = 0; byte < buffer_len; byte++) + { + unsigned int offset; + unsigned int index; + unsigned int bitpos = byte * BITS_PER_UNIT; + unsigned HOST_WIDE_INT value; + + if (buffer_len > UNITS_PER_WORD) + { + unsigned int word = byte / UNITS_PER_WORD; + + if (WORDS_BIG_ENDIAN) + word = (words - 1) - word; + + offset = word * UNITS_PER_WORD; + + if (BYTES_BIG_ENDIAN) + offset += (UNITS_PER_WORD - 1) - (byte % UNITS_PER_WORD); + else + offset += byte % UNITS_PER_WORD; + } + else + offset = BYTES_BIG_ENDIAN ? (buffer_len - 1) - byte : byte; + + value = (unsigned HOST_WIDE_INT) buffer[offset]; + + index = bitpos / HOST_BITS_PER_WIDE_INT; + val[index] |= value << (bitpos % HOST_BITS_PER_WIDE_INT); + } + + result.set_len (canonize (val, len, precision)); + + return result; +} + +/* Sets RESULT from THIS, the sign is taken according to SGN. */ +void +wi::to_mpz (wide_int x, mpz_t result, signop sgn) +{ + bool negative = false; + int len = x.get_len (); + const HOST_WIDE_INT *v = x.get_val (); + int small_prec = x.get_precision () & (HOST_BITS_PER_WIDE_INT - 1); + + if (wi::neg_p (x, sgn)) + { + negative = true; + /* We use ones complement to avoid -x80..0 edge case that - + won't work on. */ + x = ~x; + } + + if (sgn == UNSIGNED && small_prec) + { + HOST_WIDE_INT t[WIDE_INT_MAX_ELTS]; + + for (int i = 0; i < len - 1; i++) + t[i] = v[i]; + t[len-1] = zext_hwi (v[len-1], small_prec); + mpz_import (result, len, -1, sizeof (HOST_WIDE_INT), 0, 0, t); + } + else + mpz_import (result, len, -1, sizeof (HOST_WIDE_INT), 0, 0, v); + + if (negative) + mpz_com (result, result); +} + +/* Returns VAL converted to TYPE. If WRAP is true, then out-of-range + values of VAL will be wrapped; otherwise, they will be set to the + appropriate minimum or maximum TYPE bound. 
*/ +wide_int +wi::from_mpz (const_tree type, mpz_t x, bool wrap) +{ + size_t count, numb; + int prec = TYPE_PRECISION (type); + int small_prec = prec & (HOST_BITS_PER_WIDE_INT - 1); + wide_int res = wide_int::create (prec); + unsigned int i; + + if (!wrap) + { + mpz_t min, max; + + mpz_init (min); + mpz_init (max); + get_type_static_bounds (type, min, max); + + if (mpz_cmp (x, min) < 0) + mpz_set (x, min); + else if (mpz_cmp (x, max) > 0) + mpz_set (x, max); + + mpz_clear (min); + mpz_clear (max); + } + + /* Determine the number of unsigned HOST_WIDE_INTs that are required + for representing the value. The code to calculate count is + extracted from the GMP manual, section "Integer Import and Export": + http://gmplib.org/manual/Integer-Import-and-Export.html */ + numb = 8*sizeof(HOST_WIDE_INT); + count = (mpz_sizeinbase (x, 2) + numb-1) / numb; + if (count < 1) + count = 1; + + /* Need to initialize the number because it writes nothing for + zero. */ + HOST_WIDE_INT *val = res.write_val (); + for (i = 0; i < count; i++) + val[i] = 0; + + res.set_len (count); + + mpz_export (val, &count, -1, sizeof (HOST_WIDE_INT), 0, 0, x); + + /* Canonize for small_prec. */ + if (small_prec && count == (size_t)BLOCKS_NEEDED (prec)) + val[count-1] = sext_hwi (val[count-1], small_prec); + + if (mpz_sgn (x) < 0) + res = -res; + + return res; +} + +/* + * Largest and smallest values in a mode. + */ + +/* Return the largest SGNed number that is representable in PRECISION bits. + + TODO: There is still code from the double_int era that trys to + make up for the fact that double int's could not represent the + min and max values of all types. This code should be removed + because the min and max values can always be represented in + wide_ints and int-csts. */ +wide_int +wi::max_value (unsigned int precision, signop sgn) +{ + if (precision == 0) + return shwi (0, precision); + else if (sgn == UNSIGNED) + /* The unsigned max is just all ones. 
*/ + return shwi (-1, precision); + else + /* The signed max is all ones except the top bit. This must be + explicitly represented. */ + return mask (precision - 1, false, precision); +} + +/* Return the largest SGNed number that is representable in PRECISION bits. */ +wide_int +wi::min_value (unsigned int precision, signop sgn) +{ + if (precision == 0 || sgn == UNSIGNED) + return uhwi (0, precision); + else + /* The signed min is all zeros except the top bit. This must be + explicitly represented. */ + return wi::set_bit_in_zero (precision - 1, precision); +} + +/* + * Public utilities. + */ + +/* Convert the number represented by XVAL, XLEN and XPRECISION, which has + signedness SGN, to an integer that has PRECISION bits. Store the blocks + in VAL and return the number of blocks used. + + This function can handle both extension (PRECISION > XPRECISION) + and truncation (PRECISION < XPRECISION). */ +unsigned int +wi::force_to_size (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval, + unsigned int xlen, unsigned int xprecision, + unsigned int precision, signop sgn) +{ + unsigned int blocks_needed = BLOCKS_NEEDED (precision); + unsigned int len = blocks_needed < xlen ? blocks_needed : xlen; + for (unsigned i = 0; i < len; i++) + val[i] = xval[i]; + + if (precision > xprecision) + { + /* Expanding. */ + if (sgn == UNSIGNED) + { + unsigned int small_xprecision = xprecision % HOST_BITS_PER_WIDE_INT; + + if (small_xprecision && len == BLOCKS_NEEDED (xprecision)) + val[len - 1] = zext_hwi (val[len - 1], small_xprecision); + else if (val[len - 1] < 0) + { + while (len < BLOCKS_NEEDED (xprecision)) + val[len++] = -1; + if (small_xprecision) + val[len - 1] = zext_hwi (val[len - 1], small_xprecision); + else + val[len++] = 0; + } + } + } + len = canonize (val, len, precision); + + return len; +} + +/* This function hides the fact that we cannot rely on the bits beyond + the precision. 
This issue comes up in the relational comparisions + where we do allow comparisons of values of different precisions. */ +static inline HOST_WIDE_INT +selt (const HOST_WIDE_INT *a, unsigned int len, + unsigned int blocks_needed, + unsigned int small_prec, + unsigned int index, signop sgn) +{ + if (index >= len) + { + if (index < blocks_needed || sgn == SIGNED) + /* Signed or within the precision. */ + return SIGN_MASK (a[len - 1]); + else + /* Unsigned extension beyond the precision. */ + return 0; + } + + if (sgn == UNSIGNED && small_prec && index == blocks_needed - 1) + return zext_hwi (a[index], small_prec); + else + return a[index]; +} + +/* Find the highest bit represented in a wide int. This will in + general have the same value as the sign bit. */ +static inline HOST_WIDE_INT +top_bit_of (const HOST_WIDE_INT *a, unsigned int len) +{ + return (a[len - 1] >> (HOST_BITS_PER_WIDE_INT - 1)) & 1; +} + +/* + * Comparisons, note that only equality is an operator. The other + * comparisons cannot be operators since they are inherently singed or + * unsigned and C++ has no such operators. + */ + +/* Return true if OP0 == OP1. */ +bool +wi::eq_p_large (const HOST_WIDE_INT *op0, unsigned int op0len, + const HOST_WIDE_INT *op1, unsigned int op1len, + unsigned int prec) +{ + int l0 = op0len - 1; + unsigned int small_prec = prec & (HOST_BITS_PER_WIDE_INT - 1); + + while (op0len != op1len) + return false; + + if (op0len == BLOCKS_NEEDED (prec) && small_prec) + { + /* It does not matter if we zext or sext here, we just have to + do both the same way. */ + if (zext_hwi (op0 [l0], small_prec) != zext_hwi (op1 [l0], small_prec)) + return false; + l0--; + } + + while (l0 >= 0) + if (op0[l0] != op1[l0]) + return false; + else + l0--; + + return true; +} + +/* Return true if OP0 < OP1 using signed comparisons. 
 */
bool
wi::lts_p_large (const HOST_WIDE_INT *op0, unsigned int op0len,
		 unsigned int p0,
		 const HOST_WIDE_INT *op1, unsigned int op1len,
		 unsigned int p1)
{
  HOST_WIDE_INT s0, s1;
  unsigned HOST_WIDE_INT u0, u1;
  unsigned int blocks_needed0 = BLOCKS_NEEDED (p0);
  unsigned int blocks_needed1 = BLOCKS_NEEDED (p1);
  unsigned int small_prec0 = p0 & (HOST_BITS_PER_WIDE_INT - 1);
  unsigned int small_prec1 = p1 & (HOST_BITS_PER_WIDE_INT - 1);
  int l = MAX (op0len - 1, op1len - 1);

  /* Only the top block is compared as signed.  The rest are unsigned
     comparisons.  (SIGNED is still passed to selt for the lower blocks
     so that blocks beyond the stored length are sign-smeared; the
     comparison itself is unsigned because u0/u1 are unsigned.)  */
  s0 = selt (op0, op0len, blocks_needed0, small_prec0, l, SIGNED);
  s1 = selt (op1, op1len, blocks_needed1, small_prec1, l, SIGNED);
  if (s0 < s1)
    return true;
  if (s0 > s1)
    return false;

  l--;
  while (l >= 0)
    {
      u0 = selt (op0, op0len, blocks_needed0, small_prec0, l, SIGNED);
      u1 = selt (op1, op1len, blocks_needed1, small_prec1, l, SIGNED);

      if (u0 < u1)
	return true;
      if (u0 > u1)
	return false;
      l--;
    }

  return false;
}

/* Returns -1 if OP0 < OP1, 0 if OP0 == OP1 and 1 if OP0 > OP1 using
   signed compares.  */
int
wi::cmps_large (const HOST_WIDE_INT *op0, unsigned int op0len,
		unsigned int p0,
		const HOST_WIDE_INT *op1, unsigned int op1len,
		unsigned int p1)
{
  HOST_WIDE_INT s0, s1;
  unsigned HOST_WIDE_INT u0, u1;
  unsigned int blocks_needed0 = BLOCKS_NEEDED (p0);
  unsigned int blocks_needed1 = BLOCKS_NEEDED (p1);
  unsigned int small_prec0 = p0 & (HOST_BITS_PER_WIDE_INT - 1);
  unsigned int small_prec1 = p1 & (HOST_BITS_PER_WIDE_INT - 1);
  int l = MAX (op0len - 1, op1len - 1);

  /* Only the top block is compared as signed.  The rest are unsigned
     comparisons.  */
  s0 = selt (op0, op0len, blocks_needed0, small_prec0, l, SIGNED);
  s1 = selt (op1, op1len, blocks_needed1, small_prec1, l, SIGNED);
  if (s0 < s1)
    return -1;
  if (s0 > s1)
    return 1;

  /* Lower blocks: compare as unsigned magnitudes.  */
  l--;
  while (l >= 0)
    {
      u0 = selt (op0, op0len, blocks_needed0, small_prec0, l, SIGNED);
      u1 = selt (op1, op1len, blocks_needed1, small_prec1, l, SIGNED);

      if (u0 < u1)
	return -1;
      if (u0 > u1)
	return 1;
      l--;
    }

  return 0;
}

/* Return true if OP0 < OP1 using unsigned comparisons.  */
bool
wi::ltu_p_large (const HOST_WIDE_INT *op0, unsigned int op0len, unsigned int p0,
		 const HOST_WIDE_INT *op1, unsigned int op1len, unsigned int p1)
{
  unsigned HOST_WIDE_INT x0;
  unsigned HOST_WIDE_INT x1;
  unsigned int blocks_needed0 = BLOCKS_NEEDED (p0);
  unsigned int blocks_needed1 = BLOCKS_NEEDED (p1);
  unsigned int small_prec0 = p0 & (HOST_BITS_PER_WIDE_INT - 1);
  unsigned int small_prec1 = p1 & (HOST_BITS_PER_WIDE_INT - 1);
  int l = MAX (op0len - 1, op1len - 1);

  /* All blocks, including the top one, compare as unsigned.  */
  while (l >= 0)
    {
      x0 = selt (op0, op0len, blocks_needed0, small_prec0, l, UNSIGNED);
      x1 = selt (op1, op1len, blocks_needed1, small_prec1, l, UNSIGNED);
      if (x0 < x1)
	return true;
      if (x0 > x1)
	return false;
      l--;
    }

  return false;
}

/* Returns -1 if OP0 < OP1, 0 if OP0 == OP1 and 1 if OP0 > OP1 using
   unsigned compares.
 */
int
wi::cmpu_large (const HOST_WIDE_INT *op0, unsigned int op0len, unsigned int p0,
		const HOST_WIDE_INT *op1, unsigned int op1len, unsigned int p1)
{
  unsigned HOST_WIDE_INT x0;
  unsigned HOST_WIDE_INT x1;
  unsigned int blocks_needed0 = BLOCKS_NEEDED (p0);
  unsigned int blocks_needed1 = BLOCKS_NEEDED (p1);
  unsigned int small_prec0 = p0 & (HOST_BITS_PER_WIDE_INT - 1);
  unsigned int small_prec1 = p1 & (HOST_BITS_PER_WIDE_INT - 1);
  int l = MAX (op0len - 1, op1len - 1);

  /* Walk the blocks from most to least significant; the first unequal
     block decides the ordering.  */
  while (l >= 0)
    {
      x0 = selt (op0, op0len, blocks_needed0, small_prec0, l, UNSIGNED);
      x1 = selt (op1, op1len, blocks_needed1, small_prec1, l, UNSIGNED);
      if (x0 < x1)
	return -1;
      if (x0 > x1)
	return 1;
      l--;
    }

  return 0;
}

/*
 * Extension.
 */

/* Sign-extend the number represented by XVAL and XLEN into VAL,
   starting at OFFSET.  Return the number of blocks in VAL.  Both XVAL
   and VAL have PRECISION bits.  */
unsigned int
wi::sext_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
		unsigned int xlen, unsigned int precision, unsigned int offset)
{
  unsigned int len = offset / HOST_BITS_PER_WIDE_INT;
  /* Extending beyond the precision is a no-op.  If we have only stored
     OFFSET bits or fewer, the rest are already signs.  */
  if (offset >= precision || len >= xlen)
    {
      for (unsigned i = 0; i < xlen; ++i)
	val[i] = xval[i];
      return xlen;
    }
  unsigned int suboffset = offset % HOST_BITS_PER_WIDE_INT;
  /* Copy the blocks that lie entirely below OFFSET.  */
  for (unsigned int i = 0; i < len; i++)
    val[i] = xval[i];
  /* Sign-extend within the block that contains bit OFFSET, if any.  */
  if (suboffset > 0)
    {
      val[len] = sext_hwi (xval[len], suboffset);
      len += 1;
    }
  return canonize (val, len, precision);
}

/* Zero-extend the number represented by XVAL and XLEN into VAL,
   starting at OFFSET.  Return the number of blocks in VAL.  Both XVAL
   and VAL have PRECISION bits.
 */
unsigned int
wi::zext_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
		unsigned int xlen, unsigned int precision, unsigned int offset)
{
  unsigned int len = offset / HOST_BITS_PER_WIDE_INT;
  /* Extending beyond the precision is a no-op.  If we have only stored
     OFFSET bits or fewer, and the upper stored bit is zero, then there
     is nothing to do.  */
  if (offset >= precision || (len >= xlen && xval[xlen - 1] >= 0))
    {
      for (unsigned i = 0; i < xlen; ++i)
	val[i] = xval[i];
      return xlen;
    }
  unsigned int suboffset = offset % HOST_BITS_PER_WIDE_INT;
  /* Copy the blocks below OFFSET.  Implicit blocks (i >= xlen) are -1,
     since the early return above guarantees the stored top block is
     negative whenever len >= xlen.  */
  for (unsigned int i = 0; i < len; i++)
    val[i] = i < xlen ? xval[i] : -1;
  /* Clear from bit OFFSET upward in the block that contains it.  */
  if (suboffset > 0)
    val[len] = zext_hwi (len < xlen ? xval[len] : -1, suboffset);
  else
    val[len] = 0;
  return canonize (val, len + 1, precision);
}

/*
 * Masking, inserting, shifting, rotating.
 */

/* Insert WIDTH bits from Y into X starting at START.  */
wide_int
wi::insert (const wide_int &x, const wide_int &y, unsigned int start,
	    unsigned int width)
{
  wide_int result;
  wide_int mask;
  wide_int tmp;

  unsigned int precision = x.get_precision ();
  /* Inserting entirely above the precision leaves X unchanged.  */
  if (start >= precision)
    return x;

  gcc_checking_assert (precision >= width);

  /* Clip the inserted field to the available precision.  */
  if (start + width >= precision)
    width = precision - start;

  /* result = ((Y zero-extended to PRECISION) << START) & field mask,
     then OR in the bits of X outside the field.  */
  mask = wi::shifted_mask (start, width, false, precision);
  tmp = wi::lshift (wide_int::from (y, precision, UNSIGNED), start);
  result = tmp & mask;

  tmp = wi::bit_and_not (x, mask);
  result = result | tmp;

  return result;
}

/* Copy the number represented by XVAL and XLEN into VAL, setting bit BIT.
   Return the number of blocks in VAL.  Both XVAL and VAL have PRECISION
   bits.
 */
unsigned int
wi::set_bit_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
		   unsigned int xlen, unsigned int precision, unsigned int bit)
{
  unsigned int block = bit / HOST_BITS_PER_WIDE_INT;
  unsigned int subbit = bit % HOST_BITS_PER_WIDE_INT;

  if (block + 1 >= xlen)
    {
      /* The operation either affects the last current block or needs
	 a new block.  */
      unsigned int len = block + 1;
      for (unsigned int i = 0; i < len; i++)
	val[i] = safe_uhwi (xval, xlen, i);
      val[block] |= (unsigned HOST_WIDE_INT) 1 << subbit;

      /* If the bit we just set is at the msb of the block, make sure
	 that any higher bits are zeros.  */
      if (bit + 1 < precision && subbit == HOST_BITS_PER_WIDE_INT - 1)
	val[len++] = 0;
      return len;
    }
  else
    {
      /* The bit lies inside an already-stored block; set it and
	 re-canonize.  */
      for (unsigned int i = 0; i < xlen; i++)
	val[i] = xval[i];
      val[block] |= (unsigned HOST_WIDE_INT) 1 << subbit;
      return canonize (val, xlen, precision);
    }
}

/* bswap THIS.  */
wide_int
wide_int_storage::bswap () const
{
  wide_int result = wide_int::create (precision);
  unsigned int i, s;
  unsigned int len = BLOCKS_NEEDED (precision);
  unsigned int xlen = get_len ();
  const HOST_WIDE_INT *xval = get_val ();
  HOST_WIDE_INT *val = result.write_val ();

  /* This is not a well defined operation if the precision is not a
     multiple of 8.  */
  gcc_assert ((precision & 0x7) == 0);

  for (i = 0; i < len; i++)
    val[i] = 0;

  /* Only swap the bytes that are not the padding.  */
  for (s = 0; s < precision; s += 8)
    {
      /* D is the destination bit position mirroring source position S.  */
      unsigned int d = precision - s - 8;
      unsigned HOST_WIDE_INT byte;

      unsigned int block = s / HOST_BITS_PER_WIDE_INT;
      unsigned int offset = s & (HOST_BITS_PER_WIDE_INT - 1);

      byte = (safe_uhwi (xval, xlen, block) >> offset) & 0xff;

      block = d / HOST_BITS_PER_WIDE_INT;
      offset = d & (HOST_BITS_PER_WIDE_INT - 1);

      val[block] |= byte << offset;
    }

  result.set_len (canonize (val, len, precision));
  return result;
}

/* Fill VAL with a mask where the lower WIDTH bits are ones and the bits
   above that up to PREC are zeros.  The result is inverted if NEGATE
   is true.  Return the number of blocks in VAL.  */
unsigned int
wi::mask (HOST_WIDE_INT *val, unsigned int width, bool negate,
	  unsigned int prec)
{
  gcc_assert (width < 4 * MAX_BITSIZE_MODE_ANY_INT);
  gcc_assert (prec <= 4 * MAX_BITSIZE_MODE_ANY_INT);

  /* All-ones (or all-zeros when negated) needs only one block.  */
  if (width == prec)
    {
      val[0] = negate ? 0 : -1;
      return 1;
    }
  else if (width == 0)
    {
      val[0] = negate ? -1 : 0;
      return 1;
    }

  /* Whole blocks of ones (zeros when negated).  */
  unsigned int i = 0;
  while (i < width / HOST_BITS_PER_WIDE_INT)
    val[i++] = negate ? 0 : -1;

  /* Partial top block, or an explicit zero block so the value stays
     non-negative in the canonical representation.  */
  unsigned int shift = width & (HOST_BITS_PER_WIDE_INT - 1);
  if (shift != 0)
    {
      HOST_WIDE_INT last = (((unsigned HOST_WIDE_INT) 1) << shift) - 1;
      val[i++] = negate ? ~last : last;
    }
  else
    val[i++] = negate ? -1 : 0;

  return i;
}

/* Fill VAL with a mask where the lower START bits are zeros, the next WIDTH
   bits are ones, and the bits above that up to PREC are zeros.  The result
   is inverted if NEGATE is true.  Return the number of blocks in VAL.  */
unsigned int
wi::shifted_mask (HOST_WIDE_INT *val, unsigned int start, unsigned int width,
		  bool negate, unsigned int prec)
{
  int small_prec = prec & (HOST_BITS_PER_WIDE_INT - 1);

  gcc_assert (start < 4 * MAX_BITSIZE_MODE_ANY_INT);

  /* Clip the field to PREC.  */
  if (start + width > prec)
    width = prec - start;
  unsigned int end = start + width;

  if (width == 0)
    {
      val[0] = negate ? -1 : 0;
      return 1;
    }

  /* Whole low blocks of zeros (ones when negated).  */
  unsigned int i = 0;
  while (i < start / HOST_BITS_PER_WIDE_INT)
    val[i++] = negate ? -1 : 0;

  unsigned int shift = start & (HOST_BITS_PER_WIDE_INT - 1);
  if (shift)
    {
      /* BLOCK holds ones below bit SHIFT.  */
      HOST_WIDE_INT block = (((unsigned HOST_WIDE_INT) 1) << shift) - 1;
      shift = end & (HOST_BITS_PER_WIDE_INT - 1);
      if (shift)
	{
	  /* case 000111000: the whole field fits in this block.  */
	  block = (((unsigned HOST_WIDE_INT) 1) << shift) - block - 1;
	  val[i++] = negate ? ~block : block;
	  if (i == BLOCKS_NEEDED (prec) && small_prec)
	    val[i - 1] = sext_hwi (val[i - 1], small_prec);
	  return i;
	}
      else
	/* ...111000: field starts mid-block and runs to its top.  */
	val[i++] = negate ? block : ~block;
    }

  /* Whole blocks inside the field.  */
  while (i < end / HOST_BITS_PER_WIDE_INT)
    /* 1111111 */
    val[i++] = negate ? 0 : -1;

  shift = end & (HOST_BITS_PER_WIDE_INT - 1);
  if (shift != 0)
    {
      /* 000011111: field ends mid-block.  */
      HOST_WIDE_INT block = (((unsigned HOST_WIDE_INT) 1) << shift) - 1;
      val[i++] = negate ? ~block : block;
    }
  else if (end < prec)
    val[i++] = negate ? -1 : 0;

  /* Keep the top block canonical for precisions that are not a
     multiple of HOST_BITS_PER_WIDE_INT.  */
  if (i == BLOCKS_NEEDED (prec) && small_prec)
    val[i - 1] = sext_hwi (val[i - 1], small_prec);

  return i;
}

/*
 * logical operations.
 */

/* Set VAL to OP0 & OP1.  Return the number of blocks used.
*/ +unsigned int +wi::and_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0, + unsigned int op0len, const HOST_WIDE_INT *op1, + unsigned int op1len, unsigned int prec) +{ + int l0 = op0len - 1; + int l1 = op1len - 1; + bool need_canon = true; + + unsigned int len = MAX (op0len, op1len); + if (l0 > l1) + { + HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len); + if (op1mask == 0) + { + l0 = l1; + len = l1 + 1; + } + else + { + need_canon = false; + while (l0 > l1) + { + val[l0] = op0[l0]; + l0--; + } + } + } + else if (l1 > l0) + { + HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len); + if (op0mask == 0) + len = l0 + 1; + else + { + need_canon = false; + while (l1 > l0) + { + val[l1] = op1[l1]; + l1--; + } + } + } + + while (l0 >= 0) + { + val[l0] = op0[l0] & op1[l0]; + l0--; + } + + if (need_canon) + len = canonize (val, len, prec); + + return len; +} + +/* Set VAL to OP0 & ~OP1. Return the number of blocks used. */ +unsigned int +wi::and_not_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0, + unsigned int op0len, const HOST_WIDE_INT *op1, + unsigned int op1len, unsigned int prec) +{ + wide_int result; + int l0 = op0len - 1; + int l1 = op1len - 1; + bool need_canon = true; + + unsigned int len = MAX (op0len, op1len); + if (l0 > l1) + { + HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len); + if (op1mask != 0) + { + l0 = l1; + len = l1 + 1; + } + else + { + need_canon = false; + while (l0 > l1) + { + val[l0] = op0[l0]; + l0--; + } + } + } + else if (l1 > l0) + { + HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len); + if (op0mask == 0) + len = l0 + 1; + else + { + need_canon = false; + while (l1 > l0) + { + val[l1] = ~op1[l1]; + l1--; + } + } + } + + while (l0 >= 0) + { + val[l0] = op0[l0] & ~op1[l0]; + l0--; + } + + if (need_canon) + len = canonize (val, len, prec); + + return len; +} + +/* Set VAL to OP0 | OP1. Return the number of blocks used. 
*/ +unsigned int +wi::or_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0, + unsigned int op0len, const HOST_WIDE_INT *op1, + unsigned int op1len, unsigned int prec) +{ + wide_int result; + int l0 = op0len - 1; + int l1 = op1len - 1; + bool need_canon = true; + + unsigned int len = MAX (op0len, op1len); + if (l0 > l1) + { + HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len); + if (op1mask != 0) + { + l0 = l1; + len = l1 + 1; + } + else + { + need_canon = false; + while (l0 > l1) + { + val[l0] = op0[l0]; + l0--; + } + } + } + else if (l1 > l0) + { + HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len); + if (op0mask != 0) + len = l0 + 1; + else + { + need_canon = false; + while (l1 > l0) + { + val[l1] = op1[l1]; + l1--; + } + } + } + + while (l0 >= 0) + { + val[l0] = op0[l0] | op1[l0]; + l0--; + } + + if (need_canon) + len = canonize (val, len, prec); + + return len; +} + +/* Set VAL to OP0 | ~OP1. Return the number of blocks used. */ +unsigned int +wi::or_not_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0, + unsigned int op0len, const HOST_WIDE_INT *op1, + unsigned int op1len, unsigned int prec) +{ + wide_int result; + int l0 = op0len - 1; + int l1 = op1len - 1; + bool need_canon = true; + + unsigned int len = MAX (op0len, op1len); + if (l0 > l1) + { + HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len); + if (op1mask == 0) + { + l0 = l1; + len = l1 + 1; + } + else + { + need_canon = false; + while (l0 > l1) + { + val[l0] = op0[l0]; + l0--; + } + } + } + else if (l1 > l0) + { + HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len); + if (op0mask != 0) + len = l0 + 1; + else + { + need_canon = false; + while (l1 > l0) + { + val[l1] = ~op1[l1]; + l1--; + } + } + } + + while (l0 >= 0) + { + val[l0] = op0[l0] | ~op1[l0]; + l0--; + } + + if (need_canon) + len = canonize (val, len, prec); + + return len; +} + +/* Set VAL to OP0 ^ OP1. Return the number of blocks used. 
*/ +unsigned int +wi::xor_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0, + unsigned int op0len, const HOST_WIDE_INT *op1, + unsigned int op1len, unsigned int prec) +{ + wide_int result; + int l0 = op0len - 1; + int l1 = op1len - 1; + + unsigned int len = MAX (op0len, op1len); + if (l0 > l1) + { + HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len); + while (l0 > l1) + { + val[l0] = op0[l0] ^ op1mask; + l0--; + } + } + + if (l1 > l0) + { + HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len); + while (l1 > l0) + { + val[l1] = op0mask ^ op1[l1]; + l1--; + } + } + + while (l0 >= 0) + { + val[l0] = op0[l0] ^ op1[l0]; + l0--; + } + + return canonize (val, len, prec); +} + +/* + * math + */ + +/* Set VAL to OP0 + OP1. If OVERFLOW is nonnull, record in *OVERFLOW + whether the result overflows when OP0 and OP1 are treated as having + signedness SGN. Return the number of blocks in VAL. */ +unsigned int +wi::add_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0, + unsigned int op0len, const HOST_WIDE_INT *op1, + unsigned int op1len, unsigned int prec, + signop sgn, bool *overflow) +{ + unsigned HOST_WIDE_INT o0 = 0; + unsigned HOST_WIDE_INT o1 = 0; + unsigned HOST_WIDE_INT x = 0; + unsigned HOST_WIDE_INT carry = 0; + unsigned HOST_WIDE_INT old_carry = 0; + unsigned HOST_WIDE_INT mask0, mask1; + unsigned int i, small_prec; + + unsigned int len = MAX (op0len, op1len); + mask0 = -top_bit_of (op0, op0len); + mask1 = -top_bit_of (op1, op1len); + /* Add all of the explicitly defined elements. */ + + for (i = 0; i < len; i++) + { + o0 = i < op0len ? (unsigned HOST_WIDE_INT)op0[i] : mask0; + o1 = i < op1len ? (unsigned HOST_WIDE_INT)op1[i] : mask1; + x = o0 + o1 + carry; + val[i] = x; + old_carry = carry; + carry = carry == 0 ? 
x < o0 : x <= o0; + } + + if (len * HOST_BITS_PER_WIDE_INT < prec) + { + val[len] = mask0 + mask1 + carry; + len++; + if (overflow) + *overflow = false; + } + else if (overflow) + { + if (sgn == SIGNED) + { + unsigned int p = (len == BLOCKS_NEEDED (prec) + ? HOST_BITS_PER_WIDE_INT + : prec & (HOST_BITS_PER_WIDE_INT - 1) ) - 1; + HOST_WIDE_INT x = (val[len - 1] ^ o0) & (val[len - 1] ^ o1); + x = (x >> p) & 1; + *overflow = (x != 0); + } + else + { + if (old_carry) + *overflow = ((unsigned HOST_WIDE_INT) val[len - 1] <= o0); + else + *overflow = ((unsigned HOST_WIDE_INT) val[len - 1] < o0); + } + } + + /* Canonize the top of the top block. */ + small_prec = prec & (HOST_BITS_PER_WIDE_INT - 1); + if (small_prec != 0 && BLOCKS_NEEDED (prec) == len) + { + /* Modes with weird precisions. */ + i = len - 1; + val[i] = sext_hwi (val[i], small_prec); + } + + return canonize (val, len, prec); +} + +/* This is bogus. We should always return the precision and leave the + caller to handle target dependencies. */ +static int +clz_zero (unsigned int precision) +{ + unsigned int count; + + enum machine_mode mode = mode_for_size (precision, MODE_INT, 0); + if (mode == BLKmode) + mode_for_size (precision, MODE_PARTIAL_INT, 0); + + /* Even if the value at zero is undefined, we have to come up + with some replacement. Seems good enough. */ + if (mode == BLKmode) + count = precision; + else if (!CLZ_DEFINED_VALUE_AT_ZERO (mode, count)) + count = precision; + return count; +} + +/* This is bogus. We should always return the precision and leave the + caller to handle target dependencies. */ +static int +ctz_zero (unsigned int precision) +{ + unsigned int count; + + enum machine_mode mode = mode_for_size (precision, MODE_INT, 0); + if (mode == BLKmode) + mode_for_size (precision, MODE_PARTIAL_INT, 0); + + /* Even if the value at zero is undefined, we have to come up + with some replacement. Seems good enough. 
*/ + if (mode == BLKmode) + count = precision; + else if (!CTZ_DEFINED_VALUE_AT_ZERO (mode, count)) + count = precision; + return count; +} + +/* Subroutines of the multiplication and division operations. Unpack + the first IN_LEN HOST_WIDE_INTs in INPUT into 2 * IN_LEN + HOST_HALF_WIDE_INTs of RESULT. The rest of RESULT is filled by + uncompressing the top bit of INPUT[IN_LEN - 1]. */ +static void +wi_unpack (unsigned HOST_HALF_WIDE_INT *result, + const unsigned HOST_WIDE_INT *input, + unsigned int in_len, unsigned int out_len, + unsigned int prec, signop sgn) +{ + unsigned int i; + unsigned int j = 0; + unsigned int small_prec = prec & (HOST_BITS_PER_WIDE_INT - 1); + unsigned int blocks_needed = BLOCKS_NEEDED (prec); + HOST_WIDE_INT mask; + + if (sgn == SIGNED) + { + mask = -top_bit_of ((const HOST_WIDE_INT *) input, in_len); + mask &= HALF_INT_MASK; + } + else + mask = 0; + + for (i = 0; i < in_len; i++) + { + HOST_WIDE_INT x = input[i]; + if (i == blocks_needed - 1 && small_prec) + { + if (sgn == SIGNED) + x = sext_hwi (x, small_prec); + else + x = zext_hwi (x, small_prec); + } + result[j++] = x; + result[j++] = x >> HOST_BITS_PER_HALF_WIDE_INT; + } + + /* Smear the sign bit. */ + while (j < out_len) + result[j++] = mask; +} + +/* The inverse of wi_unpack. IN_LEN is the the number of input + blocks. The number of output blocks will be half this amount. */ +static void +wi_pack (unsigned HOST_WIDE_INT *result, + const unsigned HOST_HALF_WIDE_INT *input, + unsigned int in_len) +{ + unsigned int i = 0; + unsigned int j = 0; + + while (i + 2 < in_len) + { + result[j++] = (unsigned HOST_WIDE_INT)input[i] + | ((unsigned HOST_WIDE_INT)input[i + 1] + << HOST_BITS_PER_HALF_WIDE_INT); + i += 2; + } + + /* Handle the case where in_len is odd. For this we zero extend. 
*/ + if (in_len & 1) + result[j++] = (unsigned HOST_WIDE_INT)input[i]; + else + result[j++] = (unsigned HOST_WIDE_INT)input[i] + | ((unsigned HOST_WIDE_INT)input[i + 1] << HOST_BITS_PER_HALF_WIDE_INT); +} + +/* Multiply Op1 by Op2. If HIGH is set, only the upper half of the + result is returned. If FULL is set, the entire result is returned + in a mode that is twice the width of the inputs. However, that + mode needs to exist if the value is to be usable. Clients that use + FULL need to check for this. + + If HIGH or FULL are not set, throw away the upper half after the check + is made to see if it overflows. Unfortunately there is no better + way to check for overflow than to do this. OVERFLOW is assumed to + be sticky so it should be initialized. SGN controls the signedness + and is used to check overflow or if HIGH or FULL is set. */ +unsigned int +wi::mul_internal (HOST_WIDE_INT *val, const HOST_WIDE_INT *op1, + unsigned int op1len, const HOST_WIDE_INT *op2, + unsigned int op2len, unsigned int prec, signop sgn, + bool *overflow, bool high, bool full) +{ + unsigned HOST_WIDE_INT o0, o1, k, t; + unsigned int i; + unsigned int j; + unsigned int blocks_needed = BLOCKS_NEEDED (prec); + unsigned int half_blocks_needed = blocks_needed * 2; + /* The sizes here are scaled to support a 2x largest mode by 2x + largest mode yielding a 4x largest mode result. This is what is + needed by vpn. */ + + unsigned HOST_HALF_WIDE_INT + u[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT]; + unsigned HOST_HALF_WIDE_INT + v[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT]; + /* The '2' in 'R' is because we are internally doing a full + multiply. */ + unsigned HOST_HALF_WIDE_INT + r[2 * 4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT]; + HOST_WIDE_INT mask = ((HOST_WIDE_INT)1 << HOST_BITS_PER_HALF_WIDE_INT) - 1; + + /* If the top level routine did not really pass in an overflow, then + just make sure that we never attempt to set it. 
*/ + bool needs_overflow = (overflow != 0); + if (needs_overflow) + *overflow = false; + + /* If we need to check for overflow, we can only do half wide + multiplies quickly because we need to look at the top bits to + check for the overflow. */ + if ((high || full || needs_overflow) + && (prec <= HOST_BITS_PER_HALF_WIDE_INT)) + { + HOST_WIDE_INT r; + + if (sgn == SIGNED) + { + o0 = sext_hwi (op1[0], prec); + o1 = sext_hwi (op2[0], prec); + } + else + { + o0 = zext_hwi (op1[0], prec); + o1 = zext_hwi (op2[0], prec); + } + + r = o0 * o1; + if (needs_overflow) + { + HOST_WIDE_INT upper; + HOST_WIDE_INT sm + = (r << (HOST_BITS_PER_WIDE_INT - prec)) + >> (HOST_BITS_PER_WIDE_INT - 1); + mask = ((HOST_WIDE_INT)1 << prec) - 1; + sm &= mask; + upper = (r >> prec) & mask; + + if (sgn == SIGNED) + { + if (sm != upper) + *overflow = true; + } + else + if (upper != 0) + *overflow = true; + } + if (full) + val[0] = sext_hwi (r, prec * 2); + else if (high) + val[0] = r >> prec; + else + val[0] = sext_hwi (r, prec); + return 1; + } + + /* We do unsigned mul and then correct it. */ + wi_unpack (u, (const unsigned HOST_WIDE_INT*)op1, op1len, + half_blocks_needed, prec, SIGNED); + wi_unpack (v, (const unsigned HOST_WIDE_INT*)op2, op2len, + half_blocks_needed, prec, SIGNED); + + /* The 2 is for a full mult. */ + memset (r, 0, half_blocks_needed * 2 + * HOST_BITS_PER_HALF_WIDE_INT / CHAR_BIT); + + for (j = 0; j < half_blocks_needed; j++) + { + k = 0; + for (i = 0; i < half_blocks_needed; i++) + { + t = ((unsigned HOST_WIDE_INT)u[i] * (unsigned HOST_WIDE_INT)v[j] + + r[i + j] + k); + r[i + j] = t & HALF_INT_MASK; + k = t >> HOST_BITS_PER_HALF_WIDE_INT; + } + r[j + half_blocks_needed] = k; + } + + /* We did unsigned math above. For signed we must adjust the + product (assuming we need to see that). 
*/ + if (sgn == SIGNED && (full || high || needs_overflow)) + { + unsigned HOST_WIDE_INT b; + if (op1[op1len-1] < 0) + { + b = 0; + for (i = 0; i < half_blocks_needed; i++) + { + t = (unsigned HOST_WIDE_INT)r[i + half_blocks_needed] + - (unsigned HOST_WIDE_INT)v[i] - b; + r[i + half_blocks_needed] = t & HALF_INT_MASK; + b = t >> (HOST_BITS_PER_WIDE_INT - 1); + } + } + if (op2[op2len-1] < 0) + { + b = 0; + for (i = 0; i < half_blocks_needed; i++) + { + t = (unsigned HOST_WIDE_INT)r[i + half_blocks_needed] + - (unsigned HOST_WIDE_INT)u[i] - b; + r[i + half_blocks_needed] = t & HALF_INT_MASK; + b = t >> (HOST_BITS_PER_WIDE_INT - 1); + } + } + } + + if (needs_overflow) + { + HOST_WIDE_INT top; + + /* For unsigned, overflow is true if any of the top bits are set. + For signed, overflow is true if any of the top bits are not equal + to the sign bit. */ + if (sgn == UNSIGNED) + top = 0; + else + { + top = r[(half_blocks_needed) - 1]; + top = SIGN_MASK (top << (HOST_BITS_PER_WIDE_INT / 2)); + top &= mask; + } + + for (i = half_blocks_needed; i < half_blocks_needed * 2; i++) + if (((HOST_WIDE_INT)(r[i] & mask)) != top) + *overflow = true; + } + + if (full) + { + /* compute [2prec] <- [prec] * [prec] */ + wi_pack ((unsigned HOST_WIDE_INT *) val, r, 2 * half_blocks_needed); + return canonize (val, blocks_needed * 2, prec * 2); + } + else if (high) + { + /* compute [prec] <- ([prec] * [prec]) >> [prec] */ + wi_pack ((unsigned HOST_WIDE_INT *) val, + &r[half_blocks_needed], half_blocks_needed); + return canonize (val, blocks_needed, prec); + } + else + { + /* compute [prec] <- ([prec] * [prec]) && ((1 << [prec]) - 1) */ + wi_pack ((unsigned HOST_WIDE_INT *) val, r, half_blocks_needed); + return canonize (val, blocks_needed, prec); + } +} + +/* Compute the population count of X. 
 */
int
wi::popcount (const wide_int_ref &x)
{
  unsigned int i;
  int count;

  if (x.precision == 0)
    return 0;

  /* The high order block is special if it is the last block and the
     precision is not an even multiple of HOST_BITS_PER_WIDE_INT.  We
     have to clear out any ones above the precision before doing
     popcount on this block.  */
  count = x.precision - x.len * HOST_BITS_PER_WIDE_INT;
  unsigned int stop = x.len;
  if (count < 0)
    {
      /* Shift the excess bits out of the top block before counting.  */
      count = popcount_hwi (x.uhigh () << -count);
      stop -= 1;
    }
  else
    {
      /* A non-negative value has implicit zero blocks above x.len,
	 which contribute nothing; a negative value's implicit ones are
	 accounted for by the initial COUNT computed above.  */
      if (x.sign_mask () >= 0)
	count = 0;
    }

  for (i = 0; i < stop; ++i)
    count += popcount_hwi (x.val[i]);

  return count;
}

/* Set VAL to OP0 - OP1.  If OVERFLOW is nonnull, record in *OVERFLOW
   whether the result overflows when OP0 and OP1 are treated as having
   signedness SGN.  Return the number of blocks in VAL.  */
unsigned int
wi::sub_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
	       unsigned int op0len, const HOST_WIDE_INT *op1,
	       unsigned int op1len, unsigned int prec,
	       signop sgn, bool *overflow)
{
  unsigned HOST_WIDE_INT o0 = 0;
  unsigned HOST_WIDE_INT o1 = 0;
  unsigned HOST_WIDE_INT x = 0;
  /* We implement subtraction as an in place negate and add.  Negation
     is just inversion and add 1, so we can do the add of 1 by just
     starting the borrow in of the first element at 1.  */
  unsigned HOST_WIDE_INT borrow = 0;
  unsigned HOST_WIDE_INT old_borrow = 0;

  unsigned HOST_WIDE_INT mask0, mask1;
  unsigned int i, small_prec;

  unsigned int len = MAX (op0len, op1len);
  mask0 = -top_bit_of (op0, op0len);
  mask1 = -top_bit_of (op1, op1len);

  /* Subtract all of the explicitly defined elements.  */
  for (i = 0; i < len; i++)
    {
      o0 = i < op0len ? (unsigned HOST_WIDE_INT)op0[i] : mask0;
      o1 = i < op1len ? (unsigned HOST_WIDE_INT)op1[i] : mask1;
      x = o0 - o1 - borrow;
      val[i] = x;
      old_borrow = borrow;
      borrow = borrow == 0 ? o0 < o1 : o0 <= o1;
    }

  if (len * HOST_BITS_PER_WIDE_INT < prec)
    {
      /* The borrow reaches a block neither operand stored; no
	 overflow is possible within the precision.  */
      val[len] = mask0 - mask1 - borrow;
      len++;
      if (overflow)
	*overflow = false;
    }
  else if (overflow)
    {
      if (sgn == SIGNED)
	{
	  /* Signed overflow iff the operands have different signs and
	     the result's sign differs from OP0's; P is the sign bit.  */
	  unsigned int p = (len == BLOCKS_NEEDED (prec)
			    ? HOST_BITS_PER_WIDE_INT
			    : prec & (HOST_BITS_PER_WIDE_INT - 1) ) - 1;
	  HOST_WIDE_INT x = (((o0 ^ o1) & (val[len - 1] ^ o0)) >> p) & 1;
	  *overflow = (x != 0);
	}
      else
	{
	  if (old_borrow)
	    *overflow = ((unsigned HOST_WIDE_INT) val[len - 1] >= o0);
	  else
	    *overflow = ((unsigned HOST_WIDE_INT) val[len - 1] > o0);
	}
    }

  /* Canonize the top of the top block.  */
  small_prec = prec & (HOST_BITS_PER_WIDE_INT - 1);
  if (small_prec != 0 && BLOCKS_NEEDED (prec) == len)
    {
      /* Modes with weird precisions.  */
      i = len - 1;
      val[i] = sext_hwi (val[i], small_prec);
    }

  return canonize (val, len, prec);
}


/*
 * Division and Mod
 */

/* Compute B_QUOTIENT and B_REMAINDER from B_DIVIDEND/B_DIVISOR.  The
   algorithm is a small modification of the algorithm in Hacker's
   Delight by Warren, which itself is a small modification of Knuth's
   algorithm.  M is the number of significant elements of U however
   there needs to be at least one extra element of B_DIVIDEND
   allocated, N is the number of elements of B_DIVISOR.  */
static void
divmod_internal_2 (unsigned HOST_HALF_WIDE_INT *b_quotient,
		   unsigned HOST_HALF_WIDE_INT *b_remainder,
		   unsigned HOST_HALF_WIDE_INT *b_dividend,
		   unsigned HOST_HALF_WIDE_INT *b_divisor,
		   unsigned int m, unsigned int n)
{
  /* The "digits" are a HOST_HALF_WIDE_INT which the size of half of a
     HOST_WIDE_INT and stored in the lower bits of each word.  This
     algorithm should work properly on both 32 and 64 bit
     machines.  */
  unsigned HOST_WIDE_INT b
    = (unsigned HOST_WIDE_INT)1 << HOST_BITS_PER_HALF_WIDE_INT;
  unsigned HOST_WIDE_INT qhat;   /* Estimate of quotient digit.  */
  unsigned HOST_WIDE_INT rhat;   /* A remainder.  */
  unsigned HOST_WIDE_INT p;      /* Product of two digits.  */
  HOST_WIDE_INT s, i, j, t, k;

  /* Single digit divisor.  */
  if (n == 1)
    {
      k = 0;
      for (j = m - 1; j >= 0; j--)
	{
	  b_quotient[j] = (k * b + b_dividend[j])/b_divisor[0];
	  k = ((k * b + b_dividend[j])
	       - ((unsigned HOST_WIDE_INT)b_quotient[j]
		  * (unsigned HOST_WIDE_INT)b_divisor[0]));
	}
      b_remainder[0] = k;
      return;
    }

  s = clz_hwi (b_divisor[n-1]) - HOST_BITS_PER_HALF_WIDE_INT; /* CHECK clz */

  if (s)
    {
      /* Normalize B_DIVIDEND and B_DIVISOR.  Unlike the published
	 algorithm, we can overwrite b_dividend and b_divisor, so we do
	 that.  */
      for (i = n - 1; i > 0; i--)
	b_divisor[i] = (b_divisor[i] << s)
	  | (b_divisor[i-1] >> (HOST_BITS_PER_HALF_WIDE_INT - s));
      b_divisor[0] = b_divisor[0] << s;

      b_dividend[m] = b_dividend[m-1] >> (HOST_BITS_PER_HALF_WIDE_INT - s);
      for (i = m - 1; i > 0; i--)
	b_dividend[i] = (b_dividend[i] << s)
	  | (b_dividend[i-1] >> (HOST_BITS_PER_HALF_WIDE_INT - s));
      b_dividend[0] = b_dividend[0] << s;
    }

  /* Main loop.  */
  for (j = m - n; j >= 0; j--)
    {
      /* Estimate the quotient digit and correct it if it is at most
	 two too large (Knuth Algorithm D step D3).  */
      qhat = (b_dividend[j+n] * b + b_dividend[j+n-1]) / b_divisor[n-1];
      rhat = (b_dividend[j+n] * b + b_dividend[j+n-1]) - qhat * b_divisor[n-1];
    again:
      if (qhat >= b || qhat * b_divisor[n-2] > b * rhat + b_dividend[j+n-2])
	{
	  qhat -= 1;
	  rhat += b_divisor[n-1];
	  if (rhat < b)
	    goto again;
	}

      /* Multiply and subtract.  */
      k = 0;
      for (i = 0; i < n; i++)
	{
	  p = qhat * b_divisor[i];
	  t = b_dividend[i+j] - k - (p & HALF_INT_MASK);
	  b_dividend[i + j] = t;
	  k = ((p >> HOST_BITS_PER_HALF_WIDE_INT)
	       - (t >> HOST_BITS_PER_HALF_WIDE_INT));
	}
      t = b_dividend[j+n] - k;
      b_dividend[j+n] = t;

      b_quotient[j] = qhat;
      if (t < 0)
	{
	  /* The estimate was one too large; add the divisor back
	     (step D6, taken rarely).  */
	  b_quotient[j] -= 1;
	  k = 0;
	  for (i = 0; i < n; i++)
	    {
	      t = (HOST_WIDE_INT)b_dividend[i+j] + b_divisor[i] + k;
	      b_dividend[i+j] = t;
	      k = t >> HOST_BITS_PER_HALF_WIDE_INT;
	    }
	  b_dividend[j+n] += k;
	}
    }
  /* Denormalize the remainder.  */
  if (s)
    for (i = 0; i < n; i++)
      b_remainder[i] = (b_dividend[i] >> s)
	| (b_dividend[i+1] << (HOST_BITS_PER_HALF_WIDE_INT - s));
  else
    for (i = 0; i < n; i++)
      b_remainder[i] = b_dividend[i];
}


/* Divide DIVIDEND by DIVISOR, which have signedness SGN, and truncate
   the result.  If QUOTIENT is nonnull, store the value of the quotient
   there and return the number of blocks in it.  The return value is
   not defined otherwise.  If REMAINDER is nonnull, store the value
   of the remainder there and store the number of blocks in
   *REMAINDER_LEN.  If OFLOW is not null, store in *OFLOW whether
   the division overflowed.
*/ +unsigned int +wi::divmod_internal (HOST_WIDE_INT *quotient, unsigned int *remainder_len, + HOST_WIDE_INT *remainder, const HOST_WIDE_INT *dividend, + unsigned int dividend_len, unsigned int dividend_prec, + const HOST_WIDE_INT *divisor, unsigned int divisor_len, + unsigned int divisor_prec, signop sgn, + bool *oflow) +{ + unsigned int dividend_blocks_needed = 2 * BLOCKS_NEEDED (dividend_prec); + unsigned int divisor_blocks_needed = 2 * BLOCKS_NEEDED (divisor_prec); + unsigned HOST_HALF_WIDE_INT + b_quotient[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT]; + unsigned HOST_HALF_WIDE_INT + b_remainder[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT]; + unsigned HOST_HALF_WIDE_INT + b_dividend[(4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT) + 1]; + unsigned HOST_HALF_WIDE_INT + b_divisor[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT]; + unsigned int m, n; + HOST_WIDE_INT u0[WIDE_INT_MAX_ELTS]; + HOST_WIDE_INT u1[WIDE_INT_MAX_ELTS]; + bool dividend_neg = false; + bool divisor_neg = false; + bool overflow = false; + + if (divisor[0] == 0 && divisor_len == 1) + overflow = true; + + /* The smallest signed number / -1 causes overflow. */ + if (sgn == SIGNED) + { + HOST_WIDE_INT small_prec = dividend_prec & (HOST_BITS_PER_WIDE_INT - 1); + if (dividend_len == BLOCKS_NEEDED (dividend_prec) + && divisor_len == 1 + && divisor[0] == HOST_WIDE_INT(-1)) + + if ((small_prec + && ((HOST_WIDE_INT)zext_hwi (dividend[dividend_len - 1], + small_prec) + == (HOST_WIDE_INT(1) << (small_prec - 1)))) + || dividend[dividend_len - 1] + == HOST_WIDE_INT(1) << (HOST_BITS_PER_WIDE_INT - 1)) + { + /* The smallest neg number is 100...00. The high word was + checked above, now check the rest of the words are + zero. */ + unsigned int i; + bool all_zero = true; + for (i = 0; i + 1 < dividend_len; i++) + if (dividend[i] != 0) + { + all_zero = false; + break; + } + if (all_zero) + overflow = true; + } + } + + /* If overflow is set, just get out. 
There will only be grief by + continuing. */ + if (overflow) + { + if (remainder) + { + *remainder_len = 1; + remainder[0] = 0; + } + if (oflow != 0) + *oflow = true; + if (quotient) + quotient[0] = 0; + return 1; + } + + if (oflow) + *oflow = false; + + /* Do it on the host if you can. */ + if (dividend_prec <= HOST_BITS_PER_WIDE_INT + && divisor_prec <= HOST_BITS_PER_WIDE_INT) + { + if (sgn == SIGNED) + { + HOST_WIDE_INT o0 = sext_hwi (dividend[0], dividend_prec); + HOST_WIDE_INT o1 = sext_hwi (divisor[0], divisor_prec); + + if (quotient) + quotient[0] = sext_hwi (o0 / o1, dividend_prec); + if (remainder) + { + remainder[0] = sext_hwi (o0 % o1, dividend_prec); + *remainder_len = 1; + } + } + else + { + unsigned HOST_WIDE_INT o0 = zext_hwi (dividend[0], dividend_prec); + unsigned HOST_WIDE_INT o1 = zext_hwi (divisor[0], divisor_prec); + + if (quotient) + quotient[0] = sext_hwi (o0 / o1, dividend_prec); + if (remainder) + { + remainder[0] = sext_hwi (o0 % o1, dividend_prec); + *remainder_len = 1; + } + } + + return 1; + } + + /* Make the divisor and dividend positive and remember what we + did. 
*/ + if (sgn == SIGNED) + { + if (top_bit_of (dividend, dividend_len)) + { + dividend_len = wi::sub_large (u0, zeros, 1, dividend, dividend_len, + dividend_prec, UNSIGNED, 0); + dividend = u0; + dividend_neg = true; + } + if (top_bit_of (divisor, divisor_len)) + { + divisor_len = wi::sub_large (u1, zeros, 1, divisor, divisor_len, + divisor_prec, UNSIGNED, 0); + divisor = u1; + divisor_neg = true; + } + } + + wi_unpack (b_dividend, (const unsigned HOST_WIDE_INT*)dividend, + dividend_len, dividend_blocks_needed, dividend_prec, sgn); + wi_unpack (b_divisor, (const unsigned HOST_WIDE_INT*)divisor, + divisor_len, divisor_blocks_needed, divisor_prec, sgn); + + if (top_bit_of (dividend, dividend_len) && sgn == SIGNED) + m = dividend_blocks_needed; + else + m = 2 * dividend_len; + + if (top_bit_of (divisor, divisor_len) && sgn == SIGNED) + n = divisor_blocks_needed; + else + n = 2 * divisor_len; + + /* We need to find the top non zero block of b_divisor. At most the + top two blocks are zero. */ + if (b_divisor[n - 1] == 0) + n--; + if (b_divisor[n - 1] == 0) + n--; + + memset (b_quotient, 0, sizeof (b_quotient)); + + divmod_internal_2 (b_quotient, b_remainder, b_dividend, b_divisor, m, n); + + unsigned int quotient_len = 0; + if (quotient) + { + wi_pack ((unsigned HOST_WIDE_INT *) quotient, b_quotient, m); + quotient_len = canonize (quotient, (m + 1) / 2, dividend_prec); + /* The quotient is neg if exactly one of the divisor or dividend is + neg. */ + if (dividend_neg != divisor_neg) + quotient_len = wi::sub_large (quotient, zeros, 1, quotient, + quotient_len, dividend_prec, + UNSIGNED, 0); + } + + if (remainder) + { + wi_pack ((unsigned HOST_WIDE_INT *) remainder, b_remainder, n); + *remainder_len = canonize (remainder, (n + 1) / 2, dividend_prec); + /* The remainder is always the same sign as the dividend. 
*/ + if (dividend_neg) + *remainder_len = wi::sub_large (remainder, zeros, 1, remainder, + *remainder_len, dividend_prec, + UNSIGNED, 0); + } + + return quotient_len; +} + +/* + * Shifting, rotating and extraction. + */ + +/* Left shift XVAL by SHIFT and store the result in VAL. Return the + number of blocks in VAL. Both XVAL and VAL have PRECISION bits. */ +unsigned int +wi::lshift_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval, + unsigned int xlen, unsigned int precision, + unsigned int shift) +{ + /* Split the shift into a whole-block shift and a subblock shift. */ + unsigned int skip = shift / HOST_BITS_PER_WIDE_INT; + unsigned int small_shift = shift % HOST_BITS_PER_WIDE_INT; + + /* The whole-block shift fills with zeros. */ + unsigned int len = BLOCKS_NEEDED (precision); + for (unsigned int i = 0; i < skip; ++i) + val[i] = 0; + + /* It's easier to handle the simple block case specially. */ + if (small_shift == 0) + for (unsigned int i = skip; i < len; ++i) + val[i] = safe_uhwi (xval, xlen, i - skip); + else + { + /* The first unfilled output block is a left shift of the first + block in XVAL. The other output blocks contain bits from two + consecutive input blocks. */ + unsigned HOST_WIDE_INT carry = 0; + for (unsigned int i = skip; i < len; ++i) + { + unsigned HOST_WIDE_INT x = safe_uhwi (xval, xlen, i - skip); + val[i] = (x << small_shift) | carry; + carry = x >> (-small_shift % HOST_BITS_PER_WIDE_INT); + } + } + return canonize (val, len, precision); +} + +/* Right shift XVAL by SHIFT and store the result in VAL. Return the + number of blocks in VAL. The input has XPRECISION bits and the + output has XPRECISION - SHIFT bits. */ +static unsigned int +rshift_large_common (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval, + unsigned int xlen, unsigned int xprecision, + unsigned int shift) +{ + /* Split the shift into a whole-block shift and a subblock shift. 
*/ + unsigned int skip = shift / HOST_BITS_PER_WIDE_INT; + unsigned int small_shift = shift % HOST_BITS_PER_WIDE_INT; + + /* Work out how many blocks are needed to store the significant bits + (excluding the upper zeros or signs). */ + unsigned int len = BLOCKS_NEEDED (xprecision - shift); + + /* It's easier to handle the simple block case specially. */ + if (small_shift == 0) + for (unsigned int i = 0; i < len; ++i) + val[i] = safe_uhwi (xval, xlen, i + skip); + else + { + /* Each output block but the last is a combination of two input blocks. + The last block is a right shift of the last block in XVAL. */ + unsigned HOST_WIDE_INT curr = safe_uhwi (xval, xlen, skip); + for (unsigned int i = 0; i < len; ++i) + { + val[i] = curr >> small_shift; + curr = safe_uhwi (xval, xlen, i + skip + 1); + val[i] |= curr << (-small_shift % HOST_BITS_PER_WIDE_INT); + } + } + return len; +} + +/* Logically right shift XVAL by SHIFT and store the result in VAL. + Return the number of blocks in VAL. XVAL has XPRECISION bits and + VAL has PRECISION bits. */ +unsigned int +wi::lrshift_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval, + unsigned int xlen, unsigned int xprecision, + unsigned int precision, unsigned int shift) +{ + unsigned int len = rshift_large_common (val, xval, xlen, xprecision, shift); + + /* The value we just created has precision XPRECISION - SHIFT. + Zero-extend it to wider precisions. */ + if (precision > xprecision - shift) + { + unsigned int small_prec = (xprecision - shift) % HOST_BITS_PER_WIDE_INT; + if (small_prec) + val[len - 1] = zext_hwi (val[len - 1], small_prec); + else if (val[len - 1] < 0) + { + /* Add a new block with a zero. */ + val[len++] = 0; + return len; + } + } + return canonize (val, len, precision); +} + +/* Arithmetically right shift XVAL by SHIFT and store the result in VAL. + Return the number of blocks in VAL. XVAL has XPRECISION bits and + VAL has PRECISION bits. 
*/ +unsigned int +wi::arshift_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval, + unsigned int xlen, unsigned int xprecision, + unsigned int precision, unsigned int shift) +{ + unsigned int len = rshift_large_common (val, xval, xlen, xprecision, shift); + + /* The value we just created has precision XPRECISION - SHIFT. + Sign-extend it to wider types. */ + if (precision > xprecision - shift) + { + unsigned int small_prec = (xprecision - shift) % HOST_BITS_PER_WIDE_INT; + if (small_prec) + val[len - 1] = sext_hwi (val[len - 1], small_prec); + } + return canonize (val, len, precision); +} + +/* Return the number of leading (upper) zeros in X. */ +int +wi::clz (const wide_int_ref &x) +{ + /* Calculate how many bits there above the highest represented block. */ + int count = x.precision - x.len * HOST_BITS_PER_WIDE_INT; + + unsigned HOST_WIDE_INT high = x.uhigh (); + if (count < 0) + /* The upper -COUNT bits of HIGH are not part of the value. + Clear them out. */ + high = (high << -count) >> -count; + else if (x.sign_mask () < 0) + /* The upper bit is set, so there are no leading zeros. */ + return 0; + + /* Check whether the value is zero. */ + if (high == 0 && x.len == 1) + return clz_zero (x.precision); + + /* We don't need to look below HIGH. Either HIGH is nonzero, + or the top bit of the block below is nonzero; clz_hwi is + HOST_BITS_PER_WIDE_INT in the latter case. */ + return count + clz_hwi (high); +} + +/* Return the number of redundant sign bits in X. (That is, the number + of bits immediately below the sign bit that have the same value as + the sign bit.) */ +int +wi::clrsb (const wide_int_ref &x) +{ + /* Calculate how many bits there above the highest represented block. */ + int count = x.precision - x.len * HOST_BITS_PER_WIDE_INT; + + unsigned HOST_WIDE_INT high = x.uhigh (); + unsigned HOST_WIDE_INT mask = -1; + if (count < 0) + { + /* The upper -COUNT bits of HIGH are not part of the value. + Clear them from both MASK and HIGH. 
*/ + mask >>= -count; + high &= mask; + } + + /* If the top bit is 1, count the number of leading 1s. If the top + bit is zero, count the number of leading zeros. */ + if (high > mask / 2) + high ^= mask; + + /* There are no sign bits below the top block, so we don't need to look + beyond HIGH. Note that clz_hwi is HOST_BITS_PER_WIDE_INT when + HIGH is 0. */ + return count + clz_hwi (high) - 1; +} + +/* Return the number of trailing (lower) zeros in X. */ +int +wi::ctz (const wide_int_ref &x) +{ + if (x.len == 1 && x.ulow () == 0) + return ctz_zero (x.precision); + + /* Having dealt with the zero case, there must be a block with a + nonzero bit. We don't care about the bits above the first 1. */ + unsigned int i = 0; + while (x.val[i] == 0) + ++i; + return i * HOST_BITS_PER_WIDE_INT + ctz_hwi (x.val[i]); +} + +/* If X is an exact power of 2, return the base-2 logarithm, otherwise + return -1. */ +int +wi::exact_log2 (const wide_int_ref &x) +{ + /* 0-precision values can only hold 0. */ + if (x.precision == 0) + return -1; + + /* Reject cases where there are implicit -1 blocks above HIGH. */ + if (x.len * HOST_BITS_PER_WIDE_INT < x.precision && x.sign_mask () < 0) + return -1; + + /* Set CRUX to the index of the entry that should be nonzero. + If the top block is zero then the next lowest block (if any) + must have the high bit set. */ + unsigned int crux = x.len - 1; + if (crux > 0 && x.val[crux] == 0) + crux -= 1; + + /* Check that all lower blocks are zero. */ + for (unsigned int i = 0; i < crux; ++i) + if (x.val[i] != 0) + return -1; + + /* Get a zero-extended form of block CRUX. */ + unsigned HOST_WIDE_INT hwi = x.val[crux]; + if (crux * HOST_BITS_PER_WIDE_INT > x.precision) + hwi = zext_hwi (hwi, x.precision % HOST_BITS_PER_WIDE_INT); + + /* Now it's down to whether HWI is a power of 2. */ + int res = ::exact_log2 (hwi); + if (res >= 0) + res += crux * HOST_BITS_PER_WIDE_INT; + return res; +} + +/* Return the base-2 logarithm of X, rounding down. 
Return -1 if X is 0. */ +int +wi::floor_log2 (const wide_int_ref &x) +{ + return x.precision - 1 - clz (x); +} + +/* Return the index of the first (lowest) set bit in X, counting from 1. + Return 0 if X is 0. */ +int +wi::ffs (const wide_int_ref &x) +{ + return eq_p (x, 0) ? 0 : ctz (x) + 1; +} + +/* Return true if sign-extending X to have precision PRECISION would give + the minimum signed value at that precision. */ +bool +wi::only_sign_bit_p (const wide_int_ref &x, unsigned int precision) +{ + return ctz (x) + 1 == int (precision); +} + +/* Return true if X represents the minimum signed value. */ +bool +wi::only_sign_bit_p (const wide_int_ref &x) +{ + return only_sign_bit_p (x, x.precision); +} + +/* + * Private utilities. + */ + +void gt_ggc_mx(max_wide_int*) { } +void gt_pch_nx(max_wide_int*,void (*)(void*, void*), void*) { } +void gt_pch_nx(max_wide_int*) { } + +/* + * Private debug printing routines. + */ +#ifdef DEBUG_WIDE_INT +/* The debugging routines print results of wide operations into the + dump files of the respective passes in which they were called. */ +static char * +dumpa (const HOST_WIDE_INT *val, unsigned int len, unsigned int prec, char *buf) +{ + int i; + unsigned int l; + const char * sep = ""; + + l = sprintf (buf, "[%d (", prec); + for (i = len - 1; i >= 0; i--) + { + l += sprintf (&buf[l], "%s" HOST_WIDE_INT_PRINT_HEX, sep, val[i]); + sep = " "; + } + + gcc_assert (len != 0); + + l += sprintf (&buf[l], ")]"); + + gcc_assert (l < MAX_SIZE); + return buf; + + +} +#endif + +#if 0 +/* The debugging routines print results of wide operations into the + dump files of the respective passes in which they were called. 
*/ +char * +wide_int_ro::dump (char* buf) const +{ + int i; + unsigned int l; + const char * sep = ""; + + l = sprintf (buf, "[%d (", precision); + for (i = len - 1; i >= 0; i--) + { + l += sprintf (&buf[l], "%s" HOST_WIDE_INT_PRINT_HEX, sep, val[i]); + sep = " "; + } + + gcc_assert (len != 0); + + l += sprintf (&buf[l], ")]"); + + gcc_assert (l < MAX_SIZE); + return buf; +} +#endif + +HOST_WIDE_INT foo (tree x) +{ + addr_wide_int y = x; + addr_wide_int z = y; + return z.to_shwi (); +} diff --git a/gcc/wide-int.h b/gcc/wide-int.h new file mode 100644 index 00000000000..504e0564abe --- /dev/null +++ b/gcc/wide-int.h @@ -0,0 +1,2784 @@ +/* Operations with very long integers. -*- C++ -*- + Copyright (C) 2012-2013 Free Software Foundation, Inc. + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify it +under the terms of the GNU General Public License as published by the +Free Software Foundation; either version 3, or (at your option) any +later version. + +GCC is distributed in the hope that it will be useful, but WITHOUT +ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING3. If not see +<http://www.gnu.org/licenses/>. */ + +#ifndef WIDE_INT_H +#define WIDE_INT_H + +/* wide-int.[cc|h] implements a class that efficiently performs + mathematical operations on finite precision integers. wide_ints + are designed to be transient - they are not for long term storage + of values. There is tight integration between wide_ints and the + other longer storage GCC representations (rtl and tree). + + The actual precision of a wide_int depends on the flavor. There + are three predefined flavors: + + 1) wide_int (the default). This flavor does the math in the + precision of its input arguments. 
It is assumed (and checked) + that the precisions of the operands and results are consistent. + This is the most efficient flavor. It is not possible to examine + bits above the precision that has been specified. Because of + this, the default flavor has semantics that are simple to + understand and in general model the underlying hardware that the + compiler is targetted for. + + This flavor must be used at the RTL level of gcc because there + is, in general, not enough information in the RTL representation + to extend a value beyond the precision specified in the mode. + + This flavor should also be used at the TREE and GIMPLE levels of + the compiler except for the circumstances described in the + descriptions of the other two flavors. + + The default wide_int representation does not contain any + information inherent about signedness of the represented value, + so it can be used to represent both signed and unsigned numbers. + For operations where the results depend on signedness (full width + multiply, division, shifts, comparisons, and operations that need + overflow detected), the signedness must be specified separately. + + 2) addr_wide_int. This is a fixed size representation that is + guaranteed to be large enough to compute any bit or byte sized + address calculation on the target. Currently the value is 64 + 4 + bits rounded up to the next number even multiple of + HOST_BITS_PER_WIDE_INT (but this can be changed when the first + port needs more than 64 bits for the size of a pointer). + + This flavor can be used for all address math on the target. In + this representation, the values are sign or zero extended based + on their input types to the internal precision. All math is done + in this precision and then the values are truncated to fit in the + result type. 
Unlike most gimple or rtl intermediate code, it is + not useful to perform the address arithmetic at the same + precision in which the operands are represented because there has + been no effort by the front ends to convert most addressing + arithmetic to canonical types. + + In the addr_wide_int, all numbers are represented as signed + numbers. There are enough bits in the internal representation so + that no infomation is lost by representing them this way. + + 3) max_wide_int. This representation is an approximation of + infinite precision math. However, it is not really infinite + precision math as in the GMP library. It is really finite + precision math where the precision is 4 times the size of the + largest integer that the target port can represent. + + Like, the addr_wide_ints, all numbers are inherently signed. + + There are several places in the GCC where this should/must be used: + + * Code that does widening conversions. The canonical way that + this is performed is to sign or zero extend the input value to + the max width based on the sign of the type of the source and + then to truncate that value to the target type. This is in + preference to using the sign of the target type to extend the + value directly (which gets the wrong value for the conversion + of large unsigned numbers to larger signed types). + + * Code that does induction variable optimizations. This code + works with induction variables of many different types at the + same time. Because of this, it ends up doing many different + calculations where the operands are not compatible types. The + max_wide_int makes this easy, because it provides a field where + nothing is lost when converting from any variable, + + * There are a small number of passes that currently use the + max_wide_int that should use the default. These should be + changed. 
+ + There are surprising features of addr_wide_int and max_wide_int + that the users should be careful about: + + 1) Shifts and rotations are just weird. You have to specify a + precision in which the shift or rotate is to happen in. The bits + above this precision remain unchanged. While this is what you + want, it is clearly is non obvious. + + 2) Larger precision math sometimes does not produce the same + answer as would be expected for doing the math at the proper + precision. In particular, a multiply followed by a divide will + produce a different answer if the first product is larger than + what can be represented in the input precision. + + The addr_wide_int and the max_wide_int flavors are more expensive + than the default wide int, so in addition to the caveats with these + two, the default is the prefered representation. + + All three flavors of wide_int are represented as a vector of + HOST_WIDE_INTs. The vector contains enough elements to hold a + value of MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_WIDE_INT which is + a derived for each host/target combination. The values are stored + in the vector with the least significant HOST_BITS_PER_WIDE_INT + bits of the value stored in element 0. + + A wide_int contains three fields: the vector (VAL), precision and a + length (LEN). The length is the number of HWIs needed to + represent the value. For the max_wide_int and the addr_wide_int, + the precision is a constant that cannot be changed. For the + default wide_int, the precision is set from the constructor. + + Since most integers used in a compiler are small values, it is + generally profitable to use a representation of the value that is + as small as possible. LEN is used to indicate the number of + elements of the vector that are in use. The numbers are stored as + sign extended numbers as a means of compression. 
Leading + HOST_WIDE_INTs that contain strings of either -1 or 0 are removed + as long as they can be reconstructed from the top bit that is being + represented. + + There are constructors to create the various forms of wide_int from + trees, rtl and constants. For trees and constants, you can simply say: + + tree t = ...; + wide_int x = t; + wide_int y = 6; + + However, a little more syntax is required for rtl constants since + they do have an explicit precision. To make an rtl into a + wide_int, you have to pair it with a mode. The canonical way to do + this is with std::make_pair as in: + + rtx r = ... + wide_int x = std::make_pair (r, mode); + + Wide ints sometimes have a value with the precision of 0. These + come from two separate sources: + + * The front ends do sometimes produce values that really have a + precision of 0. The only place where these seem to come in are + the MIN and MAX value for types with a precision of 0. Asside + from the computation of these MIN and MAX values, there appears + to be no other use of true precision 0 numbers so the overloading + of precision 0 does not appear to be an issue. These appear to + be associated with 0 width bit fields. They are harmless, but + there are several paths through the wide int code to support this + without having to special case the front ends. + + * When a constant that has an integer type is converted to a + wide_int it comes in with precision 0. For these constants the + top bit does accurately reflect the sign of that constant; this + is an exception to the normal rule that the signedness is not + represented. When used in a binary operation, the wide_int + implementation properly extends these constants so that they + properly match the other operand of the computation. This allows + you write: + + tree t = ... + wide_int x = t + 6; + + assuming t is a int_cst. 
+ + Note, the bits past the precision up to the nearest HOST_WIDE_INT + boundary are defined to be copies of the top bit of the value, + however the bits above those defined bits not defined and the + algorithms used here are careful not to depend on their value. In + particular, values that come in from rtx constants may have random + bits. When the precision is 0, all the bits in the LEN elements of + VEC are significant with no undefined bits. Precisionless + constants are limited to being one or two HOST_WIDE_INTs. When two + are used the upper value is 0, and the high order bit of the first + value is set. (Note that this may need to be generalized if it is + ever necessary to support 32bit HWIs again). + + Many binary operations require that the precisions of the two + operands be the same. However, the API tries to keep this relaxed + as much as possible. In particular: + + * shifts do not care about the precision of the second operand. + + * values that come in from gcc source constants or variables are + not checked as long one of the two operands has a precision. + This is allowed because it is always known whether to sign or zero + extend these values. + + * The comparisons do not require that the operands be the same + length. This allows wide ints to be used in hash tables where + all of the values may not be the same precision. */ + + +#include <utility> +#include "system.h" +#include "hwint.h" +#include "signop.h" +#include "insn-modes.h" + +#if 0 +#define DEBUG_WIDE_INT +#endif + +/* The MAX_BITSIZE_MODE_ANY_INT is automatically generated by a very + early examination of the target's mode file. Thus it is safe that + some small multiple of this number is easily larger than any number + that that target could compute. The place in the compiler that + currently needs the widest ints is the code that determines the + range of a multiply. This code needs 2n + 2 bits. 
*/ + +#define WIDE_INT_MAX_ELTS \ + ((4 * MAX_BITSIZE_MODE_ANY_INT + HOST_BITS_PER_WIDE_INT - 1) \ + / HOST_BITS_PER_WIDE_INT) + +/* This is the max size of any pointer on any machine. It does not + seem to be as easy to sniff this out of the machine description as + it is for MAX_BITSIZE_MODE_ANY_INT since targets may support + multiple address sizes and may have different address sizes for + different address spaces. However, currently the largest pointer + on any platform is 64 bits. When that changes, then it is likely + that a target hook should be defined so that targets can make this + value larger for those targets. */ +#define ADDR_MAX_BITSIZE 64 + +/* This is the internal precision used when doing any address + arithmetic. The '4' is really 3 + 1. Three of the bits are for + the number of extra bits needed to do bit addresses and single bit is + allow everything to be signed without loosing any precision. Then + everything is rounded up to the next HWI for efficiency. */ +#define ADDR_MAX_PRECISION \ + ((ADDR_MAX_BITSIZE + 4 + HOST_BITS_PER_WIDE_INT - 1) & ~(HOST_BITS_PER_WIDE_INT - 1)) + +namespace wi +{ + /* Classifies an integer based on its precision. */ + enum precision_type { + /* The integer has both a precision and defined signedness. This allows + the integer to be converted to any width, since we know whether to fill + any extra bits with zeros or signs. */ + FLEXIBLE_PRECISION, + + /* The integer has a variable precision but no defined signedness. */ + VAR_PRECISION, + + /* The integer has a constant precision (known at GCC compile time) + but no defined signedness. */ + CONST_PRECISION + }; + + /* This class, which has no default implementation, is expected to + provide the following members: + + static const enum precision_type precision_type; + Classifies the type of T. + + static const unsigned int precision; + Only defined if precision_type == CONST_PRECISION. Specifies the + precision of all integers of type T. 
+ + static const bool host_dependent_precision; + True if the precision of T depends (or can depend) on the host. + + static unsigned int get_precision (const T &x) + Return the number of bits in X. + + static wi::storage_ref *decompose (HOST_WIDE_INT *scratch, + unsigned int precision, const T &x) + Decompose X as a PRECISION-bit integer, returning the associated + wi::storage_ref. SCRATCH is available as scratch space if needed. + The routine should assert that PRECISION is acceptable. */ + template <typename T> struct int_traits; + + /* This class provides a single type, result_type, which specifies the + type of integer produced by a binary operation whose inputs have + types T1 and T2. The definition should be symmetric. */ + template <typename T1, typename T2, + enum precision_type P1 = int_traits <T1>::precision_type, + enum precision_type P2 = int_traits <T2>::precision_type> + struct binary_traits; + + /* The result of a unary operation on T is the same as the result of + a binary operation on two values of type T. */ + template <typename T> + struct unary_traits : public binary_traits <T, T> {}; +} + +/* The type of result produced by a binary operation on types T1 and T2. + Defined purely for brevity. */ +#define WI_BINARY_RESULT(T1, T2) \ + typename wi::binary_traits <T1, T2>::result_type + +/* The type of result produced by a unary operation on type T. */ +#define WI_UNARY_RESULT(T) \ + typename wi::unary_traits <T>::result_type + +/* Define a variable RESULT to hold the result of a binary operation on + X and Y, which have types T1 and T2 respectively. Define VAR to + point to the blocks of RESULT. Once the user of the macro has + filled in VAR, it should call RESULT.set_len to set the number + of initialized blocks. 
*/ +#define WI_BINARY_RESULT_VAR(RESULT, VAL, T1, X, T2, Y) \ + WI_BINARY_RESULT (T1, T2) RESULT = \ + wi::int_traits <WI_BINARY_RESULT (T1, T2)>::get_binary_result (X, Y); \ + HOST_WIDE_INT *VAL = RESULT.write_val () + +/* Similar for the result of a unary operation on X, which has type T. */ +#define WI_UNARY_RESULT_VAR(RESULT, VAL, T, X) \ + WI_UNARY_RESULT (T) RESULT = \ + wi::int_traits <WI_UNARY_RESULT (T)>::get_binary_result (X, X); \ + HOST_WIDE_INT *VAL = RESULT.write_val () + +template <typename T> struct generic_wide_int; + +struct wide_int_storage; +typedef generic_wide_int <wide_int_storage> wide_int; + +struct wide_int_ref_storage; +typedef generic_wide_int <wide_int_ref_storage> wide_int_ref; + +/* Public functions for querying and operating on integers. */ +namespace wi +{ + template <typename T> + unsigned int get_precision (const T &); + + template <typename T1, typename T2> + unsigned int get_binary_precision (const T1 &, const T2 &); + + bool fits_shwi_p (const wide_int_ref &); + bool fits_uhwi_p (const wide_int_ref &); + bool neg_p (const wide_int_ref &, signop = SIGNED); + bool only_sign_bit_p (const wide_int_ref &, unsigned int); + bool only_sign_bit_p (const wide_int_ref &); + HOST_WIDE_INT sign_mask (const wide_int_ref &); + + template <typename T1, typename T2> + bool eq_p (const T1 &, const T2 &); + + template <typename T1, typename T2> + bool ne_p (const T1 &, const T2 &); + + bool lt_p (const wide_int_ref &, const wide_int_ref &, signop); + bool lts_p (const wide_int_ref &, const wide_int_ref &); + bool ltu_p (const wide_int_ref &, const wide_int_ref &); + bool le_p (const wide_int_ref &, const wide_int_ref &, signop); + bool les_p (const wide_int_ref &, const wide_int_ref &); + bool leu_p (const wide_int_ref &, const wide_int_ref &); + bool gt_p (const wide_int_ref &, const wide_int_ref &, signop); + bool gts_p (const wide_int_ref &, const wide_int_ref &); + bool gtu_p (const wide_int_ref &, const wide_int_ref &); + bool ge_p (const 
wide_int_ref &, const wide_int_ref &, signop); + bool ges_p (const wide_int_ref &, const wide_int_ref &); + bool geu_p (const wide_int_ref &, const wide_int_ref &); + int cmp (const wide_int_ref &, const wide_int_ref &, signop); + int cmps (const wide_int_ref &, const wide_int_ref &); + int cmpu (const wide_int_ref &, const wide_int_ref &); + +#define UNARY_FUNCTION \ + template <typename T> WI_UNARY_RESULT (T) +#define BINARY_FUNCTION \ + template <typename T1, typename T2> WI_BINARY_RESULT (T1, T2) +#define SHIFT_FUNCTION \ + template <typename T> WI_UNARY_RESULT (T) + + UNARY_FUNCTION bit_not (const T &); + UNARY_FUNCTION neg (const T &); + UNARY_FUNCTION neg (const T &, bool *); + UNARY_FUNCTION abs (const T &); + UNARY_FUNCTION ext (const T &, unsigned int, signop); + UNARY_FUNCTION sext (const T &, unsigned int); + UNARY_FUNCTION zext (const T &, unsigned int); + UNARY_FUNCTION set_bit (const T &, unsigned int); + + BINARY_FUNCTION min (const T1 &, const T2 &, signop); + BINARY_FUNCTION smin (const T1 &, const T2 &); + BINARY_FUNCTION umin (const T1 &, const T2 &); + BINARY_FUNCTION max (const T1 &, const T2 &, signop); + BINARY_FUNCTION smax (const T1 &, const T2 &); + BINARY_FUNCTION umax (const T1 &, const T2 &); + + BINARY_FUNCTION bit_and (const T1 &, const T2 &); + BINARY_FUNCTION bit_and_not (const T1 &, const T2 &); + BINARY_FUNCTION bit_or (const T1 &, const T2 &); + BINARY_FUNCTION bit_or_not (const T1 &, const T2 &); + BINARY_FUNCTION bit_xor (const T1 &, const T2 &); + BINARY_FUNCTION add (const T1 &, const T2 &); + BINARY_FUNCTION add (const T1 &, const T2 &, signop, bool *); + BINARY_FUNCTION sub (const T1 &, const T2 &); + BINARY_FUNCTION sub (const T1 &, const T2 &, signop, bool *); + BINARY_FUNCTION mul (const T1 &, const T2 &); + BINARY_FUNCTION mul (const T1 &, const T2 &, signop, bool *); + BINARY_FUNCTION smul (const T1 &, const T2 &, bool *); + BINARY_FUNCTION umul (const T1 &, const T2 &, bool *); + BINARY_FUNCTION mul_high (const T1 &, 
const T2 &, signop); + BINARY_FUNCTION div_trunc (const T1 &, const T2 &, signop, bool * = 0); + BINARY_FUNCTION sdiv_trunc (const T1 &, const T2 &); + BINARY_FUNCTION udiv_trunc (const T1 &, const T2 &); + BINARY_FUNCTION div_floor (const T1 &, const T2 &, signop, bool * = 0); + BINARY_FUNCTION udiv_floor (const T1 &, const T2 &); + BINARY_FUNCTION sdiv_floor (const T1 &, const T2 &); + BINARY_FUNCTION div_ceil (const T1 &, const T2 &, signop, bool * = 0); + BINARY_FUNCTION div_round (const T1 &, const T2 &, signop, bool * = 0); + BINARY_FUNCTION divmod_trunc (const T1 &, const T2 &, signop, + WI_BINARY_RESULT (T1, T2) *); + BINARY_FUNCTION mod_trunc (const T1 &, const T2 &, signop, bool * = 0); + BINARY_FUNCTION smod_trunc (const T1 &, const T2 &); + BINARY_FUNCTION umod_trunc (const T1 &, const T2 &); + BINARY_FUNCTION mod_floor (const T1 &, const T2 &, signop, bool * = 0); + BINARY_FUNCTION umod_floor (const T1 &, const T2 &); + BINARY_FUNCTION mod_ceil (const T1 &, const T2 &, signop, bool * = 0); + BINARY_FUNCTION mod_round (const T1 &, const T2 &, signop, bool * = 0); + + template <typename T1, typename T2> + bool multiple_of_p (const T1 &, const T2 &, signop, + WI_BINARY_RESULT (T1, T2) *); + + unsigned int trunc_shift (const wide_int_ref &, unsigned int, unsigned int); + + SHIFT_FUNCTION lshift (const T &, const wide_int_ref &, unsigned int = 0); + SHIFT_FUNCTION lrshift (const T &, const wide_int_ref &, unsigned int = 0); + SHIFT_FUNCTION arshift (const T &, const wide_int_ref &, unsigned int = 0); + SHIFT_FUNCTION rshift (const T &, const wide_int_ref &, signop sgn, + unsigned int = 0); + SHIFT_FUNCTION lrotate (const T &, const wide_int_ref &, unsigned int = 0); + SHIFT_FUNCTION rrotate (const T &, const wide_int_ref &, unsigned int = 0); + +#undef SHIFT_FUNCTION +#undef BINARY_FUNCTION +#undef UNARY_FUNCTION + + int clz (const wide_int_ref &); + int clrsb (const wide_int_ref &); + int ctz (const wide_int_ref &); + int exact_log2 (const wide_int_ref &); 
+ int floor_log2 (const wide_int_ref &); + int ffs (const wide_int_ref &); + int popcount (const wide_int_ref &); + int parity (const wide_int_ref &); + + template <typename T> + unsigned HOST_WIDE_INT extract_uhwi (const T &, unsigned int, unsigned int); +} + +namespace wi +{ + /* Contains the components of a decomposed integer for easy, direct + access. */ + struct storage_ref + { + storage_ref (const HOST_WIDE_INT *, unsigned int, unsigned int); + + const HOST_WIDE_INT *val; + unsigned int len; + unsigned int precision; + + /* Provide enough trappings for this class to act as storage for + generic_wide_int. */ + unsigned int get_len () const; + unsigned int get_precision () const; + const HOST_WIDE_INT *get_val () const; + }; +} + +inline::wi::storage_ref::storage_ref (const HOST_WIDE_INT *val_in, + unsigned int len_in, + unsigned int precision_in) + : val (val_in), len (len_in), precision (precision_in) +{ +} + +inline unsigned int +wi::storage_ref::get_len () const +{ + return len; +} + +inline unsigned int +wi::storage_ref::get_precision () const +{ + return precision; +} + +inline const HOST_WIDE_INT * +wi::storage_ref::get_val () const +{ + return val; +} + +namespace wi +{ + template <> + struct int_traits <wi::storage_ref> + { + static const enum precision_type precision_type = VAR_PRECISION; + /* wi::storage_ref can be a reference to a primitive type, + so this is the conservatively-correct setting. */ + static const bool host_dependent_precision = true; + }; +} + +/* This class defines an integer type using the storage provided by the + template argument. The storage class must provide the following + functions: + + unsigned int get_precision () const + Return the number of bits in the integer. + + HOST_WIDE_INT *get_val () const + Return a pointer to the array of blocks that encodes the integer. + + unsigned int get_len () const + Return the number of blocks in get_val (). 
If this is smaller + than the number of blocks implied by get_precision (), the + remaining blocks are sign extensions of block get_len () - 1. + + Although not required by generic_wide_int itself, writable storage + classes can also provide the following functions: + + HOST_WIDE_INT *write_val () + Get a modifiable version of get_val () + + unsigned int set_len (unsigned int len) + Set the value returned by get_len () to LEN. */ +template <typename storage> +class GTY(()) generic_wide_int : public storage +{ +public: + generic_wide_int (); + + template <typename T> + generic_wide_int (const T &); + + template <typename T> + generic_wide_int (const T &, unsigned int); + + /* Conversions. */ + HOST_WIDE_INT to_shwi (unsigned int = 0) const; + unsigned HOST_WIDE_INT to_uhwi (unsigned int = 0) const; + HOST_WIDE_INT to_short_addr () const; + + /* Public accessors for the interior of a wide int. */ + HOST_WIDE_INT sign_mask () const; + HOST_WIDE_INT elt (unsigned int) const; + unsigned HOST_WIDE_INT ulow () const; + unsigned HOST_WIDE_INT uhigh () const; + HOST_WIDE_INT slow () const; + HOST_WIDE_INT shigh () const; + +#define BINARY_PREDICATE(OP, F) \ + template <typename T> \ + bool OP (const T &c) const { return wi::F (*this, c); } + +#define UNARY_OPERATOR(OP, F) \ + generic_wide_int OP () const { return wi::F (*this); } + +#define BINARY_OPERATOR(OP, F) \ + template <typename T> \ + generic_wide_int OP (const T &c) const { return wi::F (*this, c); } + +#define ASSIGNMENT_OPERATOR(OP, F) \ + template <typename T> \ + generic_wide_int &OP (const T &c) { return (*this = wi::F (*this, c)); } + +#define INCDEC_OPERATOR(OP, DELTA) \ + generic_wide_int &OP () { *this += DELTA; return *this; } + + UNARY_OPERATOR (operator ~, bit_not) \ + UNARY_OPERATOR (operator -, neg) \ + BINARY_PREDICATE (operator ==, eq_p) \ + BINARY_PREDICATE (operator !=, ne_p) \ + BINARY_OPERATOR (operator &, bit_and) \ + BINARY_OPERATOR (and_not, bit_and_not) \ + BINARY_OPERATOR (operator |, 
bit_or) \ + BINARY_OPERATOR (or_not, bit_or_not) \ + BINARY_OPERATOR (operator ^, bit_xor) \ + BINARY_OPERATOR (operator +, add) \ + BINARY_OPERATOR (operator -, sub) \ + BINARY_OPERATOR (operator *, mul) \ + ASSIGNMENT_OPERATOR (operator &=, bit_and) \ + ASSIGNMENT_OPERATOR (operator |=, bit_or) \ + ASSIGNMENT_OPERATOR (operator ^=, bit_xor) \ + ASSIGNMENT_OPERATOR (operator +=, add) \ + ASSIGNMENT_OPERATOR (operator -=, sub) \ + ASSIGNMENT_OPERATOR (operator *=, mul) \ + INCDEC_OPERATOR (operator ++, 1) \ + INCDEC_OPERATOR (operator --, -1) + +#undef BINARY_PREDICATE +#undef UNARY_OPERATOR +#undef BINARY_OPERATOR +#undef ASSIGNMENT_OPERATOR +#undef INCDEC_OPERATOR + + char *dump (char *) const; +}; + +template <typename storage> +inline generic_wide_int <storage>::generic_wide_int () {} + +template <typename storage> +template <typename T> +inline generic_wide_int <storage>::generic_wide_int (const T &x) + : storage (x) +{ +} + +template <typename storage> +template <typename T> +inline generic_wide_int <storage>::generic_wide_int (const T &x, + unsigned int precision) + : storage (x, precision) +{ +} + +/* Return THIS as a signed HOST_WIDE_INT, sign-extending from PRECISION. + If THIS does not fit in PRECISION, the information is lost. */ +template <typename storage> +inline HOST_WIDE_INT +generic_wide_int <storage>::to_shwi (unsigned int precision) const +{ + if (precision == 0) + precision = this->get_precision (); + if (precision < HOST_BITS_PER_WIDE_INT) + return sext_hwi (this->get_val ()[0], precision); + else + return this->get_val ()[0]; +} + +/* Return THIS as an unsigned HOST_WIDE_INT, zero-extending from + PRECISION. If THIS does not fit in PRECISION, the information + is lost. 
*/ +template <typename storage> +inline unsigned HOST_WIDE_INT +generic_wide_int <storage>::to_uhwi (unsigned int precision) const +{ + if (precision == 0) + precision = this->get_precision (); + if (precision < HOST_BITS_PER_WIDE_INT) + return zext_hwi (this->get_val ()[0], precision); + else + return this->get_val ()[0]; +} + +/* TODO: The compiler is half converted from using HOST_WIDE_INT to + represent addresses to using addr_wide_int to represent addresses. + We use to_short_addr at the interface from new code to old, + unconverted code. */ +template <typename storage> +inline HOST_WIDE_INT +generic_wide_int <storage>::to_short_addr () const +{ + return this->get_val ()[0]; +} + +/* Return the implicit value of blocks above get_len (). */ +template <typename storage> +inline HOST_WIDE_INT +generic_wide_int <storage>::sign_mask () const +{ + return this->get_val ()[this->get_len () - 1] < 0 ? -1 : 0; +} + +/* Return the signed value of the least-significant explicitly-encoded + block. */ +template <typename storage> +inline HOST_WIDE_INT +generic_wide_int <storage>::slow () const +{ + return this->get_val ()[0]; +} + +/* Return the signed value of the most-significant explicitly-encoded + block. */ +template <typename storage> +inline HOST_WIDE_INT +generic_wide_int <storage>::shigh () const +{ + return this->get_val ()[this->get_len () - 1]; +} + +/* Return the unsigned value of the least-significant + explicitly-encoded block. */ +template <typename storage> +inline unsigned HOST_WIDE_INT +generic_wide_int <storage>::ulow () const +{ + return this->get_val ()[0]; +} + +/* Return the unsigned value of the most-significant + explicitly-encoded block. */ +template <typename storage> +inline unsigned HOST_WIDE_INT +generic_wide_int <storage>::uhigh () const +{ + return this->get_val ()[this->get_len () - 1]; +} + +/* Return block I, which might be implicitly or explicit encoded. 
*/ +template <typename storage> +inline HOST_WIDE_INT +generic_wide_int <storage>::elt (unsigned int i) const +{ + if (i >= this->get_len ()) + return sign_mask (); + else + return this->get_val ()[i]; +} + +namespace wi +{ + template <> + template <typename storage> + struct int_traits < generic_wide_int <storage> > + : public wi::int_traits <storage> + { + static unsigned int get_precision (const generic_wide_int <storage> &); + static wi::storage_ref decompose (HOST_WIDE_INT *, unsigned int, + const generic_wide_int <storage> &); + }; +} + +template <typename storage> +inline unsigned int +wi::int_traits < generic_wide_int <storage> >:: +get_precision (const generic_wide_int <storage> &x) +{ + return x.get_precision (); +} + +template <typename storage> +inline wi::storage_ref +wi::int_traits < generic_wide_int <storage> >:: +decompose (HOST_WIDE_INT *, unsigned int precision, + const generic_wide_int <storage> &x) +{ + gcc_checking_assert (precision == x.get_precision ()); + return wi::storage_ref (x.get_val (), x.get_len (), precision); +} + +/* Provide the storage for a wide_int_ref. This acts like a read-only + wide_int, with the optimization that VAL is normally a pointer to + another integer's storage, so that no array copy is needed. */ +struct wide_int_ref_storage : public wi::storage_ref +{ +private: + /* Scratch space that can be used when decomposing the original integer. + It must live as long as this object. */ + HOST_WIDE_INT scratch[WIDE_INT_MAX_ELTS]; + +public: + template <typename T> + wide_int_ref_storage (const T &); + + template <typename T> + wide_int_ref_storage (const T &, unsigned int); +}; + +/* Create a reference to integer X in its natural precision. Note + that the natural precision is host-dependent for primitive + types. 
*/ +template <typename T> +inline wide_int_ref_storage::wide_int_ref_storage (const T &x) + : storage_ref (wi::int_traits <T>::decompose (scratch, + wi::get_precision (x), x)) +{ +} + +/* Create a reference to integer X in precision PRECISION. */ +template <typename T> +inline wide_int_ref_storage::wide_int_ref_storage (const T &x, + unsigned int precision) + : storage_ref (wi::int_traits <T>::decompose (scratch, precision, x)) +{ +} + +namespace wi +{ + template <> + struct int_traits <wide_int_ref_storage> + : public int_traits <wi::storage_ref> + { + }; +} + +namespace wi +{ + unsigned int force_to_size (HOST_WIDE_INT *, const HOST_WIDE_INT *, + unsigned int, unsigned int, unsigned int, + signop sgn); + unsigned int from_array (HOST_WIDE_INT *, const HOST_WIDE_INT *, + unsigned int, unsigned int, bool = true); +} + +/* The storage used by wide_int. */ +class GTY(()) wide_int_storage +{ +private: + HOST_WIDE_INT val[WIDE_INT_MAX_ELTS]; + unsigned int len; + unsigned int precision; + +public: + wide_int_storage (); + template <typename T> + wide_int_storage (const T &); + + /* The standard generic_wide_int storage methods. */ + unsigned int get_precision () const; + const HOST_WIDE_INT *get_val () const; + unsigned int get_len () const; + HOST_WIDE_INT *write_val (); + void set_len (unsigned int); + + static wide_int from (const wide_int_ref &, unsigned int, signop); + static wide_int from_array (const HOST_WIDE_INT *, unsigned int, + unsigned int, bool = true); + static wide_int create (unsigned int); + + /* FIXME: target-dependent, so should disappear. */ + wide_int bswap () const; +}; + +inline wide_int_storage::wide_int_storage () {} + +/* Initialize the storage from integer X, in its natural precision. + Note that we do not allow integers with host-dependent precision + to become wide_ints; wide_ints must always be logically independent + of the host. 
*/ +template <typename T> +inline wide_int_storage::wide_int_storage (const T &x) +{ + STATIC_ASSERT (!wi::int_traits<T>::host_dependent_precision); + wide_int_ref xi (x); + precision = xi.precision; + len = xi.len; + for (unsigned int i = 0; i < len; ++i) + val[i] = xi.val[i]; +} + +inline unsigned int +wide_int_storage::get_precision () const +{ + return precision; +} + +inline const HOST_WIDE_INT * +wide_int_storage::get_val () const +{ + return val; +} + +inline unsigned int +wide_int_storage::get_len () const +{ + return len; +} + +inline HOST_WIDE_INT * +wide_int_storage::write_val () +{ + return val; +} + +inline void +wide_int_storage::set_len (unsigned int l) +{ + len = l; +} + +/* Treat X as having signedness SGN and convert it to a PRECISION-bit + number. */ +inline wide_int +wide_int_storage::from (const wide_int_ref &x, unsigned int precision, + signop sgn) +{ + wide_int result = wide_int::create (precision); + result.set_len (wi::force_to_size (result.write_val (), x.val, x.len, + x.precision, precision, sgn)); + return result; +} + +/* Create a wide_int from the explicit block encoding given by VAL and + LEN. PRECISION is the precision of the integer. NEED_CANON_P is + true if the encoding may have redundant trailing blocks. */ +inline wide_int +wide_int_storage::from_array (const HOST_WIDE_INT *val, unsigned int len, + unsigned int precision, bool need_canon_p) +{ + wide_int result = wide_int::create (precision); + result.set_len (wi::from_array (result.write_val (), val, len, precision, + need_canon_p)); + return result; +} + +/* Return an uninitialized wide_int with precision PRECISION. */ +inline wide_int +wide_int_storage::create (unsigned int precision) +{ + wide_int x; + x.precision = precision; + return x; +} + +namespace wi +{ + template <> + struct int_traits <wide_int_storage> + { + static const enum precision_type precision_type = VAR_PRECISION; + /* Guaranteed by a static assert in the wide_int_storage constructor. 
*/ + static const bool host_dependent_precision = false; + template <typename T1, typename T2> + static wide_int get_binary_result (const T1 &, const T2 &); + }; +} + +template <typename T1, typename T2> +inline wide_int +wi::int_traits <wide_int_storage>::get_binary_result (const T1 &x, const T2 &y) +{ + /* This shouldn't be used for two flexible-precision inputs. */ + STATIC_ASSERT (wi::int_traits <T1>::precision_type != FLEXIBLE_PRECISION + || wi::int_traits <T2>::precision_type != FLEXIBLE_PRECISION); + if (wi::int_traits <T1>::precision_type == FLEXIBLE_PRECISION) + return wide_int::create (wi::get_precision (y)); + else + return wide_int::create (wi::get_precision (x)); +} + +/* An N-bit integer. Until we can use typedef templates, use this instead. */ +#define FIXED_WIDE_INT(N) \ + generic_wide_int < fixed_wide_int_storage <N> > + +/* The storage used by FIXED_WIDE_INT (N). */ +template <int N> +class GTY(()) fixed_wide_int_storage +{ +private: + HOST_WIDE_INT val[(N + HOST_BITS_PER_WIDE_INT + 1) / HOST_BITS_PER_WIDE_INT]; + unsigned int len; + +public: + fixed_wide_int_storage (); + template <typename T> + fixed_wide_int_storage (const T &); + + /* The standard generic_wide_int storage methods. */ + unsigned int get_precision () const; + const HOST_WIDE_INT *get_val () const; + unsigned int get_len () const; + HOST_WIDE_INT *write_val (); + void set_len (unsigned int); + + static FIXED_WIDE_INT (N) from (const wide_int_ref &, signop); + static FIXED_WIDE_INT (N) from_array (const HOST_WIDE_INT *, unsigned int, + bool = true); +}; + +typedef FIXED_WIDE_INT (ADDR_MAX_PRECISION) addr_wide_int; +typedef FIXED_WIDE_INT (MAX_BITSIZE_MODE_ANY_INT) max_wide_int; + +template <int N> +inline fixed_wide_int_storage <N>::fixed_wide_int_storage () {} + +/* Initialize the storage from integer X, in precision N. */ +template <int N> +template <typename T> +inline fixed_wide_int_storage <N>::fixed_wide_int_storage (const T &x) +{ + /* Check for type compatibility. 
We don't want to initialize a + fixed-width integer from something like a wide_int. */ + WI_BINARY_RESULT (T, FIXED_WIDE_INT (N)) *assertion ATTRIBUTE_UNUSED; + wide_int_ref xi (x, N); + len = xi.len; + for (unsigned int i = 0; i < len; ++i) + val[i] = xi.val[i]; +} + +template <int N> +inline unsigned int +fixed_wide_int_storage <N>::get_precision () const +{ + return N; +} + +template <int N> +inline const HOST_WIDE_INT * +fixed_wide_int_storage <N>::get_val () const +{ + return val; +} + +template <int N> +inline unsigned int +fixed_wide_int_storage <N>::get_len () const +{ + return len; +} + +template <int N> +inline HOST_WIDE_INT * +fixed_wide_int_storage <N>::write_val () +{ + return val; +} + +template <int N> +inline void +fixed_wide_int_storage <N>::set_len (unsigned int l) +{ + len = l; +} + +/* Treat X as having signedness SGN and convert it to an N-bit number. */ +template <int N> +inline FIXED_WIDE_INT (N) +fixed_wide_int_storage <N>::from (const wide_int_ref &x, signop sgn) +{ + FIXED_WIDE_INT (N) result; + result.set_len (wi::force_to_size (result.write_val (), x.val, x.len, + x.precision, N, sgn)); + return result; +} + +/* Create a FIXED_WIDE_INT (N) from the explicit block encoding given by + VAL and LEN. NEED_CANON_P is true if the encoding may have redundant + trailing blocks. 
*/ +template <int N> +inline FIXED_WIDE_INT (N) +fixed_wide_int_storage <N>::from_array (const HOST_WIDE_INT *val, + unsigned int len, + bool need_canon_p) +{ + FIXED_WIDE_INT (N) result; + result.set_len (wi::from_array (result.write_val (), val, len, + N, need_canon_p)); + return result; +} + +namespace wi +{ + template <> + template <int N> + struct int_traits < fixed_wide_int_storage <N> > + { + static const enum precision_type precision_type = CONST_PRECISION; + static const bool host_dependent_precision = false; + static const unsigned int precision = N; + template <typename T1, typename T2> + static FIXED_WIDE_INT (N) get_binary_result (const T1 &, const T2 &); + }; +} + +template <int N> +template <typename T1, typename T2> +inline FIXED_WIDE_INT (N) +wi::int_traits < fixed_wide_int_storage <N> >:: +get_binary_result (const T1 &, const T2 &) +{ + return FIXED_WIDE_INT (N) (); +} + +/* Specify the result type for each supported combination of binary + inputs. Note that CONST_PRECISION and VAR_PRECISION cannot be + mixed, in order to give stronger type checking. When both inputs + are CONST_PRECISION, they must have the same precision. */ +namespace wi +{ + template <> + template <typename T1, typename T2> + struct binary_traits <T1, T2, FLEXIBLE_PRECISION, FLEXIBLE_PRECISION> + { + typedef max_wide_int result_type; + }; + + template <> + template <typename T1, typename T2> + struct binary_traits <T1, T2, FLEXIBLE_PRECISION, VAR_PRECISION> + { + typedef wide_int result_type; + }; + + template <> + template <typename T1, typename T2> + struct binary_traits <T1, T2, FLEXIBLE_PRECISION, CONST_PRECISION> + { + /* Spelled out explicitly (rather than through FIXED_WIDE_INT) + so as not to confuse gengtype. 
*/ + typedef generic_wide_int < fixed_wide_int_storage + <int_traits <T2>::precision> > result_type; + }; + + template <> + template <typename T1, typename T2> + struct binary_traits <T1, T2, VAR_PRECISION, FLEXIBLE_PRECISION> + { + typedef wide_int result_type; + }; + + template <> + template <typename T1, typename T2> + struct binary_traits <T1, T2, CONST_PRECISION, FLEXIBLE_PRECISION> + { + /* Spelled out explicitly (rather than through FIXED_WIDE_INT) + so as not to confuse gengtype. */ + typedef generic_wide_int < fixed_wide_int_storage + <int_traits <T1>::precision> > result_type; + }; + + template <> + template <typename T1, typename T2> + struct binary_traits <T1, T2, CONST_PRECISION, CONST_PRECISION> + { + /* Spelled out explicitly (rather than through FIXED_WIDE_INT) + so as not to confuse gengtype. */ + STATIC_ASSERT (int_traits <T1>::precision == int_traits <T2>::precision); + typedef generic_wide_int < fixed_wide_int_storage + <int_traits <T1>::precision> > result_type; + }; + + template <> + template <typename T1, typename T2> + struct binary_traits <T1, T2, VAR_PRECISION, VAR_PRECISION> + { + typedef wide_int result_type; + }; +} + +namespace wi +{ + /* Implementation of int_traits for primitive integer types like "int". 
*/ + template <typename T, bool signed_p> + struct primitive_int_traits + { + static const enum precision_type precision_type = FLEXIBLE_PRECISION; + static const bool host_dependent_precision = true; + static unsigned int get_precision (T); + static wi::storage_ref decompose (HOST_WIDE_INT *, unsigned int, T); + }; +} + +template <typename T, bool signed_p> +inline unsigned int +wi::primitive_int_traits <T, signed_p>::get_precision (T) +{ + return sizeof (T) * CHAR_BIT; +} + +template <typename T, bool signed_p> +inline wi::storage_ref +wi::primitive_int_traits <T, signed_p>::decompose (HOST_WIDE_INT *scratch, + unsigned int precision, T x) +{ + scratch[0] = x; + if (signed_p || scratch[0] >= 0 || precision <= HOST_BITS_PER_WIDE_INT) + return wi::storage_ref (scratch, 1, precision); + scratch[1] = 0; + return wi::storage_ref (scratch, 2, precision); +} + +/* Allow primitive C types to be used in wi:: routines. */ +namespace wi +{ + template <> + struct int_traits <int> + : public primitive_int_traits <int, true> {}; + + template <> + struct int_traits <unsigned int> + : public primitive_int_traits <unsigned int, false> {}; + +#if HOST_BITS_PER_INT != HOST_BITS_PER_WIDE_INT + template <> + struct int_traits <HOST_WIDE_INT> + : public primitive_int_traits <HOST_WIDE_INT, true> {}; + + template <> + struct int_traits <unsigned HOST_WIDE_INT> + : public primitive_int_traits <unsigned HOST_WIDE_INT, false> {}; +#endif +} + +namespace wi +{ + /* Stores HWI-sized integer VAL, treating it as having signedness SGN + and precision PRECISION. 
*/ + struct hwi_with_prec + { + hwi_with_prec (HOST_WIDE_INT, unsigned int, signop); + HOST_WIDE_INT val; + unsigned int precision; + signop sgn; + }; + + hwi_with_prec shwi (HOST_WIDE_INT, unsigned int); + hwi_with_prec uhwi (unsigned HOST_WIDE_INT, unsigned int); + + hwi_with_prec minus_one (unsigned int); + hwi_with_prec zero (unsigned int); + hwi_with_prec one (unsigned int); + hwi_with_prec two (unsigned int); +} + +inline wi::hwi_with_prec::hwi_with_prec (HOST_WIDE_INT v, unsigned int p, + signop s) + : val (v), precision (p), sgn (s) +{ +} + +/* Return a signed integer that has value VAL and precision PRECISION. */ +inline wi::hwi_with_prec +wi::shwi (HOST_WIDE_INT val, unsigned int precision) +{ + return hwi_with_prec (val, precision, SIGNED); +} + +/* Return an unsigned integer that has value VAL and precision PRECISION. */ +inline wi::hwi_with_prec +wi::uhwi (unsigned HOST_WIDE_INT val, unsigned int precision) +{ + return hwi_with_prec (val, precision, UNSIGNED); +} + +/* Return a wide int of -1 with precision PREC. */ +inline wi::hwi_with_prec +wi::minus_one (unsigned int precision) +{ + return wi::shwi (-1, precision); +} + +/* Return a wide int of 0 with precision PREC. */ +inline wi::hwi_with_prec +wi::zero (unsigned int precision) +{ + return wi::shwi (0, precision); +} + +/* Return a wide int of 1 with precision PREC. */ +inline wi::hwi_with_prec +wi::one (unsigned int precision) +{ + return wi::shwi (1, precision); +} + +/* Return a wide int of 2 with precision PREC. */ +inline wi::hwi_with_prec +wi::two (unsigned int precision) +{ + return wi::shwi (2, precision); +} + +namespace wi +{ + template <> + struct int_traits <wi::hwi_with_prec> + { + /* Since we have a sign, we can extend or truncate the integer to + other precisions where necessary. */ + static const enum precision_type precision_type = FLEXIBLE_PRECISION; + /* hwi_with_prec has an explicitly-given precision, rather than the + precision of HOST_WIDE_INT. 
*/ + static const bool host_dependent_precision = false; + static unsigned int get_precision (const wi::hwi_with_prec &); + static wi::storage_ref decompose (HOST_WIDE_INT *, unsigned int, + const wi::hwi_with_prec &); + }; +} + +inline unsigned int +wi::int_traits <wi::hwi_with_prec>::get_precision (const wi::hwi_with_prec &x) +{ + return x.precision; +} + +inline wi::storage_ref +wi::int_traits <wi::hwi_with_prec>:: +decompose (HOST_WIDE_INT *scratch, unsigned int precision, + const wi::hwi_with_prec &x) +{ + scratch[0] = x.val; + if (x.sgn == SIGNED || x.val >= 0 || precision <= HOST_BITS_PER_WIDE_INT) + { + if (precision < HOST_BITS_PER_WIDE_INT) + scratch[0] = sext_hwi (scratch[0], precision); + return wi::storage_ref (scratch, 1, precision); + } + scratch[1] = 0; + return wi::storage_ref (scratch, 2, precision); +} + +/* Private functions for handling large cases out of line. They take + individual length and array parameters because that is cheaper for + the inline caller than constructing an object on the stack and + passing a reference to it. (Although many callers use wide_int_refs, + we generally want those to be removed by SRA.) 
*/ +namespace wi +{ + bool eq_p_large (const HOST_WIDE_INT *, unsigned int, + const HOST_WIDE_INT *, unsigned int, unsigned int); + bool lts_p_large (const HOST_WIDE_INT *, unsigned int, unsigned int, + const HOST_WIDE_INT *, unsigned int, unsigned int); + bool ltu_p_large (const HOST_WIDE_INT *, unsigned int, unsigned int, + const HOST_WIDE_INT *, unsigned int, unsigned int); + int cmps_large (const HOST_WIDE_INT *, unsigned int, unsigned int, + const HOST_WIDE_INT *, unsigned int, unsigned int); + int cmpu_large (const HOST_WIDE_INT *, unsigned int, unsigned int, + const HOST_WIDE_INT *, unsigned int, unsigned int); + unsigned int sext_large (HOST_WIDE_INT *, const HOST_WIDE_INT *, + unsigned int, + unsigned int, unsigned int); + unsigned int zext_large (HOST_WIDE_INT *, const HOST_WIDE_INT *, + unsigned int, + unsigned int, unsigned int); + unsigned int set_bit_large (HOST_WIDE_INT *, const HOST_WIDE_INT *, + unsigned int, unsigned int, unsigned int); + unsigned int lshift_large (HOST_WIDE_INT *, const HOST_WIDE_INT *, + unsigned int, unsigned int, unsigned int); + unsigned int lrshift_large (HOST_WIDE_INT *, const HOST_WIDE_INT *, + unsigned int, unsigned int, unsigned int, + unsigned int); + unsigned int arshift_large (HOST_WIDE_INT *, const HOST_WIDE_INT *, + unsigned int, unsigned int, unsigned int, + unsigned int); + unsigned int and_large (HOST_WIDE_INT *, const HOST_WIDE_INT *, unsigned int, + const HOST_WIDE_INT *, unsigned int, unsigned int); + unsigned int and_not_large (HOST_WIDE_INT *, const HOST_WIDE_INT *, + unsigned int, const HOST_WIDE_INT *, + unsigned int, unsigned int); + unsigned int or_large (HOST_WIDE_INT *, const HOST_WIDE_INT *, unsigned int, + const HOST_WIDE_INT *, unsigned int, unsigned int); + unsigned int or_not_large (HOST_WIDE_INT *, const HOST_WIDE_INT *, + unsigned int, const HOST_WIDE_INT *, + unsigned int, unsigned int); + unsigned int xor_large (HOST_WIDE_INT *, const HOST_WIDE_INT *, unsigned int, + const HOST_WIDE_INT *, 
unsigned int, unsigned int); + unsigned int add_large (HOST_WIDE_INT *, const HOST_WIDE_INT *, unsigned int, + const HOST_WIDE_INT *, unsigned int, unsigned int, + signop, bool *); + unsigned int sub_large (HOST_WIDE_INT *, const HOST_WIDE_INT *, unsigned int, + const HOST_WIDE_INT *, unsigned int, unsigned int, + signop, bool *); + unsigned int mul_internal (HOST_WIDE_INT *, const HOST_WIDE_INT *, + unsigned int, const HOST_WIDE_INT *, + unsigned int, unsigned int, signop, bool *, + bool, bool); + unsigned int divmod_internal (HOST_WIDE_INT *, unsigned int *, + HOST_WIDE_INT *, const HOST_WIDE_INT *, + unsigned int, unsigned int, + const HOST_WIDE_INT *, + unsigned int, unsigned int, + signop, bool *); +} + +/* Return the number of bits that integer X can hold. */ +template <typename T> +inline unsigned int +wi::get_precision (const T &x) +{ + return wi::int_traits <T>::get_precision (x); +} + +/* Return the number of bits that the result of a binary operation can + hold when the input operands are X and Y. */ +template <typename T1, typename T2> +inline unsigned int +wi::get_binary_precision (const T1 &x, const T2 &y) +{ + return get_precision (wi::int_traits <WI_BINARY_RESULT (T1, T2)>:: + get_binary_result (x, y)); +} + +/* Return true if X fits in a HOST_WIDE_INT with no loss of + precision. */ +inline bool +wi::fits_shwi_p (const wide_int_ref &x) +{ + return x.len == 1; +} + +/* Return true if X fits in an unsigned HOST_WIDE_INT with no loss of + precision. */ +inline bool +wi::fits_uhwi_p (const wide_int_ref &x) +{ + if (x.precision <= HOST_BITS_PER_WIDE_INT) + return true; + if (x.len == 1) + return x.slow () >= 0; + return x.len == 2 && x.uhigh () == 0; +} + +/* Return true if X is negative based on the interpretation of SGN. + For UNSIGNED, this is always false. 
*/ +inline bool +wi::neg_p (const wide_int_ref &x, signop sgn) +{ + if (sgn == UNSIGNED) + return false; + return x.shigh () < 0; +} + +/* Return -1 if the top bit of X is set and 0 if the top bit is + clear. */ +inline HOST_WIDE_INT +wi::sign_mask (const wide_int_ref &x) +{ + return x.sign_mask (); +} + +/* Return true if X == Y. X and Y must be binary-compatible. */ +template <typename T1, typename T2> +inline bool +wi::eq_p (const T1 &x, const T2 &y) +{ + unsigned int precision = get_binary_precision (x, y); + if (precision == 0) + return true; + wide_int_ref xi (x, precision); + wide_int_ref yi (y, precision); + if (precision <= HOST_BITS_PER_WIDE_INT) + { + unsigned HOST_WIDE_INT diff = xi.ulow () ^ yi.ulow (); + bool result = (diff << (HOST_BITS_PER_WIDE_INT - precision)) == 0; + if (result) + gcc_assert (xi.ulow () == yi.ulow ()); + return result; + } + return eq_p_large (xi.val, xi.len, yi.val, yi.len, precision); +} + +/* Return true if X != Y. X and Y must be binary-compatible. */ +template <typename T1, typename T2> +inline bool +wi::ne_p (const T1 &x, const T2 &y) +{ + return !eq_p (x, y); +} + +/* Return true if X < Y when both are treated as signed values. */ +inline bool +wi::lts_p (const wide_int_ref &x, const wide_int_ref &y) +{ + // We optimize x < y, where y is 64 or fewer bits. + if (y.precision <= HOST_BITS_PER_WIDE_INT) + { + // If x fits directly into a shwi, we can compare directly. + if (wi::fits_shwi_p (x)) + return x.slow () < y.slow (); + // If x doesn't fit and is negative, then it must be more + // negative than any value in y, and hence smaller than y. + if (neg_p (x, SIGNED)) + return true; + // If x is positive, then it must be larger than any value in y, + // and hence greater than y. + return false; + } + return lts_p_large (x.val, x.len, x.precision, y.val, y.len, + y.precision); +} + +/* Return true if X < Y when both are treated as unsigned values. 
*/ +inline bool +wi::ltu_p (const wide_int_ref &x, const wide_int_ref &y) +{ + if (x.precision <= HOST_BITS_PER_WIDE_INT + && y.precision <= HOST_BITS_PER_WIDE_INT) + { + unsigned HOST_WIDE_INT xl = zext_hwi (x.ulow (), x.precision); + unsigned HOST_WIDE_INT yl = zext_hwi (y.ulow (), y.precision); + return xl < yl; + } + else + return ltu_p_large (x.val, x.len, x.precision, + y.val, y.len, y.precision); +} + +/* Return true if X < Y. Signedness of X and Y is indicated by SGN. */ +inline bool +wi::lt_p (const wide_int_ref &x, const wide_int_ref &y, signop sgn) +{ + if (sgn == SIGNED) + return lts_p (x, y); + else + return ltu_p (x, y); +} + +/* Return true if X <= Y when both are treated as signed values. */ +inline bool +wi::les_p (const wide_int_ref &x, const wide_int_ref &y) +{ + return !lts_p (y, x); +} + +/* Return true if X <= Y when both are treated as unsigned values. */ +inline bool +wi::leu_p (const wide_int_ref &x, const wide_int_ref &y) +{ + return !ltu_p (y, x); +} + +/* Return true if X <= Y. Signedness of X and Y is indicated by SGN. */ +inline bool +wi::le_p (const wide_int_ref &x, const wide_int_ref &y, signop sgn) +{ + if (sgn == SIGNED) + return les_p (x, y); + else + return leu_p (x, y); +} + +/* Return true if X > Y when both are treated as signed values. */ +inline bool +wi::gts_p (const wide_int_ref &x, const wide_int_ref &y) +{ + return lts_p (y, x); +} + +/* Return true if X > Y when both are treated as unsigned values. */ +inline bool +wi::gtu_p (const wide_int_ref &x, const wide_int_ref &y) +{ + return ltu_p (y, x); +} + +/* Return true if X > Y. Signedness of X and Y is indicated by SGN. */ +inline bool +wi::gt_p (const wide_int_ref &x, const wide_int_ref &y, signop sgn) +{ + if (sgn == SIGNED) + return gts_p (x, y); + else + return gtu_p (x, y); +} + +/* Return true if X >= Y when both are treated as signed values. 
*/ +inline bool +wi::ges_p (const wide_int_ref &x, const wide_int_ref &y) +{ + return !lts_p (x, y); +} + +/* Return true if X >= Y when both are treated as unsigned values. */ +inline bool +wi::geu_p (const wide_int_ref &x, const wide_int_ref &y) +{ + return !ltu_p (x, y); +} + +/* Return true if X >= Y. Signedness of X and Y is indicated by SGN. */ +inline bool +wi::ge_p (const wide_int_ref &x, const wide_int_ref &y, signop sgn) +{ + if (sgn == SIGNED) + return ges_p (x, y); + else + return geu_p (x, y); +} + +/* Return -1 if X < Y, 0 if X == Y and 1 if X > Y. Treat both X and Y + as signed values. */ +inline int +wi::cmps (const wide_int_ref &x, const wide_int_ref &y) +{ + if (x.precision <= HOST_BITS_PER_WIDE_INT + && y.precision <= HOST_BITS_PER_WIDE_INT) + { + HOST_WIDE_INT xl = x.slow (); + HOST_WIDE_INT yl = y.slow (); + if (xl < yl) + return -1; + else if (xl > yl) + return 1; + else + return 0; + } + return cmps_large (x.val, x.len, x.precision, y.val, y.len, + y.precision); +} + +/* Return -1 if X < Y, 0 if X == Y and 1 if X > Y. Treat both X and Y + as unsigned values. */ +inline int +wi::cmpu (const wide_int_ref &x, const wide_int_ref &y) +{ + if (x.precision <= HOST_BITS_PER_WIDE_INT + && y.precision <= HOST_BITS_PER_WIDE_INT) + { + unsigned HOST_WIDE_INT xl = zext_hwi (x.ulow (), x.precision); + unsigned HOST_WIDE_INT yl = zext_hwi (y.ulow (), y.precision); + if (xl < yl) + return -1; + else if (xl == yl) + return 0; + else + return 1; + } + return cmpu_large (x.val, x.len, x.precision, y.val, y.len, + y.precision); +} + +/* Return -1 if X < Y, 0 if X == Y and 1 if X > Y. Signedness of + X and Y indicated by SGN. */ +inline int +wi::cmp (const wide_int_ref &x, const wide_int_ref &y, signop sgn) +{ + if (sgn == SIGNED) + return cmps (x, y); + else + return cmpu (x, y); +} + +/* Return ~x. 
*/ +template <typename T> +inline WI_UNARY_RESULT (T) +wi::bit_not (const T &x) +{ + WI_UNARY_RESULT_VAR (result, val, T, x); + wide_int_ref xi (x, get_precision (result)); + for (unsigned int i = 0; i < xi.len; ++i) + val[i] = ~xi.val[i]; + result.set_len (xi.len); + return result; +} + +/* Return -x. */ +template <typename T> +inline WI_UNARY_RESULT (T) +wi::neg (const T &x) +{ + return sub (0, x); +} + +/* Return -x. Indicate in *OVERFLOW if X is the minimum signed value. */ +template <typename T> +inline WI_UNARY_RESULT (T) +wi::neg (const T &x, bool *overflow) +{ + *overflow = only_sign_bit_p (x); + return sub (0, x); +} + +/* Return the absolute value of x. */ +template <typename T> +inline WI_UNARY_RESULT (T) +wi::abs (const T &x) +{ + if (neg_p (x)) + return neg (x); + + WI_UNARY_RESULT_VAR (result, val, T, x); + wide_int_ref xi (x, get_precision(result)); + for (unsigned int i = 0; i < xi.len; ++i) + val[i] = xi.val[i]; + result.set_len (xi.len); + + return result; +} + +/* Return the result of sign-extending the low OFFSET bits of X. */ +template <typename T> +inline WI_UNARY_RESULT (T) +wi::sext (const T &x, unsigned int offset) +{ + WI_UNARY_RESULT_VAR (result, val, T, x); + unsigned int precision = get_precision (result); + wide_int_ref xi (x, precision); + + if (offset <= HOST_BITS_PER_WIDE_INT) + { + val[0] = sext_hwi (xi.ulow (), offset); + result.set_len (1); + } + else + result.set_len (sext_large (val, xi.val, xi.len, precision, offset)); + return result; +} + +/* Return the result of zero-extending the low OFFSET bits of X. */ +template <typename T> +inline WI_UNARY_RESULT (T) +wi::zext (const T &x, unsigned int offset) +{ + WI_UNARY_RESULT_VAR (result, val, T, x); + unsigned int precision = get_precision (result); + wide_int_ref xi (x, precision); + + /* This is not just an optimization, it is actually required to + maintain canonization. 
*/ + if (offset >= precision) + { + for (unsigned int i = 0; i < xi.len; ++i) + val[i] = xi.val[i]; + result.set_len (xi.len); + return result; + } + + if (offset < HOST_BITS_PER_WIDE_INT) + { + val[0] = zext_hwi (xi.ulow (), offset); + result.set_len (1); + } + else + result.set_len (zext_large (val, xi.val, xi.len, precision, offset)); + return result; +} + +/* Return the result of extending the low OFFSET bits of X according to + signedness SGN. */ +template <typename T> +inline WI_UNARY_RESULT (T) +wi::ext (const T &x, unsigned int offset, signop sgn) +{ + return sgn == SIGNED ? sext (x, offset) : zext (x, offset); +} + +/* Return an integer that represents X | (1 << bit). */ +template <typename T> +inline WI_UNARY_RESULT (T) +wi::set_bit (const T &x, unsigned int bit) +{ + WI_UNARY_RESULT_VAR (result, val, T, x); + unsigned int precision = get_precision (result); + wide_int_ref xi (x, precision); + if (precision <= HOST_BITS_PER_WIDE_INT) + { + val[0] = xi.ulow () | ((unsigned HOST_WIDE_INT) 1 << bit); + result.set_len (1); + } + else + result.set_len (set_bit_large (val, xi.val, xi.len, precision, bit)); + return result; +} + +/* Return the mininum of X and Y, treating them both as having + signedness SGN. */ +template <typename T1, typename T2> +inline WI_BINARY_RESULT (T1, T2) +wi::min (const T1 &x, const T2 &y, signop sgn) +{ + WI_BINARY_RESULT_VAR (result, val, T1, x, T2, y); + unsigned int precision = get_precision (result); + if (wi::le_p (x, y, sgn)) + { + wide_int_ref xi (x, precision); + for (unsigned int i = 0; i < xi.len; ++i) + val[i] = xi.val[i]; + result.set_len (xi.len); + } + else + { + wide_int_ref yi (y, precision); + for (unsigned int i = 0; i < yi.len; ++i) + val[i] = yi.val[i]; + result.set_len (yi.len); + } + return result; +} + +/* Return the minimum of X and Y, treating both as signed values. 
*/ +template <typename T1, typename T2> +inline WI_BINARY_RESULT (T1, T2) +wi::smin (const T1 &x, const T2 &y) +{ + return min (x, y, SIGNED); +} + +/* Return the minimum of X and Y, treating both as unsigned values. */ +template <typename T1, typename T2> +inline WI_BINARY_RESULT (T1, T2) +wi::umin (const T1 &x, const T2 &y) +{ + return min (x, y, UNSIGNED); +} + +/* Return the maxinum of X and Y, treating them both as having + signedness SGN. */ +template <typename T1, typename T2> +inline WI_BINARY_RESULT (T1, T2) +wi::max (const T1 &x, const T2 &y, signop sgn) +{ + WI_BINARY_RESULT_VAR (result, val, T1, x, T2, y); + unsigned int precision = get_precision (result); + if (wi::ge_p (x, y, sgn)) + { + wide_int_ref xi (x, precision); + for (unsigned int i = 0; i < xi.len; ++i) + val[i] = xi.val[i]; + result.set_len (xi.len); + } + else + { + wide_int_ref yi (y, precision); + for (unsigned int i = 0; i < yi.len; ++i) + val[i] = yi.val[i]; + result.set_len (yi.len); + } + return result; +} + +/* Return the maximum of X and Y, treating both as signed values. */ +template <typename T1, typename T2> +inline WI_BINARY_RESULT (T1, T2) +wi::smax (const T1 &x, const T2 &y) +{ + return max (x, y, SIGNED); +} + +/* Return the maximum of X and Y, treating both as unsigned values. */ +template <typename T1, typename T2> +inline WI_BINARY_RESULT (T1, T2) +wi::umax (const T1 &x, const T2 &y) +{ + return max (x, y, UNSIGNED); +} + +/* Return X & Y. */ +template <typename T1, typename T2> +inline WI_BINARY_RESULT (T1, T2) +wi::bit_and (const T1 &x, const T2 &y) +{ + WI_BINARY_RESULT_VAR (result, val, T1, x, T2, y); + unsigned int precision = get_precision (result); + wide_int_ref xi (x, precision); + wide_int_ref yi (y, precision); + if (xi.len + yi.len == 2) + { + val[0] = xi.ulow () & yi.ulow (); + result.set_len (1); + } + else + result.set_len (and_large (val, xi.val, xi.len, yi.val, yi.len, + precision)); + return result; +} + +/* Return X & ~Y. 
*/ +template <typename T1, typename T2> +inline WI_BINARY_RESULT (T1, T2) +wi::bit_and_not (const T1 &x, const T2 &y) +{ + WI_BINARY_RESULT_VAR (result, val, T1, x, T2, y); + unsigned int precision = get_precision (result); + wide_int_ref xi (x, precision); + wide_int_ref yi (y, precision); + if (xi.len + yi.len == 2) + { + val[0] = xi.ulow () & ~yi.ulow (); + result.set_len (1); + } + else + result.set_len (and_not_large (val, xi.val, xi.len, yi.val, yi.len, + precision)); + return result; +} + +/* Return X | Y. */ +template <typename T1, typename T2> +inline WI_BINARY_RESULT (T1, T2) +wi::bit_or (const T1 &x, const T2 &y) +{ + WI_BINARY_RESULT_VAR (result, val, T1, x, T2, y); + unsigned int precision = get_precision (result); + wide_int_ref xi (x, precision); + wide_int_ref yi (y, precision); + if (xi.len + yi.len == 2) + { + val[0] = xi.ulow () | yi.ulow (); + result.set_len (1); + } + else + result.set_len (or_large (val, xi.val, xi.len, + yi.val, yi.len, precision)); + return result; +} + +/* Return X | ~Y. */ +template <typename T1, typename T2> +inline WI_BINARY_RESULT (T1, T2) +wi::bit_or_not (const T1 &x, const T2 &y) +{ + WI_BINARY_RESULT_VAR (result, val, T1, x, T2, y); + unsigned int precision = get_precision (result); + wide_int_ref xi (x, precision); + wide_int_ref yi (y, precision); + if (xi.len + yi.len == 2) + { + val[0] = xi.ulow () | ~yi.ulow (); + result.set_len (1); + } + else + result.set_len (or_not_large (val, xi.val, xi.len, yi.val, yi.len, + precision)); + return result; +} + +/* Return X ^ Y. 
*/ +template <typename T1, typename T2> +inline WI_BINARY_RESULT (T1, T2) +wi::bit_xor (const T1 &x, const T2 &y) +{ + WI_BINARY_RESULT_VAR (result, val, T1, x, T2, y); + unsigned int precision = get_precision (result); + wide_int_ref xi (x, precision); + wide_int_ref yi (y, precision); + if (xi.len + yi.len == 2) + { + val[0] = xi.ulow () ^ yi.ulow (); + result.set_len (1); + } + else + result.set_len (xor_large (val, xi.val, xi.len, + yi.val, yi.len, precision)); + return result; +} + +/* Return X + Y. */ +template <typename T1, typename T2> +inline WI_BINARY_RESULT (T1, T2) +wi::add (const T1 &x, const T2 &y) +{ + WI_BINARY_RESULT_VAR (result, val, T1, x, T2, y); + unsigned int precision = get_precision (result); + wide_int_ref xi (x, precision); + wide_int_ref yi (y, precision); + if (precision <= HOST_BITS_PER_WIDE_INT) + { + val[0] = sext_hwi (xi.ulow () + yi.ulow (), precision); + result.set_len (1); + } + else + result.set_len (add_large (val, xi.val, xi.len, + yi.val, yi.len, precision, + UNSIGNED, 0)); + return result; +} + +/* Return X + Y. Treat X and Y as having the signednes given by SGN + and indicate in *OVERFLOW whether the operation overflowed. 
*/ +template <typename T1, typename T2> +inline WI_BINARY_RESULT (T1, T2) +wi::add (const T1 &x, const T2 &y, signop sgn, bool *overflow) +{ + WI_BINARY_RESULT_VAR (result, val, T1, x, T2, y); + unsigned int precision = get_precision (result); + wide_int_ref xi (x, precision); + wide_int_ref yi (y, precision); + if (precision <= HOST_BITS_PER_WIDE_INT) + { + unsigned HOST_WIDE_INT xl = xi.ulow (); + unsigned HOST_WIDE_INT yl = yi.ulow (); + unsigned HOST_WIDE_INT resultl = xl + yl; + if (precision == 0) + *overflow = false; + else if (sgn == SIGNED) + *overflow = (((resultl ^ xl) & (resultl ^ yl)) + >> (precision - 1)) & 1; + else + *overflow = ((resultl << (HOST_BITS_PER_WIDE_INT - precision)) + < (xl << (HOST_BITS_PER_WIDE_INT - precision))); + val[0] = sext_hwi (resultl, precision); + result.set_len (1); + } + else + result.set_len (add_large (val, xi.val, xi.len, + yi.val, yi.len, precision, + sgn, overflow)); + return result; +} + +/* Return X - Y. */ +template <typename T1, typename T2> +inline WI_BINARY_RESULT (T1, T2) +wi::sub (const T1 &x, const T2 &y) +{ + WI_BINARY_RESULT_VAR (result, val, T1, x, T2, y); + unsigned int precision = get_precision (result); + wide_int_ref xi (x, precision); + wide_int_ref yi (y, precision); + if (precision <= HOST_BITS_PER_WIDE_INT) + { + val[0] = sext_hwi (xi.ulow () - yi.ulow (), precision); + result.set_len (1); + } + else + result.set_len (sub_large (val, xi.val, xi.len, + yi.val, yi.len, precision, + UNSIGNED, 0)); + return result; +} + +/* Return X - Y. Treat X and Y as having the signednes given by SGN + and indicate in *OVERFLOW whether the operation overflowed. 
*/ +template <typename T1, typename T2> +inline WI_BINARY_RESULT (T1, T2) +wi::sub (const T1 &x, const T2 &y, signop sgn, bool *overflow) +{ + WI_BINARY_RESULT_VAR (result, val, T1, x, T2, y); + unsigned int precision = get_precision (result); + wide_int_ref xi (x, precision); + wide_int_ref yi (y, precision); + if (precision <= HOST_BITS_PER_WIDE_INT) + { + unsigned HOST_WIDE_INT xl = xi.ulow (); + unsigned HOST_WIDE_INT yl = yi.ulow (); + unsigned HOST_WIDE_INT resultl = xl - yl; + if (precision == 0) + *overflow = false; + else if (sgn == SIGNED) + *overflow = (((xl ^ yl) & (resultl ^ xl)) >> (precision - 1)) & 1; + else + *overflow = ((resultl << (HOST_BITS_PER_WIDE_INT - precision)) + > (xl << (HOST_BITS_PER_WIDE_INT - precision))); + val[0] = sext_hwi (resultl, precision); + result.set_len (1); + } + else + result.set_len (sub_large (val, xi.val, xi.len, + yi.val, yi.len, precision, + sgn, overflow)); + return result; +} + +/* Return X * Y. */ +template <typename T1, typename T2> +inline WI_BINARY_RESULT (T1, T2) +wi::mul (const T1 &x, const T2 &y) +{ + WI_BINARY_RESULT_VAR (result, val, T1, x, T2, y); + unsigned int precision = get_precision (result); + wide_int_ref xi (x, precision); + wide_int_ref yi (y, precision); + if (precision <= HOST_BITS_PER_WIDE_INT) + { + val[0] = sext_hwi (xi.ulow () * yi.ulow (), precision); + result.set_len (1); + } + else + result.set_len (mul_internal (val, xi.val, xi.len, yi.val, yi.len, + precision, UNSIGNED, 0, false, false)); + return result; +} + +/* Return X * Y. Treat X and Y as having the signednes given by SGN + and indicate in *OVERFLOW whether the operation overflowed. 
*/ +template <typename T1, typename T2> +inline WI_BINARY_RESULT (T1, T2) +wi::mul (const T1 &x, const T2 &y, signop sgn, bool *overflow) +{ + WI_BINARY_RESULT_VAR (result, val, T1, x, T2, y); + unsigned int precision = get_precision (result); + wide_int_ref xi (x, precision); + wide_int_ref yi (y, precision); + result.set_len (mul_internal (val, xi.val, xi.len, + yi.val, yi.len, precision, + sgn, overflow, false, false)); + return result; +} + +/* Return X * Y, treating both X and Y as signed values. Indicate in + *OVERFLOW whether the operation overflowed. */ +template <typename T1, typename T2> +inline WI_BINARY_RESULT (T1, T2) +wi::smul (const T1 &x, const T2 &y, bool *overflow) +{ + return mul (x, y, SIGNED, overflow); +} + +/* Return X * Y, treating both X and Y as unsigned values. Indicate in + *OVERFLOW whether the operation overflowed. */ +template <typename T1, typename T2> +inline WI_BINARY_RESULT (T1, T2) +wi::umul (const T1 &x, const T2 &y, bool *overflow) +{ + return mul (x, y, UNSIGNED, overflow); +} + +/* Perform a widening multiplication of X and Y, extending the values + according to SGN, and return the high part of the result. */ +template <typename T1, typename T2> +inline WI_BINARY_RESULT (T1, T2) +wi::mul_high (const T1 &x, const T2 &y, signop sgn) +{ + WI_BINARY_RESULT_VAR (result, val, T1, x, T2, y); + unsigned int precision = get_precision (result); + wide_int_ref xi (x, precision); + wide_int_ref yi (y, precision); + result.set_len (mul_internal (val, xi.val, xi.len, + yi.val, yi.len, precision, + sgn, 0, true, false)); + return result; +} + +/* Return X / Y, rouding towards 0. Treat X and Y as having the + signedness given by SGN. Indicate in *OVERFLOW if the result + overflows. 
*/ +template <typename T1, typename T2> +inline WI_BINARY_RESULT (T1, T2) +wi::div_trunc (const T1 &x, const T2 &y, signop sgn, bool *overflow) +{ + WI_BINARY_RESULT_VAR (quotient, quotient_val, T1, x, T2, y); + unsigned int precision = get_precision (quotient); + wide_int_ref xi (x, precision); + wide_int_ref yi (y); + + quotient.set_len (divmod_internal (quotient_val, 0, 0, xi.val, xi.len, + precision, + yi.val, yi.len, yi.precision, + sgn, overflow)); + return quotient; +} + +/* Return X / Y, rouding towards 0. Treat X and Y as signed values. */ +template <typename T1, typename T2> +inline WI_BINARY_RESULT (T1, T2) +wi::sdiv_trunc (const T1 &x, const T2 &y) +{ + return div_trunc (x, y, SIGNED); +} + +/* Return X / Y, rouding towards 0. Treat X and Y as unsigned values. */ +template <typename T1, typename T2> +inline WI_BINARY_RESULT (T1, T2) +wi::udiv_trunc (const T1 &x, const T2 &y) +{ + return div_trunc (x, y, UNSIGNED); +} + +/* Return X / Y, rouding towards -inf. Treat X and Y as having the + signedness given by SGN. Indicate in *OVERFLOW if the result + overflows. */ +template <typename T1, typename T2> +inline WI_BINARY_RESULT (T1, T2) +wi::div_floor (const T1 &x, const T2 &y, signop sgn, bool *overflow) +{ + WI_BINARY_RESULT_VAR (quotient, quotient_val, T1, x, T2, y); + WI_BINARY_RESULT_VAR (remainder, remainder_val, T1, x, T2, y); + unsigned int precision = get_precision (quotient); + wide_int_ref xi (x, precision); + wide_int_ref yi (y); + + unsigned int remainder_len; + quotient.set_len (divmod_internal (quotient_val, + &remainder_len, remainder_val, + xi.val, xi.len, precision, + yi.val, yi.len, yi.precision, sgn, + overflow)); + remainder.set_len (remainder_len); + if (wi::neg_p (quotient, sgn) && remainder != 0) + return quotient + 1; + return quotient; +} + +/* Return X / Y, rouding towards -inf. Treat X and Y as signed values. 
*/ +template <typename T1, typename T2> +inline WI_BINARY_RESULT (T1, T2) +wi::sdiv_floor (const T1 &x, const T2 &y) +{ + return div_floor (x, y, SIGNED); +} + +/* Return X / Y, rouding towards -inf. Treat X and Y as unsigned values. */ +/* ??? Why do we have both this and udiv_trunc. Aren't they the same? */ +template <typename T1, typename T2> +inline WI_BINARY_RESULT (T1, T2) +wi::udiv_floor (const T1 &x, const T2 &y) +{ + return div_floor (x, y, UNSIGNED); +} + +/* Return X / Y, rouding towards +inf. Treat X and Y as having the + signedness given by SGN. Indicate in *OVERFLOW if the result + overflows. */ +template <typename T1, typename T2> +inline WI_BINARY_RESULT (T1, T2) +wi::div_ceil (const T1 &x, const T2 &y, signop sgn, bool *overflow) +{ + WI_BINARY_RESULT_VAR (quotient, quotient_val, T1, x, T2, y); + WI_BINARY_RESULT_VAR (remainder, remainder_val, T1, x, T2, y); + unsigned int precision = get_precision (quotient); + wide_int_ref xi (x, precision); + wide_int_ref yi (y); + + unsigned int remainder_len; + quotient.set_len (divmod_internal (quotient_val, + &remainder_len, remainder_val, + xi.val, xi.len, precision, + yi.val, yi.len, yi.precision, sgn, + overflow)); + remainder.set_len (remainder_len); + if (!wi::neg_p (quotient, sgn) && remainder != 0) + return quotient + 1; + return quotient; +} + +/* Return X / Y, rouding towards nearest with ties away from zero. + Treat X and Y as having the signedness given by SGN. Indicate + in *OVERFLOW if the result overflows. 
*/ +template <typename T1, typename T2> +inline WI_BINARY_RESULT (T1, T2) +wi::div_round (const T1 &x, const T2 &y, signop sgn, bool *overflow) +{ + WI_BINARY_RESULT_VAR (quotient, quotient_val, T1, x, T2, y); + WI_BINARY_RESULT_VAR (remainder, remainder_val, T1, x, T2, y); + unsigned int precision = get_precision (quotient); + wide_int_ref xi (x, precision); + wide_int_ref yi (y); + + unsigned int remainder_len; + quotient.set_len (divmod_internal (quotient_val, + &remainder_len, remainder_val, + xi.val, xi.len, precision, + yi.val, yi.len, yi.precision, sgn, + overflow)); + remainder.set_len (remainder_len); + + if (remainder != 0) + { + if (sgn == SIGNED) + { + if (wi::gts_p (wi::lrshift (wi::abs (y), 1), + wi::abs (remainder))) + { + if (wi::neg_p (quotient)) + return quotient - 1; + else + return quotient + 1; + } + } + else + { + if (wi::gtu_p (wi::lrshift (y, 1), remainder)) + return quotient + 1; + } + } + return quotient; +} + +/* Return X / Y, rouding towards nearest with ties away from zero. + Treat X and Y as having the signedness given by SGN. Store the + remainder in *REMAINDER_PTR. */ +template <typename T1, typename T2> +inline WI_BINARY_RESULT (T1, T2) +wi::divmod_trunc (const T1 &x, const T2 &y, signop sgn, + WI_BINARY_RESULT (T1, T2) *remainder_ptr) +{ + WI_BINARY_RESULT_VAR (quotient, quotient_val, T1, x, T2, y); + WI_BINARY_RESULT_VAR (remainder, remainder_val, T1, x, T2, y); + unsigned int precision = get_precision (quotient); + wide_int_ref xi (x, precision); + wide_int_ref yi (y); + + unsigned int remainder_len; + quotient.set_len (divmod_internal (quotient_val, + &remainder_len, remainder_val, + xi.val, xi.len, precision, + yi.val, yi.len, yi.precision, sgn, 0)); + remainder.set_len (remainder_len); + + *remainder_ptr = remainder; + return quotient; +} + +/* Compute X / Y, rouding towards 0, and return the remainder. + Treat X and Y as having the signedness given by SGN. Indicate + in *OVERFLOW if the division overflows. 
*/ +template <typename T1, typename T2> +inline WI_BINARY_RESULT (T1, T2) +wi::mod_trunc (const T1 &x, const T2 &y, signop sgn, bool *overflow) +{ + WI_BINARY_RESULT_VAR (remainder, remainder_val, T1, x, T2, y); + unsigned int precision = get_precision (remainder); + wide_int_ref xi (x, precision); + wide_int_ref yi (y); + + unsigned int remainder_len; + divmod_internal (0, &remainder_len, remainder_val, + xi.val, xi.len, precision, + yi.val, yi.len, yi.precision, sgn, overflow); + remainder.set_len (remainder_len); + + return remainder; +} + +/* Compute X / Y, rouding towards 0, and return the remainder. + Treat X and Y as signed values. */ +template <typename T1, typename T2> +inline WI_BINARY_RESULT (T1, T2) +wi::smod_trunc (const T1 &x, const T2 &y) +{ + return mod_trunc (x, y, SIGNED); +} + +/* Compute X / Y, rouding towards 0, and return the remainder. + Treat X and Y as unsigned values. */ +template <typename T1, typename T2> +inline WI_BINARY_RESULT (T1, T2) +wi::umod_trunc (const T1 &x, const T2 &y) +{ + return mod_trunc (x, y, UNSIGNED); +} + +/* Compute X / Y, rouding towards -inf, and return the remainder. + Treat X and Y as having the signedness given by SGN. Indicate + in *OVERFLOW if the division overflows. 
*/ +template <typename T1, typename T2> +inline WI_BINARY_RESULT (T1, T2) +wi::mod_floor (const T1 &x, const T2 &y, signop sgn, bool *overflow) +{ + WI_BINARY_RESULT_VAR (quotient, quotient_val, T1, x, T2, y); + WI_BINARY_RESULT_VAR (remainder, remainder_val, T1, x, T2, y); + unsigned int precision = get_precision (quotient); + wide_int_ref xi (x, precision); + wide_int_ref yi (y); + + unsigned int remainder_len; + quotient.set_len (divmod_internal (quotient_val, + &remainder_len, remainder_val, + xi.val, xi.len, precision, + yi.val, yi.len, yi.precision, sgn, + overflow)); + remainder.set_len (remainder_len); + + if (wi::neg_p (quotient, sgn) && remainder != 0) + return remainder + y; + return remainder; +} + +/* Compute X / Y, rouding towards -inf, and return the remainder. + Treat X and Y as unsigned values. */ +/* ??? Why do we have both this and umod_trunc. Aren't they the same? */ +template <typename T1, typename T2> +inline WI_BINARY_RESULT (T1, T2) +wi::umod_floor (const T1 &x, const T2 &y) +{ + return mod_floor (x, y, UNSIGNED); +} + +/* Compute X / Y, rouding towards +inf, and return the remainder. + Treat X and Y as having the signedness given by SGN. Indicate + in *OVERFLOW if the division overflows. 
*/ +template <typename T1, typename T2> +inline WI_BINARY_RESULT (T1, T2) +wi::mod_ceil (const T1 &x, const T2 &y, signop sgn, bool *overflow) +{ + WI_BINARY_RESULT_VAR (quotient, quotient_val, T1, x, T2, y); + WI_BINARY_RESULT_VAR (remainder, remainder_val, T1, x, T2, y); + unsigned int precision = get_precision (quotient); + wide_int_ref xi (x, precision); + wide_int_ref yi (y); + + unsigned int remainder_len; + quotient.set_len (divmod_internal (quotient_val, + &remainder_len, remainder_val, + xi.val, xi.len, precision, + yi.val, yi.len, yi.precision, sgn, + overflow)); + remainder.set_len (remainder_len); + + if (!wi::neg_p (quotient, sgn) && remainder != 0) + return remainder - y; + return remainder; +} + +/* Compute X / Y, rouding towards nearest with ties away from zero, + and return the remainder. Treat X and Y as having the signedness + given by SGN. Indicate in *OVERFLOW if the division overflows. */ +template <typename T1, typename T2> +inline WI_BINARY_RESULT (T1, T2) +wi::mod_round (const T1 &x, const T2 &y, signop sgn, bool *overflow) +{ + WI_BINARY_RESULT_VAR (quotient, quotient_val, T1, x, T2, y); + WI_BINARY_RESULT_VAR (remainder, remainder_val, T1, x, T2, y); + unsigned int precision = get_precision (quotient); + wide_int_ref xi (x, precision); + wide_int_ref yi (y); + + unsigned int remainder_len; + quotient.set_len (divmod_internal (quotient_val, + &remainder_len, remainder_val, + xi.val, xi.len, precision, + yi.val, yi.len, yi.precision, sgn, + overflow)); + remainder.set_len (remainder_len); + + if (remainder != 0) + { + if (sgn == SIGNED) + { + if (wi::gts_p (wi::lrshift (wi::abs (y), 1), + wi::abs (remainder))) + { + if (wi::neg_p (quotient)) + return remainder + y; + else + return remainder - y; + } + } + else + { + if (wi::gtu_p (wi::lrshift (y, 1), remainder)) + return remainder - y; + } + } + return remainder; +} + +/* Return true if X is a multiple of Y, storing X / Y in *RES if so. + Treat X and Y as having the signedness given by SGN. 
*/ +template <typename T1, typename T2> +inline bool +wi::multiple_of_p (const T1 &x, const T2 &y, signop sgn, + WI_BINARY_RESULT (T1, T2) *res) +{ + WI_BINARY_RESULT (T1, T2) remainder; + WI_BINARY_RESULT (T1, T2) quotient + = divmod_trunc (x, y, sgn, &remainder); + if (remainder == 0) + { + *res = quotient; + return true; + } + return false; +} + +/* Truncate the value of shift value X so that the value is within + BITSIZE. PRECISION is the number of bits in the value being + shifted. */ +inline unsigned int +wi::trunc_shift (const wide_int_ref &x, unsigned int bitsize, + unsigned int precision) +{ + if (bitsize == 0) + { + gcc_checking_assert (!neg_p (x)); + if (geu_p (x, precision)) + return precision; + } + /* Flush out undefined bits. */ + unsigned int shift = x.ulow (); + if (x.precision < HOST_BITS_PER_WIDE_INT) + shift = zext_hwi (shift, x.precision); + return shift & (bitsize - 1); +} + +/* Return X << Y. If BITSIZE is nonzero, only use the low BITSIZE + bits of Y. */ +template <typename T> +inline WI_UNARY_RESULT (T) +wi::lshift (const T &x, const wide_int_ref &y, unsigned int bitsize) +{ + WI_UNARY_RESULT_VAR (result, val, T, x); + unsigned int precision = get_precision (result); + wide_int_ref xi (x, precision); + unsigned int shift = trunc_shift (y, bitsize, precision); + /* Handle the simple cases quickly. */ + if (shift >= precision) + { + val[0] = 0; + result.set_len (1); + } + else if (precision <= HOST_BITS_PER_WIDE_INT) + { + val[0] = sext_hwi (xi.ulow () << shift, precision); + result.set_len (1); + } + else + result.set_len (lshift_large (val, xi.val, xi.len, + precision, shift)); + return result; +} + +/* Return X >> Y, using a logical shift. If BITSIZE is nonzero, only + use the low BITSIZE bits of Y. 
*/
template <typename T>
inline WI_UNARY_RESULT (T)
wi::lrshift (const T &x, const wide_int_ref &y, unsigned int bitsize)
{
  WI_UNARY_RESULT_VAR (result, val, T, x);
  /* Do things in the precision of the input rather than the output,
     since the result can be no larger than that.  */
  wide_int_ref xi (x);
  unsigned int shift = trunc_shift (y, bitsize, xi.precision);
  /* Handle the simple cases quickly.  */
  if (shift >= xi.precision)
    {
      /* A logical shift by the full precision or more leaves zero.  */
      val[0] = 0;
      result.set_len (1);
    }
  else if (xi.precision <= HOST_BITS_PER_WIDE_INT)
    {
      /* Zero-extend first so the host ">>" shifts in zeros rather
         than copies of a sign bit, then sign-extend to restore
         canonical form.  */
      val[0] = sext_hwi (zext_hwi (xi.ulow (), xi.precision) >> shift,
			 xi.precision);
      result.set_len (1);
    }
  else
    result.set_len (lrshift_large (val, xi.val, xi.len, xi.precision,
				   get_precision (result), shift));
  return result;
}

/* Return X >> Y, using an arithmetic shift.  If BITSIZE is nonzero,
   only use the low BITSIZE bits of Y.  */
template <typename T>
inline WI_UNARY_RESULT (T)
wi::arshift (const T &x, const wide_int_ref &y, unsigned int bitsize)
{
  WI_UNARY_RESULT_VAR (result, val, T, x);
  /* Do things in the precision of the input rather than the output,
     since the result can be no larger than that.  */
  wide_int_ref xi (x);
  unsigned int shift = trunc_shift (y, bitsize, xi.precision);
  /* Handle the simple case quickly.  */
  if (shift >= xi.precision)
    {
      /* An arithmetic shift by the full precision or more leaves
         just the sign: all-zeros or all-ones.  */
      val[0] = sign_mask (x);
      result.set_len (1);
    }
  else if (xi.precision <= HOST_BITS_PER_WIDE_INT)
    {
      /* Shift logically, then sign-extend from the new top bit
         (PRECISION - SHIFT) -- equivalent to an arithmetic shift of
         the PRECISION-bit value.  */
      val[0] = sext_hwi (xi.ulow () >> shift, xi.precision - shift);
      result.set_len (1);
    }
  else
    result.set_len (arshift_large (val, xi.val, xi.len, xi.precision,
				   get_precision (result), shift));
  return result;
}

/* Return X >> Y, using an arithmetic shift if SGN is SIGNED and a
   logical shift otherwise.  If BITSIZE is nonzero, only use the low
   BITSIZE bits of Y. 
*/ +template <typename T> +inline WI_UNARY_RESULT (T) +wi::rshift (const T &x, const wide_int_ref &y, signop sgn, + unsigned int bitsize) +{ + if (sgn == UNSIGNED) + return lrshift (x, y, bitsize); + else + return arshift (x, y, bitsize); +} + +/* Return the result of rotating the low WIDTH bits of X left by Y + bits and zero-extending the result. Use a full-width rotate if + WIDTH is zero. */ +template <typename T> +inline WI_UNARY_RESULT (T) +wi::lrotate (const T &x, const wide_int_ref &y, unsigned int width) +{ + unsigned int precision = get_binary_precision (x, x); + if (width == 0) + width = precision; + gcc_checking_assert ((width & -width) == width); + WI_UNARY_RESULT (T) left = wi::lshift (x, y, width); + WI_UNARY_RESULT (T) right = wi::lrshift (x, wi::sub (width, y), width); + if (width != precision) + return wi::zext (left, width) | wi::zext (right, width); + return left | right; +} + +/* Return the result of rotating the low WIDTH bits of X right by Y + bits and zero-extending the result. Use a full-width rotate if + WIDTH is zero. */ +template <typename T> +inline WI_UNARY_RESULT (T) +wi::rrotate (const T &x, const wide_int_ref &y, unsigned int width) +{ + unsigned int precision = get_binary_precision (x, x); + if (width == 0) + width = precision; + gcc_checking_assert ((width & -width) == width); + WI_UNARY_RESULT (T) right = wi::lrshift (x, y, width); + WI_UNARY_RESULT (T) left = wi::lshift (x, wi::sub (width, y), width); + if (width != precision) + return wi::zext (left, width) | wi::zext (right, width); + return left | right; +} + +/* Return 0 if the number of 1s in X is even and 1 if the number of 1s + is odd. */ +inline int +wi::parity (const wide_int_ref &x) +{ + return popcount (x) & 1; +} + +/* Extract WIDTH bits from X, starting at BITPOS. 
*/
template <typename T>
inline unsigned HOST_WIDE_INT
wi::extract_uhwi (const T &x, unsigned int bitpos,
		  unsigned int width)
{
  /* Widen the view of X if necessary so that elt () below can read
     past X's own precision (the extra bits read as sign copies).  */
  unsigned precision = get_precision (x);
  if (precision < bitpos + width)
    precision = bitpos + width;
  wide_int_ref xi (x, precision);

  /* Handle this rare case after the above, so that we assert about
     bogus BITPOS values.  */
  if (width == 0)
    return 0;

  unsigned int start = bitpos / HOST_BITS_PER_WIDE_INT;
  unsigned int shift = bitpos % HOST_BITS_PER_WIDE_INT;
  unsigned HOST_WIDE_INT res = xi.elt (start);
  res >>= shift;
  /* NOTE(review): assumes WIDTH <= HOST_BITS_PER_WIDE_INT, so the
     field spans at most two HWI elements -- confirm with callers.  */
  if (shift + width > HOST_BITS_PER_WIDE_INT)
    {
      unsigned HOST_WIDE_INT upper = xi.elt (start + 1);
      /* -SHIFT % HOST_BITS_PER_WIDE_INT is HOST_BITS_PER_WIDE_INT
	 - SHIFT via unsigned wraparound (SHIFT is nonzero here),
	 written this way to avoid an out-of-range shift count.  */
      res |= upper << (-shift % HOST_BITS_PER_WIDE_INT);
    }
  return zext_hwi (res, width);
}

/* Deliberately empty garbage-collector and precompiled-header hooks:
   wide-int values are plain data with no GC references to mark.  */
template<typename T>
void
gt_ggc_mx (generic_wide_int <T> *)
{
}

template<typename T>
void
gt_pch_nx (generic_wide_int <T> *)
{
}

template<typename T>
void
gt_pch_nx (generic_wide_int <T> *, void (*) (void *, void *), void *)
{
}

/* Declarations of wi routines whose definitions live out of line or
   later in this header.  */
namespace wi
{
  /* Used for overloaded functions in which the only other acceptable
     scalar type is a pointer.  It stops a plain 0 from being treated
     as a null pointer.  */
  struct never_used1 {};
  struct never_used2 {};

  wide_int min_value (unsigned int, signop);
  wide_int min_value (never_used1 *);
  wide_int min_value (never_used2 *);
  wide_int max_value (unsigned int, signop);
  wide_int max_value (never_used1 *);
  wide_int max_value (never_used2 *);

  wide_int mul_full (const wide_int_ref &, const wide_int_ref &, signop);

  /* FIXME: this is target dependent, so should be elsewhere.
     It also seems to assume that CHAR_BIT == BITS_PER_UNIT.  */
  wide_int from_buffer (const unsigned char *, unsigned int);

#ifndef GENERATOR_FILE
  void to_mpz (wide_int, mpz_t, signop);
#endif

  wide_int mask (unsigned int, bool, unsigned int);
  wide_int shifted_mask (unsigned int, unsigned int, bool, unsigned int);
  wide_int set_bit_in_zero (unsigned int, unsigned int);
  wide_int insert (const wide_int &x, const wide_int &y, unsigned int,
		   unsigned int);

  template <typename T>
  T mask (unsigned int, bool);

  template <typename T>
  T shifted_mask (unsigned int, unsigned int, bool);

  template <typename T>
  T set_bit_in_zero (unsigned int);

  /* Raw-array variants: write the representation into a caller-supplied
     HOST_WIDE_INT buffer and return the block count.  */
  unsigned int mask (HOST_WIDE_INT *, unsigned int, bool, unsigned int);
  unsigned int shifted_mask (HOST_WIDE_INT *, unsigned int, unsigned int,
			     bool, unsigned int);
  unsigned int from_array (HOST_WIDE_INT *, const HOST_WIDE_INT *,
			   unsigned int, unsigned int, bool);
}

/* Perform a widening multiplication of X and Y, extending the values
   according to SGN.  The result has twice the precision of the
   operands.  */
inline wide_int
wi::mul_full (const wide_int_ref &x, const wide_int_ref &y, signop sgn)
{
  gcc_checking_assert (x.precision == y.precision);
  /* A full multiply needs double-width storage for the product.  */
  wide_int result = wide_int::create (x.precision * 2);
  result.set_len (mul_internal (result.write_val (), x.val, x.len,
				y.val, y.len, x.precision,
				sgn, 0, false, true));
  return result;
}

/* Return a PRECISION-bit integer in which the low WIDTH bits are set
   and the other bits are clear, or the inverse if NEGATE_P.  */
inline wide_int
wi::mask (unsigned int width, bool negate_p, unsigned int precision)
{
  wide_int result = wide_int::create (precision);
  result.set_len (mask (result.write_val (), width, negate_p, precision));
  return result;
}

/* Return a PRECISION-bit integer in which the low START bits are clear,
   the next WIDTH bits are set, and the other bits are clear,
   or the inverse if NEGATE_P. 
*/
inline wide_int
wi::shifted_mask (unsigned int start, unsigned int width, bool negate_p,
		  unsigned int precision)
{
  /* Delegate to the raw-array overload, which also returns the
     canonical block count for set_len.  */
  wide_int result = wide_int::create (precision);
  result.set_len (shifted_mask (result.write_val (), start, width, negate_p,
				precision));
  return result;
}

/* Return a PRECISION-bit integer in which bit BIT is set and all the
   others are clear.  */
inline wide_int
wi::set_bit_in_zero (unsigned int bit, unsigned int precision)
{
  /* A single set bit is a one-bit-wide shifted mask.  */
  return shifted_mask (bit, 1, false, precision);
}

/* Return an integer of type T in which the low WIDTH bits are set
   and the other bits are clear, or the inverse if NEGATE_P.  */
template <typename T>
inline T
wi::mask (unsigned int width, bool negate_p)
{
  /* Only usable with types whose precision is fixed at compile time;
     the assert rejects types whose traits report precision 0.  */
  STATIC_ASSERT (wi::int_traits<T>::precision);
  T result;
  result.set_len (mask (result.write_val (), width, negate_p,
			wi::int_traits <T>::precision));
  return result;
}

/* Return an integer of type T in which the low START bits are clear,
   the next WIDTH bits are set, and the other bits are clear, or the
   inverse if NEGATE_P.  */
template <typename T>
inline T
wi::shifted_mask (unsigned int start, unsigned int width, bool negate_p)
{
  /* Only usable with types whose precision is fixed at compile time;
     the assert rejects types whose traits report precision 0.  */
  STATIC_ASSERT (wi::int_traits<T>::precision);
  T result;
  result.set_len (shifted_mask (result.write_val (), start, width,
				negate_p,
				wi::int_traits <T>::precision));
  return result;
}

/* Return an integer of type T in which bit BIT is set and all the
   others are clear.  */
template <typename T>
inline T
wi::set_bit_in_zero (unsigned int bit)
{
  /* A single set bit is a one-bit-wide shifted mask.  */
  return shifted_mask <T> (bit, 1, false);
}

#endif /* WIDE_INT_H */