summaryrefslogtreecommitdiff
path: root/gcc
diff options
context:
space:
mode:
authorKenneth Zadeck <zadeck@naturalbridge.com>2014-05-06 16:25:05 +0000
committerMike Stump <mrs@gcc.gnu.org>2014-05-06 16:25:05 +0000
commit807e902eea17f3132488c256c963823976b2348c (patch)
treee5e1af94eb1502ba893bd6ce4a11f68877ff62a9 /gcc
parent6122336c832dc4dfedc49279549caddce86306ff (diff)
downloadgcc-807e902eea17f3132488c256c963823976b2348c.tar.gz
Merge in wide-int.
From-SVN: r210113
Diffstat (limited to 'gcc')
-rw-r--r--gcc/ChangeLog.wide-int899
-rw-r--r--gcc/Makefile.in12
-rw-r--r--gcc/ada/gcc-interface/cuintp.c6
-rw-r--r--gcc/ada/gcc-interface/decl.c8
-rw-r--r--gcc/ada/gcc-interface/utils.c3
-rw-r--r--gcc/alias.c26
-rw-r--r--gcc/builtins.c194
-rw-r--r--gcc/c-family/c-ada-spec.c34
-rw-r--r--gcc/c-family/c-common.c70
-rw-r--r--gcc/c-family/c-format.c2
-rw-r--r--gcc/c-family/c-lex.c39
-rw-r--r--gcc/c-family/c-pretty-print.c13
-rw-r--r--gcc/c-family/cilk.c3
-rw-r--r--gcc/c/c-decl.c17
-rw-r--r--gcc/c/c-parser.c2
-rw-r--r--gcc/c/c-typeck.c34
-rw-r--r--gcc/cfgloop.c28
-rw-r--r--gcc/cfgloop.h27
-rw-r--r--gcc/cgraph.c3
-rw-r--r--gcc/combine.c21
-rw-r--r--gcc/config/aarch64/aarch64.c38
-rw-r--r--gcc/config/arc/arc.c11
-rw-r--r--gcc/config/arm/arm.c38
-rw-r--r--gcc/config/avr/avr.c9
-rw-r--r--gcc/config/bfin/bfin.c9
-rw-r--r--gcc/config/darwin.c30
-rw-r--r--gcc/config/i386/i386.c31
-rw-r--r--gcc/config/msp430/msp430.c2
-rw-r--r--gcc/config/nds32/nds32.c8
-rw-r--r--gcc/config/rs6000/predicates.md13
-rw-r--r--gcc/config/rs6000/rs6000-c.c13
-rw-r--r--gcc/config/rs6000/rs6000.c90
-rw-r--r--gcc/config/rs6000/rs6000.h1
-rw-r--r--gcc/config/rs6000/rs6000.md12
-rw-r--r--gcc/config/s390/s390.c4
-rw-r--r--gcc/config/sol2-c.c4
-rw-r--r--gcc/config/sparc/sparc.c21
-rw-r--r--gcc/config/vax/vax.c3
-rw-r--r--gcc/coretypes.h3
-rw-r--r--gcc/cp/call.c7
-rw-r--r--gcc/cp/class.c27
-rw-r--r--gcc/cp/cvt.c5
-rw-r--r--gcc/cp/decl.c34
-rw-r--r--gcc/cp/init.c46
-rw-r--r--gcc/cp/mangle.c25
-rw-r--r--gcc/cp/tree.c4
-rw-r--r--gcc/cp/typeck2.c11
-rw-r--r--gcc/cse.c12
-rw-r--r--gcc/cselib.c14
-rw-r--r--gcc/dbxout.c98
-rw-r--r--gcc/defaults.h16
-rw-r--r--gcc/dfp.c11
-rw-r--r--gcc/dfp.h2
-rw-r--r--gcc/doc/generic.texi51
-rw-r--r--gcc/doc/rtl.texi57
-rw-r--r--gcc/doc/tm.texi60
-rw-r--r--gcc/doc/tm.texi.in58
-rw-r--r--gcc/dojump.c4
-rw-r--r--gcc/double-int.h34
-rw-r--r--gcc/dwarf2out.c359
-rw-r--r--gcc/dwarf2out.h4
-rw-r--r--gcc/emit-rtl.c126
-rw-r--r--gcc/explow.c35
-rw-r--r--gcc/expmed.c161
-rw-r--r--gcc/expr.c184
-rw-r--r--gcc/final.c16
-rw-r--r--gcc/fixed-value.c28
-rw-r--r--gcc/fold-const.c828
-rw-r--r--gcc/fold-const.h4
-rw-r--r--gcc/fortran/target-memory.c3
-rw-r--r--gcc/fortran/trans-array.c19
-rw-r--r--gcc/fortran/trans-const.c11
-rw-r--r--gcc/fortran/trans-decl.c2
-rw-r--r--gcc/fortran/trans-expr.c14
-rw-r--r--gcc/fortran/trans-intrinsic.c27
-rw-r--r--gcc/fortran/trans-types.c13
-rw-r--r--gcc/gencheck.c3
-rw-r--r--gcc/genemit.c1
-rw-r--r--gcc/gengenrtl.c1
-rw-r--r--gcc/gengtype-lex.l3
-rw-r--r--gcc/gengtype-parse.c58
-rw-r--r--gcc/gengtype-state.c1
-rw-r--r--gcc/gengtype.c7
-rw-r--r--gcc/genpreds.c7
-rw-r--r--gcc/genrecog.c8
-rw-r--r--gcc/gensupport.c5
-rw-r--r--gcc/gimple-fold.c139
-rw-r--r--gcc/gimple-pretty-print.c16
-rw-r--r--gcc/gimple-ssa-strength-reduction.c239
-rw-r--r--gcc/gimple.c6
-rw-r--r--gcc/go/go-gcc.cc3
-rw-r--r--gcc/godump.c8
-rw-r--r--gcc/graphite-clast-to-gimple.c5
-rw-r--r--gcc/graphite-sese-to-poly.c7
-rw-r--r--gcc/hooks.c3
-rw-r--r--gcc/hooks.h5
-rw-r--r--gcc/internal-fn.c44
-rw-r--r--gcc/ipa-devirt.c2
-rw-r--r--gcc/ipa-prop.c9
-rw-r--r--gcc/java/boehm.c22
-rw-r--r--gcc/java/expr.c7
-rw-r--r--gcc/java/jcf-parse.c8
-rw-r--r--gcc/loop-doloop.c19
-rw-r--r--gcc/loop-iv.c16
-rw-r--r--gcc/loop-unroll.c50
-rw-r--r--gcc/lto-streamer-in.c41
-rw-r--r--gcc/lto-streamer-out.c32
-rw-r--r--gcc/lto/lto-lang.c3
-rw-r--r--gcc/lto/lto.c4
-rw-r--r--gcc/mkconfig.sh3
-rw-r--r--gcc/objc/objc-act.c36
-rw-r--r--gcc/omp-low.c4
-rw-r--r--gcc/optabs.c37
-rw-r--r--gcc/postreload.c20
-rw-r--r--gcc/predict.c81
-rw-r--r--gcc/pretty-print.h8
-rw-r--r--gcc/print-rtl.c5
-rw-r--r--gcc/print-tree.c26
-rw-r--r--gcc/read-rtl.c71
-rw-r--r--gcc/real.c176
-rw-r--r--gcc/real.h26
-rw-r--r--gcc/recog.c52
-rw-r--r--gcc/rtl.c55
-rw-r--r--gcc/rtl.def3
-rw-r--r--gcc/rtl.h193
-rw-r--r--gcc/rtlanal.c25
-rw-r--r--gcc/sched-vis.c17
-rw-r--r--gcc/sel-sched-ir.c8
-rw-r--r--gcc/signop.h35
-rw-r--r--gcc/simplify-rtx.c963
-rw-r--r--gcc/stmt.c8
-rw-r--r--gcc/stor-layout.c85
-rw-r--r--gcc/stor-layout.h2
-rw-r--r--gcc/system.h10
-rw-r--r--gcc/target.def4
-rw-r--r--gcc/target.h2
-rw-r--r--gcc/targhooks.c4
-rw-r--r--gcc/targhooks.h3
-rw-r--r--gcc/testsuite/gcc.dg/tree-ssa/pr45427.c2
-rw-r--r--gcc/tree-affine.c190
-rw-r--r--gcc/tree-affine.h23
-rw-r--r--gcc/tree-call-cdce.c4
-rw-r--r--gcc/tree-cfg.c12
-rw-r--r--gcc/tree-chrec.c20
-rw-r--r--gcc/tree-core.h22
-rw-r--r--gcc/tree-data-ref.c14
-rw-r--r--gcc/tree-dfa.c101
-rw-r--r--gcc/tree-dfa.h19
-rw-r--r--gcc/tree-dump.c7
-rw-r--r--gcc/tree-eh.c8
-rw-r--r--gcc/tree-inline.c6
-rw-r--r--gcc/tree-object-size.c8
-rw-r--r--gcc/tree-predcom.c40
-rw-r--r--gcc/tree-pretty-print.c30
-rw-r--r--gcc/tree-scalar-evolution.c2
-rw-r--r--gcc/tree-ssa-address.c43
-rw-r--r--gcc/tree-ssa-alias.c80
-rw-r--r--gcc/tree-ssa-ccp.c428
-rw-r--r--gcc/tree-ssa-forwprop.c24
-rw-r--r--gcc/tree-ssa-loop-im.c4
-rw-r--r--gcc/tree-ssa-loop-ivcanon.c12
-rw-r--r--gcc/tree-ssa-loop-ivopts.c50
-rw-r--r--gcc/tree-ssa-loop-niter.c260
-rw-r--r--gcc/tree-ssa-loop-niter.h8
-rw-r--r--gcc/tree-ssa-loop.h4
-rw-r--r--gcc/tree-ssa-math-opts.c10
-rw-r--r--gcc/tree-ssa-phiopt.c2
-rw-r--r--gcc/tree-ssa-pre.c10
-rw-r--r--gcc/tree-ssa-reassoc.c5
-rw-r--r--gcc/tree-ssa-sccvn.c53
-rw-r--r--gcc/tree-ssa-structalias.c7
-rw-r--r--gcc/tree-ssa-uninit.c9
-rw-r--r--gcc/tree-ssa.c6
-rw-r--r--gcc/tree-ssanames.c77
-rw-r--r--gcc/tree-ssanames.h25
-rw-r--r--gcc/tree-streamer-in.c11
-rw-r--r--gcc/tree-streamer-out.c24
-rw-r--r--gcc/tree-switch-conversion.c24
-rw-r--r--gcc/tree-vect-data-refs.c16
-rw-r--r--gcc/tree-vect-generic.c42
-rw-r--r--gcc/tree-vect-loop-manip.c4
-rw-r--r--gcc/tree-vect-loop.c14
-rw-r--r--gcc/tree-vect-patterns.c6
-rw-r--r--gcc/tree-vect-stmts.c18
-rw-r--r--gcc/tree-vrp.c754
-rw-r--r--gcc/tree.c838
-rw-r--r--gcc/tree.def25
-rw-r--r--gcc/tree.h278
-rw-r--r--gcc/value-prof.c29
-rw-r--r--gcc/var-tracking.c17
-rw-r--r--gcc/varasm.c75
-rw-r--r--gcc/wide-int-print.cc145
-rw-r--r--gcc/wide-int-print.h39
-rw-r--r--gcc/wide-int.cc2083
-rw-r--r--gcc/wide-int.h3175
195 files changed, 11301 insertions, 4876 deletions
diff --git a/gcc/ChangeLog.wide-int b/gcc/ChangeLog.wide-int
new file mode 100644
index 00000000000..71057cfa972
--- /dev/null
+++ b/gcc/ChangeLog.wide-int
@@ -0,0 +1,899 @@
+2013-11-21 Kenneth Zadeck <zadeck@naturalbridge.com>
+ Mike Stump <mikestump@comcast.net>
+ Richard Sandiford <rdsandiford@googlemail.com>
+ Kyrylo Tkachov <kyrylo.tkachov@arm.com>
+
+ * alias.c (ao_ref_from_mem): Use wide-int interfaces.
+ (rtx_equal_for_memref_p): Update comment.
+ (adjust_offset_for_component_ref): Use wide-int interfaces.
+ * builtins.c (get_object_alignment_2): Likewise.
+ (c_readstr): Likewise.
+ (target_char_cast): Add comment.
+ (determine_block_size): Use wide-int interfaces.
+ (expand_builtin_signbit): Likewise.
+ (fold_builtin_int_roundingfn): Likewise.
+ (fold_builtin_bitop): Likewise.
+ (fold_builtin_bswap): Likewise.
+ (fold_builtin_logarithm): Use signop.
+ (fold_builtin_pow): Likewise.
+ (fold_builtin_memory_op): Use wide-int interfaces.
+ (fold_builtin_object_size): Likewise.
+ * cfgloop.c (alloc_loop): Initialize nb_iterations_upper_bound and
+ nb_iterations_estimate.
+ (record_niter_bound): Use wide-int interfaces.
+ (get_estimated_loop_iterations_int): Likewise.
+ (get_estimated_loop_iterations): Likewise.
+ (get_max_loop_iterations): Likewise.
+ * cfgloop.h: Include wide-int.h.
+ (struct nb_iter_bound): Change bound to widest_int.
+ (struct loop): Change nb_iterations_upper_bound and
+ nb_iterations_estimate to widest_int.
+ (record_niter_bound): Switch to use widest_int.
+ (get_estimated_loop_iterations): Likewise.
+ (get_max_loop_iterations): Likewise.
+ (gcov_type_to_double_int): Rename to gcov_type_to_wide_int and
+ update for wide-int.
+ * cgraph.c (cgraph_add_thunk): Use wide-int interfaces.
+ * combine.c (try_combine): Likewise.
+ (subst): Use CONST_SCALAR_INT_P rather than CONST_INT_P.
+ * config/aarch64/aarch64.c (aapcs_vfp_sub_candidate): Use wide-int
+ interfaces.
+ (aarch64_float_const_representable_p): Likewise.
+ * config/arc/arc.c: Include wide-int.h.
+ (arc_can_use_doloop_p): Use wide-int interfaces.
+ * config/arm/arm.c (aapcs_vfp_sub_candidate): Likewise.
+ (vfp3_const_double_index): Likewise.
+ * config/avr/avr.c (avr_out_round): Likewise.
+ (avr_fold_builtin): Likewise.
+ * config/bfin/bfin.c (bfin_local_alignment): Likewise.
+ (bfin_can_use_doloop_p): Likewise.
+ * config/darwin.c (darwin_mergeable_constant_section): Likewise.
+ (machopic_select_rtx_section): Update to handle CONST_WIDE_INT.
+ * config/i386/i386.c: Include wide-int.h.
+ (ix86_data_alignment): Use wide-int interfaces.
+ (ix86_local_alignment): Likewise.
+ (ix86_emit_swsqrtsf): Update real_from_integer.
+ * config/msp430/msp430.c (msp430_attr): Use wide-int interfaces.
+ * config/nds32/nds32.c (nds32_insert_attributes): Likewise.
+ * config/rs6000/predicates.md (any_operand): Add const_wide_int.
+ (zero_constant): Likewise.
+ (input_operand): Likewise.
+ (splat_input_operand): Likewise.
+ (non_logical_cint_operand): Change const_double to const_wide_int.
+ * config/rs6000/rs6000.c (num_insns_constant): Handle CONST_WIDE_INT.
+ (easy_altivec_constant): Remove comment.
+ (paired_expand_vector_init): Use CONSTANT_P.
+ (rs6000_legitimize_address): Handle CONST_WIDE_INT.
+ (rs6000_emit_move): Update checks.
+ (rs6000_aggregate_candidate): Use wide-int interfaces.
+ (rs6000_expand_ternop_builtin): Likewise.
+ (rs6000_output_move_128bit): Handle CONST_WIDE_INT.
+ (rs6000_assemble_integer): Likewise.
+ (rs6000_hash_constant): Likewise.
+ (output_toc): Likewise.
+ (rs6000_rtx_costs): Likewise.
+ (rs6000_emit_swrsqrt): Update call to real_from_integer.
+ * config/rs6000/rs6000-c.c: Include wide-int.h.
+ (altivec_resolve_overloaded_builtin): Use wide-int interfaces.
+ * config/rs6000/rs6000.h (TARGET_SUPPORTS_WIDE_INT): New.
+ * config/rs6000/rs6000.md: Use const_scalar_int_operand.
+ Handle CONST_WIDE_INT.
+ * config/sol2-c.c (solaris_pragma_align): Change low to unsigned HWI.
+ Use tree_fits_uhwi_p.
+ * config/sparc/sparc.c: Include wide-int.h.
+ (sparc_fold_builtin): Use wide-int interfaces.
+ * config/vax/vax.c: Include wide-int.h.
+ (vax_float_literal): Use real_from_integer.
+ * coretypes.h (struct hwivec_def): New.
+ (hwivec): New.
+ (const_hwivec): New.
+ * cse.c (hash_rtx_cb): Handle CONST_WIDE_INT.
+ (equiv_constant): Handle CONST_WIDE_INT.
+ * cselib.c (rtx_equal_for_cselib_1): Use CASE_CONST_UNIQUE.
+ (cselib_hash_rtx): Handle CONST_WIDE_INT.
+ * dbxout.c (stabstr_U): Use wide-int interfaces.
+ (dbxout_type): Update to use cst_fits_shwi_p.
+ * defaults.h (LOG2_BITS_PER_UNIT): Define.
+ (TARGET_SUPPORTS_WIDE_INT): Add default.
+ * dfp.c: Include wide-int.h.
+ (decimal_real_to_integer2): Use wide-int interfaces and rename to
+ decimal_real_to_integer.
+ * dfp.h (decimal_real_to_integer2): Return a wide_int and rename to
+ decimal_real_to_integer.
+ * doc/generic.texi (Constant expressions): Update for wide_int.
+ * doc/rtl.texi (const_double): Likewise.
+ (const_wide_int, CONST_WIDE_INT, CONST_WIDE_INT_VEC): New.
+ (CONST_WIDE_INT_NUNITS, CONST_WIDE_INT_ELT): New.
+ * doc/tm.texi.in (REAL_VALUE_TO_INT): Remove.
+ (REAL_VALUE_FROM_INT): Remove.
+ (TARGET_SUPPORTS_WIDE_INT): New.
+ * doc/tm.texi: Regenerate.
+ * dojump.c (prefer_and_bit_test): Use wide-int interfaces.
+ * double-int.h: Include wide-int.h.
+ (struct wi::int_traits): New.
+ * dwarf2out.c (get_full_len): New.
+ (dw_val_equal_p): Add case dw_val_class_wide_int.
+ (size_of_loc_descr): Likewise.
+ (output_loc_operands): Likewise.
+ (insert_double): Remove.
+ (insert_wide_int): New.
+ (add_AT_wide): New.
+ (print_die): Add case dw_val_class_wide_int.
+ (attr_checksum): Likewise.
+ (attr_checksum_ordered): Likewise.
+ (same_dw_val_p): Likewise.
+ (size_of_die): Likewise.
+ (value_format): Likewise.
+ (output_die): Likewise.
+ (double_int_type_size_in_bits): Rename to offset_int_type_size_in_bits.
+ Use wide-int.
+ (clz_loc_descriptor): Use wide-int interfaces.
+ (mem_loc_descriptor): Likewise. Handle CONST_WIDE_INT.
+ (loc_descriptor): Use wide-int interfaces. Handle CONST_WIDE_INT.
+ (round_up_to_align): Use wide-int interfaces.
+ (field_byte_offset): Likewise.
+ (insert_double): Rename to insert_wide_int. Use wide-int interfaces.
+ (add_const_value_attribute): Handle CONST_WIDE_INT. Update
+ CONST_DOUBLE handling. Use wide-int interfaces.
+ (add_bound_info): Use tree_fits_uhwi_p. Use wide-int interfaces.
+ (gen_enumeration_type_die): Use add_AT_wide.
+ (hash_loc_operands): Add case dw_val_class_wide_int.
+ (compare_loc_operands): Likewise.
+ * dwarf2out.h: Include wide-int.h.
+ (wide_int_ptr): New.
+ (enum dw_val_class): Add dw_val_class_wide_int.
+ (struct dw_val_struct): Add val_wide.
+ * emit-rtl.c (const_wide_int_htab): New.
+ (const_wide_int_htab_hash): New.
+ (const_wide_int_htab_eq): New.
+ (lookup_const_wide_int): New.
+ (const_double_htab_hash): Use wide-int interfaces.
+ (const_double_htab_eq): Likewise.
+ (rtx_to_double_int): Conditionally compile for wide-int.
+ (immed_double_int_const): Rename to immed_wide_int_const and
+ update for wide-int.
+ (immed_double_const): Conditionally compile for wide-int.
+ (init_emit_once): Use wide-int interfaces.
+ * explow.c (plus_constant): Likewise.
+ * expmed.c (mask_rtx): Move further up file. Use wide-int interfaces.
+ (lshift_value): Use wide-int interfaces.
+ (expand_mult): Likewise.
+ (choose_multiplier): Likewise.
+ (expand_smod_pow2): Likewise.
+ (make_tree): Likewise.
+ * expr.c (convert_modes): Consolidate handling of constants.
+ Use wide-int interfaces.
+ (emit_group_load_1): Add note.
+ (store_expr): Update comment.
+ (get_inner_reference): Use wide-int interfaces.
+ (expand_constructor): Update comment.
+ (expand_expr_real_2): Use wide-int interfaces.
+ (expand_expr_real_1): Likewise.
+ (reduce_to_bit_field_precision): Likewise.
+ (const_vector_from_tree): Likewise.
+ * final.c: Include wide-int-print.h.
+ (output_addr_const): Handle CONST_WIDE_INT. Use CONST_DOUBLE_AS_INT_P.
+ * fixed-value.c: Include wide-int.h.
+ (fixed_from_string): Use wide-int interfaces.
+ (fixed_to_decimal): Likewise.
+ (fixed_convert_from_real): Likewise.
+ (real_convert_from_fixed): Likewise.
+ * fold-const.h (mem_ref_offset): Return an offset_int.
+ (div_if_zero_remainder): Remove code parameter.
+ * fold-const.c (div_if_zero_remainder): Remove code parameter.
+ Use wide-int interfaces.
+ (may_negate_without_overflow_p): Use wide-int interfaces.
+ (negate_expr_p): Likewise.
+ (fold_negate_expr): Likewise.
+ (int_const_binop_1): Likewise.
+ (const_binop): Likewise.
+ (fold_convert_const_int_from_int): Likewise.
+ (fold_convert_const_int_from_real): Likewise.
+ (fold_convert_const_int_from_fixed): Likewise.
+ (fold_convert_const_fixed_from_int): Likewise.
+ (all_ones_mask_p): Take an unsigned size. Use wide-int interfaces.
+ (sign_bit_p): Use wide-int interfaces.
+ (make_range_step): Likewise.
+ (build_range_check): Likewise. Pass an integer of the correct type
+ instead of using integer_one_node.
+ (range_predecessor): Pass an integer of the correct type instead
+ of using integer_one_node.
+ (range_successor): Likewise.
+ (merge_ranges): Likewise.
+ (unextend): Use wide-int interfaces.
+ (extract_muldiv_1): Likewise.
+ (fold_div_compare): Likewise.
+ (fold_single_bit_test): Likewise.
+ (fold_sign_changed_comparison): Likewise.
+ (try_move_mult_to_index): Update calls to div_if_zero_remainder.
+ (fold_plusminus_mult_expr): Use wide-int interfaces.
+ (native_encode_int): Likewise.
+ (native_interpret_int): Likewise.
+ (fold_unary_loc): Likewise.
+ (pointer_may_wrap_p): Likewise.
+ (size_low_cst): Likewise.
+ (mask_with_tz): Likewise.
+ (fold_binary_loc): Likewise.
+ (fold_ternary_loc): Likewise.
+ (multiple_of_p): Likewise.
+ (tree_call_nonnegative_warnv_p): Update calls to
+ tree_int_cst_min_precision and real_from_integer.
+ (fold_negate_const): Use wide-int interfaces.
+ (fold_abs_const): Likewise.
+ (fold_relational_const): Use tree_int_cst_lt.
+ (round_up_loc): Use wide-int interfaces.
+ * genemit.c (gen_exp): Add CONST_WIDE_INT case.
+ * gengenrtl.c (excluded_rtx): Add CONST_WIDE_INT case.
+ * gengtype.c: Remove include of double-int.h.
+ (do_typedef): Use wide-int interfaces.
+ (open_base_files): Add wide-int.h.
+ (main): Add offset_int and widest_int typedefs.
+ * gengtype-lex.l: Handle "^".
+ (CXX_KEYWORD): Add "static".
+ * gengtype-parse.c (require3): New.
+ (require_template_declaration): Handle constant template arguments
+ and nested templates.
+ * gengtype-state.c: Don't include "double-int.h".
+ * genpreds.c (write_one_predicate_function): Update comment.
+ (write_tm_constrs_h): Add check for hval and lval use in
+ CONST_WIDE_INT.
+ * genrecog.c (validate_pattern): Add CONST_WIDE_INT case.
+ (add_to_sequence): Likewise.
+ * gensupport.c (struct std_pred_table): Add const_scalar_int_operand
+ and const_double_operand.
+ * gimple.c (preprocess_case_label_vec_for_gimple): Use wide-int
+ interfaces.
+ * gimple-fold.c (get_base_constructor): Likewise.
+ (fold_array_ctor_reference): Likewise.
+ (fold_nonarray_ctor_reference): Likewise.
+ (fold_const_aggregate_ref_1): Likewise.
+ (gimple_val_nonnegative_real_p): Likewise.
+ (gimple_fold_indirect_ref): Likewise.
+ * gimple-pretty-print.c (dump_ssaname_info): Likewise.
+ * gimple-ssa-strength-reduction.c: Include wide-int-print.h.
+ (struct slsr_cand_d): Change index to be widest_int.
+ (struct incr_info_d): Change incr to be widest_int.
+ (alloc_cand_and_find_basis): Use wide-int interfaces.
+ (slsr_process_phi): Likewise.
+ (backtrace_base_for_ref): Likewise. Return a widest_int.
+ (restructure_reference): Take a widest_int instead of a double_int.
+ (slsr_process_ref): Use wide-int interfaces.
+ (create_mul_ssa_cand): Likewise.
+ (create_mul_imm_cand): Likewise.
+ (create_add_ssa_cand): Likewise.
+ (create_add_imm_cand): Take a widest_int instead of a double_int.
+ (slsr_process_add): Use wide-int interfaces.
+ (slsr_process_cast): Likewise.
+ (slsr_process_copy): Likewise.
+ (dump_candidate): Likewise.
+ (dump_incr_vec): Likewise.
+ (replace_ref): Likewise.
+ (cand_increment): Likewise. Return a widest_int.
+ (cand_abs_increment): Likewise.
+ (replace_mult_candidate): Take a widest_int instead of a double_int.
+ (replace_unconditional_candidate): Use wide-int interfaces.
+ (incr_vec_index): Take a widest_int instead of a double_int.
+ (create_add_on_incoming_edge): Likewise.
+ (create_phi_basis): Use wide-int interfaces.
+ (replace_conditional_candidate): Likewise.
+ (record_increment): Take a widest_int instead of a double_int.
+ (record_phi_increments): Use wide-int interfaces.
+ (phi_incr_cost): Take a widest_int instead of a double_int.
+ (lowest_cost_path): Likewise.
+ (total_savings): Likewise.
+ (analyze_increments): Use wide-int interfaces.
+ (ncd_with_phi): Take a widest_int instead of a double_int.
+ (ncd_of_cand_and_phis): Likewise.
+ (nearest_common_dominator_for_cands): Likewise.
+ (insert_initializers): Use wide-int interfaces.
+ (all_phi_incrs_profitable): Likewise.
+ (replace_one_candidate): Likewise.
+ (replace_profitable_candidates): Likewise.
+ * godump.c: Include wide-int-print.h.
+ (go_output_typedef): Use wide-int interfaces.
+ * graphite-clast-to-gimple.c (gmp_cst_to_tree): Likewise.
+ * graphite-sese-to-poly.c (tree_int_to_gmp): Likewise.
+ (build_loop_iteration_domains): Likewise.
+ * hooks.h: Include wide-int.h rather than double-int.h.
+ (hook_bool_dint_dint_uint_bool_true): Delete.
+ (hook_bool_wint_wint_uint_bool_true): Declare.
+ * hooks.c (hook_bool_dint_dint_uint_bool_true): Removed.
+ (hook_bool_wint_wint_uint_bool_true): New.
+ * internal-fn.c (ubsan_expand_si_overflow_addsub_check): Use wide-int
+ interfaces.
+ (ubsan_expand_si_overflow_mul_check): Likewise.
+ * ipa-devirt.c (get_polymorphic_call_info): Likewise.
+ * ipa-prop.c (compute_complex_assign_jump_func): Likewise.
+ (get_ancestor_addr_info): Likewise.
+ (ipa_modify_call_arguments): Likewise.
+ * loop-doloop.c (doloop_modify): Likewise.
+ (doloop_optimize): Likewise.
+ * loop-iv.c (iv_number_of_iterations): Likewise.
+ * loop-unroll.c (decide_unroll_constant_iterations): Likewise.
+ (unroll_loop_constant_iterations): Likewise.
+ (decide_unroll_runtime_iterations): Likewise.
+ (unroll_loop_runtime_iterations): Likewise.
+ (decide_peel_simple): Likewise.
+ (decide_unroll_stupid): Likewise.
+ * lto-streamer-in.c (streamer_read_wi): Add.
+ (input_cfg): Use wide-int interfaces.
+ (lto_input_tree_1): Likewise.
+ * lto-streamer-out.c (streamer_write_wi): Add.
+ (hash_tree): Use wide-int interfaces.
+ (output_cfg): Likewise.
+ * Makefile.in (OBJS): Add wide-int.o and wide-int-print.o.
+ (GTFILES): Add wide-int.h and signop.h.
+ (TAGS): Look for .cc files too.
+ * omp-low.c (scan_omp_1_op): Use wide-int interfaces.
+ * optabs.c (expand_subword_shift): Likewise.
+ (expand_doubleword_shift): Likewise.
+ (expand_absneg_bit): Likewise.
+ (expand_copysign_absneg): Likewise.
+ (expand_copysign_bit): Likewise.
+ * postreload.c (reload_cse_simplify_set): Likewise.
+ * predict.c (predict_iv_comparison): Likewise.
+ * pretty-print.h: Include wide-int-print.h.
+ (pp_wide_int): New.
+ * print-rtl.c (print_rtx): Add CONST_WIDE_INT case.
+ * print-tree.c: Include wide-int-print.h.
+ (print_node_brief): Use wide-int interfaces.
+ (print_node): Likewise.
+ * read-rtl.c (validate_const_wide_int): New.
+ (read_rtx_code): Add CONST_WIDE_INT case.
+ * real.c: Include wide-int.h.
+ (real_to_integer2): Delete.
+ (real_to_integer): New function, returning a wide_int.
+ (real_from_integer): Take a wide_int rather than two HOST_WIDE_INTs.
+ (ten_to_ptwo): Update call to real_from_integer.
+ (real_digit): Likewise.
+ * real.h: Include signop.h, wide-int.h and insn-modes.h.
+ (real_to_integer2, REAL_VALUE_FROM_INT, REAL_VALUE_FROM_UNSIGNED_INT)
+ (REAL_VALUE_TO_INT): Delete.
+ (real_to_integer): Declare a wide-int form.
+ (real_from_integer): Take a wide_int rather than two HOST_WIDE_INTs.
+ * recog.c (const_int_operand): Improve comment.
+ (const_scalar_int_operand): New.
+ (const_double_operand): Add a separate definition for CONST_WIDE_INT.
+ * rtlanal.c (commutative_operand_precedence): Handle CONST_WIDE_INT.
+ (split_double): Likewise.
+ * rtl.c (DEF_RTL_EXPR): Handle CONST_WIDE_INT.
+ (rtx_size): Likewise.
+ (rtx_alloc_stat_v): New.
+ (rtx_alloc_stat): Now calls rtx_alloc_stat_v.
+ (cwi_output_hex): New.
+ (iterative_hash_rtx): Handle CONST_WIDE_INT.
+ (cwi_check_failed_bounds): New.
+ * rtl.def (CONST_WIDE_INT): New.
+ * rtl.h: Include <utility> and wide-int.h.
+ (struct hwivec_def): New.
+ (CWI_GET_NUM_ELEM): New.
+ (CWI_PUT_NUM_ELEM): New.
+ (struct rtx_def): Add num_elem and hwiv.
+ (CASE_CONST_SCALAR_INT): Modify for TARGET_SUPPORTS_WIDE_INT.
+ (CASE_CONST_UNIQUE): Likewise.
+ (CASE_CONST_ANY): Likewise.
+ (CONST_SCALAR_INT_P): Likewise.
+ (CONST_WIDE_INT_P): New.
+ (CWI_ELT): New.
+ (HWIVEC_CHECK): New.
+ (cwi_check_failed_bounds): New.
+ (CWI_ELT): New.
+ (HWIVEC_CHECK): New.
+ (CONST_WIDE_INT_VEC): New.
+ (CONST_WIDE_INT_NUNITS): New.
+ (CONST_WIDE_INT_ELT): New.
+ (rtx_mode_t): New type.
+ (wi::int_traits <rtx_mode_t>): New.
+ (wi::shwi): New.
+ (wi::min_value): New.
+ (wi::max_value): New.
+ (rtx_alloc_v) New.
+ (const_wide_int_alloc): New.
+ (immed_wide_int_const): New.
+ * sched-vis.c (print_value): Handle CONST_WIDE_INT.
+ * sel-sched-ir.c (lhs_and_rhs_separable_p): Update comment.
+ * signop.h: New file.
+ * simplify-rtx.c (mode_signbit_p): Handle CONST_WIDE_INT.
+ (simplify_const_unary_operation): Use wide-int interfaces.
+ (simplify_binary_operation_1): Likewise.
+ (simplify_const_binary_operation): Likewise.
+ (simplify_const_relational_operation): Likewise.
+ (simplify_immed_subreg): Likewise.
+ * stmt.c (expand_case): Likewise.
+ * stor-layout.h (set_min_and_max_values_for_integral_type): Take a
+ signop rather than a bool.
+ * stor-layout.c (layout_type): Use wide-int interfaces.
+ (initialize_sizetypes): Update calls to
+ set_min_and_max_values_for_integral_type.
+ (set_min_and_max_values_for_integral_type): Take a signop rather
+ than a bool. Use wide-int interfaces.
+ (fixup_signed_type): Update accordingly. Remove
+ HOST_BITS_PER_DOUBLE_INT limit.
+ (fixup_unsigned_type): Likewise.
+ * system.h (STATIC_CONSTANT_P): New.
+ (STATIC_ASSERT): New.
+ * target.def (can_use_doloop_p): Take widest_ints rather than
+ double_ints.
+ * target.h: Include wide-int.h rather than double-int.h.
+ * targhooks.h (can_use_doloop_if_innermost): Take widest_ints rather
+ than double_ints.
+ * targhooks.c (default_cxx_get_cookie_size): Use tree_int_cst_lt
+ rather than INT_CST_LT_UNSIGNED.
+ (can_use_doloop_if_innermost): Take widest_ints rather than
+ double_ints.
+ * tree-affine.c: Include wide-int-print.h.
+ (double_int_ext_for_comb): Delete.
+ (wide_int_ext_for_comb): New.
+ (aff_combination_zero): Use wide-int interfaces.
+ (aff_combination_const): Take a widest_int instead of a double_int.
+ (aff_combination_elt): Use wide-int interfaces.
+ (aff_combination_scale): Take a widest_int instead of a double_int.
+ (aff_combination_add_elt): Likewise.
+ (aff_combination_add_cst): Likewise.
+ (aff_combination_add): Use wide-int interfaces.
+ (aff_combination_convert): Likewise.
+ (tree_to_aff_combination): Likewise.
+ (add_elt_to_tree): Take a widest_int instead of a double_int.
+ (aff_combination_to_tree): Use wide-int interfaces.
+ (aff_combination_remove_elt): Likewise.
+ (aff_combination_add_product): Take a widest_int instead of
+ a double_int.
+ (aff_combination_mult): Use wide-int interfaces.
+ (aff_combination_expand): Likewise.
+ (double_int_constant_multiple_p): Delete.
+ (wide_int_constant_multiple_p): New.
+ (aff_combination_constant_multiple_p): Take a widest_int pointer
+ instead of a double_int pointer.
+ (print_aff): Use wide-int interfaces.
+ (get_inner_reference_aff): Take a widest_int pointer
+ instead of a double_int pointer.
+ (aff_comb_cannot_overlap_p): Take widest_ints instead of double_ints.
+ * tree-affine.h: Include wide-int.h.
+ (struct aff_comb_elt): Change type of coef to widest_int.
+ (struct affine_tree_combination): Change type of offset to widest_int.
+ (double_int_ext_for_comb): Delete.
+ (wide_int_ext_for_comb): New.
+ (aff_combination_const): Use widest_int instead of double_int.
+ (aff_combination_scale): Likewise.
+ (aff_combination_add_elt): Likewise.
+ (aff_combination_constant_multiple_p): Likewise.
+ (get_inner_reference_aff): Likewise.
+ (aff_comb_cannot_overlap_p): Likewise.
+ (aff_combination_zero_p): Use wide-int interfaces.
+ * tree.c: Include tree.h.
+ (init_ttree): Use make_int_cst.
+ (tree_code_size): Removed code for INTEGER_CST case.
+ (tree_size): Add INTEGER_CST case.
+ (make_node_stat): Update comment.
+ (get_int_cst_ext_nunits, build_new_int_cst, build_int_cstu): New.
+ (build_int_cst_type): Use wide-int interfaces.
+ (double_int_to_tree): Likewise.
+ (double_int_fits_to_tree_p): Delete.
+ (force_fit_type_double): Delete.
+ (force_fit_type): New.
+ (int_cst_hash_hash): Use wide-int interfaces.
+ (int_cst_hash_eq): Likewise.
+ (build_int_cst_wide): Delete.
+ (wide_int_to_tree): New.
+ (cache_integer_cst): Use wide-int interfaces.
+ (build_low_bits_mask): Likewise.
+ (cst_and_fits_in_hwi): Likewise.
+ (real_value_from_int_cst): Likewise.
+ (make_int_cst_stat): New.
+ (integer_zerop): Use wide_int interfaces.
+ (integer_onep): Likewise.
+ (integer_all_onesp): Likewise.
+ (integer_pow2p): Likewise.
+ (integer_nonzerop): Likewise.
+ (tree_log2): Likewise.
+ (tree_floor_log2): Likewise.
+ (tree_ctz): Likewise.
+ (int_size_in_bytes): Likewise.
+ (mem_ref_offset): Return an offset_int rather than a double_int.
+ (build_type_attribute_qual_variant): Use wide_int interfaces.
+ (type_hash_eq): Likewise.
+ (tree_int_cst_equal): Likewise.
+ (tree_int_cst_lt): Delete.
+ (tree_int_cst_compare): Likewise.
+ (tree_fits_shwi_p): Use wide_int interfaces.
+ (tree_fits_uhwi_p): Likewise.
+ (tree_int_cst_sign_bit): Likewise.
+ (tree_int_cst_sgn): Likewise.
+ (tree_int_cst_min_precision): Take a signop rather than a bool.
+ (simple_cst_equal): Use wide_int interfaces.
+ (compare_tree_int): Likewise.
+ (iterative_hash_expr): Likewise.
+ (int_fits_type_p): Likewise. Use tree_int_cst_lt rather than
+ INT_CST_LT.
+ (get_type_static_bounds): Use wide_int interfaces.
+ (tree_int_cst_elt_check_failed): New.
+ (build_common_tree_nodes): Reordered to set prec before filling in
+ value.
+ (int_cst_value): Check cst_and_fits_in_hwi.
+ (widest_int_cst_value): Use wide_int interfaces.
+ (upper_bound_in_type): Likewise.
+ (lower_bound_in_type): Likewise.
+ (num_ending_zeros): Likewise.
+ (drop_tree_overflow): Likewise.
+ * tree-call-cdce.c (check_pow): Update call to real_from_integer.
+ (gen_conditions_for_pow_cst_base): Likewise.
+ * tree-cfg.c: Include wide-int.h and wide-int-print.h.
+ (group_case_labels_stmt): Use wide-int interfaces.
+ (verify_gimple_assign_binary): Likewise.
+ (print_loop): Likewise.
+ * tree-chrec.c (tree_fold_binomial): Likewise.
+ * tree-core.h (struct tree_base): Add int_length.
+ (struct tree_int_cst): Change rep of value.
+ * tree-data-ref.c (dr_analyze_innermost): Use wide-int interfaces.
+ (dr_may_alias_p): Likewise.
+ (max_stmt_executions_tree): Likewise.
+ * tree.def (INTEGER_CST): Update comment.
+ * tree-dfa.c (get_ref_base_and_extent): Use wide-int interfaces.
+ * tree-dfa.h (get_addr_base_and_unit_offset_1): Likewise.
+ * tree-dump.c: Include wide-int.h and wide-int-print.h.
+ (dequeue_and_dump): Use wide-int interfaces.
+ * tree.h: Include wide-int.h.
+ (NULL_TREE): Moved to earlier loc in file.
+ (TREE_INT_CST_ELT_CHECK): New.
+ (tree_int_cst_elt_check_failed): New.
+ (TYPE_SIGN): New.
+ (TREE_INT_CST): Delete.
+ (TREE_INT_CST_LOW): Use wide-int interfaces.
+ (TREE_INT_CST_HIGH): Delete.
+ (TREE_INT_CST_NUNITS): New.
+ (TREE_INT_CST_EXT_NUNITS): Likewise.
+ (TREE_INT_CST_OFFSET_NUNITS): Likewise.
+ (TREE_INT_CST_ELT): Likewise.
+ (INT_CST_LT): Delete.
+ (tree_int_cst_elt_check): New (two forms).
+ (type_code_size): Update comment.
+ (make_int_cst_stat, make_int_cst): New.
+ (tree_to_double_int): Delete.
+ (double_int_fits_to_tree_p): Delete.
+ (force_fit_type_double): Delete.
+ (build_int_cstu): Replace with out-of-line function.
+ (build_int_cst_wide): Delete.
+ (tree_int_cst_lt): Define inline.
+ (tree_int_cst_le): New.
+ (tree_int_cst_compare): Define inline.
+ (tree_int_cst_min_precision): Take a signop rather than a bool.
+ (wi::int_traits <const_tree>): New.
+ (wi::int_traits <tree>): New.
+ (wi::extended_tree): New.
+ (wi::int_traits <wi::extended_tree>): New.
+ (wi::to_widest): New.
+ (wi::to_offset): New.
+ (wi::fits_to_tree_p): New.
+ (wi::min_value): New.
+ (wi::max_value): New.
+ * tree-inline.c (remap_gimple_op_r): Use wide-int interfaces.
+ (copy_tree_body_r): Likewise.
+ * tree-object-size.c (compute_object_offset): Likewise.
+ (addr_object_size): Likewise.
+ * tree-predcom.c: Include wide-int-print.h.
+ (struct dref_d): Change type of offset to widest_int.
+ (dump_dref): Call wide-int printer.
+ (aff_combination_dr_offset): Use wide-int interfaces.
+ (determine_offset): Take a widest_int pointer rather than a
+ double_int pointer.
+ (split_data_refs_to_components): Use wide-int interfaces.
+ (suitable_component_p): Likewise.
+ (order_drefs): Likewise.
+ (add_ref_to_chain): Likewise.
+ (valid_initializer_p): Likewise.
+ (determine_roots_comp): Likewise.
+ * tree-pretty-print.c: Include wide-int-print.h.
+ (dump_generic_node): Use wide-int interfaces.
+ * tree-sra.c (sra_ipa_modify_expr): Likewise.
+ * tree-ssa-address.c (addr_for_mem_ref): Likewise.
+ (move_fixed_address_to_symbol): Likewise.
+ (move_hint_to_base): Likewise.
+ (move_pointer_to_base): Likewise.
+ (move_variant_to_index): Likewise.
+ (most_expensive_mult_to_index): Likewise.
+ (addr_to_parts): Likewise.
+ (copy_ref_info): Likewise.
+ * tree-ssa-alias.c (indirect_ref_may_alias_decl_p): Likewise.
+ (indirect_refs_may_alias_p): Likewise.
+ (stmt_kills_ref_p_1): Likewise.
+ * tree-ssa.c (non_rewritable_mem_ref_base): Likewise.
+ * tree-ssa-ccp.c: Update comment at top of file. Include
+ wide-int-print.h.
+ (struct prop_value_d): Change type of mask to widest_int.
+ (extend_mask): New function.
+ (dump_lattice_value): Use wide-int interfaces.
+ (get_default_value): Likewise.
+ (set_constant_value): Likewise.
+ (set_value_varying): Likewise.
+ (valid_lattice_transition): Likewise.
+ (set_lattice_value): Likewise.
+ (value_to_double_int): Delete.
+ (value_to_wide_int): New.
+ (get_value_from_alignment): Use wide-int interfaces.
+ (get_value_for_expr): Likewise.
+ (do_dbg_cnt): Likewise.
+ (ccp_finalize): Likewise.
+ (ccp_lattice_meet): Likewise.
+ (bit_value_unop_1): Use widest_ints rather than double_ints.
+ (bit_value_binop_1): Likewise.
+ (bit_value_unop): Use wide-int interfaces.
+ (bit_value_binop): Likewise.
+ (bit_value_assume_aligned): Likewise.
+ (evaluate_stmt): Likewise.
+ (ccp_fold_stmt): Likewise.
+ (visit_cond_stmt): Likewise.
+ (ccp_visit_stmt): Likewise.
+ * tree-ssa-forwprop.c (forward_propagate_addr_expr_1): Likewise.
+ (constant_pointer_difference): Likewise.
+ (associate_pointerplus): Likewise.
+ (combine_conversions): Likewise.
+ * tree-ssa-loop.h: Include wide-int.h.
+ (struct tree_niter_desc): Change type of max to widest_int.
+ * tree-ssa-loop-im.c (mem_refs_may_alias_p): Use wide-int interfaces.
+ * tree-ssa-loop-ivcanon.c (remove_exits_and_undefined_stmts): Likewise.
+ (remove_redundant_iv_tests): Likewise.
+ (canonicalize_loop_induction_variables): Likewise.
+ * tree-ssa-loop-ivopts.c (alloc_iv): Likewise.
+ (constant_multiple_of): Take a widest_int pointer instead of
+ a double_int pointer.
+ (get_computation_aff): Use wide-int interfaces.
+ (ptr_difference_cost): Likewise.
+ (difference_cost): Likewise.
+ (get_loop_invariant_expr_id): Likewise.
+ (get_computation_cost_at): Likewise.
+ (iv_elimination_compare_lt): Likewise.
+ (may_eliminate_iv): Likewise.
+ * tree-ssa-loop-niter.h (estimated_loop_iterations): Use widest_int
+ instead of double_int.
+ (max_loop_iterations): Likewise.
+ (max_stmt_executions): Likewise.
+ (estimated_stmt_executions): Likewise.
+ * tree-ssa-loop-niter.c: Include wide-int-print.h.
+ (split_to_var_and_offset): Use wide-int interfaces.
+ (determine_value_range): Likewise.
+ (bound_difference_of_offsetted_base): Likewise.
+ (bounds_add): Take a widest_int instead of a double_int.
+ (number_of_iterations_ne_max): Use wide-int interfaces.
+ (number_of_iterations_ne): Likewise.
+ (number_of_iterations_lt_to_ne): Likewise.
+ (assert_loop_rolls_lt): Likewise.
+ (number_of_iterations_lt): Likewise.
+ (number_of_iterations_le): Likewise.
+ (number_of_iterations_cond): Likewise.
+ (number_of_iterations_exit): Likewise.
+ (finite_loop_p): Likewise.
+ (derive_constant_upper_bound_assign): Likewise.
+ (derive_constant_upper_bound): Return a widest_int.
+ (derive_constant_upper_bound_ops): Likewise.
+ (do_warn_aggressive_loop_optimizations): Use wide-int interfaces.
+ (record_estimate): Take a widest_int rather than a double_int.
+ (record_nonwrapping_iv): Use wide-int interfaces.
+ (double_int_cmp): Delete.
+ (wide_int_cmp): New.
+ (bound_index): Take a widest_int rather than a double_int.
+ (discover_iteration_bound_by_body_walk): Use wide-int interfaces.
+ (maybe_lower_iteration_bound): Likewise.
+ (estimate_numbers_of_iterations_loop): Likewise.
+	(estimated_loop_iterations): Take a widest_int pointer rather than
+ a double_int pointer.
+ (estimated_loop_iterations_int): Use wide-int interfaces.
+	(max_loop_iterations): Take a widest_int pointer rather than
+ a double_int pointer.
+ (max_loop_iterations_int): Use wide-int interfaces.
+	(max_stmt_executions): Take a widest_int pointer rather than
+ a double_int pointer.
+ (estimated_stmt_executions): Likewise.
+ (n_of_executions_at_most): Use wide-int interfaces.
+ (scev_probably_wraps_p): Likewise.
+ * tree-ssa-math-opts.c (gimple_expand_builtin_pow): Update calls
+ to real_to_integer.
+ * tree-scalar-evolution.c (simplify_peeled_chrec): Use wide-int
+ interfaces.
+ * tree-ssanames.c (set_range_info): Use wide_int_refs rather than
+ double_ints. Adjust for trailing_wide_ints <3> representation.
+ (set_nonzero_bits): Likewise.
+ (get_range_info): Return wide_ints rather than double_ints.
+ Adjust for trailing_wide_ints <3> representation.
+ (get_nonzero_bits): Likewise.
+ (duplicate_ssa_name_range_info): Adjust for trailing_wide_ints <3>
+ representation.
+ * tree-ssanames.h (struct range_info_def): Replace min, max and
+ nonzero_bits with a trailing_wide_ints <3>.
+ (set_range_info): Use wide_int_refs rather than double_ints.
+ (set_nonzero_bits): Likewise.
+ (get_range_info): Return wide_ints rather than double_ints.
+ (get_nonzero_bits): Likewise.
+ * tree-ssa-phiopt.c (jump_function_from_stmt): Use wide-int interfaces.
+ * tree-ssa-pre.c (phi_translate_1): Likewise.
+ * tree-ssa-reassoc.c (decrement_power): Use calls to real_from_integer.
+ (acceptable_pow_call): Likewise.
+ * tree-ssa-sccvn.c (copy_reference_ops_from_ref): Use wide-int
+ interfaces.
+ (vn_reference_fold_indirect): Likewise.
+ (vn_reference_maybe_forwprop_address): Likewise.
+ (valueize_refs_1): Likewise.
+ * tree-ssa-structalias.c (get_constraint_for_ptr_offset): Likewise.
+ * tree-ssa-uninit.c (is_value_included_in): Use wide-int interfaces,
+ tree_int_cst_lt and tree_int_cst_le.
+ * tree-streamer-in.c (unpack_ts_base_value_fields): Use wide-int
+ interfaces.
+ (streamer_alloc_tree): Likewise.
+ * tree-streamer-out.c (pack_ts_int_cst_value_fields): Likewise.
+ (streamer_write_tree_header): Likewise.
+ (streamer_write_integer_cst): Likewise.
+ * tree-switch-conversion.c (emit_case_bit_tests): Likewise.
+ (build_constructors): Likewise.
+ (array_value_type): Likewise.
+ * tree-vect-data-refs.c (vect_prune_runtime_alias_test_list): Likewise.
+ (vect_check_gather): Likewise.
+ * tree-vect-generic.c (build_replicated_const): Likewise.
+ (expand_vector_divmod): Likewise.
+ * tree-vect-loop.c (vect_transform_loop): Likewise.
+ * tree-vect-loop-manip.c (vect_do_peeling_for_loop_bound): Likewise.
+ (vect_do_peeling_for_alignment): Likewise.
+ * tree-vect-patterns.c (vect_recog_divmod_pattern): Likewise.
+ * tree-vrp.c: Include wide-int.h.
+ (operand_less_p): Use wide-int interfaces and tree_int_cst_lt.
+ (extract_range_from_assert): Use wide-int interfaces.
+ (vrp_int_const_binop): Likewise.
+ (zero_nonzero_bits_from_vr): Take wide_int pointers rather than
+ double_int pointers.
+ (ranges_from_anti_range): Use wide-int interfaces.
+ (quad_int_cmp): Delete.
+ (quad_int_pair_sort): Likewise.
+ (extract_range_from_binary_expr_1): Use wide-int interfaces.
+ (extract_range_from_unary_expr_1): Likewise.
+ (adjust_range_with_scev): Likewise.
+ (masked_increment): Take and return wide_ints rather than double_ints.
+ (register_edge_assert_for_2): Use wide-int interfaces.
+ (check_array_ref): Likewise.
+ (search_for_addr_array): Likewise.
+ (maybe_set_nonzero_bits): Likewise.
+ (union_ranges): Pass an integer of the correct type instead of
+ using integer_one_node.
+ (intersect_ranges): Likewise.
+ (simplify_truth_ops_using_ranges): Likewise.
+ (simplify_bit_ops_using_ranges): Use wide-int interfaces.
+ (range_fits_type_p): Likewise.
+ (simplify_cond_using_ranges): Likewise. Take a signop rather than
+ a bool.
+ (simplify_conversion_using_ranges): Use wide-int interfaces.
+ (simplify_float_conversion_using_ranges): Likewise.
+ (vrp_finalize): Likewise.
+ * value-prof.c (gimple_divmod_fixed_value_transform): Likewise.
+ (gimple_stringops_transform): Likewise.
+ * varasm.c (decode_addr_const): Likewise.
+ (const_hash_1): Likewise.
+	(const_rtx_hash_1): Likewise.
+ (output_constant): Likewise.
+ (array_size_for_constructor): Likewise.
+ (output_constructor_regular_field): Likewise.
+ (output_constructor_bitfield): Likewise.
+ * var-tracking.c (loc_cmp): Handle CONST_WIDE_INT.
+ * mkconfig.sh: Include machmode.h to pick up BITS_PER_UNIT for
+ GENERATOR_FILEs.
+ * gencheck.c: Define BITS_PER_UNIT.
+ * wide-int.cc: New.
+ * wide-int.h: New.
+ * wide-int-print.cc: New.
+ * wide-int-print.h: New.
+
+
+ada:
+ * gcc-interface/cuintp.c (UI_From_gnu): Use wide-int interfaces.
+ * gcc-interface/decl.c (gnat_to_gnu_entity): Use TYPE_SIGN.
+ (annotate_value): Use wide-int interfaces.
+ * gcc-interface/utils.c (get_nonnull_operand): Use tree_fits_uhwi_p.
+
+
+c:
+ * c-decl.c (check_bitfield_type_and_width): Use TYPE_SIGN.
+ (finish_enum): Use wide-int interfaces.
+ * c-parser.c (c_parser_cilk_clause_vectorlength): Likewise.
+ * c-typeck.c (build_c_cast): Likewise.
+ (set_nonincremental_init_from_string): Likewise.
+ (c_tree_equal): Likewise.
+
+
+c-family:
+ * c-ada-spec.c: Include wide-int.h.
+ (ADA_HOST_WIDE_INT_PRINT_DOUBLE_HEX): Remove.
+ (dump_generic_ada_node): Use wide-int interfaces.
+ * c-common.c: Include wide-int-print.h.
+ (shorten_compare): Use wide-int interfaces and tree_int_cst_lt.
+ (pointer_int_sum): Use wide-int interfaces.
+ (c_common_nodes_and_builtins): Use make_int_cst.
+ (match_case_to_enum_1): Use tree_fits_uhwi_p and tree_fits_shwi_p.
+ (handle_alloc_size_attribute): Use wide-int interfaces.
+ (get_nonnull_operand): Likewise.
+ * c-format.c (get_constant): Use tree_fits_uhwi_p.
+ * c-lex.c: Include wide-int.h.
+ (narrowest_unsigned_type): Take a widest_int rather than two
+ HOST_WIDE_INTs.
+ (narrowest_signed_type): Likewise.
+ (interpret_integer): Update accordingly. Use wide-int interfaces.
+ (lex_charconst): Use wide-int interfaces.
+ * c-pretty-print.c: Include wide-int.h.
+ (pp_c_integer_constant): Use wide-int interfaces.
+ * cilk.c (declare_one_free_variable): Use tree_int_cst_lt instead of
+ INT_CST_LT_UNSIGNED.
+
+
+cp:
+ * call.c: Include wide-int.h.
+ (type_passed_as): Use tree_int_cst_lt instead of INT_CST_LT_UNSIGNED.
+ (convert_for_arg_passing): Likewise.
+ * class.c: Include wide-int.h.
+ (walk_subobject_offsets): Use tree_int_cst_lt instead of INT_CST_LT.
+ (end_of_class): Use tree_int_cst_lt instead of INT_CST_LT_UNSIGNED.
+	(include_empty_classes): Likewise.
+ (layout_class_type): Use tree_int_cst_lt instead of INT_CST_LT.
+ * cvt.c: Include wide-int.h.
+ (ignore_overflows): Use wide_int_to_tree.
+ * decl.c: Include wide-int.h.
+ (check_array_designated_initializer): Use wide-int interfaces.
+ (compute_array_index_type): Use tree_int_cst_lt instead of INT_CST_LT.
+ (finish_enum_value_list): Use signop.
+ (build_enumerator): Use wide-int interfaces.
+ * init.c: Include wide-int.h.
+ (build_new_1): Use wide-int interfaces.
+ * mangle.c: Include wide-int.h.
+ (write_integer_cst): Use wide-int interfaces.
+ (write_array_type): Likewise.
+ * tree.c: Include wide-int.h.
+ (cp_tree_equal): Use tree_int_cst_equal.
+ * typeck2.c: Include wide-int.h.
+ (process_init_constructor_array): Use wide-int interfaces.
+
+
+fortran:
+ * target-memory.c: Include wide-int.h.
+ (gfc_interpret_logical): Use wide-int interfaces.
+ * trans-array.c: Include wide-int.h.
+ (gfc_conv_array_initializer): Use wide-int interfaces.
+ * trans-const.c: Include wide-int.h.
+ (gfc_conv_string_init): Use wide-int interfaces.
+ (gfc_conv_mpz_to_tree): Likewise.
+ (gfc_conv_tree_to_mpz): Likewise.
+ * trans-decl.c (gfc_can_put_var_on_stack): Use tree_fits_uhwi_p.
+ * trans-expr.c: Include wide-int.h.
+ (gfc_conv_cst_int_power): Use wide-int interfaces.
+ (gfc_string_to_single_character): Likewise.
+ (gfc_optimize_len_trim): Likewise.
+ * trans-intrinsic.c: Include wide-int.h.
+ (trans_this_image): Use wide-int interfaces.
+ (gfc_conv_intrinsic_bound): Likewise.
+ (conv_intrinsic_cobound): Likewise.
+ * trans-types.c (gfc_init_types): Likewise.
+ (gfc_get_array_type_bounds): Pass an integer of the correct type
+ instead of using integer_one_node.
+
+
+go:
+ * go-gcc.cc (Gcc_backend::type_size): Use tree_fits_uhwi_p.
+
+
+java:
+ * boehm.c: Include wide-int.h.
+ (mark_reference_fields): Use a wide_int mask.
+ (get_boehm_type_descriptor): Use wide-int interfaces.
+ * expr.c: Include wide-int.h.
+ (build_newarray): Remove bogus "== INTEGER_CST".
+ (expand_java_pushc): Use real_from_integer.
+ (build_field_ref): Use tree_int_cst_lt instead of INT_CST_LT_UNSIGNED.
+ * jcf-parse.c: Include wide-int.h.
+ (get_constant): Use wide-int interfaces.
+
+
+lto:
+ * lto.c (compare_tree_sccs_1): Use wide-int interfaces.
+ * lto-lang.c (get_nonnull_operand): Likewise.
+
+
+objc:
+ * objc-act.c: Include wide-int.h.
+ (objc_decl_method_attributes): Use wide-int interfaces.
+
+
+testsuite:
+ * gcc.dg/tree-ssa/pr45427.c: Update to look for 0x0 instead of 0.
diff --git a/gcc/Makefile.in b/gcc/Makefile.in
index 6b46408d1a1..fd8c2c5b3ea 100644
--- a/gcc/Makefile.in
+++ b/gcc/Makefile.in
@@ -1464,6 +1464,8 @@ OBJS = \
vmsdbgout.o \
vtable-verify.o \
web.o \
+ wide-int.o \
+ wide-int-print.o \
xcoffout.o \
$(out_object_file) \
$(EXTRA_OBJS) \
@@ -2229,7 +2231,7 @@ s-tm-texi: build/genhooks$(build_exeext) $(srcdir)/doc/tm.texi.in
GTFILES = $(CPP_ID_DATA_H) $(srcdir)/input.h $(srcdir)/coretypes.h \
$(host_xm_file_list) \
$(tm_file_list) $(HASHTAB_H) $(SPLAY_TREE_H) $(srcdir)/bitmap.h \
- $(srcdir)/alias.h $(srcdir)/coverage.c $(srcdir)/rtl.h \
+ $(srcdir)/wide-int.h $(srcdir)/alias.h $(srcdir)/coverage.c $(srcdir)/rtl.h \
$(srcdir)/optabs.h $(srcdir)/tree.h $(srcdir)/tree-core.h \
$(srcdir)/libfuncs.h $(SYMTAB_H) \
$(srcdir)/real.h $(srcdir)/function.h $(srcdir)/insn-addr.h $(srcdir)/hwint.h \
@@ -2240,6 +2242,7 @@ GTFILES = $(CPP_ID_DATA_H) $(srcdir)/input.h $(srcdir)/coretypes.h \
$(srcdir)/alias.c $(srcdir)/bitmap.c $(srcdir)/cselib.c $(srcdir)/cgraph.c \
$(srcdir)/ipa-prop.c $(srcdir)/ipa-cp.c $(srcdir)/ipa-utils.h \
$(srcdir)/dbxout.c \
+ $(srcdir)/signop.h \
$(srcdir)/dwarf2out.h \
$(srcdir)/dwarf2asm.c \
$(srcdir)/dwarf2cfi.c \
@@ -2442,10 +2445,9 @@ gengtype-state.o build/gengtype-state.o: gengtype-state.c $(SYSTEM_H) \
gengtype-state.o: $(CONFIG_H)
CFLAGS-gengtype-state.o += -DGENERATOR_FILE
build/gengtype-state.o: $(BCONFIG_H)
-
gengtype.o build/gengtype.o : gengtype.c $(SYSTEM_H) gengtype.h \
- rtl.def insn-notes.def errors.h double-int.h version.h $(HASHTAB_H) \
- $(OBSTACK_H) $(XREGEX_H)
+ rtl.def insn-notes.def errors.h double-int.h version.h \
+ $(HASHTAB_H) $(OBSTACK_H) $(XREGEX_H)
gengtype.o: $(CONFIG_H)
CFLAGS-gengtype.o += -DGENERATOR_FILE
build/gengtype.o: $(BCONFIG_H)
@@ -3752,7 +3754,7 @@ TAGS: lang.tags
incs="$$incs --include $$dir/TAGS.sub"; \
fi; \
done; \
- etags -o TAGS.sub c-family/*.h c-family/*.c *.h *.c; \
+ etags -o TAGS.sub c-family/*.h c-family/*.c *.h *.c *.cc; \
etags --include TAGS.sub $$incs)
# -----------------------------------------------------
diff --git a/gcc/ada/gcc-interface/cuintp.c b/gcc/ada/gcc-interface/cuintp.c
index c5736f5ecdc..1bf9c356223 100644
--- a/gcc/ada/gcc-interface/cuintp.c
+++ b/gcc/ada/gcc-interface/cuintp.c
@@ -160,7 +160,11 @@ UI_From_gnu (tree Input)
in a signed 64-bit integer. */
if (tree_fits_shwi_p (Input))
return UI_From_Int (tree_to_shwi (Input));
- else if (TREE_INT_CST_HIGH (Input) < 0 && TYPE_UNSIGNED (gnu_type))
+
+ gcc_assert (TYPE_PRECISION (gnu_type) <= 64);
+ if (TYPE_UNSIGNED (gnu_type)
+ && TYPE_PRECISION (gnu_type) == 64
+ && wi::neg_p (Input, SIGNED))
return No_Uint;
#endif
diff --git a/gcc/ada/gcc-interface/decl.c b/gcc/ada/gcc-interface/decl.c
index 73945f10ec5..6d0b8b25038 100644
--- a/gcc/ada/gcc-interface/decl.c
+++ b/gcc/ada/gcc-interface/decl.c
@@ -1642,7 +1642,7 @@ gnat_to_gnu_entity (Entity_Id gnat_entity, tree gnu_expr, int definition)
TYPE_PRECISION (gnu_type) = esize;
TYPE_UNSIGNED (gnu_type) = is_unsigned;
set_min_and_max_values_for_integral_type (gnu_type, esize,
- is_unsigned);
+ TYPE_SIGN (gnu_type));
process_attributes (&gnu_type, &attr_list, true, gnat_entity);
layout_type (gnu_type);
@@ -7521,11 +7521,9 @@ annotate_value (tree gnu_size)
if (TREE_CODE (TREE_OPERAND (gnu_size, 1)) == INTEGER_CST)
{
tree op1 = TREE_OPERAND (gnu_size, 1);
- double_int signed_op1
- = tree_to_double_int (op1).sext (TYPE_PRECISION (sizetype));
- if (signed_op1.is_negative ())
+ if (wi::neg_p (op1))
{
- op1 = double_int_to_tree (sizetype, -signed_op1);
+ op1 = wide_int_to_tree (sizetype, wi::neg (op1));
pre_op1 = annotate_value (build1 (NEGATE_EXPR, sizetype, op1));
}
}
diff --git a/gcc/ada/gcc-interface/utils.c b/gcc/ada/gcc-interface/utils.c
index c81ab00897d..b9b9dc1c52d 100644
--- a/gcc/ada/gcc-interface/utils.c
+++ b/gcc/ada/gcc-interface/utils.c
@@ -6187,8 +6187,7 @@ static bool
get_nonnull_operand (tree arg_num_expr, unsigned HOST_WIDE_INT *valp)
{
/* Verify the arg number is a constant. */
- if (TREE_CODE (arg_num_expr) != INTEGER_CST
- || TREE_INT_CST_HIGH (arg_num_expr) != 0)
+ if (!tree_fits_uhwi_p (arg_num_expr))
return false;
*valp = TREE_INT_CST_LOW (arg_num_expr);
diff --git a/gcc/alias.c b/gcc/alias.c
index 5f374029193..5f50fc245c9 100644
--- a/gcc/alias.c
+++ b/gcc/alias.c
@@ -340,9 +340,10 @@ ao_ref_from_mem (ao_ref *ref, const_rtx mem)
if (MEM_EXPR (mem) != get_spill_slot_decl (false)
&& (ref->offset < 0
|| (DECL_P (ref->base)
- && (!tree_fits_uhwi_p (DECL_SIZE (ref->base))
- || (tree_to_uhwi (DECL_SIZE (ref->base))
- < (unsigned HOST_WIDE_INT) (ref->offset + ref->size))))))
+ && (DECL_SIZE (ref->base) == NULL_TREE
+ || TREE_CODE (DECL_SIZE (ref->base)) != INTEGER_CST
+ || wi::ltu_p (wi::to_offset (DECL_SIZE (ref->base)),
+ ref->offset + ref->size)))))
return false;
return true;
@@ -1532,9 +1533,7 @@ rtx_equal_for_memref_p (const_rtx x, const_rtx y)
case VALUE:
CASE_CONST_UNIQUE:
- /* There's no need to compare the contents of CONST_DOUBLEs or
- CONST_INTs because pointer equality is a good enough
- comparison for these nodes. */
+ /* Pointer equality guarantees equality for these nodes. */
return 0;
default:
@@ -2275,15 +2274,22 @@ adjust_offset_for_component_ref (tree x, bool *known_p,
{
tree xoffset = component_ref_field_offset (x);
tree field = TREE_OPERAND (x, 1);
+ if (TREE_CODE (xoffset) != INTEGER_CST)
+ {
+ *known_p = false;
+ return;
+ }
- if (! tree_fits_uhwi_p (xoffset))
+ offset_int woffset
+ = (wi::to_offset (xoffset)
+ + wi::lrshift (wi::to_offset (DECL_FIELD_BIT_OFFSET (field)),
+ LOG2_BITS_PER_UNIT));
+ if (!wi::fits_uhwi_p (woffset))
{
*known_p = false;
return;
}
- *offset += (tree_to_uhwi (xoffset)
- + (tree_to_uhwi (DECL_FIELD_BIT_OFFSET (field))
- / BITS_PER_UNIT));
+ *offset += woffset.to_uhwi ();
x = TREE_OPERAND (x, 0);
}
diff --git a/gcc/builtins.c b/gcc/builtins.c
index 42e4af263ab..140d6ba1a5b 100644
--- a/gcc/builtins.c
+++ b/gcc/builtins.c
@@ -413,7 +413,7 @@ get_object_alignment_2 (tree exp, unsigned int *alignp,
bitpos += ptr_bitpos;
if (TREE_CODE (exp) == MEM_REF
|| TREE_CODE (exp) == TARGET_MEM_REF)
- bitpos += mem_ref_offset (exp).low * BITS_PER_UNIT;
+ bitpos += mem_ref_offset (exp).to_short_addr () * BITS_PER_UNIT;
}
}
else if (TREE_CODE (exp) == STRING_CST)
@@ -672,20 +672,24 @@ c_getstr (tree src)
return TREE_STRING_POINTER (src) + tree_to_uhwi (offset_node);
}
-/* Return a CONST_INT or CONST_DOUBLE corresponding to target reading
+/* Return a constant integer corresponding to target reading
GET_MODE_BITSIZE (MODE) bits from string constant STR. */
static rtx
c_readstr (const char *str, enum machine_mode mode)
{
- HOST_WIDE_INT c[2];
HOST_WIDE_INT ch;
unsigned int i, j;
+ HOST_WIDE_INT tmp[MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_WIDE_INT];
gcc_assert (GET_MODE_CLASS (mode) == MODE_INT);
+ unsigned int len = (GET_MODE_PRECISION (mode) + HOST_BITS_PER_WIDE_INT - 1)
+ / HOST_BITS_PER_WIDE_INT;
+
+ gcc_assert (len <= MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_WIDE_INT);
+ for (i = 0; i < len; i++)
+ tmp[i] = 0;
- c[0] = 0;
- c[1] = 0;
ch = 1;
for (i = 0; i < GET_MODE_SIZE (mode); i++)
{
@@ -696,13 +700,14 @@ c_readstr (const char *str, enum machine_mode mode)
&& GET_MODE_SIZE (mode) >= UNITS_PER_WORD)
j = j + UNITS_PER_WORD - 2 * (j % UNITS_PER_WORD) - 1;
j *= BITS_PER_UNIT;
- gcc_assert (j < HOST_BITS_PER_DOUBLE_INT);
if (ch)
ch = (unsigned char) str[i];
- c[j / HOST_BITS_PER_WIDE_INT] |= ch << (j % HOST_BITS_PER_WIDE_INT);
+ tmp[j / HOST_BITS_PER_WIDE_INT] |= ch << (j % HOST_BITS_PER_WIDE_INT);
}
- return immed_double_const (c[0], c[1], mode);
+
+ wide_int c = wide_int::from_array (tmp, len, GET_MODE_PRECISION (mode));
+ return immed_wide_int_const (c, mode);
}
/* Cast a target constant CST to target CHAR and if that value fits into
@@ -718,7 +723,9 @@ target_char_cast (tree cst, char *p)
|| CHAR_TYPE_SIZE > HOST_BITS_PER_WIDE_INT)
return 1;
+ /* Do not care if it fits or not right here. */
val = TREE_INT_CST_LOW (cst);
+
if (CHAR_TYPE_SIZE < HOST_BITS_PER_WIDE_INT)
val &= (((unsigned HOST_WIDE_INT) 1) << CHAR_TYPE_SIZE) - 1;
@@ -3128,7 +3135,7 @@ determine_block_size (tree len, rtx len_rtx,
}
else
{
- double_int min, max;
+ wide_int min, max;
enum value_range_type range_type = VR_UNDEFINED;
/* Determine bounds from the type. */
@@ -3146,18 +3153,18 @@ determine_block_size (tree len, rtx len_rtx,
range_type = get_range_info (len, &min, &max);
if (range_type == VR_RANGE)
{
- if (min.fits_uhwi () && *min_size < min.to_uhwi ())
+ if (wi::fits_uhwi_p (min) && *min_size < min.to_uhwi ())
*min_size = min.to_uhwi ();
- if (max.fits_uhwi () && *max_size > max.to_uhwi ())
+ if (wi::fits_uhwi_p (max) && *max_size > max.to_uhwi ())
*probable_max_size = *max_size = max.to_uhwi ();
}
else if (range_type == VR_ANTI_RANGE)
{
/* Anti range 0...N lets us to determine minimal size to N+1. */
- if (min.is_zero ())
+ if (min == 0)
{
- if ((max + double_int_one).fits_uhwi ())
- *min_size = (max + double_int_one).to_uhwi ();
+ if (wi::fits_uhwi_p (max) && max.to_uhwi () + 1 != 0)
+ *min_size = max.to_uhwi () + 1;
}
/* Code like
@@ -3168,9 +3175,8 @@ determine_block_size (tree len, rtx len_rtx,
Produce anti range allowing negative values of N. We still
can use the information and make a guess that N is not negative.
*/
- else if (!max.ule (double_int_one.lshift (30))
- && min.fits_uhwi ())
- *probable_max_size = min.to_uhwi () - 1;
+ else if (!wi::leu_p (max, 1 << 30) && wi::fits_uhwi_p (min))
+ *probable_max_size = min.to_uhwi () - 1;
}
}
gcc_checking_assert (*max_size <=
@@ -4943,12 +4949,12 @@ expand_builtin_signbit (tree exp, rtx target)
if (bitpos < GET_MODE_BITSIZE (rmode))
{
- double_int mask = double_int_zero.set_bit (bitpos);
+ wide_int mask = wi::set_bit_in_zero (bitpos, GET_MODE_PRECISION (rmode));
if (GET_MODE_SIZE (imode) > GET_MODE_SIZE (rmode))
temp = gen_lowpart (rmode, temp);
temp = expand_binop (rmode, and_optab, temp,
- immed_double_int_const (mask, rmode),
+ immed_wide_int_const (mask, rmode),
NULL_RTX, 1, OPTAB_LIB_WIDEN);
}
else
@@ -8012,8 +8018,8 @@ fold_builtin_int_roundingfn (location_t loc, tree fndecl, tree arg)
{
tree itype = TREE_TYPE (TREE_TYPE (fndecl));
tree ftype = TREE_TYPE (arg);
- double_int val;
REAL_VALUE_TYPE r;
+ bool fail = false;
switch (DECL_FUNCTION_CODE (fndecl))
{
@@ -8039,9 +8045,9 @@ fold_builtin_int_roundingfn (location_t loc, tree fndecl, tree arg)
gcc_unreachable ();
}
- real_to_integer2 ((HOST_WIDE_INT *)&val.low, &val.high, &r);
- if (double_int_fits_to_tree_p (itype, val))
- return double_int_to_tree (itype, val);
+ wide_int val = real_to_integer (&r, &fail, TYPE_PRECISION (itype));
+ if (!fail)
+ return wide_int_to_tree (itype, val);
}
}
@@ -8074,94 +8080,39 @@ fold_builtin_bitop (tree fndecl, tree arg)
/* Optimize for constant argument. */
if (TREE_CODE (arg) == INTEGER_CST && !TREE_OVERFLOW (arg))
{
- HOST_WIDE_INT hi, width, result;
- unsigned HOST_WIDE_INT lo;
- tree type;
-
- type = TREE_TYPE (arg);
- width = TYPE_PRECISION (type);
- lo = TREE_INT_CST_LOW (arg);
-
- /* Clear all the bits that are beyond the type's precision. */
- if (width > HOST_BITS_PER_WIDE_INT)
- {
- hi = TREE_INT_CST_HIGH (arg);
- if (width < HOST_BITS_PER_DOUBLE_INT)
- hi &= ~(HOST_WIDE_INT_M1U << (width - HOST_BITS_PER_WIDE_INT));
- }
- else
- {
- hi = 0;
- if (width < HOST_BITS_PER_WIDE_INT)
- lo &= ~(HOST_WIDE_INT_M1U << width);
- }
+ tree type = TREE_TYPE (arg);
+ int result;
switch (DECL_FUNCTION_CODE (fndecl))
{
CASE_INT_FN (BUILT_IN_FFS):
- if (lo != 0)
- result = ffs_hwi (lo);
- else if (hi != 0)
- result = HOST_BITS_PER_WIDE_INT + ffs_hwi (hi);
- else
- result = 0;
+ result = wi::ffs (arg);
break;
CASE_INT_FN (BUILT_IN_CLZ):
- if (hi != 0)
- result = width - floor_log2 (hi) - 1 - HOST_BITS_PER_WIDE_INT;
- else if (lo != 0)
- result = width - floor_log2 (lo) - 1;
+ if (wi::ne_p (arg, 0))
+ result = wi::clz (arg);
else if (! CLZ_DEFINED_VALUE_AT_ZERO (TYPE_MODE (type), result))
- result = width;
+ result = TYPE_PRECISION (type);
break;
CASE_INT_FN (BUILT_IN_CTZ):
- if (lo != 0)
- result = ctz_hwi (lo);
- else if (hi != 0)
- result = HOST_BITS_PER_WIDE_INT + ctz_hwi (hi);
+ if (wi::ne_p (arg, 0))
+ result = wi::ctz (arg);
else if (! CTZ_DEFINED_VALUE_AT_ZERO (TYPE_MODE (type), result))
- result = width;
+ result = TYPE_PRECISION (type);
break;
CASE_INT_FN (BUILT_IN_CLRSB):
- if (width > 2 * HOST_BITS_PER_WIDE_INT)
- return NULL_TREE;
- if (width > HOST_BITS_PER_WIDE_INT
- && (hi & ((unsigned HOST_WIDE_INT) 1
- << (width - HOST_BITS_PER_WIDE_INT - 1))) != 0)
- {
- hi = ~hi & ~(HOST_WIDE_INT_M1U
- << (width - HOST_BITS_PER_WIDE_INT - 1));
- lo = ~lo;
- }
- else if (width <= HOST_BITS_PER_WIDE_INT
- && (lo & ((unsigned HOST_WIDE_INT) 1 << (width - 1))) != 0)
- lo = ~lo & ~(HOST_WIDE_INT_M1U << (width - 1));
- if (hi != 0)
- result = width - floor_log2 (hi) - 2 - HOST_BITS_PER_WIDE_INT;
- else if (lo != 0)
- result = width - floor_log2 (lo) - 2;
- else
- result = width - 1;
+ result = wi::clrsb (arg);
break;
CASE_INT_FN (BUILT_IN_POPCOUNT):
- result = 0;
- while (lo)
- result++, lo &= lo - 1;
- while (hi)
- result++, hi &= (unsigned HOST_WIDE_INT) hi - 1;
+ result = wi::popcount (arg);
break;
CASE_INT_FN (BUILT_IN_PARITY):
- result = 0;
- while (lo)
- result++, lo &= lo - 1;
- while (hi)
- result++, hi &= (unsigned HOST_WIDE_INT) hi - 1;
- result &= 1;
+ result = wi::parity (arg);
break;
default:
@@ -8185,49 +8136,24 @@ fold_builtin_bswap (tree fndecl, tree arg)
/* Optimize constant value. */
if (TREE_CODE (arg) == INTEGER_CST && !TREE_OVERFLOW (arg))
{
- HOST_WIDE_INT hi, width, r_hi = 0;
- unsigned HOST_WIDE_INT lo, r_lo = 0;
tree type = TREE_TYPE (TREE_TYPE (fndecl));
- width = TYPE_PRECISION (type);
- lo = TREE_INT_CST_LOW (arg);
- hi = TREE_INT_CST_HIGH (arg);
-
switch (DECL_FUNCTION_CODE (fndecl))
{
case BUILT_IN_BSWAP16:
case BUILT_IN_BSWAP32:
case BUILT_IN_BSWAP64:
{
- int s;
-
- for (s = 0; s < width; s += 8)
- {
- int d = width - s - 8;
- unsigned HOST_WIDE_INT byte;
-
- if (s < HOST_BITS_PER_WIDE_INT)
- byte = (lo >> s) & 0xff;
- else
- byte = (hi >> (s - HOST_BITS_PER_WIDE_INT)) & 0xff;
-
- if (d < HOST_BITS_PER_WIDE_INT)
- r_lo |= byte << d;
- else
- r_hi |= byte << (d - HOST_BITS_PER_WIDE_INT);
- }
+ signop sgn = TYPE_SIGN (type);
+ tree result =
+ wide_int_to_tree (type,
+ wide_int::from (arg, TYPE_PRECISION (type),
+ sgn).bswap ());
+ return result;
}
-
- break;
-
default:
gcc_unreachable ();
}
-
- if (width < HOST_BITS_PER_WIDE_INT)
- return build_int_cst (type, r_lo);
- else
- return build_int_cst_wide (type, r_lo, r_hi);
}
return NULL_TREE;
@@ -8289,7 +8215,7 @@ fold_builtin_logarithm (location_t loc, tree fndecl, tree arg,
/* Prepare to do logN(exp10(exponent) -> exponent*logN(10). */
{
REAL_VALUE_TYPE dconst10;
- real_from_integer (&dconst10, VOIDmode, 10, 0, 0);
+ real_from_integer (&dconst10, VOIDmode, 10, SIGNED);
x = build_real (type, dconst10);
}
exponent = CALL_EXPR_ARG (arg, 0);
@@ -8442,7 +8368,7 @@ fold_builtin_pow (location_t loc, tree fndecl, tree arg0, tree arg1, tree type)
/* Check for an integer exponent. */
n = real_to_integer (&c);
- real_from_integer (&cint, VOIDmode, n, n < 0 ? -1 : 0, 0);
+ real_from_integer (&cint, VOIDmode, n, SIGNED);
if (real_identical (&c, &cint))
{
/* Attempt to evaluate pow at compile-time, unless this should
@@ -8814,20 +8740,18 @@ fold_builtin_memory_op (location_t loc, tree dest, tree src,
else if (TREE_CODE (src_base) == MEM_REF
&& TREE_CODE (dest_base) == MEM_REF)
{
- double_int off;
if (! operand_equal_p (TREE_OPERAND (src_base, 0),
TREE_OPERAND (dest_base, 0), 0))
return NULL_TREE;
- off = mem_ref_offset (src_base) +
- double_int::from_shwi (src_offset);
- if (!off.fits_shwi ())
+ offset_int off = mem_ref_offset (src_base) + src_offset;
+ if (!wi::fits_shwi_p (off))
return NULL_TREE;
- src_offset = off.low;
- off = mem_ref_offset (dest_base) +
- double_int::from_shwi (dest_offset);
- if (!off.fits_shwi ())
+ src_offset = off.to_shwi ();
+
+ off = mem_ref_offset (dest_base) + dest_offset;
+ if (!wi::fits_shwi_p (off))
return NULL_TREE;
- dest_offset = off.low;
+ dest_offset = off.to_shwi ();
if (ranges_overlap_p (src_offset, maxsize,
dest_offset, maxsize))
return NULL_TREE;
@@ -12690,8 +12614,7 @@ fold_builtin_object_size (tree ptr, tree ost)
if (TREE_CODE (ptr) == ADDR_EXPR)
{
bytes = compute_builtin_object_size (ptr, object_size_type);
- if (double_int_fits_to_tree_p (size_type_node,
- double_int::from_uhwi (bytes)))
+ if (wi::fits_to_tree_p (bytes, size_type_node))
return build_int_cstu (size_type_node, bytes);
}
else if (TREE_CODE (ptr) == SSA_NAME)
@@ -12701,8 +12624,7 @@ fold_builtin_object_size (tree ptr, tree ost)
it. */
bytes = compute_builtin_object_size (ptr, object_size_type);
if (bytes != (unsigned HOST_WIDE_INT) (object_size_type < 2 ? -1 : 0)
- && double_int_fits_to_tree_p (size_type_node,
- double_int::from_uhwi (bytes)))
+ && wi::fits_to_tree_p (bytes, size_type_node))
return build_int_cstu (size_type_node, bytes);
}
diff --git a/gcc/c-family/c-ada-spec.c b/gcc/c-family/c-ada-spec.c
index fc21b629aaa..a21bc498d07 100644
--- a/gcc/c-family/c-ada-spec.c
+++ b/gcc/c-family/c-ada-spec.c
@@ -29,21 +29,7 @@ along with GCC; see the file COPYING3. If not see
#include "cpplib.h"
#include "c-pragma.h"
#include "cpp-id-data.h"
-
-/* Adapted from hwint.h to use the Ada prefix. */
-#if HOST_BITS_PER_WIDE_INT == HOST_BITS_PER_LONG
-# if HOST_BITS_PER_WIDE_INT == 64
-# define ADA_HOST_WIDE_INT_PRINT_DOUBLE_HEX \
- "16#%" HOST_LONG_FORMAT "x%016" HOST_LONG_FORMAT "x#"
-# else
-# define ADA_HOST_WIDE_INT_PRINT_DOUBLE_HEX \
- "16#%" HOST_LONG_FORMAT "x%08" HOST_LONG_FORMAT "x#"
-# endif
-#else
- /* We can assume that 'long long' is at least 64 bits. */
-# define ADA_HOST_WIDE_INT_PRINT_DOUBLE_HEX \
- "16#%" HOST_LONG_LONG_FORMAT "x%016" HOST_LONG_LONG_FORMAT "x#"
-#endif /* HOST_BITS_PER_WIDE_INT == HOST_BITS_PER_LONG */
+#include "wide-int.h"
/* Local functions, macros and variables. */
static int dump_generic_ada_node (pretty_printer *, tree, tree, int, int,
@@ -2211,19 +2197,19 @@ dump_generic_ada_node (pretty_printer *buffer, tree node, tree type, int spc,
pp_unsigned_wide_integer (buffer, tree_to_uhwi (node));
else
{
- tree val = node;
- unsigned HOST_WIDE_INT low = TREE_INT_CST_LOW (val);
- HOST_WIDE_INT high = TREE_INT_CST_HIGH (val);
-
- if (tree_int_cst_sgn (val) < 0)
+ wide_int val = node;
+ int i;
+ if (wi::neg_p (val))
{
pp_minus (buffer);
- high = ~high + !low;
- low = -low;
+ val = -val;
}
sprintf (pp_buffer (buffer)->digit_buffer,
- ADA_HOST_WIDE_INT_PRINT_DOUBLE_HEX,
- (unsigned HOST_WIDE_INT) high, low);
+ "16#%" HOST_WIDE_INT_PRINT "x",
+ val.elt (val.get_len () - 1));
+ for (i = val.get_len () - 2; i >= 0; i--)
+ sprintf (pp_buffer (buffer)->digit_buffer,
+ HOST_WIDE_INT_PRINT_PADDED_HEX, val.elt (i));
pp_string (buffer, pp_buffer (buffer)->digit_buffer);
}
break;
diff --git a/gcc/c-family/c-common.c b/gcc/c-family/c-common.c
index 0ad955d9cca..0afe2f5ab38 100644
--- a/gcc/c-family/c-common.c
+++ b/gcc/c-family/c-common.c
@@ -49,6 +49,7 @@ along with GCC; see the file COPYING3. If not see
#include "cgraph.h"
#include "target-def.h"
#include "gimplify.h"
+#include "wide-int-print.h"
cpp_reader *parse_in; /* Declared in c-pragma.h. */
@@ -4122,9 +4123,12 @@ shorten_compare (location_t loc, tree *op0_ptr, tree *op1_ptr,
{
/* Convert primop1 to target type, but do not introduce
additional overflow. We know primop1 is an int_cst. */
- primop1 = force_fit_type_double (*restype_ptr,
- tree_to_double_int (primop1),
- 0, TREE_OVERFLOW (primop1));
+ primop1 = force_fit_type (*restype_ptr,
+ wide_int::from
+ (primop1,
+ TYPE_PRECISION (*restype_ptr),
+ TYPE_SIGN (TREE_TYPE (primop1))),
+ 0, TREE_OVERFLOW (primop1));
}
if (type != *restype_ptr)
{
@@ -4132,20 +4136,10 @@ shorten_compare (location_t loc, tree *op0_ptr, tree *op1_ptr,
maxval = convert (*restype_ptr, maxval);
}
- if (unsignedp && unsignedp0)
- {
- min_gt = INT_CST_LT_UNSIGNED (primop1, minval);
- max_gt = INT_CST_LT_UNSIGNED (primop1, maxval);
- min_lt = INT_CST_LT_UNSIGNED (minval, primop1);
- max_lt = INT_CST_LT_UNSIGNED (maxval, primop1);
- }
- else
- {
- min_gt = INT_CST_LT (primop1, minval);
- max_gt = INT_CST_LT (primop1, maxval);
- min_lt = INT_CST_LT (minval, primop1);
- max_lt = INT_CST_LT (maxval, primop1);
- }
+ min_gt = tree_int_cst_lt (primop1, minval);
+ max_gt = tree_int_cst_lt (primop1, maxval);
+ min_lt = tree_int_cst_lt (minval, primop1);
+ max_lt = tree_int_cst_lt (maxval, primop1);
val = 0;
/* This used to be a switch, but Genix compiler can't handle that. */
@@ -4434,8 +4428,7 @@ pointer_int_sum (location_t loc, enum tree_code resultcode,
convert (TREE_TYPE (intop), size_exp), 1);
intop = convert (sizetype, t);
if (TREE_OVERFLOW_P (intop) && !TREE_OVERFLOW (t))
- intop = build_int_cst_wide (TREE_TYPE (intop), TREE_INT_CST_LOW (intop),
- TREE_INT_CST_HIGH (intop));
+ intop = wide_int_to_tree (TREE_TYPE (intop), intop);
}
/* Create the sum or difference. */
@@ -5512,7 +5505,7 @@ c_common_nodes_and_builtins (void)
}
/* This node must not be shared. */
- void_zero_node = make_node (INTEGER_CST);
+ void_zero_node = make_int_cst (1, 1);
TREE_TYPE (void_zero_node) = void_type_node;
void_list_node = build_void_list_node ();
@@ -5719,7 +5712,7 @@ c_common_nodes_and_builtins (void)
/* Create the built-in __null node. It is important that this is
not shared. */
- null_node = make_node (INTEGER_CST);
+ null_node = make_int_cst (1, 1);
TREE_TYPE (null_node) = c_common_type_for_size (POINTER_SIZE, 0);
/* Since builtin_types isn't gc'ed, don't export these nodes. */
@@ -6097,22 +6090,14 @@ c_add_case_label (location_t loc, splay_tree cases, tree cond, tree orig_type,
static void
match_case_to_enum_1 (tree key, tree type, tree label)
{
- char buf[2 + 2*HOST_BITS_PER_WIDE_INT/4 + 1];
-
- /* ??? Not working too hard to print the double-word value.
- Should perhaps be done with %lwd in the diagnostic routines? */
- if (TREE_INT_CST_HIGH (key) == 0)
- snprintf (buf, sizeof (buf), HOST_WIDE_INT_PRINT_UNSIGNED,
- TREE_INT_CST_LOW (key));
- else if (!TYPE_UNSIGNED (type)
- && TREE_INT_CST_HIGH (key) == -1
- && TREE_INT_CST_LOW (key) != 0)
- snprintf (buf, sizeof (buf), "-" HOST_WIDE_INT_PRINT_UNSIGNED,
- -TREE_INT_CST_LOW (key));
+ char buf[WIDE_INT_PRINT_BUFFER_SIZE];
+
+ if (tree_fits_uhwi_p (key))
+ print_dec (key, buf, UNSIGNED);
+ else if (tree_fits_shwi_p (key))
+ print_dec (key, buf, SIGNED);
else
- snprintf (buf, sizeof (buf), HOST_WIDE_INT_PRINT_DOUBLE_HEX,
- (unsigned HOST_WIDE_INT) TREE_INT_CST_HIGH (key),
- (unsigned HOST_WIDE_INT) TREE_INT_CST_LOW (key));
+ print_hex (key, buf);
if (TYPE_NAME (type) == 0)
warning_at (DECL_SOURCE_LOCATION (CASE_LABEL (label)),
@@ -8849,13 +8834,14 @@ check_nonnull_arg (void * ARG_UNUSED (ctx), tree param,
static bool
get_nonnull_operand (tree arg_num_expr, unsigned HOST_WIDE_INT *valp)
{
- /* Verify the arg number is a constant. */
- if (TREE_CODE (arg_num_expr) != INTEGER_CST
- || TREE_INT_CST_HIGH (arg_num_expr) != 0)
+ /* Verify the arg number is a small constant. */
+ if (tree_fits_uhwi_p (arg_num_expr))
+ {
+ *valp = TREE_INT_CST_LOW (arg_num_expr);
+ return true;
+ }
+ else
return false;
-
- *valp = TREE_INT_CST_LOW (arg_num_expr);
- return true;
}
/* Handle a "nothrow" attribute; arguments as in
diff --git a/gcc/c-family/c-format.c b/gcc/c-family/c-format.c
index 4c0313dc85f..eeefce883d4 100644
--- a/gcc/c-family/c-format.c
+++ b/gcc/c-family/c-format.c
@@ -227,7 +227,7 @@ check_format_string (tree fntype, unsigned HOST_WIDE_INT format_num,
static bool
get_constant (tree expr, unsigned HOST_WIDE_INT *value, int validated_p)
{
- if (TREE_CODE (expr) != INTEGER_CST || TREE_INT_CST_HIGH (expr) != 0)
+ if (!tree_fits_uhwi_p (expr))
{
gcc_assert (!validated_p);
return false;
diff --git a/gcc/c-family/c-lex.c b/gcc/c-family/c-lex.c
index e3e1da240d8..ea24bfc2a2e 100644
--- a/gcc/c-family/c-lex.c
+++ b/gcc/c-family/c-lex.c
@@ -35,6 +35,7 @@ along with GCC; see the file COPYING3. If not see
#include "splay-tree.h"
#include "debug.h"
#include "target.h"
+#include "wide-int.h"
/* We may keep statistics about how long which files took to compile. */
static int header_time, body_time;
@@ -49,9 +50,9 @@ static tree interpret_float (const cpp_token *, unsigned int, const char *,
enum overflow_type *);
static tree interpret_fixed (const cpp_token *, unsigned int);
static enum integer_type_kind narrowest_unsigned_type
- (unsigned HOST_WIDE_INT, unsigned HOST_WIDE_INT, unsigned int);
+ (const widest_int &, unsigned int);
static enum integer_type_kind narrowest_signed_type
- (unsigned HOST_WIDE_INT, unsigned HOST_WIDE_INT, unsigned int);
+ (const widest_int &, unsigned int);
static enum cpp_ttype lex_string (const cpp_token *, tree *, bool, bool);
static tree lex_charconst (const cpp_token *);
static void update_header_times (const char *);
@@ -527,9 +528,7 @@ c_lex_with_flags (tree *value, location_t *loc, unsigned char *cpp_flags,
there isn't one. */
static enum integer_type_kind
-narrowest_unsigned_type (unsigned HOST_WIDE_INT low,
- unsigned HOST_WIDE_INT high,
- unsigned int flags)
+narrowest_unsigned_type (const widest_int &val, unsigned int flags)
{
int itk;
@@ -548,9 +547,7 @@ narrowest_unsigned_type (unsigned HOST_WIDE_INT low,
continue;
upper = TYPE_MAX_VALUE (integer_types[itk]);
- if ((unsigned HOST_WIDE_INT) TREE_INT_CST_HIGH (upper) > high
- || ((unsigned HOST_WIDE_INT) TREE_INT_CST_HIGH (upper) == high
- && TREE_INT_CST_LOW (upper) >= low))
+ if (wi::geu_p (wi::to_widest (upper), val))
return (enum integer_type_kind) itk;
}
@@ -559,8 +556,7 @@ narrowest_unsigned_type (unsigned HOST_WIDE_INT low,
/* Ditto, but narrowest signed type. */
static enum integer_type_kind
-narrowest_signed_type (unsigned HOST_WIDE_INT low,
- unsigned HOST_WIDE_INT high, unsigned int flags)
+narrowest_signed_type (const widest_int &val, unsigned int flags)
{
int itk;
@@ -571,7 +567,6 @@ narrowest_signed_type (unsigned HOST_WIDE_INT low,
else
itk = itk_long_long;
-
for (; itk < itk_none; itk += 2 /* skip signed types */)
{
tree upper;
@@ -580,9 +575,7 @@ narrowest_signed_type (unsigned HOST_WIDE_INT low,
continue;
upper = TYPE_MAX_VALUE (integer_types[itk]);
- if ((unsigned HOST_WIDE_INT) TREE_INT_CST_HIGH (upper) > high
- || ((unsigned HOST_WIDE_INT) TREE_INT_CST_HIGH (upper) == high
- && TREE_INT_CST_LOW (upper) >= low))
+ if (wi::geu_p (wi::to_widest (upper), val))
return (enum integer_type_kind) itk;
}
@@ -597,6 +590,7 @@ interpret_integer (const cpp_token *token, unsigned int flags,
tree value, type;
enum integer_type_kind itk;
cpp_num integer;
+ HOST_WIDE_INT ival[3];
*overflow = OT_NONE;
@@ -604,18 +598,23 @@ interpret_integer (const cpp_token *token, unsigned int flags,
if (integer.overflow)
*overflow = OT_OVERFLOW;
+ ival[0] = integer.low;
+ ival[1] = integer.high;
+ ival[2] = 0;
+ widest_int wval = widest_int::from_array (ival, 3);
+
/* The type of a constant with a U suffix is straightforward. */
if (flags & CPP_N_UNSIGNED)
- itk = narrowest_unsigned_type (integer.low, integer.high, flags);
+ itk = narrowest_unsigned_type (wval, flags);
else
{
/* The type of a potentially-signed integer constant varies
depending on the base it's in, the standard in use, and the
length suffixes. */
enum integer_type_kind itk_u
- = narrowest_unsigned_type (integer.low, integer.high, flags);
+ = narrowest_unsigned_type (wval, flags);
enum integer_type_kind itk_s
- = narrowest_signed_type (integer.low, integer.high, flags);
+ = narrowest_signed_type (wval, flags);
/* In both C89 and C99, octal and hex constants may be signed or
unsigned, whichever fits tighter. We do not warn about this
@@ -667,7 +666,7 @@ interpret_integer (const cpp_token *token, unsigned int flags,
: "integer constant is too large for %<long%> type");
}
- value = build_int_cst_wide (type, integer.low, integer.high);
+ value = wide_int_to_tree (type, wval);
/* Convert imaginary to a complex type. */
if (flags & CPP_N_IMAGINARY)
@@ -1165,9 +1164,9 @@ lex_charconst (const cpp_token *token)
/* Cast to cppchar_signed_t to get correct sign-extension of RESULT
before possibly widening to HOST_WIDE_INT for build_int_cst. */
if (unsignedp || (cppchar_signed_t) result >= 0)
- value = build_int_cst_wide (type, result, 0);
+ value = build_int_cst (type, result);
else
- value = build_int_cst_wide (type, (cppchar_signed_t) result, -1);
+ value = build_int_cst (type, (cppchar_signed_t) result);
return value;
}
diff --git a/gcc/c-family/c-pretty-print.c b/gcc/c-family/c-pretty-print.c
index 62a00306fba..2e97d0147c5 100644
--- a/gcc/c-family/c-pretty-print.c
+++ b/gcc/c-family/c-pretty-print.c
@@ -30,6 +30,7 @@ along with GCC; see the file COPYING3. If not see
#include "tree-pretty-print.h"
#include "tree-iterator.h"
#include "diagnostic.h"
+#include "wide-int-print.h"
/* The pretty-printer code is primarily designed to closely follow
(GNU) C and C++ grammars. That is to be contrasted with spaghetti
@@ -923,16 +924,14 @@ pp_c_integer_constant (c_pretty_printer *pp, tree i)
pp_unsigned_wide_integer (pp, tree_to_uhwi (i));
else
{
- unsigned HOST_WIDE_INT low = TREE_INT_CST_LOW (i);
- HOST_WIDE_INT high = TREE_INT_CST_HIGH (i);
- if (tree_int_cst_sgn (i) < 0)
+ wide_int wi = i;
+
+ if (wi::lt_p (i, 0, TYPE_SIGN (TREE_TYPE (i))))
{
pp_minus (pp);
- high = ~high + !low;
- low = -low;
+ wi = -wi;
}
- sprintf (pp_buffer (pp)->digit_buffer, HOST_WIDE_INT_PRINT_DOUBLE_HEX,
- (unsigned HOST_WIDE_INT) high, (unsigned HOST_WIDE_INT) low);
+ print_hex (wi, pp_buffer (pp)->digit_buffer);
pp_string (pp, pp_buffer (pp)->digit_buffer);
}
if (TYPE_UNSIGNED (type))
diff --git a/gcc/c-family/cilk.c b/gcc/c-family/cilk.c
index bf549ad1791..a952902533a 100644
--- a/gcc/c-family/cilk.c
+++ b/gcc/c-family/cilk.c
@@ -666,8 +666,7 @@ declare_one_free_variable (const void *var0, void **map0,
/* Maybe promote to int. */
if (INTEGRAL_TYPE_P (var_type) && COMPLETE_TYPE_P (var_type)
- && INT_CST_LT_UNSIGNED (TYPE_SIZE (var_type),
- TYPE_SIZE (integer_type_node)))
+ && tree_int_cst_lt (TYPE_SIZE (var_type), TYPE_SIZE (integer_type_node)))
arg_type = integer_type_node;
else
arg_type = var_type;
diff --git a/gcc/c/c-decl.c b/gcc/c/c-decl.c
index 6e7c5895ec6..3abf6b98574 100644
--- a/gcc/c/c-decl.c
+++ b/gcc/c/c-decl.c
@@ -4880,8 +4880,8 @@ check_bitfield_type_and_width (tree *type, tree *width, tree orig_name)
{
struct lang_type *lt = TYPE_LANG_SPECIFIC (*type);
if (!lt
- || w < tree_int_cst_min_precision (lt->enum_min, TYPE_UNSIGNED (*type))
- || w < tree_int_cst_min_precision (lt->enum_max, TYPE_UNSIGNED (*type)))
+ || w < tree_int_cst_min_precision (lt->enum_min, TYPE_SIGN (*type))
+ || w < tree_int_cst_min_precision (lt->enum_max, TYPE_SIGN (*type)))
warning (0, "%qs is narrower than values of its type", name);
}
}
@@ -7605,7 +7605,8 @@ finish_enum (tree enumtype, tree values, tree attributes)
{
tree pair, tem;
tree minnode = 0, maxnode = 0;
- int precision, unsign;
+ int precision;
+ signop sign;
bool toplevel = (file_scope == current_scope);
struct lang_type *lt;
@@ -7632,13 +7633,13 @@ finish_enum (tree enumtype, tree values, tree attributes)
as one of the integral types - the narrowest one that fits, except
that normally we only go as narrow as int - and signed iff any of
the values are negative. */
- unsign = (tree_int_cst_sgn (minnode) >= 0);
- precision = MAX (tree_int_cst_min_precision (minnode, unsign),
- tree_int_cst_min_precision (maxnode, unsign));
+ sign = (tree_int_cst_sgn (minnode) >= 0) ? UNSIGNED : SIGNED;
+ precision = MAX (tree_int_cst_min_precision (minnode, sign),
+ tree_int_cst_min_precision (maxnode, sign));
if (TYPE_PACKED (enumtype) || precision > TYPE_PRECISION (integer_type_node))
{
- tem = c_common_type_for_size (precision, unsign);
+ tem = c_common_type_for_size (precision, sign == UNSIGNED ? 1 : 0);
if (tem == NULL)
{
warning (0, "enumeration values exceed range of largest integer");
@@ -7646,7 +7647,7 @@ finish_enum (tree enumtype, tree values, tree attributes)
}
}
else
- tem = unsign ? unsigned_type_node : integer_type_node;
+ tem = sign == UNSIGNED ? unsigned_type_node : integer_type_node;
TYPE_MIN_VALUE (enumtype) = TYPE_MIN_VALUE (tem);
TYPE_MAX_VALUE (enumtype) = TYPE_MAX_VALUE (tem);
diff --git a/gcc/c/c-parser.c b/gcc/c/c-parser.c
index 7947355c215..6e8f33bdac1 100644
--- a/gcc/c/c-parser.c
+++ b/gcc/c/c-parser.c
@@ -13616,7 +13616,7 @@ c_parser_cilk_clause_vectorlength (c_parser *parser, tree clauses,
|| !INTEGRAL_TYPE_P (TREE_TYPE (expr)))
error_at (loc, "vectorlength must be an integer constant");
- else if (exact_log2 (TREE_INT_CST_LOW (expr)) == -1)
+ else if (wi::exact_log2 (expr) == -1)
error_at (loc, "vectorlength must be a power of 2");
else
{
diff --git a/gcc/c/c-typeck.c b/gcc/c/c-typeck.c
index 65fb035ae1d..7d2df6b1d12 100644
--- a/gcc/c/c-typeck.c
+++ b/gcc/c/c-typeck.c
@@ -50,6 +50,7 @@ along with GCC; see the file COPYING3. If not see
#include "c-family/c-common.h"
#include "c-family/c-ubsan.h"
#include "cilk.h"
+#include "wide-int.h"
/* Possible cases of implicit bad conversions. Used to select
diagnostic messages in convert_for_assignment. */
@@ -5126,9 +5127,7 @@ build_c_cast (location_t loc, tree type, tree expr)
}
else if (TREE_OVERFLOW (value))
/* Reset VALUE's overflow flags, ensuring constant sharing. */
- value = build_int_cst_wide (TREE_TYPE (value),
- TREE_INT_CST_LOW (value),
- TREE_INT_CST_HIGH (value));
+ value = wide_int_to_tree (TREE_TYPE (value), value);
}
}
@@ -8078,20 +8077,20 @@ set_nonincremental_init_from_string (tree str,
{
if (wchar_bytes == 1)
{
- val[1] = (unsigned char) *p++;
- val[0] = 0;
+ val[0] = (unsigned char) *p++;
+ val[1] = 0;
}
else
{
- val[0] = 0;
val[1] = 0;
+ val[0] = 0;
for (byte = 0; byte < wchar_bytes; byte++)
{
if (BYTES_BIG_ENDIAN)
bitpos = (wchar_bytes - byte - 1) * charwidth;
else
bitpos = byte * charwidth;
- val[bitpos < HOST_BITS_PER_WIDE_INT]
+ val[bitpos % HOST_BITS_PER_WIDE_INT]
|= ((unsigned HOST_WIDE_INT) ((unsigned char) *p++))
<< (bitpos % HOST_BITS_PER_WIDE_INT);
}
@@ -8102,24 +8101,26 @@ set_nonincremental_init_from_string (tree str,
bitpos = ((wchar_bytes - 1) * charwidth) + HOST_BITS_PER_CHAR;
if (bitpos < HOST_BITS_PER_WIDE_INT)
{
- if (val[1] & (((HOST_WIDE_INT) 1) << (bitpos - 1)))
+ if (val[0] & (((HOST_WIDE_INT) 1) << (bitpos - 1)))
{
- val[1] |= ((HOST_WIDE_INT) -1) << bitpos;
- val[0] = -1;
+ val[0] |= ((HOST_WIDE_INT) -1) << bitpos;
+ val[1] = -1;
}
}
else if (bitpos == HOST_BITS_PER_WIDE_INT)
{
- if (val[1] < 0)
- val[0] = -1;
+ if (val[0] < 0)
+ val[1] = -1;
}
- else if (val[0] & (((HOST_WIDE_INT) 1)
+ else if (val[1] & (((HOST_WIDE_INT) 1)
<< (bitpos - 1 - HOST_BITS_PER_WIDE_INT)))
- val[0] |= ((HOST_WIDE_INT) -1)
+ val[1] |= ((HOST_WIDE_INT) -1)
<< (bitpos - HOST_BITS_PER_WIDE_INT);
}
- value = build_int_cst_wide (type, val[1], val[0]);
+ value = wide_int_to_tree (type,
+ wide_int::from_array (val, 2,
+ HOST_BITS_PER_WIDE_INT * 2));
add_pending_init (input_location, purpose, value, NULL_TREE, true,
braced_init_obstack);
}
@@ -12365,8 +12366,7 @@ c_tree_equal (tree t1, tree t2)
switch (code1)
{
case INTEGER_CST:
- return TREE_INT_CST_LOW (t1) == TREE_INT_CST_LOW (t2)
- && TREE_INT_CST_HIGH (t1) == TREE_INT_CST_HIGH (t2);
+ return wi::eq_p (t1, t2);
case REAL_CST:
return REAL_VALUES_EQUAL (TREE_REAL_CST (t1), TREE_REAL_CST (t2));
diff --git a/gcc/cfgloop.c b/gcc/cfgloop.c
index 70744d83d19..91e8800560b 100644
--- a/gcc/cfgloop.c
+++ b/gcc/cfgloop.c
@@ -336,7 +336,8 @@ alloc_loop (void)
loop->exits = ggc_alloc_cleared_loop_exit ();
loop->exits->next = loop->exits->prev = loop->exits;
loop->can_be_parallel = false;
-
+ loop->nb_iterations_upper_bound = 0;
+ loop->nb_iterations_estimate = 0;
return loop;
}
@@ -1787,21 +1788,21 @@ get_loop_location (struct loop *loop)
I_BOUND times. */
void
-record_niter_bound (struct loop *loop, double_int i_bound, bool realistic,
- bool upper)
+record_niter_bound (struct loop *loop, const widest_int &i_bound,
+ bool realistic, bool upper)
{
/* Update the bounds only when there is no previous estimation, or when the
current estimation is smaller. */
if (upper
&& (!loop->any_upper_bound
- || i_bound.ult (loop->nb_iterations_upper_bound)))
+ || wi::ltu_p (i_bound, loop->nb_iterations_upper_bound)))
{
loop->any_upper_bound = true;
loop->nb_iterations_upper_bound = i_bound;
}
if (realistic
&& (!loop->any_estimate
- || i_bound.ult (loop->nb_iterations_estimate)))
+ || wi::ltu_p (i_bound, loop->nb_iterations_estimate)))
{
loop->any_estimate = true;
loop->nb_iterations_estimate = i_bound;
@@ -1811,7 +1812,8 @@ record_niter_bound (struct loop *loop, double_int i_bound, bool realistic,
number of iterations, use the upper bound instead. */
if (loop->any_upper_bound
&& loop->any_estimate
- && loop->nb_iterations_upper_bound.ult (loop->nb_iterations_estimate))
+ && wi::ltu_p (loop->nb_iterations_upper_bound,
+ loop->nb_iterations_estimate))
loop->nb_iterations_estimate = loop->nb_iterations_upper_bound;
}
@@ -1822,13 +1824,13 @@ record_niter_bound (struct loop *loop, double_int i_bound, bool realistic,
HOST_WIDE_INT
get_estimated_loop_iterations_int (struct loop *loop)
{
- double_int nit;
+ widest_int nit;
HOST_WIDE_INT hwi_nit;
if (!get_estimated_loop_iterations (loop, &nit))
return -1;
- if (!nit.fits_shwi ())
+ if (!wi::fits_shwi_p (nit))
return -1;
hwi_nit = nit.to_shwi ();
@@ -1859,7 +1861,7 @@ max_stmt_executions_int (struct loop *loop)
returns true. */
bool
-get_estimated_loop_iterations (struct loop *loop, double_int *nit)
+get_estimated_loop_iterations (struct loop *loop, widest_int *nit)
{
/* Even if the bound is not recorded, possibly we can derrive one from
profile. */
@@ -1867,7 +1869,7 @@ get_estimated_loop_iterations (struct loop *loop, double_int *nit)
{
if (loop->header->count)
{
- *nit = gcov_type_to_double_int
+ *nit = gcov_type_to_wide_int
(expected_loop_iterations_unbounded (loop) + 1);
return true;
}
@@ -1883,7 +1885,7 @@ get_estimated_loop_iterations (struct loop *loop, double_int *nit)
false, otherwise returns true. */
bool
-get_max_loop_iterations (struct loop *loop, double_int *nit)
+get_max_loop_iterations (struct loop *loop, widest_int *nit)
{
if (!loop->any_upper_bound)
return false;
@@ -1899,13 +1901,13 @@ get_max_loop_iterations (struct loop *loop, double_int *nit)
HOST_WIDE_INT
get_max_loop_iterations_int (struct loop *loop)
{
- double_int nit;
+ widest_int nit;
HOST_WIDE_INT hwi_nit;
if (!get_max_loop_iterations (loop, &nit))
return -1;
- if (!nit.fits_shwi ())
+ if (!wi::fits_shwi_p (nit))
return -1;
hwi_nit = nit.to_shwi ();
diff --git a/gcc/cfgloop.h b/gcc/cfgloop.h
index 10a00eab177..ab8b8090e98 100644
--- a/gcc/cfgloop.h
+++ b/gcc/cfgloop.h
@@ -21,6 +21,7 @@ along with GCC; see the file COPYING3. If not see
#define GCC_CFGLOOP_H
#include "double-int.h"
+#include "wide-int.h"
#include "bitmap.h"
#include "sbitmap.h"
#include "function.h"
@@ -62,7 +63,7 @@ struct GTY ((chain_next ("%h.next"))) nb_iter_bound {
overflows (as MAX + 1 is sometimes produced as the estimate on number
of executions of STMT).
b) it is consistent with the result of number_of_iterations_exit. */
- double_int bound;
+ widest_int bound;
/* True if the statement will cause the loop to be leaved the (at most)
BOUND + 1-st time it is executed, that is, all the statements after it
@@ -146,12 +147,12 @@ struct GTY ((chain_next ("%h.next"))) loop {
/* An integer guaranteed to be greater or equal to nb_iterations. Only
valid if any_upper_bound is true. */
- double_int nb_iterations_upper_bound;
+ widest_int nb_iterations_upper_bound;
/* An integer giving an estimate on nb_iterations. Unlike
nb_iterations_upper_bound, there is no guarantee that it is at least
nb_iterations. */
- double_int nb_iterations_estimate;
+ widest_int nb_iterations_estimate;
bool any_upper_bound;
bool any_estimate;
@@ -737,27 +738,27 @@ loop_outermost (struct loop *loop)
return (*loop->superloops)[1];
}
-extern void record_niter_bound (struct loop *, double_int, bool, bool);
+extern void record_niter_bound (struct loop *, const widest_int &, bool, bool);
extern HOST_WIDE_INT get_estimated_loop_iterations_int (struct loop *);
extern HOST_WIDE_INT get_max_loop_iterations_int (struct loop *);
-extern bool get_estimated_loop_iterations (struct loop *loop, double_int *nit);
-extern bool get_max_loop_iterations (struct loop *loop, double_int *nit);
+extern bool get_estimated_loop_iterations (struct loop *loop, widest_int *nit);
+extern bool get_max_loop_iterations (struct loop *loop, widest_int *nit);
extern int bb_loop_depth (const_basic_block);
-/* Converts VAL to double_int. */
+/* Converts VAL to widest_int. */
-static inline double_int
-gcov_type_to_double_int (gcov_type val)
+static inline widest_int
+gcov_type_to_wide_int (gcov_type val)
{
- double_int ret;
+ HOST_WIDE_INT a[2];
- ret.low = (unsigned HOST_WIDE_INT) val;
+ a[0] = (unsigned HOST_WIDE_INT) val;
/* If HOST_BITS_PER_WIDE_INT == HOST_BITS_PER_WIDEST_INT, avoid shifting by
the size of type. */
val >>= HOST_BITS_PER_WIDE_INT - 1;
val >>= 1;
- ret.high = (unsigned HOST_WIDE_INT) val;
+ a[1] = (unsigned HOST_WIDE_INT) val;
- return ret;
+ return widest_int::from_array (a, 2);
}
#endif /* GCC_CFGLOOP_H */
diff --git a/gcc/cgraph.c b/gcc/cgraph.c
index 5b773e30036..2b4ce813c90 100644
--- a/gcc/cgraph.c
+++ b/gcc/cgraph.c
@@ -650,8 +650,7 @@ cgraph_add_thunk (struct cgraph_node *decl_node ATTRIBUTE_UNUSED,
node = cgraph_create_node (alias);
gcc_checking_assert (!virtual_offset
- || tree_to_double_int (virtual_offset) ==
- double_int::from_shwi (virtual_value));
+ || wi::eq_p (virtual_offset, virtual_value));
node->thunk.fixed_offset = fixed_offset;
node->thunk.this_adjusting = this_adjusting;
node->thunk.virtual_value = virtual_value;
diff --git a/gcc/combine.c b/gcc/combine.c
index 1096bad6c74..a2f42c58ccd 100644
--- a/gcc/combine.c
+++ b/gcc/combine.c
@@ -2671,22 +2671,15 @@ try_combine (rtx i3, rtx i2, rtx i1, rtx i0, int *new_direct_jump_p,
offset = -1;
}
- if (offset >= 0
- && (GET_MODE_PRECISION (GET_MODE (SET_DEST (temp)))
- <= HOST_BITS_PER_DOUBLE_INT))
+ if (offset >= 0)
{
- double_int m, o, i;
rtx inner = SET_SRC (PATTERN (i3));
rtx outer = SET_SRC (temp);
- o = rtx_to_double_int (outer);
- i = rtx_to_double_int (inner);
-
- m = double_int::mask (width);
- i &= m;
- m = m.llshift (offset, HOST_BITS_PER_DOUBLE_INT);
- i = i.llshift (offset, HOST_BITS_PER_DOUBLE_INT);
- o = o.and_not (m) | i;
+ wide_int o
+ = wi::insert (std::make_pair (outer, GET_MODE (SET_DEST (temp))),
+ std::make_pair (inner, GET_MODE (dest)),
+ offset, width);
combine_merges++;
subst_insn = i3;
@@ -2699,7 +2692,7 @@ try_combine (rtx i3, rtx i2, rtx i1, rtx i0, int *new_direct_jump_p,
resulting insn the new pattern for I3. Then skip to where we
validate the pattern. Everything was set up above. */
SUBST (SET_SRC (temp),
- immed_double_int_const (o, GET_MODE (SET_DEST (temp))));
+ immed_wide_int_const (o, GET_MODE (SET_DEST (temp))));
newpat = PATTERN (i2);
@@ -5139,7 +5132,7 @@ subst (rtx x, rtx from, rtx to, int in_dest, int in_cond, int unique_copy)
if (! x)
x = gen_rtx_CLOBBER (mode, const0_rtx);
}
- else if (CONST_INT_P (new_rtx)
+ else if (CONST_SCALAR_INT_P (new_rtx)
&& GET_CODE (x) == ZERO_EXTEND)
{
x = simplify_unary_operation (ZERO_EXTEND, GET_MODE (x),
diff --git a/gcc/config/aarch64/aarch64.c b/gcc/config/aarch64/aarch64.c
index 8655f049ba4..6a6fb032647 100644
--- a/gcc/config/aarch64/aarch64.c
+++ b/gcc/config/aarch64/aarch64.c
@@ -6132,8 +6132,10 @@ aapcs_vfp_sub_candidate (const_tree type, enum machine_mode *modep)
int count;
tree index = TYPE_DOMAIN (type);
- /* Can't handle incomplete types. */
- if (!COMPLETE_TYPE_P (type))
+ /* Can't handle incomplete types nor sizes that are not
+ fixed. */
+ if (!COMPLETE_TYPE_P (type)
+ || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
return -1;
count = aapcs_vfp_sub_candidate (TREE_TYPE (type), modep);
@@ -6150,9 +6152,7 @@ aapcs_vfp_sub_candidate (const_tree type, enum machine_mode *modep)
- tree_to_uhwi (TYPE_MIN_VALUE (index)));
/* There must be no padding. */
- if (!tree_fits_uhwi_p (TYPE_SIZE (type))
- || ((HOST_WIDE_INT) tree_to_uhwi (TYPE_SIZE (type))
- != count * GET_MODE_BITSIZE (*modep)))
+ if (wi::ne_p (TYPE_SIZE (type), count * GET_MODE_BITSIZE (*modep)))
return -1;
return count;
@@ -6164,8 +6164,10 @@ aapcs_vfp_sub_candidate (const_tree type, enum machine_mode *modep)
int sub_count;
tree field;
- /* Can't handle incomplete types. */
- if (!COMPLETE_TYPE_P (type))
+ /* Can't handle incomplete types nor sizes that are not
+ fixed. */
+ if (!COMPLETE_TYPE_P (type)
+ || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
return -1;
for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
@@ -6180,9 +6182,7 @@ aapcs_vfp_sub_candidate (const_tree type, enum machine_mode *modep)
}
/* There must be no padding. */
- if (!tree_fits_uhwi_p (TYPE_SIZE (type))
- || ((HOST_WIDE_INT) tree_to_uhwi (TYPE_SIZE (type))
- != count * GET_MODE_BITSIZE (*modep)))
+ if (wi::ne_p (TYPE_SIZE (type), count * GET_MODE_BITSIZE (*modep)))
return -1;
return count;
@@ -6196,8 +6196,10 @@ aapcs_vfp_sub_candidate (const_tree type, enum machine_mode *modep)
int sub_count;
tree field;
- /* Can't handle incomplete types. */
- if (!COMPLETE_TYPE_P (type))
+ /* Can't handle incomplete types nor sizes that are not
+ fixed. */
+ if (!COMPLETE_TYPE_P (type)
+ || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
return -1;
for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
@@ -6212,9 +6214,7 @@ aapcs_vfp_sub_candidate (const_tree type, enum machine_mode *modep)
}
/* There must be no padding. */
- if (!tree_fits_uhwi_p (TYPE_SIZE (type))
- || ((HOST_WIDE_INT) tree_to_uhwi (TYPE_SIZE (type))
- != count * GET_MODE_BITSIZE (*modep)))
+ if (wi::ne_p (TYPE_SIZE (type), count * GET_MODE_BITSIZE (*modep)))
return -1;
return count;
@@ -7557,8 +7557,8 @@ aarch64_float_const_representable_p (rtx x)
int point_pos = 2 * HOST_BITS_PER_WIDE_INT - 1;
int exponent;
unsigned HOST_WIDE_INT mantissa, mask;
- HOST_WIDE_INT m1, m2;
REAL_VALUE_TYPE r, m;
+ bool fail;
if (!CONST_DOUBLE_P (x))
return false;
@@ -7582,16 +7582,16 @@ aarch64_float_const_representable_p (rtx x)
WARNING: If we ever have a representation using more than 2 * H_W_I - 1
bits for the mantissa, this can fail (low bits will be lost). */
real_ldexp (&m, &r, point_pos - exponent);
- REAL_VALUE_TO_INT (&m1, &m2, m);
+ wide_int w = real_to_integer (&m, &fail, HOST_BITS_PER_WIDE_INT * 2);
/* If the low part of the mantissa has bits set we cannot represent
the value. */
- if (m1 != 0)
+ if (w.elt (0) != 0)
return false;
/* We have rejected the lower HOST_WIDE_INT, so update our
understanding of how many bits lie in the mantissa and
look only at the high HOST_WIDE_INT. */
- mantissa = m2;
+ mantissa = w.elt (1);
point_pos -= HOST_BITS_PER_WIDE_INT;
/* We can only represent values with a mantissa of the form 1.xxxx. */
diff --git a/gcc/config/arc/arc.c b/gcc/config/arc/arc.c
index 6d8dff658a9..113395bd5de 100644
--- a/gcc/config/arc/arc.c
+++ b/gcc/config/arc/arc.c
@@ -65,6 +65,7 @@ along with GCC; see the file COPYING3. If not see
#include "tree-pass.h"
#include "context.h"
#include "pass_manager.h"
+#include "wide-int.h"
/* Which cpu we're compiling for (A5, ARC600, ARC601, ARC700). */
static const char *arc_cpu_string = "";
@@ -391,7 +392,8 @@ static bool arc_return_in_memory (const_tree, const_tree);
static void arc_init_simd_builtins (void);
static bool arc_vector_mode_supported_p (enum machine_mode);
-static bool arc_can_use_doloop_p (double_int, double_int, unsigned int, bool);
+static bool arc_can_use_doloop_p (const widest_int &, const widest_int &,
+ unsigned int, bool);
static const char *arc_invalid_within_doloop (const_rtx);
static void output_short_suffix (FILE *file);
@@ -5700,7 +5702,7 @@ arc_pass_by_reference (cumulative_args_t ca_v ATTRIBUTE_UNUSED,
/* Implement TARGET_CAN_USE_DOLOOP_P. */
static bool
-arc_can_use_doloop_p (double_int iterations, double_int,
+arc_can_use_doloop_p (const widest_int &iterations, const widest_int &,
unsigned int loop_depth, bool entered_at_top)
{
if (loop_depth > 1)
@@ -5708,9 +5710,8 @@ arc_can_use_doloop_p (double_int iterations, double_int,
/* Setting up the loop with two sr instructions costs 6 cycles. */
if (TARGET_ARC700
&& !entered_at_top
- && iterations.high == 0
- && iterations.low > 0
- && iterations.low <= (flag_pic ? 6 : 3))
+ && wi::gtu_p (iterations, 0)
+ && wi::leu_p (iterations, flag_pic ? 6 : 3))
return false;
return true;
}
diff --git a/gcc/config/arm/arm.c b/gcc/config/arm/arm.c
index 16fc7edd96c..1e44080d601 100644
--- a/gcc/config/arm/arm.c
+++ b/gcc/config/arm/arm.c
@@ -5121,8 +5121,10 @@ aapcs_vfp_sub_candidate (const_tree type, enum machine_mode *modep)
int count;
tree index = TYPE_DOMAIN (type);
- /* Can't handle incomplete types. */
- if (!COMPLETE_TYPE_P (type))
+ /* Can't handle incomplete types nor sizes that are not
+ fixed. */
+ if (!COMPLETE_TYPE_P (type)
+ || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
return -1;
count = aapcs_vfp_sub_candidate (TREE_TYPE (type), modep);
@@ -5139,9 +5141,7 @@ aapcs_vfp_sub_candidate (const_tree type, enum machine_mode *modep)
- tree_to_uhwi (TYPE_MIN_VALUE (index)));
/* There must be no padding. */
- if (!tree_fits_uhwi_p (TYPE_SIZE (type))
- || ((HOST_WIDE_INT) tree_to_uhwi (TYPE_SIZE (type))
- != count * GET_MODE_BITSIZE (*modep)))
+ if (wi::ne_p (TYPE_SIZE (type), count * GET_MODE_BITSIZE (*modep)))
return -1;
return count;
@@ -5153,8 +5153,10 @@ aapcs_vfp_sub_candidate (const_tree type, enum machine_mode *modep)
int sub_count;
tree field;
- /* Can't handle incomplete types. */
- if (!COMPLETE_TYPE_P (type))
+ /* Can't handle incomplete types nor sizes that are not
+ fixed. */
+ if (!COMPLETE_TYPE_P (type)
+ || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
return -1;
for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
@@ -5169,9 +5171,7 @@ aapcs_vfp_sub_candidate (const_tree type, enum machine_mode *modep)
}
/* There must be no padding. */
- if (!tree_fits_uhwi_p (TYPE_SIZE (type))
- || ((HOST_WIDE_INT) tree_to_uhwi (TYPE_SIZE (type))
- != count * GET_MODE_BITSIZE (*modep)))
+ if (wi::ne_p (TYPE_SIZE (type), count * GET_MODE_BITSIZE (*modep)))
return -1;
return count;
@@ -5185,8 +5185,10 @@ aapcs_vfp_sub_candidate (const_tree type, enum machine_mode *modep)
int sub_count;
tree field;
- /* Can't handle incomplete types. */
- if (!COMPLETE_TYPE_P (type))
+ /* Can't handle incomplete types nor sizes that are not
+ fixed. */
+ if (!COMPLETE_TYPE_P (type)
+ || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
return -1;
for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
@@ -5201,9 +5203,7 @@ aapcs_vfp_sub_candidate (const_tree type, enum machine_mode *modep)
}
/* There must be no padding. */
- if (!tree_fits_uhwi_p (TYPE_SIZE (type))
- || ((HOST_WIDE_INT) tree_to_uhwi (TYPE_SIZE (type))
- != count * GET_MODE_BITSIZE (*modep)))
+ if (wi::ne_p (TYPE_SIZE (type), count * GET_MODE_BITSIZE (*modep)))
return -1;
return count;
@@ -11920,8 +11920,8 @@ vfp3_const_double_index (rtx x)
int sign, exponent;
unsigned HOST_WIDE_INT mantissa, mant_hi;
unsigned HOST_WIDE_INT mask;
- HOST_WIDE_INT m1, m2;
int point_pos = 2 * HOST_BITS_PER_WIDE_INT - 1;
+ bool fail;
if (!TARGET_VFP3 || !CONST_DOUBLE_P (x))
return -1;
@@ -11941,9 +11941,9 @@ vfp3_const_double_index (rtx x)
WARNING: If there's ever a VFP version which uses more than 2 * H_W_I - 1
bits for the mantissa, this may fail (low bits would be lost). */
real_ldexp (&m, &r, point_pos - exponent);
- REAL_VALUE_TO_INT (&m1, &m2, m);
- mantissa = m1;
- mant_hi = m2;
+ wide_int w = real_to_integer (&m, &fail, HOST_BITS_PER_WIDE_INT * 2);
+ mantissa = w.elt (0);
+ mant_hi = w.elt (1);
/* If there are bits set in the low part of the mantissa, we can't
represent this value. */
diff --git a/gcc/config/avr/avr.c b/gcc/config/avr/avr.c
index 8935c158ab9..2edc78ac041 100644
--- a/gcc/config/avr/avr.c
+++ b/gcc/config/avr/avr.c
@@ -7566,6 +7566,8 @@ avr_out_round (rtx insn ATTRIBUTE_UNUSED, rtx *xop, int *plen)
// The smallest fractional bit not cleared by the rounding is 2^(-RP).
int fbit = (int) GET_MODE_FBIT (mode);
double_int i_add = double_int_zero.set_bit (fbit-1 - INTVAL (xop[2]));
+ wide_int wi_add = wi::set_bit_in_zero (fbit-1 - INTVAL (xop[2]),
+ GET_MODE_PRECISION (imode));
// Lengths of PLUS and AND parts.
int len_add = 0, *plen_add = plen ? &len_add : NULL;
int len_and = 0, *plen_and = plen ? &len_and : NULL;
@@ -7595,7 +7597,7 @@ avr_out_round (rtx insn ATTRIBUTE_UNUSED, rtx *xop, int *plen)
// Rounding point ^^^^^^^
// Added above ^^^^^^^^^
rtx xreg = simplify_gen_subreg (imode, xop[0], mode, 0);
- rtx xmask = immed_double_int_const (-i_add - i_add, imode);
+ rtx xmask = immed_wide_int_const (-wi_add - wi_add, imode);
xpattern = gen_rtx_SET (VOIDmode, xreg, gen_rtx_AND (imode, xreg, xmask));
@@ -12246,7 +12248,7 @@ avr_fold_builtin (tree fndecl, int n_args ATTRIBUTE_UNUSED, tree *arg,
break;
}
- tmap = double_int_to_tree (map_type, tree_to_double_int (arg[0]));
+ tmap = wide_int_to_tree (map_type, arg[0]);
map = TREE_INT_CST_LOW (tmap);
if (TREE_CODE (tval) != INTEGER_CST
@@ -12351,8 +12353,7 @@ avr_fold_builtin (tree fndecl, int n_args ATTRIBUTE_UNUSED, tree *arg,
/* Use map o G^-1 instead of original map to undo the effect of G. */
- tmap = double_int_to_tree (map_type,
- double_int::from_uhwi (best_g.map));
+ tmap = wide_int_to_tree (map_type, best_g.map);
return build_call_expr (fndecl, 3, tmap, tbits, tval);
} /* AVR_BUILTIN_INSERT_BITS */
diff --git a/gcc/config/bfin/bfin.c b/gcc/config/bfin/bfin.c
index 8b28211895c..84b2d01c730 100644
--- a/gcc/config/bfin/bfin.c
+++ b/gcc/config/bfin/bfin.c
@@ -3288,8 +3288,8 @@ bfin_local_alignment (tree type, unsigned align)
memcpy can use 32 bit loads/stores. */
if (TYPE_SIZE (type)
&& TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
- && (TREE_INT_CST_LOW (TYPE_SIZE (type)) > 8
- || TREE_INT_CST_HIGH (TYPE_SIZE (type))) && align < 32)
+ && wi::gtu_p (TYPE_SIZE (type), 8)
+ && align < 32)
return 32;
return align;
}
@@ -3371,15 +3371,14 @@ find_prev_insn_start (rtx insn)
/* Implement TARGET_CAN_USE_DOLOOP_P. */
static bool
-bfin_can_use_doloop_p (double_int, double_int iterations_max,
+bfin_can_use_doloop_p (const widest_int &, const widest_int &iterations_max,
unsigned int, bool)
{
/* Due to limitations in the hardware (an initial loop count of 0
does not loop 2^32 times) we must avoid to generate a hardware
loops when we cannot rule out this case. */
if (!flag_unsafe_loop_optimizations
- && (iterations_max.high != 0
- || iterations_max.low >= 0xFFFFFFFF))
+ && wi::geu_p (iterations_max, 0xFFFFFFFF))
return false;
return true;
}
diff --git a/gcc/config/darwin.c b/gcc/config/darwin.c
index adf370d4d07..3c50e24edf2 100644
--- a/gcc/config/darwin.c
+++ b/gcc/config/darwin.c
@@ -1299,22 +1299,17 @@ darwin_mergeable_constant_section (tree exp,
{
tree size = TYPE_SIZE_UNIT (TREE_TYPE (exp));
- if (TREE_CODE (size) == INTEGER_CST
- && TREE_INT_CST_LOW (size) == 4
- && TREE_INT_CST_HIGH (size) == 0)
- return darwin_sections[literal4_section];
- else if (TREE_CODE (size) == INTEGER_CST
- && TREE_INT_CST_LOW (size) == 8
- && TREE_INT_CST_HIGH (size) == 0)
- return darwin_sections[literal8_section];
- else if (HAVE_GAS_LITERAL16
- && TARGET_64BIT
- && TREE_CODE (size) == INTEGER_CST
- && TREE_INT_CST_LOW (size) == 16
- && TREE_INT_CST_HIGH (size) == 0)
- return darwin_sections[literal16_section];
- else
- return readonly_data_section;
+ if (TREE_CODE (size) == INTEGER_CST)
+ {
+ if (wi::eq_p (size, 4))
+ return darwin_sections[literal4_section];
+ else if (wi::eq_p (size, 8))
+ return darwin_sections[literal8_section];
+ else if (HAVE_GAS_LITERAL16
+ && TARGET_64BIT
+ && wi::eq_p (size, 16))
+ return darwin_sections[literal16_section];
+ }
}
return readonly_data_section;
@@ -1741,16 +1736,19 @@ machopic_select_rtx_section (enum machine_mode mode, rtx x,
{
if (GET_MODE_SIZE (mode) == 8
&& (GET_CODE (x) == CONST_INT
+ || GET_CODE (x) == CONST_WIDE_INT
|| GET_CODE (x) == CONST_DOUBLE))
return darwin_sections[literal8_section];
else if (GET_MODE_SIZE (mode) == 4
&& (GET_CODE (x) == CONST_INT
+ || GET_CODE (x) == CONST_WIDE_INT
|| GET_CODE (x) == CONST_DOUBLE))
return darwin_sections[literal4_section];
else if (HAVE_GAS_LITERAL16
&& TARGET_64BIT
&& GET_MODE_SIZE (mode) == 16
&& (GET_CODE (x) == CONST_INT
+ || GET_CODE (x) == CONST_WIDE_INT
|| GET_CODE (x) == CONST_DOUBLE
|| GET_CODE (x) == CONST_VECTOR))
return darwin_sections[literal16_section];
diff --git a/gcc/config/i386/i386.c b/gcc/config/i386/i386.c
index 04085c98209..01ad5e50193 100644
--- a/gcc/config/i386/i386.c
+++ b/gcc/config/i386/i386.c
@@ -78,6 +78,7 @@ along with GCC; see the file COPYING3. If not see
#include "diagnostic.h"
#include "dumpfile.h"
#include "tree-pass.h"
+#include "wide-int.h"
#include "context.h"
#include "pass_manager.h"
#include "target-globals.h"
@@ -26582,14 +26583,12 @@ ix86_data_alignment (tree type, int align, bool opt)
&& TYPE_SIZE (type)
&& TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST)
{
- if ((TREE_INT_CST_LOW (TYPE_SIZE (type)) >= (unsigned) max_align_compat
- || TREE_INT_CST_HIGH (TYPE_SIZE (type)))
+ if (wi::geu_p (TYPE_SIZE (type), max_align_compat)
&& align < max_align_compat)
align = max_align_compat;
- if ((TREE_INT_CST_LOW (TYPE_SIZE (type)) >= (unsigned) max_align
- || TREE_INT_CST_HIGH (TYPE_SIZE (type)))
- && align < max_align)
- align = max_align;
+ if (wi::geu_p (TYPE_SIZE (type), max_align)
+ && align < max_align)
+ align = max_align;
}
/* x86-64 ABI requires arrays greater than 16 bytes to be aligned
@@ -26599,8 +26598,8 @@ ix86_data_alignment (tree type, int align, bool opt)
if ((opt ? AGGREGATE_TYPE_P (type) : TREE_CODE (type) == ARRAY_TYPE)
&& TYPE_SIZE (type)
&& TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
- && (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= 128
- || TREE_INT_CST_HIGH (TYPE_SIZE (type))) && align < 128)
+ && wi::geu_p (TYPE_SIZE (type), 128)
+ && align < 128)
return 128;
}
@@ -26709,13 +26708,13 @@ ix86_local_alignment (tree exp, enum machine_mode mode,
&& TARGET_SSE)
{
if (AGGREGATE_TYPE_P (type)
- && (va_list_type_node == NULL_TREE
- || (TYPE_MAIN_VARIANT (type)
- != TYPE_MAIN_VARIANT (va_list_type_node)))
- && TYPE_SIZE (type)
- && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
- && (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= 16
- || TREE_INT_CST_HIGH (TYPE_SIZE (type))) && align < 128)
+ && (va_list_type_node == NULL_TREE
+ || (TYPE_MAIN_VARIANT (type)
+ != TYPE_MAIN_VARIANT (va_list_type_node)))
+ && TYPE_SIZE (type)
+ && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
+ && wi::geu_p (TYPE_SIZE (type), 16)
+ && align < 128)
return 128;
}
if (TREE_CODE (type) == ARRAY_TYPE)
@@ -41375,7 +41374,7 @@ void ix86_emit_swsqrtsf (rtx res, rtx a, enum machine_mode mode,
e2 = gen_reg_rtx (mode);
e3 = gen_reg_rtx (mode);
- real_from_integer (&r, VOIDmode, -3, -1, 0);
+ real_from_integer (&r, VOIDmode, -3, SIGNED);
mthree = CONST_DOUBLE_FROM_REAL_VALUE (r, SFmode);
real_arithmetic (&r, NEGATE_EXPR, &dconsthalf, NULL);
diff --git a/gcc/config/msp430/msp430.c b/gcc/config/msp430/msp430.c
index c844aa2a12c..1ec96526efd 100644
--- a/gcc/config/msp430/msp430.c
+++ b/gcc/config/msp430/msp430.c
@@ -1085,7 +1085,7 @@ msp430_attr (tree * node,
break;
case INTEGER_CST:
- if (TREE_INT_CST_LOW (value) > 63)
+ if (wi::gtu_p (value, 63))
/* Allow the attribute to be added - the linker script
being used may still recognise this value. */
warning (OPT_Wattributes,
diff --git a/gcc/config/nds32/nds32.c b/gcc/config/nds32/nds32.c
index 645d8dda335..6e5595c09f5 100644
--- a/gcc/config/nds32/nds32.c
+++ b/gcc/config/nds32/nds32.c
@@ -3148,8 +3148,8 @@ nds32_insert_attributes (tree decl, tree *attributes)
id = TREE_VALUE (id_list);
/* Issue error if it is not a valid integer value. */
if (TREE_CODE (id) != INTEGER_CST
- || TREE_INT_CST_LOW (id) < lower_bound
- || TREE_INT_CST_LOW (id) > upper_bound)
+ || wi::ltu_p (id, lower_bound)
+ || wi::gtu_p (id, upper_bound))
error ("invalid id value for interrupt/exception attribute");
/* Advance to next id. */
@@ -3176,8 +3176,8 @@ nds32_insert_attributes (tree decl, tree *attributes)
/* 3. Check valid integer value for reset. */
if (TREE_CODE (id) != INTEGER_CST
- || TREE_INT_CST_LOW (id) < lower_bound
- || TREE_INT_CST_LOW (id) > upper_bound)
+ || wi::ltu_p (id, lower_bound)
+ || wi::gtu_p (id, upper_bound))
error ("invalid id value for reset attribute");
/* 4. Check valid function for nmi/warm. */
diff --git a/gcc/config/rs6000/predicates.md b/gcc/config/rs6000/predicates.md
index 8c384b3808a..47050c3d03e 100644
--- a/gcc/config/rs6000/predicates.md
+++ b/gcc/config/rs6000/predicates.md
@@ -19,7 +19,7 @@
;; Return 1 for anything except PARALLEL.
(define_predicate "any_operand"
- (match_code "const_int,const_double,const,symbol_ref,label_ref,subreg,reg,mem"))
+ (match_code "const_int,const_double,const_wide_int,const,symbol_ref,label_ref,subreg,reg,mem"))
;; Return 1 for any PARALLEL.
(define_predicate "any_parallel_operand"
@@ -601,7 +601,7 @@
;; Return 1 if operand is constant zero (scalars and vectors).
(define_predicate "zero_constant"
- (and (match_code "const_int,const_double,const_vector")
+ (and (match_code "const_int,const_double,const_wide_int,const_vector")
(match_test "op == CONST0_RTX (mode)")))
;; Return 1 if operand is 0.0.
@@ -796,7 +796,7 @@
;; Return 1 if op is a constant that is not a logical operand, but could
;; be split into one.
(define_predicate "non_logical_cint_operand"
- (and (match_code "const_int,const_double")
+ (and (match_code "const_int,const_wide_int")
(and (not (match_operand 0 "logical_operand"))
(match_operand 0 "reg_or_logical_cint_operand"))))
@@ -1073,7 +1073,7 @@
;; Return 1 if this operand is a valid input for a move insn.
(define_predicate "input_operand"
(match_code "symbol_ref,const,reg,subreg,mem,
- const_double,const_vector,const_int")
+ const_double,const_wide_int,const_vector,const_int")
{
/* Memory is always valid. */
if (memory_operand (op, mode))
@@ -1086,8 +1086,7 @@
/* Allow any integer constant. */
if (GET_MODE_CLASS (mode) == MODE_INT
- && (GET_CODE (op) == CONST_INT
- || GET_CODE (op) == CONST_DOUBLE))
+ && CONST_SCALAR_INT_P (op))
return 1;
/* Allow easy vector constants. */
@@ -1126,7 +1125,7 @@
;; Return 1 if this operand is a valid input for a vsx_splat insn.
(define_predicate "splat_input_operand"
(match_code "symbol_ref,const,reg,subreg,mem,
- const_double,const_vector,const_int")
+ const_double,const_wide_int,const_vector,const_int")
{
if (MEM_P (op))
{
diff --git a/gcc/config/rs6000/rs6000-c.c b/gcc/config/rs6000/rs6000-c.c
index 46c4a9d8c2e..2b9cf7a0b38 100644
--- a/gcc/config/rs6000/rs6000-c.c
+++ b/gcc/config/rs6000/rs6000-c.c
@@ -28,6 +28,7 @@
#include "tree.h"
#include "stor-layout.h"
#include "stringpool.h"
+#include "wide-int.h"
#include "c-family/c-common.h"
#include "c-family/c-pragma.h"
#include "diagnostic-core.h"
@@ -4304,8 +4305,7 @@ altivec_resolve_overloaded_builtin (location_t loc, tree fndecl,
mode = TYPE_MODE (arg1_type);
if ((mode == V2DFmode || mode == V2DImode) && VECTOR_MEM_VSX_P (mode)
&& TREE_CODE (arg2) == INTEGER_CST
- && TREE_INT_CST_HIGH (arg2) == 0
- && (TREE_INT_CST_LOW (arg2) == 0 || TREE_INT_CST_LOW (arg2) == 1))
+ && wi::ltu_p (arg2, 2))
{
tree call = NULL_TREE;
@@ -4319,8 +4319,7 @@ altivec_resolve_overloaded_builtin (location_t loc, tree fndecl,
}
else if (mode == V1TImode && VECTOR_MEM_VSX_P (mode)
&& TREE_CODE (arg2) == INTEGER_CST
- && TREE_INT_CST_HIGH (arg2) == 0
- && TREE_INT_CST_LOW (arg2) == 0)
+ && wi::eq_p (arg2, 0))
{
tree call = rs6000_builtin_decls[VSX_BUILTIN_VEC_EXT_V1TI];
return build_call_expr (call, 2, arg1, arg2);
@@ -4409,8 +4408,7 @@ altivec_resolve_overloaded_builtin (location_t loc, tree fndecl,
mode = TYPE_MODE (arg1_type);
if ((mode == V2DFmode || mode == V2DImode) && VECTOR_UNIT_VSX_P (mode)
&& TREE_CODE (arg2) == INTEGER_CST
- && TREE_INT_CST_HIGH (arg2) == 0
- && (TREE_INT_CST_LOW (arg2) == 0 || TREE_INT_CST_LOW (arg2) == 1))
+ && wi::ltu_p (arg2, 2))
{
tree call = NULL_TREE;
@@ -4426,8 +4424,7 @@ altivec_resolve_overloaded_builtin (location_t loc, tree fndecl,
}
else if (mode == V1TImode && VECTOR_UNIT_VSX_P (mode)
&& TREE_CODE (arg2) == INTEGER_CST
- && TREE_INT_CST_HIGH (arg2) == 0
- && TREE_INT_CST_LOW (arg2) == 0)
+ && wi::eq_p (arg2, 0))
{
tree call = rs6000_builtin_decls[VSX_BUILTIN_VEC_SET_V1TI];
diff --git a/gcc/config/rs6000/rs6000.c b/gcc/config/rs6000/rs6000.c
index bb521a934f4..e4a68347f57 100644
--- a/gcc/config/rs6000/rs6000.c
+++ b/gcc/config/rs6000/rs6000.c
@@ -4969,6 +4969,15 @@ num_insns_constant (rtx op, enum machine_mode mode)
else
return num_insns_constant_wide (INTVAL (op));
+ case CONST_WIDE_INT:
+ {
+ int i;
+ int ins = CONST_WIDE_INT_NUNITS (op) - 1;
+ for (i = 0; i < CONST_WIDE_INT_NUNITS (op); i++)
+ ins += num_insns_constant_wide (CONST_WIDE_INT_ELT (op, i));
+ return ins;
+ }
+
case CONST_DOUBLE:
if (mode == SFmode || mode == SDmode)
{
@@ -5143,8 +5152,6 @@ easy_altivec_constant (rtx op, enum machine_mode mode)
else if (mode == V2DImode)
{
- /* In case the compiler is built 32-bit, CONST_DOUBLE constants are not
- easy. */
if (GET_CODE (CONST_VECTOR_ELT (op, 0)) != CONST_INT
|| GET_CODE (CONST_VECTOR_ELT (op, 1)) != CONST_INT)
return false;
@@ -5309,9 +5316,7 @@ paired_expand_vector_init (rtx target, rtx vals)
for (i = 0; i < n_elts; ++i)
{
x = XVECEXP (vals, 0, i);
- if (!(CONST_INT_P (x)
- || GET_CODE (x) == CONST_DOUBLE
- || GET_CODE (x) == CONST_FIXED))
+ if (!CONSTANT_P (x))
++n_var;
}
if (n_var == 0)
@@ -5463,9 +5468,7 @@ rs6000_expand_vector_init (rtx target, rtx vals)
for (i = 0; i < n_elts; ++i)
{
x = XVECEXP (vals, 0, i);
- if (!(CONST_INT_P (x)
- || GET_CODE (x) == CONST_DOUBLE
- || GET_CODE (x) == CONST_FIXED))
+ if (!CONSTANT_P (x))
++n_var, one_var = i;
else if (x != CONST0_RTX (inner_mode))
all_const_zero = false;
@@ -6703,6 +6706,7 @@ rs6000_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
&& TARGET_NO_TOC
&& ! flag_pic
&& GET_CODE (x) != CONST_INT
+ && GET_CODE (x) != CONST_WIDE_INT
&& GET_CODE (x) != CONST_DOUBLE
&& CONSTANT_P (x)
&& GET_MODE_NUNITS (mode) == 1
@@ -8167,21 +8171,12 @@ rs6000_emit_move (rtx dest, rtx source, enum machine_mode mode)
}
/* Sanity checks. Check that we get CONST_DOUBLE only when we should. */
- if (GET_CODE (operands[1]) == CONST_DOUBLE
- && ! FLOAT_MODE_P (mode)
+ if (CONST_WIDE_INT_P (operands[1])
&& GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
{
- /* FIXME. This should never happen. */
- /* Since it seems that it does, do the safe thing and convert
- to a CONST_INT. */
- operands[1] = gen_int_mode (CONST_DOUBLE_LOW (operands[1]), mode);
+ /* This should be fixed with the introduction of CONST_WIDE_INT. */
+ gcc_unreachable ();
}
- gcc_assert (GET_CODE (operands[1]) != CONST_DOUBLE
- || FLOAT_MODE_P (mode)
- || ((CONST_DOUBLE_HIGH (operands[1]) != 0
- || CONST_DOUBLE_LOW (operands[1]) < 0)
- && (CONST_DOUBLE_HIGH (operands[1]) != -1
- || CONST_DOUBLE_LOW (operands[1]) >= 0)));
/* Check if GCC is setting up a block move that will end up using FP
registers as temporaries. We must make sure this is acceptable. */
@@ -8697,8 +8692,10 @@ rs6000_aggregate_candidate (const_tree type, enum machine_mode *modep)
int count;
tree index = TYPE_DOMAIN (type);
- /* Can't handle incomplete types. */
- if (!COMPLETE_TYPE_P (type))
+ /* Can't handle incomplete types nor sizes that are not
+ fixed. */
+ if (!COMPLETE_TYPE_P (type)
+ || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
return -1;
count = rs6000_aggregate_candidate (TREE_TYPE (type), modep);
@@ -8715,9 +8712,7 @@ rs6000_aggregate_candidate (const_tree type, enum machine_mode *modep)
- tree_to_uhwi (TYPE_MIN_VALUE (index)));
/* There must be no padding. */
- if (!tree_fits_uhwi_p (TYPE_SIZE (type))
- || ((HOST_WIDE_INT) tree_to_uhwi (TYPE_SIZE (type))
- != count * GET_MODE_BITSIZE (*modep)))
+ if (wi::ne_p (TYPE_SIZE (type), count * GET_MODE_BITSIZE (*modep)))
return -1;
return count;
@@ -8729,8 +8724,10 @@ rs6000_aggregate_candidate (const_tree type, enum machine_mode *modep)
int sub_count;
tree field;
- /* Can't handle incomplete types. */
- if (!COMPLETE_TYPE_P (type))
+ /* Can't handle incomplete types nor sizes that are not
+ fixed. */
+ if (!COMPLETE_TYPE_P (type)
+ || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
return -1;
for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
@@ -8745,9 +8742,7 @@ rs6000_aggregate_candidate (const_tree type, enum machine_mode *modep)
}
/* There must be no padding. */
- if (!tree_fits_uhwi_p (TYPE_SIZE (type))
- || ((HOST_WIDE_INT) tree_to_uhwi (TYPE_SIZE (type))
- != count * GET_MODE_BITSIZE (*modep)))
+ if (wi::ne_p (TYPE_SIZE (type), count * GET_MODE_BITSIZE (*modep)))
return -1;
return count;
@@ -8761,9 +8756,10 @@ rs6000_aggregate_candidate (const_tree type, enum machine_mode *modep)
int sub_count;
tree field;
- /* Can't handle incomplete types. */
- if (!COMPLETE_TYPE_P (type))
- return -1;
+ /* Can't handle incomplete types nor sizes that are not
+ fixed. */
+ if (!COMPLETE_TYPE_P (type)
+ || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
+ return -1;
for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
{
@@ -8777,9 +8773,7 @@ rs6000_aggregate_candidate (const_tree type, enum machine_mode *modep)
}
/* There must be no padding. */
- if (!tree_fits_uhwi_p (TYPE_SIZE (type))
- || ((HOST_WIDE_INT) tree_to_uhwi (TYPE_SIZE (type))
- != count * GET_MODE_BITSIZE (*modep)))
+ if (wi::ne_p (TYPE_SIZE (type), count * GET_MODE_BITSIZE (*modep)))
return -1;
return count;
@@ -12474,16 +12468,14 @@ rs6000_expand_ternop_builtin (enum insn_code icode, tree exp, rtx target)
/* Check whether the 2nd and 3rd arguments are integer constants and in
range and prepare arguments. */
STRIP_NOPS (arg1);
- if (TREE_CODE (arg1) != INTEGER_CST
- || !IN_RANGE (TREE_INT_CST_LOW (arg1), 0, 1))
+ if (TREE_CODE (arg1) != INTEGER_CST || wi::geu_p (arg1, 2))
{
error ("argument 2 must be 0 or 1");
return const0_rtx;
}
STRIP_NOPS (arg2);
- if (TREE_CODE (arg2) != INTEGER_CST
- || !IN_RANGE (TREE_INT_CST_LOW (arg2), 0, 15))
+ if (TREE_CODE (arg2) != INTEGER_CST || wi::geu_p (arg2, 16))
{
error ("argument 3 must be in the range 0..15");
return const0_rtx;
@@ -17456,6 +17448,7 @@ rs6000_output_move_128bit (rtx operands[])
/* Constants. */
else if (dest_regno >= 0
&& (GET_CODE (src) == CONST_INT
+ || GET_CODE (src) == CONST_WIDE_INT
|| GET_CODE (src) == CONST_DOUBLE
|| GET_CODE (src) == CONST_VECTOR))
{
@@ -18495,8 +18488,7 @@ rs6000_assemble_integer (rtx x, unsigned int size, int aligned_p)
if (TARGET_RELOCATABLE
&& in_section != toc_section
&& !recurse
- && GET_CODE (x) != CONST_INT
- && GET_CODE (x) != CONST_DOUBLE
+ && !CONST_SCALAR_INT_P (x)
&& CONSTANT_P (x))
{
char buf[256];
@@ -25243,6 +25235,15 @@ rs6000_hash_constant (rtx k)
case LABEL_REF:
return result * 1231 + (unsigned) INSN_UID (XEXP (k, 0));
+ case CONST_WIDE_INT:
+ {
+ int i;
+ flen = CONST_WIDE_INT_NUNITS (k);
+ for (i = 0; i < flen; i++)
+ result = result * 613 + CONST_WIDE_INT_ELT (k, i);
+ return result;
+ }
+
case CONST_DOUBLE:
if (mode != VOIDmode)
return real_hash (CONST_DOUBLE_REAL_VALUE (k)) * result;
@@ -25447,7 +25448,7 @@ output_toc (FILE *file, rtx x, int labelno, enum machine_mode mode)
/* If we're going to put a double constant in the TOC, make sure it's
aligned properly when strict alignment is on. */
- if (GET_CODE (x) == CONST_DOUBLE
+ if ((CONST_DOUBLE_P (x) || CONST_WIDE_INT_P (x))
&& STRICT_ALIGNMENT
&& GET_MODE_BITSIZE (mode) >= 64
&& ! (TARGET_NO_FP_IN_TOC && ! TARGET_MINIMAL_TOC)) {
@@ -29453,6 +29454,7 @@ rs6000_rtx_costs (rtx x, int code, int outer_code, int opno ATTRIBUTE_UNUSED,
/* FALLTHRU */
case CONST_DOUBLE:
+ case CONST_WIDE_INT:
case CONST:
case HIGH:
case SYMBOL_REF:
@@ -30092,7 +30094,7 @@ rs6000_emit_swrsqrt (rtx dst, rtx src)
gcc_assert (code != CODE_FOR_nothing);
/* Load up the constant 1.5 either as a scalar, or as a vector. */
- real_from_integer (&dconst3_2, VOIDmode, 3, 0, 0);
+ real_from_integer (&dconst3_2, VOIDmode, 3, SIGNED);
SET_REAL_EXP (&dconst3_2, REAL_EXP (&dconst3_2) - 1);
halfthree = rs6000_load_constant_and_splat (mode, dconst3_2);
diff --git a/gcc/config/rs6000/rs6000.h b/gcc/config/rs6000/rs6000.h
index 21330dc657d..f979905f1da 100644
--- a/gcc/config/rs6000/rs6000.h
+++ b/gcc/config/rs6000/rs6000.h
@@ -2689,3 +2689,4 @@ enum rs6000_builtin_type_index
extern GTY(()) tree rs6000_builtin_types[RS6000_BTI_MAX];
extern GTY(()) tree rs6000_builtin_decls[RS6000_BUILTIN_COUNT];
+#define TARGET_SUPPORTS_WIDE_INT 1
diff --git a/gcc/config/rs6000/rs6000.md b/gcc/config/rs6000/rs6000.md
index e853bc4f92c..f6da9b3a382 100644
--- a/gcc/config/rs6000/rs6000.md
+++ b/gcc/config/rs6000/rs6000.md
@@ -10336,7 +10336,7 @@
(define_split
[(set (match_operand:DI 0 "gpc_reg_operand" "")
- (match_operand:DI 1 "const_double_operand" ""))]
+ (match_operand:DI 1 "const_scalar_int_operand" ""))]
"TARGET_POWERPC64 && num_insns_constant (operands[1], DImode) > 1"
[(set (match_dup 0) (match_dup 2))
(set (match_dup 0) (plus:DI (match_dup 0) (match_dup 3)))]
@@ -10402,7 +10402,7 @@
(define_split
[(set (match_operand:TI2 0 "int_reg_operand" "")
- (match_operand:TI2 1 "const_double_operand" ""))]
+ (match_operand:TI2 1 "const_scalar_int_operand" ""))]
"TARGET_POWERPC64
&& (VECTOR_MEM_NONE_P (<MODE>mode)
|| (reload_completed && INT_REGNO_P (REGNO (operands[0]))))"
@@ -10414,12 +10414,12 @@
<MODE>mode);
operands[3] = operand_subword_force (operands[0], WORDS_BIG_ENDIAN != 0,
<MODE>mode);
- if (GET_CODE (operands[1]) == CONST_DOUBLE)
+ if (CONST_WIDE_INT_P (operands[1]))
{
- operands[4] = GEN_INT (CONST_DOUBLE_HIGH (operands[1]));
- operands[5] = GEN_INT (CONST_DOUBLE_LOW (operands[1]));
+ operands[4] = GEN_INT (CONST_WIDE_INT_ELT (operands[1], 1));
+ operands[5] = GEN_INT (CONST_WIDE_INT_ELT (operands[1], 0));
}
- else if (GET_CODE (operands[1]) == CONST_INT)
+ else if (CONST_INT_P (operands[1]))
{
operands[4] = GEN_INT (- (INTVAL (operands[1]) < 0));
operands[5] = operands[1];
diff --git a/gcc/config/s390/s390.c b/gcc/config/s390/s390.c
index e67d8ba37c5..ef080ad6c79 100644
--- a/gcc/config/s390/s390.c
+++ b/gcc/config/s390/s390.c
@@ -474,9 +474,7 @@ s390_handle_hotpatch_attribute (tree *node, tree name, tree args,
if (TREE_CODE (expr) != INTEGER_CST
|| !INTEGRAL_TYPE_P (TREE_TYPE (expr))
- || TREE_INT_CST_HIGH (expr) != 0
- || TREE_INT_CST_LOW (expr) > (unsigned int)
- s390_hotpatch_trampoline_halfwords_max)
+ || wi::gtu_p (expr, s390_hotpatch_trampoline_halfwords_max))
{
error ("requested %qE attribute is not a non-negative integer"
" constant or too large (max. %d)", name,
diff --git a/gcc/config/sol2-c.c b/gcc/config/sol2-c.c
index f6c26047fc8..96ef99d9a79 100644
--- a/gcc/config/sol2-c.c
+++ b/gcc/config/sol2-c.c
@@ -86,7 +86,7 @@ solaris_pragma_align (cpp_reader *pfile ATTRIBUTE_UNUSED)
{
tree t, x;
enum cpp_ttype ttype;
- HOST_WIDE_INT low;
+ unsigned HOST_WIDE_INT low;
if (pragma_lex (&x) != CPP_NUMBER
|| pragma_lex (&t) != CPP_OPEN_PAREN)
@@ -96,7 +96,7 @@ solaris_pragma_align (cpp_reader *pfile ATTRIBUTE_UNUSED)
}
low = TREE_INT_CST_LOW (x);
- if (TREE_INT_CST_HIGH (x) != 0
+ if (!tree_fits_uhwi_p (x)
|| (low != 1 && low != 2 && low != 4 && low != 8 && low != 16
&& low != 32 && low != 64 && low != 128))
{
diff --git a/gcc/config/sparc/sparc.c b/gcc/config/sparc/sparc.c
index cce879f7f72..ff1e9d154b2 100644
--- a/gcc/config/sparc/sparc.c
+++ b/gcc/config/sparc/sparc.c
@@ -69,6 +69,7 @@ along with GCC; see the file COPYING3. If not see
#include "opts.h"
#include "tree-pass.h"
#include "context.h"
+#include "wide-int.h"
/* Processor costs */
@@ -10930,30 +10931,30 @@ sparc_fold_builtin (tree fndecl, int n_args ATTRIBUTE_UNUSED,
&& TREE_CODE (arg2) == INTEGER_CST)
{
bool overflow = false;
- double_int result = TREE_INT_CST (arg2);
- double_int tmp;
+ wide_int result = arg2;
+ wide_int tmp;
unsigned i;
for (i = 0; i < VECTOR_CST_NELTS (arg0); ++i)
{
- double_int e0 = TREE_INT_CST (VECTOR_CST_ELT (arg0, i));
- double_int e1 = TREE_INT_CST (VECTOR_CST_ELT (arg1, i));
+ tree e0 = VECTOR_CST_ELT (arg0, i);
+ tree e1 = VECTOR_CST_ELT (arg1, i);
bool neg1_ovf, neg2_ovf, add1_ovf, add2_ovf;
- tmp = e1.neg_with_overflow (&neg1_ovf);
- tmp = e0.add_with_sign (tmp, false, &add1_ovf);
- if (tmp.is_negative ())
- tmp = tmp.neg_with_overflow (&neg2_ovf);
+ tmp = wi::neg (e1, &neg1_ovf);
+ tmp = wi::add (e0, tmp, SIGNED, &add1_ovf);
+ if (wi::neg_p (tmp))
+ tmp = wi::neg (tmp, &neg2_ovf);
else
neg2_ovf = false;
- result = result.add_with_sign (tmp, false, &add2_ovf);
+ result = wi::add (result, tmp, SIGNED, &add2_ovf);
overflow |= neg1_ovf | neg2_ovf | add1_ovf | add2_ovf;
}
gcc_assert (!overflow);
- return build_int_cst_wide (rtype, result.low, result.high);
+ return wide_int_to_tree (rtype, result);
}
default:
diff --git a/gcc/config/vax/vax.c b/gcc/config/vax/vax.c
index 2b152fdb494..818137baceb 100644
--- a/gcc/config/vax/vax.c
+++ b/gcc/config/vax/vax.c
@@ -45,6 +45,7 @@ along with GCC; see the file COPYING3. If not see
#include "tm_p.h"
#include "target.h"
#include "target-def.h"
+#include "wide-int.h"
static void vax_option_override (void);
static bool vax_legitimate_address_p (enum machine_mode, rtx, bool);
@@ -645,7 +646,7 @@ vax_float_literal (rtx c)
{
int x = 1 << i;
bool ok;
- REAL_VALUE_FROM_INT (s, x, 0, mode);
+ real_from_integer (&s, mode, x, SIGNED);
if (REAL_VALUES_EQUAL (r, s))
return true;
diff --git a/gcc/coretypes.h b/gcc/coretypes.h
index d70fb11d893..41ad1a3b22d 100644
--- a/gcc/coretypes.h
+++ b/gcc/coretypes.h
@@ -58,6 +58,9 @@ typedef const struct rtx_def *const_rtx;
struct rtvec_def;
typedef struct rtvec_def *rtvec;
typedef const struct rtvec_def *const_rtvec;
+struct hwivec_def;
+typedef struct hwivec_def *hwivec;
+typedef const struct hwivec_def *const_hwivec;
union tree_node;
typedef union tree_node *tree;
typedef const union tree_node *const_tree;
diff --git a/gcc/cp/call.c b/gcc/cp/call.c
index 8c55c326fc7..857df57c60c 100644
--- a/gcc/cp/call.c
+++ b/gcc/cp/call.c
@@ -41,6 +41,7 @@ along with GCC; see the file COPYING3. If not see
#include "c-family/c-objc.h"
#include "timevar.h"
#include "cgraph.h"
+#include "wide-int.h"
/* The various kinds of conversion. */
@@ -6576,8 +6577,7 @@ type_passed_as (tree type)
else if (targetm.calls.promote_prototypes (type)
&& INTEGRAL_TYPE_P (type)
&& COMPLETE_TYPE_P (type)
- && INT_CST_LT_UNSIGNED (TYPE_SIZE (type),
- TYPE_SIZE (integer_type_node)))
+ && tree_int_cst_lt (TYPE_SIZE (type), TYPE_SIZE (integer_type_node)))
type = integer_type_node;
return type;
@@ -6617,8 +6617,7 @@ convert_for_arg_passing (tree type, tree val, tsubst_flags_t complain)
else if (targetm.calls.promote_prototypes (type)
&& INTEGRAL_TYPE_P (type)
&& COMPLETE_TYPE_P (type)
- && INT_CST_LT_UNSIGNED (TYPE_SIZE (type),
- TYPE_SIZE (integer_type_node)))
+ && tree_int_cst_lt (TYPE_SIZE (type), TYPE_SIZE (integer_type_node)))
val = cp_perform_integral_promotions (val, complain);
if ((complain & tf_warning)
&& warn_suggest_attribute_format)
diff --git a/gcc/cp/class.c b/gcc/cp/class.c
index 5cac488ee95..c96d79dbc82 100644
--- a/gcc/cp/class.c
+++ b/gcc/cp/class.c
@@ -40,6 +40,7 @@ along with GCC; see the file COPYING3. If not see
#include "dumpfile.h"
#include "splay-tree.h"
#include "gimplify.h"
+#include "wide-int.h"
/* The number of nested classes being processed. If we are not in the
scope of any class, this is zero. */
@@ -3811,7 +3812,7 @@ walk_subobject_offsets (tree type,
/* If this OFFSET is bigger than the MAX_OFFSET, then we should
stop. */
- if (max_offset && INT_CST_LT (max_offset, offset))
+ if (max_offset && tree_int_cst_lt (max_offset, offset))
return 0;
if (type == error_mark_node)
@@ -3968,8 +3969,8 @@ walk_subobject_offsets (tree type,
for (index = size_zero_node;
/* G++ 3.2 had an off-by-one error here. */
(abi_version_at_least (2)
- ? !INT_CST_LT (TYPE_MAX_VALUE (domain), index)
- : INT_CST_LT (index, TYPE_MAX_VALUE (domain)));
+ ? !tree_int_cst_lt (TYPE_MAX_VALUE (domain), index)
+ : tree_int_cst_lt (index, TYPE_MAX_VALUE (domain)));
index = size_binop (PLUS_EXPR, index, size_one_node))
{
r = walk_subobject_offsets (TREE_TYPE (type),
@@ -3985,7 +3986,7 @@ walk_subobject_offsets (tree type,
/* If this new OFFSET is bigger than the MAX_OFFSET, then
there's no point in iterating through the remaining
elements of the array. */
- if (max_offset && INT_CST_LT (max_offset, offset))
+ if (max_offset && tree_int_cst_lt (max_offset, offset))
break;
}
}
@@ -5922,7 +5923,7 @@ end_of_class (tree t, int include_virtuals_p)
continue;
offset = end_of_base (base_binfo);
- if (INT_CST_LT_UNSIGNED (result, offset))
+ if (tree_int_cst_lt (result, offset))
result = offset;
}
@@ -5932,7 +5933,7 @@ end_of_class (tree t, int include_virtuals_p)
vec_safe_iterate (vbases, i, &base_binfo); i++)
{
offset = end_of_base (base_binfo);
- if (INT_CST_LT_UNSIGNED (result, offset))
+ if (tree_int_cst_lt (result, offset))
result = offset;
}
@@ -6012,7 +6013,7 @@ include_empty_classes (record_layout_info rli)
CLASSTYPE_AS_BASE (rli->t) != NULL_TREE);
rli_size = rli_size_unit_so_far (rli);
if (TREE_CODE (rli_size) == INTEGER_CST
- && INT_CST_LT_UNSIGNED (rli_size, eoc))
+ && tree_int_cst_lt (rli_size, eoc))
{
if (!abi_version_at_least (2))
/* In version 1 of the ABI, the size of a class that ends with
@@ -6128,7 +6129,7 @@ layout_class_type (tree t, tree *virtuals_p)
type, then there are some special rules for allocating
it. */
if (DECL_C_BIT_FIELD (field)
- && INT_CST_LT (TYPE_SIZE (type), DECL_SIZE (field)))
+ && tree_int_cst_lt (TYPE_SIZE (type), DECL_SIZE (field)))
{
unsigned int itk;
tree integer_type;
@@ -6139,10 +6140,10 @@ layout_class_type (tree t, tree *virtuals_p)
bits as additional padding. */
for (itk = itk_char; itk != itk_none; ++itk)
if (integer_types[itk] != NULL_TREE
- && (INT_CST_LT (size_int (MAX_FIXED_MODE_SIZE),
- TYPE_SIZE (integer_types[itk]))
- || INT_CST_LT (DECL_SIZE (field),
- TYPE_SIZE (integer_types[itk]))))
+ && (tree_int_cst_lt (size_int (MAX_FIXED_MODE_SIZE),
+ TYPE_SIZE (integer_types[itk]))
+ || tree_int_cst_lt (DECL_SIZE (field),
+ TYPE_SIZE (integer_types[itk]))))
break;
/* ITK now indicates a type that is too large for the
@@ -6158,7 +6159,7 @@ layout_class_type (tree t, tree *virtuals_p)
3.2 always created a padding field, even if it had zero
width. */
if (!abi_version_at_least (2)
- || INT_CST_LT (TYPE_SIZE (integer_type), DECL_SIZE (field)))
+ || tree_int_cst_lt (TYPE_SIZE (integer_type), DECL_SIZE (field)))
{
if (abi_version_at_least (2) && TREE_CODE (t) == UNION_TYPE)
/* In a union, the padding field must have the full width
diff --git a/gcc/cp/cvt.c b/gcc/cp/cvt.c
index e8ece0e0b13..c833722538e 100644
--- a/gcc/cp/cvt.c
+++ b/gcc/cp/cvt.c
@@ -36,6 +36,7 @@ along with GCC; see the file COPYING3. If not see
#include "convert.h"
#include "decl.h"
#include "target.h"
+#include "wide-int.h"
static tree cp_convert_to_pointer (tree, tree, tsubst_flags_t);
static tree convert_to_pointer_force (tree, tree, tsubst_flags_t);
@@ -582,9 +583,7 @@ ignore_overflows (tree expr, tree orig)
{
gcc_assert (!TREE_OVERFLOW (orig));
/* Ensure constant sharing. */
- expr = build_int_cst_wide (TREE_TYPE (expr),
- TREE_INT_CST_LOW (expr),
- TREE_INT_CST_HIGH (expr));
+ expr = wide_int_to_tree (TREE_TYPE (expr), expr);
}
return expr;
}
diff --git a/gcc/cp/decl.c b/gcc/cp/decl.c
index ffaff5c52c8..01a36252b2e 100644
--- a/gcc/cp/decl.c
+++ b/gcc/cp/decl.c
@@ -60,6 +60,7 @@ along with GCC; see the file COPYING3. If not see
#include "plugin.h"
#include "cgraph.h"
#include "cilk.h"
+#include "wide-int.h"
/* Possible cases of bad specifiers type used by bad_specifiers. */
enum bad_spec_place {
@@ -4844,7 +4845,7 @@ check_array_designated_initializer (constructor_elt *ce,
if (TREE_CODE (ce->index) == INTEGER_CST)
{
/* A C99 designator is OK if it matches the current index. */
- if (TREE_INT_CST_LOW (ce->index) == index)
+ if (wi::eq_p (ce->index, index))
return true;
else
sorry ("non-trivial designated initializers not supported");
@@ -8316,7 +8317,7 @@ compute_array_index_type (tree name, tree size, tsubst_flags_t complain)
constant_expression_error (size);
/* An array must have a positive number of elements. */
- if (INT_CST_LT (size, integer_zero_node))
+ if (tree_int_cst_lt (size, integer_zero_node))
{
if (!(complain & tf_error))
return error_mark_node;
@@ -12677,9 +12678,9 @@ finish_enum_value_list (tree enumtype)
enumeration. We must do this before the type of MINNODE and
MAXNODE are transformed, since tree_int_cst_min_precision relies
on the TREE_TYPE of the value it is passed. */
- bool unsignedp = tree_int_cst_sgn (minnode) >= 0;
- int lowprec = tree_int_cst_min_precision (minnode, unsignedp);
- int highprec = tree_int_cst_min_precision (maxnode, unsignedp);
+ signop sgn = tree_int_cst_sgn (minnode) >= 0 ? UNSIGNED : SIGNED;
+ int lowprec = tree_int_cst_min_precision (minnode, sgn);
+ int highprec = tree_int_cst_min_precision (maxnode, sgn);
int precision = MAX (lowprec, highprec);
unsigned int itk;
bool use_short_enum;
@@ -12711,7 +12712,7 @@ finish_enum_value_list (tree enumtype)
underlying_type = integer_types[itk];
if (underlying_type != NULL_TREE
&& TYPE_PRECISION (underlying_type) >= precision
- && TYPE_UNSIGNED (underlying_type) == unsignedp)
+ && TYPE_SIGN (underlying_type) == sgn)
break;
}
if (itk == itk_none)
@@ -12758,12 +12759,11 @@ finish_enum_value_list (tree enumtype)
= build_distinct_type_copy (underlying_type);
TYPE_PRECISION (ENUM_UNDERLYING_TYPE (enumtype)) = precision;
set_min_and_max_values_for_integral_type
- (ENUM_UNDERLYING_TYPE (enumtype), precision, unsignedp);
+ (ENUM_UNDERLYING_TYPE (enumtype), precision, sgn);
/* If -fstrict-enums, still constrain TYPE_MIN/MAX_VALUE. */
if (flag_strict_enums)
- set_min_and_max_values_for_integral_type (enumtype, precision,
- unsignedp);
+ set_min_and_max_values_for_integral_type (enumtype, precision, sgn);
}
else
underlying_type = ENUM_UNDERLYING_TYPE (enumtype);
@@ -12887,14 +12887,14 @@ build_enumerator (tree name, tree value, tree enumtype, location_t loc)
value = error_mark_node;
else
{
- double_int di = TREE_INT_CST (prev_value)
- .add_with_sign (double_int_one,
- false, &overflowed);
+ tree type = TREE_TYPE (prev_value);
+ signop sgn = TYPE_SIGN (type);
+ widest_int wi = wi::add (wi::to_widest (prev_value), 1, sgn,
+ &overflowed);
if (!overflowed)
{
- tree type = TREE_TYPE (prev_value);
- bool pos = TYPE_UNSIGNED (type) || !di.is_negative ();
- if (!double_int_fits_to_tree_p (type, di))
+ bool pos = !wi::neg_p (wi, sgn);
+ if (!wi::fits_to_tree_p (wi, type))
{
unsigned int itk;
for (itk = itk_int; itk != itk_none; itk++)
@@ -12902,7 +12902,7 @@ build_enumerator (tree name, tree value, tree enumtype, location_t loc)
type = integer_types[itk];
if (type != NULL_TREE
&& (pos || !TYPE_UNSIGNED (type))
- && double_int_fits_to_tree_p (type, di))
+ && wi::fits_to_tree_p (wi, type))
break;
}
if (type && cxx_dialect < cxx11
@@ -12914,7 +12914,7 @@ incremented enumerator value is too large for %<long%>");
if (type == NULL_TREE)
overflowed = true;
else
- value = double_int_to_tree (type, di);
+ value = wide_int_to_tree (type, wi);
}
if (overflowed)
diff --git a/gcc/cp/init.c b/gcc/cp/init.c
index 46422a52304..6838d2aadd0 100644
--- a/gcc/cp/init.c
+++ b/gcc/cp/init.c
@@ -31,6 +31,7 @@ along with GCC; see the file COPYING3. If not see
#include "flags.h"
#include "target.h"
#include "gimplify.h"
+#include "wide-int.h"
static bool begin_init_stmts (tree *, tree *);
static tree finish_init_stmts (bool, tree, tree);
@@ -2284,10 +2285,10 @@ build_new_1 (vec<tree, va_gc> **placement, tree type, tree nelts,
/* For arrays, a bounds checks on the NELTS parameter. */
tree outer_nelts_check = NULL_TREE;
bool outer_nelts_from_type = false;
- double_int inner_nelts_count = double_int_one;
+ offset_int inner_nelts_count = 1;
tree alloc_call, alloc_expr;
/* Size of the inner array elements. */
- double_int inner_size;
+ offset_int inner_size;
/* The address returned by the call to "operator new". This node is
a VAR_DECL and is therefore reusable. */
tree alloc_node;
@@ -2343,9 +2344,8 @@ build_new_1 (vec<tree, va_gc> **placement, tree type, tree nelts,
if (TREE_CODE (inner_nelts_cst) == INTEGER_CST)
{
bool overflow;
- double_int result = TREE_INT_CST (inner_nelts_cst)
- .mul_with_sign (inner_nelts_count,
- false, &overflow);
+ offset_int result = wi::mul (wi::to_offset (inner_nelts_cst),
+ inner_nelts_count, SIGNED, &overflow);
if (overflow)
{
if (complain & tf_error)
@@ -2456,42 +2456,40 @@ build_new_1 (vec<tree, va_gc> **placement, tree type, tree nelts,
{
/* Maximum available size in bytes. Half of the address space
minus the cookie size. */
- double_int max_size
- = double_int_one.llshift (TYPE_PRECISION (sizetype) - 1,
- HOST_BITS_PER_DOUBLE_INT);
+ offset_int max_size
+ = wi::set_bit_in_zero <offset_int> (TYPE_PRECISION (sizetype) - 1);
/* Maximum number of outer elements which can be allocated. */
- double_int max_outer_nelts;
+ offset_int max_outer_nelts;
tree max_outer_nelts_tree;
gcc_assert (TREE_CODE (size) == INTEGER_CST);
cookie_size = targetm.cxx.get_cookie_size (elt_type);
gcc_assert (TREE_CODE (cookie_size) == INTEGER_CST);
- gcc_checking_assert (TREE_INT_CST (cookie_size).ult (max_size));
+ gcc_checking_assert (wi::ltu_p (wi::to_offset (cookie_size), max_size));
/* Unconditionally subtract the cookie size. This decreases the
maximum object size and is safe even if we choose not to use
a cookie after all. */
- max_size -= TREE_INT_CST (cookie_size);
+ max_size -= wi::to_offset (cookie_size);
bool overflow;
- inner_size = TREE_INT_CST (size)
- .mul_with_sign (inner_nelts_count, false, &overflow);
- if (overflow || inner_size.ugt (max_size))
+ inner_size = wi::mul (wi::to_offset (size), inner_nelts_count, SIGNED,
+ &overflow);
+ if (overflow || wi::gtu_p (inner_size, max_size))
{
if (complain & tf_error)
error ("size of array is too large");
return error_mark_node;
}
- max_outer_nelts = max_size.udiv (inner_size, TRUNC_DIV_EXPR);
+
+ max_outer_nelts = wi::udiv_trunc (max_size, inner_size);
/* Only keep the top-most seven bits, to simplify encoding the
constant in the instruction stream. */
{
- unsigned shift = HOST_BITS_PER_DOUBLE_INT - 7
- - (max_outer_nelts.high ? clz_hwi (max_outer_nelts.high)
- : (HOST_BITS_PER_WIDE_INT + clz_hwi (max_outer_nelts.low)));
- max_outer_nelts
- = max_outer_nelts.lrshift (shift, HOST_BITS_PER_DOUBLE_INT)
- .llshift (shift, HOST_BITS_PER_DOUBLE_INT);
+ unsigned shift = (max_outer_nelts.get_precision ()) - 7
+ - wi::clz (max_outer_nelts);
+ max_outer_nelts = wi::lshift (wi::lrshift (max_outer_nelts, shift),
+ shift);
}
- max_outer_nelts_tree = double_int_to_tree (sizetype, max_outer_nelts);
+ max_outer_nelts_tree = wide_int_to_tree (sizetype, max_outer_nelts);
size = size_binop (MULT_EXPR, size, convert (sizetype, nelts));
outer_nelts_check = fold_build2 (LE_EXPR, boolean_type_node,
@@ -2572,7 +2570,7 @@ build_new_1 (vec<tree, va_gc> **placement, tree type, tree nelts,
cookie_size = NULL_TREE;
/* No size arithmetic necessary, so the size check is
not needed. */
- if (outer_nelts_check != NULL && inner_size.is_one ())
+ if (outer_nelts_check != NULL && inner_size == 1)
outer_nelts_check = NULL_TREE;
}
/* Perform the overflow check. */
@@ -2617,7 +2615,7 @@ build_new_1 (vec<tree, va_gc> **placement, tree type, tree nelts,
cookie_size = NULL_TREE;
/* No size arithmetic necessary, so the size check is
not needed. */
- if (outer_nelts_check != NULL && inner_size.is_one ())
+ if (outer_nelts_check != NULL && inner_size == 1)
outer_nelts_check = NULL_TREE;
}
diff --git a/gcc/cp/mangle.c b/gcc/cp/mangle.c
index 84d10a077c9..97fb4c6ec3c 100644
--- a/gcc/cp/mangle.c
+++ b/gcc/cp/mangle.c
@@ -57,6 +57,7 @@ along with GCC; see the file COPYING3. If not see
#include "flags.h"
#include "target.h"
#include "cgraph.h"
+#include "wide-int.h"
/* Debugging support. */
@@ -1513,8 +1514,8 @@ static inline void
write_integer_cst (const tree cst)
{
int sign = tree_int_cst_sgn (cst);
-
- if (TREE_INT_CST_HIGH (cst) + (sign < 0))
+ widest_int abs_value = wi::abs (wi::to_widest (cst));
+ if (!wi::fits_uhwi_p (abs_value))
{
/* A bignum. We do this in chunks, each of which fits in a
HOST_WIDE_INT. */
@@ -1540,8 +1541,7 @@ write_integer_cst (const tree cst)
type = c_common_signed_or_unsigned_type (1, TREE_TYPE (cst));
base = build_int_cstu (type, chunk);
- n = build_int_cst_wide (type,
- TREE_INT_CST_LOW (cst), TREE_INT_CST_HIGH (cst));
+ n = wide_int_to_tree (type, cst);
if (sign < 0)
{
@@ -1568,14 +1568,9 @@ write_integer_cst (const tree cst)
else
{
/* A small num. */
- unsigned HOST_WIDE_INT low = TREE_INT_CST_LOW (cst);
-
if (sign < 0)
- {
- write_char ('n');
- low = -low;
- }
- write_unsigned_number (low);
+ write_char ('n');
+ write_unsigned_number (abs_value.to_uhwi ());
}
}
@@ -3226,12 +3221,12 @@ write_array_type (const tree type)
{
/* The ABI specifies that we should mangle the number of
elements in the array, not the largest allowed index. */
- double_int dmax = tree_to_double_int (max) + double_int_one;
+ offset_int wmax = wi::to_offset (max) + 1;
/* Truncate the result - this will mangle [0, SIZE_INT_MAX]
number of elements as zero. */
- dmax = dmax.zext (TYPE_PRECISION (TREE_TYPE (max)));
- gcc_assert (dmax.fits_uhwi ());
- write_unsigned_number (dmax.low);
+ wmax = wi::zext (wmax, TYPE_PRECISION (TREE_TYPE (max)));
+ gcc_assert (wi::fits_uhwi_p (wmax));
+ write_unsigned_number (wmax.to_uhwi ());
}
else
{
diff --git a/gcc/cp/tree.c b/gcc/cp/tree.c
index 4b39b9039c6..e14002482be 100644
--- a/gcc/cp/tree.c
+++ b/gcc/cp/tree.c
@@ -36,6 +36,7 @@ along with GCC; see the file COPYING3. If not see
#include "hash-table.h"
#include "gimple-expr.h"
#include "gimplify.h"
+#include "wide-int.h"
static tree bot_manip (tree *, int *, void *);
static tree bot_replace (tree *, int *, void *);
@@ -2620,8 +2621,7 @@ cp_tree_equal (tree t1, tree t2)
switch (code1)
{
case INTEGER_CST:
- return TREE_INT_CST_LOW (t1) == TREE_INT_CST_LOW (t2)
- && TREE_INT_CST_HIGH (t1) == TREE_INT_CST_HIGH (t2);
+ return tree_int_cst_equal (t1, t2);
case REAL_CST:
return REAL_VALUES_EQUAL (TREE_REAL_CST (t1), TREE_REAL_CST (t2));
diff --git a/gcc/cp/typeck2.c b/gcc/cp/typeck2.c
index 044d9719352..1d8b78278bf 100644
--- a/gcc/cp/typeck2.c
+++ b/gcc/cp/typeck2.c
@@ -36,6 +36,7 @@ along with GCC; see the file COPYING3. If not see
#include "cp-tree.h"
#include "flags.h"
#include "diagnostic-core.h"
+#include "wide-int.h"
static tree
process_init_constructor (tree type, tree init, tsubst_flags_t complain);
@@ -1165,12 +1166,10 @@ process_init_constructor_array (tree type, tree init,
{
tree domain = TYPE_DOMAIN (type);
if (domain && TREE_CONSTANT (TYPE_MAX_VALUE (domain)))
- len = (tree_to_double_int (TYPE_MAX_VALUE (domain))
- - tree_to_double_int (TYPE_MIN_VALUE (domain))
- + double_int_one)
- .ext (TYPE_PRECISION (TREE_TYPE (domain)),
- TYPE_UNSIGNED (TREE_TYPE (domain)))
- .low;
+ len = wi::ext (wi::to_offset (TYPE_MAX_VALUE (domain))
+ - wi::to_offset (TYPE_MIN_VALUE (domain)) + 1,
+ TYPE_PRECISION (TREE_TYPE (domain)),
+ TYPE_SIGN (TREE_TYPE (domain))).to_uhwi ();
else
unbounded = true; /* Take as many as there are. */
}
diff --git a/gcc/cse.c b/gcc/cse.c
index 710c845c13d..3ca8e179757 100644
--- a/gcc/cse.c
+++ b/gcc/cse.c
@@ -2336,15 +2336,20 @@ hash_rtx_cb (const_rtx x, enum machine_mode mode,
+ (unsigned int) INTVAL (x));
return hash;
+ case CONST_WIDE_INT:
+ for (i = 0; i < CONST_WIDE_INT_NUNITS (x); i++)
+ hash += CONST_WIDE_INT_ELT (x, i);
+ return hash;
+
case CONST_DOUBLE:
/* This is like the general case, except that it only counts
the integers representing the constant. */
hash += (unsigned int) code + (unsigned int) GET_MODE (x);
- if (GET_MODE (x) != VOIDmode)
- hash += real_hash (CONST_DOUBLE_REAL_VALUE (x));
- else
+ if (TARGET_SUPPORTS_WIDE_INT == 0 && GET_MODE (x) == VOIDmode)
hash += ((unsigned int) CONST_DOUBLE_LOW (x)
+ (unsigned int) CONST_DOUBLE_HIGH (x));
+ else
+ hash += real_hash (CONST_DOUBLE_REAL_VALUE (x));
return hash;
case CONST_FIXED:
@@ -3779,6 +3784,7 @@ equiv_constant (rtx x)
/* See if we previously assigned a constant value to this SUBREG. */
if ((new_rtx = lookup_as_function (x, CONST_INT)) != 0
+ || (new_rtx = lookup_as_function (x, CONST_WIDE_INT)) != 0
|| (new_rtx = lookup_as_function (x, CONST_DOUBLE)) != 0
|| (new_rtx = lookup_as_function (x, CONST_FIXED)) != 0)
return new_rtx;
diff --git a/gcc/cselib.c b/gcc/cselib.c
index 6bdc482b91f..00a04baab6e 100644
--- a/gcc/cselib.c
+++ b/gcc/cselib.c
@@ -942,8 +942,7 @@ rtx_equal_for_cselib_1 (rtx x, rtx y, enum machine_mode memmode)
/* These won't be handled correctly by the code below. */
switch (GET_CODE (x))
{
- case CONST_DOUBLE:
- case CONST_FIXED:
+ CASE_CONST_UNIQUE:
case DEBUG_EXPR:
return 0;
@@ -1125,15 +1124,20 @@ cselib_hash_rtx (rtx x, int create, enum machine_mode memmode)
hash += ((unsigned) CONST_INT << 7) + UINTVAL (x);
return hash ? hash : (unsigned int) CONST_INT;
+ case CONST_WIDE_INT:
+ for (i = 0; i < CONST_WIDE_INT_NUNITS (x); i++)
+ hash += CONST_WIDE_INT_ELT (x, i);
+ return hash;
+
case CONST_DOUBLE:
/* This is like the general case, except that it only counts
the integers representing the constant. */
hash += (unsigned) code + (unsigned) GET_MODE (x);
- if (GET_MODE (x) != VOIDmode)
- hash += real_hash (CONST_DOUBLE_REAL_VALUE (x));
- else
+ if (TARGET_SUPPORTS_WIDE_INT == 0 && GET_MODE (x) == VOIDmode)
hash += ((unsigned) CONST_DOUBLE_LOW (x)
+ (unsigned) CONST_DOUBLE_HIGH (x));
+ else
+ hash += real_hash (CONST_DOUBLE_REAL_VALUE (x));
return hash ? hash : (unsigned int) CONST_DOUBLE;
case CONST_FIXED:
diff --git a/gcc/dbxout.c b/gcc/dbxout.c
index 5fbbdf6853b..e6c98850063 100644
--- a/gcc/dbxout.c
+++ b/gcc/dbxout.c
@@ -692,88 +692,39 @@ stabstr_U (unsigned HOST_WIDE_INT num)
static void
stabstr_O (tree cst)
{
- unsigned HOST_WIDE_INT high = TREE_INT_CST_HIGH (cst);
- unsigned HOST_WIDE_INT low = TREE_INT_CST_LOW (cst);
-
- char buf[128];
- char *p = buf + sizeof buf;
-
- /* GDB wants constants with no extra leading "1" bits, so
- we need to remove any sign-extension that might be
- present. */
- {
- const unsigned int width = TYPE_PRECISION (TREE_TYPE (cst));
- if (width == HOST_BITS_PER_DOUBLE_INT)
- ;
- else if (width > HOST_BITS_PER_WIDE_INT)
- high &= (((HOST_WIDE_INT) 1 << (width - HOST_BITS_PER_WIDE_INT)) - 1);
- else if (width == HOST_BITS_PER_WIDE_INT)
- high = 0;
- else
- high = 0, low &= (((HOST_WIDE_INT) 1 << width) - 1);
- }
+ int prec = TYPE_PRECISION (TREE_TYPE (cst));
+ int res_pres = prec % 3;
+ int i;
+ unsigned int digit;
/* Leading zero for base indicator. */
stabstr_C ('0');
/* If the value is zero, the base indicator will serve as the value
all by itself. */
- if (high == 0 && low == 0)
+ if (wi::eq_p (cst, 0))
return;
- /* If the high half is zero, we need only print the low half normally. */
- if (high == 0)
- NUMBER_FMT_LOOP (p, low, 8);
- else
+ /* GDB wants constants with no extra leading "1" bits, so
+ we need to remove any sign-extension that might be
+ present. */
+ if (res_pres == 1)
{
- /* When high != 0, we need to print enough zeroes from low to
- give the digits from high their proper place-values. Hence
- NUMBER_FMT_LOOP cannot be used. */
- const int n_digits = HOST_BITS_PER_WIDE_INT / 3;
- int i;
-
- for (i = 1; i <= n_digits; i++)
- {
- unsigned int digit = low % 8;
- low /= 8;
- *--p = '0' + digit;
- }
-
- /* Octal digits carry exactly three bits of information. The
- width of a HOST_WIDE_INT is not normally a multiple of three.
- Therefore, the next digit printed probably needs to carry
- information from both low and high. */
- if (HOST_BITS_PER_WIDE_INT % 3 != 0)
- {
- const int n_leftover_bits = HOST_BITS_PER_WIDE_INT % 3;
- const int n_bits_from_high = 3 - n_leftover_bits;
-
- const unsigned HOST_WIDE_INT
- low_mask = (((unsigned HOST_WIDE_INT)1) << n_leftover_bits) - 1;
- const unsigned HOST_WIDE_INT
- high_mask = (((unsigned HOST_WIDE_INT)1) << n_bits_from_high) - 1;
-
- unsigned int digit;
-
- /* At this point, only the bottom n_leftover_bits bits of low
- should be set. */
- gcc_assert (!(low & ~low_mask));
-
- digit = (low | ((high & high_mask) << n_leftover_bits));
- high >>= n_bits_from_high;
-
- *--p = '0' + digit;
- }
-
- /* Now we can format high in the normal manner. However, if
- the only bits of high that were set were handled by the
- digit split between low and high, high will now be zero, and
- we don't want to print extra digits in that case. */
- if (high)
- NUMBER_FMT_LOOP (p, high, 8);
+ digit = wi::extract_uhwi (cst, prec - 1, 1);
+ stabstr_C ('0' + digit);
+ }
+ else if (res_pres == 2)
+ {
+ digit = wi::extract_uhwi (cst, prec - 2, 2);
+ stabstr_C ('0' + digit);
}
- obstack_grow (&stabstr_ob, p, (buf + sizeof buf) - p);
+ prec -= res_pres;
+ for (i = prec - 3; i >= 0; i = i - 3)
+ {
+ digit = wi::extract_uhwi (cst, i, 3);
+ stabstr_C ('0' + digit);
+ }
}
/* Called whenever it is safe to break a stabs string into multiple
@@ -2301,10 +2252,7 @@ dbxout_type (tree type, int full)
if (TREE_CODE (value) == CONST_DECL)
value = DECL_INITIAL (value);
- if (TREE_INT_CST_HIGH (value) == 0)
- stabstr_D (TREE_INT_CST_LOW (value));
- else if (TREE_INT_CST_HIGH (value) == -1
- && (HOST_WIDE_INT) TREE_INT_CST_LOW (value) < 0)
+ if (cst_and_fits_in_hwi (value))
stabstr_D (TREE_INT_CST_LOW (value));
else
stabstr_O (value);
diff --git a/gcc/defaults.h b/gcc/defaults.h
index 1c48759fd80..8d89a751b75 100644
--- a/gcc/defaults.h
+++ b/gcc/defaults.h
@@ -471,6 +471,14 @@ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
your target, you should override these values by defining the
appropriate symbols in your tm.h file. */
+#if BITS_PER_UNIT == 8
+#define LOG2_BITS_PER_UNIT 3
+#elif BITS_PER_UNIT == 16
+#define LOG2_BITS_PER_UNIT 4
+#else
+#error Unknown BITS_PER_UNIT
+#endif
+
#ifndef BITS_PER_WORD
#define BITS_PER_WORD (BITS_PER_UNIT * UNITS_PER_WORD)
#endif
@@ -1392,6 +1400,14 @@ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
#define SWITCHABLE_TARGET 0
#endif
+/* If the target supports integers that are wider than two
+ HOST_WIDE_INTs on the host compiler, then the target should define
+ TARGET_SUPPORTS_WIDE_INT and make the appropriate fixups.
+ Otherwise the compiler really is not robust. */
+#ifndef TARGET_SUPPORTS_WIDE_INT
+#define TARGET_SUPPORTS_WIDE_INT 0
+#endif
+
#endif /* GCC_INSN_FLAGS_H */
#endif /* ! GCC_DEFAULTS_H */
diff --git a/gcc/dfp.c b/gcc/dfp.c
index 4f2abb195dc..8e798288acb 100644
--- a/gcc/dfp.c
+++ b/gcc/dfp.c
@@ -24,6 +24,7 @@ along with GCC; see the file COPYING3. If not see
#include "tree.h"
#include "tm_p.h"
#include "dfp.h"
+#include "wide-int.h"
/* The order of the following headers is important for making sure
decNumber structure is large enough to hold decimal128 digits. */
@@ -604,11 +605,11 @@ decimal_real_to_integer (const REAL_VALUE_TYPE *r)
return real_to_integer (&to);
}
-/* Likewise, but to an integer pair, HI+LOW. */
+/* Likewise, but returns a wide_int with PRECISION. *FAIL is set if the
+ value does not fit. */
-void
-decimal_real_to_integer2 (HOST_WIDE_INT *plow, HOST_WIDE_INT *phigh,
- const REAL_VALUE_TYPE *r)
+wide_int
+decimal_real_to_integer (const REAL_VALUE_TYPE *r, bool *fail, int precision)
{
decContext set;
decNumber dn, dn2, dn3;
@@ -628,7 +629,7 @@ decimal_real_to_integer2 (HOST_WIDE_INT *plow, HOST_WIDE_INT *phigh,
function. */
decNumberToString (&dn, string);
real_from_string (&to, string);
- real_to_integer2 (plow, phigh, &to);
+ return real_to_integer (&to, fail, precision);
}
/* Perform the decimal floating point operation described by CODE.
diff --git a/gcc/dfp.h b/gcc/dfp.h
index 268ceaae3ce..bf8c051a203 100644
--- a/gcc/dfp.h
+++ b/gcc/dfp.h
@@ -38,7 +38,7 @@ void decimal_real_convert (REAL_VALUE_TYPE *, enum machine_mode, const REAL_VALU
void decimal_real_to_decimal (char *, const REAL_VALUE_TYPE *, size_t, size_t, int);
void decimal_do_fix_trunc (REAL_VALUE_TYPE *, const REAL_VALUE_TYPE *);
void decimal_real_maxval (REAL_VALUE_TYPE *, int, enum machine_mode);
-void decimal_real_to_integer2 (HOST_WIDE_INT *, HOST_WIDE_INT *, const REAL_VALUE_TYPE *);
+wide_int decimal_real_to_integer (const REAL_VALUE_TYPE *, bool *, int);
HOST_WIDE_INT decimal_real_to_integer (const REAL_VALUE_TYPE *);
#ifdef TREE_CODE
diff --git a/gcc/doc/generic.texi b/gcc/doc/generic.texi
index 5b3b528e5b3..7afb055f8cb 100644
--- a/gcc/doc/generic.texi
+++ b/gcc/doc/generic.texi
@@ -1022,10 +1022,15 @@ As this example indicates, the operands are zero-indexed.
@node Constant expressions
@subsection Constant expressions
@tindex INTEGER_CST
-@findex TREE_INT_CST_HIGH
-@findex TREE_INT_CST_LOW
@findex tree_int_cst_lt
@findex tree_int_cst_equal
+@tindex tree_fits_uhwi_p
+@tindex tree_fits_shwi_p
+@tindex tree_to_uhwi
+@tindex tree_to_shwi
+@tindex TREE_INT_CST_NUNITS
+@tindex TREE_INT_CST_ELT
+@tindex TREE_INT_CST_LOW
@tindex REAL_CST
@tindex FIXED_CST
@tindex COMPLEX_CST
@@ -1044,36 +1049,18 @@ These nodes represent integer constants. Note that the type of these
constants is obtained with @code{TREE_TYPE}; they are not always of type
@code{int}. In particular, @code{char} constants are represented with
@code{INTEGER_CST} nodes. The value of the integer constant @code{e} is
-given by
-@smallexample
-((TREE_INT_CST_HIGH (e) << HOST_BITS_PER_WIDE_INT)
-+ TREE_INST_CST_LOW (e))
-@end smallexample
-@noindent
-HOST_BITS_PER_WIDE_INT is at least thirty-two on all platforms. Both
-@code{TREE_INT_CST_HIGH} and @code{TREE_INT_CST_LOW} return a
-@code{HOST_WIDE_INT}. The value of an @code{INTEGER_CST} is interpreted
-as a signed or unsigned quantity depending on the type of the constant.
-In general, the expression given above will overflow, so it should not
-be used to calculate the value of the constant.
-
-The variable @code{integer_zero_node} is an integer constant with value
-zero. Similarly, @code{integer_one_node} is an integer constant with
-value one. The @code{size_zero_node} and @code{size_one_node} variables
-are analogous, but have type @code{size_t} rather than @code{int}.
-
-The function @code{tree_int_cst_lt} is a predicate which holds if its
-first argument is less than its second. Both constants are assumed to
-have the same signedness (i.e., either both should be signed or both
-should be unsigned.) The full width of the constant is used when doing
-the comparison; the usual rules about promotions and conversions are
-ignored. Similarly, @code{tree_int_cst_equal} holds if the two
-constants are equal. The @code{tree_int_cst_sgn} function returns the
-sign of a constant. The value is @code{1}, @code{0}, or @code{-1}
-according on whether the constant is greater than, equal to, or less
-than zero. Again, the signedness of the constant's type is taken into
-account; an unsigned constant is never less than zero, no matter what
-its bit-pattern.
+represented in an array of HOST_WIDE_INT. There are enough elements
+in the array to represent the value without taking extra elements for
+redundant 0s or -1s. The number of elements used to represent @code{e}
+is available via @code{TREE_INT_CST_NUNITS}. Element @code{i} can be
+extracted by using @code{TREE_INT_CST_ELT (e, i)}.
+@code{TREE_INT_CST_LOW} is a shorthand for @code{TREE_INT_CST_ELT (e, 0)}.
+
+The functions @code{tree_fits_shwi_p} and @code{tree_fits_uhwi_p}
+can be used to tell if the value is small enough to fit in a
+signed HOST_WIDE_INT or an unsigned HOST_WIDE_INT respectively.
+The value can then be extracted using @code{tree_to_shwi} and
+@code{tree_to_uhwi}.
@item REAL_CST
diff --git a/gcc/doc/rtl.texi b/gcc/doc/rtl.texi
index 20b7187fbab..605a5a18734 100644
--- a/gcc/doc/rtl.texi
+++ b/gcc/doc/rtl.texi
@@ -1540,17 +1540,21 @@ Similarly, there is only one object for the integer whose value is
@findex const_double
@item (const_double:@var{m} @var{i0} @var{i1} @dots{})
-Represents either a floating-point constant of mode @var{m} or an
-integer constant too large to fit into @code{HOST_BITS_PER_WIDE_INT}
-bits but small enough to fit within twice that number of bits (GCC
-does not provide a mechanism to represent even larger constants). In
-the latter case, @var{m} will be @code{VOIDmode}. For integral values
-constants for modes with more bits than twice the number in
-@code{HOST_WIDE_INT} the implied high order bits of that constant are
-copies of the top bit of @code{CONST_DOUBLE_HIGH}. Note however that
-integral values are neither inherently signed nor inherently unsigned;
-where necessary, signedness is determined by the rtl operation
-instead.
+This represents either a floating-point constant of mode @var{m} or
+(on older ports that do not define
+@code{TARGET_SUPPORTS_WIDE_INT}) an integer constant too large to fit
+into @code{HOST_BITS_PER_WIDE_INT} bits but small enough to fit within
+twice that number of bits. In the latter case, @var{m} will be
+@code{VOIDmode}. For integral values constants for modes with more
+bits than twice the number in @code{HOST_WIDE_INT} the implied high
+order bits of that constant are copies of the top bit of
+@code{CONST_DOUBLE_HIGH}. Note however that integral values are
+neither inherently signed nor inherently unsigned; where necessary,
+signedness is determined by the rtl operation instead.
+
+On more modern ports, @code{CONST_DOUBLE} only represents floating
+point values. New ports define @code{TARGET_SUPPORTS_WIDE_INT} to
+make this designation.
@findex CONST_DOUBLE_LOW
If @var{m} is @code{VOIDmode}, the bits of the value are stored in
@@ -1565,6 +1569,37 @@ machine's or host machine's floating point format. To convert them to
the precise bit pattern used by the target machine, use the macro
@code{REAL_VALUE_TO_TARGET_DOUBLE} and friends (@pxref{Data Output}).
+@findex CONST_WIDE_INT
+@item (const_wide_int:@var{m} @var{nunits} @var{elt0} @dots{})
+This contains an array of @code{HOST_WIDE_INT}s that is large enough
+to hold any constant that can be represented on the target. This form
+of rtl is only used on targets that define
+@code{TARGET_SUPPORTS_WIDE_INT} to be nonzero and then
+@code{CONST_DOUBLE}s are only used to hold floating-point values. If
+the target leaves @code{TARGET_SUPPORTS_WIDE_INT} defined as 0,
+@code{CONST_WIDE_INT}s are not used and @code{CONST_DOUBLE}s are as
+they were before.
+
+The values are stored in a compressed format. The higher-order
+0s or -1s are not represented if they are just the logical sign
+extension of the number that is represented.
+
+@findex CONST_WIDE_INT_VEC
+@item CONST_WIDE_INT_VEC (@var{code})
+Returns the entire array of @code{HOST_WIDE_INT}s that are used to
+store the value. This macro should be rarely used.
+
+@findex CONST_WIDE_INT_NUNITS
+@item CONST_WIDE_INT_NUNITS (@var{code})
+The number of @code{HOST_WIDE_INT}s used to represent the number.
+Note that this generally is smaller than the number of
+@code{HOST_WIDE_INT}s implied by the mode size.
+
+@findex CONST_WIDE_INT_ELT
+@item CONST_WIDE_INT_ELT (@var{code},@var{i})
+Returns the @code{i}th element of the array. Element 0 contains
+the low order bits of the constant.
+
+
@findex const_fixed
@item (const_fixed:@var{m} @dots{})
Represents a fixed-point constant of mode @var{m}.
diff --git a/gcc/doc/tm.texi b/gcc/doc/tm.texi
index 4eed4511b60..fd4e4fd6628 100644
--- a/gcc/doc/tm.texi
+++ b/gcc/doc/tm.texi
@@ -9704,18 +9704,6 @@ Returns the negative of the floating point value @var{x}.
Returns the absolute value of @var{x}.
@end deftypefn
-@deftypefn Macro void REAL_VALUE_TO_INT (HOST_WIDE_INT @var{low}, HOST_WIDE_INT @var{high}, REAL_VALUE_TYPE @var{x})
-Converts a floating point value @var{x} into a double-precision integer
-which is then stored into @var{low} and @var{high}. If the value is not
-integral, it is truncated.
-@end deftypefn
-
-@deftypefn Macro void REAL_VALUE_FROM_INT (REAL_VALUE_TYPE @var{x}, HOST_WIDE_INT @var{low}, HOST_WIDE_INT @var{high}, enum machine_mode @var{mode})
-Converts a double-precision integer found in @var{low} and @var{high},
-into a floating point value which is then stored into @var{x}. The
-value is truncated to fit in mode @var{mode}.
-@end deftypefn
-
@node Mode Switching
@section Mode Switching Instructions
@cindex mode switching
@@ -11024,7 +11012,7 @@ function version at run-time for a given set of function versions.
body must be generated.
@end deftypefn
-@deftypefn {Target Hook} bool TARGET_CAN_USE_DOLOOP_P (double_int @var{iterations}, double_int @var{iterations_max}, unsigned int @var{loop_depth}, bool @var{entered_at_top})
+@deftypefn {Target Hook} bool TARGET_CAN_USE_DOLOOP_P (const widest_int @var{&iterations}, const widest_int @var{&iterations_max}, unsigned int @var{loop_depth}, bool @var{entered_at_top})
Return true if it is possible to use low-overhead loops (@code{doloop_end}
and @code{doloop_begin}) for a particular loop. @var{iterations} gives the
exact number of iterations, or 0 if not known. @var{iterations_max} gives
@@ -11440,3 +11428,49 @@ If defined, this function returns an appropriate alignment in bits for an atomic
@deftypefn {Target Hook} void TARGET_ATOMIC_ASSIGN_EXPAND_FENV (tree *@var{hold}, tree *@var{clear}, tree *@var{update})
ISO C11 requires atomic compound assignments that may raise floating-point exceptions to raise exceptions corresponding to the arithmetic operation whose result was successfully stored in a compare-and-exchange sequence. This requires code equivalent to calls to @code{feholdexcept}, @code{feclearexcept} and @code{feupdateenv} to be generated at appropriate points in the compare-and-exchange sequence. This hook should set @code{*@var{hold}} to an expression equivalent to the call to @code{feholdexcept}, @code{*@var{clear}} to an expression equivalent to the call to @code{feclearexcept} and @code{*@var{update}} to an expression equivalent to the call to @code{feupdateenv}. The three expressions are @code{NULL_TREE} on entry to the hook and may be left as @code{NULL_TREE} if no code is required in a particular place. The default implementation leaves all three expressions as @code{NULL_TREE}. The @code{__atomic_feraiseexcept} function from @code{libatomic} may be of use as part of the code generated in @code{*@var{update}}.
@end deftypefn
+
+@defmac TARGET_SUPPORTS_WIDE_INT
+
+On older ports, large integers are stored in @code{CONST_DOUBLE} rtl
+objects. Newer ports define @code{TARGET_SUPPORTS_WIDE_INT} to be nonzero
+to indicate that large integers are stored in
+@code{CONST_WIDE_INT} rtl objects. The @code{CONST_WIDE_INT} allows
+very large integer constants to be represented. @code{CONST_DOUBLE}
+is limited to twice the size of the host's @code{HOST_WIDE_INT}
+representation.
+
+Converting a port mostly requires looking for the places where
+@code{CONST_DOUBLE}s are used with @code{VOIDmode} and replacing that
+code with code that accesses @code{CONST_WIDE_INT}s. @samp{"grep -i
+const_double"} at the port level gets you to 95% of the changes that
+need to be made. There are a few places that require a deeper look.
+
+@itemize @bullet
+@item
+There is no equivalent to @code{hval} and @code{lval} for
+@code{CONST_WIDE_INT}s. This would be difficult to express in the md
+language since there are a variable number of elements.
+
+Most ports only check that @code{hval} is either 0 or -1 to see if the
+value is small. As mentioned above, this will no longer be necessary
+since small constants are always @code{CONST_INT}. Of course there
+are still a few exceptions, the alpha's constraint used by the zap
+instruction certainly requires careful examination by C code.
+However, all the current code does is pass the hval and lval to C
+code, so evolving the C code to look at the @code{CONST_WIDE_INT} is
+not really a large change.
+
+@item
+Because there is no standard template that ports use to materialize
+constants, there is likely to be some futzing that is unique to each
+port in this code.
+
+@item
+The rtx costs may have to be adjusted to properly account for larger
+constants that are represented as @code{CONST_WIDE_INT}.
+@end itemize
+
+All in all, it does not take long to convert ports that the
+maintainer is familiar with.
+
+@end defmac
diff --git a/gcc/doc/tm.texi.in b/gcc/doc/tm.texi.in
index 3c59714a862..9c030df6990 100644
--- a/gcc/doc/tm.texi.in
+++ b/gcc/doc/tm.texi.in
@@ -7362,18 +7362,6 @@ Returns the negative of the floating point value @var{x}.
Returns the absolute value of @var{x}.
@end deftypefn
-@deftypefn Macro void REAL_VALUE_TO_INT (HOST_WIDE_INT @var{low}, HOST_WIDE_INT @var{high}, REAL_VALUE_TYPE @var{x})
-Converts a floating point value @var{x} into a double-precision integer
-which is then stored into @var{low} and @var{high}. If the value is not
-integral, it is truncated.
-@end deftypefn
-
-@deftypefn Macro void REAL_VALUE_FROM_INT (REAL_VALUE_TYPE @var{x}, HOST_WIDE_INT @var{low}, HOST_WIDE_INT @var{high}, enum machine_mode @var{mode})
-Converts a double-precision integer found in @var{low} and @var{high},
-into a floating point value which is then stored into @var{x}. The
-value is truncated to fit in mode @var{mode}.
-@end deftypefn
-
@node Mode Switching
@section Mode Switching Instructions
@cindex mode switching
@@ -8425,3 +8413,49 @@ and the associated definitions of those functions.
@hook TARGET_ATOMIC_ALIGN_FOR_MODE
@hook TARGET_ATOMIC_ASSIGN_EXPAND_FENV
+
+@defmac TARGET_SUPPORTS_WIDE_INT
+
+On older ports, large integers are stored in @code{CONST_DOUBLE} rtl
+objects. Newer ports define @code{TARGET_SUPPORTS_WIDE_INT} to be nonzero
+to indicate that large integers are stored in
+@code{CONST_WIDE_INT} rtl objects. The @code{CONST_WIDE_INT} allows
+very large integer constants to be represented. @code{CONST_DOUBLE}
+is limited to twice the size of the host's @code{HOST_WIDE_INT}
+representation.
+
+Converting a port mostly requires looking for the places where
+@code{CONST_DOUBLE}s are used with @code{VOIDmode} and replacing that
+code with code that accesses @code{CONST_WIDE_INT}s. @samp{"grep -i
+const_double"} at the port level gets you to 95% of the changes that
+need to be made. There are a few places that require a deeper look.
+
+@itemize @bullet
+@item
+There is no equivalent to @code{hval} and @code{lval} for
+@code{CONST_WIDE_INT}s. This would be difficult to express in the md
+language since there are a variable number of elements.
+
+Most ports only check that @code{hval} is either 0 or -1 to see if the
+value is small. As mentioned above, this will no longer be necessary
+since small constants are always @code{CONST_INT}. Of course there
+are still a few exceptions; the alpha's constraint used by the zap
+instruction certainly requires careful examination by C code.
+However, all the current code does is pass the hval and lval to C
+code, so evolving the C code to look at the @code{CONST_WIDE_INT} is
+not really a large change.
+
+@item
+Because there is no standard template that ports use to materialize
+constants, there is likely to be some futzing that is unique to each
+port in this code.
+
+@item
+The rtx costs may have to be adjusted to properly account for larger
+constants that are represented as @code{CONST_WIDE_INT}.
+@end itemize
+
+All in all it does not take long to convert ports that the
+maintainer is familiar with.
+
+@end defmac
diff --git a/gcc/dojump.c b/gcc/dojump.c
index 1c3a2bee872..17486900173 100644
--- a/gcc/dojump.c
+++ b/gcc/dojump.c
@@ -166,6 +166,7 @@ static bool
prefer_and_bit_test (enum machine_mode mode, int bitnum)
{
bool speed_p;
+ wide_int mask = wi::set_bit_in_zero (bitnum, GET_MODE_PRECISION (mode));
if (and_test == 0)
{
@@ -186,8 +187,7 @@ prefer_and_bit_test (enum machine_mode mode, int bitnum)
}
/* Fill in the integers. */
- XEXP (and_test, 1)
- = immed_double_int_const (double_int_zero.set_bit (bitnum), mode);
+ XEXP (and_test, 1) = immed_wide_int_const (mask, mode);
XEXP (XEXP (shift_test, 0), 1) = GEN_INT (bitnum);
speed_p = optimize_insn_for_speed_p ();
diff --git a/gcc/double-int.h b/gcc/double-int.h
index 5ca9ccf61f9..a2b11ba8419 100644
--- a/gcc/double-int.h
+++ b/gcc/double-int.h
@@ -20,6 +20,8 @@ along with GCC; see the file COPYING3. If not see
#ifndef DOUBLE_INT_H
#define DOUBLE_INT_H
+#include "wide-int.h"
+
/* A large integer is currently represented as a pair of HOST_WIDE_INTs.
It therefore represents a number with precision of
2 * HOST_BITS_PER_WIDE_INT bits (it is however possible that the
@@ -435,4 +437,36 @@ void mpz_set_double_int (mpz_t, double_int, bool);
double_int mpz_get_double_int (const_tree, mpz_t, bool);
#endif
+namespace wi
+{
+ template <>
+ struct int_traits <double_int>
+ {
+ static const enum precision_type precision_type = CONST_PRECISION;
+ static const bool host_dependent_precision = true;
+ static const unsigned int precision = HOST_BITS_PER_DOUBLE_INT;
+ static unsigned int get_precision (const double_int &);
+ static wi::storage_ref decompose (HOST_WIDE_INT *, unsigned int,
+ const double_int &);
+ };
+}
+
+inline unsigned int
+wi::int_traits <double_int>::get_precision (const double_int &)
+{
+ return precision;
+}
+
+inline wi::storage_ref
+wi::int_traits <double_int>::decompose (HOST_WIDE_INT *scratch, unsigned int p,
+ const double_int &x)
+{
+ gcc_checking_assert (precision == p);
+ scratch[0] = x.low;
+ if ((x.high == 0 && scratch[0] >= 0) || (x.high == -1 && scratch[0] < 0))
+ return wi::storage_ref (scratch, 1, precision);
+ scratch[1] = x.high;
+ return wi::storage_ref (scratch, 2, precision);
+}
+
#endif /* DOUBLE_INT_H */
diff --git a/gcc/dwarf2out.c b/gcc/dwarf2out.c
index 12723265e87..5874d73821b 100644
--- a/gcc/dwarf2out.c
+++ b/gcc/dwarf2out.c
@@ -357,6 +357,16 @@ dump_struct_debug (tree type, enum debug_info_usage usage,
#endif
+/* Get the number of HOST_WIDE_INTs needed to represent the precision
+ of the number. */
+
+static unsigned int
+get_full_len (const wide_int &op)
+{
+ return ((op.get_precision () + HOST_BITS_PER_WIDE_INT - 1)
+ / HOST_BITS_PER_WIDE_INT);
+}
+
static bool
should_emit_struct_debug (tree type, enum debug_info_usage usage)
{
@@ -1392,6 +1402,9 @@ dw_val_equal_p (dw_val_node *a, dw_val_node *b)
return (a->v.val_double.high == b->v.val_double.high
&& a->v.val_double.low == b->v.val_double.low);
+ case dw_val_class_wide_int:
+ return *a->v.val_wide == *b->v.val_wide;
+
case dw_val_class_vec:
{
size_t a_len = a->v.val_vec.elt_size * a->v.val_vec.length;
@@ -1648,6 +1661,10 @@ size_of_loc_descr (dw_loc_descr_ref loc)
case dw_val_class_const_double:
size += HOST_BITS_PER_DOUBLE_INT / BITS_PER_UNIT;
break;
+ case dw_val_class_wide_int:
+ size += (get_full_len (*loc->dw_loc_oprnd2.v.val_wide)
+ * HOST_BITS_PER_WIDE_INT / BITS_PER_UNIT);
+ break;
default:
gcc_unreachable ();
}
@@ -1825,6 +1842,20 @@ output_loc_operands (dw_loc_descr_ref loc, int for_eh_or_skip)
second, NULL);
}
break;
+ case dw_val_class_wide_int:
+ {
+ int i;
+ int len = get_full_len (*val2->v.val_wide);
+ if (WORDS_BIG_ENDIAN)
+ for (i = len - 1; i >= 0; --i)
+ dw2_asm_output_data (HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR,
+ val2->v.val_wide->elt (i), NULL);
+ else
+ for (i = 0; i < len; ++i)
+ dw2_asm_output_data (HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR,
+ val2->v.val_wide->elt (i), NULL);
+ }
+ break;
case dw_val_class_addr:
gcc_assert (val1->v.val_unsigned == DWARF2_ADDR_SIZE);
dw2_asm_output_addr_rtx (DWARF2_ADDR_SIZE, val2->v.val_addr, NULL);
@@ -2034,6 +2065,21 @@ output_loc_operands (dw_loc_descr_ref loc, int for_eh_or_skip)
dw2_asm_output_data (l, second, NULL);
}
break;
+ case dw_val_class_wide_int:
+ {
+ int i;
+ int len = get_full_len (*val2->v.val_wide);
+ l = HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
+
+ dw2_asm_output_data (1, len * l, NULL);
+ if (WORDS_BIG_ENDIAN)
+ for (i = len - 1; i >= 0; --i)
+ dw2_asm_output_data (l, val2->v.val_wide->elt (i), NULL);
+ else
+ for (i = 0; i < len; ++i)
+ dw2_asm_output_data (l, val2->v.val_wide->elt (i), NULL);
+ }
+ break;
default:
gcc_unreachable ();
}
@@ -3126,7 +3172,7 @@ static void add_AT_location_description (dw_die_ref, enum dwarf_attribute,
static void add_data_member_location_attribute (dw_die_ref, tree);
static bool add_const_value_attribute (dw_die_ref, rtx);
static void insert_int (HOST_WIDE_INT, unsigned, unsigned char *);
-static void insert_double (double_int, unsigned char *);
+static void insert_wide_int (const wide_int &, unsigned char *, int);
static void insert_float (const_rtx, unsigned char *);
static rtx rtl_for_decl_location (tree);
static bool add_location_or_const_value_attribute (dw_die_ref, tree, bool,
@@ -3758,6 +3804,21 @@ AT_unsigned (dw_attr_ref a)
return a->dw_attr_val.v.val_unsigned;
}
+/* Add an unsigned wide integer attribute value to a DIE. */
+
+static inline void
+add_AT_wide (dw_die_ref die, enum dwarf_attribute attr_kind,
+ const wide_int& w)
+{
+ dw_attr_node attr;
+
+ attr.dw_attr = attr_kind;
+ attr.dw_attr_val.val_class = dw_val_class_wide_int;
+ attr.dw_attr_val.v.val_wide = ggc_alloc_cleared_wide_int ();
+ *attr.dw_attr_val.v.val_wide = w;
+ add_dwarf_attr (die, &attr);
+}
+
/* Add an unsigned double integer attribute value to a DIE. */
static inline void
@@ -5332,6 +5393,21 @@ print_die (dw_die_ref die, FILE *outfile)
a->dw_attr_val.v.val_double.high,
a->dw_attr_val.v.val_double.low);
break;
+ case dw_val_class_wide_int:
+ {
+ int i = a->dw_attr_val.v.val_wide->get_len ();
+ fprintf (outfile, "constant (");
+ gcc_assert (i > 0);
+ if (a->dw_attr_val.v.val_wide->elt (i - 1) == 0)
+ fprintf (outfile, "0x");
+ fprintf (outfile, HOST_WIDE_INT_PRINT_HEX,
+ a->dw_attr_val.v.val_wide->elt (--i));
+ while (--i >= 0)
+ fprintf (outfile, HOST_WIDE_INT_PRINT_PADDED_HEX,
+ a->dw_attr_val.v.val_wide->elt (i));
+ fprintf (outfile, ")");
+ break;
+ }
case dw_val_class_vec:
fprintf (outfile, "floating-point or vector constant");
break;
@@ -5505,6 +5581,9 @@ attr_checksum (dw_attr_ref at, struct md5_ctx *ctx, int *mark)
case dw_val_class_const_double:
CHECKSUM (at->dw_attr_val.v.val_double);
break;
+ case dw_val_class_wide_int:
+ CHECKSUM (*at->dw_attr_val.v.val_wide);
+ break;
case dw_val_class_vec:
CHECKSUM_BLOCK (at->dw_attr_val.v.val_vec.array,
(at->dw_attr_val.v.val_vec.length
@@ -5782,6 +5861,12 @@ attr_checksum_ordered (enum dwarf_tag tag, dw_attr_ref at,
CHECKSUM (at->dw_attr_val.v.val_double);
break;
+ case dw_val_class_wide_int:
+ CHECKSUM_ULEB128 (DW_FORM_block);
+ CHECKSUM_ULEB128 (sizeof (*at->dw_attr_val.v.val_wide));
+ CHECKSUM (*at->dw_attr_val.v.val_wide);
+ break;
+
case dw_val_class_vec:
CHECKSUM_ULEB128 (DW_FORM_block);
CHECKSUM_ULEB128 (at->dw_attr_val.v.val_vec.length
@@ -6264,6 +6349,8 @@ same_dw_val_p (const dw_val_node *v1, const dw_val_node *v2, int *mark)
case dw_val_class_const_double:
return v1->v.val_double.high == v2->v.val_double.high
&& v1->v.val_double.low == v2->v.val_double.low;
+ case dw_val_class_wide_int:
+ return *v1->v.val_wide == *v2->v.val_wide;
case dw_val_class_vec:
if (v1->v.val_vec.length != v2->v.val_vec.length
|| v1->v.val_vec.elt_size != v2->v.val_vec.elt_size)
@@ -7819,6 +7906,13 @@ size_of_die (dw_die_ref die)
if (HOST_BITS_PER_WIDE_INT >= 64)
size++; /* block */
break;
+ case dw_val_class_wide_int:
+ size += (get_full_len (*a->dw_attr_val.v.val_wide)
+ * HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR);
+ if (get_full_len (*a->dw_attr_val.v.val_wide) * HOST_BITS_PER_WIDE_INT
+ > 64)
+ size++; /* block */
+ break;
case dw_val_class_vec:
size += constant_size (a->dw_attr_val.v.val_vec.length
* a->dw_attr_val.v.val_vec.elt_size)
@@ -8188,6 +8282,20 @@ value_format (dw_attr_ref a)
default:
return DW_FORM_block1;
}
+ case dw_val_class_wide_int:
+ switch (get_full_len (*a->dw_attr_val.v.val_wide) * HOST_BITS_PER_WIDE_INT)
+ {
+ case 8:
+ return DW_FORM_data1;
+ case 16:
+ return DW_FORM_data2;
+ case 32:
+ return DW_FORM_data4;
+ case 64:
+ return DW_FORM_data8;
+ default:
+ return DW_FORM_block1;
+ }
case dw_val_class_vec:
switch (constant_size (a->dw_attr_val.v.val_vec.length
* a->dw_attr_val.v.val_vec.elt_size))
@@ -8627,6 +8735,32 @@ output_die (dw_die_ref die)
}
break;
+ case dw_val_class_wide_int:
+ {
+ int i;
+ int len = get_full_len (*a->dw_attr_val.v.val_wide);
+ int l = HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR;
+ if (len * HOST_BITS_PER_WIDE_INT > 64)
+ dw2_asm_output_data (1, get_full_len (*a->dw_attr_val.v.val_wide) * l,
+ NULL);
+
+ if (WORDS_BIG_ENDIAN)
+ for (i = len - 1; i >= 0; --i)
+ {
+ dw2_asm_output_data (l, a->dw_attr_val.v.val_wide->elt (i),
+ name);
+ name = NULL;
+ }
+ else
+ for (i = 0; i < len; ++i)
+ {
+ dw2_asm_output_data (l, a->dw_attr_val.v.val_wide->elt (i),
+ name);
+ name = NULL;
+ }
+ }
+ break;
+
case dw_val_class_vec:
{
unsigned int elt_size = a->dw_attr_val.v.val_vec.elt_size;
@@ -10320,19 +10454,19 @@ simple_type_size_in_bits (const_tree type)
return TYPE_ALIGN (type);
}
-/* Similarly, but return a double_int instead of UHWI. */
+/* Similarly, but return an offset_int instead of UHWI. */
-static inline double_int
-double_int_type_size_in_bits (const_tree type)
+static inline offset_int
+offset_int_type_size_in_bits (const_tree type)
{
if (TREE_CODE (type) == ERROR_MARK)
- return double_int::from_uhwi (BITS_PER_WORD);
+ return BITS_PER_WORD;
else if (TYPE_SIZE (type) == NULL_TREE)
- return double_int_zero;
+ return 0;
else if (TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST)
- return tree_to_double_int (TYPE_SIZE (type));
+ return wi::to_offset (TYPE_SIZE (type));
else
- return double_int::from_uhwi (TYPE_ALIGN (type));
+ return TYPE_ALIGN (type);
}
/* Given a pointer to a tree node for a subrange type, return a pointer
@@ -11826,9 +11960,7 @@ clz_loc_descriptor (rtx rtl, enum machine_mode mode,
rtx msb;
if (GET_MODE_CLASS (mode) != MODE_INT
- || GET_MODE (XEXP (rtl, 0)) != mode
- || (GET_CODE (rtl) == CLZ
- && GET_MODE_BITSIZE (mode) > HOST_BITS_PER_DOUBLE_INT))
+ || GET_MODE (XEXP (rtl, 0)) != mode)
return NULL;
op0 = mem_loc_descriptor (XEXP (rtl, 0), mode, mem_mode,
@@ -11872,9 +12004,9 @@ clz_loc_descriptor (rtx rtl, enum machine_mode mode,
msb = GEN_INT ((unsigned HOST_WIDE_INT) 1
<< (GET_MODE_BITSIZE (mode) - 1));
else
- msb = immed_double_const (0, (unsigned HOST_WIDE_INT) 1
- << (GET_MODE_BITSIZE (mode)
- - HOST_BITS_PER_WIDE_INT - 1), mode);
+ msb = immed_wide_int_const
+ (wi::set_bit_in_zero (GET_MODE_PRECISION (mode) - 1,
+ GET_MODE_PRECISION (mode)), mode);
if (GET_CODE (msb) == CONST_INT && INTVAL (msb) < 0)
tmp = new_loc_descr (HOST_BITS_PER_WIDE_INT == 32
? DW_OP_const4u : HOST_BITS_PER_WIDE_INT == 64
@@ -12800,10 +12932,14 @@ mem_loc_descriptor (rtx rtl, enum machine_mode mode,
{
dw_die_ref type_die;
- /* Note that a CONST_DOUBLE rtx could represent either an integer
- or a floating-point constant. A CONST_DOUBLE is used whenever
- the constant requires more than one word in order to be
- adequately represented. We output CONST_DOUBLEs as blocks. */
+ /* Note that if TARGET_SUPPORTS_WIDE_INT == 0, a
+ CONST_DOUBLE rtx could represent either a large integer
+ or a floating-point constant. If TARGET_SUPPORTS_WIDE_INT != 0,
+ the value is always a floating point constant.
+
+ When it is an integer, a CONST_DOUBLE is used whenever
+ the constant requires 2 HWIs to be adequately represented.
+ We output CONST_DOUBLEs as blocks. */
if (mode == VOIDmode
|| (GET_MODE (rtl) == VOIDmode
&& GET_MODE_BITSIZE (mode) != HOST_BITS_PER_DOUBLE_INT))
@@ -12816,7 +12952,16 @@ mem_loc_descriptor (rtx rtl, enum machine_mode mode,
mem_loc_result->dw_loc_oprnd1.val_class = dw_val_class_die_ref;
mem_loc_result->dw_loc_oprnd1.v.val_die_ref.die = type_die;
mem_loc_result->dw_loc_oprnd1.v.val_die_ref.external = 0;
- if (SCALAR_FLOAT_MODE_P (mode))
+#if TARGET_SUPPORTS_WIDE_INT == 0
+ if (!SCALAR_FLOAT_MODE_P (mode))
+ {
+ mem_loc_result->dw_loc_oprnd2.val_class
+ = dw_val_class_const_double;
+ mem_loc_result->dw_loc_oprnd2.v.val_double
+ = rtx_to_double_int (rtl);
+ }
+ else
+#endif
{
unsigned int length = GET_MODE_SIZE (mode);
unsigned char *array
@@ -12828,13 +12973,26 @@ mem_loc_descriptor (rtx rtl, enum machine_mode mode,
mem_loc_result->dw_loc_oprnd2.v.val_vec.elt_size = 4;
mem_loc_result->dw_loc_oprnd2.v.val_vec.array = array;
}
- else
- {
- mem_loc_result->dw_loc_oprnd2.val_class
- = dw_val_class_const_double;
- mem_loc_result->dw_loc_oprnd2.v.val_double
- = rtx_to_double_int (rtl);
- }
+ }
+ break;
+
+ case CONST_WIDE_INT:
+ if (!dwarf_strict)
+ {
+ dw_die_ref type_die;
+
+ type_die = base_type_for_mode (mode,
+ GET_MODE_CLASS (mode) == MODE_INT);
+ if (type_die == NULL)
+ return NULL;
+ mem_loc_result = new_loc_descr (DW_OP_GNU_const_type, 0, 0);
+ mem_loc_result->dw_loc_oprnd1.val_class = dw_val_class_die_ref;
+ mem_loc_result->dw_loc_oprnd1.v.val_die_ref.die = type_die;
+ mem_loc_result->dw_loc_oprnd1.v.val_die_ref.external = 0;
+ mem_loc_result->dw_loc_oprnd2.val_class
+ = dw_val_class_wide_int;
+ mem_loc_result->dw_loc_oprnd2.v.val_wide = ggc_alloc_cleared_wide_int ();
+ *mem_loc_result->dw_loc_oprnd2.v.val_wide = std::make_pair (rtl, mode);
}
break;
@@ -13305,7 +13463,15 @@ loc_descriptor (rtx rtl, enum machine_mode mode,
adequately represented. We output CONST_DOUBLEs as blocks. */
loc_result = new_loc_descr (DW_OP_implicit_value,
GET_MODE_SIZE (mode), 0);
- if (SCALAR_FLOAT_MODE_P (mode))
+#if TARGET_SUPPORTS_WIDE_INT == 0
+ if (!SCALAR_FLOAT_MODE_P (mode))
+ {
+ loc_result->dw_loc_oprnd2.val_class = dw_val_class_const_double;
+ loc_result->dw_loc_oprnd2.v.val_double
+ = rtx_to_double_int (rtl);
+ }
+ else
+#endif
{
unsigned int length = GET_MODE_SIZE (mode);
unsigned char *array
@@ -13317,12 +13483,20 @@ loc_descriptor (rtx rtl, enum machine_mode mode,
loc_result->dw_loc_oprnd2.v.val_vec.elt_size = 4;
loc_result->dw_loc_oprnd2.v.val_vec.array = array;
}
- else
- {
- loc_result->dw_loc_oprnd2.val_class = dw_val_class_const_double;
- loc_result->dw_loc_oprnd2.v.val_double
- = rtx_to_double_int (rtl);
- }
+ }
+ break;
+
+ case CONST_WIDE_INT:
+ if (mode == VOIDmode)
+ mode = GET_MODE (rtl);
+
+ if (mode != VOIDmode && (dwarf_version >= 4 || !dwarf_strict))
+ {
+ loc_result = new_loc_descr (DW_OP_implicit_value,
+ GET_MODE_SIZE (mode), 0);
+ loc_result->dw_loc_oprnd2.val_class = dw_val_class_wide_int;
+ loc_result->dw_loc_oprnd2.v.val_wide = ggc_alloc_cleared_wide_int ();
+ *loc_result->dw_loc_oprnd2.v.val_wide = std::make_pair (rtl, mode);
}
break;
@@ -13338,6 +13512,7 @@ loc_descriptor (rtx rtl, enum machine_mode mode,
ggc_alloc_atomic (length * elt_size);
unsigned int i;
unsigned char *p;
+ enum machine_mode imode = GET_MODE_INNER (mode);
gcc_assert (mode == GET_MODE (rtl) || VOIDmode == GET_MODE (rtl));
switch (GET_MODE_CLASS (mode))
@@ -13346,15 +13521,7 @@ loc_descriptor (rtx rtl, enum machine_mode mode,
for (i = 0, p = array; i < length; i++, p += elt_size)
{
rtx elt = CONST_VECTOR_ELT (rtl, i);
- double_int val = rtx_to_double_int (elt);
-
- if (elt_size <= sizeof (HOST_WIDE_INT))
- insert_int (val.to_shwi (), elt_size, p);
- else
- {
- gcc_assert (elt_size == 2 * sizeof (HOST_WIDE_INT));
- insert_double (val, p);
- }
+ insert_wide_int (std::make_pair (elt, imode), p, elt_size);
}
break;
@@ -14676,15 +14843,10 @@ simple_decl_align_in_bits (const_tree decl)
/* Return the result of rounding T up to ALIGN. */
-static inline double_int
-round_up_to_align (double_int t, unsigned int align)
+static inline offset_int
+round_up_to_align (const offset_int &t, unsigned int align)
{
- double_int alignd = double_int::from_uhwi (align);
- t += alignd;
- t += double_int_minus_one;
- t = t.div (alignd, true, TRUNC_DIV_EXPR);
- t *= alignd;
- return t;
+ return wi::udiv_trunc (t + align - 1, align) * align;
}
/* Given a pointer to a FIELD_DECL, compute and return the byte offset of the
@@ -14697,9 +14859,9 @@ round_up_to_align (double_int t, unsigned int align)
static HOST_WIDE_INT
field_byte_offset (const_tree decl)
{
- double_int object_offset_in_bits;
- double_int object_offset_in_bytes;
- double_int bitpos_int;
+ offset_int object_offset_in_bits;
+ offset_int object_offset_in_bytes;
+ offset_int bitpos_int;
if (TREE_CODE (decl) == ERROR_MARK)
return 0;
@@ -14712,21 +14874,21 @@ field_byte_offset (const_tree decl)
if (TREE_CODE (bit_position (decl)) != INTEGER_CST)
return 0;
- bitpos_int = tree_to_double_int (bit_position (decl));
+ bitpos_int = wi::to_offset (bit_position (decl));
#ifdef PCC_BITFIELD_TYPE_MATTERS
if (PCC_BITFIELD_TYPE_MATTERS)
{
tree type;
tree field_size_tree;
- double_int deepest_bitpos;
- double_int field_size_in_bits;
+ offset_int deepest_bitpos;
+ offset_int field_size_in_bits;
unsigned int type_align_in_bits;
unsigned int decl_align_in_bits;
- double_int type_size_in_bits;
+ offset_int type_size_in_bits;
type = field_type (decl);
- type_size_in_bits = double_int_type_size_in_bits (type);
+ type_size_in_bits = offset_int_type_size_in_bits (type);
type_align_in_bits = simple_type_align_in_bits (type);
field_size_tree = DECL_SIZE (decl);
@@ -14738,7 +14900,7 @@ field_byte_offset (const_tree decl)
/* If the size of the field is not constant, use the type size. */
if (TREE_CODE (field_size_tree) == INTEGER_CST)
- field_size_in_bits = tree_to_double_int (field_size_tree);
+ field_size_in_bits = wi::to_offset (field_size_tree);
else
field_size_in_bits = type_size_in_bits;
@@ -14802,7 +14964,7 @@ field_byte_offset (const_tree decl)
object_offset_in_bits
= round_up_to_align (object_offset_in_bits, type_align_in_bits);
- if (object_offset_in_bits.ugt (bitpos_int))
+ if (wi::gtu_p (object_offset_in_bits, bitpos_int))
{
object_offset_in_bits = deepest_bitpos - type_size_in_bits;
@@ -14816,8 +14978,7 @@ field_byte_offset (const_tree decl)
object_offset_in_bits = bitpos_int;
object_offset_in_bytes
- = object_offset_in_bits.div (double_int::from_uhwi (BITS_PER_UNIT),
- true, TRUNC_DIV_EXPR);
+ = wi::lrshift (object_offset_in_bits, LOG2_BITS_PER_UNIT);
return object_offset_in_bytes.to_shwi ();
}
@@ -14993,22 +15154,36 @@ extract_int (const unsigned char *src, unsigned int size)
return val;
}
-/* Writes double_int values to dw_vec_const array. */
+/* Writes wide_int values to dw_vec_const array. */
static void
-insert_double (double_int val, unsigned char *dest)
+insert_wide_int (const wide_int &val, unsigned char *dest, int elt_size)
{
- unsigned char *p0 = dest;
- unsigned char *p1 = dest + sizeof (HOST_WIDE_INT);
+ int i;
- if (WORDS_BIG_ENDIAN)
+ if (elt_size <= HOST_BITS_PER_WIDE_INT/BITS_PER_UNIT)
{
- p0 = p1;
- p1 = dest;
+ insert_int ((HOST_WIDE_INT) val.elt (0), elt_size, dest);
+ return;
}
- insert_int ((HOST_WIDE_INT) val.low, sizeof (HOST_WIDE_INT), p0);
- insert_int ((HOST_WIDE_INT) val.high, sizeof (HOST_WIDE_INT), p1);
+ /* We'd have to extend this code to support odd sizes. */
+ gcc_assert (elt_size % (HOST_BITS_PER_WIDE_INT / BITS_PER_UNIT) == 0);
+
+ int n = elt_size / (HOST_BITS_PER_WIDE_INT / BITS_PER_UNIT);
+
+ if (WORDS_BIG_ENDIAN)
+ for (i = n - 1; i >= 0; i--)
+ {
+ insert_int ((HOST_WIDE_INT) val.elt (i), sizeof (HOST_WIDE_INT), dest);
+ dest += sizeof (HOST_WIDE_INT);
+ }
+ else
+ for (i = 0; i < n; i++)
+ {
+ insert_int ((HOST_WIDE_INT) val.elt (i), sizeof (HOST_WIDE_INT), dest);
+ dest += sizeof (HOST_WIDE_INT);
+ }
}
/* Writes floating point values to dw_vec_const array. */
@@ -15053,6 +15228,11 @@ add_const_value_attribute (dw_die_ref die, rtx rtl)
}
return true;
+ case CONST_WIDE_INT:
+ add_AT_wide (die, DW_AT_const_value,
+ std::make_pair (rtl, GET_MODE (rtl)));
+ return true;
+
case CONST_DOUBLE:
/* Note that a CONST_DOUBLE rtx could represent either an integer or a
floating-point constant. A CONST_DOUBLE is used whenever the
@@ -15061,7 +15241,10 @@ add_const_value_attribute (dw_die_ref die, rtx rtl)
{
enum machine_mode mode = GET_MODE (rtl);
- if (SCALAR_FLOAT_MODE_P (mode))
+ if (TARGET_SUPPORTS_WIDE_INT == 0 && !SCALAR_FLOAT_MODE_P (mode))
+ add_AT_double (die, DW_AT_const_value,
+ CONST_DOUBLE_HIGH (rtl), CONST_DOUBLE_LOW (rtl));
+ else
{
unsigned int length = GET_MODE_SIZE (mode);
unsigned char *array = (unsigned char *) ggc_alloc_atomic (length);
@@ -15069,9 +15252,6 @@ add_const_value_attribute (dw_die_ref die, rtx rtl)
insert_float (rtl, array);
add_AT_vec (die, DW_AT_const_value, length / 4, 4, array);
}
- else
- add_AT_double (die, DW_AT_const_value,
- CONST_DOUBLE_HIGH (rtl), CONST_DOUBLE_LOW (rtl));
}
return true;
@@ -15084,6 +15264,7 @@ add_const_value_attribute (dw_die_ref die, rtx rtl)
(length * elt_size);
unsigned int i;
unsigned char *p;
+ enum machine_mode imode = GET_MODE_INNER (mode);
switch (GET_MODE_CLASS (mode))
{
@@ -15091,15 +15272,7 @@ add_const_value_attribute (dw_die_ref die, rtx rtl)
for (i = 0, p = array; i < length; i++, p += elt_size)
{
rtx elt = CONST_VECTOR_ELT (rtl, i);
- double_int val = rtx_to_double_int (elt);
-
- if (elt_size <= sizeof (HOST_WIDE_INT))
- insert_int (val.to_shwi (), elt_size, p);
- else
- {
- gcc_assert (elt_size == 2 * sizeof (HOST_WIDE_INT));
- insert_double (val, p);
- }
+ insert_wide_int (std::make_pair (elt, imode), p, elt_size);
}
break;
@@ -16237,7 +16410,7 @@ add_bound_info (dw_die_ref subrange_die, enum dwarf_attribute bound_attr, tree b
consumers will treat DW_FORM_data[1248] as unsigned values,
regardless of the underlying type. */
else if (prec <= HOST_BITS_PER_WIDE_INT
- || TREE_INT_CST_HIGH (bound) == 0)
+ || tree_fits_uhwi_p (bound))
{
if (TYPE_UNSIGNED (TREE_TYPE (bound)))
add_AT_unsigned (subrange_die, bound_attr,
@@ -16250,8 +16423,7 @@ add_bound_info (dw_die_ref subrange_die, enum dwarf_attribute bound_attr, tree b
the precision of its type. The precision and signedness
of the type will be necessary to re-interpret it
unambiguously. */
- add_AT_double (subrange_die, bound_attr, TREE_INT_CST_HIGH (bound),
- TREE_INT_CST_LOW (bound));
+ add_AT_wide (subrange_die, bound_attr, bound);
}
break;
@@ -17410,8 +17582,7 @@ gen_enumeration_type_die (tree type, dw_die_ref context_die)
/* Enumeration constants may be wider than HOST_WIDE_INT. Handle
that here. TODO: This should be re-worked to use correct
signed/unsigned double tags for all cases. */
- add_AT_double (enum_die, DW_AT_const_value,
- TREE_INT_CST_HIGH (value), TREE_INT_CST_LOW (value));
+ add_AT_wide (enum_die, DW_AT_const_value, value);
}
add_gnat_descriptive_type_attribute (type_die, type, context_die);
@@ -23549,6 +23720,9 @@ hash_loc_operands (dw_loc_descr_ref loc, hashval_t hash)
hash = iterative_hash_object (val2->v.val_double.low, hash);
hash = iterative_hash_object (val2->v.val_double.high, hash);
break;
+ case dw_val_class_wide_int:
+ hash = iterative_hash_object (*val2->v.val_wide, hash);
+ break;
case dw_val_class_addr:
hash = iterative_hash_rtx (val2->v.val_addr, hash);
break;
@@ -23638,6 +23812,9 @@ hash_loc_operands (dw_loc_descr_ref loc, hashval_t hash)
hash = iterative_hash_object (val2->v.val_double.low, hash);
hash = iterative_hash_object (val2->v.val_double.high, hash);
break;
+ case dw_val_class_wide_int:
+ hash = iterative_hash_object (*val2->v.val_wide, hash);
+ break;
default:
gcc_unreachable ();
}
@@ -23786,6 +23963,8 @@ compare_loc_operands (dw_loc_descr_ref x, dw_loc_descr_ref y)
case dw_val_class_const_double:
return valx2->v.val_double.low == valy2->v.val_double.low
&& valx2->v.val_double.high == valy2->v.val_double.high;
+ case dw_val_class_wide_int:
+ return *valx2->v.val_wide == *valy2->v.val_wide;
case dw_val_class_addr:
return rtx_equal_p (valx2->v.val_addr, valy2->v.val_addr);
default:
@@ -23829,6 +24008,8 @@ compare_loc_operands (dw_loc_descr_ref x, dw_loc_descr_ref y)
case dw_val_class_const_double:
return valx2->v.val_double.low == valy2->v.val_double.low
&& valx2->v.val_double.high == valy2->v.val_double.high;
+ case dw_val_class_wide_int:
+ return *valx2->v.val_wide == *valy2->v.val_wide;
default:
gcc_unreachable ();
}
diff --git a/gcc/dwarf2out.h b/gcc/dwarf2out.h
index 696fef938f1..bac50774e2c 100644
--- a/gcc/dwarf2out.h
+++ b/gcc/dwarf2out.h
@@ -21,6 +21,7 @@ along with GCC; see the file COPYING3. If not see
#define GCC_DWARF2OUT_H 1
#include "dwarf2.h" /* ??? Remove this once only used by dwarf2foo.c. */
+#include "wide-int.h"
typedef struct die_struct *dw_die_ref;
typedef const struct die_struct *const_dw_die_ref;
@@ -29,6 +30,7 @@ typedef struct dw_val_node *dw_val_ref;
typedef struct dw_cfi_node *dw_cfi_ref;
typedef struct dw_loc_descr_node *dw_loc_descr_ref;
typedef struct dw_loc_list_struct *dw_loc_list_ref;
+typedef wide_int *wide_int_ptr;
/* Call frames are described using a sequence of Call Frame
@@ -136,6 +138,7 @@ enum dw_val_class
dw_val_class_const,
dw_val_class_unsigned_const,
dw_val_class_const_double,
+ dw_val_class_wide_int,
dw_val_class_vec,
dw_val_class_flag,
dw_val_class_die_ref,
@@ -176,6 +179,7 @@ struct GTY(()) dw_val_node {
HOST_WIDE_INT GTY ((default)) val_int;
unsigned HOST_WIDE_INT GTY ((tag ("dw_val_class_unsigned_const"))) val_unsigned;
double_int GTY ((tag ("dw_val_class_const_double"))) val_double;
+ wide_int_ptr GTY ((tag ("dw_val_class_wide_int"))) val_wide;
dw_vec_const GTY ((tag ("dw_val_class_vec"))) val_vec;
struct dw_val_die_union
{
diff --git a/gcc/emit-rtl.c b/gcc/emit-rtl.c
index e3fd0a5132b..f2b8257de7c 100644
--- a/gcc/emit-rtl.c
+++ b/gcc/emit-rtl.c
@@ -126,6 +126,9 @@ rtx cc0_rtx;
static GTY ((if_marked ("ggc_marked_p"), param_is (struct rtx_def)))
htab_t const_int_htab;
+static GTY ((if_marked ("ggc_marked_p"), param_is (struct rtx_def)))
+ htab_t const_wide_int_htab;
+
/* A hash table storing register attribute structures. */
static GTY ((if_marked ("ggc_marked_p"), param_is (struct reg_attrs)))
htab_t reg_attrs_htab;
@@ -147,6 +150,11 @@ static void set_used_decls (tree);
static void mark_label_nuses (rtx);
static hashval_t const_int_htab_hash (const void *);
static int const_int_htab_eq (const void *, const void *);
+#if TARGET_SUPPORTS_WIDE_INT
+static hashval_t const_wide_int_htab_hash (const void *);
+static int const_wide_int_htab_eq (const void *, const void *);
+static rtx lookup_const_wide_int (rtx);
+#endif
static hashval_t const_double_htab_hash (const void *);
static int const_double_htab_eq (const void *, const void *);
static rtx lookup_const_double (rtx);
@@ -181,6 +189,43 @@ const_int_htab_eq (const void *x, const void *y)
return (INTVAL ((const_rtx) x) == *((const HOST_WIDE_INT *) y));
}
+#if TARGET_SUPPORTS_WIDE_INT
+/* Returns a hash code for X (which is a really a CONST_WIDE_INT). */
+
+static hashval_t
+const_wide_int_htab_hash (const void *x)
+{
+ int i;
+ HOST_WIDE_INT hash = 0;
+ const_rtx xr = (const_rtx) x;
+
+ for (i = 0; i < CONST_WIDE_INT_NUNITS (xr); i++)
+ hash += CONST_WIDE_INT_ELT (xr, i);
+
+ return (hashval_t) hash;
+}
+
+/* Returns nonzero if the value represented by X (which is really a
+ CONST_WIDE_INT) is the same as that given by Y (which is really a
+ CONST_WIDE_INT). */
+
+static int
+const_wide_int_htab_eq (const void *x, const void *y)
+{
+ int i;
+ const_rtx xr = (const_rtx) x;
+ const_rtx yr = (const_rtx) y;
+ if (CONST_WIDE_INT_NUNITS (xr) != CONST_WIDE_INT_NUNITS (yr))
+ return 0;
+
+ for (i = 0; i < CONST_WIDE_INT_NUNITS (xr); i++)
+ if (CONST_WIDE_INT_ELT (xr, i) != CONST_WIDE_INT_ELT (yr, i))
+ return 0;
+
+ return 1;
+}
+#endif
+
/* Returns a hash code for X (which is really a CONST_DOUBLE). */
static hashval_t
const_double_htab_hash (const void *x)
@@ -188,7 +233,7 @@ const_double_htab_hash (const void *x)
const_rtx const value = (const_rtx) x;
hashval_t h;
- if (GET_MODE (value) == VOIDmode)
+ if (TARGET_SUPPORTS_WIDE_INT == 0 && GET_MODE (value) == VOIDmode)
h = CONST_DOUBLE_LOW (value) ^ CONST_DOUBLE_HIGH (value);
else
{
@@ -208,7 +253,7 @@ const_double_htab_eq (const void *x, const void *y)
if (GET_MODE (a) != GET_MODE (b))
return 0;
- if (GET_MODE (a) == VOIDmode)
+ if (TARGET_SUPPORTS_WIDE_INT == 0 && GET_MODE (a) == VOIDmode)
return (CONST_DOUBLE_LOW (a) == CONST_DOUBLE_LOW (b)
&& CONST_DOUBLE_HIGH (a) == CONST_DOUBLE_HIGH (b));
else
@@ -446,6 +491,7 @@ const_fixed_from_fixed_value (FIXED_VALUE_TYPE value, enum machine_mode mode)
return lookup_const_fixed (fixed);
}
+#if TARGET_SUPPORTS_WIDE_INT == 0
/* Constructs double_int from rtx CST. */
double_int
@@ -465,17 +511,70 @@ rtx_to_double_int (const_rtx cst)
return r;
}
+#endif
+#if TARGET_SUPPORTS_WIDE_INT
+/* Determine whether CONST_WIDE_INT WINT already exists in the hash table.
+ If so, return its counterpart; otherwise add it to the hash table and
+ return it. */
-/* Return a CONST_DOUBLE or CONST_INT for a value specified as
- a double_int. */
+static rtx
+lookup_const_wide_int (rtx wint)
+{
+ void **slot = htab_find_slot (const_wide_int_htab, wint, INSERT);
+ if (*slot == 0)
+ *slot = wint;
+
+ return (rtx) *slot;
+}
+#endif
+
+/* Return an rtx constant for V, given that the constant has mode MODE.
+ The returned rtx will be a CONST_INT if V fits, otherwise it will be
+ a CONST_DOUBLE (if !TARGET_SUPPORTS_WIDE_INT) or a CONST_WIDE_INT
+ (if TARGET_SUPPORTS_WIDE_INT). */
rtx
-immed_double_int_const (double_int i, enum machine_mode mode)
+immed_wide_int_const (const wide_int_ref &v, enum machine_mode mode)
{
- return immed_double_const (i.low, i.high, mode);
+ unsigned int len = v.get_len ();
+ unsigned int prec = GET_MODE_PRECISION (mode);
+
+ /* Allow truncation but not extension since we do not know if the
+ number is signed or unsigned. */
+ gcc_assert (prec <= v.get_precision ());
+
+ if (len < 2 || prec <= HOST_BITS_PER_WIDE_INT)
+ return gen_int_mode (v.elt (0), mode);
+
+#if TARGET_SUPPORTS_WIDE_INT
+ {
+ unsigned int i;
+ rtx value;
+ unsigned int blocks_needed
+ = (prec + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT;
+
+ if (len > blocks_needed)
+ len = blocks_needed;
+
+ value = const_wide_int_alloc (len);
+
+ /* It is so tempting to just put the mode in here. Must control
+ myself ... */
+ PUT_MODE (value, VOIDmode);
+ CWI_PUT_NUM_ELEM (value, len);
+
+ for (i = 0; i < len; i++)
+ CONST_WIDE_INT_ELT (value, i) = v.elt (i);
+
+ return lookup_const_wide_int (value);
+ }
+#else
+ return immed_double_const (v.elt (0), v.elt (1), mode);
+#endif
}
+#if TARGET_SUPPORTS_WIDE_INT == 0
/* Return a CONST_DOUBLE or CONST_INT for a value specified as a pair
of ints: I0 is the low-order word and I1 is the high-order word.
For values that are larger than HOST_BITS_PER_DOUBLE_INT, the
@@ -527,6 +626,7 @@ immed_double_const (HOST_WIDE_INT i0, HOST_WIDE_INT i1, enum machine_mode mode)
return lookup_const_double (value);
}
+#endif
rtx
gen_rtx_REG (enum machine_mode mode, unsigned int regno)
@@ -5629,11 +5729,15 @@ init_emit_once (void)
enum machine_mode mode;
enum machine_mode double_mode;
- /* Initialize the CONST_INT, CONST_DOUBLE, CONST_FIXED, and memory attribute
- hash tables. */
+ /* Initialize the CONST_INT, CONST_WIDE_INT, CONST_DOUBLE,
+ CONST_FIXED, and memory attribute hash tables. */
const_int_htab = htab_create_ggc (37, const_int_htab_hash,
const_int_htab_eq, NULL);
+#if TARGET_SUPPORTS_WIDE_INT
+ const_wide_int_htab = htab_create_ggc (37, const_wide_int_htab_hash,
+ const_wide_int_htab_eq, NULL);
+#endif
const_double_htab = htab_create_ggc (37, const_double_htab_hash,
const_double_htab_eq, NULL);
@@ -5695,9 +5799,9 @@ init_emit_once (void)
else
const_true_rtx = gen_rtx_CONST_INT (VOIDmode, STORE_FLAG_VALUE);
- REAL_VALUE_FROM_INT (dconst0, 0, 0, double_mode);
- REAL_VALUE_FROM_INT (dconst1, 1, 0, double_mode);
- REAL_VALUE_FROM_INT (dconst2, 2, 0, double_mode);
+ real_from_integer (&dconst0, double_mode, 0, SIGNED);
+ real_from_integer (&dconst1, double_mode, 1, SIGNED);
+ real_from_integer (&dconst2, double_mode, 2, SIGNED);
dconstm1 = dconst1;
dconstm1.sign = 1;
diff --git a/gcc/explow.c b/gcc/explow.c
index 48e91a6444b..bc97c964e61 100644
--- a/gcc/explow.c
+++ b/gcc/explow.c
@@ -96,38 +96,9 @@ plus_constant (enum machine_mode mode, rtx x, HOST_WIDE_INT c)
switch (code)
{
- case CONST_INT:
- if (GET_MODE_BITSIZE (mode) > HOST_BITS_PER_WIDE_INT)
- {
- double_int di_x = double_int::from_shwi (INTVAL (x));
- double_int di_c = double_int::from_shwi (c);
-
- bool overflow;
- double_int v = di_x.add_with_sign (di_c, false, &overflow);
- if (overflow)
- gcc_unreachable ();
-
- return immed_double_int_const (v, mode);
- }
-
- return gen_int_mode (UINTVAL (x) + c, mode);
-
- case CONST_DOUBLE:
- {
- double_int di_x = double_int::from_pair (CONST_DOUBLE_HIGH (x),
- CONST_DOUBLE_LOW (x));
- double_int di_c = double_int::from_shwi (c);
-
- bool overflow;
- double_int v = di_x.add_with_sign (di_c, false, &overflow);
- if (overflow)
- /* Sorry, we have no way to represent overflows this wide.
- To fix, add constant support wider than CONST_DOUBLE. */
- gcc_assert (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_DOUBLE_INT);
-
- return immed_double_int_const (v, mode);
- }
-
+ CASE_CONST_SCALAR_INT:
+ return immed_wide_int_const (wi::add (std::make_pair (x, mode), c),
+ mode);
case MEM:
/* If this is a reference to the constant pool, try replacing it with
a reference to a new constant. If the resulting address isn't
diff --git a/gcc/expmed.c b/gcc/expmed.c
index 7c1c979f774..e76b6fcc724 100644
--- a/gcc/expmed.c
+++ b/gcc/expmed.c
@@ -62,7 +62,6 @@ static rtx extract_fixed_bit_field (enum machine_mode, rtx,
static rtx extract_fixed_bit_field_1 (enum machine_mode, rtx,
unsigned HOST_WIDE_INT,
unsigned HOST_WIDE_INT, rtx, int);
-static rtx mask_rtx (enum machine_mode, int, int, int);
static rtx lshift_value (enum machine_mode, unsigned HOST_WIDE_INT, int);
static rtx extract_split_bit_field (rtx, unsigned HOST_WIDE_INT,
unsigned HOST_WIDE_INT, int);
@@ -70,6 +69,19 @@ static void do_cmp_and_jump (rtx, rtx, enum rtx_code, enum machine_mode, rtx);
static rtx expand_smod_pow2 (enum machine_mode, rtx, HOST_WIDE_INT);
static rtx expand_sdiv_pow2 (enum machine_mode, rtx, HOST_WIDE_INT);
+/* Return a constant integer mask value of mode MODE with BITSIZE ones
+ followed by BITPOS zeros, or the complement of that if COMPLEMENT.
+ The mask is truncated if necessary to the width of mode MODE. The
+ mask is zero-extended if BITSIZE+BITPOS is too small for MODE. */
+
+static inline rtx
+mask_rtx (enum machine_mode mode, int bitpos, int bitsize, bool complement)
+{
+ return immed_wide_int_const
+ (wi::shifted_mask (bitpos, bitsize, complement,
+ GET_MODE_PRECISION (mode)), mode);
+}
+
/* Test whether a value is zero of a power of two. */
#define EXACT_POWER_OF_2_OR_ZERO_P(x) \
(((x) & ((x) - (unsigned HOST_WIDE_INT) 1)) == 0)
@@ -1885,26 +1897,6 @@ extract_fixed_bit_field_1 (enum machine_mode tmode, rtx op0,
return expand_shift (RSHIFT_EXPR, mode, op0,
GET_MODE_BITSIZE (mode) - bitsize, target, 0);
}
-
-/* Return a constant integer (CONST_INT or CONST_DOUBLE) mask value
- of mode MODE with BITSIZE ones followed by BITPOS zeros, or the
- complement of that if COMPLEMENT. The mask is truncated if
- necessary to the width of mode MODE. The mask is zero-extended if
- BITSIZE+BITPOS is too small for MODE. */
-
-static rtx
-mask_rtx (enum machine_mode mode, int bitpos, int bitsize, int complement)
-{
- double_int mask;
-
- mask = double_int::mask (bitsize);
- mask = mask.llshift (bitpos, HOST_BITS_PER_DOUBLE_INT);
-
- if (complement)
- mask = ~mask;
-
- return immed_double_int_const (mask, mode);
-}
/* Return a constant integer (CONST_INT or CONST_DOUBLE) rtx with the value
VALUE << BITPOS. */
@@ -1913,12 +1905,7 @@ static rtx
lshift_value (enum machine_mode mode, unsigned HOST_WIDE_INT value,
int bitpos)
{
- double_int val;
-
- val = double_int::from_uhwi (value);
- val = val.llshift (bitpos, HOST_BITS_PER_DOUBLE_INT);
-
- return immed_double_int_const (val, mode);
+ return immed_wide_int_const (wi::lshift (value, bitpos), mode);
}
/* Extract a bit field that is split across two words
@@ -3154,38 +3141,22 @@ expand_mult (enum machine_mode mode, rtx op0, rtx op1, rtx target,
only if the constant value exactly fits in an `unsigned int' without
any truncation. This means that multiplying by negative values does
not work; results are off by 2^32 on a 32 bit machine. */
-
if (CONST_INT_P (scalar_op1))
{
coeff = INTVAL (scalar_op1);
is_neg = coeff < 0;
}
+#if TARGET_SUPPORTS_WIDE_INT
+ else if (CONST_WIDE_INT_P (scalar_op1))
+#else
else if (CONST_DOUBLE_AS_INT_P (scalar_op1))
+#endif
{
- /* If we are multiplying in DImode, it may still be a win
- to try to work with shifts and adds. */
- if (CONST_DOUBLE_HIGH (scalar_op1) == 0
- && (CONST_DOUBLE_LOW (scalar_op1) > 0
- || (CONST_DOUBLE_LOW (scalar_op1) < 0
- && EXACT_POWER_OF_2_OR_ZERO_P
- (CONST_DOUBLE_LOW (scalar_op1)))))
- {
- coeff = CONST_DOUBLE_LOW (scalar_op1);
- is_neg = false;
- }
- else if (CONST_DOUBLE_LOW (scalar_op1) == 0)
- {
- coeff = CONST_DOUBLE_HIGH (scalar_op1);
- if (EXACT_POWER_OF_2_OR_ZERO_P (coeff))
- {
- int shift = floor_log2 (coeff) + HOST_BITS_PER_WIDE_INT;
- if (shift < HOST_BITS_PER_DOUBLE_INT - 1
- || mode_bitsize <= HOST_BITS_PER_DOUBLE_INT)
- return expand_shift (LSHIFT_EXPR, mode, op0,
- shift, target, unsignedp);
- }
- goto skip_synth;
- }
+ int shift = wi::exact_log2 (std::make_pair (scalar_op1, mode));
+ /* Perfect power of 2 (other than 1, which is handled above). */
+ if (shift > 0)
+ return expand_shift (LSHIFT_EXPR, mode, op0,
+ shift, target, unsignedp);
else
goto skip_synth;
}
@@ -3362,7 +3333,6 @@ choose_multiplier (unsigned HOST_WIDE_INT d, int n, int precision,
unsigned HOST_WIDE_INT *multiplier_ptr,
int *post_shift_ptr, int *lgup_ptr)
{
- double_int mhigh, mlow;
int lgup, post_shift;
int pow, pow2;
@@ -3374,23 +3344,13 @@ choose_multiplier (unsigned HOST_WIDE_INT d, int n, int precision,
pow = n + lgup;
pow2 = n + lgup - precision;
- /* We could handle this with some effort, but this case is much
- better handled directly with a scc insn, so rely on caller using
- that. */
- gcc_assert (pow != HOST_BITS_PER_DOUBLE_INT);
-
/* mlow = 2^(N + lgup)/d */
- double_int val = double_int_zero.set_bit (pow);
- mlow = val.div (double_int::from_uhwi (d), true, TRUNC_DIV_EXPR);
+ wide_int val = wi::set_bit_in_zero (pow, HOST_BITS_PER_DOUBLE_INT);
+ wide_int mlow = wi::udiv_trunc (val, d);
/* mhigh = (2^(N + lgup) + 2^(N + lgup - precision))/d */
- val |= double_int_zero.set_bit (pow2);
- mhigh = val.div (double_int::from_uhwi (d), true, TRUNC_DIV_EXPR);
-
- gcc_assert (!mhigh.high || val.high - d < d);
- gcc_assert (mhigh.high <= 1 && mlow.high <= 1);
- /* Assert that mlow < mhigh. */
- gcc_assert (mlow.ult (mhigh));
+ val |= wi::set_bit_in_zero (pow2, HOST_BITS_PER_DOUBLE_INT);
+ wide_int mhigh = wi::udiv_trunc (val, d);
/* If precision == N, then mlow, mhigh exceed 2^N
(but they do not exceed 2^(N+1)). */
@@ -3398,14 +3358,15 @@ choose_multiplier (unsigned HOST_WIDE_INT d, int n, int precision,
/* Reduce to lowest terms. */
for (post_shift = lgup; post_shift > 0; post_shift--)
{
- int shft = HOST_BITS_PER_WIDE_INT - 1;
- unsigned HOST_WIDE_INT ml_lo = (mlow.high << shft) | (mlow.low >> 1);
- unsigned HOST_WIDE_INT mh_lo = (mhigh.high << shft) | (mhigh.low >> 1);
+ unsigned HOST_WIDE_INT ml_lo = wi::extract_uhwi (mlow, 1,
+ HOST_BITS_PER_WIDE_INT);
+ unsigned HOST_WIDE_INT mh_lo = wi::extract_uhwi (mhigh, 1,
+ HOST_BITS_PER_WIDE_INT);
if (ml_lo >= mh_lo)
break;
- mlow = double_int::from_uhwi (ml_lo);
- mhigh = double_int::from_uhwi (mh_lo);
+ mlow = wi::uhwi (ml_lo, HOST_BITS_PER_DOUBLE_INT);
+ mhigh = wi::uhwi (mh_lo, HOST_BITS_PER_DOUBLE_INT);
}
*post_shift_ptr = post_shift;
@@ -3413,13 +3374,13 @@ choose_multiplier (unsigned HOST_WIDE_INT d, int n, int precision,
if (n < HOST_BITS_PER_WIDE_INT)
{
unsigned HOST_WIDE_INT mask = ((unsigned HOST_WIDE_INT) 1 << n) - 1;
- *multiplier_ptr = mhigh.low & mask;
- return mhigh.low >= mask;
+ *multiplier_ptr = mhigh.to_uhwi () & mask;
+ return mhigh.to_uhwi () >= mask;
}
else
{
- *multiplier_ptr = mhigh.low;
- return mhigh.high;
+ *multiplier_ptr = mhigh.to_uhwi ();
+ return wi::extract_uhwi (mhigh, HOST_BITS_PER_WIDE_INT, 1);
}
}
@@ -3686,9 +3647,9 @@ expmed_mult_highpart (enum machine_mode mode, rtx op0, rtx op1,
static rtx
expand_smod_pow2 (enum machine_mode mode, rtx op0, HOST_WIDE_INT d)
{
- unsigned HOST_WIDE_INT masklow, maskhigh;
rtx result, temp, shift, label;
int logd;
+ int prec = GET_MODE_PRECISION (mode);
logd = floor_log2 (d);
result = gen_reg_rtx (mode);
@@ -3701,8 +3662,8 @@ expand_smod_pow2 (enum machine_mode mode, rtx op0, HOST_WIDE_INT d)
mode, 0, -1);
if (signmask)
{
+ HOST_WIDE_INT masklow = ((HOST_WIDE_INT) 1 << logd) - 1;
signmask = force_reg (mode, signmask);
- masklow = ((HOST_WIDE_INT) 1 << logd) - 1;
shift = GEN_INT (GET_MODE_BITSIZE (mode) - logd);
/* Use the rtx_cost of a LSHIFTRT instruction to determine
@@ -3749,19 +3710,11 @@ expand_smod_pow2 (enum machine_mode mode, rtx op0, HOST_WIDE_INT d)
modulus. By including the signbit in the operation, many targets
can avoid an explicit compare operation in the following comparison
against zero. */
-
- masklow = ((HOST_WIDE_INT) 1 << logd) - 1;
- if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
- {
- masklow |= HOST_WIDE_INT_M1U << (GET_MODE_BITSIZE (mode) - 1);
- maskhigh = -1;
- }
- else
- maskhigh = HOST_WIDE_INT_M1U
- << (GET_MODE_BITSIZE (mode) - HOST_BITS_PER_WIDE_INT - 1);
+ wide_int mask = wi::mask (logd, false, prec);
+ mask = wi::set_bit (mask, prec - 1);
temp = expand_binop (mode, and_optab, op0,
- immed_double_const (masklow, maskhigh, mode),
+ immed_wide_int_const (mask, mode),
result, 1, OPTAB_LIB_WIDEN);
if (temp != result)
emit_move_insn (result, temp);
@@ -3771,10 +3724,10 @@ expand_smod_pow2 (enum machine_mode mode, rtx op0, HOST_WIDE_INT d)
temp = expand_binop (mode, sub_optab, result, const1_rtx, result,
0, OPTAB_LIB_WIDEN);
- masklow = HOST_WIDE_INT_M1U << logd;
- maskhigh = -1;
+
+ mask = wi::mask (logd, true, prec);
temp = expand_binop (mode, ior_optab, temp,
- immed_double_const (masklow, maskhigh, mode),
+ immed_wide_int_const (mask, mode),
result, 1, OPTAB_LIB_WIDEN);
temp = expand_binop (mode, add_optab, temp, const1_rtx, result,
0, OPTAB_LIB_WIDEN);
@@ -5013,24 +4966,16 @@ make_tree (tree type, rtx x)
switch (GET_CODE (x))
{
case CONST_INT:
- {
- HOST_WIDE_INT hi = 0;
-
- if (INTVAL (x) < 0
- && !(TYPE_UNSIGNED (type)
- && (GET_MODE_BITSIZE (TYPE_MODE (type))
- < HOST_BITS_PER_WIDE_INT)))
- hi = -1;
-
- t = build_int_cst_wide (type, INTVAL (x), hi);
-
- return t;
- }
+ case CONST_WIDE_INT:
+ t = wide_int_to_tree (type, std::make_pair (x, TYPE_MODE (type)));
+ return t;
case CONST_DOUBLE:
- if (GET_MODE (x) == VOIDmode)
- t = build_int_cst_wide (type,
- CONST_DOUBLE_LOW (x), CONST_DOUBLE_HIGH (x));
+ STATIC_ASSERT (HOST_BITS_PER_WIDE_INT * 2 <= MAX_BITSIZE_MODE_ANY_INT);
+ if (TARGET_SUPPORTS_WIDE_INT == 0 && GET_MODE (x) == VOIDmode)
+ t = wide_int_to_tree (type,
+ wide_int::from_array (&CONST_DOUBLE_LOW (x), 2,
+ HOST_BITS_PER_WIDE_INT * 2));
else
{
REAL_VALUE_TYPE d;
diff --git a/gcc/expr.c b/gcc/expr.c
index 767b889d899..2868d9d3443 100644
--- a/gcc/expr.c
+++ b/gcc/expr.c
@@ -711,64 +711,32 @@ convert_modes (enum machine_mode mode, enum machine_mode oldmode, rtx x, int uns
if (mode == oldmode)
return x;
- /* There is one case that we must handle specially: If we are converting
- a CONST_INT into a mode whose size is twice HOST_BITS_PER_WIDE_INT and
- we are to interpret the constant as unsigned, gen_lowpart will do
- the wrong if the constant appears negative. What we want to do is
- make the high-order word of the constant zero, not all ones. */
-
- if (unsignedp && GET_MODE_CLASS (mode) == MODE_INT
- && GET_MODE_BITSIZE (mode) == HOST_BITS_PER_DOUBLE_INT
- && CONST_INT_P (x) && INTVAL (x) < 0)
+ if (CONST_SCALAR_INT_P (x) && GET_MODE_CLASS (mode) == MODE_INT)
{
- double_int val = double_int::from_uhwi (INTVAL (x));
-
- /* We need to zero extend VAL. */
- if (oldmode != VOIDmode)
- val = val.zext (GET_MODE_BITSIZE (oldmode));
-
- return immed_double_int_const (val, mode);
+ /* If the caller did not tell us the old mode, then there is not
+ much to do with respect to canonicalization. We have to
+ assume that all the bits are significant. */
+ if (GET_MODE_CLASS (oldmode) != MODE_INT)
+ oldmode = MAX_MODE_INT;
+ wide_int w = wide_int::from (std::make_pair (x, oldmode),
+ GET_MODE_PRECISION (mode),
+ unsignedp ? UNSIGNED : SIGNED);
+ return immed_wide_int_const (w, mode);
}
/* We can do this with a gen_lowpart if both desired and current modes
are integer, and this is either a constant integer, a register, or a
- non-volatile MEM. Except for the constant case where MODE is no
- wider than HOST_BITS_PER_WIDE_INT, we must be narrowing the operand. */
-
- if ((CONST_INT_P (x)
- && GET_MODE_PRECISION (mode) <= HOST_BITS_PER_WIDE_INT)
- || (GET_MODE_CLASS (mode) == MODE_INT
- && GET_MODE_CLASS (oldmode) == MODE_INT
- && (CONST_DOUBLE_AS_INT_P (x)
- || (GET_MODE_PRECISION (mode) <= GET_MODE_PRECISION (oldmode)
- && ((MEM_P (x) && ! MEM_VOLATILE_P (x)
- && direct_load[(int) mode])
- || (REG_P (x)
- && (! HARD_REGISTER_P (x)
- || HARD_REGNO_MODE_OK (REGNO (x), mode))
- && TRULY_NOOP_TRUNCATION_MODES_P (mode,
- GET_MODE (x))))))))
- {
- /* ?? If we don't know OLDMODE, we have to assume here that
- X does not need sign- or zero-extension. This may not be
- the case, but it's the best we can do. */
- if (CONST_INT_P (x) && oldmode != VOIDmode
- && GET_MODE_PRECISION (mode) > GET_MODE_PRECISION (oldmode))
- {
- HOST_WIDE_INT val = INTVAL (x);
-
- /* We must sign or zero-extend in this case. Start by
- zero-extending, then sign extend if we need to. */
- val &= GET_MODE_MASK (oldmode);
- if (! unsignedp
- && val_signbit_known_set_p (oldmode, val))
- val |= ~GET_MODE_MASK (oldmode);
-
- return gen_int_mode (val, mode);
- }
-
- return gen_lowpart (mode, x);
- }
+ non-volatile MEM. */
+ if (GET_MODE_CLASS (mode) == MODE_INT
+ && GET_MODE_CLASS (oldmode) == MODE_INT
+ && GET_MODE_PRECISION (mode) <= GET_MODE_PRECISION (oldmode)
+ && ((MEM_P (x) && !MEM_VOLATILE_P (x) && direct_load[(int) mode])
+ || (REG_P (x)
+ && (!HARD_REGISTER_P (x)
+ || HARD_REGNO_MODE_OK (REGNO (x), mode))
+ && TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (x)))))
+
+ return gen_lowpart (mode, x);
/* Converting from integer constant into mode is always equivalent to an
subreg operation. */
@@ -1794,6 +1762,7 @@ emit_group_load_1 (rtx *tmps, rtx dst, rtx orig_src, tree type, int ssize)
{
rtx first, second;
+ /* TODO: const_wide_int can have sizes other than this... */
gcc_assert (2 * len == ssize);
split_double (src, &first, &second);
if (i)
@@ -5330,8 +5299,8 @@ store_expr (tree exp, rtx target, int call_param_p, bool nontemporal)
/* If TEMP is a VOIDmode constant and the mode of the type of EXP is not
the same as that of TARGET, adjust the constant. This is needed, for
- example, in case it is a CONST_DOUBLE and we want only a word-sized
- value. */
+ example, in case it is a CONST_DOUBLE or CONST_WIDE_INT and we want
+ only a word-sized value. */
if (CONSTANT_P (temp) && GET_MODE (temp) == VOIDmode
&& TREE_CODE (exp) != ERROR_MARK
&& GET_MODE (target) != TYPE_MODE (TREE_TYPE (exp)))
@@ -6692,7 +6661,7 @@ get_inner_reference (tree exp, HOST_WIDE_INT *pbitsize,
enum machine_mode mode = VOIDmode;
bool blkmode_bitfield = false;
tree offset = size_zero_node;
- double_int bit_offset = double_int_zero;
+ offset_int bit_offset = 0;
/* First get the mode, signedness, and size. We do this from just the
outermost expression. */
@@ -6755,7 +6724,7 @@ get_inner_reference (tree exp, HOST_WIDE_INT *pbitsize,
switch (TREE_CODE (exp))
{
case BIT_FIELD_REF:
- bit_offset += tree_to_double_int (TREE_OPERAND (exp, 2));
+ bit_offset += wi::to_offset (TREE_OPERAND (exp, 2));
break;
case COMPONENT_REF:
@@ -6770,7 +6739,7 @@ get_inner_reference (tree exp, HOST_WIDE_INT *pbitsize,
break;
offset = size_binop (PLUS_EXPR, offset, this_offset);
- bit_offset += tree_to_double_int (DECL_FIELD_BIT_OFFSET (field));
+ bit_offset += wi::to_offset (DECL_FIELD_BIT_OFFSET (field));
/* ??? Right now we don't do anything with DECL_OFFSET_ALIGN. */
}
@@ -6802,7 +6771,7 @@ get_inner_reference (tree exp, HOST_WIDE_INT *pbitsize,
break;
case IMAGPART_EXPR:
- bit_offset += double_int::from_uhwi (*pbitsize);
+ bit_offset += *pbitsize;
break;
case VIEW_CONVERT_EXPR:
@@ -6823,9 +6792,8 @@ get_inner_reference (tree exp, HOST_WIDE_INT *pbitsize,
tree off = TREE_OPERAND (exp, 1);
if (!integer_zerop (off))
{
- double_int boff, coff = mem_ref_offset (exp);
- boff = coff.lshift (BITS_PER_UNIT == 8
- ? 3 : exact_log2 (BITS_PER_UNIT));
+ offset_int boff, coff = mem_ref_offset (exp);
+ boff = wi::lshift (coff, LOG2_BITS_PER_UNIT);
bit_offset += boff;
}
exp = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
@@ -6849,11 +6817,11 @@ get_inner_reference (tree exp, HOST_WIDE_INT *pbitsize,
this conversion. */
if (TREE_CODE (offset) == INTEGER_CST)
{
- double_int tem = tree_to_double_int (offset);
- tem = tem.sext (TYPE_PRECISION (sizetype));
- tem = tem.lshift (BITS_PER_UNIT == 8 ? 3 : exact_log2 (BITS_PER_UNIT));
+ offset_int tem = wi::sext (wi::to_offset (offset),
+ TYPE_PRECISION (sizetype));
+ tem = wi::lshift (tem, LOG2_BITS_PER_UNIT);
tem += bit_offset;
- if (tem.fits_shwi ())
+ if (wi::fits_shwi_p (tem))
{
*pbitpos = tem.to_shwi ();
*poffset = offset = NULL_TREE;
@@ -6864,20 +6832,16 @@ get_inner_reference (tree exp, HOST_WIDE_INT *pbitsize,
if (offset)
{
/* Avoid returning a negative bitpos as this may wreak havoc later. */
- if (bit_offset.is_negative ())
+ if (wi::neg_p (bit_offset))
{
- double_int mask
- = double_int::mask (BITS_PER_UNIT == 8
- ? 3 : exact_log2 (BITS_PER_UNIT));
- double_int tem = bit_offset.and_not (mask);
+ offset_int mask = wi::mask <offset_int> (LOG2_BITS_PER_UNIT, false);
+ offset_int tem = bit_offset.and_not (mask);
/* TEM is the bitpos rounded to BITS_PER_UNIT towards -Inf.
Subtract it to BIT_OFFSET and add it (scaled) to OFFSET. */
bit_offset -= tem;
- tem = tem.arshift (BITS_PER_UNIT == 8
- ? 3 : exact_log2 (BITS_PER_UNIT),
- HOST_BITS_PER_DOUBLE_INT);
+ tem = wi::arshift (tem, LOG2_BITS_PER_UNIT);
offset = size_binop (PLUS_EXPR, offset,
- double_int_to_tree (sizetype, tem));
+ wide_int_to_tree (sizetype, tem));
}
*pbitpos = bit_offset.to_shwi ();
@@ -7813,11 +7777,12 @@ expand_constructor (tree exp, rtx target, enum expand_modifier modifier,
/* All elts simple constants => refer to a constant in memory. But
if this is a non-BLKmode mode, let it store a field at a time
- since that should make a CONST_INT or CONST_DOUBLE when we
- fold. Likewise, if we have a target we can use, it is best to
- store directly into the target unless the type is large enough
- that memcpy will be used. If we are making an initializer and
- all operands are constant, put it in memory as well.
+ since that should make a CONST_INT, CONST_WIDE_INT or
+ CONST_DOUBLE when we fold. Likewise, if we have a target we can
+ use, it is best to store directly into the target unless the type
+ is large enough that memcpy will be used. If we are making an
+ initializer and all operands are constant, put it in memory as
+ well.
FIXME: Avoid trying to fill vector constructors piece-meal.
Output them with output_constant_def below unless we're sure
@@ -8294,17 +8259,18 @@ expand_expr_real_2 (sepops ops, rtx target, enum machine_mode tmode,
&& TREE_CONSTANT (treeop1))
{
rtx constant_part;
+ HOST_WIDE_INT wc;
+ enum machine_mode wmode = TYPE_MODE (TREE_TYPE (treeop1));
op1 = expand_expr (treeop1, subtarget, VOIDmode,
EXPAND_SUM);
- /* Use immed_double_const to ensure that the constant is
+ /* Use wi::shwi to ensure that the constant is
truncated according to the mode of OP1, then sign extended
to a HOST_WIDE_INT. Using the constant directly can result
in non-canonical RTL in a 64x32 cross compile. */
- constant_part
- = immed_double_const (TREE_INT_CST_LOW (treeop0),
- (HOST_WIDE_INT) 0,
- TYPE_MODE (TREE_TYPE (treeop1)));
+ wc = TREE_INT_CST_LOW (treeop0);
+ constant_part =
+ immed_wide_int_const (wi::shwi (wc, wmode), wmode);
op1 = plus_constant (mode, op1, INTVAL (constant_part));
if (modifier != EXPAND_SUM && modifier != EXPAND_INITIALIZER)
op1 = force_operand (op1, target);
@@ -8316,6 +8282,8 @@ expand_expr_real_2 (sepops ops, rtx target, enum machine_mode tmode,
&& TREE_CONSTANT (treeop0))
{
rtx constant_part;
+ HOST_WIDE_INT wc;
+ enum machine_mode wmode = TYPE_MODE (TREE_TYPE (treeop0));
op0 = expand_expr (treeop0, subtarget, VOIDmode,
(modifier == EXPAND_INITIALIZER
@@ -8330,14 +8298,13 @@ expand_expr_real_2 (sepops ops, rtx target, enum machine_mode tmode,
return simplify_gen_binary (PLUS, mode, op0, op1);
goto binop2;
}
- /* Use immed_double_const to ensure that the constant is
+ /* Use wi::shwi to ensure that the constant is
truncated according to the mode of OP1, then sign extended
to a HOST_WIDE_INT. Using the constant directly can result
in non-canonical RTL in a 64x32 cross compile. */
+ wc = TREE_INT_CST_LOW (treeop1);
constant_part
- = immed_double_const (TREE_INT_CST_LOW (treeop1),
- (HOST_WIDE_INT) 0,
- TYPE_MODE (TREE_TYPE (treeop0)));
+ = immed_wide_int_const (wi::shwi (wc, wmode), wmode);
op0 = plus_constant (mode, op0, INTVAL (constant_part));
if (modifier != EXPAND_SUM && modifier != EXPAND_INITIALIZER)
op0 = force_operand (op0, target);
@@ -8860,10 +8827,14 @@ expand_expr_real_2 (sepops ops, rtx target, enum machine_mode tmode,
for unsigned bitfield expand this as XOR with a proper constant
instead. */
if (reduce_bit_field && TYPE_UNSIGNED (type))
- temp = expand_binop (mode, xor_optab, op0,
- immed_double_int_const
- (double_int::mask (TYPE_PRECISION (type)), mode),
- target, 1, OPTAB_LIB_WIDEN);
+ {
+ wide_int mask = wi::mask (TYPE_PRECISION (type),
+ false, GET_MODE_PRECISION (mode));
+
+ temp = expand_binop (mode, xor_optab, op0,
+ immed_wide_int_const (mask, mode),
+ target, 1, OPTAB_LIB_WIDEN);
+ }
else
temp = expand_unop (mode, one_cmpl_optab, op0, target, 1);
gcc_assert (temp);
@@ -9534,9 +9505,15 @@ expand_expr_real_1 (tree exp, rtx target, enum machine_mode tmode,
return decl_rtl;
case INTEGER_CST:
- temp = immed_double_const (TREE_INT_CST_LOW (exp),
- TREE_INT_CST_HIGH (exp), mode);
-
+ /* Given that TYPE_PRECISION (type) is not always equal to
+ GET_MODE_PRECISION (TYPE_MODE (type)), we need to extend from
+ the former to the latter according to the signedness of the
+ type. */
+ temp = immed_wide_int_const (wide_int::from
+ (exp,
+ GET_MODE_PRECISION (TYPE_MODE (type)),
+ TYPE_SIGN (type)),
+ TYPE_MODE (type));
return temp;
case VECTOR_CST:
@@ -9723,7 +9700,7 @@ expand_expr_real_1 (tree exp, rtx target, enum machine_mode tmode,
might end up in a register. */
if (mem_ref_refers_to_non_mem_p (exp))
{
- HOST_WIDE_INT offset = mem_ref_offset (exp).low;
+ HOST_WIDE_INT offset = mem_ref_offset (exp).to_short_addr ();
base = TREE_OPERAND (base, 0);
if (offset == 0
&& tree_fits_uhwi_p (TYPE_SIZE (type))
@@ -9758,8 +9735,7 @@ expand_expr_real_1 (tree exp, rtx target, enum machine_mode tmode,
op0 = memory_address_addr_space (mode, op0, as);
if (!integer_zerop (TREE_OPERAND (exp, 1)))
{
- rtx off
- = immed_double_int_const (mem_ref_offset (exp), address_mode);
+ rtx off = immed_wide_int_const (mem_ref_offset (exp), address_mode);
op0 = simplify_gen_binary (PLUS, address_mode, op0, off);
op0 = memory_address_addr_space (mode, op0, as);
}
@@ -10649,9 +10625,10 @@ reduce_to_bit_field_precision (rtx exp, rtx target, tree type)
}
else if (TYPE_UNSIGNED (type))
{
- rtx mask = immed_double_int_const (double_int::mask (prec),
- GET_MODE (exp));
- return expand_and (GET_MODE (exp), exp, mask, target);
+ enum machine_mode mode = GET_MODE (exp);
+ rtx mask = immed_wide_int_const
+ (wi::mask (prec, false, GET_MODE_PRECISION (mode)), mode);
+ return expand_and (mode, exp, mask, target);
}
else
{
@@ -11226,8 +11203,7 @@ const_vector_from_tree (tree exp)
RTVEC_ELT (v, i) = CONST_FIXED_FROM_FIXED_VALUE (TREE_FIXED_CST (elt),
inner);
else
- RTVEC_ELT (v, i) = immed_double_int_const (tree_to_double_int (elt),
- inner);
+ RTVEC_ELT (v, i) = immed_wide_int_const (elt, inner);
}
return gen_rtx_CONST_VECTOR (mode, v);
diff --git a/gcc/final.c b/gcc/final.c
index 4e452915d8b..327143093fe 100644
--- a/gcc/final.c
+++ b/gcc/final.c
@@ -80,6 +80,7 @@ along with GCC; see the file COPYING3. If not see
#include "params.h"
#include "tree-pretty-print.h" /* for dump_function_header */
#include "asan.h"
+#include "wide-int-print.h"
#ifdef XCOFF_DEBUGGING_INFO
#include "xcoffout.h" /* Needed for external data
@@ -3885,8 +3886,21 @@ output_addr_const (FILE *file, rtx x)
output_addr_const (file, XEXP (x, 0));
break;
+ case CONST_WIDE_INT:
+ /* We do not know the mode here so we have to use a roundabout
+ way to build a wide-int to get it printed properly. */
+ {
+ wide_int w = wide_int::from_array (&CONST_WIDE_INT_ELT (x, 0),
+ CONST_WIDE_INT_NUNITS (x),
+ CONST_WIDE_INT_NUNITS (x)
+ * HOST_BITS_PER_WIDE_INT,
+ false);
+ print_decs (w, file);
+ }
+ break;
+
case CONST_DOUBLE:
- if (GET_MODE (x) == VOIDmode)
+ if (CONST_DOUBLE_AS_INT_P (x))
{
/* We can use %d if the number is one word and positive. */
if (CONST_DOUBLE_HIGH (x))
diff --git a/gcc/fixed-value.c b/gcc/fixed-value.c
index 58f63ba6887..7c8ae0f0113 100644
--- a/gcc/fixed-value.c
+++ b/gcc/fixed-value.c
@@ -23,6 +23,7 @@ along with GCC; see the file COPYING3. If not see
#include "tm.h"
#include "tree.h"
#include "diagnostic-core.h"
+#include "wide-int.h"
/* Compare two fixed objects for bitwise identity. */
@@ -113,6 +114,7 @@ fixed_from_string (FIXED_VALUE_TYPE *f, const char *str, enum machine_mode mode)
REAL_VALUE_TYPE real_value, fixed_value, base_value;
unsigned int fbit;
enum fixed_value_range_code temp;
+ bool fail;
f->mode = mode;
fbit = GET_MODE_FBIT (mode);
@@ -127,8 +129,10 @@ fixed_from_string (FIXED_VALUE_TYPE *f, const char *str, enum machine_mode mode)
"large fixed-point constant implicitly truncated to fixed-point type");
real_2expN (&base_value, fbit, mode);
real_arithmetic (&fixed_value, MULT_EXPR, &real_value, &base_value);
- real_to_integer2 ((HOST_WIDE_INT *)&f->data.low, &f->data.high,
- &fixed_value);
+ wide_int w = real_to_integer (&fixed_value, &fail,
+ GET_MODE_PRECISION (mode));
+ f->data.low = w.elt (0);
+ f->data.high = w.elt (1);
if (temp == FIXED_MAX_EPS && ALL_FRACT_MODE_P (f->mode))
{
@@ -153,9 +157,12 @@ fixed_to_decimal (char *str, const FIXED_VALUE_TYPE *f_orig,
{
REAL_VALUE_TYPE real_value, base_value, fixed_value;
+ signop sgn = UNSIGNED_FIXED_POINT_MODE_P (f_orig->mode) ? UNSIGNED : SIGNED;
real_2expN (&base_value, GET_MODE_FBIT (f_orig->mode), f_orig->mode);
- real_from_integer (&real_value, VOIDmode, f_orig->data.low, f_orig->data.high,
- UNSIGNED_FIXED_POINT_MODE_P (f_orig->mode));
+ real_from_integer (&real_value, VOIDmode,
+ wide_int::from (f_orig->data,
+ GET_MODE_PRECISION (f_orig->mode), sgn),
+ sgn);
real_arithmetic (&fixed_value, RDIV_EXPR, &real_value, &base_value);
real_to_decimal (str, &fixed_value, buf_size, 0, 1);
}
@@ -1041,12 +1048,17 @@ fixed_convert_from_real (FIXED_VALUE_TYPE *f, enum machine_mode mode,
int i_f_bits = GET_MODE_IBIT (mode) + GET_MODE_FBIT (mode);
unsigned int fbit = GET_MODE_FBIT (mode);
enum fixed_value_range_code temp;
+ bool fail;
real_value = *a;
f->mode = mode;
real_2expN (&base_value, fbit, mode);
real_arithmetic (&fixed_value, MULT_EXPR, &real_value, &base_value);
- real_to_integer2 ((HOST_WIDE_INT *)&f->data.low, &f->data.high, &fixed_value);
+
+ wide_int w = real_to_integer (&fixed_value, &fail,
+ GET_MODE_PRECISION (mode));
+ f->data.low = w.elt (0);
+ f->data.high = w.elt (1);
temp = check_real_for_fixed_mode (&real_value, mode);
if (temp == FIXED_UNDERFLOW) /* Minimum. */
{
@@ -1091,9 +1103,11 @@ real_convert_from_fixed (REAL_VALUE_TYPE *r, enum machine_mode mode,
{
REAL_VALUE_TYPE base_value, fixed_value, real_value;
+ signop sgn = UNSIGNED_FIXED_POINT_MODE_P (f->mode) ? UNSIGNED : SIGNED;
real_2expN (&base_value, GET_MODE_FBIT (f->mode), f->mode);
- real_from_integer (&fixed_value, VOIDmode, f->data.low, f->data.high,
- UNSIGNED_FIXED_POINT_MODE_P (f->mode));
+ real_from_integer (&fixed_value, VOIDmode,
+ wide_int::from (f->data, GET_MODE_PRECISION (f->mode),
+ sgn), sgn);
real_arithmetic (&real_value, RDIV_EXPR, &fixed_value, &base_value);
real_convert (r, mode, &real_value);
}
diff --git a/gcc/fold-const.c b/gcc/fold-const.c
index 65051a12c2a..831aaba683c 100644
--- a/gcc/fold-const.c
+++ b/gcc/fold-const.c
@@ -116,7 +116,6 @@ static tree decode_field_reference (location_t, tree, HOST_WIDE_INT *,
HOST_WIDE_INT *,
enum machine_mode *, int *, int *,
tree *, tree *);
-static int all_ones_mask_p (const_tree, int);
static tree sign_bit_p (tree, const_tree);
static int simple_operand_p (const_tree);
static bool simple_operand_p_2 (tree);
@@ -173,26 +172,18 @@ protected_set_expr_location_unshare (tree x, location_t loc)
return x;
}
-/* If ARG2 divides ARG1 with zero remainder, carries out the division
- of type CODE and returns the quotient.
- Otherwise returns NULL_TREE. */
+/* If ARG2 divides ARG1 with zero remainder, carries out the exact
+ division and returns the quotient. Otherwise returns
+ NULL_TREE. */
tree
-div_if_zero_remainder (enum tree_code code, const_tree arg1, const_tree arg2)
+div_if_zero_remainder (const_tree arg1, const_tree arg2)
{
- double_int quo, rem;
- int uns;
+ widest_int quo;
- /* The sign of the division is according to operand two, that
- does the correct thing for POINTER_PLUS_EXPR where we want
- a signed division. */
- uns = TYPE_UNSIGNED (TREE_TYPE (arg2));
-
- quo = tree_to_double_int (arg1).divmod (tree_to_double_int (arg2),
- uns, code, &rem);
-
- if (rem.is_zero ())
- return build_int_cst_wide (TREE_TYPE (arg1), quo.low, quo.high);
+ if (wi::multiple_of_p (wi::to_widest (arg1), wi::to_widest (arg2),
+ SIGNED, &quo))
+ return wide_int_to_tree (TREE_TYPE (arg1), quo);
return NULL_TREE;
}
@@ -366,8 +357,6 @@ negate_mathfn_p (enum built_in_function code)
bool
may_negate_without_overflow_p (const_tree t)
{
- unsigned HOST_WIDE_INT val;
- unsigned int prec;
tree type;
gcc_assert (TREE_CODE (t) == INTEGER_CST);
@@ -376,19 +365,7 @@ may_negate_without_overflow_p (const_tree t)
if (TYPE_UNSIGNED (type))
return false;
- prec = TYPE_PRECISION (type);
- if (prec > HOST_BITS_PER_WIDE_INT)
- {
- if (TREE_INT_CST_LOW (t) != 0)
- return true;
- prec -= HOST_BITS_PER_WIDE_INT;
- val = TREE_INT_CST_HIGH (t);
- }
- else
- val = TREE_INT_CST_LOW (t);
- if (prec < HOST_BITS_PER_WIDE_INT)
- val &= ((unsigned HOST_WIDE_INT) 1 << prec) - 1;
- return val != ((unsigned HOST_WIDE_INT) 1 << (prec - 1));
+ return !wi::only_sign_bit_p (t);
}
/* Determine whether an expression T can be cheaply negated using
@@ -526,13 +503,11 @@ negate_expr_p (tree t)
break;
case RSHIFT_EXPR:
- /* Optimize -((int)x >> 31) into (unsigned)x >> 31. */
+ /* Optimize -((int)x >> 31) into (unsigned)x >> 31 for int. */
if (TREE_CODE (TREE_OPERAND (t, 1)) == INTEGER_CST)
{
tree op1 = TREE_OPERAND (t, 1);
- if (TREE_INT_CST_HIGH (op1) == 0
- && (unsigned HOST_WIDE_INT) (TYPE_PRECISION (type) - 1)
- == TREE_INT_CST_LOW (op1))
+ if (wi::eq_p (op1, TYPE_PRECISION (type) - 1))
return true;
}
break;
@@ -741,13 +716,11 @@ fold_negate_expr (location_t loc, tree t)
break;
case RSHIFT_EXPR:
- /* Optimize -((int)x >> 31) into (unsigned)x >> 31. */
+ /* Optimize -((int)x >> 31) into (unsigned)x >> 31 for int. */
if (TREE_CODE (TREE_OPERAND (t, 1)) == INTEGER_CST)
{
tree op1 = TREE_OPERAND (t, 1);
- if (TREE_INT_CST_HIGH (op1) == 0
- && (unsigned HOST_WIDE_INT) (TYPE_PRECISION (type) - 1)
- == TREE_INT_CST_LOW (op1))
+ if (wi::eq_p (op1, TYPE_PRECISION (type) - 1))
{
tree ntype = TYPE_UNSIGNED (type)
? signed_type_for (type)
@@ -977,168 +950,150 @@ int_binop_types_match_p (enum tree_code code, const_tree type1, const_tree type2
to evaluate CODE at compile-time. */
static tree
-int_const_binop_1 (enum tree_code code, const_tree arg1, const_tree arg2,
+int_const_binop_1 (enum tree_code code, const_tree arg1, const_tree parg2,
int overflowable)
{
- double_int op1, op2, res, tmp;
+ wide_int res;
tree t;
tree type = TREE_TYPE (arg1);
- bool uns = TYPE_UNSIGNED (type);
+ signop sign = TYPE_SIGN (type);
bool overflow = false;
- op1 = tree_to_double_int (arg1);
- op2 = tree_to_double_int (arg2);
+ wide_int arg2 = wide_int::from (parg2, TYPE_PRECISION (type),
+ TYPE_SIGN (TREE_TYPE (parg2)));
switch (code)
{
case BIT_IOR_EXPR:
- res = op1 | op2;
+ res = wi::bit_or (arg1, arg2);
break;
case BIT_XOR_EXPR:
- res = op1 ^ op2;
+ res = wi::bit_xor (arg1, arg2);
break;
case BIT_AND_EXPR:
- res = op1 & op2;
+ res = wi::bit_and (arg1, arg2);
break;
case RSHIFT_EXPR:
- res = op1.rshift (op2.to_shwi (), TYPE_PRECISION (type), !uns);
- break;
-
case LSHIFT_EXPR:
- /* It's unclear from the C standard whether shifts can overflow.
- The following code ignores overflow; perhaps a C standard
- interpretation ruling is needed. */
- res = op1.lshift (op2.to_shwi (), TYPE_PRECISION (type), !uns);
- break;
+ if (wi::neg_p (arg2))
+ {
+ arg2 = -arg2;
+ if (code == RSHIFT_EXPR)
+ code = LSHIFT_EXPR;
+ else
+ code = RSHIFT_EXPR;
+ }
- case RROTATE_EXPR:
- res = op1.rrotate (op2.to_shwi (), TYPE_PRECISION (type));
+ if (code == RSHIFT_EXPR)
+ /* It's unclear from the C standard whether shifts can overflow.
+ The following code ignores overflow; perhaps a C standard
+ interpretation ruling is needed. */
+ res = wi::rshift (arg1, arg2, sign);
+ else
+ res = wi::lshift (arg1, arg2);
break;
+ case RROTATE_EXPR:
case LROTATE_EXPR:
- res = op1.lrotate (op2.to_shwi (), TYPE_PRECISION (type));
+ if (wi::neg_p (arg2))
+ {
+ arg2 = -arg2;
+ if (code == RROTATE_EXPR)
+ code = LROTATE_EXPR;
+ else
+ code = RROTATE_EXPR;
+ }
+
+ if (code == RROTATE_EXPR)
+ res = wi::rrotate (arg1, arg2);
+ else
+ res = wi::lrotate (arg1, arg2);
break;
case PLUS_EXPR:
- res = op1.add_with_sign (op2, false, &overflow);
+ res = wi::add (arg1, arg2, sign, &overflow);
break;
case MINUS_EXPR:
- res = op1.sub_with_overflow (op2, &overflow);
+ res = wi::sub (arg1, arg2, sign, &overflow);
break;
case MULT_EXPR:
- res = op1.mul_with_sign (op2, false, &overflow);
+ res = wi::mul (arg1, arg2, sign, &overflow);
break;
case MULT_HIGHPART_EXPR:
- if (TYPE_PRECISION (type) > HOST_BITS_PER_WIDE_INT)
- {
- bool dummy_overflow;
- if (TYPE_PRECISION (type) != 2 * HOST_BITS_PER_WIDE_INT)
- return NULL_TREE;
- op1.wide_mul_with_sign (op2, uns, &res, &dummy_overflow);
- }
- else
- {
- bool dummy_overflow;
- /* MULT_HIGHPART_EXPR can't ever oveflow, as the multiplication
- is performed in twice the precision of arguments. */
- tmp = op1.mul_with_sign (op2, false, &dummy_overflow);
- res = tmp.rshift (TYPE_PRECISION (type),
- 2 * TYPE_PRECISION (type), !uns);
- }
+ res = wi::mul_high (arg1, arg2, sign);
break;
case TRUNC_DIV_EXPR:
- case FLOOR_DIV_EXPR: case CEIL_DIV_EXPR:
case EXACT_DIV_EXPR:
- /* This is a shortcut for a common special case. */
- if (op2.high == 0 && (HOST_WIDE_INT) op2.low > 0
- && !TREE_OVERFLOW (arg1)
- && !TREE_OVERFLOW (arg2)
- && op1.high == 0 && (HOST_WIDE_INT) op1.low >= 0)
- {
- if (code == CEIL_DIV_EXPR)
- op1.low += op2.low - 1;
+ if (arg2 == 0)
+ return NULL_TREE;
+ res = wi::div_trunc (arg1, arg2, sign, &overflow);
+ break;
- res.low = op1.low / op2.low, res.high = 0;
- break;
- }
+ case FLOOR_DIV_EXPR:
+ if (arg2 == 0)
+ return NULL_TREE;
+ res = wi::div_floor (arg1, arg2, sign, &overflow);
+ break;
- /* ... fall through ... */
+ case CEIL_DIV_EXPR:
+ if (arg2 == 0)
+ return NULL_TREE;
+ res = wi::div_ceil (arg1, arg2, sign, &overflow);
+ break;
case ROUND_DIV_EXPR:
- if (op2.is_zero ())
+ if (arg2 == 0)
return NULL_TREE;
- if (op2.is_one ())
- {
- res = op1;
- break;
- }
- if (op1 == op2 && !op1.is_zero ())
- {
- res = double_int_one;
- break;
- }
- res = op1.divmod_with_overflow (op2, uns, code, &tmp, &overflow);
+ res = wi::div_round (arg1, arg2, sign, &overflow);
break;
case TRUNC_MOD_EXPR:
- case FLOOR_MOD_EXPR: case CEIL_MOD_EXPR:
- /* This is a shortcut for a common special case. */
- if (op2.high == 0 && (HOST_WIDE_INT) op2.low > 0
- && !TREE_OVERFLOW (arg1)
- && !TREE_OVERFLOW (arg2)
- && op1.high == 0 && (HOST_WIDE_INT) op1.low >= 0)
- {
- if (code == CEIL_MOD_EXPR)
- op1.low += op2.low - 1;
- res.low = op1.low % op2.low, res.high = 0;
- break;
- }
+ if (arg2 == 0)
+ return NULL_TREE;
+ res = wi::mod_trunc (arg1, arg2, sign, &overflow);
+ break;
- /* ... fall through ... */
+ case FLOOR_MOD_EXPR:
+ if (arg2 == 0)
+ return NULL_TREE;
+ res = wi::mod_floor (arg1, arg2, sign, &overflow);
+ break;
- case ROUND_MOD_EXPR:
- if (op2.is_zero ())
+ case CEIL_MOD_EXPR:
+ if (arg2 == 0)
return NULL_TREE;
+ res = wi::mod_ceil (arg1, arg2, sign, &overflow);
+ break;
- /* Check for the case the case of INT_MIN % -1 and return
- overflow and result = 0. The TImode case is handled properly
- in double-int. */
- if (TYPE_PRECISION (type) <= HOST_BITS_PER_WIDE_INT
- && !uns
- && op2.is_minus_one ()
- && op1.high == (HOST_WIDE_INT) -1
- && (HOST_WIDE_INT) op1.low
- == (((HOST_WIDE_INT)-1) << (TYPE_PRECISION (type) - 1)))
- {
- overflow = 1;
- res = double_int_zero;
- }
- else
- tmp = op1.divmod_with_overflow (op2, uns, code, &res, &overflow);
+ case ROUND_MOD_EXPR:
+ if (arg2 == 0)
+ return NULL_TREE;
+ res = wi::mod_round (arg1, arg2, sign, &overflow);
break;
case MIN_EXPR:
- res = op1.min (op2, uns);
+ res = wi::min (arg1, arg2, sign);
break;
case MAX_EXPR:
- res = op1.max (op2, uns);
+ res = wi::max (arg1, arg2, sign);
break;
default:
return NULL_TREE;
}
- t = force_fit_type_double (TREE_TYPE (arg1), res, overflowable,
- (!uns && overflow)
- | TREE_OVERFLOW (arg1) | TREE_OVERFLOW (arg2));
+ t = force_fit_type (type, res, overflowable,
+ (((sign == SIGNED || overflowable == -1)
+ && overflow)
+ | TREE_OVERFLOW (arg1) | TREE_OVERFLOW (parg2)));
return t;
}
@@ -1266,9 +1221,12 @@ const_binop (enum tree_code code, tree arg1, tree arg2)
case LSHIFT_EXPR:
case RSHIFT_EXPR:
- f2.data.high = TREE_INT_CST_HIGH (arg2);
- f2.data.low = TREE_INT_CST_LOW (arg2);
- f2.mode = SImode;
+ {
+ wide_int w2 = arg2;
+ f2.data.high = w2.elt (1);
+ f2.data.low = w2.elt (0);
+ f2.mode = SImode;
+ }
break;
default:
@@ -1603,18 +1561,12 @@ size_diffop_loc (location_t loc, tree arg0, tree arg1)
static tree
fold_convert_const_int_from_int (tree type, const_tree arg1)
{
- tree t;
-
/* Given an integer constant, make new constant with new type,
- appropriately sign-extended or truncated. */
- t = force_fit_type_double (type, tree_to_double_int (arg1),
- !POINTER_TYPE_P (TREE_TYPE (arg1)),
- (TREE_INT_CST_HIGH (arg1) < 0
- && (TYPE_UNSIGNED (type)
- < TYPE_UNSIGNED (TREE_TYPE (arg1))))
- | TREE_OVERFLOW (arg1));
-
- return t;
+ appropriately sign-extended or truncated. Use widest_int
+ so that any extension is done according ARG1's type. */
+ return force_fit_type (type, wi::to_widest (arg1),
+ !POINTER_TYPE_P (TREE_TYPE (arg1)),
+ TREE_OVERFLOW (arg1));
}
/* A subroutine of fold_convert_const handling conversions a REAL_CST
@@ -1623,7 +1575,7 @@ fold_convert_const_int_from_int (tree type, const_tree arg1)
static tree
fold_convert_const_int_from_real (enum tree_code code, tree type, const_tree arg1)
{
- int overflow = 0;
+ bool overflow = false;
tree t;
/* The following code implements the floating point to integer
@@ -1635,7 +1587,7 @@ fold_convert_const_int_from_real (enum tree_code code, tree type, const_tree arg
C and C++ standards that simply state that the behavior of
FP-to-integer conversion is unspecified upon overflow. */
- double_int val;
+ wide_int val;
REAL_VALUE_TYPE r;
REAL_VALUE_TYPE x = TREE_REAL_CST (arg1);
@@ -1652,8 +1604,8 @@ fold_convert_const_int_from_real (enum tree_code code, tree type, const_tree arg
/* If R is NaN, return zero and show we have an overflow. */
if (REAL_VALUE_ISNAN (r))
{
- overflow = 1;
- val = double_int_zero;
+ overflow = true;
+ val = wi::zero (TYPE_PRECISION (type));
}
/* See if R is less than the lower bound or greater than the
@@ -1665,8 +1617,8 @@ fold_convert_const_int_from_real (enum tree_code code, tree type, const_tree arg
REAL_VALUE_TYPE l = real_value_from_int_cst (NULL_TREE, lt);
if (REAL_VALUES_LESS (r, l))
{
- overflow = 1;
- val = tree_to_double_int (lt);
+ overflow = true;
+ val = lt;
}
}
@@ -1678,16 +1630,16 @@ fold_convert_const_int_from_real (enum tree_code code, tree type, const_tree arg
REAL_VALUE_TYPE u = real_value_from_int_cst (NULL_TREE, ut);
if (REAL_VALUES_LESS (u, r))
{
- overflow = 1;
- val = tree_to_double_int (ut);
+ overflow = true;
+ val = ut;
}
}
}
if (! overflow)
- real_to_integer2 ((HOST_WIDE_INT *) &val.low, &val.high, &r);
+ val = real_to_integer (&r, &overflow, TYPE_PRECISION (type));
- t = force_fit_type_double (type, val, -1, overflow | TREE_OVERFLOW (arg1));
+ t = force_fit_type (type, val, -1, overflow | TREE_OVERFLOW (arg1));
return t;
}
@@ -1730,11 +1682,11 @@ fold_convert_const_int_from_fixed (tree type, const_tree arg1)
/* Given a fixed-point constant, make new constant with new type,
appropriately sign-extended or truncated. */
- t = force_fit_type_double (type, temp, -1,
- (temp.is_negative ()
- && (TYPE_UNSIGNED (type)
- < TYPE_UNSIGNED (TREE_TYPE (arg1))))
- | TREE_OVERFLOW (arg1));
+ t = force_fit_type (type, temp, -1,
+ (temp.is_negative ()
+ && (TYPE_UNSIGNED (type)
+ < TYPE_UNSIGNED (TREE_TYPE (arg1))))
+ | TREE_OVERFLOW (arg1));
return t;
}
@@ -1817,9 +1769,17 @@ fold_convert_const_fixed_from_int (tree type, const_tree arg1)
FIXED_VALUE_TYPE value;
tree t;
bool overflow_p;
+ double_int di;
- overflow_p = fixed_convert_from_int (&value, TYPE_MODE (type),
- TREE_INT_CST (arg1),
+ gcc_assert (TREE_INT_CST_NUNITS (arg1) <= 2);
+
+ di.low = TREE_INT_CST_ELT (arg1, 0);
+ if (TREE_INT_CST_NUNITS (arg1) == 1)
+ di.high = (HOST_WIDE_INT) di.low < 0 ? (HOST_WIDE_INT) -1 : 0;
+ else
+ di.high = TREE_INT_CST_ELT (arg1, 1);
+
+ overflow_p = fixed_convert_from_int (&value, TYPE_MODE (type), di,
TYPE_UNSIGNED (TREE_TYPE (arg1)),
TYPE_SATURATING (type));
t = build_fixed (type, value);
@@ -3715,23 +3675,24 @@ decode_field_reference (location_t loc, tree exp, HOST_WIDE_INT *pbitsize,
}
/* Return nonzero if MASK represents a mask of SIZE ones in the low-order
- bit positions. */
+ bit positions and MASK is SIGNED. */
static int
-all_ones_mask_p (const_tree mask, int size)
+all_ones_mask_p (const_tree mask, unsigned int size)
{
tree type = TREE_TYPE (mask);
unsigned int precision = TYPE_PRECISION (type);
- tree tmask;
- tmask = build_int_cst_type (signed_type_for (type), -1);
+ /* If this function returns true when the type of the mask is
+ UNSIGNED, then there will be errors. In particular see
+ gcc.c-torture/execute/990326-1.c. There does not appear to be
+ any documentation paper trail as to why this is so. But the pre
+ wide-int worked with that restriction and it has been preserved
+ here. */
+ if (size > precision || TYPE_SIGN (type) == UNSIGNED)
+ return false;
- return
- tree_int_cst_equal (mask,
- const_binop (RSHIFT_EXPR,
- const_binop (LSHIFT_EXPR, tmask,
- size_int (precision - size)),
- size_int (precision - size)));
+ return wi::mask (size, false, precision) == mask;
}
/* Subroutine for fold: determine if VAL is the INTEGER_CONST that
@@ -3743,8 +3704,6 @@ all_ones_mask_p (const_tree mask, int size)
static tree
sign_bit_p (tree exp, const_tree val)
{
- unsigned HOST_WIDE_INT mask_lo, lo;
- HOST_WIDE_INT mask_hi, hi;
int width;
tree t;
@@ -3759,27 +3718,7 @@ sign_bit_p (tree exp, const_tree val)
return NULL_TREE;
width = TYPE_PRECISION (t);
- if (width > HOST_BITS_PER_WIDE_INT)
- {
- hi = (unsigned HOST_WIDE_INT) 1 << (width - HOST_BITS_PER_WIDE_INT - 1);
- lo = 0;
-
- mask_hi = (HOST_WIDE_INT_M1U >> (HOST_BITS_PER_DOUBLE_INT - width));
- mask_lo = -1;
- }
- else
- {
- hi = 0;
- lo = (unsigned HOST_WIDE_INT) 1 << (width - 1);
-
- mask_hi = 0;
- mask_lo = (HOST_WIDE_INT_M1U >> (HOST_BITS_PER_WIDE_INT - width));
- }
-
- /* We mask off those bits beyond TREE_TYPE (exp) so that we can
- treat VAL as if it were unsigned. */
- if ((TREE_INT_CST_HIGH (val) & mask_hi) == hi
- && (TREE_INT_CST_LOW (val) & mask_lo) == lo)
+ if (wi::only_sign_bit_p (val, width))
return exp;
/* Handle extension from a narrower type. */
@@ -4024,7 +3963,7 @@ make_range_step (location_t loc, enum tree_code code, tree arg0, tree arg1,
{
in_p = ! in_p;
high = range_binop (MINUS_EXPR, NULL_TREE, low, 0,
- integer_one_node, 0);
+ build_int_cst (TREE_TYPE (low), 1), 0);
low = build_int_cst (arg0_type, 0);
}
}
@@ -4094,9 +4033,9 @@ make_range_step (location_t loc, enum tree_code code, tree arg0, tree arg1,
if (n_low && n_high && tree_int_cst_lt (n_high, n_low))
{
low = range_binop (PLUS_EXPR, arg0_type, n_high, 0,
- integer_one_node, 0);
+ build_int_cst (TREE_TYPE (n_high), 1), 0);
high = range_binop (MINUS_EXPR, arg0_type, n_low, 0,
- integer_one_node, 0);
+ build_int_cst (TREE_TYPE (n_low), 1), 0);
/* If the range is of the form +/- [ x+1, x ], we won't
be able to normalize it. But then, it represents the
@@ -4334,23 +4273,9 @@ build_range_check (location_t loc, tree type, tree exp, int in_p,
/* Optimize (c>=1) && (c<=127) into (signed char)c > 0. */
if (integer_onep (low) && TREE_CODE (high) == INTEGER_CST)
{
- unsigned HOST_WIDE_INT lo;
- HOST_WIDE_INT hi;
- int prec;
-
- prec = TYPE_PRECISION (etype);
- if (prec <= HOST_BITS_PER_WIDE_INT)
- {
- hi = 0;
- lo = ((unsigned HOST_WIDE_INT) 1 << (prec - 1)) - 1;
- }
- else
- {
- hi = ((HOST_WIDE_INT) 1 << (prec - HOST_BITS_PER_WIDE_INT - 1)) - 1;
- lo = HOST_WIDE_INT_M1U;
- }
+ int prec = TYPE_PRECISION (etype);
- if (TREE_INT_CST_HIGH (high) == hi && TREE_INT_CST_LOW (high) == lo)
+ if (wi::mask (prec - 1, false, prec) == high)
{
if (TYPE_UNSIGNED (etype))
{
@@ -4384,7 +4309,7 @@ build_range_check (location_t loc, tree type, tree exp, int in_p,
utype = unsigned_type_for (etype);
maxv = fold_convert_loc (loc, utype, TYPE_MAX_VALUE (etype));
maxv = range_binop (PLUS_EXPR, NULL_TREE, maxv, 1,
- integer_one_node, 1);
+ build_int_cst (TREE_TYPE (maxv), 1), 1);
minv = fold_convert_loc (loc, utype, TYPE_MIN_VALUE (etype));
if (integer_zerop (range_binop (NE_EXPR, integer_type_node,
@@ -4432,7 +4357,8 @@ range_predecessor (tree val)
&& operand_equal_p (val, TYPE_MIN_VALUE (type), 0))
return 0;
else
- return range_binop (MINUS_EXPR, NULL_TREE, val, 0, integer_one_node, 0);
+ return range_binop (MINUS_EXPR, NULL_TREE, val, 0,
+ build_int_cst (TREE_TYPE (val), 1), 0);
}
/* Return the successor of VAL in its type, handling the infinite case. */
@@ -4446,7 +4372,8 @@ range_successor (tree val)
&& operand_equal_p (val, TYPE_MAX_VALUE (type), 0))
return 0;
else
- return range_binop (PLUS_EXPR, NULL_TREE, val, 0, integer_one_node, 0);
+ return range_binop (PLUS_EXPR, NULL_TREE, val, 0,
+ build_int_cst (TREE_TYPE (val), 1), 0);
}
/* Given two ranges, see if we can merge them into one. Return 1 if we
@@ -4626,7 +4553,8 @@ merge_ranges (int *pin_p, tree *plow, tree *phigh, int in0_p, tree low0,
if (TYPE_UNSIGNED (TREE_TYPE (high1))
&& integer_zerop (range_binop (PLUS_EXPR, NULL_TREE,
high1, 1,
- integer_one_node, 1)))
+ build_int_cst (TREE_TYPE (high1), 1),
+ 1)))
high1 = 0;
break;
default:
@@ -5082,8 +5010,7 @@ unextend (tree c, int p, int unsignedp, tree mask)
/* We work by getting just the sign bit into the low-order bit, then
into the high-order bit, then sign-extend. We then XOR that value
with C. */
- temp = const_binop (RSHIFT_EXPR, c, size_int (p - 1));
- temp = const_binop (BIT_AND_EXPR, temp, size_int (1));
+ temp = build_int_cst (TREE_TYPE (c), wi::extract_uhwi (c, p - 1, 1));
/* We must use a signed type in order to get an arithmetic right shift.
However, we must also avoid introducing accidental overflows, so that
@@ -5889,8 +5816,7 @@ extract_muldiv_1 (tree t, tree c, enum tree_code code, tree wide_type,
&& (tcode == RSHIFT_EXPR || TYPE_UNSIGNED (TREE_TYPE (op0)))
/* const_binop may not detect overflow correctly,
so check for it explicitly here. */
- && TYPE_PRECISION (TREE_TYPE (size_one_node)) > TREE_INT_CST_LOW (op1)
- && TREE_INT_CST_HIGH (op1) == 0
+ && wi::gtu_p (TYPE_PRECISION (TREE_TYPE (size_one_node)), op1)
&& 0 != (t1 = fold_convert (ctype,
const_binop (LSHIFT_EXPR,
size_one_node,
@@ -6036,21 +5962,17 @@ extract_muldiv_1 (tree t, tree c, enum tree_code code, tree wide_type,
assuming no overflow. */
if (tcode == code)
{
- double_int mul;
- bool overflow_p;
- unsigned prec = TYPE_PRECISION (ctype);
- bool uns = TYPE_UNSIGNED (ctype);
- double_int diop1 = tree_to_double_int (op1).ext (prec, uns);
- double_int dic = tree_to_double_int (c).ext (prec, uns);
- mul = diop1.mul_with_sign (dic, false, &overflow_p);
- overflow_p = ((!uns && overflow_p)
- | TREE_OVERFLOW (c) | TREE_OVERFLOW (op1));
- if (!double_int_fits_to_tree_p (ctype, mul)
- && ((uns && tcode != MULT_EXPR) || !uns))
- overflow_p = 1;
+ bool overflow_p = false;
+ bool overflow_mul_p;
+ signop sign = TYPE_SIGN (ctype);
+ wide_int mul = wi::mul (op1, c, sign, &overflow_mul_p);
+ overflow_p = TREE_OVERFLOW (c) | TREE_OVERFLOW (op1);
+ if (overflow_mul_p
+ && ((sign == UNSIGNED && tcode != MULT_EXPR) || sign == SIGNED))
+ overflow_p = true;
if (!overflow_p)
return fold_build2 (tcode, ctype, fold_convert (ctype, op0),
- double_int_to_tree (ctype, mul));
+ wide_int_to_tree (ctype, mul));
}
/* If these operations "cancel" each other, we have the main
@@ -6449,29 +6371,26 @@ fold_div_compare (location_t loc,
tree prod, tmp, hi, lo;
tree arg00 = TREE_OPERAND (arg0, 0);
tree arg01 = TREE_OPERAND (arg0, 1);
- double_int val;
- bool unsigned_p = TYPE_UNSIGNED (TREE_TYPE (arg0));
- bool neg_overflow;
+ signop sign = TYPE_SIGN (TREE_TYPE (arg0));
+ bool neg_overflow = false;
bool overflow;
/* We have to do this the hard way to detect unsigned overflow.
prod = int_const_binop (MULT_EXPR, arg01, arg1); */
- val = TREE_INT_CST (arg01)
- .mul_with_sign (TREE_INT_CST (arg1), unsigned_p, &overflow);
- prod = force_fit_type_double (TREE_TYPE (arg00), val, -1, overflow);
+ wide_int val = wi::mul (arg01, arg1, sign, &overflow);
+ prod = force_fit_type (TREE_TYPE (arg00), val, -1, overflow);
neg_overflow = false;
- if (unsigned_p)
+ if (sign == UNSIGNED)
{
tmp = int_const_binop (MINUS_EXPR, arg01,
build_int_cst (TREE_TYPE (arg01), 1));
lo = prod;
/* Likewise hi = int_const_binop (PLUS_EXPR, prod, tmp). */
- val = TREE_INT_CST (prod)
- .add_with_sign (TREE_INT_CST (tmp), unsigned_p, &overflow);
- hi = force_fit_type_double (TREE_TYPE (arg00), val,
- -1, overflow | TREE_OVERFLOW (prod));
+ val = wi::add (prod, tmp, sign, &overflow);
+ hi = force_fit_type (TREE_TYPE (arg00), val,
+ -1, overflow | TREE_OVERFLOW (prod));
}
else if (tree_int_cst_sgn (arg01) >= 0)
{
@@ -6662,10 +6581,9 @@ fold_single_bit_test (location_t loc, enum tree_code code,
not overflow, adjust BITNUM and INNER. */
if (TREE_CODE (inner) == RSHIFT_EXPR
&& TREE_CODE (TREE_OPERAND (inner, 1)) == INTEGER_CST
- && tree_fits_uhwi_p (TREE_OPERAND (inner, 1))
&& bitnum < TYPE_PRECISION (type)
- && (tree_to_uhwi (TREE_OPERAND (inner, 1))
- < (unsigned) (TYPE_PRECISION (type) - bitnum)))
+ && wi::ltu_p (TREE_OPERAND (inner, 1),
+ TYPE_PRECISION (type) - bitnum))
{
bitnum += tree_to_uhwi (TREE_OPERAND (inner, 1));
inner = TREE_OPERAND (inner, 0);
@@ -6925,8 +6843,8 @@ fold_sign_changed_comparison (location_t loc, enum tree_code code, tree type,
return NULL_TREE;
if (TREE_CODE (arg1) == INTEGER_CST)
- arg1 = force_fit_type_double (inner_type, tree_to_double_int (arg1),
- 0, TREE_OVERFLOW (arg1));
+ arg1 = force_fit_type (inner_type, wi::to_widest (arg1), 0,
+ TREE_OVERFLOW (arg1));
else
arg1 = fold_convert_loc (loc, inner_type, arg1);
@@ -7014,7 +6932,7 @@ try_move_mult_to_index (location_t loc, tree addr, tree op1)
else
{
/* Try if delta is a multiple of step. */
- tree tmp = div_if_zero_remainder (EXACT_DIV_EXPR, op1, step);
+ tree tmp = div_if_zero_remainder (op1, step);
if (! tmp)
goto cont;
delta = tmp;
@@ -7086,7 +7004,7 @@ cont:
else
{
/* Try if delta is a multiple of step. */
- tree tmp = div_if_zero_remainder (EXACT_DIV_EXPR, op1, step);
+ tree tmp = div_if_zero_remainder (op1, step);
if (! tmp)
continue;
delta = tmp;
@@ -7242,7 +7160,7 @@ fold_plusminus_mult_expr (location_t loc, enum tree_code code, tree type,
arg10 = build_one_cst (type);
/* As we canonicalize A - 2 to A + -2 get rid of that sign for
the purpose of this canonicalization. */
- if (TREE_INT_CST_HIGH (arg1) == -1
+ if (wi::neg_p (arg1, TYPE_SIGN (TREE_TYPE (arg1)))
&& negate_expr_p (arg1)
&& code == PLUS_EXPR)
{
@@ -7340,11 +7258,9 @@ native_encode_int (const_tree expr, unsigned char *ptr, int len)
for (byte = 0; byte < total_bytes; byte++)
{
int bitpos = byte * BITS_PER_UNIT;
- if (bitpos < HOST_BITS_PER_WIDE_INT)
- value = (unsigned char) (TREE_INT_CST_LOW (expr) >> bitpos);
- else
- value = (unsigned char) (TREE_INT_CST_HIGH (expr)
- >> (bitpos - HOST_BITS_PER_WIDE_INT));
+ /* Extend EXPR according to TYPE_SIGN if the precision isn't a whole
+ number of bytes. */
+ value = wi::extract_uhwi (wi::to_widest (expr), bitpos, BITS_PER_UNIT);
if (total_bytes > UNITS_PER_WORD)
{
@@ -7566,15 +7482,14 @@ static tree
native_interpret_int (tree type, const unsigned char *ptr, int len)
{
int total_bytes = GET_MODE_SIZE (TYPE_MODE (type));
- double_int result;
if (total_bytes > len
|| total_bytes * BITS_PER_UNIT > HOST_BITS_PER_DOUBLE_INT)
return NULL_TREE;
- result = double_int::from_buffer (ptr, total_bytes);
+ wide_int result = wi::from_buffer (ptr, total_bytes);
- return double_int_to_tree (type, result);
+ return wide_int_to_tree (type, result);
}
@@ -8139,10 +8054,10 @@ fold_unary_loc (location_t loc, enum tree_code code, tree type, tree op0)
}
if (change)
{
- tem = force_fit_type_double (type, tree_to_double_int (and1),
- 0, TREE_OVERFLOW (and1));
+ tem = force_fit_type (type, wi::to_widest (and1), 0,
+ TREE_OVERFLOW (and1));
return fold_build2_loc (loc, BIT_AND_EXPR, type,
- fold_convert_loc (loc, type, and0), tem);
+ fold_convert_loc (loc, type, and0), tem);
}
}
@@ -8922,28 +8837,28 @@ maybe_canonicalize_comparison (location_t loc, enum tree_code code, tree type,
static bool
pointer_may_wrap_p (tree base, tree offset, HOST_WIDE_INT bitpos)
{
- double_int di_offset, total;
-
if (!POINTER_TYPE_P (TREE_TYPE (base)))
return true;
if (bitpos < 0)
return true;
+ wide_int wi_offset;
+ int precision = TYPE_PRECISION (TREE_TYPE (base));
if (offset == NULL_TREE)
- di_offset = double_int_zero;
+ wi_offset = wi::zero (precision);
else if (TREE_CODE (offset) != INTEGER_CST || TREE_OVERFLOW (offset))
return true;
else
- di_offset = TREE_INT_CST (offset);
+ wi_offset = offset;
bool overflow;
- double_int units = double_int::from_uhwi (bitpos / BITS_PER_UNIT);
- total = di_offset.add_with_sign (units, true, &overflow);
+ wide_int units = wi::shwi (bitpos / BITS_PER_UNIT, precision);
+ wide_int total = wi::add (wi_offset, units, UNSIGNED, &overflow);
if (overflow)
return true;
- if (total.high != 0)
+ if (!wi::fits_uhwi_p (total))
return true;
HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (TREE_TYPE (base)));
@@ -8961,7 +8876,7 @@ pointer_may_wrap_p (tree base, tree offset, HOST_WIDE_INT bitpos)
size = base_size;
}
- return total.low > (unsigned HOST_WIDE_INT) size;
+ return total.to_uhwi () > (unsigned HOST_WIDE_INT) size;
}
/* Return the HOST_WIDE_INT least significant bits of T, a sizetype
@@ -8971,8 +8886,11 @@ pointer_may_wrap_p (tree base, tree offset, HOST_WIDE_INT bitpos)
static HOST_WIDE_INT
size_low_cst (const_tree t)
{
- double_int d = tree_to_double_int (t);
- return d.sext (TYPE_PRECISION (TREE_TYPE (t))).low;
+ HOST_WIDE_INT w = TREE_INT_CST_ELT (t, 0);
+ int prec = TYPE_PRECISION (TREE_TYPE (t));
+ if (prec < HOST_BITS_PER_WIDE_INT)
+ return sext_hwi (w, prec);
+ return w;
}
/* Subroutine of fold_binary. This routine performs all of the
@@ -9966,19 +9884,12 @@ exact_inverse (tree type, tree cst)
/* Mask out the tz least significant bits of X of type TYPE where
tz is the number of trailing zeroes in Y. */
-static double_int
-mask_with_tz (tree type, double_int x, double_int y)
+static wide_int
+mask_with_tz (tree type, const wide_int &x, const wide_int &y)
{
- int tz = y.trailing_zeros ();
-
+ int tz = wi::ctz (y);
if (tz > 0)
- {
- double_int mask;
-
- mask = ~double_int::mask (tz);
- mask = mask.ext (TYPE_PRECISION (type), TYPE_UNSIGNED (type));
- return mask & x;
- }
+ return wi::mask (tz, true, TYPE_PRECISION (type)) & x;
return x;
}
@@ -10633,9 +10544,7 @@ fold_binary_loc (location_t loc,
code11 = TREE_CODE (tree11);
if (code01 == INTEGER_CST
&& code11 == INTEGER_CST
- && TREE_INT_CST_HIGH (tree01) == 0
- && TREE_INT_CST_HIGH (tree11) == 0
- && ((TREE_INT_CST_LOW (tree01) + TREE_INT_CST_LOW (tree11))
+ && (wi::to_widest (tree01) + wi::to_widest (tree11)
== element_precision (TREE_TYPE (TREE_OPERAND (arg0, 0)))))
{
tem = build2_loc (loc, LROTATE_EXPR,
@@ -11424,21 +11333,20 @@ fold_binary_loc (location_t loc,
&& TREE_CODE (arg1) == INTEGER_CST
&& TREE_CODE (TREE_OPERAND (arg0, 1)) == INTEGER_CST)
{
- double_int c1, c2, c3, msk;
int width = TYPE_PRECISION (type), w;
-
- c1 = tree_to_double_int (TREE_OPERAND (arg0, 1));
- c2 = tree_to_double_int (arg1);
+ wide_int c1 = TREE_OPERAND (arg0, 1);
+ wide_int c2 = arg1;
/* If (C1&C2) == C1, then (X&C1)|C2 becomes (X,C2). */
if ((c1 & c2) == c1)
return omit_one_operand_loc (loc, type, arg1,
TREE_OPERAND (arg0, 0));
- msk = double_int::mask (width);
+ wide_int msk = wi::mask (width, false,
+ TYPE_PRECISION (TREE_TYPE (arg1)));
/* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2. */
- if (msk.and_not (c1 | c2).is_zero ())
+ if (msk.and_not (c1 | c2) == 0)
return fold_build2_loc (loc, BIT_IOR_EXPR, type,
TREE_OPERAND (arg0, 0), arg1);
@@ -11447,17 +11355,14 @@ fold_binary_loc (location_t loc,
mode which allows further optimizations. */
c1 &= msk;
c2 &= msk;
- c3 = c1.and_not (c2);
- for (w = BITS_PER_UNIT;
- w <= width && w <= HOST_BITS_PER_WIDE_INT;
- w <<= 1)
+ wide_int c3 = c1.and_not (c2);
+ for (w = BITS_PER_UNIT; w <= width; w <<= 1)
{
- unsigned HOST_WIDE_INT mask
- = HOST_WIDE_INT_M1U >> (HOST_BITS_PER_WIDE_INT - w);
- if (((c1.low | c2.low) & mask) == mask
- && (c1.low & ~mask) == 0 && c1.high == 0)
+ wide_int mask = wi::mask (width - w, false,
+ TYPE_PRECISION (type));
+ if (((c1 | c2) & mask) == mask && c1.and_not (mask) == 0)
{
- c3 = double_int::from_uhwi (mask);
+ c3 = mask;
break;
}
}
@@ -11466,8 +11371,8 @@ fold_binary_loc (location_t loc,
return fold_build2_loc (loc, BIT_IOR_EXPR, type,
fold_build2_loc (loc, BIT_AND_EXPR, type,
TREE_OPERAND (arg0, 0),
- double_int_to_tree (type,
- c3)),
+ wide_int_to_tree (type,
+ c3)),
arg1);
}
@@ -11837,12 +11742,11 @@ fold_binary_loc (location_t loc,
multiple of 1 << CST. */
if (TREE_CODE (arg1) == INTEGER_CST)
{
- double_int cst1 = tree_to_double_int (arg1);
- double_int ncst1 = (-cst1).ext (TYPE_PRECISION (TREE_TYPE (arg1)),
- TYPE_UNSIGNED (TREE_TYPE (arg1)));
+ wide_int cst1 = arg1;
+ wide_int ncst1 = -cst1;
if ((cst1 & ncst1) == ncst1
&& multiple_of_p (type, arg0,
- double_int_to_tree (TREE_TYPE (arg1), ncst1)))
+ wide_int_to_tree (TREE_TYPE (arg1), ncst1)))
return fold_convert_loc (loc, type, arg0);
}
@@ -11852,24 +11756,22 @@ fold_binary_loc (location_t loc,
&& TREE_CODE (arg0) == MULT_EXPR
&& TREE_CODE (TREE_OPERAND (arg0, 1)) == INTEGER_CST)
{
- double_int darg1 = tree_to_double_int (arg1);
- double_int masked
- = mask_with_tz (type, darg1,
- tree_to_double_int (TREE_OPERAND (arg0, 1)));
+ wide_int warg1 = arg1;
+ wide_int masked = mask_with_tz (type, warg1, TREE_OPERAND (arg0, 1));
- if (masked.is_zero ())
+ if (masked == 0)
return omit_two_operands_loc (loc, type, build_zero_cst (type),
arg0, arg1);
- else if (masked != darg1)
+ else if (masked != warg1)
{
/* Avoid the transform if arg1 is a mask of some
mode which allows further optimizations. */
- int pop = darg1.popcount ();
+ int pop = wi::popcount (warg1);
if (!(pop >= BITS_PER_UNIT
&& exact_log2 (pop) != -1
- && double_int::mask (pop) == darg1))
+ && wi::mask (pop, false, warg1.get_precision ()) == warg1))
return fold_build2_loc (loc, code, type, op0,
- double_int_to_tree (type, masked));
+ wide_int_to_tree (type, masked));
}
}
@@ -11880,10 +11782,10 @@ fold_binary_loc (location_t loc,
and for - instead of + (or unary - instead of +)
and/or ^ instead of |.
If B is constant and (B & M) == 0, fold into A & M. */
- if (tree_fits_uhwi_p (arg1))
+ if (TREE_CODE (arg1) == INTEGER_CST)
{
- unsigned HOST_WIDE_INT cst1 = tree_to_uhwi (arg1);
- if (~cst1 && (cst1 & (cst1 + 1)) == 0
+ wide_int cst1 = arg1;
+ if ((~cst1 != 0) && (cst1 & (cst1 + 1)) == 0
&& INTEGRAL_TYPE_P (TREE_TYPE (arg0))
&& (TREE_CODE (arg0) == PLUS_EXPR
|| TREE_CODE (arg0) == MINUS_EXPR
@@ -11893,7 +11795,7 @@ fold_binary_loc (location_t loc,
{
tree pmop[2];
int which = 0;
- unsigned HOST_WIDE_INT cst0;
+ wide_int cst0;
/* Now we know that arg0 is (C + D) or (C - D) or
-C and arg1 (M) is == (1LL << cst) - 1.
@@ -11906,9 +11808,7 @@ fold_binary_loc (location_t loc,
which = 1;
}
- if (!tree_fits_uhwi_p (TYPE_MAX_VALUE (TREE_TYPE (arg0)))
- || (tree_to_uhwi (TYPE_MAX_VALUE (TREE_TYPE (arg0)))
- & cst1) != cst1)
+ if ((wi::max_value (TREE_TYPE (arg0)) & cst1) != cst1)
which = -1;
for (; which >= 0; which--)
@@ -11920,9 +11820,7 @@ fold_binary_loc (location_t loc,
if (TREE_CODE (TREE_OPERAND (pmop[which], 1))
!= INTEGER_CST)
break;
- /* tree_to_[su]hwi not used, because we don't care about
- the upper bits. */
- cst0 = TREE_INT_CST_LOW (TREE_OPERAND (pmop[which], 1));
+ cst0 = TREE_OPERAND (pmop[which], 1);
cst0 &= cst1;
if (TREE_CODE (pmop[which]) == BIT_AND_EXPR)
{
@@ -11941,7 +11839,7 @@ fold_binary_loc (location_t loc,
omitted (assumed 0). */
if ((TREE_CODE (arg0) == PLUS_EXPR
|| (TREE_CODE (arg0) == MINUS_EXPR && which == 0))
- && (TREE_INT_CST_LOW (pmop[which]) & cst1) == 0)
+ && (cst1 & pmop[which]) == 0)
pmop[which] = NULL;
break;
default:
@@ -12002,9 +11900,8 @@ fold_binary_loc (location_t loc,
{
prec = TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (arg0, 0)));
- if (prec < BITS_PER_WORD && prec < HOST_BITS_PER_WIDE_INT
- && (~TREE_INT_CST_LOW (arg1)
- & (((HOST_WIDE_INT) 1 << prec) - 1)) == 0)
+ wide_int mask = wide_int::from (arg1, prec, UNSIGNED);
+ if (mask == -1)
return
fold_convert_loc (loc, type, TREE_OPERAND (arg0, 0));
}
@@ -12407,17 +12304,10 @@ fold_binary_loc (location_t loc,
tree sum = fold_binary_loc (loc, PLUS_EXPR, TREE_TYPE (arg1),
arg1, TREE_OPERAND (arg0, 1));
if (sum && integer_zerop (sum)) {
- unsigned long pow2;
-
- if (TREE_INT_CST_LOW (arg1))
- pow2 = exact_log2 (TREE_INT_CST_LOW (arg1));
- else
- pow2 = exact_log2 (TREE_INT_CST_HIGH (arg1))
- + HOST_BITS_PER_WIDE_INT;
-
+ tree pow2 = build_int_cst (integer_type_node,
+ wi::exact_log2 (arg1));
return fold_build2_loc (loc, RSHIFT_EXPR, type,
- TREE_OPERAND (arg0, 0),
- build_int_cst (integer_type_node, pow2));
+ TREE_OPERAND (arg0, 0), pow2);
}
}
@@ -12435,13 +12325,8 @@ fold_binary_loc (location_t loc,
if (integer_pow2p (sval) && tree_int_cst_sgn (sval) > 0)
{
tree sh_cnt = TREE_OPERAND (arg1, 1);
- unsigned long pow2;
-
- if (TREE_INT_CST_LOW (sval))
- pow2 = exact_log2 (TREE_INT_CST_LOW (sval));
- else
- pow2 = exact_log2 (TREE_INT_CST_HIGH (sval))
- + HOST_BITS_PER_WIDE_INT;
+ tree pow2 = build_int_cst (TREE_TYPE (sh_cnt),
+ wi::exact_log2 (sval));
if (strict_overflow_p)
fold_overflow_warning (("assuming signed overflow does not "
@@ -12449,11 +12334,9 @@ fold_binary_loc (location_t loc,
WARN_STRICT_OVERFLOW_MISC);
sh_cnt = fold_build2_loc (loc, PLUS_EXPR, TREE_TYPE (sh_cnt),
- sh_cnt,
- build_int_cst (TREE_TYPE (sh_cnt),
- pow2));
+ sh_cnt, pow2);
return fold_build2_loc (loc, RSHIFT_EXPR, type,
- fold_convert_loc (loc, type, arg0), sh_cnt);
+ fold_convert_loc (loc, type, arg0), sh_cnt);
}
}
@@ -12476,8 +12359,7 @@ fold_binary_loc (location_t loc,
/* X / -1 is -X. */
if (!TYPE_UNSIGNED (type)
&& TREE_CODE (arg1) == INTEGER_CST
- && TREE_INT_CST_LOW (arg1) == HOST_WIDE_INT_M1U
- && TREE_INT_CST_HIGH (arg1) == -1)
+ && wi::eq_p (arg1, -1))
return fold_convert_loc (loc, type, negate_expr (arg0));
/* Convert -A / -B to A / B when the type is signed and overflow is
@@ -12559,16 +12441,15 @@ fold_binary_loc (location_t loc,
/* X % -1 is zero. */
if (!TYPE_UNSIGNED (type)
&& TREE_CODE (arg1) == INTEGER_CST
- && TREE_INT_CST_LOW (arg1) == HOST_WIDE_INT_M1U
- && TREE_INT_CST_HIGH (arg1) == -1)
+ && wi::eq_p (arg1, -1))
return omit_one_operand_loc (loc, type, integer_zero_node, arg0);
/* X % -C is the same as X % C. */
if (code == TRUNC_MOD_EXPR
- && !TYPE_UNSIGNED (type)
+ && TYPE_SIGN (type) == SIGNED
&& TREE_CODE (arg1) == INTEGER_CST
&& !TREE_OVERFLOW (arg1)
- && TREE_INT_CST_HIGH (arg1) < 0
+ && wi::neg_p (arg1)
&& !TYPE_OVERFLOW_TRAPS (type)
/* Avoid this transformation if C is INT_MIN, i.e. C == -C. */
&& !sign_bit_p (arg1, arg1))
@@ -12736,16 +12617,13 @@ fold_binary_loc (location_t loc,
fold_build2_loc (loc, code, type,
TREE_OPERAND (arg0, 1), arg1));
- /* Two consecutive rotates adding up to the precision of the
- type can be ignored. */
+ /* Two consecutive rotates adding up to the some integer
+ multiple of the precision of the type can be ignored. */
if (code == RROTATE_EXPR && TREE_CODE (arg1) == INTEGER_CST
&& TREE_CODE (arg0) == RROTATE_EXPR
&& TREE_CODE (TREE_OPERAND (arg0, 1)) == INTEGER_CST
- && TREE_INT_CST_HIGH (arg1) == 0
- && TREE_INT_CST_HIGH (TREE_OPERAND (arg0, 1)) == 0
- && ((TREE_INT_CST_LOW (arg1)
- + TREE_INT_CST_LOW (TREE_OPERAND (arg0, 1)))
- == prec))
+ && wi::umod_trunc (wi::add (arg1, TREE_OPERAND (arg0, 1)),
+ prec) == 0)
return TREE_OPERAND (arg0, 0);
/* Fold (X & C2) << C1 into (X << C1) & (C2 << C1)
@@ -13067,7 +12945,7 @@ fold_binary_loc (location_t loc,
&& operand_equal_p (tree_strip_nop_conversions (TREE_OPERAND (arg0,
1)),
arg1, 0)
- && (TREE_INT_CST_LOW (TREE_OPERAND (arg0, 0)) & 1) == 1)
+ && wi::extract_uhwi (TREE_OPERAND (arg0, 0), 0, 1) == 1)
{
return omit_two_operands_loc (loc, type,
code == NE_EXPR
@@ -13158,8 +13036,7 @@ fold_binary_loc (location_t loc,
prec = TYPE_PRECISION (itype);
/* Check for a valid shift count. */
- if (TREE_INT_CST_HIGH (arg001) == 0
- && TREE_INT_CST_LOW (arg001) < prec)
+ if (wi::ltu_p (arg001, prec))
{
tree arg01 = TREE_OPERAND (arg0, 1);
tree arg000 = TREE_OPERAND (TREE_OPERAND (arg0, 0), 0);
@@ -13284,9 +13161,7 @@ fold_binary_loc (location_t loc,
tree arg00 = TREE_OPERAND (arg0, 0);
tree arg01 = TREE_OPERAND (arg0, 1);
tree itype = TREE_TYPE (arg00);
- if (TREE_INT_CST_HIGH (arg01) == 0
- && TREE_INT_CST_LOW (arg01)
- == (unsigned HOST_WIDE_INT) (TYPE_PRECISION (itype) - 1))
+ if (wi::eq_p (arg01, TYPE_PRECISION (itype) - 1))
{
if (TYPE_UNSIGNED (itype))
{
@@ -13688,59 +13563,16 @@ fold_binary_loc (location_t loc,
the specified precision will have known values. */
{
tree arg1_type = TREE_TYPE (arg1);
- unsigned int width = TYPE_PRECISION (arg1_type);
+ unsigned int prec = TYPE_PRECISION (arg1_type);
if (TREE_CODE (arg1) == INTEGER_CST
- && width <= HOST_BITS_PER_DOUBLE_INT
&& (INTEGRAL_TYPE_P (arg1_type) || POINTER_TYPE_P (arg1_type)))
{
- HOST_WIDE_INT signed_max_hi;
- unsigned HOST_WIDE_INT signed_max_lo;
- unsigned HOST_WIDE_INT max_hi, max_lo, min_hi, min_lo;
-
- if (width <= HOST_BITS_PER_WIDE_INT)
- {
- signed_max_lo = ((unsigned HOST_WIDE_INT) 1 << (width - 1))
- - 1;
- signed_max_hi = 0;
- max_hi = 0;
-
- if (TYPE_UNSIGNED (arg1_type))
- {
- max_lo = ((unsigned HOST_WIDE_INT) 2 << (width - 1)) - 1;
- min_lo = 0;
- min_hi = 0;
- }
- else
- {
- max_lo = signed_max_lo;
- min_lo = (HOST_WIDE_INT_M1U << (width - 1));
- min_hi = -1;
- }
- }
- else
- {
- width -= HOST_BITS_PER_WIDE_INT;
- signed_max_lo = -1;
- signed_max_hi = ((unsigned HOST_WIDE_INT) 1 << (width - 1))
- - 1;
- max_lo = -1;
- min_lo = 0;
-
- if (TYPE_UNSIGNED (arg1_type))
- {
- max_hi = ((unsigned HOST_WIDE_INT) 2 << (width - 1)) - 1;
- min_hi = 0;
- }
- else
- {
- max_hi = signed_max_hi;
- min_hi = (HOST_WIDE_INT_M1U << (width - 1));
- }
- }
+ wide_int max = wi::max_value (arg1_type);
+ wide_int signed_max = wi::max_value (prec, SIGNED);
+ wide_int min = wi::min_value (arg1_type);
- if ((unsigned HOST_WIDE_INT) TREE_INT_CST_HIGH (arg1) == max_hi
- && TREE_INT_CST_LOW (arg1) == max_lo)
+ if (wi::eq_p (arg1, max))
switch (code)
{
case GT_EXPR:
@@ -13761,9 +13593,7 @@ fold_binary_loc (location_t loc,
default:
break;
}
- else if ((unsigned HOST_WIDE_INT) TREE_INT_CST_HIGH (arg1)
- == max_hi
- && TREE_INT_CST_LOW (arg1) == max_lo - 1)
+ else if (wi::eq_p (arg1, max - 1))
switch (code)
{
case GT_EXPR:
@@ -13783,9 +13613,7 @@ fold_binary_loc (location_t loc,
default:
break;
}
- else if ((unsigned HOST_WIDE_INT) TREE_INT_CST_HIGH (arg1)
- == min_hi
- && TREE_INT_CST_LOW (arg1) == min_lo)
+ else if (wi::eq_p (arg1, min))
switch (code)
{
case LT_EXPR:
@@ -13803,19 +13631,19 @@ fold_binary_loc (location_t loc,
default:
break;
}
- else if ((unsigned HOST_WIDE_INT) TREE_INT_CST_HIGH (arg1)
- == min_hi
- && TREE_INT_CST_LOW (arg1) == min_lo + 1)
+ else if (wi::eq_p (arg1, min + 1))
switch (code)
{
case GE_EXPR:
- arg1 = const_binop (MINUS_EXPR, arg1, integer_one_node);
+ arg1 = const_binop (MINUS_EXPR, arg1,
+ build_int_cst (TREE_TYPE (arg1), 1));
return fold_build2_loc (loc, NE_EXPR, type,
fold_convert_loc (loc,
TREE_TYPE (arg1), arg0),
arg1);
case LT_EXPR:
- arg1 = const_binop (MINUS_EXPR, arg1, integer_one_node);
+ arg1 = const_binop (MINUS_EXPR, arg1,
+ build_int_cst (TREE_TYPE (arg1), 1));
return fold_build2_loc (loc, EQ_EXPR, type,
fold_convert_loc (loc, TREE_TYPE (arg1),
arg0),
@@ -13824,14 +13652,13 @@ fold_binary_loc (location_t loc,
break;
}
- else if (TREE_INT_CST_HIGH (arg1) == signed_max_hi
- && TREE_INT_CST_LOW (arg1) == signed_max_lo
+ else if (wi::eq_p (arg1, signed_max)
&& TYPE_UNSIGNED (arg1_type)
/* We will flip the signedness of the comparison operator
associated with the mode of arg1, so the sign bit is
specified by this mode. Check that arg1 is the signed
max associated with this sign bit. */
- && width == GET_MODE_PRECISION (TYPE_MODE (arg1_type))
+ && prec == GET_MODE_PRECISION (TYPE_MODE (arg1_type))
/* signed_type does not work on pointer types. */
&& INTEGRAL_TYPE_P (arg1_type))
{
@@ -14356,8 +14183,6 @@ fold_ternary_loc (location_t loc, enum tree_code code, tree type,
&& TYPE_PRECISION (TREE_TYPE (tem))
< TYPE_PRECISION (type))
{
- unsigned HOST_WIDE_INT mask_lo;
- HOST_WIDE_INT mask_hi;
int inner_width, outer_width;
tree tem_type;
@@ -14366,36 +14191,17 @@ fold_ternary_loc (location_t loc, enum tree_code code, tree type,
if (outer_width > TYPE_PRECISION (type))
outer_width = TYPE_PRECISION (type);
- if (outer_width > HOST_BITS_PER_WIDE_INT)
- {
- mask_hi = (HOST_WIDE_INT_M1U
- >> (HOST_BITS_PER_DOUBLE_INT - outer_width));
- mask_lo = -1;
- }
- else
- {
- mask_hi = 0;
- mask_lo = (HOST_WIDE_INT_M1U
- >> (HOST_BITS_PER_WIDE_INT - outer_width));
- }
- if (inner_width > HOST_BITS_PER_WIDE_INT)
- {
- mask_hi &= ~(HOST_WIDE_INT_M1U
- >> (HOST_BITS_PER_WIDE_INT - inner_width));
- mask_lo = 0;
- }
- else
- mask_lo &= ~(HOST_WIDE_INT_M1U
- >> (HOST_BITS_PER_WIDE_INT - inner_width));
+ wide_int mask = wi::shifted_mask
+ (inner_width, outer_width - inner_width, false,
+ TYPE_PRECISION (TREE_TYPE (arg1)));
- if ((TREE_INT_CST_HIGH (arg1) & mask_hi) == mask_hi
- && (TREE_INT_CST_LOW (arg1) & mask_lo) == mask_lo)
+ wide_int common = mask & arg1;
+ if (common == mask)
{
tem_type = signed_type_for (TREE_TYPE (tem));
tem = fold_convert_loc (loc, tem_type, tem);
}
- else if ((TREE_INT_CST_HIGH (arg1) & mask_hi) == 0
- && (TREE_INT_CST_LOW (arg1) & mask_lo) == 0)
+ else if (common == 0)
{
tem_type = unsigned_type_for (TREE_TYPE (tem));
tem = fold_convert_loc (loc, tem_type, tem);
@@ -14424,9 +14230,9 @@ fold_ternary_loc (location_t loc, enum tree_code code, tree type,
tree tem = TREE_OPERAND (arg0, 0);
STRIP_NOPS (tem);
if (TREE_CODE (tem) == RSHIFT_EXPR
- && TREE_CODE (TREE_OPERAND (tem, 1)) == INTEGER_CST
+ && tree_fits_uhwi_p (TREE_OPERAND (tem, 1))
&& (unsigned HOST_WIDE_INT) tree_log2 (arg1) ==
- TREE_INT_CST_LOW (TREE_OPERAND (tem, 1)))
+ tree_to_uhwi (TREE_OPERAND (tem, 1)))
return fold_build2_loc (loc, BIT_AND_EXPR, type,
TREE_OPERAND (tem, 0), arg1);
}
@@ -14648,7 +14454,6 @@ fold_ternary_loc (location_t loc, enum tree_code code, tree type,
{
unsigned int nelts = TYPE_VECTOR_SUBPARTS (type), i, mask;
unsigned char *sel = XALLOCAVEC (unsigned char, nelts);
- tree t;
bool need_mask_canon = false;
bool all_in_vec0 = true;
bool all_in_vec1 = true;
@@ -14664,11 +14469,16 @@ fold_ternary_loc (location_t loc, enum tree_code code, tree type,
if (TREE_CODE (val) != INTEGER_CST)
return NULL_TREE;
- sel[i] = TREE_INT_CST_LOW (val) & mask;
- if (TREE_INT_CST_HIGH (val)
- || ((unsigned HOST_WIDE_INT)
- TREE_INT_CST_LOW (val) != sel[i]))
- need_mask_canon = true;
+ /* Make sure that the perm value is in an acceptable
+ range. */
+ wide_int t = val;
+ if (wi::gtu_p (t, mask))
+ {
+ need_mask_canon = true;
+ sel[i] = t.to_uhwi () & mask;
+ }
+ else
+ sel[i] = t.to_uhwi ();
if (sel[i] < nelts)
all_in_vec1 = false;
@@ -14702,7 +14512,7 @@ fold_ternary_loc (location_t loc, enum tree_code code, tree type,
&& (TREE_CODE (op1) == VECTOR_CST
|| TREE_CODE (op1) == CONSTRUCTOR))
{
- t = fold_vec_perm (type, op0, op1, sel);
+ tree t = fold_vec_perm (type, op0, op1, sel);
if (t != NULL_TREE)
return t;
}
@@ -15471,9 +15281,7 @@ multiple_of_p (tree type, const_tree top, const_tree bottom)
op1 = TREE_OPERAND (top, 1);
/* const_binop may not detect overflow correctly,
so check for it explicitly here. */
- if (TYPE_PRECISION (TREE_TYPE (size_one_node))
- > TREE_INT_CST_LOW (op1)
- && TREE_INT_CST_HIGH (op1) == 0
+ if (wi::gtu_p (TYPE_PRECISION (TREE_TYPE (size_one_node)), op1)
&& 0 != (t1 = fold_convert (type,
const_binop (LSHIFT_EXPR,
size_one_node,
@@ -15678,11 +15486,11 @@ tree_binary_nonnegative_warnv_p (enum tree_code code, tree type, tree op0,
&& TREE_CODE (inner1) == INTEGER_TYPE && unsigned1)
{
unsigned int precision0 = (TREE_CODE (op0) == INTEGER_CST)
- ? tree_int_cst_min_precision (op0, /*unsignedp=*/true)
+ ? tree_int_cst_min_precision (op0, UNSIGNED)
: TYPE_PRECISION (inner0);
unsigned int precision1 = (TREE_CODE (op1) == INTEGER_CST)
- ? tree_int_cst_min_precision (op1, /*unsignedp=*/true)
+ ? tree_int_cst_min_precision (op1, UNSIGNED)
: TYPE_PRECISION (inner1);
return precision0 + precision1 < TYPE_PRECISION (type);
@@ -15880,8 +15688,7 @@ tree_call_nonnegative_warnv_p (tree type, tree fndecl,
if ((n & 1) == 0)
{
REAL_VALUE_TYPE cint;
- real_from_integer (&cint, VOIDmode, n,
- n < 0 ? -1 : 0, 0);
+ real_from_integer (&cint, VOIDmode, n, SIGNED);
if (real_identical (&c, &cint))
return true;
}
@@ -16366,12 +16173,11 @@ fold_negate_const (tree arg0, tree type)
{
case INTEGER_CST:
{
- double_int val = tree_to_double_int (arg0);
bool overflow;
- val = val.neg_with_overflow (&overflow);
- t = force_fit_type_double (type, val, 1,
- (overflow | TREE_OVERFLOW (arg0))
- && !TYPE_UNSIGNED (type));
+ wide_int val = wi::neg (arg0, &overflow);
+ t = force_fit_type (type, val, 1,
+ (overflow | TREE_OVERFLOW (arg0))
+ && !TYPE_UNSIGNED (type));
break;
}
@@ -16413,12 +16219,9 @@ fold_abs_const (tree arg0, tree type)
{
case INTEGER_CST:
{
- double_int val = tree_to_double_int (arg0);
-
/* If the value is unsigned or non-negative, then the absolute value
is the same as the ordinary value. */
- if (TYPE_UNSIGNED (type)
- || !val.is_negative ())
+ if (!wi::neg_p (arg0, TYPE_SIGN (type)))
t = arg0;
/* If the value is negative, then the absolute value is
@@ -16426,9 +16229,9 @@ fold_abs_const (tree arg0, tree type)
else
{
bool overflow;
- val = val.neg_with_overflow (&overflow);
- t = force_fit_type_double (type, val, -1,
- overflow | TREE_OVERFLOW (arg0));
+ wide_int val = wi::neg (arg0, &overflow);
+ t = force_fit_type (type, val, -1,
+ overflow | TREE_OVERFLOW (arg0));
}
}
break;
@@ -16453,12 +16256,9 @@ fold_abs_const (tree arg0, tree type)
static tree
fold_not_const (const_tree arg0, tree type)
{
- double_int val;
-
gcc_assert (TREE_CODE (arg0) == INTEGER_CST);
- val = ~tree_to_double_int (arg0);
- return force_fit_type_double (type, val, 0, TREE_OVERFLOW (arg0));
+ return force_fit_type (type, wi::bit_not (arg0), 0, TREE_OVERFLOW (arg0));
}
/* Given CODE, a relational operator, the target type, TYPE and two
@@ -16601,10 +16401,8 @@ fold_relational_const (enum tree_code code, tree type, tree op0, tree op1)
{
if (code == EQ_EXPR)
result = tree_int_cst_equal (op0, op1);
- else if (TYPE_UNSIGNED (TREE_TYPE (op0)))
- result = INT_CST_LT_UNSIGNED (op0, op1);
else
- result = INT_CST_LT (op0, op1);
+ result = tree_int_cst_lt (op0, op1);
}
else
return NULL_TREE;
@@ -16862,8 +16660,7 @@ fold_ignored_result (tree t)
}
}
-/* Return the value of VALUE, rounded up to a multiple of DIVISOR.
- This can only be applied to objects of a sizetype. */
+/* Return the value of VALUE, rounded up to a multiple of DIVISOR. */
tree
round_up_loc (location_t loc, tree value, int divisor)
@@ -16891,24 +16688,19 @@ round_up_loc (location_t loc, tree value, int divisor)
{
if (TREE_CODE (value) == INTEGER_CST)
{
- double_int val = tree_to_double_int (value);
+ wide_int val = value;
bool overflow_p;
- if ((val.low & (divisor - 1)) == 0)
+ if ((val & (divisor - 1)) == 0)
return value;
overflow_p = TREE_OVERFLOW (value);
- val.low &= ~(divisor - 1);
- val.low += divisor;
- if (val.low == 0)
- {
- val.high++;
- if (val.high == 0)
- overflow_p = true;
- }
+ val &= ~(divisor - 1);
+ val += divisor;
+ if (val == 0)
+ overflow_p = true;
- return force_fit_type_double (TREE_TYPE (value), val,
- -1, overflow_p);
+ return force_fit_type (TREE_TYPE (value), val, -1, overflow_p);
}
else
{
diff --git a/gcc/fold-const.h b/gcc/fold-const.h
index f69e9f890af..023f043853e 100644
--- a/gcc/fold-const.h
+++ b/gcc/fold-const.h
@@ -118,10 +118,10 @@ extern tree fold_indirect_ref_loc (location_t, tree);
extern tree build_simple_mem_ref_loc (location_t, tree);
#define build_simple_mem_ref(T)\
build_simple_mem_ref_loc (UNKNOWN_LOCATION, T)
-extern double_int mem_ref_offset (const_tree);
+extern offset_int mem_ref_offset (const_tree);
extern tree build_invariant_address (tree, tree, HOST_WIDE_INT);
extern tree constant_boolean_node (bool, tree);
-extern tree div_if_zero_remainder (enum tree_code, const_tree, const_tree);
+extern tree div_if_zero_remainder (const_tree, const_tree);
extern bool tree_swap_operands_p (const_tree, const_tree, bool);
extern enum tree_code swap_tree_comparison (enum tree_code);
diff --git a/gcc/fortran/target-memory.c b/gcc/fortran/target-memory.c
index 3baebade84d..012880405c5 100644
--- a/gcc/fortran/target-memory.c
+++ b/gcc/fortran/target-memory.c
@@ -32,6 +32,7 @@ along with GCC; see the file COPYING3. If not see
#include "trans-const.h"
#include "trans-types.h"
#include "target-memory.h"
+#include "wide-int.h"
/* --------------------------------------------------------------- */
/* Calculate the size of an expression. */
@@ -430,7 +431,7 @@ gfc_interpret_logical (int kind, unsigned char *buffer, size_t buffer_size,
{
tree t = native_interpret_expr (gfc_get_logical_type (kind), buffer,
buffer_size);
- *logical = tree_to_double_int (t).is_zero () ? 0 : 1;
+ *logical = wi::eq_p (t, 0) ? 0 : 1;
return size_logical (kind);
}
diff --git a/gcc/fortran/trans-array.c b/gcc/fortran/trans-array.c
index 69c47bb1ab2..bb648f05891 100644
--- a/gcc/fortran/trans-array.c
+++ b/gcc/fortran/trans-array.c
@@ -90,6 +90,7 @@ along with GCC; see the file COPYING3. If not see
#include "trans-array.h"
#include "trans-const.h"
#include "dependency.h"
+#include "wide-int.h"
static bool gfc_get_array_constructor_size (mpz_t *, gfc_constructor_base);
@@ -5380,9 +5381,8 @@ gfc_conv_array_initializer (tree type, gfc_expr * expr)
{
gfc_constructor *c;
tree tmp;
+ offset_int wtmp;
gfc_se se;
- HOST_WIDE_INT hi;
- unsigned HOST_WIDE_INT lo;
tree index, range;
vec<constructor_elt, va_gc> *v = NULL;
@@ -5404,20 +5404,13 @@ gfc_conv_array_initializer (tree type, gfc_expr * expr)
else
gfc_conv_structure (&se, expr, 1);
- tmp = TYPE_MAX_VALUE (TYPE_DOMAIN (type));
- gcc_assert (tmp && INTEGER_CST_P (tmp));
- hi = TREE_INT_CST_HIGH (tmp);
- lo = TREE_INT_CST_LOW (tmp);
- lo++;
- if (lo == 0)
- hi++;
+ wtmp = wi::to_offset (TYPE_MAX_VALUE (TYPE_DOMAIN (type))) + 1;
+ gcc_assert (wtmp != 0);
/* This will probably eat buckets of memory for large arrays. */
- while (hi != 0 || lo != 0)
+ while (wtmp != 0)
{
CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, se.expr);
- if (lo == 0)
- hi--;
- lo--;
+ wtmp -= 1;
}
break;
diff --git a/gcc/fortran/trans-const.c b/gcc/fortran/trans-const.c
index a2c3e31b649..6c54e202777 100644
--- a/gcc/fortran/trans-const.c
+++ b/gcc/fortran/trans-const.c
@@ -33,6 +33,7 @@ along with GCC; see the file COPYING3. If not see
#include "trans-const.h"
#include "trans-types.h"
#include "target-memory.h"
+#include "wide-int.h"
tree gfc_rank_cst[GFC_MAX_DIMENSIONS + 1];
@@ -145,8 +146,7 @@ gfc_conv_string_init (tree length, gfc_expr * expr)
gcc_assert (expr->expr_type == EXPR_CONSTANT);
gcc_assert (expr->ts.type == BT_CHARACTER);
- gcc_assert (INTEGER_CST_P (length));
- gcc_assert (TREE_INT_CST_HIGH (length) == 0);
+ gcc_assert (tree_fits_uhwi_p (length));
len = TREE_INT_CST_LOW (length);
slen = expr->value.character.length;
@@ -201,8 +201,8 @@ gfc_init_constants (void)
tree
gfc_conv_mpz_to_tree (mpz_t i, int kind)
{
- double_int val = mpz_get_double_int (gfc_get_int_type (kind), i, true);
- return double_int_to_tree (gfc_get_int_type (kind), val);
+ wide_int val = wi::from_mpz (gfc_get_int_type (kind), i, true);
+ return wide_int_to_tree (gfc_get_int_type (kind), val);
}
/* Converts a backend tree into a GMP integer. */
@@ -210,8 +210,7 @@ gfc_conv_mpz_to_tree (mpz_t i, int kind)
void
gfc_conv_tree_to_mpz (mpz_t i, tree source)
{
- double_int val = tree_to_double_int (source);
- mpz_set_double_int (i, val, TYPE_UNSIGNED (TREE_TYPE (source)));
+ wi::to_mpz (source, i, TYPE_SIGN (TREE_TYPE (source)));
}
/* Converts a real constant into backend form. */
diff --git a/gcc/fortran/trans-decl.c b/gcc/fortran/trans-decl.c
index ee6c7e3004d..bd1ebab46b2 100644
--- a/gcc/fortran/trans-decl.c
+++ b/gcc/fortran/trans-decl.c
@@ -406,7 +406,7 @@ gfc_can_put_var_on_stack (tree size)
if (gfc_option.flag_max_stack_var_size < 0)
return 1;
- if (TREE_INT_CST_HIGH (size) != 0)
+ if (!tree_fits_uhwi_p (size))
return 0;
low = TREE_INT_CST_LOW (size);
diff --git a/gcc/fortran/trans-expr.c b/gcc/fortran/trans-expr.c
index 6b9353767ef..5a501227863 100644
--- a/gcc/fortran/trans-expr.c
+++ b/gcc/fortran/trans-expr.c
@@ -40,7 +40,7 @@ along with GCC; see the file COPYING3. If not see
#include "trans-stmt.h"
#include "dependency.h"
#include "gimplify.h"
-
+#include "wide-int.h"
/* Convert a scalar to an array descriptor. To be used for assumed-rank
arrays. */
@@ -2112,13 +2112,14 @@ gfc_conv_cst_int_power (gfc_se * se, tree lhs, tree rhs)
HOST_WIDE_INT m;
unsigned HOST_WIDE_INT n;
int sgn;
+ wide_int wrhs = rhs;
/* If exponent is too large, we won't expand it anyway, so don't bother
with large integer values. */
- if (!TREE_INT_CST (rhs).fits_shwi ())
+ if (!wi::fits_shwi_p (wrhs))
return 0;
- m = TREE_INT_CST (rhs).to_shwi ();
+ m = wrhs.to_shwi ();
/* There's no ABS for HOST_WIDE_INT, so here we go. It also takes care
of the asymmetric range of the integer type. */
n = (unsigned HOST_WIDE_INT) (m < 0 ? -m : m);
@@ -2657,7 +2658,7 @@ gfc_string_to_single_character (tree len, tree str, int kind)
{
if (len == NULL
- || !INTEGER_CST_P (len) || TREE_INT_CST_HIGH (len) != 0
+ || !tree_fits_uhwi_p (len)
|| !POINTER_TYPE_P (TREE_TYPE (str)))
return NULL_TREE;
@@ -2771,8 +2772,9 @@ gfc_optimize_len_trim (tree len, tree str, int kind)
&& TREE_CODE (TREE_OPERAND (TREE_OPERAND (str, 0), 0)) == STRING_CST
&& array_ref_low_bound (TREE_OPERAND (str, 0))
== TREE_OPERAND (TREE_OPERAND (str, 0), 1)
- && TREE_INT_CST_LOW (len) >= 1
- && TREE_INT_CST_LOW (len)
+ && tree_fits_uhwi_p (len)
+ && tree_to_uhwi (len) >= 1
+ && tree_to_uhwi (len)
== (unsigned HOST_WIDE_INT)
TREE_STRING_LENGTH (TREE_OPERAND (TREE_OPERAND (str, 0), 0)))
{
diff --git a/gcc/fortran/trans-intrinsic.c b/gcc/fortran/trans-intrinsic.c
index e13c0dedd11..c166c4f0bcf 100644
--- a/gcc/fortran/trans-intrinsic.c
+++ b/gcc/fortran/trans-intrinsic.c
@@ -43,6 +43,7 @@ along with GCC; see the file COPYING3. If not see
/* Only for gfc_trans_assign and gfc_trans_pointer_assign. */
#include "trans-stmt.h"
#include "tree-nested.h"
+#include "wide-int.h"
/* This maps Fortran intrinsic math functions to external library or GCC
builtin functions. */
@@ -987,12 +988,8 @@ trans_this_image (gfc_se * se, gfc_expr *expr)
if (INTEGER_CST_P (dim_arg))
{
- int hi, co_dim;
-
- hi = TREE_INT_CST_HIGH (dim_arg);
- co_dim = TREE_INT_CST_LOW (dim_arg);
- if (hi || co_dim < 1
- || co_dim > GFC_TYPE_ARRAY_CORANK (TREE_TYPE (desc)))
+ if (wi::ltu_p (dim_arg, 1)
+ || wi::gtu_p (dim_arg, GFC_TYPE_ARRAY_CORANK (TREE_TYPE (desc))))
gfc_error ("'dim' argument of %s intrinsic at %L is not a valid "
"dimension index", expr->value.function.isym->name,
&expr->where);
@@ -1352,14 +1349,9 @@ gfc_conv_intrinsic_bound (gfc_se * se, gfc_expr * expr, int upper)
if (INTEGER_CST_P (bound))
{
- int hi, low;
-
- hi = TREE_INT_CST_HIGH (bound);
- low = TREE_INT_CST_LOW (bound);
- if (hi || low < 0
- || ((!as || as->type != AS_ASSUMED_RANK)
- && low >= GFC_TYPE_ARRAY_RANK (TREE_TYPE (desc)))
- || low > GFC_MAX_DIMENSIONS)
+ if (((!as || as->type != AS_ASSUMED_RANK)
+ && wi::geu_p (bound, GFC_TYPE_ARRAY_RANK (TREE_TYPE (desc))))
+ || wi::gtu_p (bound, GFC_MAX_DIMENSIONS))
gfc_error ("'dim' argument of %s intrinsic at %L is not a valid "
"dimension index", upper ? "UBOUND" : "LBOUND",
&expr->where);
@@ -1554,11 +1546,8 @@ conv_intrinsic_cobound (gfc_se * se, gfc_expr * expr)
if (INTEGER_CST_P (bound))
{
- int hi, low;
-
- hi = TREE_INT_CST_HIGH (bound);
- low = TREE_INT_CST_LOW (bound);
- if (hi || low < 1 || low > GFC_TYPE_ARRAY_CORANK (TREE_TYPE (desc)))
+ if (wi::ltu_p (bound, 1)
+ || wi::gtu_p (bound, GFC_TYPE_ARRAY_CORANK (TREE_TYPE (desc))))
gfc_error ("'dim' argument of %s intrinsic at %L is not a valid "
"dimension index", expr->value.function.isym->name,
&expr->where);
diff --git a/gcc/fortran/trans-types.c b/gcc/fortran/trans-types.c
index f69371288a9..77d0e785e0b 100644
--- a/gcc/fortran/trans-types.c
+++ b/gcc/fortran/trans-types.c
@@ -863,8 +863,6 @@ gfc_init_types (void)
int index;
tree type;
unsigned n;
- unsigned HOST_WIDE_INT hi;
- unsigned HOST_WIDE_INT lo;
/* Create and name the types. */
#define PUSH_TYPE(name, node) \
@@ -956,13 +954,10 @@ gfc_init_types (void)
descriptor. */
n = TYPE_PRECISION (gfc_array_index_type) - GFC_DTYPE_SIZE_SHIFT;
- lo = ~ (unsigned HOST_WIDE_INT) 0;
- if (n > HOST_BITS_PER_WIDE_INT)
- hi = lo >> (2*HOST_BITS_PER_WIDE_INT - n);
- else
- hi = 0, lo >>= HOST_BITS_PER_WIDE_INT - n;
gfc_max_array_element_size
- = build_int_cst_wide (long_unsigned_type_node, lo, hi);
+ = wide_int_to_tree (long_unsigned_type_node,
+ wi::mask (n, UNSIGNED,
+ TYPE_PRECISION (long_unsigned_type_node)));
boolean_type_node = gfc_get_logical_type (gfc_default_logical_kind);
boolean_true_node = build_int_cst (boolean_type_node, 1);
@@ -1902,7 +1897,7 @@ gfc_get_array_type_bounds (tree etype, int dimen, int codimen, tree * lbound,
if (stride)
rtype = build_range_type (gfc_array_index_type, gfc_index_zero_node,
int_const_binop (MINUS_EXPR, stride,
- integer_one_node));
+ build_int_cst (TREE_TYPE (stride), 1)));
else
rtype = gfc_array_range_type;
arraytype = build_array_type (etype, rtype);
diff --git a/gcc/gencheck.c b/gcc/gencheck.c
index b6dab646054..a5ec8361e7f 100644
--- a/gcc/gencheck.c
+++ b/gcc/gencheck.c
@@ -17,6 +17,9 @@ You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
+/* We don't have insn-modes.h, but we include tm.h. */
+#define BITS_PER_UNIT 8
+
#include "bconfig.h"
#include "system.h"
#include "coretypes.h"
diff --git a/gcc/genemit.c b/gcc/genemit.c
index faaa610a529..16b56449687 100644
--- a/gcc/genemit.c
+++ b/gcc/genemit.c
@@ -204,6 +204,7 @@ gen_exp (rtx x, enum rtx_code subroutine_type, char *used)
case CONST_DOUBLE:
case CONST_FIXED:
+ case CONST_WIDE_INT:
/* These shouldn't be written in MD files. Instead, the appropriate
routines in varasm.c should be called. */
gcc_unreachable ();
diff --git a/gcc/gengenrtl.c b/gcc/gengenrtl.c
index f1f9f16884b..d82ee39e7ee 100644
--- a/gcc/gengenrtl.c
+++ b/gcc/gengenrtl.c
@@ -142,6 +142,7 @@ static int
excluded_rtx (int idx)
{
return ((strcmp (defs[idx].enumname, "CONST_DOUBLE") == 0)
+ || (strcmp (defs[idx].enumname, "CONST_WIDE_INT") == 0)
|| (strcmp (defs[idx].enumname, "CONST_FIXED") == 0));
}
diff --git a/gcc/gengtype-lex.l b/gcc/gengtype-lex.l
index 09fbc9b85ea..936b28cdc4a 100644
--- a/gcc/gengtype-lex.l
+++ b/gcc/gengtype-lex.l
@@ -57,7 +57,7 @@ ITYPE {IWORD}({WS}{IWORD})*
/* Include '::' in identifiers to capture C++ scope qualifiers. */
ID {CID}({HWS}::{HWS}{CID})*
EOID [^[:alnum:]_]
-CXX_KEYWORD inline|public:|private:|protected:|template|operator|friend
+CXX_KEYWORD inline|public:|private:|protected:|template|operator|friend|static
%x in_struct in_struct_comment in_comment
%option warn noyywrap nounput nodefault perf-report
@@ -110,6 +110,7 @@ CXX_KEYWORD inline|public:|private:|protected:|template|operator|friend
"const"/{EOID} /* don't care */
{CXX_KEYWORD}/{EOID} |
"~" |
+"^" |
"&" {
*yylval = XDUPVAR (const char, yytext, yyleng, yyleng + 1);
return IGNORABLE_CXX_KEYWORD;
diff --git a/gcc/gengtype-parse.c b/gcc/gengtype-parse.c
index bb7bcf72528..96f04764c58 100644
--- a/gcc/gengtype-parse.c
+++ b/gcc/gengtype-parse.c
@@ -197,6 +197,23 @@ require2 (int t1, int t2)
return v;
}
+/* If the next token does not have one of the codes T1, T2 or T3, report a
+ parse error; otherwise return the token's value. */
+static const char *
+require3 (int t1, int t2, int t3)
+{
+ int u = token ();
+ const char *v = advance ();
+ if (u != t1 && u != t2 && u != t3)
+ {
+ parse_error ("expected %s, %s or %s, have %s",
+ print_token (t1, 0), print_token (t2, 0),
+ print_token (t3, 0), print_token (u, v));
+ return 0;
+ }
+ return v;
+}
+
/* Near-terminals. */
/* C-style string constant concatenation: STRING+
@@ -243,18 +260,45 @@ require_template_declaration (const char *tmpl_name)
str = concat (tmpl_name, "<", (char *) 0);
/* Read the comma-separated list of identifiers. */
- while (token () != '>')
+ int depth = 1;
+ while (depth > 0)
{
- const char *id = require2 (ID, ',');
+ if (token () == ENUM)
+ {
+ advance ();
+ str = concat (str, "enum ", (char *) 0);
+ continue;
+ }
+ if (token () == NUM)
+ {
+ str = concat (str, advance (), (char *) 0);
+ continue;
+ }
+ if (token () == ':')
+ {
+ advance ();
+ str = concat (str, ":", (char *) 0);
+ continue;
+ }
+ if (token () == '<')
+ {
+ advance ();
+ str = concat (str, "<", (char *) 0);
+ depth += 1;
+ continue;
+ }
+ if (token () == '>')
+ {
+ advance ();
+ str = concat (str, ">", (char *) 0);
+ depth -= 1;
+ continue;
+ }
+ const char *id = require3 (SCALAR, ID, ',');
if (id == NULL)
id = ",";
str = concat (str, id, (char *) 0);
}
-
- /* Recognize the closing '>'. */
- require ('>');
- str = concat (str, ">", (char *) 0);
-
return str;
}
diff --git a/gcc/gengtype-state.c b/gcc/gengtype-state.c
index 0b925b539bc..2ca0e1de1e7 100644
--- a/gcc/gengtype-state.c
+++ b/gcc/gengtype-state.c
@@ -30,7 +30,6 @@
#endif
#include "system.h"
#include "errors.h" /* For fatal. */
-#include "double-int.h"
#include "hashtab.h"
#include "version.h" /* For version_string & pkgversion_string. */
#include "obstack.h"
diff --git a/gcc/gengtype.c b/gcc/gengtype.c
index 031004a7b1f..1c13eeaf7d4 100644
--- a/gcc/gengtype.c
+++ b/gcc/gengtype.c
@@ -25,7 +25,6 @@
#include "system.h"
#include "errors.h" /* for fatal */
#include "getopt.h"
-#include "double-int.h"
#include "version.h" /* for version_string & pkgversion_string. */
#include "hashtab.h"
#include "xregex.h"
@@ -535,7 +534,7 @@ do_typedef (const char *s, type_p t, struct fileloc *pos)
for (p = typedefs; p != NULL; p = p->next)
if (strcmp (p->name, s) == 0)
{
- if (p->type != t)
+ if (p->type != t && strcmp (s, "result_type") != 0)
{
error_at_line (pos, "type `%s' previously defined", s);
error_at_line (&p->line, "previously defined here");
@@ -1766,7 +1765,7 @@ open_base_files (void)
static const char *const ifiles[] = {
"config.h", "system.h", "coretypes.h", "tm.h",
"hashtab.h", "splay-tree.h", "obstack.h", "bitmap.h", "input.h",
- "tree.h", "rtl.h", "function.h", "insn-config.h", "expr.h",
+ "tree.h", "rtl.h", "wide-int.h", "function.h", "insn-config.h", "expr.h",
"hard-reg-set.h", "basic-block.h", "cselib.h", "insn-addr.h",
"optabs.h", "libfuncs.h", "debug.h", "ggc.h", "cgraph.h",
"pointer-set.h", "hash-table.h", "vec.h", "ggc.h", "basic-block.h",
@@ -5670,6 +5669,8 @@ main (int argc, char **argv)
POS_HERE (do_scalar_typedef ("REAL_VALUE_TYPE", &pos));
POS_HERE (do_scalar_typedef ("FIXED_VALUE_TYPE", &pos));
POS_HERE (do_scalar_typedef ("double_int", &pos));
+ POS_HERE (do_scalar_typedef ("offset_int", &pos));
+ POS_HERE (do_scalar_typedef ("widest_int", &pos));
POS_HERE (do_scalar_typedef ("uint64_t", &pos));
POS_HERE (do_scalar_typedef ("uint8", &pos));
POS_HERE (do_scalar_typedef ("uintptr_t", &pos));
diff --git a/gcc/genpreds.c b/gcc/genpreds.c
index eba3e1472c9..1fcf611f9c7 100644
--- a/gcc/genpreds.c
+++ b/gcc/genpreds.c
@@ -612,7 +612,7 @@ write_one_predicate_function (struct pred_data *p)
add_mode_tests (p);
/* A normal predicate can legitimately not look at enum machine_mode
- if it accepts only CONST_INTs and/or CONST_DOUBLEs. */
+ if it accepts only CONST_INTs and/or CONST_WIDE_INT and/or CONST_DOUBLEs. */
printf ("int\n%s (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)\n{\n",
p->name);
write_predicate_stmts (p->exp);
@@ -1075,12 +1075,17 @@ write_tm_constrs_h (void)
if (needs_ival)
puts (" if (CONST_INT_P (op))\n"
" ival = INTVAL (op);");
+#if TARGET_SUPPORTS_WIDE_INT
+ if (needs_lval || needs_hval)
+ error ("you can't use lval or hval");
+#else
if (needs_hval)
puts (" if (GET_CODE (op) == CONST_DOUBLE && mode == VOIDmode)"
" hval = CONST_DOUBLE_HIGH (op);");
if (needs_lval)
puts (" if (GET_CODE (op) == CONST_DOUBLE && mode == VOIDmode)"
" lval = CONST_DOUBLE_LOW (op);");
+#endif
if (needs_rval)
puts (" if (GET_CODE (op) == CONST_DOUBLE && mode != VOIDmode)"
" rval = CONST_DOUBLE_REAL_VALUE (op);");
diff --git a/gcc/genrecog.c b/gcc/genrecog.c
index a7949e81587..457b59c901d 100644
--- a/gcc/genrecog.c
+++ b/gcc/genrecog.c
@@ -586,6 +586,7 @@ validate_pattern (rtx pattern, rtx insn, rtx set, int set_code)
&& GET_CODE (src) != PC
&& GET_CODE (src) != CC0
&& !CONST_INT_P (src)
+ && !CONST_WIDE_INT_P (src)
&& GET_CODE (src) != CALL)
{
const char *which;
@@ -770,13 +771,14 @@ add_to_sequence (rtx pattern, struct decision_head *last,
We can optimize the generated code a little if either
(a) the predicate only accepts one code, or (b) the
- predicate does not allow CONST_INT, in which case it
- can match only if the modes match. */
+ predicate does not allow CONST_INT or CONST_WIDE_INT,
+ in which case it can match only if the modes match. */
pred = lookup_predicate (pred_name);
if (pred)
{
test->u.pred.data = pred;
- allows_const_int = pred->codes[CONST_INT];
+ allows_const_int = (pred->codes[CONST_INT]
+ || pred->codes[CONST_WIDE_INT]);
if (was_code == MATCH_PARALLEL
&& pred->singleton != PARALLEL)
error_with_line (pattern_lineno,
diff --git a/gcc/gensupport.c b/gcc/gensupport.c
index 33ea81363c6..b51f04d28fb 100644
--- a/gcc/gensupport.c
+++ b/gcc/gensupport.c
@@ -2806,7 +2806,12 @@ static const struct std_pred_table std_preds[] = {
{"scratch_operand", false, false, {SCRATCH, REG}},
{"immediate_operand", false, true, {UNKNOWN}},
{"const_int_operand", false, false, {CONST_INT}},
+#if TARGET_SUPPORTS_WIDE_INT
+ {"const_scalar_int_operand", false, false, {CONST_INT, CONST_WIDE_INT}},
+ {"const_double_operand", false, false, {CONST_DOUBLE}},
+#else
{"const_double_operand", false, false, {CONST_INT, CONST_DOUBLE}},
+#endif
{"nonimmediate_operand", false, false, {SUBREG, REG, MEM}},
{"nonmemory_operand", false, true, {SUBREG, REG}},
{"push_operand", false, false, {MEM}},
diff --git a/gcc/gimple-fold.c b/gcc/gimple-fold.c
index 8b47f513652..9d42e6af685 100644
--- a/gcc/gimple-fold.c
+++ b/gcc/gimple-fold.c
@@ -2836,7 +2836,7 @@ get_base_constructor (tree base, HOST_WIDE_INT *bit_offset,
{
if (!tree_fits_shwi_p (TREE_OPERAND (base, 1)))
return NULL_TREE;
- *bit_offset += (mem_ref_offset (base).low
+ *bit_offset += (mem_ref_offset (base).to_short_addr ()
* BITS_PER_UNIT);
}
@@ -2931,9 +2931,10 @@ fold_array_ctor_reference (tree type, tree ctor,
{
unsigned HOST_WIDE_INT cnt;
tree cfield, cval;
- double_int low_bound, elt_size;
- double_int index, max_index;
- double_int access_index;
+ offset_int low_bound;
+ offset_int elt_size;
+ offset_int index, max_index;
+ offset_int access_index;
tree domain_type = NULL_TREE, index_type = NULL_TREE;
HOST_WIDE_INT inner_offset;
@@ -2945,32 +2946,30 @@ fold_array_ctor_reference (tree type, tree ctor,
/* Static constructors for variably sized objects makes no sense. */
gcc_assert (TREE_CODE (TYPE_MIN_VALUE (domain_type)) == INTEGER_CST);
index_type = TREE_TYPE (TYPE_MIN_VALUE (domain_type));
- low_bound = tree_to_double_int (TYPE_MIN_VALUE (domain_type));
+ low_bound = wi::to_offset (TYPE_MIN_VALUE (domain_type));
}
else
- low_bound = double_int_zero;
+ low_bound = 0;
/* Static constructors for variably sized objects makes no sense. */
gcc_assert (TREE_CODE (TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (ctor))))
== INTEGER_CST);
- elt_size =
- tree_to_double_int (TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (ctor))));
-
+ elt_size = wi::to_offset (TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (ctor))));
/* We can handle only constantly sized accesses that are known to not
be larger than size of array element. */
if (!TYPE_SIZE_UNIT (type)
|| TREE_CODE (TYPE_SIZE_UNIT (type)) != INTEGER_CST
- || elt_size.slt (tree_to_double_int (TYPE_SIZE_UNIT (type)))
- || elt_size.is_zero ())
+ || wi::lts_p (elt_size, wi::to_offset (TYPE_SIZE_UNIT (type)))
+ || elt_size == 0)
return NULL_TREE;
/* Compute the array index we look for. */
- access_index = double_int::from_uhwi (offset / BITS_PER_UNIT)
- .udiv (elt_size, TRUNC_DIV_EXPR);
+ access_index = wi::udiv_trunc (offset_int (offset / BITS_PER_UNIT),
+ elt_size);
access_index += low_bound;
if (index_type)
- access_index = access_index.ext (TYPE_PRECISION (index_type),
- TYPE_UNSIGNED (index_type));
+ access_index = wi::ext (access_index, TYPE_PRECISION (index_type),
+ TYPE_SIGN (index_type));
/* And offset within the access. */
inner_offset = offset % (elt_size.to_uhwi () * BITS_PER_UNIT);
@@ -2980,9 +2979,10 @@ fold_array_ctor_reference (tree type, tree ctor,
if (inner_offset + size > elt_size.to_uhwi () * BITS_PER_UNIT)
return NULL_TREE;
- index = low_bound - double_int_one;
+ index = low_bound - 1;
if (index_type)
- index = index.ext (TYPE_PRECISION (index_type), TYPE_UNSIGNED (index_type));
+ index = wi::ext (index, TYPE_PRECISION (index_type),
+ TYPE_SIGN (index_type));
FOR_EACH_CONSTRUCTOR_ELT (CONSTRUCTOR_ELTS (ctor), cnt, cfield, cval)
{
@@ -2992,26 +2992,26 @@ fold_array_ctor_reference (tree type, tree ctor,
if (cfield)
{
if (TREE_CODE (cfield) == INTEGER_CST)
- max_index = index = tree_to_double_int (cfield);
+ max_index = index = wi::to_offset (cfield);
else
{
gcc_assert (TREE_CODE (cfield) == RANGE_EXPR);
- index = tree_to_double_int (TREE_OPERAND (cfield, 0));
- max_index = tree_to_double_int (TREE_OPERAND (cfield, 1));
+ index = wi::to_offset (TREE_OPERAND (cfield, 0));
+ max_index = wi::to_offset (TREE_OPERAND (cfield, 1));
}
}
else
{
- index += double_int_one;
+ index += 1;
if (index_type)
- index = index.ext (TYPE_PRECISION (index_type),
- TYPE_UNSIGNED (index_type));
+ index = wi::ext (index, TYPE_PRECISION (index_type),
+ TYPE_SIGN (index_type));
max_index = index;
}
/* Do we have match? */
- if (access_index.cmp (index, 1) >= 0
- && access_index.cmp (max_index, 1) <= 0)
+ if (wi::cmpu (access_index, index) >= 0
+ && wi::cmpu (access_index, max_index) <= 0)
return fold_ctor_reference (type, cval, inner_offset, size,
from_decl);
}
@@ -3038,10 +3038,8 @@ fold_nonarray_ctor_reference (tree type, tree ctor,
tree byte_offset = DECL_FIELD_OFFSET (cfield);
tree field_offset = DECL_FIELD_BIT_OFFSET (cfield);
tree field_size = DECL_SIZE (cfield);
- double_int bitoffset;
- double_int byte_offset_cst = tree_to_double_int (byte_offset);
- double_int bits_per_unit_cst = double_int::from_uhwi (BITS_PER_UNIT);
- double_int bitoffset_end, access_end;
+ offset_int bitoffset;
+ offset_int bitoffset_end, access_end;
/* Variable sized objects in static constructors makes no sense,
but field_size can be NULL for flexible array members. */
@@ -3052,30 +3050,30 @@ fold_nonarray_ctor_reference (tree type, tree ctor,
: TREE_CODE (TREE_TYPE (cfield)) == ARRAY_TYPE));
/* Compute bit offset of the field. */
- bitoffset = tree_to_double_int (field_offset)
- + byte_offset_cst * bits_per_unit_cst;
+ bitoffset = (wi::to_offset (field_offset)
+ + wi::lshift (wi::to_offset (byte_offset),
+ LOG2_BITS_PER_UNIT));
/* Compute bit offset where the field ends. */
if (field_size != NULL_TREE)
- bitoffset_end = bitoffset + tree_to_double_int (field_size);
+ bitoffset_end = bitoffset + wi::to_offset (field_size);
else
- bitoffset_end = double_int_zero;
+ bitoffset_end = 0;
- access_end = double_int::from_uhwi (offset)
- + double_int::from_uhwi (size);
+ access_end = offset_int (offset) + size;
/* Is there any overlap between [OFFSET, OFFSET+SIZE) and
[BITOFFSET, BITOFFSET_END)? */
- if (access_end.cmp (bitoffset, 0) > 0
+ if (wi::cmps (access_end, bitoffset) > 0
&& (field_size == NULL_TREE
- || double_int::from_uhwi (offset).slt (bitoffset_end)))
+ || wi::lts_p (offset, bitoffset_end)))
{
- double_int inner_offset = double_int::from_uhwi (offset) - bitoffset;
+ offset_int inner_offset = offset_int (offset) - bitoffset;
/* We do have overlap. Now see if field is large enough to
cover the access. Give up for accesses spanning multiple
fields. */
- if (access_end.cmp (bitoffset_end, 0) > 0)
+ if (wi::cmps (access_end, bitoffset_end) > 0)
return NULL_TREE;
- if (double_int::from_uhwi (offset).slt (bitoffset))
+ if (wi::lts_p (offset, bitoffset))
return NULL_TREE;
return fold_ctor_reference (type, cval,
inner_offset.to_uhwi (), size,
@@ -3166,37 +3164,42 @@ fold_const_aggregate_ref_1 (tree t, tree (*valueize) (tree))
&& TREE_CODE (idx) == INTEGER_CST)
{
tree low_bound, unit_size;
- double_int doffset;
/* If the resulting bit-offset is constant, track it. */
if ((low_bound = array_ref_low_bound (t),
TREE_CODE (low_bound) == INTEGER_CST)
&& (unit_size = array_ref_element_size (t),
- tree_fits_uhwi_p (unit_size))
- && (doffset = (TREE_INT_CST (idx) - TREE_INT_CST (low_bound))
- .sext (TYPE_PRECISION (TREE_TYPE (idx))),
- doffset.fits_shwi ()))
+ tree_fits_uhwi_p (unit_size)))
{
- offset = doffset.to_shwi ();
- offset *= tree_to_uhwi (unit_size);
- offset *= BITS_PER_UNIT;
-
- base = TREE_OPERAND (t, 0);
- ctor = get_base_constructor (base, &offset, valueize);
- /* Empty constructor. Always fold to 0. */
- if (ctor == error_mark_node)
- return build_zero_cst (TREE_TYPE (t));
- /* Out of bound array access. Value is undefined,
- but don't fold. */
- if (offset < 0)
- return NULL_TREE;
- /* We can not determine ctor. */
- if (!ctor)
- return NULL_TREE;
- return fold_ctor_reference (TREE_TYPE (t), ctor, offset,
- tree_to_uhwi (unit_size)
- * BITS_PER_UNIT,
- base);
+ offset_int woffset
+ = wi::sext (wi::to_offset (idx) - wi::to_offset (low_bound),
+ TYPE_PRECISION (TREE_TYPE (idx)));
+
+ if (wi::fits_shwi_p (woffset))
+ {
+ offset = woffset.to_shwi ();
+ /* TODO: This code seems wrong, multiply then check
+ to see if it fits. */
+ offset *= tree_to_uhwi (unit_size);
+ offset *= BITS_PER_UNIT;
+
+ base = TREE_OPERAND (t, 0);
+ ctor = get_base_constructor (base, &offset, valueize);
+ /* Empty constructor. Always fold to 0. */
+ if (ctor == error_mark_node)
+ return build_zero_cst (TREE_TYPE (t));
+ /* Out of bound array access. Value is undefined,
+ but don't fold. */
+ if (offset < 0)
+ return NULL_TREE;
+ /* We can not determine ctor. */
+ if (!ctor)
+ return NULL_TREE;
+ return fold_ctor_reference (TREE_TYPE (t), ctor, offset,
+ tree_to_uhwi (unit_size)
+ * BITS_PER_UNIT,
+ base);
+ }
}
}
/* Fallthru. */
@@ -3503,7 +3506,7 @@ gimple_val_nonnegative_real_p (tree val)
if ((n & 1) == 0)
{
REAL_VALUE_TYPE cint;
- real_from_integer (&cint, VOIDmode, n, n < 0 ? -1 : 0, 0);
+ real_from_integer (&cint, VOIDmode, n, SIGNED);
if (real_identical (&c, &cint))
return true;
}
@@ -3616,9 +3619,7 @@ gimple_fold_indirect_ref (tree t)
|| DECL_P (TREE_OPERAND (addr, 0)))
return fold_build2 (MEM_REF, type,
addr,
- build_int_cst_wide (ptype,
- TREE_INT_CST_LOW (off),
- TREE_INT_CST_HIGH (off)));
+ wide_int_to_tree (ptype, off));
}
/* *(foo *)fooarrptr => (*fooarrptr)[0] */
diff --git a/gcc/gimple-pretty-print.c b/gcc/gimple-pretty-print.c
index 741cd929983..77afa20dfe5 100644
--- a/gcc/gimple-pretty-print.c
+++ b/gcc/gimple-pretty-print.c
@@ -1755,7 +1755,7 @@ dump_ssaname_info (pretty_printer *buffer, tree node, int spc)
if (!POINTER_TYPE_P (TREE_TYPE (node))
&& SSA_NAME_RANGE_INFO (node))
{
- double_int min, max, nonzero_bits;
+ wide_int min, max, nonzero_bits;
value_range_type range_type = get_range_info (node, &min, &max);
if (range_type == VR_VARYING)
@@ -1764,22 +1764,16 @@ dump_ssaname_info (pretty_printer *buffer, tree node, int spc)
{
pp_printf (buffer, "# RANGE ");
pp_printf (buffer, "%s[", range_type == VR_RANGE ? "" : "~");
- pp_double_int (buffer, min, TYPE_UNSIGNED (TREE_TYPE (node)));
+ pp_wide_int (buffer, min, TYPE_SIGN (TREE_TYPE (node)));
pp_printf (buffer, ", ");
- pp_double_int (buffer, max, TYPE_UNSIGNED (TREE_TYPE (node)));
+ pp_wide_int (buffer, max, TYPE_SIGN (TREE_TYPE (node)));
pp_printf (buffer, "]");
}
nonzero_bits = get_nonzero_bits (node);
- if (nonzero_bits != double_int_minus_one
- && (nonzero_bits
- != double_int::mask (TYPE_PRECISION (TREE_TYPE (node)))))
+ if (nonzero_bits != -1)
{
pp_string (buffer, " NONZERO ");
- sprintf (pp_buffer (buffer)->digit_buffer,
- HOST_WIDE_INT_PRINT_DOUBLE_HEX,
- (unsigned HOST_WIDE_INT) nonzero_bits.high,
- nonzero_bits.low);
- pp_string (buffer, pp_buffer (buffer)->digit_buffer);
+ pp_wide_int (buffer, nonzero_bits, UNSIGNED);
}
newline_and_indent (buffer, spc);
}
diff --git a/gcc/gimple-ssa-strength-reduction.c b/gcc/gimple-ssa-strength-reduction.c
index dbf6a402140..a41d9722dae 100644
--- a/gcc/gimple-ssa-strength-reduction.c
+++ b/gcc/gimple-ssa-strength-reduction.c
@@ -63,6 +63,7 @@ along with GCC; see the file COPYING3. If not see
#include "params.h"
#include "tree-ssa-address.h"
#include "tree-affine.h"
+#include "wide-int-print.h"
/* Information about a strength reduction candidate. Each statement
in the candidate table represents an expression of one of the
@@ -244,7 +245,7 @@ struct slsr_cand_d
tree stride;
/* The index constant i. */
- double_int index;
+ widest_int index;
/* The type of the candidate. This is normally the type of base_expr,
but casts may have occurred when combining feeding instructions.
@@ -319,7 +320,7 @@ typedef const struct cand_chain_d *const_cand_chain_t;
struct incr_info_d
{
/* The increment that relates a candidate to its basis. */
- double_int incr;
+ widest_int incr;
/* How many times the increment occurs in the candidate tree. */
unsigned count;
@@ -454,7 +455,7 @@ get_alternative_base (tree base)
tree_to_aff_combination_expand (base, TREE_TYPE (base),
&aff, &name_expansions);
- aff.offset = tree_to_double_int (integer_zero_node);
+ aff.offset = 0;
expr = aff_combination_to_tree (&aff);
result = (tree *) pointer_map_insert (alt_base_map, base);
@@ -627,7 +628,7 @@ record_potential_basis (slsr_cand_t c, tree base)
static slsr_cand_t
alloc_cand_and_find_basis (enum cand_kind kind, gimple gs, tree base,
- double_int index, tree stride, tree ctype,
+ const widest_int &index, tree stride, tree ctype,
unsigned savings)
{
slsr_cand_t c = (slsr_cand_t) obstack_alloc (&cand_obstack,
@@ -824,8 +825,8 @@ slsr_process_phi (gimple phi, bool speed)
CAND_PHI. */
base_type = TREE_TYPE (arg0_base);
- c = alloc_cand_and_find_basis (CAND_PHI, phi, arg0_base, double_int_zero,
- integer_one_node, base_type, savings);
+ c = alloc_cand_and_find_basis (CAND_PHI, phi, arg0_base,
+ 0, integer_one_node, base_type, savings);
/* Add the candidate to the statement-candidate mapping. */
add_cand_for_stmt (phi, c);
@@ -842,7 +843,7 @@ slsr_process_phi (gimple phi, bool speed)
int (i * S).
Otherwise, just return double int zero. */
-static double_int
+static widest_int
backtrace_base_for_ref (tree *pbase)
{
tree base_in = *pbase;
@@ -858,19 +859,19 @@ backtrace_base_for_ref (tree *pbase)
base_in = get_unwidened (base_in, NULL_TREE);
if (TREE_CODE (base_in) != SSA_NAME)
- return tree_to_double_int (integer_zero_node);
+ return 0;
base_cand = base_cand_from_table (base_in);
while (base_cand && base_cand->kind != CAND_PHI)
{
if (base_cand->kind == CAND_ADD
- && base_cand->index.is_one ()
+ && base_cand->index == 1
&& TREE_CODE (base_cand->stride) == INTEGER_CST)
{
/* X = B + (1 * S), S is integer constant. */
*pbase = base_cand->base_expr;
- return tree_to_double_int (base_cand->stride);
+ return wi::to_widest (base_cand->stride);
}
else if (base_cand->kind == CAND_ADD
&& TREE_CODE (base_cand->stride) == INTEGER_CST
@@ -887,7 +888,7 @@ backtrace_base_for_ref (tree *pbase)
base_cand = NULL;
}
- return tree_to_double_int (integer_zero_node);
+ return 0;
}
/* Look for the following pattern:
@@ -917,38 +918,35 @@ backtrace_base_for_ref (tree *pbase)
*PINDEX: C1 + (C2 * C3) + C4 + (C5 * C3) */
static bool
-restructure_reference (tree *pbase, tree *poffset, double_int *pindex,
+restructure_reference (tree *pbase, tree *poffset, widest_int *pindex,
tree *ptype)
{
tree base = *pbase, offset = *poffset;
- double_int index = *pindex;
- double_int bpu = double_int::from_uhwi (BITS_PER_UNIT);
- tree mult_op0, mult_op1, t1, t2, type;
- double_int c1, c2, c3, c4, c5;
+ widest_int index = *pindex;
+ tree mult_op0, t1, t2, type;
+ widest_int c1, c2, c3, c4, c5;
if (!base
|| !offset
|| TREE_CODE (base) != MEM_REF
|| TREE_CODE (offset) != MULT_EXPR
|| TREE_CODE (TREE_OPERAND (offset, 1)) != INTEGER_CST
- || !index.umod (bpu, FLOOR_MOD_EXPR).is_zero ())
+ || wi::umod_floor (index, BITS_PER_UNIT) != 0)
return false;
t1 = TREE_OPERAND (base, 0);
- c1 = mem_ref_offset (base);
+ c1 = widest_int::from (mem_ref_offset (base), SIGNED);
type = TREE_TYPE (TREE_OPERAND (base, 1));
mult_op0 = TREE_OPERAND (offset, 0);
- mult_op1 = TREE_OPERAND (offset, 1);
-
- c3 = tree_to_double_int (mult_op1);
+ c3 = wi::to_widest (TREE_OPERAND (offset, 1));
if (TREE_CODE (mult_op0) == PLUS_EXPR)
if (TREE_CODE (TREE_OPERAND (mult_op0, 1)) == INTEGER_CST)
{
t2 = TREE_OPERAND (mult_op0, 0);
- c2 = tree_to_double_int (TREE_OPERAND (mult_op0, 1));
+ c2 = wi::to_widest (TREE_OPERAND (mult_op0, 1));
}
else
return false;
@@ -958,7 +956,7 @@ restructure_reference (tree *pbase, tree *poffset, double_int *pindex,
if (TREE_CODE (TREE_OPERAND (mult_op0, 1)) == INTEGER_CST)
{
t2 = TREE_OPERAND (mult_op0, 0);
- c2 = -tree_to_double_int (TREE_OPERAND (mult_op0, 1));
+ c2 = -wi::to_widest (TREE_OPERAND (mult_op0, 1));
}
else
return false;
@@ -966,15 +964,15 @@ restructure_reference (tree *pbase, tree *poffset, double_int *pindex,
else
{
t2 = mult_op0;
- c2 = double_int_zero;
+ c2 = 0;
}
- c4 = index.udiv (bpu, FLOOR_DIV_EXPR);
+ c4 = wi::lrshift (index, LOG2_BITS_PER_UNIT);
c5 = backtrace_base_for_ref (&t2);
*pbase = t1;
*poffset = fold_build2 (MULT_EXPR, sizetype, fold_convert (sizetype, t2),
- double_int_to_tree (sizetype, c3));
+ wide_int_to_tree (sizetype, c3));
*pindex = c1 + c2 * c3 + c4 + c5 * c3;
*ptype = type;
@@ -991,7 +989,6 @@ slsr_process_ref (gimple gs)
HOST_WIDE_INT bitsize, bitpos;
enum machine_mode mode;
int unsignedp, volatilep;
- double_int index;
slsr_cand_t c;
if (gimple_vdef (gs))
@@ -1007,7 +1004,7 @@ slsr_process_ref (gimple gs)
base = get_inner_reference (ref_expr, &bitsize, &bitpos, &offset, &mode,
&unsignedp, &volatilep, false);
- index = double_int::from_uhwi (bitpos);
+ widest_int index = bitpos;
if (!restructure_reference (&base, &offset, &index, &type))
return;
@@ -1028,7 +1025,7 @@ static slsr_cand_t
create_mul_ssa_cand (gimple gs, tree base_in, tree stride_in, bool speed)
{
tree base = NULL_TREE, stride = NULL_TREE, ctype = NULL_TREE;
- double_int index;
+ widest_int index;
unsigned savings = 0;
slsr_cand_t c;
slsr_cand_t base_cand = base_cand_from_table (base_in);
@@ -1060,7 +1057,7 @@ create_mul_ssa_cand (gimple gs, tree base_in, tree stride_in, bool speed)
============================
X = B + ((i' * S) * Z) */
base = base_cand->base_expr;
- index = base_cand->index * tree_to_double_int (base_cand->stride);
+ index = base_cand->index * wi::to_widest (base_cand->stride);
stride = stride_in;
ctype = base_cand->cand_type;
if (has_single_use (base_in))
@@ -1079,7 +1076,7 @@ create_mul_ssa_cand (gimple gs, tree base_in, tree stride_in, bool speed)
/* No interpretations had anything useful to propagate, so
produce X = (Y + 0) * Z. */
base = base_in;
- index = double_int_zero;
+ index = 0;
stride = stride_in;
ctype = TREE_TYPE (base_in);
}
@@ -1098,7 +1095,7 @@ static slsr_cand_t
create_mul_imm_cand (gimple gs, tree base_in, tree stride_in, bool speed)
{
tree base = NULL_TREE, stride = NULL_TREE, ctype = NULL_TREE;
- double_int index, temp;
+ widest_int index, temp;
unsigned savings = 0;
slsr_cand_t c;
slsr_cand_t base_cand = base_cand_from_table (base_in);
@@ -1114,13 +1111,12 @@ create_mul_imm_cand (gimple gs, tree base_in, tree stride_in, bool speed)
X = Y * c
============================
X = (B + i') * (S * c) */
- temp = tree_to_double_int (base_cand->stride)
- * tree_to_double_int (stride_in);
- if (double_int_fits_to_tree_p (TREE_TYPE (stride_in), temp))
+ temp = wi::to_widest (base_cand->stride) * wi::to_widest (stride_in);
+ if (wi::fits_to_tree_p (temp, TREE_TYPE (stride_in)))
{
base = base_cand->base_expr;
index = base_cand->index;
- stride = double_int_to_tree (TREE_TYPE (stride_in), temp);
+ stride = wide_int_to_tree (TREE_TYPE (stride_in), temp);
ctype = base_cand->cand_type;
if (has_single_use (base_in))
savings = (base_cand->dead_savings
@@ -1142,7 +1138,7 @@ create_mul_imm_cand (gimple gs, tree base_in, tree stride_in, bool speed)
+ stmt_cost (base_cand->cand_stmt, speed));
}
else if (base_cand->kind == CAND_ADD
- && base_cand->index.is_one ()
+ && base_cand->index == 1
&& TREE_CODE (base_cand->stride) == INTEGER_CST)
{
/* Y = B + (1 * S), S constant
@@ -1150,7 +1146,7 @@ create_mul_imm_cand (gimple gs, tree base_in, tree stride_in, bool speed)
===========================
X = (B + S) * c */
base = base_cand->base_expr;
- index = tree_to_double_int (base_cand->stride);
+ index = wi::to_widest (base_cand->stride);
stride = stride_in;
ctype = base_cand->cand_type;
if (has_single_use (base_in))
@@ -1169,7 +1165,7 @@ create_mul_imm_cand (gimple gs, tree base_in, tree stride_in, bool speed)
/* No interpretations had anything useful to propagate, so
produce X = (Y + 0) * c. */
base = base_in;
- index = double_int_zero;
+ index = 0;
stride = stride_in;
ctype = TREE_TYPE (base_in);
}
@@ -1232,7 +1228,7 @@ create_add_ssa_cand (gimple gs, tree base_in, tree addend_in,
bool subtract_p, bool speed)
{
tree base = NULL_TREE, stride = NULL_TREE, ctype = NULL;
- double_int index;
+ widest_int index;
unsigned savings = 0;
slsr_cand_t c;
slsr_cand_t base_cand = base_cand_from_table (base_in);
@@ -1243,7 +1239,7 @@ create_add_ssa_cand (gimple gs, tree base_in, tree addend_in,
while (addend_cand && !base && addend_cand->kind != CAND_PHI)
{
if (addend_cand->kind == CAND_MULT
- && addend_cand->index.is_zero ()
+ && addend_cand->index == 0
&& TREE_CODE (addend_cand->stride) == INTEGER_CST)
{
/* Z = (B + 0) * S, S constant
@@ -1251,7 +1247,7 @@ create_add_ssa_cand (gimple gs, tree base_in, tree addend_in,
===========================
X = Y + ((+/-1 * S) * B) */
base = base_in;
- index = tree_to_double_int (addend_cand->stride);
+ index = wi::to_widest (addend_cand->stride);
if (subtract_p)
index = -index;
stride = addend_cand->base_expr;
@@ -1270,7 +1266,7 @@ create_add_ssa_cand (gimple gs, tree base_in, tree addend_in,
while (base_cand && !base && base_cand->kind != CAND_PHI)
{
if (base_cand->kind == CAND_ADD
- && (base_cand->index.is_zero ()
+ && (base_cand->index == 0
|| operand_equal_p (base_cand->stride,
integer_zero_node, 0)))
{
@@ -1279,7 +1275,7 @@ create_add_ssa_cand (gimple gs, tree base_in, tree addend_in,
============================
X = B + (+/-1 * Z) */
base = base_cand->base_expr;
- index = subtract_p ? double_int_minus_one : double_int_one;
+ index = subtract_p ? -1 : 1;
stride = addend_in;
ctype = base_cand->cand_type;
if (has_single_use (base_in))
@@ -1293,7 +1289,7 @@ create_add_ssa_cand (gimple gs, tree base_in, tree addend_in,
while (subtrahend_cand && !base && subtrahend_cand->kind != CAND_PHI)
{
if (subtrahend_cand->kind == CAND_MULT
- && subtrahend_cand->index.is_zero ()
+ && subtrahend_cand->index == 0
&& TREE_CODE (subtrahend_cand->stride) == INTEGER_CST)
{
/* Z = (B + 0) * S, S constant
@@ -1301,7 +1297,7 @@ create_add_ssa_cand (gimple gs, tree base_in, tree addend_in,
===========================
Value: X = Y + ((-1 * S) * B) */
base = base_in;
- index = tree_to_double_int (subtrahend_cand->stride);
+ index = wi::to_widest (subtrahend_cand->stride);
index = -index;
stride = subtrahend_cand->base_expr;
ctype = TREE_TYPE (base_in);
@@ -1328,7 +1324,7 @@ create_add_ssa_cand (gimple gs, tree base_in, tree addend_in,
/* No interpretations had anything useful to propagate, so
produce X = Y + (1 * Z). */
base = base_in;
- index = subtract_p ? double_int_minus_one : double_int_one;
+ index = subtract_p ? -1 : 1;
stride = addend_in;
ctype = TREE_TYPE (base_in);
}
@@ -1343,22 +1339,23 @@ create_add_ssa_cand (gimple gs, tree base_in, tree addend_in,
about BASE_IN into the new candidate. Return the new candidate. */
static slsr_cand_t
-create_add_imm_cand (gimple gs, tree base_in, double_int index_in, bool speed)
+create_add_imm_cand (gimple gs, tree base_in, const widest_int &index_in,
+ bool speed)
{
enum cand_kind kind = CAND_ADD;
tree base = NULL_TREE, stride = NULL_TREE, ctype = NULL_TREE;
- double_int index, multiple;
+ widest_int index, multiple;
unsigned savings = 0;
slsr_cand_t c;
slsr_cand_t base_cand = base_cand_from_table (base_in);
while (base_cand && !base && base_cand->kind != CAND_PHI)
{
- bool unsigned_p = TYPE_UNSIGNED (TREE_TYPE (base_cand->stride));
+ signop sign = TYPE_SIGN (TREE_TYPE (base_cand->stride));
if (TREE_CODE (base_cand->stride) == INTEGER_CST
- && index_in.multiple_of (tree_to_double_int (base_cand->stride),
- unsigned_p, &multiple))
+ && wi::multiple_of_p (index_in, wi::to_widest (base_cand->stride),
+ sign, &multiple))
{
/* Y = (B + i') * S, S constant, c = kS for some integer k
X = Y + c
@@ -1443,10 +1440,8 @@ slsr_process_add (gimple gs, tree rhs1, tree rhs2, bool speed)
}
else
{
- double_int index;
-
/* Record an interpretation for the add-immediate. */
- index = tree_to_double_int (rhs2);
+ widest_int index = wi::to_widest (rhs2);
if (subtract_p)
index = -index;
@@ -1594,10 +1589,10 @@ slsr_process_cast (gimple gs, tree rhs1, bool speed)
The first of these is somewhat arbitrary, but the choice of
1 for the stride simplifies the logic for propagating casts
into their uses. */
- c = alloc_cand_and_find_basis (CAND_ADD, gs, rhs1, double_int_zero,
- integer_one_node, ctype, 0);
- c2 = alloc_cand_and_find_basis (CAND_MULT, gs, rhs1, double_int_zero,
- integer_one_node, ctype, 0);
+ c = alloc_cand_and_find_basis (CAND_ADD, gs, rhs1,
+ 0, integer_one_node, ctype, 0);
+ c2 = alloc_cand_and_find_basis (CAND_MULT, gs, rhs1,
+ 0, integer_one_node, ctype, 0);
c->next_interp = c2->cand_num;
}
@@ -1651,10 +1646,10 @@ slsr_process_copy (gimple gs, tree rhs1, bool speed)
The first of these is somewhat arbitrary, but the choice of
1 for the stride simplifies the logic for propagating casts
into their uses. */
- c = alloc_cand_and_find_basis (CAND_ADD, gs, rhs1, double_int_zero,
- integer_one_node, TREE_TYPE (rhs1), 0);
- c2 = alloc_cand_and_find_basis (CAND_MULT, gs, rhs1, double_int_zero,
- integer_one_node, TREE_TYPE (rhs1), 0);
+ c = alloc_cand_and_find_basis (CAND_ADD, gs, rhs1,
+ 0, integer_one_node, TREE_TYPE (rhs1), 0);
+ c2 = alloc_cand_and_find_basis (CAND_MULT, gs, rhs1,
+ 0, integer_one_node, TREE_TYPE (rhs1), 0);
c->next_interp = c2->cand_num;
}
@@ -1771,7 +1766,7 @@ dump_candidate (slsr_cand_t c)
fputs (" MULT : (", dump_file);
print_generic_expr (dump_file, c->base_expr, 0);
fputs (" + ", dump_file);
- dump_double_int (dump_file, c->index, false);
+ print_decs (c->index, dump_file);
fputs (") * ", dump_file);
print_generic_expr (dump_file, c->stride, 0);
fputs (" : ", dump_file);
@@ -1780,7 +1775,7 @@ dump_candidate (slsr_cand_t c)
fputs (" ADD : ", dump_file);
print_generic_expr (dump_file, c->base_expr, 0);
fputs (" + (", dump_file);
- dump_double_int (dump_file, c->index, false);
+ print_decs (c->index, dump_file);
fputs (" * ", dump_file);
print_generic_expr (dump_file, c->stride, 0);
fputs (") : ", dump_file);
@@ -1791,7 +1786,7 @@ dump_candidate (slsr_cand_t c)
fputs (" + (", dump_file);
print_generic_expr (dump_file, c->stride, 0);
fputs (") + ", dump_file);
- dump_double_int (dump_file, c->index, false);
+ print_decs (c->index, dump_file);
fputs (" : ", dump_file);
break;
case CAND_PHI:
@@ -1870,7 +1865,7 @@ dump_incr_vec (void)
for (i = 0; i < incr_vec_len; i++)
{
fprintf (dump_file, "%3d increment: ", i);
- dump_double_int (dump_file, incr_vec[i].incr, false);
+ print_decs (incr_vec[i].incr, dump_file);
fprintf (dump_file, "\n count: %d", incr_vec[i].count);
fprintf (dump_file, "\n cost: %d", incr_vec[i].cost);
fputs ("\n initializer: ", dump_file);
@@ -1901,7 +1896,7 @@ replace_ref (tree *expr, slsr_cand_t c)
add_expr = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (c->base_expr),
c->base_expr, c->stride);
mem_ref = fold_build2 (MEM_REF, acc_type, add_expr,
- double_int_to_tree (c->cand_type, c->index));
+ wide_int_to_tree (c->cand_type, c->index));
/* Gimplify the base addressing expression for the new MEM_REF tree. */
gimple_stmt_iterator gsi = gsi_for_stmt (c->cand_stmt);
@@ -1969,7 +1964,7 @@ phi_dependent_cand_p (slsr_cand_t c)
/* Calculate the increment required for candidate C relative to
its basis. */
-static double_int
+static widest_int
cand_increment (slsr_cand_t c)
{
slsr_cand_t basis;
@@ -1992,12 +1987,12 @@ cand_increment (slsr_cand_t c)
for this candidate, return the absolute value of that increment
instead. */
-static inline double_int
+static inline widest_int
cand_abs_increment (slsr_cand_t c)
{
- double_int increment = cand_increment (c);
+ widest_int increment = cand_increment (c);
- if (!address_arithmetic_p && increment.is_negative ())
+ if (!address_arithmetic_p && wi::neg_p (increment))
increment = -increment;
return increment;
@@ -2016,7 +2011,7 @@ cand_already_replaced (slsr_cand_t c)
replace_conditional_candidate. */
static void
-replace_mult_candidate (slsr_cand_t c, tree basis_name, double_int bump)
+replace_mult_candidate (slsr_cand_t c, tree basis_name, widest_int bump)
{
tree target_type = TREE_TYPE (gimple_assign_lhs (c->cand_stmt));
enum tree_code cand_code = gimple_assign_rhs_code (c->cand_stmt);
@@ -2026,7 +2021,7 @@ replace_mult_candidate (slsr_cand_t c, tree basis_name, double_int bump)
in this case. This does not affect siblings or dependents
of C. Restriction to signed HWI is conservative for unsigned
types but allows for safe negation without twisted logic. */
- if (bump.fits_shwi ()
+ if (wi::fits_shwi_p (bump)
&& bump.to_shwi () != HOST_WIDE_INT_MIN
/* It is not useful to replace casts, copies, or adds of
an SSA name and a constant. */
@@ -2044,13 +2039,13 @@ replace_mult_candidate (slsr_cand_t c, tree basis_name, double_int bump)
types, introduce a cast. */
if (!useless_type_conversion_p (target_type, TREE_TYPE (basis_name)))
basis_name = introduce_cast_before_cand (c, target_type, basis_name);
- if (bump.is_negative ())
+ if (wi::neg_p (bump))
{
code = MINUS_EXPR;
bump = -bump;
}
- bump_tree = double_int_to_tree (target_type, bump);
+ bump_tree = wide_int_to_tree (target_type, bump);
if (dump_file && (dump_flags & TDF_DETAILS))
{
@@ -2058,7 +2053,7 @@ replace_mult_candidate (slsr_cand_t c, tree basis_name, double_int bump)
print_gimple_stmt (dump_file, c->cand_stmt, 0, 0);
}
- if (bump.is_zero ())
+ if (bump == 0)
{
tree lhs = gimple_assign_lhs (c->cand_stmt);
gimple copy_stmt = gimple_build_assign (lhs, basis_name);
@@ -2119,14 +2114,12 @@ static void
replace_unconditional_candidate (slsr_cand_t c)
{
slsr_cand_t basis;
- double_int stride, bump;
if (cand_already_replaced (c))
return;
basis = lookup_cand (c->basis);
- stride = tree_to_double_int (c->stride);
- bump = cand_increment (c) * stride;
+ widest_int bump = cand_increment (c) * wi::to_widest (c->stride);
replace_mult_candidate (c, gimple_assign_lhs (basis->cand_stmt), bump);
}
@@ -2136,7 +2129,7 @@ replace_unconditional_candidate (slsr_cand_t c)
MAX_INCR_VEC_LEN increments have been found. */
static inline int
-incr_vec_index (double_int increment)
+incr_vec_index (const widest_int &increment)
{
unsigned i;
@@ -2156,7 +2149,7 @@ incr_vec_index (double_int increment)
static tree
create_add_on_incoming_edge (slsr_cand_t c, tree basis_name,
- double_int increment, edge e, location_t loc,
+ widest_int increment, edge e, location_t loc,
bool known_stride)
{
basic_block insert_bb;
@@ -2167,7 +2160,7 @@ create_add_on_incoming_edge (slsr_cand_t c, tree basis_name,
/* If the add candidate along this incoming edge has the same
index as C's hidden basis, the hidden basis represents this
edge correctly. */
- if (increment.is_zero ())
+ if (increment == 0)
return basis_name;
basis_type = TREE_TYPE (basis_name);
@@ -2177,21 +2170,21 @@ create_add_on_incoming_edge (slsr_cand_t c, tree basis_name,
{
tree bump_tree;
enum tree_code code = PLUS_EXPR;
- double_int bump = increment * tree_to_double_int (c->stride);
- if (bump.is_negative ())
+ widest_int bump = increment * wi::to_widest (c->stride);
+ if (wi::neg_p (bump))
{
code = MINUS_EXPR;
bump = -bump;
}
- bump_tree = double_int_to_tree (basis_type, bump);
+ bump_tree = wide_int_to_tree (basis_type, bump);
new_stmt = gimple_build_assign_with_ops (code, lhs, basis_name,
bump_tree);
}
else
{
int i;
- bool negate_incr = (!address_arithmetic_p && increment.is_negative ());
+ bool negate_incr = (!address_arithmetic_p && wi::neg_p (increment));
i = incr_vec_index (negate_incr ? -increment : increment);
gcc_assert (i >= 0);
@@ -2201,10 +2194,10 @@ create_add_on_incoming_edge (slsr_cand_t c, tree basis_name,
new_stmt = gimple_build_assign_with_ops (code, lhs, basis_name,
incr_vec[i].initializer);
}
- else if (increment.is_one ())
+ else if (increment == 1)
new_stmt = gimple_build_assign_with_ops (PLUS_EXPR, lhs, basis_name,
c->stride);
- else if (increment.is_minus_one ())
+ else if (increment == -1)
new_stmt = gimple_build_assign_with_ops (MINUS_EXPR, lhs, basis_name,
c->stride);
else
@@ -2265,11 +2258,11 @@ create_phi_basis (slsr_cand_t c, gimple from_phi, tree basis_name,
/* If the phi argument is the base name of the CAND_PHI, then
this incoming arc should use the hidden basis. */
if (operand_equal_p (arg, phi_cand->base_expr, 0))
- if (basis->index.is_zero ())
+ if (basis->index == 0)
feeding_def = gimple_assign_lhs (basis->cand_stmt);
else
{
- double_int incr = -basis->index;
+ widest_int incr = -basis->index;
feeding_def = create_add_on_incoming_edge (c, basis_name, incr,
e, loc, known_stride);
}
@@ -2286,7 +2279,7 @@ create_phi_basis (slsr_cand_t c, gimple from_phi, tree basis_name,
else
{
slsr_cand_t arg_cand = base_cand_from_table (arg);
- double_int diff = arg_cand->index - basis->index;
+ widest_int diff = arg_cand->index - basis->index;
feeding_def = create_add_on_incoming_edge (c, basis_name, diff,
e, loc, known_stride);
}
@@ -2332,7 +2325,6 @@ replace_conditional_candidate (slsr_cand_t c)
tree basis_name, name;
slsr_cand_t basis;
location_t loc;
- double_int stride, bump;
/* Look up the LHS SSA name from C's basis. This will be the
RHS1 of the adds we will introduce to create new phi arguments. */
@@ -2345,8 +2337,7 @@ replace_conditional_candidate (slsr_cand_t c)
name = create_phi_basis (c, lookup_cand (c->def_phi)->cand_stmt,
basis_name, loc, KNOWN_STRIDE);
/* Replace C with an add of the new basis phi and a constant. */
- stride = tree_to_double_int (c->stride);
- bump = c->index * stride;
+ widest_int bump = c->index * wi::to_widest (c->stride);
replace_mult_candidate (c, name, bump);
}
@@ -2478,14 +2469,14 @@ count_candidates (slsr_cand_t c)
candidates with the same increment, also record T_0 for subsequent use. */
static void
-record_increment (slsr_cand_t c, double_int increment, bool is_phi_adjust)
+record_increment (slsr_cand_t c, widest_int increment, bool is_phi_adjust)
{
bool found = false;
unsigned i;
/* Treat increments that differ only in sign as identical so as to
share initializers, unless we are generating pointer arithmetic. */
- if (!address_arithmetic_p && increment.is_negative ())
+ if (!address_arithmetic_p && wi::neg_p (increment))
increment = -increment;
for (i = 0; i < incr_vec_len; i++)
@@ -2529,8 +2520,8 @@ record_increment (slsr_cand_t c, double_int increment, bool is_phi_adjust)
if (c->kind == CAND_ADD
&& !is_phi_adjust
&& c->index == increment
- && (increment.sgt (double_int_one)
- || increment.slt (double_int_minus_one))
+ && (wi::gts_p (increment, 1)
+ || wi::lts_p (increment, -1))
&& (gimple_assign_rhs_code (c->cand_stmt) == PLUS_EXPR
|| gimple_assign_rhs_code (c->cand_stmt) == POINTER_PLUS_EXPR))
{
@@ -2588,7 +2579,7 @@ record_phi_increments (slsr_cand_t basis, gimple phi)
else
{
slsr_cand_t arg_cand = base_cand_from_table (arg);
- double_int diff = arg_cand->index - basis->index;
+ widest_int diff = arg_cand->index - basis->index;
record_increment (arg_cand, diff, PHI_ADJUST);
}
}
@@ -2639,7 +2630,7 @@ record_increments (slsr_cand_t c)
uses. */
static int
-phi_incr_cost (slsr_cand_t c, double_int incr, gimple phi, int *savings)
+phi_incr_cost (slsr_cand_t c, const widest_int &incr, gimple phi, int *savings)
{
unsigned i;
int cost = 0;
@@ -2664,7 +2655,7 @@ phi_incr_cost (slsr_cand_t c, double_int incr, gimple phi, int *savings)
else
{
slsr_cand_t arg_cand = base_cand_from_table (arg);
- double_int diff = arg_cand->index - basis->index;
+ widest_int diff = arg_cand->index - basis->index;
if (incr == diff)
{
@@ -2729,10 +2720,10 @@ optimize_cands_for_speed_p (slsr_cand_t c)
static int
lowest_cost_path (int cost_in, int repl_savings, slsr_cand_t c,
- double_int incr, bool count_phis)
+ const widest_int &incr, bool count_phis)
{
int local_cost, sib_cost, savings = 0;
- double_int cand_incr = cand_abs_increment (c);
+ widest_int cand_incr = cand_abs_increment (c);
if (cand_already_replaced (c))
local_cost = cost_in;
@@ -2775,11 +2766,11 @@ lowest_cost_path (int cost_in, int repl_savings, slsr_cand_t c,
would go dead. */
static int
-total_savings (int repl_savings, slsr_cand_t c, double_int incr,
+total_savings (int repl_savings, slsr_cand_t c, const widest_int &incr,
bool count_phis)
{
int savings = 0;
- double_int cand_incr = cand_abs_increment (c);
+ widest_int cand_incr = cand_abs_increment (c);
if (incr == cand_incr && !cand_already_replaced (c))
savings += repl_savings + c->dead_savings;
@@ -2829,7 +2820,7 @@ analyze_increments (slsr_cand_t first_dep, enum machine_mode mode, bool speed)
/* If somehow this increment is bigger than a HWI, we won't
be optimizing candidates that use it. And if the increment
has a count of zero, nothing will be done with it. */
- if (!incr_vec[i].incr.fits_shwi () || !incr_vec[i].count)
+ if (!wi::fits_shwi_p (incr_vec[i].incr) || !incr_vec[i].count)
incr_vec[i].cost = COST_INFINITE;
/* Increments of 0, 1, and -1 are always profitable to replace,
@@ -2983,7 +2974,7 @@ ncd_for_two_cands (basic_block bb1, basic_block bb2,
candidates, return the earliest candidate in the block in *WHERE. */
static basic_block
-ncd_with_phi (slsr_cand_t c, double_int incr, gimple phi,
+ncd_with_phi (slsr_cand_t c, const widest_int &incr, gimple phi,
basic_block ncd, slsr_cand_t *where)
{
unsigned i;
@@ -3003,7 +2994,7 @@ ncd_with_phi (slsr_cand_t c, double_int incr, gimple phi,
else
{
slsr_cand_t arg_cand = base_cand_from_table (arg);
- double_int diff = arg_cand->index - basis->index;
+ widest_int diff = arg_cand->index - basis->index;
basic_block pred = gimple_phi_arg_edge (phi, i)->src;
if ((incr == diff) || (!address_arithmetic_p && incr == -diff))
@@ -3022,7 +3013,7 @@ ncd_with_phi (slsr_cand_t c, double_int incr, gimple phi,
return the earliest candidate in the block in *WHERE. */
static basic_block
-ncd_of_cand_and_phis (slsr_cand_t c, double_int incr, slsr_cand_t *where)
+ncd_of_cand_and_phis (slsr_cand_t c, const widest_int &incr, slsr_cand_t *where)
{
basic_block ncd = NULL;
@@ -3047,7 +3038,7 @@ ncd_of_cand_and_phis (slsr_cand_t c, double_int incr, slsr_cand_t *where)
*WHERE. */
static basic_block
-nearest_common_dominator_for_cands (slsr_cand_t c, double_int incr,
+nearest_common_dominator_for_cands (slsr_cand_t c, const widest_int &incr,
slsr_cand_t *where)
{
basic_block sib_ncd = NULL, dep_ncd = NULL, this_ncd = NULL, ncd;
@@ -3123,13 +3114,13 @@ insert_initializers (slsr_cand_t c)
slsr_cand_t where = NULL;
gimple init_stmt;
tree stride_type, new_name, incr_tree;
- double_int incr = incr_vec[i].incr;
+ widest_int incr = incr_vec[i].incr;
if (!profitable_increment_p (i)
- || incr.is_one ()
- || (incr.is_minus_one ()
+ || incr == 1
+ || (incr == -1
&& gimple_assign_rhs_code (c->cand_stmt) != POINTER_PLUS_EXPR)
- || incr.is_zero ())
+ || incr == 0)
continue;
/* We may have already identified an existing initializer that
@@ -3158,7 +3149,7 @@ insert_initializers (slsr_cand_t c)
/* Create the initializer and insert it in the latest possible
dominating position. */
- incr_tree = double_int_to_tree (stride_type, incr);
+ incr_tree = wide_int_to_tree (stride_type, incr);
init_stmt = gimple_build_assign_with_ops (MULT_EXPR, new_name,
c->stride, incr_tree);
if (where)
@@ -3215,9 +3206,9 @@ all_phi_incrs_profitable (slsr_cand_t c, gimple phi)
{
int j;
slsr_cand_t arg_cand = base_cand_from_table (arg);
- double_int increment = arg_cand->index - basis->index;
+ widest_int increment = arg_cand->index - basis->index;
- if (!address_arithmetic_p && increment.is_negative ())
+ if (!address_arithmetic_p && wi::neg_p (increment))
increment = -increment;
j = incr_vec_index (increment);
@@ -3228,7 +3219,7 @@ all_phi_incrs_profitable (slsr_cand_t c, gimple phi)
c->cand_num);
print_gimple_stmt (dump_file, phi, 0, 0);
fputs (" increment: ", dump_file);
- dump_double_int (dump_file, increment, false);
+ print_decs (increment, dump_file);
if (j < 0)
fprintf (dump_file,
"\n Not replaced; incr_vec overflow.\n");
@@ -3323,7 +3314,7 @@ replace_one_candidate (slsr_cand_t c, unsigned i, tree basis_name)
tree orig_rhs1, orig_rhs2;
tree rhs2;
enum tree_code orig_code, repl_code;
- double_int cand_incr;
+ widest_int cand_incr;
orig_code = gimple_assign_rhs_code (c->cand_stmt);
orig_rhs1 = gimple_assign_rhs1 (c->cand_stmt);
@@ -3371,7 +3362,7 @@ replace_one_candidate (slsr_cand_t c, unsigned i, tree basis_name)
from the basis name, or an add of the stride to the basis
name, respectively. It may be necessary to introduce a
cast (or reuse an existing cast). */
- else if (cand_incr.is_one ())
+ else if (cand_incr == 1)
{
tree stride_type = TREE_TYPE (c->stride);
tree orig_type = TREE_TYPE (orig_rhs2);
@@ -3386,7 +3377,7 @@ replace_one_candidate (slsr_cand_t c, unsigned i, tree basis_name)
c);
}
- else if (cand_incr.is_minus_one ())
+ else if (cand_incr == -1)
{
tree stride_type = TREE_TYPE (c->stride);
tree orig_type = TREE_TYPE (orig_rhs2);
@@ -3413,7 +3404,7 @@ replace_one_candidate (slsr_cand_t c, unsigned i, tree basis_name)
fputs (" (duplicate, not actually replacing)\n", dump_file);
}
- else if (cand_incr.is_zero ())
+ else if (cand_incr == 0)
{
tree lhs = gimple_assign_lhs (c->cand_stmt);
tree lhs_type = TREE_TYPE (lhs);
@@ -3463,7 +3454,7 @@ replace_profitable_candidates (slsr_cand_t c)
{
if (!cand_already_replaced (c))
{
- double_int increment = cand_abs_increment (c);
+ widest_int increment = cand_abs_increment (c);
enum tree_code orig_code = gimple_assign_rhs_code (c->cand_stmt);
int i;
diff --git a/gcc/gimple.c b/gcc/gimple.c
index 0b34ff1e1c7..d6bc15bf424 100644
--- a/gcc/gimple.c
+++ b/gcc/gimple.c
@@ -2777,11 +2777,7 @@ preprocess_case_label_vec_for_gimple (vec<tree> labels,
low = CASE_HIGH (labels[i - 1]);
if (!low)
low = CASE_LOW (labels[i - 1]);
- if ((TREE_INT_CST_LOW (low) + 1
- != TREE_INT_CST_LOW (high))
- || (TREE_INT_CST_HIGH (low)
- + (TREE_INT_CST_LOW (high) == 0)
- != TREE_INT_CST_HIGH (high)))
+ if (wi::add (low, 1) != high)
break;
}
if (i == len)
diff --git a/gcc/go/go-gcc.cc b/gcc/go/go-gcc.cc
index 3d9fc8e369a..f4c242f60aa 100644
--- a/gcc/go/go-gcc.cc
+++ b/gcc/go/go-gcc.cc
@@ -1067,8 +1067,7 @@ Gcc_backend::type_size(Btype* btype)
if (t == error_mark_node)
return 1;
t = TYPE_SIZE_UNIT(t);
- gcc_assert(TREE_CODE(t) == INTEGER_CST);
- gcc_assert(TREE_INT_CST_HIGH(t) == 0);
+ gcc_assert(tree_fits_uhwi_p (t));
unsigned HOST_WIDE_INT val_wide = TREE_INT_CST_LOW(t);
size_t ret = static_cast<size_t>(val_wide);
gcc_assert(ret == val_wide);
diff --git a/gcc/godump.c b/gcc/godump.c
index 6d6b819bea3..2afd7f171a0 100644
--- a/gcc/godump.c
+++ b/gcc/godump.c
@@ -36,6 +36,7 @@ along with GCC; see the file COPYING3. If not see
#include "pointer-set.h"
#include "obstack.h"
#include "debug.h"
+#include "wide-int-print.h"
/* We dump this information from the debug hooks. This gives us a
stable and maintainable API to hook into. In order to work
@@ -961,7 +962,7 @@ go_output_typedef (struct godump_container *container, tree decl)
const char *name;
struct macro_hash_value *mhval;
void **slot;
- char buf[100];
+ char buf[WIDE_INT_PRINT_BUFFER_SIZE];
name = IDENTIFIER_POINTER (TREE_PURPOSE (element));
@@ -982,10 +983,7 @@ go_output_typedef (struct godump_container *container, tree decl)
snprintf (buf, sizeof buf, HOST_WIDE_INT_PRINT_UNSIGNED,
tree_to_uhwi (TREE_VALUE (element)));
else
- snprintf (buf, sizeof buf, HOST_WIDE_INT_PRINT_DOUBLE_HEX,
- ((unsigned HOST_WIDE_INT)
- TREE_INT_CST_HIGH (TREE_VALUE (element))),
- TREE_INT_CST_LOW (TREE_VALUE (element)));
+ print_hex (element, buf);
mhval->value = xstrdup (buf);
*slot = mhval;
diff --git a/gcc/graphite-clast-to-gimple.c b/gcc/graphite-clast-to-gimple.c
index fc60845d85a..9ac9b67bb79 100644
--- a/gcc/graphite-clast-to-gimple.c
+++ b/gcc/graphite-clast-to-gimple.c
@@ -75,14 +75,13 @@ gmp_cst_to_tree (tree type, mpz_t val)
{
tree t = type ? type : integer_type_node;
mpz_t tmp;
- double_int di;
mpz_init (tmp);
mpz_set (tmp, val);
- di = mpz_get_double_int (t, tmp, true);
+ wide_int wi = wi::from_mpz (t, tmp, true);
mpz_clear (tmp);
- return double_int_to_tree (t, di);
+ return wide_int_to_tree (t, wi);
}
/* Sets RES to the min of V1 and V2. */
diff --git a/gcc/graphite-sese-to-poly.c b/gcc/graphite-sese-to-poly.c
index 3c5cdf3c90e..0bc443302c6 100644
--- a/gcc/graphite-sese-to-poly.c
+++ b/gcc/graphite-sese-to-poly.c
@@ -73,8 +73,7 @@ along with GCC; see the file COPYING3. If not see
static inline void
tree_int_to_gmp (tree t, mpz_t res)
{
- double_int di = tree_to_double_int (t);
- mpz_set_double_int (res, di, TYPE_UNSIGNED (TREE_TYPE (t)));
+ wi::to_mpz (t, res, TYPE_SIGN (TREE_TYPE (t)));
}
/* Returns the index of the PHI argument defined in the outermost
@@ -1025,7 +1024,7 @@ build_loop_iteration_domains (scop_p scop, struct loop *loop,
/* loop_i <= expr_nb_iters */
else if (!chrec_contains_undetermined (nb_iters))
{
- double_int nit;
+ widest_int nit;
isl_pw_aff *aff;
isl_set *valid;
isl_local_space *ls;
@@ -1061,7 +1060,7 @@ build_loop_iteration_domains (scop_p scop, struct loop *loop,
isl_constraint *c;
mpz_init (g);
- mpz_set_double_int (g, nit, false);
+ wi::to_mpz (nit, g, SIGNED);
mpz_sub_ui (g, g, 1);
approx = extract_affine_gmp (g, isl_set_get_space (inner));
x = isl_pw_aff_ge_set (approx, aff);
diff --git a/gcc/hooks.c b/gcc/hooks.c
index 1c67bdfec12..5c065625733 100644
--- a/gcc/hooks.c
+++ b/gcc/hooks.c
@@ -332,7 +332,8 @@ hook_bool_rtx_int_int_int_intp_bool_false (rtx a ATTRIBUTE_UNUSED,
}
bool
-hook_bool_dint_dint_uint_bool_true (double_int, double_int, unsigned int, bool)
+hook_bool_wint_wint_uint_bool_true (const widest_int &, const widest_int &,
+ unsigned int, bool)
{
return true;
}
diff --git a/gcc/hooks.h b/gcc/hooks.h
index 896b41d8c56..ba42b6c1842 100644
--- a/gcc/hooks.h
+++ b/gcc/hooks.h
@@ -23,7 +23,7 @@
#define GCC_HOOKS_H
#include "machmode.h"
-#include "double-int.h"
+#include "wide-int.h"
extern bool hook_bool_void_false (void);
extern bool hook_bool_void_true (void);
@@ -61,7 +61,8 @@ extern bool hook_bool_rtx_int_int_int_intp_bool_false (rtx, int, int, int,
extern bool hook_bool_tree_tree_false (tree, tree);
extern bool hook_bool_tree_tree_true (tree, tree);
extern bool hook_bool_tree_bool_false (tree, bool);
-extern bool hook_bool_dint_dint_uint_bool_true (double_int, double_int,
+extern bool hook_bool_wint_wint_uint_bool_true (const widest_int &,
+ const widest_int &,
unsigned int, bool);
extern void hook_void_void (void);
diff --git a/gcc/internal-fn.c b/gcc/internal-fn.c
index 1062ea84705..68b2b66fbe7 100644
--- a/gcc/internal-fn.c
+++ b/gcc/internal-fn.c
@@ -239,12 +239,12 @@ ubsan_expand_si_overflow_addsub_check (tree_code code, gimple stmt)
;
else if (code == PLUS_EXPR && TREE_CODE (arg0) == SSA_NAME)
{
- double_int arg0_min, arg0_max;
+ wide_int arg0_min, arg0_max;
if (get_range_info (arg0, &arg0_min, &arg0_max) == VR_RANGE)
{
- if (!arg0_min.is_negative ())
+ if (!wi::neg_p (arg0_min, TYPE_SIGN (TREE_TYPE (arg0))))
pos_neg = 1;
- else if (arg0_max.is_negative ())
+ else if (wi::neg_p (arg0_max, TYPE_SIGN (TREE_TYPE (arg0))))
pos_neg = 2;
}
if (pos_neg != 3)
@@ -256,12 +256,12 @@ ubsan_expand_si_overflow_addsub_check (tree_code code, gimple stmt)
}
if (pos_neg == 3 && !CONST_INT_P (op1) && TREE_CODE (arg1) == SSA_NAME)
{
- double_int arg1_min, arg1_max;
+ wide_int arg1_min, arg1_max;
if (get_range_info (arg1, &arg1_min, &arg1_max) == VR_RANGE)
{
- if (!arg1_min.is_negative ())
+ if (!wi::neg_p (arg1_min, TYPE_SIGN (TREE_TYPE (arg1))))
pos_neg = 1;
- else if (arg1_max.is_negative ())
+ else if (wi::neg_p (arg1_max, TYPE_SIGN (TREE_TYPE (arg1))))
pos_neg = 2;
}
}
@@ -478,7 +478,7 @@ ubsan_expand_si_overflow_mul_check (gimple stmt)
rtx do_overflow = gen_label_rtx ();
rtx hipart_different = gen_label_rtx ();
- int hprec = GET_MODE_PRECISION (hmode);
+ unsigned int hprec = GET_MODE_PRECISION (hmode);
rtx hipart0 = expand_shift (RSHIFT_EXPR, mode, op0, hprec,
NULL_RTX, 0);
hipart0 = gen_lowpart (hmode, hipart0);
@@ -510,37 +510,35 @@ ubsan_expand_si_overflow_mul_check (gimple stmt)
if (TREE_CODE (arg0) == SSA_NAME)
{
- double_int arg0_min, arg0_max;
+ wide_int arg0_min, arg0_max;
if (get_range_info (arg0, &arg0_min, &arg0_max) == VR_RANGE)
{
- if (arg0_max.sle (double_int::max_value (hprec, false))
- && double_int::min_value (hprec, false).sle (arg0_min))
+ unsigned int mprec0 = wi::min_precision (arg0_min, SIGNED);
+ unsigned int mprec1 = wi::min_precision (arg0_max, SIGNED);
+ if (mprec0 <= hprec && mprec1 <= hprec)
op0_small_p = true;
- else if (arg0_max.sle (double_int::max_value (hprec, true))
- && (~double_int::max_value (hprec,
- true)).sle (arg0_min))
+ else if (mprec0 <= hprec + 1 && mprec1 <= hprec + 1)
op0_medium_p = true;
- if (!arg0_min.is_negative ())
+ if (!wi::neg_p (arg0_min, TYPE_SIGN (TREE_TYPE (arg0))))
op0_sign = 0;
- else if (arg0_max.is_negative ())
+ else if (wi::neg_p (arg0_max, TYPE_SIGN (TREE_TYPE (arg0))))
op0_sign = -1;
}
}
if (TREE_CODE (arg1) == SSA_NAME)
{
- double_int arg1_min, arg1_max;
+ wide_int arg1_min, arg1_max;
if (get_range_info (arg1, &arg1_min, &arg1_max) == VR_RANGE)
{
- if (arg1_max.sle (double_int::max_value (hprec, false))
- && double_int::min_value (hprec, false).sle (arg1_min))
+ unsigned int mprec0 = wi::min_precision (arg1_min, SIGNED);
+ unsigned int mprec1 = wi::min_precision (arg1_max, SIGNED);
+ if (mprec0 <= hprec && mprec1 <= hprec)
op1_small_p = true;
- else if (arg1_max.sle (double_int::max_value (hprec, true))
- && (~double_int::max_value (hprec,
- true)).sle (arg1_min))
+ else if (mprec0 <= hprec + 1 && mprec1 <= hprec + 1)
op1_medium_p = true;
- if (!arg1_min.is_negative ())
+ if (!wi::neg_p (arg1_min, TYPE_SIGN (TREE_TYPE (arg1))))
op1_sign = 0;
- else if (arg1_max.is_negative ())
+ else if (wi::neg_p (arg1_max, TYPE_SIGN (TREE_TYPE (arg1))))
op1_sign = -1;
}
}
diff --git a/gcc/ipa-devirt.c b/gcc/ipa-devirt.c
index 5cd4c832d2c..d0296e7e4c7 100644
--- a/gcc/ipa-devirt.c
+++ b/gcc/ipa-devirt.c
@@ -1362,7 +1362,7 @@ get_polymorphic_call_info (tree fndecl,
{
base_pointer = TREE_OPERAND (base, 0);
context->offset
- += offset2 + mem_ref_offset (base).low * BITS_PER_UNIT;
+ += offset2 + mem_ref_offset (base).to_short_addr () * BITS_PER_UNIT;
context->outer_type = NULL;
}
/* We found base object. In this case the outer_type
diff --git a/gcc/ipa-prop.c b/gcc/ipa-prop.c
index 9f144fa3442..da6ffe86169 100644
--- a/gcc/ipa-prop.c
+++ b/gcc/ipa-prop.c
@@ -1099,7 +1099,7 @@ compute_complex_assign_jump_func (struct ipa_node_params *info,
|| max_size == -1
|| max_size != size)
return;
- offset += mem_ref_offset (base).low * BITS_PER_UNIT;
+ offset += mem_ref_offset (base).to_short_addr () * BITS_PER_UNIT;
ssa = TREE_OPERAND (base, 0);
if (TREE_CODE (ssa) != SSA_NAME
|| !SSA_NAME_IS_DEFAULT_DEF (ssa)
@@ -1159,7 +1159,7 @@ get_ancestor_addr_info (gimple assign, tree *obj_p, HOST_WIDE_INT *offset)
|| TREE_CODE (SSA_NAME_VAR (parm)) != PARM_DECL)
return NULL_TREE;
- *offset += mem_ref_offset (expr).low * BITS_PER_UNIT;
+ *offset += mem_ref_offset (expr).to_short_addr () * BITS_PER_UNIT;
*obj_p = obj;
return expr;
}
@@ -3787,8 +3787,7 @@ ipa_modify_call_arguments (struct cgraph_edge *cs, gimple stmt,
if (TYPE_ALIGN (type) > align)
align = TYPE_ALIGN (type);
}
- misalign += (tree_to_double_int (off)
- .sext (TYPE_PRECISION (TREE_TYPE (off))).low
+ misalign += (offset_int::from (off, SIGNED).to_short_addr ()
* BITS_PER_UNIT);
misalign = misalign & (align - 1);
if (misalign != 0)
@@ -3994,7 +3993,7 @@ ipa_get_adjustment_candidate (tree **expr, bool *convert,
if (TREE_CODE (base) == MEM_REF)
{
- offset += mem_ref_offset (base).low * BITS_PER_UNIT;
+ offset += mem_ref_offset (base).to_short_addr () * BITS_PER_UNIT;
base = TREE_OPERAND (base, 0);
}
diff --git a/gcc/java/boehm.c b/gcc/java/boehm.c
index ddc424b138c..191ab867eca 100644
--- a/gcc/java/boehm.c
+++ b/gcc/java/boehm.c
@@ -32,8 +32,9 @@ The Free Software Foundation is independent of Sun Microsystems, Inc. */
#include "java-tree.h"
#include "parse.h"
#include "diagnostic-core.h"
+#include "wide-int.h"
-static void mark_reference_fields (tree, double_int *, unsigned int,
+static void mark_reference_fields (tree, wide_int *, unsigned int,
int *, int *, int *, HOST_WIDE_INT *);
/* A procedure-based object descriptor. We know that our
@@ -47,7 +48,7 @@ static void mark_reference_fields (tree, double_int *, unsigned int,
/* Recursively mark reference fields. */
static void
mark_reference_fields (tree field,
- double_int *mask,
+ wide_int *mask,
unsigned int ubit,
int *pointer_after_end,
int *all_bits_set,
@@ -107,7 +108,7 @@ mark_reference_fields (tree field,
bits for all words in the record. This is conservative, but the
size_words != 1 case is impossible in regular java code. */
for (i = 0; i < size_words; ++i)
- *mask = (*mask).set_bit (ubit - count - i - 1);
+ *mask = wi::set_bit (*mask, ubit - count - i - 1);
if (count >= ubit - 2)
*pointer_after_end = 1;
@@ -136,16 +137,15 @@ get_boehm_type_descriptor (tree type)
int last_set_index = 0;
HOST_WIDE_INT last_view_index = -1;
int pointer_after_end = 0;
- double_int mask;
tree field, value, value_type;
- mask = double_int_zero;
-
/* If the GC wasn't requested, just use a null pointer. */
if (! flag_use_boehm_gc)
return null_pointer_node;
value_type = java_type_for_mode (ptr_mode, 1);
+ wide_int mask = wi::zero (TYPE_PRECISION (value_type));
+
/* If we have a type of unknown size, use a proc. */
if (int_size_in_bytes (type) == -1)
goto procedure_object_descriptor;
@@ -194,22 +194,22 @@ get_boehm_type_descriptor (tree type)
that we don't have to emit reflection data for run time
marking. */
count = 0;
- mask = double_int_zero;
+ mask = wi::zero (TYPE_PRECISION (value_type));
++last_set_index;
while (last_set_index)
{
if ((last_set_index & 1))
- mask = mask.set_bit (log2_size + count);
+ mask = wi::set_bit (mask, log2_size + count);
last_set_index >>= 1;
++count;
}
- value = double_int_to_tree (value_type, mask);
+ value = wide_int_to_tree (value_type, mask);
}
else if (! pointer_after_end)
{
/* Bottom two bits for bitmap mark type are 01. */
- mask = mask.set_bit (0);
- value = double_int_to_tree (value_type, mask);
+ mask = wi::set_bit (mask, 0);
+ value = wide_int_to_tree (value_type, mask);
}
else
{
diff --git a/gcc/java/expr.c b/gcc/java/expr.c
index 69f6819c7a9..e66bdb152ab 100644
--- a/gcc/java/expr.c
+++ b/gcc/java/expr.c
@@ -46,6 +46,7 @@ The Free Software Foundation is independent of Sun Microsystems, Inc. */
#include "ggc.h"
#include "tree-iterator.h"
#include "target.h"
+#include "wide-int.h"
static void flush_quick_stack (void);
static void push_value (tree);
@@ -1051,7 +1052,7 @@ build_newarray (int atype_value, tree length)
tree prim_type = decode_newarray_type (atype_value);
tree type
= build_java_array_type (prim_type,
- tree_fits_shwi_p (length) == INTEGER_CST
+ tree_fits_shwi_p (length)
? tree_to_shwi (length) : -1);
/* Pass a reference to the primitive type class and save the runtime
@@ -1260,7 +1261,7 @@ expand_java_pushc (int ival, tree type)
else if (type == float_type_node || type == double_type_node)
{
REAL_VALUE_TYPE x;
- REAL_VALUE_FROM_INT (x, ival, 0, TYPE_MODE (type));
+ real_from_integer (&x, TYPE_MODE (type), ival, SIGNED);
value = build_real (type, x);
}
else
@@ -1717,7 +1718,7 @@ build_field_ref (tree self_value, tree self_class, tree name)
tree field_offset = byte_position (field_decl);
if (! page_size)
page_size = size_int (4096);
- check = ! INT_CST_LT_UNSIGNED (field_offset, page_size);
+ check = !tree_int_cst_lt (field_offset, page_size);
}
if (base_type != TREE_TYPE (self_value))
diff --git a/gcc/java/jcf-parse.c b/gcc/java/jcf-parse.c
index afe35f0e69a..748f7c3e061 100644
--- a/gcc/java/jcf-parse.c
+++ b/gcc/java/jcf-parse.c
@@ -41,6 +41,7 @@ The Free Software Foundation is independent of Sun Microsystems, Inc. */
#include "cgraph.h"
#include "bitmap.h"
#include "target.h"
+#include "wide-int.h"
#ifdef HAVE_LOCALE_H
#include <locale.h>
@@ -1041,14 +1042,13 @@ get_constant (JCF *jcf, int index)
case CONSTANT_Long:
{
unsigned HOST_WIDE_INT num;
- double_int val;
num = JPOOL_UINT (jcf, index);
- val = double_int::from_uhwi (num).llshift (32, 64);
+ wide_int val = wi::lshift (wide_int::from (num, 64, SIGNED), 32);
num = JPOOL_UINT (jcf, index + 1);
- val |= double_int::from_uhwi (num);
+ val |= num;
- value = double_int_to_tree (long_type_node, val);
+ value = wide_int_to_tree (long_type_node, val);
break;
}
diff --git a/gcc/loop-doloop.c b/gcc/loop-doloop.c
index 3e9f3ce6edb..d03d13c648e 100644
--- a/gcc/loop-doloop.c
+++ b/gcc/loop-doloop.c
@@ -409,7 +409,7 @@ doloop_modify (struct loop *loop, struct niter_desc *desc,
basic_block loop_end = desc->out_edge->src;
enum machine_mode mode;
rtx true_prob_val;
- double_int iterations;
+ widest_int iterations;
jump_insn = BB_END (loop_end);
@@ -461,9 +461,9 @@ doloop_modify (struct loop *loop, struct niter_desc *desc,
/* Determine if the iteration counter will be non-negative.
Note that the maximum value loaded is iterations_max - 1. */
if (get_max_loop_iterations (loop, &iterations)
- && (iterations.ule (double_int_one.llshift
- (GET_MODE_PRECISION (mode) - 1,
- GET_MODE_PRECISION (mode)))))
+ && wi::leu_p (iterations,
+ wi::set_bit_in_zero <widest_int>
+ (GET_MODE_PRECISION (mode) - 1)))
nonneg = 1;
break;
@@ -597,7 +597,7 @@ doloop_optimize (struct loop *loop)
enum machine_mode mode;
rtx doloop_seq, doloop_pat, doloop_reg;
rtx count;
- double_int iterations, iterations_max;
+ widest_int iterations, iterations_max;
rtx start_label;
rtx condition;
unsigned level, est_niter;
@@ -655,11 +655,12 @@ doloop_optimize (struct loop *loop)
}
if (desc->const_iter)
- iterations = rtx_to_double_int (desc->niter_expr);
+ iterations = widest_int::from (std::make_pair (desc->niter_expr, mode),
+ UNSIGNED);
else
- iterations = double_int_zero;
+ iterations = 0;
if (!get_max_loop_iterations (loop, &iterations_max))
- iterations_max = double_int_zero;
+ iterations_max = 0;
level = get_loop_level (loop) + 1;
entered_at_top = (loop->latch == desc->in_edge->dest
&& contains_no_active_insn_p (loop->latch));
@@ -688,7 +689,7 @@ doloop_optimize (struct loop *loop)
computed, we must be sure that the number of iterations fits into
the new mode. */
&& (word_mode_size >= GET_MODE_PRECISION (mode)
- || iterations_max.ule (double_int::from_shwi (word_mode_max))))
+ || wi::leu_p (iterations_max, word_mode_max)))
{
if (word_mode_size > GET_MODE_PRECISION (mode))
count = simplify_gen_unary (ZERO_EXTEND, word_mode, count, mode);
diff --git a/gcc/loop-iv.c b/gcc/loop-iv.c
index 42bcb75bec7..796bd59f2d6 100644
--- a/gcc/loop-iv.c
+++ b/gcc/loop-iv.c
@@ -2625,8 +2625,7 @@ iv_number_of_iterations (struct loop *loop, rtx insn, rtx condition,
max = (up - down) / inc + 1;
if (!desc->infinite
&& !desc->assumptions)
- record_niter_bound (loop, double_int::from_uhwi (max),
- false, true);
+ record_niter_bound (loop, max, false, true);
if (iv0.step == const0_rtx)
{
@@ -2665,8 +2664,8 @@ iv_number_of_iterations (struct loop *loop, rtx insn, rtx condition,
iv1.step = const0_rtx;
if (INTVAL (iv0.step) < 0)
{
- iv0.step = simplify_gen_unary (NEG, comp_mode, iv0.step, mode);
- iv1.base = simplify_gen_unary (NEG, comp_mode, iv1.base, mode);
+ iv0.step = simplify_gen_unary (NEG, comp_mode, iv0.step, comp_mode);
+ iv1.base = simplify_gen_unary (NEG, comp_mode, iv1.base, comp_mode);
}
iv0.step = lowpart_subreg (mode, iv0.step, comp_mode);
@@ -2840,8 +2839,7 @@ iv_number_of_iterations (struct loop *loop, rtx insn, rtx condition,
desc->niter = val & GET_MODE_MASK (desc->mode);
if (!desc->infinite
&& !desc->assumptions)
- record_niter_bound (loop, double_int::from_uhwi (desc->niter),
- false, true);
+ record_niter_bound (loop, desc->niter, false, true);
}
else
{
@@ -2850,8 +2848,7 @@ iv_number_of_iterations (struct loop *loop, rtx insn, rtx condition,
goto zero_iter_simplify;
if (!desc->infinite
&& !desc->assumptions)
- record_niter_bound (loop, double_int::from_uhwi (max),
- false, true);
+ record_niter_bound (loop, max, false, true);
/* simplify_using_initial_values does a copy propagation on the registers
in the expression for the number of iterations. This prolongs life
@@ -2876,8 +2873,7 @@ zero_iter_simplify:
zero_iter:
desc->const_iter = true;
desc->niter = 0;
- record_niter_bound (loop, double_int_zero,
- true, true);
+ record_niter_bound (loop, 0, true, true);
desc->noloop_assumptions = NULL_RTX;
desc->niter_expr = const0_rtx;
return;
diff --git a/gcc/loop-unroll.c b/gcc/loop-unroll.c
index df6fc9c19f0..5797d200d95 100644
--- a/gcc/loop-unroll.c
+++ b/gcc/loop-unroll.c
@@ -641,7 +641,7 @@ decide_unroll_constant_iterations (struct loop *loop, int flags)
{
unsigned nunroll, nunroll_by_av, best_copies, best_unroll = 0, n_copies, i;
struct niter_desc *desc;
- double_int iterations;
+ widest_int iterations;
if (!(flags & UAP_UNROLL))
{
@@ -694,7 +694,7 @@ decide_unroll_constant_iterations (struct loop *loop, int flags)
if (desc->niter < 2 * nunroll
|| ((get_estimated_loop_iterations (loop, &iterations)
|| get_max_loop_iterations (loop, &iterations))
- && iterations.ult (double_int::from_shwi (2 * nunroll))))
+ && wi::ltu_p (iterations, 2 * nunroll)))
{
if (dump_file)
fprintf (dump_file, ";; Not unrolling loop, doesn't roll\n");
@@ -814,11 +814,10 @@ unroll_loop_constant_iterations (struct loop *loop)
desc->noloop_assumptions = NULL_RTX;
desc->niter -= exit_mod;
- loop->nb_iterations_upper_bound -= double_int::from_uhwi (exit_mod);
+ loop->nb_iterations_upper_bound -= exit_mod;
if (loop->any_estimate
- && double_int::from_uhwi (exit_mod).ule
- (loop->nb_iterations_estimate))
- loop->nb_iterations_estimate -= double_int::from_uhwi (exit_mod);
+ && wi::leu_p (exit_mod, loop->nb_iterations_estimate))
+ loop->nb_iterations_estimate -= exit_mod;
else
loop->any_estimate = false;
}
@@ -858,11 +857,10 @@ unroll_loop_constant_iterations (struct loop *loop)
apply_opt_in_copies (opt_info, exit_mod + 1, false, false);
desc->niter -= exit_mod + 1;
- loop->nb_iterations_upper_bound -= double_int::from_uhwi (exit_mod + 1);
+ loop->nb_iterations_upper_bound -= exit_mod + 1;
if (loop->any_estimate
- && double_int::from_uhwi (exit_mod + 1).ule
- (loop->nb_iterations_estimate))
- loop->nb_iterations_estimate -= double_int::from_uhwi (exit_mod + 1);
+ && wi::leu_p (exit_mod + 1, loop->nb_iterations_estimate))
+ loop->nb_iterations_estimate -= exit_mod + 1;
else
loop->any_estimate = false;
desc->noloop_assumptions = NULL_RTX;
@@ -914,14 +912,10 @@ unroll_loop_constant_iterations (struct loop *loop)
desc->niter /= max_unroll + 1;
loop->nb_iterations_upper_bound
- = loop->nb_iterations_upper_bound.udiv (double_int::from_uhwi (max_unroll
- + 1),
- TRUNC_DIV_EXPR);
+ = wi::udiv_trunc (loop->nb_iterations_upper_bound, max_unroll + 1);
if (loop->any_estimate)
loop->nb_iterations_estimate
- = loop->nb_iterations_estimate.udiv (double_int::from_uhwi (max_unroll
- + 1),
- TRUNC_DIV_EXPR);
+ = wi::udiv_trunc (loop->nb_iterations_estimate, max_unroll + 1);
desc->niter_expr = GEN_INT (desc->niter);
/* Remove the edges. */
@@ -941,7 +935,7 @@ decide_unroll_runtime_iterations (struct loop *loop, int flags)
{
unsigned nunroll, nunroll_by_av, i;
struct niter_desc *desc;
- double_int iterations;
+ widest_int iterations;
if (!(flags & UAP_UNROLL))
{
@@ -997,7 +991,7 @@ decide_unroll_runtime_iterations (struct loop *loop, int flags)
/* Check whether the loop rolls. */
if ((get_estimated_loop_iterations (loop, &iterations)
|| get_max_loop_iterations (loop, &iterations))
- && iterations.ult (double_int::from_shwi (2 * nunroll)))
+ && wi::ltu_p (iterations, 2 * nunroll))
{
if (dump_file)
fprintf (dump_file, ";; Not unrolling loop, doesn't roll\n");
@@ -1357,14 +1351,10 @@ unroll_loop_runtime_iterations (struct loop *loop)
simplify_gen_binary (UDIV, desc->mode, old_niter,
gen_int_mode (max_unroll + 1, desc->mode));
loop->nb_iterations_upper_bound
- = loop->nb_iterations_upper_bound.udiv (double_int::from_uhwi (max_unroll
- + 1),
- TRUNC_DIV_EXPR);
+ = wi::udiv_trunc (loop->nb_iterations_upper_bound, max_unroll + 1);
if (loop->any_estimate)
loop->nb_iterations_estimate
- = loop->nb_iterations_estimate.udiv (double_int::from_uhwi (max_unroll
- + 1),
- TRUNC_DIV_EXPR);
+ = wi::udiv_trunc (loop->nb_iterations_estimate, max_unroll + 1);
if (exit_at_end)
{
desc->niter_expr =
@@ -1372,7 +1362,7 @@ unroll_loop_runtime_iterations (struct loop *loop)
desc->noloop_assumptions = NULL_RTX;
--loop->nb_iterations_upper_bound;
if (loop->any_estimate
- && loop->nb_iterations_estimate != double_int_zero)
+ && loop->nb_iterations_estimate != 0)
--loop->nb_iterations_estimate;
else
loop->any_estimate = false;
@@ -1390,7 +1380,7 @@ static void
decide_peel_simple (struct loop *loop, int flags)
{
unsigned npeel;
- double_int iterations;
+ widest_int iterations;
if (!(flags & UAP_PEEL))
{
@@ -1434,7 +1424,7 @@ decide_peel_simple (struct loop *loop, int flags)
/* If we have realistic estimate on number of iterations, use it. */
if (get_estimated_loop_iterations (loop, &iterations))
{
- if (double_int::from_shwi (npeel).ule (iterations))
+ if (wi::leu_p (npeel, iterations))
{
if (dump_file)
{
@@ -1451,7 +1441,7 @@ decide_peel_simple (struct loop *loop, int flags)
/* If we have small enough bound on iterations, we can still peel (completely
unroll). */
else if (get_max_loop_iterations (loop, &iterations)
- && iterations.ult (double_int::from_shwi (npeel)))
+ && wi::ltu_p (iterations, npeel))
npeel = iterations.to_shwi () + 1;
else
{
@@ -1545,7 +1535,7 @@ decide_unroll_stupid (struct loop *loop, int flags)
{
unsigned nunroll, nunroll_by_av, i;
struct niter_desc *desc;
- double_int iterations;
+ widest_int iterations;
if (!(flags & UAP_UNROLL_ALL))
{
@@ -1602,7 +1592,7 @@ decide_unroll_stupid (struct loop *loop, int flags)
/* Check whether the loop rolls. */
if ((get_estimated_loop_iterations (loop, &iterations)
|| get_max_loop_iterations (loop, &iterations))
- && iterations.ult (double_int::from_shwi (2 * nunroll)))
+ && wi::ltu_p (iterations, 2 * nunroll))
{
if (dump_file)
fprintf (dump_file, ";; Not unrolling loop, doesn't roll\n");
diff --git a/gcc/lto-streamer-in.c b/gcc/lto-streamer-in.c
index 20835f71d6a..d0c86626c66 100644
--- a/gcc/lto-streamer-in.c
+++ b/gcc/lto-streamer-in.c
@@ -596,6 +596,21 @@ make_new_block (struct function *fn, unsigned int index)
}
+/* Read a wide-int. */
+
+static widest_int
+streamer_read_wi (struct lto_input_block *ib)
+{
+ HOST_WIDE_INT a[WIDE_INT_MAX_ELTS];
+ int i;
+ int prec ATTRIBUTE_UNUSED = streamer_read_uhwi (ib);
+ int len = streamer_read_uhwi (ib);
+ for (i = 0; i < len; i++)
+ a[i] = streamer_read_hwi (ib);
+ return widest_int::from_array (a, len);
+}
+
+
/* Read the CFG for function FN from input block IB. */
static void
@@ -705,16 +720,10 @@ input_cfg (struct lto_input_block *ib, struct data_in *data_in,
loop->estimate_state = streamer_read_enum (ib, loop_estimation, EST_LAST);
loop->any_upper_bound = streamer_read_hwi (ib);
if (loop->any_upper_bound)
- {
- loop->nb_iterations_upper_bound.low = streamer_read_uhwi (ib);
- loop->nb_iterations_upper_bound.high = streamer_read_hwi (ib);
- }
+ loop->nb_iterations_upper_bound = streamer_read_wi (ib);
loop->any_estimate = streamer_read_hwi (ib);
if (loop->any_estimate)
- {
- loop->nb_iterations_estimate.low = streamer_read_uhwi (ib);
- loop->nb_iterations_estimate.high = streamer_read_hwi (ib);
- }
+ loop->nb_iterations_estimate = streamer_read_wi (ib);
/* Read OMP SIMD related info. */
loop->safelen = streamer_read_hwi (ib);
@@ -1267,12 +1276,18 @@ lto_input_tree_1 (struct lto_input_block *ib, struct data_in *data_in,
}
else if (tag == LTO_integer_cst)
{
- /* For shared integer constants in singletons we can use the existing
- tree integer constant merging code. */
+ /* For shared integer constants in singletons we can use the
+ existing tree integer constant merging code. */
tree type = stream_read_tree (ib, data_in);
- unsigned HOST_WIDE_INT low = streamer_read_uhwi (ib);
- HOST_WIDE_INT high = streamer_read_hwi (ib);
- result = build_int_cst_wide (type, low, high);
+ unsigned HOST_WIDE_INT len = streamer_read_uhwi (ib);
+ unsigned HOST_WIDE_INT i;
+ HOST_WIDE_INT a[WIDE_INT_MAX_ELTS];
+
+ for (i = 0; i < len; i++)
+ a[i] = streamer_read_hwi (ib);
+ gcc_assert (TYPE_PRECISION (type) <= MAX_BITSIZE_MODE_ANY_INT);
+ result = wide_int_to_tree (type, wide_int::from_array
+ (a, len, TYPE_PRECISION (type)));
streamer_tree_cache_append (data_in->reader_cache, result, hash);
}
else if (tag == LTO_tree_scc)
diff --git a/gcc/lto-streamer-out.c b/gcc/lto-streamer-out.c
index a40fd19b464..6f2bf9c307d 100644
--- a/gcc/lto-streamer-out.c
+++ b/gcc/lto-streamer-out.c
@@ -733,8 +733,11 @@ hash_tree (struct streamer_tree_cache_d *cache, tree t)
if (CODE_CONTAINS_STRUCT (code, TS_INT_CST))
{
- v = iterative_hash_host_wide_int (TREE_INT_CST_LOW (t), v);
- v = iterative_hash_host_wide_int (TREE_INT_CST_HIGH (t), v);
+ int i;
+ v = iterative_hash_host_wide_int (TREE_INT_CST_NUNITS (t), v);
+ v = iterative_hash_host_wide_int (TREE_INT_CST_EXT_NUNITS (t), v);
+ for (i = 0; i < TREE_INT_CST_NUNITS (t); i++)
+ v = iterative_hash_host_wide_int (TREE_INT_CST_ELT (t, i), v);
}
if (CODE_CONTAINS_STRUCT (code, TS_REAL_CST))
@@ -1608,6 +1611,21 @@ output_ssa_names (struct output_block *ob, struct function *fn)
}
+/* Output a wide-int. */
+
+static void
+streamer_write_wi (struct output_block *ob,
+ const widest_int &w)
+{
+ int len = w.get_len ();
+
+ streamer_write_uhwi (ob, w.get_precision ());
+ streamer_write_uhwi (ob, len);
+ for (int i = 0; i < len; i++)
+ streamer_write_hwi (ob, w.elt (i));
+}
+
+
/* Output the cfg. */
static void
@@ -1680,16 +1698,10 @@ output_cfg (struct output_block *ob, struct function *fn)
loop_estimation, EST_LAST, loop->estimate_state);
streamer_write_hwi (ob, loop->any_upper_bound);
if (loop->any_upper_bound)
- {
- streamer_write_uhwi (ob, loop->nb_iterations_upper_bound.low);
- streamer_write_hwi (ob, loop->nb_iterations_upper_bound.high);
- }
+ streamer_write_wi (ob, loop->nb_iterations_upper_bound);
streamer_write_hwi (ob, loop->any_estimate);
if (loop->any_estimate)
- {
- streamer_write_uhwi (ob, loop->nb_iterations_estimate.low);
- streamer_write_hwi (ob, loop->nb_iterations_estimate.high);
- }
+ streamer_write_wi (ob, loop->nb_iterations_estimate);
/* Write OMP SIMD related info. */
streamer_write_hwi (ob, loop->safelen);
diff --git a/gcc/lto/lto-lang.c b/gcc/lto/lto-lang.c
index 339bebfa87d..e213e923318 100644
--- a/gcc/lto/lto-lang.c
+++ b/gcc/lto/lto-lang.c
@@ -324,8 +324,7 @@ static bool
get_nonnull_operand (tree arg_num_expr, unsigned HOST_WIDE_INT *valp)
{
/* Verify the arg number is a constant. */
- if (TREE_CODE (arg_num_expr) != INTEGER_CST
- || TREE_INT_CST_HIGH (arg_num_expr) != 0)
+ if (!tree_fits_uhwi_p (arg_num_expr))
return false;
*valp = TREE_INT_CST_LOW (arg_num_expr);
diff --git a/gcc/lto/lto.c b/gcc/lto/lto.c
index 5f4478a58a8..1d9ac2c9ecd 100644
--- a/gcc/lto/lto.c
+++ b/gcc/lto/lto.c
@@ -1212,8 +1212,8 @@ compare_tree_sccs_1 (tree t1, tree t2, tree **map)
if (CODE_CONTAINS_STRUCT (code, TS_INT_CST))
{
- compare_values (TREE_INT_CST_LOW);
- compare_values (TREE_INT_CST_HIGH);
+ if (!wi::eq_p (t1, t2))
+ return false;
}
if (CODE_CONTAINS_STRUCT (code, TS_REAL_CST))
diff --git a/gcc/mkconfig.sh b/gcc/mkconfig.sh
index c7146edb64d..c36759f72a1 100644
--- a/gcc/mkconfig.sh
+++ b/gcc/mkconfig.sh
@@ -100,6 +100,9 @@ case $output in
#if defined IN_GCC && !defined GENERATOR_FILE
# include "insn-modes.h"
#endif
+#if defined IN_GCC && defined GENERATOR_FILE && !defined BITS_PER_UNIT
+#include "machmode.h"
+#endif
EOF
;;
esac
diff --git a/gcc/objc/objc-act.c b/gcc/objc/objc-act.c
index cda345b3de8..ff5d1e5d7aa 100644
--- a/gcc/objc/objc-act.c
+++ b/gcc/objc/objc-act.c
@@ -52,6 +52,7 @@ along with GCC; see the file COPYING3. If not see
#include "cgraph.h"
#include "tree-iterator.h"
#include "hash-table.h"
+#include "wide-int.h"
#include "langhooks-def.h"
/* Different initialization, code gen and meta data generation for each
runtime. */
@@ -4899,12 +4900,10 @@ objc_decl_method_attributes (tree *node, tree attributes, int flags)
number = TREE_VALUE (second_argument);
if (number
&& TREE_CODE (number) == INTEGER_CST
- && TREE_INT_CST_HIGH (number) == 0)
- {
- TREE_VALUE (second_argument)
- = build_int_cst (integer_type_node,
- TREE_INT_CST_LOW (number) + 2);
- }
+ && !wi::eq_p (number, 0))
+ TREE_VALUE (second_argument)
+ = wide_int_to_tree (TREE_TYPE (number),
+ wi::add (number, 2));
/* This is the third argument, the "first-to-check",
which specifies the index of the first argument to
@@ -4914,13 +4913,10 @@ objc_decl_method_attributes (tree *node, tree attributes, int flags)
number = TREE_VALUE (third_argument);
if (number
&& TREE_CODE (number) == INTEGER_CST
- && TREE_INT_CST_HIGH (number) == 0
- && TREE_INT_CST_LOW (number) != 0)
- {
- TREE_VALUE (third_argument)
- = build_int_cst (integer_type_node,
- TREE_INT_CST_LOW (number) + 2);
- }
+ && !wi::eq_p (number, 0))
+ TREE_VALUE (third_argument)
+ = wide_int_to_tree (TREE_TYPE (number),
+ wi::add (number, 2));
}
filtered_attributes = chainon (filtered_attributes,
new_attribute);
@@ -4952,15 +4948,11 @@ objc_decl_method_attributes (tree *node, tree attributes, int flags)
{
/* Get the value of the argument and add 2. */
tree number = TREE_VALUE (argument);
- if (number
- && TREE_CODE (number) == INTEGER_CST
- && TREE_INT_CST_HIGH (number) == 0
- && TREE_INT_CST_LOW (number) != 0)
- {
- TREE_VALUE (argument)
- = build_int_cst (integer_type_node,
- TREE_INT_CST_LOW (number) + 2);
- }
+ if (number && TREE_CODE (number) == INTEGER_CST
+ && !wi::eq_p (number, 0))
+ TREE_VALUE (argument)
+ = wide_int_to_tree (TREE_TYPE (number),
+ wi::add (number, 2));
argument = TREE_CHAIN (argument);
}
diff --git a/gcc/omp-low.c b/gcc/omp-low.c
index 453bd294fd4..453f580a838 100644
--- a/gcc/omp-low.c
+++ b/gcc/omp-low.c
@@ -2541,9 +2541,7 @@ scan_omp_1_op (tree *tp, int *walk_subtrees, void *data)
if (tem != TREE_TYPE (t))
{
if (TREE_CODE (t) == INTEGER_CST)
- *tp = build_int_cst_wide (tem,
- TREE_INT_CST_LOW (t),
- TREE_INT_CST_HIGH (t));
+ *tp = wide_int_to_tree (tem, t);
else
TREE_TYPE (t) = tem;
}
diff --git a/gcc/optabs.c b/gcc/optabs.c
index 54f07ab02b9..abc36ed41f8 100644
--- a/gcc/optabs.c
+++ b/gcc/optabs.c
@@ -854,7 +854,8 @@ expand_subword_shift (enum machine_mode op1_mode, optab binoptab,
if (CONSTANT_P (op1) || shift_mask >= BITS_PER_WORD)
{
carries = outof_input;
- tmp = immed_double_const (BITS_PER_WORD, 0, op1_mode);
+ tmp = immed_wide_int_const (wi::shwi (BITS_PER_WORD,
+ op1_mode), op1_mode);
tmp = simplify_expand_binop (op1_mode, sub_optab, tmp, op1,
0, true, methods);
}
@@ -869,13 +870,15 @@ expand_subword_shift (enum machine_mode op1_mode, optab binoptab,
outof_input, const1_rtx, 0, unsignedp, methods);
if (shift_mask == BITS_PER_WORD - 1)
{
- tmp = immed_double_const (-1, -1, op1_mode);
+ tmp = immed_wide_int_const
+ (wi::minus_one (GET_MODE_PRECISION (op1_mode)), op1_mode);
tmp = simplify_expand_binop (op1_mode, xor_optab, op1, tmp,
0, true, methods);
}
else
{
- tmp = immed_double_const (BITS_PER_WORD - 1, 0, op1_mode);
+ tmp = immed_wide_int_const (wi::shwi (BITS_PER_WORD - 1,
+ op1_mode), op1_mode);
tmp = simplify_expand_binop (op1_mode, sub_optab, tmp, op1,
0, true, methods);
}
@@ -1038,7 +1041,7 @@ expand_doubleword_shift (enum machine_mode op1_mode, optab binoptab,
is true when the effective shift value is less than BITS_PER_WORD.
Set SUPERWORD_OP1 to the shift count that should be used to shift
OUTOF_INPUT into INTO_TARGET when the condition is false. */
- tmp = immed_double_const (BITS_PER_WORD, 0, op1_mode);
+ tmp = immed_wide_int_const (wi::shwi (BITS_PER_WORD, op1_mode), op1_mode);
if (!CONSTANT_P (op1) && shift_mask == BITS_PER_WORD - 1)
{
/* Set CMP1 to OP1 & BITS_PER_WORD. The result is zero iff OP1
@@ -2891,7 +2894,6 @@ expand_absneg_bit (enum rtx_code code, enum machine_mode mode,
const struct real_format *fmt;
int bitpos, word, nwords, i;
enum machine_mode imode;
- double_int mask;
rtx temp, insns;
/* The format has to have a simple sign bit. */
@@ -2927,7 +2929,7 @@ expand_absneg_bit (enum rtx_code code, enum machine_mode mode,
nwords = (GET_MODE_BITSIZE (mode) + BITS_PER_WORD - 1) / BITS_PER_WORD;
}
- mask = double_int_zero.set_bit (bitpos);
+ wide_int mask = wi::set_bit_in_zero (bitpos, GET_MODE_PRECISION (imode));
if (code == ABS)
mask = ~mask;
@@ -2949,7 +2951,7 @@ expand_absneg_bit (enum rtx_code code, enum machine_mode mode,
{
temp = expand_binop (imode, code == ABS ? and_optab : xor_optab,
op0_piece,
- immed_double_int_const (mask, imode),
+ immed_wide_int_const (mask, imode),
targ_piece, 1, OPTAB_LIB_WIDEN);
if (temp != targ_piece)
emit_move_insn (targ_piece, temp);
@@ -2967,7 +2969,7 @@ expand_absneg_bit (enum rtx_code code, enum machine_mode mode,
{
temp = expand_binop (imode, code == ABS ? and_optab : xor_optab,
gen_lowpart (imode, op0),
- immed_double_int_const (mask, imode),
+ immed_wide_int_const (mask, imode),
gen_lowpart (imode, target), 1, OPTAB_LIB_WIDEN);
target = lowpart_subreg_maybe_copy (mode, temp, imode);
@@ -3571,8 +3573,6 @@ expand_copysign_absneg (enum machine_mode mode, rtx op0, rtx op1, rtx target,
}
else
{
- double_int mask;
-
if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
{
imode = int_mode_for_mode (mode);
@@ -3593,10 +3593,9 @@ expand_copysign_absneg (enum machine_mode mode, rtx op0, rtx op1, rtx target,
op1 = operand_subword_force (op1, word, mode);
}
- mask = double_int_zero.set_bit (bitpos);
-
+ wide_int mask = wi::set_bit_in_zero (bitpos, GET_MODE_PRECISION (imode));
sign = expand_binop (imode, and_optab, op1,
- immed_double_int_const (mask, imode),
+ immed_wide_int_const (mask, imode),
NULL_RTX, 1, OPTAB_LIB_WIDEN);
}
@@ -3640,7 +3639,6 @@ expand_copysign_bit (enum machine_mode mode, rtx op0, rtx op1, rtx target,
int bitpos, bool op0_is_abs)
{
enum machine_mode imode;
- double_int mask;
int word, nwords, i;
rtx temp, insns;
@@ -3664,7 +3662,7 @@ expand_copysign_bit (enum machine_mode mode, rtx op0, rtx op1, rtx target,
nwords = (GET_MODE_BITSIZE (mode) + BITS_PER_WORD - 1) / BITS_PER_WORD;
}
- mask = double_int_zero.set_bit (bitpos);
+ wide_int mask = wi::set_bit_in_zero (bitpos, GET_MODE_PRECISION (imode));
if (target == 0
|| target == op0
@@ -3686,12 +3684,11 @@ expand_copysign_bit (enum machine_mode mode, rtx op0, rtx op1, rtx target,
if (!op0_is_abs)
op0_piece
= expand_binop (imode, and_optab, op0_piece,
- immed_double_int_const (~mask, imode),
+ immed_wide_int_const (~mask, imode),
NULL_RTX, 1, OPTAB_LIB_WIDEN);
-
op1 = expand_binop (imode, and_optab,
operand_subword_force (op1, i, mode),
- immed_double_int_const (mask, imode),
+ immed_wide_int_const (mask, imode),
NULL_RTX, 1, OPTAB_LIB_WIDEN);
temp = expand_binop (imode, ior_optab, op0_piece, op1,
@@ -3711,13 +3708,13 @@ expand_copysign_bit (enum machine_mode mode, rtx op0, rtx op1, rtx target,
else
{
op1 = expand_binop (imode, and_optab, gen_lowpart (imode, op1),
- immed_double_int_const (mask, imode),
+ immed_wide_int_const (mask, imode),
NULL_RTX, 1, OPTAB_LIB_WIDEN);
op0 = gen_lowpart (imode, op0);
if (!op0_is_abs)
op0 = expand_binop (imode, and_optab, op0,
- immed_double_int_const (~mask, imode),
+ immed_wide_int_const (~mask, imode),
NULL_RTX, 1, OPTAB_LIB_WIDEN);
temp = expand_binop (imode, ior_optab, op0, op1,
diff --git a/gcc/postreload.c b/gcc/postreload.c
index 691467e1075..47687203d66 100644
--- a/gcc/postreload.c
+++ b/gcc/postreload.c
@@ -295,27 +295,27 @@ reload_cse_simplify_set (rtx set, rtx insn)
#ifdef LOAD_EXTEND_OP
if (extend_op != UNKNOWN)
{
- HOST_WIDE_INT this_val;
+ wide_int result;
- /* ??? I'm lazy and don't wish to handle CONST_DOUBLE. Other
- constants, such as SYMBOL_REF, cannot be extended. */
- if (!CONST_INT_P (this_rtx))
+ if (!CONST_SCALAR_INT_P (this_rtx))
continue;
- this_val = INTVAL (this_rtx);
switch (extend_op)
{
case ZERO_EXTEND:
- this_val &= GET_MODE_MASK (GET_MODE (src));
+ result = wide_int::from (std::make_pair (this_rtx,
+ GET_MODE (src)),
+ BITS_PER_WORD, UNSIGNED);
break;
case SIGN_EXTEND:
- /* ??? In theory we're already extended. */
- if (this_val == trunc_int_for_mode (this_val, GET_MODE (src)))
- break;
+ result = wide_int::from (std::make_pair (this_rtx,
+ GET_MODE (src)),
+ BITS_PER_WORD, SIGNED);
+ break;
default:
gcc_unreachable ();
}
- this_rtx = GEN_INT (this_val);
+ this_rtx = immed_wide_int_const (result, word_mode);
}
#endif
this_cost = set_src_cost (this_rtx, speed);
diff --git a/gcc/predict.c b/gcc/predict.c
index f074082535d..165cc4e2ec6 100644
--- a/gcc/predict.c
+++ b/gcc/predict.c
@@ -1306,76 +1306,59 @@ predict_iv_comparison (struct loop *loop, basic_block bb,
&& tree_fits_shwi_p (compare_base))
{
int probability;
- bool of, overflow = false;
- double_int mod, compare_count, tem, loop_count;
-
- double_int loop_bound = tree_to_double_int (loop_bound_var);
- double_int compare_bound = tree_to_double_int (compare_var);
- double_int base = tree_to_double_int (compare_base);
- double_int compare_step = tree_to_double_int (compare_step_var);
+ bool overflow, overall_overflow = false;
+ widest_int compare_count, tem;
/* (loop_bound - base) / compare_step */
- tem = loop_bound.sub_with_overflow (base, &of);
- overflow |= of;
- loop_count = tem.divmod_with_overflow (compare_step,
- 0, TRUNC_DIV_EXPR,
- &mod, &of);
- overflow |= of;
-
- if ((!compare_step.is_negative ())
+ tem = wi::sub (wi::to_widest (loop_bound_var),
+ wi::to_widest (compare_base), SIGNED, &overflow);
+ overall_overflow |= overflow;
+ widest_int loop_count = wi::div_trunc (tem,
+ wi::to_widest (compare_step_var),
+ SIGNED, &overflow);
+ overall_overflow |= overflow;
+
+ if (!wi::neg_p (wi::to_widest (compare_step_var))
^ (compare_code == LT_EXPR || compare_code == LE_EXPR))
{
/* (loop_bound - compare_bound) / compare_step */
- tem = loop_bound.sub_with_overflow (compare_bound, &of);
- overflow |= of;
- compare_count = tem.divmod_with_overflow (compare_step,
- 0, TRUNC_DIV_EXPR,
- &mod, &of);
- overflow |= of;
+ tem = wi::sub (wi::to_widest (loop_bound_var),
+ wi::to_widest (compare_var), SIGNED, &overflow);
+ overall_overflow |= overflow;
+ compare_count = wi::div_trunc (tem, wi::to_widest (compare_step_var),
+ SIGNED, &overflow);
+ overall_overflow |= overflow;
}
else
{
/* (compare_bound - base) / compare_step */
- tem = compare_bound.sub_with_overflow (base, &of);
- overflow |= of;
- compare_count = tem.divmod_with_overflow (compare_step,
- 0, TRUNC_DIV_EXPR,
- &mod, &of);
- overflow |= of;
+ tem = wi::sub (wi::to_widest (compare_var),
+ wi::to_widest (compare_base), SIGNED, &overflow);
+ overall_overflow |= overflow;
+ compare_count = wi::div_trunc (tem, wi::to_widest (compare_step_var),
+ SIGNED, &overflow);
+ overall_overflow |= overflow;
}
if (compare_code == LE_EXPR || compare_code == GE_EXPR)
++compare_count;
if (loop_bound_code == LE_EXPR || loop_bound_code == GE_EXPR)
++loop_count;
- if (compare_count.is_negative ())
- compare_count = double_int_zero;
- if (loop_count.is_negative ())
- loop_count = double_int_zero;
- if (loop_count.is_zero ())
+ if (wi::neg_p (compare_count))
+ compare_count = 0;
+ if (wi::neg_p (loop_count))
+ loop_count = 0;
+ if (loop_count == 0)
probability = 0;
- else if (compare_count.scmp (loop_count) == 1)
+ else if (wi::cmps (compare_count, loop_count) == 1)
probability = REG_BR_PROB_BASE;
else
{
- /* If loop_count is too big, such that REG_BR_PROB_BASE * loop_count
- could overflow, shift both loop_count and compare_count right
- a bit so that it doesn't overflow. Note both counts are known not
- to be negative at this point. */
- int clz_bits = clz_hwi (loop_count.high);
- gcc_assert (REG_BR_PROB_BASE < 32768);
- if (clz_bits < 16)
- {
- loop_count.arshift (16 - clz_bits, HOST_BITS_PER_DOUBLE_INT);
- compare_count.arshift (16 - clz_bits, HOST_BITS_PER_DOUBLE_INT);
- }
- tem = compare_count.mul_with_sign (double_int::from_shwi
- (REG_BR_PROB_BASE), true, &of);
- gcc_assert (!of);
- tem = tem.divmod (loop_count, true, TRUNC_DIV_EXPR, &mod);
+ tem = compare_count * REG_BR_PROB_BASE;
+ tem = wi::udiv_trunc (tem, loop_count);
probability = tem.to_uhwi ();
}
- if (!overflow)
+ if (!overall_overflow)
predict_edge (then_edge, PRED_LOOP_IV_COMPARE, probability);
return;
diff --git a/gcc/pretty-print.h b/gcc/pretty-print.h
index 247b25c815d..1deaa667290 100644
--- a/gcc/pretty-print.h
+++ b/gcc/pretty-print.h
@@ -23,6 +23,7 @@ along with GCC; see the file COPYING3. If not see
#include "obstack.h"
#include "input.h"
+#include "wide-int-print.h"
/* Maximum number of format string arguments. */
#define PP_NL_ARGMAX 30
@@ -261,6 +262,13 @@ pp_get_prefix (const pretty_printer *pp) { return pp->prefix; }
#define pp_decimal_int(PP, I) pp_scalar (PP, "%d", I)
#define pp_unsigned_wide_integer(PP, I) \
pp_scalar (PP, HOST_WIDE_INT_PRINT_UNSIGNED, (unsigned HOST_WIDE_INT) I)
+#define pp_wide_int(PP, W, SGN) \
+ do \
+ { \
+ print_dec (W, pp_buffer (PP)->digit_buffer, SGN); \
+ pp_string (PP, pp_buffer (PP)->digit_buffer); \
+ } \
+ while (0)
#define pp_wide_integer(PP, I) \
pp_scalar (PP, HOST_WIDE_INT_PRINT_DEC, (HOST_WIDE_INT) I)
#define pp_widest_integer(PP, I) \
diff --git a/gcc/print-rtl.c b/gcc/print-rtl.c
index 09ac387d18b..3b0efa3938a 100644
--- a/gcc/print-rtl.c
+++ b/gcc/print-rtl.c
@@ -629,6 +629,11 @@ print_rtx (const_rtx in_rtx)
fprintf (outfile, " [%s]", s);
}
break;
+
+ case CONST_WIDE_INT:
+ fprintf (outfile, " ");
+ cwi_output_hex (outfile, in_rtx);
+ break;
#endif
case CODE_LABEL:
diff --git a/gcc/print-tree.c b/gcc/print-tree.c
index 91b696cfa49..e26b0633d58 100644
--- a/gcc/print-tree.c
+++ b/gcc/print-tree.c
@@ -35,6 +35,7 @@ along with GCC; see the file COPYING3. If not see
#include "tree-cfg.h"
#include "tree-dump.h"
#include "dumpfile.h"
+#include "wide-int-print.h"
/* Define the hash table of nodes already seen.
Such nodes are not repeated; brief cross-references are used. */
@@ -125,16 +126,7 @@ print_node_brief (FILE *file, const char *prefix, const_tree node, int indent)
fprintf (file, " overflow");
fprintf (file, " ");
- if (TREE_INT_CST_HIGH (node) == 0)
- fprintf (file, HOST_WIDE_INT_PRINT_UNSIGNED, TREE_INT_CST_LOW (node));
- else if (TREE_INT_CST_HIGH (node) == -1
- && TREE_INT_CST_LOW (node) != 0)
- fprintf (file, "-" HOST_WIDE_INT_PRINT_UNSIGNED,
- -TREE_INT_CST_LOW (node));
- else
- fprintf (file, HOST_WIDE_INT_PRINT_DOUBLE_HEX,
- (unsigned HOST_WIDE_INT) TREE_INT_CST_HIGH (node),
- (unsigned HOST_WIDE_INT) TREE_INT_CST_LOW (node));
+ print_dec (node, file, TYPE_SIGN (TREE_TYPE (node)));
}
if (TREE_CODE (node) == REAL_CST)
{
@@ -341,7 +333,7 @@ print_node (FILE *file, const char *prefix, tree node, int indent)
if (TREE_VISITED (node))
fputs (" visited", file);
- if (code != TREE_VEC && code != SSA_NAME)
+ if (code != TREE_VEC && code != INTEGER_CST && code != SSA_NAME)
{
if (TREE_LANG_FLAG_0 (node))
fputs (" tree_0", file);
@@ -743,17 +735,7 @@ print_node (FILE *file, const char *prefix, tree node, int indent)
fprintf (file, " overflow");
fprintf (file, " ");
- if (TREE_INT_CST_HIGH (node) == 0)
- fprintf (file, HOST_WIDE_INT_PRINT_UNSIGNED,
- TREE_INT_CST_LOW (node));
- else if (TREE_INT_CST_HIGH (node) == -1
- && TREE_INT_CST_LOW (node) != 0)
- fprintf (file, "-" HOST_WIDE_INT_PRINT_UNSIGNED,
- -TREE_INT_CST_LOW (node));
- else
- fprintf (file, HOST_WIDE_INT_PRINT_DOUBLE_HEX,
- (unsigned HOST_WIDE_INT) TREE_INT_CST_HIGH (node),
- (unsigned HOST_WIDE_INT) TREE_INT_CST_LOW (node));
+ print_dec (node, file, TYPE_SIGN (TREE_TYPE (node)));
break;
case REAL_CST:
diff --git a/gcc/read-rtl.c b/gcc/read-rtl.c
index 81ce1a864ae..046286dbf8a 100644
--- a/gcc/read-rtl.c
+++ b/gcc/read-rtl.c
@@ -811,6 +811,29 @@ validate_const_int (const char *string)
fatal_with_file_and_line ("invalid decimal constant \"%s\"\n", string);
}
+static void
+validate_const_wide_int (const char *string)
+{
+ const char *cp;
+ int valid = 1;
+
+ cp = string;
+ while (*cp && ISSPACE (*cp))
+ cp++;
+ /* Skip the leading 0x. */
+ if (cp[0] == '0' || cp[1] == 'x')
+ cp += 2;
+ else
+ valid = 0;
+ if (*cp == 0)
+ valid = 0;
+ for (; *cp; cp++)
+ if (! ISXDIGIT (*cp))
+ valid = 0;
+ if (!valid)
+ fatal_with_file_and_line ("invalid hex constant \"%s\"\n", string);
+}
+
/* Record that PTR uses iterator ITERATOR. */
static void
@@ -1327,6 +1350,54 @@ read_rtx_code (const char *code_name)
gcc_unreachable ();
}
+ if (CONST_WIDE_INT_P (return_rtx))
+ {
+ read_name (&name);
+ validate_const_wide_int (name.string);
+ {
+ const char *s = name.string;
+ int len;
+ int index = 0;
+ int gs = HOST_BITS_PER_WIDE_INT/4;
+ int pos;
+ char * buf = XALLOCAVEC (char, gs + 1);
+ unsigned HOST_WIDE_INT wi;
+ int wlen;
+
+ /* Skip the leading spaces. */
+ while (*s && ISSPACE (*s))
+ s++;
+
+ /* Skip the leading 0x. */
+ gcc_assert (s[0] == '0');
+ gcc_assert (s[1] == 'x');
+ s += 2;
+
+ len = strlen (s);
+ pos = len - gs;
+ wlen = (len + gs - 1) / gs; /* Number of words needed */
+
+ return_rtx = const_wide_int_alloc (wlen);
+
+ while (pos > 0)
+ {
+#if HOST_BITS_PER_WIDE_INT == 64
+ sscanf (s + pos, "%16" HOST_WIDE_INT_PRINT "x", &wi);
+#else
+ sscanf (s + pos, "%8" HOST_WIDE_INT_PRINT "x", &wi);
+#endif
+ CWI_ELT (return_rtx, index++) = wi;
+ pos -= gs;
+ }
+ strncpy (buf, s, gs - pos);
+ buf [gs - pos] = 0;
+ sscanf (buf, "%" HOST_WIDE_INT_PRINT "x", &wi);
+ CWI_ELT (return_rtx, index++) = wi;
+ /* TODO: After reading, do we want to canonicalize with:
+ value = lookup_const_wide_int (value); ? */
+ }
+ }
+
c = read_skip_spaces ();
/* Syntactic sugar for AND and IOR, allowing Lisp-like
arbitrary number of arguments for them. */
diff --git a/gcc/real.c b/gcc/real.c
index 5cf2525b90a..231fc96c932 100644
--- a/gcc/real.c
+++ b/gcc/real.c
@@ -29,6 +29,7 @@
#include "realmpfr.h"
#include "tm_p.h"
#include "dfp.h"
+#include "wide-int.h"
/* The floating point model used internally is not exactly IEEE 754
compliant, and close to the description in the ISO C99 standard,
@@ -1370,43 +1371,36 @@ real_to_integer (const REAL_VALUE_TYPE *r)
}
}
-/* Likewise, but to an integer pair, HI+LOW. */
+/* Likewise, but producing a wide-int of PRECISION. If the value cannot
+ be represented in precision, *FAIL is set to TRUE. */
-void
-real_to_integer2 (HOST_WIDE_INT *plow, HOST_WIDE_INT *phigh,
- const REAL_VALUE_TYPE *r)
+wide_int
+real_to_integer (const REAL_VALUE_TYPE *r, bool *fail, int precision)
{
- REAL_VALUE_TYPE t;
- unsigned HOST_WIDE_INT low;
- HOST_WIDE_INT high;
+ HOST_WIDE_INT val[2 * WIDE_INT_MAX_ELTS];
int exp;
+ int words, w;
+ wide_int result;
switch (r->cl)
{
case rvc_zero:
underflow:
- low = high = 0;
- break;
+ return wi::zero (precision);
case rvc_inf:
case rvc_nan:
overflow:
- high = (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1);
+ *fail = true;
+
if (r->sign)
- low = 0;
+ return wi::set_bit_in_zero (precision - 1, precision);
else
- {
- high--;
- low = -1;
- }
- break;
+ return ~wi::set_bit_in_zero (precision - 1, precision);
case rvc_normal:
if (r->decimal)
- {
- decimal_real_to_integer2 (plow, phigh, r);
- return;
- }
+ return decimal_real_to_integer (r, fail, precision);
exp = REAL_EXP (r);
if (exp <= 0)
@@ -1415,42 +1409,49 @@ real_to_integer2 (HOST_WIDE_INT *plow, HOST_WIDE_INT *phigh,
undefined, so it doesn't matter what we return, and some callers
expect to be able to use this routine for both signed and
unsigned conversions. */
- if (exp > HOST_BITS_PER_DOUBLE_INT)
+ if (exp > precision)
goto overflow;
- rshift_significand (&t, r, HOST_BITS_PER_DOUBLE_INT - exp);
- if (HOST_BITS_PER_WIDE_INT == HOST_BITS_PER_LONG)
+ /* Put the significand into a wide_int that has precision W, which
+ is the smallest HWI-multiple that has at least PRECISION bits.
+ This ensures that the top bit of the significand is in the
+ top bit of the wide_int. */
+ words = (precision + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT;
+ w = words * HOST_BITS_PER_WIDE_INT;
+
+#if (HOST_BITS_PER_WIDE_INT == HOST_BITS_PER_LONG)
+ for (int i = 0; i < words; i++)
{
- high = t.sig[SIGSZ-1];
- low = t.sig[SIGSZ-2];
+ int j = SIGSZ - words + i;
+ val[i] = (j < 0) ? 0 : r->sig[j];
}
- else
+#else
+ gcc_assert (HOST_BITS_PER_WIDE_INT == 2 * HOST_BITS_PER_LONG);
+ for (int i = 0; i < words; i++)
{
- gcc_assert (HOST_BITS_PER_WIDE_INT == 2*HOST_BITS_PER_LONG);
- high = t.sig[SIGSZ-1];
- high = high << (HOST_BITS_PER_LONG - 1) << 1;
- high |= t.sig[SIGSZ-2];
-
- low = t.sig[SIGSZ-3];
- low = low << (HOST_BITS_PER_LONG - 1) << 1;
- low |= t.sig[SIGSZ-4];
+ int j = SIGSZ - (words * 2) + (i * 2);
+ if (j < 0)
+ val[i] = 0;
+ else
+ val[i] = r->sig[j];
+ j += 1;
+ if (j >= 0)
+ val[i] |= (unsigned HOST_WIDE_INT) r->sig[j] << HOST_BITS_PER_LONG;
}
+#endif
+ /* Shift the value into place and truncate to the desired precision. */
+ result = wide_int::from_array (val, words, w);
+ result = wi::lrshift (result, w - exp);
+ result = wide_int::from (result, precision, UNSIGNED);
if (r->sign)
- {
- if (low == 0)
- high = -high;
- else
- low = -low, high = ~high;
- }
- break;
+ return -result;
+ else
+ return result;
default:
gcc_unreachable ();
}
-
- *plow = low;
- *phigh = high;
}
/* A subroutine of real_to_decimal. Compute the quotient and remainder
@@ -2113,43 +2114,88 @@ real_from_string3 (REAL_VALUE_TYPE *r, const char *s, enum machine_mode mode)
real_convert (r, mode, r);
}
-/* Initialize R from the integer pair HIGH+LOW. */
+/* Initialize R from the wide_int VAL_IN.  MODE must not be VOIDmode.  */
void
real_from_integer (REAL_VALUE_TYPE *r, enum machine_mode mode,
- unsigned HOST_WIDE_INT low, HOST_WIDE_INT high,
- int unsigned_p)
+ const wide_int_ref &val_in, signop sgn)
{
- if (low == 0 && high == 0)
+ if (val_in == 0)
get_zero (r, 0);
else
{
+ unsigned int len = val_in.get_precision ();
+ int i, j, e = 0;
+ int maxbitlen = MAX_BITSIZE_MODE_ANY_INT + HOST_BITS_PER_WIDE_INT;
+ const unsigned int realmax = (SIGNIFICAND_BITS / HOST_BITS_PER_WIDE_INT
+ * HOST_BITS_PER_WIDE_INT);
+
memset (r, 0, sizeof (*r));
r->cl = rvc_normal;
- r->sign = high < 0 && !unsigned_p;
- SET_REAL_EXP (r, HOST_BITS_PER_DOUBLE_INT);
+ r->sign = wi::neg_p (val_in, sgn);
+
+ /* We have to ensure we can negate the largest negative number. */
+ wide_int val = wide_int::from (val_in, maxbitlen, sgn);
if (r->sign)
+ val = -val;
+
+ /* Ensure a multiple of HOST_BITS_PER_WIDE_INT, ceiling, as elt
+ won't work with precisions that are not a multiple of
+ HOST_BITS_PER_WIDE_INT. */
+ len += HOST_BITS_PER_WIDE_INT - 1;
+
+ /* Ensure we can represent the largest negative number. */
+ len += 1;
+
+ len = len/HOST_BITS_PER_WIDE_INT * HOST_BITS_PER_WIDE_INT;
+
+ /* Cap the size to the size allowed by real.h. */
+ if (len > realmax)
{
- high = ~high;
- if (low == 0)
- high += 1;
- else
- low = -low;
+ HOST_WIDE_INT cnt_l_z;
+ cnt_l_z = wi::clz (val);
+
+ if (maxbitlen - cnt_l_z > realmax)
+ {
+ e = maxbitlen - cnt_l_z - realmax;
+
+ /* This value is too large, we must shift it right to
+ preserve all the bits we can, and then bump the
+ exponent up by that amount. */
+ val = wi::lrshift (val, e);
+ }
+ len = realmax;
}
+ /* Clear out top bits so elt will work with precisions that aren't
+ a multiple of HOST_BITS_PER_WIDE_INT. */
+ val = wide_int::from (val, len, sgn);
+ len = len / HOST_BITS_PER_WIDE_INT;
+
+ SET_REAL_EXP (r, len * HOST_BITS_PER_WIDE_INT + e);
+
+ j = SIGSZ - 1;
if (HOST_BITS_PER_LONG == HOST_BITS_PER_WIDE_INT)
- {
- r->sig[SIGSZ-1] = high;
- r->sig[SIGSZ-2] = low;
- }
+ for (i = len - 1; i >= 0; i--)
+ {
+ r->sig[j--] = val.elt (i);
+ if (j < 0)
+ break;
+ }
else
{
gcc_assert (HOST_BITS_PER_LONG*2 == HOST_BITS_PER_WIDE_INT);
- r->sig[SIGSZ-1] = high >> (HOST_BITS_PER_LONG - 1) >> 1;
- r->sig[SIGSZ-2] = high;
- r->sig[SIGSZ-3] = low >> (HOST_BITS_PER_LONG - 1) >> 1;
- r->sig[SIGSZ-4] = low;
+ for (i = len - 1; i >= 0; i--)
+ {
+ HOST_WIDE_INT e = val.elt (i);
+ r->sig[j--] = e >> (HOST_BITS_PER_LONG - 1) >> 1;
+ if (j < 0)
+ break;
+ r->sig[j--] = e;
+ if (j < 0)
+ break;
+ }
}
normalize (r);
@@ -2239,7 +2285,7 @@ ten_to_ptwo (int n)
for (i = 0; i < n; ++i)
t *= t;
- real_from_integer (&tens[n], VOIDmode, t, 0, 1);
+ real_from_integer (&tens[n], VOIDmode, t, UNSIGNED);
}
else
{
@@ -2278,7 +2324,7 @@ real_digit (int n)
gcc_assert (n <= 9);
if (n > 0 && num[n].cl == rvc_zero)
- real_from_integer (&num[n], VOIDmode, n, 0, 1);
+ real_from_integer (&num[n], VOIDmode, n, UNSIGNED);
return &num[n];
}
diff --git a/gcc/real.h b/gcc/real.h
index ff0c523b659..01c405c201e 100644
--- a/gcc/real.h
+++ b/gcc/real.h
@@ -21,6 +21,9 @@
#define GCC_REAL_H
#include "machmode.h"
+#include "signop.h"
+#include "wide-int.h"
+#include "insn-modes.h"
/* An expanded form of the represented number. */
@@ -267,8 +270,6 @@ extern void real_to_hexadecimal (char *, const REAL_VALUE_TYPE *,
/* Render R as an integer. */
extern HOST_WIDE_INT real_to_integer (const REAL_VALUE_TYPE *);
-extern void real_to_integer2 (HOST_WIDE_INT *, HOST_WIDE_INT *,
- const REAL_VALUE_TYPE *);
/* Initialize R from a decimal or hexadecimal string. Return -1 if
the value underflows, +1 if overflows, and 0 otherwise. */
@@ -276,10 +277,6 @@ extern int real_from_string (REAL_VALUE_TYPE *, const char *);
/* Wrapper to allow different internal representation for decimal floats. */
extern void real_from_string3 (REAL_VALUE_TYPE *, const char *, enum machine_mode);
-/* Initialize R from an integer pair HIGH/LOW. */
-extern void real_from_integer (REAL_VALUE_TYPE *, enum machine_mode,
- unsigned HOST_WIDE_INT, HOST_WIDE_INT, int);
-
extern long real_to_target_fmt (long *, const REAL_VALUE_TYPE *,
const struct real_format *);
extern long real_to_target (long *, const REAL_VALUE_TYPE *, enum machine_mode);
@@ -361,12 +358,6 @@ extern const struct real_format arm_half_format;
#define REAL_VALUE_TO_TARGET_SINGLE(IN, OUT) \
((OUT) = real_to_target (NULL, &(IN), mode_for_size (32, MODE_FLOAT, 0)))
-#define REAL_VALUE_FROM_INT(r, lo, hi, mode) \
- real_from_integer (&(r), mode, lo, hi, 0)
-
-#define REAL_VALUE_FROM_UNSIGNED_INT(r, lo, hi, mode) \
- real_from_integer (&(r), mode, lo, hi, 1)
-
/* Real values to IEEE 754 decimal floats. */
/* IN is a REAL_VALUE_TYPE. OUT is an array of longs. */
@@ -383,9 +374,6 @@ extern const struct real_format arm_half_format;
extern REAL_VALUE_TYPE real_value_truncate (enum machine_mode,
REAL_VALUE_TYPE);
-#define REAL_VALUE_TO_INT(plow, phigh, r) \
- real_to_integer2 (plow, phigh, &(r))
-
extern REAL_VALUE_TYPE real_value_negate (const REAL_VALUE_TYPE *);
extern REAL_VALUE_TYPE real_value_abs (const REAL_VALUE_TYPE *);
@@ -485,4 +473,12 @@ extern bool real_isinteger (const REAL_VALUE_TYPE *c, enum machine_mode mode);
number, (1 - b**-p) * b**emax for a given FP format FMT as a hex
float string. BUF must be large enough to contain the result. */
extern void get_max_float (const struct real_format *, char *, size_t);
+
+#ifndef GENERATOR_FILE
+/* real related routines. */
+extern wide_int real_to_integer (const REAL_VALUE_TYPE *, bool *, int);
+extern void real_from_integer (REAL_VALUE_TYPE *, enum machine_mode,
+ const wide_int_ref &, signop);
+#endif
+
#endif /* ! GCC_REAL_H */
diff --git a/gcc/recog.c b/gcc/recog.c
index 00a052295f6..2f5cf8eb577 100644
--- a/gcc/recog.c
+++ b/gcc/recog.c
@@ -1165,7 +1165,7 @@ immediate_operand (rtx op, enum machine_mode mode)
: mode, op));
}
-/* Returns 1 if OP is an operand that is a CONST_INT. */
+/* Returns 1 if OP is an operand that is a CONST_INT of mode MODE. */
int
const_int_operand (rtx op, enum machine_mode mode)
@@ -1180,8 +1180,51 @@ const_int_operand (rtx op, enum machine_mode mode)
return 1;
}
+#if TARGET_SUPPORTS_WIDE_INT
+/* Returns 1 if OP is an operand that is a CONST_INT or CONST_WIDE_INT
+ of mode MODE. */
+int
+const_scalar_int_operand (rtx op, enum machine_mode mode)
+{
+ if (!CONST_SCALAR_INT_P (op))
+ return 0;
+
+ if (CONST_INT_P (op))
+ return const_int_operand (op, mode);
+
+ if (mode != VOIDmode)
+ {
+ int prec = GET_MODE_PRECISION (mode);
+ int bitsize = GET_MODE_BITSIZE (mode);
+
+ if (CONST_WIDE_INT_NUNITS (op) * HOST_BITS_PER_WIDE_INT > bitsize)
+ return 0;
+
+ if (prec == bitsize)
+ return 1;
+ else
+ {
+ /* Multiword partial int. */
+ HOST_WIDE_INT x
+ = CONST_WIDE_INT_ELT (op, CONST_WIDE_INT_NUNITS (op) - 1);
+ return (sext_hwi (x, prec & (HOST_BITS_PER_WIDE_INT - 1)) == x);
+ }
+ }
+ return 1;
+}
+
/* Returns 1 if OP is an operand that is a constant integer or constant
- floating-point number. */
+ floating-point number of MODE. */
+
+int
+const_double_operand (rtx op, enum machine_mode mode)
+{
+ return (GET_CODE (op) == CONST_DOUBLE)
+ && (GET_MODE (op) == mode || mode == VOIDmode);
+}
+#else
+/* Returns 1 if OP is an operand that is a constant integer or constant
+ floating-point number of MODE. */
int
const_double_operand (rtx op, enum machine_mode mode)
@@ -1197,8 +1240,9 @@ const_double_operand (rtx op, enum machine_mode mode)
&& (mode == VOIDmode || GET_MODE (op) == mode
|| GET_MODE (op) == VOIDmode));
}
-
-/* Return 1 if OP is a general operand that is not an immediate operand. */
+#endif
+/* Return 1 if OP is a general operand that is not an immediate
+ operand of mode MODE. */
int
nonimmediate_operand (rtx op, enum machine_mode mode)
diff --git a/gcc/rtl.c b/gcc/rtl.c
index fd794498d88..d241c83885b 100644
--- a/gcc/rtl.c
+++ b/gcc/rtl.c
@@ -109,7 +109,7 @@ const enum rtx_class rtx_class[NUM_RTX_CODE] = {
const unsigned char rtx_code_size[NUM_RTX_CODE] = {
#define DEF_RTL_EXPR(ENUM, NAME, FORMAT, CLASS) \
(((ENUM) == CONST_INT || (ENUM) == CONST_DOUBLE \
- || (ENUM) == CONST_FIXED) \
+ || (ENUM) == CONST_FIXED || (ENUM) == CONST_WIDE_INT) \
? RTX_HDR_SIZE + (sizeof FORMAT - 1) * sizeof (HOST_WIDE_INT) \
: RTX_HDR_SIZE + (sizeof FORMAT - 1) * sizeof (rtunion)),
@@ -181,18 +181,24 @@ shallow_copy_rtvec (rtvec vec)
unsigned int
rtx_size (const_rtx x)
{
+ if (CONST_WIDE_INT_P (x))
+ return (RTX_HDR_SIZE
+ + sizeof (struct hwivec_def)
+ + ((CONST_WIDE_INT_NUNITS (x) - 1)
+ * sizeof (HOST_WIDE_INT)));
if (GET_CODE (x) == SYMBOL_REF && SYMBOL_REF_HAS_BLOCK_INFO_P (x))
return RTX_HDR_SIZE + sizeof (struct block_symbol);
return RTX_CODE_SIZE (GET_CODE (x));
}
-/* Allocate an rtx of code CODE. The CODE is stored in the rtx;
- all the rest is initialized to zero. */
+/* Allocate an rtx of code CODE with EXTRA bytes in it. The CODE is
+ stored in the rtx; all the rest is initialized to zero. */
rtx
-rtx_alloc_stat (RTX_CODE code MEM_STAT_DECL)
+rtx_alloc_stat_v (RTX_CODE code MEM_STAT_DECL, int extra)
{
- rtx rt = ggc_alloc_rtx_def_stat (RTX_CODE_SIZE (code) PASS_MEM_STAT);
+ rtx rt = ggc_alloc_rtx_def_stat (RTX_CODE_SIZE (code) + extra
+ PASS_MEM_STAT);
/* We want to clear everything up to the FLD array. Normally, this
is one int, but we don't want to assume that and it isn't very
@@ -210,6 +216,31 @@ rtx_alloc_stat (RTX_CODE code MEM_STAT_DECL)
return rt;
}
+/* Allocate an rtx of code CODE. The CODE is stored in the rtx;
+ all the rest is initialized to zero. */
+
+rtx
+rtx_alloc_stat (RTX_CODE code MEM_STAT_DECL)
+{
+ return rtx_alloc_stat_v (code PASS_MEM_STAT, 0);
+}
+
+/* Write the wide constant X to OUTFILE. */
+
+void
+cwi_output_hex (FILE *outfile, const_rtx x)
+{
+ int i = CWI_GET_NUM_ELEM (x);
+ gcc_assert (i > 0);
+ if (CWI_ELT (x, i - 1) == 0)
+ /* The HOST_WIDE_INT_PRINT_HEX prepends a 0x only if the value is
+ nonzero.  We want all numbers to have a 0x prefix.  */
+ fprintf (outfile, "0x");
+ fprintf (outfile, HOST_WIDE_INT_PRINT_HEX, CWI_ELT (x, --i));
+ while (--i >= 0)
+ fprintf (outfile, HOST_WIDE_INT_PRINT_PADDED_HEX, CWI_ELT (x, i));
+}
+
/* Return true if ORIG is a sharable CONST. */
@@ -646,6 +677,10 @@ iterative_hash_rtx (const_rtx x, hashval_t hash)
return iterative_hash_object (i, hash);
case CONST_INT:
return iterative_hash_object (INTVAL (x), hash);
+ case CONST_WIDE_INT:
+ for (i = 0; i < CONST_WIDE_INT_NUNITS (x); i++)
+ hash = iterative_hash_object (CONST_WIDE_INT_ELT (x, i), hash);
+ return hash;
case SYMBOL_REF:
if (XSTR (x, 0))
return iterative_hash (XSTR (x, 0), strlen (XSTR (x, 0)) + 1,
@@ -811,6 +846,16 @@ rtl_check_failed_block_symbol (const char *file, int line, const char *func)
/* XXX Maybe print the vector? */
void
+cwi_check_failed_bounds (const_rtx x, int n, const char *file, int line,
+ const char *func)
+{
+ internal_error
+ ("RTL check: access of hwi elt %d of vector with last elt %d in %s, at %s:%d",
+ n, CWI_GET_NUM_ELEM (x) - 1, func, trim_filename (file), line);
+}
+
+/* XXX Maybe print the vector? */
+void
rtvec_check_failed_bounds (const_rtvec r, int n, const char *file, int line,
const char *func)
{
diff --git a/gcc/rtl.def b/gcc/rtl.def
index 56418c792e0..2d7847d4157 100644
--- a/gcc/rtl.def
+++ b/gcc/rtl.def
@@ -345,6 +345,9 @@ DEF_RTL_EXPR(TRAP_IF, "trap_if", "ee", RTX_EXTRA)
/* numeric integer constant */
DEF_RTL_EXPR(CONST_INT, "const_int", "w", RTX_CONST_OBJ)
+/* numeric integer constant */
+DEF_RTL_EXPR(CONST_WIDE_INT, "const_wide_int", "", RTX_CONST_OBJ)
+
/* fixed-point constant */
DEF_RTL_EXPR(CONST_FIXED, "const_fixed", "www", RTX_CONST_OBJ)
diff --git a/gcc/rtl.h b/gcc/rtl.h
index 595b6994f53..9fb75574e95 100644
--- a/gcc/rtl.h
+++ b/gcc/rtl.h
@@ -20,6 +20,7 @@ along with GCC; see the file COPYING3. If not see
#ifndef GCC_RTL_H
#define GCC_RTL_H
+#include <utility>
#include "statistics.h"
#include "machmode.h"
#include "input.h"
@@ -28,6 +29,7 @@ along with GCC; see the file COPYING3. If not see
#include "fixed-value.h"
#include "alias.h"
#include "hashtab.h"
+#include "wide-int.h"
#include "flags.h"
/* Value used by some passes to "recognize" noop moves as valid
@@ -248,6 +250,16 @@ struct GTY(()) object_block {
vec<rtx, va_gc> *anchors;
};
+struct GTY((variable_size)) hwivec_def {
+ HOST_WIDE_INT elem[1];
+};
+
+/* Number of elements of the HWIVEC if RTX is a CONST_WIDE_INT. */
+#define CWI_GET_NUM_ELEM(RTX) \
+ ((int)RTL_FLAG_CHECK1("CWI_GET_NUM_ELEM", (RTX), CONST_WIDE_INT)->u2.num_elem)
+#define CWI_PUT_NUM_ELEM(RTX, NUM) \
+ (RTL_FLAG_CHECK1("CWI_PUT_NUM_ELEM", (RTX), CONST_WIDE_INT)->u2.num_elem = (NUM))
+
/* RTL expression ("rtx"). */
struct GTY((chain_next ("RTX_NEXT (&%h)"),
@@ -334,6 +346,17 @@ struct GTY((chain_next ("RTX_NEXT (&%h)"),
1 in a VALUE or DEBUG_EXPR is NO_LOC_P in var-tracking.c. */
unsigned return_val : 1;
+ union {
+ /* The final union field is aligned to 64 bits on LP64 hosts,
+ giving a 32-bit gap after the fields above. We optimize the
+ layout for that case and use the gap for extra code-specific
+ information. */
+
+ /* In a CONST_WIDE_INT (aka hwivec_def), this is the number of
+ HOST_WIDE_INTs in the hwivec_def. */
+ unsigned GTY ((tag ("CONST_WIDE_INT"))) num_elem:32;
+ } GTY ((desc ("GET_CODE (&%0)"))) u2;
+
/* The first element of the operands of this rtx.
The number of operands and their types are controlled
by the `code' field, according to rtl.def. */
@@ -343,6 +366,7 @@ struct GTY((chain_next ("RTX_NEXT (&%h)"),
struct block_symbol block_sym;
struct real_value rv;
struct fixed_value fv;
+ struct hwivec_def hwiv;
} GTY ((special ("rtx_def"), desc ("GET_CODE (&%0)"))) u;
};
@@ -398,12 +422,38 @@ struct GTY((variable_size)) rtvec_def {
/* Predicate yielding nonzero iff X is an rtx for a memory location. */
#define MEM_P(X) (GET_CODE (X) == MEM)
+#if TARGET_SUPPORTS_WIDE_INT
+
+/* Match CONST_*s that can represent compile-time constant integers. */
+#define CASE_CONST_SCALAR_INT \
+ case CONST_INT: \
+ case CONST_WIDE_INT
+
+/* Match CONST_*s for which pointer equality corresponds to value
+ equality. */
+#define CASE_CONST_UNIQUE \
+ case CONST_INT: \
+ case CONST_WIDE_INT: \
+ case CONST_DOUBLE: \
+ case CONST_FIXED
+
+/* Match all CONST_* rtxes. */
+#define CASE_CONST_ANY \
+ case CONST_INT: \
+ case CONST_WIDE_INT: \
+ case CONST_DOUBLE: \
+ case CONST_FIXED: \
+ case CONST_VECTOR
+
+#else
+
/* Match CONST_*s that can represent compile-time constant integers. */
#define CASE_CONST_SCALAR_INT \
case CONST_INT: \
case CONST_DOUBLE
-/* Match CONST_*s for which pointer equality corresponds to value equality. */
+/* Match CONST_*s for which pointer equality corresponds to value
+ equality. */
#define CASE_CONST_UNIQUE \
case CONST_INT: \
case CONST_DOUBLE: \
@@ -415,10 +465,14 @@ struct GTY((variable_size)) rtvec_def {
case CONST_DOUBLE: \
case CONST_FIXED: \
case CONST_VECTOR
+#endif
/* Predicate yielding nonzero iff X is an rtx for a constant integer. */
#define CONST_INT_P(X) (GET_CODE (X) == CONST_INT)
+/* Predicate yielding nonzero iff X is an rtx for a constant wide integer. */
+#define CONST_WIDE_INT_P(X) (GET_CODE (X) == CONST_WIDE_INT)
+
/* Predicate yielding nonzero iff X is an rtx for a constant fixed-point. */
#define CONST_FIXED_P(X) (GET_CODE (X) == CONST_FIXED)
@@ -431,8 +485,13 @@ struct GTY((variable_size)) rtvec_def {
(GET_CODE (X) == CONST_DOUBLE && GET_MODE (X) == VOIDmode)
/* Predicate yielding true iff X is an rtx for a integer const. */
+#if TARGET_SUPPORTS_WIDE_INT
+#define CONST_SCALAR_INT_P(X) \
+ (CONST_INT_P (X) || CONST_WIDE_INT_P (X))
+#else
#define CONST_SCALAR_INT_P(X) \
(CONST_INT_P (X) || CONST_DOUBLE_AS_INT_P (X))
+#endif
/* Predicate yielding true iff X is an rtx for a double-int. */
#define CONST_DOUBLE_AS_FLOAT_P(X) \
@@ -593,6 +652,15 @@ struct GTY((variable_size)) rtvec_def {
__FUNCTION__); \
&_rtx->u.hwint[_n]; }))
+#define CWI_ELT(RTX, I) __extension__ \
+(*({ __typeof (RTX) const _cwi = (RTX); \
+ int _max = CWI_GET_NUM_ELEM (_cwi); \
+ const int _i = (I); \
+ if (_i < 0 || _i >= _max) \
+ cwi_check_failed_bounds (_cwi, _i, __FILE__, __LINE__, \
+ __FUNCTION__); \
+ &_cwi->u.hwiv.elem[_i]; }))
+
#define XCWINT(RTX, N, C) __extension__ \
(*({ __typeof (RTX) const _rtx = (RTX); \
if (GET_CODE (_rtx) != (C)) \
@@ -629,6 +697,11 @@ struct GTY((variable_size)) rtvec_def {
__FUNCTION__); \
&_symbol->u.block_sym; })
+#define HWIVEC_CHECK(RTX,C) __extension__ \
+({ __typeof (RTX) const _symbol = (RTX); \
+ RTL_CHECKC1 (_symbol, 0, C); \
+ &_symbol->u.hwiv; })
+
extern void rtl_check_failed_bounds (const_rtx, int, const char *, int,
const char *)
ATTRIBUTE_NORETURN;
@@ -649,6 +722,9 @@ extern void rtl_check_failed_code_mode (const_rtx, enum rtx_code, enum machine_m
ATTRIBUTE_NORETURN;
extern void rtl_check_failed_block_symbol (const char *, int, const char *)
ATTRIBUTE_NORETURN;
+extern void cwi_check_failed_bounds (const_rtx, int, const char *, int,
+ const char *)
+ ATTRIBUTE_NORETURN;
extern void rtvec_check_failed_bounds (const_rtvec, int, const char *, int,
const char *)
ATTRIBUTE_NORETURN;
@@ -661,12 +737,14 @@ extern void rtvec_check_failed_bounds (const_rtvec, int, const char *, int,
#define RTL_CHECKC2(RTX, N, C1, C2) ((RTX)->u.fld[N])
#define RTVEC_ELT(RTVEC, I) ((RTVEC)->elem[I])
#define XWINT(RTX, N) ((RTX)->u.hwint[N])
+#define CWI_ELT(RTX, I) ((RTX)->u.hwiv.elem[I])
#define XCWINT(RTX, N, C) ((RTX)->u.hwint[N])
#define XCMWINT(RTX, N, C, M) ((RTX)->u.hwint[N])
#define XCNMWINT(RTX, N, C, M) ((RTX)->u.hwint[N])
#define XCNMPRV(RTX, C, M) (&(RTX)->u.rv)
#define XCNMPFV(RTX, C, M) (&(RTX)->u.fv)
#define BLOCK_SYMBOL_CHECK(RTX) (&(RTX)->u.block_sym)
+#define HWIVEC_CHECK(RTX,C) (&(RTX)->u.hwiv)
#endif
@@ -1153,9 +1231,19 @@ rhs_regno (const_rtx x)
#define INTVAL(RTX) XCWINT (RTX, 0, CONST_INT)
#define UINTVAL(RTX) ((unsigned HOST_WIDE_INT) INTVAL (RTX))
+/* For a CONST_WIDE_INT, CONST_WIDE_INT_NUNITS is the number of
+ elements actually needed to represent the constant.
+ CONST_WIDE_INT_ELT gets one of the elements. 0 is the least
+ significant HOST_WIDE_INT. */
+#define CONST_WIDE_INT_VEC(RTX) HWIVEC_CHECK (RTX, CONST_WIDE_INT)
+#define CONST_WIDE_INT_NUNITS(RTX) CWI_GET_NUM_ELEM (RTX)
+#define CONST_WIDE_INT_ELT(RTX, N) CWI_ELT (RTX, N)
+
/* For a CONST_DOUBLE:
+#if TARGET_SUPPORTS_WIDE_INT == 0
For a VOIDmode, there are two integers CONST_DOUBLE_LOW is the
low-order word and ..._HIGH the high-order.
+#endif
For a float, there is a REAL_VALUE_TYPE structure, and
CONST_DOUBLE_REAL_VALUE(r) is a pointer to it. */
#define CONST_DOUBLE_LOW(r) XCMWINT (r, 0, CONST_DOUBLE, VOIDmode)
@@ -1310,6 +1398,94 @@ struct address_info {
bool autoinc_p;
};
+/* This is used to bundle an rtx and a mode together so that the pair
+ can be used with the wi:: routines. If we ever put modes into rtx
+ integer constants, this should go away and then just pass an rtx in. */
+typedef std::pair <rtx, enum machine_mode> rtx_mode_t;
+
+namespace wi
+{
+ template <>
+ struct int_traits <rtx_mode_t>
+ {
+ static const enum precision_type precision_type = VAR_PRECISION;
+ static const bool host_dependent_precision = false;
+ /* This ought to be true, except for the special case that BImode
+ is canonicalized to STORE_FLAG_VALUE, which might be 1. */
+ static const bool is_sign_extended = false;
+ static unsigned int get_precision (const rtx_mode_t &);
+ static wi::storage_ref decompose (HOST_WIDE_INT *, unsigned int,
+ const rtx_mode_t &);
+ };
+}
+
+inline unsigned int
+wi::int_traits <rtx_mode_t>::get_precision (const rtx_mode_t &x)
+{
+ return GET_MODE_PRECISION (x.second);
+}
+
+inline wi::storage_ref
+wi::int_traits <rtx_mode_t>::decompose (HOST_WIDE_INT *,
+ unsigned int precision,
+ const rtx_mode_t &x)
+{
+ gcc_checking_assert (precision == get_precision (x));
+ switch (GET_CODE (x.first))
+ {
+ case CONST_INT:
+ if (precision < HOST_BITS_PER_WIDE_INT)
+ /* Nonzero BImodes are stored as STORE_FLAG_VALUE, which on many
+ targets is 1 rather than -1. */
+ gcc_checking_assert (INTVAL (x.first)
+ == sext_hwi (INTVAL (x.first), precision)
+ || (x.second == BImode && INTVAL (x.first) == 1));
+
+ return wi::storage_ref (&INTVAL (x.first), 1, precision);
+
+ case CONST_WIDE_INT:
+ return wi::storage_ref (&CONST_WIDE_INT_ELT (x.first, 0),
+ CONST_WIDE_INT_NUNITS (x.first), precision);
+
+#if TARGET_SUPPORTS_WIDE_INT == 0
+ case CONST_DOUBLE:
+ return wi::storage_ref (&CONST_DOUBLE_LOW (x.first), 2, precision);
+#endif
+
+ default:
+ gcc_unreachable ();
+ }
+}
+
+namespace wi
+{
+ hwi_with_prec shwi (HOST_WIDE_INT, enum machine_mode mode);
+ wide_int min_value (enum machine_mode, signop);
+ wide_int max_value (enum machine_mode, signop);
+}
+
+inline wi::hwi_with_prec
+wi::shwi (HOST_WIDE_INT val, enum machine_mode mode)
+{
+ return shwi (val, GET_MODE_PRECISION (mode));
+}
+
+/* Produce the smallest number that is represented in MODE. The precision
+ is taken from MODE and the sign from SGN. */
+inline wide_int
+wi::min_value (enum machine_mode mode, signop sgn)
+{
+ return min_value (GET_MODE_PRECISION (mode), sgn);
+}
+
+/* Produce the largest number that is represented in MODE. The precision
+ is taken from MODE and the sign from SGN. */
+inline wide_int
+wi::max_value (enum machine_mode mode, signop sgn)
+{
+ return max_value (GET_MODE_PRECISION (mode), sgn);
+}
+
extern void init_rtlanal (void);
extern int rtx_cost (rtx, enum rtx_code, int, bool);
extern int address_cost (rtx, enum machine_mode, addr_space_t, bool);
@@ -1765,6 +1941,12 @@ extern rtx plus_constant (enum machine_mode, rtx, HOST_WIDE_INT);
/* In rtl.c */
extern rtx rtx_alloc_stat (RTX_CODE MEM_STAT_DECL);
#define rtx_alloc(c) rtx_alloc_stat (c MEM_STAT_INFO)
+extern rtx rtx_alloc_stat_v (RTX_CODE MEM_STAT_DECL, int);
+#define rtx_alloc_v(c, SZ) rtx_alloc_stat_v (c MEM_STAT_INFO, SZ)
+#define const_wide_int_alloc(NWORDS) \
+ rtx_alloc_v (CONST_WIDE_INT, \
+ (sizeof (struct hwivec_def) \
+ + ((NWORDS)-1) * sizeof (HOST_WIDE_INT))) \
extern rtvec rtvec_alloc (int);
extern rtvec shallow_copy_rtvec (rtvec);
@@ -1821,10 +2003,17 @@ extern void start_sequence (void);
extern void push_to_sequence (rtx);
extern void push_to_sequence2 (rtx, rtx);
extern void end_sequence (void);
+#if TARGET_SUPPORTS_WIDE_INT == 0
extern double_int rtx_to_double_int (const_rtx);
-extern rtx immed_double_int_const (double_int, enum machine_mode);
+#endif
+extern void cwi_output_hex (FILE *, const_rtx);
+#ifndef GENERATOR_FILE
+extern rtx immed_wide_int_const (const wide_int_ref &, enum machine_mode);
+#endif
+#if TARGET_SUPPORTS_WIDE_INT == 0
extern rtx immed_double_const (HOST_WIDE_INT, HOST_WIDE_INT,
enum machine_mode);
+#endif
/* In loop-iv.c */
diff --git a/gcc/rtlanal.c b/gcc/rtlanal.c
index f3471b1bcb6..82cfc1bf70b 100644
--- a/gcc/rtlanal.c
+++ b/gcc/rtlanal.c
@@ -3173,6 +3173,8 @@ commutative_operand_precedence (rtx op)
/* Constants always come the second operand. Prefer "nice" constants. */
if (code == CONST_INT)
return -8;
+ if (code == CONST_WIDE_INT)
+ return -8;
if (code == CONST_DOUBLE)
return -7;
if (code == CONST_FIXED)
@@ -3185,6 +3187,8 @@ commutative_operand_precedence (rtx op)
case RTX_CONST_OBJ:
if (code == CONST_INT)
return -6;
+ if (code == CONST_WIDE_INT)
+ return -6;
if (code == CONST_DOUBLE)
return -5;
if (code == CONST_FIXED)
@@ -5382,7 +5386,10 @@ get_address_mode (rtx mem)
/* Split up a CONST_DOUBLE or integer constant rtx
into two rtx's for single words,
storing in *FIRST the word that comes first in memory in the target
- and in *SECOND the other. */
+ and in *SECOND the other.
+
+ TODO: This function needs to be rewritten to work on any size
+ integer. */
void
split_double (rtx value, rtx *first, rtx *second)
@@ -5459,6 +5466,22 @@ split_double (rtx value, rtx *first, rtx *second)
}
}
}
+ else if (GET_CODE (value) == CONST_WIDE_INT)
+ {
+ /* All of this is scary code and needs to be converted to
+ properly work with any size integer. */
+ gcc_assert (CONST_WIDE_INT_NUNITS (value) == 2);
+ if (WORDS_BIG_ENDIAN)
+ {
+ *first = GEN_INT (CONST_WIDE_INT_ELT (value, 1));
+ *second = GEN_INT (CONST_WIDE_INT_ELT (value, 0));
+ }
+ else
+ {
+ *first = GEN_INT (CONST_WIDE_INT_ELT (value, 0));
+ *second = GEN_INT (CONST_WIDE_INT_ELT (value, 1));
+ }
+ }
else if (!CONST_DOUBLE_P (value))
{
if (WORDS_BIG_ENDIAN)
diff --git a/gcc/sched-vis.c b/gcc/sched-vis.c
index 6a04a652fd5..6f89e084b66 100644
--- a/gcc/sched-vis.c
+++ b/gcc/sched-vis.c
@@ -428,6 +428,23 @@ print_value (pretty_printer *pp, const_rtx x, int verbose)
pp_scalar (pp, HOST_WIDE_INT_PRINT_HEX,
(unsigned HOST_WIDE_INT) INTVAL (x));
break;
+
+ case CONST_WIDE_INT:
+ {
+ const char *sep = "<";
+ int i;
+ for (i = CONST_WIDE_INT_NUNITS (x) - 1; i >= 0; i--)
+ {
+ pp_string (pp, sep);
+ sep = ",";
+ sprintf (tmp, HOST_WIDE_INT_PRINT_HEX,
+ (unsigned HOST_WIDE_INT) CONST_WIDE_INT_ELT (x, i));
+ pp_string (pp, tmp);
+ }
+ pp_greater (pp);
+ }
+ break;
+
case CONST_DOUBLE:
if (FLOAT_MODE_P (GET_MODE (x)))
{
diff --git a/gcc/sel-sched-ir.c b/gcc/sel-sched-ir.c
index f5a4ee03543..868083b1a02 100644
--- a/gcc/sel-sched-ir.c
+++ b/gcc/sel-sched-ir.c
@@ -1141,10 +1141,10 @@ lhs_and_rhs_separable_p (rtx lhs, rtx rhs)
if (lhs == NULL || rhs == NULL)
return false;
- /* Do not schedule CONST, CONST_INT and CONST_DOUBLE etc as rhs: no point
- to use reg, if const can be used. Moreover, scheduling const as rhs may
- lead to mode mismatch cause consts don't have modes but they could be
- merged from branches where the same const used in different modes. */
+ /* Do not schedule constants as rhs: no point to use reg, if const
+ can be used. Moreover, scheduling const as rhs may lead to mode
+ mismatch cause consts don't have modes but they could be merged
+ from branches where the same const used in different modes. */
if (CONSTANT_P (rhs))
return false;
diff --git a/gcc/signop.h b/gcc/signop.h
new file mode 100644
index 00000000000..05dac902df5
--- /dev/null
+++ b/gcc/signop.h
@@ -0,0 +1,35 @@
+/* Operations with SIGNED and UNSIGNED. -*- C++ -*-
+ Copyright (C) 2012-2013 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by the
+Free Software Foundation; either version 3, or (at your option) any
+later version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef SIGNOP_H
+#define SIGNOP_H
+
+/* This type is used for the large number of functions that produce
+ different results depending on if the operands are signed types or
+ unsigned types. The signedness of a tree type can be found by
+ using the TYPE_SIGN macro. */
+
+enum signop_e {
+ SIGNED,
+ UNSIGNED
+};
+
+typedef enum signop_e signop;
+
+#endif
diff --git a/gcc/simplify-rtx.c b/gcc/simplify-rtx.c
index 04af01e6ea2..7fb1c6db63d 100644
--- a/gcc/simplify-rtx.c
+++ b/gcc/simplify-rtx.c
@@ -87,6 +87,22 @@ mode_signbit_p (enum machine_mode mode, const_rtx x)
if (width <= HOST_BITS_PER_WIDE_INT
&& CONST_INT_P (x))
val = INTVAL (x);
+#if TARGET_SUPPORTS_WIDE_INT
+ else if (CONST_WIDE_INT_P (x))
+ {
+ unsigned int i;
+ unsigned int elts = CONST_WIDE_INT_NUNITS (x);
+ if (elts != (width + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT)
+ return false;
+ for (i = 0; i < elts - 1; i++)
+ if (CONST_WIDE_INT_ELT (x, i) != 0)
+ return false;
+ val = CONST_WIDE_INT_ELT (x, elts - 1);
+ width %= HOST_BITS_PER_WIDE_INT;
+ if (width == 0)
+ width = HOST_BITS_PER_WIDE_INT;
+ }
+#else
else if (width <= HOST_BITS_PER_DOUBLE_INT
&& CONST_DOUBLE_AS_INT_P (x)
&& CONST_DOUBLE_LOW (x) == 0)
@@ -94,8 +110,9 @@ mode_signbit_p (enum machine_mode mode, const_rtx x)
val = CONST_DOUBLE_HIGH (x);
width -= HOST_BITS_PER_WIDE_INT;
}
+#endif
else
- /* FIXME: We don't yet have a representation for wider modes. */
+ /* X is not an integer constant. */
return false;
if (width < HOST_BITS_PER_WIDE_INT)
@@ -1532,7 +1549,6 @@ simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
rtx op, enum machine_mode op_mode)
{
unsigned int width = GET_MODE_PRECISION (mode);
- unsigned int op_width = GET_MODE_PRECISION (op_mode);
if (code == VEC_DUPLICATE)
{
@@ -1600,336 +1616,123 @@ simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
if (code == FLOAT && CONST_SCALAR_INT_P (op))
{
- HOST_WIDE_INT hv, lv;
REAL_VALUE_TYPE d;
- if (CONST_INT_P (op))
- lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
- else
- lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
+ if (op_mode == VOIDmode)
+ {
+ /* CONST_INTs have VOIDmode as their mode. We assume that all
+ the bits of the constant are significant, though this is
+ a dangerous assumption, as CONST_INTs are often
+ created and used with garbage in the bits outside of the
+ precision of the implied mode of the const_int. */
+ op_mode = MAX_MODE_INT;
+ }
- REAL_VALUE_FROM_INT (d, lv, hv, mode);
+ real_from_integer (&d, mode, std::make_pair (op, op_mode), SIGNED);
d = real_value_truncate (mode, d);
return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
}
else if (code == UNSIGNED_FLOAT && CONST_SCALAR_INT_P (op))
{
- HOST_WIDE_INT hv, lv;
REAL_VALUE_TYPE d;
- if (CONST_INT_P (op))
- lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
- else
- lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
-
- if (op_mode == VOIDmode
- || GET_MODE_PRECISION (op_mode) > HOST_BITS_PER_DOUBLE_INT)
- /* We should never get a negative number. */
- gcc_assert (hv >= 0);
- else if (GET_MODE_PRECISION (op_mode) <= HOST_BITS_PER_WIDE_INT)
- hv = 0, lv &= GET_MODE_MASK (op_mode);
+ if (op_mode == VOIDmode)
+ {
+ /* CONST_INTs have VOIDmode as their mode. We assume that all
+ the bits of the constant are significant, though this is
+ a dangerous assumption, as CONST_INTs are often
+ created and used with garbage in the bits outside of the
+ precision of the implied mode of the const_int. */
+ op_mode = MAX_MODE_INT;
+ }
- REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
+ real_from_integer (&d, mode, std::make_pair (op, op_mode), UNSIGNED);
d = real_value_truncate (mode, d);
return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
}
- if (CONST_INT_P (op)
- && width <= HOST_BITS_PER_WIDE_INT && width > 0)
+ if (CONST_SCALAR_INT_P (op) && width > 0)
{
- HOST_WIDE_INT arg0 = INTVAL (op);
- HOST_WIDE_INT val;
+ wide_int result;
+ enum machine_mode imode = op_mode == VOIDmode ? mode : op_mode;
+ rtx_mode_t op0 = std::make_pair (op, imode);
+ int int_value;
+
+#if TARGET_SUPPORTS_WIDE_INT == 0
+ /* This assert keeps the simplification from producing a result
+ that cannot be represented in a CONST_DOUBLE, but a lot of
+ upstream callers expect that this function never fails to
+ simplify something, so if you added this to the test
+ above, the code would die later anyway. If this assert
+ happens, you just need to make the port support wide int. */
+ gcc_assert (width <= HOST_BITS_PER_DOUBLE_INT);
+#endif
switch (code)
{
case NOT:
- val = ~ arg0;
+ result = wi::bit_not (op0);
break;
case NEG:
- val = - (unsigned HOST_WIDE_INT) arg0;
+ result = wi::neg (op0);
break;
case ABS:
- val = (arg0 >= 0 ? arg0 : - arg0);
+ result = wi::abs (op0);
break;
case FFS:
- arg0 &= GET_MODE_MASK (mode);
- val = ffs_hwi (arg0);
+ result = wi::shwi (wi::ffs (op0), mode);
break;
case CLZ:
- arg0 &= GET_MODE_MASK (mode);
- if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
- ;
- else
- val = GET_MODE_PRECISION (mode) - floor_log2 (arg0) - 1;
+ if (wi::ne_p (op0, 0))
+ int_value = wi::clz (op0);
+ else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, int_value))
+ int_value = GET_MODE_PRECISION (mode);
+ result = wi::shwi (int_value, mode);
break;
case CLRSB:
- arg0 &= GET_MODE_MASK (mode);
- if (arg0 == 0)
- val = GET_MODE_PRECISION (mode) - 1;
- else if (arg0 >= 0)
- val = GET_MODE_PRECISION (mode) - floor_log2 (arg0) - 2;
- else if (arg0 < 0)
- val = GET_MODE_PRECISION (mode) - floor_log2 (~arg0) - 2;
- break;
-
- case CTZ:
- arg0 &= GET_MODE_MASK (mode);
- if (arg0 == 0)
- {
- /* Even if the value at zero is undefined, we have to come
- up with some replacement. Seems good enough. */
- if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
- val = GET_MODE_PRECISION (mode);
- }
- else
- val = ctz_hwi (arg0);
- break;
-
- case POPCOUNT:
- arg0 &= GET_MODE_MASK (mode);
- val = 0;
- while (arg0)
- val++, arg0 &= arg0 - 1;
- break;
-
- case PARITY:
- arg0 &= GET_MODE_MASK (mode);
- val = 0;
- while (arg0)
- val++, arg0 &= arg0 - 1;
- val &= 1;
- break;
-
- case BSWAP:
- {
- unsigned int s;
-
- val = 0;
- for (s = 0; s < width; s += 8)
- {
- unsigned int d = width - s - 8;
- unsigned HOST_WIDE_INT byte;
- byte = (arg0 >> s) & 0xff;
- val |= byte << d;
- }
- }
- break;
-
- case TRUNCATE:
- val = arg0;
- break;
-
- case ZERO_EXTEND:
- /* When zero-extending a CONST_INT, we need to know its
- original mode. */
- gcc_assert (op_mode != VOIDmode);
- if (op_width == HOST_BITS_PER_WIDE_INT)
- {
- /* If we were really extending the mode,
- we would have to distinguish between zero-extension
- and sign-extension. */
- gcc_assert (width == op_width);
- val = arg0;
- }
- else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
- val = arg0 & GET_MODE_MASK (op_mode);
- else
- return 0;
- break;
-
- case SIGN_EXTEND:
- if (op_mode == VOIDmode)
- op_mode = mode;
- op_width = GET_MODE_PRECISION (op_mode);
- if (op_width == HOST_BITS_PER_WIDE_INT)
- {
- /* If we were really extending the mode,
- we would have to distinguish between zero-extension
- and sign-extension. */
- gcc_assert (width == op_width);
- val = arg0;
- }
- else if (op_width < HOST_BITS_PER_WIDE_INT)
- {
- val = arg0 & GET_MODE_MASK (op_mode);
- if (val_signbit_known_set_p (op_mode, val))
- val |= ~GET_MODE_MASK (op_mode);
- }
- else
- return 0;
- break;
-
- case SQRT:
- case FLOAT_EXTEND:
- case FLOAT_TRUNCATE:
- case SS_TRUNCATE:
- case US_TRUNCATE:
- case SS_NEG:
- case US_NEG:
- case SS_ABS:
- return 0;
-
- default:
- gcc_unreachable ();
- }
-
- return gen_int_mode (val, mode);
- }
-
- /* We can do some operations on integer CONST_DOUBLEs. Also allow
- for a DImode operation on a CONST_INT. */
- else if (width <= HOST_BITS_PER_DOUBLE_INT
- && (CONST_DOUBLE_AS_INT_P (op) || CONST_INT_P (op)))
- {
- double_int first, value;
-
- if (CONST_DOUBLE_AS_INT_P (op))
- first = double_int::from_pair (CONST_DOUBLE_HIGH (op),
- CONST_DOUBLE_LOW (op));
- else
- first = double_int::from_shwi (INTVAL (op));
-
- switch (code)
- {
- case NOT:
- value = ~first;
- break;
-
- case NEG:
- value = -first;
- break;
-
- case ABS:
- if (first.is_negative ())
- value = -first;
- else
- value = first;
- break;
-
- case FFS:
- value.high = 0;
- if (first.low != 0)
- value.low = ffs_hwi (first.low);
- else if (first.high != 0)
- value.low = HOST_BITS_PER_WIDE_INT + ffs_hwi (first.high);
- else
- value.low = 0;
- break;
-
- case CLZ:
- value.high = 0;
- if (first.high != 0)
- value.low = GET_MODE_PRECISION (mode) - floor_log2 (first.high) - 1
- - HOST_BITS_PER_WIDE_INT;
- else if (first.low != 0)
- value.low = GET_MODE_PRECISION (mode) - floor_log2 (first.low) - 1;
- else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, value.low))
- value.low = GET_MODE_PRECISION (mode);
+ result = wi::shwi (wi::clrsb (op0), mode);
break;
case CTZ:
- value.high = 0;
- if (first.low != 0)
- value.low = ctz_hwi (first.low);
- else if (first.high != 0)
- value.low = HOST_BITS_PER_WIDE_INT + ctz_hwi (first.high);
- else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, value.low))
- value.low = GET_MODE_PRECISION (mode);
+ if (wi::ne_p (op0, 0))
+ int_value = wi::ctz (op0);
+ else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, int_value))
+ int_value = GET_MODE_PRECISION (mode);
+ result = wi::shwi (int_value, mode);
break;
case POPCOUNT:
- value = double_int_zero;
- while (first.low)
- {
- value.low++;
- first.low &= first.low - 1;
- }
- while (first.high)
- {
- value.low++;
- first.high &= first.high - 1;
- }
+ result = wi::shwi (wi::popcount (op0), mode);
break;
case PARITY:
- value = double_int_zero;
- while (first.low)
- {
- value.low++;
- first.low &= first.low - 1;
- }
- while (first.high)
- {
- value.low++;
- first.high &= first.high - 1;
- }
- value.low &= 1;
+ result = wi::shwi (wi::parity (op0), mode);
break;
case BSWAP:
- {
- unsigned int s;
-
- value = double_int_zero;
- for (s = 0; s < width; s += 8)
- {
- unsigned int d = width - s - 8;
- unsigned HOST_WIDE_INT byte;
-
- if (s < HOST_BITS_PER_WIDE_INT)
- byte = (first.low >> s) & 0xff;
- else
- byte = (first.high >> (s - HOST_BITS_PER_WIDE_INT)) & 0xff;
-
- if (d < HOST_BITS_PER_WIDE_INT)
- value.low |= byte << d;
- else
- value.high |= byte << (d - HOST_BITS_PER_WIDE_INT);
- }
- }
+ result = wide_int (op0).bswap ();
break;
case TRUNCATE:
- /* This is just a change-of-mode, so do nothing. */
- value = first;
- break;
-
case ZERO_EXTEND:
- gcc_assert (op_mode != VOIDmode);
-
- if (op_width > HOST_BITS_PER_WIDE_INT)
- return 0;
-
- value = double_int::from_uhwi (first.low & GET_MODE_MASK (op_mode));
+ result = wide_int::from (op0, width, UNSIGNED);
break;
case SIGN_EXTEND:
- if (op_mode == VOIDmode
- || op_width > HOST_BITS_PER_WIDE_INT)
- return 0;
- else
- {
- value.low = first.low & GET_MODE_MASK (op_mode);
- if (val_signbit_known_set_p (op_mode, value.low))
- value.low |= ~GET_MODE_MASK (op_mode);
-
- value.high = HWI_SIGN_EXTEND (value.low);
- }
+ result = wide_int::from (op0, width, SIGNED);
break;
case SQRT:
- return 0;
-
default:
return 0;
}
- return immed_double_int_const (value, mode);
+ return immed_wide_int_const (result, mode);
}
else if (CONST_DOUBLE_AS_FLOAT_P (op)
@@ -1977,11 +1780,10 @@ simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
}
return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
}
-
else if (CONST_DOUBLE_AS_FLOAT_P (op)
&& SCALAR_FLOAT_MODE_P (GET_MODE (op))
&& GET_MODE_CLASS (mode) == MODE_INT
- && width <= HOST_BITS_PER_DOUBLE_INT && width > 0)
+ && width > 0)
{
/* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
operators are intentionally left unspecified (to ease implementation
@@ -1990,9 +1792,13 @@ simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
/* This was formerly used only for non-IEEE float.
eggert@twinsun.com says it is safe for IEEE also. */
- HOST_WIDE_INT xh, xl, th, tl;
REAL_VALUE_TYPE x, t;
REAL_VALUE_FROM_CONST_DOUBLE (x, op);
+ wide_int wmax, wmin;
+ /* This is part of the ABI of real_to_integer, but we check
+ things before making this call. */
+ bool fail;
+
switch (code)
{
case FIX:
@@ -2000,44 +1806,18 @@ simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
return const0_rtx;
/* Test against the signed upper bound. */
- if (width > HOST_BITS_PER_WIDE_INT)
- {
- th = ((unsigned HOST_WIDE_INT) 1
- << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1;
- tl = -1;
- }
- else
- {
- th = 0;
- tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1;
- }
- real_from_integer (&t, VOIDmode, tl, th, 0);
+ wmax = wi::max_value (width, SIGNED);
+ real_from_integer (&t, VOIDmode, wmax, SIGNED);
if (REAL_VALUES_LESS (t, x))
- {
- xh = th;
- xl = tl;
- break;
- }
+ return immed_wide_int_const (wmax, mode);
/* Test against the signed lower bound. */
- if (width > HOST_BITS_PER_WIDE_INT)
- {
- th = HOST_WIDE_INT_M1U << (width - HOST_BITS_PER_WIDE_INT - 1);
- tl = 0;
- }
- else
- {
- th = -1;
- tl = HOST_WIDE_INT_M1U << (width - 1);
- }
- real_from_integer (&t, VOIDmode, tl, th, 0);
+ wmin = wi::min_value (width, SIGNED);
+ real_from_integer (&t, VOIDmode, wmin, SIGNED);
if (REAL_VALUES_LESS (x, t))
- {
- xh = th;
- xl = tl;
- break;
- }
- REAL_VALUE_TO_INT (&xl, &xh, x);
+ return immed_wide_int_const (wmin, mode);
+
+ return immed_wide_int_const (real_to_integer (&x, &fail, width), mode);
break;
case UNSIGNED_FIX:
@@ -2045,37 +1825,18 @@ simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
return const0_rtx;
/* Test against the unsigned upper bound. */
- if (width == HOST_BITS_PER_DOUBLE_INT)
- {
- th = -1;
- tl = -1;
- }
- else if (width >= HOST_BITS_PER_WIDE_INT)
- {
- th = ((unsigned HOST_WIDE_INT) 1
- << (width - HOST_BITS_PER_WIDE_INT)) - 1;
- tl = -1;
- }
- else
- {
- th = 0;
- tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
- }
- real_from_integer (&t, VOIDmode, tl, th, 1);
+ wmax = wi::max_value (width, UNSIGNED);
+ real_from_integer (&t, VOIDmode, wmax, UNSIGNED);
if (REAL_VALUES_LESS (t, x))
- {
- xh = th;
- xl = tl;
- break;
- }
+ return immed_wide_int_const (wmax, mode);
- REAL_VALUE_TO_INT (&xl, &xh, x);
+ return immed_wide_int_const (real_to_integer (&x, &fail, width),
+ mode);
break;
default:
gcc_unreachable ();
}
- return immed_double_const (xl, xh, mode);
}
return NULL_RTX;
@@ -2264,49 +2025,50 @@ simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
if (SCALAR_INT_MODE_P (mode))
{
- double_int coeff0, coeff1;
rtx lhs = op0, rhs = op1;
- coeff0 = double_int_one;
- coeff1 = double_int_one;
+ wide_int coeff0 = wi::one (GET_MODE_PRECISION (mode));
+ wide_int coeff1 = wi::one (GET_MODE_PRECISION (mode));
if (GET_CODE (lhs) == NEG)
{
- coeff0 = double_int_minus_one;
+ coeff0 = wi::minus_one (GET_MODE_PRECISION (mode));
lhs = XEXP (lhs, 0);
}
else if (GET_CODE (lhs) == MULT
- && CONST_INT_P (XEXP (lhs, 1)))
+ && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
{
- coeff0 = double_int::from_shwi (INTVAL (XEXP (lhs, 1)));
+ coeff0 = std::make_pair (XEXP (lhs, 1), mode);
lhs = XEXP (lhs, 0);
}
else if (GET_CODE (lhs) == ASHIFT
&& CONST_INT_P (XEXP (lhs, 1))
&& INTVAL (XEXP (lhs, 1)) >= 0
- && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
+ && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (mode))
{
- coeff0 = double_int_zero.set_bit (INTVAL (XEXP (lhs, 1)));
+ coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
+ GET_MODE_PRECISION (mode));
lhs = XEXP (lhs, 0);
}
if (GET_CODE (rhs) == NEG)
{
- coeff1 = double_int_minus_one;
+ coeff1 = wi::minus_one (GET_MODE_PRECISION (mode));
rhs = XEXP (rhs, 0);
}
else if (GET_CODE (rhs) == MULT
&& CONST_INT_P (XEXP (rhs, 1)))
{
- coeff1 = double_int::from_shwi (INTVAL (XEXP (rhs, 1)));
+ coeff1 = std::make_pair (XEXP (rhs, 1), mode);
rhs = XEXP (rhs, 0);
}
else if (GET_CODE (rhs) == ASHIFT
&& CONST_INT_P (XEXP (rhs, 1))
&& INTVAL (XEXP (rhs, 1)) >= 0
- && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
+ && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (mode))
{
- coeff1 = double_int_zero.set_bit (INTVAL (XEXP (rhs, 1)));
+ coeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
+ GET_MODE_PRECISION (mode));
rhs = XEXP (rhs, 0);
}
@@ -2314,11 +2076,9 @@ simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
{
rtx orig = gen_rtx_PLUS (mode, op0, op1);
rtx coeff;
- double_int val;
bool speed = optimize_function_for_speed_p (cfun);
- val = coeff0 + coeff1;
- coeff = immed_double_int_const (val, mode);
+ coeff = immed_wide_int_const (coeff0 + coeff1, mode);
tem = simplify_gen_binary (MULT, mode, lhs, coeff);
return set_src_cost (tem, speed) <= set_src_cost (orig, speed)
@@ -2440,49 +2200,50 @@ simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
if (SCALAR_INT_MODE_P (mode))
{
- double_int coeff0, negcoeff1;
rtx lhs = op0, rhs = op1;
- coeff0 = double_int_one;
- negcoeff1 = double_int_minus_one;
+ wide_int coeff0 = wi::one (GET_MODE_PRECISION (mode));
+ wide_int negcoeff1 = wi::minus_one (GET_MODE_PRECISION (mode));
if (GET_CODE (lhs) == NEG)
{
- coeff0 = double_int_minus_one;
+ coeff0 = wi::minus_one (GET_MODE_PRECISION (mode));
lhs = XEXP (lhs, 0);
}
else if (GET_CODE (lhs) == MULT
- && CONST_INT_P (XEXP (lhs, 1)))
+ && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
{
- coeff0 = double_int::from_shwi (INTVAL (XEXP (lhs, 1)));
+ coeff0 = std::make_pair (XEXP (lhs, 1), mode);
lhs = XEXP (lhs, 0);
}
else if (GET_CODE (lhs) == ASHIFT
&& CONST_INT_P (XEXP (lhs, 1))
&& INTVAL (XEXP (lhs, 1)) >= 0
- && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
+ && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (mode))
{
- coeff0 = double_int_zero.set_bit (INTVAL (XEXP (lhs, 1)));
+ coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
+ GET_MODE_PRECISION (mode));
lhs = XEXP (lhs, 0);
}
if (GET_CODE (rhs) == NEG)
{
- negcoeff1 = double_int_one;
+ negcoeff1 = wi::one (GET_MODE_PRECISION (mode));
rhs = XEXP (rhs, 0);
}
else if (GET_CODE (rhs) == MULT
&& CONST_INT_P (XEXP (rhs, 1)))
{
- negcoeff1 = double_int::from_shwi (-INTVAL (XEXP (rhs, 1)));
+ negcoeff1 = wi::neg (std::make_pair (XEXP (rhs, 1), mode));
rhs = XEXP (rhs, 0);
}
else if (GET_CODE (rhs) == ASHIFT
&& CONST_INT_P (XEXP (rhs, 1))
&& INTVAL (XEXP (rhs, 1)) >= 0
- && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
+ && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (mode))
{
- negcoeff1 = double_int_zero.set_bit (INTVAL (XEXP (rhs, 1)));
+ negcoeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
+ GET_MODE_PRECISION (mode));
negcoeff1 = -negcoeff1;
rhs = XEXP (rhs, 0);
}
@@ -2491,11 +2252,9 @@ simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
{
rtx orig = gen_rtx_MINUS (mode, op0, op1);
rtx coeff;
- double_int val;
bool speed = optimize_function_for_speed_p (cfun);
- val = coeff0 + negcoeff1;
- coeff = immed_double_int_const (val, mode);
+ coeff = immed_wide_int_const (coeff0 + negcoeff1, mode);
tem = simplify_gen_binary (MULT, mode, lhs, coeff);
return set_src_cost (tem, speed) <= set_src_cost (orig, speed)
@@ -2647,26 +2406,13 @@ simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
&& trueop1 == CONST1_RTX (mode))
return op0;
- /* Convert multiply by constant power of two into shift unless
- we are still generating RTL. This test is a kludge. */
- if (CONST_INT_P (trueop1)
- && (val = exact_log2 (UINTVAL (trueop1))) >= 0
- /* If the mode is larger than the host word size, and the
- uppermost bit is set, then this isn't a power of two due
- to implicit sign extension. */
- && (width <= HOST_BITS_PER_WIDE_INT
- || val != HOST_BITS_PER_WIDE_INT - 1))
- return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
-
- /* Likewise for multipliers wider than a word. */
- if (CONST_DOUBLE_AS_INT_P (trueop1)
- && GET_MODE (op0) == mode
- && CONST_DOUBLE_LOW (trueop1) == 0
- && (val = exact_log2 (CONST_DOUBLE_HIGH (trueop1))) >= 0
- && (val < HOST_BITS_PER_DOUBLE_INT - 1
- || GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_DOUBLE_INT))
- return simplify_gen_binary (ASHIFT, mode, op0,
- GEN_INT (val + HOST_BITS_PER_WIDE_INT));
+ /* Convert multiply by constant power of two into shift. */
+ if (CONST_SCALAR_INT_P (trueop1))
+ {
+ val = wi::exact_log2 (std::make_pair (trueop1, mode));
+ if (val >= 0)
+ return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
+ }
/* x*2 is x+x and x*(-1) is -x */
if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
@@ -3770,8 +3516,6 @@ rtx
simplify_const_binary_operation (enum rtx_code code, enum machine_mode mode,
rtx op0, rtx op1)
{
- HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
- HOST_WIDE_INT val;
unsigned int width = GET_MODE_PRECISION (mode);
if (VECTOR_MODE_P (mode)
@@ -3965,299 +3709,143 @@ simplify_const_binary_operation (enum rtx_code code, enum machine_mode mode,
/* We can fold some multi-word operations. */
if (GET_MODE_CLASS (mode) == MODE_INT
- && width == HOST_BITS_PER_DOUBLE_INT
- && (CONST_DOUBLE_AS_INT_P (op0) || CONST_INT_P (op0))
- && (CONST_DOUBLE_AS_INT_P (op1) || CONST_INT_P (op1)))
+ && CONST_SCALAR_INT_P (op0)
+ && CONST_SCALAR_INT_P (op1))
{
- double_int o0, o1, res, tmp;
+ wide_int result;
bool overflow;
-
- o0 = rtx_to_double_int (op0);
- o1 = rtx_to_double_int (op1);
-
+ rtx_mode_t pop0 = std::make_pair (op0, mode);
+ rtx_mode_t pop1 = std::make_pair (op1, mode);
+
+#if TARGET_SUPPORTS_WIDE_INT == 0
+ /* This assert keeps the simplification from producing a result
+ that cannot be represented in a CONST_DOUBLE, but a lot of
+ upstream callers expect that this function never fails to
+ simplify something, so if you added this to the test
+ above, the code would die later anyway. If this assert
+ happens, you just need to make the port support wide int. */
+ gcc_assert (width <= HOST_BITS_PER_DOUBLE_INT);
+#endif
switch (code)
{
case MINUS:
- /* A - B == A + (-B). */
- o1 = -o1;
-
- /* Fall through.... */
+ result = wi::sub (pop0, pop1);
+ break;
case PLUS:
- res = o0 + o1;
+ result = wi::add (pop0, pop1);
break;
case MULT:
- res = o0 * o1;
+ result = wi::mul (pop0, pop1);
break;
case DIV:
- res = o0.divmod_with_overflow (o1, false, TRUNC_DIV_EXPR,
- &tmp, &overflow);
+ result = wi::div_trunc (pop0, pop1, SIGNED, &overflow);
if (overflow)
- return 0;
+ return NULL_RTX;
break;
case MOD:
- tmp = o0.divmod_with_overflow (o1, false, TRUNC_DIV_EXPR,
- &res, &overflow);
+ result = wi::mod_trunc (pop0, pop1, SIGNED, &overflow);
if (overflow)
- return 0;
+ return NULL_RTX;
break;
case UDIV:
- res = o0.divmod_with_overflow (o1, true, TRUNC_DIV_EXPR,
- &tmp, &overflow);
+ result = wi::div_trunc (pop0, pop1, UNSIGNED, &overflow);
if (overflow)
- return 0;
+ return NULL_RTX;
break;
case UMOD:
- tmp = o0.divmod_with_overflow (o1, true, TRUNC_DIV_EXPR,
- &res, &overflow);
+ result = wi::mod_trunc (pop0, pop1, UNSIGNED, &overflow);
if (overflow)
- return 0;
+ return NULL_RTX;
break;
case AND:
- res = o0 & o1;
+ result = wi::bit_and (pop0, pop1);
break;
case IOR:
- res = o0 | o1;
+ result = wi::bit_or (pop0, pop1);
break;
case XOR:
- res = o0 ^ o1;
+ result = wi::bit_xor (pop0, pop1);
break;
case SMIN:
- res = o0.smin (o1);
+ result = wi::smin (pop0, pop1);
break;
case SMAX:
- res = o0.smax (o1);
+ result = wi::smax (pop0, pop1);
break;
case UMIN:
- res = o0.umin (o1);
+ result = wi::umin (pop0, pop1);
break;
case UMAX:
- res = o0.umax (o1);
+ result = wi::umax (pop0, pop1);
break;
- case LSHIFTRT: case ASHIFTRT:
+ case LSHIFTRT:
+ case ASHIFTRT:
case ASHIFT:
- case ROTATE: case ROTATERT:
{
- unsigned HOST_WIDE_INT cnt;
-
+ wide_int wop1 = pop1;
if (SHIFT_COUNT_TRUNCATED)
- {
- o1.high = 0;
- o1.low &= GET_MODE_PRECISION (mode) - 1;
- }
-
- if (!o1.fits_uhwi ()
- || o1.to_uhwi () >= GET_MODE_PRECISION (mode))
- return 0;
-
- cnt = o1.to_uhwi ();
- unsigned short prec = GET_MODE_PRECISION (mode);
-
- if (code == LSHIFTRT || code == ASHIFTRT)
- res = o0.rshift (cnt, prec, code == ASHIFTRT);
- else if (code == ASHIFT)
- res = o0.alshift (cnt, prec);
- else if (code == ROTATE)
- res = o0.lrotate (cnt, prec);
- else /* code == ROTATERT */
- res = o0.rrotate (cnt, prec);
- }
- break;
-
- default:
- return 0;
- }
-
- return immed_double_int_const (res, mode);
- }
-
- if (CONST_INT_P (op0) && CONST_INT_P (op1)
- && width <= HOST_BITS_PER_WIDE_INT && width != 0)
- {
- /* Get the integer argument values in two forms:
- zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
-
- arg0 = INTVAL (op0);
- arg1 = INTVAL (op1);
-
- if (width < HOST_BITS_PER_WIDE_INT)
- {
- arg0 &= GET_MODE_MASK (mode);
- arg1 &= GET_MODE_MASK (mode);
-
- arg0s = arg0;
- if (val_signbit_known_set_p (mode, arg0s))
- arg0s |= ~GET_MODE_MASK (mode);
-
- arg1s = arg1;
- if (val_signbit_known_set_p (mode, arg1s))
- arg1s |= ~GET_MODE_MASK (mode);
- }
- else
- {
- arg0s = arg0;
- arg1s = arg1;
- }
-
- /* Compute the value of the arithmetic. */
-
- switch (code)
- {
- case PLUS:
- val = (unsigned HOST_WIDE_INT) arg0s + arg1s;
- break;
-
- case MINUS:
- val = (unsigned HOST_WIDE_INT) arg0s - arg1s;
- break;
-
- case MULT:
- val = (unsigned HOST_WIDE_INT) arg0s * arg1s;
- break;
-
- case DIV:
- if (arg1s == 0
- || ((unsigned HOST_WIDE_INT) arg0s
- == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
- && arg1s == -1))
- return 0;
- val = arg0s / arg1s;
- break;
-
- case MOD:
- if (arg1s == 0
- || ((unsigned HOST_WIDE_INT) arg0s
- == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
- && arg1s == -1))
- return 0;
- val = arg0s % arg1s;
- break;
-
- case UDIV:
- if (arg1 == 0
- || ((unsigned HOST_WIDE_INT) arg0s
- == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
- && arg1s == -1))
- return 0;
- val = (unsigned HOST_WIDE_INT) arg0 / arg1;
- break;
-
- case UMOD:
- if (arg1 == 0
- || ((unsigned HOST_WIDE_INT) arg0s
- == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
- && arg1s == -1))
- return 0;
- val = (unsigned HOST_WIDE_INT) arg0 % arg1;
- break;
-
- case AND:
- val = arg0 & arg1;
- break;
-
- case IOR:
- val = arg0 | arg1;
- break;
-
- case XOR:
- val = arg0 ^ arg1;
- break;
-
- case LSHIFTRT:
- case ASHIFT:
- case ASHIFTRT:
- /* Truncate the shift if SHIFT_COUNT_TRUNCATED, otherwise make sure
- the value is in range. We can't return any old value for
- out-of-range arguments because either the middle-end (via
- shift_truncation_mask) or the back-end might be relying on
- target-specific knowledge. Nor can we rely on
- shift_truncation_mask, since the shift might not be part of an
- ashlM3, lshrM3 or ashrM3 instruction. */
- if (SHIFT_COUNT_TRUNCATED)
- arg1 = (unsigned HOST_WIDE_INT) arg1 % width;
- else if (arg1 < 0 || arg1 >= GET_MODE_BITSIZE (mode))
- return 0;
-
- val = (code == ASHIFT
- ? ((unsigned HOST_WIDE_INT) arg0) << arg1
- : ((unsigned HOST_WIDE_INT) arg0) >> arg1);
+ wop1 = wi::umod_trunc (wop1, width);
+ else if (wi::geu_p (wop1, width))
+ return NULL_RTX;
- /* Sign-extend the result for arithmetic right shifts. */
- if (code == ASHIFTRT && arg0s < 0 && arg1 > 0)
- val |= HOST_WIDE_INT_M1U << (width - arg1);
- break;
+ switch (code)
+ {
+ case LSHIFTRT:
+ result = wi::lrshift (pop0, wop1);
+ break;
- case ROTATERT:
- if (arg1 < 0)
- return 0;
+ case ASHIFTRT:
+ result = wi::arshift (pop0, wop1);
+ break;
- arg1 %= width;
- val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
- | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
- break;
+ case ASHIFT:
+ result = wi::lshift (pop0, wop1);
+ break;
+ default:
+ gcc_unreachable ();
+ }
+ break;
+ }
case ROTATE:
- if (arg1 < 0)
- return 0;
-
- arg1 %= width;
- val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
- | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
- break;
-
- case COMPARE:
- /* Do nothing here. */
- return 0;
-
- case SMIN:
- val = arg0s <= arg1s ? arg0s : arg1s;
- break;
-
- case UMIN:
- val = ((unsigned HOST_WIDE_INT) arg0
- <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
- break;
-
- case SMAX:
- val = arg0s > arg1s ? arg0s : arg1s;
- break;
+ case ROTATERT:
+ {
+ if (wi::neg_p (pop1))
+ return NULL_RTX;
- case UMAX:
- val = ((unsigned HOST_WIDE_INT) arg0
- > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
- break;
+ switch (code)
+ {
+ case ROTATE:
+ result = wi::lrotate (pop0, pop1);
+ break;
- case SS_PLUS:
- case US_PLUS:
- case SS_MINUS:
- case US_MINUS:
- case SS_MULT:
- case US_MULT:
- case SS_DIV:
- case US_DIV:
- case SS_ASHIFT:
- case US_ASHIFT:
- /* ??? There are simplifications that can be done. */
- return 0;
+ case ROTATERT:
+ result = wi::rrotate (pop0, pop1);
+ break;
+ default:
+ gcc_unreachable ();
+ }
+ break;
+ }
default:
- gcc_unreachable ();
+ return NULL_RTX;
}
-
- return gen_int_mode (val, mode);
+ return immed_wide_int_const (result, mode);
}
return NULL_RTX;
@@ -4940,10 +4528,11 @@ comparison_result (enum rtx_code code, int known_results)
}
}
-/* Check if the given comparison (done in the given MODE) is actually a
- tautology or a contradiction.
- If no simplification is possible, this function returns zero.
- Otherwise, it returns either const_true_rtx or const0_rtx. */
+/* Check if the given comparison (done in the given MODE) is actually
+ a tautology or a contradiction. If the mode is VOID_mode, the
+ comparison is done in "infinite precision". If no simplification
+ is possible, this function returns zero. Otherwise, it returns
+ either const_true_rtx or const0_rtx. */
rtx
simplify_const_relational_operation (enum rtx_code code,
@@ -5067,59 +4656,21 @@ simplify_const_relational_operation (enum rtx_code code,
/* Otherwise, see if the operands are both integers. */
if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
- && (CONST_DOUBLE_AS_INT_P (trueop0) || CONST_INT_P (trueop0))
- && (CONST_DOUBLE_AS_INT_P (trueop1) || CONST_INT_P (trueop1)))
+ && CONST_SCALAR_INT_P (trueop0) && CONST_SCALAR_INT_P (trueop1))
{
- int width = GET_MODE_PRECISION (mode);
- HOST_WIDE_INT l0s, h0s, l1s, h1s;
- unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
-
- /* Get the two words comprising each integer constant. */
- if (CONST_DOUBLE_AS_INT_P (trueop0))
- {
- l0u = l0s = CONST_DOUBLE_LOW (trueop0);
- h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
- }
- else
- {
- l0u = l0s = INTVAL (trueop0);
- h0u = h0s = HWI_SIGN_EXTEND (l0s);
- }
-
- if (CONST_DOUBLE_AS_INT_P (trueop1))
- {
- l1u = l1s = CONST_DOUBLE_LOW (trueop1);
- h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
- }
- else
- {
- l1u = l1s = INTVAL (trueop1);
- h1u = h1s = HWI_SIGN_EXTEND (l1s);
- }
-
- /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
- we have to sign or zero-extend the values. */
- if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
- {
- l0u &= GET_MODE_MASK (mode);
- l1u &= GET_MODE_MASK (mode);
-
- if (val_signbit_known_set_p (mode, l0s))
- l0s |= ~GET_MODE_MASK (mode);
-
- if (val_signbit_known_set_p (mode, l1s))
- l1s |= ~GET_MODE_MASK (mode);
- }
- if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
- h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);
-
- if (h0u == h1u && l0u == l1u)
+ /* It would be nice if we really had a mode here. However, the
+ largest int representable on the target is as good as
+ infinite. */
+ enum machine_mode cmode = (mode == VOIDmode) ? MAX_MODE_INT : mode;
+ rtx_mode_t ptrueop0 = std::make_pair (trueop0, cmode);
+ rtx_mode_t ptrueop1 = std::make_pair (trueop1, cmode);
+
+ if (wi::eq_p (ptrueop0, ptrueop1))
return comparison_result (code, CMP_EQ);
else
{
- int cr;
- cr = (h0s < h1s || (h0s == h1s && l0u < l1u)) ? CMP_LT : CMP_GT;
- cr |= (h0u < h1u || (h0u == h1u && l0u < l1u)) ? CMP_LTU : CMP_GTU;
+ int cr = wi::lts_p (ptrueop0, ptrueop1) ? CMP_LT : CMP_GT;
+ cr |= wi::ltu_p (ptrueop0, ptrueop1) ? CMP_LTU : CMP_GTU;
return comparison_result (code, cr);
}
}
@@ -5575,9 +5126,9 @@ simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
return 0;
}
-/* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_FIXED
- or CONST_VECTOR,
- returning another CONST_INT or CONST_DOUBLE or CONST_FIXED or CONST_VECTOR.
+/* Evaluate a SUBREG of a CONST_INT or CONST_WIDE_INT or CONST_DOUBLE
+ or CONST_FIXED or CONST_VECTOR, returning another CONST_INT or
+ CONST_WIDE_INT or CONST_DOUBLE or CONST_FIXED or CONST_VECTOR.
Works by unpacking OP into a collection of 8-bit values
represented as a little-endian array of 'unsigned char', selecting by BYTE,
@@ -5587,13 +5138,11 @@ static rtx
simplify_immed_subreg (enum machine_mode outermode, rtx op,
enum machine_mode innermode, unsigned int byte)
{
- /* We support up to 512-bit values (for V8DFmode). */
enum {
- max_bitsize = 512,
value_bit = 8,
value_mask = (1 << value_bit) - 1
};
- unsigned char value[max_bitsize / value_bit];
+ unsigned char value[MAX_BITSIZE_MODE_ANY_MODE / value_bit];
int value_start;
int i;
int elem;
@@ -5605,6 +5154,7 @@ simplify_immed_subreg (enum machine_mode outermode, rtx op,
rtvec result_v = NULL;
enum mode_class outer_class;
enum machine_mode outer_submode;
+ int max_bitsize;
/* Some ports misuse CCmode. */
if (GET_MODE_CLASS (outermode) == MODE_CC && CONST_INT_P (op))
@@ -5614,6 +5164,10 @@ simplify_immed_subreg (enum machine_mode outermode, rtx op,
if (COMPLEX_MODE_P (outermode))
return NULL_RTX;
+ /* We support any size mode. */
+ max_bitsize = MAX (GET_MODE_BITSIZE (outermode),
+ GET_MODE_BITSIZE (innermode));
+
/* Unpack the value. */
if (GET_CODE (op) == CONST_VECTOR)
@@ -5663,8 +5217,20 @@ simplify_immed_subreg (enum machine_mode outermode, rtx op,
*vp++ = INTVAL (el) < 0 ? -1 : 0;
break;
+ case CONST_WIDE_INT:
+ {
+ rtx_mode_t val = std::make_pair (el, innermode);
+ unsigned char extend = wi::sign_mask (val);
+
+ for (i = 0; i < elem_bitsize; i += value_bit)
+ *vp++ = wi::extract_uhwi (val, i, value_bit);
+ for (; i < elem_bitsize; i += value_bit)
+ *vp++ = extend;
+ }
+ break;
+
case CONST_DOUBLE:
- if (GET_MODE (el) == VOIDmode)
+ if (TARGET_SUPPORTS_WIDE_INT == 0 && GET_MODE (el) == VOIDmode)
{
unsigned char extend = 0;
/* If this triggers, someone should have generated a
@@ -5687,7 +5253,8 @@ simplify_immed_subreg (enum machine_mode outermode, rtx op,
}
else
{
- long tmp[max_bitsize / 32];
+ /* This is big enough for anything on the platform. */
+ long tmp[MAX_BITSIZE_MODE_ANY_MODE / 32];
int bitsize = GET_MODE_BITSIZE (GET_MODE (el));
gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el)));
@@ -5807,24 +5374,30 @@ simplify_immed_subreg (enum machine_mode outermode, rtx op,
case MODE_INT:
case MODE_PARTIAL_INT:
{
- unsigned HOST_WIDE_INT hi = 0, lo = 0;
-
- for (i = 0;
- i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
- i += value_bit)
- lo |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
- for (; i < elem_bitsize; i += value_bit)
- hi |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask)
- << (i - HOST_BITS_PER_WIDE_INT);
-
- /* immed_double_const doesn't call trunc_int_for_mode. I don't
- know why. */
- if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
- elems[elem] = gen_int_mode (lo, outer_submode);
- else if (elem_bitsize <= HOST_BITS_PER_DOUBLE_INT)
- elems[elem] = immed_double_const (lo, hi, outer_submode);
- else
- return NULL_RTX;
+ int u;
+ int base = 0;
+ int units
+ = (GET_MODE_BITSIZE (outer_submode) + HOST_BITS_PER_WIDE_INT - 1)
+ / HOST_BITS_PER_WIDE_INT;
+ HOST_WIDE_INT tmp[MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_WIDE_INT];
+ wide_int r;
+
+ for (u = 0; u < units; u++)
+ {
+ unsigned HOST_WIDE_INT buf = 0;
+ for (i = 0;
+ i < HOST_BITS_PER_WIDE_INT && base + i < elem_bitsize;
+ i += value_bit)
+ buf |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
+
+ tmp[u] = buf;
+ base += HOST_BITS_PER_WIDE_INT;
+ }
+ gcc_assert (GET_MODE_PRECISION (outer_submode)
+ <= MAX_BITSIZE_MODE_ANY_INT);
+ r = wide_int::from_array (tmp, units,
+ GET_MODE_PRECISION (outer_submode));
+ elems[elem] = immed_wide_int_const (r, outer_submode);
}
break;
@@ -5832,7 +5405,7 @@ simplify_immed_subreg (enum machine_mode outermode, rtx op,
case MODE_DECIMAL_FLOAT:
{
REAL_VALUE_TYPE r;
- long tmp[max_bitsize / 32];
+ long tmp[MAX_BITSIZE_MODE_ANY_MODE / 32];
/* real_from_target wants its input in words affected by
FLOAT_WORDS_BIG_ENDIAN. However, we ignore this,
diff --git a/gcc/stmt.c b/gcc/stmt.c
index 5d68edb73f8..163d495b2b0 100644
--- a/gcc/stmt.c
+++ b/gcc/stmt.c
@@ -1237,9 +1237,7 @@ expand_case (gimple stmt)
original type. Make sure to drop overflow flags. */
low = fold_convert (index_type, low);
if (TREE_OVERFLOW (low))
- low = build_int_cst_wide (index_type,
- TREE_INT_CST_LOW (low),
- TREE_INT_CST_HIGH (low));
+ low = wide_int_to_tree (index_type, low);
/* The canonical from of a case label in GIMPLE is that a simple case
has an empty CASE_HIGH. For the casesi and tablejump expanders,
@@ -1248,9 +1246,7 @@ expand_case (gimple stmt)
high = low;
high = fold_convert (index_type, high);
if (TREE_OVERFLOW (high))
- high = build_int_cst_wide (index_type,
- TREE_INT_CST_LOW (high),
- TREE_INT_CST_HIGH (high));
+ high = wide_int_to_tree (index_type, high);
basic_block case_bb = label_to_block_fn (cfun, lab);
edge case_edge = find_edge (bb, case_bb);
diff --git a/gcc/stor-layout.c b/gcc/stor-layout.c
index 4f99fa3469a..8fa4dc884b1 100644
--- a/gcc/stor-layout.c
+++ b/gcc/stor-layout.c
@@ -2264,13 +2264,10 @@ layout_type (tree type)
&& TYPE_UNSIGNED (TREE_TYPE (lb))
&& tree_int_cst_lt (ub, lb))
{
- unsigned prec = TYPE_PRECISION (TREE_TYPE (lb));
- lb = double_int_to_tree
- (ssizetype,
- tree_to_double_int (lb).sext (prec));
- ub = double_int_to_tree
- (ssizetype,
- tree_to_double_int (ub).sext (prec));
+ lb = wide_int_to_tree (ssizetype,
+ offset_int::from (lb, SIGNED));
+ ub = wide_int_to_tree (ssizetype,
+ offset_int::from (ub, SIGNED));
}
length
= fold_convert (sizetype,
@@ -2546,16 +2543,14 @@ initialize_sizetypes (void)
TYPE_ALIGN (sizetype) = GET_MODE_ALIGNMENT (TYPE_MODE (sizetype));
TYPE_SIZE (sizetype) = bitsize_int (precision);
TYPE_SIZE_UNIT (sizetype) = size_int (GET_MODE_SIZE (TYPE_MODE (sizetype)));
- set_min_and_max_values_for_integral_type (sizetype, precision,
- /*is_unsigned=*/true);
+ set_min_and_max_values_for_integral_type (sizetype, precision, UNSIGNED);
SET_TYPE_MODE (bitsizetype, smallest_mode_for_size (bprecision, MODE_INT));
TYPE_ALIGN (bitsizetype) = GET_MODE_ALIGNMENT (TYPE_MODE (bitsizetype));
TYPE_SIZE (bitsizetype) = bitsize_int (bprecision);
TYPE_SIZE_UNIT (bitsizetype)
= size_int (GET_MODE_SIZE (TYPE_MODE (bitsizetype)));
- set_min_and_max_values_for_integral_type (bitsizetype, bprecision,
- /*is_unsigned=*/true);
+ set_min_and_max_values_for_integral_type (bitsizetype, bprecision, UNSIGNED);
/* Create the signed variants of *sizetype. */
ssizetype = make_signed_type (TYPE_PRECISION (sizetype));
@@ -2575,58 +2570,18 @@ initialize_sizetypes (void)
void
set_min_and_max_values_for_integral_type (tree type,
int precision,
- bool is_unsigned)
+ signop sgn)
{
- tree min_value;
- tree max_value;
-
/* For bitfields with zero width we end up creating integer types
with zero precision. Don't assign any minimum/maximum values
to those types, they don't have any valid value. */
if (precision < 1)
return;
- if (is_unsigned)
- {
- min_value = build_int_cst (type, 0);
- max_value
- = build_int_cst_wide (type, precision - HOST_BITS_PER_WIDE_INT >= 0
- ? -1
- : (HOST_WIDE_INT_1U << precision) - 1,
- precision - HOST_BITS_PER_WIDE_INT > 0
- ? ((unsigned HOST_WIDE_INT) ~0
- >> (HOST_BITS_PER_WIDE_INT
- - (precision - HOST_BITS_PER_WIDE_INT)))
- : 0);
- }
- else
- {
- min_value
- = build_int_cst_wide (type,
- (precision - HOST_BITS_PER_WIDE_INT > 0
- ? 0
- : HOST_WIDE_INT_M1U << (precision - 1)),
- (((HOST_WIDE_INT) (-1)
- << (precision - HOST_BITS_PER_WIDE_INT - 1 > 0
- ? precision - HOST_BITS_PER_WIDE_INT - 1
- : 0))));
- max_value
- = build_int_cst_wide (type,
- (precision - HOST_BITS_PER_WIDE_INT > 0
- ? -1
- : (HOST_WIDE_INT)
- (((unsigned HOST_WIDE_INT) 1
- << (precision - 1)) - 1)),
- (precision - HOST_BITS_PER_WIDE_INT - 1 > 0
- ? (HOST_WIDE_INT)
- ((((unsigned HOST_WIDE_INT) 1
- << (precision - HOST_BITS_PER_WIDE_INT
- - 1))) - 1)
- : 0));
- }
-
- TYPE_MIN_VALUE (type) = min_value;
- TYPE_MAX_VALUE (type) = max_value;
+ TYPE_MIN_VALUE (type)
+ = wide_int_to_tree (type, wi::min_value (precision, sgn));
+ TYPE_MAX_VALUE (type)
+ = wide_int_to_tree (type, wi::max_value (precision, sgn));
}
/* Set the extreme values of TYPE based on its precision in bits,
@@ -2639,14 +2594,7 @@ fixup_signed_type (tree type)
{
int precision = TYPE_PRECISION (type);
- /* We can not represent properly constants greater then
- HOST_BITS_PER_DOUBLE_INT, still we need the types
- as they are used by i386 vector extensions and friends. */
- if (precision > HOST_BITS_PER_DOUBLE_INT)
- precision = HOST_BITS_PER_DOUBLE_INT;
-
- set_min_and_max_values_for_integral_type (type, precision,
- /*is_unsigned=*/false);
+ set_min_and_max_values_for_integral_type (type, precision, SIGNED);
/* Lay out the type: set its alignment, size, etc. */
layout_type (type);
@@ -2661,16 +2609,9 @@ fixup_unsigned_type (tree type)
{
int precision = TYPE_PRECISION (type);
- /* We can not represent properly constants greater then
- HOST_BITS_PER_DOUBLE_INT, still we need the types
- as they are used by i386 vector extensions and friends. */
- if (precision > HOST_BITS_PER_DOUBLE_INT)
- precision = HOST_BITS_PER_DOUBLE_INT;
-
TYPE_UNSIGNED (type) = 1;
- set_min_and_max_values_for_integral_type (type, precision,
- /*is_unsigned=*/true);
+ set_min_and_max_values_for_integral_type (type, precision, UNSIGNED);
/* Lay out the type: set its alignment, size, etc. */
layout_type (type);
diff --git a/gcc/stor-layout.h b/gcc/stor-layout.h
index e2f800d3b76..0ff98f8f051 100644
--- a/gcc/stor-layout.h
+++ b/gcc/stor-layout.h
@@ -20,7 +20,7 @@ along with GCC; see the file COPYING3. If not see
#ifndef GCC_STOR_LAYOUT_H
#define GCC_STOR_LAYOUT_H
-extern void set_min_and_max_values_for_integral_type (tree, int, bool);
+extern void set_min_and_max_values_for_integral_type (tree, int, signop);
extern void fixup_signed_type (tree);
extern void internal_reference_types (void);
extern unsigned int update_alignment_for_field (record_layout_info, tree,
diff --git a/gcc/system.h b/gcc/system.h
index 892271f58a8..b20b5cfde1d 100644
--- a/gcc/system.h
+++ b/gcc/system.h
@@ -711,6 +711,16 @@ extern void fancy_abort (const char *, int, const char *) ATTRIBUTE_NORETURN;
#define gcc_unreachable() (fancy_abort (__FILE__, __LINE__, __FUNCTION__))
#endif
+#if GCC_VERSION >= 3001
+#define STATIC_CONSTANT_P(X) (__builtin_constant_p (X) && (X))
+#else
+#define STATIC_CONSTANT_P(X) (false && (X))
+#endif
+
+/* Until we can use C++11's static_assert. */
+#define STATIC_ASSERT(X) \
+ typedef int assertion1[(X) ? 1 : -1] ATTRIBUTE_UNUSED
+
/* Provide a fake boolean type. We make no attempt to use the
C99 _Bool, as it may not be available in the bootstrap compiler,
and even if it is, it is liable to be buggy.
diff --git a/gcc/target.def b/gcc/target.def
index 7e0c96e472c..febd3207bed 100644
--- a/gcc/target.def
+++ b/gcc/target.def
@@ -3545,9 +3545,9 @@ loop is only entered from the top.\n\
This hook is only used if @code{doloop_end} is available. The default\n\
implementation returns true. You can use @code{can_use_doloop_if_innermost}\n\
if the loop must be the innermost, and if there are no other restrictions.",
- bool, (double_int iterations, double_int iterations_max,
+ bool, (const widest_int &iterations, const widest_int &iterations_max,
unsigned int loop_depth, bool entered_at_top),
- hook_bool_dint_dint_uint_bool_true)
+ hook_bool_wint_wint_uint_bool_true)
/* Returns NULL if target supports the insn within a doloop block,
otherwise it returns an error message. */
diff --git a/gcc/target.h b/gcc/target.h
index 7c28efad9b9..31123d97b3e 100644
--- a/gcc/target.h
+++ b/gcc/target.h
@@ -50,7 +50,7 @@
#include "insn-modes.h"
#include "insn-codes.h"
-#include "double-int.h"
+#include "wide-int.h"
#ifdef ENABLE_CHECKING
diff --git a/gcc/targhooks.c b/gcc/targhooks.c
index 0be1978aa5f..3df93d39432 100644
--- a/gcc/targhooks.c
+++ b/gcc/targhooks.c
@@ -290,7 +290,7 @@ default_cxx_get_cookie_size (tree type)
sizetype_size = size_in_bytes (sizetype);
type_align = size_int (TYPE_ALIGN_UNIT (type));
- if (INT_CST_LT_UNSIGNED (type_align, sizetype_size))
+ if (tree_int_cst_lt (type_align, sizetype_size))
cookie_size = sizetype_size;
else
cookie_size = type_align;
@@ -1711,7 +1711,7 @@ std_gimplify_va_arg_expr (tree valist, tree type, gimple_seq *pre_p,
not support nested low-overhead loops. */
bool
-can_use_doloop_if_innermost (double_int, double_int,
+can_use_doloop_if_innermost (const widest_int &, const widest_int &,
unsigned int loop_depth, bool)
{
return loop_depth == 1;
diff --git a/gcc/targhooks.h b/gcc/targhooks.h
index dbaa1dc5f5c..4be33f8696c 100644
--- a/gcc/targhooks.h
+++ b/gcc/targhooks.h
@@ -207,5 +207,6 @@ extern bool default_member_type_forces_blk (const_tree, enum machine_mode);
extern void default_atomic_assign_expand_fenv (tree *, tree *, tree *);
extern tree build_va_arg_indirect_ref (tree);
extern tree std_gimplify_va_arg_expr (tree, tree, gimple_seq *, gimple_seq *);
-extern bool can_use_doloop_if_innermost (double_int, double_int,
+extern bool can_use_doloop_if_innermost (const widest_int &,
+ const widest_int &,
unsigned int, bool);
diff --git a/gcc/testsuite/gcc.dg/tree-ssa/pr45427.c b/gcc/testsuite/gcc.dg/tree-ssa/pr45427.c
index 0952b5a04f8..0bd1a188278 100644
--- a/gcc/testsuite/gcc.dg/tree-ssa/pr45427.c
+++ b/gcc/testsuite/gcc.dg/tree-ssa/pr45427.c
@@ -25,5 +25,5 @@ int main()
return 0;
}
-/* { dg-final { scan-tree-dump-times "bounded by 0" 0 "cunrolli"} } */
+/* { dg-final { scan-tree-dump-times "bounded by 0x0\[^0-9a-f\]" 0 "cunrolli"} } */
/* { dg-final { cleanup-tree-dump "cunrolli" } } */
diff --git a/gcc/tree-affine.c b/gcc/tree-affine.c
index 91f9a9fee40..1d292c4ad09 100644
--- a/gcc/tree-affine.c
+++ b/gcc/tree-affine.c
@@ -35,13 +35,14 @@ along with GCC; see the file COPYING3. If not see
#include "flags.h"
#include "dumpfile.h"
#include "cfgexpand.h"
+#include "wide-int-print.h"
/* Extends CST as appropriate for the affine combinations COMB. */
-double_int
-double_int_ext_for_comb (double_int cst, aff_tree *comb)
+widest_int
+wide_int_ext_for_comb (const widest_int &cst, aff_tree *comb)
{
- return cst.sext (TYPE_PRECISION (comb->type));
+ return wi::sext (cst, TYPE_PRECISION (comb->type));
}
/* Initializes affine combination COMB so that its value is zero in TYPE. */
@@ -49,19 +50,22 @@ double_int_ext_for_comb (double_int cst, aff_tree *comb)
static void
aff_combination_zero (aff_tree *comb, tree type)
{
+ int i;
comb->type = type;
- comb->offset = double_int_zero;
+ comb->offset = 0;
comb->n = 0;
+ for (i = 0; i < MAX_AFF_ELTS; i++)
+ comb->elts[i].coef = 0;
comb->rest = NULL_TREE;
}
/* Sets COMB to CST. */
void
-aff_combination_const (aff_tree *comb, tree type, double_int cst)
+aff_combination_const (aff_tree *comb, tree type, const widest_int &cst)
{
aff_combination_zero (comb, type);
- comb->offset = double_int_ext_for_comb (cst, comb);
+ comb->offset = wide_int_ext_for_comb (cst, comb);;
}
/* Sets COMB to single element ELT. */
@@ -73,37 +77,34 @@ aff_combination_elt (aff_tree *comb, tree type, tree elt)
comb->n = 1;
comb->elts[0].val = elt;
- comb->elts[0].coef = double_int_one;
+ comb->elts[0].coef = 1;
}
/* Scales COMB by SCALE. */
void
-aff_combination_scale (aff_tree *comb, double_int scale)
+aff_combination_scale (aff_tree *comb, const widest_int &scale_in)
{
unsigned i, j;
- scale = double_int_ext_for_comb (scale, comb);
- if (scale.is_one ())
+ widest_int scale = wide_int_ext_for_comb (scale_in, comb);
+ if (scale == 1)
return;
- if (scale.is_zero ())
+ if (scale == 0)
{
aff_combination_zero (comb, comb->type);
return;
}
- comb->offset
- = double_int_ext_for_comb (scale * comb->offset, comb);
+ comb->offset = wide_int_ext_for_comb (scale * comb->offset, comb);
for (i = 0, j = 0; i < comb->n; i++)
{
- double_int new_coef;
-
- new_coef
- = double_int_ext_for_comb (scale * comb->elts[i].coef, comb);
+ widest_int new_coef
+ = wide_int_ext_for_comb (scale * comb->elts[i].coef, comb);
/* A coefficient may become zero due to overflow. Remove the zero
elements. */
- if (new_coef.is_zero ())
+ if (new_coef == 0)
continue;
comb->elts[j].coef = new_coef;
comb->elts[j].val = comb->elts[i].val;
@@ -125,30 +126,28 @@ aff_combination_scale (aff_tree *comb, double_int scale)
}
else
comb->rest = fold_build2 (MULT_EXPR, type, comb->rest,
- double_int_to_tree (type, scale));
+ wide_int_to_tree (type, scale));
}
}
/* Adds ELT * SCALE to COMB. */
void
-aff_combination_add_elt (aff_tree *comb, tree elt, double_int scale)
+aff_combination_add_elt (aff_tree *comb, tree elt, const widest_int &scale_in)
{
unsigned i;
tree type;
- scale = double_int_ext_for_comb (scale, comb);
- if (scale.is_zero ())
+ widest_int scale = wide_int_ext_for_comb (scale_in, comb);
+ if (scale == 0)
return;
for (i = 0; i < comb->n; i++)
if (operand_equal_p (comb->elts[i].val, elt, 0))
{
- double_int new_coef;
-
- new_coef = comb->elts[i].coef + scale;
- new_coef = double_int_ext_for_comb (new_coef, comb);
- if (!new_coef.is_zero ())
+ widest_int new_coef
+ = wide_int_ext_for_comb (comb->elts[i].coef + scale, comb);
+ if (new_coef != 0)
{
comb->elts[i].coef = new_coef;
return;
@@ -160,7 +159,7 @@ aff_combination_add_elt (aff_tree *comb, tree elt, double_int scale)
if (comb->rest)
{
gcc_assert (comb->n == MAX_AFF_ELTS - 1);
- comb->elts[comb->n].coef = double_int_one;
+ comb->elts[comb->n].coef = 1;
comb->elts[comb->n].val = comb->rest;
comb->rest = NULL_TREE;
comb->n++;
@@ -179,12 +178,12 @@ aff_combination_add_elt (aff_tree *comb, tree elt, double_int scale)
if (POINTER_TYPE_P (type))
type = sizetype;
- if (scale.is_one ())
+ if (scale == 1)
elt = fold_convert (type, elt);
else
elt = fold_build2 (MULT_EXPR, type,
fold_convert (type, elt),
- double_int_to_tree (type, scale));
+ wide_int_to_tree (type, scale));
if (comb->rest)
comb->rest = fold_build2 (PLUS_EXPR, type, comb->rest,
@@ -196,9 +195,9 @@ aff_combination_add_elt (aff_tree *comb, tree elt, double_int scale)
/* Adds CST to C. */
static void
-aff_combination_add_cst (aff_tree *c, double_int cst)
+aff_combination_add_cst (aff_tree *c, const widest_int &cst)
{
- c->offset = double_int_ext_for_comb (c->offset + cst, c);
+ c->offset = wide_int_ext_for_comb (c->offset + cst, c);
}
/* Adds COMB2 to COMB1. */
@@ -212,7 +211,7 @@ aff_combination_add (aff_tree *comb1, aff_tree *comb2)
for (i = 0; i < comb2->n; i++)
aff_combination_add_elt (comb1, comb2->elts[i].val, comb2->elts[i].coef);
if (comb2->rest)
- aff_combination_add_elt (comb1, comb2->rest, double_int_one);
+ aff_combination_add_elt (comb1, comb2->rest, 1);
}
/* Converts affine combination COMB to TYPE. */
@@ -237,13 +236,12 @@ aff_combination_convert (aff_tree *comb, tree type)
if (TYPE_PRECISION (type) == TYPE_PRECISION (comb_type))
return;
- comb->offset = double_int_ext_for_comb (comb->offset, comb);
+ comb->offset = wide_int_ext_for_comb (comb->offset, comb);
for (i = j = 0; i < comb->n; i++)
{
- double_int new_coef = double_int_ext_for_comb (comb->elts[i].coef, comb);
- if (new_coef.is_zero ())
+ if (comb->elts[i].coef == 0)
continue;
- comb->elts[j].coef = new_coef;
+ comb->elts[j].coef = comb->elts[i].coef;
comb->elts[j].val = fold_convert (type, comb->elts[i].val);
j++;
}
@@ -251,7 +249,7 @@ aff_combination_convert (aff_tree *comb, tree type)
comb->n = j;
if (comb->n < MAX_AFF_ELTS && comb->rest)
{
- comb->elts[comb->n].coef = double_int_one;
+ comb->elts[comb->n].coef = 1;
comb->elts[comb->n].val = comb->rest;
comb->rest = NULL_TREE;
comb->n++;
@@ -276,7 +274,7 @@ tree_to_aff_combination (tree expr, tree type, aff_tree *comb)
switch (code)
{
case INTEGER_CST:
- aff_combination_const (comb, type, tree_to_double_int (expr));
+ aff_combination_const (comb, type, wi::to_widest (expr));
return;
case POINTER_PLUS_EXPR:
@@ -290,7 +288,7 @@ tree_to_aff_combination (tree expr, tree type, aff_tree *comb)
tree_to_aff_combination (TREE_OPERAND (expr, 0), type, comb);
tree_to_aff_combination (TREE_OPERAND (expr, 1), type, &tmp);
if (code == MINUS_EXPR)
- aff_combination_scale (&tmp, double_int_minus_one);
+ aff_combination_scale (&tmp, -1);
aff_combination_add (comb, &tmp);
return;
@@ -299,19 +297,19 @@ tree_to_aff_combination (tree expr, tree type, aff_tree *comb)
if (TREE_CODE (cst) != INTEGER_CST)
break;
tree_to_aff_combination (TREE_OPERAND (expr, 0), type, comb);
- aff_combination_scale (comb, tree_to_double_int (cst));
+ aff_combination_scale (comb, wi::to_widest (cst));
return;
case NEGATE_EXPR:
tree_to_aff_combination (TREE_OPERAND (expr, 0), type, comb);
- aff_combination_scale (comb, double_int_minus_one);
+ aff_combination_scale (comb, -1);
return;
case BIT_NOT_EXPR:
/* ~x = -x - 1 */
tree_to_aff_combination (TREE_OPERAND (expr, 0), type, comb);
- aff_combination_scale (comb, double_int_minus_one);
- aff_combination_add_cst (comb, double_int_minus_one);
+ aff_combination_scale (comb, -1);
+ aff_combination_add_cst (comb, -1);
return;
case ADDR_EXPR:
@@ -329,11 +327,10 @@ tree_to_aff_combination (tree expr, tree type, aff_tree *comb)
false);
if (bitpos % BITS_PER_UNIT != 0)
break;
- aff_combination_const (comb, type,
- double_int::from_uhwi (bitpos / BITS_PER_UNIT));
+ aff_combination_const (comb, type, bitpos / BITS_PER_UNIT);
core = build_fold_addr_expr (core);
if (TREE_CODE (core) == ADDR_EXPR)
- aff_combination_add_elt (comb, core, double_int_one);
+ aff_combination_add_elt (comb, core, 1);
else
{
tree_to_aff_combination (core, type, &tmp);
@@ -376,25 +373,25 @@ tree_to_aff_combination (tree expr, tree type, aff_tree *comb)
combination COMB. */
static tree
-add_elt_to_tree (tree expr, tree type, tree elt, double_int scale,
- aff_tree *comb)
+add_elt_to_tree (tree expr, tree type, tree elt, const widest_int &scale_in,
+ aff_tree *comb ATTRIBUTE_UNUSED)
{
enum tree_code code;
tree type1 = type;
if (POINTER_TYPE_P (type))
type1 = sizetype;
- scale = double_int_ext_for_comb (scale, comb);
+ widest_int scale = wide_int_ext_for_comb (scale_in, comb);
- if (scale.is_minus_one ()
+ if (scale == -1
&& POINTER_TYPE_P (TREE_TYPE (elt)))
{
elt = convert_to_ptrofftype (elt);
elt = fold_build1 (NEGATE_EXPR, TREE_TYPE (elt), elt);
- scale = double_int_one;
+ scale = 1;
}
- if (scale.is_one ())
+ if (scale == 1)
{
if (!expr)
{
@@ -412,7 +409,7 @@ add_elt_to_tree (tree expr, tree type, tree elt, double_int scale,
expr, fold_convert (type1, elt));
}
- if (scale.is_minus_one ())
+ if (scale == -1)
{
if (!expr)
return fold_build1 (NEGATE_EXPR, type1,
@@ -431,9 +428,9 @@ add_elt_to_tree (tree expr, tree type, tree elt, double_int scale,
elt = fold_convert (type1, elt);
if (!expr)
return fold_build2 (MULT_EXPR, type1, elt,
- double_int_to_tree (type1, scale));
+ wide_int_to_tree (type1, scale));
- if (scale.is_negative ())
+ if (wi::neg_p (scale))
{
code = MINUS_EXPR;
scale = -scale;
@@ -442,7 +439,7 @@ add_elt_to_tree (tree expr, tree type, tree elt, double_int scale,
code = PLUS_EXPR;
elt = fold_build2 (MULT_EXPR, type1, elt,
- double_int_to_tree (type1, scale));
+ wide_int_to_tree (type1, scale));
if (POINTER_TYPE_P (TREE_TYPE (expr)))
{
if (code == MINUS_EXPR)
@@ -460,7 +457,7 @@ aff_combination_to_tree (aff_tree *comb)
tree type = comb->type;
tree expr = NULL_TREE;
unsigned i;
- double_int off, sgn;
+ widest_int off, sgn;
tree type1 = type;
if (POINTER_TYPE_P (type))
type1 = sizetype;
@@ -472,21 +469,21 @@ aff_combination_to_tree (aff_tree *comb)
comb);
if (comb->rest)
- expr = add_elt_to_tree (expr, type, comb->rest, double_int_one, comb);
+ expr = add_elt_to_tree (expr, type, comb->rest, 1, comb);
/* Ensure that we get x - 1, not x + (-1) or x + 0xff..f if x is
unsigned. */
- if (comb->offset.is_negative ())
+ if (wi::neg_p (comb->offset))
{
off = -comb->offset;
- sgn = double_int_minus_one;
+ sgn = -1;
}
else
{
off = comb->offset;
- sgn = double_int_one;
+ sgn = 1;
}
- return add_elt_to_tree (expr, type, double_int_to_tree (type1, off), sgn,
+ return add_elt_to_tree (expr, type, wide_int_to_tree (type1, off), sgn,
comb);
}
@@ -513,7 +510,7 @@ aff_combination_remove_elt (aff_tree *comb, unsigned m)
comb->elts[m] = comb->elts[comb->n];
if (comb->rest)
{
- comb->elts[comb->n].coef = double_int_one;
+ comb->elts[comb->n].coef = 1;
comb->elts[comb->n].val = comb->rest;
comb->rest = NULL_TREE;
comb->n++;
@@ -525,7 +522,7 @@ aff_combination_remove_elt (aff_tree *comb, unsigned m)
static void
-aff_combination_add_product (aff_tree *c, double_int coef, tree val,
+aff_combination_add_product (aff_tree *c, const widest_int &coef, tree val,
aff_tree *r)
{
unsigned i;
@@ -576,7 +573,7 @@ aff_combination_mult (aff_tree *c1, aff_tree *c2, aff_tree *r)
for (i = 0; i < c2->n; i++)
aff_combination_add_product (c1, c2->elts[i].coef, c2->elts[i].val, r);
if (c2->rest)
- aff_combination_add_product (c1, double_int_one, c2->rest, r);
+ aff_combination_add_product (c1, 1, c2->rest, r);
aff_combination_add_product (c1, c2->offset, NULL, r);
}
@@ -623,7 +620,7 @@ aff_combination_expand (aff_tree *comb ATTRIBUTE_UNUSED,
aff_tree to_add, current, curre;
tree e, rhs;
gimple def;
- double_int scale;
+ widest_int scale;
void **slot;
struct name_expansion *exp;
@@ -768,25 +765,24 @@ free_affine_expand_cache (struct pointer_map_t **cache)
is set to true. */
static bool
-double_int_constant_multiple_p (double_int val, double_int div,
- bool *mult_set, double_int *mult)
+wide_int_constant_multiple_p (const widest_int &val, const widest_int &div,
+ bool *mult_set, widest_int *mult)
{
- double_int rem, cst;
+ widest_int rem, cst;
- if (val.is_zero ())
+ if (val == 0)
{
- if (*mult_set && !mult->is_zero ())
+ if (*mult_set && mult != 0)
return false;
*mult_set = true;
- *mult = double_int_zero;
+ *mult = 0;
return true;
}
- if (div.is_zero ())
+ if (div == 0)
return false;
- cst = val.sdivmod (div, FLOOR_DIV_EXPR, &rem);
- if (!rem.is_zero ())
+ if (!wi::multiple_of_p (val, div, SIGNED, &cst))
return false;
if (*mult_set && *mult != cst)
@@ -802,14 +798,14 @@ double_int_constant_multiple_p (double_int val, double_int div,
bool
aff_combination_constant_multiple_p (aff_tree *val, aff_tree *div,
- double_int *mult)
+ widest_int *mult)
{
bool mult_set = false;
unsigned i;
- if (val->n == 0 && val->offset.is_zero ())
+ if (val->n == 0 && val->offset == 0)
{
- *mult = double_int_zero;
+ *mult = 0;
return true;
}
if (val->n != div->n)
@@ -818,8 +814,8 @@ aff_combination_constant_multiple_p (aff_tree *val, aff_tree *div,
if (val->rest || div->rest)
return false;
- if (!double_int_constant_multiple_p (val->offset, div->offset,
- &mult_set, mult))
+ if (!wide_int_constant_multiple_p (val->offset, div->offset,
+ &mult_set, mult))
return false;
for (i = 0; i < div->n; i++)
@@ -828,8 +824,8 @@ aff_combination_constant_multiple_p (aff_tree *val, aff_tree *div,
= aff_combination_find_elt (val, div->elts[i].val, NULL);
if (!elt)
return false;
- if (!double_int_constant_multiple_p (elt->coef, div->elts[i].coef,
- &mult_set, mult))
+ if (!wide_int_constant_multiple_p (elt->coef, div->elts[i].coef,
+ &mult_set, mult))
return false;
}
@@ -843,13 +839,13 @@ static void
print_aff (FILE *file, aff_tree *val)
{
unsigned i;
- bool uns = TYPE_UNSIGNED (val->type);
+ signop sgn = TYPE_SIGN (val->type);
if (POINTER_TYPE_P (val->type))
- uns = false;
+ sgn = SIGNED;
fprintf (file, "{\n type = ");
print_generic_expr (file, val->type, TDF_VOPS|TDF_MEMSYMS);
fprintf (file, "\n offset = ");
- dump_double_int (file, val->offset, uns);
+ print_dec (val->offset, file, sgn);
if (val->n > 0)
{
fprintf (file, "\n elements = {\n");
@@ -859,7 +855,7 @@ print_aff (FILE *file, aff_tree *val)
print_generic_expr (file, val->elts[i].val, TDF_VOPS|TDF_MEMSYMS);
fprintf (file, " * ");
- dump_double_int (file, val->elts[i].coef, uns);
+ print_dec (val->elts[i].coef, file, sgn);
if (i != val->n - 1)
fprintf (file, ", \n");
}
@@ -887,7 +883,7 @@ debug_aff (aff_tree *val)
which REF refers. */
tree
-get_inner_reference_aff (tree ref, aff_tree *addr, double_int *size)
+get_inner_reference_aff (tree ref, aff_tree *addr, widest_int *size)
{
HOST_WIDE_INT bitsize, bitpos;
tree toff;
@@ -908,11 +904,10 @@ get_inner_reference_aff (tree ref, aff_tree *addr, double_int *size)
aff_combination_add (addr, &tmp);
}
- aff_combination_const (&tmp, sizetype,
- double_int::from_shwi (bitpos / BITS_PER_UNIT));
+ aff_combination_const (&tmp, sizetype, bitpos / BITS_PER_UNIT);
aff_combination_add (addr, &tmp);
- *size = double_int::from_shwi ((bitsize + BITS_PER_UNIT - 1) / BITS_PER_UNIT);
+ *size = (bitsize + BITS_PER_UNIT - 1) / BITS_PER_UNIT;
return base;
}
@@ -921,26 +916,23 @@ get_inner_reference_aff (tree ref, aff_tree *addr, double_int *size)
size SIZE2 at position DIFF cannot overlap. */
bool
-aff_comb_cannot_overlap_p (aff_tree *diff, double_int size1, double_int size2)
+aff_comb_cannot_overlap_p (aff_tree *diff, const widest_int &size1,
+ const widest_int &size2)
{
- double_int d, bound;
-
/* Unless the difference is a constant, we fail. */
if (diff->n != 0)
return false;
- d = diff->offset;
- if (d.is_negative ())
+ if (wi::neg_p (diff->offset))
{
/* The second object is before the first one, we succeed if the last
element of the second object is before the start of the first one. */
- bound = d + size2 + double_int_minus_one;
- return bound.is_negative ();
+ return wi::neg_p (diff->offset + size2 - 1);
}
else
{
/* We succeed if the second object starts after the first one ends. */
- return size1.sle (d);
+ return wi::les_p (size1, diff->offset);
}
}
diff --git a/gcc/tree-affine.h b/gcc/tree-affine.h
index 52a10a4d0c8..8c9d990b98d 100644
--- a/gcc/tree-affine.h
+++ b/gcc/tree-affine.h
@@ -20,6 +20,8 @@ along with GCC; see the file COPYING3. If not see
/* Affine combination of trees. We keep track of at most MAX_AFF_ELTS elements
to make things simpler; this is sufficient in most cases. */
+#include "wide-int.h"
+
#define MAX_AFF_ELTS 8
/* Element of an affine combination. */
@@ -30,7 +32,7 @@ struct aff_comb_elt
tree val;
/* Its coefficient in the combination. */
- double_int coef;
+ widest_int coef;
};
struct aff_tree
@@ -39,7 +41,7 @@ struct aff_tree
tree type;
/* Constant offset. */
- double_int offset;
+ widest_int offset;
/* Number of elements of the combination. */
unsigned n;
@@ -58,25 +60,26 @@ struct aff_tree
tree rest;
};
-double_int double_int_ext_for_comb (double_int, aff_tree *);
-void aff_combination_const (aff_tree *, tree, double_int);
+widest_int wide_int_ext_for_comb (const widest_int &, aff_tree *);
+void aff_combination_const (aff_tree *, tree, const widest_int &);
void aff_combination_elt (aff_tree *, tree, tree);
-void aff_combination_scale (aff_tree *, double_int);
+void aff_combination_scale (aff_tree *, const widest_int &);
void aff_combination_mult (aff_tree *, aff_tree *, aff_tree *);
void aff_combination_add (aff_tree *, aff_tree *);
-void aff_combination_add_elt (aff_tree *, tree, double_int);
+void aff_combination_add_elt (aff_tree *, tree, const widest_int &);
void aff_combination_remove_elt (aff_tree *, unsigned);
void aff_combination_convert (aff_tree *, tree);
void tree_to_aff_combination (tree, tree, aff_tree *);
tree aff_combination_to_tree (aff_tree *);
void unshare_aff_combination (aff_tree *);
-bool aff_combination_constant_multiple_p (aff_tree *, aff_tree *, double_int *);
+bool aff_combination_constant_multiple_p (aff_tree *, aff_tree *, widest_int *);
void aff_combination_expand (aff_tree *, struct pointer_map_t **);
void tree_to_aff_combination_expand (tree, tree, aff_tree *,
struct pointer_map_t **);
-tree get_inner_reference_aff (tree, aff_tree *, double_int *);
+tree get_inner_reference_aff (tree, aff_tree *, widest_int *);
void free_affine_expand_cache (struct pointer_map_t **);
-bool aff_comb_cannot_overlap_p (aff_tree *, double_int, double_int);
+bool aff_comb_cannot_overlap_p (aff_tree *, const widest_int &,
+ const widest_int &);
/* Debugging functions. */
void debug_aff (aff_tree *);
@@ -88,7 +91,7 @@ aff_combination_zero_p (aff_tree *aff)
if (!aff)
return true;
- if (aff->n == 0 && aff->offset.is_zero ())
+ if (aff->n == 0 && aff->offset == 0)
return true;
return false;
diff --git a/gcc/tree-call-cdce.c b/gcc/tree-call-cdce.c
index 269659caff5..f62fb40b359 100644
--- a/gcc/tree-call-cdce.c
+++ b/gcc/tree-call-cdce.c
@@ -205,7 +205,7 @@ check_pow (gimple pow_call)
return false;
if (REAL_VALUES_LESS (bcv, dconst1))
return false;
- real_from_integer (&mv, TYPE_MODE (TREE_TYPE (base)), 256, 0, 1);
+ real_from_integer (&mv, TYPE_MODE (TREE_TYPE (base)), 256, UNSIGNED);
if (REAL_VALUES_LESS (mv, bcv))
return false;
return true;
@@ -422,7 +422,7 @@ gen_conditions_for_pow_cst_base (tree base, tree expn,
REAL_VALUE_TYPE bcv = TREE_REAL_CST (base);
gcc_assert (!REAL_VALUES_EQUAL (bcv, dconst1)
&& !REAL_VALUES_LESS (bcv, dconst1));
- real_from_integer (&mv, TYPE_MODE (TREE_TYPE (base)), 256, 0, 1);
+ real_from_integer (&mv, TYPE_MODE (TREE_TYPE (base)), 256, UNSIGNED);
gcc_assert (!REAL_VALUES_LESS (mv, bcv));
exp_domain = get_domain (0, false, false,
diff --git a/gcc/tree-cfg.c b/gcc/tree-cfg.c
index 066fcec5ae3..fee1ede809e 100644
--- a/gcc/tree-cfg.c
+++ b/gcc/tree-cfg.c
@@ -68,6 +68,8 @@ along with GCC; see the file COPYING3. If not see
#include "tree-ssa-live.h"
#include "omp-low.h"
#include "tree-cfgcleanup.h"
+#include "wide-int.h"
+#include "wide-int-print.h"
/* This file contains functions for building the Control Flow Graph (CFG)
for a function tree. */
@@ -1542,12 +1544,12 @@ group_case_labels_stmt (gimple stmt)
{
tree merge_case = gimple_switch_label (stmt, i);
basic_block merge_bb = label_to_block (CASE_LABEL (merge_case));
- double_int bhp1 = tree_to_double_int (base_high) + double_int_one;
+ wide_int bhp1 = wi::add (base_high, 1);
/* Merge the cases if they jump to the same place,
and their ranges are consecutive. */
if (merge_bb == base_bb
- && tree_to_double_int (CASE_LOW (merge_case)) == bhp1)
+ && wi::eq_p (CASE_LOW (merge_case), bhp1))
{
base_high = CASE_HIGH (merge_case) ?
CASE_HIGH (merge_case) : CASE_LOW (merge_case);
@@ -3651,7 +3653,7 @@ verify_gimple_assign_binary (gimple stmt)
only allow shifting by a constant multiple of the element size. */
if (!INTEGRAL_TYPE_P (TREE_TYPE (rhs1_type))
&& (TREE_CODE (rhs2) != INTEGER_CST
- || !div_if_zero_remainder (EXACT_DIV_EXPR, rhs2,
+ || !div_if_zero_remainder (rhs2,
TYPE_SIZE (TREE_TYPE (rhs1_type)))))
{
error ("non-element sized vector shift of floating point vector");
@@ -7335,13 +7337,13 @@ print_loop (FILE *file, struct loop *loop, int indent, int verbosity)
if (loop->any_upper_bound)
{
fprintf (file, ", upper_bound = ");
- dump_double_int (file, loop->nb_iterations_upper_bound, true);
+ print_decu (loop->nb_iterations_upper_bound, file);
}
if (loop->any_estimate)
{
fprintf (file, ", estimate = ");
- dump_double_int (file, loop->nb_iterations_estimate, true);
+ print_decu (loop->nb_iterations_estimate, file);
}
fprintf (file, ")\n");
diff --git a/gcc/tree-chrec.c b/gcc/tree-chrec.c
index b9350f015e2..c78d9410429 100644
--- a/gcc/tree-chrec.c
+++ b/gcc/tree-chrec.c
@@ -480,7 +480,6 @@ chrec_fold_multiply (tree type,
static tree
tree_fold_binomial (tree type, tree n, unsigned int k)
{
- double_int num, denom, idx, di_res;
bool overflow;
unsigned int i;
tree res;
@@ -491,21 +490,18 @@ tree_fold_binomial (tree type, tree n, unsigned int k)
if (k == 1)
return fold_convert (type, n);
- /* Numerator = n. */
- num = TREE_INT_CST (n);
-
/* Check that k <= n. */
- if (num.ult (double_int::from_uhwi (k)))
+ if (wi::ltu_p (n, k))
return NULL_TREE;
/* Denominator = 2. */
- denom = double_int::from_uhwi (2);
+ wide_int denom = wi::two (TYPE_PRECISION (TREE_TYPE (n)));
/* Index = Numerator-1. */
- idx = num - double_int_one;
+ wide_int idx = wi::sub (n, 1);
/* Numerator = Numerator*Index = n*(n-1). */
- num = num.mul_with_sign (idx, false, &overflow);
+ wide_int num = wi::smul (n, idx, &overflow);
if (overflow)
return NULL_TREE;
@@ -515,17 +511,17 @@ tree_fold_binomial (tree type, tree n, unsigned int k)
--idx;
/* Numerator *= Index. */
- num = num.mul_with_sign (idx, false, &overflow);
+ num = wi::smul (num, idx, &overflow);
if (overflow)
return NULL_TREE;
/* Denominator *= i. */
- denom *= double_int::from_uhwi (i);
+ denom *= i;
}
/* Result = Numerator / Denominator. */
- di_res = num.div (denom, true, EXACT_DIV_EXPR);
- res = build_int_cst_wide (type, di_res.low, di_res.high);
+ wide_int di_res = wi::udiv_trunc (num, denom);
+ res = wide_int_to_tree (type, di_res);
return int_fits_type_p (res, type) ? res : NULL_TREE;
}
diff --git a/gcc/tree-core.h b/gcc/tree-core.h
index 373a9d689e0..72ef8e143a2 100644
--- a/gcc/tree-core.h
+++ b/gcc/tree-core.h
@@ -758,11 +758,31 @@ struct GTY(()) tree_base {
of the field must be large enough to hold addr_space_t values. */
unsigned address_space : 8;
} bits;
+
/* The following fields are present in tree_base to save space. The
nodes using them do not require any of the flags above and so can
make better use of the 4-byte sized word. */
+
+ /* The number of HOST_WIDE_INTs in an INTEGER_CST. */
+ struct {
+ /* The number of HOST_WIDE_INTs if the INTEGER_CST is accessed in
+ its native precision. */
+ unsigned char unextended;
+
+ /* The number of HOST_WIDE_INTs if the INTEGER_CST is extended to
+ wider precisions based on its TYPE_SIGN. */
+ unsigned char extended;
+
+ /* The number of HOST_WIDE_INTs if the INTEGER_CST is accessed in
+ offset_int precision, with smaller integers being extended
+ according to their TYPE_SIGN. This is equal to one of the two
+ fields above but is cached for speed. */
+ unsigned char offset;
+ } int_length;
+
/* VEC length. This field is only used with TREE_VEC. */
int length;
+
/* SSA version number. This field is only used with SSA_NAME. */
unsigned int version;
} GTY((skip(""))) u;
@@ -1051,7 +1071,7 @@ struct GTY(()) tree_common {
struct GTY(()) tree_int_cst {
struct tree_typed typed;
- double_int int_cst;
+ HOST_WIDE_INT val[1];
};
diff --git a/gcc/tree-data-ref.c b/gcc/tree-data-ref.c
index 01d0a7a79d8..55dbf6a65d3 100644
--- a/gcc/tree-data-ref.c
+++ b/gcc/tree-data-ref.c
@@ -783,8 +783,8 @@ dr_analyze_innermost (struct data_reference *dr, struct loop *nest)
{
if (!integer_zerop (TREE_OPERAND (base, 1)))
{
- double_int moff = mem_ref_offset (base);
- tree mofft = double_int_to_tree (sizetype, moff);
+ offset_int moff = mem_ref_offset (base);
+ tree mofft = wide_int_to_tree (sizetype, moff);
if (!poffset)
poffset = mofft;
else
@@ -1380,10 +1380,10 @@ dr_may_alias_p (const struct data_reference *a, const struct data_reference *b,
if (!loop_nest)
{
aff_tree off1, off2;
- double_int size1, size2;
+ widest_int size1, size2;
get_inner_reference_aff (DR_REF (a), &off1, &size1);
get_inner_reference_aff (DR_REF (b), &off2, &size2);
- aff_combination_scale (&off1, double_int_minus_one);
+ aff_combination_scale (&off1, -1);
aff_combination_add (&off2, &off1);
if (aff_comb_cannot_overlap_p (&off2, size1, size2))
return false;
@@ -1758,15 +1758,15 @@ analyze_ziv_subscript (tree chrec_a,
static tree
max_stmt_executions_tree (struct loop *loop)
{
- double_int nit;
+ widest_int nit;
if (!max_stmt_executions (loop, &nit))
return chrec_dont_know;
- if (!double_int_fits_to_tree_p (unsigned_type_node, nit))
+ if (!wi::fits_to_tree_p (nit, unsigned_type_node))
return chrec_dont_know;
- return double_int_to_tree (unsigned_type_node, nit);
+ return wide_int_to_tree (unsigned_type_node, nit);
}
/* Determine whether the CHREC is always positive/negative. If the expression
diff --git a/gcc/tree-dfa.c b/gcc/tree-dfa.c
index a1f27587487..77f3cc0a98d 100644
--- a/gcc/tree-dfa.c
+++ b/gcc/tree-dfa.c
@@ -48,6 +48,7 @@ along with GCC; see the file COPYING3. If not see
#include "tree-inline.h"
#include "tree-pass.h"
#include "params.h"
+#include "wide-int.h"
/* Build and maintain data flow information for trees. */
@@ -389,10 +390,10 @@ get_ref_base_and_extent (tree exp, HOST_WIDE_INT *poffset,
HOST_WIDE_INT *psize,
HOST_WIDE_INT *pmax_size)
{
- double_int bitsize = double_int_minus_one;
- double_int maxsize;
+ offset_int bitsize = -1;
+ offset_int maxsize;
tree size_tree = NULL_TREE;
- double_int bit_offset = double_int_zero;
+ offset_int bit_offset = 0;
bool seen_variable_array_ref = false;
/* First get the final access size from just the outermost expression. */
@@ -406,11 +407,11 @@ get_ref_base_and_extent (tree exp, HOST_WIDE_INT *poffset,
if (mode == BLKmode)
size_tree = TYPE_SIZE (TREE_TYPE (exp));
else
- bitsize = double_int::from_uhwi (GET_MODE_BITSIZE (mode));
+ bitsize = int (GET_MODE_BITSIZE (mode));
}
if (size_tree != NULL_TREE
&& TREE_CODE (size_tree) == INTEGER_CST)
- bitsize = tree_to_double_int (size_tree);
+ bitsize = wi::to_offset (size_tree);
/* Initially, maxsize is the same as the accessed element size.
In the following it will only grow (or become -1). */
@@ -423,7 +424,7 @@ get_ref_base_and_extent (tree exp, HOST_WIDE_INT *poffset,
switch (TREE_CODE (exp))
{
case BIT_FIELD_REF:
- bit_offset += tree_to_double_int (TREE_OPERAND (exp, 2));
+ bit_offset += wi::to_offset (TREE_OPERAND (exp, 2));
break;
case COMPONENT_REF:
@@ -433,17 +434,16 @@ get_ref_base_and_extent (tree exp, HOST_WIDE_INT *poffset,
if (this_offset && TREE_CODE (this_offset) == INTEGER_CST)
{
- double_int doffset = tree_to_double_int (this_offset);
- doffset = doffset.lshift (BITS_PER_UNIT == 8
- ? 3 : exact_log2 (BITS_PER_UNIT));
- doffset += tree_to_double_int (DECL_FIELD_BIT_OFFSET (field));
- bit_offset = bit_offset + doffset;
+ offset_int woffset = wi::lshift (wi::to_offset (this_offset),
+ LOG2_BITS_PER_UNIT);
+ woffset += wi::to_offset (DECL_FIELD_BIT_OFFSET (field));
+ bit_offset += woffset;
/* If we had seen a variable array ref already and we just
referenced the last field of a struct or a union member
then we have to adjust maxsize by the padding at the end
of our field. */
- if (seen_variable_array_ref && !maxsize.is_minus_one ())
+ if (seen_variable_array_ref && maxsize != -1)
{
tree stype = TREE_TYPE (TREE_OPERAND (exp, 0));
tree next = DECL_CHAIN (field);
@@ -458,16 +458,13 @@ get_ref_base_and_extent (tree exp, HOST_WIDE_INT *poffset,
|| TREE_CODE (fsize) != INTEGER_CST
|| ssize == NULL
|| TREE_CODE (ssize) != INTEGER_CST)
- maxsize = double_int_minus_one;
+ maxsize = -1;
else
{
- double_int tem = tree_to_double_int (ssize)
- - tree_to_double_int (fsize);
- if (BITS_PER_UNIT == 8)
- tem = tem.lshift (3);
- else
- tem *= double_int::from_uhwi (BITS_PER_UNIT);
- tem -= doffset;
+ offset_int tem = (wi::to_offset (ssize)
+ - wi::to_offset (fsize));
+ tem = wi::lshift (tem, LOG2_BITS_PER_UNIT);
+ tem -= woffset;
maxsize += tem;
}
}
@@ -479,12 +476,12 @@ get_ref_base_and_extent (tree exp, HOST_WIDE_INT *poffset,
/* We need to adjust maxsize to the whole structure bitsize.
But we can subtract any constant offset seen so far,
because that would get us out of the structure otherwise. */
- if (!maxsize.is_minus_one ()
+ if (maxsize != -1
&& csize
&& TREE_CODE (csize) == INTEGER_CST)
- maxsize = tree_to_double_int (csize) - bit_offset;
+ maxsize = wi::to_offset (csize) - bit_offset;
else
- maxsize = double_int_minus_one;
+ maxsize = -1;
}
}
break;
@@ -502,13 +499,12 @@ get_ref_base_and_extent (tree exp, HOST_WIDE_INT *poffset,
&& (unit_size = array_ref_element_size (exp),
TREE_CODE (unit_size) == INTEGER_CST))
{
- double_int doffset
- = (TREE_INT_CST (index) - TREE_INT_CST (low_bound))
- .sext (TYPE_PRECISION (TREE_TYPE (index)));
- doffset *= tree_to_double_int (unit_size);
- doffset = doffset.lshift (BITS_PER_UNIT == 8
- ? 3 : exact_log2 (BITS_PER_UNIT));
- bit_offset = bit_offset + doffset;
+ offset_int woffset
+ = wi::sext (wi::to_offset (index) - wi::to_offset (low_bound),
+ TYPE_PRECISION (TREE_TYPE (index)));
+ woffset *= wi::to_offset (unit_size);
+ woffset = wi::lshift (woffset, LOG2_BITS_PER_UNIT);
+ bit_offset += woffset;
/* An array ref with a constant index up in the structure
hierarchy will constrain the size of any variable array ref
@@ -521,12 +517,12 @@ get_ref_base_and_extent (tree exp, HOST_WIDE_INT *poffset,
/* We need to adjust maxsize to the whole array bitsize.
But we can subtract any constant offset seen so far,
because that would get us outside of the array otherwise. */
- if (!maxsize.is_minus_one ()
+ if (maxsize != -1
&& asize
&& TREE_CODE (asize) == INTEGER_CST)
- maxsize = tree_to_double_int (asize) - bit_offset;
+ maxsize = wi::to_offset (asize) - bit_offset;
else
- maxsize = double_int_minus_one;
+ maxsize = -1;
/* Remember that we have seen an array ref with a variable
index. */
@@ -552,8 +548,8 @@ get_ref_base_and_extent (tree exp, HOST_WIDE_INT *poffset,
&& (TMR_INDEX (exp) || TMR_INDEX2 (exp)))
{
exp = TREE_OPERAND (TMR_BASE (exp), 0);
- bit_offset = double_int_zero;
- maxsize = double_int_minus_one;
+ bit_offset = 0;
+ maxsize = -1;
goto done;
}
/* Fallthru. */
@@ -569,12 +565,12 @@ get_ref_base_and_extent (tree exp, HOST_WIDE_INT *poffset,
base type boundary. This needs to include possible trailing
padding that is there for alignment purposes. */
if (seen_variable_array_ref
- && !maxsize.is_minus_one ()
+ && maxsize != -1
&& (TYPE_SIZE (TREE_TYPE (exp)) == NULL_TREE
|| TREE_CODE (TYPE_SIZE (TREE_TYPE (exp))) != INTEGER_CST
|| (bit_offset + maxsize
- == tree_to_double_int (TYPE_SIZE (TREE_TYPE (exp))))))
- maxsize = double_int_minus_one;
+ == wi::to_offset (TYPE_SIZE (TREE_TYPE (exp))))))
+ maxsize = -1;
/* Hand back the decl for MEM[&decl, off]. */
if (TREE_CODE (TREE_OPERAND (exp, 0)) == ADDR_EXPR)
@@ -583,11 +579,10 @@ get_ref_base_and_extent (tree exp, HOST_WIDE_INT *poffset,
exp = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
else
{
- double_int off = mem_ref_offset (exp);
- off = off.lshift (BITS_PER_UNIT == 8
- ? 3 : exact_log2 (BITS_PER_UNIT));
+ offset_int off = mem_ref_offset (exp);
+ off = wi::lshift (off, LOG2_BITS_PER_UNIT);
off += bit_offset;
- if (off.fits_shwi ())
+ if (wi::fits_shwi_p (off))
{
bit_offset = off;
exp = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
@@ -605,15 +600,15 @@ get_ref_base_and_extent (tree exp, HOST_WIDE_INT *poffset,
/* We need to deal with variable arrays ending structures. */
if (seen_variable_array_ref
- && !maxsize.is_minus_one ()
+ && maxsize != -1
&& (TYPE_SIZE (TREE_TYPE (exp)) == NULL_TREE
|| TREE_CODE (TYPE_SIZE (TREE_TYPE (exp))) != INTEGER_CST
|| (bit_offset + maxsize
- == tree_to_double_int (TYPE_SIZE (TREE_TYPE (exp))))))
- maxsize = double_int_minus_one;
+ == wi::to_offset (TYPE_SIZE (TREE_TYPE (exp))))))
+ maxsize = -1;
done:
- if (!bitsize.fits_shwi () || bitsize.is_negative ())
+ if (!wi::fits_shwi_p (bitsize) || wi::neg_p (bitsize))
{
*poffset = 0;
*psize = -1;
@@ -624,7 +619,7 @@ get_ref_base_and_extent (tree exp, HOST_WIDE_INT *poffset,
*psize = bitsize.to_shwi ();
- if (!bit_offset.fits_shwi ())
+ if (!wi::fits_shwi_p (bit_offset))
{
*poffset = 0;
*pmax_size = -1;
@@ -638,27 +633,27 @@ get_ref_base_and_extent (tree exp, HOST_WIDE_INT *poffset,
{
/* If maxsize is unknown adjust it according to the size of the
base decl. */
- if (maxsize.is_minus_one ()
+ if (maxsize == -1
&& DECL_SIZE (exp)
&& TREE_CODE (DECL_SIZE (exp)) == INTEGER_CST)
- maxsize = tree_to_double_int (DECL_SIZE (exp)) - bit_offset;
+ maxsize = wi::to_offset (DECL_SIZE (exp)) - bit_offset;
}
else if (CONSTANT_CLASS_P (exp))
{
/* If maxsize is unknown adjust it according to the size of the
base type constant. */
- if (maxsize.is_minus_one ()
+ if (maxsize == -1
&& TYPE_SIZE (TREE_TYPE (exp))
&& TREE_CODE (TYPE_SIZE (TREE_TYPE (exp))) == INTEGER_CST)
- maxsize = tree_to_double_int (TYPE_SIZE (TREE_TYPE (exp)))
- - bit_offset;
+ maxsize = (wi::to_offset (TYPE_SIZE (TREE_TYPE (exp)))
+ - bit_offset);
}
/* ??? Due to negative offsets in ARRAY_REF we can end up with
negative bit_offset here. We might want to store a zero offset
in this case. */
*poffset = bit_offset.to_shwi ();
- if (!maxsize.fits_shwi () || maxsize.is_negative ())
+ if (!wi::fits_shwi_p (maxsize) || wi::neg_p (maxsize))
*pmax_size = -1;
else
*pmax_size = maxsize.to_shwi ();
diff --git a/gcc/tree-dfa.h b/gcc/tree-dfa.h
index 86590ad462d..7b076c7f73f 100644
--- a/gcc/tree-dfa.h
+++ b/gcc/tree-dfa.h
@@ -102,11 +102,10 @@ get_addr_base_and_unit_offset_1 (tree exp, HOST_WIDE_INT *poffset,
&& (unit_size = array_ref_element_size (exp),
TREE_CODE (unit_size) == INTEGER_CST))
{
- double_int doffset
- = (TREE_INT_CST (index) - TREE_INT_CST (low_bound))
- .sext (TYPE_PRECISION (TREE_TYPE (index)));
- doffset *= tree_to_double_int (unit_size);
- byte_offset += doffset.to_shwi ();
+ offset_int woffset
+ = offset_int::from (wi::sub (index, low_bound), SIGNED);
+ woffset *= wi::to_offset (unit_size);
+ byte_offset += woffset.to_shwi ();
}
else
return NULL_TREE;
@@ -135,9 +134,8 @@ get_addr_base_and_unit_offset_1 (tree exp, HOST_WIDE_INT *poffset,
{
if (!integer_zerop (TREE_OPERAND (exp, 1)))
{
- double_int off = mem_ref_offset (exp);
- gcc_assert (off.high == -1 || off.high == 0);
- byte_offset += off.to_shwi ();
+ offset_int off = mem_ref_offset (exp);
+ byte_offset += off.to_short_addr ();
}
exp = TREE_OPERAND (base, 0);
}
@@ -158,9 +156,8 @@ get_addr_base_and_unit_offset_1 (tree exp, HOST_WIDE_INT *poffset,
return NULL_TREE;
if (!integer_zerop (TMR_OFFSET (exp)))
{
- double_int off = mem_ref_offset (exp);
- gcc_assert (off.high == -1 || off.high == 0);
- byte_offset += off.to_shwi ();
+ offset_int off = mem_ref_offset (exp);
+ byte_offset += off.to_short_addr ();
}
exp = TREE_OPERAND (base, 0);
}
diff --git a/gcc/tree-dump.c b/gcc/tree-dump.c
index fec493db3cf..9f89a04295a 100644
--- a/gcc/tree-dump.c
+++ b/gcc/tree-dump.c
@@ -30,6 +30,8 @@ along with GCC; see the file COPYING3. If not see
#include "tree-iterator.h"
#include "tree-pretty-print.h"
#include "tree-cfg.h"
+#include "wide-int.h"
+#include "wide-int-print.h"
static unsigned int queue (dump_info_p, const_tree, int);
static void dump_index (dump_info_p, unsigned int);
@@ -561,9 +563,8 @@ dequeue_and_dump (dump_info_p di)
break;
case INTEGER_CST:
- if (TREE_INT_CST_HIGH (t))
- dump_int (di, "high", TREE_INT_CST_HIGH (t));
- dump_int (di, "low", TREE_INT_CST_LOW (t));
+ fprintf (di->stream, "int: ");
+ print_decs (t, di->stream);
break;
case STRING_CST:
diff --git a/gcc/tree-eh.c b/gcc/tree-eh.c
index c1e0e77098b..624f2775f7c 100644
--- a/gcc/tree-eh.c
+++ b/gcc/tree-eh.c
@@ -2652,14 +2652,14 @@ tree_could_trap_p (tree expr)
if (TREE_CODE (TREE_OPERAND (expr, 0)) == ADDR_EXPR)
{
tree base = TREE_OPERAND (TREE_OPERAND (expr, 0), 0);
- double_int off = mem_ref_offset (expr);
- if (off.is_negative ())
+ offset_int off = mem_ref_offset (expr);
+ if (wi::neg_p (off, SIGNED))
return true;
if (TREE_CODE (base) == STRING_CST)
- return double_int::from_uhwi (TREE_STRING_LENGTH (base)).ule (off);
+ return wi::leu_p (TREE_STRING_LENGTH (base), off);
else if (DECL_SIZE_UNIT (base) == NULL_TREE
|| TREE_CODE (DECL_SIZE_UNIT (base)) != INTEGER_CST
- || tree_to_double_int (DECL_SIZE_UNIT (base)).ule (off))
+ || wi::leu_p (wi::to_offset (DECL_SIZE_UNIT (base)), off))
return true;
/* Now we are sure the first byte of the access is inside
the object. */
diff --git a/gcc/tree-inline.c b/gcc/tree-inline.c
index 802d25fd71a..987e81506b7 100644
--- a/gcc/tree-inline.c
+++ b/gcc/tree-inline.c
@@ -858,8 +858,7 @@ remap_gimple_op_r (tree *tp, int *walk_subtrees, void *data)
*walk_subtrees = 0;
else if (TREE_CODE (*tp) == INTEGER_CST)
- *tp = build_int_cst_wide (new_type, TREE_INT_CST_LOW (*tp),
- TREE_INT_CST_HIGH (*tp));
+ *tp = wide_int_to_tree (new_type, *tp);
else
{
*tp = copy_node (*tp);
@@ -1037,8 +1036,7 @@ copy_tree_body_r (tree *tp, int *walk_subtrees, void *data)
*walk_subtrees = 0;
else if (TREE_CODE (*tp) == INTEGER_CST)
- *tp = build_int_cst_wide (new_type, TREE_INT_CST_LOW (*tp),
- TREE_INT_CST_HIGH (*tp));
+ *tp = wide_int_to_tree (new_type, *tp);
else
{
*tp = copy_node (*tp);
diff --git a/gcc/tree-object-size.c b/gcc/tree-object-size.c
index 1596b7d1120..3c606b02d41 100644
--- a/gcc/tree-object-size.c
+++ b/gcc/tree-object-size.c
@@ -154,7 +154,7 @@ compute_object_offset (const_tree expr, const_tree var)
case MEM_REF:
gcc_assert (TREE_CODE (TREE_OPERAND (expr, 0)) == ADDR_EXPR);
- return double_int_to_tree (sizetype, mem_ref_offset (expr));
+ return wide_int_to_tree (sizetype, mem_ref_offset (expr));
default:
return error_mark_node;
@@ -204,10 +204,10 @@ addr_object_size (struct object_size_info *osi, const_tree ptr,
}
if (sz != unknown[object_size_type])
{
- double_int dsz = double_int::from_uhwi (sz) - mem_ref_offset (pt_var);
- if (dsz.is_negative ())
+ offset_int dsz = wi::sub (sz, mem_ref_offset (pt_var));
+ if (wi::neg_p (dsz))
sz = 0;
- else if (dsz.fits_uhwi ())
+ else if (wi::fits_uhwi_p (dsz))
sz = dsz.to_uhwi ();
else
sz = unknown[object_size_type];
diff --git a/gcc/tree-predcom.c b/gcc/tree-predcom.c
index 4d77401c706..2cb47db4e51 100644
--- a/gcc/tree-predcom.c
+++ b/gcc/tree-predcom.c
@@ -222,6 +222,7 @@ along with GCC; see the file COPYING3. If not see
#include "tree-pass.h"
#include "tree-affine.h"
#include "tree-inline.h"
+#include "wide-int-print.h"
/* The maximum number of iterations between the considered memory
references. */
@@ -249,7 +250,7 @@ typedef struct dref_d
unsigned distance;
/* Number of iterations offset from the first reference in the component. */
- double_int offset;
+ widest_int offset;
/* Number of the reference in a component, in dominance ordering. */
unsigned pos;
@@ -365,7 +366,7 @@ dump_dref (FILE *file, dref ref)
DR_IS_READ (ref->ref) ? "" : ", write");
fprintf (file, " offset ");
- dump_double_int (file, ref->offset, false);
+ print_decs (ref->offset, file);
fprintf (file, "\n");
fprintf (file, " distance %u\n", ref->distance);
@@ -638,7 +639,7 @@ aff_combination_dr_offset (struct data_reference *dr, aff_tree *offset)
tree_to_aff_combination_expand (DR_OFFSET (dr), type, offset,
&name_expansions);
- aff_combination_const (&delta, type, tree_to_double_int (DR_INIT (dr)));
+ aff_combination_const (&delta, type, wi::to_widest (DR_INIT (dr)));
aff_combination_add (offset, &delta);
}
@@ -650,7 +651,7 @@ aff_combination_dr_offset (struct data_reference *dr, aff_tree *offset)
static bool
determine_offset (struct data_reference *a, struct data_reference *b,
- double_int *off)
+ widest_int *off)
{
aff_tree diff, baseb, step;
tree typea, typeb;
@@ -671,7 +672,7 @@ determine_offset (struct data_reference *a, struct data_reference *b,
{
/* If the references have loop invariant address, check that they access
exactly the same location. */
- *off = double_int_zero;
+ *off = 0;
return (operand_equal_p (DR_OFFSET (a), DR_OFFSET (b), 0)
&& operand_equal_p (DR_INIT (a), DR_INIT (b), 0));
}
@@ -680,7 +681,7 @@ determine_offset (struct data_reference *a, struct data_reference *b,
is a multiple of step. */
aff_combination_dr_offset (a, &diff);
aff_combination_dr_offset (b, &baseb);
- aff_combination_scale (&baseb, double_int_minus_one);
+ aff_combination_scale (&baseb, -1);
aff_combination_add (&diff, &baseb);
tree_to_aff_combination_expand (DR_STEP (a), TREE_TYPE (DR_STEP (a)),
@@ -757,7 +758,7 @@ split_data_refs_to_components (struct loop *loop,
FOR_EACH_VEC_ELT (depends, i, ddr)
{
- double_int dummy_off;
+ widest_int dummy_off;
if (DDR_ARE_DEPENDENT (ddr) == chrec_known)
continue;
@@ -827,7 +828,7 @@ split_data_refs_to_components (struct loop *loop,
dataref = XCNEW (struct dref_d);
dataref->ref = dr;
dataref->stmt = DR_STMT (dr);
- dataref->offset = double_int_zero;
+ dataref->offset = 0;
dataref->distance = 0;
dataref->always_accessed
@@ -883,7 +884,7 @@ suitable_component_p (struct loop *loop, struct component *comp)
first = comp->refs[0];
ok = suitable_reference_p (first->ref, &comp->comp_step);
gcc_assert (ok);
- first->offset = double_int_zero;
+ first->offset = 0;
for (i = 1; comp->refs.iterate (i, &a); i++)
{
@@ -947,7 +948,7 @@ order_drefs (const void *a, const void *b)
{
const dref *const da = (const dref *) a;
const dref *const db = (const dref *) b;
- int offcmp = (*da)->offset.scmp ((*db)->offset);
+ int offcmp = wi::cmps ((*da)->offset, (*db)->offset);
if (offcmp != 0)
return offcmp;
@@ -969,16 +970,15 @@ static void
add_ref_to_chain (chain_p chain, dref ref)
{
dref root = get_chain_root (chain);
- double_int dist;
- gcc_assert (root->offset.sle (ref->offset));
- dist = ref->offset - root->offset;
- if (double_int::from_uhwi (MAX_DISTANCE).ule (dist))
+ gcc_assert (wi::les_p (root->offset, ref->offset));
+ widest_int dist = ref->offset - root->offset;
+ if (wi::leu_p (MAX_DISTANCE, dist))
{
free (ref);
return;
}
- gcc_assert (dist.fits_uhwi ());
+ gcc_assert (wi::fits_uhwi_p (dist));
chain->refs.safe_push (ref);
@@ -1073,7 +1073,7 @@ valid_initializer_p (struct data_reference *ref,
unsigned distance, struct data_reference *root)
{
aff_tree diff, base, step;
- double_int off;
+ widest_int off;
/* Both REF and ROOT must be accessing the same object. */
if (!operand_equal_p (DR_BASE_ADDRESS (ref), DR_BASE_ADDRESS (root), 0))
@@ -1093,7 +1093,7 @@ valid_initializer_p (struct data_reference *ref,
-DISTANCE-th iteration. */
aff_combination_dr_offset (root, &diff);
aff_combination_dr_offset (ref, &base);
- aff_combination_scale (&base, double_int_minus_one);
+ aff_combination_scale (&base, -1);
aff_combination_add (&diff, &base);
tree_to_aff_combination_expand (DR_STEP (root), TREE_TYPE (DR_STEP (root)),
@@ -1101,7 +1101,7 @@ valid_initializer_p (struct data_reference *ref,
if (!aff_combination_constant_multiple_p (&diff, &step, &off))
return false;
- if (off != double_int::from_uhwi (distance))
+ if (off != distance)
return false;
return true;
@@ -1229,7 +1229,7 @@ determine_roots_comp (struct loop *loop,
unsigned i;
dref a;
chain_p chain = NULL;
- double_int last_ofs = double_int_zero;
+ widest_int last_ofs = 0;
/* Invariants are handled specially. */
if (comp->comp_step == RS_INVARIANT)
@@ -1244,7 +1244,7 @@ determine_roots_comp (struct loop *loop,
FOR_EACH_VEC_ELT (comp->refs, i, a)
{
if (!chain || DR_IS_WRITE (a->ref)
- || double_int::from_uhwi (MAX_DISTANCE).ule (a->offset - last_ofs))
+ || wi::leu_p (MAX_DISTANCE, a->offset - last_ofs))
{
if (nontrivial_chain_p (chain))
{
diff --git a/gcc/tree-pretty-print.c b/gcc/tree-pretty-print.c
index bf3b96629c2..d516a9ddaa0 100644
--- a/gcc/tree-pretty-print.c
+++ b/gcc/tree-pretty-print.c
@@ -36,6 +36,7 @@ along with GCC; see the file COPYING3. If not see
#include "dumpfile.h"
#include "value-prof.h"
#include "predict.h"
+#include "wide-int-print.h"
#include <new> // For placement-new.
@@ -1238,9 +1239,22 @@ dump_generic_node (pretty_printer *buffer, tree node, int spc, int flags,
pp_wide_integer (buffer, TREE_INT_CST_LOW (node));
pp_string (buffer, "B"); /* pseudo-unit */
}
+ else if (tree_fits_shwi_p (node))
+ pp_wide_integer (buffer, tree_to_shwi (node));
+ else if (tree_fits_uhwi_p (node))
+ pp_unsigned_wide_integer (buffer, tree_to_uhwi (node));
else
- pp_double_int (buffer, tree_to_double_int (node),
- TYPE_UNSIGNED (TREE_TYPE (node)));
+ {
+ wide_int val = node;
+
+ if (wi::neg_p (val, TYPE_SIGN (TREE_TYPE (node))))
+ {
+ pp_minus (buffer);
+ val = -val;
+ }
+ print_hex (val, pp_buffer (buffer)->digit_buffer);
+ pp_string (buffer, pp_buffer (buffer)->digit_buffer);
+ }
if (TREE_OVERFLOW (node))
pp_string (buffer, "(OVF)");
break;
@@ -1489,7 +1503,7 @@ dump_generic_node (pretty_printer *buffer, tree node, int spc, int flags,
tree field, val;
bool is_struct_init = false;
bool is_array_init = false;
- double_int curidx = double_int_zero;
+ widest_int curidx;
pp_left_brace (buffer);
if (TREE_CLOBBER_P (node))
pp_string (buffer, "CLOBBER");
@@ -1504,7 +1518,7 @@ dump_generic_node (pretty_printer *buffer, tree node, int spc, int flags,
{
tree minv = TYPE_MIN_VALUE (TYPE_DOMAIN (TREE_TYPE (node)));
is_array_init = true;
- curidx = tree_to_double_int (minv);
+ curidx = wi::to_widest (minv);
}
FOR_EACH_CONSTRUCTOR_ELT (CONSTRUCTOR_ELTS (node), ix, field, val)
{
@@ -1518,7 +1532,7 @@ dump_generic_node (pretty_printer *buffer, tree node, int spc, int flags,
}
else if (is_array_init
&& (TREE_CODE (field) != INTEGER_CST
- || tree_to_double_int (field) != curidx))
+ || curidx != wi::to_widest (field)))
{
pp_left_bracket (buffer);
if (TREE_CODE (field) == RANGE_EXPR)
@@ -1529,17 +1543,17 @@ dump_generic_node (pretty_printer *buffer, tree node, int spc, int flags,
dump_generic_node (buffer, TREE_OPERAND (field, 1), spc,
flags, false);
if (TREE_CODE (TREE_OPERAND (field, 1)) == INTEGER_CST)
- curidx = tree_to_double_int (TREE_OPERAND (field, 1));
+ curidx = wi::to_widest (TREE_OPERAND (field, 1));
}
else
dump_generic_node (buffer, field, spc, flags, false);
if (TREE_CODE (field) == INTEGER_CST)
- curidx = tree_to_double_int (field);
+ curidx = wi::to_widest (field);
pp_string (buffer, "]=");
}
}
if (is_array_init)
- curidx += double_int_one;
+ curidx += 1;
if (val && TREE_CODE (val) == ADDR_EXPR)
if (TREE_CODE (TREE_OPERAND (val, 0)) == FUNCTION_DECL)
val = TREE_OPERAND (val, 0);
diff --git a/gcc/tree-scalar-evolution.c b/gcc/tree-scalar-evolution.c
index f1ddc24b1a2..af723082962 100644
--- a/gcc/tree-scalar-evolution.c
+++ b/gcc/tree-scalar-evolution.c
@@ -1428,7 +1428,7 @@ simplify_peeled_chrec (struct loop *loop, tree arg, tree init_cond)
tree_to_aff_combination_expand (left, type, &aff1, &peeled_chrec_map);
tree_to_aff_combination_expand (step_val, type, &aff2, &peeled_chrec_map);
free_affine_expand_cache (&peeled_chrec_map);
- aff_combination_scale (&aff2, double_int_minus_one);
+ aff_combination_scale (&aff2, -1);
aff_combination_add (&aff1, &aff2);
/* Transform (init, {left, right}_LOOP)_LOOP to {init, right}_LOOP
diff --git a/gcc/tree-ssa-address.c b/gcc/tree-ssa-address.c
index 741478c4de2..866afcf0265 100644
--- a/gcc/tree-ssa-address.c
+++ b/gcc/tree-ssa-address.c
@@ -208,15 +208,15 @@ addr_for_mem_ref (struct mem_address *addr, addr_space_t as,
struct mem_addr_template *templ;
if (addr->step && !integer_onep (addr->step))
- st = immed_double_int_const (tree_to_double_int (addr->step), pointer_mode);
+ st = immed_wide_int_const (addr->step, pointer_mode);
else
st = NULL_RTX;
if (addr->offset && !integer_zerop (addr->offset))
- off = immed_double_int_const
- (tree_to_double_int (addr->offset)
- .sext (TYPE_PRECISION (TREE_TYPE (addr->offset))),
- pointer_mode);
+ {
+ offset_int dc = offset_int::from (addr->offset, SIGNED);
+ off = immed_wide_int_const (dc, pointer_mode);
+ }
else
off = NULL_RTX;
@@ -424,7 +424,7 @@ move_fixed_address_to_symbol (struct mem_address *parts, aff_tree *addr)
for (i = 0; i < addr->n; i++)
{
- if (!addr->elts[i].coef.is_one ())
+ if (addr->elts[i].coef != 1)
continue;
val = addr->elts[i].val;
@@ -452,7 +452,7 @@ move_hint_to_base (tree type, struct mem_address *parts, tree base_hint,
for (i = 0; i < addr->n; i++)
{
- if (!addr->elts[i].coef.is_one ())
+ if (addr->elts[i].coef != 1)
continue;
val = addr->elts[i].val;
@@ -484,7 +484,7 @@ move_pointer_to_base (struct mem_address *parts, aff_tree *addr)
for (i = 0; i < addr->n; i++)
{
- if (!addr->elts[i].coef.is_one ())
+ if (addr->elts[i].coef != 1)
continue;
val = addr->elts[i].val;
@@ -520,7 +520,7 @@ move_variant_to_index (struct mem_address *parts, aff_tree *addr, tree v)
return;
parts->index = fold_convert (sizetype, val);
- parts->step = double_int_to_tree (sizetype, addr->elts[i].coef);
+ parts->step = wide_int_to_tree (sizetype, addr->elts[i].coef);
aff_combination_remove_elt (addr, i);
}
@@ -563,16 +563,15 @@ most_expensive_mult_to_index (tree type, struct mem_address *parts,
addr_space_t as = TYPE_ADDR_SPACE (type);
enum machine_mode address_mode = targetm.addr_space.address_mode (as);
HOST_WIDE_INT coef;
- double_int best_mult, amult, amult_neg;
unsigned best_mult_cost = 0, acost;
tree mult_elt = NULL_TREE, elt;
unsigned i, j;
enum tree_code op_code;
- best_mult = double_int_zero;
+ offset_int best_mult = 0;
for (i = 0; i < addr->n; i++)
{
- if (!addr->elts[i].coef.fits_shwi ())
+ if (!wi::fits_shwi_p (addr->elts[i].coef))
continue;
coef = addr->elts[i].coef.to_shwi ();
@@ -585,7 +584,7 @@ most_expensive_mult_to_index (tree type, struct mem_address *parts,
if (acost > best_mult_cost)
{
best_mult_cost = acost;
- best_mult = addr->elts[i].coef;
+ best_mult = offset_int::from (addr->elts[i].coef, SIGNED);
}
}
@@ -595,8 +594,8 @@ most_expensive_mult_to_index (tree type, struct mem_address *parts,
/* Collect elements multiplied by best_mult. */
for (i = j = 0; i < addr->n; i++)
{
- amult = addr->elts[i].coef;
- amult_neg = double_int_ext_for_comb (-amult, addr);
+ offset_int amult = offset_int::from (addr->elts[i].coef, SIGNED);
+ offset_int amult_neg = -wi::sext (amult, TYPE_PRECISION (addr->type));
if (amult == best_mult)
op_code = PLUS_EXPR;
@@ -620,7 +619,7 @@ most_expensive_mult_to_index (tree type, struct mem_address *parts,
addr->n = j;
parts->index = mult_elt;
- parts->step = double_int_to_tree (sizetype, best_mult);
+ parts->step = wide_int_to_tree (sizetype, best_mult);
}
/* Splits address ADDR for a memory access of type TYPE into PARTS.
@@ -648,8 +647,8 @@ addr_to_parts (tree type, aff_tree *addr, tree iv_cand,
parts->index = NULL_TREE;
parts->step = NULL_TREE;
- if (!addr->offset.is_zero ())
- parts->offset = double_int_to_tree (sizetype, addr->offset);
+ if (addr->offset != 0)
+ parts->offset = wide_int_to_tree (sizetype, addr->offset);
else
parts->offset = NULL_TREE;
@@ -680,9 +679,9 @@ addr_to_parts (tree type, aff_tree *addr, tree iv_cand,
for (i = 0; i < addr->n; i++)
{
part = fold_convert (sizetype, addr->elts[i].val);
- if (!addr->elts[i].coef.is_one ())
+ if (addr->elts[i].coef != 1)
part = fold_build2 (MULT_EXPR, sizetype, part,
- double_int_to_tree (sizetype, addr->elts[i].coef));
+ wide_int_to_tree (sizetype, addr->elts[i].coef));
add_to_parts (parts, part);
}
if (addr->rest)
@@ -890,8 +889,8 @@ copy_ref_info (tree new_ref, tree old_ref)
&& (TREE_INT_CST_LOW (TMR_STEP (new_ref))
< align)))))
{
- unsigned int inc = (mem_ref_offset (old_ref)
- - mem_ref_offset (new_ref)).low;
+ unsigned int inc = (mem_ref_offset (old_ref).to_short_addr ()
+ - mem_ref_offset (new_ref).to_short_addr ());
adjust_ptr_info_misalignment (new_pi, inc);
}
else
diff --git a/gcc/tree-ssa-alias.c b/gcc/tree-ssa-alias.c
index cd2c110b41a..7781d632266 100644
--- a/gcc/tree-ssa-alias.c
+++ b/gcc/tree-ssa-alias.c
@@ -1031,7 +1031,6 @@ indirect_ref_may_alias_decl_p (tree ref1 ATTRIBUTE_UNUSED, tree base1,
tree ptrtype1, dbase2;
HOST_WIDE_INT offset1p = offset1, offset2p = offset2;
HOST_WIDE_INT doffset1, doffset2;
- double_int moff;
gcc_checking_assert ((TREE_CODE (base1) == MEM_REF
|| TREE_CODE (base1) == TARGET_MEM_REF)
@@ -1041,12 +1040,12 @@ indirect_ref_may_alias_decl_p (tree ref1 ATTRIBUTE_UNUSED, tree base1,
/* The offset embedded in MEM_REFs can be negative. Bias them
so that the resulting offset adjustment is positive. */
- moff = mem_ref_offset (base1);
- moff = moff.lshift (BITS_PER_UNIT == 8 ? 3 : exact_log2 (BITS_PER_UNIT));
- if (moff.is_negative ())
- offset2p += (-moff).low;
+ offset_int moff = mem_ref_offset (base1);
+ moff = wi::lshift (moff, LOG2_BITS_PER_UNIT);
+ if (wi::neg_p (moff))
+ offset2p += (-moff).to_short_addr ();
else
- offset1p += moff.low;
+ offset1p += moff.to_short_addr ();
/* If only one reference is based on a variable, they cannot alias if
the pointer access is beyond the extent of the variable access.
@@ -1117,12 +1116,12 @@ indirect_ref_may_alias_decl_p (tree ref1 ATTRIBUTE_UNUSED, tree base1,
if (TREE_CODE (dbase2) == MEM_REF
|| TREE_CODE (dbase2) == TARGET_MEM_REF)
{
- double_int moff = mem_ref_offset (dbase2);
- moff = moff.lshift (BITS_PER_UNIT == 8 ? 3 : exact_log2 (BITS_PER_UNIT));
- if (moff.is_negative ())
- doffset1 -= (-moff).low;
+ offset_int moff = mem_ref_offset (dbase2);
+ moff = wi::lshift (moff, LOG2_BITS_PER_UNIT);
+ if (wi::neg_p (moff))
+ doffset1 -= (-moff).to_short_addr ();
else
- doffset2 -= moff.low;
+ doffset2 -= moff.to_short_addr ();
}
/* If either reference is view-converted, give up now. */
@@ -1212,21 +1211,21 @@ indirect_refs_may_alias_p (tree ref1 ATTRIBUTE_UNUSED, tree base1,
&& operand_equal_p (TMR_INDEX2 (base1),
TMR_INDEX2 (base2), 0))))))
{
- double_int moff;
+ offset_int moff;
/* The offset embedded in MEM_REFs can be negative. Bias them
so that the resulting offset adjustment is positive. */
moff = mem_ref_offset (base1);
- moff = moff.lshift (BITS_PER_UNIT == 8 ? 3 : exact_log2 (BITS_PER_UNIT));
- if (moff.is_negative ())
- offset2 += (-moff).low;
+ moff = wi::lshift (moff, LOG2_BITS_PER_UNIT);
+ if (wi::neg_p (moff))
+ offset2 += (-moff).to_short_addr ();
else
- offset1 += moff.low;
+ offset1 += moff.to_shwi ();
moff = mem_ref_offset (base2);
- moff = moff.lshift (BITS_PER_UNIT == 8 ? 3 : exact_log2 (BITS_PER_UNIT));
- if (moff.is_negative ())
- offset1 += (-moff).low;
+ moff = wi::lshift (moff, LOG2_BITS_PER_UNIT);
+ if (wi::neg_p (moff))
+ offset1 += (-moff).to_short_addr ();
else
- offset2 += moff.low;
+ offset2 += moff.to_short_addr ();
return ranges_overlap_p (offset1, max_size1, offset2, max_size2);
}
if (!ptr_derefs_may_alias_p (ptr1, ptr2))
@@ -2198,15 +2197,13 @@ stmt_kills_ref_p_1 (gimple stmt, ao_ref *ref)
if (!tree_int_cst_equal (TREE_OPERAND (base, 1),
TREE_OPERAND (ref->base, 1)))
{
- double_int off1 = mem_ref_offset (base);
- off1 = off1.lshift (BITS_PER_UNIT == 8
- ? 3 : exact_log2 (BITS_PER_UNIT));
- off1 = off1 + double_int::from_shwi (offset);
- double_int off2 = mem_ref_offset (ref->base);
- off2 = off2.lshift (BITS_PER_UNIT == 8
- ? 3 : exact_log2 (BITS_PER_UNIT));
- off2 = off2 + double_int::from_shwi (ref_offset);
- if (off1.fits_shwi () && off2.fits_shwi ())
+ offset_int off1 = mem_ref_offset (base);
+ off1 = wi::lshift (off1, LOG2_BITS_PER_UNIT);
+ off1 += offset;
+ offset_int off2 = mem_ref_offset (ref->base);
+ off2 = wi::lshift (off2, LOG2_BITS_PER_UNIT);
+ off2 += ref_offset;
+ if (wi::fits_shwi_p (off1) && wi::fits_shwi_p (off2))
{
offset = off1.to_shwi ();
ref_offset = off2.to_shwi ();
@@ -2259,12 +2256,11 @@ stmt_kills_ref_p_1 (gimple stmt, ao_ref *ref)
if (!tree_fits_shwi_p (len))
return false;
tree rbase = ref->base;
- double_int roffset = double_int::from_shwi (ref->offset);
+ offset_int roffset = ref->offset;
ao_ref dref;
ao_ref_init_from_ptr_and_size (&dref, dest, len);
tree base = ao_ref_base (&dref);
- double_int offset = double_int::from_shwi (dref.offset);
- double_int bpu = double_int::from_uhwi (BITS_PER_UNIT);
+ offset_int offset = dref.offset;
if (!base || dref.size == -1)
return false;
if (TREE_CODE (base) == MEM_REF)
@@ -2272,19 +2268,19 @@ stmt_kills_ref_p_1 (gimple stmt, ao_ref *ref)
if (TREE_CODE (rbase) != MEM_REF)
return false;
// Compare pointers.
- offset += bpu * mem_ref_offset (base);
- roffset += bpu * mem_ref_offset (rbase);
+ offset += wi::lshift (mem_ref_offset (base),
+ LOG2_BITS_PER_UNIT);
+ roffset += wi::lshift (mem_ref_offset (rbase),
+ LOG2_BITS_PER_UNIT);
base = TREE_OPERAND (base, 0);
rbase = TREE_OPERAND (rbase, 0);
}
- if (base == rbase)
- {
- double_int size = bpu * tree_to_double_int (len);
- double_int rsize = double_int::from_uhwi (ref->max_size);
- if (offset.sle (roffset)
- && (roffset + rsize).sle (offset + size))
- return true;
- }
+ if (base == rbase
+ && wi::les_p (offset, roffset)
+ && wi::les_p (roffset + ref->max_size,
+ offset + wi::lshift (wi::to_offset (len),
+ LOG2_BITS_PER_UNIT)))
+ return true;
break;
}
diff --git a/gcc/tree-ssa-ccp.c b/gcc/tree-ssa-ccp.c
index 9e1b6aeb5bc..d7410122c46 100644
--- a/gcc/tree-ssa-ccp.c
+++ b/gcc/tree-ssa-ccp.c
@@ -98,6 +98,15 @@ along with GCC; see the file COPYING3. If not see
array CONST_VAL[i].VALUE. That is fed into substitute_and_fold for
final substitution and folding.
+ This algorithm uses wide-ints at the max precision of the target.
+ This means that, with one uninteresting exception, variables with
+ UNSIGNED types never go to VARYING because the bits above the
+ precision of the type of the variable are always zero. The
+ uninteresting case is a variable of UNSIGNED type that has the
+ maximum precision of the target. Such variables can go to VARYING,
+ but this causes no loss of information since these variables will
+ never be extended.
+
References:
Constant propagation with conditional branches,
@@ -144,6 +153,7 @@ along with GCC; see the file COPYING3. If not see
#include "diagnostic-core.h"
#include "dbgcnt.h"
#include "params.h"
+#include "wide-int-print.h"
/* Possible lattice values. */
@@ -162,9 +172,11 @@ struct prop_value_d {
/* Propagated value. */
tree value;
- /* Mask that applies to the propagated value during CCP. For
- X with a CONSTANT lattice value X & ~mask == value & ~mask. */
- double_int mask;
+ /* Mask that applies to the propagated value during CCP. For X
+ with a CONSTANT lattice value X & ~mask == value & ~mask. The
+ zero bits in the mask cover constant values. The ones mean no
+ information. */
+ widest_int mask;
};
typedef struct prop_value_d prop_value_t;
@@ -199,18 +211,20 @@ dump_lattice_value (FILE *outf, const char *prefix, prop_value_t val)
break;
case CONSTANT:
if (TREE_CODE (val.value) != INTEGER_CST
- || val.mask.is_zero ())
+ || val.mask == 0)
{
fprintf (outf, "%sCONSTANT ", prefix);
print_generic_expr (outf, val.value, dump_flags);
}
else
{
- double_int cval = tree_to_double_int (val.value).and_not (val.mask);
- fprintf (outf, "%sCONSTANT " HOST_WIDE_INT_PRINT_DOUBLE_HEX,
- prefix, cval.high, cval.low);
- fprintf (outf, " (" HOST_WIDE_INT_PRINT_DOUBLE_HEX ")",
- val.mask.high, val.mask.low);
+ widest_int cval = wi::bit_and_not (wi::to_widest (val.value),
+ val.mask);
+ fprintf (outf, "%sCONSTANT ", prefix);
+ print_hex (cval, outf);
+ fprintf (outf, " (");
+ print_hex (val.mask, outf);
+ fprintf (outf, ")");
}
break;
default:
@@ -230,6 +244,14 @@ debug_lattice_value (prop_value_t val)
fprintf (stderr, "\n");
}
+/* Extend NONZERO_BITS to a full mask, with the upper bits being set. */
+
+static widest_int
+extend_mask (const wide_int &nonzero_bits)
+{
+ return (wi::mask <widest_int> (wi::get_precision (nonzero_bits), true)
+ | widest_int::from (nonzero_bits, UNSIGNED));
+}
/* Compute a default value for variable VAR and store it in the
CONST_VAL array. The following rules are used to get default
@@ -252,7 +274,7 @@ debug_lattice_value (prop_value_t val)
static prop_value_t
get_default_value (tree var)
{
- prop_value_t val = { UNINITIALIZED, NULL_TREE, { 0, 0 } };
+ prop_value_t val = { UNINITIALIZED, NULL_TREE, 0 };
gimple stmt;
stmt = SSA_NAME_DEF_STMT (var);
@@ -269,18 +291,15 @@ get_default_value (tree var)
else
{
val.lattice_val = VARYING;
- val.mask = double_int_minus_one;
+ val.mask = -1;
if (flag_tree_bit_ccp)
{
- double_int nonzero_bits = get_nonzero_bits (var);
- double_int mask
- = double_int::mask (TYPE_PRECISION (TREE_TYPE (var)));
- if (nonzero_bits != double_int_minus_one && nonzero_bits != mask)
+ wide_int nonzero_bits = get_nonzero_bits (var);
+ if (nonzero_bits != -1)
{
val.lattice_val = CONSTANT;
val.value = build_zero_cst (TREE_TYPE (var));
- /* CCP wants the bits above precision set. */
- val.mask = nonzero_bits | ~mask;
+ val.mask = extend_mask (nonzero_bits);
}
}
}
@@ -314,7 +333,7 @@ get_default_value (tree var)
{
/* Otherwise, VAR will never take on a constant value. */
val.lattice_val = VARYING;
- val.mask = double_int_minus_one;
+ val.mask = -1;
}
return val;
@@ -357,7 +376,7 @@ get_constant_value (tree var)
if (val
&& val->lattice_val == CONSTANT
&& (TREE_CODE (val->value) != INTEGER_CST
- || val->mask.is_zero ()))
+ || val->mask == 0))
return val->value;
return NULL_TREE;
}
@@ -371,7 +390,7 @@ set_value_varying (tree var)
val->lattice_val = VARYING;
val->value = NULL_TREE;
- val->mask = double_int_minus_one;
+ val->mask = -1;
}
/* For float types, modify the value of VAL to make ccp work correctly
@@ -455,8 +474,8 @@ valid_lattice_transition (prop_value_t old_val, prop_value_t new_val)
/* Bit-lattices have to agree in the still valid bits. */
if (TREE_CODE (old_val.value) == INTEGER_CST
&& TREE_CODE (new_val.value) == INTEGER_CST)
- return tree_to_double_int (old_val.value).and_not (new_val.mask)
- == tree_to_double_int (new_val.value).and_not (new_val.mask);
+ return (wi::bit_and_not (wi::to_widest (old_val.value), new_val.mask)
+ == wi::bit_and_not (wi::to_widest (new_val.value), new_val.mask));
/* Otherwise constant values have to agree. */
return operand_equal_p (old_val.value, new_val.value, 0);
@@ -481,9 +500,8 @@ set_lattice_value (tree var, prop_value_t new_val)
&& TREE_CODE (new_val.value) == INTEGER_CST
&& TREE_CODE (old_val->value) == INTEGER_CST)
{
- double_int diff;
- diff = tree_to_double_int (new_val.value)
- ^ tree_to_double_int (old_val->value);
+ widest_int diff = (wi::to_widest (new_val.value)
+ ^ wi::to_widest (old_val->value));
new_val.mask = new_val.mask | old_val->mask | diff;
}
@@ -517,21 +535,21 @@ set_lattice_value (tree var, prop_value_t new_val)
static prop_value_t get_value_for_expr (tree, bool);
static prop_value_t bit_value_binop (enum tree_code, tree, tree, tree);
-static void bit_value_binop_1 (enum tree_code, tree, double_int *, double_int *,
- tree, double_int, double_int,
- tree, double_int, double_int);
+static void bit_value_binop_1 (enum tree_code, tree, widest_int *, widest_int *,
+ tree, const widest_int &, const widest_int &,
+ tree, const widest_int &, const widest_int &);
-/* Return a double_int that can be used for bitwise simplifications
+/* Return a widest_int that can be used for bitwise simplifications
from VAL. */
-static double_int
-value_to_double_int (prop_value_t val)
+static widest_int
+value_to_wide_int (prop_value_t val)
{
if (val.value
&& TREE_CODE (val.value) == INTEGER_CST)
- return tree_to_double_int (val.value);
- else
- return double_int_zero;
+ return wi::to_widest (val.value);
+
+ return 0;
}
/* Return the value for the address expression EXPR based on alignment
@@ -549,14 +567,11 @@ get_value_from_alignment (tree expr)
get_pointer_alignment_1 (expr, &align, &bitpos);
val.mask = (POINTER_TYPE_P (type) || TYPE_UNSIGNED (type)
- ? double_int::mask (TYPE_PRECISION (type))
- : double_int_minus_one)
- .and_not (double_int::from_uhwi (align / BITS_PER_UNIT - 1));
- val.lattice_val = val.mask.is_minus_one () ? VARYING : CONSTANT;
+ ? wi::mask <widest_int> (TYPE_PRECISION (type), false)
+ : -1).and_not (align / BITS_PER_UNIT - 1);
+ val.lattice_val = val.mask == -1 ? VARYING : CONSTANT;
if (val.lattice_val == CONSTANT)
- val.value
- = double_int_to_tree (type,
- double_int::from_uhwi (bitpos / BITS_PER_UNIT));
+ val.value = build_int_cstu (type, bitpos / BITS_PER_UNIT);
else
val.value = NULL_TREE;
@@ -585,7 +600,7 @@ get_value_for_expr (tree expr, bool for_bits_p)
{
val.lattice_val = CONSTANT;
val.value = expr;
- val.mask = double_int_zero;
+ val.mask = 0;
canonicalize_value (&val);
}
else if (TREE_CODE (expr) == ADDR_EXPR)
@@ -593,7 +608,7 @@ get_value_for_expr (tree expr, bool for_bits_p)
else
{
val.lattice_val = VARYING;
- val.mask = double_int_minus_one;
+ val.mask = -1;
val.value = NULL_TREE;
}
return val;
@@ -842,7 +857,7 @@ do_dbg_cnt (void)
if (!dbg_cnt (ccp))
{
const_val[i].lattice_val = VARYING;
- const_val[i].mask = double_int_minus_one;
+ const_val[i].mask = -1;
const_val[i].value = NULL_TREE;
}
}
@@ -888,7 +903,7 @@ ccp_finalize (void)
{
/* Trailing mask bits specify the alignment, trailing value
bits the misalignment. */
- tem = val->mask.low;
+ tem = val->mask.to_uhwi ();
align = (tem & -tem);
if (align > 1)
set_ptr_info_alignment (get_ptr_info (name), align,
@@ -897,8 +912,9 @@ ccp_finalize (void)
}
else
{
- double_int nonzero_bits = val->mask;
- nonzero_bits = nonzero_bits | tree_to_double_int (val->value);
+ unsigned int precision = TYPE_PRECISION (TREE_TYPE (val->value));
+ wide_int nonzero_bits = wide_int::from (val->mask, precision,
+ UNSIGNED) | val->value;
nonzero_bits &= get_nonzero_bits (name);
set_nonzero_bits (name, nonzero_bits);
}
@@ -942,7 +958,7 @@ ccp_lattice_meet (prop_value_t *val1, prop_value_t *val2)
{
/* any M VARYING = VARYING. */
val1->lattice_val = VARYING;
- val1->mask = double_int_minus_one;
+ val1->mask = -1;
val1->value = NULL_TREE;
}
else if (val1->lattice_val == CONSTANT
@@ -955,10 +971,10 @@ ccp_lattice_meet (prop_value_t *val1, prop_value_t *val2)
For INTEGER_CSTs mask unequal bits. If no equal bits remain,
drop to varying. */
- val1->mask = val1->mask | val2->mask
- | (tree_to_double_int (val1->value)
- ^ tree_to_double_int (val2->value));
- if (val1->mask.is_minus_one ())
+ val1->mask = (val1->mask | val2->mask
+ | (wi::to_widest (val1->value)
+ ^ wi::to_widest (val2->value)));
+ if (val1->mask == -1)
{
val1->lattice_val = VARYING;
val1->value = NULL_TREE;
@@ -991,7 +1007,7 @@ ccp_lattice_meet (prop_value_t *val1, prop_value_t *val2)
{
/* Any other combination is VARYING. */
val1->lattice_val = VARYING;
- val1->mask = double_int_minus_one;
+ val1->mask = -1;
val1->value = NULL_TREE;
}
}
@@ -1146,8 +1162,8 @@ ccp_fold (gimple stmt)
static void
bit_value_unop_1 (enum tree_code code, tree type,
- double_int *val, double_int *mask,
- tree rtype, double_int rval, double_int rmask)
+ widest_int *val, widest_int *mask,
+ tree rtype, const widest_int &rval, const widest_int &rmask)
{
switch (code)
{
@@ -1158,33 +1174,32 @@ bit_value_unop_1 (enum tree_code code, tree type,
case NEGATE_EXPR:
{
- double_int temv, temm;
+ widest_int temv, temm;
/* Return ~rval + 1. */
bit_value_unop_1 (BIT_NOT_EXPR, type, &temv, &temm, type, rval, rmask);
bit_value_binop_1 (PLUS_EXPR, type, val, mask,
- type, temv, temm,
- type, double_int_one, double_int_zero);
+ type, temv, temm, type, 1, 0);
break;
}
CASE_CONVERT:
{
- bool uns;
+ signop sgn;
/* First extend mask and value according to the original type. */
- uns = TYPE_UNSIGNED (rtype);
- *mask = rmask.ext (TYPE_PRECISION (rtype), uns);
- *val = rval.ext (TYPE_PRECISION (rtype), uns);
+ sgn = TYPE_SIGN (rtype);
+ *mask = wi::ext (rmask, TYPE_PRECISION (rtype), sgn);
+ *val = wi::ext (rval, TYPE_PRECISION (rtype), sgn);
/* Then extend mask and value according to the target type. */
- uns = TYPE_UNSIGNED (type);
- *mask = (*mask).ext (TYPE_PRECISION (type), uns);
- *val = (*val).ext (TYPE_PRECISION (type), uns);
+ sgn = TYPE_SIGN (type);
+ *mask = wi::ext (*mask, TYPE_PRECISION (type), sgn);
+ *val = wi::ext (*val, TYPE_PRECISION (type), sgn);
break;
}
default:
- *mask = double_int_minus_one;
+ *mask = -1;
break;
}
}
@@ -1195,14 +1210,19 @@ bit_value_unop_1 (enum tree_code code, tree type,
static void
bit_value_binop_1 (enum tree_code code, tree type,
- double_int *val, double_int *mask,
- tree r1type, double_int r1val, double_int r1mask,
- tree r2type, double_int r2val, double_int r2mask)
+ widest_int *val, widest_int *mask,
+ tree r1type, const widest_int &r1val,
+ const widest_int &r1mask, tree r2type,
+ const widest_int &r2val, const widest_int &r2mask)
{
- bool uns = TYPE_UNSIGNED (type);
- /* Assume we'll get a constant result. Use an initial varying value,
- we fall back to varying in the end if necessary. */
- *mask = double_int_minus_one;
+ signop sgn = TYPE_SIGN (type);
+ int width = TYPE_PRECISION (type);
+ bool swap_p = false;
+
+ /* Assume we'll get a constant result. Use an initial non varying
+ value, we fall back to varying in the end if necessary. */
+ *mask = -1;
+
switch (code)
{
case BIT_AND_EXPR:
@@ -1228,13 +1248,35 @@ bit_value_binop_1 (enum tree_code code, tree type,
case LROTATE_EXPR:
case RROTATE_EXPR:
- if (r2mask.is_zero ())
+ if (r2mask == 0)
{
- HOST_WIDE_INT shift = r2val.low;
- if (code == RROTATE_EXPR)
- shift = -shift;
- *mask = r1mask.lrotate (shift, TYPE_PRECISION (type));
- *val = r1val.lrotate (shift, TYPE_PRECISION (type));
+ widest_int shift = r2val;
+ if (shift == 0)
+ {
+ *mask = r1mask;
+ *val = r1val;
+ }
+ else
+ {
+ if (wi::neg_p (shift))
+ {
+ shift = -shift;
+ if (code == RROTATE_EXPR)
+ code = LROTATE_EXPR;
+ else
+ code = RROTATE_EXPR;
+ }
+ if (code == RROTATE_EXPR)
+ {
+ *mask = wi::rrotate (r1mask, shift, width);
+ *val = wi::rrotate (r1val, shift, width);
+ }
+ else
+ {
+ *mask = wi::lrotate (r1mask, shift, width);
+ *val = wi::lrotate (r1val, shift, width);
+ }
+ }
}
break;
@@ -1243,31 +1285,34 @@ bit_value_binop_1 (enum tree_code code, tree type,
/* ??? We can handle partially known shift counts if we know
its sign. That way we can tell that (x << (y | 8)) & 255
is zero. */
- if (r2mask.is_zero ())
+ if (r2mask == 0)
{
- HOST_WIDE_INT shift = r2val.low;
- if (code == RSHIFT_EXPR)
- shift = -shift;
- /* We need to know if we are doing a left or a right shift
- to properly shift in zeros for left shift and unsigned
- right shifts and the sign bit for signed right shifts.
- For signed right shifts we shift in varying in case
- the sign bit was varying. */
- if (shift > 0)
- {
- *mask = r1mask.llshift (shift, TYPE_PRECISION (type));
- *val = r1val.llshift (shift, TYPE_PRECISION (type));
- }
- else if (shift < 0)
+ widest_int shift = r2val;
+ if (shift == 0)
{
- shift = -shift;
- *mask = r1mask.rshift (shift, TYPE_PRECISION (type), !uns);
- *val = r1val.rshift (shift, TYPE_PRECISION (type), !uns);
+ *mask = r1mask;
+ *val = r1val;
}
else
{
- *mask = r1mask;
- *val = r1val;
+ if (wi::neg_p (shift))
+ {
+ shift = -shift;
+ if (code == RSHIFT_EXPR)
+ code = LSHIFT_EXPR;
+ else
+ code = RSHIFT_EXPR;
+ }
+ if (code == RSHIFT_EXPR)
+ {
+ *mask = wi::rshift (wi::ext (r1mask, width, sgn), shift, sgn);
+ *val = wi::rshift (wi::ext (r1val, width, sgn), shift, sgn);
+ }
+ else
+ {
+ *mask = wi::ext (wi::lshift (r1mask, shift), width, sgn);
+ *val = wi::ext (wi::lshift (r1val, shift), width, sgn);
+ }
}
}
break;
@@ -1275,21 +1320,20 @@ bit_value_binop_1 (enum tree_code code, tree type,
case PLUS_EXPR:
case POINTER_PLUS_EXPR:
{
- double_int lo, hi;
/* Do the addition with unknown bits set to zero, to give carry-ins of
zero wherever possible. */
- lo = r1val.and_not (r1mask) + r2val.and_not (r2mask);
- lo = lo.ext (TYPE_PRECISION (type), uns);
+ widest_int lo = r1val.and_not (r1mask) + r2val.and_not (r2mask);
+ lo = wi::ext (lo, width, sgn);
/* Do the addition with unknown bits set to one, to give carry-ins of
one wherever possible. */
- hi = (r1val | r1mask) + (r2val | r2mask);
- hi = hi.ext (TYPE_PRECISION (type), uns);
+ widest_int hi = (r1val | r1mask) + (r2val | r2mask);
+ hi = wi::ext (hi, width, sgn);
/* Each bit in the result is known if (a) the corresponding bits in
both inputs are known, and (b) the carry-in to that bit position
is known. We can check condition (b) by seeing if we got the same
result with minimised carries as with maximised carries. */
*mask = r1mask | r2mask | (lo ^ hi);
- *mask = (*mask).ext (TYPE_PRECISION (type), uns);
+ *mask = wi::ext (*mask, width, sgn);
/* It shouldn't matter whether we choose lo or hi here. */
*val = lo;
break;
@@ -1297,7 +1341,7 @@ bit_value_binop_1 (enum tree_code code, tree type,
case MINUS_EXPR:
{
- double_int temv, temm;
+ widest_int temv, temm;
bit_value_unop_1 (NEGATE_EXPR, r2type, &temv, &temm,
r2type, r2val, r2mask);
bit_value_binop_1 (PLUS_EXPR, type, val, mask,
@@ -1310,18 +1354,18 @@ bit_value_binop_1 (enum tree_code code, tree type,
{
/* Just track trailing zeros in both operands and transfer
them to the other. */
- int r1tz = (r1val | r1mask).trailing_zeros ();
- int r2tz = (r2val | r2mask).trailing_zeros ();
- if (r1tz + r2tz >= HOST_BITS_PER_DOUBLE_INT)
+ int r1tz = wi::ctz (r1val | r1mask);
+ int r2tz = wi::ctz (r2val | r2mask);
+ if (r1tz + r2tz >= width)
{
- *mask = double_int_zero;
- *val = double_int_zero;
+ *mask = 0;
+ *val = 0;
}
else if (r1tz + r2tz > 0)
{
- *mask = ~double_int::mask (r1tz + r2tz);
- *mask = (*mask).ext (TYPE_PRECISION (type), uns);
- *val = double_int_zero;
+ *mask = wi::ext (wi::mask <widest_int> (r1tz + r2tz, true),
+ width, sgn);
+ *val = 0;
}
break;
}
@@ -1329,71 +1373,70 @@ bit_value_binop_1 (enum tree_code code, tree type,
case EQ_EXPR:
case NE_EXPR:
{
- double_int m = r1mask | r2mask;
+ widest_int m = r1mask | r2mask;
if (r1val.and_not (m) != r2val.and_not (m))
{
- *mask = double_int_zero;
- *val = ((code == EQ_EXPR) ? double_int_zero : double_int_one);
+ *mask = 0;
+ *val = ((code == EQ_EXPR) ? 0 : 1);
}
else
{
/* We know the result of a comparison is always one or zero. */
- *mask = double_int_one;
- *val = double_int_zero;
+ *mask = 1;
+ *val = 0;
}
break;
}
case GE_EXPR:
case GT_EXPR:
- {
- double_int tem = r1val;
- r1val = r2val;
- r2val = tem;
- tem = r1mask;
- r1mask = r2mask;
- r2mask = tem;
- code = swap_tree_comparison (code);
- }
- /* Fallthru. */
+ swap_p = true;
+ code = swap_tree_comparison (code);
+ /* Fall through. */
case LT_EXPR:
case LE_EXPR:
{
int minmax, maxmin;
+
+ const widest_int &o1val = swap_p ? r2val : r1val;
+ const widest_int &o1mask = swap_p ? r2mask : r1mask;
+ const widest_int &o2val = swap_p ? r1val : r2val;
+ const widest_int &o2mask = swap_p ? r1mask : r2mask;
+
/* If the most significant bits are not known we know nothing. */
- if (r1mask.is_negative () || r2mask.is_negative ())
+ if (wi::neg_p (o1mask) || wi::neg_p (o2mask))
break;
/* For comparisons the signedness is in the comparison operands. */
- uns = TYPE_UNSIGNED (r1type);
+ sgn = TYPE_SIGN (r1type);
/* If we know the most significant bits we know the values
value ranges by means of treating varying bits as zero
or one. Do a cross comparison of the max/min pairs. */
- maxmin = (r1val | r1mask).cmp (r2val.and_not (r2mask), uns);
- minmax = r1val.and_not (r1mask).cmp (r2val | r2mask, uns);
- if (maxmin < 0) /* r1 is less than r2. */
+ maxmin = wi::cmp (o1val | o1mask, o2val.and_not (o2mask), sgn);
+ minmax = wi::cmp (o1val.and_not (o1mask), o2val | o2mask, sgn);
+ if (maxmin < 0) /* o1 is less than o2. */
{
- *mask = double_int_zero;
- *val = double_int_one;
+ *mask = 0;
+ *val = 1;
}
- else if (minmax > 0) /* r1 is not less or equal to r2. */
+ else if (minmax > 0) /* o1 is not less or equal to o2. */
{
- *mask = double_int_zero;
- *val = double_int_zero;
+ *mask = 0;
+ *val = 0;
}
- else if (maxmin == minmax) /* r1 and r2 are equal. */
+ else if (maxmin == minmax) /* o1 and o2 are equal. */
{
/* This probably should never happen as we'd have
folded the thing during fully constant value folding. */
- *mask = double_int_zero;
- *val = (code == LE_EXPR ? double_int_one : double_int_zero);
+ *mask = 0;
+ *val = (code == LE_EXPR ? 1 : 0);
}
else
{
/* We know the result of a comparison is always one or zero. */
- *mask = double_int_one;
- *val = double_int_zero;
+ *mask = 1;
+ *val = 0;
}
break;
}
@@ -1409,7 +1452,7 @@ static prop_value_t
bit_value_unop (enum tree_code code, tree type, tree rhs)
{
prop_value_t rval = get_value_for_expr (rhs, true);
- double_int value, mask;
+ widest_int value, mask;
prop_value_t val;
if (rval.lattice_val == UNDEFINED)
@@ -1417,21 +1460,21 @@ bit_value_unop (enum tree_code code, tree type, tree rhs)
gcc_assert ((rval.lattice_val == CONSTANT
&& TREE_CODE (rval.value) == INTEGER_CST)
- || rval.mask.is_minus_one ());
+ || rval.mask == -1);
bit_value_unop_1 (code, type, &value, &mask,
- TREE_TYPE (rhs), value_to_double_int (rval), rval.mask);
- if (!mask.is_minus_one ())
+ TREE_TYPE (rhs), value_to_wide_int (rval), rval.mask);
+ if (mask != -1)
{
val.lattice_val = CONSTANT;
val.mask = mask;
/* ??? Delay building trees here. */
- val.value = double_int_to_tree (type, value);
+ val.value = wide_int_to_tree (type, value);
}
else
{
val.lattice_val = VARYING;
val.value = NULL_TREE;
- val.mask = double_int_minus_one;
+ val.mask = -1;
}
return val;
}
@@ -1444,7 +1487,7 @@ bit_value_binop (enum tree_code code, tree type, tree rhs1, tree rhs2)
{
prop_value_t r1val = get_value_for_expr (rhs1, true);
prop_value_t r2val = get_value_for_expr (rhs2, true);
- double_int value, mask;
+ widest_int value, mask;
prop_value_t val;
if (r1val.lattice_val == UNDEFINED
@@ -1452,31 +1495,31 @@ bit_value_binop (enum tree_code code, tree type, tree rhs1, tree rhs2)
{
val.lattice_val = VARYING;
val.value = NULL_TREE;
- val.mask = double_int_minus_one;
+ val.mask = -1;
return val;
}
gcc_assert ((r1val.lattice_val == CONSTANT
&& TREE_CODE (r1val.value) == INTEGER_CST)
- || r1val.mask.is_minus_one ());
+ || r1val.mask == -1);
gcc_assert ((r2val.lattice_val == CONSTANT
&& TREE_CODE (r2val.value) == INTEGER_CST)
- || r2val.mask.is_minus_one ());
+ || r2val.mask == -1);
bit_value_binop_1 (code, type, &value, &mask,
- TREE_TYPE (rhs1), value_to_double_int (r1val), r1val.mask,
- TREE_TYPE (rhs2), value_to_double_int (r2val), r2val.mask);
- if (!mask.is_minus_one ())
+ TREE_TYPE (rhs1), value_to_wide_int (r1val), r1val.mask,
+ TREE_TYPE (rhs2), value_to_wide_int (r2val), r2val.mask);
+ if (mask != -1)
{
val.lattice_val = CONSTANT;
val.mask = mask;
/* ??? Delay building trees here. */
- val.value = double_int_to_tree (type, value);
+ val.value = wide_int_to_tree (type, value);
}
else
{
val.lattice_val = VARYING;
val.value = NULL_TREE;
- val.mask = double_int_minus_one;
+ val.mask = -1;
}
return val;
}
@@ -1495,7 +1538,7 @@ bit_value_assume_aligned (gimple stmt, tree attr, prop_value_t ptrval,
tree align, misalign = NULL_TREE, type;
unsigned HOST_WIDE_INT aligni, misaligni = 0;
prop_value_t alignval;
- double_int value, mask;
+ widest_int value, mask;
prop_value_t val;
if (attr == NULL_TREE)
@@ -1514,7 +1557,7 @@ bit_value_assume_aligned (gimple stmt, tree attr, prop_value_t ptrval,
return ptrval;
gcc_assert ((ptrval.lattice_val == CONSTANT
&& TREE_CODE (ptrval.value) == INTEGER_CST)
- || ptrval.mask.is_minus_one ());
+ || ptrval.mask == -1);
if (attr == NULL_TREE)
{
/* Get aligni and misaligni from __builtin_assume_aligned. */
@@ -1564,23 +1607,23 @@ bit_value_assume_aligned (gimple stmt, tree attr, prop_value_t ptrval,
align = build_int_cst_type (type, -aligni);
alignval = get_value_for_expr (align, true);
bit_value_binop_1 (BIT_AND_EXPR, type, &value, &mask,
- type, value_to_double_int (ptrval), ptrval.mask,
- type, value_to_double_int (alignval), alignval.mask);
- if (!mask.is_minus_one ())
+ type, value_to_wide_int (ptrval), ptrval.mask,
+ type, value_to_wide_int (alignval), alignval.mask);
+ if (mask != -1)
{
val.lattice_val = CONSTANT;
val.mask = mask;
- gcc_assert ((mask.low & (aligni - 1)) == 0);
- gcc_assert ((value.low & (aligni - 1)) == 0);
- value.low |= misaligni;
+ gcc_assert ((mask.to_uhwi () & (aligni - 1)) == 0);
+ gcc_assert ((value.to_uhwi () & (aligni - 1)) == 0);
+ value |= misaligni;
/* ??? Delay building trees here. */
- val.value = double_int_to_tree (type, value);
+ val.value = wide_int_to_tree (type, value);
}
else
{
val.lattice_val = VARYING;
val.value = NULL_TREE;
- val.mask = double_int_minus_one;
+ val.mask = -1;
}
return val;
}
@@ -1632,7 +1675,7 @@ evaluate_stmt (gimple stmt)
/* The statement produced a constant value. */
val.lattice_val = CONSTANT;
val.value = simplified;
- val.mask = double_int_zero;
+ val.mask = 0;
}
}
/* If the statement is likely to have a VARYING result, then do not
@@ -1660,7 +1703,7 @@ evaluate_stmt (gimple stmt)
/* The statement produced a constant value. */
val.lattice_val = CONSTANT;
val.value = simplified;
- val.mask = double_int_zero;
+ val.mask = 0;
}
}
@@ -1672,7 +1715,7 @@ evaluate_stmt (gimple stmt)
enum gimple_code code = gimple_code (stmt);
val.lattice_val = VARYING;
val.value = NULL_TREE;
- val.mask = double_int_minus_one;
+ val.mask = -1;
if (code == GIMPLE_ASSIGN)
{
enum tree_code subcode = gimple_assign_rhs_code (stmt);
@@ -1728,9 +1771,8 @@ evaluate_stmt (gimple stmt)
case BUILT_IN_STRNDUP:
val.lattice_val = CONSTANT;
val.value = build_int_cst (TREE_TYPE (gimple_get_lhs (stmt)), 0);
- val.mask = double_int::from_shwi
- (~(((HOST_WIDE_INT) MALLOC_ABI_ALIGNMENT)
- / BITS_PER_UNIT - 1));
+ val.mask = ~((HOST_WIDE_INT) MALLOC_ABI_ALIGNMENT
+ / BITS_PER_UNIT - 1);
break;
case BUILT_IN_ALLOCA:
@@ -1740,8 +1782,7 @@ evaluate_stmt (gimple stmt)
: BIGGEST_ALIGNMENT);
val.lattice_val = CONSTANT;
val.value = build_int_cst (TREE_TYPE (gimple_get_lhs (stmt)), 0);
- val.mask = double_int::from_shwi (~(((HOST_WIDE_INT) align)
- / BITS_PER_UNIT - 1));
+ val.mask = ~((HOST_WIDE_INT) align / BITS_PER_UNIT - 1);
break;
/* These builtins return their first argument, unmodified. */
@@ -1775,7 +1816,7 @@ evaluate_stmt (gimple stmt)
{
val.lattice_val = CONSTANT;
val.value = build_int_cst (ptr_type_node, 0);
- val.mask = double_int::from_shwi (-aligni);
+ val.mask = -aligni;
}
}
break;
@@ -1809,28 +1850,25 @@ evaluate_stmt (gimple stmt)
&& TREE_CODE (gimple_get_lhs (stmt)) == SSA_NAME)
{
tree lhs = gimple_get_lhs (stmt);
- double_int nonzero_bits = get_nonzero_bits (lhs);
- double_int mask = double_int::mask (TYPE_PRECISION (TREE_TYPE (lhs)));
- if (nonzero_bits != double_int_minus_one && nonzero_bits != mask)
+ wide_int nonzero_bits = get_nonzero_bits (lhs);
+ if (nonzero_bits != -1)
{
if (!is_constant)
{
val.lattice_val = CONSTANT;
val.value = build_zero_cst (TREE_TYPE (lhs));
- /* CCP wants the bits above precision set. */
- val.mask = nonzero_bits | ~mask;
+ val.mask = extend_mask (nonzero_bits);
is_constant = true;
}
else
{
- double_int valv = tree_to_double_int (val.value);
- if (!(valv & ~nonzero_bits & mask).is_zero ())
- val.value = double_int_to_tree (TREE_TYPE (lhs),
- valv & nonzero_bits);
- if (nonzero_bits.is_zero ())
- val.mask = double_int_zero;
+ if (wi::bit_and_not (val.value, nonzero_bits) != 0)
+ val.value = wide_int_to_tree (TREE_TYPE (lhs),
+ nonzero_bits & val.value);
+ if (nonzero_bits == 0)
+ val.mask = 0;
else
- val.mask = val.mask & (nonzero_bits | ~mask);
+ val.mask = val.mask & extend_mask (nonzero_bits);
}
}
}
@@ -1843,12 +1881,12 @@ evaluate_stmt (gimple stmt)
if (likelyvalue == UNDEFINED)
{
val.lattice_val = likelyvalue;
- val.mask = double_int_zero;
+ val.mask = 0;
}
else
{
val.lattice_val = VARYING;
- val.mask = double_int_minus_one;
+ val.mask = -1;
}
val.value = NULL_TREE;
@@ -2030,7 +2068,7 @@ ccp_fold_stmt (gimple_stmt_iterator *gsi)
fold more conditionals here. */
val = evaluate_stmt (stmt);
if (val.lattice_val != CONSTANT
- || !val.mask.is_zero ())
+ || val.mask != 0)
return false;
if (dump_file)
@@ -2210,7 +2248,7 @@ visit_cond_stmt (gimple stmt, edge *taken_edge_p)
block = gimple_bb (stmt);
val = evaluate_stmt (stmt);
if (val.lattice_val != CONSTANT
- || !val.mask.is_zero ())
+ || val.mask != 0)
return SSA_PROP_VARYING;
/* Find which edge out of the conditional block will be taken and add it
@@ -2282,7 +2320,7 @@ ccp_visit_stmt (gimple stmt, edge *taken_edge_p, tree *output_p)
Mark them VARYING. */
FOR_EACH_SSA_TREE_OPERAND (def, stmt, iter, SSA_OP_ALL_DEFS)
{
- prop_value_t v = { VARYING, NULL_TREE, { -1, (HOST_WIDE_INT) -1 } };
+ prop_value_t v = { VARYING, NULL_TREE, -1 };
set_lattice_value (def, v);
}
diff --git a/gcc/tree-ssa-forwprop.c b/gcc/tree-ssa-forwprop.c
index 4ba3a736ec1..563abe0d2cc 100644
--- a/gcc/tree-ssa-forwprop.c
+++ b/gcc/tree-ssa-forwprop.c
@@ -829,9 +829,9 @@ forward_propagate_addr_expr_1 (tree name, tree def_rhs,
if ((def_rhs_base = get_addr_base_and_unit_offset (TREE_OPERAND (def_rhs, 0),
&def_rhs_offset)))
{
- double_int off = mem_ref_offset (lhs);
+ offset_int off = mem_ref_offset (lhs);
tree new_ptr;
- off += double_int::from_shwi (def_rhs_offset);
+ off += def_rhs_offset;
if (TREE_CODE (def_rhs_base) == MEM_REF)
{
off += mem_ref_offset (def_rhs_base);
@@ -841,7 +841,7 @@ forward_propagate_addr_expr_1 (tree name, tree def_rhs,
new_ptr = build_fold_addr_expr (def_rhs_base);
TREE_OPERAND (lhs, 0) = new_ptr;
TREE_OPERAND (lhs, 1)
- = double_int_to_tree (TREE_TYPE (TREE_OPERAND (lhs, 1)), off);
+ = wide_int_to_tree (TREE_TYPE (TREE_OPERAND (lhs, 1)), off);
tidy_after_forward_propagate_addr (use_stmt);
/* Continue propagating into the RHS if this was not the only use. */
if (single_use_p)
@@ -920,9 +920,9 @@ forward_propagate_addr_expr_1 (tree name, tree def_rhs,
if ((def_rhs_base = get_addr_base_and_unit_offset (TREE_OPERAND (def_rhs, 0),
&def_rhs_offset)))
{
- double_int off = mem_ref_offset (rhs);
+ offset_int off = mem_ref_offset (rhs);
tree new_ptr;
- off += double_int::from_shwi (def_rhs_offset);
+ off += def_rhs_offset;
if (TREE_CODE (def_rhs_base) == MEM_REF)
{
off += mem_ref_offset (def_rhs_base);
@@ -932,7 +932,7 @@ forward_propagate_addr_expr_1 (tree name, tree def_rhs,
new_ptr = build_fold_addr_expr (def_rhs_base);
TREE_OPERAND (rhs, 0) = new_ptr;
TREE_OPERAND (rhs, 1)
- = double_int_to_tree (TREE_TYPE (TREE_OPERAND (rhs, 1)), off);
+ = wide_int_to_tree (TREE_TYPE (TREE_OPERAND (rhs, 1)), off);
fold_stmt_inplace (use_stmt_gsi);
tidy_after_forward_propagate_addr (use_stmt);
return res;
@@ -1445,8 +1445,8 @@ constant_pointer_difference (tree p1, tree p2)
{
p = TREE_OPERAND (q, 0);
off = size_binop (PLUS_EXPR, off,
- double_int_to_tree (sizetype,
- mem_ref_offset (q)));
+ wide_int_to_tree (sizetype,
+ mem_ref_offset (q)));
}
else
{
@@ -2837,7 +2837,7 @@ associate_pointerplus_align (gimple_stmt_iterator *gsi)
if (gimple_assign_rhs1 (def_stmt) != ptr)
return false;
- algn = double_int_to_tree (TREE_TYPE (ptr), ~tree_to_double_int (algn));
+ algn = wide_int_to_tree (TREE_TYPE (ptr), wi::bit_not (algn));
gimple_assign_set_rhs_with_ops (gsi, BIT_AND_EXPR, ptr, algn);
fold_stmt_inplace (gsi);
update_stmt (stmt);
@@ -3098,8 +3098,10 @@ combine_conversions (gimple_stmt_iterator *gsi)
tree tem;
tem = fold_build2 (BIT_AND_EXPR, inside_type,
defop0,
- double_int_to_tree
- (inside_type, double_int::mask (inter_prec)));
+ wide_int_to_tree
+ (inside_type,
+ wi::mask (inter_prec, false,
+ TYPE_PRECISION (inside_type))));
if (!useless_type_conversion_p (type, inside_type))
{
tem = force_gimple_operand_gsi (gsi, tem, true, NULL_TREE, true,
diff --git a/gcc/tree-ssa-loop-im.c b/gcc/tree-ssa-loop-im.c
index 54156d7118b..5863127c493 100644
--- a/gcc/tree-ssa-loop-im.c
+++ b/gcc/tree-ssa-loop-im.c
@@ -1601,7 +1601,7 @@ mem_refs_may_alias_p (mem_ref_p mem1, mem_ref_p mem2,
/* Perform BASE + OFFSET analysis -- if MEM1 and MEM2 are based on the same
object and their offset differ in such a way that the locations cannot
overlap, then they cannot alias. */
- double_int size1, size2;
+ widest_int size1, size2;
aff_tree off1, off2;
/* Perform basic offset and type-based disambiguation. */
@@ -1617,7 +1617,7 @@ mem_refs_may_alias_p (mem_ref_p mem1, mem_ref_p mem2,
get_inner_reference_aff (mem2->mem.ref, &off2, &size2);
aff_combination_expand (&off1, ttae_cache);
aff_combination_expand (&off2, ttae_cache);
- aff_combination_scale (&off1, double_int_minus_one);
+ aff_combination_scale (&off1, -1);
aff_combination_add (&off2, &off1);
if (aff_comb_cannot_overlap_p (&off2, size1, size2))
diff --git a/gcc/tree-ssa-loop-ivcanon.c b/gcc/tree-ssa-loop-ivcanon.c
index 6f164538132..e332918fd16 100644
--- a/gcc/tree-ssa-loop-ivcanon.c
+++ b/gcc/tree-ssa-loop-ivcanon.c
@@ -490,7 +490,7 @@ remove_exits_and_undefined_stmts (struct loop *loop, unsigned int npeeled)
into unreachable (or trap when debugging experience is supposed
to be good). */
if (!elt->is_exit
- && elt->bound.ult (double_int::from_uhwi (npeeled)))
+ && wi::ltu_p (elt->bound, npeeled))
{
gimple_stmt_iterator gsi = gsi_for_stmt (elt->stmt);
gimple stmt = gimple_build_call
@@ -507,7 +507,7 @@ remove_exits_and_undefined_stmts (struct loop *loop, unsigned int npeeled)
}
/* If we know the exit will be taken after peeling, update. */
else if (elt->is_exit
- && elt->bound.ule (double_int::from_uhwi (npeeled)))
+ && wi::leu_p (elt->bound, npeeled))
{
basic_block bb = gimple_bb (elt->stmt);
edge exit_edge = EDGE_SUCC (bb, 0);
@@ -547,7 +547,7 @@ remove_redundant_iv_tests (struct loop *loop)
/* Exit is pointless if it won't be taken before loop reaches
upper bound. */
if (elt->is_exit && loop->any_upper_bound
- && loop->nb_iterations_upper_bound.ult (elt->bound))
+ && wi::ltu_p (loop->nb_iterations_upper_bound, elt->bound))
{
basic_block bb = gimple_bb (elt->stmt);
edge exit_edge = EDGE_SUCC (bb, 0);
@@ -564,8 +564,8 @@ remove_redundant_iv_tests (struct loop *loop)
|| !integer_zerop (niter.may_be_zero)
|| !niter.niter
|| TREE_CODE (niter.niter) != INTEGER_CST
- || !loop->nb_iterations_upper_bound.ult
- (tree_to_double_int (niter.niter)))
+ || !wi::ltu_p (loop->nb_iterations_upper_bound,
+ wi::to_widest (niter.niter)))
continue;
if (dump_file && (dump_flags & TDF_DETAILS))
@@ -946,7 +946,7 @@ canonicalize_loop_induction_variables (struct loop *loop,
by find_loop_niter_by_eval. Be sure to keep it for future. */
if (niter && TREE_CODE (niter) == INTEGER_CST)
{
- record_niter_bound (loop, tree_to_double_int (niter),
+ record_niter_bound (loop, wi::to_widest (niter),
exit == single_likely_exit (loop), true);
}
diff --git a/gcc/tree-ssa-loop-ivopts.c b/gcc/tree-ssa-loop-ivopts.c
index 8bc4e8fc791..b0d39271798 100644
--- a/gcc/tree-ssa-loop-ivopts.c
+++ b/gcc/tree-ssa-loop-ivopts.c
@@ -948,7 +948,7 @@ alloc_iv (tree base, tree step)
&& !DECL_P (TREE_OPERAND (base_object, 0)))
{
aff_tree comb;
- double_int size;
+ widest_int size;
base_object = get_inner_reference_aff (TREE_OPERAND (base_object, 0),
&comb, &size);
gcc_assert (base_object != NULL_TREE);
@@ -1611,19 +1611,19 @@ idx_record_use (tree base, tree *idx,
signedness of TOP and BOT. */
static bool
-constant_multiple_of (tree top, tree bot, double_int *mul)
+constant_multiple_of (tree top, tree bot, widest_int *mul)
{
tree mby;
enum tree_code code;
- double_int res, p0, p1;
unsigned precision = TYPE_PRECISION (TREE_TYPE (top));
+ widest_int res, p0, p1;
STRIP_NOPS (top);
STRIP_NOPS (bot);
if (operand_equal_p (top, bot, 0))
{
- *mul = double_int_one;
+ *mul = 1;
return true;
}
@@ -1638,7 +1638,7 @@ constant_multiple_of (tree top, tree bot, double_int *mul)
if (!constant_multiple_of (TREE_OPERAND (top, 0), bot, &res))
return false;
- *mul = (res * tree_to_double_int (mby)).sext (precision);
+ *mul = wi::sext (res * wi::to_widest (mby), precision);
return true;
case PLUS_EXPR:
@@ -1649,19 +1649,19 @@ constant_multiple_of (tree top, tree bot, double_int *mul)
if (code == MINUS_EXPR)
p1 = -p1;
- *mul = (p0 + p1).sext (precision);
+ *mul = wi::sext (p0 + p1, precision);
return true;
case INTEGER_CST:
if (TREE_CODE (bot) != INTEGER_CST)
return false;
- p0 = tree_to_double_int (top).sext (precision);
- p1 = tree_to_double_int (bot).sext (precision);
- if (p1.is_zero ())
+ p0 = widest_int::from (top, SIGNED);
+ p1 = widest_int::from (bot, SIGNED);
+ if (p1 == 0)
return false;
- *mul = p0.sdivmod (p1, FLOOR_DIV_EXPR, &res).sext (precision);
- return res.is_zero ();
+ *mul = wi::sext (wi::divmod_trunc (p0, p1, SIGNED, &res), precision);
+ return res == 0;
default:
return false;
@@ -3018,7 +3018,7 @@ get_computation_aff (struct loop *loop,
tree common_type, var;
tree uutype;
aff_tree cbase_aff, var_aff;
- double_int rat;
+ widest_int rat;
if (TYPE_PRECISION (utype) > TYPE_PRECISION (ctype))
{
@@ -3838,7 +3838,7 @@ ptr_difference_cost (struct ivopts_data *data,
type = signed_type_for (TREE_TYPE (e1));
tree_to_aff_combination (e1, type, &aff_e1);
tree_to_aff_combination (e2, type, &aff_e2);
- aff_combination_scale (&aff_e2, double_int_minus_one);
+ aff_combination_scale (&aff_e2, -1);
aff_combination_add (&aff_e1, &aff_e2);
return force_var_cost (data, aff_combination_to_tree (&aff_e1), depends_on);
@@ -3893,7 +3893,7 @@ difference_cost (struct ivopts_data *data,
type = signed_type_for (TREE_TYPE (e1));
tree_to_aff_combination (e1, type, &aff_e1);
tree_to_aff_combination (e2, type, &aff_e2);
- aff_combination_scale (&aff_e2, double_int_minus_one);
+ aff_combination_scale (&aff_e2, -1);
aff_combination_add (&aff_e1, &aff_e2);
return force_var_cost (data, aff_combination_to_tree (&aff_e1), depends_on);
@@ -4037,7 +4037,7 @@ get_loop_invariant_expr_id (struct ivopts_data *data, tree ubase,
tree_to_aff_combination (ub, TREE_TYPE (ub), &ubase_aff);
tree_to_aff_combination (cb, TREE_TYPE (cb), &cbase_aff);
- aff_combination_scale (&cbase_aff, double_int::from_shwi (-1 * ratio));
+ aff_combination_scale (&cbase_aff, -1 * ratio);
aff_combination_add (&ubase_aff, &cbase_aff);
expr = aff_combination_to_tree (&ubase_aff);
return get_expr_id (data, expr);
@@ -4067,7 +4067,7 @@ get_computation_cost_at (struct ivopts_data *data,
HOST_WIDE_INT ratio, aratio;
bool var_present, symbol_present, stmt_is_after_inc;
comp_cost cost;
- double_int rat;
+ widest_int rat;
bool speed = optimize_bb_for_speed_p (gimple_bb (at));
enum machine_mode mem_mode = (address_p
? TYPE_MODE (TREE_TYPE (*use->op_p))
@@ -4126,7 +4126,7 @@ get_computation_cost_at (struct ivopts_data *data,
if (!constant_multiple_of (ustep, cstep, &rat))
return infinite_cost;
- if (rat.fits_shwi ())
+ if (wi::fits_shwi_p (rat))
ratio = rat.to_shwi ();
else
return infinite_cost;
@@ -4640,11 +4640,11 @@ iv_elimination_compare_lt (struct ivopts_data *data,
tree_to_aff_combination (niter->niter, nit_type, &nit);
tree_to_aff_combination (fold_convert (nit_type, a), nit_type, &tmpa);
tree_to_aff_combination (fold_convert (nit_type, b), nit_type, &tmpb);
- aff_combination_scale (&nit, double_int_minus_one);
- aff_combination_scale (&tmpa, double_int_minus_one);
+ aff_combination_scale (&nit, -1);
+ aff_combination_scale (&tmpa, -1);
aff_combination_add (&tmpb, &tmpa);
aff_combination_add (&tmpb, &nit);
- if (tmpb.n != 0 || tmpb.offset != double_int_one)
+ if (tmpb.n != 0 || tmpb.offset != 1)
return false;
/* Finally, check that CAND->IV->BASE - CAND->IV->STEP * A does not
@@ -4730,13 +4730,13 @@ may_eliminate_iv (struct ivopts_data *data,
entire loop and compare against that instead. */
else
{
- double_int period_value, max_niter;
+ widest_int period_value, max_niter;
max_niter = desc->max;
if (stmt_after_increment (loop, cand, use->stmt))
- max_niter += double_int_one;
- period_value = tree_to_double_int (period);
- if (max_niter.ugt (period_value))
+ max_niter += 1;
+ period_value = wi::to_widest (period);
+ if (wi::gtu_p (max_niter, period_value))
{
/* See if we can take advantage of inferred loop bound information. */
if (data->loop_single_exit_p)
@@ -4744,7 +4744,7 @@ may_eliminate_iv (struct ivopts_data *data,
if (!max_loop_iterations (loop, &max_niter))
return false;
/* The loop bound is already adjusted by adding 1. */
- if (max_niter.ugt (period_value))
+ if (wi::gtu_p (max_niter, period_value))
return false;
}
else
diff --git a/gcc/tree-ssa-loop-niter.c b/gcc/tree-ssa-loop-niter.c
index 7628363cc62..a48ad10424e 100644
--- a/gcc/tree-ssa-loop-niter.c
+++ b/gcc/tree-ssa-loop-niter.c
@@ -55,6 +55,7 @@ along with GCC; see the file COPYING3. If not see
#include "tree-pass.h"
#include "stringpool.h"
#include "tree-ssanames.h"
+#include "wide-int-print.h"
#define SWAP(X, Y) do { affine_iv *tmp = (X); (X) = (Y); (Y) = tmp; } while (0)
@@ -85,7 +86,6 @@ split_to_var_and_offset (tree expr, tree *var, mpz_t offset)
{
tree type = TREE_TYPE (expr);
tree op0, op1;
- double_int off;
bool negate = false;
*var = expr;
@@ -107,17 +107,14 @@ split_to_var_and_offset (tree expr, tree *var, mpz_t offset)
*var = op0;
/* Always sign extend the offset. */
- off = tree_to_double_int (op1);
- off = off.sext (TYPE_PRECISION (type));
- mpz_set_double_int (offset, off, false);
+ wi::to_mpz (op1, offset, SIGNED);
if (negate)
mpz_neg (offset, offset);
break;
case INTEGER_CST:
*var = build_int_cst_type (type, 0);
- off = tree_to_double_int (expr);
- mpz_set_double_int (offset, off, TYPE_UNSIGNED (type));
+ wi::to_mpz (expr, offset, TYPE_SIGN (type));
break;
default:
@@ -132,7 +129,7 @@ static void
determine_value_range (struct loop *loop, tree type, tree var, mpz_t off,
mpz_t min, mpz_t max)
{
- double_int minv, maxv;
+ wide_int minv, maxv;
enum value_range_type rtype = VR_VARYING;
/* If the expression is a constant, we know its value exactly. */
@@ -149,6 +146,7 @@ determine_value_range (struct loop *loop, tree type, tree var, mpz_t off,
if (TREE_CODE (var) == SSA_NAME && INTEGRAL_TYPE_P (type))
{
edge e = loop_preheader_edge (loop);
+ signop sgn = TYPE_SIGN (type);
gimple_stmt_iterator gsi;
/* Either for VAR itself... */
@@ -158,7 +156,7 @@ determine_value_range (struct loop *loop, tree type, tree var, mpz_t off,
for (gsi = gsi_start_phis (loop->header); !gsi_end_p (gsi); gsi_next (&gsi))
{
gimple phi = gsi_stmt (gsi);
- double_int minc, maxc;
+ wide_int minc, maxc;
if (PHI_ARG_DEF_FROM_EDGE (phi, e) == var
&& (get_range_info (gimple_phi_result (phi), &minc, &maxc)
== VR_RANGE))
@@ -171,13 +169,13 @@ determine_value_range (struct loop *loop, tree type, tree var, mpz_t off,
}
else
{
- minv = minv.max (minc, TYPE_UNSIGNED (type));
- maxv = maxv.min (maxc, TYPE_UNSIGNED (type));
+ minv = wi::max (minv, minc, sgn);
+ maxv = wi::min (maxv, maxc, sgn);
/* If the PHI result range are inconsistent with
the VAR range, give up on looking at the PHI
results. This can happen if VR_UNDEFINED is
involved. */
- if (minv.cmp (maxv, TYPE_UNSIGNED (type)) > 0)
+ if (wi::gt_p (minv, maxv, sgn))
{
rtype = get_range_info (var, &minv, &maxv);
break;
@@ -188,11 +186,11 @@ determine_value_range (struct loop *loop, tree type, tree var, mpz_t off,
if (rtype == VR_RANGE)
{
mpz_t minm, maxm;
- gcc_assert (minv.cmp (maxv, TYPE_UNSIGNED (type)) <= 0);
+ gcc_assert (wi::le_p (minv, maxv, sgn));
mpz_init (minm);
mpz_init (maxm);
- mpz_set_double_int (minm, minv, TYPE_UNSIGNED (type));
- mpz_set_double_int (maxm, maxv, TYPE_UNSIGNED (type));
+ wi::to_mpz (minv, minm, sgn);
+ wi::to_mpz (maxv, maxm, sgn);
mpz_add (minm, minm, off);
mpz_add (maxm, maxm, off);
/* If the computation may not wrap or off is zero, then this
@@ -262,7 +260,7 @@ bound_difference_of_offsetted_base (tree type, mpz_t x, mpz_t y,
}
mpz_init (m);
- mpz_set_double_int (m, double_int::mask (TYPE_PRECISION (type)), true);
+ wi::to_mpz (wi::minus_one (TYPE_PRECISION (type)), m, UNSIGNED);
mpz_add_ui (m, m, 1);
mpz_sub (bnds->up, x, y);
mpz_set (bnds->below, bnds->up);
@@ -541,15 +539,15 @@ end:
difference of two values in TYPE. */
static void
-bounds_add (bounds *bnds, double_int delta, tree type)
+bounds_add (bounds *bnds, const widest_int &delta, tree type)
{
mpz_t mdelta, max;
mpz_init (mdelta);
- mpz_set_double_int (mdelta, delta, false);
+ wi::to_mpz (delta, mdelta, SIGNED);
mpz_init (max);
- mpz_set_double_int (max, double_int::mask (TYPE_PRECISION (type)), true);
+ wi::to_mpz (wi::minus_one (TYPE_PRECISION (type)), max, UNSIGNED);
mpz_add (bnds->up, bnds->up, mdelta);
mpz_add (bnds->below, bnds->below, mdelta);
@@ -643,7 +641,7 @@ static void
number_of_iterations_ne_max (mpz_t bnd, bool no_overflow, tree c, tree s,
bounds *bnds, bool exit_must_be_taken)
{
- double_int max;
+ widest_int max;
mpz_t d;
tree type = TREE_TYPE (c);
bool bnds_u_valid = ((no_overflow && exit_must_be_taken)
@@ -652,10 +650,8 @@ number_of_iterations_ne_max (mpz_t bnd, bool no_overflow, tree c, tree s,
if (integer_onep (s)
|| (TREE_CODE (c) == INTEGER_CST
&& TREE_CODE (s) == INTEGER_CST
- && tree_to_double_int (c).mod (tree_to_double_int (s),
- TYPE_UNSIGNED (type),
- EXACT_DIV_EXPR).is_zero ())
- || (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (c))
+ && wi::mod_trunc (c, s, TYPE_SIGN (type)) == 0)
+ || (TYPE_OVERFLOW_UNDEFINED (type)
&& multiple_of_p (type, c, s)))
{
/* If C is an exact multiple of S, then its value will be reached before
@@ -673,15 +669,14 @@ number_of_iterations_ne_max (mpz_t bnd, bool no_overflow, tree c, tree s,
the whole # of iterations analysis will fail). */
if (!no_overflow)
{
- max = double_int::mask (TYPE_PRECISION (type)
- - tree_to_uhwi (num_ending_zeros (s)));
- mpz_set_double_int (bnd, max, true);
+ max = wi::mask <widest_int> (TYPE_PRECISION (type) - wi::ctz (s), false);
+ wi::to_mpz (max, bnd, UNSIGNED);
return;
}
/* Now we know that the induction variable does not overflow, so the loop
iterates at most (range of type / S) times. */
- mpz_set_double_int (bnd, double_int::mask (TYPE_PRECISION (type)), true);
+ wi::to_mpz (wi::minus_one (TYPE_PRECISION (type)), bnd, UNSIGNED);
/* If the induction variable is guaranteed to reach the value of C before
overflow, ... */
@@ -690,13 +685,13 @@ number_of_iterations_ne_max (mpz_t bnd, bool no_overflow, tree c, tree s,
/* ... then we can strengthen this to C / S, and possibly we can use
the upper bound on C given by BNDS. */
if (TREE_CODE (c) == INTEGER_CST)
- mpz_set_double_int (bnd, tree_to_double_int (c), true);
+ wi::to_mpz (c, bnd, UNSIGNED);
else if (bnds_u_valid)
mpz_set (bnd, bnds->up);
}
mpz_init (d);
- mpz_set_double_int (d, tree_to_double_int (s), true);
+ wi::to_mpz (s, d, UNSIGNED);
mpz_fdiv_q (bnd, bnd, d);
mpz_clear (d);
}
@@ -747,7 +742,8 @@ number_of_iterations_ne (tree type, affine_iv *iv, tree final,
mpz_init (max);
number_of_iterations_ne_max (max, iv->no_overflow, c, s, bnds,
exit_must_be_taken);
- niter->max = mpz_get_double_int (niter_type, max, false);
+ niter->max = widest_int::from (wi::from_mpz (niter_type, max, false),
+ TYPE_SIGN (niter_type));
mpz_clear (max);
/* First the trivial cases -- when the step is 1. */
@@ -820,7 +816,7 @@ number_of_iterations_lt_to_ne (tree type, affine_iv *iv0, affine_iv *iv1,
tmod = fold_convert (type1, mod);
mpz_init (mmod);
- mpz_set_double_int (mmod, tree_to_double_int (mod), true);
+ wi::to_mpz (mod, mmod, UNSIGNED);
mpz_neg (mmod, mmod);
/* If the induction variable does not overflow and the exit is taken,
@@ -902,7 +898,7 @@ number_of_iterations_lt_to_ne (tree type, affine_iv *iv0, affine_iv *iv1,
niter->may_be_zero = fold_build2 (TRUTH_OR_EXPR, boolean_type_node,
niter->may_be_zero,
noloop);
- bounds_add (bnds, tree_to_double_int (mod), type);
+ bounds_add (bnds, wi::to_widest (mod), type);
*delta = fold_build2 (PLUS_EXPR, niter_type, *delta, mod);
ret = true;
@@ -992,7 +988,7 @@ assert_loop_rolls_lt (tree type, affine_iv *iv0, affine_iv *iv1,
tree assumption = boolean_true_node, bound, diff;
tree mbz, mbzl, mbzr, type1;
bool rolls_p, no_overflow_p;
- double_int dstep;
+ widest_int dstep;
mpz_t mstep, max;
/* We are going to compute the number of iterations as
@@ -1018,22 +1014,22 @@ assert_loop_rolls_lt (tree type, affine_iv *iv0, affine_iv *iv1,
/* First check whether the answer does not follow from the bounds we gathered
before. */
if (integer_nonzerop (iv0->step))
- dstep = tree_to_double_int (iv0->step);
+ dstep = wi::to_widest (iv0->step);
else
{
- dstep = tree_to_double_int (iv1->step).sext (TYPE_PRECISION (type));
+ dstep = wi::sext (wi::to_widest (iv1->step), TYPE_PRECISION (type));
dstep = -dstep;
}
mpz_init (mstep);
- mpz_set_double_int (mstep, dstep, true);
+ wi::to_mpz (dstep, mstep, UNSIGNED);
mpz_neg (mstep, mstep);
mpz_add_ui (mstep, mstep, 1);
rolls_p = mpz_cmp (mstep, bnds->below) <= 0;
mpz_init (max);
- mpz_set_double_int (max, double_int::mask (TYPE_PRECISION (type)), true);
+ wi::to_mpz (wi::minus_one (TYPE_PRECISION (type)), max, UNSIGNED);
mpz_add (max, max, mstep);
no_overflow_p = (mpz_cmp (bnds->up, max) <= 0
/* For pointers, only values lying inside a single object
@@ -1160,7 +1156,8 @@ number_of_iterations_lt (tree type, affine_iv *iv0, affine_iv *iv1,
niter->may_be_zero = fold_build2 (LT_EXPR, boolean_type_node,
iv1->base, iv0->base);
niter->niter = delta;
- niter->max = mpz_get_double_int (niter_type, bnds->up, false);
+ niter->max = widest_int::from (wi::from_mpz (niter_type, bnds->up, false),
+ TYPE_SIGN (niter_type));
return true;
}
@@ -1203,11 +1200,12 @@ number_of_iterations_lt (tree type, affine_iv *iv0, affine_iv *iv1,
mpz_init (mstep);
mpz_init (tmp);
- mpz_set_double_int (mstep, tree_to_double_int (step), true);
+ wi::to_mpz (step, mstep, UNSIGNED);
mpz_add (tmp, bnds->up, mstep);
mpz_sub_ui (tmp, tmp, 1);
mpz_fdiv_q (tmp, tmp, mstep);
- niter->max = mpz_get_double_int (niter_type, tmp, false);
+ niter->max = widest_int::from (wi::from_mpz (niter_type, tmp, false),
+ TYPE_SIGN (niter_type));
mpz_clear (mstep);
mpz_clear (tmp);
@@ -1270,7 +1268,7 @@ number_of_iterations_le (tree type, affine_iv *iv0, affine_iv *iv1,
iv0->base = fold_build2 (MINUS_EXPR, type1,
iv0->base, build_int_cst (type1, 1));
- bounds_add (bnds, double_int_one, type1);
+ bounds_add (bnds, 1, type1);
return number_of_iterations_lt (type, iv0, iv1, niter, exit_must_be_taken,
bnds);
@@ -1342,8 +1340,7 @@ number_of_iterations_cond (struct loop *loop,
niter->assumptions = boolean_true_node;
niter->may_be_zero = boolean_false_node;
niter->niter = NULL_TREE;
- niter->max = double_int_zero;
-
+ niter->max = 0;
niter->bound = NULL_TREE;
niter->cmp = ERROR_MARK;
@@ -1415,7 +1412,7 @@ number_of_iterations_cond (struct loop *loop,
if (tem && integer_zerop (tem))
{
niter->niter = build_int_cst (unsigned_type_for (type), 0);
- niter->max = double_int_zero;
+ niter->max = 0;
return true;
}
@@ -1491,7 +1488,7 @@ number_of_iterations_cond (struct loop *loop,
fprintf (dump_file, " # of iterations ");
print_generic_expr (dump_file, niter->niter, TDF_SLIM);
fprintf (dump_file, ", bounded by ");
- dump_double_int (dump_file, niter->max, true);
+ print_decu (niter->max, dump_file);
fprintf (dump_file, "\n");
}
else
@@ -2003,7 +2000,7 @@ number_of_iterations_exit (struct loop *loop, edge exit,
/* If NITER has simplified into a constant, update MAX. */
if (TREE_CODE (niter->niter) == INTEGER_CST)
- niter->max = tree_to_double_int (niter->niter);
+ niter->max = wi::to_widest (niter->niter);
if (integer_onep (niter->assumptions))
return true;
@@ -2115,7 +2112,7 @@ find_loop_niter (struct loop *loop, edge *exit)
bool
finite_loop_p (struct loop *loop)
{
- double_int nit;
+ widest_int nit;
int flags;
if (flag_unsafe_loop_optimizations)
@@ -2430,13 +2427,13 @@ find_loop_niter_by_eval (struct loop *loop, edge *exit)
*/
-static double_int derive_constant_upper_bound_ops (tree, tree,
+static widest_int derive_constant_upper_bound_ops (tree, tree,
enum tree_code, tree);
/* Returns a constant upper bound on the value of the right-hand side of
an assignment statement STMT. */
-static double_int
+static widest_int
derive_constant_upper_bound_assign (gimple stmt)
{
enum tree_code code = gimple_assign_rhs_code (stmt);
@@ -2451,7 +2448,7 @@ derive_constant_upper_bound_assign (gimple stmt)
is considered to be unsigned. If its type is signed, its value must
be nonnegative. */
-static double_int
+static widest_int
derive_constant_upper_bound (tree val)
{
enum tree_code code;
@@ -2465,12 +2462,12 @@ derive_constant_upper_bound (tree val)
whose type is TYPE. The expression is considered to be unsigned. If
its type is signed, its value must be nonnegative. */
-static double_int
+static widest_int
derive_constant_upper_bound_ops (tree type, tree op0,
enum tree_code code, tree op1)
{
tree subtype, maxt;
- double_int bnd, max, mmax, cst;
+ widest_int bnd, max, mmax, cst;
gimple stmt;
if (INTEGRAL_TYPE_P (type))
@@ -2478,12 +2475,12 @@ derive_constant_upper_bound_ops (tree type, tree op0,
else
maxt = upper_bound_in_type (type, type);
- max = tree_to_double_int (maxt);
+ max = wi::to_widest (maxt);
switch (code)
{
case INTEGER_CST:
- return tree_to_double_int (op0);
+ return wi::to_widest (op0);
CASE_CONVERT:
subtype = TREE_TYPE (op0);
@@ -2505,7 +2502,7 @@ derive_constant_upper_bound_ops (tree type, tree op0,
/* If the bound does not fit in TYPE, max. value of TYPE could be
attained. */
- if (max.ult (bnd))
+ if (wi::ltu_p (max, bnd))
return max;
return bnd;
@@ -2520,25 +2517,24 @@ derive_constant_upper_bound_ops (tree type, tree op0,
/* Canonicalize to OP0 - CST. Consider CST to be signed, in order to
choose the most logical way how to treat this constant regardless
of the signedness of the type. */
- cst = tree_to_double_int (op1);
- cst = cst.sext (TYPE_PRECISION (type));
+ cst = wi::sext (wi::to_widest (op1), TYPE_PRECISION (type));
if (code != MINUS_EXPR)
cst = -cst;
bnd = derive_constant_upper_bound (op0);
- if (cst.is_negative ())
+ if (wi::neg_p (cst))
{
cst = -cst;
/* Avoid CST == 0x80000... */
- if (cst.is_negative ())
+ if (wi::neg_p (cst))
return max;;
/* OP0 + CST. We need to check that
BND <= MAX (type) - CST. */
mmax -= cst;
- if (bnd.ugt (mmax))
+ if (wi::ltu_p (bnd, max))
return max;
return bnd + cst;
@@ -2558,13 +2554,13 @@ derive_constant_upper_bound_ops (tree type, tree op0,
/* This should only happen if the type is unsigned; however, for
buggy programs that use overflowing signed arithmetics even with
-fno-wrapv, this condition may also be true for signed values. */
- if (bnd.ult (cst))
+ if (wi::ltu_p (bnd, cst))
return max;
if (TYPE_UNSIGNED (type))
{
tree tem = fold_binary (GE_EXPR, boolean_type_node, op0,
- double_int_to_tree (type, cst));
+ wide_int_to_tree (type, cst));
if (!tem || integer_nonzerop (tem))
return max;
}
@@ -2581,13 +2577,13 @@ derive_constant_upper_bound_ops (tree type, tree op0,
return max;
bnd = derive_constant_upper_bound (op0);
- return bnd.udiv (tree_to_double_int (op1), FLOOR_DIV_EXPR);
+ return wi::udiv_floor (bnd, wi::to_widest (op1));
case BIT_AND_EXPR:
if (TREE_CODE (op1) != INTEGER_CST
|| tree_int_cst_sign_bit (op1))
return max;
- return tree_to_double_int (op1);
+ return wi::to_widest (op1);
case SSA_NAME:
stmt = SSA_NAME_DEF_STMT (op0);
@@ -2605,7 +2601,7 @@ derive_constant_upper_bound_ops (tree type, tree op0,
static void
do_warn_aggressive_loop_optimizations (struct loop *loop,
- double_int i_bound, gimple stmt)
+ widest_int i_bound, gimple stmt)
{
/* Don't warn if the loop doesn't have known constant bound. */
if (!loop->nb_iterations
@@ -2618,7 +2614,7 @@ do_warn_aggressive_loop_optimizations (struct loop *loop,
|| loop->warned_aggressive_loop_optimizations
/* Only warn if undefined behavior gives us lower estimate than the
known constant bound. */
- || i_bound.ucmp (tree_to_double_int (loop->nb_iterations)) >= 0
+ || wi::cmpu (i_bound, wi::to_widest (loop->nb_iterations)) >= 0
/* And undefined behavior happens unconditionally. */
|| !dominated_by_p (CDI_DOMINATORS, loop->latch, gimple_bb (stmt)))
return;
@@ -2630,8 +2626,8 @@ do_warn_aggressive_loop_optimizations (struct loop *loop,
gimple estmt = last_stmt (e->src);
if (warning_at (gimple_location (stmt), OPT_Waggressive_loop_optimizations,
"iteration %E invokes undefined behavior",
- double_int_to_tree (TREE_TYPE (loop->nb_iterations),
- i_bound)))
+ wide_int_to_tree (TREE_TYPE (loop->nb_iterations),
+ i_bound)))
inform (gimple_location (estmt), "containing loop");
loop->warned_aggressive_loop_optimizations = true;
}
@@ -2641,13 +2637,13 @@ do_warn_aggressive_loop_optimizations (struct loop *loop,
is taken at last when the STMT is executed BOUND + 1 times.
REALISTIC is true if BOUND is expected to be close to the real number
of iterations. UPPER is true if we are sure the loop iterates at most
- BOUND times. I_BOUND is an unsigned double_int upper estimate on BOUND. */
+ BOUND times. I_BOUND is a widest_int upper estimate on BOUND. */
static void
-record_estimate (struct loop *loop, tree bound, double_int i_bound,
+record_estimate (struct loop *loop, tree bound, const widest_int &i_bound,
gimple at_stmt, bool is_exit, bool realistic, bool upper)
{
- double_int delta;
+ widest_int delta;
if (dump_file && (dump_flags & TDF_DETAILS))
{
@@ -2657,7 +2653,7 @@ record_estimate (struct loop *loop, tree bound, double_int i_bound,
upper ? "" : "probably ");
print_generic_expr (dump_file, bound, TDF_SLIM);
fprintf (dump_file, " (bounded by ");
- dump_double_int (dump_file, i_bound, true);
+ print_decu (i_bound, dump_file);
fprintf (dump_file, ") + 1 times in loop %d.\n", loop->num);
}
@@ -2666,7 +2662,7 @@ record_estimate (struct loop *loop, tree bound, double_int i_bound,
if (TREE_CODE (bound) != INTEGER_CST)
realistic = false;
else
- gcc_checking_assert (i_bound == tree_to_double_int (bound));
+ gcc_checking_assert (i_bound == wi::to_widest (bound));
if (!upper && !realistic)
return;
@@ -2697,18 +2693,18 @@ record_estimate (struct loop *loop, tree bound, double_int i_bound,
otherwise it can be executed BOUND + 1 times. We will lower the estimate
later if such statement must be executed on last iteration */
if (is_exit)
- delta = double_int_zero;
+ delta = 0;
else
- delta = double_int_one;
- i_bound += delta;
+ delta = 1;
+ widest_int new_i_bound = i_bound + delta;
/* If an overflow occurred, ignore the result. */
- if (i_bound.ult (delta))
+ if (wi::ltu_p (new_i_bound, delta))
return;
if (upper && !is_exit)
- do_warn_aggressive_loop_optimizations (loop, i_bound, at_stmt);
- record_niter_bound (loop, i_bound, realistic, upper);
+ do_warn_aggressive_loop_optimizations (loop, new_i_bound, at_stmt);
+ record_niter_bound (loop, new_i_bound, realistic, upper);
}
/* Record the estimate on number of iterations of LOOP based on the fact that
@@ -2723,7 +2719,6 @@ record_nonwrapping_iv (struct loop *loop, tree base, tree step, gimple stmt,
{
tree niter_bound, extreme, delta;
tree type = TREE_TYPE (base), unsigned_type;
- double_int max;
if (TREE_CODE (step) != INTEGER_CST || integer_zerop (step))
return;
@@ -2764,7 +2759,7 @@ record_nonwrapping_iv (struct loop *loop, tree base, tree step, gimple stmt,
/* STMT is executed at most NITER_BOUND + 1 times, since otherwise the value
would get out of the range. */
niter_bound = fold_build2 (FLOOR_DIV_EXPR, unsigned_type, delta, step);
- max = derive_constant_upper_bound (niter_bound);
+ widest_int max = derive_constant_upper_bound (niter_bound);
record_estimate (loop, niter_bound, max, stmt, false, realistic, upper);
}
@@ -3068,27 +3063,21 @@ infer_loop_bounds_from_undefined (struct loop *loop)
free (bbs);
}
-
-
-/* Compare double ints, callback for qsort. */
+/* Compare wide ints, callback for qsort. */
static int
-double_int_cmp (const void *p1, const void *p2)
+wide_int_cmp (const void *p1, const void *p2)
{
- const double_int *d1 = (const double_int *)p1;
- const double_int *d2 = (const double_int *)p2;
- if (*d1 == *d2)
- return 0;
- if (d1->ult (*d2))
- return -1;
- return 1;
+ const widest_int *d1 = (const widest_int *) p1;
+ const widest_int *d2 = (const widest_int *) p2;
+ return wi::cmpu (*d1, *d2);
}
/* Return index of BOUND in BOUNDS array sorted in increasing order.
Lookup by binary search. */
static int
-bound_index (vec<double_int> bounds, double_int bound)
+bound_index (vec<widest_int> bounds, const widest_int &bound)
{
unsigned int end = bounds.length ();
unsigned int begin = 0;
@@ -3097,11 +3086,11 @@ bound_index (vec<double_int> bounds, double_int bound)
while (begin != end)
{
unsigned int middle = (begin + end) / 2;
- double_int index = bounds[middle];
+ widest_int index = bounds[middle];
if (index == bound)
return middle;
- else if (index.ult (bound))
+ else if (wi::ltu_p (index, bound))
begin = middle + 1;
else
end = middle;
@@ -3120,7 +3109,7 @@ discover_iteration_bound_by_body_walk (struct loop *loop)
{
pointer_map_t *bb_bounds;
struct nb_iter_bound *elt;
- vec<double_int> bounds = vNULL;
+ vec<widest_int> bounds = vNULL;
vec<vec<basic_block> > queues = vNULL;
vec<basic_block> queue = vNULL;
ptrdiff_t queue_index;
@@ -3130,20 +3119,20 @@ discover_iteration_bound_by_body_walk (struct loop *loop)
/* Discover what bounds may interest us. */
for (elt = loop->bounds; elt; elt = elt->next)
{
- double_int bound = elt->bound;
+ widest_int bound = elt->bound;
/* Exit terminates loop at given iteration, while non-exits produce undefined
effect on the next iteration. */
if (!elt->is_exit)
{
- bound += double_int_one;
+ bound += 1;
/* If an overflow occurred, ignore the result. */
- if (bound.is_zero ())
+ if (bound == 0)
continue;
}
if (!loop->any_upper_bound
- || bound.ult (loop->nb_iterations_upper_bound))
+ || wi::ltu_p (bound, loop->nb_iterations_upper_bound))
bounds.safe_push (bound);
}
@@ -3156,7 +3145,7 @@ discover_iteration_bound_by_body_walk (struct loop *loop)
/* Sort the bounds in decreasing order. */
qsort (bounds.address (), bounds.length (),
- sizeof (double_int), double_int_cmp);
+ sizeof (widest_int), wide_int_cmp);
/* For every basic block record the lowest bound that is guaranteed to
terminate the loop. */
@@ -3164,17 +3153,17 @@ discover_iteration_bound_by_body_walk (struct loop *loop)
bb_bounds = pointer_map_create ();
for (elt = loop->bounds; elt; elt = elt->next)
{
- double_int bound = elt->bound;
+ widest_int bound = elt->bound;
if (!elt->is_exit)
{
- bound += double_int_one;
+ bound += 1;
/* If an overflow occurred, ignore the result. */
- if (bound.is_zero ())
+ if (bound == 0)
continue;
}
if (!loop->any_upper_bound
- || bound.ult (loop->nb_iterations_upper_bound))
+ || wi::ltu_p (bound, loop->nb_iterations_upper_bound))
{
ptrdiff_t index = bound_index (bounds, bound);
void **entry = pointer_map_contains (bb_bounds,
@@ -3274,7 +3263,7 @@ discover_iteration_bound_by_body_walk (struct loop *loop)
if (dump_file && (dump_flags & TDF_DETAILS))
{
fprintf (dump_file, "Found better loop bound ");
- dump_double_int (dump_file, bounds[latch_index], true);
+ print_decu (bounds[latch_index], dump_file);
fprintf (dump_file, "\n");
}
record_niter_bound (loop, bounds[latch_index], false, true);
@@ -3309,7 +3298,7 @@ maybe_lower_iteration_bound (struct loop *loop)
for (elt = loop->bounds; elt; elt = elt->next)
{
if (!elt->is_exit
- && elt->bound.ult (loop->nb_iterations_upper_bound))
+ && wi::ltu_p (elt->bound, loop->nb_iterations_upper_bound))
{
if (!not_executed_last_iteration)
not_executed_last_iteration = pointer_set_create ();
@@ -3383,7 +3372,7 @@ maybe_lower_iteration_bound (struct loop *loop)
if (dump_file && (dump_flags & TDF_DETAILS))
fprintf (dump_file, "Reducing loop iteration estimate by 1; "
"undefined statement must be executed at the last iteration.\n");
- record_niter_bound (loop, loop->nb_iterations_upper_bound - double_int_one,
+ record_niter_bound (loop, loop->nb_iterations_upper_bound - 1,
false, true);
}
BITMAP_FREE (visited);
@@ -3402,7 +3391,7 @@ estimate_numbers_of_iterations_loop (struct loop *loop)
unsigned i;
struct tree_niter_desc niter_desc;
edge ex;
- double_int bound;
+ widest_int bound;
edge likely_exit;
/* Give up if we already have tried to compute an estimation. */
@@ -3449,7 +3438,7 @@ estimate_numbers_of_iterations_loop (struct loop *loop)
if (loop->header->count != 0)
{
gcov_type nit = expected_loop_iterations_unbounded (loop) + 1;
- bound = gcov_type_to_double_int (nit);
+ bound = gcov_type_to_wide_int (nit);
record_niter_bound (loop, bound, true, false);
}
@@ -3460,8 +3449,7 @@ estimate_numbers_of_iterations_loop (struct loop *loop)
&& TREE_CODE (loop->nb_iterations) == INTEGER_CST)
{
loop->any_upper_bound = true;
- loop->nb_iterations_upper_bound
- = tree_to_double_int (loop->nb_iterations);
+ loop->nb_iterations_upper_bound = wi::to_widest (loop->nb_iterations);
}
}
@@ -3471,7 +3459,7 @@ estimate_numbers_of_iterations_loop (struct loop *loop)
the function returns false, otherwise returns true. */
bool
-estimated_loop_iterations (struct loop *loop, double_int *nit)
+estimated_loop_iterations (struct loop *loop, widest_int *nit)
{
/* When SCEV information is available, try to update loop iterations
estimate. Otherwise just return whatever we recorded earlier. */
@@ -3488,13 +3476,13 @@ estimated_loop_iterations (struct loop *loop, double_int *nit)
HOST_WIDE_INT
estimated_loop_iterations_int (struct loop *loop)
{
- double_int nit;
+ widest_int nit;
HOST_WIDE_INT hwi_nit;
if (!estimated_loop_iterations (loop, &nit))
return -1;
- if (!nit.fits_shwi ())
+ if (!wi::fits_shwi_p (nit))
return -1;
hwi_nit = nit.to_shwi ();
@@ -3507,7 +3495,7 @@ estimated_loop_iterations_int (struct loop *loop)
false, otherwise returns true. */
bool
-max_loop_iterations (struct loop *loop, double_int *nit)
+max_loop_iterations (struct loop *loop, widest_int *nit)
{
/* When SCEV information is available, try to update loop iterations
estimate. Otherwise just return whatever we recorded earlier. */
@@ -3524,13 +3512,13 @@ max_loop_iterations (struct loop *loop, double_int *nit)
HOST_WIDE_INT
max_loop_iterations_int (struct loop *loop)
{
- double_int nit;
+ widest_int nit;
HOST_WIDE_INT hwi_nit;
if (!max_loop_iterations (loop, &nit))
return -1;
- if (!nit.fits_shwi ())
+ if (!wi::fits_shwi_p (nit))
return -1;
hwi_nit = nit.to_shwi ();
@@ -3561,18 +3549,18 @@ estimated_stmt_executions_int (struct loop *loop)
false, otherwise returns true. */
bool
-max_stmt_executions (struct loop *loop, double_int *nit)
+max_stmt_executions (struct loop *loop, widest_int *nit)
{
- double_int nit_minus_one;
+ widest_int nit_minus_one;
if (!max_loop_iterations (loop, nit))
return false;
nit_minus_one = *nit;
- *nit += double_int_one;
+ *nit += 1;
- return (*nit).ugt (nit_minus_one);
+ return wi::gtu_p (*nit, nit_minus_one);
}
/* Sets NIT to the estimated number of executions of the latch of the
@@ -3580,18 +3568,18 @@ max_stmt_executions (struct loop *loop, double_int *nit)
false, otherwise returns true. */
bool
-estimated_stmt_executions (struct loop *loop, double_int *nit)
+estimated_stmt_executions (struct loop *loop, widest_int *nit)
{
- double_int nit_minus_one;
+ widest_int nit_minus_one;
if (!estimated_loop_iterations (loop, nit))
return false;
nit_minus_one = *nit;
- *nit += double_int_one;
+ *nit += 1;
- return (*nit).ugt (nit_minus_one);
+ return wi::gtu_p (*nit, nit_minus_one);
}
/* Records estimates on numbers of iterations of loops. */
@@ -3662,7 +3650,7 @@ n_of_executions_at_most (gimple stmt,
struct nb_iter_bound *niter_bound,
tree niter)
{
- double_int bound = niter_bound->bound;
+ widest_int bound = niter_bound->bound;
tree nit_type = TREE_TYPE (niter), e;
enum tree_code cmp;
@@ -3670,7 +3658,7 @@ n_of_executions_at_most (gimple stmt,
/* If the bound does not even fit into NIT_TYPE, it cannot tell us that
the number of iterations is small. */
- if (!double_int_fits_to_tree_p (nit_type, bound))
+ if (!wi::fits_to_tree_p (bound, nit_type))
return false;
/* We know that NITER_BOUND->stmt is executed at most NITER_BOUND->bound + 1
@@ -3713,16 +3701,16 @@ n_of_executions_at_most (gimple stmt,
gsi_next (&bsi))
if (gimple_has_side_effects (gsi_stmt (bsi)))
return false;
- bound += double_int_one;
- if (bound.is_zero ()
- || !double_int_fits_to_tree_p (nit_type, bound))
+ bound += 1;
+ if (bound == 0
+ || !wi::fits_to_tree_p (bound, nit_type))
return false;
}
cmp = GT_EXPR;
}
e = fold_binary (cmp, boolean_type_node,
- niter, double_int_to_tree (nit_type, bound));
+ niter, wide_int_to_tree (nit_type, bound));
return e && integer_nonzerop (e);
}
@@ -3760,7 +3748,7 @@ scev_probably_wraps_p (tree base, tree step,
tree unsigned_type, valid_niter;
tree type = TREE_TYPE (step);
tree e;
- double_int niter;
+ widest_int niter;
struct nb_iter_bound *bound;
/* FIXME: We really need something like
@@ -3826,10 +3814,10 @@ scev_probably_wraps_p (tree base, tree step,
estimate_numbers_of_iterations_loop (loop);
if (max_loop_iterations (loop, &niter)
- && double_int_fits_to_tree_p (TREE_TYPE (valid_niter), niter)
+ && wi::fits_to_tree_p (niter, TREE_TYPE (valid_niter))
&& (e = fold_binary (GT_EXPR, boolean_type_node, valid_niter,
- double_int_to_tree (TREE_TYPE (valid_niter),
- niter))) != NULL
+ wide_int_to_tree (TREE_TYPE (valid_niter),
+ niter))) != NULL
&& integer_nonzerop (e))
{
fold_undefer_and_ignore_overflow_warnings ();
diff --git a/gcc/tree-ssa-loop-niter.h b/gcc/tree-ssa-loop-niter.h
index df0d64d021b..a143040723a 100644
--- a/gcc/tree-ssa-loop-niter.h
+++ b/gcc/tree-ssa-loop-niter.h
@@ -29,14 +29,14 @@ extern tree find_loop_niter (struct loop *, edge *);
extern bool finite_loop_p (struct loop *);
extern tree loop_niter_by_eval (struct loop *, edge);
extern tree find_loop_niter_by_eval (struct loop *, edge *);
-extern bool estimated_loop_iterations (struct loop *, double_int *);
+extern bool estimated_loop_iterations (struct loop *, widest_int *);
extern HOST_WIDE_INT estimated_loop_iterations_int (struct loop *);
-extern bool max_loop_iterations (struct loop *, double_int *);
+extern bool max_loop_iterations (struct loop *, widest_int *);
extern HOST_WIDE_INT max_loop_iterations_int (struct loop *);
extern HOST_WIDE_INT max_stmt_executions_int (struct loop *);
extern HOST_WIDE_INT estimated_stmt_executions_int (struct loop *);
-extern bool max_stmt_executions (struct loop *, double_int *);
-extern bool estimated_stmt_executions (struct loop *, double_int *);
+extern bool max_stmt_executions (struct loop *, widest_int *);
+extern bool estimated_stmt_executions (struct loop *, widest_int *);
extern void estimate_numbers_of_iterations (void);
extern bool stmt_dominates_stmt_p (gimple, gimple);
extern bool nowrap_type_p (tree);
diff --git a/gcc/tree-ssa-loop.h b/gcc/tree-ssa-loop.h
index 4684cda6dfa..95857f1e621 100644
--- a/gcc/tree-ssa-loop.h
+++ b/gcc/tree-ssa-loop.h
@@ -20,6 +20,8 @@ along with GCC; see the file COPYING3. If not see
#ifndef GCC_TREE_SSA_LOOP_H
#define GCC_TREE_SSA_LOOP_H
+#include "wide-int.h"
+
/* Affine iv. */
struct affine_iv
@@ -49,7 +51,7 @@ struct tree_niter_desc
a loop (provided that assumptions == true and
may_be_zero == false), more precisely the number
of executions of the latch of the loop. */
- double_int max; /* The upper bound on the number of iterations of
+ widest_int max; /* The upper bound on the number of iterations of
the loop. */
/* The simplified shape of the exit condition. The loop exits if
diff --git a/gcc/tree-ssa-math-opts.c b/gcc/tree-ssa-math-opts.c
index 7c653c820bd..336626d0886 100644
--- a/gcc/tree-ssa-math-opts.c
+++ b/gcc/tree-ssa-math-opts.c
@@ -1148,7 +1148,7 @@ gimple_expand_builtin_pow (gimple_stmt_iterator *gsi, location_t loc,
multiplication sequence when profitable. */
c = TREE_REAL_CST (arg1);
n = real_to_integer (&c);
- real_from_integer (&cint, VOIDmode, n, n < 0 ? -1 : 0, 0);
+ real_from_integer (&cint, VOIDmode, n, SIGNED);
c_is_int = real_identical (&c, &cint);
if (c_is_int
@@ -1194,7 +1194,7 @@ gimple_expand_builtin_pow (gimple_stmt_iterator *gsi, location_t loc,
/* Optimize pow(x,0.75) = sqrt(x) * sqrt(sqrt(x)) unless we are
optimizing for space. Don't do this optimization if we don't have
a hardware sqrt insn. */
- real_from_integer (&dconst3_4, VOIDmode, 3, 0, 0);
+ real_from_integer (&dconst3_4, VOIDmode, 3, SIGNED);
SET_REAL_EXP (&dconst3_4, REAL_EXP (&dconst3_4) - 2);
if (flag_unsafe_math_optimizations
@@ -1258,7 +1258,7 @@ gimple_expand_builtin_pow (gimple_stmt_iterator *gsi, location_t loc,
Do not calculate the powi factor when n/2 = 0. */
real_arithmetic (&c2, MULT_EXPR, &c, &dconst2);
n = real_to_integer (&c2);
- real_from_integer (&cint, VOIDmode, n, n < 0 ? -1 : 0, 0);
+ real_from_integer (&cint, VOIDmode, n, SIGNED);
c2_is_int = real_identical (&c2, &cint);
if (flag_unsafe_math_optimizations
@@ -1306,11 +1306,11 @@ gimple_expand_builtin_pow (gimple_stmt_iterator *gsi, location_t loc,
different from pow(x, 1./3.) due to rounding and behavior with
negative x, we need to constrain this transformation to unsafe
math and positive x or finite math. */
- real_from_integer (&dconst3, VOIDmode, 3, 0, 0);
+ real_from_integer (&dconst3, VOIDmode, 3, SIGNED);
real_arithmetic (&c2, MULT_EXPR, &c, &dconst3);
real_round (&c2, mode, &c2);
n = real_to_integer (&c2);
- real_from_integer (&cint, VOIDmode, n, n < 0 ? -1 : 0, 0);
+ real_from_integer (&cint, VOIDmode, n, SIGNED);
real_arithmetic (&c2, RDIV_EXPR, &cint, &dconst3);
real_convert (&c2, mode, &c2);
diff --git a/gcc/tree-ssa-phiopt.c b/gcc/tree-ssa-phiopt.c
index c9fe74e46ff..28a6ea76e85 100644
--- a/gcc/tree-ssa-phiopt.c
+++ b/gcc/tree-ssa-phiopt.c
@@ -559,7 +559,7 @@ jump_function_from_stmt (tree *arg, gimple stmt)
&offset);
if (tem
&& TREE_CODE (tem) == MEM_REF
- && (mem_ref_offset (tem) + double_int::from_shwi (offset)).is_zero ())
+ && (mem_ref_offset (tem) + offset) == 0)
{
*arg = TREE_OPERAND (tem, 0);
return true;
diff --git a/gcc/tree-ssa-pre.c b/gcc/tree-ssa-pre.c
index 0344d438a4e..95e3af98238 100644
--- a/gcc/tree-ssa-pre.c
+++ b/gcc/tree-ssa-pre.c
@@ -1599,11 +1599,11 @@ phi_translate_1 (pre_expr expr, bitmap_set_t set1, bitmap_set_t set2,
&& TREE_CODE (op[1]) == INTEGER_CST
&& TREE_CODE (op[2]) == INTEGER_CST)
{
- double_int off = tree_to_double_int (op[0]);
- off += -tree_to_double_int (op[1]);
- off *= tree_to_double_int (op[2]);
- if (off.fits_shwi ())
- newop.off = off.low;
+ offset_int off = ((wi::to_offset (op[0])
+ - wi::to_offset (op[1]))
+ * wi::to_offset (op[2]));
+ if (wi::fits_shwi_p (off))
+ newop.off = off.to_shwi ();
}
newoperands[j] = newop;
/* If it transforms from an SSA_NAME to an address, fold with
diff --git a/gcc/tree-ssa-reassoc.c b/gcc/tree-ssa-reassoc.c
index 7239ac8b985..357ac08381c 100644
--- a/gcc/tree-ssa-reassoc.c
+++ b/gcc/tree-ssa-reassoc.c
@@ -1111,7 +1111,7 @@ decrement_power (gimple stmt)
arg1 = gimple_call_arg (stmt, 1);
c = TREE_REAL_CST (arg1);
power = real_to_integer (&c) - 1;
- real_from_integer (&cint, VOIDmode, power, 0, 0);
+ real_from_integer (&cint, VOIDmode, power, SIGNED);
gimple_call_set_arg (stmt, 1, build_real (TREE_TYPE (arg1), cint));
return power;
@@ -3704,8 +3704,7 @@ acceptable_pow_call (gimple stmt, tree *base, HOST_WIDE_INT *exponent)
return false;
*exponent = real_to_integer (&c);
- real_from_integer (&cint, VOIDmode, *exponent,
- *exponent < 0 ? -1 : 0, 0);
+ real_from_integer (&cint, VOIDmode, *exponent, SIGNED);
if (!real_identical (&c, &cint))
return false;
diff --git a/gcc/tree-ssa-sccvn.c b/gcc/tree-ssa-sccvn.c
index f7ec8b6d60b..585fd85049c 100644
--- a/gcc/tree-ssa-sccvn.c
+++ b/gcc/tree-ssa-sccvn.c
@@ -814,21 +814,20 @@ copy_reference_ops_from_ref (tree ref, vec<vn_reference_op_s> *result)
tree bit_offset = DECL_FIELD_BIT_OFFSET (TREE_OPERAND (ref, 1));
if (TREE_INT_CST_LOW (bit_offset) % BITS_PER_UNIT == 0)
{
- double_int off
- = tree_to_double_int (this_offset)
- + tree_to_double_int (bit_offset)
- .rshift (BITS_PER_UNIT == 8
- ? 3 : exact_log2 (BITS_PER_UNIT));
- if (off.fits_shwi ()
+ offset_int off
+ = (wi::to_offset (this_offset)
+ + wi::lrshift (wi::to_offset (bit_offset),
+ LOG2_BITS_PER_UNIT));
+ if (wi::fits_shwi_p (off)
/* Probibit value-numbering zero offset components
of addresses the same before the pass folding
__builtin_object_size had a chance to run
(checking cfun->after_inlining does the
trick here). */
&& (TREE_CODE (orig) != ADDR_EXPR
- || !off.is_zero ()
+ || off != 0
|| cfun->after_inlining))
- temp.off = off.low;
+ temp.off = off.to_shwi ();
}
}
}
@@ -844,11 +843,11 @@ copy_reference_ops_from_ref (tree ref, vec<vn_reference_op_s> *result)
&& TREE_CODE (temp.op1) == INTEGER_CST
&& TREE_CODE (temp.op2) == INTEGER_CST)
{
- double_int off = tree_to_double_int (temp.op0);
- off += -tree_to_double_int (temp.op1);
- off *= tree_to_double_int (temp.op2);
- if (off.fits_shwi ())
- temp.off = off.low;
+ offset_int off = ((wi::to_offset (temp.op0)
+ - wi::to_offset (temp.op1))
+ * wi::to_offset (temp.op2));
+ if (wi::fits_shwi_p (off))
+ temp.off = off.to_shwi();
}
break;
case VAR_DECL:
@@ -1168,10 +1167,9 @@ vn_reference_fold_indirect (vec<vn_reference_op_s> *ops,
gcc_checking_assert (addr_base && TREE_CODE (addr_base) != MEM_REF);
if (addr_base != TREE_OPERAND (op->op0, 0))
{
- double_int off = tree_to_double_int (mem_op->op0);
- off = off.sext (TYPE_PRECISION (TREE_TYPE (mem_op->op0)));
- off += double_int::from_shwi (addr_offset);
- mem_op->op0 = double_int_to_tree (TREE_TYPE (mem_op->op0), off);
+ offset_int off = offset_int::from (mem_op->op0, SIGNED);
+ off += addr_offset;
+ mem_op->op0 = wide_int_to_tree (TREE_TYPE (mem_op->op0), off);
op->op0 = build_fold_addr_expr (addr_base);
if (tree_fits_shwi_p (mem_op->op0))
mem_op->off = tree_to_shwi (mem_op->op0);
@@ -1191,7 +1189,7 @@ vn_reference_maybe_forwprop_address (vec<vn_reference_op_s> *ops,
vn_reference_op_t mem_op = &(*ops)[i - 1];
gimple def_stmt;
enum tree_code code;
- double_int off;
+ offset_int off;
def_stmt = SSA_NAME_DEF_STMT (op->op0);
if (!is_gimple_assign (def_stmt))
@@ -1202,8 +1200,7 @@ vn_reference_maybe_forwprop_address (vec<vn_reference_op_s> *ops,
&& code != POINTER_PLUS_EXPR)
return;
- off = tree_to_double_int (mem_op->op0);
- off = off.sext (TYPE_PRECISION (TREE_TYPE (mem_op->op0)));
+ off = offset_int::from (mem_op->op0, SIGNED);
/* The only thing we have to do is from &OBJ.foo.bar add the offset
from .foo.bar to the preceding MEM_REF offset and replace the
@@ -1220,7 +1217,7 @@ vn_reference_maybe_forwprop_address (vec<vn_reference_op_s> *ops,
|| TREE_CODE (addr_base) != MEM_REF)
return;
- off += double_int::from_shwi (addr_offset);
+ off += addr_offset;
off += mem_ref_offset (addr_base);
op->op0 = TREE_OPERAND (addr_base, 0);
}
@@ -1233,11 +1230,11 @@ vn_reference_maybe_forwprop_address (vec<vn_reference_op_s> *ops,
|| TREE_CODE (ptroff) != INTEGER_CST)
return;
- off += tree_to_double_int (ptroff);
+ off += wi::to_offset (ptroff);
op->op0 = ptr;
}
- mem_op->op0 = double_int_to_tree (TREE_TYPE (mem_op->op0), off);
+ mem_op->op0 = wide_int_to_tree (TREE_TYPE (mem_op->op0), off);
if (tree_fits_shwi_p (mem_op->op0))
mem_op->off = tree_to_shwi (mem_op->op0);
else
@@ -1391,11 +1388,11 @@ valueize_refs_1 (vec<vn_reference_op_s> orig, bool *valueized_anything)
&& TREE_CODE (vro->op1) == INTEGER_CST
&& TREE_CODE (vro->op2) == INTEGER_CST)
{
- double_int off = tree_to_double_int (vro->op0);
- off += -tree_to_double_int (vro->op1);
- off *= tree_to_double_int (vro->op2);
- if (off.fits_shwi ())
- vro->off = off.low;
+ offset_int off = ((wi::to_offset (vro->op0)
+ - wi::to_offset (vro->op1))
+ * wi::to_offset (vro->op2));
+ if (wi::fits_shwi_p (off))
+ vro->off = off.to_shwi ();
}
}
diff --git a/gcc/tree-ssa-structalias.c b/gcc/tree-ssa-structalias.c
index 0ab5f0953fe..5d3a323e54a 100644
--- a/gcc/tree-ssa-structalias.c
+++ b/gcc/tree-ssa-structalias.c
@@ -3065,14 +3065,13 @@ get_constraint_for_ptr_offset (tree ptr, tree offset,
else
{
/* Sign-extend the offset. */
- double_int soffset = tree_to_double_int (offset)
- .sext (TYPE_PRECISION (TREE_TYPE (offset)));
- if (!soffset.fits_shwi ())
+ offset_int soffset = offset_int::from (offset, SIGNED);
+ if (!wi::fits_shwi_p (soffset))
rhsoffset = UNKNOWN_OFFSET;
else
{
/* Make sure the bit-offset also fits. */
- HOST_WIDE_INT rhsunitoffset = soffset.low;
+ HOST_WIDE_INT rhsunitoffset = soffset.to_shwi ();
rhsoffset = rhsunitoffset * BITS_PER_UNIT;
if (rhsunitoffset != rhsoffset / BITS_PER_UNIT)
rhsoffset = UNKNOWN_OFFSET;
diff --git a/gcc/tree-ssa-uninit.c b/gcc/tree-ssa-uninit.c
index ae251ccd6ba..96e9e609f83 100644
--- a/gcc/tree-ssa-uninit.c
+++ b/gcc/tree-ssa-uninit.c
@@ -854,12 +854,11 @@ is_value_included_in (tree val, tree boundary, enum tree_code cmpc)
if (cmpc == EQ_EXPR)
result = tree_int_cst_equal (val, boundary);
else if (cmpc == LT_EXPR)
- result = INT_CST_LT_UNSIGNED (val, boundary);
+ result = tree_int_cst_lt (val, boundary);
else
{
gcc_assert (cmpc == LE_EXPR);
- result = (tree_int_cst_equal (val, boundary)
- || INT_CST_LT_UNSIGNED (val, boundary));
+ result = tree_int_cst_le (val, boundary);
}
}
else
@@ -867,12 +866,12 @@ is_value_included_in (tree val, tree boundary, enum tree_code cmpc)
if (cmpc == EQ_EXPR)
result = tree_int_cst_equal (val, boundary);
else if (cmpc == LT_EXPR)
- result = INT_CST_LT (val, boundary);
+ result = tree_int_cst_lt (val, boundary);
else
{
gcc_assert (cmpc == LE_EXPR);
result = (tree_int_cst_equal (val, boundary)
- || INT_CST_LT (val, boundary));
+ || tree_int_cst_lt (val, boundary));
}
}
diff --git a/gcc/tree-ssa.c b/gcc/tree-ssa.c
index 8101ac717fb..856325e0de4 100644
--- a/gcc/tree-ssa.c
+++ b/gcc/tree-ssa.c
@@ -1340,9 +1340,9 @@ non_rewritable_mem_ref_base (tree ref)
|| TREE_CODE (TREE_TYPE (decl)) == COMPLEX_TYPE)
&& useless_type_conversion_p (TREE_TYPE (base),
TREE_TYPE (TREE_TYPE (decl)))
- && mem_ref_offset (base).fits_uhwi ()
- && tree_to_double_int (TYPE_SIZE_UNIT (TREE_TYPE (decl)))
- .ugt (mem_ref_offset (base))
+ && wi::fits_uhwi_p (mem_ref_offset (base))
+ && wi::gtu_p (wi::to_offset (TYPE_SIZE_UNIT (TREE_TYPE (decl))),
+ mem_ref_offset (base))
&& multiple_of_p (sizetype, TREE_OPERAND (base, 1),
TYPE_SIZE_UNIT (TREE_TYPE (base))))
return NULL_TREE;
diff --git a/gcc/tree-ssanames.c b/gcc/tree-ssanames.c
index 02db6a5627d..fa7eaf3b476 100644
--- a/gcc/tree-ssanames.c
+++ b/gcc/tree-ssanames.c
@@ -186,19 +186,23 @@ make_ssa_name_fn (struct function *fn, tree var, gimple stmt)
/* Store range information RANGE_TYPE, MIN, and MAX to tree ssa_name NAME. */
void
-set_range_info (tree name, enum value_range_type range_type, double_int min,
- double_int max)
+set_range_info (tree name, enum value_range_type range_type,
+ const wide_int_ref &min, const wide_int_ref &max)
{
gcc_assert (!POINTER_TYPE_P (TREE_TYPE (name)));
gcc_assert (range_type == VR_RANGE || range_type == VR_ANTI_RANGE);
range_info_def *ri = SSA_NAME_RANGE_INFO (name);
+ unsigned int precision = TYPE_PRECISION (TREE_TYPE (name));
/* Allocate if not available. */
if (ri == NULL)
{
- ri = ggc_alloc_cleared_range_info_def ();
+ size_t size = (sizeof (range_info_def)
+ + trailing_wide_ints <3>::extra_size (precision));
+ ri = ggc_alloc_range_info_def (size);
+ ri->ints.set_precision (precision);
SSA_NAME_RANGE_INFO (name) = ri;
- ri->nonzero_bits = double_int::mask (TYPE_PRECISION (TREE_TYPE (name)));
+ ri->set_nonzero_bits (wi::shwi (-1, precision));
}
/* Record the range type. */
@@ -206,25 +210,16 @@ set_range_info (tree name, enum value_range_type range_type, double_int min,
SSA_NAME_ANTI_RANGE_P (name) = (range_type == VR_ANTI_RANGE);
/* Set the values. */
- ri->min = min;
- ri->max = max;
+ ri->set_min (min);
+ ri->set_max (max);
/* If it is a range, try to improve nonzero_bits from the min/max. */
if (range_type == VR_RANGE)
{
- int prec = TYPE_PRECISION (TREE_TYPE (name));
- double_int xorv;
-
- min = min.zext (prec);
- max = max.zext (prec);
- xorv = min ^ max;
- if (xorv.high)
- xorv = double_int::mask (2 * HOST_BITS_PER_WIDE_INT
- - clz_hwi (xorv.high));
- else if (xorv.low)
- xorv = double_int::mask (HOST_BITS_PER_WIDE_INT
- - clz_hwi (xorv.low));
- ri->nonzero_bits = ri->nonzero_bits & (min | xorv);
+ wide_int xorv = ri->get_min () ^ ri->get_max ();
+ if (xorv != 0)
+ xorv = wi::mask (precision - wi::clz (xorv), false, precision);
+ ri->set_nonzero_bits (ri->get_nonzero_bits () & (ri->get_min () | xorv));
}
}
@@ -234,7 +229,7 @@ set_range_info (tree name, enum value_range_type range_type, double_int min,
is used to determine if MIN and MAX are valid values. */
enum value_range_type
-get_range_info (const_tree name, double_int *min, double_int *max)
+get_range_info (const_tree name, wide_int *min, wide_int *max)
{
gcc_assert (!POINTER_TYPE_P (TREE_TYPE (name)));
gcc_assert (min && max);
@@ -246,50 +241,45 @@ get_range_info (const_tree name, double_int *min, double_int *max)
> 2 * HOST_BITS_PER_WIDE_INT))
return VR_VARYING;
- *min = ri->min;
- *max = ri->max;
+ *min = ri->get_min ();
+ *max = ri->get_max ();
return SSA_NAME_RANGE_TYPE (name);
}
/* Change non-zero bits bitmask of NAME. */
void
-set_nonzero_bits (tree name, double_int mask)
+set_nonzero_bits (tree name, const wide_int_ref &mask)
{
gcc_assert (!POINTER_TYPE_P (TREE_TYPE (name)));
if (SSA_NAME_RANGE_INFO (name) == NULL)
set_range_info (name, VR_RANGE,
- tree_to_double_int (TYPE_MIN_VALUE (TREE_TYPE (name))),
- tree_to_double_int (TYPE_MAX_VALUE (TREE_TYPE (name))));
+ TYPE_MIN_VALUE (TREE_TYPE (name)),
+ TYPE_MAX_VALUE (TREE_TYPE (name)));
range_info_def *ri = SSA_NAME_RANGE_INFO (name);
- ri->nonzero_bits
- = mask & double_int::mask (TYPE_PRECISION (TREE_TYPE (name)));
+ ri->set_nonzero_bits (mask);
}
-/* Return a double_int with potentially non-zero bits in SSA_NAME
- NAME, or double_int_minus_one if unknown. */
+/* Return a widest_int with potentially non-zero bits in SSA_NAME
+ NAME, or -1 if unknown. */
-double_int
+wide_int
get_nonzero_bits (const_tree name)
{
+ unsigned int precision = TYPE_PRECISION (TREE_TYPE (name));
if (POINTER_TYPE_P (TREE_TYPE (name)))
{
struct ptr_info_def *pi = SSA_NAME_PTR_INFO (name);
if (pi && pi->align)
- {
- double_int al = double_int::from_uhwi (pi->align - 1);
- return ((double_int::mask (TYPE_PRECISION (TREE_TYPE (name))) & ~al)
- | double_int::from_uhwi (pi->misalign));
- }
- return double_int_minus_one;
+ return wi::shwi (-(int) pi->align | pi->misalign, precision);
+ return wi::shwi (-1, precision);
}
range_info_def *ri = SSA_NAME_RANGE_INFO (name);
- if (!ri || (GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (name)))
- > 2 * HOST_BITS_PER_WIDE_INT))
- return double_int_minus_one;
+ if (!ri)
+ return wi::shwi (-1, precision);
- return ri->nonzero_bits;
+ return ri->get_nonzero_bits ();
}
/* We no longer need the SSA_NAME expression VAR, release it so that
@@ -502,8 +492,11 @@ duplicate_ssa_name_range_info (tree name, enum value_range_type range_type,
if (!range_info)
return;
- new_range_info = ggc_alloc_range_info_def ();
- *new_range_info = *range_info;
+ unsigned int precision = TYPE_PRECISION (TREE_TYPE (name));
+ size_t size = (sizeof (range_info_def)
+ + trailing_wide_ints <3>::extra_size (precision));
+ new_range_info = ggc_alloc_range_info_def (size);
+ memcpy (new_range_info, range_info, size);
gcc_assert (range_type == VR_RANGE || range_type == VR_ANTI_RANGE);
SSA_NAME_ANTI_RANGE_P (name) = (range_type == VR_ANTI_RANGE);
diff --git a/gcc/tree-ssanames.h b/gcc/tree-ssanames.h
index bb3b5e6c1df..4fc9f69e1af 100644
--- a/gcc/tree-ssanames.h
+++ b/gcc/tree-ssanames.h
@@ -47,13 +47,12 @@ struct GTY(()) ptr_info_def
/* Value range information for SSA_NAMEs representing non-pointer variables. */
-struct GTY (()) range_info_def {
- /* Minimum for value range. */
- double_int min;
- /* Maximum for value range. */
- double_int max;
- /* Non-zero bits - bits not set are guaranteed to be always zero. */
- double_int nonzero_bits;
+struct GTY ((variable_size)) range_info_def {
+ /* Minimum, maximum and nonzero bits. */
+ TRAILING_WIDE_INT_ACCESSOR (min, ints, 0)
+ TRAILING_WIDE_INT_ACCESSOR (max, ints, 1)
+ TRAILING_WIDE_INT_ACCESSOR (nonzero_bits, ints, 2)
+ trailing_wide_ints <3> ints;
};
@@ -70,13 +69,13 @@ struct GTY (()) range_info_def {
enum value_range_type { VR_UNDEFINED, VR_RANGE, VR_ANTI_RANGE, VR_VARYING };
/* Sets the value range to SSA. */
-extern void set_range_info (tree, enum value_range_type, double_int,
- double_int);
+extern void set_range_info (tree, enum value_range_type, const wide_int_ref &,
+ const wide_int_ref &);
/* Gets the value range from SSA. */
-extern enum value_range_type get_range_info (const_tree, double_int *,
- double_int *);
-extern void set_nonzero_bits (tree, double_int);
-extern double_int get_nonzero_bits (const_tree);
+extern enum value_range_type get_range_info (const_tree, wide_int *,
+ wide_int *);
+extern void set_nonzero_bits (tree, const wide_int_ref &);
+extern wide_int get_nonzero_bits (const_tree);
extern void init_ssanames (struct function *, int);
extern void fini_ssanames (void);
extern void ssanames_print_statistics (void);
diff --git a/gcc/tree-streamer-in.c b/gcc/tree-streamer-in.c
index b02bb6bd0a0..fc5ecfc9855 100644
--- a/gcc/tree-streamer-in.c
+++ b/gcc/tree-streamer-in.c
@@ -152,8 +152,9 @@ unpack_ts_base_value_fields (struct bitpack_d *bp, tree expr)
static void
unpack_ts_int_cst_value_fields (struct bitpack_d *bp, tree expr)
{
- TREE_INT_CST_LOW (expr) = bp_unpack_var_len_unsigned (bp);
- TREE_INT_CST_HIGH (expr) = bp_unpack_var_len_int (bp);
+ int i;
+ for (i = 0; i < TREE_INT_CST_EXT_NUNITS (expr); i++)
+ TREE_INT_CST_ELT (expr, i) = bp_unpack_var_len_int (bp);
}
@@ -603,6 +604,12 @@ streamer_alloc_tree (struct lto_input_block *ib, struct data_in *data_in,
unsigned HOST_WIDE_INT len = streamer_read_uhwi (ib);
result = make_tree_binfo (len);
}
+ else if (CODE_CONTAINS_STRUCT (code, TS_INT_CST))
+ {
+ unsigned HOST_WIDE_INT len = streamer_read_uhwi (ib);
+ unsigned HOST_WIDE_INT ext_len = streamer_read_uhwi (ib);
+ result = make_int_cst (len, ext_len);
+ }
else if (code == CALL_EXPR)
{
unsigned HOST_WIDE_INT nargs = streamer_read_uhwi (ib);
diff --git a/gcc/tree-streamer-out.c b/gcc/tree-streamer-out.c
index 90dec0a1ce6..5858047b4b5 100644
--- a/gcc/tree-streamer-out.c
+++ b/gcc/tree-streamer-out.c
@@ -127,8 +127,11 @@ pack_ts_base_value_fields (struct bitpack_d *bp, tree expr)
static void
pack_ts_int_cst_value_fields (struct bitpack_d *bp, tree expr)
{
- bp_pack_var_len_unsigned (bp, TREE_INT_CST_LOW (expr));
- bp_pack_var_len_int (bp, TREE_INT_CST_HIGH (expr));
+ int i;
+ /* Note that the number of elements has already been written out in
+ streamer_write_tree_header. */
+ for (i = 0; i < TREE_INT_CST_EXT_NUNITS (expr); i++)
+ bp_pack_var_len_int (bp, TREE_INT_CST_ELT (expr, i));
}
@@ -1008,6 +1011,12 @@ streamer_write_tree_header (struct output_block *ob, tree expr)
streamer_write_uhwi (ob, call_expr_nargs (expr));
else if (TREE_CODE (expr) == OMP_CLAUSE)
streamer_write_uhwi (ob, OMP_CLAUSE_CODE (expr));
+ else if (CODE_CONTAINS_STRUCT (code, TS_INT_CST))
+ {
+ gcc_checking_assert (TREE_INT_CST_NUNITS (expr));
+ streamer_write_uhwi (ob, TREE_INT_CST_NUNITS (expr));
+ streamer_write_uhwi (ob, TREE_INT_CST_EXT_NUNITS (expr));
+ }
}
@@ -1017,9 +1026,16 @@ streamer_write_tree_header (struct output_block *ob, tree expr)
void
streamer_write_integer_cst (struct output_block *ob, tree cst, bool ref_p)
{
+ int i;
+ int len = TREE_INT_CST_NUNITS (cst);
gcc_assert (!TREE_OVERFLOW (cst));
streamer_write_record_start (ob, LTO_integer_cst);
stream_write_tree (ob, TREE_TYPE (cst), ref_p);
- streamer_write_uhwi (ob, TREE_INT_CST_LOW (cst));
- streamer_write_hwi (ob, TREE_INT_CST_HIGH (cst));
+ /* We're effectively streaming a non-sign-extended wide_int here,
+ so there's no need to stream TREE_INT_CST_EXT_NUNITS or any
+ array members beyond LEN. We'll recreate the tree from the
+ wide_int and the type. */
+ streamer_write_uhwi (ob, len);
+ for (i = 0; i < len; i++)
+ streamer_write_hwi (ob, TREE_INT_CST_ELT (cst, i));
}
diff --git a/gcc/tree-switch-conversion.c b/gcc/tree-switch-conversion.c
index 26295340e25..3651120d0e1 100644
--- a/gcc/tree-switch-conversion.c
+++ b/gcc/tree-switch-conversion.c
@@ -446,7 +446,13 @@ emit_case_bit_tests (gimple swtch, tree index_expr,
if (const & csui) goto target */
for (k = 0; k < count; k++)
{
- tmp = build_int_cst_wide (word_type_node, test[k].lo, test[k].hi);
+ HOST_WIDE_INT a[2];
+
+ a[0] = test[k].lo;
+ a[1] = test[k].hi;
+ tmp = wide_int_to_tree (word_type_node,
+ wide_int::from_array (a, 2,
+ TYPE_PRECISION (word_type_node)));
tmp = fold_build2 (BIT_AND_EXPR, word_type_node, csui, tmp);
tmp = force_gimple_operand_gsi (&gsi, tmp,
/*simple=*/true, NULL_TREE,
@@ -886,7 +892,8 @@ build_constructors (gimple swtch, struct switch_conv_info *info)
info->constructors[k]->quick_push (elt);
}
- pos = int_const_binop (PLUS_EXPR, pos, integer_one_node);
+ pos = int_const_binop (PLUS_EXPR, pos,
+ build_int_cst (TREE_TYPE (pos), 1));
}
gcc_assert (tree_int_cst_equal (pos, CASE_LOW (cs)));
@@ -911,7 +918,8 @@ build_constructors (gimple swtch, struct switch_conv_info *info)
elt.value = unshare_expr_without_location (val);
info->constructors[j]->quick_push (elt);
- pos = int_const_binop (PLUS_EXPR, pos, integer_one_node);
+ pos = int_const_binop (PLUS_EXPR, pos,
+ build_int_cst (TREE_TYPE (pos), 1));
} while (!tree_int_cst_lt (high, pos)
&& tree_int_cst_lt (low, pos));
j++;
@@ -966,26 +974,26 @@ array_value_type (gimple swtch, tree type, int num,
FOR_EACH_VEC_SAFE_ELT (info->constructors[num], i, elt)
{
- double_int cst;
+ wide_int cst;
if (TREE_CODE (elt->value) != INTEGER_CST)
return type;
- cst = TREE_INT_CST (elt->value);
+ cst = elt->value;
while (1)
{
unsigned int prec = GET_MODE_BITSIZE (mode);
if (prec > HOST_BITS_PER_WIDE_INT)
return type;
- if (sign >= 0 && cst == cst.zext (prec))
+ if (sign >= 0 && cst == wi::zext (cst, prec))
{
- if (sign == 0 && cst == cst.sext (prec))
+ if (sign == 0 && cst == wi::sext (cst, prec))
break;
sign = 1;
break;
}
- if (sign <= 0 && cst == cst.sext (prec))
+ if (sign <= 0 && cst == wi::sext (cst, prec))
{
sign = -1;
break;
diff --git a/gcc/tree-vect-data-refs.c b/gcc/tree-vect-data-refs.c
index 274cdbdcf80..d48e3cdcfca 100644
--- a/gcc/tree-vect-data-refs.c
+++ b/gcc/tree-vect-data-refs.c
@@ -2898,15 +2898,13 @@ vect_prune_runtime_alias_test_list (loop_vec_info loop_vinfo)
*/
- HOST_WIDE_INT
- min_seg_len_b = (TREE_CODE (dr_b1->seg_len) == INTEGER_CST) ?
- TREE_INT_CST_LOW (dr_b1->seg_len) :
- vect_factor;
+ HOST_WIDE_INT min_seg_len_b = (tree_fits_shwi_p (dr_b1->seg_len)
+ ? tree_to_shwi (dr_b1->seg_len)
+ : vect_factor);
if (diff <= min_seg_len_b
- || (TREE_CODE (dr_a1->seg_len) == INTEGER_CST
- && diff - (HOST_WIDE_INT) TREE_INT_CST_LOW (dr_a1->seg_len) <
- min_seg_len_b))
+ || (tree_fits_shwi_p (dr_a1->seg_len)
+ && diff - tree_to_shwi (dr_a1->seg_len) < min_seg_len_b))
{
if (dump_enabled_p ())
{
@@ -2999,8 +2997,8 @@ vect_check_gather (gimple stmt, loop_vec_info loop_vinfo, tree *basep,
{
if (off == NULL_TREE)
{
- double_int moff = mem_ref_offset (base);
- off = double_int_to_tree (sizetype, moff);
+ offset_int moff = mem_ref_offset (base);
+ off = wide_int_to_tree (sizetype, moff);
}
else
off = size_binop (PLUS_EXPR, off,
diff --git a/gcc/tree-vect-generic.c b/gcc/tree-vect-generic.c
index 301103db693..43a695d65f2 100644
--- a/gcc/tree-vect-generic.c
+++ b/gcc/tree-vect-generic.c
@@ -57,11 +57,13 @@ static tree
build_replicated_const (tree type, tree inner_type, HOST_WIDE_INT value)
{
int width = tree_to_uhwi (TYPE_SIZE (inner_type));
- int n = HOST_BITS_PER_WIDE_INT / width;
- unsigned HOST_WIDE_INT low, high, mask;
- tree ret;
+ int n = (TYPE_PRECISION (type) + HOST_BITS_PER_WIDE_INT - 1)
+ / HOST_BITS_PER_WIDE_INT;
+ unsigned HOST_WIDE_INT low, mask;
+ HOST_WIDE_INT a[WIDE_INT_MAX_ELTS];
+ int i;
- gcc_assert (n);
+ gcc_assert (n && n <= WIDE_INT_MAX_ELTS);
if (width == HOST_BITS_PER_WIDE_INT)
low = value;
@@ -71,17 +73,12 @@ build_replicated_const (tree type, tree inner_type, HOST_WIDE_INT value)
low = (unsigned HOST_WIDE_INT) ~0 / mask * (value & mask);
}
- if (TYPE_PRECISION (type) < HOST_BITS_PER_WIDE_INT)
- low &= ((HOST_WIDE_INT)1 << TYPE_PRECISION (type)) - 1, high = 0;
- else if (TYPE_PRECISION (type) == HOST_BITS_PER_WIDE_INT)
- high = 0;
- else if (TYPE_PRECISION (type) == HOST_BITS_PER_DOUBLE_INT)
- high = low;
- else
- gcc_unreachable ();
+ for (i = 0; i < n; i++)
+ a[i] = low;
- ret = build_int_cst_wide (type, low, high);
- return ret;
+ gcc_assert (TYPE_PRECISION (type) <= MAX_BITSIZE_MODE_ANY_INT);
+ return wide_int_to_tree
+ (type, wide_int::from_array (a, n, TYPE_PRECISION (type)));
}
static GTY(()) tree vector_inner_type;
@@ -415,7 +412,8 @@ expand_vector_divmod (gimple_stmt_iterator *gsi, tree type, tree op0,
unsigned HOST_WIDE_INT *mulc = XALLOCAVEC (unsigned HOST_WIDE_INT, nunits);
int prec = TYPE_PRECISION (TREE_TYPE (type));
int dummy_int;
- unsigned int i, unsignedp = TYPE_UNSIGNED (TREE_TYPE (type));
+ unsigned int i;
+ signop sign_p = TYPE_SIGN (TREE_TYPE (type));
unsigned HOST_WIDE_INT mask = GET_MODE_MASK (TYPE_MODE (TREE_TYPE (type)));
tree *vec;
tree cur_op, mulcst, tem;
@@ -457,7 +455,7 @@ expand_vector_divmod (gimple_stmt_iterator *gsi, tree type, tree op0,
}
if (mode == -2)
continue;
- if (unsignedp)
+ if (sign_p == UNSIGNED)
{
unsigned HOST_WIDE_INT mh;
unsigned HOST_WIDE_INT d = TREE_INT_CST_LOW (cst) & mask;
@@ -586,7 +584,7 @@ expand_vector_divmod (gimple_stmt_iterator *gsi, tree type, tree op0,
if (use_pow2)
{
tree addend = NULL_TREE;
- if (!unsignedp)
+ if (sign_p == SIGNED)
{
tree uns_type;
@@ -638,7 +636,7 @@ expand_vector_divmod (gimple_stmt_iterator *gsi, tree type, tree op0,
}
if (code == TRUNC_DIV_EXPR)
{
- if (unsignedp)
+ if (sign_p == UNSIGNED)
{
/* q = op0 >> shift; */
cur_op = add_rshift (gsi, type, op0, shifts);
@@ -672,7 +670,7 @@ expand_vector_divmod (gimple_stmt_iterator *gsi, tree type, tree op0,
if (op != unknown_optab
&& optab_handler (op, TYPE_MODE (type)) != CODE_FOR_nothing)
{
- if (unsignedp)
+ if (sign_p == UNSIGNED)
/* r = op0 & mask; */
return gimplify_build2 (gsi, BIT_AND_EXPR, type, op0, mask);
else if (addend != NULL_TREE)
@@ -713,7 +711,7 @@ expand_vector_divmod (gimple_stmt_iterator *gsi, tree type, tree op0,
switch (mode)
{
case 0:
- gcc_assert (unsignedp);
+ gcc_assert (sign_p == UNSIGNED);
/* t1 = oprnd0 >> pre_shift;
t2 = t1 h* ml;
q = t2 >> post_shift; */
@@ -722,7 +720,7 @@ expand_vector_divmod (gimple_stmt_iterator *gsi, tree type, tree op0,
return NULL_TREE;
break;
case 1:
- gcc_assert (unsignedp);
+ gcc_assert (sign_p == UNSIGNED);
for (i = 0; i < nunits; i++)
{
shift_temps[i] = 1;
@@ -733,7 +731,7 @@ expand_vector_divmod (gimple_stmt_iterator *gsi, tree type, tree op0,
case 3:
case 4:
case 5:
- gcc_assert (!unsignedp);
+ gcc_assert (sign_p == SIGNED);
for (i = 0; i < nunits; i++)
shift_temps[i] = prec - 1;
break;
diff --git a/gcc/tree-vect-loop-manip.c b/gcc/tree-vect-loop-manip.c
index 77d945e6a50..7b79ab1b9d0 100644
--- a/gcc/tree-vect-loop-manip.c
+++ b/gcc/tree-vect-loop-manip.c
@@ -1791,7 +1791,7 @@ vect_do_peeling_for_loop_bound (loop_vec_info loop_vinfo,
: LOOP_VINFO_VECT_FACTOR (loop_vinfo)) - 2;
if (check_profitability)
max_iter = MAX (max_iter, (int) th - 1);
- record_niter_bound (new_loop, double_int::from_shwi (max_iter), false, true);
+ record_niter_bound (new_loop, max_iter, false, true);
dump_printf (MSG_NOTE,
"Setting upper bound of nb iterations for epilogue "
"loop to %d\n", max_iter);
@@ -2028,7 +2028,7 @@ vect_do_peeling_for_alignment (loop_vec_info loop_vinfo, tree ni_name,
max_iter = LOOP_VINFO_VECT_FACTOR (loop_vinfo) - 2;
if (check_profitability)
max_iter = MAX (max_iter, (int) th - 1);
- record_niter_bound (new_loop, double_int::from_shwi (max_iter), false, true);
+ record_niter_bound (new_loop, max_iter, false, true);
dump_printf (MSG_NOTE,
"Setting upper bound of nb iterations for prologue "
"loop to %d\n", max_iter);
diff --git a/gcc/tree-vect-loop.c b/gcc/tree-vect-loop.c
index 7f4209346bc..1f6ac1a348b 100644
--- a/gcc/tree-vect-loop.c
+++ b/gcc/tree-vect-loop.c
@@ -6114,19 +6114,17 @@ vect_transform_loop (loop_vec_info loop_vinfo)
scale_loop_profile (loop, GCOV_COMPUTE_SCALE (1, vectorization_factor),
expected_iterations / vectorization_factor);
loop->nb_iterations_upper_bound
- = loop->nb_iterations_upper_bound.udiv (double_int::from_uhwi (vectorization_factor),
- FLOOR_DIV_EXPR);
+ = wi::udiv_floor (loop->nb_iterations_upper_bound, vectorization_factor);
if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo)
- && loop->nb_iterations_upper_bound != double_int_zero)
- loop->nb_iterations_upper_bound = loop->nb_iterations_upper_bound - double_int_one;
+ && loop->nb_iterations_upper_bound != 0)
+ loop->nb_iterations_upper_bound = loop->nb_iterations_upper_bound - 1;
if (loop->any_estimate)
{
loop->nb_iterations_estimate
- = loop->nb_iterations_estimate.udiv (double_int::from_uhwi (vectorization_factor),
- FLOOR_DIV_EXPR);
+ = wi::udiv_floor (loop->nb_iterations_estimate, vectorization_factor);
if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo)
- && loop->nb_iterations_estimate != double_int_zero)
- loop->nb_iterations_estimate = loop->nb_iterations_estimate - double_int_one;
+ && loop->nb_iterations_estimate != 0)
+ loop->nb_iterations_estimate = loop->nb_iterations_estimate - 1;
}
if (dump_enabled_p ())
diff --git a/gcc/tree-vect-patterns.c b/gcc/tree-vect-patterns.c
index 365cf016213..094cf047e05 100644
--- a/gcc/tree-vect-patterns.c
+++ b/gcc/tree-vect-patterns.c
@@ -2340,13 +2340,13 @@ vect_recog_divmod_pattern (vec<gimple> *stmts,
else
t3 = t2;
- double_int oprnd0_min, oprnd0_max;
+ wide_int oprnd0_min, oprnd0_max;
int msb = 1;
if (get_range_info (oprnd0, &oprnd0_min, &oprnd0_max) == VR_RANGE)
{
- if (!oprnd0_min.is_negative ())
+ if (!wi::neg_p (oprnd0_min, TYPE_SIGN (itype)))
msb = 0;
- else if (oprnd0_max.is_negative ())
+ else if (wi::neg_p (oprnd0_max, TYPE_SIGN (itype)))
msb = -1;
}
diff --git a/gcc/tree-vect-stmts.c b/gcc/tree-vect-stmts.c
index 1a51d6d7b57..b8547cba962 100644
--- a/gcc/tree-vect-stmts.c
+++ b/gcc/tree-vect-stmts.c
@@ -2992,11 +2992,10 @@ vectorizable_simd_clone_call (gimple stmt, gimple_stmt_iterator *gsi,
? POINTER_PLUS_EXPR : PLUS_EXPR;
tree type = POINTER_TYPE_P (TREE_TYPE (op))
? sizetype : TREE_TYPE (op);
- double_int cst
- = double_int::from_shwi
- (bestn->simdclone->args[i].linear_step);
- cst *= double_int::from_uhwi (ncopies * nunits);
- tree tcst = double_int_to_tree (type, cst);
+ widest_int cst
+ = wi::mul (bestn->simdclone->args[i].linear_step,
+ ncopies * nunits);
+ tree tcst = wide_int_to_tree (type, cst);
tree phi_arg = copy_ssa_name (op, NULL);
new_stmt = gimple_build_assign_with_ops (code, phi_arg,
phi_res, tcst);
@@ -3017,11 +3016,10 @@ vectorizable_simd_clone_call (gimple stmt, gimple_stmt_iterator *gsi,
? POINTER_PLUS_EXPR : PLUS_EXPR;
tree type = POINTER_TYPE_P (TREE_TYPE (op))
? sizetype : TREE_TYPE (op);
- double_int cst
- = double_int::from_shwi
- (bestn->simdclone->args[i].linear_step);
- cst *= double_int::from_uhwi (j * nunits);
- tree tcst = double_int_to_tree (type, cst);
+ widest_int cst
+ = wi::mul (bestn->simdclone->args[i].linear_step,
+ j * nunits);
+ tree tcst = wide_int_to_tree (type, cst);
new_temp = make_ssa_name (TREE_TYPE (op), NULL);
new_stmt
= gimple_build_assign_with_ops (code, new_temp,
diff --git a/gcc/tree-vrp.c b/gcc/tree-vrp.c
index d072128e650..633c329030f 100644
--- a/gcc/tree-vrp.c
+++ b/gcc/tree-vrp.c
@@ -60,6 +60,7 @@ along with GCC; see the file COPYING3. If not see
#include "expr.h"
#include "optabs.h"
#include "tree-ssa-threadedge.h"
+#include "wide-int.h"
@@ -1150,15 +1151,7 @@ operand_less_p (tree val, tree val2)
{
/* LT is folded faster than GE and others. Inline the common case. */
if (TREE_CODE (val) == INTEGER_CST && TREE_CODE (val2) == INTEGER_CST)
- {
- if (TYPE_UNSIGNED (TREE_TYPE (val)))
- return INT_CST_LT_UNSIGNED (val, val2);
- else
- {
- if (INT_CST_LT (val, val2))
- return 1;
- }
- }
+ return tree_int_cst_lt (val, val2);
else
{
tree tcmp;
@@ -1630,10 +1623,8 @@ extract_range_from_assert (value_range_t *vr_p, tree expr)
/* Make sure to not set TREE_OVERFLOW on the final type
conversion. We are willingly interpreting large positive
unsigned values as negative signed values here. */
- min = force_fit_type_double (TREE_TYPE (var), tree_to_double_int (min),
- 0, false);
- max = force_fit_type_double (TREE_TYPE (var), tree_to_double_int (max),
- 0, false);
+ min = force_fit_type (TREE_TYPE (var), wi::to_widest (min), 0, false);
+ max = force_fit_type (TREE_TYPE (var), wi::to_widest (max), 0, false);
/* We can transform a max, min range to an anti-range or
vice-versa. Use set_and_canonicalize_value_range which does
@@ -1889,6 +1880,10 @@ vrp_int_const_binop (enum tree_code code, tree val1, tree val2)
/* If the signed operation wraps then int_const_binop has done
everything we want. */
;
+ /* Signed division of -1/0 overflows and by the time it gets here
+ returns NULL_TREE. */
+ else if (!res)
+ return NULL_TREE;
else if ((TREE_OVERFLOW (res)
&& !TREE_OVERFLOW (val1)
&& !TREE_OVERFLOW (val2))
@@ -1980,19 +1975,20 @@ vrp_int_const_binop (enum tree_code code, tree val1, tree val2)
}
-/* For range VR compute two double_int bitmasks. In *MAY_BE_NONZERO
+/* For range VR compute two wide_int bitmasks. In *MAY_BE_NONZERO
bitmask if some bit is unset, it means for all numbers in the range
the bit is 0, otherwise it might be 0 or 1. In *MUST_BE_NONZERO
bitmask if some bit is set, it means for all numbers in the range
the bit is 1, otherwise it might be 0 or 1. */
static bool
-zero_nonzero_bits_from_vr (value_range_t *vr,
- double_int *may_be_nonzero,
- double_int *must_be_nonzero)
+zero_nonzero_bits_from_vr (const tree expr_type,
+ value_range_t *vr,
+ wide_int *may_be_nonzero,
+ wide_int *must_be_nonzero)
{
- *may_be_nonzero = double_int_minus_one;
- *must_be_nonzero = double_int_zero;
+ *may_be_nonzero = wi::minus_one (TYPE_PRECISION (expr_type));
+ *must_be_nonzero = wi::zero (TYPE_PRECISION (expr_type));
if (!range_int_cst_p (vr)
|| is_overflow_infinity (vr->min)
|| is_overflow_infinity (vr->max))
@@ -2000,34 +1996,21 @@ zero_nonzero_bits_from_vr (value_range_t *vr,
if (range_int_cst_singleton_p (vr))
{
- *may_be_nonzero = tree_to_double_int (vr->min);
+ *may_be_nonzero = vr->min;
*must_be_nonzero = *may_be_nonzero;
}
else if (tree_int_cst_sgn (vr->min) >= 0
|| tree_int_cst_sgn (vr->max) < 0)
{
- double_int dmin = tree_to_double_int (vr->min);
- double_int dmax = tree_to_double_int (vr->max);
- double_int xor_mask = dmin ^ dmax;
- *may_be_nonzero = dmin | dmax;
- *must_be_nonzero = dmin & dmax;
- if (xor_mask.high != 0)
+ wide_int xor_mask = wi::bit_xor (vr->min, vr->max);
+ *may_be_nonzero = wi::bit_or (vr->min, vr->max);
+ *must_be_nonzero = wi::bit_and (vr->min, vr->max);
+ if (xor_mask != 0)
{
- unsigned HOST_WIDE_INT mask
- = ((unsigned HOST_WIDE_INT) 1
- << floor_log2 (xor_mask.high)) - 1;
- may_be_nonzero->low = ALL_ONES;
- may_be_nonzero->high |= mask;
- must_be_nonzero->low = 0;
- must_be_nonzero->high &= ~mask;
- }
- else if (xor_mask.low != 0)
- {
- unsigned HOST_WIDE_INT mask
- = ((unsigned HOST_WIDE_INT) 1
- << floor_log2 (xor_mask.low)) - 1;
- may_be_nonzero->low |= mask;
- must_be_nonzero->low &= ~mask;
+ wide_int mask = wi::mask (wi::floor_log2 (xor_mask), false,
+ may_be_nonzero->get_precision ());
+ *may_be_nonzero = *may_be_nonzero | mask;
+ *must_be_nonzero = must_be_nonzero->and_not (mask);
}
}
@@ -2059,16 +2042,12 @@ ranges_from_anti_range (value_range_t *ar,
{
vr0->type = VR_RANGE;
vr0->min = vrp_val_min (type);
- vr0->max
- = double_int_to_tree (type,
- tree_to_double_int (ar->min) - double_int_one);
+ vr0->max = wide_int_to_tree (type, wi::sub (ar->min, 1));
}
if (!vrp_val_is_max (ar->max))
{
vr1->type = VR_RANGE;
- vr1->min
- = double_int_to_tree (type,
- tree_to_double_int (ar->max) + double_int_one);
+ vr1->min = wide_int_to_tree (type, wi::add (ar->max, 1));
vr1->max = vrp_val_max (type);
}
if (vr0->type == VR_UNDEFINED)
@@ -2234,28 +2213,6 @@ extract_range_from_multiplicative_op_1 (value_range_t *vr,
set_value_range (vr, type, min, max, NULL);
}
-/* Some quadruple precision helpers. */
-static int
-quad_int_cmp (double_int l0, double_int h0,
- double_int l1, double_int h1, bool uns)
-{
- int c = h0.cmp (h1, uns);
- if (c != 0) return c;
- return l0.ucmp (l1);
-}
-
-static void
-quad_int_pair_sort (double_int *l0, double_int *h0,
- double_int *l1, double_int *h1, bool uns)
-{
- if (quad_int_cmp (*l0, *h0, *l1, *h1, uns) > 0)
- {
- double_int tmp;
- tmp = *l0; *l0 = *l1; *l1 = tmp;
- tmp = *h0; *h0 = *h1; *h1 = tmp;
- }
-}
-
/* Extract range information from a binary operation CODE based on
the ranges of each of its operands, *VR0 and *VR1 with resulting
type EXPR_TYPE. The resulting range is stored in *VR. */
@@ -2427,43 +2384,36 @@ extract_range_from_binary_expr_1 (value_range_t *vr,
/* If we have a PLUS_EXPR with two VR_RANGE integer constant
ranges compute the precise range for such case if possible. */
if (range_int_cst_p (&vr0)
- && range_int_cst_p (&vr1)
- /* We need as many bits as the possibly unsigned inputs. */
- && TYPE_PRECISION (expr_type) <= HOST_BITS_PER_DOUBLE_INT)
- {
- double_int min0 = tree_to_double_int (vr0.min);
- double_int max0 = tree_to_double_int (vr0.max);
- double_int min1 = tree_to_double_int (vr1.min);
- double_int max1 = tree_to_double_int (vr1.max);
- bool uns = TYPE_UNSIGNED (expr_type);
- double_int type_min
- = double_int::min_value (TYPE_PRECISION (expr_type), uns);
- double_int type_max
- = double_int::max_value (TYPE_PRECISION (expr_type), uns);
- double_int dmin, dmax;
+ && range_int_cst_p (&vr1))
+ {
+ signop sgn = TYPE_SIGN (expr_type);
+ unsigned int prec = TYPE_PRECISION (expr_type);
+ wide_int type_min = wi::min_value (TYPE_PRECISION (expr_type), sgn);
+ wide_int type_max = wi::max_value (TYPE_PRECISION (expr_type), sgn);
+ wide_int wmin, wmax;
int min_ovf = 0;
int max_ovf = 0;
if (code == PLUS_EXPR)
{
- dmin = min0 + min1;
- dmax = max0 + max1;
-
- /* Check for overflow in double_int. */
- if (min1.cmp (double_int_zero, uns) != dmin.cmp (min0, uns))
- min_ovf = min0.cmp (dmin, uns);
- if (max1.cmp (double_int_zero, uns) != dmax.cmp (max0, uns))
- max_ovf = max0.cmp (dmax, uns);
+ wmin = wi::add (vr0.min, vr1.min);
+ wmax = wi::add (vr0.max, vr1.max);
+
+ /* Check for overflow. */
+ if (wi::cmp (vr1.min, 0, sgn) != wi::cmp (wmin, vr0.min, sgn))
+ min_ovf = wi::cmp (vr0.min, wmin, sgn);
+ if (wi::cmp (vr1.max, 0, sgn) != wi::cmp (wmax, vr0.max, sgn))
+ max_ovf = wi::cmp (vr0.max, wmax, sgn);
}
else /* if (code == MINUS_EXPR) */
{
- dmin = min0 - max1;
- dmax = max0 - min1;
+ wmin = wi::sub (vr0.min, vr1.max);
+ wmax = wi::sub (vr0.max, vr1.min);
- if (double_int_zero.cmp (max1, uns) != dmin.cmp (min0, uns))
- min_ovf = min0.cmp (max1, uns);
- if (double_int_zero.cmp (min1, uns) != dmax.cmp (max0, uns))
- max_ovf = max0.cmp (min1, uns);
+ if (wi::cmp (0, vr1.max, sgn) != wi::cmp (wmin, vr0.min, sgn))
+ min_ovf = wi::cmp (vr0.min, vr1.max, sgn);
+ if (wi::cmp (0, vr1.min, sgn) != wi::cmp (wmax, vr0.max, sgn))
+ max_ovf = wi::cmp (vr0.max, vr1.min, sgn);
}
/* For non-wrapping arithmetic look at possibly smaller
@@ -2471,24 +2421,24 @@ extract_range_from_binary_expr_1 (value_range_t *vr,
if (!TYPE_OVERFLOW_WRAPS (expr_type))
{
if (vrp_val_min (expr_type))
- type_min = tree_to_double_int (vrp_val_min (expr_type));
+ type_min = vrp_val_min (expr_type);
if (vrp_val_max (expr_type))
- type_max = tree_to_double_int (vrp_val_max (expr_type));
+ type_max = vrp_val_max (expr_type);
}
/* Check for type overflow. */
if (min_ovf == 0)
{
- if (dmin.cmp (type_min, uns) == -1)
+ if (wi::cmp (wmin, type_min, sgn) == -1)
min_ovf = -1;
- else if (dmin.cmp (type_max, uns) == 1)
+ else if (wi::cmp (wmin, type_max, sgn) == 1)
min_ovf = 1;
}
if (max_ovf == 0)
{
- if (dmax.cmp (type_min, uns) == -1)
+ if (wi::cmp (wmax, type_min, sgn) == -1)
max_ovf = -1;
- else if (dmax.cmp (type_max, uns) == 1)
+ else if (wi::cmp (wmax, type_max, sgn) == 1)
max_ovf = 1;
}
@@ -2496,16 +2446,14 @@ extract_range_from_binary_expr_1 (value_range_t *vr,
{
/* If overflow wraps, truncate the values and adjust the
range kind and bounds appropriately. */
- double_int tmin
- = dmin.ext (TYPE_PRECISION (expr_type), uns);
- double_int tmax
- = dmax.ext (TYPE_PRECISION (expr_type), uns);
+ wide_int tmin = wide_int::from (wmin, prec, sgn);
+ wide_int tmax = wide_int::from (wmax, prec, sgn);
if (min_ovf == max_ovf)
{
/* No overflow or both overflow or underflow. The
range kind stays VR_RANGE. */
- min = double_int_to_tree (expr_type, tmin);
- max = double_int_to_tree (expr_type, tmax);
+ min = wide_int_to_tree (expr_type, tmin);
+ max = wide_int_to_tree (expr_type, tmax);
}
else if (min_ovf == -1
&& max_ovf == 1)
@@ -2519,26 +2467,26 @@ extract_range_from_binary_expr_1 (value_range_t *vr,
/* Min underflow or max overflow. The range kind
changes to VR_ANTI_RANGE. */
bool covers = false;
- double_int tem = tmin;
+ wide_int tem = tmin;
gcc_assert ((min_ovf == -1 && max_ovf == 0)
|| (max_ovf == 1 && min_ovf == 0));
type = VR_ANTI_RANGE;
- tmin = tmax + double_int_one;
- if (tmin.cmp (tmax, uns) < 0)
+ tmin = tmax + 1;
+ if (wi::cmp (tmin, tmax, sgn) < 0)
covers = true;
- tmax = tem + double_int_minus_one;
- if (tmax.cmp (tem, uns) > 0)
+ tmax = tem - 1;
+ if (wi::cmp (tmax, tem, sgn) > 0)
covers = true;
/* If the anti-range would cover nothing, drop to varying.
Likewise if the anti-range bounds are outside of the
types values. */
- if (covers || tmin.cmp (tmax, uns) > 0)
+ if (covers || wi::cmp (tmin, tmax, sgn) > 0)
{
set_value_range_to_varying (vr);
return;
}
- min = double_int_to_tree (expr_type, tmin);
- max = double_int_to_tree (expr_type, tmax);
+ min = wide_int_to_tree (expr_type, tmin);
+ max = wide_int_to_tree (expr_type, tmax);
}
}
else
@@ -2551,7 +2499,7 @@ extract_range_from_binary_expr_1 (value_range_t *vr,
&& supports_overflow_infinity (expr_type))
min = negative_overflow_infinity (expr_type);
else
- min = double_int_to_tree (expr_type, type_min);
+ min = wide_int_to_tree (expr_type, type_min);
}
else if (min_ovf == 1)
{
@@ -2559,10 +2507,10 @@ extract_range_from_binary_expr_1 (value_range_t *vr,
&& supports_overflow_infinity (expr_type))
min = positive_overflow_infinity (expr_type);
else
- min = double_int_to_tree (expr_type, type_max);
+ min = wide_int_to_tree (expr_type, type_max);
}
else
- min = double_int_to_tree (expr_type, dmin);
+ min = wide_int_to_tree (expr_type, wmin);
if (max_ovf == -1)
{
@@ -2570,7 +2518,7 @@ extract_range_from_binary_expr_1 (value_range_t *vr,
&& supports_overflow_infinity (expr_type))
max = negative_overflow_infinity (expr_type);
else
- max = double_int_to_tree (expr_type, type_min);
+ max = wide_int_to_tree (expr_type, type_min);
}
else if (max_ovf == 1)
{
@@ -2578,10 +2526,10 @@ extract_range_from_binary_expr_1 (value_range_t *vr,
&& supports_overflow_infinity (expr_type))
max = positive_overflow_infinity (expr_type);
else
- max = double_int_to_tree (expr_type, type_max);
+ max = wide_int_to_tree (expr_type, type_max);
}
else
- max = double_int_to_tree (expr_type, dmax);
+ max = wide_int_to_tree (expr_type, wmax);
}
if (needs_overflow_infinity (expr_type)
&& supports_overflow_infinity (expr_type))
@@ -2667,97 +2615,85 @@ extract_range_from_binary_expr_1 (value_range_t *vr,
else if (code == MULT_EXPR)
{
/* Fancy code so that with unsigned, [-3,-1]*[-3,-1] does not
- drop to varying. */
+ drop to varying. This test requires 2*prec bits if both
+ operands are signed and 2*prec + 2 bits if either is not. */
+
+ signop sign = TYPE_SIGN (expr_type);
+ unsigned int prec = TYPE_PRECISION (expr_type);
+
if (range_int_cst_p (&vr0)
&& range_int_cst_p (&vr1)
&& TYPE_OVERFLOW_WRAPS (expr_type))
{
- double_int min0, max0, min1, max1, sizem1, size;
- double_int prod0l, prod0h, prod1l, prod1h,
- prod2l, prod2h, prod3l, prod3h;
- bool uns0, uns1, uns;
-
- sizem1 = double_int::max_value (TYPE_PRECISION (expr_type), true);
- size = sizem1 + double_int_one;
-
- min0 = tree_to_double_int (vr0.min);
- max0 = tree_to_double_int (vr0.max);
- min1 = tree_to_double_int (vr1.min);
- max1 = tree_to_double_int (vr1.max);
-
- uns0 = TYPE_UNSIGNED (expr_type);
- uns1 = uns0;
-
+ typedef FIXED_WIDE_INT (WIDE_INT_MAX_PRECISION * 2) vrp_int;
+ typedef generic_wide_int
+ <wi::extended_tree <WIDE_INT_MAX_PRECISION * 2> > vrp_int_cst;
+ vrp_int sizem1 = wi::mask <vrp_int> (prec, false);
+ vrp_int size = sizem1 + 1;
+
+ /* Extend the values using the sign of the result to PREC2.
+ From here on out, everthing is just signed math no matter
+ what the input types were. */
+ vrp_int min0 = vrp_int_cst (vr0.min);
+ vrp_int max0 = vrp_int_cst (vr0.max);
+ vrp_int min1 = vrp_int_cst (vr1.min);
+ vrp_int max1 = vrp_int_cst (vr1.max);
/* Canonicalize the intervals. */
- if (TYPE_UNSIGNED (expr_type))
+ if (sign == UNSIGNED)
{
- double_int min2 = size - min0;
- if (!min2.is_zero () && min2.cmp (max0, true) < 0)
+ if (wi::ltu_p (size, min0 + max0))
{
- min0 = -min2;
+ min0 -= size;
max0 -= size;
- uns0 = false;
}
- min2 = size - min1;
- if (!min2.is_zero () && min2.cmp (max1, true) < 0)
+ if (wi::ltu_p (size, min1 + max1))
{
- min1 = -min2;
+ min1 -= size;
max1 -= size;
- uns1 = false;
}
}
- uns = uns0 & uns1;
- bool overflow;
- prod0l = min0.wide_mul_with_sign (min1, true, &prod0h, &overflow);
- if (!uns0 && min0.is_negative ())
- prod0h -= min1;
- if (!uns1 && min1.is_negative ())
- prod0h -= min0;
-
- prod1l = min0.wide_mul_with_sign (max1, true, &prod1h, &overflow);
- if (!uns0 && min0.is_negative ())
- prod1h -= max1;
- if (!uns1 && max1.is_negative ())
- prod1h -= min0;
-
- prod2l = max0.wide_mul_with_sign (min1, true, &prod2h, &overflow);
- if (!uns0 && max0.is_negative ())
- prod2h -= min1;
- if (!uns1 && min1.is_negative ())
- prod2h -= max0;
-
- prod3l = max0.wide_mul_with_sign (max1, true, &prod3h, &overflow);
- if (!uns0 && max0.is_negative ())
- prod3h -= max1;
- if (!uns1 && max1.is_negative ())
- prod3h -= max0;
-
- /* Sort the 4 products. */
- quad_int_pair_sort (&prod0l, &prod0h, &prod3l, &prod3h, uns);
- quad_int_pair_sort (&prod1l, &prod1h, &prod2l, &prod2h, uns);
- quad_int_pair_sort (&prod0l, &prod0h, &prod1l, &prod1h, uns);
- quad_int_pair_sort (&prod2l, &prod2h, &prod3l, &prod3h, uns);
-
- /* Max - min. */
- if (prod0l.is_zero ())
+ vrp_int prod0 = min0 * min1;
+ vrp_int prod1 = min0 * max1;
+ vrp_int prod2 = max0 * min1;
+ vrp_int prod3 = max0 * max1;
+
+ /* Sort the 4 products so that min is in prod0 and max is in
+ prod3. */
+ /* min0min1 > max0max1 */
+ if (wi::gts_p (prod0, prod3))
{
- prod1l = double_int_zero;
- prod1h = -prod0h;
+ vrp_int tmp = prod3;
+ prod3 = prod0;
+ prod0 = tmp;
}
- else
+
+ /* min0max1 > max0min1 */
+ if (wi::gts_p (prod1, prod2))
{
- prod1l = -prod0l;
- prod1h = ~prod0h;
+ vrp_int tmp = prod2;
+ prod2 = prod1;
+ prod1 = tmp;
}
- prod2l = prod3l + prod1l;
- prod2h = prod3h + prod1h;
- if (prod2l.ult (prod3l))
- prod2h += double_int_one; /* carry */
- if (!prod2h.is_zero ()
- || prod2l.cmp (sizem1, true) >= 0)
+ if (wi::gts_p (prod0, prod1))
+ {
+ vrp_int tmp = prod1;
+ prod1 = prod0;
+ prod0 = tmp;
+ }
+
+ if (wi::gts_p (prod2, prod3))
+ {
+ vrp_int tmp = prod3;
+ prod3 = prod2;
+ prod2 = tmp;
+ }
+
+ /* diff = max - min. */
+ prod2 = prod3 - prod0;
+ if (wi::geu_p (prod2, sizem1))
{
/* the range covers all values. */
set_value_range_to_varying (vr);
@@ -2766,8 +2702,8 @@ extract_range_from_binary_expr_1 (value_range_t *vr,
/* The following should handle the wrapping and selecting
VR_ANTI_RANGE for us. */
- min = double_int_to_tree (expr_type, prod0l);
- max = double_int_to_tree (expr_type, prod3l);
+ min = wide_int_to_tree (expr_type, prod0);
+ max = wide_int_to_tree (expr_type, prod3);
set_and_canonicalize_value_range (vr, VR_RANGE, min, max, NULL);
return;
}
@@ -2814,11 +2750,10 @@ extract_range_from_binary_expr_1 (value_range_t *vr,
bool saved_flag_wrapv;
value_range_t vr1p = VR_INITIALIZER;
vr1p.type = VR_RANGE;
- vr1p.min
- = double_int_to_tree (expr_type,
- double_int_one
- .llshift (TREE_INT_CST_LOW (vr1.min),
- TYPE_PRECISION (expr_type)));
+ vr1p.min = (wide_int_to_tree
+ (expr_type,
+ wi::set_bit_in_zero (tree_to_shwi (vr1.min),
+ TYPE_PRECISION (expr_type))));
vr1p.max = vr1p.min;
/* We have to use a wrapping multiply though as signed overflow
on lshifts is implementation defined in C89. */
@@ -2835,34 +2770,34 @@ extract_range_from_binary_expr_1 (value_range_t *vr,
int prec = TYPE_PRECISION (expr_type);
int overflow_pos = prec;
int bound_shift;
- double_int bound, complement, low_bound, high_bound;
+ wide_int low_bound, high_bound;
bool uns = TYPE_UNSIGNED (expr_type);
bool in_bounds = false;
if (!uns)
overflow_pos -= 1;
- bound_shift = overflow_pos - TREE_INT_CST_LOW (vr1.max);
- /* If bound_shift == HOST_BITS_PER_DOUBLE_INT, the llshift can
+ bound_shift = overflow_pos - tree_to_shwi (vr1.max);
+ /* If bound_shift == HOST_BITS_PER_WIDE_INT, the llshift can
overflow. However, for that to happen, vr1.max needs to be
zero, which means vr1 is a singleton range of zero, which
means it should be handled by the previous LSHIFT_EXPR
if-clause. */
- bound = double_int_one.llshift (bound_shift, prec);
- complement = ~(bound - double_int_one);
+ wide_int bound = wi::set_bit_in_zero (bound_shift, prec);
+ wide_int complement = ~(bound - 1);
if (uns)
{
- low_bound = bound.zext (prec);
- high_bound = complement.zext (prec);
- if (tree_to_double_int (vr0.max).ult (low_bound))
+ low_bound = bound;
+ high_bound = complement;
+ if (wi::ltu_p (vr0.max, low_bound))
{
/* [5, 6] << [1, 2] == [10, 24]. */
/* We're shifting out only zeroes, the value increases
monotonically. */
in_bounds = true;
}
- else if (high_bound.ult (tree_to_double_int (vr0.min)))
+ else if (wi::ltu_p (high_bound, vr0.min))
{
/* [0xffffff00, 0xffffffff] << [1, 2]
== [0xfffffc00, 0xfffffffe]. */
@@ -2874,10 +2809,10 @@ extract_range_from_binary_expr_1 (value_range_t *vr,
else
{
/* [-1, 1] << [1, 2] == [-4, 4]. */
- low_bound = complement.sext (prec);
+ low_bound = complement;
high_bound = bound;
- if (tree_to_double_int (vr0.max).slt (high_bound)
- && low_bound.slt (tree_to_double_int (vr0.min)))
+ if (wi::lts_p (vr0.max, high_bound)
+ && wi::lts_p (low_bound, vr0.min))
{
/* For non-negative numbers, we're shifting out only
zeroes, the value increases monotonically.
@@ -3001,7 +2936,7 @@ extract_range_from_binary_expr_1 (value_range_t *vr,
max = fold_unary_to_constant (ABS_EXPR, expr_type, vr1.min);
if (tree_int_cst_lt (max, vr1.max))
max = vr1.max;
- max = int_const_binop (MINUS_EXPR, max, integer_one_node);
+ max = int_const_binop (MINUS_EXPR, max, build_int_cst (TREE_TYPE (max), 1));
/* If the dividend is non-negative the modulus will be
non-negative as well. */
if (TYPE_UNSIGNED (expr_type)
@@ -3013,21 +2948,22 @@ extract_range_from_binary_expr_1 (value_range_t *vr,
else if (code == BIT_AND_EXPR || code == BIT_IOR_EXPR || code == BIT_XOR_EXPR)
{
bool int_cst_range0, int_cst_range1;
- double_int may_be_nonzero0, may_be_nonzero1;
- double_int must_be_nonzero0, must_be_nonzero1;
+ wide_int may_be_nonzero0, may_be_nonzero1;
+ wide_int must_be_nonzero0, must_be_nonzero1;
- int_cst_range0 = zero_nonzero_bits_from_vr (&vr0, &may_be_nonzero0,
+ int_cst_range0 = zero_nonzero_bits_from_vr (expr_type, &vr0,
+ &may_be_nonzero0,
&must_be_nonzero0);
- int_cst_range1 = zero_nonzero_bits_from_vr (&vr1, &may_be_nonzero1,
+ int_cst_range1 = zero_nonzero_bits_from_vr (expr_type, &vr1,
+ &may_be_nonzero1,
&must_be_nonzero1);
type = VR_RANGE;
if (code == BIT_AND_EXPR)
{
- double_int dmax;
- min = double_int_to_tree (expr_type,
- must_be_nonzero0 & must_be_nonzero1);
- dmax = may_be_nonzero0 & may_be_nonzero1;
+ min = wide_int_to_tree (expr_type,
+ must_be_nonzero0 & must_be_nonzero1);
+ wide_int wmax = may_be_nonzero0 & may_be_nonzero1;
/* If both input ranges contain only negative values we can
truncate the result range maximum to the minimum of the
input range maxima. */
@@ -3035,28 +2971,23 @@ extract_range_from_binary_expr_1 (value_range_t *vr,
&& tree_int_cst_sgn (vr0.max) < 0
&& tree_int_cst_sgn (vr1.max) < 0)
{
- dmax = dmax.min (tree_to_double_int (vr0.max),
- TYPE_UNSIGNED (expr_type));
- dmax = dmax.min (tree_to_double_int (vr1.max),
- TYPE_UNSIGNED (expr_type));
+ wmax = wi::min (wmax, vr0.max, TYPE_SIGN (expr_type));
+ wmax = wi::min (wmax, vr1.max, TYPE_SIGN (expr_type));
}
/* If either input range contains only non-negative values
we can truncate the result range maximum to the respective
maximum of the input range. */
if (int_cst_range0 && tree_int_cst_sgn (vr0.min) >= 0)
- dmax = dmax.min (tree_to_double_int (vr0.max),
- TYPE_UNSIGNED (expr_type));
+ wmax = wi::min (wmax, vr0.max, TYPE_SIGN (expr_type));
if (int_cst_range1 && tree_int_cst_sgn (vr1.min) >= 0)
- dmax = dmax.min (tree_to_double_int (vr1.max),
- TYPE_UNSIGNED (expr_type));
- max = double_int_to_tree (expr_type, dmax);
+ wmax = wi::min (wmax, vr1.max, TYPE_SIGN (expr_type));
+ max = wide_int_to_tree (expr_type, wmax);
}
else if (code == BIT_IOR_EXPR)
{
- double_int dmin;
- max = double_int_to_tree (expr_type,
- may_be_nonzero0 | may_be_nonzero1);
- dmin = must_be_nonzero0 | must_be_nonzero1;
+ max = wide_int_to_tree (expr_type,
+ may_be_nonzero0 | may_be_nonzero1);
+ wide_int wmin = must_be_nonzero0 | must_be_nonzero1;
/* If the input ranges contain only positive values we can
truncate the minimum of the result range to the maximum
of the input range minima. */
@@ -3064,31 +2995,27 @@ extract_range_from_binary_expr_1 (value_range_t *vr,
&& tree_int_cst_sgn (vr0.min) >= 0
&& tree_int_cst_sgn (vr1.min) >= 0)
{
- dmin = dmin.max (tree_to_double_int (vr0.min),
- TYPE_UNSIGNED (expr_type));
- dmin = dmin.max (tree_to_double_int (vr1.min),
- TYPE_UNSIGNED (expr_type));
+ wmin = wi::max (wmin, vr0.min, TYPE_SIGN (expr_type));
+ wmin = wi::max (wmin, vr1.min, TYPE_SIGN (expr_type));
}
/* If either input range contains only negative values
we can truncate the minimum of the result range to the
respective minimum range. */
if (int_cst_range0 && tree_int_cst_sgn (vr0.max) < 0)
- dmin = dmin.max (tree_to_double_int (vr0.min),
- TYPE_UNSIGNED (expr_type));
+ wmin = wi::max (wmin, vr0.min, TYPE_SIGN (expr_type));
if (int_cst_range1 && tree_int_cst_sgn (vr1.max) < 0)
- dmin = dmin.max (tree_to_double_int (vr1.min),
- TYPE_UNSIGNED (expr_type));
- min = double_int_to_tree (expr_type, dmin);
+ wmin = wi::max (wmin, vr1.min, TYPE_SIGN (expr_type));
+ min = wide_int_to_tree (expr_type, wmin);
}
else if (code == BIT_XOR_EXPR)
{
- double_int result_zero_bits, result_one_bits;
- result_zero_bits = (must_be_nonzero0 & must_be_nonzero1)
- | ~(may_be_nonzero0 | may_be_nonzero1);
- result_one_bits = must_be_nonzero0.and_not (may_be_nonzero1)
- | must_be_nonzero1.and_not (may_be_nonzero0);
- max = double_int_to_tree (expr_type, ~result_zero_bits);
- min = double_int_to_tree (expr_type, result_one_bits);
+ wide_int result_zero_bits = ((must_be_nonzero0 & must_be_nonzero1)
+ | ~(may_be_nonzero0 | may_be_nonzero1));
+ wide_int result_one_bits
+ = (must_be_nonzero0.and_not (may_be_nonzero1)
+ | must_be_nonzero1.and_not (may_be_nonzero0));
+ max = wide_int_to_tree (expr_type, ~result_zero_bits);
+ min = wide_int_to_tree (expr_type, result_one_bits);
/* If the range has all positive or all negative values the
result is better than VARYING. */
if (tree_int_cst_sgn (min) < 0
@@ -3303,15 +3230,13 @@ extract_range_from_unary_expr_1 (value_range_t *vr,
if (is_overflow_infinity (vr0.min))
new_min = negative_overflow_infinity (outer_type);
else
- new_min = force_fit_type_double (outer_type,
- tree_to_double_int (vr0.min),
- 0, false);
+ new_min = force_fit_type (outer_type, wi::to_widest (vr0.min),
+ 0, false);
if (is_overflow_infinity (vr0.max))
new_max = positive_overflow_infinity (outer_type);
else
- new_max = force_fit_type_double (outer_type,
- tree_to_double_int (vr0.max),
- 0, false);
+ new_max = force_fit_type (outer_type, wi::to_widest (vr0.max),
+ 0, false);
set_and_canonicalize_value_range (vr, vr0.type,
new_min, new_max, NULL);
return;
@@ -3409,7 +3334,7 @@ extract_range_from_unary_expr_1 (value_range_t *vr,
min = (vr0.min != type_min_value
? int_const_binop (PLUS_EXPR, type_min_value,
- integer_one_node)
+ build_int_cst (TREE_TYPE (type_min_value), 1))
: type_min_value);
}
else
@@ -3925,30 +3850,29 @@ adjust_range_with_scev (value_range_t *vr, struct loop *loop,
&& (TREE_CODE (init) != SSA_NAME
|| get_value_range (init)->type == VR_RANGE))
{
- double_int nit;
+ widest_int nit;
/* We are only entering here for loop header PHI nodes, so using
the number of latch executions is the correct thing to use. */
if (max_loop_iterations (loop, &nit))
{
value_range_t maxvr = VR_INITIALIZER;
- double_int dtmp;
- bool unsigned_p = TYPE_UNSIGNED (TREE_TYPE (step));
- bool overflow = false;
+ signop sgn = TYPE_SIGN (TREE_TYPE (step));
+ bool overflow;
- dtmp = tree_to_double_int (step)
- .mul_with_sign (nit, unsigned_p, &overflow);
+ widest_int wtmp = wi::mul (wi::to_widest (step), nit, sgn,
+ &overflow);
/* If the multiplication overflowed we can't do a meaningful
adjustment. Likewise if the result doesn't fit in the type
of the induction variable. For a signed type we have to
check whether the result has the expected signedness which
is that of the step as number of iterations is unsigned. */
if (!overflow
- && double_int_fits_to_tree_p (TREE_TYPE (init), dtmp)
- && (unsigned_p
- || ((dtmp.high ^ TREE_INT_CST_HIGH (step)) >= 0)))
+ && wi::fits_to_tree_p (wtmp, TREE_TYPE (init))
+ && (sgn == UNSIGNED
+ || wi::gts_p (wtmp, 0) == wi::gts_p (step, 0)))
{
- tem = double_int_to_tree (TREE_TYPE (init), dtmp);
+ tem = wide_int_to_tree (TREE_TYPE (init), wtmp);
extract_range_from_binary_expr (&maxvr, PLUS_EXPR,
TREE_TYPE (init), init, tem);
/* Likewise if the addition did. */
@@ -4773,23 +4697,23 @@ extract_code_and_val_from_cond_with_ops (tree name, enum tree_code cond_code,
(to transform signed values into unsigned) and at the end xor
SGNBIT back. */
-static double_int
-masked_increment (double_int val, double_int mask, double_int sgnbit,
- unsigned int prec)
+static wide_int
+masked_increment (const wide_int &val_in, const wide_int &mask,
+ const wide_int &sgnbit, unsigned int prec)
{
- double_int bit = double_int_one, res;
+ wide_int bit = wi::one (prec), res;
unsigned int i;
- val ^= sgnbit;
+ wide_int val = val_in ^ sgnbit;
for (i = 0; i < prec; i++, bit += bit)
{
res = mask;
- if ((res & bit).is_zero ())
+ if ((res & bit) == 0)
continue;
- res = bit - double_int_one;
+ res = bit - 1;
res = (val + bit).and_not (res);
res &= mask;
- if (res.ugt (val))
+ if (wi::gtu_p (res, val))
return res ^ sgnbit;
}
return val ^ sgnbit;
@@ -4964,8 +4888,8 @@ register_edge_assert_for_2 (tree name, edge e, gimple_stmt_iterator bsi,
gimple def_stmt = SSA_NAME_DEF_STMT (name);
tree name2 = NULL_TREE, names[2], cst2 = NULL_TREE;
tree val2 = NULL_TREE;
- double_int mask = double_int_zero;
unsigned int prec = TYPE_PRECISION (TREE_TYPE (val));
+ wide_int mask = wi::zero (prec);
unsigned int nprec = prec;
enum tree_code rhs_code = ERROR_MARK;
@@ -5034,12 +4958,11 @@ register_edge_assert_for_2 (tree name, edge e, gimple_stmt_iterator bsi,
&& tree_fits_uhwi_p (cst2)
&& INTEGRAL_TYPE_P (TREE_TYPE (name2))
&& IN_RANGE (tree_to_uhwi (cst2), 1, prec - 1)
- && prec <= HOST_BITS_PER_DOUBLE_INT
&& prec == GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (val)))
&& live_on_edge (e, name2)
&& !has_single_use (name2))
{
- mask = double_int::mask (tree_to_uhwi (cst2));
+ mask = wi::mask (tree_to_uhwi (cst2), false, prec);
val2 = fold_binary (LSHIFT_EXPR, TREE_TYPE (val), val, cst2);
}
}
@@ -5062,26 +4985,26 @@ register_edge_assert_for_2 (tree name, edge e, gimple_stmt_iterator bsi,
val2 = fold_convert (type, val2);
}
tmp = fold_build2 (MINUS_EXPR, TREE_TYPE (tmp), tmp, val2);
- new_val = double_int_to_tree (TREE_TYPE (tmp), mask);
+ new_val = wide_int_to_tree (TREE_TYPE (tmp), mask);
new_comp_code = comp_code == EQ_EXPR ? LE_EXPR : GT_EXPR;
}
else if (comp_code == LT_EXPR || comp_code == GE_EXPR)
{
- double_int minval
- = double_int::min_value (prec, TYPE_UNSIGNED (TREE_TYPE (val)));
+ wide_int minval
+ = wi::min_value (prec, TYPE_SIGN (TREE_TYPE (val)));
new_val = val2;
- if (minval == tree_to_double_int (new_val))
+ if (minval == new_val)
new_val = NULL_TREE;
}
else
{
- double_int maxval
- = double_int::max_value (prec, TYPE_UNSIGNED (TREE_TYPE (val)));
- mask |= tree_to_double_int (val2);
+ wide_int maxval
+ = wi::max_value (prec, TYPE_SIGN (TREE_TYPE (val)));
+ mask |= val2;
if (mask == maxval)
new_val = NULL_TREE;
else
- new_val = double_int_to_tree (TREE_TYPE (val2), mask);
+ new_val = wide_int_to_tree (TREE_TYPE (val2), mask);
}
if (new_val)
@@ -5133,7 +5056,6 @@ register_edge_assert_for_2 (tree name, edge e, gimple_stmt_iterator bsi,
&& INTEGRAL_TYPE_P (TREE_TYPE (name2))
&& TREE_CODE (cst2) == INTEGER_CST
&& !integer_zerop (cst2)
- && nprec <= HOST_BITS_PER_DOUBLE_INT
&& (nprec > 1
|| TYPE_UNSIGNED (TREE_TYPE (val))))
{
@@ -5156,27 +5078,24 @@ register_edge_assert_for_2 (tree name, edge e, gimple_stmt_iterator bsi,
}
if (names[0] || names[1])
{
- double_int minv, maxv = double_int_zero, valv, cst2v;
- double_int tem, sgnbit;
- bool valid_p = false, valn = false, cst2n = false;
+ wide_int minv, maxv, valv, cst2v;
+ wide_int tem, sgnbit;
+ bool valid_p = false, valn, cst2n;
enum tree_code ccode = comp_code;
- valv = tree_to_double_int (val).zext (nprec);
- cst2v = tree_to_double_int (cst2).zext (nprec);
- if (!TYPE_UNSIGNED (TREE_TYPE (val)))
- {
- valn = valv.sext (nprec).is_negative ();
- cst2n = cst2v.sext (nprec).is_negative ();
- }
+ valv = wide_int::from (val, nprec, UNSIGNED);
+ cst2v = wide_int::from (cst2, nprec, UNSIGNED);
+ valn = wi::neg_p (valv, TYPE_SIGN (TREE_TYPE (val)));
+ cst2n = wi::neg_p (cst2v, TYPE_SIGN (TREE_TYPE (val)));
/* If CST2 doesn't have most significant bit set,
but VAL is negative, we have comparison like
if ((x & 0x123) > -4) (always true). Just give up. */
if (!cst2n && valn)
ccode = ERROR_MARK;
if (cst2n)
- sgnbit = double_int_one.llshift (nprec - 1, nprec).zext (nprec);
+ sgnbit = wi::set_bit_in_zero (nprec - 1, nprec);
else
- sgnbit = double_int_zero;
+ sgnbit = wi::zero (nprec);
minv = valv & cst2v;
switch (ccode)
{
@@ -5186,33 +5105,30 @@ register_edge_assert_for_2 (tree name, edge e, gimple_stmt_iterator bsi,
have folded the comparison into false) and
maximum unsigned value is VAL | ~CST2. */
maxv = valv | ~cst2v;
- maxv = maxv.zext (nprec);
valid_p = true;
break;
+
case NE_EXPR:
tem = valv | ~cst2v;
- tem = tem.zext (nprec);
/* If VAL is 0, handle (X & CST2) != 0 as (X & CST2) > 0U. */
- if (valv.is_zero ())
+ if (valv == 0)
{
cst2n = false;
- sgnbit = double_int_zero;
+ sgnbit = wi::zero (nprec);
goto gt_expr;
}
/* If (VAL | ~CST2) is all ones, handle it as
(X & CST2) < VAL. */
- if (tem == double_int::mask (nprec))
+ if (tem == -1)
{
cst2n = false;
valn = false;
- sgnbit = double_int_zero;
+ sgnbit = wi::zero (nprec);
goto lt_expr;
}
- if (!cst2n
- && cst2v.sext (nprec).is_negative ())
- sgnbit
- = double_int_one.llshift (nprec - 1, nprec).zext (nprec);
- if (!sgnbit.is_zero ())
+ if (!cst2n && wi::neg_p (cst2v))
+ sgnbit = wi::set_bit_in_zero (nprec - 1, nprec);
+ if (sgnbit != 0)
{
if (valv == sgnbit)
{
@@ -5220,15 +5136,16 @@ register_edge_assert_for_2 (tree name, edge e, gimple_stmt_iterator bsi,
valn = true;
goto gt_expr;
}
- if (tem == double_int::mask (nprec - 1))
+ if (tem == wi::mask (nprec - 1, false, nprec))
{
cst2n = true;
goto lt_expr;
}
if (!cst2n)
- sgnbit = double_int_zero;
+ sgnbit = wi::zero (nprec);
}
break;
+
case GE_EXPR:
/* Minimum unsigned value for >= if (VAL & CST2) == VAL
is VAL and maximum unsigned value is ~0. For signed
@@ -5243,9 +5160,10 @@ register_edge_assert_for_2 (tree name, edge e, gimple_stmt_iterator bsi,
if (minv == valv)
break;
}
- maxv = double_int::mask (nprec - (cst2n ? 1 : 0));
+ maxv = wi::mask (nprec - (cst2n ? 1 : 0), false, nprec);
valid_p = true;
break;
+
case GT_EXPR:
gt_expr:
/* Find out smallest MINV where MINV > VAL
@@ -5254,9 +5172,10 @@ register_edge_assert_for_2 (tree name, edge e, gimple_stmt_iterator bsi,
minv = masked_increment (valv, cst2v, sgnbit, nprec);
if (minv == valv)
break;
- maxv = double_int::mask (nprec - (cst2n ? 1 : 0));
+ maxv = wi::mask (nprec - (cst2n ? 1 : 0), false, nprec);
valid_p = true;
break;
+
case LE_EXPR:
/* Minimum unsigned value for <= is 0 and maximum
unsigned value is VAL | ~CST2 if (VAL & CST2) == VAL.
@@ -5273,13 +5192,13 @@ register_edge_assert_for_2 (tree name, edge e, gimple_stmt_iterator bsi,
maxv = masked_increment (valv, cst2v, sgnbit, nprec);
if (maxv == valv)
break;
- maxv -= double_int_one;
+ maxv -= 1;
}
maxv |= ~cst2v;
- maxv = maxv.zext (nprec);
minv = sgnbit;
valid_p = true;
break;
+
case LT_EXPR:
lt_expr:
/* Minimum unsigned value for < is 0 and maximum
@@ -5302,17 +5221,17 @@ register_edge_assert_for_2 (tree name, edge e, gimple_stmt_iterator bsi,
if (maxv == valv)
break;
}
- maxv -= double_int_one;
+ maxv -= 1;
maxv |= ~cst2v;
- maxv = maxv.zext (nprec);
minv = sgnbit;
valid_p = true;
break;
+
default:
break;
}
if (valid_p
- && (maxv - minv).zext (nprec) != double_int::mask (nprec))
+ && (maxv - minv) != -1)
{
tree tmp, new_val, type;
int i;
@@ -5320,7 +5239,7 @@ register_edge_assert_for_2 (tree name, edge e, gimple_stmt_iterator bsi,
for (i = 0; i < 2; i++)
if (names[i])
{
- double_int maxv2 = maxv;
+ wide_int maxv2 = maxv;
tmp = names[i];
type = TREE_TYPE (names[i]);
if (!TYPE_UNSIGNED (type))
@@ -5328,13 +5247,13 @@ register_edge_assert_for_2 (tree name, edge e, gimple_stmt_iterator bsi,
type = build_nonstandard_integer_type (nprec, 1);
tmp = build1 (NOP_EXPR, type, names[i]);
}
- if (!minv.is_zero ())
+ if (minv != 0)
{
tmp = build2 (PLUS_EXPR, type, tmp,
- double_int_to_tree (type, -minv));
+ wide_int_to_tree (type, -minv));
maxv2 = maxv - minv;
}
- new_val = double_int_to_tree (type, maxv2);
+ new_val = wide_int_to_tree (type, maxv2);
if (dump_file)
{
@@ -6201,7 +6120,8 @@ check_array_ref (location_t location, tree ref, bool ignore_off_by_one)
}
low_bound = array_ref_low_bound (ref);
- up_bound_p1 = int_const_binop (PLUS_EXPR, up_bound, integer_one_node);
+ up_bound_p1 = int_const_binop (PLUS_EXPR, up_bound,
+ build_int_cst (TREE_TYPE (up_bound), 1));
if (TREE_CODE (low_sub) == SSA_NAME)
{
@@ -6298,7 +6218,7 @@ search_for_addr_array (tree t, location_t location)
{
tree tem = TREE_OPERAND (TREE_OPERAND (t, 0), 0);
tree low_bound, up_bound, el_sz;
- double_int idx;
+ offset_int idx;
if (TREE_CODE (TREE_TYPE (tem)) != ARRAY_TYPE
|| TREE_CODE (TREE_TYPE (TREE_TYPE (tem))) == ARRAY_TYPE
|| !TYPE_DOMAIN (TREE_TYPE (tem)))
@@ -6316,8 +6236,8 @@ search_for_addr_array (tree t, location_t location)
return;
idx = mem_ref_offset (t);
- idx = idx.sdiv (tree_to_double_int (el_sz), TRUNC_DIV_EXPR);
- if (idx.slt (double_int_zero))
+ idx = wi::sdiv_trunc (idx, wi::to_offset (el_sz));
+ if (wi::lts_p (idx, 0))
{
if (dump_file && (dump_flags & TDF_DETAILS))
{
@@ -6329,9 +6249,8 @@ search_for_addr_array (tree t, location_t location)
"array subscript is below array bounds");
TREE_NO_WARNING (t) = 1;
}
- else if (idx.sgt (tree_to_double_int (up_bound)
- - tree_to_double_int (low_bound)
- + double_int_one))
+ else if (wi::gts_p (idx, (wi::to_offset (up_bound)
+ - wi::to_offset (low_bound) + 1)))
{
if (dump_file && (dump_flags & TDF_DETAILS))
{
@@ -6512,8 +6431,7 @@ maybe_set_nonzero_bits (basic_block bb, tree var)
return;
}
cst = gimple_assign_rhs2 (stmt);
- set_nonzero_bits (var, (get_nonzero_bits (var)
- & ~tree_to_double_int (cst)));
+ set_nonzero_bits (var, wi::bit_and_not (get_nonzero_bits (var), cst));
}
/* Convert range assertion expressions into the implied copies and
@@ -6598,8 +6516,8 @@ remove_range_assertions (void)
single_pred (bb)))
{
set_range_info (var, SSA_NAME_RANGE_TYPE (lhs),
- SSA_NAME_RANGE_INFO (lhs)->min,
- SSA_NAME_RANGE_INFO (lhs)->max);
+ SSA_NAME_RANGE_INFO (lhs)->get_min (),
+ SSA_NAME_RANGE_INFO (lhs)->get_max ());
maybe_set_nonzero_bits (bb, var);
}
}
@@ -7644,9 +7562,11 @@ union_ranges (enum value_range_type *vr0type,
&& vrp_val_is_max (vr1max))
{
tree min = int_const_binop (PLUS_EXPR,
- *vr0max, integer_one_node);
+ *vr0max,
+ build_int_cst (TREE_TYPE (*vr0max), 1));
tree max = int_const_binop (MINUS_EXPR,
- vr1min, integer_one_node);
+ vr1min,
+ build_int_cst (TREE_TYPE (vr1min), 1));
if (!operand_less_p (max, min))
{
*vr0type = VR_ANTI_RANGE;
@@ -7668,9 +7588,11 @@ union_ranges (enum value_range_type *vr0type,
&& vrp_val_is_max (*vr0max))
{
tree min = int_const_binop (PLUS_EXPR,
- vr1max, integer_one_node);
+ vr1max,
+ build_int_cst (TREE_TYPE (vr1max), 1));
tree max = int_const_binop (MINUS_EXPR,
- *vr0min, integer_one_node);
+ *vr0min,
+ build_int_cst (TREE_TYPE (*vr0min), 1));
if (!operand_less_p (max, min))
{
*vr0type = VR_ANTI_RANGE;
@@ -7706,9 +7628,11 @@ union_ranges (enum value_range_type *vr0type,
{
/* Arbitrarily choose the right or left gap. */
if (!mineq && TREE_CODE (vr1min) == INTEGER_CST)
- *vr0max = int_const_binop (MINUS_EXPR, vr1min, integer_one_node);
+ *vr0max = int_const_binop (MINUS_EXPR, vr1min,
+ build_int_cst (TREE_TYPE (vr1min), 1));
else if (!maxeq && TREE_CODE (vr1max) == INTEGER_CST)
- *vr0min = int_const_binop (PLUS_EXPR, vr1max, integer_one_node);
+ *vr0min = int_const_binop (PLUS_EXPR, vr1max,
+ build_int_cst (TREE_TYPE (vr1max), 1));
else
goto give_up;
}
@@ -7739,12 +7663,14 @@ union_ranges (enum value_range_type *vr0type,
*vr0type = VR_ANTI_RANGE;
if (!mineq && TREE_CODE (*vr0min) == INTEGER_CST)
{
- *vr0max = int_const_binop (MINUS_EXPR, *vr0min, integer_one_node);
+ *vr0max = int_const_binop (MINUS_EXPR, *vr0min,
+ build_int_cst (TREE_TYPE (*vr0min), 1));
*vr0min = vr1min;
}
else if (!maxeq && TREE_CODE (*vr0max) == INTEGER_CST)
{
- *vr0min = int_const_binop (PLUS_EXPR, *vr0max, integer_one_node);
+ *vr0min = int_const_binop (PLUS_EXPR, *vr0max,
+ build_int_cst (TREE_TYPE (*vr0max), 1));
*vr0max = vr1max;
}
else
@@ -7773,7 +7699,8 @@ union_ranges (enum value_range_type *vr0type,
&& vr1type == VR_RANGE)
{
if (TREE_CODE (vr1min) == INTEGER_CST)
- *vr0max = int_const_binop (MINUS_EXPR, vr1min, integer_one_node);
+ *vr0max = int_const_binop (MINUS_EXPR, vr1min,
+ build_int_cst (TREE_TYPE (vr1min), 1));
else
goto give_up;
}
@@ -7783,7 +7710,8 @@ union_ranges (enum value_range_type *vr0type,
if (TREE_CODE (*vr0max) == INTEGER_CST)
{
*vr0type = vr1type;
- *vr0min = int_const_binop (PLUS_EXPR, *vr0max, integer_one_node);
+ *vr0min = int_const_binop (PLUS_EXPR, *vr0max,
+ build_int_cst (TREE_TYPE (*vr0max), 1));
*vr0max = vr1max;
}
else
@@ -7808,7 +7736,8 @@ union_ranges (enum value_range_type *vr0type,
&& vr1type == VR_RANGE)
{
if (TREE_CODE (vr1max) == INTEGER_CST)
- *vr0min = int_const_binop (PLUS_EXPR, vr1max, integer_one_node);
+ *vr0min = int_const_binop (PLUS_EXPR, vr1max,
+ build_int_cst (TREE_TYPE (vr1max), 1));
else
goto give_up;
}
@@ -7819,7 +7748,8 @@ union_ranges (enum value_range_type *vr0type,
{
*vr0type = vr1type;
*vr0min = vr1min;
- *vr0max = int_const_binop (MINUS_EXPR, *vr0min, integer_one_node);
+ *vr0max = int_const_binop (MINUS_EXPR, *vr0min,
+ build_int_cst (TREE_TYPE (*vr0min), 1));
}
else
goto give_up;
@@ -7934,7 +7864,8 @@ intersect_ranges (enum value_range_type *vr0type,
if (mineq)
{
if (TREE_CODE (vr1max) == INTEGER_CST)
- *vr0min = int_const_binop (PLUS_EXPR, vr1max, integer_one_node);
+ *vr0min = int_const_binop (PLUS_EXPR, vr1max,
+ build_int_cst (TREE_TYPE (vr1max), 1));
else
*vr0min = vr1max;
}
@@ -7943,7 +7874,7 @@ intersect_ranges (enum value_range_type *vr0type,
{
if (TREE_CODE (vr1min) == INTEGER_CST)
*vr0max = int_const_binop (MINUS_EXPR, vr1min,
- integer_one_node);
+ build_int_cst (TREE_TYPE (vr1min), 1));
else
*vr0max = vr1min;
}
@@ -7989,7 +7920,7 @@ intersect_ranges (enum value_range_type *vr0type,
*vr0type = VR_RANGE;
if (TREE_CODE (*vr0max) == INTEGER_CST)
*vr0min = int_const_binop (PLUS_EXPR, *vr0max,
- integer_one_node);
+ build_int_cst (TREE_TYPE (*vr0max), 1));
else
*vr0min = *vr0max;
*vr0max = vr1max;
@@ -8000,7 +7931,7 @@ intersect_ranges (enum value_range_type *vr0type,
*vr0type = VR_RANGE;
if (TREE_CODE (*vr0min) == INTEGER_CST)
*vr0max = int_const_binop (MINUS_EXPR, *vr0min,
- integer_one_node);
+ build_int_cst (TREE_TYPE (*vr0min), 1));
else
*vr0max = *vr0min;
*vr0min = vr1min;
@@ -8052,7 +7983,7 @@ intersect_ranges (enum value_range_type *vr0type,
{
if (TREE_CODE (vr1min) == INTEGER_CST)
*vr0max = int_const_binop (MINUS_EXPR, vr1min,
- integer_one_node);
+ build_int_cst (TREE_TYPE (vr1min), 1));
else
*vr0max = vr1min;
}
@@ -8062,7 +7993,7 @@ intersect_ranges (enum value_range_type *vr0type,
*vr0type = VR_RANGE;
if (TREE_CODE (*vr0max) == INTEGER_CST)
*vr0min = int_const_binop (PLUS_EXPR, *vr0max,
- integer_one_node);
+ build_int_cst (TREE_TYPE (*vr0max), 1));
else
*vr0min = *vr0max;
*vr0max = vr1max;
@@ -8086,7 +8017,7 @@ intersect_ranges (enum value_range_type *vr0type,
{
if (TREE_CODE (vr1max) == INTEGER_CST)
*vr0min = int_const_binop (PLUS_EXPR, vr1max,
- integer_one_node);
+ build_int_cst (TREE_TYPE (vr1max), 1));
else
*vr0min = vr1max;
}
@@ -8096,7 +8027,7 @@ intersect_ranges (enum value_range_type *vr0type,
*vr0type = VR_RANGE;
if (TREE_CODE (*vr0min) == INTEGER_CST)
*vr0max = int_const_binop (MINUS_EXPR, *vr0min,
- integer_one_node);
+ build_int_cst (TREE_TYPE (*vr0min), 1));
else
*vr0max = *vr0min;
*vr0min = vr1min;
@@ -8503,7 +8434,8 @@ simplify_truth_ops_using_ranges (gimple_stmt_iterator *gsi, gimple stmt)
if (rhs_code == EQ_EXPR)
{
if (TREE_CODE (op1) == INTEGER_CST)
- op1 = int_const_binop (BIT_XOR_EXPR, op1, integer_one_node);
+ op1 = int_const_binop (BIT_XOR_EXPR, op1,
+ build_int_cst (TREE_TYPE (op1), 1));
else
return false;
}
@@ -8689,9 +8621,9 @@ simplify_bit_ops_using_ranges (gimple_stmt_iterator *gsi, gimple stmt)
tree op = NULL_TREE;
value_range_t vr0 = VR_INITIALIZER;
value_range_t vr1 = VR_INITIALIZER;
- double_int may_be_nonzero0, may_be_nonzero1;
- double_int must_be_nonzero0, must_be_nonzero1;
- double_int mask;
+ wide_int may_be_nonzero0, may_be_nonzero1;
+ wide_int must_be_nonzero0, must_be_nonzero1;
+ wide_int mask;
if (TREE_CODE (op0) == SSA_NAME)
vr0 = *(get_value_range (op0));
@@ -8707,22 +8639,24 @@ simplify_bit_ops_using_ranges (gimple_stmt_iterator *gsi, gimple stmt)
else
return false;
- if (!zero_nonzero_bits_from_vr (&vr0, &may_be_nonzero0, &must_be_nonzero0))
+ if (!zero_nonzero_bits_from_vr (TREE_TYPE (op0), &vr0, &may_be_nonzero0,
+ &must_be_nonzero0))
return false;
- if (!zero_nonzero_bits_from_vr (&vr1, &may_be_nonzero1, &must_be_nonzero1))
+ if (!zero_nonzero_bits_from_vr (TREE_TYPE (op1), &vr1, &may_be_nonzero1,
+ &must_be_nonzero1))
return false;
switch (gimple_assign_rhs_code (stmt))
{
case BIT_AND_EXPR:
mask = may_be_nonzero0.and_not (must_be_nonzero1);
- if (mask.is_zero ())
+ if (mask == 0)
{
op = op0;
break;
}
mask = may_be_nonzero1.and_not (must_be_nonzero0);
- if (mask.is_zero ())
+ if (mask == 0)
{
op = op1;
break;
@@ -8730,13 +8664,13 @@ simplify_bit_ops_using_ranges (gimple_stmt_iterator *gsi, gimple stmt)
break;
case BIT_IOR_EXPR:
mask = may_be_nonzero0.and_not (must_be_nonzero1);
- if (mask.is_zero ())
+ if (mask == 0)
{
op = op1;
break;
}
mask = may_be_nonzero1.and_not (must_be_nonzero0);
- if (mask.is_zero ())
+ if (mask == 0)
{
op = op0;
break;
@@ -8822,11 +8756,12 @@ test_for_singularity (enum tree_code cond_code, tree op0,
by PRECISION and UNSIGNED_P. */
static bool
-range_fits_type_p (value_range_t *vr, unsigned precision, bool unsigned_p)
+range_fits_type_p (value_range_t *vr, unsigned dest_precision, signop dest_sgn)
{
tree src_type;
unsigned src_precision;
- double_int tem;
+ widest_int tem;
+ signop src_sgn;
/* We can only handle integral and pointer types. */
src_type = TREE_TYPE (vr->min);
@@ -8834,13 +8769,13 @@ range_fits_type_p (value_range_t *vr, unsigned precision, bool unsigned_p)
&& !POINTER_TYPE_P (src_type))
return false;
- /* An extension is fine unless VR is signed and unsigned_p,
+ /* An extension is fine unless VR is SIGNED and dest_sgn is UNSIGNED,
and so is an identity transform. */
src_precision = TYPE_PRECISION (TREE_TYPE (vr->min));
- if ((src_precision < precision
- && !(unsigned_p && !TYPE_UNSIGNED (src_type)))
- || (src_precision == precision
- && TYPE_UNSIGNED (src_type) == unsigned_p))
+ src_sgn = TYPE_SIGN (src_type);
+ if ((src_precision < dest_precision
+ && !(dest_sgn == UNSIGNED && src_sgn == SIGNED))
+ || (src_precision == dest_precision && src_sgn == dest_sgn))
return true;
/* Now we can only handle ranges with constant bounds. */
@@ -8849,21 +8784,21 @@ range_fits_type_p (value_range_t *vr, unsigned precision, bool unsigned_p)
|| TREE_CODE (vr->max) != INTEGER_CST)
return false;
- /* For sign changes, the MSB of the double_int has to be clear.
+ /* For sign changes, the MSB of the wide_int has to be clear.
An unsigned value with its MSB set cannot be represented by
- a signed double_int, while a negative value cannot be represented
- by an unsigned double_int. */
- if (TYPE_UNSIGNED (src_type) != unsigned_p
- && (TREE_INT_CST_HIGH (vr->min) | TREE_INT_CST_HIGH (vr->max)) < 0)
+ a signed wide_int, while a negative value cannot be represented
+ by an unsigned wide_int. */
+ if (src_sgn != dest_sgn
+ && (wi::lts_p (vr->min, 0) || wi::lts_p (vr->max, 0)))
return false;
/* Then we can perform the conversion on both ends and compare
the result for equality. */
- tem = tree_to_double_int (vr->min).ext (precision, unsigned_p);
- if (tree_to_double_int (vr->min) != tem)
+ tem = wi::ext (wi::to_widest (vr->min), dest_precision, dest_sgn);
+ if (tem != wi::to_widest (vr->min))
return false;
- tem = tree_to_double_int (vr->max).ext (precision, unsigned_p);
- if (tree_to_double_int (vr->max) != tem)
+ tem = wi::ext (wi::to_widest (vr->max), dest_precision, dest_sgn);
+ if (tem != wi::to_widest (vr->max))
return false;
return true;
@@ -8978,7 +8913,7 @@ simplify_cond_using_ranges (gimple stmt)
if (range_int_cst_p (vr)
&& range_fits_type_p (vr,
TYPE_PRECISION (TREE_TYPE (op0)),
- TYPE_UNSIGNED (TREE_TYPE (op0)))
+ TYPE_SIGN (TREE_TYPE (op0)))
&& int_fits_type_p (op1, TREE_TYPE (innerop))
/* The range must not have overflowed, or if it did overflow
we must not be wrapping/trapping overflow and optimizing
@@ -9123,9 +9058,9 @@ simplify_conversion_using_ranges (gimple stmt)
tree innerop, middleop, finaltype;
gimple def_stmt;
value_range_t *innervr;
- bool inner_unsigned_p, middle_unsigned_p, final_unsigned_p;
+ signop inner_sgn, middle_sgn, final_sgn;
unsigned inner_prec, middle_prec, final_prec;
- double_int innermin, innermed, innermax, middlemin, middlemed, middlemax;
+ widest_int innermin, innermed, innermax, middlemin, middlemed, middlemax;
finaltype = TREE_TYPE (gimple_assign_lhs (stmt));
if (!INTEGRAL_TYPE_P (finaltype))
@@ -9149,8 +9084,8 @@ simplify_conversion_using_ranges (gimple stmt)
/* Simulate the conversion chain to check if the result is equal if
the middle conversion is removed. */
- innermin = tree_to_double_int (innervr->min);
- innermax = tree_to_double_int (innervr->max);
+ innermin = wi::to_widest (innervr->min);
+ innermax = wi::to_widest (innervr->max);
inner_prec = TYPE_PRECISION (TREE_TYPE (innerop));
middle_prec = TYPE_PRECISION (TREE_TYPE (middleop));
@@ -9158,34 +9093,35 @@ simplify_conversion_using_ranges (gimple stmt)
/* If the first conversion is not injective, the second must not
be widening. */
- if ((innermax - innermin).ugt (double_int::mask (middle_prec))
+ if (wi::gtu_p (innermax - innermin,
+ wi::mask <widest_int> (middle_prec, false))
&& middle_prec < final_prec)
return false;
/* We also want a medium value so that we can track the effect that
narrowing conversions with sign change have. */
- inner_unsigned_p = TYPE_UNSIGNED (TREE_TYPE (innerop));
- if (inner_unsigned_p)
- innermed = double_int::mask (inner_prec).lrshift (1, inner_prec);
+ inner_sgn = TYPE_SIGN (TREE_TYPE (innerop));
+ if (inner_sgn == UNSIGNED)
+ innermed = wi::shifted_mask <widest_int> (1, inner_prec - 1, false);
else
- innermed = double_int_zero;
- if (innermin.cmp (innermed, inner_unsigned_p) >= 0
- || innermed.cmp (innermax, inner_unsigned_p) >= 0)
+ innermed = 0;
+ if (wi::cmp (innermin, innermed, inner_sgn) >= 0
+ || wi::cmp (innermed, innermax, inner_sgn) >= 0)
innermed = innermin;
- middle_unsigned_p = TYPE_UNSIGNED (TREE_TYPE (middleop));
- middlemin = innermin.ext (middle_prec, middle_unsigned_p);
- middlemed = innermed.ext (middle_prec, middle_unsigned_p);
- middlemax = innermax.ext (middle_prec, middle_unsigned_p);
+ middle_sgn = TYPE_SIGN (TREE_TYPE (middleop));
+ middlemin = wi::ext (innermin, middle_prec, middle_sgn);
+ middlemed = wi::ext (innermed, middle_prec, middle_sgn);
+ middlemax = wi::ext (innermax, middle_prec, middle_sgn);
/* Require that the final conversion applied to both the original
and the intermediate range produces the same result. */
- final_unsigned_p = TYPE_UNSIGNED (finaltype);
- if (middlemin.ext (final_prec, final_unsigned_p)
- != innermin.ext (final_prec, final_unsigned_p)
- || middlemed.ext (final_prec, final_unsigned_p)
- != innermed.ext (final_prec, final_unsigned_p)
- || middlemax.ext (final_prec, final_unsigned_p)
- != innermax.ext (final_prec, final_unsigned_p))
+ final_sgn = TYPE_SIGN (finaltype);
+ if (wi::ext (middlemin, final_prec, final_sgn)
+ != wi::ext (innermin, final_prec, final_sgn)
+ || wi::ext (middlemed, final_prec, final_sgn)
+ != wi::ext (innermed, final_prec, final_sgn)
+ || wi::ext (middlemax, final_prec, final_sgn)
+ != wi::ext (innermax, final_prec, final_sgn))
return false;
gimple_assign_set_rhs1 (stmt, innerop);
@@ -9215,8 +9151,7 @@ simplify_float_conversion_using_ranges (gimple_stmt_iterator *gsi, gimple stmt)
if (TYPE_UNSIGNED (TREE_TYPE (rhs1))
&& (can_float_p (fltmode, TYPE_MODE (TREE_TYPE (rhs1)), 0)
!= CODE_FOR_nothing)
- && range_fits_type_p (vr, GET_MODE_PRECISION
- (TYPE_MODE (TREE_TYPE (rhs1))), 0))
+ && range_fits_type_p (vr, TYPE_PRECISION (TREE_TYPE (rhs1)), SIGNED))
mode = TYPE_MODE (TREE_TYPE (rhs1));
/* If we can do the conversion in the current input mode do nothing. */
else if (can_float_p (fltmode, TYPE_MODE (TREE_TYPE (rhs1)),
@@ -9233,7 +9168,7 @@ simplify_float_conversion_using_ranges (gimple_stmt_iterator *gsi, gimple stmt)
or if the value-range does not fit in the signed type
try with a wider mode. */
if (can_float_p (fltmode, mode, 0) != CODE_FOR_nothing
- && range_fits_type_p (vr, GET_MODE_PRECISION (mode), 0))
+ && range_fits_type_p (vr, GET_MODE_PRECISION (mode), SIGNED))
break;
mode = GET_MODE_WIDER_MODE (mode);
@@ -9701,9 +9636,8 @@ vrp_finalize (void)
&& (TREE_CODE (vr_value[i]->max) == INTEGER_CST)
&& (vr_value[i]->type == VR_RANGE
|| vr_value[i]->type == VR_ANTI_RANGE))
- set_range_info (name, vr_value[i]->type,
- tree_to_double_int (vr_value[i]->min),
- tree_to_double_int (vr_value[i]->max));
+ set_range_info (name, vr_value[i]->type, vr_value[i]->min,
+ vr_value[i]->max);
}
/* Free allocated memory. */
diff --git a/gcc/tree.c b/gcc/tree.c
index 8b44ecc7088..a578c926923 100644
--- a/gcc/tree.c
+++ b/gcc/tree.c
@@ -75,6 +75,7 @@ along with GCC; see the file COPYING3. If not see
#include "except.h"
#include "debug.h"
#include "intl.h"
+#include "wide-int.h"
/* Tree code classes. */
@@ -576,7 +577,7 @@ init_ttree (void)
int_cst_hash_table = htab_create_ggc (1024, int_cst_hash_hash,
int_cst_hash_eq, NULL);
- int_cst_node = make_node (INTEGER_CST);
+ int_cst_node = make_int_cst (1, 1);
cl_option_hash_table = htab_create_ggc (64, cl_option_hash_hash,
cl_option_hash_eq, NULL);
@@ -603,7 +604,7 @@ decl_assembler_name (tree decl)
/* Compute the number of bytes occupied by a tree with code CODE.
This function cannot be used for nodes that have variable sizes,
- including TREE_VEC, STRING_CST, and CALL_EXPR. */
+ including TREE_VEC, INTEGER_CST, STRING_CST, and CALL_EXPR. */
size_t
tree_code_size (enum tree_code code)
{
@@ -651,7 +652,7 @@ tree_code_size (enum tree_code code)
case tcc_constant: /* a constant */
switch (code)
{
- case INTEGER_CST: return sizeof (struct tree_int_cst);
+ case INTEGER_CST: gcc_unreachable ();
case REAL_CST: return sizeof (struct tree_real_cst);
case FIXED_CST: return sizeof (struct tree_fixed_cst);
case COMPLEX_CST: return sizeof (struct tree_complex);
@@ -698,6 +699,10 @@ tree_size (const_tree node)
const enum tree_code code = TREE_CODE (node);
switch (code)
{
+ case INTEGER_CST:
+ return (sizeof (struct tree_int_cst)
+ + (TREE_INT_CST_EXT_NUNITS (node) - 1) * sizeof (HOST_WIDE_INT));
+
case TREE_BINFO:
return (offsetof (struct tree_binfo, base_binfos)
+ vec<tree, va_gc>
@@ -830,8 +835,9 @@ allocate_decl_uid (void)
/* Return a newly allocated node of code CODE. For decl and type
nodes, some other fields are initialized. The rest of the node is
- initialized to zero. This function cannot be used for TREE_VEC or
- OMP_CLAUSE nodes, which is enforced by asserts in tree_code_size.
+ initialized to zero. This function cannot be used for TREE_VEC,
+ INTEGER_CST or OMP_CLAUSE nodes, which is enforced by asserts in
+ tree_code_size.
Achoo! I got a code in the node. */
@@ -1019,6 +1025,53 @@ copy_list (tree list)
}
+/* Return the value that TREE_INT_CST_EXT_NUNITS should have for an
+ INTEGER_CST with value CST and type TYPE. */
+
+static unsigned int
+get_int_cst_ext_nunits (tree type, const wide_int &cst)
+{
+ gcc_checking_assert (cst.get_precision () == TYPE_PRECISION (type));
+ /* We need an extra zero HWI if CST is an unsigned integer with its
+ upper bit set, and if CST occupies a whole number of HWIs. */
+ if (TYPE_UNSIGNED (type)
+ && wi::neg_p (cst)
+ && (cst.get_precision () % HOST_BITS_PER_WIDE_INT) == 0)
+ return cst.get_precision () / HOST_BITS_PER_WIDE_INT + 1;
+ return cst.get_len ();
+}
+
+/* Return a new INTEGER_CST with value CST and type TYPE. */
+
+static tree
+build_new_int_cst (tree type, const wide_int &cst)
+{
+ unsigned int len = cst.get_len ();
+ unsigned int ext_len = get_int_cst_ext_nunits (type, cst);
+ tree nt = make_int_cst (len, ext_len);
+
+ if (len < ext_len)
+ {
+ --ext_len;
+ TREE_INT_CST_ELT (nt, ext_len) = 0;
+ for (unsigned int i = len; i < ext_len; ++i)
+ TREE_INT_CST_ELT (nt, i) = -1;
+ }
+ else if (TYPE_UNSIGNED (type)
+ && cst.get_precision () < len * HOST_BITS_PER_WIDE_INT)
+ {
+ len--;
+ TREE_INT_CST_ELT (nt, len)
+ = zext_hwi (cst.elt (len),
+ cst.get_precision () % HOST_BITS_PER_WIDE_INT);
+ }
+
+ for (unsigned int i = 0; i < len; i++)
+ TREE_INT_CST_ELT (nt, i) = cst.elt (i);
+ TREE_TYPE (nt) = type;
+ return nt;
+}
+
/* Create an INT_CST node with a LOW value sign extended to TYPE. */
tree
@@ -1028,7 +1081,13 @@ build_int_cst (tree type, HOST_WIDE_INT low)
if (!type)
type = integer_type_node;
- return double_int_to_tree (type, double_int::from_shwi (low));
+ return wide_int_to_tree (type, wi::shwi (low, TYPE_PRECISION (type)));
+}
+
+tree
+build_int_cstu (tree type, unsigned HOST_WIDE_INT cst)
+{
+ return wide_int_to_tree (type, wi::uhwi (cst, TYPE_PRECISION (type)));
}
/* Create an INT_CST node with a LOW value sign extended to TYPE. */
@@ -1037,8 +1096,7 @@ tree
build_int_cst_type (tree type, HOST_WIDE_INT low)
{
gcc_assert (type);
-
- return double_int_to_tree (type, double_int::from_shwi (low));
+ return wide_int_to_tree (type, wi::shwi (low, TYPE_PRECISION (type)));
}
/* Constructs tree in type TYPE from with value given by CST. Signedness
@@ -1047,28 +1105,10 @@ build_int_cst_type (tree type, HOST_WIDE_INT low)
tree
double_int_to_tree (tree type, double_int cst)
{
- bool sign_extended_type = !TYPE_UNSIGNED (type);
-
- cst = cst.ext (TYPE_PRECISION (type), !sign_extended_type);
-
- return build_int_cst_wide (type, cst.low, cst.high);
-}
-
-/* Returns true if CST fits into range of TYPE. Signedness of CST is assumed
- to be the same as the signedness of TYPE. */
-
-bool
-double_int_fits_to_tree_p (const_tree type, double_int cst)
-{
- bool sign_extended_type = !TYPE_UNSIGNED (type);
-
- double_int ext
- = cst.ext (TYPE_PRECISION (type), !sign_extended_type);
-
- return cst == ext;
+ return wide_int_to_tree (type, widest_int::from (cst, TYPE_SIGN (type)));
}
-/* We force the double_int CST to the range of the type TYPE by sign or
+/* We force the wide_int CST to the range of the type TYPE by sign or
zero extending it. OVERFLOWABLE indicates if we are interested in
overflow of the value, when >0 we are only interested in signed
overflow, for <0 we are interested in any overflow. OVERFLOWED
@@ -1079,34 +1119,32 @@ double_int_fits_to_tree_p (const_tree type, double_int cst)
OVERFLOWED is nonzero,
or OVERFLOWABLE is >0 and signed overflow occurs
or OVERFLOWABLE is <0 and any overflow occurs
- We return a new tree node for the extended double_int. The node
+ We return a new tree node for the extended wide_int. The node
is shared if no overflow flags are set. */
tree
-force_fit_type_double (tree type, double_int cst, int overflowable,
- bool overflowed)
+force_fit_type (tree type, const wide_int_ref &cst,
+ int overflowable, bool overflowed)
{
- bool sign_extended_type = !TYPE_UNSIGNED (type);
+ signop sign = TYPE_SIGN (type);
/* If we need to set overflow flags, return a new unshared node. */
- if (overflowed || !double_int_fits_to_tree_p (type, cst))
+ if (overflowed || !wi::fits_to_tree_p (cst, type))
{
if (overflowed
|| overflowable < 0
- || (overflowable > 0 && sign_extended_type))
+ || (overflowable > 0 && sign == SIGNED))
{
- tree t = make_node (INTEGER_CST);
- TREE_INT_CST (t)
- = cst.ext (TYPE_PRECISION (type), !sign_extended_type);
- TREE_TYPE (t) = type;
+ wide_int tmp = wide_int::from (cst, TYPE_PRECISION (type), sign);
+ tree t = build_new_int_cst (type, tmp);
TREE_OVERFLOW (t) = 1;
return t;
}
}
/* Else build a shared node. */
- return double_int_to_tree (type, cst);
+ return wide_int_to_tree (type, cst);
}
/* These are the hash table functions for the hash table of INTEGER_CST
@@ -1118,9 +1156,13 @@ static hashval_t
int_cst_hash_hash (const void *x)
{
const_tree const t = (const_tree) x;
+ hashval_t code = htab_hash_pointer (TREE_TYPE (t));
+ int i;
- return (TREE_INT_CST_HIGH (t) ^ TREE_INT_CST_LOW (t)
- ^ htab_hash_pointer (TREE_TYPE (t)));
+ for (i = 0; i < TREE_INT_CST_NUNITS (t); i++)
+ code ^= TREE_INT_CST_ELT (t, i);
+
+ return code;
}
/* Return nonzero if the value represented by *X (an INTEGER_CST tree node)
@@ -1132,121 +1174,167 @@ int_cst_hash_eq (const void *x, const void *y)
const_tree const xt = (const_tree) x;
const_tree const yt = (const_tree) y;
- return (TREE_TYPE (xt) == TREE_TYPE (yt)
- && TREE_INT_CST_HIGH (xt) == TREE_INT_CST_HIGH (yt)
- && TREE_INT_CST_LOW (xt) == TREE_INT_CST_LOW (yt));
+ if (TREE_TYPE (xt) != TREE_TYPE (yt)
+ || TREE_INT_CST_NUNITS (xt) != TREE_INT_CST_NUNITS (yt)
+ || TREE_INT_CST_EXT_NUNITS (xt) != TREE_INT_CST_EXT_NUNITS (yt))
+ return false;
+
+ for (int i = 0; i < TREE_INT_CST_NUNITS (xt); i++)
+ if (TREE_INT_CST_ELT (xt, i) != TREE_INT_CST_ELT (yt, i))
+ return false;
+
+ return true;
}
-/* Create an INT_CST node of TYPE and value HI:LOW.
+/* Create an INT_CST node of TYPE and value CST.
The returned node is always shared. For small integers we use a
- per-type vector cache, for larger ones we use a single hash table. */
+ per-type vector cache, for larger ones we use a single hash table.
+ The value is extended from its precision according to the sign of
+ the type to be a multiple of HOST_BITS_PER_WIDE_INT. This defines
+ the upper bits and ensures that hashing and value equality based
+ upon the underlying HOST_WIDE_INTs works without masking. */
tree
-build_int_cst_wide (tree type, unsigned HOST_WIDE_INT low, HOST_WIDE_INT hi)
+wide_int_to_tree (tree type, const wide_int_ref &pcst)
{
tree t;
int ix = -1;
int limit = 0;
gcc_assert (type);
+ unsigned int prec = TYPE_PRECISION (type);
+ signop sgn = TYPE_SIGN (type);
- switch (TREE_CODE (type))
+ /* Verify that everything is canonical. */
+ int l = pcst.get_len ();
+ if (l > 1)
{
- case NULLPTR_TYPE:
- gcc_assert (hi == 0 && low == 0);
- /* Fallthru. */
-
- case POINTER_TYPE:
- case REFERENCE_TYPE:
- /* Cache NULL pointer. */
- if (!hi && !low)
- {
- limit = 1;
- ix = 0;
- }
- break;
+ if (pcst.elt (l - 1) == 0)
+ gcc_checking_assert (pcst.elt (l - 2) < 0);
+ if (pcst.elt (l - 1) == (HOST_WIDE_INT) -1)
+ gcc_checking_assert (pcst.elt (l - 2) >= 0);
+ }
- case BOOLEAN_TYPE:
- /* Cache false or true. */
- limit = 2;
- if (!hi && low < 2)
- ix = low;
- break;
+ wide_int cst = wide_int::from (pcst, prec, sgn);
+ unsigned int ext_len = get_int_cst_ext_nunits (type, cst);
- case INTEGER_TYPE:
- case OFFSET_TYPE:
+ if (ext_len == 1)
+ {
+ /* We just need to store a single HOST_WIDE_INT. */
+ HOST_WIDE_INT hwi;
if (TYPE_UNSIGNED (type))
- {
- /* Cache 0..N */
- limit = INTEGER_SHARE_LIMIT;
- if (!hi && low < (unsigned HOST_WIDE_INT)INTEGER_SHARE_LIMIT)
- ix = low;
- }
+ hwi = cst.to_uhwi ();
else
+ hwi = cst.to_shwi ();
+
+ switch (TREE_CODE (type))
{
- /* Cache -1..N */
- limit = INTEGER_SHARE_LIMIT + 1;
- if (!hi && low < (unsigned HOST_WIDE_INT)INTEGER_SHARE_LIMIT)
- ix = low + 1;
- else if (hi == -1 && low == -(unsigned HOST_WIDE_INT)1)
- ix = 0;
- }
- break;
+ case NULLPTR_TYPE:
+ gcc_assert (hwi == 0);
+ /* Fallthru. */
+
+ case POINTER_TYPE:
+ case REFERENCE_TYPE:
+ /* Cache NULL pointer. */
+ if (hwi == 0)
+ {
+ limit = 1;
+ ix = 0;
+ }
+ break;
- case ENUMERAL_TYPE:
- break;
+ case BOOLEAN_TYPE:
+ /* Cache false or true. */
+ limit = 2;
+ if (hwi < 2)
+ ix = hwi;
+ break;
- default:
- gcc_unreachable ();
- }
+ case INTEGER_TYPE:
+ case OFFSET_TYPE:
+ if (TYPE_SIGN (type) == UNSIGNED)
+ {
+ /* Cache [0, N). */
+ limit = INTEGER_SHARE_LIMIT;
+ if (IN_RANGE (hwi, 0, INTEGER_SHARE_LIMIT - 1))
+ ix = hwi;
+ }
+ else
+ {
+ /* Cache [-1, N). */
+ limit = INTEGER_SHARE_LIMIT + 1;
+ if (IN_RANGE (hwi, -1, INTEGER_SHARE_LIMIT - 1))
+ ix = hwi + 1;
+ }
+ break;
- if (ix >= 0)
- {
- /* Look for it in the type's vector of small shared ints. */
- if (!TYPE_CACHED_VALUES_P (type))
- {
- TYPE_CACHED_VALUES_P (type) = 1;
- TYPE_CACHED_VALUES (type) = make_tree_vec (limit);
+ case ENUMERAL_TYPE:
+ break;
+
+ default:
+ gcc_unreachable ();
}
- t = TREE_VEC_ELT (TYPE_CACHED_VALUES (type), ix);
- if (t)
+ if (ix >= 0)
{
- /* Make sure no one is clobbering the shared constant. */
- gcc_assert (TREE_TYPE (t) == type);
- gcc_assert (TREE_INT_CST_LOW (t) == low);
- gcc_assert (TREE_INT_CST_HIGH (t) == hi);
+ /* Look for it in the type's vector of small shared ints. */
+ if (!TYPE_CACHED_VALUES_P (type))
+ {
+ TYPE_CACHED_VALUES_P (type) = 1;
+ TYPE_CACHED_VALUES (type) = make_tree_vec (limit);
+ }
+
+ t = TREE_VEC_ELT (TYPE_CACHED_VALUES (type), ix);
+ if (t)
+ /* Make sure no one is clobbering the shared constant. */
+ gcc_checking_assert (TREE_TYPE (t) == type
+ && TREE_INT_CST_NUNITS (t) == 1
+ && TREE_INT_CST_OFFSET_NUNITS (t) == 1
+ && TREE_INT_CST_EXT_NUNITS (t) == 1
+ && TREE_INT_CST_ELT (t, 0) == hwi);
+ else
+ {
+ /* Create a new shared int. */
+ t = build_new_int_cst (type, cst);
+ TREE_VEC_ELT (TYPE_CACHED_VALUES (type), ix) = t;
+ }
}
else
{
- /* Create a new shared int. */
- t = make_node (INTEGER_CST);
+ /* Use the cache of larger shared ints, using int_cst_node as
+ a temporary. */
+ void **slot;
- TREE_INT_CST_LOW (t) = low;
- TREE_INT_CST_HIGH (t) = hi;
- TREE_TYPE (t) = type;
+ TREE_INT_CST_ELT (int_cst_node, 0) = hwi;
+ TREE_TYPE (int_cst_node) = type;
- TREE_VEC_ELT (TYPE_CACHED_VALUES (type), ix) = t;
+ slot = htab_find_slot (int_cst_hash_table, int_cst_node, INSERT);
+ t = (tree) *slot;
+ if (!t)
+ {
+ /* Insert this one into the hash table. */
+ t = int_cst_node;
+ *slot = t;
+ /* Make a new node for next time round. */
+ int_cst_node = make_int_cst (1, 1);
+ }
}
}
else
{
- /* Use the cache of larger shared ints. */
+ /* The value either hashes properly or we drop it on the floor
+ for the gc to take care of. There will not be enough of them
+ to worry about. */
void **slot;
- TREE_INT_CST_LOW (int_cst_node) = low;
- TREE_INT_CST_HIGH (int_cst_node) = hi;
- TREE_TYPE (int_cst_node) = type;
-
- slot = htab_find_slot (int_cst_hash_table, int_cst_node, INSERT);
+ tree nt = build_new_int_cst (type, cst);
+ slot = htab_find_slot (int_cst_hash_table, nt, INSERT);
t = (tree) *slot;
if (!t)
{
/* Insert this one into the hash table. */
- t = int_cst_node;
+ t = nt;
*slot = t;
- /* Make a new node for next time round. */
- int_cst_node = make_node (INTEGER_CST);
}
}
@@ -1257,23 +1345,22 @@ void
cache_integer_cst (tree t)
{
tree type = TREE_TYPE (t);
- HOST_WIDE_INT hi = TREE_INT_CST_HIGH (t);
- unsigned HOST_WIDE_INT low = TREE_INT_CST_LOW (t);
int ix = -1;
int limit = 0;
+ int prec = TYPE_PRECISION (type);
gcc_assert (!TREE_OVERFLOW (t));
switch (TREE_CODE (type))
{
case NULLPTR_TYPE:
- gcc_assert (hi == 0 && low == 0);
+ gcc_assert (integer_zerop (t));
/* Fallthru. */
case POINTER_TYPE:
case REFERENCE_TYPE:
/* Cache NULL pointer. */
- if (!hi && !low)
+ if (integer_zerop (t))
{
limit = 1;
ix = 0;
@@ -1283,8 +1370,8 @@ cache_integer_cst (tree t)
case BOOLEAN_TYPE:
/* Cache false or true. */
limit = 2;
- if (!hi && low < 2)
- ix = low;
+ if (wi::ltu_p (t, 2))
+ ix = TREE_INT_CST_ELT (t, 0);
break;
case INTEGER_TYPE:
@@ -1293,17 +1380,35 @@ cache_integer_cst (tree t)
{
/* Cache 0..N */
limit = INTEGER_SHARE_LIMIT;
- if (!hi && low < (unsigned HOST_WIDE_INT)INTEGER_SHARE_LIMIT)
- ix = low;
+
+ /* This is a little hokie, but if the prec is smaller than
+ what is necessary to hold INTEGER_SHARE_LIMIT, then the
+ obvious test will not get the correct answer. */
+ if (prec < HOST_BITS_PER_WIDE_INT)
+ {
+ if (tree_to_uhwi (t) < (unsigned HOST_WIDE_INT) INTEGER_SHARE_LIMIT)
+ ix = tree_to_uhwi (t);
+ }
+ else if (wi::ltu_p (t, INTEGER_SHARE_LIMIT))
+ ix = tree_to_uhwi (t);
}
else
{
/* Cache -1..N */
limit = INTEGER_SHARE_LIMIT + 1;
- if (!hi && low < (unsigned HOST_WIDE_INT)INTEGER_SHARE_LIMIT)
- ix = low + 1;
- else if (hi == -1 && low == -(unsigned HOST_WIDE_INT)1)
+
+ if (integer_minus_onep (t))
ix = 0;
+ else if (!wi::neg_p (t))
+ {
+ if (prec < HOST_BITS_PER_WIDE_INT)
+ {
+ if (tree_to_shwi (t) < INTEGER_SHARE_LIMIT)
+ ix = tree_to_shwi (t) + 1;
+ }
+ else if (wi::ltu_p (t, INTEGER_SHARE_LIMIT))
+ ix = tree_to_shwi (t) + 1;
+ }
}
break;
@@ -1335,13 +1440,10 @@ cache_integer_cst (tree t)
/* If there is already an entry for the number verify it's the
same. */
if (*slot)
- {
- gcc_assert (TREE_INT_CST_LOW ((tree)*slot) == low
- && TREE_INT_CST_HIGH ((tree)*slot) == hi);
- return;
- }
- /* Otherwise insert this one into the hash table. */
- *slot = t;
+ gcc_assert (wi::eq_p (tree (*slot), t));
+ else
+ /* Otherwise insert this one into the hash table. */
+ *slot = t;
}
}
@@ -1352,18 +1454,10 @@ cache_integer_cst (tree t)
tree
build_low_bits_mask (tree type, unsigned bits)
{
- double_int mask;
-
gcc_assert (bits <= TYPE_PRECISION (type));
- if (bits == TYPE_PRECISION (type)
- && !TYPE_UNSIGNED (type))
- /* Sign extended all-ones mask. */
- mask = double_int_minus_one;
- else
- mask = double_int::mask (bits);
-
- return build_int_cst_wide (type, mask.low, mask.high);
+ return wide_int_to_tree (type, wi::mask (bits, false,
+ TYPE_PRECISION (type)));
}
/* Checks that X is integer constant that can be expressed in (unsigned)
@@ -1378,8 +1472,7 @@ cst_and_fits_in_hwi (const_tree x)
if (TYPE_PRECISION (TREE_TYPE (x)) > HOST_BITS_PER_WIDE_INT)
return false;
- return (TREE_INT_CST_HIGH (x) == 0
- || TREE_INT_CST_HIGH (x) == -1);
+ return TREE_INT_CST_NUNITS (x) == 1;
}
/* Build a newly constructed TREE_VEC node of length LEN. */
@@ -1619,9 +1712,8 @@ real_value_from_int_cst (const_tree type, const_tree i)
bitwise comparisons to see if two values are the same. */
memset (&d, 0, sizeof d);
- real_from_integer (&d, type ? TYPE_MODE (type) : VOIDmode,
- TREE_INT_CST_LOW (i), TREE_INT_CST_HIGH (i),
- TYPE_UNSIGNED (TREE_TYPE (i)));
+ real_from_integer (&d, type ? TYPE_MODE (type) : VOIDmode, i,
+ TYPE_SIGN (TREE_TYPE (i)));
return d;
}
@@ -1859,6 +1951,38 @@ build_case_label (tree low_value, tree high_value, tree label_decl)
return t;
}
+/* Build a newly constructed INTEGER_CST node. LEN and EXT_LEN are the
+ values of TREE_INT_CST_NUNITS and TREE_INT_CST_EXT_NUNITS respectively.
+ The latter determines the length of the HOST_WIDE_INT vector. */
+
+tree
+make_int_cst_stat (int len, int ext_len MEM_STAT_DECL)
+{
+ tree t;
+ int length = ((ext_len - 1) * sizeof (HOST_WIDE_INT)
+ + sizeof (struct tree_int_cst));
+
+ gcc_assert (len);
+ record_node_allocation_statistics (INTEGER_CST, length);
+
+ t = ggc_alloc_cleared_tree_node_stat (length PASS_MEM_STAT);
+
+ TREE_SET_CODE (t, INTEGER_CST);
+ TREE_INT_CST_NUNITS (t) = len;
+ TREE_INT_CST_EXT_NUNITS (t) = ext_len;
+ /* to_offset can only be applied to trees that are offset_int-sized
+ or smaller. EXT_LEN is correct if it fits, otherwise the constant
+ must be exactly the precision of offset_int and so LEN is correct. */
+ if (ext_len <= OFFSET_INT_ELTS)
+ TREE_INT_CST_OFFSET_NUNITS (t) = ext_len;
+ else
+ TREE_INT_CST_OFFSET_NUNITS (t) = len;
+
+ TREE_CONSTANT (t) = 1;
+
+ return t;
+}
+
/* Build a newly constructed TREE_VEC node of length LEN. */
tree
@@ -1910,8 +2034,7 @@ integer_zerop (const_tree expr)
switch (TREE_CODE (expr))
{
case INTEGER_CST:
- return (TREE_INT_CST_LOW (expr) == 0
- && TREE_INT_CST_HIGH (expr) == 0);
+ return wi::eq_p (expr, 0);
case COMPLEX_CST:
return (integer_zerop (TREE_REALPART (expr))
&& integer_zerop (TREE_IMAGPART (expr)));
@@ -1939,8 +2062,7 @@ integer_onep (const_tree expr)
switch (TREE_CODE (expr))
{
case INTEGER_CST:
- return (TREE_INT_CST_LOW (expr) == 1
- && TREE_INT_CST_HIGH (expr) == 0);
+ return wi::eq_p (wi::to_widest (expr), 1);
case COMPLEX_CST:
return (integer_onep (TREE_REALPART (expr))
&& integer_zerop (TREE_IMAGPART (expr)));
@@ -1963,9 +2085,6 @@ integer_onep (const_tree expr)
int
integer_all_onesp (const_tree expr)
{
- int prec;
- int uns;
-
STRIP_NOPS (expr);
if (TREE_CODE (expr) == COMPLEX_CST
@@ -1985,35 +2104,7 @@ integer_all_onesp (const_tree expr)
else if (TREE_CODE (expr) != INTEGER_CST)
return 0;
- uns = TYPE_UNSIGNED (TREE_TYPE (expr));
- if (TREE_INT_CST_LOW (expr) == ~(unsigned HOST_WIDE_INT) 0
- && TREE_INT_CST_HIGH (expr) == -1)
- return 1;
- if (!uns)
- return 0;
-
- prec = TYPE_PRECISION (TREE_TYPE (expr));
- if (prec >= HOST_BITS_PER_WIDE_INT)
- {
- HOST_WIDE_INT high_value;
- int shift_amount;
-
- shift_amount = prec - HOST_BITS_PER_WIDE_INT;
-
- /* Can not handle precisions greater than twice the host int size. */
- gcc_assert (shift_amount <= HOST_BITS_PER_WIDE_INT);
- if (shift_amount == HOST_BITS_PER_WIDE_INT)
- /* Shifting by the host word size is undefined according to the ANSI
- standard, so we must handle this as a special case. */
- high_value = -1;
- else
- high_value = ((HOST_WIDE_INT) 1 << shift_amount) - 1;
-
- return (TREE_INT_CST_LOW (expr) == ~(unsigned HOST_WIDE_INT) 0
- && TREE_INT_CST_HIGH (expr) == high_value);
- }
- else
- return TREE_INT_CST_LOW (expr) == ((unsigned HOST_WIDE_INT) 1 << prec) - 1;
+ return wi::max_value (TYPE_PRECISION (TREE_TYPE (expr)), UNSIGNED) == expr;
}
/* Return 1 if EXPR is the integer constant minus one. */
@@ -2036,9 +2127,6 @@ integer_minus_onep (const_tree expr)
int
integer_pow2p (const_tree expr)
{
- int prec;
- unsigned HOST_WIDE_INT high, low;
-
STRIP_NOPS (expr);
if (TREE_CODE (expr) == COMPLEX_CST
@@ -2049,29 +2137,7 @@ integer_pow2p (const_tree expr)
if (TREE_CODE (expr) != INTEGER_CST)
return 0;
- prec = TYPE_PRECISION (TREE_TYPE (expr));
- high = TREE_INT_CST_HIGH (expr);
- low = TREE_INT_CST_LOW (expr);
-
- /* First clear all bits that are beyond the type's precision in case
- we've been sign extended. */
-
- if (prec == HOST_BITS_PER_DOUBLE_INT)
- ;
- else if (prec > HOST_BITS_PER_WIDE_INT)
- high &= ~(HOST_WIDE_INT_M1U << (prec - HOST_BITS_PER_WIDE_INT));
- else
- {
- high = 0;
- if (prec < HOST_BITS_PER_WIDE_INT)
- low &= ~(HOST_WIDE_INT_M1U << prec);
- }
-
- if (high == 0 && low == 0)
- return 0;
-
- return ((high == 0 && (low & (low - 1)) == 0)
- || (low == 0 && (high & (high - 1)) == 0));
+ return wi::popcount (expr) == 1;
}
/* Return 1 if EXPR is an integer constant other than zero or a
@@ -2083,8 +2149,7 @@ integer_nonzerop (const_tree expr)
STRIP_NOPS (expr);
return ((TREE_CODE (expr) == INTEGER_CST
- && (TREE_INT_CST_LOW (expr) != 0
- || TREE_INT_CST_HIGH (expr) != 0))
+ && !wi::eq_p (expr, 0))
|| (TREE_CODE (expr) == COMPLEX_CST
&& (integer_nonzerop (TREE_REALPART (expr))
|| integer_nonzerop (TREE_IMAGPART (expr)))));
@@ -2105,34 +2170,12 @@ fixed_zerop (const_tree expr)
int
tree_log2 (const_tree expr)
{
- int prec;
- HOST_WIDE_INT high, low;
-
STRIP_NOPS (expr);
if (TREE_CODE (expr) == COMPLEX_CST)
return tree_log2 (TREE_REALPART (expr));
- prec = TYPE_PRECISION (TREE_TYPE (expr));
- high = TREE_INT_CST_HIGH (expr);
- low = TREE_INT_CST_LOW (expr);
-
- /* First clear all bits that are beyond the type's precision in case
- we've been sign extended. */
-
- if (prec == HOST_BITS_PER_DOUBLE_INT)
- ;
- else if (prec > HOST_BITS_PER_WIDE_INT)
- high &= ~(HOST_WIDE_INT_M1U << (prec - HOST_BITS_PER_WIDE_INT));
- else
- {
- high = 0;
- if (prec < HOST_BITS_PER_WIDE_INT)
- low &= ~(HOST_WIDE_INT_M1U << prec);
- }
-
- return (high != 0 ? HOST_BITS_PER_WIDE_INT + exact_log2 (high)
- : exact_log2 (low));
+ return wi::exact_log2 (expr);
}
/* Similar, but return the largest integer Y such that 2 ** Y is less
@@ -2141,35 +2184,12 @@ tree_log2 (const_tree expr)
int
tree_floor_log2 (const_tree expr)
{
- int prec;
- HOST_WIDE_INT high, low;
-
STRIP_NOPS (expr);
if (TREE_CODE (expr) == COMPLEX_CST)
return tree_log2 (TREE_REALPART (expr));
- prec = TYPE_PRECISION (TREE_TYPE (expr));
- high = TREE_INT_CST_HIGH (expr);
- low = TREE_INT_CST_LOW (expr);
-
- /* First clear all bits that are beyond the type's precision in case
- we've been sign extended. Ignore if type's precision hasn't been set
- since what we are doing is setting it. */
-
- if (prec == HOST_BITS_PER_DOUBLE_INT || prec == 0)
- ;
- else if (prec > HOST_BITS_PER_WIDE_INT)
- high &= ~(HOST_WIDE_INT_M1U << (prec - HOST_BITS_PER_WIDE_INT));
- else
- {
- high = 0;
- if (prec < HOST_BITS_PER_WIDE_INT)
- low &= ~(HOST_WIDE_INT_M1U << prec);
- }
-
- return (high != 0 ? HOST_BITS_PER_WIDE_INT + floor_log2 (high)
- : floor_log2 (low));
+ return wi::floor_log2 (expr);
}
/* Return number of known trailing zero bits in EXPR, or, if the value of
@@ -2186,10 +2206,10 @@ tree_ctz (const_tree expr)
switch (TREE_CODE (expr))
{
case INTEGER_CST:
- ret1 = tree_to_double_int (expr).trailing_zeros ();
+ ret1 = wi::ctz (expr);
return MIN (ret1, prec);
case SSA_NAME:
- ret1 = get_nonzero_bits (expr).trailing_zeros ();
+ ret1 = wi::ctz (get_nonzero_bits (expr));
return MIN (ret1, prec);
case PLUS_EXPR:
case MINUS_EXPR:
@@ -2657,14 +2677,11 @@ int_size_in_bytes (const_tree type)
type = TYPE_MAIN_VARIANT (type);
t = TYPE_SIZE_UNIT (type);
- if (t == 0
- || TREE_CODE (t) != INTEGER_CST
- || TREE_INT_CST_HIGH (t) != 0
- /* If the result would appear negative, it's too big to represent. */
- || (HOST_WIDE_INT) TREE_INT_CST_LOW (t) < 0)
- return -1;
- return TREE_INT_CST_LOW (t);
+ if (t && tree_fits_uhwi_p (t))
+ return TREE_INT_CST_LOW (t);
+ else
+ return -1;
}
/* Return the maximum size of TYPE (in bytes) as a wide integer
@@ -4312,11 +4329,10 @@ build_simple_mem_ref_loc (location_t loc, tree ptr)
/* Return the constant offset of a MEM_REF or TARGET_MEM_REF tree T. */
-double_int
+offset_int
mem_ref_offset (const_tree t)
{
- tree toff = TREE_OPERAND (t, 1);
- return tree_to_double_int (toff).sext (TYPE_PRECISION (TREE_TYPE (toff)));
+ return offset_int::from (TREE_OPERAND (t, 1), SIGNED);
}
/* Return an invariant ADDR_EXPR of type TYPE taking the address of BASE
@@ -4540,6 +4556,8 @@ build_type_attribute_qual_variant (tree ttype, tree attribute, int quals)
{
hashval_t hashcode = 0;
tree ntype;
+ int i;
+ tree t;
enum tree_code code = TREE_CODE (ttype);
/* Building a distinct copy of a tagged type is inappropriate; it
@@ -4581,10 +4599,9 @@ build_type_attribute_qual_variant (tree ttype, tree attribute, int quals)
hashcode);
break;
case INTEGER_TYPE:
- hashcode = iterative_hash_object
- (TREE_INT_CST_LOW (TYPE_MAX_VALUE (ntype)), hashcode);
- hashcode = iterative_hash_object
- (TREE_INT_CST_HIGH (TYPE_MAX_VALUE (ntype)), hashcode);
+ t = TYPE_MAX_VALUE (ntype);
+ for (i = 0; i < TREE_INT_CST_NUNITS (t); i++)
+ hashcode = iterative_hash_object (TREE_INT_CST_ELT (t, i), hashcode);
break;
case REAL_TYPE:
case FIXED_POINT_TYPE:
@@ -6630,6 +6647,8 @@ type_hash_eq (const void *va, const void *vb)
case INTEGER_TYPE:
case REAL_TYPE:
case BOOLEAN_TYPE:
+ if (TYPE_PRECISION (a->type) != TYPE_PRECISION (b->type))
+ return false;
return ((TYPE_MAX_VALUE (a->type) == TYPE_MAX_VALUE (b->type)
|| tree_int_cst_equal (TYPE_MAX_VALUE (a->type),
TYPE_MAX_VALUE (b->type)))
@@ -6928,54 +6947,12 @@ tree_int_cst_equal (const_tree t1, const_tree t2)
if (TREE_CODE (t1) == INTEGER_CST
&& TREE_CODE (t2) == INTEGER_CST
- && TREE_INT_CST_LOW (t1) == TREE_INT_CST_LOW (t2)
- && TREE_INT_CST_HIGH (t1) == TREE_INT_CST_HIGH (t2))
+ && wi::to_widest (t1) == wi::to_widest (t2))
return 1;
return 0;
}
-/* Nonzero if integer constants T1 and T2 represent values that satisfy <.
- The precise way of comparison depends on their data type. */
-
-int
-tree_int_cst_lt (const_tree t1, const_tree t2)
-{
- if (t1 == t2)
- return 0;
-
- if (TYPE_UNSIGNED (TREE_TYPE (t1)) != TYPE_UNSIGNED (TREE_TYPE (t2)))
- {
- int t1_sgn = tree_int_cst_sgn (t1);
- int t2_sgn = tree_int_cst_sgn (t2);
-
- if (t1_sgn < t2_sgn)
- return 1;
- else if (t1_sgn > t2_sgn)
- return 0;
- /* Otherwise, both are non-negative, so we compare them as
- unsigned just in case one of them would overflow a signed
- type. */
- }
- else if (!TYPE_UNSIGNED (TREE_TYPE (t1)))
- return INT_CST_LT (t1, t2);
-
- return INT_CST_LT_UNSIGNED (t1, t2);
-}
-
-/* Returns -1 if T1 < T2, 0 if T1 == T2, and 1 if T1 > T2. */
-
-int
-tree_int_cst_compare (const_tree t1, const_tree t2)
-{
- if (tree_int_cst_lt (t1, t2))
- return -1;
- else if (tree_int_cst_lt (t2, t1))
- return 1;
- else
- return 0;
-}
-
/* Return true if T is an INTEGER_CST whose numerical value (extended
according to TYPE_UNSIGNED) fits in a signed HOST_WIDE_INT. */
@@ -6984,11 +6961,7 @@ tree_fits_shwi_p (const_tree t)
{
return (t != NULL_TREE
&& TREE_CODE (t) == INTEGER_CST
- && ((TREE_INT_CST_HIGH (t) == 0
- && (HOST_WIDE_INT) TREE_INT_CST_LOW (t) >= 0)
- || (TREE_INT_CST_HIGH (t) == -1
- && (HOST_WIDE_INT) TREE_INT_CST_LOW (t) < 0
- && !TYPE_UNSIGNED (TREE_TYPE (t)))));
+ && wi::fits_shwi_p (wi::to_widest (t)));
}
/* Return true if T is an INTEGER_CST whose numerical value (extended
@@ -6999,7 +6972,7 @@ tree_fits_uhwi_p (const_tree t)
{
return (t != NULL_TREE
&& TREE_CODE (t) == INTEGER_CST
- && TREE_INT_CST_HIGH (t) == 0);
+ && wi::fits_uhwi_p (wi::to_widest (t)));
}
/* T is an INTEGER_CST whose numerical value (extended according to
@@ -7030,17 +7003,8 @@ int
tree_int_cst_sign_bit (const_tree t)
{
unsigned bitno = TYPE_PRECISION (TREE_TYPE (t)) - 1;
- unsigned HOST_WIDE_INT w;
-
- if (bitno < HOST_BITS_PER_WIDE_INT)
- w = TREE_INT_CST_LOW (t);
- else
- {
- w = TREE_INT_CST_HIGH (t);
- bitno -= HOST_BITS_PER_WIDE_INT;
- }
- return (w >> bitno) & 1;
+ return wi::extract_uhwi (t, bitno, 1);
}
/* Return an indication of the sign of the integer constant T.
@@ -7050,11 +7014,11 @@ tree_int_cst_sign_bit (const_tree t)
int
tree_int_cst_sgn (const_tree t)
{
- if (TREE_INT_CST_LOW (t) == 0 && TREE_INT_CST_HIGH (t) == 0)
+ if (wi::eq_p (t, 0))
return 0;
else if (TYPE_UNSIGNED (TREE_TYPE (t)))
return 1;
- else if (TREE_INT_CST_HIGH (t) < 0)
+ else if (wi::neg_p (t))
return -1;
else
return 1;
@@ -7064,7 +7028,7 @@ tree_int_cst_sgn (const_tree t)
signed or unsigned type, UNSIGNEDP says which. */
unsigned int
-tree_int_cst_min_precision (tree value, bool unsignedp)
+tree_int_cst_min_precision (tree value, signop sgn)
{
/* If the value is negative, compute its negative minus 1. The latter
adjustment is because the absolute value of the largest negative value
@@ -7082,7 +7046,7 @@ tree_int_cst_min_precision (tree value, bool unsignedp)
if (integer_zerop (value))
return 1;
else
- return tree_floor_log2 (value) + 1 + !unsignedp;
+ return tree_floor_log2 (value) + 1 + (sgn == SIGNED ? 1 : 0) ;
}
/* Return truthvalue of whether T1 is the same tree structure as T2.
@@ -7125,8 +7089,7 @@ simple_cst_equal (const_tree t1, const_tree t2)
switch (code1)
{
case INTEGER_CST:
- return (TREE_INT_CST_LOW (t1) == TREE_INT_CST_LOW (t2)
- && TREE_INT_CST_HIGH (t1) == TREE_INT_CST_HIGH (t2));
+ return wi::to_widest (t1) == wi::to_widest (t2);
case REAL_CST:
return REAL_VALUES_IDENTICAL (TREE_REAL_CST (t1), TREE_REAL_CST (t2));
@@ -7262,7 +7225,7 @@ compare_tree_int (const_tree t, unsigned HOST_WIDE_INT u)
{
if (tree_int_cst_sgn (t) < 0)
return -1;
- else if (TREE_INT_CST_HIGH (t) != 0)
+ else if (!tree_fits_uhwi_p (t))
return 1;
else if (TREE_INT_CST_LOW (t) == u)
return 0;
@@ -7398,8 +7361,9 @@ iterative_hash_expr (const_tree t, hashval_t val)
/* Alas, constants aren't shared, so we can't rely on pointer
identity. */
case INTEGER_CST:
- val = iterative_hash_host_wide_int (TREE_INT_CST_LOW (t), val);
- return iterative_hash_host_wide_int (TREE_INT_CST_HIGH (t), val);
+ for (i = 0; i < TREE_INT_CST_NUNITS (t); i++)
+ val = iterative_hash_host_wide_int (TREE_INT_CST_ELT (t, i), val);
+ return val;
case REAL_CST:
{
unsigned int val2 = real_hash (TREE_REAL_CST_PTR (t));
@@ -8547,11 +8511,8 @@ bool
int_fits_type_p (const_tree c, const_tree type)
{
tree type_low_bound, type_high_bound;
- bool ok_for_low_bound, ok_for_high_bound, unsc;
- double_int dc, dd;
-
- dc = tree_to_double_int (c);
- unsc = TYPE_UNSIGNED (TREE_TYPE (c));
+ bool ok_for_low_bound, ok_for_high_bound;
+ signop sgn_c = TYPE_SIGN (TREE_TYPE (c));
retry:
type_low_bound = TYPE_MIN_VALUE (type);
@@ -8560,7 +8521,7 @@ retry:
/* If at least one bound of the type is a constant integer, we can check
ourselves and maybe make a decision. If no such decision is possible, but
this type is a subtype, try checking against that. Otherwise, use
- double_int_fits_to_tree_p, which checks against the precision.
+ fits_to_tree_p, which checks against the precision.
Compute the status for each possibly constant bound, and return if we see
one does not match. Use ok_for_xxx_bound for this purpose, assigning -1
@@ -8570,18 +8531,7 @@ retry:
/* Check if c >= type_low_bound. */
if (type_low_bound && TREE_CODE (type_low_bound) == INTEGER_CST)
{
- dd = tree_to_double_int (type_low_bound);
- if (unsc != TYPE_UNSIGNED (TREE_TYPE (type_low_bound)))
- {
- int c_neg = (!unsc && dc.is_negative ());
- int t_neg = (unsc && dd.is_negative ());
-
- if (c_neg && !t_neg)
- return false;
- if ((c_neg || !t_neg) && dc.ult (dd))
- return false;
- }
- else if (dc.cmp (dd, unsc) < 0)
+ if (tree_int_cst_lt (c, type_low_bound))
return false;
ok_for_low_bound = true;
}
@@ -8591,18 +8541,7 @@ retry:
/* Check if c <= type_high_bound. */
if (type_high_bound && TREE_CODE (type_high_bound) == INTEGER_CST)
{
- dd = tree_to_double_int (type_high_bound);
- if (unsc != TYPE_UNSIGNED (TREE_TYPE (type_high_bound)))
- {
- int c_neg = (!unsc && dc.is_negative ());
- int t_neg = (unsc && dd.is_negative ());
-
- if (t_neg && !c_neg)
- return false;
- if ((t_neg || !c_neg) && dc.ugt (dd))
- return false;
- }
- else if (dc.cmp (dd, unsc) > 0)
+ if (tree_int_cst_lt (type_high_bound, c))
return false;
ok_for_high_bound = true;
}
@@ -8616,7 +8555,7 @@ retry:
/* Perform some generic filtering which may allow making a decision
even if the bounds are not constant. First, negative integers
never fit in unsigned types, */
- if (TYPE_UNSIGNED (type) && !unsc && dc.is_negative ())
+ if (TYPE_UNSIGNED (type) && sgn_c == SIGNED && wi::neg_p (c))
return false;
/* Second, narrower types always fit in wider ones. */
@@ -8624,16 +8563,21 @@ retry:
return true;
/* Third, unsigned integers with top bit set never fit signed types. */
- if (! TYPE_UNSIGNED (type) && unsc)
+ if (!TYPE_UNSIGNED (type) && sgn_c == UNSIGNED)
{
int prec = GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (c))) - 1;
- if (prec < HOST_BITS_PER_WIDE_INT)
+ if (prec < TYPE_PRECISION (TREE_TYPE (c)))
{
- if (((((unsigned HOST_WIDE_INT) 1) << prec) & dc.low) != 0)
+ /* When a tree_cst is converted to a wide-int, the precision
+ is taken from the type. However, if the precision of the
+ mode underneath the type is smaller than that, it is
+ possible that the value will not fit. The test below
+ fails if any bit is set between the sign bit of the
+ underlying mode and the top bit of the type. */
+ if (wi::ne_p (wi::zext (c, prec - 1), c))
return false;
- }
- else if (((((unsigned HOST_WIDE_INT) 1)
- << (prec - HOST_BITS_PER_WIDE_INT)) & dc.high) != 0)
+ }
+ else if (wi::neg_p (c))
return false;
}
@@ -8648,8 +8592,8 @@ retry:
goto retry;
}
- /* Or to double_int_fits_to_tree_p, if nothing else. */
- return double_int_fits_to_tree_p (type, dc);
+ /* Or to fits_to_tree_p, if nothing else. */
+ return wi::fits_to_tree_p (c, type);
}
/* Stores bounds of an integer TYPE in MIN and MAX. If TYPE has non-constant
@@ -8662,33 +8606,25 @@ get_type_static_bounds (const_tree type, mpz_t min, mpz_t max)
{
if (!POINTER_TYPE_P (type) && TYPE_MIN_VALUE (type)
&& TREE_CODE (TYPE_MIN_VALUE (type)) == INTEGER_CST)
- mpz_set_double_int (min, tree_to_double_int (TYPE_MIN_VALUE (type)),
- TYPE_UNSIGNED (type));
+ wi::to_mpz (TYPE_MIN_VALUE (type), min, TYPE_SIGN (type));
else
{
if (TYPE_UNSIGNED (type))
mpz_set_ui (min, 0);
else
{
- double_int mn;
- mn = double_int::mask (TYPE_PRECISION (type) - 1);
- mn = (mn + double_int_one).sext (TYPE_PRECISION (type));
- mpz_set_double_int (min, mn, false);
+ wide_int mn = wi::min_value (TYPE_PRECISION (type), SIGNED);
+ wi::to_mpz (mn, min, SIGNED);
}
}
if (!POINTER_TYPE_P (type) && TYPE_MAX_VALUE (type)
&& TREE_CODE (TYPE_MAX_VALUE (type)) == INTEGER_CST)
- mpz_set_double_int (max, tree_to_double_int (TYPE_MAX_VALUE (type)),
- TYPE_UNSIGNED (type));
+ wi::to_mpz (TYPE_MAX_VALUE (type), max, TYPE_SIGN (type));
else
{
- if (TYPE_UNSIGNED (type))
- mpz_set_double_int (max, double_int::mask (TYPE_PRECISION (type)),
- true);
- else
- mpz_set_double_int (max, double_int::mask (TYPE_PRECISION (type) - 1),
- true);
+ wide_int mn = wi::max_value (TYPE_PRECISION (type), TYPE_SIGN (type));
+ wi::to_mpz (mn, max, TYPE_SIGN (type));
}
}
@@ -9357,6 +9293,18 @@ tree_contains_struct_check_failed (const_tree node,
(dynamically sized) vector. */
void
+tree_int_cst_elt_check_failed (int idx, int len, const char *file, int line,
+ const char *function)
+{
+ internal_error
+ ("tree check: accessed elt %d of tree_int_cst with %d elts in %s, at %s:%d",
+ idx + 1, len, function, trim_filename (file), line);
+}
+
+/* Similar to above, except that the check is for the bounds of a TREE_VEC's
+ (dynamically sized) vector. */
+
+void
tree_vec_elt_check_failed (int idx, int len, const char *file, int line,
const char *function)
{
@@ -9607,13 +9555,11 @@ build_common_tree_nodes (bool signed_char, bool short_double)
#endif
/* Define a boolean type. This type only represents boolean values but
- may be larger than char depending on the value of BOOL_TYPE_SIZE.
- Front ends which want to override this size (i.e. Java) can redefine
- boolean_type_node before calling build_common_tree_nodes_2. */
+ may be larger than char depending on the value of BOOL_TYPE_SIZE. */
boolean_type_node = make_unsigned_type (BOOL_TYPE_SIZE);
TREE_SET_CODE (boolean_type_node, BOOLEAN_TYPE);
- TYPE_MAX_VALUE (boolean_type_node) = build_int_cst (boolean_type_node, 1);
TYPE_PRECISION (boolean_type_node) = 1;
+ TYPE_MAX_VALUE (boolean_type_node) = build_int_cst (boolean_type_node, 1);
/* Define what type to use for size_t. */
if (strcmp (SIZE_TYPE, "unsigned int") == 0)
@@ -10509,8 +10455,7 @@ int_cst_value (const_tree x)
unsigned HOST_WIDE_INT val = TREE_INT_CST_LOW (x);
/* Make sure the sign-extended value will fit in a HOST_WIDE_INT. */
- gcc_assert (TREE_INT_CST_HIGH (x) == 0
- || TREE_INT_CST_HIGH (x) == -1);
+ gcc_assert (cst_and_fits_in_hwi (x));
if (bits < HOST_BITS_PER_WIDE_INT)
{
@@ -10534,12 +10479,16 @@ widest_int_cst_value (const_tree x)
#if HOST_BITS_PER_WIDEST_INT > HOST_BITS_PER_WIDE_INT
gcc_assert (HOST_BITS_PER_WIDEST_INT >= HOST_BITS_PER_DOUBLE_INT);
- val |= (((unsigned HOST_WIDEST_INT) TREE_INT_CST_HIGH (x))
- << HOST_BITS_PER_WIDE_INT);
+ gcc_assert (TREE_INT_CST_NUNITS (x) == 2);
+
+ if (TREE_INT_CST_NUNITS (x) == 1)
+ val = HOST_WIDE_INT (val);
+ else
+ val |= (((unsigned HOST_WIDEST_INT) TREE_INT_CST_ELT (x, 1))
+ << HOST_BITS_PER_WIDE_INT);
#else
/* Make sure the sign-extended value will fit in a HOST_WIDE_INT. */
- gcc_assert (TREE_INT_CST_HIGH (x) == 0
- || TREE_INT_CST_HIGH (x) == -1);
+ gcc_assert (TREE_INT_CST_NUNITS (x) == 1);
#endif
if (bits < HOST_BITS_PER_WIDEST_INT)
@@ -10625,7 +10574,6 @@ truth_type_for (tree type)
tree
upper_bound_in_type (tree outer, tree inner)
{
- double_int high;
unsigned int det = 0;
unsigned oprec = TYPE_PRECISION (outer);
unsigned iprec = TYPE_PRECISION (inner);
@@ -10669,21 +10617,8 @@ upper_bound_in_type (tree outer, tree inner)
gcc_unreachable ();
}
- /* Compute 2^^prec - 1. */
- if (prec <= HOST_BITS_PER_WIDE_INT)
- {
- high.high = 0;
- high.low = ((~(unsigned HOST_WIDE_INT) 0)
- >> (HOST_BITS_PER_WIDE_INT - prec));
- }
- else
- {
- high.high = ((~(unsigned HOST_WIDE_INT) 0)
- >> (HOST_BITS_PER_DOUBLE_INT - prec));
- high.low = ~(unsigned HOST_WIDE_INT) 0;
- }
-
- return double_int_to_tree (outer, high);
+ return wide_int_to_tree (outer,
+ wi::mask (prec, false, TYPE_PRECISION (outer)));
}
/* Returns the smallest value obtainable by casting something in INNER type to
@@ -10692,7 +10627,6 @@ upper_bound_in_type (tree outer, tree inner)
tree
lower_bound_in_type (tree outer, tree inner)
{
- double_int low;
unsigned oprec = TYPE_PRECISION (outer);
unsigned iprec = TYPE_PRECISION (inner);
@@ -10703,7 +10637,7 @@ lower_bound_in_type (tree outer, tree inner)
contains all values of INNER type. In particular, both INNER
and OUTER types have zero in common. */
|| (oprec > iprec && TYPE_UNSIGNED (inner)))
- low.low = low.high = 0;
+ return build_int_cst (outer, 0);
else
{
/* If we are widening a signed type to another signed type, we
@@ -10711,21 +10645,10 @@ lower_bound_in_type (tree outer, tree inner)
precision or narrowing to a signed type, we want to obtain
-2^(oprec-1). */
unsigned prec = oprec > iprec ? iprec : oprec;
-
- if (prec <= HOST_BITS_PER_WIDE_INT)
- {
- low.high = ~(unsigned HOST_WIDE_INT) 0;
- low.low = (~(unsigned HOST_WIDE_INT) 0) << (prec - 1);
- }
- else
- {
- low.high = ((~(unsigned HOST_WIDE_INT) 0)
- << (prec - HOST_BITS_PER_WIDE_INT - 1));
- low.low = 0;
- }
+ return wide_int_to_tree (outer,
+ wi::mask (prec - 1, true,
+ TYPE_PRECISION (outer)));
}
-
- return double_int_to_tree (outer, low);
}
/* Return nonzero if two operands that are suitable for PHI nodes are
@@ -10744,42 +10667,12 @@ operand_equal_for_phi_arg_p (const_tree arg0, const_tree arg1)
return operand_equal_p (arg0, arg1, 0);
}
-/* Returns number of zeros at the end of binary representation of X.
-
- ??? Use ffs if available? */
+/* Returns number of zeros at the end of binary representation of X. */
tree
num_ending_zeros (const_tree x)
{
- unsigned HOST_WIDE_INT fr, nfr;
- unsigned num, abits;
- tree type = TREE_TYPE (x);
-
- if (TREE_INT_CST_LOW (x) == 0)
- {
- num = HOST_BITS_PER_WIDE_INT;
- fr = TREE_INT_CST_HIGH (x);
- }
- else
- {
- num = 0;
- fr = TREE_INT_CST_LOW (x);
- }
-
- for (abits = HOST_BITS_PER_WIDE_INT / 2; abits; abits /= 2)
- {
- nfr = fr >> abits;
- if (nfr << abits == fr)
- {
- num += abits;
- fr = nfr;
- }
- }
-
- if (num > TYPE_PRECISION (type))
- num = TYPE_PRECISION (type);
-
- return build_int_cst_type (type, num);
+ return build_int_cst (TREE_TYPE (x), wi::ctz (x));
}
@@ -12320,8 +12213,7 @@ drop_tree_overflow (tree t)
/* For tree codes with a sharing machinery re-build the result. */
if (TREE_CODE (t) == INTEGER_CST)
- return build_int_cst_wide (TREE_TYPE (t),
- TREE_INT_CST_LOW (t), TREE_INT_CST_HIGH (t));
+ return wide_int_to_tree (TREE_TYPE (t), t);
/* Otherwise, as all tcc_constants are possibly shared, copy the node
and drop the flag. */
diff --git a/gcc/tree.def b/gcc/tree.def
index a92d86f32b0..a6247a7e00a 100644
--- a/gcc/tree.def
+++ b/gcc/tree.def
@@ -257,13 +257,24 @@ DEFTREECODE (LANG_TYPE, "lang_type", tcc_type, 0)
/* First, the constants. */
-/* Contents are in TREE_INT_CST_LOW and TREE_INT_CST_HIGH fields,
- 32 bits each, giving us a 64 bit constant capability. INTEGER_CST
- nodes can be shared, and therefore should be considered read only.
- They should be copied, before setting a flag such as TREE_OVERFLOW.
- If an INTEGER_CST has TREE_OVERFLOW already set, it is known to be unique.
- INTEGER_CST nodes are created for the integral types, for pointer
- types and for vector and float types in some circumstances. */
+/* Contents are in an array of HOST_WIDE_INTs.
+
+ We often access these constants both in their native precision and
+ in wider precisions (with the constant being implicitly extended
+ according to TYPE_SIGN). In each case, the useful part of the array
+ may be as wide as the precision requires but may be shorter when all
+ of the upper bits are sign bits. The length of the array when accessed
+ in the constant's native precision is given by TREE_INT_CST_NUNITS.
+ The length of the array when accessed in wider precisions is given
+ by TREE_INT_CST_EXT_NUNITS. Each element can be obtained using
+ TREE_INT_CST_ELT.
+
+ INTEGER_CST nodes can be shared, and therefore should be considered
+ read only. They should be copied before setting a flag such as
+ TREE_OVERFLOW. If an INTEGER_CST has TREE_OVERFLOW already set,
+ it is known to be unique. INTEGER_CST nodes are created for the
+ integral types, for pointer types and for vector and float types in
+ some circumstances. */
DEFTREECODE (INTEGER_CST, "integer_cst", tcc_constant, 0)
/* Contents are in TREE_REAL_CST field. */
diff --git a/gcc/tree.h b/gcc/tree.h
index ae4876dd462..3e8e625ab9f 100644
--- a/gcc/tree.h
+++ b/gcc/tree.h
@@ -21,6 +21,7 @@ along with GCC; see the file COPYING3. If not see
#define GCC_TREE_H
#include "tree-core.h"
+#include "wide-int.h"
/* These includes are required here because they provide declarations
used by inline functions in this file.
@@ -207,6 +208,8 @@ along with GCC; see the file COPYING3. If not see
#define CASE_FLT_FN_REENT(FN) case FN##_R: case FN##F_R: case FN##L_R
#define CASE_INT_FN(FN) case FN: case FN##L: case FN##LL: case FN##IMAX
+#define NULL_TREE (tree) NULL
+
/* Define accessors for the fields that all tree nodes have
(though some fields are not used for all kinds of nodes). */
@@ -278,6 +281,9 @@ along with GCC; see the file COPYING3. If not see
#define NON_TYPE_CHECK(T) \
(non_type_check ((T), __FILE__, __LINE__, __FUNCTION__))
+#define TREE_INT_CST_ELT_CHECK(T, I) \
+(*tree_int_cst_elt_check ((T), (I), __FILE__, __LINE__, __FUNCTION__))
+
#define TREE_VEC_ELT_CHECK(T, I) \
(*(CONST_CAST2 (tree *, typeof (T)*, \
tree_vec_elt_check ((T), (I), __FILE__, __LINE__, __FUNCTION__))))
@@ -333,6 +339,9 @@ extern void tree_not_class_check_failed (const_tree,
const enum tree_code_class,
const char *, int, const char *)
ATTRIBUTE_NORETURN;
+extern void tree_int_cst_elt_check_failed (int, int, const char *,
+ int, const char *)
+ ATTRIBUTE_NORETURN;
extern void tree_vec_elt_check_failed (int, int, const char *,
int, const char *)
ATTRIBUTE_NORETURN;
@@ -370,6 +379,7 @@ extern void omp_clause_range_check_failed (const_tree, const char *, int,
#define TREE_RANGE_CHECK(T, CODE1, CODE2) (T)
#define EXPR_CHECK(T) (T)
#define NON_TYPE_CHECK(T) (T)
+#define TREE_INT_CST_ELT_CHECK(T, I) ((T)->int_cst.val[I])
#define TREE_VEC_ELT_CHECK(T, I) ((T)->vec.a[I])
#define TREE_OPERAND_CHECK(T, I) ((T)->exp.operands[I])
#define TREE_OPERAND_CHECK_CODE(T, CODE, I) ((T)->exp.operands[I])
@@ -738,6 +748,9 @@ extern void omp_clause_range_check_failed (const_tree, const char *, int,
/* In integral and pointer types, means an unsigned type. */
#define TYPE_UNSIGNED(NODE) (TYPE_CHECK (NODE)->base.u.bits.unsigned_flag)
+/* Same as TYPE_UNSIGNED but converted to SIGNOP. */
+#define TYPE_SIGN(NODE) ((signop) TYPE_UNSIGNED (NODE))
+
/* True if overflow wraps around for the given integral type. That
is, TYPE_MAX + 1 == TYPE_MIN. */
#define TYPE_OVERFLOW_WRAPS(TYPE) \
@@ -872,25 +885,15 @@ extern void omp_clause_range_check_failed (const_tree, const char *, int,
/* Define additional fields and accessors for nodes representing constants. */
-/* In an INTEGER_CST node. These two together make a 2-word integer.
- If the data type is signed, the value is sign-extended to 2 words
- even though not all of them may really be in use.
- In an unsigned constant shorter than 2 words, the extra bits are 0. */
-#define TREE_INT_CST(NODE) (INTEGER_CST_CHECK (NODE)->int_cst.int_cst)
-#define TREE_INT_CST_LOW(NODE) (TREE_INT_CST (NODE).low)
-#define TREE_INT_CST_HIGH(NODE) (TREE_INT_CST (NODE).high)
-
-#define INT_CST_LT(A, B) \
- (TREE_INT_CST_HIGH (A) < TREE_INT_CST_HIGH (B) \
- || (TREE_INT_CST_HIGH (A) == TREE_INT_CST_HIGH (B) \
- && TREE_INT_CST_LOW (A) < TREE_INT_CST_LOW (B)))
-
-#define INT_CST_LT_UNSIGNED(A, B) \
- (((unsigned HOST_WIDE_INT) TREE_INT_CST_HIGH (A) \
- < (unsigned HOST_WIDE_INT) TREE_INT_CST_HIGH (B)) \
- || (((unsigned HOST_WIDE_INT) TREE_INT_CST_HIGH (A) \
- == (unsigned HOST_WIDE_INT) TREE_INT_CST_HIGH (B)) \
- && TREE_INT_CST_LOW (A) < TREE_INT_CST_LOW (B)))
+#define TREE_INT_CST_NUNITS(NODE) \
+ (INTEGER_CST_CHECK (NODE)->base.u.int_length.unextended)
+#define TREE_INT_CST_EXT_NUNITS(NODE) \
+ (INTEGER_CST_CHECK (NODE)->base.u.int_length.extended)
+#define TREE_INT_CST_OFFSET_NUNITS(NODE) \
+ (INTEGER_CST_CHECK (NODE)->base.u.int_length.offset)
+#define TREE_INT_CST_ELT(NODE, I) TREE_INT_CST_ELT_CHECK (NODE, I)
+#define TREE_INT_CST_LOW(NODE) \
+ ((unsigned HOST_WIDE_INT) TREE_INT_CST_ELT (NODE, 0))
#define TREE_REAL_CST_PTR(NODE) (REAL_CST_CHECK (NODE)->real_cst.real_cst_ptr)
#define TREE_REAL_CST(NODE) (*TREE_REAL_CST_PTR (NODE))
@@ -2901,6 +2904,30 @@ non_type_check (tree __t, const char *__f, int __l, const char *__g)
return __t;
}
+inline const HOST_WIDE_INT *
+tree_int_cst_elt_check (const_tree __t, int __i,
+ const char *__f, int __l, const char *__g)
+{
+ if (TREE_CODE (__t) != INTEGER_CST)
+ tree_check_failed (__t, __f, __l, __g, INTEGER_CST, 0);
+ if (__i < 0 || __i >= __t->base.u.int_length.extended)
+ tree_int_cst_elt_check_failed (__i, __t->base.u.int_length.extended,
+ __f, __l, __g);
+ return &CONST_CAST_TREE (__t)->int_cst.val[__i];
+}
+
+inline HOST_WIDE_INT *
+tree_int_cst_elt_check (tree __t, int __i,
+ const char *__f, int __l, const char *__g)
+{
+ if (TREE_CODE (__t) != INTEGER_CST)
+ tree_check_failed (__t, __f, __l, __g, INTEGER_CST, 0);
+ if (__i < 0 || __i >= __t->base.u.int_length.extended)
+ tree_int_cst_elt_check_failed (__i, __t->base.u.int_length.extended,
+ __f, __l, __g);
+ return &CONST_CAST_TREE (__t)->int_cst.val[__i];
+}
+
inline tree *
tree_vec_elt_check (tree __t, int __i,
const char *__f, int __l, const char *__g)
@@ -3391,8 +3418,6 @@ tree_operand_check_code (const_tree __t, enum tree_code __code, int __i,
#define int128_integer_type_node integer_types[itk_int128]
#define int128_unsigned_type_node integer_types[itk_unsigned_int128]
-#define NULL_TREE (tree) NULL
-
/* True if NODE is an erroneous expression. */
#define error_operand_p(NODE) \
@@ -3406,9 +3431,9 @@ extern tree decl_assembler_name (tree);
extern size_t tree_size (const_tree);
-/* Compute the number of bytes occupied by a tree with code CODE. This
- function cannot be used for TREE_VEC codes, which are of variable
- length. */
+/* Compute the number of bytes occupied by a tree with code CODE.
+ This function cannot be used for TREE_VEC or INTEGER_CST nodes,
+ which are of variable length. */
extern size_t tree_code_size (enum tree_code);
/* Allocate and return a new UID from the DECL_UID namespace. */
@@ -3438,6 +3463,12 @@ extern tree build_case_label (tree, tree, tree);
extern tree make_tree_binfo_stat (unsigned MEM_STAT_DECL);
#define make_tree_binfo(t) make_tree_binfo_stat (t MEM_STAT_INFO)
+/* Make an INTEGER_CST. */
+
+extern tree make_int_cst_stat (int, int MEM_STAT_DECL);
+#define make_int_cst(LEN, EXT_LEN) \
+ make_int_cst_stat (LEN, EXT_LEN MEM_STAT_INFO)
+
/* Make a TREE_VEC. */
extern tree make_tree_vec_stat (int MEM_STAT_DECL);
@@ -3535,27 +3566,17 @@ extern tree build_var_debug_value_stat (tree, tree MEM_STAT_DECL);
/* Constructs double_int from tree CST. */
-static inline double_int
-tree_to_double_int (const_tree cst)
-{
- return TREE_INT_CST (cst);
-}
-
extern tree double_int_to_tree (tree, double_int);
-extern bool double_int_fits_to_tree_p (const_tree, double_int);
-extern tree force_fit_type_double (tree, double_int, int, bool);
-/* Create an INT_CST node with a CST value zero extended. */
+extern tree wide_int_to_tree (tree type, const wide_int_ref &cst);
+extern tree force_fit_type (tree, const wide_int_ref &, int, bool);
-static inline tree
-build_int_cstu (tree type, unsigned HOST_WIDE_INT cst)
-{
- return double_int_to_tree (type, double_int::from_uhwi (cst));
-}
+/* Create an INT_CST node with a CST value zero extended. */
+/* static inline */
extern tree build_int_cst (tree, HOST_WIDE_INT);
+extern tree build_int_cstu (tree type, unsigned HOST_WIDE_INT cst);
extern tree build_int_cst_type (tree, HOST_WIDE_INT);
-extern tree build_int_cst_wide (tree, unsigned HOST_WIDE_INT, HOST_WIDE_INT);
extern tree make_vector_stat (unsigned MEM_STAT_DECL);
#define make_vector(n) make_vector_stat (n MEM_STAT_INFO)
extern tree build_vector_stat (tree, tree * MEM_STAT_DECL);
@@ -3637,8 +3658,7 @@ extern tree chain_index (int, tree);
extern int attribute_list_equal (const_tree, const_tree);
extern int attribute_list_contained (const_tree, const_tree);
extern int tree_int_cst_equal (const_tree, const_tree);
-extern int tree_int_cst_lt (const_tree, const_tree);
-extern int tree_int_cst_compare (const_tree, const_tree);
+
extern bool tree_fits_shwi_p (const_tree)
#ifndef ENABLE_TREE_CHECKING
ATTRIBUTE_PURE /* tree_fits_shwi_p is pure only when checking is disabled. */
@@ -3668,7 +3688,7 @@ tree_to_uhwi (const_tree t)
#endif
extern int tree_int_cst_sgn (const_tree);
extern int tree_int_cst_sign_bit (const_tree);
-extern unsigned int tree_int_cst_min_precision (tree, bool);
+extern unsigned int tree_int_cst_min_precision (tree, signop);
extern tree strip_array_types (tree);
extern tree excess_precision_type (tree);
extern bool valid_constant_size_p (const_tree);
@@ -4519,6 +4539,180 @@ opts_for_fn (const_tree fndecl)
#endif /* NO_DOLLAR_IN_LABEL */
#endif /* NO_DOT_IN_LABEL */
+/* The tree and const_tree overload templates. */
+namespace wi
+{
+ template <>
+ struct int_traits <const_tree>
+ {
+ static const enum precision_type precision_type = VAR_PRECISION;
+ static const bool host_dependent_precision = false;
+ static const bool is_sign_extended = false;
+ static unsigned int get_precision (const_tree);
+ static wi::storage_ref decompose (HOST_WIDE_INT *, unsigned int,
+ const_tree);
+ };
+
+ template <>
+ struct int_traits <tree> : public int_traits <const_tree> {};
+
+ template <int N>
+ class extended_tree
+ {
+ private:
+ const_tree m_t;
+
+ public:
+ extended_tree (const_tree);
+
+ unsigned int get_precision () const;
+ const HOST_WIDE_INT *get_val () const;
+ unsigned int get_len () const;
+ };
+
+ template <>
+ template <int N>
+ struct int_traits <extended_tree <N> >
+ {
+ static const enum precision_type precision_type = CONST_PRECISION;
+ static const bool host_dependent_precision = false;
+ static const bool is_sign_extended = true;
+ static const unsigned int precision = N;
+ };
+
+ generic_wide_int <extended_tree <WIDE_INT_MAX_PRECISION> >
+ to_widest (const_tree);
+
+ generic_wide_int <extended_tree <ADDR_MAX_PRECISION> > to_offset (const_tree);
+}
+
+inline unsigned int
+wi::int_traits <const_tree>::get_precision (const_tree tcst)
+{
+ return TYPE_PRECISION (TREE_TYPE (tcst));
+}
+
+/* Convert the tree_cst X into a wide_int of PRECISION. */
+inline wi::storage_ref
+wi::int_traits <const_tree>::decompose (HOST_WIDE_INT *,
+ unsigned int precision, const_tree x)
+{
+ return wi::storage_ref (&TREE_INT_CST_ELT (x, 0), TREE_INT_CST_NUNITS (x),
+ precision);
+}
+
+inline generic_wide_int <wi::extended_tree <WIDE_INT_MAX_PRECISION> >
+wi::to_widest (const_tree t)
+{
+ return t;
+}
+
+inline generic_wide_int <wi::extended_tree <ADDR_MAX_PRECISION> >
+wi::to_offset (const_tree t)
+{
+ return t;
+}
+
+template <int N>
+inline wi::extended_tree <N>::extended_tree (const_tree t)
+ : m_t (t)
+{
+ gcc_checking_assert (TYPE_PRECISION (TREE_TYPE (t)) <= N);
+}
+
+template <int N>
+inline unsigned int
+wi::extended_tree <N>::get_precision () const
+{
+ return N;
+}
+
+template <int N>
+inline const HOST_WIDE_INT *
+wi::extended_tree <N>::get_val () const
+{
+ return &TREE_INT_CST_ELT (m_t, 0);
+}
+
+template <int N>
+inline unsigned int
+wi::extended_tree <N>::get_len () const
+{
+ if (N == ADDR_MAX_PRECISION)
+ return TREE_INT_CST_OFFSET_NUNITS (m_t);
+ else if (N >= WIDE_INT_MAX_PRECISION)
+ return TREE_INT_CST_EXT_NUNITS (m_t);
+ else
+ /* This class is designed to be used for specific output precisions
+ and needs to be as fast as possible, so there is no fallback for
+       other cases.  */
+ gcc_unreachable ();
+}
+
+namespace wi
+{
+ template <typename T>
+ bool fits_to_tree_p (const T &x, const_tree);
+
+ wide_int min_value (const_tree);
+ wide_int max_value (const_tree);
+ wide_int from_mpz (const_tree, mpz_t, bool);
+}
+
+template <typename T>
+bool
+wi::fits_to_tree_p (const T &x, const_tree type)
+{
+ if (TYPE_SIGN (type) == UNSIGNED)
+ return eq_p (x, zext (x, TYPE_PRECISION (type)));
+ else
+ return eq_p (x, sext (x, TYPE_PRECISION (type)));
+}
+
+/* Produce the smallest number that is represented in TYPE. The precision
+ and sign are taken from TYPE. */
+inline wide_int
+wi::min_value (const_tree type)
+{
+ return min_value (TYPE_PRECISION (type), TYPE_SIGN (type));
+}
+
+/* Produce the largest number that is represented in TYPE. The precision
+ and sign are taken from TYPE. */
+inline wide_int
+wi::max_value (const_tree type)
+{
+ return max_value (TYPE_PRECISION (type), TYPE_SIGN (type));
+}
+
+/* Return true if INTEGER_CST T1 is less than INTEGER_CST T2,
+ extending both according to their respective TYPE_SIGNs. */
+
+inline bool
+tree_int_cst_lt (const_tree t1, const_tree t2)
+{
+ return wi::lts_p (wi::to_widest (t1), wi::to_widest (t2));
+}
+
+/* Return true if INTEGER_CST T1 is less than or equal to INTEGER_CST T2,
+ extending both according to their respective TYPE_SIGNs. */
+
+inline bool
+tree_int_cst_le (const_tree t1, const_tree t2)
+{
+ return wi::les_p (wi::to_widest (t1), wi::to_widest (t2));
+}
+
+/* Returns -1 if T1 < T2, 0 if T1 == T2, and 1 if T1 > T2. T1 and T2
+ are both INTEGER_CSTs and their values are extended according to their
+ respective TYPE_SIGNs. */
+
+inline int
+tree_int_cst_compare (const_tree t1, const_tree t2)
+{
+ return wi::cmps (wi::to_widest (t1), wi::to_widest (t2));
+}
+
/* FIXME - These declarations belong in builtins.h, expr.h and emit-rtl.h,
but none of these files are allowed to be included from front ends.
They should be split in two. One suitable for the FEs, the other suitable
diff --git a/gcc/value-prof.c b/gcc/value-prof.c
index 28900934744..0d033ef6284 100644
--- a/gcc/value-prof.c
+++ b/gcc/value-prof.c
@@ -834,9 +834,17 @@ gimple_divmod_fixed_value_transform (gimple_stmt_iterator *si)
else
prob = 0;
- tree_val = build_int_cst_wide (get_gcov_type (),
- (unsigned HOST_WIDE_INT) val,
- val >> (HOST_BITS_PER_WIDE_INT - 1) >> 1);
+ if (sizeof (gcov_type) == sizeof (HOST_WIDE_INT))
+ tree_val = build_int_cst (get_gcov_type (), val);
+ else
+ {
+ HOST_WIDE_INT a[2];
+ a[0] = (unsigned HOST_WIDE_INT) val;
+ a[1] = val >> (HOST_BITS_PER_WIDE_INT - 1) >> 1;
+
+ tree_val = wide_int_to_tree (get_gcov_type (), wide_int::from_array (a, 2,
+ TYPE_PRECISION (get_gcov_type ()), false));
+ }
result = gimple_divmod_fixed_value (stmt, tree_val, prob, count, all);
if (dump_file)
@@ -1745,9 +1753,18 @@ gimple_stringops_transform (gimple_stmt_iterator *gsi)
default:
gcc_unreachable ();
}
- tree_val = build_int_cst_wide (get_gcov_type (),
- (unsigned HOST_WIDE_INT) val,
- val >> (HOST_BITS_PER_WIDE_INT - 1) >> 1);
+ if (sizeof (gcov_type) == sizeof (HOST_WIDE_INT))
+ tree_val = build_int_cst (get_gcov_type (), val);
+ else
+ {
+ HOST_WIDE_INT a[2];
+ a[0] = (unsigned HOST_WIDE_INT) val;
+ a[1] = val >> (HOST_BITS_PER_WIDE_INT - 1) >> 1;
+
+ tree_val = wide_int_to_tree (get_gcov_type (), wide_int::from_array (a, 2,
+ TYPE_PRECISION (get_gcov_type ()), false));
+ }
+
if (dump_file)
{
fprintf (dump_file, "Single value %i stringop transformation on ",
diff --git a/gcc/var-tracking.c b/gcc/var-tracking.c
index d55b44a4618..f36a1e9a692 100644
--- a/gcc/var-tracking.c
+++ b/gcc/var-tracking.c
@@ -3555,6 +3555,23 @@ loc_cmp (rtx x, rtx y)
default:
gcc_unreachable ();
}
+ if (CONST_WIDE_INT_P (x))
+ {
+ /* Compare the vector length first. */
+      if (CONST_WIDE_INT_NUNITS (x) > CONST_WIDE_INT_NUNITS (y))
+ return 1;
+ else if (CONST_WIDE_INT_NUNITS (x) < CONST_WIDE_INT_NUNITS (y))
+ return -1;
+
+      /* Compare the vector's elements.  */;
+ for (j = CONST_WIDE_INT_NUNITS (x) - 1; j >= 0 ; j--)
+ {
+ if (CONST_WIDE_INT_ELT (x, j) < CONST_WIDE_INT_ELT (y, j))
+ return -1;
+ if (CONST_WIDE_INT_ELT (x, j) > CONST_WIDE_INT_ELT (y, j))
+ return 1;
+ }
+ }
return 0;
}
diff --git a/gcc/varasm.c b/gcc/varasm.c
index 8e8c5f6634e..f8930b989b8 100644
--- a/gcc/varasm.c
+++ b/gcc/varasm.c
@@ -2738,7 +2738,7 @@ decode_addr_const (tree exp, struct addr_const *value)
else if (TREE_CODE (target) == MEM_REF
&& TREE_CODE (TREE_OPERAND (target, 0)) == ADDR_EXPR)
{
- offset += mem_ref_offset (target).low;
+ offset += mem_ref_offset (target).to_short_addr ();
target = TREE_OPERAND (TREE_OPERAND (target, 0), 0);
}
else if (TREE_CODE (target) == INDIRECT_REF
@@ -2818,8 +2818,8 @@ const_hash_1 (const tree exp)
switch (code)
{
case INTEGER_CST:
- p = (char *) &TREE_INT_CST (exp);
- len = sizeof TREE_INT_CST (exp);
+ p = (char *) &TREE_INT_CST_ELT (exp, 0);
+ len = TREE_INT_CST_NUNITS (exp) * sizeof (HOST_WIDE_INT);
break;
case REAL_CST:
@@ -3520,6 +3520,7 @@ const_rtx_hash_1 (rtx *xp, void *data)
enum rtx_code code;
hashval_t h, *hp;
rtx x;
+ int i;
x = *xp;
code = GET_CODE (x);
@@ -3530,11 +3531,11 @@ const_rtx_hash_1 (rtx *xp, void *data)
{
case CONST_INT:
hwi = INTVAL (x);
+
fold_hwi:
{
int shift = sizeof (hashval_t) * CHAR_BIT;
const int n = sizeof (HOST_WIDE_INT) / sizeof (hashval_t);
- int i;
h ^= (hashval_t) hwi;
for (i = 1; i < n; ++i)
@@ -3545,8 +3546,16 @@ const_rtx_hash_1 (rtx *xp, void *data)
}
break;
+ case CONST_WIDE_INT:
+ hwi = GET_MODE_PRECISION (mode);
+ {
+ for (i = 0; i < CONST_WIDE_INT_NUNITS (x); i++)
+ hwi ^= CONST_WIDE_INT_ELT (x, i);
+ goto fold_hwi;
+ }
+
case CONST_DOUBLE:
- if (mode == VOIDmode)
+ if (TARGET_SUPPORTS_WIDE_INT == 0 && mode == VOIDmode)
{
hwi = CONST_DOUBLE_LOW (x) ^ CONST_DOUBLE_HIGH (x);
goto fold_hwi;
@@ -4639,8 +4648,7 @@ output_constant (tree exp, unsigned HOST_WIDE_INT size, unsigned int align)
exp = build1 (ADDR_EXPR, saved_type, TREE_OPERAND (exp, 0));
/* Likewise for constant ints. */
else if (TREE_CODE (exp) == INTEGER_CST)
- exp = build_int_cst_wide (saved_type, TREE_INT_CST_LOW (exp),
- TREE_INT_CST_HIGH (exp));
+ exp = wide_int_to_tree (saved_type, exp);
}
@@ -4779,7 +4787,7 @@ array_size_for_constructor (tree val)
tree max_index;
unsigned HOST_WIDE_INT cnt;
tree index, value, tmp;
- double_int i;
+ offset_int i;
/* This code used to attempt to handle string constants that are not
arrays of single-bytes, but nothing else does, so there's no point in
@@ -4801,14 +4809,13 @@ array_size_for_constructor (tree val)
/* Compute the total number of array elements. */
tmp = TYPE_MIN_VALUE (TYPE_DOMAIN (TREE_TYPE (val)));
- i = tree_to_double_int (max_index) - tree_to_double_int (tmp);
- i += double_int_one;
+ i = wi::to_offset (max_index) - wi::to_offset (tmp) + 1;
/* Multiply by the array element unit size to find number of bytes. */
- i *= tree_to_double_int (TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (val))));
+ i *= wi::to_offset (TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (val))));
- gcc_assert (i.fits_uhwi ());
- return i.low;
+ gcc_assert (wi::fits_uhwi_p (i));
+ return i.to_uhwi ();
}
/* Other datastructures + helpers for output_constructor. */
@@ -4888,11 +4895,10 @@ output_constructor_regular_field (oc_local_state *local)
sign-extend the result because Ada has negative DECL_FIELD_OFFSETs
but we are using an unsigned sizetype. */
unsigned prec = TYPE_PRECISION (sizetype);
- double_int idx = tree_to_double_int (local->index)
- - tree_to_double_int (local->min_index);
- idx = idx.sext (prec);
- fieldpos = (tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (local->val)))
- * idx.low);
+ offset_int idx = wi::sext (wi::to_offset (local->index)
+ - wi::to_offset (local->min_index), prec);
+ fieldpos = (idx * wi::to_offset (TYPE_SIZE_UNIT (TREE_TYPE (local->val))))
+ .to_short_addr ();
}
else if (local->field != NULL_TREE)
fieldpos = int_byte_position (local->field);
@@ -5084,22 +5090,13 @@ output_constructor_bitfield (oc_local_state *local, unsigned int bit_offset)
the word boundary in the INTEGER_CST. We can
only select bits from the LOW or HIGH part
not from both. */
- if (shift < HOST_BITS_PER_WIDE_INT
- && shift + this_time > HOST_BITS_PER_WIDE_INT)
- {
- this_time = shift + this_time - HOST_BITS_PER_WIDE_INT;
- shift = HOST_BITS_PER_WIDE_INT;
- }
+ if ((shift / HOST_BITS_PER_WIDE_INT)
+ != ((shift + this_time) / HOST_BITS_PER_WIDE_INT))
+ this_time = (shift + this_time) & (HOST_BITS_PER_WIDE_INT - 1);
/* Now get the bits from the appropriate constant word. */
- if (shift < HOST_BITS_PER_WIDE_INT)
- value = TREE_INT_CST_LOW (local->val);
- else
- {
- gcc_assert (shift < HOST_BITS_PER_DOUBLE_INT);
- value = TREE_INT_CST_HIGH (local->val);
- shift -= HOST_BITS_PER_WIDE_INT;
- }
+ value = TREE_INT_CST_ELT (local->val, shift / HOST_BITS_PER_WIDE_INT);
+ shift = shift & (HOST_BITS_PER_WIDE_INT - 1);
/* Get the result. This works only when:
1 <= this_time <= HOST_BITS_PER_WIDE_INT. */
@@ -5119,19 +5116,13 @@ output_constructor_bitfield (oc_local_state *local, unsigned int bit_offset)
the word boundary in the INTEGER_CST. We can
only select bits from the LOW or HIGH part
not from both. */
- if (shift < HOST_BITS_PER_WIDE_INT
- && shift + this_time > HOST_BITS_PER_WIDE_INT)
+ if ((shift / HOST_BITS_PER_WIDE_INT)
+ != ((shift + this_time) / HOST_BITS_PER_WIDE_INT))
this_time = (HOST_BITS_PER_WIDE_INT - shift);
/* Now get the bits from the appropriate constant word. */
- if (shift < HOST_BITS_PER_WIDE_INT)
- value = TREE_INT_CST_LOW (local->val);
- else
- {
- gcc_assert (shift < HOST_BITS_PER_DOUBLE_INT);
- value = TREE_INT_CST_HIGH (local->val);
- shift -= HOST_BITS_PER_WIDE_INT;
- }
+ value = TREE_INT_CST_ELT (local->val, shift / HOST_BITS_PER_WIDE_INT);
+ shift = shift & (HOST_BITS_PER_WIDE_INT - 1);
/* Get the result. This works only when:
1 <= this_time <= HOST_BITS_PER_WIDE_INT. */
diff --git a/gcc/wide-int-print.cc b/gcc/wide-int-print.cc
new file mode 100644
index 00000000000..c79c781d3ef
--- /dev/null
+++ b/gcc/wide-int-print.cc
@@ -0,0 +1,145 @@
+/* Printing operations with very long integers.
+ Copyright (C) 2012-2013 Free Software Foundation, Inc.
+ Contributed by Kenneth Zadeck <zadeck@naturalbridge.com>
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by the
+Free Software Foundation; either version 3, or (at your option) any
+later version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#include "config.h"
+#include "system.h"
+#include "coretypes.h"
+#include "tm.h"
+#include "hwint.h"
+#include "wide-int.h"
+#include "wide-int-print.h"
+
+/*
+ * public printing routines.
+ */
+
+#define BLOCKS_NEEDED(PREC) \
+ (((PREC) + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT)
+
+void
+print_dec (const wide_int_ref &wi, char *buf, signop sgn)
+{
+ if (sgn == SIGNED)
+ print_decs (wi, buf);
+ else
+ print_decu (wi, buf);
+}
+
+void
+print_dec (const wide_int_ref &wi, FILE *file, signop sgn)
+{
+ if (sgn == SIGNED)
+ print_decs (wi, file);
+ else
+ print_decu (wi, file);
+}
+
+
+/* Try to print the signed self in decimal to BUF if the number fits
+   in a HWI.  Otherwise print in hex.  */
+
+void
+print_decs (const wide_int_ref &wi, char *buf)
+{
+ if ((wi.get_precision () <= HOST_BITS_PER_WIDE_INT)
+ || (wi.get_len () == 1))
+ {
+ if (wi::neg_p (wi))
+ sprintf (buf, "-" HOST_WIDE_INT_PRINT_UNSIGNED, -wi.to_shwi ());
+ else
+ sprintf (buf, HOST_WIDE_INT_PRINT_DEC, wi.to_shwi ());
+ }
+ else
+ print_hex (wi, buf);
+}
+
+/* Try to print the signed self in decimal to FILE if the number fits
+   in a HWI.  Otherwise print in hex.  */
+
+void
+print_decs (const wide_int_ref &wi, FILE *file)
+{
+ char buf[WIDE_INT_PRINT_BUFFER_SIZE];
+ print_decs (wi, buf);
+ fputs (buf, file);
+}
+
+/* Try to print the unsigned self in decimal to BUF if the number fits
+   in a HWI.  Otherwise print in hex.  */
+
+void
+print_decu (const wide_int_ref &wi, char *buf)
+{
+ if ((wi.get_precision () <= HOST_BITS_PER_WIDE_INT)
+ || (wi.get_len () == 1 && !wi::neg_p (wi)))
+ sprintf (buf, HOST_WIDE_INT_PRINT_UNSIGNED, wi.to_uhwi ());
+ else
+ print_hex (wi, buf);
+}
+
+/* Try to print the unsigned self in decimal to FILE if the number fits
+   in a HWI.  Otherwise print in hex.  */
+
+void
+print_decu (const wide_int_ref &wi, FILE *file)
+{
+ char buf[WIDE_INT_PRINT_BUFFER_SIZE];
+ print_decu (wi, buf);
+ fputs (buf, file);
+}
+
+void
+print_hex (const wide_int_ref &wi, char *buf)
+{
+ int i = wi.get_len ();
+
+ if (wi == 0)
+ buf += sprintf (buf, "0x0");
+ else
+ {
+ if (wi::neg_p (wi))
+ {
+ int j;
+ /* If the number is negative, we may need to pad value with
+ 0xFFF... because the leading elements may be missing and
+ we do not print a '-' with hex. */
+ buf += sprintf (buf, "0x");
+ for (j = BLOCKS_NEEDED (wi.get_precision ()); j > i; j--)
+ buf += sprintf (buf, HOST_WIDE_INT_PRINT_PADDED_HEX, (HOST_WIDE_INT) -1);
+
+ }
+ else
+ buf += sprintf (buf, "0x"HOST_WIDE_INT_PRINT_HEX_PURE, wi.elt (--i));
+
+ while (--i >= 0)
+ buf += sprintf (buf, HOST_WIDE_INT_PRINT_PADDED_HEX, wi.elt (i));
+ }
+}
+
+/* Print one big hex number to FILE. Note that some assemblers may not
+ accept this for large modes. */
+void
+print_hex (const wide_int_ref &wi, FILE *file)
+{
+ char buf[WIDE_INT_PRINT_BUFFER_SIZE];
+ print_hex (wi, buf);
+ fputs (buf, file);
+}
+
diff --git a/gcc/wide-int-print.h b/gcc/wide-int-print.h
new file mode 100644
index 00000000000..9ab37d33432
--- /dev/null
+++ b/gcc/wide-int-print.h
@@ -0,0 +1,39 @@
+/* Print wide integers.
+ Copyright (C) 2013 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by the
+Free Software Foundation; either version 3, or (at your option) any
+later version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef WIDE_INT_PRINT_H
+#define WIDE_INT_PRINT_H
+
+#include <stdio.h>
+#include "wide-int.h"
+
+#define WIDE_INT_PRINT_BUFFER_SIZE (WIDE_INT_MAX_PRECISION / 4 + 4)
+
+/* Printing functions. */
+
+extern void print_dec (const wide_int_ref &wi, char *buf, signop sgn);
+extern void print_dec (const wide_int_ref &wi, FILE *file, signop sgn);
+extern void print_decs (const wide_int_ref &wi, char *buf);
+extern void print_decs (const wide_int_ref &wi, FILE *file);
+extern void print_decu (const wide_int_ref &wi, char *buf);
+extern void print_decu (const wide_int_ref &wi, FILE *file);
+extern void print_hex (const wide_int_ref &wi, char *buf);
+extern void print_hex (const wide_int_ref &wi, FILE *file);
+
+#endif /* WIDE_INT_PRINT_H */
diff --git a/gcc/wide-int.cc b/gcc/wide-int.cc
new file mode 100644
index 00000000000..69a15bcd148
--- /dev/null
+++ b/gcc/wide-int.cc
@@ -0,0 +1,2083 @@
+/* Operations with very long integers.
+ Copyright (C) 2012-2013 Free Software Foundation, Inc.
+ Contributed by Kenneth Zadeck <zadeck@naturalbridge.com>
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by the
+Free Software Foundation; either version 3, or (at your option) any
+later version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#include "config.h"
+#include "system.h"
+#include "coretypes.h"
+#include "tm.h"
+#include "hwint.h"
+#include "wide-int.h"
+#include "tree.h"
+#include "dumpfile.h"
+
+#if GCC_VERSION >= 3000
+#define W_TYPE_SIZE HOST_BITS_PER_WIDE_INT
+typedef unsigned HOST_HALF_WIDE_INT UHWtype;
+typedef unsigned HOST_WIDE_INT UWtype;
+typedef unsigned int UQItype __attribute__ ((mode (QI)));
+typedef unsigned int USItype __attribute__ ((mode (SI)));
+typedef unsigned int UDItype __attribute__ ((mode (DI)));
+#include "longlong.h"
+#endif
+
+static const HOST_WIDE_INT zeros[WIDE_INT_MAX_ELTS] = {};
+
+/*
+ * Internal utilities.
+ */
+
+/* Quantities to deal with values that hold half of a wide int. Used
+ in multiply and divide. */
+#define HALF_INT_MASK (((HOST_WIDE_INT) 1 << HOST_BITS_PER_HALF_WIDE_INT) - 1)
+
+#define BLOCK_OF(TARGET) ((TARGET) / HOST_BITS_PER_WIDE_INT)
+#define BLOCKS_NEEDED(PREC) \
+ (PREC ? (((PREC) + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT) : 1)
+#define SIGN_MASK(X) ((HOST_WIDE_INT) (X) < 0 ? -1 : 0)
+
+/* Return the value of VAL[I] if I < LEN; otherwise, return 0 or -1
+   based on the top existing bit of VAL.  */
+
+static unsigned HOST_WIDE_INT
+safe_uhwi (const HOST_WIDE_INT *val, unsigned int len, unsigned int i)
+{
+ return i < len ? val[i] : val[len - 1] < 0 ? (HOST_WIDE_INT) -1 : 0;
+}
+
+/* Convert the integer in VAL to canonical form, returning its new length.
+ LEN is the number of blocks currently in VAL and PRECISION is the number
+ of bits in the integer it represents.
+
+ This function only changes the representation, not the value. */
+static unsigned int
+canonize (HOST_WIDE_INT *val, unsigned int len, unsigned int precision)
+{
+ unsigned int blocks_needed = BLOCKS_NEEDED (precision);
+ HOST_WIDE_INT top;
+ int i;
+
+ if (len > blocks_needed)
+ len = blocks_needed;
+
+ if (len == 1)
+ return len;
+
+ top = val[len - 1];
+ if (len * HOST_BITS_PER_WIDE_INT > precision)
+ val[len - 1] = top = sext_hwi (top, precision % HOST_BITS_PER_WIDE_INT);
+ if (top != 0 && top != (HOST_WIDE_INT)-1)
+ return len;
+
+ /* At this point we know that the top is either 0 or -1. Find the
+ first block that is not a copy of this. */
+ for (i = len - 2; i >= 0; i--)
+ {
+ HOST_WIDE_INT x = val[i];
+ if (x != top)
+ {
+ if (SIGN_MASK (x) == top)
+ return i + 1;
+
+	  /* We need an extra block because the top bit of block i does
+	     not match the extension.  */
+ return i + 2;
+ }
+ }
+
+ /* The number is 0 or -1. */
+ return 1;
+}
+
+/*
+ * Conversion routines in and out of wide_int.
+ */
+
+/* Copy XLEN elements from XVAL to VAL. If NEED_CANON, canonize the
+ result for an integer with precision PRECISION. Return the length
+   of VAL (after any canonization).  */
+unsigned int
+wi::from_array (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
+ unsigned int xlen, unsigned int precision, bool need_canon)
+{
+ for (unsigned i = 0; i < xlen; i++)
+ val[i] = xval[i];
+ return need_canon ? canonize (val, xlen, precision) : xlen;
+}
+
+/* Construct a wide int from a buffer of length LEN. BUFFER will be
+   read according to byte endianness and word endianness of the target.
+ Only the lower BUFFER_LEN bytes of the result are set; the remaining
+ high bytes are cleared. */
+wide_int
+wi::from_buffer (const unsigned char *buffer, unsigned int buffer_len)
+{
+ unsigned int precision = buffer_len * BITS_PER_UNIT;
+ wide_int result = wide_int::create (precision);
+ unsigned int words = buffer_len / UNITS_PER_WORD;
+
+ /* We have to clear all the bits ourself, as we merely or in values
+ below. */
+ unsigned int len = BLOCKS_NEEDED (precision);
+ HOST_WIDE_INT *val = result.write_val ();
+ for (unsigned int i = 0; i < len; ++i)
+ val[i] = 0;
+
+ for (unsigned int byte = 0; byte < buffer_len; byte++)
+ {
+ unsigned int offset;
+ unsigned int index;
+ unsigned int bitpos = byte * BITS_PER_UNIT;
+ unsigned HOST_WIDE_INT value;
+
+ if (buffer_len > UNITS_PER_WORD)
+ {
+ unsigned int word = byte / UNITS_PER_WORD;
+
+ if (WORDS_BIG_ENDIAN)
+ word = (words - 1) - word;
+
+ offset = word * UNITS_PER_WORD;
+
+ if (BYTES_BIG_ENDIAN)
+ offset += (UNITS_PER_WORD - 1) - (byte % UNITS_PER_WORD);
+ else
+ offset += byte % UNITS_PER_WORD;
+ }
+ else
+ offset = BYTES_BIG_ENDIAN ? (buffer_len - 1) - byte : byte;
+
+ value = (unsigned HOST_WIDE_INT) buffer[offset];
+
+ index = bitpos / HOST_BITS_PER_WIDE_INT;
+ val[index] |= value << (bitpos % HOST_BITS_PER_WIDE_INT);
+ }
+
+ result.set_len (canonize (val, len, precision));
+
+ return result;
+}
+
+/* Sets RESULT from X, the sign is taken according to SGN. */
+void
+wi::to_mpz (const wide_int_ref &x, mpz_t result, signop sgn)
+{
+ int len = x.get_len ();
+ const HOST_WIDE_INT *v = x.get_val ();
+ int excess = len * HOST_BITS_PER_WIDE_INT - x.get_precision ();
+
+ if (wi::neg_p (x, sgn))
+ {
+      /* We use ones complement to avoid -0x80..0 edge case that -
+	 won't work on.  */
+ HOST_WIDE_INT *t = XALLOCAVEC (HOST_WIDE_INT, len);
+ for (int i = 0; i < len; i++)
+ t[i] = ~v[i];
+ if (excess > 0)
+ t[len - 1] = (unsigned HOST_WIDE_INT) t[len - 1] << excess >> excess;
+ mpz_import (result, len, -1, sizeof (HOST_WIDE_INT), 0, 0, t);
+ mpz_com (result, result);
+ }
+ else if (excess > 0)
+ {
+ HOST_WIDE_INT *t = XALLOCAVEC (HOST_WIDE_INT, len);
+ for (int i = 0; i < len - 1; i++)
+ t[i] = v[i];
+ t[len - 1] = (unsigned HOST_WIDE_INT) v[len - 1] << excess >> excess;
+ mpz_import (result, len, -1, sizeof (HOST_WIDE_INT), 0, 0, t);
+ }
+ else
+ mpz_import (result, len, -1, sizeof (HOST_WIDE_INT), 0, 0, v);
+}
+
+/* Returns X converted to TYPE. If WRAP is true, then out-of-range
+ values of VAL will be wrapped; otherwise, they will be set to the
+ appropriate minimum or maximum TYPE bound. */
+wide_int
+wi::from_mpz (const_tree type, mpz_t x, bool wrap)
+{
+ size_t count, numb;
+ int prec = TYPE_PRECISION (type);
+ wide_int res = wide_int::create (prec);
+
+ if (!wrap)
+ {
+ mpz_t min, max;
+
+ mpz_init (min);
+ mpz_init (max);
+ get_type_static_bounds (type, min, max);
+
+ if (mpz_cmp (x, min) < 0)
+ mpz_set (x, min);
+ else if (mpz_cmp (x, max) > 0)
+ mpz_set (x, max);
+
+ mpz_clear (min);
+ mpz_clear (max);
+ }
+
+ /* Determine the number of unsigned HOST_WIDE_INTs that are required
+ for representing the value. The code to calculate count is
+ extracted from the GMP manual, section "Integer Import and Export":
+ http://gmplib.org/manual/Integer-Import-and-Export.html */
+ numb = 8 * sizeof(HOST_WIDE_INT);
+ count = (mpz_sizeinbase (x, 2) + numb - 1) / numb;
+ HOST_WIDE_INT *val = res.write_val ();
+ mpz_export (val, &count, -1, sizeof (HOST_WIDE_INT), 0, 0, x);
+ if (count < 1)
+ {
+ val[0] = 0;
+ count = 1;
+ }
+ res.set_len (count);
+
+ if (mpz_sgn (x) < 0)
+ res = -res;
+
+ return res;
+}
+
+/*
+ * Largest and smallest values in a mode.
+ */
+
+/* Return the largest SGNed number that is representable in PRECISION bits.
+
+   TODO: There is still code from the double_int era that tries to
+ make up for the fact that double int's could not represent the
+ min and max values of all types. This code should be removed
+ because the min and max values can always be represented in
+ wide_ints and int-csts. */
+wide_int
+wi::max_value (unsigned int precision, signop sgn)
+{
+ gcc_checking_assert (precision != 0);
+ if (sgn == UNSIGNED)
+ /* The unsigned max is just all ones. */
+ return shwi (-1, precision);
+ else
+ /* The signed max is all ones except the top bit. This must be
+ explicitly represented. */
+ return mask (precision - 1, false, precision);
+}
+
+/* Return the smallest SGNed number that is representable in PRECISION bits.  */
+wide_int
+wi::min_value (unsigned int precision, signop sgn)
+{
+ gcc_checking_assert (precision != 0);
+ if (sgn == UNSIGNED)
+ return uhwi (0, precision);
+ else
+ /* The signed min is all zeros except the top bit. This must be
+ explicitly represented. */
+ return wi::set_bit_in_zero (precision - 1, precision);
+}
+
+/*
+ * Public utilities.
+ */
+
+/* Convert the number represented by XVAL, XLEN and XPRECISION, which has
+ signedness SGN, to an integer that has PRECISION bits. Store the blocks
+ in VAL and return the number of blocks used.
+
+ This function can handle both extension (PRECISION > XPRECISION)
+ and truncation (PRECISION < XPRECISION). */
+unsigned int
+wi::force_to_size (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
+ unsigned int xlen, unsigned int xprecision,
+ unsigned int precision, signop sgn)
+{
+ unsigned int blocks_needed = BLOCKS_NEEDED (precision);
+ unsigned int len = blocks_needed < xlen ? blocks_needed : xlen;
+ for (unsigned i = 0; i < len; i++)
+ val[i] = xval[i];
+
+ if (precision > xprecision)
+ {
+ unsigned int small_xprecision = xprecision % HOST_BITS_PER_WIDE_INT;
+
+ /* Expanding. */
+ if (sgn == UNSIGNED)
+ {
+ if (small_xprecision && len == BLOCKS_NEEDED (xprecision))
+ val[len - 1] = zext_hwi (val[len - 1], small_xprecision);
+ else if (val[len - 1] < 0)
+ {
+ while (len < BLOCKS_NEEDED (xprecision))
+ val[len++] = -1;
+ if (small_xprecision)
+ val[len - 1] = zext_hwi (val[len - 1], small_xprecision);
+ else
+ val[len++] = 0;
+ }
+ }
+ else
+ {
+ if (small_xprecision && len == BLOCKS_NEEDED (xprecision))
+ val[len - 1] = sext_hwi (val[len - 1], small_xprecision);
+ }
+ }
+ len = canonize (val, len, precision);
+
+ return len;
+}
+
+/* This function hides the fact that we cannot rely on the bits beyond
+   the precision.  This issue comes up in the relational comparisons
+ where we do allow comparisons of values of different precisions. */
+static inline HOST_WIDE_INT
+selt (const HOST_WIDE_INT *a, unsigned int len,
+ unsigned int blocks_needed, unsigned int small_prec,
+ unsigned int index, signop sgn)
+{
+ HOST_WIDE_INT val;
+ if (index < len)
+ val = a[index];
+ else if (index < blocks_needed || sgn == SIGNED)
+ /* Signed or within the precision. */
+ val = SIGN_MASK (a[len - 1]);
+ else
+ /* Unsigned extension beyond the precision. */
+ val = 0;
+
+ if (small_prec && index == blocks_needed - 1)
+ return (sgn == SIGNED
+ ? sext_hwi (val, small_prec)
+ : zext_hwi (val, small_prec));
+ else
+ return val;
+}
+
+/* Find the highest bit represented in a wide int. This will in
+ general have the same value as the sign bit. */
+static inline HOST_WIDE_INT
+top_bit_of (const HOST_WIDE_INT *a, unsigned int len, unsigned int prec)
+{
+ int excess = len * HOST_BITS_PER_WIDE_INT - prec;
+ unsigned HOST_WIDE_INT val = a[len - 1];
+ if (excess > 0)
+ val <<= excess;
+ return val >> (HOST_BITS_PER_WIDE_INT - 1);
+}
+
+/*
+ * Comparisons, note that only equality is an operator. The other
+ * comparisons cannot be operators since they are inherently signed or
+ * unsigned and C++ has no such operators.
+ */
+
+/* Return true if OP0 == OP1. */
+bool
+wi::eq_p_large (const HOST_WIDE_INT *op0, unsigned int op0len,
+ const HOST_WIDE_INT *op1, unsigned int op1len,
+ unsigned int prec)
+{
+ int l0 = op0len - 1;
+ unsigned int small_prec = prec & (HOST_BITS_PER_WIDE_INT - 1);
+
+ if (op0len != op1len)
+ return false;
+
+ if (op0len == BLOCKS_NEEDED (prec) && small_prec)
+ {
+ /* It does not matter if we zext or sext here, we just have to
+ do both the same way. */
+ if (zext_hwi (op0 [l0], small_prec) != zext_hwi (op1 [l0], small_prec))
+ return false;
+ l0--;
+ }
+
+ while (l0 >= 0)
+ if (op0[l0] != op1[l0])
+ return false;
+ else
+ l0--;
+
+ return true;
+}
+
+/* Return true if OP0 < OP1 using signed comparisons. */
+bool
+wi::lts_p_large (const HOST_WIDE_INT *op0, unsigned int op0len,
+ unsigned int precision,
+ const HOST_WIDE_INT *op1, unsigned int op1len)
+{
+ HOST_WIDE_INT s0, s1;
+ unsigned HOST_WIDE_INT u0, u1;
+ unsigned int blocks_needed = BLOCKS_NEEDED (precision);
+ unsigned int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1);
+ int l = MAX (op0len - 1, op1len - 1);
+
+ /* Only the top block is compared as signed. The rest are unsigned
+ comparisons. */
+ s0 = selt (op0, op0len, blocks_needed, small_prec, l, SIGNED);
+ s1 = selt (op1, op1len, blocks_needed, small_prec, l, SIGNED);
+ if (s0 < s1)
+ return true;
+ if (s0 > s1)
+ return false;
+
+ l--;
+ while (l >= 0)
+ {
+ u0 = selt (op0, op0len, blocks_needed, small_prec, l, SIGNED);
+ u1 = selt (op1, op1len, blocks_needed, small_prec, l, SIGNED);
+
+ if (u0 < u1)
+ return true;
+ if (u0 > u1)
+ return false;
+ l--;
+ }
+
+ return false;
+}
+
+/* Returns -1 if OP0 < OP1, 0 if OP0 == OP1 and 1 if OP0 > OP1 using
+ signed compares. */
+int
+wi::cmps_large (const HOST_WIDE_INT *op0, unsigned int op0len,
+ unsigned int precision,
+ const HOST_WIDE_INT *op1, unsigned int op1len)
+{
+ HOST_WIDE_INT s0, s1;
+ unsigned HOST_WIDE_INT u0, u1;
+ unsigned int blocks_needed = BLOCKS_NEEDED (precision);
+ unsigned int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1);
+ int l = MAX (op0len - 1, op1len - 1);
+
+ /* Only the top block is compared as signed. The rest are unsigned
+ comparisons. */
+ s0 = selt (op0, op0len, blocks_needed, small_prec, l, SIGNED);
+ s1 = selt (op1, op1len, blocks_needed, small_prec, l, SIGNED);
+ if (s0 < s1)
+ return -1;
+ if (s0 > s1)
+ return 1;
+
+ l--;
+ while (l >= 0)
+ {
+ u0 = selt (op0, op0len, blocks_needed, small_prec, l, SIGNED);
+ u1 = selt (op1, op1len, blocks_needed, small_prec, l, SIGNED);
+
+ if (u0 < u1)
+ return -1;
+ if (u0 > u1)
+ return 1;
+ l--;
+ }
+
+ return 0;
+}
+
+/* Return true if OP0 < OP1 using unsigned comparisons. */
+bool
+wi::ltu_p_large (const HOST_WIDE_INT *op0, unsigned int op0len,
+ unsigned int precision,
+ const HOST_WIDE_INT *op1, unsigned int op1len)
+{
+ unsigned HOST_WIDE_INT x0;
+ unsigned HOST_WIDE_INT x1;
+ unsigned int blocks_needed = BLOCKS_NEEDED (precision);
+ unsigned int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1);
+ int l = MAX (op0len - 1, op1len - 1);
+
+ while (l >= 0)
+ {
+ x0 = selt (op0, op0len, blocks_needed, small_prec, l, UNSIGNED);
+ x1 = selt (op1, op1len, blocks_needed, small_prec, l, UNSIGNED);
+ if (x0 < x1)
+ return true;
+ if (x0 > x1)
+ return false;
+ l--;
+ }
+
+ return false;
+}
+
+/* Returns -1 if OP0 < OP1, 0 if OP0 == OP1 and 1 if OP0 > OP1 using
+ unsigned compares. */
+int
+wi::cmpu_large (const HOST_WIDE_INT *op0, unsigned int op0len,
+ unsigned int precision,
+ const HOST_WIDE_INT *op1, unsigned int op1len)
+{
+ unsigned HOST_WIDE_INT x0;
+ unsigned HOST_WIDE_INT x1;
+ unsigned int blocks_needed = BLOCKS_NEEDED (precision);
+ unsigned int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1);
+ int l = MAX (op0len - 1, op1len - 1);
+
+ while (l >= 0)
+ {
+ x0 = selt (op0, op0len, blocks_needed, small_prec, l, UNSIGNED);
+ x1 = selt (op1, op1len, blocks_needed, small_prec, l, UNSIGNED);
+ if (x0 < x1)
+ return -1;
+ if (x0 > x1)
+ return 1;
+ l--;
+ }
+
+ return 0;
+}
+
+/*
+ * Extension.
+ */
+
+/* Sign-extend the number represented by XVAL and XLEN into VAL,
+ starting at OFFSET. Return the number of blocks in VAL. Both XVAL
+ and VAL have PRECISION bits. */
+unsigned int
+wi::sext_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
+ unsigned int xlen, unsigned int precision, unsigned int offset)
+{
+ unsigned int len = offset / HOST_BITS_PER_WIDE_INT;
+ /* Extending beyond the precision is a no-op. If we have only stored
+ OFFSET bits or fewer, the rest are already signs. */
+ if (offset >= precision || len >= xlen)
+ {
+ for (unsigned i = 0; i < xlen; ++i)
+ val[i] = xval[i];
+ return xlen;
+ }
+ unsigned int suboffset = offset % HOST_BITS_PER_WIDE_INT;
+ for (unsigned int i = 0; i < len; i++)
+ val[i] = xval[i];
+ if (suboffset > 0)
+ {
+ val[len] = sext_hwi (xval[len], suboffset);
+ len += 1;
+ }
+ return canonize (val, len, precision);
+}
+
+/* Zero-extend the number represented by XVAL and XLEN into VAL,
+ starting at OFFSET. Return the number of blocks in VAL. Both XVAL
+ and VAL have PRECISION bits. */
+unsigned int
+wi::zext_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
+ unsigned int xlen, unsigned int precision, unsigned int offset)
+{
+ unsigned int len = offset / HOST_BITS_PER_WIDE_INT;
+ /* Extending beyond the precision is a no-op. If we have only stored
+ OFFSET bits or fewer, and the upper stored bit is zero, then there
+ is nothing to do. */
+ if (offset >= precision || (len >= xlen && xval[xlen - 1] >= 0))
+ {
+ for (unsigned i = 0; i < xlen; ++i)
+ val[i] = xval[i];
+ return xlen;
+ }
+ unsigned int suboffset = offset % HOST_BITS_PER_WIDE_INT;
+ for (unsigned int i = 0; i < len; i++)
+ val[i] = i < xlen ? xval[i] : -1;
+ if (suboffset > 0)
+ val[len] = zext_hwi (len < xlen ? xval[len] : -1, suboffset);
+ else
+ val[len] = 0;
+ return canonize (val, len + 1, precision);
+}
+
+/*
+ * Masking, inserting, shifting, rotating.
+ */
+
+/* Insert WIDTH bits from Y into X starting at START. */
+wide_int
+wi::insert (const wide_int &x, const wide_int &y, unsigned int start,
+ unsigned int width)
+{
+ wide_int result;
+ wide_int mask;
+ wide_int tmp;
+
+ unsigned int precision = x.get_precision ();
+ if (start >= precision)
+ return x;
+
+ gcc_checking_assert (precision >= width);
+
+ if (start + width >= precision)
+ width = precision - start;
+
+ mask = wi::shifted_mask (start, width, false, precision);
+ tmp = wi::lshift (wide_int::from (y, precision, UNSIGNED), start);
+ result = tmp & mask;
+
+ tmp = wi::bit_and_not (x, mask);
+ result = result | tmp;
+
+ return result;
+}
+
+/* Copy the number represented by XVAL and XLEN into VAL, setting bit BIT.
+ Return the number of blocks in VAL. Both XVAL and VAL have PRECISION
+ bits. */
+unsigned int
+wi::set_bit_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
+ unsigned int xlen, unsigned int precision, unsigned int bit)
+{
+ unsigned int block = bit / HOST_BITS_PER_WIDE_INT;
+ unsigned int subbit = bit % HOST_BITS_PER_WIDE_INT;
+
+ if (block + 1 >= xlen)
+ {
+ /* The operation either affects the last current block or needs
+ a new block. */
+ unsigned int len = block + 1;
+ for (unsigned int i = 0; i < len; i++)
+ val[i] = safe_uhwi (xval, xlen, i);
+ val[block] |= (unsigned HOST_WIDE_INT) 1 << subbit;
+
+ /* If the bit we just set is at the msb of the block, make sure
+ that any higher bits are zeros. */
+ if (bit + 1 < precision && subbit == HOST_BITS_PER_WIDE_INT - 1)
+ val[len++] = 0;
+ return len;
+ }
+ else
+ {
+ for (unsigned int i = 0; i < xlen; i++)
+ val[i] = xval[i];
+ val[block] |= (unsigned HOST_WIDE_INT) 1 << subbit;
+ return canonize (val, xlen, precision);
+ }
+}
+
+/* bswap THIS. */
+wide_int
+wide_int_storage::bswap () const
+{
+ wide_int result = wide_int::create (precision);
+ unsigned int i, s;
+ unsigned int len = BLOCKS_NEEDED (precision);
+ unsigned int xlen = get_len ();
+ const HOST_WIDE_INT *xval = get_val ();
+ HOST_WIDE_INT *val = result.write_val ();
+
+ /* This is not a well defined operation if the precision is not a
+ multiple of 8. */
+ gcc_assert ((precision & 0x7) == 0);
+
+ for (i = 0; i < len; i++)
+ val[i] = 0;
+
+ /* Only swap the bytes that are not the padding. */
+ for (s = 0; s < precision; s += 8)
+ {
+ unsigned int d = precision - s - 8;
+ unsigned HOST_WIDE_INT byte;
+
+ unsigned int block = s / HOST_BITS_PER_WIDE_INT;
+ unsigned int offset = s & (HOST_BITS_PER_WIDE_INT - 1);
+
+ byte = (safe_uhwi (xval, xlen, block) >> offset) & 0xff;
+
+ block = d / HOST_BITS_PER_WIDE_INT;
+ offset = d & (HOST_BITS_PER_WIDE_INT - 1);
+
+ val[block] |= byte << offset;
+ }
+
+ result.set_len (canonize (val, len, precision));
+ return result;
+}
+
+/* Fill VAL with a mask where the lower WIDTH bits are ones and the bits
+ above that up to PREC are zeros. The result is inverted if NEGATE
+ is true. Return the number of blocks in VAL. */
+unsigned int
+wi::mask (HOST_WIDE_INT *val, unsigned int width, bool negate,
+ unsigned int prec)
+{
+ if (width >= prec)
+ {
+ val[0] = negate ? 0 : -1;
+ return 1;
+ }
+ else if (width == 0)
+ {
+ val[0] = negate ? -1 : 0;
+ return 1;
+ }
+
+ unsigned int i = 0;
+ while (i < width / HOST_BITS_PER_WIDE_INT)
+ val[i++] = negate ? 0 : -1;
+
+ unsigned int shift = width & (HOST_BITS_PER_WIDE_INT - 1);
+ if (shift != 0)
+ {
+ HOST_WIDE_INT last = ((unsigned HOST_WIDE_INT) 1 << shift) - 1;
+ val[i++] = negate ? ~last : last;
+ }
+ else
+ val[i++] = negate ? -1 : 0;
+
+ return i;
+}
+
+/* Fill VAL with a mask where the lower START bits are zeros, the next WIDTH
+ bits are ones, and the bits above that up to PREC are zeros. The result
+ is inverted if NEGATE is true. Return the number of blocks in VAL. */
+unsigned int
+wi::shifted_mask (HOST_WIDE_INT *val, unsigned int start, unsigned int width,
+ bool negate, unsigned int prec)
+{
+ if (start >= prec || width == 0)
+ {
+ val[0] = negate ? -1 : 0;
+ return 1;
+ }
+
+ if (width > prec - start)
+ width = prec - start;
+ unsigned int end = start + width;
+
+ unsigned int i = 0;
+ while (i < start / HOST_BITS_PER_WIDE_INT)
+ val[i++] = negate ? -1 : 0;
+
+ unsigned int shift = start & (HOST_BITS_PER_WIDE_INT - 1);
+ if (shift)
+ {
+ HOST_WIDE_INT block = ((unsigned HOST_WIDE_INT) 1 << shift) - 1;
+ shift += width;
+ if (shift < HOST_BITS_PER_WIDE_INT)
+ {
+ /* case 000111000 */
+ block = ((unsigned HOST_WIDE_INT) 1 << shift) - block - 1;
+ val[i++] = negate ? ~block : block;
+ return i;
+ }
+ else
+ /* ...111000 */
+ val[i++] = negate ? block : ~block;
+ }
+
+ while (i < end / HOST_BITS_PER_WIDE_INT)
+ /* 1111111 */
+ val[i++] = negate ? 0 : -1;
+
+ shift = end & (HOST_BITS_PER_WIDE_INT - 1);
+ if (shift != 0)
+ {
+ /* 000011111 */
+ HOST_WIDE_INT block = ((unsigned HOST_WIDE_INT) 1 << shift) - 1;
+ val[i++] = negate ? ~block : block;
+ }
+ else if (end < prec)
+ val[i++] = negate ? -1 : 0;
+
+ return i;
+}
+
+/*
+ * logical operations.
+ */
+
+/* Set VAL to OP0 & OP1. Return the number of blocks used. */
+unsigned int
+wi::and_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
+ unsigned int op0len, const HOST_WIDE_INT *op1,
+ unsigned int op1len, unsigned int prec)
+{
+ int l0 = op0len - 1;
+ int l1 = op1len - 1;
+ bool need_canon = true;
+
+ unsigned int len = MAX (op0len, op1len);
+ if (l0 > l1)
+ {
+ HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
+ if (op1mask == 0)
+ {
+ l0 = l1;
+ len = l1 + 1;
+ }
+ else
+ {
+ need_canon = false;
+ while (l0 > l1)
+ {
+ val[l0] = op0[l0];
+ l0--;
+ }
+ }
+ }
+ else if (l1 > l0)
+ {
+ HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
+ if (op0mask == 0)
+ len = l0 + 1;
+ else
+ {
+ need_canon = false;
+ while (l1 > l0)
+ {
+ val[l1] = op1[l1];
+ l1--;
+ }
+ }
+ }
+
+ while (l0 >= 0)
+ {
+ val[l0] = op0[l0] & op1[l0];
+ l0--;
+ }
+
+ if (need_canon)
+ len = canonize (val, len, prec);
+
+ return len;
+}
+
+/* Set VAL to OP0 & ~OP1. Return the number of blocks used. */
+unsigned int
+wi::and_not_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
+ unsigned int op0len, const HOST_WIDE_INT *op1,
+ unsigned int op1len, unsigned int prec)
+{
+ wide_int result;
+ int l0 = op0len - 1;
+ int l1 = op1len - 1;
+ bool need_canon = true;
+
+ unsigned int len = MAX (op0len, op1len);
+ if (l0 > l1)
+ {
+ HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
+ if (op1mask != 0)
+ {
+ l0 = l1;
+ len = l1 + 1;
+ }
+ else
+ {
+ need_canon = false;
+ while (l0 > l1)
+ {
+ val[l0] = op0[l0];
+ l0--;
+ }
+ }
+ }
+ else if (l1 > l0)
+ {
+ HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
+ if (op0mask == 0)
+ len = l0 + 1;
+ else
+ {
+ need_canon = false;
+ while (l1 > l0)
+ {
+ val[l1] = ~op1[l1];
+ l1--;
+ }
+ }
+ }
+
+ while (l0 >= 0)
+ {
+ val[l0] = op0[l0] & ~op1[l0];
+ l0--;
+ }
+
+ if (need_canon)
+ len = canonize (val, len, prec);
+
+ return len;
+}
+
+/* Set VAL to OP0 | OP1. Return the number of blocks used. */
+unsigned int
+wi::or_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
+ unsigned int op0len, const HOST_WIDE_INT *op1,
+ unsigned int op1len, unsigned int prec)
+{
+ wide_int result;
+ int l0 = op0len - 1;
+ int l1 = op1len - 1;
+ bool need_canon = true;
+
+ unsigned int len = MAX (op0len, op1len);
+ if (l0 > l1)
+ {
+ HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
+ if (op1mask != 0)
+ {
+ l0 = l1;
+ len = l1 + 1;
+ }
+ else
+ {
+ need_canon = false;
+ while (l0 > l1)
+ {
+ val[l0] = op0[l0];
+ l0--;
+ }
+ }
+ }
+ else if (l1 > l0)
+ {
+ HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
+ if (op0mask != 0)
+ len = l0 + 1;
+ else
+ {
+ need_canon = false;
+ while (l1 > l0)
+ {
+ val[l1] = op1[l1];
+ l1--;
+ }
+ }
+ }
+
+ while (l0 >= 0)
+ {
+ val[l0] = op0[l0] | op1[l0];
+ l0--;
+ }
+
+ if (need_canon)
+ len = canonize (val, len, prec);
+
+ return len;
+}
+
+/* Set VAL to OP0 | ~OP1. Return the number of blocks used. */
+unsigned int
+wi::or_not_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
+ unsigned int op0len, const HOST_WIDE_INT *op1,
+ unsigned int op1len, unsigned int prec)
+{
+ wide_int result;
+ int l0 = op0len - 1;
+ int l1 = op1len - 1;
+ bool need_canon = true;
+
+ unsigned int len = MAX (op0len, op1len);
+ if (l0 > l1)
+ {
+ HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
+ if (op1mask == 0)
+ {
+ l0 = l1;
+ len = l1 + 1;
+ }
+ else
+ {
+ need_canon = false;
+ while (l0 > l1)
+ {
+ val[l0] = op0[l0];
+ l0--;
+ }
+ }
+ }
+ else if (l1 > l0)
+ {
+ HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
+ if (op0mask != 0)
+ len = l0 + 1;
+ else
+ {
+ need_canon = false;
+ while (l1 > l0)
+ {
+ val[l1] = ~op1[l1];
+ l1--;
+ }
+ }
+ }
+
+ while (l0 >= 0)
+ {
+ val[l0] = op0[l0] | ~op1[l0];
+ l0--;
+ }
+
+ if (need_canon)
+ len = canonize (val, len, prec);
+
+ return len;
+}
+
+/* Set VAL to OP0 ^ OP1. Return the number of blocks used. */
+unsigned int
+wi::xor_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
+ unsigned int op0len, const HOST_WIDE_INT *op1,
+ unsigned int op1len, unsigned int prec)
+{
+ wide_int result;
+ int l0 = op0len - 1;
+ int l1 = op1len - 1;
+
+ unsigned int len = MAX (op0len, op1len);
+ if (l0 > l1)
+ {
+ HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
+ while (l0 > l1)
+ {
+ val[l0] = op0[l0] ^ op1mask;
+ l0--;
+ }
+ }
+
+ if (l1 > l0)
+ {
+ HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
+ while (l1 > l0)
+ {
+ val[l1] = op0mask ^ op1[l1];
+ l1--;
+ }
+ }
+
+ while (l0 >= 0)
+ {
+ val[l0] = op0[l0] ^ op1[l0];
+ l0--;
+ }
+
+ return canonize (val, len, prec);
+}
+
+/*
+ * math
+ */
+
/* Set VAL to OP0 + OP1.  If OVERFLOW is nonnull, record in *OVERFLOW
   whether the result overflows when OP0 and OP1 are treated as having
   signedness SGN.  Return the number of blocks in VAL.  */
unsigned int
wi::add_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
	       unsigned int op0len, const HOST_WIDE_INT *op1,
	       unsigned int op1len, unsigned int prec,
	       signop sgn, bool *overflow)
{
  unsigned HOST_WIDE_INT o0 = 0;
  unsigned HOST_WIDE_INT o1 = 0;
  unsigned HOST_WIDE_INT x = 0;
  unsigned HOST_WIDE_INT carry = 0;
  unsigned HOST_WIDE_INT old_carry = 0;
  unsigned HOST_WIDE_INT mask0, mask1;
  unsigned int i;

  unsigned int len = MAX (op0len, op1len);
  /* MASK0/MASK1 are the implicit sign-extension blocks of each
     operand, used where one operand is shorter than the other.  */
  mask0 = -top_bit_of (op0, op0len, prec);
  mask1 = -top_bit_of (op1, op1len, prec);
  /* Add all of the explicitly defined elements.  */

  for (i = 0; i < len; i++)
    {
      o0 = i < op0len ? (unsigned HOST_WIDE_INT) op0[i] : mask0;
      o1 = i < op1len ? (unsigned HOST_WIDE_INT) op1[i] : mask1;
      x = o0 + o1 + carry;
      val[i] = x;
      old_carry = carry;
      /* An unsigned add wrapped iff the sum is smaller than (or, with
	 an incoming carry, no larger than) an operand.  */
      carry = carry == 0 ? x < o0 : x <= o0;
    }

  if (len * HOST_BITS_PER_WIDE_INT < prec)
    {
      /* There is room for another block below PREC; the carry is
	 absorbed there, so no overflow is possible.  */
      val[len] = mask0 + mask1 + carry;
      len++;
      if (overflow)
	*overflow = false;
    }
  else if (overflow)
    {
      unsigned int shift = -prec % HOST_BITS_PER_WIDE_INT;
      if (sgn == SIGNED)
	{
	  /* Signed overflow iff the result's sign (at bit PREC - 1)
	     differs from the sign of both operands.  */
	  unsigned HOST_WIDE_INT x = (val[len - 1] ^ o0) & (val[len - 1] ^ o1);
	  *overflow = (HOST_WIDE_INT) (x << shift) < 0;
	}
      else
	{
	  /* Put the MSBs of X and O0 in the top of the HWI.  */
	  x <<= shift;
	  o0 <<= shift;
	  if (old_carry)
	    *overflow = (x <= o0);
	  else
	    *overflow = (x < o0);
	}
    }

  return canonize (val, len, prec);
}
+
/* Subroutines of the multiplication and division operations.  Unpack
   the first IN_LEN HOST_WIDE_INTs in INPUT into 2 * IN_LEN
   HOST_HALF_WIDE_INTs of RESULT.  The rest of RESULT is filled by
   uncompressing the top bit of INPUT[IN_LEN - 1].  */
static void
wi_unpack (unsigned HOST_HALF_WIDE_INT *result, const HOST_WIDE_INT *input,
	   unsigned int in_len, unsigned int out_len,
	   unsigned int prec, signop sgn)
{
  unsigned int i;
  unsigned int j = 0;
  unsigned int small_prec = prec & (HOST_BITS_PER_WIDE_INT - 1);
  unsigned int blocks_needed = BLOCKS_NEEDED (prec);
  HOST_WIDE_INT mask;

  /* MASK is the half-block used to extend RESULT beyond the explicit
     input: the input's sign when SGN is SIGNED, zeros otherwise.  */
  if (sgn == SIGNED)
    {
      mask = -top_bit_of ((const HOST_WIDE_INT *) input, in_len, prec);
      mask &= HALF_INT_MASK;
    }
  else
    mask = 0;

  /* Split every full block except the top one into two half blocks,
     low half first.  */
  for (i = 0; i < blocks_needed - 1; i++)
    {
      HOST_WIDE_INT x = safe_uhwi (input, in_len, i);
      result[j++] = x;
      result[j++] = x >> HOST_BITS_PER_HALF_WIDE_INT;
    }

  /* The top block must first be extended from SMALL_PREC when PREC is
     not a whole number of blocks.  */
  HOST_WIDE_INT x = safe_uhwi (input, in_len, i);
  if (small_prec)
    {
      if (sgn == SIGNED)
	x = sext_hwi (x, small_prec);
      else
	x = zext_hwi (x, small_prec);
    }
  result[j++] = x;
  result[j++] = x >> HOST_BITS_PER_HALF_WIDE_INT;

  /* Smear the sign bit.  */
  while (j < out_len)
    result[j++] = mask;
}
+
/* The inverse of wi_unpack.  IN_LEN is the number of input
   blocks.  The number of output blocks will be half this amount.  */
static void
wi_pack (unsigned HOST_WIDE_INT *result,
	 const unsigned HOST_HALF_WIDE_INT *input,
	 unsigned int in_len)
{
  unsigned int i = 0;
  unsigned int j = 0;

  /* Fuse pairs of half blocks into full blocks, low half first.  */
  while (i + 2 < in_len)
    {
      result[j++] = (unsigned HOST_WIDE_INT)input[i]
	| ((unsigned HOST_WIDE_INT)input[i + 1]
	   << HOST_BITS_PER_HALF_WIDE_INT);
      i += 2;
    }

  /* Handle the case where in_len is odd.   For this we zero extend.  */
  if (in_len & 1)
    result[j++] = (unsigned HOST_WIDE_INT)input[i];
  else
    result[j++] = (unsigned HOST_WIDE_INT)input[i]
      | ((unsigned HOST_WIDE_INT)input[i + 1] << HOST_BITS_PER_HALF_WIDE_INT);
}
+
/* Multiply Op1 by Op2.  If HIGH is set, only the upper half of the
   result is returned.

   If HIGH is not set, throw away the upper half after the check is
   made to see if it overflows.  Unfortunately there is no better way
   to check for overflow than to do this.  If OVERFLOW is nonnull,
   record in *OVERFLOW whether the result overflowed.  SGN controls
   the signedness and is used to check overflow or if HIGH is set.  */
unsigned int
wi::mul_internal (HOST_WIDE_INT *val, const HOST_WIDE_INT *op1val,
		  unsigned int op1len, const HOST_WIDE_INT *op2val,
		  unsigned int op2len, unsigned int prec, signop sgn,
		  bool *overflow, bool high)
{
  unsigned HOST_WIDE_INT o0, o1, k, t;
  unsigned int i;
  unsigned int j;
  unsigned int blocks_needed = BLOCKS_NEEDED (prec);
  unsigned int half_blocks_needed = blocks_needed * 2;
  /* The sizes here are scaled to support a 2x largest mode by 2x
     largest mode yielding a 4x largest mode result.  This is what is
     needed by vpn.  */

  unsigned HOST_HALF_WIDE_INT
    u[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
  unsigned HOST_HALF_WIDE_INT
    v[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
  /* The '2' in 'R' is because we are internally doing a full
     multiply.  */
  unsigned HOST_HALF_WIDE_INT
    r[2 * 4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
  HOST_WIDE_INT mask = ((HOST_WIDE_INT)1 << HOST_BITS_PER_HALF_WIDE_INT) - 1;

  /* If the top level routine did not really pass in an overflow, then
     just make sure that we never attempt to set it.  */
  bool needs_overflow = (overflow != 0);
  if (needs_overflow)
    *overflow = false;

  wide_int_ref op1 = wi::storage_ref (op1val, op1len, prec);
  wide_int_ref op2 = wi::storage_ref (op2val, op2len, prec);

  /* This is a surprisingly common case, so do it first.  */
  if (op1 == 0 || op2 == 0)
    {
      val[0] = 0;
      return 1;
    }

#ifdef umul_ppmm
  if (sgn == UNSIGNED)
    {
      /* If the inputs are single HWIs and the output has room for at
	 least two HWIs, we can use umul_ppmm directly.  */
      if (prec >= HOST_BITS_PER_WIDE_INT * 2
	  && wi::fits_uhwi_p (op1)
	  && wi::fits_uhwi_p (op2))
	{
	  umul_ppmm (val[1], val[0], op1.ulow (), op2.ulow ());
	  /* Use two blocks when the upper one is nonzero, or when a
	     zero upper block is needed because VAL[0] has its top bit
	     set (keeping the result non-negative in the canonical
	     sign-extended form).  */
	  return 1 + (val[1] != 0 || val[0] < 0);
	}
      /* Likewise if the output is a full single HWI, except that the
	 upper HWI of the result is only used for determining overflow.
	 (We handle this case inline when overflow isn't needed.)  */
      else if (prec == HOST_BITS_PER_WIDE_INT)
	{
	  unsigned HOST_WIDE_INT upper;
	  umul_ppmm (upper, val[0], op1.ulow (), op2.ulow ());
	  if (needs_overflow)
	    *overflow = (upper != 0);
	  return 1;
	}
    }
#endif

  /* Handle multiplications by 1.  */
  if (op1 == 1)
    {
      for (i = 0; i < op2len; i++)
	val[i] = op2val[i];
      return op2len;
    }
  if (op2 == 1)
    {
      for (i = 0; i < op1len; i++)
	val[i] = op1val[i];
      return op1len;
    }

  /* If we need to check for overflow, we can only do half wide
     multiplies quickly because we need to look at the top bits to
     check for the overflow.  */
  if ((high || needs_overflow)
      && (prec <= HOST_BITS_PER_HALF_WIDE_INT))
    {
      unsigned HOST_WIDE_INT r;

      if (sgn == SIGNED)
	{
	  o0 = op1.to_shwi ();
	  o1 = op2.to_shwi ();
	}
      else
	{
	  o0 = op1.to_uhwi ();
	  o1 = op2.to_uhwi ();
	}

      /* The product of two half-wide values fits in one HWI, so the
	 bits above PREC are available for the overflow check.  */
      r = o0 * o1;
      if (needs_overflow)
	{
	  if (sgn == SIGNED)
	    {
	      if ((HOST_WIDE_INT) r != sext_hwi (r, prec))
		*overflow = true;
	    }
	  else
	    {
	      if ((r >> prec) != 0)
		*overflow = true;
	    }
	}
      val[0] = high ? r >> prec : r;
      return 1;
    }

  /* We do unsigned mul and then correct it.  */
  wi_unpack (u, op1val, op1len, half_blocks_needed, prec, SIGNED);
  wi_unpack (v, op2val, op2len, half_blocks_needed, prec, SIGNED);

  /* The 2 is for a full mult.  */
  memset (r, 0, half_blocks_needed * 2
	  * HOST_BITS_PER_HALF_WIDE_INT / CHAR_BIT);

  /* Schoolbook multiply on half-width digits, so each digit product
     plus carry fits in a HOST_WIDE_INT.  */
  for (j = 0; j < half_blocks_needed; j++)
    {
      k = 0;
      for (i = 0; i < half_blocks_needed; i++)
	{
	  t = ((unsigned HOST_WIDE_INT)u[i] * (unsigned HOST_WIDE_INT)v[j]
	       + r[i + j] + k);
	  r[i + j] = t & HALF_INT_MASK;
	  k = t >> HOST_BITS_PER_HALF_WIDE_INT;
	}
      r[j + half_blocks_needed] = k;
    }

  /* We did unsigned math above.  For signed we must adjust the
     product (assuming we need to see that).  */
  if (sgn == SIGNED && (high || needs_overflow))
    {
      unsigned HOST_WIDE_INT b;
      /* A negative operand was treated as an unsigned value larger
	 than its signed value; subtract the other operand from the
	 upper half of the product to compensate.  */
      if (wi::neg_p (op1))
	{
	  b = 0;
	  for (i = 0; i < half_blocks_needed; i++)
	    {
	      t = (unsigned HOST_WIDE_INT)r[i + half_blocks_needed]
		- (unsigned HOST_WIDE_INT)v[i] - b;
	      r[i + half_blocks_needed] = t & HALF_INT_MASK;
	      b = t >> (HOST_BITS_PER_WIDE_INT - 1);
	    }
	}
      if (wi::neg_p (op2))
	{
	  b = 0;
	  for (i = 0; i < half_blocks_needed; i++)
	    {
	      t = (unsigned HOST_WIDE_INT)r[i + half_blocks_needed]
		- (unsigned HOST_WIDE_INT)u[i] - b;
	      r[i + half_blocks_needed] = t & HALF_INT_MASK;
	      b = t >> (HOST_BITS_PER_WIDE_INT - 1);
	    }
	}
    }

  if (needs_overflow)
    {
      HOST_WIDE_INT top;

      /* For unsigned, overflow is true if any of the top bits are set.
	 For signed, overflow is true if any of the top bits are not equal
	 to the sign bit.  */
      if (sgn == UNSIGNED)
	top = 0;
      else
	{
	  top = r[(half_blocks_needed) - 1];
	  top = SIGN_MASK (top << (HOST_BITS_PER_WIDE_INT / 2));
	  top &= mask;
	}

      for (i = half_blocks_needed; i < half_blocks_needed * 2; i++)
	if (((HOST_WIDE_INT)(r[i] & mask)) != top)
	  *overflow = true;
    }

  if (high)
    {
      /* compute [prec] <- ([prec] * [prec]) >> [prec] */
      wi_pack ((unsigned HOST_WIDE_INT *) val,
	       &r[half_blocks_needed], half_blocks_needed);
      return canonize (val, blocks_needed, prec);
    }
  else
    {
      /* compute [prec] <- ([prec] * [prec]) && ((1 << [prec]) - 1) */
      wi_pack ((unsigned HOST_WIDE_INT *) val, r, half_blocks_needed);
      return canonize (val, blocks_needed, prec);
    }
}
+
/* Compute the population count of X.  */
int
wi::popcount (const wide_int_ref &x)
{
  unsigned int i;
  int count;

  /* The high order block is special if it is the last block and the
     precision is not an even multiple of HOST_BITS_PER_WIDE_INT.  We
     have to clear out any ones above the precision before doing
     popcount on this block.  */
  count = x.precision - x.len * HOST_BITS_PER_WIDE_INT;
  unsigned int stop = x.len;
  if (count < 0)
    {
      count = popcount_hwi (x.uhigh () << -count);
      stop -= 1;
    }
  else
    {
      /* COUNT is the number of bits in the implicit sign-extension
	 blocks above the explicit ones.  Those bits are all ones
	 exactly when the value is negative (COUNT already counts
	 them); otherwise they contribute nothing.  */
      if (x.sign_mask () >= 0)
	count = 0;
    }

  /* Add the population of each remaining explicit block.  */
  for (i = 0; i < stop; ++i)
    count += popcount_hwi (x.val[i]);

  return count;
}
+
/* Set VAL to OP0 - OP1.  If OVERFLOW is nonnull, record in *OVERFLOW
   whether the result overflows when OP0 and OP1 are treated as having
   signedness SGN.  Return the number of blocks in VAL.  */
unsigned int
wi::sub_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
	       unsigned int op0len, const HOST_WIDE_INT *op1,
	       unsigned int op1len, unsigned int prec,
	       signop sgn, bool *overflow)
{
  unsigned HOST_WIDE_INT o0 = 0;
  unsigned HOST_WIDE_INT o1 = 0;
  unsigned HOST_WIDE_INT x = 0;
  /* Subtract block by block, propagating a borrow from each block to
     the next.  */
  unsigned HOST_WIDE_INT borrow = 0;
  unsigned HOST_WIDE_INT old_borrow = 0;

  unsigned HOST_WIDE_INT mask0, mask1;
  unsigned int i;

  unsigned int len = MAX (op0len, op1len);
  /* MASK0/MASK1 are the implicit sign-extension blocks of each
     operand, used where one operand is shorter than the other.  */
  mask0 = -top_bit_of (op0, op0len, prec);
  mask1 = -top_bit_of (op1, op1len, prec);

  /* Subtract all of the explicitly defined elements.  */
  for (i = 0; i < len; i++)
    {
      o0 = i < op0len ? (unsigned HOST_WIDE_INT)op0[i] : mask0;
      o1 = i < op1len ? (unsigned HOST_WIDE_INT)op1[i] : mask1;
      x = o0 - o1 - borrow;
      val[i] = x;
      old_borrow = borrow;
      /* A borrow occurs iff the subtrahend (plus any incoming borrow)
	 exceeds the minuend block.  */
      borrow = borrow == 0 ? o0 < o1 : o0 <= o1;
    }

  if (len * HOST_BITS_PER_WIDE_INT < prec)
    {
      /* There is room for another block below PREC; the borrow is
	 absorbed there, so no overflow is possible.  */
      val[len] = mask0 - mask1 - borrow;
      len++;
      if (overflow)
	*overflow = false;
    }
  else if (overflow)
    {
      unsigned int shift = -prec % HOST_BITS_PER_WIDE_INT;
      if (sgn == SIGNED)
	{
	  /* Signed overflow iff the operands' signs differ and the
	     result's sign (at bit PREC - 1) differs from OP0's.  */
	  unsigned HOST_WIDE_INT x = (o0 ^ o1) & (val[len - 1] ^ o0);
	  *overflow = (HOST_WIDE_INT) (x << shift) < 0;
	}
      else
	{
	  /* Put the MSBs of X and O0 in the top of the HWI.  */
	  x <<= shift;
	  o0 <<= shift;
	  if (old_borrow)
	    *overflow = (x >= o0);
	  else
	    *overflow = (x > o0);
	}
    }

  return canonize (val, len, prec);
}
+
+
+/*
+ * Division and Mod
+ */
+
/* Compute B_QUOTIENT and B_REMAINDER from B_DIVIDEND/B_DIVISOR.  The
   algorithm is a small modification of the algorithm in Hacker's
   Delight by Warren, which itself is a small modification of Knuth's
   algorithm.  M is the number of significant elements of U however
   there needs to be at least one extra element of B_DIVIDEND
   allocated, N is the number of elements of B_DIVISOR.  */
static void
divmod_internal_2 (unsigned HOST_HALF_WIDE_INT *b_quotient,
		   unsigned HOST_HALF_WIDE_INT *b_remainder,
		   unsigned HOST_HALF_WIDE_INT *b_dividend,
		   unsigned HOST_HALF_WIDE_INT *b_divisor,
		   int m, int n)
{
  /* The "digits" are a HOST_HALF_WIDE_INT which the size of half of a
     HOST_WIDE_INT and stored in the lower bits of each word.  This
     algorithm should work properly on both 32 and 64 bit
     machines.  */
  unsigned HOST_WIDE_INT b
    = (unsigned HOST_WIDE_INT)1 << HOST_BITS_PER_HALF_WIDE_INT;
  unsigned HOST_WIDE_INT qhat;	/* Estimate of quotient digit.  */
  unsigned HOST_WIDE_INT rhat;	/* A remainder.  */
  unsigned HOST_WIDE_INT p;	/* Product of two digits.  */
  HOST_WIDE_INT t, k;
  int i, j, s;

  /* Single digit divisor.  */
  if (n == 1)
    {
      /* Simple long division: divide digit by digit from the top,
	 carrying the remainder down in K.  */
      k = 0;
      for (j = m - 1; j >= 0; j--)
	{
	  b_quotient[j] = (k * b + b_dividend[j])/b_divisor[0];
	  k = ((k * b + b_dividend[j])
	       - ((unsigned HOST_WIDE_INT)b_quotient[j]
		  * (unsigned HOST_WIDE_INT)b_divisor[0]));
	}
      b_remainder[0] = k;
      return;
    }

  /* Normalization shift: per Knuth, make the top digit of the divisor
     at least b/2 so the quotient-digit estimate below is accurate.  */
  s = clz_hwi (b_divisor[n-1]) - HOST_BITS_PER_HALF_WIDE_INT; /* CHECK clz */

  if (s)
    {
      /* Normalize B_DIVIDEND and B_DIVISOR.  Unlike the published
	 algorithm, we can overwrite b_dividend and b_divisor, so we do
	 that.  */
      for (i = n - 1; i > 0; i--)
	b_divisor[i] = (b_divisor[i] << s)
	  | (b_divisor[i-1] >> (HOST_BITS_PER_HALF_WIDE_INT - s));
      b_divisor[0] = b_divisor[0] << s;

      b_dividend[m] = b_dividend[m-1] >> (HOST_BITS_PER_HALF_WIDE_INT - s);
      for (i = m - 1; i > 0; i--)
	b_dividend[i] = (b_dividend[i] << s)
	  | (b_dividend[i-1] >> (HOST_BITS_PER_HALF_WIDE_INT - s));
      b_dividend[0] = b_dividend[0] << s;
    }

  /* Main loop.  */
  for (j = m - n; j >= 0; j--)
    {
      /* Estimate the quotient digit from the top two dividend digits,
	 then refine it using the next divisor digit.  */
      qhat = (b_dividend[j+n] * b + b_dividend[j+n-1]) / b_divisor[n-1];
      rhat = (b_dividend[j+n] * b + b_dividend[j+n-1]) - qhat * b_divisor[n-1];
    again:
      if (qhat >= b || qhat * b_divisor[n-2] > b * rhat + b_dividend[j+n-2])
	{
	  qhat -= 1;
	  rhat += b_divisor[n-1];
	  if (rhat < b)
	    goto again;
	}

      /* Multiply and subtract.  */
      k = 0;
      for (i = 0; i < n; i++)
	{
	  p = qhat * b_divisor[i];
	  t = b_dividend[i+j] - k - (p & HALF_INT_MASK);
	  b_dividend[i + j] = t;
	  k = ((p >> HOST_BITS_PER_HALF_WIDE_INT)
	       - (t >> HOST_BITS_PER_HALF_WIDE_INT));
	}
      t = b_dividend[j+n] - k;
      b_dividend[j+n] = t;

      b_quotient[j] = qhat;
      if (t < 0)
	{
	  /* The estimate was one too large: decrement the quotient
	     digit and add the divisor back in (Knuth's "add back").  */
	  b_quotient[j] -= 1;
	  k = 0;
	  for (i = 0; i < n; i++)
	    {
	      t = (HOST_WIDE_INT)b_dividend[i+j] + b_divisor[i] + k;
	      b_dividend[i+j] = t;
	      k = t >> HOST_BITS_PER_HALF_WIDE_INT;
	    }
	  b_dividend[j+n] += k;
	}
    }
  /* Undo the normalization shift to recover the true remainder.  */
  if (s)
    for (i = 0; i < n; i++)
      b_remainder[i] = (b_dividend[i] >> s)
	| (b_dividend[i+1] << (HOST_BITS_PER_HALF_WIDE_INT - s));
  else
    for (i = 0; i < n; i++)
      b_remainder[i] = b_dividend[i];
}
+
+
/* Divide DIVIDEND by DIVISOR, which have signedness SGN, and truncate
   the result.  If QUOTIENT is nonnull, store the value of the quotient
   there and return the number of blocks in it.  The return value is
   not defined otherwise.  If REMAINDER is nonnull, store the value
   of the remainder there and store the number of blocks in
   *REMAINDER_LEN.  If OFLOW is not null, store in *OFLOW whether
   the division overflowed.  */
unsigned int
wi::divmod_internal (HOST_WIDE_INT *quotient, unsigned int *remainder_len,
		     HOST_WIDE_INT *remainder,
		     const HOST_WIDE_INT *dividend_val,
		     unsigned int dividend_len, unsigned int dividend_prec,
		     const HOST_WIDE_INT *divisor_val, unsigned int divisor_len,
		     unsigned int divisor_prec, signop sgn,
		     bool *oflow)
{
  unsigned int dividend_blocks_needed = 2 * BLOCKS_NEEDED (dividend_prec);
  unsigned int divisor_blocks_needed = 2 * BLOCKS_NEEDED (divisor_prec);
  unsigned HOST_HALF_WIDE_INT
    b_quotient[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
  unsigned HOST_HALF_WIDE_INT
    b_remainder[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
  /* One extra element, required by divmod_internal_2's normalization.  */
  unsigned HOST_HALF_WIDE_INT
    b_dividend[(4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT) + 1];
  unsigned HOST_HALF_WIDE_INT
    b_divisor[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT];
  unsigned int m, n;
  bool dividend_neg = false;
  bool divisor_neg = false;
  bool overflow = false;
  wide_int neg_dividend, neg_divisor;

  wide_int_ref dividend = wi::storage_ref (dividend_val, dividend_len,
					   dividend_prec);
  wide_int_ref divisor = wi::storage_ref (divisor_val, divisor_len,
					  divisor_prec);
  if (divisor == 0)
    overflow = true;

  /* The smallest signed number / -1 causes overflow.  The dividend_len
     check is for speed rather than correctness.  */
  if (sgn == SIGNED
      && dividend_len == BLOCKS_NEEDED (dividend_prec)
      && divisor == -1
      && wi::only_sign_bit_p (dividend))
    overflow = true;

  /* Handle the overflow cases.  Viewed as unsigned value, the quotient of
     (signed min / -1) has the same representation as the original dividend.
     We have traditionally made division by zero act as division by one,
     so there too we use the original dividend.  */
  if (overflow)
    {
      if (remainder)
	{
	  *remainder_len = 1;
	  remainder[0] = 0;
	}
      if (oflow != 0)
	*oflow = true;
      if (quotient)
	for (unsigned int i = 0; i < dividend_len; ++i)
	  quotient[i] = dividend_val[i];
      return dividend_len;
    }

  if (oflow)
    *oflow = false;

  /* Do it on the host if you can.  */
  if (sgn == SIGNED
      && wi::fits_shwi_p (dividend)
      && wi::fits_shwi_p (divisor))
    {
      HOST_WIDE_INT o0 = dividend.to_shwi ();
      HOST_WIDE_INT o1 = divisor.to_shwi ();

      if (o0 == HOST_WIDE_INT_MIN && o1 == -1)
	{
	  /* The host operation would trap; the overflow check above
	     only covers the full-precision case, so this can be
	     reached only when the quotient is representable.  */
	  gcc_checking_assert (dividend_prec > HOST_BITS_PER_WIDE_INT);
	  if (quotient)
	    {
	      quotient[0] = HOST_WIDE_INT_MIN;
	      quotient[1] = 0;
	    }
	  if (remainder)
	    {
	      remainder[0] = 0;
	      *remainder_len = 1;
	    }
	  return 2;
	}
      else
	{
	  if (quotient)
	    quotient[0] = o0 / o1;
	  if (remainder)
	    {
	      remainder[0] = o0 % o1;
	      *remainder_len = 1;
	    }
	  return 1;
	}
    }

  if (sgn == UNSIGNED
      && wi::fits_uhwi_p (dividend)
      && wi::fits_uhwi_p (divisor))
    {
      unsigned HOST_WIDE_INT o0 = dividend.to_uhwi ();
      unsigned HOST_WIDE_INT o1 = divisor.to_uhwi ();

      if (quotient)
	quotient[0] = o0 / o1;
      if (remainder)
	{
	  remainder[0] = o0 % o1;
	  *remainder_len = 1;
	}
      return 1;
    }

  /* Make the divisor and dividend positive and remember what we
     did.  */
  if (sgn == SIGNED)
    {
      if (wi::neg_p (dividend))
	{
	  neg_dividend = -dividend;
	  dividend = neg_dividend;
	  dividend_neg = true;
	}
      if (wi::neg_p (divisor))
	{
	  neg_divisor = -divisor;
	  divisor = neg_divisor;
	  divisor_neg = true;
	}
    }

  /* Split both operands into half-width digits for the long-division
     worker.  */
  wi_unpack (b_dividend, dividend.get_val (), dividend.get_len (),
	     dividend_blocks_needed, dividend_prec, sgn);
  wi_unpack (b_divisor, divisor.get_val (), divisor.get_len (),
	     divisor_blocks_needed, divisor_prec, sgn);

  /* Trim leading zero digits to get the significant lengths M and N.  */
  m = dividend_blocks_needed;
  while (m > 1 && b_dividend[m - 1] == 0)
    m--;

  n = divisor_blocks_needed;
  while (n > 1 && b_divisor[n - 1] == 0)
    n--;

  memset (b_quotient, 0, sizeof (b_quotient));

  divmod_internal_2 (b_quotient, b_remainder, b_dividend, b_divisor, m, n);

  unsigned int quotient_len = 0;
  if (quotient)
    {
      wi_pack ((unsigned HOST_WIDE_INT *) quotient, b_quotient, m);
      quotient_len = canonize (quotient, (m + 1) / 2, dividend_prec);
      /* The quotient is neg if exactly one of the divisor or dividend is
	 neg.  */
      if (dividend_neg != divisor_neg)
	quotient_len = wi::sub_large (quotient, zeros, 1, quotient,
				      quotient_len, dividend_prec,
				      UNSIGNED, 0);
    }

  if (remainder)
    {
      wi_pack ((unsigned HOST_WIDE_INT *) remainder, b_remainder, n);
      *remainder_len = canonize (remainder, (n + 1) / 2, dividend_prec);
      /* The remainder is always the same sign as the dividend.  */
      if (dividend_neg)
	*remainder_len = wi::sub_large (remainder, zeros, 1, remainder,
					*remainder_len, dividend_prec,
					UNSIGNED, 0);
    }

  return quotient_len;
}
+
+/*
+ * Shifting, rotating and extraction.
+ */
+
/* Left shift XVAL by SHIFT and store the result in VAL.  Return the
   number of blocks in VAL.  Both XVAL and VAL have PRECISION bits.  */
unsigned int
wi::lshift_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
		  unsigned int xlen, unsigned int precision,
		  unsigned int shift)
{
  /* Split the shift into a whole-block shift and a subblock shift.  */
  unsigned int skip = shift / HOST_BITS_PER_WIDE_INT;
  unsigned int small_shift = shift % HOST_BITS_PER_WIDE_INT;

  /* The whole-block shift fills with zeros.  */
  unsigned int len = BLOCKS_NEEDED (precision);
  for (unsigned int i = 0; i < skip; ++i)
    val[i] = 0;

  /* It's easier to handle the simple block case specially.  */
  if (small_shift == 0)
    for (unsigned int i = skip; i < len; ++i)
      val[i] = safe_uhwi (xval, xlen, i - skip);
  else
    {
      /* The first unfilled output block is a left shift of the first
	 block in XVAL.  The other output blocks contain bits from two
	 consecutive input blocks.  */
      unsigned HOST_WIDE_INT carry = 0;
      for (unsigned int i = skip; i < len; ++i)
	{
	  unsigned HOST_WIDE_INT x = safe_uhwi (xval, xlen, i - skip);
	  val[i] = (x << small_shift) | carry;
	  /* CARRY receives the bits shifted out of the top of X.  The
	     negated modulo computes HOST_BITS_PER_WIDE_INT - SMALL_SHIFT
	     without ever shifting by the full block width.  */
	  carry = x >> (-small_shift % HOST_BITS_PER_WIDE_INT);
	}
    }
  return canonize (val, len, precision);
}
+
/* Right shift XVAL by SHIFT and store the result in VAL.  Return the
   number of blocks in VAL.  The input has XPRECISION bits and the
   output has XPRECISION - SHIFT bits.  */
static unsigned int
rshift_large_common (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
		     unsigned int xlen, unsigned int xprecision,
		     unsigned int shift)
{
  /* Split the shift into a whole-block shift and a subblock shift.  */
  unsigned int skip = shift / HOST_BITS_PER_WIDE_INT;
  unsigned int small_shift = shift % HOST_BITS_PER_WIDE_INT;

  /* Work out how many blocks are needed to store the significant bits
     (excluding the upper zeros or signs).  */
  unsigned int len = BLOCKS_NEEDED (xprecision - shift);

  /* It's easier to handle the simple block case specially.  */
  if (small_shift == 0)
    for (unsigned int i = 0; i < len; ++i)
      val[i] = safe_uhwi (xval, xlen, i + skip);
  else
    {
      /* Each output block but the last is a combination of two input blocks.
	 The last block is a right shift of the last block in XVAL.  */
      unsigned HOST_WIDE_INT curr = safe_uhwi (xval, xlen, skip);
      for (unsigned int i = 0; i < len; ++i)
	{
	  val[i] = curr >> small_shift;
	  /* Fetch the next block (implicitly sign-extended by
	     safe_uhwi past XLEN) and fold its low bits into the top
	     of this output block.  */
	  curr = safe_uhwi (xval, xlen, i + skip + 1);
	  val[i] |= curr << (-small_shift % HOST_BITS_PER_WIDE_INT);
	}
    }
  return len;
}
+
/* Logically right shift XVAL by SHIFT and store the result in VAL.
   Return the number of blocks in VAL.  XVAL has XPRECISION bits and
   VAL has PRECISION bits.  */
unsigned int
wi::lrshift_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
		   unsigned int xlen, unsigned int xprecision,
		   unsigned int precision, unsigned int shift)
{
  unsigned int len = rshift_large_common (val, xval, xlen, xprecision, shift);

  /* The value we just created has precision XPRECISION - SHIFT.
     Zero-extend it to wider precisions.  */
  if (precision > xprecision - shift)
    {
      unsigned int small_prec = (xprecision - shift) % HOST_BITS_PER_WIDE_INT;
      if (small_prec)
	val[len - 1] = zext_hwi (val[len - 1], small_prec);
      else if (val[len - 1] < 0)
	{
	  /* Add a new block with a zero.  */
	  val[len++] = 0;
	  return len;
	}
    }
  return canonize (val, len, precision);
}
+
/* Arithmetically right shift XVAL by SHIFT and store the result in VAL.
   Return the number of blocks in VAL.  XVAL has XPRECISION bits and
   VAL has PRECISION bits.  */
unsigned int
wi::arshift_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
		   unsigned int xlen, unsigned int xprecision,
		   unsigned int precision, unsigned int shift)
{
  unsigned int len = rshift_large_common (val, xval, xlen, xprecision, shift);

  /* The value we just created has precision XPRECISION - SHIFT.
     Sign-extend it to wider types.  */
  if (precision > xprecision - shift)
    {
      unsigned int small_prec = (xprecision - shift) % HOST_BITS_PER_WIDE_INT;
      if (small_prec)
	val[len - 1] = sext_hwi (val[len - 1], small_prec);
    }
  return canonize (val, len, precision);
}
+
/* Return the number of leading (upper) zeros in X.  */
int
wi::clz (const wide_int_ref &x)
{
  /* Calculate how many bits there above the highest represented block.
     Nonnegative COUNT is the width of the implicit sign-extension
     blocks; negative -COUNT is how far the top block overhangs the
     precision.  */
  int count = x.precision - x.len * HOST_BITS_PER_WIDE_INT;

  unsigned HOST_WIDE_INT high = x.uhigh ();
  if (count < 0)
    /* The upper -COUNT bits of HIGH are not part of the value.
       Clear them out.  */
    high = (high << -count) >> -count;
  else if (x.sign_mask () < 0)
    /* The upper bit is set, so there are no leading zeros.  */
    return 0;

  /* We don't need to look below HIGH.  Either HIGH is nonzero,
     or the top bit of the block below is nonzero; clz_hwi is
     HOST_BITS_PER_WIDE_INT in the latter case.  */
  return count + clz_hwi (high);
}
+
/* Return the number of redundant sign bits in X.  (That is, the number
   of bits immediately below the sign bit that have the same value as
   the sign bit.)  */
int
wi::clrsb (const wide_int_ref &x)
{
  /* Calculate how many bits there above the highest represented block.  */
  int count = x.precision - x.len * HOST_BITS_PER_WIDE_INT;

  unsigned HOST_WIDE_INT high = x.uhigh ();
  unsigned HOST_WIDE_INT mask = -1;
  if (count < 0)
    {
      /* The upper -COUNT bits of HIGH are not part of the value.
	 Clear them from both MASK and HIGH.  */
      mask >>= -count;
      high &= mask;
    }

  /* If the top bit is 1, count the number of leading 1s.  If the top
     bit is zero, count the number of leading zeros.  */
  if (high > mask / 2)
    high ^= mask;

  /* There are no sign bits below the top block, so we don't need to look
     beyond HIGH.  Note that clz_hwi is HOST_BITS_PER_WIDE_INT when
     HIGH is 0.  The - 1 excludes the sign bit itself from the count.  */
  return count + clz_hwi (high) - 1;
}
+
+/* Return the number of trailing (lower) zeros in X. */
+int
+wi::ctz (const wide_int_ref &x)
+{
+ if (x.len == 1 && x.ulow () == 0)
+ return x.precision;
+
+ /* Having dealt with the zero case, there must be a block with a
+ nonzero bit. We don't care about the bits above the first 1. */
+ unsigned int i = 0;
+ while (x.val[i] == 0)
+ ++i;
+ return i * HOST_BITS_PER_WIDE_INT + ctz_hwi (x.val[i]);
+}
+
/* If X is an exact power of 2, return the base-2 logarithm, otherwise
   return -1.  */
int
wi::exact_log2 (const wide_int_ref &x)
{
  /* Reject cases where there are implicit -1 blocks above HIGH.  */
  if (x.len * HOST_BITS_PER_WIDE_INT < x.precision && x.sign_mask () < 0)
    return -1;

  /* Set CRUX to the index of the entry that should be nonzero.
     If the top block is zero then the next lowest block (if any)
     must have the high bit set.  */
  unsigned int crux = x.len - 1;
  if (crux > 0 && x.val[crux] == 0)
    crux -= 1;

  /* Check that all lower blocks are zero.  */
  for (unsigned int i = 0; i < crux; ++i)
    if (x.val[i] != 0)
      return -1;

  /* Get a zero-extended form of block CRUX.  */
  unsigned HOST_WIDE_INT hwi = x.val[crux];
  if ((crux + 1) * HOST_BITS_PER_WIDE_INT > x.precision)
    hwi = zext_hwi (hwi, x.precision % HOST_BITS_PER_WIDE_INT);

  /* Now it's down to whether HWI is a power of 2.  */
  int res = ::exact_log2 (hwi);
  if (res >= 0)
    /* Bias the single-block result by CRUX whole blocks.  */
    res += crux * HOST_BITS_PER_WIDE_INT;
  return res;
}
+
+/* Return the base-2 logarithm of X, rounding down. Return -1 if X is 0. */
+int
+wi::floor_log2 (const wide_int_ref &x)
+{
+ return x.precision - 1 - clz (x);
+}
+
+/* Return the index of the first (lowest) set bit in X, counting from 1.
+ Return 0 if X is 0. */
+int
+wi::ffs (const wide_int_ref &x)
+{
+ return eq_p (x, 0) ? 0 : ctz (x) + 1;
+}
+
+/* Return true if sign-extending X to have precision PRECISION would give
+ the minimum signed value at that precision. */
+bool
+wi::only_sign_bit_p (const wide_int_ref &x, unsigned int precision)
+{
+ return ctz (x) + 1 == int (precision);
+}
+
/* Return true if X represents the minimum signed value.  */
bool
wi::only_sign_bit_p (const wide_int_ref &x)
{
  /* Delegate, testing at X's own precision.  */
  return only_sign_bit_p (x, x.precision);
}
+
+/*
+ * Private utilities.
+ */
+
/* GTY hooks for widest_int; all are deliberately empty no-ops
   (presumably because widest_int holds nothing the garbage collector
   or PCH machinery needs to process — confirm against gengtype).  */
void gt_ggc_mx (widest_int *) { }
void gt_pch_nx (widest_int *, void (*) (void *, void *), void *) { }
void gt_pch_nx (widest_int *) { }

/* Explicitly instantiate the debug dump routine for each flavor.  */
template void wide_int::dump () const;
template void generic_wide_int <wide_int_ref_storage <false> >::dump () const;
template void generic_wide_int <wide_int_ref_storage <true> >::dump () const;
template void offset_int::dump () const;
template void widest_int::dump () const;
diff --git a/gcc/wide-int.h b/gcc/wide-int.h
new file mode 100644
index 00000000000..6860af95be4
--- /dev/null
+++ b/gcc/wide-int.h
@@ -0,0 +1,3175 @@
+/* Operations with very long integers. -*- C++ -*-
+ Copyright (C) 2012-2013 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by the
+Free Software Foundation; either version 3, or (at your option) any
+later version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef WIDE_INT_H
+#define WIDE_INT_H
+
+/* wide-int.[cc|h] implements a class that efficiently performs
+ mathematical operations on finite precision integers. wide_ints
+ are designed to be transient - they are not for long term storage
+ of values. There is tight integration between wide_ints and the
+ other longer storage GCC representations (rtl and tree).
+
+ The actual precision of a wide_int depends on the flavor. There
+ are three predefined flavors:
+
+ 1) wide_int (the default). This flavor does the math in the
+ precision of its input arguments. It is assumed (and checked)
+ that the precisions of the operands and results are consistent.
+ This is the most efficient flavor. It is not possible to examine
+ bits above the precision that has been specified. Because of
+ this, the default flavor has semantics that are simple to
+ understand and in general model the underlying hardware that the
+ compiler is targeted for.
+
+ This flavor must be used at the RTL level of gcc because there
+ is, in general, not enough information in the RTL representation
+ to extend a value beyond the precision specified in the mode.
+
+ This flavor should also be used at the TREE and GIMPLE levels of
+ the compiler except for the circumstances described in the
+ descriptions of the other two flavors.
+
+ The default wide_int representation does not contain any
+ information inherent about signedness of the represented value,
+ so it can be used to represent both signed and unsigned numbers.
+ For operations where the results depend on signedness (full width
+ multiply, division, shifts, comparisons, and operations that need
+ overflow detected), the signedness must be specified separately.
+
+ 2) offset_int. This is a fixed size representation that is
+ guaranteed to be large enough to compute any bit or byte sized
+ address calculation on the target. Currently the value is 64 + 4
+ bits rounded up to the next even multiple of
+ HOST_BITS_PER_WIDE_INT (but this can be changed when the first
+ port needs more than 64 bits for the size of a pointer).
+
+ This flavor can be used for all address math on the target. In
+ this representation, the values are sign or zero extended based
+ on their input types to the internal precision. All math is done
+ in this precision and then the values are truncated to fit in the
+ result type. Unlike most gimple or rtl intermediate code, it is
+ not useful to perform the address arithmetic at the same
+ precision in which the operands are represented because there has
+ been no effort by the front ends to convert most addressing
+ arithmetic to canonical types.
+
+ 3) widest_int. This representation is an approximation of
+ infinite precision math. However, it is not really infinite
+ precision math as in the GMP library. It is really finite
+ precision math where the precision is 4 times the size of the
+ largest integer that the target port can represent.
+
+ widest_int is supposed to be wider than any number that it needs to
+ store, meaning that there is always at least one leading sign bit.
+ All widest_int values are therefore signed.
+
+ There are several places in the GCC where this should/must be used:
+
+ * Code that does induction variable optimizations. This code
+ works with induction variables of many different types at the
+ same time. Because of this, it ends up doing many different
+ calculations where the operands are not compatible types. The
+ widest_int makes this easy, because it provides a field where
+ nothing is lost when converting from any variable,
+
+ * There are a small number of passes that currently use the
+ widest_int that should use the default. These should be
+ changed.
+
+ There are surprising features of offset_int and widest_int
+ that the users should be careful about:
+
+ 1) Shifts and rotations are just weird. You have to specify a
+ precision in which the shift or rotate is to happen. The bits
+ above this precision are zeroed. While this is what you
+ want, it is clearly non-obvious.
+
+ 2) Larger precision math sometimes does not produce the same
+ answer as would be expected for doing the math at the proper
+ precision. In particular, a multiply followed by a divide will
+ produce a different answer if the first product is larger than
+ what can be represented in the input precision.
+
+ The offset_int and the widest_int flavors are more expensive
+ than the default wide int, so in addition to the caveats with these
+ two, the default is the preferred representation.
+
+ All three flavors of wide_int are represented as a vector of
+ HOST_WIDE_INTs. The default and widest_int vectors contain enough elements
+ to hold a value of MAX_BITSIZE_MODE_ANY_INT bits. offset_int contains only
+ enough elements to hold ADDR_MAX_PRECISION bits. The values are stored
+ in the vector with the least significant HOST_BITS_PER_WIDE_INT bits
+ in element 0.
+
+ The default wide_int contains three fields: the vector (VAL),
+ the precision and a length (LEN). The length is the number of HWIs
+ needed to represent the value. widest_int and offset_int have a
+ constant precision that cannot be changed, so they only store the
+ VAL and LEN fields.
+
+ Since most integers used in a compiler are small values, it is
+ generally profitable to use a representation of the value that is
+ as small as possible. LEN is used to indicate the number of
+ elements of the vector that are in use. The numbers are stored as
+ sign extended numbers as a means of compression. Leading
+ HOST_WIDE_INTs that contain strings of either -1 or 0 are removed
+ as long as they can be reconstructed from the top bit that is being
+ represented.
+
+ The precision and length of a wide_int are always greater than 0.
+ Any bits in a wide_int above the precision are sign-extended from the
+ most significant bit. For example, a 4-bit value 0x8 is represented as
+ VAL = { 0xf...fff8 }. However, as an optimization, we allow other integer
+ constants to be represented with undefined bits above the precision.
+ This allows INTEGER_CSTs to be pre-extended according to TYPE_SIGN,
+ so that the INTEGER_CST representation can be used both in TYPE_PRECISION
+ and in wider precisions.
+
+ There are constructors to create the various forms of wide_int from
+ trees, rtl and constants. For trees you can simply say:
+
+ tree t = ...;
+ wide_int x = t;
+
+ However, a little more syntax is required for rtl constants since
+ they do not have an explicit precision. To make an rtl into a
+ wide_int, you have to pair it with a mode. The canonical way to do
+ this is with std::make_pair as in:
+
+ rtx r = ...
+ wide_int x = std::make_pair (r, mode);
+
+ Similarly, a wide_int can only be constructed from a host value if
+ the target precision is given explicitly, such as in:
+
+ wide_int x = wi::shwi (c, prec); // sign-extend C if necessary
+ wide_int y = wi::uhwi (c, prec); // zero-extend C if necessary
+
+ However, offset_int and widest_int have an inherent precision and so
+ can be initialized directly from a host value:
+
+ offset_int x = (int) c; // sign-extend C
+ widest_int x = (unsigned int) c; // zero-extend C
+
+ It is also possible to do arithmetic directly on trees, rtxes and
+ constants. For example:
+
+ wi::add (t1, t2); // add equal-sized INTEGER_CSTs t1 and t2
+ wi::add (t1, 1); // add 1 to INTEGER_CST t1
+ wi::add (r1, r2); // add equal-sized rtx constants r1 and r2
+ wi::lshift (1, 100); // 1 << 100 as a widest_int
+
+ Many binary operations place restrictions on the combinations of inputs,
+ using the following rules:
+
+ - {tree, rtx, wide_int} op {tree, rtx, wide_int} -> wide_int
+ The inputs must be the same precision. The result is a wide_int
+ of the same precision
+
+ - {tree, rtx, wide_int} op (un)signed HOST_WIDE_INT -> wide_int
+ (un)signed HOST_WIDE_INT op {tree, rtx, wide_int} -> wide_int
+ The HOST_WIDE_INT is extended or truncated to the precision of
+ the other input. The result is a wide_int of the same precision
+ as that input.
+
+ - (un)signed HOST_WIDE_INT op (un)signed HOST_WIDE_INT -> widest_int
+ The inputs are extended to widest_int precision and produce a
+ widest_int result.
+
+ - offset_int op offset_int -> offset_int
+ offset_int op (un)signed HOST_WIDE_INT -> offset_int
+ (un)signed HOST_WIDE_INT op offset_int -> offset_int
+
+ - widest_int op widest_int -> widest_int
+ widest_int op (un)signed HOST_WIDE_INT -> widest_int
+ (un)signed HOST_WIDE_INT op widest_int -> widest_int
+
+ Other combinations like:
+
+ - widest_int op offset_int and
+ - wide_int op offset_int
+
+ are not allowed. The inputs should instead be extended or truncated
+ so that they match.
+
+ The inputs to comparison functions like wi::eq_p and wi::lts_p
+ follow the same compatibility rules, although their return types
+ are different. Unary functions on X produce the same result as
+ a binary operation X + X. Shift functions X op Y also produce
+ the same result as X + X; the precision of the shift amount Y
+ can be arbitrarily different from X. */
+
+
+#include <utility>
+#include "system.h"
+#include "hwint.h"
+#include "signop.h"
+#include "insn-modes.h"
+
+/* The MAX_BITSIZE_MODE_ANY_INT is automatically generated by a very
+ early examination of the target's mode file. The WIDE_INT_MAX_ELTS
+ can accommodate at least 1 more bit so that unsigned numbers of that
+ mode can be represented as a signed value. Note that it is still
+ possible to create fixed_wide_ints that have precisions greater than
+ MAX_BITSIZE_MODE_ANY_INT. This can be useful when representing a
+ double-width multiplication result, for example. */
+#define WIDE_INT_MAX_ELTS \
+ ((MAX_BITSIZE_MODE_ANY_INT + HOST_BITS_PER_WIDE_INT) / HOST_BITS_PER_WIDE_INT)
+
+#define WIDE_INT_MAX_PRECISION (WIDE_INT_MAX_ELTS * HOST_BITS_PER_WIDE_INT)
+
+/* This is the max size of any pointer on any machine. It does not
+ seem to be as easy to sniff this out of the machine description as
+ it is for MAX_BITSIZE_MODE_ANY_INT since targets may support
+ multiple address sizes and may have different address sizes for
+ different address spaces. However, currently the largest pointer
+ on any platform is 64 bits. When that changes, then it is likely
+ that a target hook should be defined so that targets can make this
+ value larger for those targets. */
+#define ADDR_MAX_BITSIZE 64
+
+/* This is the internal precision used when doing any address
+ arithmetic. The '4' is really 3 + 1. Three of the bits are for
+ the number of extra bits needed to do bit addresses and the other bit
+ is to allow everything to be signed without losing any precision.
+ Then everything is rounded up to the next HWI for efficiency. */
+#define ADDR_MAX_PRECISION \
+ ((ADDR_MAX_BITSIZE + 4 + HOST_BITS_PER_WIDE_INT - 1) \
+ & ~(HOST_BITS_PER_WIDE_INT - 1))
+
+/* The number of HWIs needed to store an offset_int. */
+#define OFFSET_INT_ELTS (ADDR_MAX_PRECISION / HOST_BITS_PER_WIDE_INT)
+
+/* The type of result produced by a binary operation on types T1 and T2.
+ Defined purely for brevity. */
+#define WI_BINARY_RESULT(T1, T2) \
+ typename wi::binary_traits <T1, T2>::result_type
+
+/* The type of result produced by a unary operation on type T. */
+#define WI_UNARY_RESULT(T) \
+ typename wi::unary_traits <T>::result_type
+
+/* Define a variable RESULT to hold the result of a binary operation on
+ X and Y, which have types T1 and T2 respectively. Define VAL to
+ point to the blocks of RESULT. Once the user of the macro has
+ filled in VAL, it should call RESULT.set_len to set the number
+ of initialized blocks. */
+#define WI_BINARY_RESULT_VAR(RESULT, VAL, T1, X, T2, Y) \
+ WI_BINARY_RESULT (T1, T2) RESULT = \
+ wi::int_traits <WI_BINARY_RESULT (T1, T2)>::get_binary_result (X, Y); \
+ HOST_WIDE_INT *VAL = RESULT.write_val ()
+
+/* Similar for the result of a unary operation on X, which has type T. */
+#define WI_UNARY_RESULT_VAR(RESULT, VAL, T, X) \
+ WI_UNARY_RESULT (T) RESULT = \
+ wi::int_traits <WI_UNARY_RESULT (T)>::get_binary_result (X, X); \
+ HOST_WIDE_INT *VAL = RESULT.write_val ()
+
+template <typename T> struct generic_wide_int;
+template <int N> struct fixed_wide_int_storage;
+struct wide_int_storage;
+
+/* An N-bit integer. Until we can use typedef templates, use this instead. */
+#define FIXED_WIDE_INT(N) \
+ generic_wide_int < fixed_wide_int_storage <N> >
+
+typedef generic_wide_int <wide_int_storage> wide_int;
+typedef FIXED_WIDE_INT (ADDR_MAX_PRECISION) offset_int;
+typedef FIXED_WIDE_INT (WIDE_INT_MAX_PRECISION) widest_int;
+
+template <bool SE>
+struct wide_int_ref_storage;
+
+typedef generic_wide_int <wide_int_ref_storage <false> > wide_int_ref;
+
+/* This can be used instead of wide_int_ref if the referenced value is
+ known to have type T. It carries across properties of T's representation,
+ such as whether excess upper bits in a HWI are defined, and can therefore
+ help avoid redundant work.
+
+ The macro could be replaced with a template typedef, once we're able
+ to use those. */
+#define WIDE_INT_REF_FOR(T) \
+ generic_wide_int \
+ <wide_int_ref_storage <wi::int_traits <T>::is_sign_extended> >
+
+namespace wi
+{
+ /* Classifies an integer based on its precision. */
+ enum precision_type {
+ /* The integer has both a precision and defined signedness. This allows
+ the integer to be converted to any width, since we know whether to fill
+ any extra bits with zeros or signs. */
+ FLEXIBLE_PRECISION,
+
+ /* The integer has a variable precision but no defined signedness. */
+ VAR_PRECISION,
+
+ /* The integer has a constant precision (known at GCC compile time)
+ but no defined signedness. */
+ CONST_PRECISION
+ };
+
+ /* This class, which has no default implementation, is expected to
+ provide the following members:
+
+ static const enum precision_type precision_type;
+ Classifies the type of T.
+
+ static const unsigned int precision;
+ Only defined if precision_type == CONST_PRECISION. Specifies the
+ precision of all integers of type T.
+
+ static const bool host_dependent_precision;
+ True if the precision of T depends (or can depend) on the host.
+
+ static unsigned int get_precision (const T &x)
+ Return the number of bits in X.
+
+ static wi::storage_ref *decompose (HOST_WIDE_INT *scratch,
+ unsigned int precision, const T &x)
+ Decompose X as a PRECISION-bit integer, returning the associated
+ wi::storage_ref. SCRATCH is available as scratch space if needed.
+ The routine should assert that PRECISION is acceptable. */
+ template <typename T> struct int_traits;
+
+ /* This class provides a single type, result_type, which specifies the
+ type of integer produced by a binary operation whose inputs have
+ types T1 and T2. The definition should be symmetric. */
+ template <typename T1, typename T2,
+ enum precision_type P1 = int_traits <T1>::precision_type,
+ enum precision_type P2 = int_traits <T2>::precision_type>
+ struct binary_traits;
+
+ /* The result of a unary operation on T is the same as the result of
+ a binary operation on two values of type T. */
+ template <typename T>
+ struct unary_traits : public binary_traits <T, T> {};
+
+ /* Specify the result type for each supported combination of binary
+ inputs. Note that CONST_PRECISION and VAR_PRECISION cannot be
+ mixed, in order to give stronger type checking. When both inputs
+ are CONST_PRECISION, they must have the same precision. */
+ template <>
+ template <typename T1, typename T2>
+ struct binary_traits <T1, T2, FLEXIBLE_PRECISION, FLEXIBLE_PRECISION>
+ {
+ typedef widest_int result_type;
+ };
+
+ template <>
+ template <typename T1, typename T2>
+ struct binary_traits <T1, T2, FLEXIBLE_PRECISION, VAR_PRECISION>
+ {
+ typedef wide_int result_type;
+ };
+
+ template <>
+ template <typename T1, typename T2>
+ struct binary_traits <T1, T2, FLEXIBLE_PRECISION, CONST_PRECISION>
+ {
+ /* Spelled out explicitly (rather than through FIXED_WIDE_INT)
+ so as not to confuse gengtype. */
+ typedef generic_wide_int < fixed_wide_int_storage
+ <int_traits <T2>::precision> > result_type;
+ };
+
+ template <>
+ template <typename T1, typename T2>
+ struct binary_traits <T1, T2, VAR_PRECISION, FLEXIBLE_PRECISION>
+ {
+ typedef wide_int result_type;
+ };
+
+ template <>
+ template <typename T1, typename T2>
+ struct binary_traits <T1, T2, CONST_PRECISION, FLEXIBLE_PRECISION>
+ {
+ /* Spelled out explicitly (rather than through FIXED_WIDE_INT)
+ so as not to confuse gengtype. */
+ typedef generic_wide_int < fixed_wide_int_storage
+ <int_traits <T1>::precision> > result_type;
+ };
+
+ template <>
+ template <typename T1, typename T2>
+ struct binary_traits <T1, T2, CONST_PRECISION, CONST_PRECISION>
+ {
+ /* Spelled out explicitly (rather than through FIXED_WIDE_INT)
+ so as not to confuse gengtype. */
+ STATIC_ASSERT (int_traits <T1>::precision == int_traits <T2>::precision);
+ typedef generic_wide_int < fixed_wide_int_storage
+ <int_traits <T1>::precision> > result_type;
+ };
+
+ template <>
+ template <typename T1, typename T2>
+ struct binary_traits <T1, T2, VAR_PRECISION, VAR_PRECISION>
+ {
+ typedef wide_int result_type;
+ };
+}
+
+/* Public functions for querying and operating on integers. */
+namespace wi
+{
+ template <typename T>
+ unsigned int get_precision (const T &);
+
+ template <typename T1, typename T2>
+ unsigned int get_binary_precision (const T1 &, const T2 &);
+
+ template <typename T1, typename T2>
+ void copy (T1 &, const T2 &);
+
+#define UNARY_PREDICATE \
+ template <typename T> bool
+#define UNARY_FUNCTION \
+ template <typename T> WI_UNARY_RESULT (T)
+#define BINARY_PREDICATE \
+ template <typename T1, typename T2> bool
+#define BINARY_FUNCTION \
+ template <typename T1, typename T2> WI_BINARY_RESULT (T1, T2)
+#define SHIFT_FUNCTION \
+ template <typename T1, typename T2> WI_UNARY_RESULT (T1)
+
+ UNARY_PREDICATE fits_shwi_p (const T &);
+ UNARY_PREDICATE fits_uhwi_p (const T &);
+ UNARY_PREDICATE neg_p (const T &, signop = SIGNED);
+
+ template <typename T>
+ HOST_WIDE_INT sign_mask (const T &);
+
+ BINARY_PREDICATE eq_p (const T1 &, const T2 &);
+ BINARY_PREDICATE ne_p (const T1 &, const T2 &);
+ BINARY_PREDICATE lt_p (const T1 &, const T2 &, signop);
+ BINARY_PREDICATE lts_p (const T1 &, const T2 &);
+ BINARY_PREDICATE ltu_p (const T1 &, const T2 &);
+ BINARY_PREDICATE le_p (const T1 &, const T2 &, signop);
+ BINARY_PREDICATE les_p (const T1 &, const T2 &);
+ BINARY_PREDICATE leu_p (const T1 &, const T2 &);
+ BINARY_PREDICATE gt_p (const T1 &, const T2 &, signop);
+ BINARY_PREDICATE gts_p (const T1 &, const T2 &);
+ BINARY_PREDICATE gtu_p (const T1 &, const T2 &);
+ BINARY_PREDICATE ge_p (const T1 &, const T2 &, signop);
+ BINARY_PREDICATE ges_p (const T1 &, const T2 &);
+ BINARY_PREDICATE geu_p (const T1 &, const T2 &);
+
+ template <typename T1, typename T2>
+ int cmp (const T1 &, const T2 &, signop);
+
+ template <typename T1, typename T2>
+ int cmps (const T1 &, const T2 &);
+
+ template <typename T1, typename T2>
+ int cmpu (const T1 &, const T2 &);
+
+ UNARY_FUNCTION bit_not (const T &);
+ UNARY_FUNCTION neg (const T &);
+ UNARY_FUNCTION neg (const T &, bool *);
+ UNARY_FUNCTION abs (const T &);
+ UNARY_FUNCTION ext (const T &, unsigned int, signop);
+ UNARY_FUNCTION sext (const T &, unsigned int);
+ UNARY_FUNCTION zext (const T &, unsigned int);
+ UNARY_FUNCTION set_bit (const T &, unsigned int);
+
+ BINARY_FUNCTION min (const T1 &, const T2 &, signop);
+ BINARY_FUNCTION smin (const T1 &, const T2 &);
+ BINARY_FUNCTION umin (const T1 &, const T2 &);
+ BINARY_FUNCTION max (const T1 &, const T2 &, signop);
+ BINARY_FUNCTION smax (const T1 &, const T2 &);
+ BINARY_FUNCTION umax (const T1 &, const T2 &);
+
+ BINARY_FUNCTION bit_and (const T1 &, const T2 &);
+ BINARY_FUNCTION bit_and_not (const T1 &, const T2 &);
+ BINARY_FUNCTION bit_or (const T1 &, const T2 &);
+ BINARY_FUNCTION bit_or_not (const T1 &, const T2 &);
+ BINARY_FUNCTION bit_xor (const T1 &, const T2 &);
+ BINARY_FUNCTION add (const T1 &, const T2 &);
+ BINARY_FUNCTION add (const T1 &, const T2 &, signop, bool *);
+ BINARY_FUNCTION sub (const T1 &, const T2 &);
+ BINARY_FUNCTION sub (const T1 &, const T2 &, signop, bool *);
+ BINARY_FUNCTION mul (const T1 &, const T2 &);
+ BINARY_FUNCTION mul (const T1 &, const T2 &, signop, bool *);
+ BINARY_FUNCTION smul (const T1 &, const T2 &, bool *);
+ BINARY_FUNCTION umul (const T1 &, const T2 &, bool *);
+ BINARY_FUNCTION mul_high (const T1 &, const T2 &, signop);
+ BINARY_FUNCTION div_trunc (const T1 &, const T2 &, signop, bool * = 0);
+ BINARY_FUNCTION sdiv_trunc (const T1 &, const T2 &);
+ BINARY_FUNCTION udiv_trunc (const T1 &, const T2 &);
+ BINARY_FUNCTION div_floor (const T1 &, const T2 &, signop, bool * = 0);
+ BINARY_FUNCTION udiv_floor (const T1 &, const T2 &);
+ BINARY_FUNCTION sdiv_floor (const T1 &, const T2 &);
+ BINARY_FUNCTION div_ceil (const T1 &, const T2 &, signop, bool * = 0);
+ BINARY_FUNCTION div_round (const T1 &, const T2 &, signop, bool * = 0);
+ BINARY_FUNCTION divmod_trunc (const T1 &, const T2 &, signop,
+ WI_BINARY_RESULT (T1, T2) *);
+ BINARY_FUNCTION mod_trunc (const T1 &, const T2 &, signop, bool * = 0);
+ BINARY_FUNCTION smod_trunc (const T1 &, const T2 &);
+ BINARY_FUNCTION umod_trunc (const T1 &, const T2 &);
+ BINARY_FUNCTION mod_floor (const T1 &, const T2 &, signop, bool * = 0);
+ BINARY_FUNCTION umod_floor (const T1 &, const T2 &);
+ BINARY_FUNCTION mod_ceil (const T1 &, const T2 &, signop, bool * = 0);
+ BINARY_FUNCTION mod_round (const T1 &, const T2 &, signop, bool * = 0);
+
+ template <typename T1, typename T2>
+ bool multiple_of_p (const T1 &, const T2 &, signop,
+ WI_BINARY_RESULT (T1, T2) *);
+
+ SHIFT_FUNCTION lshift (const T1 &, const T2 &);
+ SHIFT_FUNCTION lrshift (const T1 &, const T2 &);
+ SHIFT_FUNCTION arshift (const T1 &, const T2 &);
+ SHIFT_FUNCTION rshift (const T1 &, const T2 &, signop sgn);
+ SHIFT_FUNCTION lrotate (const T1 &, const T2 &, unsigned int = 0);
+ SHIFT_FUNCTION rrotate (const T1 &, const T2 &, unsigned int = 0);
+
+#undef SHIFT_FUNCTION
+#undef BINARY_PREDICATE
+#undef BINARY_FUNCTION
+#undef UNARY_PREDICATE
+#undef UNARY_FUNCTION
+
+ bool only_sign_bit_p (const wide_int_ref &, unsigned int);
+ bool only_sign_bit_p (const wide_int_ref &);
+ int clz (const wide_int_ref &);
+ int clrsb (const wide_int_ref &);
+ int ctz (const wide_int_ref &);
+ int exact_log2 (const wide_int_ref &);
+ int floor_log2 (const wide_int_ref &);
+ int ffs (const wide_int_ref &);
+ int popcount (const wide_int_ref &);
+ int parity (const wide_int_ref &);
+
+ template <typename T>
+ unsigned HOST_WIDE_INT extract_uhwi (const T &, unsigned int, unsigned int);
+
+ template <typename T>
+ unsigned int min_precision (const T &, signop);
+}
+
+namespace wi
+{
+ /* Contains the components of a decomposed integer for easy, direct
+ access. */
+ struct storage_ref
+ {
+ storage_ref (const HOST_WIDE_INT *, unsigned int, unsigned int);
+
+ const HOST_WIDE_INT *val;
+ unsigned int len;
+ unsigned int precision;
+
+ /* Provide enough trappings for this class to act as storage for
+ generic_wide_int. */
+ unsigned int get_len () const;
+ unsigned int get_precision () const;
+ const HOST_WIDE_INT *get_val () const;
+ };
+}
+
+inline::wi::storage_ref::storage_ref (const HOST_WIDE_INT *val_in,
+ unsigned int len_in,
+ unsigned int precision_in)
+ : val (val_in), len (len_in), precision (precision_in)
+{
+}
+
+inline unsigned int
+wi::storage_ref::get_len () const
+{
+ return len;
+}
+
+inline unsigned int
+wi::storage_ref::get_precision () const
+{
+ return precision;
+}
+
+inline const HOST_WIDE_INT *
+wi::storage_ref::get_val () const
+{
+ return val;
+}
+
+/* This class defines an integer type using the storage provided by the
+ template argument. The storage class must provide the following
+ functions:
+
+ unsigned int get_precision () const
+ Return the number of bits in the integer.
+
+ HOST_WIDE_INT *get_val () const
+ Return a pointer to the array of blocks that encodes the integer.
+
+ unsigned int get_len () const
+ Return the number of blocks in get_val (). If this is smaller
+ than the number of blocks implied by get_precision (), the
+ remaining blocks are sign extensions of block get_len () - 1.
+
+ Although not required by generic_wide_int itself, writable storage
+ classes can also provide the following functions:
+
+ HOST_WIDE_INT *write_val ()
+ Get a modifiable version of get_val ()
+
+ unsigned int set_len (unsigned int len)
+ Set the value returned by get_len () to LEN. */
+template <typename storage>
+class GTY(()) generic_wide_int : public storage
+{
+public:
+ generic_wide_int ();
+
+ template <typename T>
+ generic_wide_int (const T &);
+
+ template <typename T>
+ generic_wide_int (const T &, unsigned int);
+
+ /* Conversions. */
+ HOST_WIDE_INT to_shwi (unsigned int) const;
+ HOST_WIDE_INT to_shwi () const;
+ unsigned HOST_WIDE_INT to_uhwi (unsigned int) const;
+ unsigned HOST_WIDE_INT to_uhwi () const;
+ HOST_WIDE_INT to_short_addr () const;
+
+ /* Public accessors for the interior of a wide int. */
+ HOST_WIDE_INT sign_mask () const;
+ HOST_WIDE_INT elt (unsigned int) const;
+ unsigned HOST_WIDE_INT ulow () const;
+ unsigned HOST_WIDE_INT uhigh () const;
+ HOST_WIDE_INT slow () const;
+ HOST_WIDE_INT shigh () const;
+
+ template <typename T>
+ generic_wide_int &operator = (const T &);
+
+#define BINARY_PREDICATE(OP, F) \
+ template <typename T> \
+ bool OP (const T &c) const { return wi::F (*this, c); }
+
+#define UNARY_OPERATOR(OP, F) \
+ WI_UNARY_RESULT (generic_wide_int) OP () const { return wi::F (*this); }
+
+#define BINARY_OPERATOR(OP, F) \
+ template <typename T> \
+ WI_BINARY_RESULT (generic_wide_int, T) \
+ OP (const T &c) const { return wi::F (*this, c); }
+
+#define ASSIGNMENT_OPERATOR(OP, F) \
+ template <typename T> \
+ generic_wide_int &OP (const T &c) { return (*this = wi::F (*this, c)); }
+
+#define INCDEC_OPERATOR(OP, DELTA) \
+ generic_wide_int &OP () { *this += DELTA; return *this; }
+
+ UNARY_OPERATOR (operator ~, bit_not)
+ UNARY_OPERATOR (operator -, neg)
+ BINARY_PREDICATE (operator ==, eq_p)
+ BINARY_PREDICATE (operator !=, ne_p)
+ BINARY_OPERATOR (operator &, bit_and)
+ BINARY_OPERATOR (and_not, bit_and_not)
+ BINARY_OPERATOR (operator |, bit_or)
+ BINARY_OPERATOR (or_not, bit_or_not)
+ BINARY_OPERATOR (operator ^, bit_xor)
+ BINARY_OPERATOR (operator +, add)
+ BINARY_OPERATOR (operator -, sub)
+ BINARY_OPERATOR (operator *, mul)
+ ASSIGNMENT_OPERATOR (operator &=, bit_and)
+ ASSIGNMENT_OPERATOR (operator |=, bit_or)
+ ASSIGNMENT_OPERATOR (operator ^=, bit_xor)
+ ASSIGNMENT_OPERATOR (operator +=, add)
+ ASSIGNMENT_OPERATOR (operator -=, sub)
+ ASSIGNMENT_OPERATOR (operator *=, mul)
+ INCDEC_OPERATOR (operator ++, 1)
+ INCDEC_OPERATOR (operator --, -1)
+
+#undef BINARY_PREDICATE
+#undef UNARY_OPERATOR
+#undef BINARY_OPERATOR
+#undef ASSIGNMENT_OPERATOR
+#undef INCDEC_OPERATOR
+
+ /* Debugging functions. */
+ void dump () const;
+
+ static const bool is_sign_extended
+ = wi::int_traits <generic_wide_int <storage> >::is_sign_extended;
+};
+
+template <typename storage>
+inline generic_wide_int <storage>::generic_wide_int () {}
+
+template <typename storage>
+template <typename T>
+inline generic_wide_int <storage>::generic_wide_int (const T &x)
+ : storage (x)
+{
+}
+
+template <typename storage>
+template <typename T>
+inline generic_wide_int <storage>::generic_wide_int (const T &x,
+ unsigned int precision)
+ : storage (x, precision)
+{
+}
+
+/* Return THIS as a signed HOST_WIDE_INT, sign-extending from PRECISION.
+ If THIS does not fit in PRECISION, the information is lost. */
+template <typename storage>
+inline HOST_WIDE_INT
+generic_wide_int <storage>::to_shwi (unsigned int precision) const
+{
+ if (precision < HOST_BITS_PER_WIDE_INT)
+ return sext_hwi (this->get_val ()[0], precision);
+ else
+ return this->get_val ()[0];
+}
+
+/* Return THIS as a signed HOST_WIDE_INT, in its natural precision. */
+template <typename storage>
+inline HOST_WIDE_INT
+generic_wide_int <storage>::to_shwi () const
+{
+ if (is_sign_extended)
+ return this->get_val ()[0];
+ else
+ return to_shwi (this->get_precision ());
+}
+
+/* Return THIS as an unsigned HOST_WIDE_INT, zero-extending from
+ PRECISION. If THIS does not fit in PRECISION, the information
+ is lost. */
+template <typename storage>
+inline unsigned HOST_WIDE_INT
+generic_wide_int <storage>::to_uhwi (unsigned int precision) const
+{
+ if (precision < HOST_BITS_PER_WIDE_INT)
+ return zext_hwi (this->get_val ()[0], precision);
+ else
+ return this->get_val ()[0];
+}
+
+/* Return THIS as an unsigned HOST_WIDE_INT, in its natural precision. */
+template <typename storage>
+inline unsigned HOST_WIDE_INT
+generic_wide_int <storage>::to_uhwi () const
+{
+ return to_uhwi (this->get_precision ());
+}
+
+/* TODO: The compiler is half converted from using HOST_WIDE_INT to
+ represent addresses to using offset_int to represent addresses.
+ We use to_short_addr at the interface from new code to old,
+ unconverted code. */
+template <typename storage>
+inline HOST_WIDE_INT
+generic_wide_int <storage>::to_short_addr () const
+{
+ return this->get_val ()[0];
+}
+
+/* Return the implicit value of blocks above get_len (). */
+template <typename storage>
+inline HOST_WIDE_INT
+generic_wide_int <storage>::sign_mask () const
+{
+ unsigned int len = this->get_len ();
+ unsigned HOST_WIDE_INT high = this->get_val ()[len - 1];
+ if (!is_sign_extended)
+ {
+ unsigned int precision = this->get_precision ();
+ int excess = len * HOST_BITS_PER_WIDE_INT - precision;
+ if (excess > 0)
+ high <<= excess;
+ }
+ return (HOST_WIDE_INT) (high) < 0 ? -1 : 0;
+}
+
+/* Return the signed value of the least-significant explicitly-encoded
+ block. */
+template <typename storage>
+inline HOST_WIDE_INT
+generic_wide_int <storage>::slow () const
+{
+ return this->get_val ()[0];
+}
+
+/* Return the signed value of the most-significant explicitly-encoded
+ block. */
+template <typename storage>
+inline HOST_WIDE_INT
+generic_wide_int <storage>::shigh () const
+{
+ return this->get_val ()[this->get_len () - 1];
+}
+
+/* Return the unsigned value of the least-significant
+ explicitly-encoded block. */
+template <typename storage>
+inline unsigned HOST_WIDE_INT
+generic_wide_int <storage>::ulow () const
+{
+ return this->get_val ()[0];
+}
+
+/* Return the unsigned value of the most-significant
+ explicitly-encoded block. */
+template <typename storage>
+inline unsigned HOST_WIDE_INT
+generic_wide_int <storage>::uhigh () const
+{
+ return this->get_val ()[this->get_len () - 1];
+}
+
+/* Return block I, which might be implicitly or explicit encoded. */
+template <typename storage>
+inline HOST_WIDE_INT
+generic_wide_int <storage>::elt (unsigned int i) const
+{
+ if (i >= this->get_len ())
+ return sign_mask ();
+ else
+ return this->get_val ()[i];
+}
+
+template <typename storage>
+template <typename T>
+generic_wide_int <storage> &
+generic_wide_int <storage>::operator = (const T &x)
+{
+ storage::operator = (x);
+ return *this;
+}
+
+/* Dump the contents of the integer to stderr, for debugging.  Blocks
+ are printed most-significant first; a leading "...," marks implicit
+ (non-explicitly-encoded) blocks. */
+template <typename storage>
+void
+generic_wide_int <storage>::dump () const
+{
+ unsigned int len = this->get_len ();
+ const HOST_WIDE_INT *val = this->get_val ();
+ unsigned int precision = this->get_precision ();
+ fprintf (stderr, "[");
+ if (len * HOST_BITS_PER_WIDE_INT < precision)
+ fprintf (stderr, "...,");
+ for (unsigned int i = 0; i < len - 1; ++i)
+ fprintf (stderr, HOST_WIDE_INT_PRINT_HEX ",", val[len - 1 - i]);
+ fprintf (stderr, HOST_WIDE_INT_PRINT_HEX "], precision = %d\n",
+ val[0], precision);
+}
+
+/* A generic_wide_int inherits the traits of its storage policy and
+ adds the two operations that wi:: routines need: querying the
+ precision and decomposing the value into a storage_ref. */
+namespace wi
+{
+ template <>
+ template <typename storage>
+ struct int_traits < generic_wide_int <storage> >
+ : public wi::int_traits <storage>
+ {
+ static unsigned int get_precision (const generic_wide_int <storage> &);
+ static wi::storage_ref decompose (HOST_WIDE_INT *, unsigned int,
+ const generic_wide_int <storage> &);
+ };
+}
+
+template <typename storage>
+inline unsigned int
+wi::int_traits < generic_wide_int <storage> >::
+get_precision (const generic_wide_int <storage> &x)
+{
+ return x.get_precision ();
+}
+
+/* The scratch buffer parameter is unused: the reference can point
+ straight at X's own blocks, so no copy is needed. */
+template <typename storage>
+inline wi::storage_ref
+wi::int_traits < generic_wide_int <storage> >::
+decompose (HOST_WIDE_INT *, unsigned int precision,
+ const generic_wide_int <storage> &x)
+{
+ gcc_checking_assert (precision == x.get_precision ());
+ return wi::storage_ref (x.get_val (), x.get_len (), precision);
+}
+
+/* Provide the storage for a wide_int_ref.  This acts like a read-only
+ wide_int, with the optimization that VAL is normally a pointer to
+ another integer's storage, so that no array copy is needed. */
+template <bool SE>
+struct wide_int_ref_storage : public wi::storage_ref
+{
+private:
+ /* Scratch space that can be used when decomposing the original integer.
+ It must live as long as this object.  Two HWIs are enough because
+ primitive types need at most one extra zero block (see the
+ primitive_int_traits::decompose definitions below). */
+ HOST_WIDE_INT scratch[2];
+
+public:
+ wide_int_ref_storage (const wi::storage_ref &);
+
+ template <typename T>
+ wide_int_ref_storage (const T &);
+
+ template <typename T>
+ wide_int_ref_storage (const T &, unsigned int);
+};
+
+/* Create a reference from an existing reference. */
+template <bool SE>
+inline wide_int_ref_storage <SE>::
+wide_int_ref_storage (const wi::storage_ref &x)
+ : storage_ref (x)
+{}
+
+/* Create a reference to integer X in its natural precision.  Note
+ that the natural precision is host-dependent for primitive
+ types. */
+template <bool SE>
+template <typename T>
+inline wide_int_ref_storage <SE>::wide_int_ref_storage (const T &x)
+ : storage_ref (wi::int_traits <T>::decompose (scratch,
+ wi::get_precision (x), x))
+{
+}
+
+/* Create a reference to integer X in precision PRECISION. */
+template <bool SE>
+template <typename T>
+inline wide_int_ref_storage <SE>::wide_int_ref_storage (const T &x,
+ unsigned int precision)
+ : storage_ref (wi::int_traits <T>::decompose (scratch, precision, x))
+{
+}
+
+/* Traits for references: variable precision, with the sign-extension
+ guarantee given by the SE template parameter. */
+namespace wi
+{
+ template <>
+ template <bool SE>
+ struct int_traits <wide_int_ref_storage <SE> >
+ {
+ static const enum precision_type precision_type = VAR_PRECISION;
+ /* wi::storage_ref can be a reference to a primitive type,
+ so this is the conservatively-correct setting. */
+ static const bool host_dependent_precision = true;
+ static const bool is_sign_extended = SE;
+ };
+}
+
+/* Out-of-line helpers for building wide_int-style results; defined in
+ wide-int.cc.  Both return the canonical block length of the result. */
+namespace wi
+{
+ unsigned int force_to_size (HOST_WIDE_INT *, const HOST_WIDE_INT *,
+ unsigned int, unsigned int, unsigned int,
+ signop sgn);
+ unsigned int from_array (HOST_WIDE_INT *, const HOST_WIDE_INT *,
+ unsigned int, unsigned int, bool = true);
+}
+
+/* The storage used by wide_int: a fixed-size inline block array plus
+ a runtime length and precision. */
+class GTY(()) wide_int_storage
+{
+private:
+ HOST_WIDE_INT val[WIDE_INT_MAX_ELTS];
+ unsigned int len;
+ unsigned int precision;
+
+public:
+ wide_int_storage ();
+ template <typename T>
+ wide_int_storage (const T &);
+
+ /* The standard generic_wide_int storage methods. */
+ unsigned int get_precision () const;
+ const HOST_WIDE_INT *get_val () const;
+ unsigned int get_len () const;
+ HOST_WIDE_INT *write_val ();
+ void set_len (unsigned int, bool = false);
+
+ static wide_int from (const wide_int_ref &, unsigned int, signop);
+ static wide_int from_array (const HOST_WIDE_INT *, unsigned int,
+ unsigned int, bool = true);
+ static wide_int create (unsigned int);
+
+ /* FIXME: target-dependent, so should disappear. */
+ wide_int bswap () const;
+};
+
+namespace wi
+{
+ template <>
+ struct int_traits <wide_int_storage>
+ {
+ static const enum precision_type precision_type = VAR_PRECISION;
+ /* Guaranteed by a static assert in the wide_int_storage constructor. */
+ static const bool host_dependent_precision = false;
+ static const bool is_sign_extended = true;
+ template <typename T1, typename T2>
+ static wide_int get_binary_result (const T1 &, const T2 &);
+ };
+}
+
+/* Default constructor leaves the storage uninitialized; callers must
+ set the precision and value before use (see create ()). */
+inline wide_int_storage::wide_int_storage () {}
+
+/* Initialize the storage from integer X, in its natural precision.
+ Note that we do not allow integers with host-dependent precision
+ to become wide_ints; wide_ints must always be logically independent
+ of the host. */
+template <typename T>
+inline wide_int_storage::wide_int_storage (const T &x)
+{
+ { STATIC_ASSERT (!wi::int_traits<T>::host_dependent_precision); }
+ { STATIC_ASSERT (wi::int_traits<T>::precision_type != wi::CONST_PRECISION); }
+ WIDE_INT_REF_FOR (T) xi (x);
+ precision = xi.precision;
+ wi::copy (*this, xi);
+}
+
+inline unsigned int
+wide_int_storage::get_precision () const
+{
+ return precision;
+}
+
+inline const HOST_WIDE_INT *
+wide_int_storage::get_val () const
+{
+ return val;
+}
+
+inline unsigned int
+wide_int_storage::get_len () const
+{
+ return len;
+}
+
+inline HOST_WIDE_INT *
+wide_int_storage::write_val ()
+{
+ return val;
+}
+
+/* Record that the value now occupies L blocks.  Unless the caller
+ promises the top block is already sign-extended, canonicalize by
+ sign-extending any bits of the top block beyond PRECISION. */
+inline void
+wide_int_storage::set_len (unsigned int l, bool is_sign_extended)
+{
+ len = l;
+ if (!is_sign_extended && len * HOST_BITS_PER_WIDE_INT > precision)
+ val[len - 1] = sext_hwi (val[len - 1],
+ precision % HOST_BITS_PER_WIDE_INT);
+}
+
+/* Treat X as having signedness SGN and convert it to a PRECISION-bit
+ number. */
+inline wide_int
+wide_int_storage::from (const wide_int_ref &x, unsigned int precision,
+ signop sgn)
+{
+ wide_int result = wide_int::create (precision);
+ result.set_len (wi::force_to_size (result.write_val (), x.val, x.len,
+ x.precision, precision, sgn));
+ return result;
+}
+
+/* Create a wide_int from the explicit block encoding given by VAL and
+ LEN.  PRECISION is the precision of the integer.  NEED_CANON_P is
+ true if the encoding may have redundant trailing blocks. */
+inline wide_int
+wide_int_storage::from_array (const HOST_WIDE_INT *val, unsigned int len,
+ unsigned int precision, bool need_canon_p)
+{
+ wide_int result = wide_int::create (precision);
+ result.set_len (wi::from_array (result.write_val (), val, len, precision,
+ need_canon_p));
+ return result;
+}
+
+/* Return an uninitialized wide_int with precision PRECISION.  Only the
+ precision is set; the caller must fill in the value and length. */
+inline wide_int
+wide_int_storage::create (unsigned int precision)
+{
+ wide_int x;
+ x.precision = precision;
+ return x;
+}
+
+/* Return an uninitialized wide_int whose precision is that of the
+ fixed-precision operand (at least one of X and Y must have one). */
+template <typename T1, typename T2>
+inline wide_int
+wi::int_traits <wide_int_storage>::get_binary_result (const T1 &x, const T2 &y)
+{
+ /* This shouldn't be used for two flexible-precision inputs. */
+ STATIC_ASSERT (wi::int_traits <T1>::precision_type != FLEXIBLE_PRECISION
+ || wi::int_traits <T2>::precision_type != FLEXIBLE_PRECISION);
+ if (wi::int_traits <T1>::precision_type == FLEXIBLE_PRECISION)
+ return wide_int::create (wi::get_precision (y));
+ else
+ return wide_int::create (wi::get_precision (x));
+}
+
+/* The storage used by FIXED_WIDE_INT (N). */
+template <int N>
+class GTY(()) fixed_wide_int_storage
+{
+private:
+ /* NOTE(review): the "+ 1" gives one HWI beyond what N bits strictly
+ need, presumably so an unsigned N-bit value with the top bit set
+ can carry an explicit zero block — confirm against wide-int.cc. */
+ HOST_WIDE_INT val[(N + HOST_BITS_PER_WIDE_INT + 1) / HOST_BITS_PER_WIDE_INT];
+ unsigned int len;
+
+public:
+ fixed_wide_int_storage ();
+ template <typename T>
+ fixed_wide_int_storage (const T &);
+
+ /* The standard generic_wide_int storage methods. */
+ unsigned int get_precision () const;
+ const HOST_WIDE_INT *get_val () const;
+ unsigned int get_len () const;
+ HOST_WIDE_INT *write_val ();
+ void set_len (unsigned int, bool = false);
+
+ static FIXED_WIDE_INT (N) from (const wide_int_ref &, signop);
+ static FIXED_WIDE_INT (N) from_array (const HOST_WIDE_INT *, unsigned int,
+ bool = true);
+};
+
+/* Traits for fixed-precision storage: the precision N is a
+ compile-time constant. */
+namespace wi
+{
+ template <>
+ template <int N>
+ struct int_traits < fixed_wide_int_storage <N> >
+ {
+ static const enum precision_type precision_type = CONST_PRECISION;
+ static const bool host_dependent_precision = false;
+ static const bool is_sign_extended = true;
+ static const unsigned int precision = N;
+ template <typename T1, typename T2>
+ static FIXED_WIDE_INT (N) get_binary_result (const T1 &, const T2 &);
+ };
+}
+
+/* Default constructor leaves the value uninitialized. */
+template <int N>
+inline fixed_wide_int_storage <N>::fixed_wide_int_storage () {}
+
+/* Initialize the storage from integer X, in precision N. */
+template <int N>
+template <typename T>
+inline fixed_wide_int_storage <N>::fixed_wide_int_storage (const T &x)
+{
+ /* Check for type compatibility.  We don't want to initialize a
+ fixed-width integer from something like a wide_int. */
+ WI_BINARY_RESULT (T, FIXED_WIDE_INT (N)) *assertion ATTRIBUTE_UNUSED;
+ wi::copy (*this, WIDE_INT_REF_FOR (T) (x, N));
+}
+
+template <int N>
+inline unsigned int
+fixed_wide_int_storage <N>::get_precision () const
+{
+ return N;
+}
+
+template <int N>
+inline const HOST_WIDE_INT *
+fixed_wide_int_storage <N>::get_val () const
+{
+ return val;
+}
+
+template <int N>
+inline unsigned int
+fixed_wide_int_storage <N>::get_len () const
+{
+ return len;
+}
+
+template <int N>
+inline HOST_WIDE_INT *
+fixed_wide_int_storage <N>::write_val ()
+{
+ return val;
+}
+
+/* Record the block length.  No canonicalization is needed because N is
+ a whole number of blocks, which the static assert enforces. */
+template <int N>
+inline void
+fixed_wide_int_storage <N>::set_len (unsigned int l, bool)
+{
+ len = l;
+ /* There are no excess bits in val[len - 1]. */
+ STATIC_ASSERT (N % HOST_BITS_PER_WIDE_INT == 0);
+}
+
+/* Treat X as having signedness SGN and convert it to an N-bit number. */
+template <int N>
+inline FIXED_WIDE_INT (N)
+fixed_wide_int_storage <N>::from (const wide_int_ref &x, signop sgn)
+{
+ FIXED_WIDE_INT (N) result;
+ result.set_len (wi::force_to_size (result.write_val (), x.val, x.len,
+ x.precision, N, sgn));
+ return result;
+}
+
+/* Create a FIXED_WIDE_INT (N) from the explicit block encoding given by
+ VAL and LEN.  NEED_CANON_P is true if the encoding may have redundant
+ trailing blocks. */
+template <int N>
+inline FIXED_WIDE_INT (N)
+fixed_wide_int_storage <N>::from_array (const HOST_WIDE_INT *val,
+ unsigned int len,
+ bool need_canon_p)
+{
+ FIXED_WIDE_INT (N) result;
+ result.set_len (wi::from_array (result.write_val (), val, len,
+ N, need_canon_p));
+ return result;
+}
+
+/* The result of a binary operation on fixed-precision operands is
+ always another N-bit integer, returned uninitialized here. */
+template <int N>
+template <typename T1, typename T2>
+inline FIXED_WIDE_INT (N)
+wi::int_traits < fixed_wide_int_storage <N> >::
+get_binary_result (const T1 &, const T2 &)
+{
+ return FIXED_WIDE_INT (N) ();
+}
+
+/* A reference to one element of a trailing_wide_ints structure.  It
+ does not own its data; it points into the parent structure. */
+class trailing_wide_int_storage
+{
+private:
+ /* The precision of the integer, which is a fixed property of the
+ parent trailing_wide_ints. */
+ unsigned int m_precision;
+
+ /* A pointer to the length field. */
+ unsigned char *m_len;
+
+ /* A pointer to the HWI array.  There are enough elements to hold all
+ values of precision M_PRECISION. */
+ HOST_WIDE_INT *m_val;
+
+public:
+ trailing_wide_int_storage (unsigned int, unsigned char *, HOST_WIDE_INT *);
+
+ /* The standard generic_wide_int storage methods. */
+ unsigned int get_len () const;
+ unsigned int get_precision () const;
+ const HOST_WIDE_INT *get_val () const;
+ HOST_WIDE_INT *write_val ();
+ void set_len (unsigned int, bool = false);
+
+ template <typename T>
+ trailing_wide_int_storage &operator = (const T &);
+};
+
+typedef generic_wide_int <trailing_wide_int_storage> trailing_wide_int;
+
+/* trailing_wide_int behaves like a wide_int. */
+namespace wi
+{
+ template <>
+ struct int_traits <trailing_wide_int_storage>
+ : public int_traits <wide_int_storage> {};
+}
+
+/* An array of N wide_int-like objects that can be put at the end of
+ a variable-sized structure.  Use extra_size to calculate how many
+ bytes beyond the sizeof need to be allocated.  Use set_precision
+ to initialize the structure. */
+template <int N>
+class GTY(()) trailing_wide_ints
+{
+private:
+ /* The shared precision of each number. */
+ unsigned short m_precision;
+
+ /* The shared maximum length of each number. */
+ unsigned char m_max_len;
+
+ /* The current length of each number. */
+ unsigned char m_len[N];
+
+ /* The variable-length part of the structure, which always contains
+ at least one HWI.  Element I starts at index I * M_MAX_LEN.
+ (Declared with one element; the real size comes from allocating
+ extra_size () additional bytes.) */
+ HOST_WIDE_INT m_val[1];
+
+public:
+ void set_precision (unsigned int);
+ trailing_wide_int operator [] (unsigned int);
+ static size_t extra_size (unsigned int);
+};
+
+/* Wrap a precision, a pointer to a length byte and a pointer to a
+ block array belonging to a parent trailing_wide_ints. */
+inline trailing_wide_int_storage::
+trailing_wide_int_storage (unsigned int precision, unsigned char *len,
+ HOST_WIDE_INT *val)
+ : m_precision (precision), m_len (len), m_val (val)
+{
+}
+
+inline unsigned int
+trailing_wide_int_storage::get_len () const
+{
+ return *m_len;
+}
+
+inline unsigned int
+trailing_wide_int_storage::get_precision () const
+{
+ return m_precision;
+}
+
+inline const HOST_WIDE_INT *
+trailing_wide_int_storage::get_val () const
+{
+ return m_val;
+}
+
+inline HOST_WIDE_INT *
+trailing_wide_int_storage::write_val ()
+{
+ return m_val;
+}
+
+/* Record the block length in the parent structure, canonicalizing the
+ top block by sign extension unless the caller promises it is already
+ sign-extended (mirrors wide_int_storage::set_len). */
+inline void
+trailing_wide_int_storage::set_len (unsigned int len, bool is_sign_extended)
+{
+ *m_len = len;
+ if (!is_sign_extended && len * HOST_BITS_PER_WIDE_INT > m_precision)
+ m_val[len - 1] = sext_hwi (m_val[len - 1],
+ m_precision % HOST_BITS_PER_WIDE_INT);
+}
+
+/* Assign integer X to the referenced element, converting it to the
+ element's fixed precision. */
+template <typename T>
+inline trailing_wide_int_storage &
+trailing_wide_int_storage::operator = (const T &x)
+{
+ WIDE_INT_REF_FOR (T) xi (x, m_precision);
+ wi::copy (*this, xi);
+ return *this;
+}
+
+/* Initialize the structure and record that all elements have precision
+ PRECISION. */
+template <int N>
+inline void
+trailing_wide_ints <N>::set_precision (unsigned int precision)
+{
+ m_precision = precision;
+ m_max_len = ((precision + HOST_BITS_PER_WIDE_INT - 1)
+ / HOST_BITS_PER_WIDE_INT);
+}
+
+/* Return a reference to element INDEX. */
+template <int N>
+inline trailing_wide_int
+trailing_wide_ints <N>::operator [] (unsigned int index)
+{
+ return trailing_wide_int_storage (m_precision, &m_len[index],
+ &m_val[index * m_max_len]);
+}
+
+/* Return how many extra bytes need to be added to the end of the structure
+ in order to handle N wide_ints of precision PRECISION.  The "- 1"
+ accounts for the one HWI already declared in m_val. */
+template <int N>
+inline size_t
+trailing_wide_ints <N>::extra_size (unsigned int precision)
+{
+ unsigned int max_len = ((precision + HOST_BITS_PER_WIDE_INT - 1)
+ / HOST_BITS_PER_WIDE_INT);
+ return (N * max_len - 1) * sizeof (HOST_WIDE_INT);
+}
+
+/* This macro is used in structures that end with a trailing_wide_ints field
+ called FIELD.  It declares get_NAME() and set_NAME() methods to access
+ element I of FIELD. */
+#define TRAILING_WIDE_INT_ACCESSOR(NAME, FIELD, I) \
+ trailing_wide_int get_##NAME () { return FIELD[I]; } \
+ template <typename T> void set_##NAME (const T &x) { FIELD[I] = x; }
+
+namespace wi
+{
+ /* Implementation of int_traits for primitive integer types like "int". */
+ template <typename T, bool signed_p>
+ struct primitive_int_traits
+ {
+ static const enum precision_type precision_type = FLEXIBLE_PRECISION;
+ static const bool host_dependent_precision = true;
+ static const bool is_sign_extended = true;
+ static unsigned int get_precision (T);
+ static wi::storage_ref decompose (HOST_WIDE_INT *, unsigned int, T);
+ };
+}
+
+/* The natural precision of a primitive type is its bit size on the
+ host, hence host_dependent_precision above. */
+template <typename T, bool signed_p>
+inline unsigned int
+wi::primitive_int_traits <T, signed_p>::get_precision (T)
+{
+ return sizeof (T) * CHAR_BIT;
+}
+
+/* Decompose X into SCRATCH.  An unsigned value with its top bit set
+ needs a second, zero block so the encoding reads as nonnegative at
+ precisions wider than one HWI. */
+template <typename T, bool signed_p>
+inline wi::storage_ref
+wi::primitive_int_traits <T, signed_p>::decompose (HOST_WIDE_INT *scratch,
+ unsigned int precision, T x)
+{
+ scratch[0] = x;
+ if (signed_p || scratch[0] >= 0 || precision <= HOST_BITS_PER_WIDE_INT)
+ return wi::storage_ref (scratch, 1, precision);
+ scratch[1] = 0;
+ return wi::storage_ref (scratch, 2, precision);
+}
+
+/* Allow primitive C types to be used in wi:: routines. */
+namespace wi
+{
+ template <>
+ struct int_traits <int>
+ : public primitive_int_traits <int, true> {};
+
+ template <>
+ struct int_traits <unsigned int>
+ : public primitive_int_traits <unsigned int, false> {};
+
+/* Only specialize for HOST_WIDE_INT when it is a distinct type from
+ int, to avoid duplicate specializations. */
+#if HOST_BITS_PER_INT != HOST_BITS_PER_WIDE_INT
+ template <>
+ struct int_traits <HOST_WIDE_INT>
+ : public primitive_int_traits <HOST_WIDE_INT, true> {};
+
+ template <>
+ struct int_traits <unsigned HOST_WIDE_INT>
+ : public primitive_int_traits <unsigned HOST_WIDE_INT, false> {};
+#endif
+}
+
+namespace wi
+{
+ /* Stores HWI-sized integer VAL, treating it as having signedness SGN
+ and precision PRECISION. */
+ struct hwi_with_prec
+ {
+ hwi_with_prec (HOST_WIDE_INT, unsigned int, signop);
+ HOST_WIDE_INT val;
+ unsigned int precision;
+ signop sgn;
+ };
+
+ hwi_with_prec shwi (HOST_WIDE_INT, unsigned int);
+ hwi_with_prec uhwi (unsigned HOST_WIDE_INT, unsigned int);
+
+ /* Convenience constructors for the common small constants. */
+ hwi_with_prec minus_one (unsigned int);
+ hwi_with_prec zero (unsigned int);
+ hwi_with_prec one (unsigned int);
+ hwi_with_prec two (unsigned int);
+}
+
+inline wi::hwi_with_prec::hwi_with_prec (HOST_WIDE_INT v, unsigned int p,
+ signop s)
+ : val (v), precision (p), sgn (s)
+{
+}
+
+/* Return a signed integer that has value VAL and precision PRECISION. */
+inline wi::hwi_with_prec
+wi::shwi (HOST_WIDE_INT val, unsigned int precision)
+{
+ return hwi_with_prec (val, precision, SIGNED);
+}
+
+/* Return an unsigned integer that has value VAL and precision PRECISION. */
+inline wi::hwi_with_prec
+wi::uhwi (unsigned HOST_WIDE_INT val, unsigned int precision)
+{
+ return hwi_with_prec (val, precision, UNSIGNED);
+}
+
+/* Return a wide int of -1 with precision PRECISION. */
+inline wi::hwi_with_prec
+wi::minus_one (unsigned int precision)
+{
+ return wi::shwi (-1, precision);
+}
+
+/* Return a wide int of 0 with precision PRECISION. */
+inline wi::hwi_with_prec
+wi::zero (unsigned int precision)
+{
+ return wi::shwi (0, precision);
+}
+
+/* Return a wide int of 1 with precision PRECISION. */
+inline wi::hwi_with_prec
+wi::one (unsigned int precision)
+{
+ return wi::shwi (1, precision);
+}
+
+/* Return a wide int of 2 with precision PRECISION. */
+inline wi::hwi_with_prec
+wi::two (unsigned int precision)
+{
+ return wi::shwi (2, precision);
+}
+
+namespace wi
+{
+ template <>
+ struct int_traits <wi::hwi_with_prec>
+ {
+ static const enum precision_type precision_type = VAR_PRECISION;
+ /* hwi_with_prec has an explicitly-given precision, rather than the
+ precision of HOST_WIDE_INT. */
+ static const bool host_dependent_precision = false;
+ static const bool is_sign_extended = true;
+ static unsigned int get_precision (const wi::hwi_with_prec &);
+ static wi::storage_ref decompose (HOST_WIDE_INT *, unsigned int,
+ const wi::hwi_with_prec &);
+ };
+}
+
+inline unsigned int
+wi::int_traits <wi::hwi_with_prec>::get_precision (const wi::hwi_with_prec &x)
+{
+ return x.precision;
+}
+
+/* Decompose X into SCRATCH; as with primitive types, an unsigned value
+ with the top bit set needs an extra zero block at wide precisions. */
+inline wi::storage_ref
+wi::int_traits <wi::hwi_with_prec>::
+decompose (HOST_WIDE_INT *scratch, unsigned int precision,
+ const wi::hwi_with_prec &x)
+{
+ gcc_checking_assert (precision == x.precision);
+ scratch[0] = x.val;
+ if (x.sgn == SIGNED || x.val >= 0 || precision <= HOST_BITS_PER_WIDE_INT)
+ return wi::storage_ref (scratch, 1, precision);
+ scratch[1] = 0;
+ return wi::storage_ref (scratch, 2, precision);
+}
+
+/* Private functions for handling large cases out of line.  They take
+ individual length and array parameters because that is cheaper for
+ the inline caller than constructing an object on the stack and
+ passing a reference to it.  (Although many callers use wide_int_refs,
+ we generally want those to be removed by SRA.)  Definitions live in
+ wide-int.cc. */
+namespace wi
+{
+ bool eq_p_large (const HOST_WIDE_INT *, unsigned int,
+ const HOST_WIDE_INT *, unsigned int, unsigned int);
+ bool lts_p_large (const HOST_WIDE_INT *, unsigned int, unsigned int,
+ const HOST_WIDE_INT *, unsigned int);
+ bool ltu_p_large (const HOST_WIDE_INT *, unsigned int, unsigned int,
+ const HOST_WIDE_INT *, unsigned int);
+ int cmps_large (const HOST_WIDE_INT *, unsigned int, unsigned int,
+ const HOST_WIDE_INT *, unsigned int);
+ int cmpu_large (const HOST_WIDE_INT *, unsigned int, unsigned int,
+ const HOST_WIDE_INT *, unsigned int);
+ unsigned int sext_large (HOST_WIDE_INT *, const HOST_WIDE_INT *,
+ unsigned int,
+ unsigned int, unsigned int);
+ unsigned int zext_large (HOST_WIDE_INT *, const HOST_WIDE_INT *,
+ unsigned int,
+ unsigned int, unsigned int);
+ unsigned int set_bit_large (HOST_WIDE_INT *, const HOST_WIDE_INT *,
+ unsigned int, unsigned int, unsigned int);
+ unsigned int lshift_large (HOST_WIDE_INT *, const HOST_WIDE_INT *,
+ unsigned int, unsigned int, unsigned int);
+ unsigned int lrshift_large (HOST_WIDE_INT *, const HOST_WIDE_INT *,
+ unsigned int, unsigned int, unsigned int,
+ unsigned int);
+ unsigned int arshift_large (HOST_WIDE_INT *, const HOST_WIDE_INT *,
+ unsigned int, unsigned int, unsigned int,
+ unsigned int);
+ unsigned int and_large (HOST_WIDE_INT *, const HOST_WIDE_INT *, unsigned int,
+ const HOST_WIDE_INT *, unsigned int, unsigned int);
+ unsigned int and_not_large (HOST_WIDE_INT *, const HOST_WIDE_INT *,
+ unsigned int, const HOST_WIDE_INT *,
+ unsigned int, unsigned int);
+ unsigned int or_large (HOST_WIDE_INT *, const HOST_WIDE_INT *, unsigned int,
+ const HOST_WIDE_INT *, unsigned int, unsigned int);
+ unsigned int or_not_large (HOST_WIDE_INT *, const HOST_WIDE_INT *,
+ unsigned int, const HOST_WIDE_INT *,
+ unsigned int, unsigned int);
+ unsigned int xor_large (HOST_WIDE_INT *, const HOST_WIDE_INT *, unsigned int,
+ const HOST_WIDE_INT *, unsigned int, unsigned int);
+ unsigned int add_large (HOST_WIDE_INT *, const HOST_WIDE_INT *, unsigned int,
+ const HOST_WIDE_INT *, unsigned int, unsigned int,
+ signop, bool *);
+ unsigned int sub_large (HOST_WIDE_INT *, const HOST_WIDE_INT *, unsigned int,
+ const HOST_WIDE_INT *, unsigned int, unsigned int,
+ signop, bool *);
+ unsigned int mul_internal (HOST_WIDE_INT *, const HOST_WIDE_INT *,
+ unsigned int, const HOST_WIDE_INT *,
+ unsigned int, unsigned int, signop, bool *,
+ bool);
+ unsigned int divmod_internal (HOST_WIDE_INT *, unsigned int *,
+ HOST_WIDE_INT *, const HOST_WIDE_INT *,
+ unsigned int, unsigned int,
+ const HOST_WIDE_INT *,
+ unsigned int, unsigned int,
+ signop, bool *);
+}
+
+/* Return the number of bits that integer X can hold. */
+template <typename T>
+inline unsigned int
+wi::get_precision (const T &x)
+{
+ return wi::int_traits <T>::get_precision (x);
+}
+
+/* Return the number of bits that the result of a binary operation can
+ hold when the input operands are X and Y. */
+template <typename T1, typename T2>
+inline unsigned int
+wi::get_binary_precision (const T1 &x, const T2 &y)
+{
+ return get_precision (wi::int_traits <WI_BINARY_RESULT (T1, T2)>::
+ get_binary_result (x, y));
+}
+
+/* Copy the contents of Y to X, but keeping X's current precision. */
+template <typename T1, typename T2>
+inline void
+wi::copy (T1 &x, const T2 &y)
+{
+ HOST_WIDE_INT *xval = x.write_val ();
+ const HOST_WIDE_INT *yval = y.get_val ();
+ unsigned int len = y.get_len ();
+ unsigned int i = 0;
+ /* A do-while is safe: an encoding always has at least one block. */
+ do
+ xval[i] = yval[i];
+ while (++i < len);
+ x.set_len (len, y.is_sign_extended);
+}
+
+/* Return true if X fits in a HOST_WIDE_INT with no loss of precision. */
+template <typename T>
+inline bool
+wi::fits_shwi_p (const T &x)
+{
+ WIDE_INT_REF_FOR (T) xi (x);
+ return xi.len == 1;
+}
+
+/* Return true if X fits in an unsigned HOST_WIDE_INT with no loss of
+ precision. */
+template <typename T>
+inline bool
+wi::fits_uhwi_p (const T &x)
+{
+ WIDE_INT_REF_FOR (T) xi (x);
+ if (xi.precision <= HOST_BITS_PER_WIDE_INT)
+ return true;
+ if (xi.len == 1)
+ return xi.slow () >= 0;
+ /* Two blocks fit only if the upper one is all zeros. */
+ return xi.len == 2 && xi.uhigh () == 0;
+}
+
+/* Return true if X is negative based on the interpretation of SGN.
+ For UNSIGNED, this is always false. */
+template <typename T>
+inline bool
+wi::neg_p (const T &x, signop sgn)
+{
+ WIDE_INT_REF_FOR (T) xi (x);
+ if (sgn == UNSIGNED)
+ return false;
+ return xi.sign_mask () < 0;
+}
+
+/* Return -1 if the top bit of X is set and 0 if the top bit is clear. */
+template <typename T>
+inline HOST_WIDE_INT
+wi::sign_mask (const T &x)
+{
+ WIDE_INT_REF_FOR (T) xi (x);
+ return xi.sign_mask ();
+}
+
+/* Return true if X == Y.  X and Y must be binary-compatible. */
+template <typename T1, typename T2>
+inline bool
+wi::eq_p (const T1 &x, const T2 &y)
+{
+ unsigned int precision = get_binary_precision (x, y);
+ WIDE_INT_REF_FOR (T1) xi (x, precision);
+ WIDE_INT_REF_FOR (T2) yi (y, precision);
+ if (xi.is_sign_extended && yi.is_sign_extended)
+ {
+ /* This case reduces to array equality. */
+ if (xi.len != yi.len)
+ return false;
+ unsigned int i = 0;
+ do
+ if (xi.val[i] != yi.val[i])
+ return false;
+ while (++i != xi.len);
+ return true;
+ }
+ if (__builtin_expect (yi.len == 1, true))
+ {
+ /* XI is only equal to YI if it too has a single HWI. */
+ if (xi.len != 1)
+ return false;
+ /* Excess bits in xi.val[0] will be signs or zeros, so comparisons
+ with 0 are simple. */
+ if (STATIC_CONSTANT_P (yi.val[0] == 0))
+ return xi.val[0] == 0;
+ /* Otherwise flush out any excess bits first: XOR the blocks and
+ shift the bits above PRECISION out of the difference. */
+ unsigned HOST_WIDE_INT diff = xi.val[0] ^ yi.val[0];
+ int excess = HOST_BITS_PER_WIDE_INT - precision;
+ if (excess > 0)
+ diff <<= excess;
+ return diff == 0;
+ }
+ return eq_p_large (xi.val, xi.len, yi.val, yi.len, precision);
+}
+
+/* Return true if X != Y.  X and Y must be binary-compatible. */
+template <typename T1, typename T2>
+inline bool
+wi::ne_p (const T1 &x, const T2 &y)
+{
+ return !eq_p (x, y);
+}
+
+/* Return true if X < Y when both are treated as signed values. */
+template <typename T1, typename T2>
+inline bool
+wi::lts_p (const T1 &x, const T2 &y)
+{
+ unsigned int precision = get_binary_precision (x, y);
+ WIDE_INT_REF_FOR (T1) xi (x, precision);
+ WIDE_INT_REF_FOR (T2) yi (y, precision);
+ /* We optimize x < y, where y fits in a single signed HWI. */
+ if (wi::fits_shwi_p (yi))
+ {
+ /* Make lts_p (x, 0) as efficient as wi::neg_p (x). */
+ if (STATIC_CONSTANT_P (yi.val[0] == 0))
+ return neg_p (xi);
+ /* If x fits directly into a shwi, we can compare directly. */
+ if (wi::fits_shwi_p (xi))
+ return xi.to_shwi () < yi.to_shwi ();
+ /* If x doesn't fit and is negative, then it must be more
+ negative than any value in y, and hence smaller than y. */
+ if (neg_p (xi))
+ return true;
+ /* If x is positive, then it must be larger than any value in y,
+ and hence greater than y. */
+ return false;
+ }
+ /* Optimize the opposite case, if it can be detected at compile time. */
+ if (STATIC_CONSTANT_P (xi.len == 1))
+ /* If YI is negative it is lower than the least HWI.
+ If YI is positive it is greater than the greatest HWI. */
+ return !neg_p (yi);
+ return lts_p_large (xi.val, xi.len, precision, yi.val, yi.len);
+}
+
+/* Return true if X < Y when both are treated as unsigned values. */
+template <typename T1, typename T2>
+inline bool
+wi::ltu_p (const T1 &x, const T2 &y)
+{
+ unsigned int precision = get_binary_precision (x, y);
+ WIDE_INT_REF_FOR (T1) xi (x, precision);
+ WIDE_INT_REF_FOR (T2) yi (y, precision);
+ /* Optimize comparisons with constants. */
+ if (STATIC_CONSTANT_P (yi.len == 1 && yi.val[0] >= 0))
+ return xi.len == 1 && xi.to_uhwi () < (unsigned HOST_WIDE_INT) yi.val[0];
+ if (STATIC_CONSTANT_P (xi.len == 1 && xi.val[0] >= 0))
+ return yi.len != 1 || yi.to_uhwi () > (unsigned HOST_WIDE_INT) xi.val[0];
+ /* Optimize the case of two HWIs.  The HWIs are implicitly sign-extended
+ for precisions greater than HOST_BITS_PER_WIDE_INT, but sign-extending
+ both values does not change the result. */
+ if (__builtin_expect (xi.len + yi.len == 2, true))
+ {
+ unsigned HOST_WIDE_INT xl = xi.to_uhwi ();
+ unsigned HOST_WIDE_INT yl = yi.to_uhwi ();
+ return xl < yl;
+ }
+ return ltu_p_large (xi.val, xi.len, precision, yi.val, yi.len);
+}
+
+/* Return true if X < Y.  Signedness of X and Y is indicated by SGN. */
+template <typename T1, typename T2>
+inline bool
+wi::lt_p (const T1 &x, const T2 &y, signop sgn)
+{
+ if (sgn == SIGNED)
+ return lts_p (x, y);
+ else
+ return ltu_p (x, y);
+}
+
+/* Return true if X <= Y when both are treated as signed values.
+ (The remaining comparisons are all defined in terms of lts_p/ltu_p
+ by argument swapping and/or negation.) */
+template <typename T1, typename T2>
+inline bool
+wi::les_p (const T1 &x, const T2 &y)
+{
+ return !lts_p (y, x);
+}
+
+/* Return true if X <= Y when both are treated as unsigned values. */
+template <typename T1, typename T2>
+inline bool
+wi::leu_p (const T1 &x, const T2 &y)
+{
+ return !ltu_p (y, x);
+}
+
+/* Return true if X <= Y.  Signedness of X and Y is indicated by SGN. */
+template <typename T1, typename T2>
+inline bool
+wi::le_p (const T1 &x, const T2 &y, signop sgn)
+{
+ if (sgn == SIGNED)
+ return les_p (x, y);
+ else
+ return leu_p (x, y);
+}
+
+/* Return true if X > Y when both are treated as signed values. */
+template <typename T1, typename T2>
+inline bool
+wi::gts_p (const T1 &x, const T2 &y)
+{
+ return lts_p (y, x);
+}
+
+/* Return true if X > Y when both are treated as unsigned values. */
+template <typename T1, typename T2>
+inline bool
+wi::gtu_p (const T1 &x, const T2 &y)
+{
+ return ltu_p (y, x);
+}
+
+/* Return true if X > Y.  Signedness of X and Y is indicated by SGN. */
+template <typename T1, typename T2>
+inline bool
+wi::gt_p (const T1 &x, const T2 &y, signop sgn)
+{
+ if (sgn == SIGNED)
+ return gts_p (x, y);
+ else
+ return gtu_p (x, y);
+}
+
+/* Return true if X >= Y when both are treated as signed values. */
+template <typename T1, typename T2>
+inline bool
+wi::ges_p (const T1 &x, const T2 &y)
+{
+ return !lts_p (x, y);
+}
+
+/* Return true if X >= Y when both are treated as unsigned values. */
+template <typename T1, typename T2>
+inline bool
+wi::geu_p (const T1 &x, const T2 &y)
+{
+ return !ltu_p (x, y);
+}
+
+/* Return true if X >= Y.  Signedness of X and Y is indicated by SGN. */
+template <typename T1, typename T2>
+inline bool
+wi::ge_p (const T1 &x, const T2 &y, signop sgn)
+{
+ if (sgn == SIGNED)
+ return ges_p (x, y);
+ else
+ return geu_p (x, y);
+}
+
+/* Return -1 if X < Y, 0 if X == Y and 1 if X > Y.  Treat both X and Y
+ as signed values. */
+template <typename T1, typename T2>
+inline int
+wi::cmps (const T1 &x, const T2 &y)
+{
+ unsigned int precision = get_binary_precision (x, y);
+ WIDE_INT_REF_FOR (T1) xi (x, precision);
+ WIDE_INT_REF_FOR (T2) yi (y, precision);
+ if (wi::fits_shwi_p (yi))
+ {
+ /* Special case for comparisons with 0. */
+ if (STATIC_CONSTANT_P (yi.val[0] == 0))
+ return neg_p (xi) ? -1 : !(xi.len == 1 && xi.val[0] == 0);
+ /* If x fits into a signed HWI, we can compare directly. */
+ if (wi::fits_shwi_p (xi))
+ {
+ HOST_WIDE_INT xl = xi.to_shwi ();
+ HOST_WIDE_INT yl = yi.to_shwi ();
+ return xl < yl ? -1 : xl > yl;
+ }
+ /* If x doesn't fit and is negative, then it must be more
+ negative than any signed HWI, and hence smaller than y. */
+ if (neg_p (xi))
+ return -1;
+ /* If x is positive, then it must be larger than any signed HWI,
+ and hence greater than y. */
+ return 1;
+ }
+ /* Optimize the opposite case, if it can be detected at compile time. */
+ if (STATIC_CONSTANT_P (xi.len == 1))
+ /* If YI is negative it is lower than the least HWI.
+ If YI is positive it is greater than the greatest HWI. */
+ return neg_p (yi) ? 1 : -1;
+ return cmps_large (xi.val, xi.len, precision, yi.val, yi.len);
+}
+
+/* Return -1 if X < Y, 0 if X == Y and 1 if X > Y.  Treat both X and Y
+ as unsigned values. */
+template <typename T1, typename T2>
+inline int
+wi::cmpu (const T1 &x, const T2 &y)
+{
+ unsigned int precision = get_binary_precision (x, y);
+ WIDE_INT_REF_FOR (T1) xi (x, precision);
+ WIDE_INT_REF_FOR (T2) yi (y, precision);
+ /* Optimize comparisons with constants. */
+ if (STATIC_CONSTANT_P (yi.len == 1 && yi.val[0] >= 0))
+ {
+ /* If XI doesn't fit in a HWI then it must be larger than YI. */
+ if (xi.len != 1)
+ return 1;
+ /* Otherwise compare directly. */
+ unsigned HOST_WIDE_INT xl = xi.to_uhwi ();
+ unsigned HOST_WIDE_INT yl = yi.val[0];
+ return xl < yl ? -1 : xl > yl;
+ }
+ if (STATIC_CONSTANT_P (xi.len == 1 && xi.val[0] >= 0))
+ {
+ /* If YI doesn't fit in a HWI then it must be larger than XI. */
+ if (yi.len != 1)
+ return -1;
+ /* Otherwise compare directly. */
+ unsigned HOST_WIDE_INT xl = xi.val[0];
+ unsigned HOST_WIDE_INT yl = yi.to_uhwi ();
+ return xl < yl ? -1 : xl > yl;
+ }
+ /* Optimize the case of two HWIs.  The HWIs are implicitly sign-extended
+ for precisions greater than HOST_BITS_PER_WIDE_INT, but sign-extending
+ both values does not change the result. */
+ if (__builtin_expect (xi.len + yi.len == 2, true))
+ {
+ unsigned HOST_WIDE_INT xl = xi.to_uhwi ();
+ unsigned HOST_WIDE_INT yl = yi.to_uhwi ();
+ return xl < yl ? -1 : xl > yl;
+ }
+ return cmpu_large (xi.val, xi.len, precision, yi.val, yi.len);
+}
+
+/* Return -1 if X < Y, 0 if X == Y and 1 if X > Y. Signedness of
+ X and Y indicated by SGN. */
+template <typename T1, typename T2>
+inline int
+wi::cmp (const T1 &x, const T2 &y, signop sgn)
+{
+ if (sgn == SIGNED)
+ return cmps (x, y);
+ else
+ return cmpu (x, y);
+}
+
+/* Return ~x. */
+template <typename T>
+inline WI_UNARY_RESULT (T)
+wi::bit_not (const T &x)
+{
+ WI_UNARY_RESULT_VAR (result, val, T, x);
+ WIDE_INT_REF_FOR (T) xi (x, get_precision (result));
+ /* Complement each HWI of the representation; the number of
+ significant HWIs is unchanged. */
+ for (unsigned int i = 0; i < xi.len; ++i)
+ val[i] = ~xi.val[i];
+ result.set_len (xi.len);
+ return result;
+}
+
+/* Return -x. */
+template <typename T>
+inline WI_UNARY_RESULT (T)
+wi::neg (const T &x)
+{
+ /* Negation is implemented as 0 - X. */
+ return sub (0, x);
+}
+
+/* Return -x. Indicate in *OVERFLOW if X is the minimum signed value. */
+template <typename T>
+inline WI_UNARY_RESULT (T)
+wi::neg (const T &x, bool *overflow)
+{
+ /* Negating the minimum signed value is the only case that overflows. */
+ *overflow = only_sign_bit_p (x);
+ return sub (0, x);
+}
+
+/* Return the absolute value of x. */
+template <typename T>
+inline WI_UNARY_RESULT (T)
+wi::abs (const T &x)
+{
+ return neg_p (x) ? neg (x) : WI_UNARY_RESULT (T) (x);
+}
+
+/* Return the result of sign-extending the low OFFSET bits of X. */
+template <typename T>
+inline WI_UNARY_RESULT (T)
+wi::sext (const T &x, unsigned int offset)
+{
+ WI_UNARY_RESULT_VAR (result, val, T, x);
+ unsigned int precision = get_precision (result);
+ WIDE_INT_REF_FOR (T) xi (x, precision);
+
+ /* NOTE: <= here (vs < in zext) — presumably sext_hwi handles a
+ full-HWI extension when OFFSET == HOST_BITS_PER_WIDE_INT. */
+ if (offset <= HOST_BITS_PER_WIDE_INT)
+ {
+ val[0] = sext_hwi (xi.ulow (), offset);
+ result.set_len (1, true);
+ }
+ else
+ result.set_len (sext_large (val, xi.val, xi.len, precision, offset));
+ return result;
+}
+
+/* Return the result of zero-extending the low OFFSET bits of X. */
+template <typename T>
+inline WI_UNARY_RESULT (T)
+wi::zext (const T &x, unsigned int offset)
+{
+ WI_UNARY_RESULT_VAR (result, val, T, x);
+ unsigned int precision = get_precision (result);
+ WIDE_INT_REF_FOR (T) xi (x, precision);
+
+ /* This is not just an optimization, it is actually required to
+ maintain canonicalization. */
+ if (offset >= precision)
+ {
+ wi::copy (result, xi);
+ return result;
+ }
+
+ /* In these cases we know that at least the top bit will be clear,
+ so no sign extension is necessary. */
+ if (offset < HOST_BITS_PER_WIDE_INT)
+ {
+ val[0] = zext_hwi (xi.ulow (), offset);
+ result.set_len (1, true);
+ }
+ else
+ result.set_len (zext_large (val, xi.val, xi.len, precision, offset), true);
+ return result;
+}
+
+/* Return the result of extending the low OFFSET bits of X according to
+ signedness SGN. */
+template <typename T>
+inline WI_UNARY_RESULT (T)
+wi::ext (const T &x, unsigned int offset, signop sgn)
+{
+ return sgn == SIGNED ? sext (x, offset) : zext (x, offset);
+}
+
+/* Return an integer that represents X | (1 << bit). */
+template <typename T>
+inline WI_UNARY_RESULT (T)
+wi::set_bit (const T &x, unsigned int bit)
+{
+ WI_UNARY_RESULT_VAR (result, val, T, x);
+ unsigned int precision = get_precision (result);
+ WIDE_INT_REF_FOR (T) xi (x, precision);
+ if (precision <= HOST_BITS_PER_WIDE_INT)
+ {
+ /* Assumes BIT < PRECISION; the shift below would be undefined
+ for BIT >= HOST_BITS_PER_WIDE_INT. */
+ val[0] = xi.ulow () | ((unsigned HOST_WIDE_INT) 1 << bit);
+ result.set_len (1);
+ }
+ else
+ result.set_len (set_bit_large (val, xi.val, xi.len, precision, bit));
+ return result;
+}
+
+/* Return the minimum of X and Y, treating them both as having
+ signedness SGN. */
+template <typename T1, typename T2>
+inline WI_BINARY_RESULT (T1, T2)
+wi::min (const T1 &x, const T2 &y, signop sgn)
+{
+ WI_BINARY_RESULT_VAR (result, val ATTRIBUTE_UNUSED, T1, x, T2, y);
+ unsigned int precision = get_precision (result);
+ if (wi::le_p (x, y, sgn))
+ wi::copy (result, WIDE_INT_REF_FOR (T1) (x, precision));
+ else
+ wi::copy (result, WIDE_INT_REF_FOR (T2) (y, precision));
+ return result;
+}
+
+/* Return the minimum of X and Y, treating both as signed values. */
+template <typename T1, typename T2>
+inline WI_BINARY_RESULT (T1, T2)
+wi::smin (const T1 &x, const T2 &y)
+{
+ return min (x, y, SIGNED);
+}
+
+/* Return the minimum of X and Y, treating both as unsigned values. */
+template <typename T1, typename T2>
+inline WI_BINARY_RESULT (T1, T2)
+wi::umin (const T1 &x, const T2 &y)
+{
+ return min (x, y, UNSIGNED);
+}
+
+/* Return the maximum of X and Y, treating them both as having
+ signedness SGN. */
+template <typename T1, typename T2>
+inline WI_BINARY_RESULT (T1, T2)
+wi::max (const T1 &x, const T2 &y, signop sgn)
+{
+ WI_BINARY_RESULT_VAR (result, val ATTRIBUTE_UNUSED, T1, x, T2, y);
+ unsigned int precision = get_precision (result);
+ if (wi::ge_p (x, y, sgn))
+ wi::copy (result, WIDE_INT_REF_FOR (T1) (x, precision));
+ else
+ wi::copy (result, WIDE_INT_REF_FOR (T2) (y, precision));
+ return result;
+}
+
+/* Return the maximum of X and Y, treating both as signed values. */
+template <typename T1, typename T2>
+inline WI_BINARY_RESULT (T1, T2)
+wi::smax (const T1 &x, const T2 &y)
+{
+ return max (x, y, SIGNED);
+}
+
+/* Return the maximum of X and Y, treating both as unsigned values. */
+template <typename T1, typename T2>
+inline WI_BINARY_RESULT (T1, T2)
+wi::umax (const T1 &x, const T2 &y)
+{
+ return max (x, y, UNSIGNED);
+}
+
+/* Return X & Y. */
+template <typename T1, typename T2>
+inline WI_BINARY_RESULT (T1, T2)
+wi::bit_and (const T1 &x, const T2 &y)
+{
+ WI_BINARY_RESULT_VAR (result, val, T1, x, T2, y);
+ unsigned int precision = get_precision (result);
+ WIDE_INT_REF_FOR (T1) xi (x, precision);
+ WIDE_INT_REF_FOR (T2) yi (y, precision);
+ bool is_sign_extended = xi.is_sign_extended && yi.is_sign_extended;
+ /* Fast path: both operands are single-HWI (len + len == 2). */
+ if (__builtin_expect (xi.len + yi.len == 2, true))
+ {
+ val[0] = xi.ulow () & yi.ulow ();
+ result.set_len (1, is_sign_extended);
+ }
+ else
+ result.set_len (and_large (val, xi.val, xi.len, yi.val, yi.len,
+ precision), is_sign_extended);
+ return result;
+}
+
+/* Return X & ~Y. */
+template <typename T1, typename T2>
+inline WI_BINARY_RESULT (T1, T2)
+wi::bit_and_not (const T1 &x, const T2 &y)
+{
+ WI_BINARY_RESULT_VAR (result, val, T1, x, T2, y);
+ unsigned int precision = get_precision (result);
+ WIDE_INT_REF_FOR (T1) xi (x, precision);
+ WIDE_INT_REF_FOR (T2) yi (y, precision);
+ bool is_sign_extended = xi.is_sign_extended && yi.is_sign_extended;
+ if (__builtin_expect (xi.len + yi.len == 2, true))
+ {
+ val[0] = xi.ulow () & ~yi.ulow ();
+ result.set_len (1, is_sign_extended);
+ }
+ else
+ result.set_len (and_not_large (val, xi.val, xi.len, yi.val, yi.len,
+ precision), is_sign_extended);
+ return result;
+}
+
+/* Return X | Y. */
+template <typename T1, typename T2>
+inline WI_BINARY_RESULT (T1, T2)
+wi::bit_or (const T1 &x, const T2 &y)
+{
+ WI_BINARY_RESULT_VAR (result, val, T1, x, T2, y);
+ unsigned int precision = get_precision (result);
+ WIDE_INT_REF_FOR (T1) xi (x, precision);
+ WIDE_INT_REF_FOR (T2) yi (y, precision);
+ bool is_sign_extended = xi.is_sign_extended && yi.is_sign_extended;
+ if (__builtin_expect (xi.len + yi.len == 2, true))
+ {
+ val[0] = xi.ulow () | yi.ulow ();
+ result.set_len (1, is_sign_extended);
+ }
+ else
+ result.set_len (or_large (val, xi.val, xi.len,
+ yi.val, yi.len, precision), is_sign_extended);
+ return result;
+}
+
+/* Return X | ~Y. */
+template <typename T1, typename T2>
+inline WI_BINARY_RESULT (T1, T2)
+wi::bit_or_not (const T1 &x, const T2 &y)
+{
+ WI_BINARY_RESULT_VAR (result, val, T1, x, T2, y);
+ unsigned int precision = get_precision (result);
+ WIDE_INT_REF_FOR (T1) xi (x, precision);
+ WIDE_INT_REF_FOR (T2) yi (y, precision);
+ bool is_sign_extended = xi.is_sign_extended && yi.is_sign_extended;
+ if (__builtin_expect (xi.len + yi.len == 2, true))
+ {
+ val[0] = xi.ulow () | ~yi.ulow ();
+ result.set_len (1, is_sign_extended);
+ }
+ else
+ result.set_len (or_not_large (val, xi.val, xi.len, yi.val, yi.len,
+ precision), is_sign_extended);
+ return result;
+}
+
+/* Return X ^ Y. */
+template <typename T1, typename T2>
+inline WI_BINARY_RESULT (T1, T2)
+wi::bit_xor (const T1 &x, const T2 &y)
+{
+ WI_BINARY_RESULT_VAR (result, val, T1, x, T2, y);
+ unsigned int precision = get_precision (result);
+ WIDE_INT_REF_FOR (T1) xi (x, precision);
+ WIDE_INT_REF_FOR (T2) yi (y, precision);
+ bool is_sign_extended = xi.is_sign_extended && yi.is_sign_extended;
+ if (__builtin_expect (xi.len + yi.len == 2, true))
+ {
+ val[0] = xi.ulow () ^ yi.ulow ();
+ result.set_len (1, is_sign_extended);
+ }
+ else
+ result.set_len (xor_large (val, xi.val, xi.len,
+ yi.val, yi.len, precision), is_sign_extended);
+ return result;
+}
+
+/* Return X + Y. */
+template <typename T1, typename T2>
+inline WI_BINARY_RESULT (T1, T2)
+wi::add (const T1 &x, const T2 &y)
+{
+ WI_BINARY_RESULT_VAR (result, val, T1, x, T2, y);
+ unsigned int precision = get_precision (result);
+ WIDE_INT_REF_FOR (T1) xi (x, precision);
+ WIDE_INT_REF_FOR (T2) yi (y, precision);
+ if (precision <= HOST_BITS_PER_WIDE_INT)
+ {
+ val[0] = xi.ulow () + yi.ulow ();
+ result.set_len (1);
+ }
+ /* If the precision is known at compile time to be greater than
+ HOST_BITS_PER_WIDE_INT, we can optimize the single-HWI case
+ knowing that (a) all bits in those HWIs are significant and
+ (b) the result has room for at least two HWIs. This provides
+ a fast path for things like offset_int and widest_int.
+
+ The STATIC_CONSTANT_P test prevents this path from being
+ used for wide_ints. wide_ints with precisions greater than
+ HOST_BITS_PER_WIDE_INT are relatively rare and there's not much
+ point handling them inline. */
+ else if (STATIC_CONSTANT_P (precision > HOST_BITS_PER_WIDE_INT)
+ && __builtin_expect (xi.len + yi.len == 2, true))
+ {
+ unsigned HOST_WIDE_INT xl = xi.ulow ();
+ unsigned HOST_WIDE_INT yl = yi.ulow ();
+ unsigned HOST_WIDE_INT resultl = xl + yl;
+ val[0] = resultl;
+ /* val[1] only matters when the signed HWI addition overflowed
+ (len == 2 below); in that case the true sign of the sum is the
+ opposite of RESULTL's sign bit. */
+ val[1] = (HOST_WIDE_INT) resultl < 0 ? 0 : -1;
+ /* The shifted expression is 1 exactly when the signed addition
+ overflowed, in which case a second HWI is needed. */
+ result.set_len (1 + (((resultl ^ xl) & (resultl ^ yl))
+ >> (HOST_BITS_PER_WIDE_INT - 1)));
+ }
+ else
+ result.set_len (add_large (val, xi.val, xi.len,
+ yi.val, yi.len, precision,
+ UNSIGNED, 0));
+ return result;
+}
+
+/* Return X + Y. Treat X and Y as having the signedness given by SGN
+ and indicate in *OVERFLOW whether the operation overflowed. */
+template <typename T1, typename T2>
+inline WI_BINARY_RESULT (T1, T2)
+wi::add (const T1 &x, const T2 &y, signop sgn, bool *overflow)
+{
+ WI_BINARY_RESULT_VAR (result, val, T1, x, T2, y);
+ unsigned int precision = get_precision (result);
+ WIDE_INT_REF_FOR (T1) xi (x, precision);
+ WIDE_INT_REF_FOR (T2) yi (y, precision);
+ if (precision <= HOST_BITS_PER_WIDE_INT)
+ {
+ unsigned HOST_WIDE_INT xl = xi.ulow ();
+ unsigned HOST_WIDE_INT yl = yi.ulow ();
+ unsigned HOST_WIDE_INT resultl = xl + yl;
+ if (sgn == SIGNED)
+ *overflow = (((resultl ^ xl) & (resultl ^ yl))
+ >> (precision - 1)) & 1;
+ else
+ /* Unsigned overflow: compare the values with the unused upper
+ bits shifted out; a wrapped sum is smaller than an operand. */
+ *overflow = ((resultl << (HOST_BITS_PER_WIDE_INT - precision))
+ < (xl << (HOST_BITS_PER_WIDE_INT - precision)));
+ val[0] = resultl;
+ result.set_len (1);
+ }
+ else
+ result.set_len (add_large (val, xi.val, xi.len,
+ yi.val, yi.len, precision,
+ sgn, overflow));
+ return result;
+}
+
+/* Return X - Y. */
+template <typename T1, typename T2>
+inline WI_BINARY_RESULT (T1, T2)
+wi::sub (const T1 &x, const T2 &y)
+{
+ WI_BINARY_RESULT_VAR (result, val, T1, x, T2, y);
+ unsigned int precision = get_precision (result);
+ WIDE_INT_REF_FOR (T1) xi (x, precision);
+ WIDE_INT_REF_FOR (T2) yi (y, precision);
+ if (precision <= HOST_BITS_PER_WIDE_INT)
+ {
+ val[0] = xi.ulow () - yi.ulow ();
+ result.set_len (1);
+ }
+ /* If the precision is known at compile time to be greater than
+ HOST_BITS_PER_WIDE_INT, we can optimize the single-HWI case
+ knowing that (a) all bits in those HWIs are significant and
+ (b) the result has room for at least two HWIs. This provides
+ a fast path for things like offset_int and widest_int.
+
+ The STATIC_CONSTANT_P test prevents this path from being
+ used for wide_ints. wide_ints with precisions greater than
+ HOST_BITS_PER_WIDE_INT are relatively rare and there's not much
+ point handling them inline. */
+ else if (STATIC_CONSTANT_P (precision > HOST_BITS_PER_WIDE_INT)
+ && __builtin_expect (xi.len + yi.len == 2, true))
+ {
+ unsigned HOST_WIDE_INT xl = xi.ulow ();
+ unsigned HOST_WIDE_INT yl = yi.ulow ();
+ unsigned HOST_WIDE_INT resultl = xl - yl;
+ val[0] = resultl;
+ /* val[1] only matters when the signed HWI subtraction overflowed
+ (len == 2 below); in that case the true sign of the difference
+ is the opposite of RESULTL's sign bit. */
+ val[1] = (HOST_WIDE_INT) resultl < 0 ? 0 : -1;
+ /* The shifted expression is 1 exactly when the signed subtraction
+ overflowed, in which case a second HWI is needed. */
+ result.set_len (1 + (((resultl ^ xl) & (xl ^ yl))
+ >> (HOST_BITS_PER_WIDE_INT - 1)));
+ }
+ else
+ result.set_len (sub_large (val, xi.val, xi.len,
+ yi.val, yi.len, precision,
+ UNSIGNED, 0));
+ return result;
+}
+
+/* Return X - Y. Treat X and Y as having the signedness given by SGN
+ and indicate in *OVERFLOW whether the operation overflowed. */
+template <typename T1, typename T2>
+inline WI_BINARY_RESULT (T1, T2)
+wi::sub (const T1 &x, const T2 &y, signop sgn, bool *overflow)
+{
+ WI_BINARY_RESULT_VAR (result, val, T1, x, T2, y);
+ unsigned int precision = get_precision (result);
+ WIDE_INT_REF_FOR (T1) xi (x, precision);
+ WIDE_INT_REF_FOR (T2) yi (y, precision);
+ if (precision <= HOST_BITS_PER_WIDE_INT)
+ {
+ unsigned HOST_WIDE_INT xl = xi.ulow ();
+ unsigned HOST_WIDE_INT yl = yi.ulow ();
+ unsigned HOST_WIDE_INT resultl = xl - yl;
+ if (sgn == SIGNED)
+ *overflow = (((xl ^ yl) & (resultl ^ xl)) >> (precision - 1)) & 1;
+ else
+ /* Unsigned overflow: after discarding the unused upper bits,
+ a wrapped difference is larger than the minuend. */
+ *overflow = ((resultl << (HOST_BITS_PER_WIDE_INT - precision))
+ > (xl << (HOST_BITS_PER_WIDE_INT - precision)));
+ val[0] = resultl;
+ result.set_len (1);
+ }
+ else
+ result.set_len (sub_large (val, xi.val, xi.len,
+ yi.val, yi.len, precision,
+ sgn, overflow));
+ return result;
+}
+
+/* Return X * Y. */
+template <typename T1, typename T2>
+inline WI_BINARY_RESULT (T1, T2)
+wi::mul (const T1 &x, const T2 &y)
+{
+ WI_BINARY_RESULT_VAR (result, val, T1, x, T2, y);
+ unsigned int precision = get_precision (result);
+ WIDE_INT_REF_FOR (T1) xi (x, precision);
+ WIDE_INT_REF_FOR (T2) yi (y, precision);
+ if (precision <= HOST_BITS_PER_WIDE_INT)
+ {
+ val[0] = xi.ulow () * yi.ulow ();
+ result.set_len (1);
+ }
+ else
+ result.set_len (mul_internal (val, xi.val, xi.len, yi.val, yi.len,
+ precision, UNSIGNED, 0, false));
+ return result;
+}
+
+/* Return X * Y. Treat X and Y as having the signedness given by SGN
+ and indicate in *OVERFLOW whether the operation overflowed. */
+template <typename T1, typename T2>
+inline WI_BINARY_RESULT (T1, T2)
+wi::mul (const T1 &x, const T2 &y, signop sgn, bool *overflow)
+{
+ WI_BINARY_RESULT_VAR (result, val, T1, x, T2, y);
+ unsigned int precision = get_precision (result);
+ WIDE_INT_REF_FOR (T1) xi (x, precision);
+ WIDE_INT_REF_FOR (T2) yi (y, precision);
+ result.set_len (mul_internal (val, xi.val, xi.len,
+ yi.val, yi.len, precision,
+ sgn, overflow, false));
+ return result;
+}
+
+/* Return X * Y, treating both X and Y as signed values. Indicate in
+ *OVERFLOW whether the operation overflowed. */
+template <typename T1, typename T2>
+inline WI_BINARY_RESULT (T1, T2)
+wi::smul (const T1 &x, const T2 &y, bool *overflow)
+{
+ return mul (x, y, SIGNED, overflow);
+}
+
+/* Return X * Y, treating both X and Y as unsigned values. Indicate in
+ *OVERFLOW whether the operation overflowed. */
+template <typename T1, typename T2>
+inline WI_BINARY_RESULT (T1, T2)
+wi::umul (const T1 &x, const T2 &y, bool *overflow)
+{
+ return mul (x, y, UNSIGNED, overflow);
+}
+
+/* Perform a widening multiplication of X and Y, extending the values
+ according to SGN, and return the high part of the result. */
+template <typename T1, typename T2>
+inline WI_BINARY_RESULT (T1, T2)
+wi::mul_high (const T1 &x, const T2 &y, signop sgn)
+{
+ WI_BINARY_RESULT_VAR (result, val, T1, x, T2, y);
+ unsigned int precision = get_precision (result);
+ WIDE_INT_REF_FOR (T1) xi (x, precision);
+ WIDE_INT_REF_FOR (T2) yi (y, precision);
+ /* The trailing arguments request no overflow tracking and the
+ high half of the product. */
+ result.set_len (mul_internal (val, xi.val, xi.len,
+ yi.val, yi.len, precision,
+ sgn, 0, true));
+ return result;
+}
+
+/* Return X / Y, rounding towards 0. Treat X and Y as having the
+ signedness given by SGN. Indicate in *OVERFLOW if the result
+ overflows. */
+template <typename T1, typename T2>
+inline WI_BINARY_RESULT (T1, T2)
+wi::div_trunc (const T1 &x, const T2 &y, signop sgn, bool *overflow)
+{
+ WI_BINARY_RESULT_VAR (quotient, quotient_val, T1, x, T2, y);
+ unsigned int precision = get_precision (quotient);
+ WIDE_INT_REF_FOR (T1) xi (x, precision);
+ WIDE_INT_REF_FOR (T2) yi (y);
+
+ /* The first 0, 0 arguments mean no remainder is wanted. */
+ quotient.set_len (divmod_internal (quotient_val, 0, 0, xi.val, xi.len,
+ precision,
+ yi.val, yi.len, yi.precision,
+ sgn, overflow));
+ return quotient;
+}
+
+/* Return X / Y, rounding towards 0. Treat X and Y as signed values. */
+template <typename T1, typename T2>
+inline WI_BINARY_RESULT (T1, T2)
+wi::sdiv_trunc (const T1 &x, const T2 &y)
+{
+ return div_trunc (x, y, SIGNED);
+}
+
+/* Return X / Y, rounding towards 0. Treat X and Y as unsigned values. */
+template <typename T1, typename T2>
+inline WI_BINARY_RESULT (T1, T2)
+wi::udiv_trunc (const T1 &x, const T2 &y)
+{
+ return div_trunc (x, y, UNSIGNED);
+}
+
+/* Return X / Y, rounding towards -inf. Treat X and Y as having the
+ signedness given by SGN. Indicate in *OVERFLOW if the result
+ overflows. */
+template <typename T1, typename T2>
+inline WI_BINARY_RESULT (T1, T2)
+wi::div_floor (const T1 &x, const T2 &y, signop sgn, bool *overflow)
+{
+ WI_BINARY_RESULT_VAR (quotient, quotient_val, T1, x, T2, y);
+ WI_BINARY_RESULT_VAR (remainder, remainder_val, T1, x, T2, y);
+ unsigned int precision = get_precision (quotient);
+ WIDE_INT_REF_FOR (T1) xi (x, precision);
+ WIDE_INT_REF_FOR (T2) yi (y);
+
+ unsigned int remainder_len;
+ quotient.set_len (divmod_internal (quotient_val,
+ &remainder_len, remainder_val,
+ xi.val, xi.len, precision,
+ yi.val, yi.len, yi.precision, sgn,
+ overflow));
+ remainder.set_len (remainder_len);
+ /* An inexact quotient with operands of opposite sign truncated
+ towards zero, i.e. upwards — step it back down by one. */
+ if (wi::neg_p (x, sgn) != wi::neg_p (y, sgn) && remainder != 0)
+ return quotient - 1;
+ return quotient;
+}
+
+/* Return X / Y, rounding towards -inf. Treat X and Y as signed values. */
+template <typename T1, typename T2>
+inline WI_BINARY_RESULT (T1, T2)
+wi::sdiv_floor (const T1 &x, const T2 &y)
+{
+ return div_floor (x, y, SIGNED);
+}
+
+/* Return X / Y, rounding towards -inf. Treat X and Y as unsigned values. */
+/* ??? Why do we have both this and udiv_trunc. Aren't they the same? */
+template <typename T1, typename T2>
+inline WI_BINARY_RESULT (T1, T2)
+wi::udiv_floor (const T1 &x, const T2 &y)
+{
+ return div_floor (x, y, UNSIGNED);
+}
+
+/* Return X / Y, rounding towards +inf. Treat X and Y as having the
+ signedness given by SGN. Indicate in *OVERFLOW if the result
+ overflows. */
+template <typename T1, typename T2>
+inline WI_BINARY_RESULT (T1, T2)
+wi::div_ceil (const T1 &x, const T2 &y, signop sgn, bool *overflow)
+{
+ WI_BINARY_RESULT_VAR (quotient, quotient_val, T1, x, T2, y);
+ WI_BINARY_RESULT_VAR (remainder, remainder_val, T1, x, T2, y);
+ unsigned int precision = get_precision (quotient);
+ WIDE_INT_REF_FOR (T1) xi (x, precision);
+ WIDE_INT_REF_FOR (T2) yi (y);
+
+ unsigned int remainder_len;
+ quotient.set_len (divmod_internal (quotient_val,
+ &remainder_len, remainder_val,
+ xi.val, xi.len, precision,
+ yi.val, yi.len, yi.precision, sgn,
+ overflow));
+ remainder.set_len (remainder_len);
+ /* An inexact quotient with operands of the same sign truncated
+ towards zero, i.e. downwards — step it back up by one. */
+ if (wi::neg_p (x, sgn) == wi::neg_p (y, sgn) && remainder != 0)
+ return quotient + 1;
+ return quotient;
+}
+
+/* Return X / Y, rounding towards nearest with ties away from zero.
+ Treat X and Y as having the signedness given by SGN. Indicate
+ in *OVERFLOW if the result overflows. */
+template <typename T1, typename T2>
+inline WI_BINARY_RESULT (T1, T2)
+wi::div_round (const T1 &x, const T2 &y, signop sgn, bool *overflow)
+{
+ WI_BINARY_RESULT_VAR (quotient, quotient_val, T1, x, T2, y);
+ WI_BINARY_RESULT_VAR (remainder, remainder_val, T1, x, T2, y);
+ unsigned int precision = get_precision (quotient);
+ WIDE_INT_REF_FOR (T1) xi (x, precision);
+ WIDE_INT_REF_FOR (T2) yi (y);
+
+ unsigned int remainder_len;
+ quotient.set_len (divmod_internal (quotient_val,
+ &remainder_len, remainder_val,
+ xi.val, xi.len, precision,
+ yi.val, yi.len, yi.precision, sgn,
+ overflow));
+ remainder.set_len (remainder_len);
+
+ if (remainder != 0)
+ {
+ if (sgn == SIGNED)
+ {
+ /* Adjust away from zero when |remainder| >= |y| / 2. */
+ if (wi::ges_p (wi::abs (remainder),
+ wi::lrshift (wi::abs (y), 1)))
+ {
+ if (wi::neg_p (x, sgn) != wi::neg_p (y, sgn))
+ return quotient - 1;
+ else
+ return quotient + 1;
+ }
+ }
+ else
+ {
+ if (wi::geu_p (remainder, wi::lrshift (y, 1)))
+ return quotient + 1;
+ }
+ }
+ return quotient;
+}
+
+/* Return X / Y, rounding towards 0. Treat X and Y as having the
+ signedness given by SGN. Store the remainder in *REMAINDER_PTR. */
+template <typename T1, typename T2>
+inline WI_BINARY_RESULT (T1, T2)
+wi::divmod_trunc (const T1 &x, const T2 &y, signop sgn,
+ WI_BINARY_RESULT (T1, T2) *remainder_ptr)
+{
+ WI_BINARY_RESULT_VAR (quotient, quotient_val, T1, x, T2, y);
+ WI_BINARY_RESULT_VAR (remainder, remainder_val, T1, x, T2, y);
+ unsigned int precision = get_precision (quotient);
+ WIDE_INT_REF_FOR (T1) xi (x, precision);
+ WIDE_INT_REF_FOR (T2) yi (y);
+
+ unsigned int remainder_len;
+ quotient.set_len (divmod_internal (quotient_val,
+ &remainder_len, remainder_val,
+ xi.val, xi.len, precision,
+ yi.val, yi.len, yi.precision, sgn, 0));
+ remainder.set_len (remainder_len);
+
+ *remainder_ptr = remainder;
+ return quotient;
+}
+
+/* Compute X / Y, rounding towards 0, and return the remainder.
+ Treat X and Y as having the signedness given by SGN. Indicate
+ in *OVERFLOW if the division overflows. */
+template <typename T1, typename T2>
+inline WI_BINARY_RESULT (T1, T2)
+wi::mod_trunc (const T1 &x, const T2 &y, signop sgn, bool *overflow)
+{
+ WI_BINARY_RESULT_VAR (remainder, remainder_val, T1, x, T2, y);
+ unsigned int precision = get_precision (remainder);
+ WIDE_INT_REF_FOR (T1) xi (x, precision);
+ WIDE_INT_REF_FOR (T2) yi (y);
+
+ unsigned int remainder_len;
+ /* The leading 0 argument means no quotient is wanted. */
+ divmod_internal (0, &remainder_len, remainder_val,
+ xi.val, xi.len, precision,
+ yi.val, yi.len, yi.precision, sgn, overflow);
+ remainder.set_len (remainder_len);
+
+ return remainder;
+}
+
+/* Compute X / Y, rounding towards 0, and return the remainder.
+ Treat X and Y as signed values. */
+template <typename T1, typename T2>
+inline WI_BINARY_RESULT (T1, T2)
+wi::smod_trunc (const T1 &x, const T2 &y)
+{
+ return mod_trunc (x, y, SIGNED);
+}
+
+/* Compute X / Y, rounding towards 0, and return the remainder.
+ Treat X and Y as unsigned values. */
+template <typename T1, typename T2>
+inline WI_BINARY_RESULT (T1, T2)
+wi::umod_trunc (const T1 &x, const T2 &y)
+{
+ return mod_trunc (x, y, UNSIGNED);
+}
+
+/* Compute X / Y, rounding towards -inf, and return the remainder.
+ Treat X and Y as having the signedness given by SGN. Indicate
+ in *OVERFLOW if the division overflows. */
+template <typename T1, typename T2>
+inline WI_BINARY_RESULT (T1, T2)
+wi::mod_floor (const T1 &x, const T2 &y, signop sgn, bool *overflow)
+{
+ WI_BINARY_RESULT_VAR (quotient, quotient_val, T1, x, T2, y);
+ WI_BINARY_RESULT_VAR (remainder, remainder_val, T1, x, T2, y);
+ unsigned int precision = get_precision (quotient);
+ WIDE_INT_REF_FOR (T1) xi (x, precision);
+ WIDE_INT_REF_FOR (T2) yi (y);
+
+ unsigned int remainder_len;
+ quotient.set_len (divmod_internal (quotient_val,
+ &remainder_len, remainder_val,
+ xi.val, xi.len, precision,
+ yi.val, yi.len, yi.precision, sgn,
+ overflow));
+ remainder.set_len (remainder_len);
+
+ /* Flooring the quotient shifts a nonzero truncated remainder of
+ opposite-signed operands by one multiple of Y. */
+ if (wi::neg_p (x, sgn) != wi::neg_p (y, sgn) && remainder != 0)
+ return remainder + y;
+ return remainder;
+}
+
+/* Compute X / Y, rounding towards -inf, and return the remainder.
+ Treat X and Y as unsigned values. */
+/* ??? Why do we have both this and umod_trunc. Aren't they the same? */
+template <typename T1, typename T2>
+inline WI_BINARY_RESULT (T1, T2)
+wi::umod_floor (const T1 &x, const T2 &y)
+{
+ return mod_floor (x, y, UNSIGNED);
+}
+
+/* Compute X / Y, rounding towards +inf, and return the remainder.
+ Treat X and Y as having the signedness given by SGN. Indicate
+ in *OVERFLOW if the division overflows. */
+template <typename T1, typename T2>
+inline WI_BINARY_RESULT (T1, T2)
+wi::mod_ceil (const T1 &x, const T2 &y, signop sgn, bool *overflow)
+{
+ WI_BINARY_RESULT_VAR (quotient, quotient_val, T1, x, T2, y);
+ WI_BINARY_RESULT_VAR (remainder, remainder_val, T1, x, T2, y);
+ unsigned int precision = get_precision (quotient);
+ WIDE_INT_REF_FOR (T1) xi (x, precision);
+ WIDE_INT_REF_FOR (T2) yi (y);
+
+ unsigned int remainder_len;
+ quotient.set_len (divmod_internal (quotient_val,
+ &remainder_len, remainder_val,
+ xi.val, xi.len, precision,
+ yi.val, yi.len, yi.precision, sgn,
+ overflow));
+ remainder.set_len (remainder_len);
+
+ /* Ceiling the quotient shifts a nonzero truncated remainder of
+ same-signed operands by one multiple of Y. */
+ if (wi::neg_p (x, sgn) == wi::neg_p (y, sgn) && remainder != 0)
+ return remainder - y;
+ return remainder;
+}
+
+/* Compute X / Y, rounding towards nearest with ties away from zero,
+ and return the remainder. Treat X and Y as having the signedness
+ given by SGN. Indicate in *OVERFLOW if the division overflows. */
+template <typename T1, typename T2>
+inline WI_BINARY_RESULT (T1, T2)
+wi::mod_round (const T1 &x, const T2 &y, signop sgn, bool *overflow)
+{
+ WI_BINARY_RESULT_VAR (quotient, quotient_val, T1, x, T2, y);
+ WI_BINARY_RESULT_VAR (remainder, remainder_val, T1, x, T2, y);
+ unsigned int precision = get_precision (quotient);
+ WIDE_INT_REF_FOR (T1) xi (x, precision);
+ WIDE_INT_REF_FOR (T2) yi (y);
+
+ unsigned int remainder_len;
+ quotient.set_len (divmod_internal (quotient_val,
+ &remainder_len, remainder_val,
+ xi.val, xi.len, precision,
+ yi.val, yi.len, yi.precision, sgn,
+ overflow));
+ remainder.set_len (remainder_len);
+
+ if (remainder != 0)
+ {
+ if (sgn == SIGNED)
+ {
+ /* Adjust when |remainder| >= |y| / 2, mirroring div_round. */
+ if (wi::ges_p (wi::abs (remainder),
+ wi::lrshift (wi::abs (y), 1)))
+ {
+ if (wi::neg_p (x, sgn) != wi::neg_p (y, sgn))
+ return remainder + y;
+ else
+ return remainder - y;
+ }
+ }
+ else
+ {
+ if (wi::geu_p (remainder, wi::lrshift (y, 1)))
+ return remainder - y;
+ }
+ }
+ return remainder;
+}
+
+/* Return true if X is a multiple of Y, storing X / Y in *RES if so.
+ Treat X and Y as having the signedness given by SGN. */
+template <typename T1, typename T2>
+inline bool
+wi::multiple_of_p (const T1 &x, const T2 &y, signop sgn,
+ WI_BINARY_RESULT (T1, T2) *res)
+{
+ WI_BINARY_RESULT (T1, T2) remainder;
+ WI_BINARY_RESULT (T1, T2) quotient
+ = divmod_trunc (x, y, sgn, &remainder);
+ /* X is a multiple of Y exactly when truncating division is exact.
+ *RES is left untouched on failure. */
+ if (remainder == 0)
+ {
+ *res = quotient;
+ return true;
+ }
+ return false;
+}
+
+/* Return X << Y. Return 0 if Y is greater than or equal to
+ the precision of X. */
+template <typename T1, typename T2>
+inline WI_UNARY_RESULT (T1)
+wi::lshift (const T1 &x, const T2 &y)
+{
+ WI_UNARY_RESULT_VAR (result, val, T1, x);
+ unsigned int precision = get_precision (result);
+ WIDE_INT_REF_FOR (T1) xi (x, precision);
+ WIDE_INT_REF_FOR (T2) yi (y);
+ /* Handle the simple cases quickly. */
+ if (geu_p (yi, precision))
+ {
+ val[0] = 0;
+ result.set_len (1);
+ }
+ else
+ {
+ /* YI < PRECISION here, so the shift amount fits a HWI. */
+ unsigned int shift = yi.to_uhwi ();
+ /* For fixed-precision integers like offset_int and widest_int,
+ handle the case where the shift value is constant and the
+ result is a single nonnegative HWI (meaning that we don't
+ need to worry about val[1]). This is particularly common
+ for converting a byte count to a bit count.
+
+ For variable-precision integers like wide_int, handle HWI
+ and sub-HWI integers inline. */
+ if (STATIC_CONSTANT_P (xi.precision > HOST_BITS_PER_WIDE_INT)
+ ? (STATIC_CONSTANT_P (shift < HOST_BITS_PER_WIDE_INT - 1)
+ && xi.len == 1
+ && xi.val[0] <= (HOST_WIDE_INT) ((unsigned HOST_WIDE_INT)
+ HOST_WIDE_INT_MAX >> shift))
+ : precision <= HOST_BITS_PER_WIDE_INT)
+ {
+ val[0] = xi.ulow () << shift;
+ result.set_len (1);
+ }
+ else
+ result.set_len (lshift_large (val, xi.val, xi.len,
+ precision, shift));
+ }
+ return result;
+}
+
+/* Return X >> Y, using a logical shift. Return 0 if Y is greater than
+ or equal to the precision of X. */
+template <typename T1, typename T2>
+inline WI_UNARY_RESULT (T1)
+wi::lrshift (const T1 &x, const T2 &y)
+{
+ WI_UNARY_RESULT_VAR (result, val, T1, x);
+ /* Do things in the precision of the input rather than the output,
+ since the result can be no larger than that. */
+ WIDE_INT_REF_FOR (T1) xi (x);
+ WIDE_INT_REF_FOR (T2) yi (y);
+ /* Handle the simple cases quickly. */
+ if (geu_p (yi, xi.precision))
+ {
+ val[0] = 0;
+ result.set_len (1);
+ }
+ else
+ {
+ unsigned int shift = yi.to_uhwi ();
+ /* For fixed-precision integers like offset_int and widest_int,
+ handle the case where the shift value is constant and the
+ shifted value is a single nonnegative HWI (meaning that all
+ bits above the HWI are zero). This is particularly common
+ for converting a bit count to a byte count.
+
+ For variable-precision integers like wide_int, handle HWI
+ and sub-HWI integers inline. */
+ if (STATIC_CONSTANT_P (xi.precision > HOST_BITS_PER_WIDE_INT)
+ ? xi.len == 1 && xi.val[0] >= 0
+ : xi.precision <= HOST_BITS_PER_WIDE_INT)
+ {
+ val[0] = xi.to_uhwi () >> shift;
+ result.set_len (1);
+ }
+ else
+ result.set_len (lrshift_large (val, xi.val, xi.len, xi.precision,
+ get_precision (result), shift));
+ }
+ return result;
+}
+
+/* Return X >> Y, using an arithmetic shift. Return a sign mask if
+ Y is greater than or equal to the precision of X. */
+template <typename T1, typename T2>
+inline WI_UNARY_RESULT (T1)
+wi::arshift (const T1 &x, const T2 &y)
+{
+ WI_UNARY_RESULT_VAR (result, val, T1, x);
+ /* Do things in the precision of the input rather than the output,
+ since the result can be no larger than that. */
+ WIDE_INT_REF_FOR (T1) xi (x);
+ WIDE_INT_REF_FOR (T2) yi (y);
+ /* Handle the simple cases quickly. */
+ if (geu_p (yi, xi.precision))
+ {
+ val[0] = sign_mask (x);
+ result.set_len (1);
+ }
+ else
+ {
+ unsigned int shift = yi.to_uhwi ();
+ if (xi.precision <= HOST_BITS_PER_WIDE_INT)
+ {
+ /* Re-extend the sign from the (precision - shift) bits that
+ remain after the shift. */
+ val[0] = sext_hwi (xi.ulow () >> shift, xi.precision - shift);
+ result.set_len (1, true);
+ }
+ else
+ result.set_len (arshift_large (val, xi.val, xi.len, xi.precision,
+ get_precision (result), shift));
+ }
+ return result;
+}
+
+/* Return X >> Y, using an arithmetic shift if SGN is SIGNED and a
+ logical shift otherwise. */
+template <typename T1, typename T2>
+inline WI_UNARY_RESULT (T1)
+wi::rshift (const T1 &x, const T2 &y, signop sgn)
+{
+ if (sgn == UNSIGNED)
+ return lrshift (x, y);
+ else
+ return arshift (x, y);
+}
+
+/* Return the result of rotating the low WIDTH bits of X left by Y
+ bits and zero-extending the result. Use a full-width rotate if
+ WIDTH is zero. */
+template <typename T1, typename T2>
+WI_UNARY_RESULT (T1)
+wi::lrotate (const T1 &x, const T2 &y, unsigned int width)
+{
+ unsigned int precision = get_binary_precision (x, x);
+ if (width == 0)
+ width = precision;
+ /* YMOD < WIDTH, so the complementary shift WIDTH - YMOD is in
+ (0, WIDTH]. */
+ WI_UNARY_RESULT (T2) ymod = umod_trunc (y, width);
+ WI_UNARY_RESULT (T1) left = wi::lshift (x, ymod);
+ WI_UNARY_RESULT (T1) right = wi::lrshift (x, wi::sub (width, ymod));
+ if (width != precision)
+ return wi::zext (left, width) | wi::zext (right, width);
+ return left | right;
+}
+
+/* Return the result of rotating the low WIDTH bits of X right by Y
+ bits and zero-extending the result. Use a full-width rotate if
+ WIDTH is zero. */
+template <typename T1, typename T2>
+WI_UNARY_RESULT (T1)
+wi::rrotate (const T1 &x, const T2 &y, unsigned int width)
+{
+ unsigned int precision = get_binary_precision (x, x);
+ if (width == 0)
+ width = precision;
+ WI_UNARY_RESULT (T2) ymod = umod_trunc (y, width);
+ WI_UNARY_RESULT (T1) right = wi::lrshift (x, ymod);
+ WI_UNARY_RESULT (T1) left = wi::lshift (x, wi::sub (width, ymod));
+ if (width != precision)
+ return wi::zext (left, width) | wi::zext (right, width);
+ return left | right;
+}
+
+/* Return 0 if the number of 1s in X is even and 1 if the number of 1s
+ is odd. */
+inline int
+wi::parity (const wide_int_ref &x)
+{
+ return popcount (x) & 1;
+}
+
/* Extract WIDTH bits from X, starting at BITPOS, and return them as
   a zero-extended unsigned HOST_WIDE_INT.  */
template <typename T>
inline unsigned HOST_WIDE_INT
wi::extract_uhwi (const T &x, unsigned int bitpos, unsigned int width)
{
  /* Read X at a precision guaranteed to cover the extracted field,
     so that the element accesses below stay in range.  */
  unsigned precision = get_precision (x);
  if (precision < bitpos + width)
    precision = bitpos + width;
  WIDE_INT_REF_FOR (T) xi (x, precision);

  /* Handle this rare case after the above, so that we assert about
     bogus BITPOS values.  */
  if (width == 0)
    return 0;

  /* Locate the HWI element holding the first bit of the field, and
     the offset of that bit within the element.  */
  unsigned int start = bitpos / HOST_BITS_PER_WIDE_INT;
  unsigned int shift = bitpos % HOST_BITS_PER_WIDE_INT;
  unsigned HOST_WIDE_INT res = xi.elt (start);
  res >>= shift;
  if (shift + width > HOST_BITS_PER_WIDE_INT)
    {
      /* The field straddles two elements; merge in the low bits of
	 the next element.  For SHIFT > 0 (which holds here when
	 WIDTH <= HOST_BITS_PER_WIDE_INT),
	 -SHIFT % HOST_BITS_PER_WIDE_INT == HOST_BITS_PER_WIDE_INT - SHIFT,
	 positioning UPPER's bits just above those already in RES.  */
      unsigned HOST_WIDE_INT upper = xi.elt (start + 1);
      res |= upper << (-shift % HOST_BITS_PER_WIDE_INT);
    }
  /* Clear any bits above the requested field.  */
  return zext_hwi (res, width);
}
+
+/* Return the minimum precision needed to store X with sign SGN. */
+template <typename T>
+inline unsigned int
+wi::min_precision (const T &x, signop sgn)
+{
+ if (sgn == SIGNED)
+ return get_precision (x) - clrsb (x);
+ else
+ return get_precision (x) - clz (x);
+}
+
/* GC marking and PCH (precompiled-header) walkers for the wide-int
   types.  All of them are deliberate no-ops -- NOTE(review): the
   empty bodies suggest these objects are plain data with nothing for
   the garbage collector to mark or relocate; confirm against the
   gengtype requirements for these types.  */
template<typename T>
void
gt_ggc_mx (generic_wide_int <T> *)
{
}

template<typename T>
void
gt_pch_nx (generic_wide_int <T> *)
{
}

template<typename T>
void
gt_pch_nx (generic_wide_int <T> *, void (*) (void *, void *), void *)
{
}

template<int N>
void
gt_ggc_mx (trailing_wide_ints <N> *)
{
}

template<int N>
void
gt_pch_nx (trailing_wide_ints <N> *)
{
}

template<int N>
void
gt_pch_nx (trailing_wide_ints <N> *, void (*) (void *, void *), void *)
{
}
+
namespace wi
{
  /* Used for overloaded functions in which the only other acceptable
     scalar type is a pointer.  It stops a plain 0 from being treated
     as a null pointer.  */
  struct never_used1 {};
  struct never_used2 {};

  /* The minimum and maximum value representable in the given
     precision with the given sign.  The never_used overloads exist
     only to reject literal-0 arguments (see above).  */
  wide_int min_value (unsigned int, signop);
  wide_int min_value (never_used1 *);
  wide_int min_value (never_used2 *);
  wide_int max_value (unsigned int, signop);
  wide_int max_value (never_used1 *);
  wide_int max_value (never_used2 *);

  /* FIXME: this is target dependent, so should be elsewhere.
     It also seems to assume that CHAR_BIT == BITS_PER_UNIT.  */
  wide_int from_buffer (const unsigned char *, unsigned int);

#ifndef GENERATOR_FILE
  /* Conversion to GMP's mpz_t; excluded from generator programs,
     presumably because they are built without GMP -- TODO confirm.  */
  void to_mpz (const wide_int_ref &, mpz_t, signop);
#endif

  /* Mask construction and bit insertion returning a wide_int; the
     first three have inline definitions later in this file.  */
  wide_int mask (unsigned int, bool, unsigned int);
  wide_int shifted_mask (unsigned int, unsigned int, bool, unsigned int);
  wide_int set_bit_in_zero (unsigned int, unsigned int);
  wide_int insert (const wide_int &x, const wide_int &y, unsigned int,
		   unsigned int);

  /* Equivalents whose result type T carries its own static precision
     (see the STATIC_ASSERTs in the inline definitions below).  */
  template <typename T>
  T mask (unsigned int, bool);

  template <typename T>
  T shifted_mask (unsigned int, unsigned int, bool);

  template <typename T>
  T set_bit_in_zero (unsigned int);

  /* Low-level workers that write the value into a HOST_WIDE_INT
     array; mask and shifted_mask return the length to pass to
     set_len (see the inline wrappers below).  */
  unsigned int mask (HOST_WIDE_INT *, unsigned int, bool, unsigned int);
  unsigned int shifted_mask (HOST_WIDE_INT *, unsigned int, unsigned int,
			     bool, unsigned int);
  unsigned int from_array (HOST_WIDE_INT *, const HOST_WIDE_INT *,
			   unsigned int, unsigned int, bool);
}
+
/* Return a PRECISION-bit integer in which the low WIDTH bits are set
   and the other bits are clear, or the inverse if NEGATE_P.  */
inline wide_int
wi::mask (unsigned int width, bool negate_p, unsigned int precision)
{
  wide_int result = wide_int::create (precision);
  /* The array-level mask worker fills RESULT's value array directly
     and returns the number of significant HWI elements, which
     becomes RESULT's length.  */
  result.set_len (mask (result.write_val (), width, negate_p, precision));
  return result;
}
+
/* Return a PRECISION-bit integer in which the low START bits are clear,
   the next WIDTH bits are set, and the other bits are clear,
   or the inverse if NEGATE_P.  */
inline wide_int
wi::shifted_mask (unsigned int start, unsigned int width, bool negate_p,
		  unsigned int precision)
{
  wide_int result = wide_int::create (precision);
  /* The array-level worker fills RESULT's value array directly and
     returns the number of significant HWI elements, which becomes
     RESULT's length.  */
  result.set_len (shifted_mask (result.write_val (), start, width, negate_p,
				precision));
  return result;
}
+
+/* Return a PRECISION-bit integer in which bit BIT is set and all the
+ others are clear. */
+inline wide_int
+wi::set_bit_in_zero (unsigned int bit, unsigned int precision)
+{
+ return shifted_mask (bit, 1, false, precision);
+}
+
/* Return an integer of type T in which the low WIDTH bits are set
   and the other bits are clear, or the inverse if NEGATE_P.  */
template <typename T>
inline T
wi::mask (unsigned int width, bool negate_p)
{
  /* T must advertise a static, nonzero precision through its
     int_traits; that precision sizes the result.  */
  STATIC_ASSERT (wi::int_traits<T>::precision);
  T result;
  /* The array-level worker fills RESULT's value array and returns
     the length to record.  */
  result.set_len (mask (result.write_val (), width, negate_p,
			wi::int_traits <T>::precision));
  return result;
}
+
/* Return an integer of type T in which the low START bits are clear,
   the next WIDTH bits are set, and the other bits are clear, or the
   inverse if NEGATE_P.  */
template <typename T>
inline T
wi::shifted_mask (unsigned int start, unsigned int width, bool negate_p)
{
  /* T must advertise a static, nonzero precision through its
     int_traits; that precision sizes the result.  */
  STATIC_ASSERT (wi::int_traits<T>::precision);
  T result;
  /* The array-level worker fills RESULT's value array and returns
     the length to record.  */
  result.set_len (shifted_mask (result.write_val (), start, width,
				negate_p,
				wi::int_traits <T>::precision));
  return result;
}
+
+/* Return an integer of type T in which bit BIT is set and all the
+ others are clear. */
+template <typename T>
+inline T
+wi::set_bit_in_zero (unsigned int bit)
+{
+ return shifted_mask <T> (bit, 1, false);
+}
+
+#endif /* WIDE_INT_H */