228 files changed, 12775 insertions, 5598 deletions
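The bulk of this diff is mechanical: call sites move from the double_int-era accessors (host_integerp, tree_low_cst, TREE_INT_CST_LOW/TREE_INT_CST_HIGH) to the wide-int-era ones (tree_fits_shwi_p/tree_fits_uhwi_p, tree_to_shwi/tree_to_uhwi/tree_to_hwi), and arithmetic on double_int is rewritten in terms of the new wide_int class. Below is a minimal sketch of the recurring call-site rewrite, using only functions that appear in the hunks that follow; the helper name is hypothetical, and the snippet assumes GCC's internal tree.h environment rather than a standalone program.

```c
/* Hypothetical helper, for orientation only -- not part of the patch.
   It shows the before/after idiom this diff applies across 228 files.  */
#include "tree.h"   /* GCC-internal header assumed; not standalone.  */

static unsigned HOST_WIDE_INT
size_in_bits_or_zero (tree type)
{
  /* Old idiom (the '-' lines below):
       if (host_integerp (TYPE_SIZE (type), 1))      1 = unsigned
         return tree_low_cst (TYPE_SIZE (type), 1);
     New idiom (the '+' lines below): the signedness moves out of a
     boolean argument and into the function name.  */
  if (tree_fits_uhwi_p (TYPE_SIZE (type)))
    return tree_to_uhwi (TYPE_SIZE (type));
  return 0;
}
```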
diff --git a/gcc/Makefile.in b/gcc/Makefile.in
index 6ddc8534f84..2e9352025f0 100644
--- a/gcc/Makefile.in
+++ b/gcc/Makefile.in
@@ -864,7 +864,7 @@ RTL_BASE_H = coretypes.h rtl.h rtl.def $(MACHMODE_H) reg-notes.def \
   insn-notes.def $(INPUT_H) $(REAL_H) statistics.h $(VEC_H) \
   $(FIXED_VALUE_H) alias.h $(HASHTAB_H)
 FIXED_VALUE_H = fixed-value.h $(MACHMODE_H) double-int.h
-RTL_H = $(RTL_BASE_H) $(FLAGS_H) genrtl.h
+RTL_H = $(RTL_BASE_H) $(FLAGS_H) genrtl.h wide-int.h
 RTL_ERROR_H = rtl-error.h $(RTL_H) $(DIAGNOSTIC_CORE_H)
 READ_MD_H = $(OBSTACK_H) $(HASHTAB_H) read-md.h
 PARAMS_H = params.h params.def
@@ -902,7 +902,7 @@ SCHED_INT_H = sched-int.h $(INSN_ATTR_H) $(BASIC_BLOCK_H) $(RTL_H) $(DF_H) \
 SEL_SCHED_IR_H = sel-sched-ir.h $(INSN_ATTR_H) $(BASIC_BLOCK_H) $(RTL_H) \
   $(GGC_H) $(BITMAP_H) $(SCHED_INT_H) $(CFGLOOP_H) $(REGSET_H)
 SEL_SCHED_DUMP_H = sel-sched-dump.h $(SEL_SCHED_IR_H)
-CFGLOOP_H = cfgloop.h $(BASIC_BLOCK_H) double-int.h \
+CFGLOOP_H = cfgloop.h $(BASIC_BLOCK_H) double-int.h wide-int.h \
   $(BITMAP_H) sbitmap.h
 IPA_UTILS_H = ipa-utils.h $(TREE_H) $(CGRAPH_H)
 IPA_REFERENCE_H = ipa-reference.h $(BITMAP_H) $(TREE_H)
@@ -921,7 +921,7 @@ TIMEVAR_H = timevar.h timevar.def
 INSN_ATTR_H = insn-attr.h insn-attr-common.h $(INSN_ADDR_H)
 INSN_ADDR_H = $(srcdir)/insn-addr.h
 C_COMMON_H = c-family/c-common.h c-family/c-common.def $(TREE_H) \
-  $(SPLAY_TREE_H) $(CPPLIB_H) $(GGC_H) $(DIAGNOSTIC_CORE_H)
+  $(SPLAY_TREE_H) $(CPPLIB_H) $(GGC_H) $(DIAGNOSTIC_CORE_H) wide-int.h
 C_PRAGMA_H = c-family/c-pragma.h $(CPPLIB_H)
 C_TREE_H = c-tree.h $(C_COMMON_H) $(DIAGNOSTIC_H)
 SYSTEM_H = system.h hwint.h $(srcdir)/../include/libiberty.h \
@@ -944,7 +944,7 @@ TREE_PASS_H = tree-pass.h $(TIMEVAR_H) $(DUMPFILE_H)
 TREE_FLOW_H = tree-flow.h tree-flow-inline.h tree-ssa-operands.h \
   $(BITMAP_H) sbitmap.h $(BASIC_BLOCK_H) $(GIMPLE_H) \
   $(HASHTAB_H) $(CGRAPH_H) $(IPA_REFERENCE_H) \
-  tree-ssa-alias.h
+  tree-ssa-alias.h wide-int.h
 TREE_HASHER_H = tree-hasher.h $(HASH_TABLE_H) $(TREE_FLOW_H)
 TREE_SSA_LIVE_H = tree-ssa-live.h $(PARTITION_H)
 SSAEXPAND_H = ssaexpand.h $(TREE_SSA_LIVE_H)
@@ -953,14 +953,14 @@ TREE_PRETTY_PRINT_H = tree-pretty-print.h $(PRETTY_PRINT_H)
 GIMPLE_PRETTY_PRINT_H = gimple-pretty-print.h $(TREE_PRETTY_PRINT_H)
 DIAGNOSTIC_CORE_H = diagnostic-core.h $(INPUT_H) bversion.h diagnostic.def
 DIAGNOSTIC_H = diagnostic.h $(DIAGNOSTIC_CORE_H) $(PRETTY_PRINT_H)
-DWARF2OUT_H = dwarf2out.h $(DWARF2_H)
+DWARF2OUT_H = dwarf2out.h $(DWARF2_H) wide-int.h
 C_PRETTY_PRINT_H = c-family/c-pretty-print.h $(PRETTY_PRINT_H) \
   $(C_COMMON_H) $(TREE_H)
 SCEV_H = tree-scalar-evolution.h $(GGC_H) tree-chrec.h $(PARAMS_H)
 OMEGA_H = omega.h $(PARAMS_H)
 TREE_DATA_REF_H = tree-data-ref.h $(OMEGA_H) graphds.h $(SCEV_H)
 TREE_INLINE_H = tree-inline.h
-REAL_H = real.h $(MACHMODE_H)
+REAL_H = real.h $(MACHMODE_H) signop.h
 IRA_INT_H = ira.h ira-int.h $(CFGLOOP_H) alloc-pool.h
 LRA_INT_H = lra.h $(BITMAP_H) $(RECOG_H) $(INSN_ATTR_H) insn-codes.h \
   insn-config.h $(REGS_H) lra-int.h
@@ -1475,6 +1475,8 @@ OBJS = \
 	vmsdbgout.o \
 	vtable-verify.o \
 	web.o \
+	wide-int.o \
+	wide-int-print.o \
 	xcoffout.o \
 	$(out_object_file) \
 	$(EXTRA_OBJS) \
@@ -1956,7 +1958,7 @@ c-family/c-common.o : c-family/c-common.c $(CONFIG_H) $(SYSTEM_H) coretypes.h \
   $(TARGET_H) tree-iterator.h langhooks.h tree-mudflap.h \
   intl.h $(OPTS_H) $(CPPLIB_H) $(TREE_INLINE_H) $(HASHTAB_H) \
   $(BUILTINS_DEF) $(CGRAPH_H) $(TARGET_DEF_H) \
-  gt-c-family-c-common.h $(COMMON_TARGET_H)
+  gt-c-family-c-common.h $(COMMON_TARGET_H) wide-int-print.h
 c-family/c-cppbuiltin.o : c-family/c-cppbuiltin.c $(CONFIG_H) $(SYSTEM_H) \
   coretypes.h $(TM_H) $(TREE_H) version.h $(C_COMMON_H) $(C_PRAGMA_H) \
@@ -1980,7 +1982,7 @@ c-family/c-gimplify.o : c-family/c-gimplify.c $(CONFIG_H) $(SYSTEM_H) $(TREE_H)
 c-family/c-lex.o : c-family/c-lex.c $(CONFIG_H) $(SYSTEM_H) coretypes.h \
   $(TM_H) $(TREE_H) $(FIXED_VALUE_H) debug.h $(C_COMMON_H) $(SPLAY_TREE_H) \
   $(C_PRAGMA_H) $(INPUT_H) intl.h $(FLAGS_H) \
-  $(CPPLIB_H) $(TARGET_H) $(TIMEVAR_H)
+  $(CPPLIB_H) $(TARGET_H) $(TIMEVAR_H) wide-int.h
 c-family/c-omp.o : c-family/c-omp.c $(CONFIG_H) $(SYSTEM_H) coretypes.h \
   $(TREE_H) $(C_COMMON_H) $(GIMPLE_H) langhooks.h
@@ -2010,7 +2012,8 @@ c-family/c-pragma.o: c-family/c-pragma.c $(CONFIG_H) $(SYSTEM_H) coretypes.h \
 c-family/c-pretty-print.o : c-family/c-pretty-print.c $(C_PRETTY_PRINT_H) \
   $(CONFIG_H) $(SYSTEM_H) coretypes.h $(TM_H) \
-  $(DIAGNOSTIC_H) tree-iterator.h intl.h $(TREE_PRETTY_PRINT_H)
+  $(DIAGNOSTIC_H) tree-iterator.h intl.h $(TREE_PRETTY_PRINT_H) \
+  wide-int-print.h
 c-family/c-semantics.o : c-family/c-semantics.c $(CONFIG_H) $(SYSTEM_H) \
   coretypes.h $(TM_H) $(TREE_H) $(FLAGS_H) \
@@ -2019,7 +2022,7 @@ c-family/c-semantics.o : c-family/c-semantics.c $(CONFIG_H) $(SYSTEM_H) \
 c-family/c-ada-spec.o : c-family/c-ada-spec.c c-family/c-ada-spec.h \
   $(CONFIG_H) $(SYSTEM_H) $(TREE_H) $(CPP_ID_DATA_H) $(TM_H) \
-  coretypes.h tree-iterator.h $(DUMPFILE_H)
+  coretypes.h tree-iterator.h $(DUMPFILE_H) wide-int.h
 c-family/array-notation-common.o : c-family/array-notation-common.c $(TREE_H) \
   $(SYSTEM_H) $(TREE_H) coretypes.h tree-iterator.h $(DIAGNOSTIC_CORE_H)
@@ -2237,9 +2240,10 @@ tree.o: tree.c $(CONFIG_H) $(SYSTEM_H) coretypes.h $(TM_H) $(TREE_H) \
   $(BASIC_BLOCK_H) $(TREE_FLOW_H) $(OBSTACK_H) pointer-set.h \
   $(TREE_PASS_H) $(LANGHOOKS_DEF_H) $(DIAGNOSTIC_H) $(CGRAPH_H) \
   $(EXCEPT_H) debug.h intl.h tree-diagnostic.h $(TREE_PRETTY_PRINT_H) \
-  $(COMMON_TARGET_H)
+  $(COMMON_TARGET_H) wide-int.h
 tree-dump.o: tree-dump.c $(CONFIG_H) $(SYSTEM_H) $(TM_H) $(TREE_H) \
-  langhooks.h $(TREE_DUMP_H) tree-iterator.h $(TREE_PRETTY_PRINT_H)
+  langhooks.h $(TREE_DUMP_H) tree-iterator.h $(TREE_PRETTY_PRINT_H) \
+  wide-int.h wide-int-print.h
 tree-inline.o : tree-inline.c $(CONFIG_H) $(SYSTEM_H) coretypes.h $(TM_H) \
   $(TREE_H) $(RTL_H) $(FLAGS_H) $(PARAMS_H) $(INPUT_H) insn-config.h \
   $(HASHTAB_H) langhooks.h $(TREE_INLINE_H) $(CGRAPH_H) \
@@ -2249,7 +2253,8 @@ tree-inline.o : tree-inline.c $(CONFIG_H) $(SYSTEM_H) coretypes.h $(TM_H) \
   $(TREE_PRETTY_PRINT_H)
 print-tree.o : print-tree.c $(CONFIG_H) $(SYSTEM_H) coretypes.h $(DUMPFILE_H) \
   $(TM_H) $(TREE_H) $(GGC_H) langhooks.h tree-iterator.h \
-  $(DIAGNOSTIC_H) $(TREE_FLOW_H) $(GIMPLE_PRETTY_PRINT_H) $(TREE_DUMP_H)
+  $(DIAGNOSTIC_H) $(TREE_FLOW_H) $(GIMPLE_PRETTY_PRINT_H) $(TREE_DUMP_H) \
+  wide-int-print.h
 stor-layout.o : stor-layout.c $(CONFIG_H) $(SYSTEM_H) coretypes.h $(TM_H) \
   $(TREE_H) $(PARAMS_H) $(FLAGS_H) $(FUNCTION_H) $(EXPR_H) $(RTL_H) \
   $(GGC_H) $(TM_P_H) $(TARGET_H) langhooks.h $(REGS_H) gt-stor-layout.h \
@@ -2402,7 +2407,7 @@ gimple-ssa-strength-reduction.o : gimple-ssa-strength-reduction.c $(CONFIG_H) \
   $(SYSTEM_H) coretypes.h $(TREE_H) $(GIMPLE_H) $(BASIC_BLOCK_H) \
   $(HASH_TABLE_H) $(TREE_PASS_H) $(CFGLOOP_H) $(TREE_PRETTY_PRINT_H) \
   $(GIMPLE_PRETTY_PRINT_H) alloc-pool.h $(TREE_FLOW_H) domwalk.h \
-  pointer-set.h expmed.h
+  pointer-set.h expmed.h wide-int-print.h
 tree-vrp.o : tree-vrp.c $(CONFIG_H) $(SYSTEM_H) coretypes.h $(TM_H) $(TREE_H) \
   $(TREE_FLOW_H) $(TREE_PASS_H) $(DIAGNOSTIC_CORE_H) $(GGC_H) \
   $(BASIC_BLOCK_H) tree-ssa-propagate.h $(FLAGS_H) $(TREE_DUMP_H) \
@@ -2412,7 +2417,7 @@ tree-cfg.o : tree-cfg.c $(TREE_FLOW_H) $(CONFIG_H) $(SYSTEM_H) $(HASH_TABLE_H) \
   $(TREE_H) $(TM_P_H) $(GGC_H) $(FLAGS_H) $(TARGET_H) \
   $(DIAGNOSTIC_CORE_H) $(FUNCTION_H) $(TM_H) coretypes.h \
   $(TREE_DUMP_H) $(EXCEPT_H) $(CFGLOOP_H) $(TREE_PASS_H) \
-  $(BASIC_BLOCK_H) \
+  $(BASIC_BLOCK_H) wide-int.h wide-int-print.h \
   value-prof.h tree-ssa-propagate.h $(TREE_INLINE_H) $(GIMPLE_PRETTY_PRINT_H)
 tree-cfgcleanup.o : tree-cfgcleanup.c $(TREE_FLOW_H) $(CONFIG_H) $(SYSTEM_H) \
   $(TREE_H) $(TM_P_H) $(GGC_H) $(FLAGS_H) \
@@ -2470,12 +2475,13 @@ tree-ssa-address.o : tree-ssa-address.c $(TREE_FLOW_H) $(CONFIG_H) \
   $(DIAGNOSTIC_H) $(TM_H) coretypes.h $(DUMPFILE_H) \
   $(FLAGS_H) $(TREE_INLINE_H) $(RECOG_H) insn-config.h \
   $(EXPR_H) gt-tree-ssa-address.h $(GGC_H) tree-affine.h $(TARGET_H) \
-  $(TREE_PRETTY_PRINT_H) expmed.h
+  $(TREE_PRETTY_PRINT_H) expmed.h wide-int-print.h
 tree-ssa-loop-niter.o : tree-ssa-loop-niter.c $(TREE_FLOW_H) $(CONFIG_H) \
   $(SYSTEM_H) $(TREE_H) $(TM_P_H) $(CFGLOOP_H) $(PARAMS_H) \
   $(TREE_INLINE_H) $(DIAGNOSTIC_H) $(TM_H) coretypes.h $(DUMPFILE_H) \
   $(DIAGNOSTIC_CORE_H) $(FLAGS_H) $(TREE_DATA_REF_H) \
-  $(BASIC_BLOCK_H) $(GGC_H) intl.h $(GIMPLE_PRETTY_PRINT_H) $(TREE_PASS_H)
+  $(BASIC_BLOCK_H) $(GGC_H) intl.h $(GIMPLE_PRETTY_PRINT_H) $(TREE_PASS_H) \
+  wide-int-print.h
 tree-ssa-loop-ivcanon.o : tree-ssa-loop-ivcanon.c $(TREE_FLOW_H) $(CONFIG_H) \
   $(SYSTEM_H) $(TREE_H) $(TM_P_H) $(CFGLOOP_H) $(PARAMS_H) \
   $(TREE_INLINE_H) $(DIAGNOSTIC_H) $(TM_H) coretypes.h \
@@ -2495,7 +2501,7 @@ tree-ssa-loop-prefetch.o: tree-ssa-loop-prefetch.c $(TREE_FLOW_H) $(CONFIG_H) \
 tree-predcom.o: tree-predcom.c $(CONFIG_H) $(SYSTEM_H) $(TREE_H) $(TM_P_H) \
   $(CFGLOOP_H) $(TREE_FLOW_H) $(GGC_H) $(TREE_DATA_REF_H) \
   $(PARAMS_H) $(DIAGNOSTIC_H) $(TREE_PASS_H) $(TM_H) coretypes.h \
-  tree-affine.h $(TREE_INLINE_H) $(TREE_PRETTY_PRINT_H)
+  tree-affine.h $(TREE_INLINE_H) $(TREE_PRETTY_PRINT_H) wide-int-print.h
 tree-ssa-loop-ivopts.o : tree-ssa-loop-ivopts.c $(TREE_FLOW_H) $(CONFIG_H) \
   $(SYSTEM_H) $(TREE_H) $(TM_P_H) $(CFGLOOP_H) $(EXPR_H) \
   $(DIAGNOSTIC_H) $(TM_H) coretypes.h \
@@ -2506,7 +2512,7 @@ tree-ssa-loop-ivopts.o : tree-ssa-loop-ivopts.c $(TREE_FLOW_H) $(CONFIG_H) \
 tree-affine.o : tree-affine.c tree-affine.h $(CONFIG_H) pointer-set.h \
   $(SYSTEM_H) $(TREE_H) $(GIMPLE_H) \
   coretypes.h $(DUMPFILE_H) $(FLAGS_H) \
-  $(TREE_PRETTY_PRINT_H)
+  $(TREE_PRETTY_PRINT_H) wide-int-print.h wide-int.h
 tree-ssa-loop-manip.o : tree-ssa-loop-manip.c $(TREE_FLOW_H) $(CONFIG_H) \
   $(SYSTEM_H) coretypes.h $(DUMPFILE_H) $(TM_H) $(TREE_H) \
   $(BASIC_BLOCK_H) $(DIAGNOSTIC_H) $(TREE_FLOW_H) \
@@ -2687,7 +2693,7 @@ tree-nomudflap.o : $(CONFIG_H) $(SYSTEM_H) $(TREE_H) $(TREE_INLINE_H) \
 tree-pretty-print.o : tree-pretty-print.c $(CONFIG_H) $(SYSTEM_H) \
   $(TREE_H) $(DIAGNOSTIC_H) $(HASHTAB_H) $(TREE_FLOW_H) \
   $(TM_H) coretypes.h $(DUMPFILE_H) tree-iterator.h $(SCEV_H) langhooks.h \
-  value-prof.h output.h $(TREE_PRETTY_PRINT_H)
+  value-prof.h output.h $(TREE_PRETTY_PRINT_H) wide-int-print.h
 tree-diagnostic.o : tree-diagnostic.c $(CONFIG_H) $(SYSTEM_H) coretypes.h \
   $(DUMPFILE_H) \
   $(TREE_H) $(DIAGNOSTIC_H) tree-diagnostic.h langhooks.h $(LANGHOOKS_DEF_H) \
@@ -2717,6 +2723,8 @@ targhooks.o : targhooks.c $(CONFIG_H) $(SYSTEM_H) coretypes.h $(TREE_H) \
   tree-ssa-alias.h $(TREE_FLOW_H)
 common/common-targhooks.o : common/common-targhooks.c $(CONFIG_H) $(SYSTEM_H) \
   coretypes.h $(INPUT_H) $(TM_H) $(COMMON_TARGET_H) common/common-targhooks.h
+wide-int.o: wide-int.cc $(CONFIG_H) $(SYSTEM_H) coretypes.h $(TM_H) $(TREE_H) wide-int.h
+wide-int-print.o: wide-int-print.cc wide-int-print.h wide-int.h
 bversion.h: s-bversion; @true
 s-bversion: BASE-VER
@@ -2884,19 +2892,19 @@ xcoffout.o : xcoffout.c $(CONFIG_H) $(SYSTEM_H) coretypes.h $(TM_H) \
   $(TREE_H) $(RTL_H) xcoffout.h $(FLAGS_H) $(DIAGNOSTIC_CORE_H) output.h dbxout.h \
   $(GGC_H) $(TARGET_H) debug.h $(GSTAB_H) xcoff.h
 godump.o : godump.c $(CONFIG_H) $(SYSTEM_H) coretypes.h $(DIAGNOSTIC_CORE_H) \
-  $(TREE_H) $(GGC_H) pointer-set.h $(OBSTACK_H) debug.h gt-godump.h
+  $(TREE_H) $(GGC_H) pointer-set.h $(OBSTACK_H) debug.h gt-godump.h wide-int-print.h
 emit-rtl.o : emit-rtl.c $(CONFIG_H) $(SYSTEM_H) coretypes.h $(TM_H) $(RTL_H) \
   $(TREE_H) $(FLAGS_H) $(FUNCTION_H) $(REGS_H) insn-config.h $(RECOG_H) \
   $(GGC_H) $(EXPR_H) hard-reg-set.h $(BITMAP_H) $(DIAGNOSTIC_CORE_H) $(BASIC_BLOCK_H) \
   $(HASHTAB_H) $(TM_P_H) debug.h langhooks.h $(TREE_PASS_H) gt-emit-rtl.h \
   $(DF_H) $(PARAMS_H) $(TARGET_H)
 real.o : real.c $(CONFIG_H) $(SYSTEM_H) coretypes.h $(TM_H) $(TREE_H) \
-  $(DIAGNOSTIC_CORE_H) $(TM_P_H) $(REAL_H) dfp.h realmpfr.h
+  $(DIAGNOSTIC_CORE_H) $(TM_P_H) $(REAL_H) dfp.h realmpfr.h wide-int.h
 realmpfr.o : realmpfr.c realmpfr.h $(CONFIG_H) $(SYSTEM_H) coretypes.h $(REAL_H) $(TREE_H)
 dfp.o : dfp.c dfp.h $(CONFIG_H) $(SYSTEM_H) coretypes.h $(TM_H) $(TREE_H) \
-  $(TM_P_H) $(REAL_H) $(DECNUM_H)
+  $(TM_P_H) $(REAL_H) $(DECNUM_H) wide-int.h
 fixed-value.o: fixed-value.c $(CONFIG_H) $(SYSTEM_H) coretypes.h $(TM_H) \
-  $(TREE_H) $(REAL_H) $(DIAGNOSTIC_CORE_H)
+  $(TREE_H) $(REAL_H) $(DIAGNOSTIC_CORE_H) wide-int.h
 jump.o : jump.c $(CONFIG_H) $(SYSTEM_H) coretypes.h $(TM_H) $(RTL_H) \
   $(FLAGS_H) hard-reg-set.h $(REGS_H) insn-config.h $(RECOG_H) $(EXPR_H) \
   $(EXCEPT_H) $(FUNCTION_H) $(BASIC_BLOCK_H) $(TREE_PASS_H) \
@@ -3079,7 +3087,7 @@ tree-ssa-ccp.o : tree-ssa-ccp.c $(TREE_FLOW_H) $(CONFIG_H) \
   $(BASIC_BLOCK_H) $(TREE_PASS_H) langhooks.h $(PARAMS_H) \
   tree-ssa-propagate.h value-prof.h $(FLAGS_H) $(TARGET_H) \
   $(DIAGNOSTIC_CORE_H) $(HASH_TABLE_H) \
-  $(DBGCNT_H) $(GIMPLE_PRETTY_PRINT_H) gimple-fold.h
+  $(DBGCNT_H) $(GIMPLE_PRETTY_PRINT_H) gimple-fold.h wide-int-print.h
 tree-ssa-strlen.o : tree-ssa-strlen.c $(CONFIG_H) $(SYSTEM_H) coretypes.h \
   $(TREE_FLOW_H) $(TREE_PASS_H) domwalk.h alloc-pool.h tree-ssa-propagate.h \
   $(GIMPLE_PRETTY_PRINT_H) $(PARAMS_H) $(EXPR_H) $(HASH_TABLE_H)
@@ -3427,7 +3435,7 @@ final.o : final.c $(CONFIG_H) $(SYSTEM_H) coretypes.h $(TM_H) $(RTL_ERROR_H) \
   $(TREE_PASS_H) $(BASIC_BLOCK_H) $(TM_P_H) $(TARGET_H) $(EXPR_H) \
   dbxout.h $(CGRAPH_H) $(COVERAGE_H) \
   $(DF_H) $(GGC_H) $(CFGLOOP_H) $(PARAMS_H) $(TREE_FLOW_H) \
-  $(TARGET_DEF_H) $(TREE_PRETTY_PRINT_H)
+  $(TARGET_DEF_H) $(TREE_PRETTY_PRINT_H) wide-int-print.h
 recog.o : recog.c $(CONFIG_H) $(SYSTEM_H) coretypes.h $(TM_H) $(RTL_ERROR_H) \
   $(FUNCTION_H) $(BASIC_BLOCK_H) $(REGS_H) $(RECOG_H) $(EXPR_H) \
   $(FLAGS_H) insn-config.h $(INSN_ATTR_H) reload.h \
@@ -3776,7 +3784,7 @@ s-tm-texi: build/genhooks$(build_exeext) $(srcdir)/doc/tm.texi.in
 GTFILES = $(CPP_ID_DATA_H) $(srcdir)/input.h $(srcdir)/coretypes.h \
   $(host_xm_file_list) \
   $(tm_file_list) $(HASHTAB_H) $(SPLAY_TREE_H) $(srcdir)/bitmap.h \
-  $(srcdir)/alias.h $(srcdir)/coverage.c $(srcdir)/rtl.h \
+  $(srcdir)/wide-int.h $(srcdir)/alias.h $(srcdir)/coverage.c $(srcdir)/rtl.h \
   $(srcdir)/optabs.h $(srcdir)/tree.h $(srcdir)/libfuncs.h $(SYMTAB_H) \
   $(srcdir)/real.h $(srcdir)/function.h $(srcdir)/insn-addr.h $(srcdir)/hwint.h \
   $(srcdir)/fixed-value.h \
@@ -3786,6 +3794,7 @@ GTFILES = $(CPP_ID_DATA_H) $(srcdir)/input.h $(srcdir)/coretypes.h \
   $(srcdir)/alias.c $(srcdir)/bitmap.c $(srcdir)/cselib.c $(srcdir)/cgraph.c \
   $(srcdir)/ipa-prop.c $(srcdir)/ipa-cp.c \
   $(srcdir)/dbxout.c \
+  $(srcdir)/signop.h \
   $(srcdir)/dwarf2out.h \
   $(srcdir)/dwarf2asm.c \
   $(srcdir)/dwarf2cfi.c \
@@ -3984,15 +3993,16 @@ CFLAGS-gengtype-parse.o += -DGENERATOR_FILE
 build/gengtype-parse.o: $(BCONFIG_H)
 gengtype-state.o build/gengtype-state.o: gengtype-state.c $(SYSTEM_H) \
-  gengtype.h errors.h double-int.h version.h $(HASHTAB_H) $(OBSTACK_H) \
-  $(XREGEX_H)
+  gengtype.h errors.h double-int.h version.h $(HASHTAB_H) \
+  $(OBSTACK_H) $(XREGEX_H)
 gengtype-state.o: $(CONFIG_H)
 CFLAGS-gengtype-state.o += -DGENERATOR_FILE
 build/gengtype-state.o: $(BCONFIG_H)
-
+wide-int.h: $(GTM_H) $(TREE_H) hwint.h $(OPTIONS_H) \
+  $(MACHMODE_H) double-int.h dumpfile.h $(REAL_H) signop.h
 gengtype.o build/gengtype.o : gengtype.c $(SYSTEM_H) gengtype.h \
-  rtl.def insn-notes.def errors.h double-int.h version.h $(HASHTAB_H) \
-  $(OBSTACK_H) $(XREGEX_H)
+  rtl.def insn-notes.def errors.h double-int.h version.h \
+  $(HASHTAB_H) $(OBSTACK_H) $(XREGEX_H)
 gengtype.o: $(CONFIG_H)
 CFLAGS-gengtype.o += -DGENERATOR_FILE
 build/gengtype.o: $(BCONFIG_H)
@@ -5299,7 +5309,7 @@ TAGS: lang.tags
 	    incs="$$incs --include $$dir/TAGS.sub"; \
 	  fi; \
 	done; \
-	etags -o TAGS.sub c-family/*.h c-family/*.c *.h *.c; \
+	etags -o TAGS.sub c-family/*.h c-family/*.c *.h *.c *.cc; \
 	etags --include TAGS.sub $$incs)

 # -----------------------------------------------------
diff --git a/gcc/ada/gcc-interface/cuintp.c b/gcc/ada/gcc-interface/cuintp.c
index e077d9ce009..f535c4bdb02 100644
--- a/gcc/ada/gcc-interface/cuintp.c
+++ b/gcc/ada/gcc-interface/cuintp.c
@@ -160,24 +160,24 @@ UI_From_gnu (tree Input)
   Int_Vector vec;

 #if HOST_BITS_PER_WIDE_INT == 64
-  /* On 64-bit hosts, host_integerp tells whether the input fits in a
+  /* On 64-bit hosts, tree_fits_shwi_p tells whether the input fits in a
      signed 64-bit integer.  Then a truncation tells whether it fits in a
      signed 32-bit integer.  */
-  if (host_integerp (Input, 0))
+  if (tree_fits_shwi_p (Input))
     {
-      HOST_WIDE_INT hw_input = TREE_INT_CST_LOW (Input);
+      HOST_WIDE_INT hw_input = tree_to_shwi (Input);
       if (hw_input == (int) hw_input)
	return UI_From_Int (hw_input);
     }
   else
     return No_Uint;
 #else
-  /* On 32-bit hosts, host_integerp tells whether the input fits in a
+  /* On 32-bit hosts, tree_fits_shwi_p tells whether the input fits in a
      signed 32-bit integer.  Then a sign test tells whether it fits in a
      signed 64-bit integer.  */
-  if (host_integerp (Input, 0))
-    return UI_From_Int (TREE_INT_CST_LOW (Input));
-  else if (TREE_INT_CST_HIGH (Input) < 0 && TYPE_UNSIGNED (gnu_type))
+  if (tree_fits_shwi_p (Input))
+    return UI_From_Int (tree_to_shwi (Input));
+  else if (wide_int::lts_p (Input, 0) && TYPE_UNSIGNED (gnu_type))
     return No_Uint;
 #endif

@@ -186,10 +186,9 @@ UI_From_gnu (tree Input)

   for (i = Max_For_Dint - 1; i >= 0; i--)
     {
-      v[i] = tree_low_cst (fold_build1 (ABS_EXPR, gnu_type,
-					fold_build2 (TRUNC_MOD_EXPR, gnu_type,
-						     gnu_temp, gnu_base)),
-			   0);
+      v[i] = tree_to_hwi (fold_build1 (ABS_EXPR, gnu_type,
+				       fold_build2 (TRUNC_MOD_EXPR, gnu_type,
+						    gnu_temp, gnu_base)));
       gnu_temp = fold_build2 (TRUNC_DIV_EXPR, gnu_type, gnu_temp, gnu_base);
     }
diff --git a/gcc/ada/gcc-interface/decl.c b/gcc/ada/gcc-interface/decl.c
index f632a3164e7..17d6203d41a 100644
--- a/gcc/ada/gcc-interface/decl.c
+++ b/gcc/ada/gcc-interface/decl.c
@@ -837,13 +837,13 @@ gnat_to_gnu_entity (Entity_Id gnat_entity, tree gnu_expr, int definition)
	    align_cap = get_mode_alignment (ptr_mode);
	  }

-	if (!host_integerp (TYPE_SIZE (gnu_type), 1)
+	if (!tree_fits_uhwi_p (TYPE_SIZE (gnu_type))
	    || compare_tree_int (TYPE_SIZE (gnu_type), size_cap) > 0)
	  align = 0;
	else if (compare_tree_int (TYPE_SIZE (gnu_type), align_cap) > 0)
	  align = align_cap;
	else
-	  align = ceil_pow2 (tree_low_cst (TYPE_SIZE (gnu_type), 1));
+	  align = ceil_pow2 (tree_to_uhwi (TYPE_SIZE (gnu_type)));

	/* But make sure not to under-align the object.  */
	if (align <= TYPE_ALIGN (gnu_type))
@@ -1478,10 +1478,10 @@ gnat_to_gnu_entity (Entity_Id gnat_entity, tree gnu_expr, int definition)
	    && const_flag
	    && gnu_expr && TREE_CONSTANT (gnu_expr)
	    && AGGREGATE_TYPE_P (gnu_type)
-	    && host_integerp (TYPE_SIZE_UNIT (gnu_type), 1)
+	    && tree_fits_uhwi_p (TYPE_SIZE_UNIT (gnu_type))
	    && !(TYPE_IS_PADDING_P (gnu_type)
-		 && !host_integerp (TYPE_SIZE_UNIT
-				    (TREE_TYPE (TYPE_FIELDS (gnu_type))), 1)))
+		 && !tree_fits_uhwi_p (TYPE_SIZE_UNIT
+				       (TREE_TYPE (TYPE_FIELDS (gnu_type))))))
	  static_p = true;

	/* Now create the variable or the constant and set various flags.  */
@@ -3480,7 +3480,7 @@ gnat_to_gnu_entity (Entity_Id gnat_entity, tree gnu_expr, int definition)
		gnu_size = DECL_SIZE (gnu_old_field);
		if (RECORD_OR_UNION_TYPE_P (gnu_field_type)
		    && !TYPE_FAT_POINTER_P (gnu_field_type)
-		    && host_integerp (TYPE_SIZE (gnu_field_type), 1))
+		    && tree_fits_uhwi_p (TYPE_SIZE (gnu_field_type)))
		  gnu_field_type = make_packable_type (gnu_field_type, true);
	      }

@@ -4521,7 +4521,7 @@ gnat_to_gnu_entity (Entity_Id gnat_entity, tree gnu_expr, int definition)
				     NULL_TREE))
	  {
	    unsigned int size
-	      = TREE_INT_CST_LOW (TYPE_SIZE (gnu_return_type));
+	      = tree_to_hwi (TYPE_SIZE (gnu_return_type));
	    unsigned int i = BITS_PER_UNIT;
	    enum machine_mode mode;

@@ -4899,22 +4899,22 @@ gnat_to_gnu_entity (Entity_Id gnat_entity, tree gnu_expr, int definition)
	    /* Consider an alignment as suspicious if the alignment/size
	       ratio is greater or equal to the byte/bit ratio.  */
-	    if (host_integerp (size, 1)
-		&& align >= TREE_INT_CST_LOW (size) * BITS_PER_UNIT)
+	    if (tree_fits_uhwi_p (size)
+		&& align >= tree_to_uhwi (size) * BITS_PER_UNIT)
	      post_error_ne ("?suspiciously large alignment specified for&",
			     Expression (Alignment_Clause (gnat_entity)),
			     gnat_entity);
	  }
      }
    else if (Is_Atomic (gnat_entity) && !gnu_size
-	     && host_integerp (TYPE_SIZE (gnu_type), 1)
+	     && tree_fits_uhwi_p (TYPE_SIZE (gnu_type))
	     && integer_pow2p (TYPE_SIZE (gnu_type)))
      align = MIN (BIGGEST_ALIGNMENT,
-		   tree_low_cst (TYPE_SIZE (gnu_type), 1));
+		   tree_to_uhwi (TYPE_SIZE (gnu_type)));
    else if (Is_Atomic (gnat_entity) && gnu_size
-	     && host_integerp (gnu_size, 1)
+	     && tree_fits_uhwi_p (gnu_size)
	     && integer_pow2p (gnu_size))
-      align = MIN (BIGGEST_ALIGNMENT, tree_low_cst (gnu_size, 1));
+      align = MIN (BIGGEST_ALIGNMENT, tree_to_uhwi (gnu_size));

    /* See if we need to pad the type.  If we did, and made a record,
       the name of the new type may be changed.  So get it back for
@@ -5560,7 +5560,7 @@ gnat_to_gnu_component_type (Entity_Id gnat_array, bool definition,
      && !Strict_Alignment (gnat_type)
      && RECORD_OR_UNION_TYPE_P (gnu_type)
      && !TYPE_FAT_POINTER_P (gnu_type)
-     && host_integerp (TYPE_SIZE (gnu_type), 1))
+     && tree_fits_uhwi_p (TYPE_SIZE (gnu_type)))
    gnu_type = make_packable_type (gnu_type, false);

  if (Has_Atomic_Components (gnat_array))
@@ -6488,7 +6488,7 @@ gnat_to_gnu_field (Entity_Id gnat_field, tree gnu_record_type, int packed,
  if (!needs_strict_alignment
      && RECORD_OR_UNION_TYPE_P (gnu_field_type)
      && !TYPE_FAT_POINTER_P (gnu_field_type)
-     && host_integerp (TYPE_SIZE (gnu_field_type), 1)
+     && tree_fits_uhwi_p (TYPE_SIZE (gnu_field_type))
      && (packed == 1
	  || (gnu_size
	      && (tree_int_cst_lt (gnu_size, TYPE_SIZE (gnu_field_type))
@@ -7477,11 +7477,11 @@ annotate_value (tree gnu_size)
      if (TREE_CODE (TREE_OPERAND (gnu_size, 1)) == INTEGER_CST)
	{
	  tree op1 = TREE_OPERAND (gnu_size, 1);
-	  double_int signed_op1
-	    = tree_to_double_int (op1).sext (TYPE_PRECISION (sizetype));
-	  if (signed_op1.is_negative ())
+	  wide_int signed_op1
+	    = wide_int::from_tree (op1).sforce_to_size (TYPE_PRECISION (sizetype));
+	  if (signed_op1.neg_p (SIGNED))
	    {
-	      op1 = double_int_to_tree (sizetype, -signed_op1);
+	      op1 = wide_int_to_tree (sizetype, -signed_op1);
	      pre_op1 = annotate_value (build1 (NEGATE_EXPR, sizetype, op1));
	    }
	}
@@ -8348,7 +8348,7 @@ create_field_decl_from (tree old_field, tree field_type, tree record_type,
    {
      tree t = TREE_VALUE (purpose_member (old_field, pos_list));
      tree pos = TREE_VEC_ELT (t, 0), bitpos = TREE_VEC_ELT (t, 2);
-     unsigned int offset_align = tree_low_cst (TREE_VEC_ELT (t, 1), 1);
+     unsigned int offset_align = tree_to_uhwi (TREE_VEC_ELT (t, 1));
      tree new_pos, new_field;
      unsigned int i;
      subst_pair *s;
diff --git a/gcc/ada/gcc-interface/misc.c b/gcc/ada/gcc-interface/misc.c
index 7b168df4e03..5978c9e5099 100644
--- a/gcc/ada/gcc-interface/misc.c
+++ b/gcc/ada/gcc-interface/misc.c
@@ -591,7 +591,7 @@ gnat_type_max_size (const_tree gnu_type)
  /* If we don't have a constant, see what we can get from TYPE_ADA_SIZE,
     which should stay untouched.  */
-  if (!host_integerp (max_unitsize, 1)
+  if (!tree_fits_uhwi_p (max_unitsize)
      && RECORD_OR_UNION_TYPE_P (gnu_type)
      && !TYPE_FAT_POINTER_P (gnu_type)
      && TYPE_ADA_SIZE (gnu_type))
@@ -600,7 +600,7 @@ gnat_type_max_size (const_tree gnu_type)
      /* If we have succeeded in finding a constant, round it up to the
	 type's alignment and return the result in units.  */
-      if (host_integerp (max_adasize, 1))
+      if (tree_fits_uhwi_p (max_adasize))
	max_unitsize
	  = size_binop (CEIL_DIV_EXPR,
			round_up (max_adasize, TYPE_ALIGN (gnu_type)),
diff --git a/gcc/ada/gcc-interface/trans.c b/gcc/ada/gcc-interface/trans.c
index 5c1038ab80a..a20078993c9 100644
--- a/gcc/ada/gcc-interface/trans.c
+++ b/gcc/ada/gcc-interface/trans.c
@@ -4196,7 +4196,7 @@ Call_to_gnu (Node_Id gnat_node, tree *gnu_result_type_p, tree gnu_target,
		gnu_actual
		  = unchecked_convert (DECL_ARG_TYPE (gnu_formal),
				       convert (gnat_type_for_size
-						(TREE_INT_CST_LOW (gnu_size), 1),
+						(tree_to_hwi (gnu_size), 1),
						integer_zero_node),
				       false);
	      else
diff --git a/gcc/ada/gcc-interface/utils.c b/gcc/ada/gcc-interface/utils.c
index 409c0dee94f..3e6bba4dd69 100644
--- a/gcc/ada/gcc-interface/utils.c
+++ b/gcc/ada/gcc-interface/utils.c
@@ -754,7 +754,7 @@ make_aligning_type (tree type, unsigned int align, tree size,
 tree
 make_packable_type (tree type, bool in_record)
 {
-  unsigned HOST_WIDE_INT size = tree_low_cst (TYPE_SIZE (type), 1);
+  unsigned HOST_WIDE_INT size = tree_to_uhwi (TYPE_SIZE (type));
   unsigned HOST_WIDE_INT new_size;
   tree new_type, old_field, field_list = NULL_TREE;
   unsigned int align;
@@ -789,12 +789,12 @@ make_packable_type (tree type, bool in_record)

      /* Do not try to shrink the size if the RM size is not constant.  */
      if (TYPE_CONTAINS_TEMPLATE_P (type)
-	  || !host_integerp (TYPE_ADA_SIZE (type), 1))
+	  || !tree_fits_uhwi_p (TYPE_ADA_SIZE (type)))
	return type;

      /* Round the RM size up to a unit boundary to get the minimal size
	 for a BLKmode record.  Give up if it's already the size.  */
-      new_size = TREE_INT_CST_LOW (TYPE_ADA_SIZE (type));
+      new_size = tree_to_uhwi (TYPE_ADA_SIZE (type));
      new_size = (new_size + BITS_PER_UNIT - 1) & -BITS_PER_UNIT;
      if (new_size == size)
	return type;
@@ -815,7 +815,7 @@ make_packable_type (tree type, bool in_record)

      if (RECORD_OR_UNION_TYPE_P (new_field_type)
	  && !TYPE_FAT_POINTER_P (new_field_type)
-	  && host_integerp (TYPE_SIZE (new_field_type), 1))
+	  && tree_fits_uhwi_p (TYPE_SIZE (new_field_type)))
	new_field_type = make_packable_type (new_field_type, true);

      /* However, for the last field in a not already packed record type
@@ -898,10 +898,10 @@ make_type_from_size (tree type, tree size_tree, bool for_biased)

  /* If size indicates an error, just return TYPE to avoid propagating
     the error.  Likewise if it's too large to represent.  */
-  if (!size_tree || !host_integerp (size_tree, 1))
+  if (!size_tree || !tree_fits_uhwi_p (size_tree))
    return type;

-  size = tree_low_cst (size_tree, 1);
+  size = tree_to_uhwi (size_tree);

  switch (TREE_CODE (type))
    {
@@ -1726,10 +1726,10 @@ rest_of_record_type_compilation (tree record_type)
	  pos = compute_related_constant (curpos, last_pos);

	  if (!pos && TREE_CODE (curpos) == MULT_EXPR
-	      && host_integerp (TREE_OPERAND (curpos, 1), 1))
+	      && tree_fits_uhwi_p (TREE_OPERAND (curpos, 1)))
	    {
	      tree offset = TREE_OPERAND (curpos, 0);
-	      align = tree_low_cst (TREE_OPERAND (curpos, 1), 1);
+	      align = tree_to_uhwi (TREE_OPERAND (curpos, 1));

	      /* An offset which is a bitwise AND with a mask increases the
		 alignment according to the number of trailing zeros.  */
@@ -1738,7 +1738,7 @@ rest_of_record_type_compilation (tree record_type)
		  && TREE_CODE (TREE_OPERAND (offset, 1)) == INTEGER_CST)
		{
		  unsigned HOST_WIDE_INT mask
-		    = TREE_INT_CST_LOW (TREE_OPERAND (offset, 1));
+		    = tree_to_hwi (TREE_OPERAND (offset, 1));
		  unsigned int i;

		  for (i = 0; i < HOST_BITS_PER_WIDE_INT; i++)
@@ -1756,13 +1756,12 @@ rest_of_record_type_compilation (tree record_type)
	  else if (!pos && TREE_CODE (curpos) == PLUS_EXPR
		   && TREE_CODE (TREE_OPERAND (curpos, 1)) == INTEGER_CST
		   && TREE_CODE (TREE_OPERAND (curpos, 0)) == MULT_EXPR
-		   && host_integerp (TREE_OPERAND
-				     (TREE_OPERAND (curpos, 0), 1),
-				     1))
+		   && tree_fits_uhwi_p (TREE_OPERAND
+					(TREE_OPERAND (curpos, 0), 1)))
	    {
	      align
-		= tree_low_cst
-		  (TREE_OPERAND (TREE_OPERAND (curpos, 0), 1), 1);
+		= tree_to_uhwi
+		  (TREE_OPERAND (TREE_OPERAND (curpos, 0), 1));
	      pos = compute_related_constant (curpos,
					      round_up (last_pos, align));
	    }
@@ -2379,8 +2378,8 @@ create_field_decl (tree field_name, tree field_type, tree record_type,
	 that an alignment of 0 is taken as infinite.  */
      unsigned int known_align;

-      if (host_integerp (pos, 1))
-	known_align = tree_low_cst (pos, 1) & - tree_low_cst (pos, 1);
+      if (tree_fits_uhwi_p (pos))
+	known_align = tree_to_uhwi (pos) & - tree_to_uhwi (pos);
      else
	known_align = BITS_PER_UNIT;
@@ -2390,7 +2389,7 @@ create_field_decl (tree field_name, tree field_type, tree record_type,
      layout_decl (field_decl, known_align);
      SET_DECL_OFFSET_ALIGN (field_decl,
-			     host_integerp (pos, 1) ? BIGGEST_ALIGNMENT
+			     tree_fits_uhwi_p (pos) ? BIGGEST_ALIGNMENT
			     : BITS_PER_UNIT);
      pos_from_bit (&DECL_FIELD_OFFSET (field_decl),
		    &DECL_FIELD_BIT_OFFSET (field_decl),
@@ -2550,8 +2549,8 @@ invalidate_global_renaming_pointers (void)
 bool
 value_factor_p (tree value, HOST_WIDE_INT factor)
 {
-  if (host_integerp (value, 1))
-    return tree_low_cst (value, 1) % factor == 0;
+  if (tree_fits_uhwi_p (value))
+    return tree_to_uhwi (value) % factor == 0;

  if (TREE_CODE (value) == MULT_EXPR)
    return (value_factor_p (TREE_OPERAND (value, 0), factor)
@@ -2584,16 +2583,16 @@ potential_alignment_gap (tree prev_field, tree curr_field, tree offset)
  /* If the distance between the end of prev_field and the beginning of
     curr_field is constant, then there is a gap if the value of this
     constant is not null.  */
-  if (offset && host_integerp (offset, 1))
+  if (offset && tree_fits_uhwi_p (offset))
    return !integer_zerop (offset);

  /* If the size and position of the previous field are constant,
     then check the sum of this size and position.  There will be a gap
     iff it is not multiple of the current field alignment.  */
-  if (host_integerp (DECL_SIZE (prev_field), 1)
-      && host_integerp (bit_position (prev_field), 1))
-    return ((tree_low_cst (bit_position (prev_field), 1)
-	     + tree_low_cst (DECL_SIZE (prev_field), 1))
+  if (tree_fits_uhwi_p (DECL_SIZE (prev_field))
+      && tree_fits_uhwi_p (bit_position (prev_field)))
+    return ((tree_to_uhwi (bit_position (prev_field))
+	     + tree_to_uhwi (DECL_SIZE (prev_field)))
	    % DECL_ALIGN (curr_field) != 0);

  /* If both the position and size of the previous field are multiples
@@ -3250,7 +3249,7 @@ build_vms_descriptor32 (tree type, Mechanism_Type mech, Entity_Id gnat_entity)
    case ENUMERAL_TYPE:
    case BOOLEAN_TYPE:
      if (TYPE_VAX_FLOATING_POINT_P (type))
-	switch (tree_low_cst (TYPE_DIGITS_VALUE (type), 1))
+	switch (tree_to_uhwi (TYPE_DIGITS_VALUE (type)))
	  {
	  case 6:
	    dtype = 10;
@@ -3290,7 +3289,7 @@ build_vms_descriptor32 (tree type, Mechanism_Type mech, Entity_Id gnat_entity)
    case COMPLEX_TYPE:
      if (TREE_CODE (TREE_TYPE (type)) == INTEGER_TYPE
	  && TYPE_VAX_FLOATING_POINT_P (type))
-	switch (tree_low_cst (TYPE_DIGITS_VALUE (type), 1))
+	switch (tree_to_uhwi (TYPE_DIGITS_VALUE (type)))
	  {
	  case 6:
	    dtype = 12;
@@ -3551,7 +3550,7 @@ build_vms_descriptor (tree type, Mechanism_Type mech, Entity_Id gnat_entity)
    case ENUMERAL_TYPE:
    case BOOLEAN_TYPE:
      if (TYPE_VAX_FLOATING_POINT_P (type))
-	switch (tree_low_cst (TYPE_DIGITS_VALUE (type), 1))
+	switch (tree_to_uhwi (TYPE_DIGITS_VALUE (type)))
	  {
	  case 6:
	    dtype = 10;
@@ -3591,7 +3590,7 @@ build_vms_descriptor (tree type, Mechanism_Type mech, Entity_Id gnat_entity)
    case COMPLEX_TYPE:
      if (TREE_CODE (TREE_TYPE (type)) == INTEGER_TYPE
	  && TYPE_VAX_FLOATING_POINT_P (type))
-	switch (tree_low_cst (TYPE_DIGITS_VALUE (type), 1))
+	switch (tree_to_uhwi (TYPE_DIGITS_VALUE (type)))
	  {
	  case 6:
	    dtype = 12;
@@ -3845,7 +3844,7 @@ convert_vms_descriptor64 (tree gnu_type, tree gnu_expr, Entity_Id gnat_subprog)
  tree max_field = DECL_CHAIN (TYPE_FIELDS (template_type));
  tree template_tree, template_addr, aflags, dimct, t, u;
  /* See the head comment of build_vms_descriptor.  */
-  int iklass = TREE_INT_CST_LOW (DECL_INITIAL (klass));
+  int iklass = tree_to_hwi (DECL_INITIAL (klass));
  tree lfield, ufield;
  vec<constructor_elt, va_gc> *v;
@@ -3999,7 +3998,7 @@ convert_vms_descriptor32 (tree gnu_type, tree gnu_expr, Entity_Id gnat_subprog)
  tree max_field = DECL_CHAIN (TYPE_FIELDS (template_type));
  tree template_tree, template_addr, aflags, dimct, t, u;
  /* See the head comment of build_vms_descriptor.  */
-  int iklass = TREE_INT_CST_LOW (DECL_INITIAL (klass));
+  int iklass = tree_to_hwi (DECL_INITIAL (klass));
  vec<constructor_elt, va_gc> *v;

  /* Convert POINTER to the pointer-to-array type.  */
@@ -5283,7 +5282,7 @@ unchecked_convert (tree type, tree expr, bool notrunc_p)
	   GET_MODE_BITSIZE (TYPE_MODE (type))))
    {
      tree rec_type = make_node (RECORD_TYPE);
-      unsigned HOST_WIDE_INT prec = TREE_INT_CST_LOW (TYPE_RM_SIZE (type));
+      unsigned HOST_WIDE_INT prec = tree_to_hwi (TYPE_RM_SIZE (type));
      tree field_type, field;

      if (TYPE_UNSIGNED (type))
@@ -5312,7 +5311,7 @@ unchecked_convert (tree type, tree expr, bool notrunc_p)
	   GET_MODE_BITSIZE (TYPE_MODE (etype))))
    {
      tree rec_type = make_node (RECORD_TYPE);
-      unsigned HOST_WIDE_INT prec = TREE_INT_CST_LOW (TYPE_RM_SIZE (etype));
+      unsigned HOST_WIDE_INT prec = tree_to_hwi (TYPE_RM_SIZE (etype));
      vec<constructor_elt, va_gc> *v;
      vec_alloc (v, 1);
      tree field_type, field;
@@ -6031,11 +6030,10 @@ static bool
 get_nonnull_operand (tree arg_num_expr, unsigned HOST_WIDE_INT *valp)
 {
  /* Verify the arg number is a constant.  */
-  if (TREE_CODE (arg_num_expr) != INTEGER_CST
-      || TREE_INT_CST_HIGH (arg_num_expr) != 0)
+  if (!cst_fits_uhwi_p (arg_num_expr))
    return false;

-  *valp = TREE_INT_CST_LOW (arg_num_expr);
+  *valp = tree_to_hwi (arg_num_expr);
  return true;
 }
@@ -6272,7 +6270,7 @@ handle_vector_size_attribute (tree *node, tree name, tree args,

  size = TREE_VALUE (args);

-  if (!host_integerp (size, 1))
+  if (!tree_fits_uhwi_p (size))
    {
      warning (OPT_Wattributes, "%qs attribute ignored",
	       IDENTIFIER_POINTER (name));
@@ -6280,7 +6278,7 @@ handle_vector_size_attribute (tree *node, tree name, tree args,
    }

  /* Get the vector size (in bytes).  */
-  vecsize = tree_low_cst (size, 1);
+  vecsize = tree_to_uhwi (size);

  /* We need to provide for vector pointers, vector arrays, and
     functions returning vectors.  For example:
@@ -6304,7 +6302,7 @@ handle_vector_size_attribute (tree *node, tree name, tree args,
      || (!SCALAR_FLOAT_MODE_P (orig_mode)
	  && GET_MODE_CLASS (orig_mode) != MODE_INT
	  && !ALL_SCALAR_FIXED_POINT_MODE_P (orig_mode))
-      || !host_integerp (TYPE_SIZE_UNIT (type), 1)
+      || !tree_fits_uhwi_p (TYPE_SIZE_UNIT (type))
      || TREE_CODE (type) == BOOLEAN_TYPE)
    {
      error ("invalid vector type for attribute %qs",
@@ -6312,7 +6310,7 @@ handle_vector_size_attribute (tree *node, tree name, tree args,
      return NULL_TREE;
    }

-  if (vecsize % tree_low_cst (TYPE_SIZE_UNIT (type), 1))
+  if (vecsize % tree_to_uhwi (TYPE_SIZE_UNIT (type)))
    {
      error ("vector size not an integral multiple of component size");
      return NULL;
@@ -6325,7 +6323,7 @@ handle_vector_size_attribute (tree *node, tree name, tree args,
    }

  /* Calculate how many units fit in the vector.  */
-  nunits = vecsize / tree_low_cst (TYPE_SIZE_UNIT (type), 1);
+  nunits = vecsize / tree_to_uhwi (TYPE_SIZE_UNIT (type));
  if (nunits & (nunits - 1))
    {
      error ("number of components of the vector not a power of two");
@@ -6373,7 +6371,7 @@ handle_vector_type_attribute (tree *node, tree name, tree ARG_UNUSED (args),
     bases, and this attribute is for binding implementors, not end-users, so
     we should never get there from legitimate explicit uses.  */

-  if (!host_integerp (rep_size, 1))
+  if (!tree_fits_uhwi_p (rep_size))
    return NULL_TREE;

  /* Get the element type/mode and check this is something we know
@@ -6388,7 +6386,7 @@ handle_vector_type_attribute (tree *node, tree name, tree ARG_UNUSED (args),
      || (!SCALAR_FLOAT_MODE_P (elem_mode)
	  && GET_MODE_CLASS (elem_mode) != MODE_INT
	  && !ALL_SCALAR_FIXED_POINT_MODE_P (elem_mode))
-      || !host_integerp (TYPE_SIZE_UNIT (elem_type), 1))
+      || !tree_fits_uhwi_p (TYPE_SIZE_UNIT (elem_type)))
    {
      error ("invalid element type for attribute %qs",
	     IDENTIFIER_POINTER (name));
@@ -6397,9 +6395,9 @@ handle_vector_type_attribute (tree *node, tree name, tree ARG_UNUSED (args),

  /* Sanity check the vector size and element type consistency.  */

-  vec_bytes = tree_low_cst (rep_size, 1);
+  vec_bytes = tree_to_uhwi (rep_size);

-  if (vec_bytes % tree_low_cst (TYPE_SIZE_UNIT (elem_type), 1))
+  if (vec_bytes % tree_to_uhwi (TYPE_SIZE_UNIT (elem_type)))
    {
      error ("vector size not an integral multiple of component size");
      return NULL;
@@ -6411,7 +6409,7 @@ handle_vector_type_attribute (tree *node, tree name, tree ARG_UNUSED (args),
      return NULL;
    }

-  vec_units = vec_bytes / tree_low_cst (TYPE_SIZE_UNIT (elem_type), 1);
+  vec_units = vec_bytes / tree_to_uhwi (TYPE_SIZE_UNIT (elem_type));
  if (vec_units & (vec_units - 1))
    {
      error ("number of components of the vector not a power of two");
diff --git a/gcc/ada/gcc-interface/utils2.c b/gcc/ada/gcc-interface/utils2.c
index 64f7564a75d..ec986158a7a 100644
--- a/gcc/ada/gcc-interface/utils2.c
+++ b/gcc/ada/gcc-interface/utils2.c
@@ -119,7 +119,7 @@ known_alignment (tree exp)

    case INTEGER_CST:
      {
-	unsigned HOST_WIDE_INT c = TREE_INT_CST_LOW (exp);
+	unsigned HOST_WIDE_INT c = tree_to_hwi (exp);
	/* The first part of this represents the lowest bit in the constant,
	   but it is originally in bytes, not bits.  */
	this_alignment = MIN (BITS_PER_UNIT * (c & -c), BIGGEST_ALIGNMENT);
@@ -626,7 +626,7 @@ nonbinary_modular_operation (enum tree_code op_code, tree type, tree lhs,
 static unsigned int
 resolve_atomic_size (tree type)
 {
-  unsigned HOST_WIDE_INT size = tree_low_cst (TYPE_SIZE_UNIT (type), 1);
+  unsigned HOST_WIDE_INT size = tree_to_uhwi (TYPE_SIZE_UNIT (type));

  if (size == 1 || size == 2 || size == 4 || size == 8 || size == 16)
    return size;
diff --git a/gcc/alias.c b/gcc/alias.c
index 18445794f9e..c52a9864ae2 100644
--- a/gcc/alias.c
+++ b/gcc/alias.c
@@ -338,9 +338,10 @@ ao_ref_from_mem (ao_ref *ref, const_rtx mem)
  if (MEM_EXPR (mem) != get_spill_slot_decl (false)
      && (ref->offset < 0
	  || (DECL_P (ref->base)
-	      && (!host_integerp (DECL_SIZE (ref->base), 1)
-		  || (TREE_INT_CST_LOW (DECL_SIZE ((ref->base)))
-		      < (unsigned HOST_WIDE_INT)(ref->offset + ref->size))))))
+	      && (DECL_SIZE (ref->base) == NULL_TREE
+		  || TREE_CODE (DECL_SIZE (ref->base)) != INTEGER_CST
+		  || wide_int::ltu_p (DECL_SIZE (ref->base),
+				      ref->offset + ref->size)))))
    return false;

  return true;
@@ -1473,9 +1474,7 @@ rtx_equal_for_memref_p (const_rtx x, const_rtx y)

    case VALUE:
    CASE_CONST_UNIQUE:
-      /* There's no need to compare the contents of CONST_DOUBLEs or
-	 CONST_INTs because pointer equality is a good enough
-	 comparison for these nodes.  */
+      /* Pointer equality guarantees equality for these nodes.  */
      return 0;

    default:
@@ -2278,15 +2277,23 @@ adjust_offset_for_component_ref (tree x, bool *known_p,
    {
      tree xoffset = component_ref_field_offset (x);
      tree field = TREE_OPERAND (x, 1);
+      addr_wide_int woffset;
+      if (TREE_CODE (xoffset) != INTEGER_CST)
+	{
+	  *known_p = false;
+	  return;
+	}
+
+      woffset = xoffset;
+      woffset += (addr_wide_int (DECL_FIELD_BIT_OFFSET (field))
+		  .udiv_trunc (BITS_PER_UNIT));

-      if (! host_integerp (xoffset, 1))
+      if (!woffset.fits_uhwi_p ())
	{
	  *known_p = false;
	  return;
	}
-      *offset += (tree_low_cst (xoffset, 1)
-		  + (tree_low_cst (DECL_FIELD_BIT_OFFSET (field), 1)
-		     / BITS_PER_UNIT));
+      *offset += woffset.to_uhwi ();

      x = TREE_OPERAND (x, 0);
    }
diff --git a/gcc/asan.c b/gcc/asan.c
index 1756b07805f..72675d05f94 100644
--- a/gcc/asan.c
+++ b/gcc/asan.c
@@ -1997,7 +1997,7 @@ asan_add_global (tree decl, tree type, vec<constructor_elt, va_gc> *v)
  CONSTRUCTOR_APPEND_ELT (vinner, NULL_TREE,
			  fold_convert (const_ptr_type_node,
					build_fold_addr_expr (refdecl)));
-  size = tree_low_cst (DECL_SIZE_UNIT (decl), 1);
+  size = tree_to_uhwi (DECL_SIZE_UNIT (decl));
  CONSTRUCTOR_APPEND_ELT (vinner, NULL_TREE, build_int_cst (uptr, size));
  size += asan_red_zone_size (size);
  CONSTRUCTOR_APPEND_ELT (vinner, NULL_TREE, build_int_cst (uptr, size));
diff --git a/gcc/builtins.c b/gcc/builtins.c
index 78b0d842cc0..49a0b442c8a 100644
--- a/gcc/builtins.c
+++ b/gcc/builtins.c
@@ -339,8 +339,8 @@ get_object_alignment_2 (tree exp, unsigned int *alignp,
      if (TREE_CODE (addr) == BIT_AND_EXPR
	  && TREE_CODE (TREE_OPERAND (addr, 1)) == INTEGER_CST)
	{
-	  align = (TREE_INT_CST_LOW (TREE_OPERAND (addr, 1))
-		   & -TREE_INT_CST_LOW (TREE_OPERAND (addr, 1)));
+	  align = (tree_to_hwi (TREE_OPERAND (addr, 1))
+		   & -tree_to_hwi (TREE_OPERAND (addr, 1)));
	  align *= BITS_PER_UNIT;
	  addr = TREE_OPERAND (addr, 0);
	}
@@ -357,7 +357,7 @@ get_object_alignment_2 (tree exp, unsigned int *alignp,
	    {
	      unsigned HOST_WIDE_INT step = 1;
	      if (TMR_STEP (exp))
-		step = TREE_INT_CST_LOW (TMR_STEP (exp));
+		step = tree_to_hwi (TMR_STEP (exp));
	      align = MIN (align, (step & -step) * BITS_PER_UNIT);
	    }
	  if (TMR_INDEX2 (exp))
@@ -379,7 +379,7 @@ get_object_alignment_2 (tree exp, unsigned int *alignp,
	  bitpos += ptr_bitpos;
	  if (TREE_CODE (exp) == MEM_REF
	      || TREE_CODE (exp) == TARGET_MEM_REF)
-	    bitpos += mem_ref_offset (exp).low * BITS_PER_UNIT;
+	    bitpos += mem_ref_offset (exp).to_short_addr () * BITS_PER_UNIT;
	}
    }
  else if (TREE_CODE (exp) == STRING_CST)
@@ -408,23 +408,23 @@ get_object_alignment_2 (tree exp, unsigned int *alignp,
	}
      else
	next_offset = NULL;
-      if (host_integerp (offset, 1))
+      if (tree_fits_uhwi_p (offset))
	{
	  /* Any overflow in calculating offset_bits won't change
	     the alignment.  */
	  unsigned offset_bits
-	    = ((unsigned) tree_low_cst (offset, 1) * BITS_PER_UNIT);
+	    = ((unsigned) tree_to_uhwi (offset) * BITS_PER_UNIT);

	  if (offset_bits)
	    inner = MIN (inner, (offset_bits & -offset_bits));
	}
      else if (TREE_CODE (offset) == MULT_EXPR
-	       && host_integerp (TREE_OPERAND (offset, 1), 1))
+	       && tree_fits_uhwi_p (TREE_OPERAND (offset, 1)))
	{
	  /* Any overflow in calculating offset_factor won't change
	     the alignment.  */
	  unsigned offset_factor
-	    = ((unsigned) tree_low_cst (TREE_OPERAND (offset, 1), 1)
+	    = ((unsigned) tree_to_uhwi (TREE_OPERAND (offset, 1))
	       * BITS_PER_UNIT);

	  if (offset_factor)
@@ -515,7 +515,7 @@ get_pointer_alignment_1 (tree exp, unsigned int *alignp,
  else if (TREE_CODE (exp) == INTEGER_CST)
    {
      *alignp = BIGGEST_ALIGNMENT;
-      *bitposp = ((TREE_INT_CST_LOW (exp) * BITS_PER_UNIT)
+      *bitposp = ((tree_to_hwi (exp) * BITS_PER_UNIT)
		  & (BIGGEST_ALIGNMENT - 1));
      return true;
    }
@@ -624,10 +624,10 @@ c_strlen (tree src, int only_value)
     a null character if we can represent it as a single HOST_WIDE_INT.  */
  if (offset_node == 0)
    offset = 0;
-  else if (! host_integerp (offset_node, 0))
+  else if (!tree_fits_shwi_p (offset_node))
    offset = -1;
  else
-    offset = tree_low_cst (offset_node, 0);
+    offset = tree_to_shwi (offset_node);

  /* If the offset is known to be out of bounds, warn, and call strlen at
     runtime.  */
@@ -665,27 +665,31 @@ c_getstr (tree src)

  if (offset_node == 0)
    return TREE_STRING_POINTER (src);
-  else if (!host_integerp (offset_node, 1)
+  else if (!tree_fits_uhwi_p (offset_node)
	   || compare_tree_int (offset_node,
				TREE_STRING_LENGTH (src) - 1) > 0)
    return 0;

-  return TREE_STRING_POINTER (src) + tree_low_cst (offset_node, 1);
+  return TREE_STRING_POINTER (src) + tree_to_uhwi (offset_node);
 }

-/* Return a CONST_INT or CONST_DOUBLE corresponding to target reading
+/* Return a constant integer corresponding to target reading
   GET_MODE_BITSIZE (MODE) bits from string constant STR.  */

 static rtx
 c_readstr (const char *str, enum machine_mode mode)
 {
-  HOST_WIDE_INT c[2];
+  wide_int c;
   HOST_WIDE_INT ch;
   unsigned int i, j;
+  HOST_WIDE_INT tmp[MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_WIDE_INT];
+  unsigned int len = (GET_MODE_PRECISION (mode) + HOST_BITS_PER_WIDE_INT - 1)
+    / HOST_BITS_PER_WIDE_INT;
+
+  for (i = 0; i < len; i++)
+    tmp[i] = 0;

   gcc_assert (GET_MODE_CLASS (mode) == MODE_INT);

-  c[0] = 0;
-  c[1] = 0;
   ch = 1;
   for (i = 0; i < GET_MODE_SIZE (mode); i++)
     {
@@ -696,13 +700,14 @@ c_readstr (const char *str, enum machine_mode mode)
	  && GET_MODE_SIZE (mode) >= UNITS_PER_WORD)
	j = j + UNITS_PER_WORD - 2 * (j % UNITS_PER_WORD) - 1;
      j *= BITS_PER_UNIT;
-      gcc_assert (j < HOST_BITS_PER_DOUBLE_INT);

      if (ch)
	ch = (unsigned char) str[i];
-      c[j / HOST_BITS_PER_WIDE_INT] |= ch << (j % HOST_BITS_PER_WIDE_INT);
+      tmp[j / HOST_BITS_PER_WIDE_INT] |= ch << (j % HOST_BITS_PER_WIDE_INT);
     }
-  return immed_double_const (c[0], c[1], mode);
+
+  c = wide_int::from_array (tmp, len, GET_MODE_PRECISION (mode));
+  return immed_wide_int_const (c, mode);
 }

 /* Cast a target constant CST to target CHAR and if that value fits into
@@ -718,7 +723,9 @@ target_char_cast (tree cst, char *p)
      || CHAR_TYPE_SIZE > HOST_BITS_PER_WIDE_INT)
    return 1;

-  val = TREE_INT_CST_LOW (cst);
+  /* Do not care if it fits or not right here.  */
+  val = tree_to_hwi (cst);
+
  if (CHAR_TYPE_SIZE < HOST_BITS_PER_WIDE_INT)
    val &= (((unsigned HOST_WIDE_INT) 1) << CHAR_TYPE_SIZE) - 1;
@@ -3179,7 +3186,7 @@ expand_builtin_mempcpy_args (tree dest, tree src, tree len,
	return NULL_RTX;

      /* If LEN is not constant, call the normal function.  */
-      if (! host_integerp (len, 1))
+      if (! tree_fits_uhwi_p (len))
	return NULL_RTX;

      len_rtx = expand_normal (len);
@@ -3414,7 +3421,7 @@ expand_builtin_strncpy (tree exp, rtx target)
      tree slen = c_strlen (src, 1);

      /* We must be passed a constant len and src parameter.  */
-      if (!host_integerp (len, 1) || !slen || !host_integerp (slen, 1))
+      if (!tree_fits_uhwi_p (len) || !slen || !tree_fits_uhwi_p (slen))
	return NULL_RTX;

      slen = size_binop_loc (loc, PLUS_EXPR, slen, ssize_int (1));
@@ -3428,15 +3435,15 @@ expand_builtin_strncpy (tree exp, rtx target)
	  const char *p = c_getstr (src);
	  rtx dest_mem;

-	  if (!p || dest_align == 0 || !host_integerp (len, 1)
-	      || !can_store_by_pieces (tree_low_cst (len, 1),
+	  if (!p || dest_align == 0 || !tree_fits_uhwi_p (len)
+	      || !can_store_by_pieces (tree_to_uhwi (len),
				       builtin_strncpy_read_str,
				       CONST_CAST (char *, p),
				       dest_align, false))
	    return NULL_RTX;

	  dest_mem = get_memory_rtx (dest, len);
-	  store_by_pieces (dest_mem, tree_low_cst (len, 1),
+	  store_by_pieces (dest_mem, tree_to_uhwi (len),
			   builtin_strncpy_read_str,
			   CONST_CAST (char *, p), dest_align, false, 0);
	  dest_mem = force_operand (XEXP (dest_mem, 0), target);
@@ -3569,13 +3576,13 @@ expand_builtin_memset_args (tree dest, tree val, tree len,
       * the coefficients by pieces (in the required modes).
       * We can't pass builtin_memset_gen_str as that emits RTL.  */
      c = 1;
-      if (host_integerp (len, 1)
-	  && can_store_by_pieces (tree_low_cst (len, 1),
+      if (tree_fits_uhwi_p (len)
+	  && can_store_by_pieces (tree_to_uhwi (len),
				  builtin_memset_read_str, &c, dest_align,
				  true))
	{
	  val_rtx = force_reg (val_mode, val_rtx);
-	  store_by_pieces (dest_mem, tree_low_cst (len, 1),
+	  store_by_pieces (dest_mem, tree_to_uhwi (len),
			   builtin_memset_gen_str, val_rtx, dest_align,
			   true, 0);
	}
@@ -3594,11 +3601,11 @@ expand_builtin_memset_args (tree dest, tree val, tree len,

  if (c)
    {
-      if (host_integerp (len, 1)
-	  && can_store_by_pieces (tree_low_cst (len, 1),
+      if (tree_fits_uhwi_p (len)
+	  && can_store_by_pieces (tree_to_uhwi (len),
				  builtin_memset_read_str, &c, dest_align,
				  true))
-	store_by_pieces (dest_mem, tree_low_cst (len, 1),
+	store_by_pieces (dest_mem, tree_to_uhwi (len),
			 builtin_memset_read_str, &c, dest_align, true, 0);
      else if (!set_storage_via_setmem (dest_mem, len_rtx,
					gen_int_mode (c, val_mode),
@@ -4503,7 +4510,7 @@ expand_builtin_frame_address (tree fndecl, tree exp)
  if (call_expr_nargs (exp) == 0)
    /* Warning about missing arg was already issued.  */
    return const0_rtx;
-  else if (! host_integerp (CALL_EXPR_ARG (exp, 0), 1))
+  else if (! tree_fits_uhwi_p (CALL_EXPR_ARG (exp, 0)))
    {
      if (DECL_FUNCTION_CODE (fndecl) == BUILT_IN_FRAME_ADDRESS)
	error ("invalid argument to %<__builtin_frame_address%>");
@@ -4515,7 +4522,7 @@ expand_builtin_frame_address (tree fndecl, tree exp)
    {
      rtx tem
	= expand_builtin_return_addr (DECL_FUNCTION_CODE (fndecl),
-				      tree_low_cst (CALL_EXPR_ARG (exp, 0), 1));
+				      tree_to_uhwi (CALL_EXPR_ARG (exp, 0)));

      /* Some ports cannot access arbitrary stack frames.  */
      if (tem == NULL)
@@ -4569,7 +4576,7 @@ expand_builtin_alloca (tree exp, bool cannot_accumulate)

  /* Compute the alignment.  */
  align = (alloca_with_align
-	   ? TREE_INT_CST_LOW (CALL_EXPR_ARG (exp, 1))
+	   ? tree_to_hwi (CALL_EXPR_ARG (exp, 1))
	   : BIGGEST_ALIGNMENT);

  /* Allocate the desired space.  */
@@ -5010,12 +5017,13 @@ expand_builtin_signbit (tree exp, rtx target)

  if (bitpos < GET_MODE_BITSIZE (rmode))
    {
-      double_int mask = double_int_zero.set_bit (bitpos);
+      wide_int mask = wide_int::set_bit_in_zero (bitpos,
+						 GET_MODE_PRECISION (rmode));

      if (GET_MODE_SIZE (imode) > GET_MODE_SIZE (rmode))
	temp = gen_lowpart (rmode, temp);
      temp = expand_binop (rmode, and_optab, temp,
-			   immed_double_int_const (mask, rmode),
+			   immed_wide_int_const (mask, rmode),
			   NULL_RTX, 1, OPTAB_LIB_WIDEN);
    }
  else
@@ -5401,7 +5409,7 @@ expand_builtin_atomic_compare_exchange (enum machine_mode mode, tree exp,
  weak = CALL_EXPR_ARG (exp, 3);
  is_weak = false;
-  if (host_integerp (weak, 0) && tree_low_cst (weak, 0) != 0)
+  if (tree_fits_shwi_p (weak) && tree_to_shwi (weak) != 0)
    is_weak = true;

  oldval = expect;
@@ -8031,8 +8039,9 @@ fold_builtin_int_roundingfn (location_t loc, tree fndecl, tree arg)
    {
      tree itype = TREE_TYPE (TREE_TYPE (fndecl));
      tree ftype = TREE_TYPE (arg);
-      double_int val;
+      wide_int val;
      REAL_VALUE_TYPE r;
+      bool fail = false;

      switch (DECL_FUNCTION_CODE (fndecl))
	{
@@ -8058,9 +8067,10 @@ fold_builtin_int_roundingfn (location_t loc, tree fndecl, tree arg)
	  gcc_unreachable ();
	}

-      real_to_integer2 ((HOST_WIDE_INT *)&val.low, &val.high, &r);
-      if (double_int_fits_to_tree_p (itype, val))
-	return double_int_to_tree (itype, val);
+      val = real_to_integer (&r, &fail,
+			     TYPE_PRECISION (itype));
+      if (!fail)
+	return wide_int_to_tree (itype, val);
    }
  }
@@ -8093,102 +8103,40 @@ fold_builtin_bitop (tree fndecl, tree arg)
  /* Optimize for constant argument.  */
  if (TREE_CODE (arg) == INTEGER_CST && !TREE_OVERFLOW (arg))
    {
-      HOST_WIDE_INT hi, width, result;
-      unsigned HOST_WIDE_INT lo;
-      tree type;
-
-      type = TREE_TYPE (arg);
-      width = TYPE_PRECISION (type);
-      lo = TREE_INT_CST_LOW (arg);
-
-      /* Clear all the bits that are beyond the type's precision.  */
-      if (width > HOST_BITS_PER_WIDE_INT)
-	{
-	  hi = TREE_INT_CST_HIGH (arg);
-	  if (width < HOST_BITS_PER_DOUBLE_INT)
-	    hi &= ~((unsigned HOST_WIDE_INT) (-1)
-		    << (width - HOST_BITS_PER_WIDE_INT));
-	}
-      else
-	{
-	  hi = 0;
-	  if (width < HOST_BITS_PER_WIDE_INT)
-	    lo &= ~((unsigned HOST_WIDE_INT) (-1) << width);
-	}
+      wide_int warg = arg;
+      wide_int result;

      switch (DECL_FUNCTION_CODE (fndecl))
	{
	CASE_INT_FN (BUILT_IN_FFS):
-	  if (lo != 0)
-	    result = ffs_hwi (lo);
-	  else if (hi != 0)
-	    result = HOST_BITS_PER_WIDE_INT + ffs_hwi (hi);
-	  else
-	    result = 0;
+	  result = warg.ffs ();
	  break;

	CASE_INT_FN (BUILT_IN_CLZ):
-	  if (hi != 0)
-	    result = width - floor_log2 (hi) - 1 - HOST_BITS_PER_WIDE_INT;
-	  else if (lo != 0)
-	    result = width - floor_log2 (lo) - 1;
-	  else if (! CLZ_DEFINED_VALUE_AT_ZERO (TYPE_MODE (type), result))
-	    result = width;
+	  result = warg.clz ();
	  break;

	CASE_INT_FN (BUILT_IN_CTZ):
-	  if (lo != 0)
-	    result = ctz_hwi (lo);
-	  else if (hi != 0)
-	    result = HOST_BITS_PER_WIDE_INT + ctz_hwi (hi);
-	  else if (! CTZ_DEFINED_VALUE_AT_ZERO (TYPE_MODE (type), result))
-	    result = width;
+	  result = warg.ctz ();
	  break;

	CASE_INT_FN (BUILT_IN_CLRSB):
-	  if (width > 2 * HOST_BITS_PER_WIDE_INT)
-	    return NULL_TREE;
-	  if (width > HOST_BITS_PER_WIDE_INT
-	      && (hi & ((unsigned HOST_WIDE_INT) 1
-			<< (width - HOST_BITS_PER_WIDE_INT - 1))) != 0)
-	    {
-	      hi = ~hi & ~((unsigned HOST_WIDE_INT) (-1)
-			   << (width - HOST_BITS_PER_WIDE_INT - 1));
-	      lo = ~lo;
-	    }
-	  else if (width <= HOST_BITS_PER_WIDE_INT
-		   && (lo & ((unsigned HOST_WIDE_INT) 1 << (width - 1))) != 0)
-	    lo = ~lo & ~((unsigned HOST_WIDE_INT) (-1) << (width - 1));
-	  if (hi != 0)
-	    result = width - floor_log2 (hi) - 2 - HOST_BITS_PER_WIDE_INT;
-	  else if (lo != 0)
-	    result = width - floor_log2 (lo) - 2;
-	  else
-	    result = width - 1;
+	  result = warg.clrsb ();
	  break;

	CASE_INT_FN (BUILT_IN_POPCOUNT):
-	  result = 0;
-	  while (lo)
-	    result++, lo &= lo - 1;
-	  while (hi)
-	    result++, hi &= (unsigned HOST_WIDE_INT) hi - 1;
+	  result = warg.popcount ();
	  break;

	CASE_INT_FN (BUILT_IN_PARITY):
-	  result = 0;
-	  while (lo)
-	    result++, lo &= lo - 1;
-	  while (hi)
-	    result++, hi &= (unsigned HOST_WIDE_INT) hi - 1;
-	  result &= 1;
+	  result = warg.parity ();
	  break;

	default:
	  gcc_unreachable ();
	}

-      return build_int_cst (TREE_TYPE (TREE_TYPE (fndecl)), result);
+      return wide_int_to_tree (TREE_TYPE (TREE_TYPE (fndecl)), result);
    }

  return NULL_TREE;
@@ -8205,49 +8153,24 @@ fold_builtin_bswap (tree fndecl, tree arg)
  /* Optimize constant value.  */
  if (TREE_CODE (arg) == INTEGER_CST && !TREE_OVERFLOW (arg))
    {
-      HOST_WIDE_INT hi, width, r_hi = 0;
-      unsigned HOST_WIDE_INT lo, r_lo = 0;
      tree type = TREE_TYPE (TREE_TYPE (fndecl));

-      width = TYPE_PRECISION (type);
-      lo = TREE_INT_CST_LOW (arg);
-      hi = TREE_INT_CST_HIGH (arg);
-
      switch (DECL_FUNCTION_CODE (fndecl))
	{
	  case BUILT_IN_BSWAP16:
	  case BUILT_IN_BSWAP32:
	  case BUILT_IN_BSWAP64:
	    {
-	      int s;
-
-	      for (s = 0; s < width; s += 8)
-		{
-		  int d = width - s - 8;
-		  unsigned HOST_WIDE_INT byte;
-
-		  if (s < HOST_BITS_PER_WIDE_INT)
-		    byte = (lo >> s) & 0xff;
-		  else
-		    byte = (hi >> (s - HOST_BITS_PER_WIDE_INT)) & 0xff;
-
-		  if (d < HOST_BITS_PER_WIDE_INT)
-		    r_lo |= byte << d;
-		  else
-		    r_hi |= byte << (d - HOST_BITS_PER_WIDE_INT);
-		}
+	      signop sgn = TYPE_SIGN (type);
+	      tree result =
+		wide_int_to_tree (type,
+				  wide_int (arg)
+				  .force_to_size (TYPE_PRECISION (type), sgn).bswap ());
+	      return result;
	    }
-
-	  break;
-
	default:
	  gcc_unreachable ();
	}
-
-      if (width < HOST_BITS_PER_WIDE_INT)
-	return build_int_cst (type, r_lo);
-      else
-	return build_int_cst_wide (type, r_lo, r_hi);
    }

  return NULL_TREE;
@@ -8309,7 +8232,7 @@ fold_builtin_logarithm (location_t loc, tree fndecl, tree arg,
	    /* Prepare to do logN(exp10(exponent) -> exponent*logN(10).  */
	    {
	      REAL_VALUE_TYPE dconst10;
-	      real_from_integer (&dconst10, VOIDmode, 10, 0, 0);
+	      real_from_integer (&dconst10, VOIDmode, 10, SIGNED);
	      x = build_real (type, dconst10);
	    }
	    exponent = CALL_EXPR_ARG (arg, 0);
@@ -8462,7 +8385,7 @@ fold_builtin_pow (location_t loc, tree fndecl, tree arg0, tree arg1, tree type)
	  /* Check for an integer exponent.  */
	  n = real_to_integer (&c);
-	  real_from_integer (&cint, VOIDmode, n, n < 0 ? -1 : 0, 0);
+	  real_from_integer (&cint, VOIDmode, n, SIGNED);
	  if (real_identical (&c, &cint))
	    {
	      /* Attempt to evaluate pow at compile-time, unless this should
@@ -8560,9 +8483,9 @@ fold_builtin_powi (location_t loc, tree fndecl ATTRIBUTE_UNUSED,
  if (real_onep (arg0))
    return omit_one_operand_loc (loc, type, build_real (type, dconst1), arg1);

-  if (host_integerp (arg1, 0))
+  if (tree_fits_shwi_p (arg1))
    {
-      HOST_WIDE_INT c = TREE_INT_CST_LOW (arg1);
+      HOST_WIDE_INT c = tree_to_shwi (arg1);

      /* Evaluate powi at compile-time.  */
      if (TREE_CODE (arg0) == REAL_CST
@@ -8659,7 +8582,7 @@ fold_builtin_memset (location_t loc, tree dest, tree c, tree len,
      || ! validate_arg (len, INTEGER_TYPE))
    return NULL_TREE;

-  if (! host_integerp (len, 1))
+  if (! tree_fits_uhwi_p (len))
    return NULL_TREE;

  /* If the LEN parameter is zero, return DEST.  */
@@ -8689,7 +8612,7 @@ fold_builtin_memset (location_t loc, tree dest, tree c, tree len,
  if (! var_decl_component_p (var))
    return NULL_TREE;

-  length = tree_low_cst (len, 1);
+  length = tree_to_uhwi (len);
  if (GET_MODE_SIZE (TYPE_MODE (etype)) != length
      || get_pointer_alignment (dest) / BITS_PER_UNIT < length)
    return NULL_TREE;
@@ -8704,7 +8627,7 @@ fold_builtin_memset (location_t loc, tree dest, tree c, tree len,
  if (CHAR_BIT != 8 || BITS_PER_UNIT != 8 || HOST_BITS_PER_WIDE_INT > 64)
    return NULL_TREE;

-  cval = TREE_INT_CST_LOW (c);
+  cval = tree_to_hwi (c);
  cval &= 0xff;
  cval |= cval << 8;
  cval |= cval << 16;
@@ -8792,9 +8715,9 @@ fold_builtin_memory_op (location_t loc, tree dest, tree src,
      if (!dest_align || !src_align)
	return NULL_TREE;
      if (readonly_data_expr (src)
-	  || (host_integerp (len, 1)
+	  || (tree_fits_uhwi_p (len)
	      && (MIN (src_align, dest_align) / BITS_PER_UNIT
-		  >= (unsigned HOST_WIDE_INT) tree_low_cst (len, 1))))
+		  >= (unsigned HOST_WIDE_INT) tree_to_uhwi (len))))
	{
	  tree fn = builtin_decl_implicit (BUILT_IN_MEMCPY);
	  if (!fn)
@@ -8817,8 +8740,8 @@ fold_builtin_memory_op (location_t loc, tree dest, tree src,
	  destvar = TREE_OPERAND (dest, 0);
	  dest_base = get_ref_base_and_extent (destvar, &dest_offset,
					       &size, &maxsize);
-	  if (host_integerp (len, 1))
-	    maxsize = tree_low_cst (len, 1);
+	  if (tree_fits_uhwi_p (len))
+	    maxsize = tree_to_uhwi (len);
	  else
	    maxsize = -1;
	  src_offset /= BITS_PER_UNIT;
@@ -8834,20 +8757,19 @@ fold_builtin_memory_op (location_t loc, tree dest, tree src,
	  else if (TREE_CODE (src_base) == MEM_REF
		   && TREE_CODE (dest_base) == MEM_REF)
	    {
-	      double_int off;
+	      addr_wide_int off;
	      if (! operand_equal_p (TREE_OPERAND (src_base, 0),
				     TREE_OPERAND (dest_base, 0), 0))
		return NULL_TREE;
-	      off = mem_ref_offset (src_base) +
-		    double_int::from_shwi (src_offset);
-	      if (!off.fits_shwi ())
+	      off = mem_ref_offset (src_base) + src_offset;
+	      if (!off.fits_shwi_p ())
		return NULL_TREE;
-	      src_offset = off.low;
-	      off = mem_ref_offset (dest_base) +
-		    double_int::from_shwi (dest_offset);
-	      if (!off.fits_shwi ())
+	      src_offset = off.to_shwi ();
+
+	      off = mem_ref_offset (dest_base) + dest_offset;
+	      if (!off.fits_shwi_p ())
		return NULL_TREE;
-	      dest_offset = off.low;
+	      dest_offset = off.to_shwi ();
	      if (ranges_overlap_p (src_offset, maxsize,
				    dest_offset, maxsize))
		return NULL_TREE;
@@ -8884,7 +8806,7 @@ fold_builtin_memory_op (location_t loc, tree dest, tree src,
	  return NULL_TREE;
	}

-      if (!host_integerp (len, 0))
+      if (!tree_fits_shwi_p (len))
	return NULL_TREE;
      /* FIXME:
	 This logic lose for arguments like (type *)malloc (sizeof (type)),
@@ -9172,7 +9094,7 @@ fold_builtin_memchr (location_t loc, tree arg1, tree arg2, tree len, tree type)
      const char *p1;

      if (TREE_CODE (arg2) != INTEGER_CST
-	  || !host_integerp (len, 1))
+	  || !tree_fits_uhwi_p (len))
	return NULL_TREE;

      p1 = c_getstr (arg1);
@@ -9185,7 +9107,7 @@ fold_builtin_memchr (location_t loc, tree arg1, tree arg2, tree len, tree type)
	  if (target_char_cast (arg2, &c))
	    return NULL_TREE;

-	  r = (const char *) memchr (p1, c, tree_low_cst (len, 1));
+	  r = (const char *) memchr (p1, c, tree_to_uhwi (len));

	  if (r == NULL)
	    return build_int_cst (TREE_TYPE (arg1), 0);
@@ -9224,11 +9146,11 @@ fold_builtin_memcmp (location_t loc, tree arg1, tree arg2, tree len)

  /* If all arguments are constant, and the value of len is not greater
     than the lengths of arg1 and arg2, evaluate at compile-time.  */
-  if (host_integerp (len, 1) && p1 && p2
+  if (tree_fits_uhwi_p (len) && p1 && p2
      && compare_tree_int (len, strlen (p1) + 1) <= 0
      && compare_tree_int (len, strlen (p2) + 1) <= 0)
    {
-      const int r = memcmp (p1, p2, tree_low_cst (len, 1));
+      const int r = memcmp (p1, p2, tree_to_uhwi (len));

      if (r > 0)
	return integer_one_node;
@@ -9240,7 +9162,7 @@ fold_builtin_memcmp (location_t loc, tree arg1, tree arg2, tree len)
  /* If len parameter is one, return an expression corresponding to
     (*(const unsigned char*)arg1 - (const unsigned char*)arg2).  */
-  if (host_integerp (len, 1) && tree_low_cst (len, 1) == 1)
+  if (tree_fits_uhwi_p (len) && tree_to_uhwi (len) == 1)
    {
      tree cst_uchar_node = build_type_variant (unsigned_char_type_node, 1, 0);
      tree cst_uchar_ptr_node
@@ -9352,9 +9274,9 @@ fold_builtin_strncmp (location_t loc, tree arg1, tree arg2, tree len)
  p1 = c_getstr (arg1);
  p2 = c_getstr (arg2);

-  if (host_integerp (len, 1) && p1 && p2)
+  if (tree_fits_uhwi_p (len) && p1 && p2)
    {
-      const int i = strncmp (p1, p2, tree_low_cst (len, 1));
+      const int i = strncmp (p1, p2, tree_to_uhwi (len));
      if (i > 0)
	return integer_one_node;
      else if (i < 0)
@@ -9400,7 +9322,7 @@ fold_builtin_strncmp (location_t loc, tree arg1, tree arg2, tree len)
  /* If len parameter is one, return an expression corresponding to
     (*(const unsigned char*)arg1 - (const unsigned char*)arg2).  */
-  if (host_integerp (len, 1) && tree_low_cst (len, 1) == 1)
+  if (tree_fits_uhwi_p (len) && tree_to_uhwi (len) == 1)
    {
      tree cst_uchar_node = build_type_variant (unsigned_char_type_node, 1, 0);
      tree cst_uchar_ptr_node
@@ -9858,7 +9780,7 @@ fold_builtin_load_exponent (location_t loc, tree arg0, tree arg1,
  /* If both arguments are constant, then try to evaluate it.  */
  if ((ldexp || REAL_MODE_FORMAT (TYPE_MODE (type))->b == 2)
      && TREE_CODE (arg0) == REAL_CST && !TREE_OVERFLOW (arg0)
-      && host_integerp (arg1, 0))
+      && tree_fits_shwi_p (arg1))
    {
      /* Bound the maximum adjustment to twice the range of the
	 mode's valid exponents.  Use abs to ensure the range is
@@ -9868,7 +9790,7 @@ fold_builtin_load_exponent (location_t loc, tree arg0, tree arg1,
	   - REAL_MODE_FORMAT (TYPE_MODE (type))->emin);

      /* Get the user-requested adjustment.  */
-      const HOST_WIDE_INT req_exp_adj = tree_low_cst (arg1, 0);
+      const HOST_WIDE_INT req_exp_adj = tree_to_shwi (arg1);

      /* The requested adjustment must be inside this range.  This
	 is a preliminary cap to avoid things like overflow, we
@@ -12338,7 +12260,7 @@ fold_builtin_snprintf (location_t loc, tree dest, tree destsize, tree fmt,
  if (orig && !validate_arg (orig, POINTER_TYPE))
    return NULL_TREE;

-  if (!host_integerp (destsize, 1))
+  if (!tree_fits_uhwi_p (destsize))
    return NULL_TREE;

  /* Check whether the format is a literal string constant.  */
@@ -12352,7 +12274,7 @@ fold_builtin_snprintf (location_t loc, tree dest, tree destsize, tree fmt,
  if (!init_target_chars ())
    return NULL_TREE;

-  destlen = tree_low_cst (destsize, 1);
+  destlen = tree_to_uhwi (destsize);

  /* If the format doesn't contain % args or %%, use strcpy.  */
  if (strchr (fmt_str, target_percent) == NULL)
@@ -12397,10 +12319,10 @@ fold_builtin_snprintf (location_t loc, tree dest, tree destsize, tree fmt,
	return NULL_TREE;

      retval = c_strlen (orig, 1);
-      if (!retval || !host_integerp (retval, 1))
+      if (!retval || !tree_fits_uhwi_p (retval))
	return NULL_TREE;

-      origlen = tree_low_cst (retval, 1);
+      origlen = tree_to_uhwi (retval);

      /* We could expand this as
	 memcpy (str1, str2, cst - 1); str1[cst - 1] = '\0';
	 or to
@@ -12462,7 +12384,7 @@ expand_builtin_object_size (tree exp)
      return const0_rtx;
    }

-  object_size_type = tree_low_cst (ost, 0);
+  object_size_type = tree_to_shwi (ost);

  return object_size_type < 2 ? constm1_rtx : const0_rtx;
 }
@@ -12491,10 +12413,10 @@ expand_builtin_memory_chk (tree exp, rtx target, enum machine_mode mode,
  len = CALL_EXPR_ARG (exp, 2);
  size = CALL_EXPR_ARG (exp, 3);

-  if (! host_integerp (size, 1))
+  if (! tree_fits_uhwi_p (size))
    return NULL_RTX;

-  if (host_integerp (len, 1) || integer_all_onesp (size))
+  if (tree_fits_uhwi_p (len) || integer_all_onesp (size))
    {
      tree fn;
@@ -12625,22 +12547,22 @@ maybe_emit_chk_warning (tree exp, enum built_in_function fcode)
  if (!len || !size)
    return;

-  if (! host_integerp (size, 1) || integer_all_onesp (size))
+  if (! tree_fits_uhwi_p (size) || integer_all_onesp (size))
    return;

  if (is_strlen)
    {
      len = c_strlen (len, 1);
-      if (! len || ! host_integerp (len, 1) || tree_int_cst_lt (len, size))
+      if (! len || ! tree_fits_uhwi_p (len) || tree_int_cst_lt (len, size))
	return;
    }
  else if (fcode == BUILT_IN_STRNCAT_CHK)
    {
      tree src = CALL_EXPR_ARG (exp, 1);
-      if (! src || ! host_integerp (len, 1) || tree_int_cst_lt (len, size))
+      if (! src || ! tree_fits_uhwi_p (len) || tree_int_cst_lt (len, size))
	return;
      src = c_strlen (src, 1);
-      if (! src || ! host_integerp (src, 1))
+      if (! src || ! tree_fits_uhwi_p (src))
	{
	  warning_at (loc, 0, "%Kcall to %D might overflow destination buffer",
		      exp, get_callee_fndecl (exp));
@@ -12649,7 +12571,7 @@ maybe_emit_chk_warning (tree exp, enum built_in_function fcode)
      else if (tree_int_cst_lt (src, size))
	return;
    }
-  else if (! host_integerp (len, 1) || ! tree_int_cst_lt (size, len))
+  else if (! tree_fits_uhwi_p (len) || ! tree_int_cst_lt (size, len))
    return;

  warning_at (loc, 0, "%Kcall to %D will always overflow destination buffer",
@@ -12673,7 +12595,7 @@ maybe_emit_sprintf_chk_warning (tree exp, enum built_in_function fcode)
  size = CALL_EXPR_ARG (exp, 2);
  fmt = CALL_EXPR_ARG (exp, 3);

-  if (! host_integerp (size, 1) || integer_all_onesp (size))
+  if (! tree_fits_uhwi_p (size) || integer_all_onesp (size))
    return;

  /* Check whether the format is a literal string constant.  */
@@ -12701,7 +12623,7 @@ maybe_emit_sprintf_chk_warning (tree exp, enum built_in_function fcode)
	return;

      len = c_strlen (arg, 1);
-      if (!len || ! host_integerp (len, 1))
+      if (!len || ! tree_fits_uhwi_p (len))
	return;
    }
  else
@@ -12744,6 +12666,7 @@ fold_builtin_object_size (tree ptr, tree ost)
 {
  unsigned HOST_WIDE_INT bytes;
  int object_size_type;
+  int precision = TYPE_PRECISION (TREE_TYPE (ptr));

  if (!validate_arg (ptr, POINTER_TYPE)
      || !validate_arg (ost, INTEGER_TYPE))
@@ -12756,7 +12679,7 @@ fold_builtin_object_size (tree ptr, tree ost)
      || compare_tree_int (ost, 3) > 0)
    return NULL_TREE;

-  object_size_type = tree_low_cst (ost, 0);
+  object_size_type = tree_to_shwi (ost);

  /* __builtin_object_size doesn't evaluate side-effects in its arguments;
     if there are any side-effects, it returns (size_t) -1 for types 0 and 1
@@ -12766,21 +12689,24 @@ fold_builtin_object_size (tree ptr, tree ost)

  if (TREE_CODE (ptr) == ADDR_EXPR)
    {
-      bytes = compute_builtin_object_size (ptr, object_size_type);
-      if (double_int_fits_to_tree_p (size_type_node,
-				     double_int::from_uhwi (bytes)))
-	return build_int_cstu (size_type_node, bytes);
+
+      wide_int wbytes
+	= wide_int::from_uhwi (compute_builtin_object_size (ptr, object_size_type),
+			       precision);
+      if (wbytes.fits_to_tree_p (size_type_node))
+	return wide_int_to_tree (size_type_node, wbytes);
    }
  else if (TREE_CODE (ptr) == SSA_NAME)
    {
      /* If object size is not known yet, delay folding until
	 later.  Maybe subsequent passes will help determining
	 it.  */
+      wide_int wbytes;
      bytes = compute_builtin_object_size (ptr, object_size_type);
+      wbytes = wide_int::from_uhwi (bytes, precision);
      if (bytes != (unsigned HOST_WIDE_INT) (object_size_type < 2 ? -1 : 0)
-	  && double_int_fits_to_tree_p (size_type_node,
-					double_int::from_uhwi (bytes)))
-	return build_int_cstu (size_type_node, bytes);
+	  && wbytes.fits_to_tree_p (size_type_node))
+	return wide_int_to_tree (size_type_node, wbytes);
    }

  return NULL_TREE;
@@ -12822,17 +12748,17 @@ fold_builtin_memory_chk (location_t loc, tree fndecl,
	}
    }

-  if (! host_integerp (size, 1))
+  if (! tree_fits_uhwi_p (size))
    return NULL_TREE;

  if (! integer_all_onesp (size))
    {
-      if (! host_integerp (len, 1))
+      if (! tree_fits_uhwi_p (len))
	{
	  /* If LEN is not constant, try MAXLEN too.
	     For MAXLEN only allow optimizing into non-_ocs function
	     if SIZE is >= MAXLEN, never convert to __ocs_fail ().  */
-	  if (maxlen == NULL_TREE || ! host_integerp (maxlen, 1))
+	  if (maxlen == NULL_TREE || ! tree_fits_uhwi_p (maxlen))
	    {
	      if (fcode == BUILT_IN_MEMPCPY_CHK && ignore)
		{
@@ -12904,18 +12830,18 @@ fold_builtin_stxcpy_chk (location_t loc, tree fndecl, tree dest,
  if (fcode == BUILT_IN_STRCPY_CHK && operand_equal_p (src, dest, 0))
    return fold_convert_loc (loc, TREE_TYPE (TREE_TYPE (fndecl)), dest);

-  if (! host_integerp (size, 1))
+  if (! tree_fits_uhwi_p (size))
    return NULL_TREE;

  if (! integer_all_onesp (size))
    {
      len = c_strlen (src, 1);
-      if (! len || ! host_integerp (len, 1))
+      if (! len || ! tree_fits_uhwi_p (len))
	{
	  /* If LEN is not constant, try MAXLEN too.
For MAXLEN only allow optimizing into non-_ocs function if SIZE is >= MAXLEN, never convert to __ocs_fail (). */ - if (maxlen == NULL_TREE || ! host_integerp (maxlen, 1)) + if (maxlen == NULL_TREE || ! tree_fits_uhwi_p (maxlen)) { if (fcode == BUILT_IN_STPCPY_CHK) { @@ -12991,17 +12917,17 @@ fold_builtin_stxncpy_chk (location_t loc, tree dest, tree src, return build_call_expr_loc (loc, fn, 4, dest, src, len, size); } - if (! host_integerp (size, 1)) + if (! tree_fits_uhwi_p (size)) return NULL_TREE; if (! integer_all_onesp (size)) { - if (! host_integerp (len, 1)) + if (! tree_fits_uhwi_p (len)) { /* If LEN is not constant, try MAXLEN too. For MAXLEN only allow optimizing into non-_ocs function if SIZE is >= MAXLEN, never convert to __ocs_fail (). */ - if (maxlen == NULL_TREE || ! host_integerp (maxlen, 1)) + if (maxlen == NULL_TREE || ! tree_fits_uhwi_p (maxlen)) return NULL_TREE; } else @@ -13040,7 +12966,7 @@ fold_builtin_strcat_chk (location_t loc, tree fndecl, tree dest, if (p && *p == '\0') return omit_one_operand_loc (loc, TREE_TYPE (TREE_TYPE (fndecl)), dest, src); - if (! host_integerp (size, 1) || ! integer_all_onesp (size)) + if (! tree_fits_uhwi_p (size) || ! integer_all_onesp (size)) return NULL_TREE; /* If __builtin_strcat_chk is used, assume strcat is available. */ @@ -13074,15 +13000,15 @@ fold_builtin_strncat_chk (location_t loc, tree fndecl, else if (integer_zerop (len)) return omit_one_operand_loc (loc, TREE_TYPE (TREE_TYPE (fndecl)), dest, src); - if (! host_integerp (size, 1)) + if (! tree_fits_uhwi_p (size)) return NULL_TREE; if (! integer_all_onesp (size)) { tree src_len = c_strlen (src, 1); if (src_len - && host_integerp (src_len, 1) - && host_integerp (len, 1) + && tree_fits_uhwi_p (src_len) + && tree_fits_uhwi_p (len) && ! tree_int_cst_lt (len, src_len)) { /* If LEN >= strlen (SRC), optimize into __strcat_chk. */ @@ -13131,7 +13057,7 @@ fold_builtin_sprintf_chk_1 (location_t loc, int nargs, tree *args, if (!validate_arg (fmt, POINTER_TYPE)) return NULL_TREE; - if (! host_integerp (size, 1)) + if (! tree_fits_uhwi_p (size)) return NULL_TREE; len = NULL_TREE; @@ -13162,7 +13088,7 @@ fold_builtin_sprintf_chk_1 (location_t loc, int nargs, tree *args, if (validate_arg (arg, POINTER_TYPE)) { len = c_strlen (arg, 1); - if (! len || ! host_integerp (len, 1)) + if (! len || ! tree_fits_uhwi_p (len)) len = NULL_TREE; } } @@ -13239,17 +13165,17 @@ fold_builtin_snprintf_chk_1 (location_t loc, int nargs, tree *args, if (!validate_arg (fmt, POINTER_TYPE)) return NULL_TREE; - if (! host_integerp (size, 1)) + if (! tree_fits_uhwi_p (size)) return NULL_TREE; if (! integer_all_onesp (size)) { - if (! host_integerp (len, 1)) + if (! tree_fits_uhwi_p (len)) { /* If LEN is not constant, try MAXLEN too. For MAXLEN only allow optimizing into non-_ocs function if SIZE is >= MAXLEN, never convert to __ocs_fail (). */ - if (maxlen == NULL_TREE || ! host_integerp (maxlen, 1)) + if (maxlen == NULL_TREE || ! tree_fits_uhwi_p (maxlen)) return NULL_TREE; } else @@ -13894,10 +13820,10 @@ do_mpfr_bessel_n (tree arg1, tree arg2, tree type, /* To proceed, MPFR must exactly represent the target floating point format, which only happens when the target base equals two. 
*/ if (REAL_MODE_FORMAT (TYPE_MODE (type))->b == 2 - && host_integerp (arg1, 0) + && tree_fits_shwi_p (arg1) && TREE_CODE (arg2) == REAL_CST && !TREE_OVERFLOW (arg2)) { - const HOST_WIDE_INT n = tree_low_cst(arg1, 0); + const HOST_WIDE_INT n = tree_to_shwi (arg1); const REAL_VALUE_TYPE *const ra = &TREE_REAL_CST (arg2); if (n == (long)n diff --git a/gcc/c-family/c-ada-spec.c b/gcc/c-family/c-ada-spec.c index 22784c93ec9..f1e785aa0fb 100644 --- a/gcc/c-family/c-ada-spec.c +++ b/gcc/c-family/c-ada-spec.c @@ -29,21 +29,7 @@ along with GCC; see the file COPYING3. If not see #include "cpplib.h" #include "c-pragma.h" #include "cpp-id-data.h" - -/* Adapted from hwint.h to use the Ada prefix. */ -#if HOST_BITS_PER_WIDE_INT == HOST_BITS_PER_LONG -# if HOST_BITS_PER_WIDE_INT == 64 -# define ADA_HOST_WIDE_INT_PRINT_DOUBLE_HEX \ - "16#%" HOST_LONG_FORMAT "x%016" HOST_LONG_FORMAT "x#" -# else -# define ADA_HOST_WIDE_INT_PRINT_DOUBLE_HEX \ - "16#%" HOST_LONG_FORMAT "x%08" HOST_LONG_FORMAT "x#" -# endif -#else - /* We can assume that 'long long' is at least 64 bits. */ -# define ADA_HOST_WIDE_INT_PRINT_DOUBLE_HEX \ - "16#%" HOST_LONG_LONG_FORMAT "x%016" HOST_LONG_LONG_FORMAT "x#" -#endif /* HOST_BITS_PER_WIDE_INT == HOST_BITS_PER_LONG */ +#include "wide-int.h" /* Local functions, macros and variables. */ static int dump_generic_ada_node (pretty_printer *, tree, tree, @@ -1794,7 +1780,7 @@ dump_ada_template (pretty_printer *buffer, tree t, static bool is_simple_enum (tree node) { - unsigned HOST_WIDE_INT count = 0; + HOST_WIDE_INT count = 0; tree value; for (value = TYPE_VALUES (node); value; value = TREE_CHAIN (value)) @@ -1804,9 +1790,9 @@ is_simple_enum (tree node) if (TREE_CODE (int_val) != INTEGER_CST) int_val = DECL_INITIAL (int_val); - if (!host_integerp (int_val, 0)) + if (!tree_fits_shwi_p (int_val)) return false; - else if (TREE_INT_CST_LOW (int_val) != count) + else if (tree_to_shwi (int_val) != count) return false; count++; @@ -2203,25 +2189,25 @@ dump_generic_ada_node (pretty_printer *buffer, tree node, tree type, to generate the (0 .. -1) range for flexible array members. */ if (TREE_TYPE (node) == sizetype) node = fold_convert (ssizetype, node); - if (host_integerp (node, 0)) - pp_wide_integer (buffer, TREE_INT_CST_LOW (node)); - else if (host_integerp (node, 1)) - pp_unsigned_wide_integer (buffer, TREE_INT_CST_LOW (node)); + if (tree_fits_shwi_p (node)) + pp_wide_integer (buffer, tree_to_shwi (node)); + else if (tree_fits_uhwi_p (node)) + pp_unsigned_wide_integer (buffer, tree_to_uhwi (node)); else { - tree val = node; - unsigned HOST_WIDE_INT low = TREE_INT_CST_LOW (val); - HOST_WIDE_INT high = TREE_INT_CST_HIGH (val); - - if (tree_int_cst_sgn (val) < 0) + wide_int val = node; + int i; + if (val.neg_p (SIGNED)) { pp_minus (buffer); - high = ~high + !low; - low = -low; + val = -val; } sprintf (pp_buffer (buffer)->digit_buffer, - ADA_HOST_WIDE_INT_PRINT_DOUBLE_HEX, - (unsigned HOST_WIDE_INT) high, low); + "16#%" HOST_WIDE_INT_PRINT "x", val.elt (val.get_len () - 1)); + for (i = val.get_len () - 2; i >= 0; i--) + sprintf (pp_buffer (buffer)->digit_buffer + strlen (pp_buffer (buffer)->digit_buffer), + HOST_WIDE_INT_PRINT_PADDED_HEX, val.elt (i)); + strcat (pp_buffer (buffer)->digit_buffer, "#"); pp_string (buffer, pp_buffer (buffer)->digit_buffer); } break; diff --git a/gcc/c-family/c-common.c b/gcc/c-family/c-common.c index 5d1a1c6c2ce..8bde6270e76 100644 --- a/gcc/c-family/c-common.c +++ b/gcc/c-family/c-common.c @@ -43,6 +43,7 @@ along with GCC; see the file COPYING3.
If not see #include "opts.h" #include "cgraph.h" #include "target-def.h" +#include "wide-int-print.h" cpp_reader *parse_in; /* Declared in c-pragma.h. */ @@ -832,7 +833,7 @@ finish_fname_decls (void) for (saved = TREE_PURPOSE (stack); saved; saved = TREE_CHAIN (saved)) { tree decl = TREE_PURPOSE (saved); - unsigned ix = TREE_INT_CST_LOW (TREE_VALUE (saved)); + unsigned ix = tree_to_hwi (TREE_VALUE (saved)); *fname_vars[ix].decl = decl; } @@ -2418,7 +2419,7 @@ shorten_binary_op (tree result_type, tree op0, tree op1, bool bitwise) arg0 = c_common_get_narrower (op0, &unsigned0); arg1 = c_common_get_narrower (op1, &unsigned1); - + /* UNS is 1 if the operation to be done is an unsigned one. */ uns = TYPE_UNSIGNED (result_type); @@ -3451,7 +3452,7 @@ c_common_type_for_mode (enum machine_mode mode, int unsignedp) if (mode == DImode) return unsignedp ? unsigned_intDI_type_node : intDI_type_node; - + #if HOST_BITS_PER_WIDE_INT >= 64 if (mode == TYPE_MODE (intTI_type_node)) return unsignedp ? unsigned_intTI_type_node : intTI_type_node; @@ -3967,7 +3968,7 @@ shorten_compare (tree *op0_ptr, tree *op1_ptr, tree *restype_ptr, /* If one of the operands must be floated, we cannot optimize. */ real1 = TREE_CODE (TREE_TYPE (primop0)) == REAL_TYPE; real2 = TREE_CODE (TREE_TYPE (primop1)) == REAL_TYPE; - + /* If first arg is constant, swap the args (changing operation so value is preserved), for canonicalization. Don't do this if the second arg is 0. */ @@ -4052,9 +4053,10 @@ shorten_compare (tree *op0_ptr, tree *op1_ptr, tree *restype_ptr, { /* Convert primop1 to target type, but do not introduce additional overflow. We know primop1 is an int_cst. */ - primop1 = force_fit_type_double (*restype_ptr, - tree_to_double_int (primop1), - 0, TREE_OVERFLOW (primop1)); + primop1 = force_fit_type (*restype_ptr, + wide_int (primop1).force_to_size (TYPE_PRECISION (*restype_ptr), + TYPE_SIGN (TREE_TYPE (primop1))), + 0, TREE_OVERFLOW (primop1)); } if (type != *restype_ptr) { @@ -4366,8 +4368,7 @@ pointer_int_sum (location_t loc, enum tree_code resultcode, convert (TREE_TYPE (intop), size_exp), 1); intop = convert (sizetype, t); if (TREE_OVERFLOW_P (intop) && !TREE_OVERFLOW (t)) - intop = build_int_cst_wide (TREE_TYPE (intop), TREE_INT_CST_LOW (intop), - TREE_INT_CST_HIGH (intop)); + intop = wide_int_to_tree (TREE_TYPE (intop), intop); } /* Create the sum or difference. */ @@ -4490,7 +4491,7 @@ c_common_truthvalue_conversion (location_t location, tree expr) case ERROR_MARK: return expr; - + case INTEGER_CST: return integer_zerop (expr) ? truthvalue_false_node : truthvalue_true_node; @@ -4740,7 +4741,7 @@ c_type_hash (const void *p) if (TREE_CODE (TYPE_SIZE (t)) != INTEGER_CST) size = 0; else - size = TREE_INT_CST_LOW (TYPE_SIZE (t)); + size = tree_to_hwi (TYPE_SIZE (t)); return ((size << 24) | (n_elements << shift)); } @@ -4748,7 +4749,7 @@ static GTY((param_is (union tree_node))) htab_t type_hash_table; /* Return the typed-based alias set for T, which may be an expression or a type. Return -1 if we don't do anything special. */ - + alias_set_type c_common_get_alias_set (tree t) { @@ -5419,7 +5420,7 @@ c_common_nodes_and_builtins (void) } /* This node must not be shared. 
*/ - void_zero_node = make_node (INTEGER_CST); + void_zero_node = make_int_cst (1); TREE_TYPE (void_zero_node) = void_type_node; void_list_node = build_void_list_node (); @@ -5608,7 +5609,7 @@ c_common_nodes_and_builtins (void) (build_decl (UNKNOWN_LOCATION, TYPE_DECL, get_identifier (pname), ptype)); - + } } @@ -5630,7 +5631,7 @@ c_common_nodes_and_builtins (void) /* Create the built-in __null node. It is important that this is not shared. */ - null_node = make_node (INTEGER_CST); + null_node = make_int_cst (1); TREE_TYPE (null_node) = c_common_type_for_size (POINTER_SIZE, 0); /* Since builtin_types isn't gc'ed, don't export these nodes. */ @@ -6008,22 +6009,12 @@ c_add_case_label (location_t loc, splay_tree cases, tree cond, tree orig_type, static void match_case_to_enum_1 (tree key, tree type, tree label) { - char buf[2 + 2*HOST_BITS_PER_WIDE_INT/4 + 1]; - - /* ??? Not working too hard to print the double-word value. - Should perhaps be done with %lwd in the diagnostic routines? */ - if (TREE_INT_CST_HIGH (key) == 0) - snprintf (buf, sizeof (buf), HOST_WIDE_INT_PRINT_UNSIGNED, - TREE_INT_CST_LOW (key)); - else if (!TYPE_UNSIGNED (type) - && TREE_INT_CST_HIGH (key) == -1 - && TREE_INT_CST_LOW (key) != 0) - snprintf (buf, sizeof (buf), "-" HOST_WIDE_INT_PRINT_UNSIGNED, - -TREE_INT_CST_LOW (key)); + char buf[WIDE_INT_PRINT_BUFFER_SIZE]; + + if (tree_fits_hwi_p (key, TYPE_SIGN (type))) + print_dec (key, buf, TYPE_SIGN (type)); else - snprintf (buf, sizeof (buf), HOST_WIDE_INT_PRINT_DOUBLE_HEX, - (unsigned HOST_WIDE_INT) TREE_INT_CST_HIGH (key), - (unsigned HOST_WIDE_INT) TREE_INT_CST_LOW (key)); + print_hex (key, buf); if (TYPE_NAME (type) == 0) warning_at (DECL_SOURCE_LOCATION (CASE_LABEL (label)), @@ -6930,11 +6921,11 @@ get_priority (tree args, bool is_destructor) arg = TREE_VALUE (args); arg = default_conversion (arg); - if (!host_integerp (arg, /*pos=*/0) + if (!tree_fits_shwi_p (arg) || !INTEGRAL_TYPE_P (TREE_TYPE (arg))) goto invalid; - pri = tree_low_cst (arg, /*pos=*/0); + pri = tree_to_shwi (arg); if (pri < 0 || pri > MAX_INIT_PRIORITY) goto invalid; @@ -7068,7 +7059,7 @@ handle_mode_attribute (tree *node, tree name, tree args, tree ident = TREE_VALUE (args); *no_add_attrs = true; - + if (TREE_CODE (ident) != IDENTIFIER_NODE) warning (OPT_Wattributes, "%qE attribute ignored", name); else @@ -7931,11 +7922,11 @@ handle_alloc_size_attribute (tree *node, tree ARG_UNUSED (name), tree args, for (; args; args = TREE_CHAIN (args)) { tree position = TREE_VALUE (args); + wide_int p; if (TREE_CODE (position) != INTEGER_CST - || TREE_INT_CST_HIGH (position) - || TREE_INT_CST_LOW (position) < 1 - || TREE_INT_CST_LOW (position) > arg_count ) + || (p = wide_int (position)).ltu_p (1) + || p.gtu_p (arg_count) ) { warning (OPT_Wattributes, "alloc_size parameter outside range"); @@ -8358,14 +8349,14 @@ handle_vector_size_attribute (tree *node, tree name, tree args, size = TREE_VALUE (args); - if (!host_integerp (size, 1)) + if (!tree_fits_uhwi_p (size)) { warning (OPT_Wattributes, "%qE attribute ignored", name); return NULL_TREE; } /* Get the vector size (in bytes). */ - vecsize = tree_low_cst (size, 1); + vecsize = tree_to_uhwi (size); /* We need to provide for vector pointers, vector arrays, and functions returning vectors. 
For example: @@ -8391,14 +8382,14 @@ handle_vector_size_attribute (tree *node, tree name, tree args, || (!SCALAR_FLOAT_MODE_P (orig_mode) && GET_MODE_CLASS (orig_mode) != MODE_INT && !ALL_SCALAR_FIXED_POINT_MODE_P (orig_mode)) - || !host_integerp (TYPE_SIZE_UNIT (type), 1) + || !tree_fits_uhwi_p (TYPE_SIZE_UNIT (type)) || TREE_CODE (type) == BOOLEAN_TYPE) { error ("invalid vector type for attribute %qE", name); return NULL_TREE; } - if (vecsize % tree_low_cst (TYPE_SIZE_UNIT (type), 1)) + if (vecsize % tree_to_uhwi (TYPE_SIZE_UNIT (type))) { error ("vector size not an integral multiple of component size"); return NULL; @@ -8411,7 +8402,7 @@ handle_vector_size_attribute (tree *node, tree name, tree args, } /* Calculate how many units fit in the vector. */ - nunits = vecsize / tree_low_cst (TYPE_SIZE_UNIT (type), 1); + nunits = vecsize / tree_to_uhwi (TYPE_SIZE_UNIT (type)); if (nunits & (nunits - 1)) { error ("number of components of the vector not a power of two"); @@ -8573,7 +8564,7 @@ check_function_sentinel (const_tree fntype, int nargs, tree *argarray) if (TREE_VALUE (attr)) { tree p = TREE_VALUE (TREE_VALUE (attr)); - pos = TREE_INT_CST_LOW (p); + pos = tree_to_hwi (p); } /* The sentinel must be one of the varargs, i.e. @@ -8646,13 +8637,14 @@ check_nonnull_arg (void * ARG_UNUSED (ctx), tree param, static bool get_nonnull_operand (tree arg_num_expr, unsigned HOST_WIDE_INT *valp) { - /* Verify the arg number is a constant. */ - if (TREE_CODE (arg_num_expr) != INTEGER_CST - || TREE_INT_CST_HIGH (arg_num_expr) != 0) + /* Verify the arg number is a small constant. */ + if (cst_fits_uhwi_p (arg_num_expr)) + { + *valp = tree_to_hwi (arg_num_expr); + return true; + } + else return false; - - *valp = TREE_INT_CST_LOW (arg_num_expr); - return true; } /* Handle a "nothrow" attribute; arguments as in @@ -8850,7 +8842,7 @@ parse_optimize_options (tree args, bool attr_p) if (TREE_CODE (value) == INTEGER_CST) { char buffer[20]; - sprintf (buffer, "-O%ld", (long) TREE_INT_CST_LOW (value)); + sprintf (buffer, "-O%ld", (long) tree_to_hwi (value)); vec_safe_push (optimize_args, ggc_strdup (buffer)); } @@ -9072,11 +9064,10 @@ check_function_arguments_recurse (void (*callback) /* Extract the argument number, which was previously checked to be valid. */ format_num_expr = TREE_VALUE (TREE_VALUE (attrs)); + + gcc_assert (tree_fits_uhwi_p (format_num_expr)); - gcc_assert (TREE_CODE (format_num_expr) == INTEGER_CST - && !TREE_INT_CST_HIGH (format_num_expr)); - - format_num = TREE_INT_CST_LOW (format_num_expr); + format_num = tree_to_uhwi (format_num_expr); for (inner_arg = first_call_expr_arg (param, &iter), i = 1; inner_arg != 0; @@ -9330,7 +9321,7 @@ c_parse_error (const char *gmsgid, enum cpp_ttype token_type, || token_type == CPP_CHAR16 || token_type == CPP_CHAR32) { - unsigned int val = TREE_INT_CST_LOW (value); + unsigned int val = tree_to_hwi (value); const char *prefix; switch (token_type) @@ -9584,8 +9575,7 @@ fold_offsetof_1 (tree expr) return error_mark_node; } off = size_binop_loc (input_location, PLUS_EXPR, DECL_FIELD_OFFSET (t), - size_int (tree_low_cst (DECL_FIELD_BIT_OFFSET (t), - 1) + size_int (tree_to_uhwi (DECL_FIELD_BIT_OFFSET (t)) / BITS_PER_UNIT)); break; @@ -9949,7 +9939,7 @@ complete_array_type (tree *ptype, tree initial_value, bool do_default) { error ("size of array is too large"); /* If we proceed with the array type as it is, we'll eventually - crash in tree_low_cst(). */ + crash in tree_to_uhwi(). 
*/ type = error_mark_node; } @@ -10007,7 +9997,7 @@ sync_resolve_size (tree function, vec<tree, va_gc> *params) if (!INTEGRAL_TYPE_P (type) && !POINTER_TYPE_P (type)) goto incompatible; - size = tree_low_cst (TYPE_SIZE_UNIT (type), 1); + size = tree_to_uhwi (TYPE_SIZE_UNIT (type)); if (size == 1 || size == 2 || size == 4 || size == 8 || size == 16) return size; @@ -10166,7 +10156,7 @@ get_atomic_generic_size (location_t loc, tree function, return 0; } - size_0 = tree_low_cst (TYPE_SIZE_UNIT (TREE_TYPE (type_0)), 1); + size_0 = tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (type_0))); /* Zero size objects are not allowed. */ if (size_0 == 0) @@ -10191,7 +10181,7 @@ get_atomic_generic_size (location_t loc, tree function, function); return 0; } - size = tree_low_cst (TYPE_SIZE_UNIT (TREE_TYPE (type)), 1); + size = tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (type))); if (size != size_0) { error_at (loc, "size mismatch in argument %d of %qE", x + 1, @@ -10206,7 +10196,7 @@ get_atomic_generic_size (location_t loc, tree function, tree p = (*params)[x]; if (TREE_CODE (p) == INTEGER_CST) { - int i = tree_low_cst (p, 1); + int i = tree_to_uhwi (p); if (i < 0 || (i & MEMMODEL_MASK) >= MEMMODEL_LAST) { warning_at (loc, OPT_Winvalid_memory_model, @@ -10375,7 +10365,7 @@ resolve_overloaded_atomic_compare_exchange (location_t loc, tree function, bool fn(T* mem, T* desired, T* return, weak, success, failure) into bool fn ((In *)mem, (In *)expected, (In) *desired, weak, succ, fail) */ - + p0 = (*params)[0]; p1 = (*params)[1]; p2 = (*params)[2]; @@ -11054,24 +11044,24 @@ warn_for_sign_compare (location_t location, if (TREE_CODE (op1) == BIT_NOT_EXPR) op1 = c_common_get_narrower (TREE_OPERAND (op1, 0), &unsignedp1); - if (host_integerp (op0, 0) || host_integerp (op1, 0)) + if (tree_fits_shwi_p (op0) || tree_fits_shwi_p (op1)) { tree primop; HOST_WIDE_INT constant, mask; int unsignedp; unsigned int bits; - if (host_integerp (op0, 0)) + if (tree_fits_shwi_p (op0)) { primop = op1; unsignedp = unsignedp1; - constant = tree_low_cst (op0, 0); + constant = tree_to_shwi (op0); } else { primop = op0; unsignedp = unsignedp0; - constant = tree_low_cst (op1, 0); + constant = tree_to_shwi (op1); } bits = TYPE_PRECISION (TREE_TYPE (primop)); @@ -11149,7 +11139,7 @@ do_warn_double_promotion (tree result_type, tree type1, tree type2, early on, later parts of the compiler can always do the reverse translation and get back the corresponding typedef name. For example, given: - + typedef struct S MY_TYPE; MY_TYPE object; @@ -11511,8 +11501,8 @@ convert_vector_to_pointer_for_subscript (location_t loc, tree type1; if (TREE_CODE (index) == INTEGER_CST) - if (!host_integerp (index, 1) - || ((unsigned HOST_WIDE_INT) tree_low_cst (index, 1) + if (!tree_fits_uhwi_p (index) + || ((unsigned HOST_WIDE_INT) tree_to_uhwi (index) >= TYPE_VECTOR_SUBPARTS (type))) warning_at (loc, OPT_Warray_bounds, "index value is out of bound"); diff --git a/gcc/c-family/c-cppbuiltin.c b/gcc/c-family/c-cppbuiltin.c index 94078c00460..0021341ba98 100644 --- a/gcc/c-family/c-cppbuiltin.c +++ b/gcc/c-family/c-cppbuiltin.c @@ -106,7 +106,7 @@ static void builtin_define_type_sizeof (const char *name, tree type) { builtin_define_with_int_value (name, - tree_low_cst (TYPE_SIZE_UNIT (type), 1)); + tree_to_uhwi (TYPE_SIZE_UNIT (type))); } /* Define the float.h constants for TYPE using NAME_PREFIX, FP_SUFFIX, @@ -648,7 +648,7 @@ cpp_atomic_builtins (cpp_reader *pfile) /* Tell the source code about various types. 
These map to the C++11 and C11 macros where 2 indicates lock-free always, and 1 indicates sometimes lock free. */ -#define SIZEOF_NODE(T) (tree_low_cst (TYPE_SIZE_UNIT (T), 1)) +#define SIZEOF_NODE(T) (tree_to_uhwi (TYPE_SIZE_UNIT (T))) #define SWAP_INDEX(T) ((SIZEOF_NODE (T) < SWAP_LIMIT) ? SIZEOF_NODE (T) : 0) builtin_define_with_int_value ("__GCC_ATOMIC_BOOL_LOCK_FREE", (have_swap[SWAP_INDEX (boolean_type_node)]? 2 : 1)); diff --git a/gcc/c-family/c-format.c b/gcc/c-family/c-format.c index c11d93aa89d..e5847021b83 100644 --- a/gcc/c-family/c-format.c +++ b/gcc/c-family/c-format.c @@ -226,13 +226,13 @@ check_format_string (tree fntype, unsigned HOST_WIDE_INT format_num, static bool get_constant (tree expr, unsigned HOST_WIDE_INT *value, int validated_p) { - if (TREE_CODE (expr) != INTEGER_CST || TREE_INT_CST_HIGH (expr) != 0) + if (!cst_fits_uhwi_p (expr)) { gcc_assert (!validated_p); return false; } - *value = TREE_INT_CST_LOW (expr); + *value = tree_to_hwi (expr); return true; } @@ -1459,8 +1459,8 @@ check_format_arg (void *ctx, tree format_tree, res->number_non_literal++; return; } - if (!host_integerp (arg1, 0) - || (offset = tree_low_cst (arg1, 0)) < 0) + if (!tree_fits_shwi_p (arg1) + || (offset = tree_to_shwi (arg1)) < 0) { res->number_non_literal++; return; @@ -1506,8 +1506,8 @@ check_format_arg (void *ctx, tree format_tree, return; } if (TREE_CODE (format_tree) == ARRAY_REF - && host_integerp (TREE_OPERAND (format_tree, 1), 0) - && (offset += tree_low_cst (TREE_OPERAND (format_tree, 1), 0)) >= 0) + && tree_fits_shwi_p (TREE_OPERAND (format_tree, 1)) + && (offset += tree_to_shwi (TREE_OPERAND (format_tree, 1))) >= 0) format_tree = TREE_OPERAND (format_tree, 0); if (TREE_CODE (format_tree) == VAR_DECL && TREE_CODE (TREE_TYPE (format_tree)) == ARRAY_TYPE @@ -1537,9 +1537,9 @@ check_format_arg (void *ctx, tree format_tree, /* Variable length arrays can't be initialized. */ gcc_assert (TREE_CODE (array_size) == INTEGER_CST); - if (host_integerp (array_size, 0)) + if (tree_fits_shwi_p (array_size)) { - HOST_WIDE_INT array_size_value = TREE_INT_CST_LOW (array_size); + HOST_WIDE_INT array_size_value = tree_to_shwi (array_size); if (array_size_value > 0 && array_size_value == (int) array_size_value && format_length > array_size_value) diff --git a/gcc/c-family/c-lex.c b/gcc/c-family/c-lex.c index 819e9d51e10..bbf7ee5c849 100644 --- a/gcc/c-family/c-lex.c +++ b/gcc/c-family/c-lex.c @@ -33,6 +33,7 @@ along with GCC; see the file COPYING3. If not see #include "splay-tree.h" #include "debug.h" #include "target.h" +#include "wide-int.h" /* We may keep statistics about how long which files took to compile. */ static int header_time, body_time; @@ -47,9 +48,9 @@ static tree interpret_float (const cpp_token *, unsigned int, const char *, enum overflow_type *); static tree interpret_fixed (const cpp_token *, unsigned int); static enum integer_type_kind narrowest_unsigned_type - (unsigned HOST_WIDE_INT, unsigned HOST_WIDE_INT, unsigned int); + (const wide_int &, unsigned int); static enum integer_type_kind narrowest_signed_type - (unsigned HOST_WIDE_INT, unsigned HOST_WIDE_INT, unsigned int); + (const wide_int &, unsigned int); static enum cpp_ttype lex_string (const cpp_token *, tree *, bool, bool); static tree lex_charconst (const cpp_token *); static void update_header_times (const char *); @@ -525,9 +526,7 @@ c_lex_with_flags (tree *value, location_t *loc, unsigned char *cpp_flags, there isn't one. 
*/ static enum integer_type_kind -narrowest_unsigned_type (unsigned HOST_WIDE_INT low, - unsigned HOST_WIDE_INT high, - unsigned int flags) +narrowest_unsigned_type (const wide_int &val, unsigned int flags) { int itk; @@ -546,9 +545,7 @@ narrowest_unsigned_type (unsigned HOST_WIDE_INT low, continue; upper = TYPE_MAX_VALUE (integer_types[itk]); - if ((unsigned HOST_WIDE_INT) TREE_INT_CST_HIGH (upper) > high - || ((unsigned HOST_WIDE_INT) TREE_INT_CST_HIGH (upper) == high - && TREE_INT_CST_LOW (upper) >= low)) + if (wide_int::geu_p (upper, val)) return (enum integer_type_kind) itk; } @@ -557,8 +554,7 @@ narrowest_unsigned_type (unsigned HOST_WIDE_INT low, /* Ditto, but narrowest signed type. */ static enum integer_type_kind -narrowest_signed_type (unsigned HOST_WIDE_INT low, - unsigned HOST_WIDE_INT high, unsigned int flags) +narrowest_signed_type (const wide_int &val, unsigned int flags) { int itk; @@ -569,7 +565,6 @@ narrowest_signed_type (unsigned HOST_WIDE_INT low, else itk = itk_long_long; - for (; itk < itk_none; itk += 2 /* skip signed types */) { tree upper; @@ -578,9 +573,7 @@ narrowest_signed_type (unsigned HOST_WIDE_INT low, continue; upper = TYPE_MAX_VALUE (integer_types[itk]); - if ((unsigned HOST_WIDE_INT) TREE_INT_CST_HIGH (upper) > high - || ((unsigned HOST_WIDE_INT) TREE_INT_CST_HIGH (upper) == high - && TREE_INT_CST_LOW (upper) >= low)) + if (wide_int::geu_p (upper, val)) return (enum integer_type_kind) itk; } @@ -596,6 +589,8 @@ interpret_integer (const cpp_token *token, unsigned int flags, enum integer_type_kind itk; cpp_num integer; cpp_options *options = cpp_get_options (parse_in); + HOST_WIDE_INT ival[2]; + wide_int wval; *overflow = OT_NONE; @@ -604,18 +599,22 @@ interpret_integer (const cpp_token *token, unsigned int flags, if (integer.overflow) *overflow = OT_OVERFLOW; + ival[0] = integer.low; + ival[1] = integer.high; + wval = wide_int::from_array (ival, 2, HOST_BITS_PER_WIDE_INT * 2); + /* The type of a constant with a U suffix is straightforward. */ if (flags & CPP_N_UNSIGNED) - itk = narrowest_unsigned_type (integer.low, integer.high, flags); + itk = narrowest_unsigned_type (wval, flags); else { /* The type of a potentially-signed integer constant varies depending on the base it's in, the standard in use, and the length suffixes. */ enum integer_type_kind itk_u - = narrowest_unsigned_type (integer.low, integer.high, flags); + = narrowest_unsigned_type (wval, flags); enum integer_type_kind itk_s - = narrowest_signed_type (integer.low, integer.high, flags); + = narrowest_signed_type (wval, flags); /* In both C89 and C99, octal and hex constants may be signed or unsigned, whichever fits tighter. We do not warn about this @@ -667,7 +666,7 @@ interpret_integer (const cpp_token *token, unsigned int flags, : "integer constant is too large for %<long%> type"); } - value = build_int_cst_wide (type, integer.low, integer.high); + value = wide_int_to_tree (type, wval); /* Convert imaginary to a complex type. */ if (flags & CPP_N_IMAGINARY) @@ -1154,9 +1153,9 @@ lex_charconst (const cpp_token *token) /* Cast to cppchar_signed_t to get correct sign-extension of RESULT before possibly widening to HOST_WIDE_INT for build_int_cst. 
*/ if (unsignedp || (cppchar_signed_t) result >= 0) - value = build_int_cst_wide (type, result, 0); + value = build_int_cst (type, result); else - value = build_int_cst_wide (type, (cppchar_signed_t) result, -1); + value = build_int_cst (type, (cppchar_signed_t) result); return value; } diff --git a/gcc/c-family/c-pragma.c b/gcc/c-family/c-pragma.c index 309859fc8ec..c91afdd13b4 100644 --- a/gcc/c-family/c-pragma.c +++ b/gcc/c-family/c-pragma.c @@ -151,7 +151,8 @@ handle_pragma_pack (cpp_reader * ARG_UNUSED (dummy)) { if (TREE_CODE (x) != INTEGER_CST) GCC_BAD ("invalid constant in %<#pragma pack%> - ignored"); - align = TREE_INT_CST_LOW (x); + /* Cannot use tree_to_uhwi here or it will ice if above message printed. */ + align = tree_to_hwi (x); action = set; if (pragma_lex (&x) != CPP_CLOSE_PAREN) GCC_BAD ("malformed %<#pragma pack%> - ignored"); @@ -183,7 +184,8 @@ handle_pragma_pack (cpp_reader * ARG_UNUSED (dummy)) { if (TREE_CODE (x) != INTEGER_CST) GCC_BAD ("invalid constant in %<#pragma pack%> - ignored"); - align = TREE_INT_CST_LOW (x); + /* Cannot use tree_to_uhwi here or it will ice if above message printed. */ + align = tree_to_hwi (x); if (align == -1) action = set; } diff --git a/gcc/c-family/c-pretty-print.c b/gcc/c-family/c-pretty-print.c index fed58021cd8..c68221449bc 100644 --- a/gcc/c-family/c-pretty-print.c +++ b/gcc/c-family/c-pretty-print.c @@ -28,6 +28,7 @@ along with GCC; see the file COPYING3. If not see #include "tree-pretty-print.h" #include "tree-iterator.h" #include "diagnostic.h" +#include "wide-int-print.h" /* Translate if being used for diagnostics, but not for dump files or __PRETTY_FUNCTION. */ @@ -581,8 +582,8 @@ pp_c_direct_abstract_declarator (c_pretty_printer *pp, tree t) tree maxval = TYPE_MAX_VALUE (TYPE_DOMAIN (t)); tree type = TREE_TYPE (maxval); - if (host_integerp (maxval, 0)) - pp_wide_integer (pp, tree_low_cst (maxval, 0) + 1); + if (tree_fits_shwi_p (maxval)) + pp_wide_integer (pp, tree_to_shwi (maxval) + 1); else pp_expression (pp, fold_build2 (PLUS_EXPR, type, maxval, build_int_cst (type, 1))); @@ -910,22 +911,20 @@ pp_c_integer_constant (c_pretty_printer *pp, tree i) ? 
? TYPE_CANONICAL (TREE_TYPE (i)) : TREE_TYPE (i); - if (host_integerp (i, 0)) - pp_wide_integer (pp, TREE_INT_CST_LOW (i)); - else if (host_integerp (i, 1)) - pp_unsigned_wide_integer (pp, TREE_INT_CST_LOW (i)); + if (tree_fits_shwi_p (i)) + pp_wide_integer (pp, tree_to_shwi (i)); + else if (tree_fits_uhwi_p (i)) + pp_unsigned_wide_integer (pp, tree_to_uhwi (i)); else { - unsigned HOST_WIDE_INT low = TREE_INT_CST_LOW (i); - HOST_WIDE_INT high = TREE_INT_CST_HIGH (i); - if (tree_int_cst_sgn (i) < 0) + wide_int wi = i; + + if (wi.lt_p (0, TYPE_SIGN (TREE_TYPE (i)))) { pp_minus (pp); - high = ~high + !low; - low = -low; + wi = -wi; } - sprintf (pp_buffer (pp)->digit_buffer, HOST_WIDE_INT_PRINT_DOUBLE_HEX, - (unsigned HOST_WIDE_INT) high, (unsigned HOST_WIDE_INT) low); + print_hex (wi, pp_buffer (pp)->digit_buffer); pp_string (pp, pp_buffer (pp)->digit_buffer); } if (TYPE_UNSIGNED (type)) @@ -949,10 +948,10 @@ pp_c_character_constant (c_pretty_printer *pp, tree c) if (type == wchar_type_node) pp_character (pp, 'L'); pp_quote (pp); - if (host_integerp (c, TYPE_UNSIGNED (type))) - pp_c_char (pp, tree_low_cst (c, TYPE_UNSIGNED (type))); + if (tree_fits_hwi_p (c, TYPE_SIGN (type))) + pp_c_char (pp, tree_to_hwi (c, TYPE_SIGN (type))); else - pp_scalar (pp, "\\x%x", (unsigned) TREE_INT_CST_LOW (c)); + pp_scalar (pp, "\\x%x", (unsigned) tree_to_hwi (c)); pp_quote (pp); } @@ -1591,8 +1590,8 @@ pp_c_postfix_expression (c_pretty_printer *pp, tree e) if (type && tree_int_cst_equal (TYPE_SIZE (type), TREE_OPERAND (e, 1))) { - HOST_WIDE_INT bitpos = tree_low_cst (TREE_OPERAND (e, 2), 0); - HOST_WIDE_INT size = tree_low_cst (TYPE_SIZE (type), 0); + HOST_WIDE_INT bitpos = tree_to_shwi (TREE_OPERAND (e, 2)); + HOST_WIDE_INT size = tree_to_shwi (TYPE_SIZE (type)); if ((bitpos % size) == 0) { pp_c_left_paren (pp); diff --git a/gcc/c/Make-lang.in b/gcc/c/Make-lang.in index 1161742c274..8a4eb85cbbe 100644 --- a/gcc/c/Make-lang.in +++ b/gcc/c/Make-lang.in @@ -190,7 +190,7 @@ c/c-parser.o : c/c-parser.c $(CONFIG_H) $(SYSTEM_H) coretypes.h \ c/c-typeck.o : c/c-typeck.c c/c-lang.h $(CONFIG_H) $(SYSTEM_H) coretypes.h $(TM_H) \ $(TREE_H) $(C_TREE_H) $(TARGET_H) $(FLAGS_H) intl.h \ langhooks.h tree-iterator.h $(BITMAP_H) $(GIMPLE_H) \ - c-family/c-objc.h c-family/c-common.h + c-family/c-objc.h c-family/c-common.h wide-int.h c/c-array-notation.o: c/c-array-notation.c c/c-lang.h $(CONFIG_H) \ $(SYSTEM_H) coretypes.h $(TREE_H) $(C_TREE_H) $(TARGET_H) \ diff --git a/gcc/c/c-decl.c b/gcc/c/c-decl.c index f7ae648f024..321ae0bf53c 100644 --- a/gcc/c/c-decl.c +++ b/gcc/c/c-decl.c @@ -4790,14 +4790,14 @@ check_bitfield_type_and_width (tree *type, tree *width, tree orig_name) *width = build_int_cst (integer_type_node, w); } else - w = tree_low_cst (*width, 1); + w = tree_to_uhwi (*width); if (TREE_CODE (*type) == ENUMERAL_TYPE) { struct lang_type *lt = TYPE_LANG_SPECIFIC (*type); if (!lt - || w < tree_int_cst_min_precision (lt->enum_min, TYPE_UNSIGNED (*type)) - || w < tree_int_cst_min_precision (lt->enum_max, TYPE_UNSIGNED (*type))) + || w < tree_int_cst_min_precision (lt->enum_min, TYPE_SIGN (*type)) + || w < tree_int_cst_min_precision (lt->enum_max, TYPE_SIGN (*type))) warning (0, "%qs is narrower than values of its type", name); } } @@ -5827,7 +5827,7 @@ grokdeclarator (const struct c_declarator *declarator, else error_at (loc, "size of unnamed array is too large"); /* If we proceed with the array type as it is, we'll eventually - crash in tree_low_cst(). */ +
*/ type = error_mark_node; } @@ -7153,7 +7153,7 @@ finish_struct (location_t loc, tree t, tree fieldlist, tree attributes, if (DECL_INITIAL (x)) { - unsigned HOST_WIDE_INT width = tree_low_cst (DECL_INITIAL (x), 1); + unsigned HOST_WIDE_INT width = tree_to_uhwi (DECL_INITIAL (x)); DECL_SIZE (x) = bitsize_int (width); DECL_BIT_FIELD (x) = 1; SET_DECL_C_BIT_FIELD (x); @@ -7224,7 +7224,7 @@ finish_struct (location_t loc, tree t, tree fieldlist, tree attributes, && TREE_TYPE (*fieldlistp) != error_mark_node) { unsigned HOST_WIDE_INT width - = tree_low_cst (DECL_INITIAL (*fieldlistp), 1); + = tree_to_uhwi (DECL_INITIAL (*fieldlistp)); tree type = TREE_TYPE (*fieldlistp); if (width != TYPE_PRECISION (type)) { @@ -7450,7 +7450,8 @@ finish_enum (tree enumtype, tree values, tree attributes) { tree pair, tem; tree minnode = 0, maxnode = 0; - int precision, unsign; + int precision; + signop sign; bool toplevel = (file_scope == current_scope); struct lang_type *lt; @@ -7477,13 +7478,13 @@ finish_enum (tree enumtype, tree values, tree attributes) as one of the integral types - the narrowest one that fits, except that normally we only go as narrow as int - and signed iff any of the values are negative. */ - unsign = (tree_int_cst_sgn (minnode) >= 0); - precision = MAX (tree_int_cst_min_precision (minnode, unsign), - tree_int_cst_min_precision (maxnode, unsign)); + sign = (tree_int_cst_sgn (minnode) >= 0) ? UNSIGNED : SIGNED; + precision = MAX (tree_int_cst_min_precision (minnode, sign), + tree_int_cst_min_precision (maxnode, sign)); if (TYPE_PACKED (enumtype) || precision > TYPE_PRECISION (integer_type_node)) { - tem = c_common_type_for_size (precision, unsign); + tem = c_common_type_for_size (precision, sign == UNSIGNED ? 1 : 0); if (tem == NULL) { warning (0, "enumeration values exceed range of largest integer"); @@ -7491,7 +7492,7 @@ finish_enum (tree enumtype, tree values, tree attributes) } } else - tem = unsign ? unsigned_type_node : integer_type_node; + tem = sign == UNSIGNED ? unsigned_type_node : integer_type_node; TYPE_MIN_VALUE (enumtype) = TYPE_MIN_VALUE (tem); TYPE_MAX_VALUE (enumtype) = TYPE_MAX_VALUE (tem); diff --git a/gcc/c/c-parser.c b/gcc/c/c-parser.c index b612e29c852..db0f730f787 100644 --- a/gcc/c/c-parser.c +++ b/gcc/c/c-parser.c @@ -378,7 +378,7 @@ c_lex_one_token (c_parser *parser, c_token *token) break; case CPP_PRAGMA: /* We smuggled the cpp_token->u.pragma value in an INTEGER_CST. */ - token->pragma_kind = (enum pragma_kind) TREE_INT_CST_LOW (token->value); + token->pragma_kind = (enum pragma_kind) tree_to_hwi (token->value); token->value = NULL; break; default: @@ -9210,8 +9210,8 @@ c_parser_omp_clause_collapse (c_parser *parser, tree list) if (num == error_mark_node) return list; if (!INTEGRAL_TYPE_P (TREE_TYPE (num)) - || !host_integerp (num, 0) - || (n = tree_low_cst (num, 0)) <= 0 + || !tree_fits_shwi_p (num) + || (n = tree_to_shwi (num)) <= 0 || (int) n != n) { error_at (loc, @@ -10261,7 +10261,7 @@ c_parser_omp_for_loop (location_t loc, for (cl = clauses; cl; cl = OMP_CLAUSE_CHAIN (cl)) if (OMP_CLAUSE_CODE (cl) == OMP_CLAUSE_COLLAPSE) - collapse = tree_low_cst (OMP_CLAUSE_COLLAPSE_EXPR (cl), 0); + collapse = tree_to_shwi (OMP_CLAUSE_COLLAPSE_EXPR (cl)); gcc_assert (collapse >= 1); diff --git a/gcc/c/c-typeck.c b/gcc/c/c-typeck.c index 30871db3623..ce0cddc8ae6 100644 --- a/gcc/c/c-typeck.c +++ b/gcc/c/c-typeck.c @@ -39,6 +39,7 @@ along with GCC; see the file COPYING3. 
If not see #include "gimple.h" #include "c-family/c-objc.h" #include "c-family/c-common.h" +#include "wide-int.h" /* Possible cases of implicit bad conversions. Used to select diagnostic messages in convert_for_assignment. */ @@ -4736,9 +4737,8 @@ build_c_cast (location_t loc, tree type, tree expr) } else if (TREE_OVERFLOW (value)) /* Reset VALUE's overflow flags, ensuring constant sharing. */ - value = build_int_cst_wide (TREE_TYPE (value), - TREE_INT_CST_LOW (value), - TREE_INT_CST_HIGH (value)); + value = wide_int_to_tree (TREE_TYPE (value), + value); } } @@ -6798,7 +6798,7 @@ push_init_level (int implicit, struct obstack * braced_init_obstack) else if (TREE_CODE (constructor_type) == ARRAY_TYPE) { constructor_type = TREE_TYPE (constructor_type); - push_array_bounds (tree_low_cst (constructor_index, 1)); + push_array_bounds (tree_to_uhwi (constructor_index)); constructor_depth++; } @@ -7597,20 +7597,20 @@ set_nonincremental_init_from_string (tree str, { if (wchar_bytes == 1) { - val[1] = (unsigned char) *p++; - val[0] = 0; + val[0] = (unsigned char) *p++; + val[1] = 0; } else { - val[0] = 0; val[1] = 0; + val[0] = 0; for (byte = 0; byte < wchar_bytes; byte++) { if (BYTES_BIG_ENDIAN) bitpos = (wchar_bytes - byte - 1) * charwidth; else bitpos = byte * charwidth; - val[bitpos < HOST_BITS_PER_WIDE_INT] + val[bitpos / HOST_BITS_PER_WIDE_INT] |= ((unsigned HOST_WIDE_INT) ((unsigned char) *p++)) << (bitpos % HOST_BITS_PER_WIDE_INT); } @@ -7621,24 +7621,26 @@ set_nonincremental_init_from_string (tree str, bitpos = ((wchar_bytes - 1) * charwidth) + HOST_BITS_PER_CHAR; if (bitpos < HOST_BITS_PER_WIDE_INT) { - if (val[1] & (((HOST_WIDE_INT) 1) << (bitpos - 1))) + if (val[0] & (((HOST_WIDE_INT) 1) << (bitpos - 1))) { - val[1] |= ((HOST_WIDE_INT) -1) << bitpos; - val[0] = -1; + val[0] |= ((HOST_WIDE_INT) -1) << bitpos; + val[1] = -1; } } else if (bitpos == HOST_BITS_PER_WIDE_INT) { - if (val[1] < 0) - val[0] = -1; + if (val[0] < 0) + val[1] = -1; } - else if (val[0] & (((HOST_WIDE_INT) 1) + else if (val[1] & (((HOST_WIDE_INT) 1) << (bitpos - 1 - HOST_BITS_PER_WIDE_INT))) - val[0] |= ((HOST_WIDE_INT) -1) + val[1] |= ((HOST_WIDE_INT) -1) << (bitpos - HOST_BITS_PER_WIDE_INT); } - value = build_int_cst_wide (type, val[1], val[0]); + value = wide_int_to_tree (type, + wide_int::from_array (val, 2, + HOST_BITS_PER_WIDE_INT * 2)); add_pending_init (purpose, value, NULL_TREE, true, braced_init_obstack); } @@ -8348,7 +8350,7 @@ process_init_element (struct c_expr value, bool implicit, /* Now output the actual element. */ if (value.value) { - push_array_bounds (tree_low_cst (constructor_index, 1)); + push_array_bounds (tree_to_uhwi (constructor_index)); output_init_element (value.value, value.original_type, strict_string, elttype, constructor_index, 1, implicit, @@ -9144,7 +9146,7 @@ c_finish_bc_stmt (location_t loc, tree *label_p, bool is_break) } else if (TREE_CODE (label) == LABEL_DECL) ; - else switch (TREE_INT_CST_LOW (label)) + else switch (tree_to_hwi (label)) { case 0: if (is_break) diff --git a/gcc/cfgexpand.c b/gcc/cfgexpand.c index a7d9170779c..2fc51164142 100644 --- a/gcc/cfgexpand.c +++ b/gcc/cfgexpand.c @@ -270,7 +270,7 @@ add_stack_var (tree decl) * (size_t *)pointer_map_insert (decl_to_stack_part, decl) = stack_vars_num; v->decl = decl; - v->size = tree_low_cst (DECL_SIZE_UNIT (SSAVAR (decl)), 1); + v->size = tree_to_uhwi (DECL_SIZE_UNIT (SSAVAR (decl))); /* Ensure that all variables have size, so that &a != &b for any two variables that are simultaneously live.
*/ if (v->size == 0) @@ -1034,7 +1034,7 @@ expand_one_stack_var (tree var) HOST_WIDE_INT size, offset; unsigned byte_align; - size = tree_low_cst (DECL_SIZE_UNIT (SSAVAR (var)), 1); + size = tree_to_uhwi (DECL_SIZE_UNIT (SSAVAR (var))); byte_align = align_local_variable (SSAVAR (var)); /* We handle highly aligned variables in expand_stack_vars. */ @@ -1131,7 +1131,7 @@ defer_stack_allocation (tree var, bool toplevel) other hand, we don't want the function's stack frame size to get completely out of hand. So we avoid adding scalars and "small" aggregates to the list at all. */ - if (optimize == 0 && tree_low_cst (DECL_SIZE_UNIT (var), 1) < 32) + if (optimize == 0 && tree_to_uhwi (DECL_SIZE_UNIT (var)) < 32) return false; return true; @@ -1245,7 +1245,7 @@ expand_one_var (tree var, bool toplevel, bool really_expand) { if (really_expand) expand_one_stack_var (origvar); - return tree_low_cst (DECL_SIZE_UNIT (var), 1); + return tree_to_uhwi (DECL_SIZE_UNIT (var)); } return 0; } @@ -1322,10 +1322,10 @@ stack_protect_classify_type (tree type) unsigned HOST_WIDE_INT len; if (!TYPE_SIZE_UNIT (type) - || !host_integerp (TYPE_SIZE_UNIT (type), 1)) + || !tree_fits_uhwi_p (TYPE_SIZE_UNIT (type))) len = max; else - len = tree_low_cst (TYPE_SIZE_UNIT (type), 1); + len = tree_to_uhwi (TYPE_SIZE_UNIT (type)); if (len < max) ret = SPCT_HAS_SMALL_CHAR_ARRAY | SPCT_HAS_ARRAY; diff --git a/gcc/cfgloop.c b/gcc/cfgloop.c index 01287246c33..11e8e36f6eb 100644 --- a/gcc/cfgloop.c +++ b/gcc/cfgloop.c @@ -332,7 +332,8 @@ alloc_loop (void) loop->exits = ggc_alloc_cleared_loop_exit (); loop->exits->next = loop->exits->prev = loop->exits; loop->can_be_parallel = false; - + loop->nb_iterations_upper_bound = 0; + loop->nb_iterations_estimate = 0; return loop; } diff --git a/gcc/cfgloop.h b/gcc/cfgloop.h index 0f247996630..8da409e7c14 100644 --- a/gcc/cfgloop.h +++ b/gcc/cfgloop.h @@ -22,6 +22,7 @@ along with GCC; see the file COPYING3. If not see #include "basic-block.h" #include "double-int.h" +#include "wide-int.h" #include "bitmap.h" #include "sbitmap.h" @@ -63,7 +64,7 @@ struct GTY ((chain_next ("%h.next"))) nb_iter_bound { overflows (as MAX + 1 is sometimes produced as the estimate on number of executions of STMT). b) it is consistent with the result of number_of_iterations_exit. */ - double_int bound; + max_wide_int bound; /* True if the statement will cause the loop to be leaved the (at most) BOUND + 1-st time it is executed, that is, all the statements after it @@ -147,12 +148,12 @@ struct GTY ((chain_next ("%h.next"))) loop { /* An integer guaranteed to be greater or equal to nb_iterations. Only valid if any_upper_bound is true. */ - double_int nb_iterations_upper_bound; + max_wide_int nb_iterations_upper_bound; /* An integer giving an estimate on nb_iterations. Unlike nb_iterations_upper_bound, there is no guarantee that it is at least nb_iterations. 
*/ - double_int nb_iterations_estimate; + max_wide_int nb_iterations_estimate; bool any_upper_bound; bool any_estimate; @@ -293,13 +294,13 @@ extern unsigned expected_loop_iterations (const struct loop *); extern rtx doloop_condition_get (rtx); void estimate_numbers_of_iterations_loop (struct loop *); -void record_niter_bound (struct loop *, double_int, bool, bool); -bool estimated_loop_iterations (struct loop *, double_int *); -bool max_loop_iterations (struct loop *, double_int *); +void record_niter_bound (struct loop *, const max_wide_int &, bool, bool); +bool estimated_loop_iterations (struct loop *, max_wide_int *); +bool max_loop_iterations (struct loop *, max_wide_int *); HOST_WIDE_INT estimated_loop_iterations_int (struct loop *); HOST_WIDE_INT max_loop_iterations_int (struct loop *); -bool max_stmt_executions (struct loop *, double_int *); -bool estimated_stmt_executions (struct loop *, double_int *); +bool max_stmt_executions (struct loop *, max_wide_int *); +bool estimated_stmt_executions (struct loop *, max_wide_int *); HOST_WIDE_INT max_stmt_executions_int (struct loop *); HOST_WIDE_INT estimated_stmt_executions_int (struct loop *); diff --git a/gcc/cgraph.c b/gcc/cgraph.c index a939ae83484..21086a94ff2 100644 --- a/gcc/cgraph.c +++ b/gcc/cgraph.c @@ -624,8 +624,7 @@ cgraph_add_thunk (struct cgraph_node *decl_node ATTRIBUTE_UNUSED, node = cgraph_create_node (alias); gcc_checking_assert (!virtual_offset - || tree_to_double_int (virtual_offset) == - double_int::from_shwi (virtual_value)); + || wide_int::eq_p (virtual_offset, virtual_value)); node->thunk.fixed_offset = fixed_offset; node->thunk.this_adjusting = this_adjusting; node->thunk.virtual_value = virtual_value; diff --git a/gcc/cgraphunit.c b/gcc/cgraphunit.c index 3cd2b417ca7..9cc21ff6967 100644 --- a/gcc/cgraphunit.c +++ b/gcc/cgraphunit.c @@ -1632,7 +1632,7 @@ expand_function (struct cgraph_node *node) larger_than_size)) { unsigned int size_as_int - = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (ret_type)); + = tree_to_hwi (TYPE_SIZE_UNIT (ret_type)); if (compare_tree_int (TYPE_SIZE_UNIT (ret_type), size_as_int) == 0) warning (OPT_Wlarger_than_, "size of return value of %q+D is %u bytes", diff --git a/gcc/combine.c b/gcc/combine.c index 29dfd15cc9f..474998dd29b 100644 --- a/gcc/combine.c +++ b/gcc/combine.c @@ -2669,23 +2669,15 @@ try_combine (rtx i3, rtx i2, rtx i1, rtx i0, int *new_direct_jump_p, offset = -1; } - if (offset >= 0 - && (GET_MODE_PRECISION (GET_MODE (SET_DEST (temp))) - <= HOST_BITS_PER_DOUBLE_INT)) + if (offset >= 0) { - double_int m, o, i; + wide_int o; rtx inner = SET_SRC (PATTERN (i3)); rtx outer = SET_SRC (temp); - - o = rtx_to_double_int (outer); - i = rtx_to_double_int (inner); - - m = double_int::mask (width); - i &= m; - m = m.llshift (offset, HOST_BITS_PER_DOUBLE_INT); - i = i.llshift (offset, HOST_BITS_PER_DOUBLE_INT); - o = o.and_not (m) | i; - + + o = (wide_int (std::make_pair (outer, GET_MODE (SET_DEST (temp)))) + .insert (std::make_pair (inner, GET_MODE (dest)), + offset, width)); combine_merges++; subst_insn = i3; subst_low_luid = DF_INSN_LUID (i2); @@ -2696,8 +2688,8 @@ try_combine (rtx i3, rtx i2, rtx i1, rtx i0, int *new_direct_jump_p, /* Replace the source in I2 with the new constant and make the resulting insn the new pattern for I3. Then skip to where we validate the pattern. Everything was set up above. 
*/ - SUBST (SET_SRC (temp), - immed_double_int_const (o, GET_MODE (SET_DEST (temp)))); + SUBST (SET_SRC (temp), + immed_wide_int_const (o, GET_MODE (SET_DEST (temp)))); newpat = PATTERN (i2); @@ -5112,7 +5104,7 @@ subst (rtx x, rtx from, rtx to, int in_dest, int in_cond, int unique_copy) if (! x) x = gen_rtx_CLOBBER (mode, const0_rtx); } - else if (CONST_INT_P (new_rtx) + else if (CONST_SCALAR_INT_P (new_rtx) && GET_CODE (x) == ZERO_EXTEND) { x = simplify_unary_operation (ZERO_EXTEND, GET_MODE (x), diff --git a/gcc/config/aarch64/aarch64.c b/gcc/config/aarch64/aarch64.c index aed035a434e..586a67f926a 100644 --- a/gcc/config/aarch64/aarch64.c +++ b/gcc/config/aarch64/aarch64.c @@ -5996,18 +5996,18 @@ aapcs_vfp_sub_candidate (const_tree type, enum machine_mode *modep) if (count == -1 || !index || !TYPE_MAX_VALUE (index) - || !host_integerp (TYPE_MAX_VALUE (index), 1) + || !tree_fits_uhwi_p (TYPE_MAX_VALUE (index)) || !TYPE_MIN_VALUE (index) - || !host_integerp (TYPE_MIN_VALUE (index), 1) + || !tree_fits_uhwi_p (TYPE_MIN_VALUE (index)) || count < 0) return -1; - count *= (1 + tree_low_cst (TYPE_MAX_VALUE (index), 1) - - tree_low_cst (TYPE_MIN_VALUE (index), 1)); + count *= (1 + tree_to_uhwi (TYPE_MAX_VALUE (index)) + - tree_to_uhwi (TYPE_MIN_VALUE (index))); /* There must be no padding. */ - if (!host_integerp (TYPE_SIZE (type), 1) - || (tree_low_cst (TYPE_SIZE (type), 1) + if (!tree_fits_uhwi_p (TYPE_SIZE (type)) + || (tree_to_uhwi (TYPE_SIZE (type)) != count * GET_MODE_BITSIZE (*modep))) return -1; @@ -6036,8 +6036,8 @@ aapcs_vfp_sub_candidate (const_tree type, enum machine_mode *modep) } /* There must be no padding. */ - if (!host_integerp (TYPE_SIZE (type), 1) - || (tree_low_cst (TYPE_SIZE (type), 1) + if (!tree_fits_uhwi_p (TYPE_SIZE (type)) + || (tree_to_uhwi (TYPE_SIZE (type)) != count * GET_MODE_BITSIZE (*modep))) return -1; @@ -6068,8 +6068,8 @@ aapcs_vfp_sub_candidate (const_tree type, enum machine_mode *modep) } /* There must be no padding. */ - if (!host_integerp (TYPE_SIZE (type), 1) - || (tree_low_cst (TYPE_SIZE (type), 1) + if (!tree_fits_uhwi_p (TYPE_SIZE (type)) + || (tree_to_uhwi (TYPE_SIZE (type)) != count * GET_MODE_BITSIZE (*modep))) return -1; @@ -6822,7 +6822,7 @@ aarch64_simd_attr_length_move (rtx insn) static HOST_WIDE_INT aarch64_simd_vector_alignment (const_tree type) { - HOST_WIDE_INT align = tree_low_cst (TYPE_SIZE (type), 0); + HOST_WIDE_INT align = tree_to_shwi (TYPE_SIZE (type)); return MIN (align, 128); } @@ -7401,8 +7401,9 @@ aarch64_float_const_representable_p (rtx x) int point_pos = 2 * HOST_BITS_PER_WIDE_INT - 1; int exponent; unsigned HOST_WIDE_INT mantissa, mask; - HOST_WIDE_INT m1, m2; REAL_VALUE_TYPE r, m; + bool fail; + wide_int w; if (!CONST_DOUBLE_P (x)) return false; @@ -7426,16 +7426,16 @@ aarch64_float_const_representable_p (rtx x) WARNING: If we ever have a representation using more than 2 * H_W_I - 1 bits for the mantissa, this can fail (low bits will be lost). */ real_ldexp (&m, &r, point_pos - exponent); - REAL_VALUE_TO_INT (&m1, &m2, m); + w = real_to_integer (m, &fail, HOST_BITS_PER_WIDE_INT * 2); /* If the low part of the mantissa has bits set we cannot represent the value. */ - if (m1 != 0) + if (w.elt (0) != 0) return false; /* We have rejected the lower HOST_WIDE_INT, so update our understanding of how many bits lie in the mantissa and look only at the high HOST_WIDE_INT. */ - mantissa = m2; + mantissa = w.elt (1); point_pos -= HOST_BITS_PER_WIDE_INT; /* We can only represent values with a mantissa of the form 1.xxxx.
*/ diff --git a/gcc/config/alpha/alpha.c b/gcc/config/alpha/alpha.c index 5f5b33e347b..44b4225072b 100644 --- a/gcc/config/alpha/alpha.c +++ b/gcc/config/alpha/alpha.c @@ -5857,7 +5857,7 @@ va_list_skip_additions (tree lhs) if (!CONVERT_EXPR_CODE_P (code) && ((code != PLUS_EXPR && code != POINTER_PLUS_EXPR) || TREE_CODE (gimple_assign_rhs2 (stmt)) != INTEGER_CST - || !host_integerp (gimple_assign_rhs2 (stmt), 1))) + || !tree_fits_uhwi_p (gimple_assign_rhs2 (stmt)))) return stmt; lhs = gimple_assign_rhs1 (stmt); @@ -5983,10 +5983,10 @@ alpha_stdarg_optimize_hook (struct stdarg_info *si, const_gimple stmt) else goto escapes; - if (!host_integerp (gimple_assign_rhs2 (arg2_stmt), 0)) + if (!tree_fits_shwi_p (gimple_assign_rhs2 (arg2_stmt))) goto escapes; - sub = tree_low_cst (gimple_assign_rhs2 (arg2_stmt), 0); + sub = tree_to_shwi (gimple_assign_rhs2 (arg2_stmt)); if (code2 == MINUS_EXPR) sub = -sub; if (sub < -48 || sub > -32) diff --git a/gcc/config/alpha/predicates.md b/gcc/config/alpha/predicates.md index a63d1254a6f..8a2166c1e03 100644 --- a/gcc/config/alpha/predicates.md +++ b/gcc/config/alpha/predicates.md @@ -357,7 +357,7 @@ && !SYMBOL_REF_TLS_MODEL (op)) { if (SYMBOL_REF_DECL (op)) - max_ofs = tree_low_cst (DECL_SIZE_UNIT (SYMBOL_REF_DECL (op)), 1); + max_ofs = tree_to_uhwi (DECL_SIZE_UNIT (SYMBOL_REF_DECL (op))); } else return false; diff --git a/gcc/config/arm/arm.c b/gcc/config/arm/arm.c index f731bb60beb..fb66dff5916 100644 --- a/gcc/config/arm/arm.c +++ b/gcc/config/arm/arm.c @@ -4349,18 +4349,18 @@ aapcs_vfp_sub_candidate (const_tree type, enum machine_mode *modep) if (count == -1 || !index || !TYPE_MAX_VALUE (index) - || !host_integerp (TYPE_MAX_VALUE (index), 1) + || !tree_fits_uhwi_p (TYPE_MAX_VALUE (index)) || !TYPE_MIN_VALUE (index) - || !host_integerp (TYPE_MIN_VALUE (index), 1) + || !tree_fits_uhwi_p (TYPE_MIN_VALUE (index)) || count < 0) return -1; - count *= (1 + tree_low_cst (TYPE_MAX_VALUE (index), 1) - - tree_low_cst (TYPE_MIN_VALUE (index), 1)); + count *= (1 + tree_to_uhwi (TYPE_MAX_VALUE (index)) + - tree_to_uhwi (TYPE_MIN_VALUE (index))); /* There must be no padding. */ - if (!host_integerp (TYPE_SIZE (type), 1) - || (tree_low_cst (TYPE_SIZE (type), 1) + if (!tree_fits_uhwi_p (TYPE_SIZE (type)) + || (tree_to_uhwi (TYPE_SIZE (type)) != count * GET_MODE_BITSIZE (*modep))) return -1; @@ -4389,8 +4389,8 @@ aapcs_vfp_sub_candidate (const_tree type, enum machine_mode *modep) } /* There must be no padding. */ - if (!host_integerp (TYPE_SIZE (type), 1) - || (tree_low_cst (TYPE_SIZE (type), 1) + if (!tree_fits_uhwi_p (TYPE_SIZE (type)) + || (tree_to_uhwi (TYPE_SIZE (type)) != count * GET_MODE_BITSIZE (*modep))) return -1; @@ -4421,8 +4421,8 @@ aapcs_vfp_sub_candidate (const_tree type, enum machine_mode *modep) } /* There must be no padding. */ - if (!host_integerp (TYPE_SIZE (type), 1) - || (tree_low_cst (TYPE_SIZE (type), 1) + if (!tree_fits_uhwi_p (TYPE_SIZE (type)) + || (tree_to_uhwi (TYPE_SIZE (type)) != count * GET_MODE_BITSIZE (*modep))) return -1; @@ -9242,8 +9242,9 @@ vfp3_const_double_index (rtx x) int sign, exponent; unsigned HOST_WIDE_INT mantissa, mant_hi; unsigned HOST_WIDE_INT mask; - HOST_WIDE_INT m1, m2; int point_pos = 2 * HOST_BITS_PER_WIDE_INT - 1; + bool fail; + wide_int w; if (!TARGET_VFP3 || !CONST_DOUBLE_P (x)) return -1; @@ -9263,9 +9264,9 @@ vfp3_const_double_index (rtx x) WARNING: If there's ever a VFP version which uses more than 2 * H_W_I - 1 bits for the mantissa, this may fail (low bits would be lost). 
*/ real_ldexp (&m, &r, point_pos - exponent); - REAL_VALUE_TO_INT (&m1, &m2, m); - mantissa = m1; - mant_hi = m2; + w = real_to_integer (m, &fail, HOST_BITS_PER_WIDE_INT * 2); + mantissa = w.elt (0); + mant_hi = w.elt (1); /* If there are bits set in the low part of the mantissa, we can't represent this value. */ @@ -26650,7 +26651,7 @@ arm_builtin_vectorized_function (tree fndecl, tree type_out, tree type_in) static HOST_WIDE_INT arm_vector_alignment (const_tree type) { - HOST_WIDE_INT align = tree_low_cst (TYPE_SIZE (type), 0); + HOST_WIDE_INT align = tree_to_shwi (TYPE_SIZE (type)); if (TARGET_AAPCS_BASED) align = MIN (align, 64); diff --git a/gcc/config/avr/avr-log.c b/gcc/config/avr/avr-log.c index 9e538e60124..18215679b71 100644 --- a/gcc/config/avr/avr-log.c +++ b/gcc/config/avr/avr-log.c @@ -142,28 +142,27 @@ avr_log_set_caller_f (const char *caller) Splits last digit of *CST (taken as unsigned) in BASE and returns it. */ static unsigned -avr_double_int_pop_digit (double_int *cst, unsigned base) +avr_wide_int_pop_digit (wide_int *cst, unsigned base) { - double_int drem; + wide_int wrem; - *cst = cst->udivmod (double_int::from_uhwi (base), (int) FLOOR_DIV_EXPR, - &drem); + *cst = cst->udivmod_floor (base, &wrem); - return (unsigned) drem.to_uhwi(); + return (unsigned) wrem.to_uhwi(); } /* Dump VAL as hex value to FILE. */ static void -avr_dump_double_int_hex (FILE *file, double_int val) +avr_dump_wide_int_hex (FILE *file, wide_int val) { unsigned digit[4]; - digit[0] = avr_double_int_pop_digit (&val, 1 << 16); - digit[1] = avr_double_int_pop_digit (&val, 1 << 16); - digit[2] = avr_double_int_pop_digit (&val, 1 << 16); - digit[3] = avr_double_int_pop_digit (&val, 1 << 16); + digit[0] = avr_wide_int_pop_digit (&val, 1 << 16); + digit[1] = avr_wide_int_pop_digit (&val, 1 << 16); + digit[2] = avr_wide_int_pop_digit (&val, 1 << 16); + digit[3] = avr_wide_int_pop_digit (&val, 1 << 16); fprintf (file, "0x"); @@ -232,7 +231,7 @@ avr_log_vadump (FILE *file, const char *fmt, va_list ap) break; case 'D': - dump_double_int (file, va_arg (ap, double_int), false); + dump_double_int (file, va_arg (ap, double_int), false); break; case 'X': diff --git a/gcc/config/avr/avr.c b/gcc/config/avr/avr.c index 14a3eee7c72..e013d5cb2fd 100644 --- a/gcc/config/avr/avr.c +++ b/gcc/config/avr/avr.c @@ -11173,24 +11173,24 @@ avr_expand_delay_cycles (rtx operands0) /* Return VAL * BASE + DIGIT. BASE = 0 is shortcut for BASE = 2^{32} */ -static double_int -avr_double_int_push_digit (double_int val, int base, - unsigned HOST_WIDE_INT digit) +static wide_int +avr_wide_int_push_digit (wide_int val, int base, + unsigned HOST_WIDE_INT digit) { val = 0 == base - ? val.llshift (32, 64) - : val * double_int::from_uhwi (base); + ? val.llshift (32) + : val * base; - return val + double_int::from_uhwi (digit); + return val + digit; } /* Compute the image of x under f, i.e. 
perform x --> f(x) */ static int -avr_map (double_int f, int x) +avr_map (wide_int f, int x) { - return 0xf & f.lrshift (4*x, 64).to_uhwi (); + return 0xf & f.lrshift (4*x).to_uhwi (); } @@ -11215,7 +11215,7 @@ enum }; static unsigned -avr_map_metric (double_int a, int mode) +avr_map_metric (wide_int a, int mode) { unsigned i, metric = 0; @@ -11248,7 +11248,7 @@ avr_map_metric (double_int a, int mode) bool avr_has_nibble_0xf (rtx ival) { - return 0 != avr_map_metric (rtx_to_double_int (ival), MAP_MASK_PREIMAGE_F); + return 0 != avr_map_metric (wide_int::from_rtx (ival), MAP_MASK_PREIMAGE_F); } @@ -11282,7 +11282,7 @@ typedef struct int cost; /* The composition F o G^-1 (*, arg) for some function F */ - double_int map; + wide_int map; /* For debug purpose only */ const char *str; @@ -11313,12 +11313,12 @@ static const avr_map_op_t avr_map_op[] = If result.cost < 0 then such a decomposition does not exist. */ static avr_map_op_t -avr_map_decompose (double_int f, const avr_map_op_t *g, bool val_const_p) +avr_map_decompose (wide_int f, const avr_map_op_t *g, bool val_const_p) { int i; bool val_used_p = 0 != avr_map_metric (f, MAP_MASK_PREIMAGE_F); avr_map_op_t f_ginv = *g; - double_int ginv = double_int::from_uhwi (g->ginv); + wide_int ginv = wide_int::from_uhwi (g->ginv); f_ginv.cost = -1; @@ -11338,7 +11338,7 @@ avr_map_decompose (double_int f, const avr_map_op_t *g, bool val_const_p) return f_ginv; } - f_ginv.map = avr_double_int_push_digit (f_ginv.map, 16, x); + f_ginv.map = avr_wide_int_push_digit (f_ginv.map, 16, x); } /* Step 2: Compute the cost of the operations. @@ -11390,7 +11390,7 @@ avr_map_decompose (double_int f, const avr_map_op_t *g, bool val_const_p) is different to its source position. */ static void -avr_move_bits (rtx *xop, double_int map, bool fixp_p, int *plen) +avr_move_bits (rtx *xop, wide_int map, bool fixp_p, int *plen) { int bit_dest, b; @@ -11443,7 +11443,7 @@ avr_move_bits (rtx *xop, double_int map, bool fixp_p, int *plen) const char* avr_out_insert_bits (rtx *op, int *plen) { - double_int map = rtx_to_double_int (op[1]); + wide_int map = wide_int::from_rtx (op[1]); unsigned mask_fixed; bool fixp_p = true; rtx xop[4]; @@ -11891,7 +11891,7 @@ avr_expand_builtin (tree exp, rtx target, if (TREE_CODE (CALL_EXPR_ARG (exp, 1)) != INTEGER_CST) break; - int rbit = (int) TREE_INT_CST_LOW (CALL_EXPR_ARG (exp, 1)); + int rbit = (int) tree_to_hwi (CALL_EXPR_ARG (exp, 1)); if (rbit >= (int) GET_MODE_FBIT (mode)) { @@ -12034,7 +12034,7 @@ avr_fold_builtin (tree fndecl, int n_args ATTRIBUTE_UNUSED, tree *arg, tree tval = arg[2]; tree tmap; tree map_type = TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (fndecl))); - double_int map; + wide_int map; bool changed = false; unsigned i; avr_map_op_t best_g; @@ -12047,8 +12047,8 @@ avr_fold_builtin (tree fndecl, int n_args ATTRIBUTE_UNUSED, tree *arg, break; } - map = tree_to_double_int (arg[0]); - tmap = double_int_to_tree (map_type, map); + map = wide_int::from_tree (arg[0]); + tmap = wide_int_to_tree (map_type, map); if (TREE_CODE (tval) != INTEGER_CST && 0 == avr_map_metric (map, MAP_MASK_PREIMAGE_F)) @@ -12075,7 +12075,7 @@ avr_fold_builtin (tree fndecl, int n_args ATTRIBUTE_UNUSED, tree *arg, /* Inserting bits known at compile time is easy and can be performed by AND and OR with appropriate masks. 
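The avr_map/avr_wide_int_push_digit pair above treats the builtin's 64-bit map constant as sixteen packed nibbles. A standalone C++ model of the two operations (not GCC code; helper names are hypothetical):

  #include <cstdint>
  #include <cstdio>

  /* Nibble X of map F is (F >> (4 * X)) & 0xf, mirroring avr_map.  */
  static unsigned avr_map_model (uint64_t f, int x)
  {
    return 0xf & (f >> (4 * x));
  }

  /* The base-16 case of the push helper: shift the map up one nibble
     and append DIGIT at the low end.  */
  static uint64_t push_digit_model (uint64_t val, uint64_t digit)
  {
    return val * 16 + digit;
  }

  int main ()
  {
    uint64_t f = 0x76543210;     /* identity map on nibbles 0..7 */
    printf ("%u\n", avr_map_model (f, 5));                               /* 5 */
    printf ("%llx\n", (unsigned long long) push_digit_model (0x765, 4)); /* 7654 */
  }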
*/ - int bits = TREE_INT_CST_LOW (tbits); + int bits = tree_to_hwi (tbits); int mask_ior = 0, mask_and = 0xff; for (i = 0; i < 8; i++) @@ -12152,7 +12152,7 @@ avr_fold_builtin (tree fndecl, int n_args ATTRIBUTE_UNUSED, tree *arg, /* Use map o G^-1 instead of original map to undo the effect of G. */ - tmap = double_int_to_tree (map_type, best_g.map); + tmap = wide_int_to_tree (map_type, best_g.map); return build_call_expr (fndecl, 3, tmap, tbits, tval); } /* AVR_BUILTIN_INSERT_BITS */ diff --git a/gcc/config/bfin/bfin.c b/gcc/config/bfin/bfin.c index 7fab975a673..356408c4776 100644 --- a/gcc/config/bfin/bfin.c +++ b/gcc/config/bfin/bfin.c @@ -3285,8 +3285,8 @@ bfin_local_alignment (tree type, unsigned align) memcpy can use 32 bit loads/stores. */ if (TYPE_SIZE (type) && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST - && (TREE_INT_CST_LOW (TYPE_SIZE (type)) > 8 - || TREE_INT_CST_HIGH (TYPE_SIZE (type))) && align < 32) + && wide_int::gtu_p (TYPE_SIZE (type), 8) + && align < 32) return 32; return align; } diff --git a/gcc/config/c6x/predicates.md b/gcc/config/c6x/predicates.md index 1a2fe8f69f5..fbcbdd02457 100644 --- a/gcc/config/c6x/predicates.md +++ b/gcc/config/c6x/predicates.md @@ -210,9 +210,9 @@ t = DECL_SIZE_UNIT (t); else t = TYPE_SIZE_UNIT (TREE_TYPE (t)); - if (t && host_integerp (t, 0)) + if (t && tree_fits_shwi_p (t)) { - size = tree_low_cst (t, 0); + size = tree_to_shwi (t); if (size < 0) size = 0; } diff --git a/gcc/config/darwin.c b/gcc/config/darwin.c index e07fa4c8324..89d5397b1a8 100644 --- a/gcc/config/darwin.c +++ b/gcc/config/darwin.c @@ -1271,22 +1271,18 @@ darwin_mergeable_constant_section (tree exp, { tree size = TYPE_SIZE_UNIT (TREE_TYPE (exp)); - if (TREE_CODE (size) == INTEGER_CST - && TREE_INT_CST_LOW (size) == 4 - && TREE_INT_CST_HIGH (size) == 0) - return darwin_sections[literal4_section]; - else if (TREE_CODE (size) == INTEGER_CST - && TREE_INT_CST_LOW (size) == 8 - && TREE_INT_CST_HIGH (size) == 0) - return darwin_sections[literal8_section]; - else if (HAVE_GAS_LITERAL16 - && TARGET_64BIT - && TREE_CODE (size) == INTEGER_CST - && TREE_INT_CST_LOW (size) == 16 - && TREE_INT_CST_HIGH (size) == 0) - return darwin_sections[literal16_section]; - else - return readonly_data_section; + if (TREE_CODE (size) == INTEGER_CST) + { + wide_int wsize = size; + if (wsize == 4) + return darwin_sections[literal4_section]; + else if (wsize == 8) + return darwin_sections[literal8_section]; + else if (HAVE_GAS_LITERAL16 + && TARGET_64BIT + && wsize == 16) + return darwin_sections[literal16_section]; + } } return readonly_data_section; @@ -1491,7 +1487,7 @@ machopic_select_section (tree decl, zsize = (DECL_P (decl) && (TREE_CODE (decl) == VAR_DECL || TREE_CODE (decl) == CONST_DECL) - && tree_low_cst (DECL_SIZE_UNIT (decl), 1) == 0); + && tree_to_uhwi (DECL_SIZE_UNIT (decl)) == 0); one = DECL_P (decl) && TREE_CODE (decl) == VAR_DECL @@ -1634,7 +1630,7 @@ machopic_select_section (tree decl, static bool warned_objc_46 = false; /* We shall assert that zero-sized objects are an error in ObjC meta-data. */ - gcc_assert (tree_low_cst (DECL_SIZE_UNIT (decl), 1) != 0); + gcc_assert (tree_to_uhwi (DECL_SIZE_UNIT (decl)) != 0); /* ???
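The darwin_mergeable_constant_section rewrite above collapses each paired TREE_INT_CST_LOW/TREE_INT_CST_HIGH test into one comparison against a wide value. A small standalone C++ model of the resulting selection logic (not GCC code; section names kept only as strings):

  #include <cstdint>
  #include <cstdio>

  /* One equality test per candidate literal section, with the
     read-only data section as the fallback, as in the patch.  */
  static const char *literal_section (uint64_t size, bool have_literal16)
  {
    if (size == 4)
      return "literal4";
    if (size == 8)
      return "literal8";
    if (size == 16 && have_literal16)
      return "literal16";
    return "readonly_data";
  }

  int main ()
  {
    printf ("%s\n", literal_section (8, false));   /* literal8 */
    printf ("%s\n", literal_section (16, false));  /* readonly_data */
  }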
This mechanism for determining the metadata section is broken when LTO is in use, since the frontend that generated @@ -2171,7 +2167,7 @@ darwin_asm_declare_object_name (FILE *file, machopic_define_symbol (DECL_RTL (decl)); } - size = tree_low_cst (DECL_SIZE_UNIT (decl), 1); + size = tree_to_uhwi (DECL_SIZE_UNIT (decl)); #ifdef DEBUG_DARWIN_MEM_ALLOCATORS fprintf (file, "# dadon: %s %s (%llu, %u) local %d weak %d" diff --git a/gcc/config/epiphany/epiphany.c b/gcc/config/epiphany/epiphany.c index fd4c01c49a4..f07f15e888c 100644 --- a/gcc/config/epiphany/epiphany.c +++ b/gcc/config/epiphany/epiphany.c @@ -2758,11 +2758,11 @@ epiphany_special_round_type_align (tree type, unsigned computed, continue; offset = bit_position (field); size = DECL_SIZE (field); - if (!host_integerp (offset, 1) || !host_integerp (size, 1) - || TREE_INT_CST_LOW (offset) >= try_align - || TREE_INT_CST_LOW (size) >= try_align) + if (!tree_fits_uhwi_p (offset) || !tree_fits_uhwi_p (size) + || tree_to_uhwi (offset) >= try_align + || tree_to_uhwi (size) >= try_align) return try_align; - total = TREE_INT_CST_LOW (offset) + TREE_INT_CST_LOW (size); + total = tree_to_hwi (offset) + tree_to_hwi (size); if (total > max) max = total; } @@ -2785,7 +2785,7 @@ epiphany_adjust_field_align (tree field, unsigned computed) { tree elmsz = TYPE_SIZE (TREE_TYPE (TREE_TYPE (field))); - if (!host_integerp (elmsz, 1) || tree_low_cst (elmsz, 1) >= 32) + if (!tree_fits_uhwi_p (elmsz) || tree_to_uhwi (elmsz) >= 32) return 64; } return computed; diff --git a/gcc/config/i386/i386.c b/gcc/config/i386/i386.c index 842f99363b4..6bccc7d9b73 100644 --- a/gcc/config/i386/i386.c +++ b/gcc/config/i386/i386.c @@ -62,6 +62,7 @@ along with GCC; see the file COPYING3. If not see #include "dumpfile.h" #include "tree-pass.h" #include "tree-flow.h" +#include "wide-int.h" #include "context.h" #include "pass_manager.h" @@ -5603,7 +5604,7 @@ ix86_function_regparm (const_tree type, const_tree decl) attr = lookup_attribute ("regparm", TYPE_ATTRIBUTES (type)); if (attr) { - regparm = TREE_INT_CST_LOW (TREE_VALUE (TREE_VALUE (attr))); + regparm = tree_to_hwi (TREE_VALUE (TREE_VALUE (attr))); return regparm; } } @@ -5730,7 +5731,7 @@ ix86_keep_aggregate_return_pointer (tree fntype) attr = lookup_attribute ("callee_pop_aggregate_return", TYPE_ATTRIBUTES (fntype)); if (attr) - return (TREE_INT_CST_LOW (TREE_VALUE (TREE_VALUE (attr))) == 0); + return (tree_to_hwi (TREE_VALUE (TREE_VALUE (attr))) == 0); /* For 32-bit MS-ABI the default is to keep aggregate return pointer. */ @@ -6399,7 +6400,7 @@ classify_argument (enum machine_mode mode, const_tree type, for (i = (int_bit_position (field) + (bit_offset % 64)) / 8 / 8; i < ((int_bit_position (field) + (bit_offset % 64)) - + tree_low_cst (DECL_SIZE (field), 0) + + tree_to_shwi (DECL_SIZE (field)) + 63) / 8 / 8; i++) classes[i] = merge_classes (X86_64_INTEGER_CLASS, @@ -25694,8 +25695,7 @@ ix86_data_alignment (tree type, int align, bool opt) && AGGREGATE_TYPE_P (type) && TYPE_SIZE (type) && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST - && (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= (unsigned) max_align - || TREE_INT_CST_HIGH (TYPE_SIZE (type))) + && (wide_int::geu_p (TYPE_SIZE (type), max_align)) && align < max_align) align = max_align; @@ -25706,8 +25706,8 @@ ix86_data_alignment (tree type, int align, bool opt) if ((opt ? 
AGGREGATE_TYPE_P (type) : TREE_CODE (type) == ARRAY_TYPE) && TYPE_SIZE (type) && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST - && (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= 128 - || TREE_INT_CST_HIGH (TYPE_SIZE (type))) && align < 128) + && (wide_int::geu_p (TYPE_SIZE (type), 128)) + && align < 128) return 128; } @@ -25816,13 +25816,13 @@ ix86_local_alignment (tree exp, enum machine_mode mode, && TARGET_SSE) { if (AGGREGATE_TYPE_P (type) - && (va_list_type_node == NULL_TREE - || (TYPE_MAIN_VARIANT (type) - != TYPE_MAIN_VARIANT (va_list_type_node))) - && TYPE_SIZE (type) - && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST - && (TREE_INT_CST_LOW (TYPE_SIZE (type)) >= 16 - || TREE_INT_CST_HIGH (TYPE_SIZE (type))) && align < 128) + && (va_list_type_node == NULL_TREE + || (TYPE_MAIN_VARIANT (type) + != TYPE_MAIN_VARIANT (va_list_type_node))) + && TYPE_SIZE (type) + && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST + && (wide_int::geu_p (TYPE_SIZE (type), 16)) + && align < 128) return 128; } if (TREE_CODE (type) == ARRAY_TYPE) @@ -28842,7 +28842,7 @@ ix86_builtin_tm_load (tree type) { if (TREE_CODE (type) == VECTOR_TYPE) { - switch (tree_low_cst (TYPE_SIZE (type), 1)) + switch (tree_to_uhwi (TYPE_SIZE (type))) { case 64: return builtin_decl_explicit (BUILT_IN_TM_LOAD_M64); @@ -28862,7 +28862,7 @@ ix86_builtin_tm_store (tree type) { if (TREE_CODE (type) == VECTOR_TYPE) { - switch (tree_low_cst (TYPE_SIZE (type), 1)) + switch (tree_to_uhwi (TYPE_SIZE (type))) { case 64: return builtin_decl_explicit (BUILT_IN_TM_STORE_M64); @@ -32175,8 +32175,8 @@ get_element_number (tree vec_type, tree arg) { unsigned HOST_WIDE_INT elt, max = TYPE_VECTOR_SUBPARTS (vec_type) - 1; - if (!host_integerp (arg, 1) - || (elt = tree_low_cst (arg, 1), elt > max)) + if (!tree_fits_uhwi_p (arg) + || (elt = tree_to_uhwi (arg), elt > max)) { error ("selector must be an integer constant in the range 0..%wi", max); return 0; @@ -38179,7 +38179,7 @@ void ix86_emit_swsqrtsf (rtx res, rtx a, enum machine_mode mode, e2 = gen_reg_rtx (mode); e3 = gen_reg_rtx (mode); - real_from_integer (&r, VOIDmode, -3, -1, 0); + real_from_integer (&r, VOIDmode, -3, SIGNED); mthree = CONST_DOUBLE_FROM_REAL_VALUE (r, SFmode); real_arithmetic (&r, NEGATE_EXPR, &dconsthalf, NULL); diff --git a/gcc/config/ia64/predicates.md b/gcc/config/ia64/predicates.md index adfb15ff7b7..da4b393d633 100644 --- a/gcc/config/ia64/predicates.md +++ b/gcc/config/ia64/predicates.md @@ -72,9 +72,9 @@ t = DECL_SIZE_UNIT (t); else t = TYPE_SIZE_UNIT (TREE_TYPE (t)); - if (t && host_integerp (t, 0)) + if (t && tree_fits_shwi_p (t)) { - size = tree_low_cst (t, 0); + size = tree_to_shwi (t); if (size < 0) size = 0; } diff --git a/gcc/config/iq2000/iq2000.c b/gcc/config/iq2000/iq2000.c index 7e19366ae93..689a87cb094 100644 --- a/gcc/config/iq2000/iq2000.c +++ b/gcc/config/iq2000/iq2000.c @@ -1279,7 +1279,7 @@ iq2000_function_arg (cumulative_args_t cum_v, enum machine_mode mode, if (! type || TREE_CODE (type) != RECORD_TYPE || ! named || ! TYPE_SIZE_UNIT (type) - || ! host_integerp (TYPE_SIZE_UNIT (type), 1)) + || ! 
tree_fits_uhwi_p (TYPE_SIZE_UNIT (type))) ret = gen_rtx_REG (mode, regbase + *arg_words + bias); else { @@ -1289,7 +1289,7 @@ iq2000_function_arg (cumulative_args_t cum_v, enum machine_mode mode, if (TREE_CODE (field) == FIELD_DECL && TREE_CODE (TREE_TYPE (field)) == REAL_TYPE && TYPE_PRECISION (TREE_TYPE (field)) == BITS_PER_WORD - && host_integerp (bit_position (field), 0) + && tree_fits_shwi_p (bit_position (field)) && int_bit_position (field) % BITS_PER_WORD == 0) break; @@ -1307,7 +1307,7 @@ iq2000_function_arg (cumulative_args_t cum_v, enum machine_mode mode, /* ??? If this is a packed structure, then the last hunk won't be 64 bits. */ chunks - = tree_low_cst (TYPE_SIZE_UNIT (type), 1) / UNITS_PER_WORD; + = tree_to_uhwi (TYPE_SIZE_UNIT (type)) / UNITS_PER_WORD; if (chunks + *arg_words + bias > (unsigned) MAX_ARGS_IN_REGISTERS) chunks = MAX_ARGS_IN_REGISTERS - *arg_words - bias; diff --git a/gcc/config/m32c/m32c-pragma.c b/gcc/config/m32c/m32c-pragma.c index 6b0d05a8aaf..aa16a30453d 100644 --- a/gcc/config/m32c/m32c-pragma.c +++ b/gcc/config/m32c/m32c-pragma.c @@ -46,9 +46,9 @@ m32c_pragma_memregs (cpp_reader * reader ATTRIBUTE_UNUSED) type = pragma_lex (&val); if (type == CPP_NUMBER) { - if (host_integerp (val, 1)) + if (tree_fits_uhwi_p (val)) { - i = tree_low_cst (val, 1); + i = tree_to_uhwi (val); type = pragma_lex (&val); if (type != CPP_EOF) @@ -95,7 +95,7 @@ m32c_pragma_address (cpp_reader * reader ATTRIBUTE_UNUSED) { if (var != error_mark_node) { - unsigned uaddr = tree_low_cst (addr, 1); + unsigned uaddr = tree_to_uhwi (addr); m32c_note_pragma_address (IDENTIFIER_POINTER (var), uaddr); } diff --git a/gcc/config/m32c/m32c.c b/gcc/config/m32c/m32c.c index deac40c228f..f78037446fd 100644 --- a/gcc/config/m32c/m32c.c +++ b/gcc/config/m32c/m32c.c @@ -2935,8 +2935,8 @@ function_vector_handler (tree * node ATTRIBUTE_UNUSED, name); *no_add_attrs = true; } - else if (TREE_INT_CST_LOW (TREE_VALUE (args)) < 18 - || TREE_INT_CST_LOW (TREE_VALUE (args)) > 255) + else if (tree_to_hwi (TREE_VALUE (args)) < 18 + || tree_to_hwi (TREE_VALUE (args)) > 255) { /* The argument value must be between 18 to 255. */ warning (OPT_Wattributes, @@ -2968,7 +2968,7 @@ current_function_special_page_vector (rtx x) { if (is_attribute_p ("function_vector", TREE_PURPOSE (list))) { - num = TREE_INT_CST_LOW (TREE_VALUE (TREE_VALUE (list))); + num = tree_to_hwi (TREE_VALUE (TREE_VALUE (list))); return num; } diff --git a/gcc/config/mep/mep-pragma.c b/gcc/config/mep/mep-pragma.c index 8a9c577f5a9..45a4b4496a4 100644 --- a/gcc/config/mep/mep-pragma.c +++ b/gcc/config/mep/mep-pragma.c @@ -232,9 +232,9 @@ mep_pragma_coprocessor_width (void) switch (type) { case CPP_NUMBER: - if (! host_integerp (val, 1)) + if (! tree_fits_uhwi_p (val)) break; - i = tree_low_cst (val, 1); + i = tree_to_uhwi (val); /* This pragma no longer has any effect. 
*/ #if 0 if (i == 32) @@ -273,7 +273,7 @@ mep_pragma_coprocessor_subclass (void) type = mep_pragma_lex (&val); if (type != CPP_CHAR) goto syntax_error; - class_letter = tree_low_cst (val, 1); + class_letter = tree_to_uhwi (val); if (class_letter >= 'A' && class_letter <= 'D') switch (class_letter) { diff --git a/gcc/config/mep/mep.c b/gcc/config/mep/mep.c index a5b7e6c68be..e1f87698839 100644 --- a/gcc/config/mep/mep.c +++ b/gcc/config/mep/mep.c @@ -4209,7 +4209,7 @@ mep_attrlist_to_encoding (tree list, tree decl) && TREE_VALUE (TREE_VALUE (list)) && TREE_CODE (TREE_VALUE (TREE_VALUE (list))) == INTEGER_CST) { - int location = TREE_INT_CST_LOW (TREE_VALUE (TREE_VALUE(list))); + int location = tree_to_hwi (TREE_VALUE (TREE_VALUE(list))); if (location >= 0 && location <= 0x1000000) return 'i'; @@ -4298,7 +4298,7 @@ mep_insert_attributes (tree decl, tree *attributes) && TREE_VALUE (attr) && TREE_VALUE (TREE_VALUE(attr))) { - int location = TREE_INT_CST_LOW (TREE_VALUE (TREE_VALUE(attr))); + int location = tree_to_hwi (TREE_VALUE (TREE_VALUE(attr))); static tree previous_value = 0; static int previous_location = 0; static tree previous_name = 0; @@ -4714,7 +4714,7 @@ mep_output_aligned_common (FILE *stream, tree decl, const char *name, if (attr && TREE_VALUE (attr) && TREE_VALUE (TREE_VALUE(attr))) - location = TREE_INT_CST_LOW (TREE_VALUE (TREE_VALUE(attr))); + location = tree_to_hwi (TREE_VALUE (TREE_VALUE(attr))); if (location == -1) return; if (global) diff --git a/gcc/config/mips/mips.c b/gcc/config/mips/mips.c index 4da80f42e7b..9e4c2acad97 100644 --- a/gcc/config/mips/mips.c +++ b/gcc/config/mips/mips.c @@ -5136,7 +5136,7 @@ mips_function_arg (cumulative_args_t cum_v, enum machine_mode mode, && type != 0 && TREE_CODE (type) == RECORD_TYPE && TYPE_SIZE_UNIT (type) - && host_integerp (TYPE_SIZE_UNIT (type), 1)) + && tree_fits_uhwi_p (TYPE_SIZE_UNIT (type))) { tree field; @@ -5145,7 +5145,7 @@ mips_function_arg (cumulative_args_t cum_v, enum machine_mode mode, if (TREE_CODE (field) == FIELD_DECL && SCALAR_FLOAT_TYPE_P (TREE_TYPE (field)) && TYPE_PRECISION (TREE_TYPE (field)) == BITS_PER_WORD - && host_integerp (bit_position (field), 0) + && tree_fits_shwi_p (bit_position (field)) && int_bit_position (field) % BITS_PER_WORD == 0) break; @@ -14915,7 +14915,7 @@ r10k_safe_mem_expr_p (tree expr, HOST_WIDE_INT offset) return false; offset += bitoffset / BITS_PER_UNIT; - return offset >= 0 && offset < tree_low_cst (DECL_SIZE_UNIT (inner), 1); + return offset >= 0 && offset < tree_to_uhwi (DECL_SIZE_UNIT (inner)); } /* A for_each_rtx callback for which DATA points to the instruction diff --git a/gcc/config/picochip/picochip.c b/gcc/config/picochip/picochip.c index e8575ace160..2e4d0e49758 100644 --- a/gcc/config/picochip/picochip.c +++ b/gcc/config/picochip/picochip.c @@ -809,7 +809,7 @@ picochip_compute_arg_size (const_tree type, enum machine_mode mode) int type_size_in_units = 0; if (type) - type_size_in_units = tree_low_cst (TYPE_SIZE_UNIT (type), 1); + type_size_in_units = tree_to_uhwi (TYPE_SIZE_UNIT (type)); else type_size_in_units = GET_MODE_SIZE (mode); diff --git a/gcc/config/rs6000/rs6000-c.c b/gcc/config/rs6000/rs6000-c.c index d58e6865193..ef3fc29f562 100644 --- a/gcc/config/rs6000/rs6000-c.c +++ b/gcc/config/rs6000/rs6000-c.c @@ -26,6 +26,7 @@ #include "tm.h" #include "cpplib.h" #include "tree.h" +#include "wide-int.h" #include "c-family/c-common.h" #include "c-family/c-pragma.h" #include "diagnostic-core.h" @@ -4195,8 +4196,7 @@ altivec_resolve_overloaded_builtin (location_t loc, 
tree fndecl, mode = TYPE_MODE (arg1_type); if ((mode == V2DFmode || mode == V2DImode) && VECTOR_MEM_VSX_P (mode) && TREE_CODE (arg2) == INTEGER_CST - && TREE_INT_CST_HIGH (arg2) == 0 - && (TREE_INT_CST_LOW (arg2) == 0 || TREE_INT_CST_LOW (arg2) == 1)) + && wide_int::ltu_p (arg2, 2)) { tree call = NULL_TREE; @@ -4280,9 +4280,8 @@ altivec_resolve_overloaded_builtin (location_t loc, tree fndecl, /* If we can use the VSX xxpermdi instruction, use that for insert. */ mode = TYPE_MODE (arg1_type); if ((mode == V2DFmode || mode == V2DImode) && VECTOR_UNIT_VSX_P (mode) - && TREE_CODE (arg2) == INTEGER_CST - && TREE_INT_CST_HIGH (arg2) == 0 - && (TREE_INT_CST_LOW (arg2) == 0 || TREE_INT_CST_LOW (arg2) == 1)) + && tree_fits_uhwi_p (arg2) + && wide_int::ltu_p (arg2, 2)) { tree call = NULL_TREE; diff --git a/gcc/config/rs6000/rs6000.c b/gcc/config/rs6000/rs6000.c index 30cd6961f7b..32d618e6a3b 100644 --- a/gcc/config/rs6000/rs6000.c +++ b/gcc/config/rs6000/rs6000.c @@ -3777,7 +3777,7 @@ rs6000_builtin_support_vector_misalignment (enum machine_mode mode, it's word aligned. */ if (rs6000_vector_alignment_reachable (type, is_packed)) { - int element_size = TREE_INT_CST_LOW (TYPE_SIZE (type)); + int element_size = tree_to_hwi (TYPE_SIZE (type)); if (element_size == 64 || element_size == 32) return true; @@ -5844,13 +5844,13 @@ offsettable_ok_by_alignment (rtx op, HOST_WIDE_INT offset, if (!DECL_SIZE_UNIT (decl)) return false; - if (!host_integerp (DECL_SIZE_UNIT (decl), 1)) + if (!tree_fits_uhwi_p (DECL_SIZE_UNIT (decl))) return false; - - dsize = tree_low_cst (DECL_SIZE_UNIT (decl), 1); + + dsize = tree_to_uhwi (DECL_SIZE_UNIT (decl)); if (dsize > 32768) return false; - + return dalign / BITS_PER_UNIT >= dsize; } } @@ -5870,8 +5870,8 @@ offsettable_ok_by_alignment (rtx op, HOST_WIDE_INT offset, if (TREE_CODE (decl) == STRING_CST) dsize = TREE_STRING_LENGTH (decl); else if (TYPE_SIZE_UNIT (type) - && host_integerp (TYPE_SIZE_UNIT (type), 1)) - dsize = tree_low_cst (TYPE_SIZE_UNIT (type), 1); + && tree_fits_uhwi_p (TYPE_SIZE_UNIT (type))) + dsize = tree_to_uhwi (TYPE_SIZE_UNIT (type)); else return false; if (dsize > 32768) @@ -8460,7 +8460,7 @@ rs6000_darwin64_record_arg_advance_recurse (CUMULATIVE_ARGS *cum, mode = TYPE_MODE (ftype); if (DECL_SIZE (f) != 0 - && host_integerp (bit_position (f), 1)) + && tree_fits_uhwi_p (bit_position (f))) bitpos += int_bit_position (f); /* ??? FIXME: else assume zero offset. */ @@ -8937,7 +8937,7 @@ rs6000_darwin64_record_arg_recurse (CUMULATIVE_ARGS *cum, const_tree type, mode = TYPE_MODE (ftype); if (DECL_SIZE (f) != 0 - && host_integerp (bit_position (f), 1)) + && tree_fits_uhwi_p (bit_position (f))) bitpos += int_bit_position (f); /* ??? FIXME: else assume zero offset. */ @@ -10675,7 +10675,7 @@ rs6000_expand_binop_builtin (enum insn_code icode, tree exp, rtx target) /* Only allow 5-bit unsigned literals. */ STRIP_NOPS (arg1); if (TREE_CODE (arg1) != INTEGER_CST - || TREE_INT_CST_LOW (arg1) & ~0x1f) + || tree_to_hwi (arg1) & ~0x1f) { error ("argument 2 must be a 5-bit unsigned literal"); return const0_rtx; @@ -10720,7 +10720,7 @@ altivec_expand_predicate_builtin (enum insn_code icode, tree exp, rtx target) return const0_rtx; } else - cr6_form_int = TREE_INT_CST_LOW (cr6_form); + cr6_form_int = tree_to_hwi (cr6_form); gcc_assert (mode0 == mode1); @@ -11211,7 +11211,7 @@ rs6000_expand_ternop_builtin (enum insn_code icode, tree exp, rtx target) /* Only allow 4-bit unsigned literals. 
*/ STRIP_NOPS (arg2); if (TREE_CODE (arg2) != INTEGER_CST - || TREE_INT_CST_LOW (arg2) & ~0xf) + || tree_to_hwi (arg2) & ~0xf) { error ("argument 3 must be a 4-bit unsigned literal"); return const0_rtx; @@ -11229,7 +11229,7 @@ rs6000_expand_ternop_builtin (enum insn_code icode, tree exp, rtx target) /* Only allow 2-bit unsigned literals. */ STRIP_NOPS (arg2); if (TREE_CODE (arg2) != INTEGER_CST - || TREE_INT_CST_LOW (arg2) & ~0x3) + || tree_to_hwi (arg2) & ~0x3) { error ("argument 3 must be a 2-bit unsigned literal"); return const0_rtx; @@ -11241,7 +11241,7 @@ rs6000_expand_ternop_builtin (enum insn_code icode, tree exp, rtx target) /* Only allow 1-bit unsigned literals. */ STRIP_NOPS (arg2); if (TREE_CODE (arg2) != INTEGER_CST - || TREE_INT_CST_LOW (arg2) & ~0x1) + || tree_to_hwi (arg2) & ~0x1) { error ("argument 3 must be a 1-bit unsigned literal"); return const0_rtx; @@ -11254,7 +11254,7 @@ rs6000_expand_ternop_builtin (enum insn_code icode, tree exp, rtx target) range and prepare arguments. */ STRIP_NOPS (arg1); if (TREE_CODE (arg1) != INTEGER_CST - || !IN_RANGE (TREE_INT_CST_LOW (arg1), 0, 1)) + || !IN_RANGE (TREE_INT_CST_ELT (arg1, 0), 0, 1)) { error ("argument 2 must be 0 or 1"); return const0_rtx; @@ -11262,7 +11262,7 @@ rs6000_expand_ternop_builtin (enum insn_code icode, tree exp, rtx target) STRIP_NOPS (arg2); if (TREE_CODE (arg2) != INTEGER_CST - || !IN_RANGE (TREE_INT_CST_LOW (arg2), 0, 15)) + || !IN_RANGE (TREE_INT_CST_ELT (arg2, 0), 0, 15)) { error ("argument 3 must be in the range 0..15"); return const0_rtx; @@ -11445,7 +11445,7 @@ altivec_expand_dst_builtin (tree exp, rtx target ATTRIBUTE_UNUSED, *expandedp = true; STRIP_NOPS (arg2); if (TREE_CODE (arg2) != INTEGER_CST - || TREE_INT_CST_LOW (arg2) & ~0x3) + || tree_to_hwi (arg2) & ~0x3) { error ("argument to %qs must be a 2-bit unsigned literal", d->name); return const0_rtx; @@ -11499,8 +11499,8 @@ get_element_number (tree vec_type, tree arg) { unsigned HOST_WIDE_INT elt, max = TYPE_VECTOR_SUBPARTS (vec_type) - 1; - if (!host_integerp (arg, 1) - || (elt = tree_low_cst (arg, 1), elt > max)) + if (!tree_fits_uhwi_p (arg) + || (elt = tree_to_uhwi (arg), elt > max)) { error ("selector must be an integer constant in the range 0..%wi", max); return 0; @@ -11692,7 +11692,7 @@ altivec_expand_builtin (tree exp, rtx target, bool *expandedp) return const0_rtx; if (TREE_CODE (arg0) != INTEGER_CST - || TREE_INT_CST_LOW (arg0) & ~0x3) + || tree_to_hwi (arg0) & ~0x3) { error ("argument to dss must be a 2-bit unsigned literal"); return const0_rtx; @@ -11901,7 +11901,7 @@ spe_expand_builtin (tree exp, rtx target, bool *expandedp) case SPE_BUILTIN_EVSTWWO: arg1 = CALL_EXPR_ARG (exp, 2); if (TREE_CODE (arg1) != INTEGER_CST - || TREE_INT_CST_LOW (arg1) & ~0x1f) + || tree_to_hwi (arg1) & ~0x1f) { error ("argument 2 must be a 5-bit unsigned literal"); return const0_rtx; @@ -12027,7 +12027,7 @@ paired_expand_predicate_builtin (enum insn_code icode, tree exp, rtx target) return const0_rtx; } else - form_int = TREE_INT_CST_LOW (form); + form_int = tree_to_hwi (form); gcc_assert (mode0 == mode1); @@ -12099,7 +12099,7 @@ spe_expand_predicate_builtin (enum insn_code icode, tree exp, rtx target) return const0_rtx; } else - form_int = TREE_INT_CST_LOW (form); + form_int = tree_to_hwi (form); gcc_assert (mode0 == mode1); @@ -28217,7 +28217,7 @@ rs6000_emit_swrsqrt (rtx dst, rtx src) gcc_assert (code != CODE_FOR_nothing); /* Load up the constant 1.5 either as a scalar, or as a vector. 
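The rs6000 literal checks above all follow one idiom: tree_to_hwi (arg) & ~MASK is nonzero exactly when the constant needs more bits than the instruction encoding allows. A standalone C++ sketch of that test (not GCC code; the helper name is invented):

  #include <cstdint>
  #include <cstdio>

  /* A value fits in BITS unsigned bits iff masking away the low
     BITS bits leaves nothing behind.  */
  static bool fits_unsigned_bits (int64_t val, unsigned bits)
  {
    int64_t mask = (int64_t (1) << bits) - 1;
    return (val & ~mask) == 0;
  }

  int main ()
  {
    printf ("%d\n", fits_unsigned_bits (15, 4));  /* 1: valid 4-bit literal */
    printf ("%d\n", fits_unsigned_bits (16, 4));  /* 0: needs 5 bits */
    printf ("%d\n", fits_unsigned_bits (-1, 4));  /* 0: sign bits spill out */
  }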
*/ - real_from_integer (&dconst3_2, VOIDmode, 3, 0, 0); + real_from_integer (&dconst3_2, VOIDmode, 3, SIGNED); SET_REAL_EXP (&dconst3_2, REAL_EXP (&dconst3_2) - 1); halfthree = rs6000_load_constant_and_splat (mode, dconst3_2); diff --git a/gcc/config/s390/s390.c b/gcc/config/s390/s390.c index 1ea52af0f2b..b12cfc0d31f 100644 --- a/gcc/config/s390/s390.c +++ b/gcc/config/s390/s390.c @@ -9986,9 +9986,9 @@ s390_encode_section_info (tree decl, rtx rtl, int first) SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= SYMBOL_FLAG_ALIGN1; if (!DECL_SIZE (decl) || !DECL_ALIGN (decl) - || !host_integerp (DECL_SIZE (decl), 0) + || !tree_fits_shwi_p (DECL_SIZE (decl)) || (DECL_ALIGN (decl) <= 64 - && DECL_ALIGN (decl) != tree_low_cst (DECL_SIZE (decl), 0))) + && DECL_ALIGN (decl) != tree_to_shwi (DECL_SIZE (decl)))) SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= SYMBOL_FLAG_NOT_NATURALLY_ALIGNED; } diff --git a/gcc/config/sh/sh.c b/gcc/config/sh/sh.c index 60f45452036..fda132c3a21 100644 --- a/gcc/config/sh/sh.c +++ b/gcc/config/sh/sh.c @@ -1127,7 +1127,7 @@ sh_print_operand (FILE *stream, rtx x, int code) DECL_ATTRIBUTES (current_function_decl)); if (trapa_attr) fprintf (stream, "trapa #%ld", - (long) TREE_INT_CST_LOW (TREE_VALUE (TREE_VALUE (trapa_attr)))); + (long) tree_to_hwi (TREE_VALUE (TREE_VALUE (trapa_attr)))); else if (sh_cfun_interrupt_handler_p ()) { if (sh_cfun_resbank_handler_p ()) @@ -9575,7 +9575,7 @@ sh2a_handle_function_vector_handler_attribute (tree * node, tree name, name); *no_add_attrs = true; } - else if (TREE_INT_CST_LOW (TREE_VALUE (args)) > 255) + else if (tree_to_hwi (TREE_VALUE (args)) > 255) { /* The argument value must be between 0 to 255. */ warning (OPT_Wattributes, @@ -9624,7 +9624,7 @@ sh2a_get_function_vector_number (rtx x) { if (is_attribute_p ("function_vector", TREE_PURPOSE (list))) { - num = TREE_INT_CST_LOW (TREE_VALUE (TREE_VALUE (list))); + num = tree_to_hwi (TREE_VALUE (TREE_VALUE (list))); return num; } diff --git a/gcc/config/sol2-c.c b/gcc/config/sol2-c.c index ee44621591e..86a8d907521 100644 --- a/gcc/config/sol2-c.c +++ b/gcc/config/sol2-c.c @@ -93,8 +93,8 @@ solaris_pragma_align (cpp_reader *pfile ATTRIBUTE_UNUSED) return; } - low = TREE_INT_CST_LOW (x); - if (TREE_INT_CST_HIGH (x) != 0 + low = tree_to_hwi (x); + if (!cst_fits_uhwi_p (x) || (low != 1 && low != 2 && low != 4 && low != 8 && low != 16 && low != 32 && low != 64 && low != 128)) { diff --git a/gcc/config/sparc/sparc.c b/gcc/config/sparc/sparc.c index e5b4662512d..15c30c9d389 100644 --- a/gcc/config/sparc/sparc.c +++ b/gcc/config/sparc/sparc.c @@ -54,6 +54,7 @@ along with GCC; see the file COPYING3. 
If not see #include "opts.h" #include "tree-pass.h" #include "context.h" +#include "wide-int.h" /* Processor costs */ @@ -6313,7 +6314,7 @@ function_arg_record_value_1 (const_tree type, HOST_WIDE_INT startbitpos, if (integer_zerop (DECL_SIZE (field))) continue; - if (host_integerp (bit_position (field), 1)) + if (tree_fits_uhwi_p (bit_position (field))) bitpos += int_bit_position (field); } @@ -6461,7 +6462,7 @@ function_arg_record_value_2 (const_tree type, HOST_WIDE_INT startbitpos, if (integer_zerop (DECL_SIZE (field))) continue; - if (host_integerp (bit_position (field), 1)) + if (tree_fits_uhwi_p (bit_position (field))) bitpos += int_bit_position (field); } @@ -7128,10 +7129,10 @@ sparc_struct_value_rtx (tree fndecl, int incoming) /* Calculate the return object size */ tree size = TYPE_SIZE_UNIT (TREE_TYPE (fndecl)); - rtx size_rtx = GEN_INT (TREE_INT_CST_LOW (size) & 0xfff); + rtx size_rtx = GEN_INT (tree_to_hwi (size) & 0xfff); /* Construct a temporary return value */ rtx temp_val - = assign_stack_local (Pmode, TREE_INT_CST_LOW (size), 0); + = assign_stack_local (Pmode, tree_to_hwi (size), 0); /* Implement SPARC 32-bit psABI callee return struct checking: @@ -10470,31 +10471,31 @@ sparc_handle_vis_mul8x16 (tree *n_elts, int fncode, tree inner_type, for (i = 0; i < num; ++i) { int val - = sparc_vis_mul8x16 (TREE_INT_CST_LOW (VECTOR_CST_ELT (cst0, i)), - TREE_INT_CST_LOW (VECTOR_CST_ELT (cst1, i))); + = sparc_vis_mul8x16 (tree_to_hwi (VECTOR_CST_ELT (cst0, i)), + tree_to_hwi (VECTOR_CST_ELT (cst1, i))); n_elts[i] = build_int_cst (inner_type, val); } break; case CODE_FOR_fmul8x16au_vis: - scale = TREE_INT_CST_LOW (VECTOR_CST_ELT (cst1, 0)); + scale = tree_to_hwi (VECTOR_CST_ELT (cst1, 0)); for (i = 0; i < num; ++i) { int val - = sparc_vis_mul8x16 (TREE_INT_CST_LOW (VECTOR_CST_ELT (cst0, i)), + = sparc_vis_mul8x16 (tree_to_hwi (VECTOR_CST_ELT (cst0, i)), scale); n_elts[i] = build_int_cst (inner_type, val); } break; case CODE_FOR_fmul8x16al_vis: - scale = TREE_INT_CST_LOW (VECTOR_CST_ELT (cst1, 1)); + scale = tree_to_hwi (VECTOR_CST_ELT (cst1, 1)); for (i = 0; i < num; ++i) { int val - = sparc_vis_mul8x16 (TREE_INT_CST_LOW (VECTOR_CST_ELT (cst0, i)), + = sparc_vis_mul8x16 (tree_to_hwi (VECTOR_CST_ELT (cst0, i)), scale); n_elts[i] = build_int_cst (inner_type, val); } @@ -10554,7 +10555,7 @@ sparc_fold_builtin (tree fndecl, int n_args ATTRIBUTE_UNUSED, n_elts = XALLOCAVEC (tree, VECTOR_CST_NELTS (arg0)); for (i = 0; i < VECTOR_CST_NELTS (arg0); ++i) n_elts[i] = build_int_cst (inner_type, - TREE_INT_CST_LOW + tree_to_hwi (VECTOR_CST_ELT (arg0, i)) << 4); return build_vector (rtype, n_elts); } @@ -10609,30 +10610,33 @@ sparc_fold_builtin (tree fndecl, int n_args ATTRIBUTE_UNUSED, && TREE_CODE (arg1) == VECTOR_CST && TREE_CODE (arg2) == INTEGER_CST) { - bool overflow = false; - double_int result = TREE_INT_CST (arg2); - double_int tmp; + bool overflow, overall_overflow = false; + wide_int result = wide_int::from_tree (arg2); + wide_int tmp; unsigned i; for (i = 0; i < VECTOR_CST_NELTS (arg0); ++i) { - double_int e0 = TREE_INT_CST (VECTOR_CST_ELT (arg0, i)); - double_int e1 = TREE_INT_CST (VECTOR_CST_ELT (arg1, i)); - - bool neg1_ovf, neg2_ovf, add1_ovf, add2_ovf; - - tmp = e1.neg_with_overflow (&neg1_ovf); - tmp = e0.add_with_sign (tmp, false, &add1_ovf); - if (tmp.is_negative ()) - tmp = tmp.neg_with_overflow (&neg2_ovf); + wide_int e0 = wide_int::from_tree (VECTOR_CST_ELT (arg0, i)); + wide_int e1 = wide_int::from_tree (VECTOR_CST_ELT (arg1, i)); + + tmp = e1.neg (&overflow); + 
overall_overflow |= overflow; + tmp = e0.add (tmp, SIGNED, &overflow); + overall_overflow |= overflow; + if (tmp.neg_p (SIGNED)) + { + tmp = tmp.neg (&overflow); + overall_overflow |= overflow; + } - result = result.add_with_sign (tmp, false, &add2_ovf); - overflow |= neg1_ovf | neg2_ovf | add1_ovf | add2_ovf; + result = result.add (tmp, SIGNED, &overflow); + overall_overflow |= overflow; } - gcc_assert (!overflow); + gcc_assert (!overall_overflow); - return build_int_cst_wide (rtype, result.low, result.high); + return wide_int_to_tree (rtype, result); } default: diff --git a/gcc/config/vms/vms-c.c b/gcc/config/vms/vms-c.c index d56ac1b8a70..5e4ed7cca65 100644 --- a/gcc/config/vms/vms-c.c +++ b/gcc/config/vms/vms-c.c @@ -316,7 +316,7 @@ handle_pragma_pointer_size (const char *pragma_name) int val; if (TREE_CODE (x) == INTEGER_CST) - val = TREE_INT_CST_LOW (x); + val = tree_to_hwi (x); else val = -1; diff --git a/gcc/coretypes.h b/gcc/coretypes.h index 54bfe7f8654..ee5b5fe93d7 100644 --- a/gcc/coretypes.h +++ b/gcc/coretypes.h @@ -55,6 +55,9 @@ typedef const struct rtx_def *const_rtx; struct rtvec_def; typedef struct rtvec_def *rtvec; typedef const struct rtvec_def *const_rtvec; +struct hwivec_def; +typedef struct hwivec_def *hwivec; +typedef const struct hwivec_def *const_hwivec; union tree_node; typedef union tree_node *tree; typedef const union tree_node *const_tree; diff --git a/gcc/coverage.c b/gcc/coverage.c index 9b664cf1500..efe3998701b 100644 --- a/gcc/coverage.c +++ b/gcc/coverage.c @@ -821,7 +821,7 @@ build_fn_info (const struct coverage_data *data, tree type, tree key) if (var) count - = tree_low_cst (TYPE_MAX_VALUE (TYPE_DOMAIN (TREE_TYPE (var))), 0) + = tree_to_shwi (TYPE_MAX_VALUE (TYPE_DOMAIN (TREE_TYPE (var)))) + 1; CONSTRUCTOR_APPEND_ELT (ctr, TYPE_FIELDS (ctr_type), diff --git a/gcc/cp/ChangeLog-2000 b/gcc/cp/ChangeLog-2000 index 7be14cd5bc2..bcab005a104 100644 --- a/gcc/cp/ChangeLog-2000 +++ b/gcc/cp/ChangeLog-2000 @@ -3132,7 +3132,7 @@ (write_discriminator): Use write_unsigned_number or write_signed_number as appropriate. (write_template_arg_literal): Likewise. - (write_array_type): Use tree_low_cst. + (write_array_type): Use tree_to_uhwi. (write_template_parm): Use write_unsigned_number or write_signed_number as appropriate. (write_substitution): Adjust call to write_number. diff --git a/gcc/cp/call.c b/gcc/cp/call.c index 56346063f3d..8f56cf17cbc 100644 --- a/gcc/cp/call.c +++ b/gcc/cp/call.c @@ -38,6 +38,7 @@ along with GCC; see the file COPYING3. If not see #include "c-family/c-objc.h" #include "timevar.h" #include "cgraph.h" +#include "wide-int.h" /* The various kinds of conversion. */ @@ -941,7 +942,7 @@ build_array_conv (tree type, tree ctor, int flags, tsubst_flags_t complain) if (TYPE_DOMAIN (type)) { - unsigned HOST_WIDE_INT alen = tree_low_cst (array_type_nelts_top (type), 1); + unsigned HOST_WIDE_INT alen = tree_to_uhwi (array_type_nelts_top (type)); if (alen < len) return NULL; } diff --git a/gcc/cp/class.c b/gcc/cp/class.c index 2f08d5f0c7c..a942f1fbb87 100644 --- a/gcc/cp/class.c +++ b/gcc/cp/class.c @@ -36,6 +36,7 @@ along with GCC; see the file COPYING3. If not see #include "splay-tree.h" #include "pointer-set.h" #include "hash-table.h" +#include "wide-int.h" /* The number of nested classes being processed. If we are not in the scope of any class, this is zero.
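The sparc_fold_builtin hunk above (as corrected) accumulates the per-step overflow flags into a single overall flag: each wide-int operation writes its own flag, which is then OR-ed into the running one. A standalone C++ model of that pattern, using GCC's __builtin_add_overflow in place of the branch's wide_int methods:

  #include <cstdint>
  #include <cstdio>

  int main ()
  {
    bool overflow, overall_overflow = false;
    int64_t tmp;

    /* First step overflows; its flag is folded into the overall one.  */
    overflow = __builtin_add_overflow (INT64_MAX, int64_t (1), &tmp);
    overall_overflow |= overflow;   /* accumulate, never self-OR */

    /* Second step is fine, and must not clear the sticky flag.  */
    overflow = __builtin_add_overflow (tmp, int64_t (2), &tmp);
    overall_overflow |= overflow;

    printf ("%d\n", overall_overflow);   /* prints 1 */
  }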
*/ @@ -6077,7 +6078,7 @@ layout_class_type (tree t, tree *virtuals_p) { unsigned HOST_WIDE_INT width; tree ftype = TREE_TYPE (field); - width = tree_low_cst (DECL_SIZE (field), /*unsignedp=*/1); + width = tree_to_uhwi (DECL_SIZE (field)); if (width != TYPE_PRECISION (ftype)) { TREE_TYPE (field) @@ -7949,7 +7950,7 @@ dump_class_hierarchy_r (FILE *stream, igo = TREE_CHAIN (binfo); fprintf (stream, HOST_WIDE_INT_PRINT_DEC, - tree_low_cst (BINFO_OFFSET (binfo), 0)); + tree_to_shwi (BINFO_OFFSET (binfo))); if (is_empty_class (BINFO_TYPE (binfo))) fprintf (stream, " empty"); else if (CLASSTYPE_NEARLY_EMPTY_P (BINFO_TYPE (binfo))) @@ -8025,10 +8026,10 @@ dump_class_hierarchy_1 (FILE *stream, int flags, tree t) { fprintf (stream, "Class %s\n", type_as_string (t, TFF_PLAIN_IDENTIFIER)); fprintf (stream, " size=%lu align=%lu\n", - (unsigned long)(tree_low_cst (TYPE_SIZE (t), 0) / BITS_PER_UNIT), + (unsigned long)(tree_to_shwi (TYPE_SIZE (t)) / BITS_PER_UNIT), (unsigned long)(TYPE_ALIGN (t) / BITS_PER_UNIT)); fprintf (stream, " base size=%lu base align=%lu\n", - (unsigned long)(tree_low_cst (TYPE_SIZE (CLASSTYPE_AS_BASE (t)), 0) + (unsigned long)(tree_to_shwi (TYPE_SIZE (CLASSTYPE_AS_BASE (t))) / BITS_PER_UNIT), (unsigned long)(TYPE_ALIGN (CLASSTYPE_AS_BASE (t)) / BITS_PER_UNIT)); @@ -8065,7 +8066,7 @@ dump_array (FILE * stream, tree decl) HOST_WIDE_INT elt; tree size = TYPE_MAX_VALUE (TYPE_DOMAIN (TREE_TYPE (decl))); - elt = (tree_low_cst (TYPE_SIZE (TREE_TYPE (TREE_TYPE (decl))), 0) + elt = (tree_to_shwi (TYPE_SIZE (TREE_TYPE (TREE_TYPE (decl)))) / BITS_PER_UNIT); fprintf (stream, "%s:", decl_as_string (decl, TFF_PLAIN_IDENTIFIER)); fprintf (stream, " %s entries", @@ -8154,10 +8155,10 @@ dump_thunk (FILE *stream, int indent, tree thunk) /*NOP*/; else if (DECL_THIS_THUNK_P (thunk)) fprintf (stream, " vcall=" HOST_WIDE_INT_PRINT_DEC, - tree_low_cst (virtual_adjust, 0)); + tree_to_shwi (virtual_adjust)); else fprintf (stream, " vbase=" HOST_WIDE_INT_PRINT_DEC "(%s)", - tree_low_cst (BINFO_VPTR_FIELD (virtual_adjust), 0), + tree_to_shwi (BINFO_VPTR_FIELD (virtual_adjust)), type_as_string (BINFO_TYPE (virtual_adjust), TFF_SCOPE)); if (THUNK_ALIAS (thunk)) fprintf (stream, " alias to %p", (void *)THUNK_ALIAS (thunk)); diff --git a/gcc/cp/cp-tree.h b/gcc/cp/cp-tree.h index 86727398fbc..4d169f83642 100644 --- a/gcc/cp/cp-tree.h +++ b/gcc/cp/cp-tree.h @@ -2804,7 +2804,7 @@ extern void decl_shadowed_for_var_insert (tree, tree); /* The number of levels of template parameters given by NODE. */ #define TMPL_PARMS_DEPTH(NODE) \ - ((HOST_WIDE_INT) TREE_INT_CST_LOW (TREE_PURPOSE (NODE))) + ((HOST_WIDE_INT) tree_to_hwi (TREE_PURPOSE (NODE))) /* The TEMPLATE_DECL instantiated or specialized by NODE. This TEMPLATE_DECL will be the immediate parent, not the most general @@ -3684,7 +3684,7 @@ more_aggr_init_expr_args_p (const aggr_init_expr_arg_iterator *iter) /* Accessor macros for C++ template decl nodes. */ /* The DECL_TEMPLATE_PARMS are a list. The TREE_PURPOSE of each node - is a INT_CST whose TREE_INT_CST_LOW indicates the level of the + is a INT_CST whose tree_to_hwi indicates the level of the template parameters, with 1 being the outermost set of template parameters. The TREE_VALUE is a vector, whose elements are the template parameters at each level. Each element in the vector is a diff --git a/gcc/cp/cvt.c b/gcc/cp/cvt.c index 532e8fd9d6b..5cae99c41e3 100644 --- a/gcc/cp/cvt.c +++ b/gcc/cp/cvt.c @@ -35,6 +35,7 @@ along with GCC; see the file COPYING3. 
If not see #include "convert.h" #include "decl.h" #include "target.h" +#include "wide-int.h" static tree cp_convert_to_pointer (tree, tree, tsubst_flags_t); static tree convert_to_pointer_force (tree, tree, tsubst_flags_t); @@ -581,9 +582,7 @@ ignore_overflows (tree expr, tree orig) { gcc_assert (!TREE_OVERFLOW (orig)); /* Ensure constant sharing. */ - expr = build_int_cst_wide (TREE_TYPE (expr), - TREE_INT_CST_LOW (expr), - TREE_INT_CST_HIGH (expr)); + expr = wide_int_to_tree (TREE_TYPE (expr), expr); } return expr; } diff --git a/gcc/cp/decl.c b/gcc/cp/decl.c index 16f751c8ff1..688a555ff15 100644 --- a/gcc/cp/decl.c +++ b/gcc/cp/decl.c @@ -52,6 +52,7 @@ along with GCC; see the file COPYING3. If not see #include "splay-tree.h" #include "plugin.h" #include "cgraph.h" +#include "wide-int.h" /* Possible cases of bad specifiers type used by bad_specifiers. */ enum bad_spec_place { @@ -4801,7 +4802,8 @@ check_array_designated_initializer (constructor_elt *ce, if (TREE_CODE (ce->index) == INTEGER_CST) { /* A C99 designator is OK if it matches the current index. */ - if (TREE_INT_CST_LOW (ce->index) == index) + if (tree_fits_uhwi_p (ce->index) + && tree_to_uhwi (ce->index) == index) return true; else sorry ("non-trivial designated initializers not supported"); @@ -5088,12 +5090,11 @@ reshape_init_array_1 (tree elt_type, tree max_index, reshape_iter *d, if (integer_all_onesp (max_index)) return new_init; - if (host_integerp (max_index, 1)) - max_index_cst = tree_low_cst (max_index, 1); + if (tree_fits_uhwi_p (max_index)) + max_index_cst = tree_to_uhwi (max_index); /* sizetype is sign extended, not zero extended. */ else - max_index_cst = tree_low_cst (fold_convert (size_type_node, max_index), - 1); + max_index_cst = tree_to_uhwi (fold_convert (size_type_node, max_index)); } /* Loop until there are no more initializers. */ @@ -9986,7 +9987,7 @@ grokdeclarator (const cp_declarator *declarator, { error ("size of array %qs is too large", name); /* If we proceed with the array type as it is, we'll eventually - crash in tree_low_cst(). */ + crash in tree_to_uhwi (). */ type = error_mark_node; } @@ -12607,9 +12608,9 @@ finish_enum_value_list (tree enumtype) enumeration. We must do this before the type of MINNODE and MAXNODE are transformed, since tree_int_cst_min_precision relies on the TREE_TYPE of the value it is passed. */ - bool unsignedp = tree_int_cst_sgn (minnode) >= 0; - int lowprec = tree_int_cst_min_precision (minnode, unsignedp); - int highprec = tree_int_cst_min_precision (maxnode, unsignedp); + signop sgn = tree_int_cst_sgn (minnode) >= 0 ? UNSIGNED : SIGNED; + int lowprec = tree_int_cst_min_precision (minnode, sgn); + int highprec = tree_int_cst_min_precision (maxnode, sgn); int precision = MAX (lowprec, highprec); unsigned int itk; bool use_short_enum; @@ -12641,7 +12642,7 @@ finish_enum_value_list (tree enumtype) underlying_type = integer_types[itk]; if (underlying_type != NULL_TREE && TYPE_PRECISION (underlying_type) >= precision - && TYPE_UNSIGNED (underlying_type) == unsignedp) + && TYPE_SIGN (underlying_type) == sgn) break; } if (itk == itk_none) @@ -12688,12 +12689,12 @@ finish_enum_value_list (tree enumtype) = build_distinct_type_copy (underlying_type); TYPE_PRECISION (ENUM_UNDERLYING_TYPE (enumtype)) = precision; set_min_and_max_values_for_integral_type - (ENUM_UNDERLYING_TYPE (enumtype), precision, unsignedp); + (ENUM_UNDERLYING_TYPE (enumtype), precision, sgn); /* If -fstrict-enums, still constrain TYPE_MIN/MAX_VALUE. 
*/ if (flag_strict_enums) set_min_and_max_values_for_integral_type (enumtype, precision, - unsignedp); + sgn); } else underlying_type = ENUM_UNDERLYING_TYPE (enumtype); @@ -12817,14 +12818,14 @@ build_enumerator (tree name, tree value, tree enumtype, location_t loc) value = error_mark_node; else { - double_int di = TREE_INT_CST (prev_value) - .add_with_sign (double_int_one, - false, &overflowed); + tree type = TREE_TYPE (prev_value); + signop sgn = TYPE_SIGN (type); + wide_int wi = (max_wide_int (prev_value) + .add (1, sgn, &overflowed)); if (!overflowed) { - tree type = TREE_TYPE (prev_value); - bool pos = TYPE_UNSIGNED (type) || !di.is_negative (); - if (!double_int_fits_to_tree_p (type, di)) + bool pos = !wi.neg_p (sgn); + if (!wi.fits_to_tree_p (type)) { unsigned int itk; for (itk = itk_int; itk != itk_none; itk++) @@ -12832,7 +12833,7 @@ build_enumerator (tree name, tree value, tree enumtype, location_t loc) type = integer_types[itk]; if (type != NULL_TREE && (pos || !TYPE_UNSIGNED (type)) - && double_int_fits_to_tree_p (type, di)) + && wi.fits_to_tree_p (type)) break; } if (type && cxx_dialect < cxx11 @@ -12844,7 +12845,7 @@ incremented enumerator value is too large for %<long%>"); if (type == NULL_TREE) overflowed = true; else - value = double_int_to_tree (type, di); + value = wide_int_to_tree (type, wi); } if (overflowed) diff --git a/gcc/cp/dump.c b/gcc/cp/dump.c index 4aa3935a7d2..cc888339af9 100644 --- a/gcc/cp/dump.c +++ b/gcc/cp/dump.c @@ -346,7 +346,7 @@ cp_dump_tree (void* dump_info, tree t) } dump_int (di, "fixd", THUNK_FIXED_OFFSET (t)); if (virt) - dump_int (di, "virt", tree_low_cst (virt, 0)); + dump_int (di, "virt", tree_to_shwi (virt)); dump_child ("fn", DECL_INITIAL (t)); } break; diff --git a/gcc/cp/error.c b/gcc/cp/error.c index 440169a2d65..9421d232ff2 100644 --- a/gcc/cp/error.c +++ b/gcc/cp/error.c @@ -845,8 +845,8 @@ dump_type_suffix (tree t, int flags) tree max = TYPE_MAX_VALUE (dtype); if (integer_all_onesp (max)) pp_character (cxx_pp, '0'); - else if (host_integerp (max, 0)) - pp_wide_integer (cxx_pp, tree_low_cst (max, 0) + 1); + else if (tree_fits_shwi_p (max)) + pp_wide_integer (cxx_pp, tree_to_shwi (max) + 1); else { STRIP_NOPS (max); @@ -1815,7 +1815,7 @@ static tree resolve_virtual_fun_from_obj_type_ref (tree ref) { tree obj_type = TREE_TYPE (OBJ_TYPE_REF_OBJECT (ref)); - HOST_WIDE_INT index = tree_low_cst (OBJ_TYPE_REF_TOKEN (ref), 1); + HOST_WIDE_INT index = tree_to_uhwi (OBJ_TYPE_REF_TOKEN (ref)); tree fun = BINFO_VIRTUALS (TYPE_BINFO (TREE_TYPE (obj_type))); while (index) { @@ -2239,7 +2239,7 @@ dump_expr (tree t, int flags) pp_cxx_right_paren (cxx_pp); break; } - else if (host_integerp (idx, 0)) + else if (tree_fits_shwi_p (idx)) { tree virtuals; unsigned HOST_WIDE_INT n; @@ -2248,7 +2248,7 @@ dump_expr (tree t, int flags) t = TYPE_METHOD_BASETYPE (t); virtuals = BINFO_VIRTUALS (TYPE_BINFO (TYPE_MAIN_VARIANT (t))); - n = tree_low_cst (idx, 0); + n = tree_to_shwi (idx); /* Map vtable index back one, to allow for the null pointer to member. */ diff --git a/gcc/cp/init.c b/gcc/cp/init.c index 3ec32c580ac..7eb340abebe 100644 --- a/gcc/cp/init.c +++ b/gcc/cp/init.c @@ -28,6 +28,7 @@ along with GCC; see the file COPYING3. If not see #include "cp-tree.h" #include "flags.h" #include "target.h" +#include "wide-int.h" static bool begin_init_stmts (tree *, tree *); static tree finish_init_stmts (bool, tree, tree); @@ -2210,10 +2211,10 @@ build_new_1 (vec<tree, va_gc> **placement, tree type, tree nelts, /* For arrays, a bounds checks on the NELTS parameter. 
*/ tree outer_nelts_check = NULL_TREE; bool outer_nelts_from_type = false; - double_int inner_nelts_count = double_int_one; + addr_wide_int inner_nelts_count = 1; tree alloc_call, alloc_expr; /* Size of the inner array elements. */ - double_int inner_size; + addr_wide_int inner_size; /* The address returned by the call to "operator new". This node is a VAR_DECL and is therefore reusable. */ tree alloc_node; @@ -2268,9 +2269,8 @@ build_new_1 (vec<tree, va_gc> **placement, tree type, tree nelts, if (TREE_CODE (inner_nelts_cst) == INTEGER_CST) { bool overflow; - double_int result = TREE_INT_CST (inner_nelts_cst) - .mul_with_sign (inner_nelts_count, - false, &overflow); + addr_wide_int result = (addr_wide_int (inner_nelts_cst) + .mul (inner_nelts_count, SIGNED, &overflow)); if (overflow) { if (complain & tf_error) @@ -2372,42 +2372,39 @@ build_new_1 (vec<tree, va_gc> **placement, tree type, tree nelts, { /* Maximum available size in bytes. Half of the address space minus the cookie size. */ - double_int max_size - = double_int_one.llshift (TYPE_PRECISION (sizetype) - 1, - HOST_BITS_PER_DOUBLE_INT); + addr_wide_int max_size + = addr_wide_int::set_bit_in_zero (TYPE_PRECISION (sizetype) - 1); /* Maximum number of outer elements which can be allocated. */ - double_int max_outer_nelts; + addr_wide_int max_outer_nelts; tree max_outer_nelts_tree; gcc_assert (TREE_CODE (size) == INTEGER_CST); cookie_size = targetm.cxx.get_cookie_size (elt_type); gcc_assert (TREE_CODE (cookie_size) == INTEGER_CST); - gcc_checking_assert (TREE_INT_CST (cookie_size).ult (max_size)); + gcc_checking_assert (addr_wide_int (cookie_size).ltu_p(max_size)); /* Unconditionally subtract the cookie size. This decreases the maximum object size and is safe even if we choose not to use a cookie after all. */ - max_size -= TREE_INT_CST (cookie_size); + max_size -= cookie_size; bool overflow; - inner_size = TREE_INT_CST (size) - .mul_with_sign (inner_nelts_count, false, &overflow); - if (overflow || inner_size.ugt (max_size)) + inner_size = addr_wide_int (size) + .mul (inner_nelts_count, SIGNED, &overflow); + if (overflow || inner_size.gtu_p (max_size)) { if (complain & tf_error) error ("size of array is too large"); return error_mark_node; } - max_outer_nelts = max_size.udiv (inner_size, TRUNC_DIV_EXPR); + + max_outer_nelts = max_size.udiv_trunc (inner_size); /* Only keep the top-most seven bits, to simplify encoding the constant in the instruction stream. */ { - unsigned shift = HOST_BITS_PER_DOUBLE_INT - 7 - - (max_outer_nelts.high ? clz_hwi (max_outer_nelts.high) - : (HOST_BITS_PER_WIDE_INT + clz_hwi (max_outer_nelts.low))); - max_outer_nelts - = max_outer_nelts.lrshift (shift, HOST_BITS_PER_DOUBLE_INT) - .llshift (shift, HOST_BITS_PER_DOUBLE_INT); + unsigned shift = (max_outer_nelts.get_precision ()) - 7 + - max_outer_nelts.clz ().to_shwi (); + max_outer_nelts = max_outer_nelts.rshiftu (shift).lshift (shift); } - max_outer_nelts_tree = double_int_to_tree (sizetype, max_outer_nelts); + max_outer_nelts_tree = wide_int_to_tree (sizetype, max_outer_nelts); size = size_binop (MULT_EXPR, size, convert (sizetype, nelts)); outer_nelts_check = fold_build2 (LE_EXPR, boolean_type_node, @@ -2481,7 +2478,7 @@ build_new_1 (vec<tree, va_gc> **placement, tree type, tree nelts, cookie_size = NULL_TREE; /* No size arithmetic necessary, so the size check is not needed. */ - if (outer_nelts_check != NULL && inner_size.is_one ()) + if (outer_nelts_check != NULL && inner_size.one_p ()) outer_nelts_check = NULL_TREE; } /* Perform the overflow check. 
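The build_new_1 hunk above divides the available address space by the inner element size and then truncates the resulting maximum outer element count to its top seven significant bits, so the bound encodes cheaply in the instruction stream. A standalone C++ model of that clz-and-shift rounding with uint64_t in place of addr_wide_int (not GCC code):

  #include <cstdint>
  #include <cstdio>

  /* Keep at most the seven most-significant bits of the bound,
     rounding it down, as the patch does with rshiftu/lshift.  */
  static uint64_t keep_top_seven_bits (uint64_t max_outer_nelts)
  {
    if (max_outer_nelts == 0)
      return 0;
    int shift = 64 - 7 - __builtin_clzll (max_outer_nelts);
    if (shift <= 0)
      return max_outer_nelts;      /* already fits in seven bits */
    return (max_outer_nelts >> shift) << shift;
  }

  int main ()
  {
    /* 0x1fff has 13 significant bits; only the top seven survive.  */
    printf ("0x%llx\n",
            (unsigned long long) keep_top_seven_bits (0x1fff));  /* 0x1fc0 */
  }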
*/ @@ -2526,7 +2523,7 @@ build_new_1 (vec<tree, va_gc> **placement, tree type, tree nelts, cookie_size = NULL_TREE; /* No size arithmetic necessary, so the size check is not needed. */ - if (outer_nelts_check != NULL && inner_size.is_one ()) + if (outer_nelts_check != NULL && inner_size.one_p ()) outer_nelts_check = NULL_TREE; } @@ -3591,9 +3588,9 @@ build_vec_init (tree base, tree maxindex, tree init, if (from_array || ((type_build_ctor_call (type) || init || explicit_value_init_p) - && ! (host_integerp (maxindex, 0) + && ! (tree_fits_shwi_p (maxindex) && (num_initialized_elts - == tree_low_cst (maxindex, 0) + 1)))) + == tree_to_shwi (maxindex) + 1)))) { /* If the ITERATOR is equal to -1, then we don't have to loop; we've already initialized all the elements. */ diff --git a/gcc/cp/mangle.c b/gcc/cp/mangle.c index 8c11ba8d394..f9b399f7e94 100644 --- a/gcc/cp/mangle.c +++ b/gcc/cp/mangle.c @@ -55,6 +55,7 @@ along with GCC; see the file COPYING3. If not see #include "flags.h" #include "target.h" #include "cgraph.h" +#include "wide-int.h" /* Debugging support. */ @@ -1503,7 +1504,7 @@ write_integer_cst (const tree cst) { int sign = tree_int_cst_sgn (cst); - if (TREE_INT_CST_HIGH (cst) + (sign < 0)) + if (!cst_fits_shwi_p (cst)) { /* A bignum. We do this in chunks, each of which fits in a HOST_WIDE_INT. */ @@ -1529,8 +1530,7 @@ write_integer_cst (const tree cst) type = c_common_signed_or_unsigned_type (1, TREE_TYPE (cst)); base = build_int_cstu (type, chunk); - n = build_int_cst_wide (type, - TREE_INT_CST_LOW (cst), TREE_INT_CST_HIGH (cst)); + n = wide_int_to_tree (type, cst); if (sign < 0) { @@ -1545,7 +1545,7 @@ write_integer_cst (const tree cst) done = integer_zerop (d); tmp = fold_build2_loc (input_location, MINUS_EXPR, type, n, tmp); - c = hwint_to_ascii (TREE_INT_CST_LOW (tmp), 10, ptr, + c = hwint_to_ascii (tree_to_hwi (tmp), 10, ptr, done ? 1 : chunk_digits); ptr -= c; count += c; @@ -1557,7 +1557,7 @@ write_integer_cst (const tree cst) else { /* A small num. */ - unsigned HOST_WIDE_INT low = TREE_INT_CST_LOW (cst); + unsigned HOST_WIDE_INT low = tree_to_hwi (cst); if (sign < 0) { @@ -3223,12 +3223,12 @@ write_array_type (const tree type) { /* The ABI specifies that we should mangle the number of elements in the array, not the largest allowed index. */ - double_int dmax = tree_to_double_int (max) + double_int_one; + addr_wide_int wmax = addr_wide_int (max) + 1; /* Truncate the result - this will mangle [0, SIZE_INT_MAX] number of elements as zero. */ - dmax = dmax.zext (TYPE_PRECISION (TREE_TYPE (max))); - gcc_assert (dmax.fits_uhwi ()); - write_unsigned_number (dmax.low); + wmax = wmax.zext (TYPE_PRECISION (TREE_TYPE (max))); + gcc_assert (wmax.fits_uhwi_p ()); + write_unsigned_number (wmax.to_uhwi ()); } else { diff --git a/gcc/cp/method.c b/gcc/cp/method.c index 4ac533eacf7..adef81c576e 100644 --- a/gcc/cp/method.c +++ b/gcc/cp/method.c @@ -95,7 +95,7 @@ make_thunk (tree function, bool this_adjusting, convert (ssizetype, TYPE_SIZE_UNIT (vtable_entry_type))); - d = tree_low_cst (fixed_offset, 0); + d = tree_to_shwi (fixed_offset); /* See if we already have the thunk in question. 
For this_adjusting thunks VIRTUAL_OFFSET will be an INTEGER_CST, for covariant thunks it @@ -323,7 +323,7 @@ use_thunk (tree thunk_fndecl, bool emit_p) { if (!this_adjusting) virtual_offset = BINFO_VPTR_FIELD (virtual_offset); - virtual_value = tree_low_cst (virtual_offset, /*pos=*/0); + virtual_value = tree_to_shwi (virtual_offset); gcc_assert (virtual_value); } else diff --git a/gcc/cp/parser.c b/gcc/cp/parser.c index c5d398a5baf..369e3015f28 100644 --- a/gcc/cp/parser.c +++ b/gcc/cp/parser.c @@ -812,7 +812,7 @@ cp_lexer_get_preprocessor_token (cp_lexer *lexer, cp_token *token) { /* We smuggled the cpp_token->u.pragma value in an INTEGER_CST. */ token->pragma_kind = ((enum pragma_kind) - TREE_INT_CST_LOW (token->u.value)); + tree_to_hwi (token->u.value)); token->u.value = NULL_TREE; } } @@ -3852,7 +3852,7 @@ cp_parser_userdef_string_literal (cp_token *token) tree name = cp_literal_operator_id (IDENTIFIER_POINTER (suffix_id)); tree value = USERDEF_LITERAL_VALUE (literal); int len = TREE_STRING_LENGTH (value) - / TREE_INT_CST_LOW (TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (value)))) - 1; + / tree_to_hwi (TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (value)))) - 1; tree decl, result; vec<tree, va_gc> *args; @@ -26461,8 +26461,8 @@ cp_parser_omp_clause_collapse (cp_parser *parser, tree list, location_t location return list; num = fold_non_dependent_expr (num); if (!INTEGRAL_TYPE_P (TREE_TYPE (num)) - || !host_integerp (num, 0) - || (n = tree_low_cst (num, 0)) <= 0 + || !tree_fits_shwi_p (num) + || (n = tree_to_shwi (num)) <= 0 || (int) n != n) { error_at (loc, "collapse argument needs positive constant integer expression"); @@ -27565,7 +27565,7 @@ cp_parser_omp_for_loop (cp_parser *parser, tree clauses, tree *par_clauses) for (cl = clauses; cl; cl = OMP_CLAUSE_CHAIN (cl)) if (OMP_CLAUSE_CODE (cl) == OMP_CLAUSE_COLLAPSE) - collapse = tree_low_cst (OMP_CLAUSE_COLLAPSE_EXPR (cl), 0); + collapse = tree_to_shwi (OMP_CLAUSE_COLLAPSE_EXPR (cl)); gcc_assert (collapse >= 1); diff --git a/gcc/cp/semantics.c b/gcc/cp/semantics.c index fa47db7cac7..6ae8ba1b474 100644 --- a/gcc/cp/semantics.c +++ b/gcc/cp/semantics.c @@ -7087,7 +7087,7 @@ cxx_eval_array_reference (const constexpr_call *call, tree t, *non_constant_p = true; return t; } - i = tree_low_cst (index, 0); + i = tree_to_shwi (index); if (TREE_CODE (ary) == CONSTRUCTOR) return (*CONSTRUCTOR_ELTS (ary))[i].value; else if (elem_nchars == 1) @@ -7202,8 +7202,8 @@ cxx_eval_bit_field_ref (const constexpr_call *call, tree t, TREE_OPERAND (t, 1), TREE_OPERAND (t, 2)); start = TREE_OPERAND (t, 2); - istart = tree_low_cst (start, 0); - isize = tree_low_cst (TREE_OPERAND (t, 1), 0); + istart = tree_to_shwi (start); + isize = tree_to_shwi (TREE_OPERAND (t, 1)); utype = TREE_TYPE (t); if (!TYPE_UNSIGNED (utype)) utype = build_nonstandard_integer_type (TYPE_PRECISION (utype), 1); @@ -7215,11 +7215,11 @@ cxx_eval_bit_field_ref (const constexpr_call *call, tree t, return value; if (TREE_CODE (TREE_TYPE (field)) == INTEGER_TYPE && TREE_CODE (value) == INTEGER_CST - && host_integerp (bitpos, 0) - && host_integerp (DECL_SIZE (field), 0)) + && tree_fits_shwi_p (bitpos) + && tree_fits_shwi_p (DECL_SIZE (field))) { - HOST_WIDE_INT bit = tree_low_cst (bitpos, 0); - HOST_WIDE_INT sz = tree_low_cst (DECL_SIZE (field), 0); + HOST_WIDE_INT bit = tree_to_shwi (bitpos); + HOST_WIDE_INT sz = tree_to_shwi (DECL_SIZE (field)); HOST_WIDE_INT shift; if (bit >= istart && bit + sz <= istart + isize) { @@ -7376,7 +7376,7 @@ cxx_eval_vec_init_1 (const constexpr_call *call, tree atype, tree init, 
bool *non_constant_p, bool *overflow_p) { tree elttype = TREE_TYPE (atype); - int max = tree_low_cst (array_type_nelts (atype), 0); + int max = tree_to_shwi (array_type_nelts (atype)); vec<constructor_elt, va_gc> *n; vec_alloc (n, max + 1); bool pre_init = false; @@ -7595,9 +7595,9 @@ cxx_fold_indirect_ref (location_t loc, tree type, tree op0, bool *empty_base) && (same_type_ignoring_top_level_qualifiers_p (type, TREE_TYPE (op00type)))) { - HOST_WIDE_INT offset = tree_low_cst (op01, 0); + HOST_WIDE_INT offset = tree_to_shwi (op01); tree part_width = TYPE_SIZE (type); - unsigned HOST_WIDE_INT part_widthi = tree_low_cst (part_width, 0)/BITS_PER_UNIT; + unsigned HOST_WIDE_INT part_widthi = tree_to_shwi (part_width)/BITS_PER_UNIT; unsigned HOST_WIDE_INT indexi = offset * BITS_PER_UNIT; tree index = bitsize_int (indexi); diff --git a/gcc/cp/tree.c b/gcc/cp/tree.c index 63ec7fa7266..9bc400525fd 100644 --- a/gcc/cp/tree.c +++ b/gcc/cp/tree.c @@ -32,6 +32,8 @@ along with GCC; see the file COPYING3. If not see #include "splay-tree.h" #include "gimple.h" /* gimple_has_body_p */ #include "hash-table.h" +#include "wide-int.h" + static tree bot_manip (tree *, int *, void *); static tree bot_replace (tree *, int *, void *); @@ -1692,7 +1694,7 @@ debug_binfo (tree elem) fprintf (stderr, "type \"%s\", offset = " HOST_WIDE_INT_PRINT_DEC "\nvtable type:\n", TYPE_NAME_STRING (BINFO_TYPE (elem)), - TREE_INT_CST_LOW (BINFO_OFFSET (elem))); + tree_to_hwi (BINFO_OFFSET (elem))); debug_tree (BINFO_TYPE (elem)); if (BINFO_VTABLE (elem)) fprintf (stderr, "vtable decl \"%s\"\n", @@ -1708,7 +1710,7 @@ debug_binfo (tree elem) tree fndecl = TREE_VALUE (virtuals); fprintf (stderr, "%s [%ld =? %ld]\n", IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (fndecl)), - (long) n, (long) TREE_INT_CST_LOW (DECL_VINDEX (fndecl))); + (long) n, (long) tree_to_hwi (DECL_VINDEX (fndecl))); ++n; virtuals = TREE_CHAIN (virtuals); } @@ -2601,8 +2603,7 @@ cp_tree_equal (tree t1, tree t2) switch (code1) { case INTEGER_CST: - return TREE_INT_CST_LOW (t1) == TREE_INT_CST_LOW (t2) - && TREE_INT_CST_HIGH (t1) == TREE_INT_CST_HIGH (t2); + return wide_int::eq_p (t1, t2); case REAL_CST: return REAL_VALUES_EQUAL (TREE_REAL_CST (t1), TREE_REAL_CST (t2)); @@ -3240,7 +3241,7 @@ handle_init_priority_attribute (tree* node, return NULL_TREE; } - pri = TREE_INT_CST_LOW (initp_expr); + pri = tree_to_hwi (initp_expr); type = strip_array_types (type); diff --git a/gcc/cp/typeck2.c b/gcc/cp/typeck2.c index 79329397a8b..ec4c8518549 100644 --- a/gcc/cp/typeck2.c +++ b/gcc/cp/typeck2.c @@ -34,6 +34,7 @@ along with GCC; see the file COPYING3. 
If not see #include "cp-tree.h" #include "flags.h" #include "diagnostic-core.h" +#include "wide-int.h" static tree process_init_constructor (tree type, tree init, tsubst_flags_t complain); @@ -979,7 +980,7 @@ digest_init_r (tree type, tree init, bool nested, int flags, } if (TYPE_DOMAIN (type) != 0 && TREE_CONSTANT (TYPE_SIZE (type))) { - int size = TREE_INT_CST_LOW (TYPE_SIZE (type)); + int size = tree_to_hwi (TYPE_SIZE (type)); size = (size + BITS_PER_UNIT - 1) / BITS_PER_UNIT; /* In C it is ok to subtract 1 from the length of the string because it's ok to ignore the terminating null char that is @@ -1118,12 +1119,11 @@ process_init_constructor_array (tree type, tree init, { tree domain = TYPE_DOMAIN (type); if (domain && TREE_CONSTANT (TYPE_MAX_VALUE (domain))) - len = (tree_to_double_int (TYPE_MAX_VALUE (domain)) - - tree_to_double_int (TYPE_MIN_VALUE (domain)) - + double_int_one) - .ext (TYPE_PRECISION (TREE_TYPE (domain)), - TYPE_UNSIGNED (TREE_TYPE (domain))) - .low; + len = (addr_wide_int (TYPE_MAX_VALUE (domain)) + - TYPE_MIN_VALUE (domain) + 1) + .ext (TYPE_PRECISION (TREE_TYPE (domain)), + TYPE_SIGN (TREE_TYPE (domain))) + .to_uhwi (); else unbounded = true; /* Take as many as there are. */ } diff --git a/gcc/cp/vtable-class-hierarchy.c b/gcc/cp/vtable-class-hierarchy.c index 479447aa7d0..c1b466e49c8 100644 --- a/gcc/cp/vtable-class-hierarchy.c +++ b/gcc/cp/vtable-class-hierarchy.c @@ -467,7 +467,7 @@ check_and_record_registered_pairs (tree vtable_decl, tree vptr_address, vptr_address = TREE_OPERAND (vptr_address, 0); if (TREE_OPERAND_LENGTH (vptr_address) > 1) - offset = TREE_INT_CST_LOW (TREE_OPERAND (vptr_address, 1)); + offset = TREE_INT_CST_ELT (TREE_OPERAND (vptr_address, 1), 0); else offset = 0; @@ -889,7 +889,7 @@ output_set_info (tree record_type, tree *vtbl_ptr_array, int array_size) vptr_name = IDENTIFIER_POINTER (DECL_NAME (arg0)); if (TREE_CODE (arg1) == INTEGER_CST) - vptr_offset = TREE_INT_CST_LOW (arg1); + vptr_offset = TREE_INT_CST_ELT (arg1, 0); } snprintf (buffer, sizeof (buffer), "%s %s %s + %d\n", diff --git a/gcc/cppbuiltin.c b/gcc/cppbuiltin.c index 7ce01cb6934..da4efc2bc84 100644 --- a/gcc/cppbuiltin.c +++ b/gcc/cppbuiltin.c @@ -128,7 +128,7 @@ define_builtin_macros_for_type_sizes (cpp_reader *pfile) { #define define_type_sizeof(NAME, TYPE) \ cpp_define_formatted (pfile, NAME"="HOST_WIDE_INT_PRINT_DEC, \ - tree_low_cst (TYPE_SIZE_UNIT (TYPE), 1)) + tree_to_uhwi (TYPE_SIZE_UNIT (TYPE))) define_type_sizeof ("__SIZEOF_INT__", integer_type_node); define_type_sizeof ("__SIZEOF_LONG__", long_integer_type_node); diff --git a/gcc/cse.c b/gcc/cse.c index ee1b7be1170..744a495b16e 100644 --- a/gcc/cse.c +++ b/gcc/cse.c @@ -2331,15 +2331,23 @@ hash_rtx_cb (const_rtx x, enum machine_mode mode, + (unsigned int) INTVAL (x)); return hash; + case CONST_WIDE_INT: + { + int i; + for (i = 0; i < CONST_WIDE_INT_NUNITS (x); i++) + hash += CONST_WIDE_INT_ELT (x, i); + } + return hash; + case CONST_DOUBLE: /* This is like the general case, except that it only counts the integers representing the constant. 
*/ hash += (unsigned int) code + (unsigned int) GET_MODE (x); - if (GET_MODE (x) != VOIDmode) - hash += real_hash (CONST_DOUBLE_REAL_VALUE (x)); - else + if (TARGET_SUPPORTS_WIDE_INT == 0 && GET_MODE (x) == VOIDmode) hash += ((unsigned int) CONST_DOUBLE_LOW (x) + (unsigned int) CONST_DOUBLE_HIGH (x)); + else + hash += real_hash (CONST_DOUBLE_REAL_VALUE (x)); return hash; case CONST_FIXED: @@ -3283,8 +3291,8 @@ fold_rtx (rtx x, rtx insn) break; new_rtx = simplify_unary_operation (code, mode, - const_arg0 ? const_arg0 : folded_arg0, - mode_arg0); + const_arg0 ? const_arg0 : folded_arg0, + mode_arg0); } break; @@ -3756,6 +3764,7 @@ equiv_constant (rtx x) /* See if we previously assigned a constant value to this SUBREG. */ if ((new_rtx = lookup_as_function (x, CONST_INT)) != 0 + || (new_rtx = lookup_as_function (x, CONST_WIDE_INT)) != 0 || (new_rtx = lookup_as_function (x, CONST_DOUBLE)) != 0 || (new_rtx = lookup_as_function (x, CONST_FIXED)) != 0) return new_rtx; diff --git a/gcc/cselib.c b/gcc/cselib.c index c3431af1246..acd9b2def27 100644 --- a/gcc/cselib.c +++ b/gcc/cselib.c @@ -926,8 +926,7 @@ rtx_equal_for_cselib_1 (rtx x, rtx y, enum machine_mode memmode) /* These won't be handled correctly by the code below. */ switch (GET_CODE (x)) { - case CONST_DOUBLE: - case CONST_FIXED: + CASE_CONST_UNIQUE: case DEBUG_EXPR: return 0; @@ -1121,15 +1120,23 @@ cselib_hash_rtx (rtx x, int create, enum machine_mode memmode) hash += ((unsigned) CONST_INT << 7) + INTVAL (x); return hash ? hash : (unsigned int) CONST_INT; + case CONST_WIDE_INT: + { + int i; + for (i = 0; i < CONST_WIDE_INT_NUNITS (x); i++) + hash += CONST_WIDE_INT_ELT (x, i); + } + return hash; + case CONST_DOUBLE: /* This is like the general case, except that it only counts the integers representing the constant. */ hash += (unsigned) code + (unsigned) GET_MODE (x); - if (GET_MODE (x) != VOIDmode) - hash += real_hash (CONST_DOUBLE_REAL_VALUE (x)); - else + if (TARGET_SUPPORTS_WIDE_INT == 0 && GET_MODE (x) == VOIDmode) hash += ((unsigned) CONST_DOUBLE_LOW (x) + (unsigned) CONST_DOUBLE_HIGH (x)); + else + hash += real_hash (CONST_DOUBLE_REAL_VALUE (x)); return hash ? hash : (unsigned int) CONST_DOUBLE; case CONST_FIXED: diff --git a/gcc/dbxout.c b/gcc/dbxout.c index 1cc5d524205..2c6a59fbed8 100644 --- a/gcc/dbxout.c +++ b/gcc/dbxout.c @@ -690,88 +690,40 @@ stabstr_U (unsigned HOST_WIDE_INT num) static void stabstr_O (tree cst) { - unsigned HOST_WIDE_INT high = TREE_INT_CST_HIGH (cst); - unsigned HOST_WIDE_INT low = TREE_INT_CST_LOW (cst); - - char buf[128]; - char *p = buf + sizeof buf; - - /* GDB wants constants with no extra leading "1" bits, so - we need to remove any sign-extension that might be - present. */ - { - const unsigned int width = TYPE_PRECISION (TREE_TYPE (cst)); - if (width == HOST_BITS_PER_DOUBLE_INT) - ; - else if (width > HOST_BITS_PER_WIDE_INT) - high &= (((HOST_WIDE_INT) 1 << (width - HOST_BITS_PER_WIDE_INT)) - 1); - else if (width == HOST_BITS_PER_WIDE_INT) - high = 0; - else - high = 0, low &= (((HOST_WIDE_INT) 1 << width) - 1); - } + wide_int wcst = cst; + int prec = wcst.get_precision (); + int res_pres = prec % 3; + int i; + unsigned int digit; /* Leading zero for base indicator. */ stabstr_C ('0'); /* If the value is zero, the base indicator will serve as the value all by itself. */ - if (high == 0 && low == 0) + if (wcst.zero_p ()) return; - /* If the high half is zero, we need only print the low half normally. 
*/ - if (high == 0) - NUMBER_FMT_LOOP (p, low, 8); - else + /* GDB wants constants with no extra leading "1" bits, so + we need to remove any sign-extension that might be + present. */ + if (res_pres == 1) { - /* When high != 0, we need to print enough zeroes from low to - give the digits from high their proper place-values. Hence - NUMBER_FMT_LOOP cannot be used. */ - const int n_digits = HOST_BITS_PER_WIDE_INT / 3; - int i; - - for (i = 1; i <= n_digits; i++) - { - unsigned int digit = low % 8; - low /= 8; - *--p = '0' + digit; - } - - /* Octal digits carry exactly three bits of information. The - width of a HOST_WIDE_INT is not normally a multiple of three. - Therefore, the next digit printed probably needs to carry - information from both low and high. */ - if (HOST_BITS_PER_WIDE_INT % 3 != 0) - { - const int n_leftover_bits = HOST_BITS_PER_WIDE_INT % 3; - const int n_bits_from_high = 3 - n_leftover_bits; - - const unsigned HOST_WIDE_INT - low_mask = (((unsigned HOST_WIDE_INT)1) << n_leftover_bits) - 1; - const unsigned HOST_WIDE_INT - high_mask = (((unsigned HOST_WIDE_INT)1) << n_bits_from_high) - 1; - - unsigned int digit; - - /* At this point, only the bottom n_leftover_bits bits of low - should be set. */ - gcc_assert (!(low & ~low_mask)); - - digit = (low | ((high & high_mask) << n_leftover_bits)); - high >>= n_bits_from_high; - - *--p = '0' + digit; - } - - /* Now we can format high in the normal manner. However, if - the only bits of high that were set were handled by the - digit split between low and high, high will now be zero, and - we don't want to print extra digits in that case. */ - if (high) - NUMBER_FMT_LOOP (p, high, 8); + digit = wcst.extract_to_hwi (prec - 1, 1) & 0x1; + stabstr_C ('0' + digit); + } + else if (res_pres == 2) + { + digit = wcst.extract_to_hwi (prec - 2, 2) & 0x3; + stabstr_C ('0' + digit); } - obstack_grow (&stabstr_ob, p, (buf + sizeof buf) - p); + prec -= res_pres; + /* Emit the remaining digits most-significant first; the loop must + count down to bit 0, not up from it. */ + for (i = prec - 3; i >= 0; i = i - 3) + { + digit = wcst.extract_to_hwi (i, 3) & 0x7; + stabstr_C ('0' + digit); + } } /* Called whenever it is safe to break a stabs string into multiple @@ -1519,9 +1471,9 @@ dbxout_type_fields (tree type) /* Omit fields whose position or size are variable or too large to represent. */ || (TREE_CODE (tem) == FIELD_DECL - && (! host_integerp (bit_position (tem), 0) + && (! tree_fits_shwi_p (bit_position (tem)) || ! DECL_SIZE (tem) - || ! host_integerp (DECL_SIZE (tem), 1)))) + || ! 
tree_fits_uhwi_p (DECL_SIZE (tem))))) continue; else if (TREE_CODE (tem) != CONST_DECL) @@ -1566,7 +1518,7 @@ dbxout_type_fields (tree type) stabstr_C (','); stabstr_D (int_bit_position (tem)); stabstr_C (','); - stabstr_D (tree_low_cst (DECL_SIZE (tem), 1)); + stabstr_D (tree_to_uhwi (DECL_SIZE (tem))); stabstr_C (';'); } } @@ -1610,9 +1562,9 @@ dbxout_type_method_1 (tree decl) stabstr_C (c1); stabstr_C (c2); - if (DECL_VINDEX (decl) && host_integerp (DECL_VINDEX (decl), 0)) + if (DECL_VINDEX (decl) && tree_fits_shwi_p (DECL_VINDEX (decl))) { - stabstr_D (tree_low_cst (DECL_VINDEX (decl), 0)); + stabstr_D (tree_to_shwi (DECL_VINDEX (decl))); stabstr_C (';'); dbxout_type (DECL_CONTEXT (decl), 0); stabstr_C (';'); @@ -1718,23 +1670,23 @@ dbxout_range_type (tree type, tree low, tree high) } stabstr_C (';'); - if (low && host_integerp (low, 0)) + if (low && tree_fits_shwi_p (low)) { if (print_int_cst_bounds_in_octal_p (type, low, high)) stabstr_O (low); else - stabstr_D (tree_low_cst (low, 0)); + stabstr_D (tree_to_shwi (low)); } else stabstr_C ('0'); stabstr_C (';'); - if (high && host_integerp (high, 0)) + if (high && tree_fits_shwi_p (high)) { if (print_int_cst_bounds_in_octal_p (type, low, high)) stabstr_O (high); else - stabstr_D (tree_low_cst (high, 0)); + stabstr_D (tree_to_shwi (high)); stabstr_C (';'); } else @@ -1864,7 +1816,7 @@ dbxout_type (tree type, int full) Sun dbx crashes if we do. */ if (! full || !COMPLETE_TYPE_P (type) /* No way in DBX fmt to describe a variable size. */ - || ! host_integerp (TYPE_SIZE (type), 1)) + || ! tree_fits_uhwi_p (TYPE_SIZE (type))) return; break; case TYPE_DEFINED: @@ -1889,7 +1841,7 @@ dbxout_type (tree type, int full) && !full) || !COMPLETE_TYPE_P (type) /* No way in DBX fmt to describe a variable size. */ - || ! host_integerp (TYPE_SIZE (type), 1)) + || ! tree_fits_uhwi_p (TYPE_SIZE (type))) { typevec[TYPE_SYMTAB_ADDRESS (type)].status = TYPE_XREF; return; @@ -2147,7 +2099,7 @@ dbxout_type (tree type, int full) && !full) || !COMPLETE_TYPE_P (type) /* No way in DBX fmt to describe a variable size. */ - || ! host_integerp (TYPE_SIZE (type), 1)) + || ! tree_fits_uhwi_p (TYPE_SIZE (type))) { /* If the type is just a cross reference, output one and mark the type as partially described. @@ -2210,10 +2162,10 @@ dbxout_type (tree type, int full) offset within the vtable where we must look to find the necessary adjustment. 
*/ stabstr_D - (tree_low_cst (BINFO_VPTR_FIELD (child), 0) + (tree_to_shwi (BINFO_VPTR_FIELD (child)) * BITS_PER_UNIT); else - stabstr_D (tree_low_cst (BINFO_OFFSET (child), 0) + stabstr_D (tree_to_shwi (BINFO_OFFSET (child)) * BITS_PER_UNIT); stabstr_C (','); dbxout_type (BINFO_TYPE (child), 0); @@ -2228,11 +2180,11 @@ dbxout_type (tree type, int full) stabstr_C (':'); dbxout_type (BINFO_TYPE (child), full); stabstr_C (','); - stabstr_D (tree_low_cst (BINFO_OFFSET (child), 0) + stabstr_D (tree_to_shwi (BINFO_OFFSET (child)) * BITS_PER_UNIT); stabstr_C (','); stabstr_D - (tree_low_cst (TYPE_SIZE (BINFO_TYPE (child)), 0) + (tree_to_shwi (TYPE_SIZE (BINFO_TYPE (child))) * BITS_PER_UNIT); stabstr_C (';'); } @@ -2299,11 +2251,8 @@ dbxout_type (tree type, int full) if (TREE_CODE (value) == CONST_DECL) value = DECL_INITIAL (value); - if (TREE_INT_CST_HIGH (value) == 0) - stabstr_D (TREE_INT_CST_LOW (value)); - else if (TREE_INT_CST_HIGH (value) == -1 - && (HOST_WIDE_INT) TREE_INT_CST_LOW (value) < 0) - stabstr_D (TREE_INT_CST_LOW (value)); + if (cst_fits_shwi_p (value)) + stabstr_D (tree_to_hwi (value)); else stabstr_O (value); @@ -2516,9 +2465,9 @@ dbxout_expand_expr (tree expr) return NULL; if (offset != NULL) { - if (!host_integerp (offset, 0)) + if (!tree_fits_shwi_p (offset)) return NULL; - x = adjust_address_nv (x, mode, tree_low_cst (offset, 0)); + x = adjust_address_nv (x, mode, tree_to_shwi (offset)); } if (bitpos != 0) x = adjust_address_nv (x, mode, bitpos / BITS_PER_UNIT); @@ -2796,7 +2745,7 @@ dbxout_symbol (tree decl, int local ATTRIBUTE_UNUSED) /* Do not generate a tag for records of variable size, since this type can not be properly described in the DBX format, and it confuses some tools such as objdump. */ - && host_integerp (TYPE_SIZE (type), 1)) + && tree_fits_uhwi_p (TYPE_SIZE (type))) { tree name = TYPE_NAME (type); if (TREE_CODE (name) == TYPE_DECL) @@ -2912,7 +2861,7 @@ dbxout_symbol (tree decl, int local ATTRIBUTE_UNUSED) ??? Why do we skip emitting the type and location in this case? */ if (TREE_STATIC (decl) && TREE_READONLY (decl) && DECL_INITIAL (decl) != 0 - && host_integerp (DECL_INITIAL (decl), 0) + && tree_fits_shwi_p (DECL_INITIAL (decl)) && ! TREE_ASM_WRITTEN (decl) && (DECL_FILE_SCOPE_P (decl) || TREE_CODE (DECL_CONTEXT (decl)) == BLOCK @@ -2924,7 +2873,7 @@ dbxout_symbol (tree decl, int local ATTRIBUTE_UNUSED) if (TREE_CODE (TREE_TYPE (decl)) == INTEGER_TYPE || TREE_CODE (TREE_TYPE (decl)) == ENUMERAL_TYPE) { - HOST_WIDE_INT ival = TREE_INT_CST_LOW (DECL_INITIAL (decl)); + HOST_WIDE_INT ival = tree_to_hwi (DECL_INITIAL (decl)); dbxout_begin_complex_stabs (); dbxout_symbol_name (decl, NULL, 'c'); diff --git a/gcc/defaults.h b/gcc/defaults.h index 4f43f6f0067..080107330c4 100644 --- a/gcc/defaults.h +++ b/gcc/defaults.h @@ -1404,6 +1404,14 @@ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see #define SWITCHABLE_TARGET 0 #endif +/* If the target supports integers that are wider than two + HOST_WIDE_INTs on the host compiler, then the target should define + TARGET_SUPPORTS_WIDE_INT and make the appropriate fixups. + Otherwise the compiler really is not robust. */ +#ifndef TARGET_SUPPORTS_WIDE_INT +#define TARGET_SUPPORTS_WIDE_INT 0 +#endif + #endif /* GCC_INSN_FLAGS_H */ #endif /* ! GCC_DEFAULTS_H */ diff --git a/gcc/dfp.c b/gcc/dfp.c index d15ee8f8848..3988ac9f5fb 100644 --- a/gcc/dfp.c +++ b/gcc/dfp.c @@ -24,6 +24,7 @@ along with GCC; see the file COPYING3. 
If not see #include "tree.h" #include "tm_p.h" #include "dfp.h" +#include "wide-int.h" /* The order of the following headers is important for making sure decNumber structure is large enough to hold decimal128 digits. */ @@ -604,11 +605,11 @@ decimal_real_to_integer (const REAL_VALUE_TYPE *r) return real_to_integer (&to); } -/* Likewise, but to an integer pair, HI+LOW. */ +/* Likewise, but returns a wide_int with PRECISION. Fail + is set if the value does not fit. */ -void -decimal_real_to_integer2 (HOST_WIDE_INT *plow, HOST_WIDE_INT *phigh, - const REAL_VALUE_TYPE *r) +wide_int +decimal_real_to_integer (const REAL_VALUE_TYPE *r, bool *fail, int precision) { decContext set; decNumber dn, dn2, dn3; @@ -628,7 +629,7 @@ decimal_real_to_integer2 (HOST_WIDE_INT *plow, HOST_WIDE_INT *phigh, function. */ decNumberToString (&dn, string); real_from_string (&to, string); - real_to_integer2 (plow, phigh, &to); + return real_to_integer (&to, fail, precision); } /* Perform the decimal floating point operation described by CODE. diff --git a/gcc/dfp.h b/gcc/dfp.h index 3b9bb8dd889..dcf3d833a9c 100644 --- a/gcc/dfp.h +++ b/gcc/dfp.h @@ -38,7 +38,6 @@ void decimal_real_convert (REAL_VALUE_TYPE *, enum machine_mode, const REAL_VALU void decimal_real_to_decimal (char *, const REAL_VALUE_TYPE *, size_t, size_t, int); void decimal_do_fix_trunc (REAL_VALUE_TYPE *, const REAL_VALUE_TYPE *); void decimal_real_maxval (REAL_VALUE_TYPE *, int, enum machine_mode); -void decimal_real_to_integer2 (HOST_WIDE_INT *, HOST_WIDE_INT *, const REAL_VALUE_TYPE *); HOST_WIDE_INT decimal_real_to_integer (const REAL_VALUE_TYPE *); #ifdef TREE_CODE diff --git a/gcc/doc/generic.texi b/gcc/doc/generic.texi index cacab01f9a4..78fa45f7d79 100644 --- a/gcc/doc/generic.texi +++ b/gcc/doc/generic.texi @@ -1017,10 +1017,12 @@ As this example indicates, the operands are zero-indexed. @node Constant expressions @subsection Constant expressions @tindex INTEGER_CST -@findex TREE_INT_CST_HIGH -@findex TREE_INT_CST_LOW -@findex tree_int_cst_lt -@findex tree_int_cst_equal +@tindex tree_fits_uhwi_p +@tindex tree_fits_shwi_p +@tindex tree_fits_hwi_p +@tindex tree_to_uhwi +@tindex tree_to_shwi +@tindex tree_to_hwi @tindex REAL_CST @tindex FIXED_CST @tindex COMPLEX_CST @@ -1039,36 +1041,20 @@ These nodes represent integer constants. Note that the type of these constants is obtained with @code{TREE_TYPE}; they are not always of type @code{int}. In particular, @code{char} constants are represented with @code{INTEGER_CST} nodes. The value of the integer constant @code{e} is -given by -@smallexample -((TREE_INT_CST_HIGH (e) << HOST_BITS_PER_WIDE_INT) -+ TREE_INST_CST_LOW (e)) -@end smallexample -@noindent -HOST_BITS_PER_WIDE_INT is at least thirty-two on all platforms. Both -@code{TREE_INT_CST_HIGH} and @code{TREE_INT_CST_LOW} return a -@code{HOST_WIDE_INT}. The value of an @code{INTEGER_CST} is interpreted -as a signed or unsigned quantity depending on the type of the constant. -In general, the expression given above will overflow, so it should not -be used to calculate the value of the constant. - -The variable @code{integer_zero_node} is an integer constant with value -zero. Similarly, @code{integer_one_node} is an integer constant with -value one. The @code{size_zero_node} and @code{size_one_node} variables -are analogous, but have type @code{size_t} rather than @code{int}. - -The function @code{tree_int_cst_lt} is a predicate which holds if its -first argument is less than its second. 
Both constants are assumed to -have the same signedness (i.e., either both should be signed or both -should be unsigned.) The full width of the constant is used when doing -the comparison; the usual rules about promotions and conversions are -ignored. Similarly, @code{tree_int_cst_equal} holds if the two -constants are equal. The @code{tree_int_cst_sgn} function returns the -sign of a constant. The value is @code{1}, @code{0}, or @code{-1} -according on whether the constant is greater than, equal to, or less -than zero. Again, the signedness of the constant's type is taken into -account; an unsigned constant is never less than zero, no matter what -its bit-pattern. +represented in an array of @code{HOST_WIDE_INT}. There are enough elements +in the array to represent the value without taking extra elements for +redundant 0s or -1s. + +The functions @code{tree_fits_uhwi_p}, @code{tree_fits_shwi_p}, and +@code{tree_fits_hwi_p} can be used to tell if the value is small +enough to fit in a @code{HOST_WIDE_INT}, as either a signed value, an unsigned +value, or a value whose sign is given as a parameter. The value can +then be extracted using @code{tree_to_uhwi}, @code{tree_to_shwi}, +or @code{tree_to_hwi}; @code{tree_to_hwi} comes in both checked +and unchecked flavors. However, when the value may be larger than can be represented +in @code{HOST_BITS_PER_WIDE_INT} bits, the @code{wide_int} class should be used to +manipulate the constant. @item REAL_CST diff --git a/gcc/doc/rtl.texi b/gcc/doc/rtl.texi index f14e11f6166..540a5bd3561 100644 --- a/gcc/doc/rtl.texi +++ b/gcc/doc/rtl.texi @@ -1525,17 +1525,22 @@ Similarly, there is only one object for the integer whose value is @findex const_double @item (const_double:@var{m} @var{i0} @var{i1} @dots{}) -Represents either a floating-point constant of mode @var{m} or an -integer constant too large to fit into @code{HOST_BITS_PER_WIDE_INT} -bits but small enough to fit within twice that number of bits (GCC -does not provide a mechanism to represent even larger constants). In -the latter case, @var{m} will be @code{VOIDmode}. For integral values -constants for modes with more bits than twice the number in -@code{HOST_WIDE_INT} the implied high order bits of that constant are -copies of the top bit of @code{CONST_DOUBLE_HIGH}. Note however that -integral values are neither inherently signed nor inherently unsigned; -where necessary, signedness is determined by the rtl operation -instead. +This represents either a floating-point constant of mode @var{m} or +(on older ports that do not define +@code{TARGET_SUPPORTS_WIDE_INT}) an integer constant too large to fit +into @code{HOST_BITS_PER_WIDE_INT} bits but small enough to fit within +twice that number of bits (GCC does not provide a mechanism to +represent even larger constants). In the latter case, @var{m} will be +@code{VOIDmode}. For integer constants of modes with more +bits than twice the number in @code{HOST_WIDE_INT}, the implied +high-order bits of that constant are copies of the top bit of +@code{CONST_DOUBLE_HIGH}. Note however that integral values are +neither inherently signed nor inherently unsigned; where necessary, +signedness is determined by the rtl operation instead. + +On more modern ports, @code{CONST_DOUBLE} only represents floating +point values. New ports define @code{TARGET_SUPPORTS_WIDE_INT} to +make this designation.
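The following sketch is illustrative only: it simply mirrors the hashing loops this patch adds to @file{cse.c} and @file{cselib.c}, and assumes @code{x} is an rtx already known to be a @code{const_wide_int}. It shows the typical way the compressed array is walked, using the accessor macros documented under @code{const_wide_int} below:

@smallexample
/* Hash the value of a const_wide_int rtx X, least-significant
   element first.  The compressed form guarantees that no redundant
   high-order 0 or -1 elements are present, so every element
   contributes information.  */
unsigned int hash = 0;
int i;
for (i = 0; i < CONST_WIDE_INT_NUNITS (x); i++)
  hash += CONST_WIDE_INT_ELT (x, i);
@end smallexample

Because the array length is value-dependent rather than mode-dependent, loops like this replace the fixed low/high pair accesses used with @code{VOIDmode} @code{CONST_DOUBLE}s.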
@findex CONST_DOUBLE_LOW If @var{m} is @code{VOIDmode}, the bits of the value are stored in @@ -1550,6 +1555,37 @@ machine's or host machine's floating point format. To convert them to the precise bit pattern used by the target machine, use the macro @code{REAL_VALUE_TO_TARGET_DOUBLE} and friends (@pxref{Data Output}). +@findex CONST_WIDE_INT +@item (const_wide_int:@var{m} @var{nunits} @var{elt0} @dots{}) +This contains an array of @code{HOST_WIDE_INT}s that is large enough +to hold any constant that can be represented on the target. This form +of rtl is only used on targets that define +@code{TARGET_SUPPORTS_WIDE_INT} to be nonzero; on such targets +@code{CONST_DOUBLE}s are only used to hold floating-point values. If +the target leaves @code{TARGET_SUPPORTS_WIDE_INT} defined as 0, +@code{CONST_WIDE_INT}s are not used and @code{CONST_DOUBLE}s are as +they were before. + +The values are stored in a compressed format. The higher-order +0s or -1s are not represented if they are just the logical sign +extension of the number that is represented. + +@findex CONST_WIDE_INT_VEC +@item CONST_WIDE_INT_VEC (@var{code}) +Returns the entire array of @code{HOST_WIDE_INT}s that are used to +store the value. This macro should rarely be used. + +@findex CONST_WIDE_INT_NUNITS +@item CONST_WIDE_INT_NUNITS (@var{code}) +The number of @code{HOST_WIDE_INT}s used to represent the number. +Note that this will generally be smaller than the number of +@code{HOST_WIDE_INT}s implied by the mode size. + +@findex CONST_WIDE_INT_ELT +@item CONST_WIDE_INT_ELT (@var{code},@var{i}) +Returns the @var{i}th element of the array. Element 0 contains +the low-order bits of the constant. + @findex const_fixed @item (const_fixed:@var{m} @dots{}) Represents a fixed-point constant of mode @var{m}. diff --git a/gcc/doc/tm.texi b/gcc/doc/tm.texi index 69e7e03cf68..ffe9e5c257d 100644 --- a/gcc/doc/tm.texi +++ b/gcc/doc/tm.texi @@ -9667,14 +9667,8 @@ Returns the negative of the floating point value @var{x}. Returns the absolute value of @var{x}. @end deftypefn -@deftypefn Macro void REAL_VALUE_TO_INT (HOST_WIDE_INT @var{low}, HOST_WIDE_INT @var{high}, REAL_VALUE_TYPE @var{x}) -Converts a floating point value @var{x} into a double-precision integer -which is then stored into @var{low} and @var{high}. If the value is not -integral, it is truncated. -@end deftypefn - -@deftypefn Macro void REAL_VALUE_FROM_INT (REAL_VALUE_TYPE @var{x}, HOST_WIDE_INT @var{low}, HOST_WIDE_INT @var{high}, enum machine_mode @var{mode}) -Converts a double-precision integer found in @var{low} and @var{high}, +@deftypefn Macro void REAL_VALUE_FROM_INT (REAL_VALUE_TYPE @var{x}, HOST_WIDE_INT @var{val}, enum machine_mode @var{mode}) +Converts an integer found in @var{val} into a floating point value which is then stored into @var{x}. The value is truncated to fit in mode @var{mode}. @end deftypefn @@ -11375,3 +11369,50 @@ It returns true if the target supports GNU indirect functions. The support includes the assembler, linker and dynamic linker. The default value of this hook is based on target's libc. @end deftypefn + +@defmac TARGET_SUPPORTS_WIDE_INT + +On older ports, large integers are stored in @code{CONST_DOUBLE} rtl +objects. Newer ports define @code{TARGET_SUPPORTS_WIDE_INT} to be +nonzero to indicate that large integers are stored in +@code{CONST_WIDE_INT} rtl objects. The @code{CONST_WIDE_INT} allows +very large integer constants to be represented.
@code{CONST_DOUBLE} +is limited to twice the size of the host's @code{HOST_WIDE_INT} +representation. + +Converting a port mostly requires looking for the places where +@code{CONST_DOUBLE}s are used with @code{VOIDmode} and replacing that +code with code that accesses @code{CONST_WIDE_INT}s. @samp{grep -i +const_double} at the port level gets you to 95% of the changes that +need to be made. There are a few places that require a deeper look. + +@itemize @bullet +@item +There is no equivalent to @code{hval} and @code{lval} for +@code{CONST_WIDE_INT}s. This would be difficult to express in the md +language since there are a variable number of elements. + +Most ports only check that @code{hval} is either 0 or -1 to see if the +value is small. As mentioned above, this will no longer be necessary +since small constants are always @code{CONST_INT}. Of course there +are still a few exceptions; the Alpha's constraint used by the @code{zap} +instruction certainly requires careful examination by C code. +However, all the current code does is pass @code{hval} and @code{lval} to C +code, so evolving the C code to look at the @code{CONST_WIDE_INT} is +not really a large change. + +@item +Because there is no standard template that ports use to materialize +constants, there is likely to be some futzing that is unique to each +port in this code. + +@item +The rtx costs may have to be adjusted to properly account for larger +constants that are represented as @code{CONST_WIDE_INT}. +@end itemize + +All in all, it does not take long to convert ports that the +maintainer is familiar with. + +@end defmac + diff --git a/gcc/doc/tm.texi.in b/gcc/doc/tm.texi.in index fad6d1044ea..e29d782416b 100644 --- a/gcc/doc/tm.texi.in +++ b/gcc/doc/tm.texi.in @@ -7362,14 +7362,8 @@ Returns the negative of the floating point value @var{x}. Returns the absolute value of @var{x}. @end deftypefn -@deftypefn Macro void REAL_VALUE_TO_INT (HOST_WIDE_INT @var{low}, HOST_WIDE_INT @var{high}, REAL_VALUE_TYPE @var{x}) -Converts a floating point value @var{x} into a double-precision integer -which is then stored into @var{low} and @var{high}. If the value is not -integral, it is truncated. -@end deftypefn - -@deftypefn Macro void REAL_VALUE_FROM_INT (REAL_VALUE_TYPE @var{x}, HOST_WIDE_INT @var{low}, HOST_WIDE_INT @var{high}, enum machine_mode @var{mode}) -Converts a double-precision integer found in @var{low} and @var{high}, +@deftypefn Macro void REAL_VALUE_FROM_INT (REAL_VALUE_TYPE @var{x}, HOST_WIDE_INT @var{val}, enum machine_mode @var{mode}) +Converts an integer found in @var{val} into a floating point value which is then stored into @var{x}. The value is truncated to fit in mode @var{mode}. @end deftypefn @@ -8415,3 +8409,50 @@ and the associated definitions of those functions. @hook TARGET_ATOMIC_TEST_AND_SET_TRUEVAL @hook TARGET_HAS_IFUNC_P + +@defmac TARGET_SUPPORTS_WIDE_INT + +On older ports, large integers are stored in @code{CONST_DOUBLE} rtl +objects. Newer ports define @code{TARGET_SUPPORTS_WIDE_INT} to be +nonzero to indicate that large integers are stored in +@code{CONST_WIDE_INT} rtl objects. The @code{CONST_WIDE_INT} allows +very large integer constants to be represented. @code{CONST_DOUBLE} +is limited to twice the size of the host's @code{HOST_WIDE_INT} +representation. + +Converting a port mostly requires looking for the places where +@code{CONST_DOUBLE}s are used with @code{VOIDmode} and replacing that +code with code that accesses @code{CONST_WIDE_INT}s. 
@samp{"grep -i +const_double"} at the port level gets you to 95% of the changes that +need to be made. There are a few places that require a deeper look. + +@itemize @bullet +@item +There is no equivalent to @code{hval} and @code{lval} for +@code{CONST_WIDE_INT}s. This would be difficult to express in the md +language since there are a variable number of elements. + +Most ports only check that @code{hval} is either 0 or -1 to see if the +value is small. As mentioned above, this will no longer be necessary +since small constants are always @code{CONST_INT}. Of course there +are still a few exceptions, the alpha's constraint used by the zap +instruction certainly requires careful examination by C code. +However, all the current code does is pass the hval and lval to C +code, so evolving the c code to look at the @code{CONST_WIDE_INT} is +not really a large change. + +@item +Because there is no standard template that ports use to materialize +constants, there is likely to be some futzing that is unique to each +port in this code. + +@item +The rtx costs may have to be adjusted to properly account for larger +constants that are represented as @code{CONST_WIDE_INT}. +@end itemize + +All and all it does not takes long to convert ports that the +maintainer is familiar with. + +@end defmac + diff --git a/gcc/dojump.c b/gcc/dojump.c index 3f04eacabb7..6eedecdaac0 100644 --- a/gcc/dojump.c +++ b/gcc/dojump.c @@ -142,6 +142,7 @@ static bool prefer_and_bit_test (enum machine_mode mode, int bitnum) { bool speed_p; + wide_int mask = wide_int::set_bit_in_zero (bitnum, GET_MODE_PRECISION (mode)); if (and_test == 0) { @@ -162,8 +163,7 @@ prefer_and_bit_test (enum machine_mode mode, int bitnum) } /* Fill in the integers. */ - XEXP (and_test, 1) - = immed_double_int_const (double_int_zero.set_bit (bitnum), mode); + XEXP (and_test, 1) = immed_wide_int_const (mask, mode); XEXP (XEXP (shift_test, 0), 1) = GEN_INT (bitnum); speed_p = optimize_insn_for_speed_p (); @@ -507,10 +507,10 @@ do_jump (tree exp, rtx if_false_label, rtx if_true_label, int prob) && compare_tree_int (shift, 0) >= 0 && compare_tree_int (shift, HOST_BITS_PER_WIDE_INT) < 0 && prefer_and_bit_test (TYPE_MODE (argtype), - TREE_INT_CST_LOW (shift))) + tree_to_hwi (shift))) { unsigned HOST_WIDE_INT mask - = (unsigned HOST_WIDE_INT) 1 << TREE_INT_CST_LOW (shift); + = (unsigned HOST_WIDE_INT) 1 << tree_to_hwi (shift); do_jump (build2 (BIT_AND_EXPR, argtype, arg, build_int_cstu (argtype, mask)), clr_label, set_label, setclr_prob); diff --git a/gcc/dwarf2out.c b/gcc/dwarf2out.c index 66cbfb032e0..5415ad1892d 100644 --- a/gcc/dwarf2out.c +++ b/gcc/dwarf2out.c @@ -346,6 +346,17 @@ dump_struct_debug (tree type, enum debug_info_usage usage, #endif + +/* Get the number of host wide ints needed to represent the precision + of the number. 
*/ + +static unsigned int +get_full_len (const wide_int &op) +{ + return ((op.get_precision () + HOST_BITS_PER_WIDE_INT - 1) + / HOST_BITS_PER_WIDE_INT); +} + static bool should_emit_struct_debug (tree type, enum debug_info_usage usage) { @@ -1377,6 +1388,9 @@ dw_val_equal_p (dw_val_node *a, dw_val_node *b) return (a->v.val_double.high == b->v.val_double.high && a->v.val_double.low == b->v.val_double.low); + case dw_val_class_wide_int: + return *a->v.val_wide == *b->v.val_wide; + case dw_val_class_vec: { size_t a_len = a->v.val_vec.elt_size * a->v.val_vec.length; @@ -1633,6 +1647,10 @@ size_of_loc_descr (dw_loc_descr_ref loc) case dw_val_class_const_double: size += HOST_BITS_PER_DOUBLE_INT / BITS_PER_UNIT; break; + case dw_val_class_wide_int: + size += (get_full_len (*loc->dw_loc_oprnd2.v.val_wide) + * HOST_BITS_PER_WIDE_INT / BITS_PER_UNIT); + break; default: gcc_unreachable (); } @@ -1810,6 +1828,20 @@ output_loc_operands (dw_loc_descr_ref loc, int for_eh_or_skip) second, NULL); } break; + case dw_val_class_wide_int: + { + int i; + int len = get_full_len (*val2->v.val_wide); + /* The highest valid element index is len - 1. */ + if (WORDS_BIG_ENDIAN) + for (i = len - 1; i >= 0; --i) + dw2_asm_output_data (HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR, + val2->v.val_wide->elt (i), NULL); + else + for (i = 0; i < len; ++i) + dw2_asm_output_data (HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR, + val2->v.val_wide->elt (i), NULL); + } + break; case dw_val_class_addr: gcc_assert (val1->v.val_unsigned == DWARF2_ADDR_SIZE); dw2_asm_output_addr_rtx (DWARF2_ADDR_SIZE, val2->v.val_addr, NULL); @@ -2019,6 +2051,21 @@ output_loc_operands (dw_loc_descr_ref loc, int for_eh_or_skip) dw2_asm_output_data (l, second, NULL); } break; + case dw_val_class_wide_int: + { + int i; + int len = get_full_len (*val2->v.val_wide); + l = HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR; + + dw2_asm_output_data (1, len * l, NULL); + if (WORDS_BIG_ENDIAN) + for (i = len - 1; i >= 0; --i) + dw2_asm_output_data (l, val2->v.val_wide->elt (i), NULL); + else + for (i = 0; i < len; ++i) + dw2_asm_output_data (l, val2->v.val_wide->elt (i), NULL); + } + break; default: gcc_unreachable (); } @@ -3111,7 +3158,7 @@ static void add_AT_location_description (dw_die_ref, enum dwarf_attribute, static void add_data_member_location_attribute (dw_die_ref, tree); static bool add_const_value_attribute (dw_die_ref, rtx); static void insert_int (HOST_WIDE_INT, unsigned, unsigned char *); -static void insert_double (double_int, unsigned char *); +static void insert_wide_int (const wide_int &, unsigned char *); static void insert_float (const_rtx, unsigned char *); static rtx rtl_for_decl_location (tree); static bool add_location_or_const_value_attribute (dw_die_ref, tree, bool, @@ -3738,6 +3785,21 @@ AT_unsigned (dw_attr_ref a) return a->dw_attr_val.v.val_unsigned; } +/* Add an unsigned wide integer attribute value to a DIE. */ + +static inline void +add_AT_wide (dw_die_ref die, enum dwarf_attribute attr_kind, + const wide_int& w) +{ + dw_attr_node attr; + + attr.dw_attr = attr_kind; + attr.dw_attr_val.val_class = dw_val_class_wide_int; + attr.dw_attr_val.v.val_wide = ggc_alloc_cleared_wide_int (); + *attr.dw_attr_val.v.val_wide = w; + add_dwarf_attr (die, &attr); +} + /* Add an unsigned double integer attribute value to a DIE. 
*/ static inline void @@ -5302,6 +5364,19 @@ print_die (dw_die_ref die, FILE *outfile) a->dw_attr_val.v.val_double.high, a->dw_attr_val.v.val_double.low); break; + case dw_val_class_wide_int: + { + int i = a->dw_attr_val.v.val_wide->get_len (); + fprintf (outfile, "constant ("); + gcc_assert (i > 0); + if (a->dw_attr_val.v.val_wide->elt (i - 1) == 0) + fprintf (outfile, "0x"); + fprintf (outfile, HOST_WIDE_INT_PRINT_HEX, a->dw_attr_val.v.val_wide->elt (--i)); + while (--i >= 0) + fprintf (outfile, HOST_WIDE_INT_PRINT_PADDED_HEX, a->dw_attr_val.v.val_wide->elt (i)); + fprintf (outfile, ")"); + break; + } case dw_val_class_vec: fprintf (outfile, "floating-point or vector constant"); break; @@ -5474,6 +5549,9 @@ attr_checksum (dw_attr_ref at, struct md5_ctx *ctx, int *mark) case dw_val_class_const_double: CHECKSUM (at->dw_attr_val.v.val_double); break; + case dw_val_class_wide_int: + CHECKSUM (*at->dw_attr_val.v.val_wide); + break; case dw_val_class_vec: CHECKSUM (at->dw_attr_val.v.val_vec); break; @@ -5747,6 +5825,12 @@ attr_checksum_ordered (enum dwarf_tag tag, dw_attr_ref at, CHECKSUM (at->dw_attr_val.v.val_double); break; + case dw_val_class_wide_int: + CHECKSUM_ULEB128 (DW_FORM_block); + CHECKSUM_ULEB128 (sizeof (*at->dw_attr_val.v.val_wide)); + CHECKSUM (*at->dw_attr_val.v.val_wide); + break; + case dw_val_class_vec: CHECKSUM_ULEB128 (DW_FORM_block); CHECKSUM_ULEB128 (sizeof (at->dw_attr_val.v.val_vec)); @@ -6226,6 +6310,8 @@ same_dw_val_p (const dw_val_node *v1, const dw_val_node *v2, int *mark) case dw_val_class_const_double: return v1->v.val_double.high == v2->v.val_double.high && v1->v.val_double.low == v2->v.val_double.low; + case dw_val_class_wide_int: + return *v1->v.val_wide == *v2->v.val_wide; case dw_val_class_vec: if (v1->v.val_vec.length != v2->v.val_vec.length || v1->v.val_vec.elt_size != v2->v.val_vec.elt_size) @@ -7759,6 +7845,13 @@ size_of_die (dw_die_ref die) if (HOST_BITS_PER_WIDE_INT >= 64) size++; /* block */ break; + case dw_val_class_wide_int: + size += (get_full_len (*a->dw_attr_val.v.val_wide) + * HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR); + if (get_full_len (*a->dw_attr_val.v.val_wide) * HOST_BITS_PER_WIDE_INT + > 64) + size++; /* block */ + break; case dw_val_class_vec: size += constant_size (a->dw_attr_val.v.val_vec.length * a->dw_attr_val.v.val_vec.elt_size) @@ -8121,6 +8214,20 @@ value_format (dw_attr_ref a) default: return DW_FORM_block1; } + case dw_val_class_wide_int: + switch (get_full_len (*a->dw_attr_val.v.val_wide) * HOST_BITS_PER_WIDE_INT) + { + case 8: + return DW_FORM_data1; + case 16: + return DW_FORM_data2; + case 32: + return DW_FORM_data4; + case 64: + return DW_FORM_data8; + default: + return DW_FORM_block1; + } case dw_val_class_vec: switch (constant_size (a->dw_attr_val.v.val_vec.length * a->dw_attr_val.v.val_vec.elt_size)) @@ -8560,6 +8667,32 @@ output_die (dw_die_ref die) } break; + case dw_val_class_wide_int: + { + int i; + int len = get_full_len (*a->dw_attr_val.v.val_wide); + int l = HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR; + if (len * HOST_BITS_PER_WIDE_INT > 64) + dw2_asm_output_data (1, get_full_len (*a->dw_attr_val.v.val_wide) * l, + NULL); + + /* The highest valid element index is len - 1. */ + if (WORDS_BIG_ENDIAN) + for (i = len - 1; i >= 0; --i) + { + dw2_asm_output_data (l, a->dw_attr_val.v.val_wide->elt (i), + name); + name = NULL; + } + else + for (i = 0; i < len; ++i) + { + dw2_asm_output_data (l, a->dw_attr_val.v.val_wide->elt (i), + name); + name = NULL; + } + } + break; + case dw_val_class_vec: { unsigned int elt_size = a->dw_attr_val.v.val_vec.elt_size; @@ -10150,25 
+10283,25 @@ simple_type_size_in_bits (const_tree type) return BITS_PER_WORD; else if (TYPE_SIZE (type) == NULL_TREE) return 0; - else if (host_integerp (TYPE_SIZE (type), 1)) - return tree_low_cst (TYPE_SIZE (type), 1); + else if (tree_fits_uhwi_p (TYPE_SIZE (type))) + return tree_to_uhwi (TYPE_SIZE (type)); else return TYPE_ALIGN (type); } -/* Similarly, but return a double_int instead of UHWI. */ +/* Similarly, but return a wide_int instead of UHWI. */ -static inline double_int -double_int_type_size_in_bits (const_tree type) +static inline addr_wide_int +wide_int_type_size_in_bits (const_tree type) { if (TREE_CODE (type) == ERROR_MARK) - return double_int::from_uhwi (BITS_PER_WORD); + return BITS_PER_WORD; else if (TYPE_SIZE (type) == NULL_TREE) - return double_int_zero; + return 0; else if (TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST) - return tree_to_double_int (TYPE_SIZE (type)); + return TYPE_SIZE (type); else - return double_int::from_uhwi (TYPE_ALIGN (type)); + return TYPE_ALIGN (type); } /* Given a pointer to a tree node for a subrange type, return a pointer @@ -11655,9 +11788,7 @@ clz_loc_descriptor (rtx rtl, enum machine_mode mode, rtx msb; if (GET_MODE_CLASS (mode) != MODE_INT - || GET_MODE (XEXP (rtl, 0)) != mode - || (GET_CODE (rtl) == CLZ - && GET_MODE_BITSIZE (mode) > HOST_BITS_PER_DOUBLE_INT)) + || GET_MODE (XEXP (rtl, 0)) != mode) return NULL; op0 = mem_loc_descriptor (XEXP (rtl, 0), mode, mem_mode, @@ -11701,9 +11832,9 @@ clz_loc_descriptor (rtx rtl, enum machine_mode mode, msb = GEN_INT ((unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1)); else - msb = immed_double_const (0, (unsigned HOST_WIDE_INT) 1 - << (GET_MODE_BITSIZE (mode) - - HOST_BITS_PER_WIDE_INT - 1), mode); + msb = immed_wide_int_const + (wide_int::set_bit_in_zero (GET_MODE_PRECISION (mode) - 1, + GET_MODE_PRECISION (mode)), mode); if (GET_CODE (msb) == CONST_INT && INTVAL (msb) < 0) tmp = new_loc_descr (HOST_BITS_PER_WIDE_INT == 32 ? 
DW_OP_const4u : HOST_BITS_PER_WIDE_INT == 64 @@ -12644,7 +12775,16 @@ mem_loc_descriptor (rtx rtl, enum machine_mode mode, mem_loc_result->dw_loc_oprnd1.val_class = dw_val_class_die_ref; mem_loc_result->dw_loc_oprnd1.v.val_die_ref.die = type_die; mem_loc_result->dw_loc_oprnd1.v.val_die_ref.external = 0; - if (SCALAR_FLOAT_MODE_P (mode)) +#if TARGET_SUPPORTS_WIDE_INT == 0 + if (!SCALAR_FLOAT_MODE_P (mode)) + { + mem_loc_result->dw_loc_oprnd2.val_class + = dw_val_class_const_double; + mem_loc_result->dw_loc_oprnd2.v.val_double + = rtx_to_double_int (rtl); + } + else +#endif { unsigned int length = GET_MODE_SIZE (mode); unsigned char *array @@ -12656,13 +12796,26 @@ mem_loc_descriptor (rtx rtl, enum machine_mode mode, mem_loc_result->dw_loc_oprnd2.v.val_vec.elt_size = 4; mem_loc_result->dw_loc_oprnd2.v.val_vec.array = array; } - else - { - mem_loc_result->dw_loc_oprnd2.val_class - = dw_val_class_const_double; - mem_loc_result->dw_loc_oprnd2.v.val_double - = rtx_to_double_int (rtl); - } + } + break; + + case CONST_WIDE_INT: + if (!dwarf_strict) + { + dw_die_ref type_die; + + type_die = base_type_for_mode (mode, + GET_MODE_CLASS (mode) == MODE_INT); + if (type_die == NULL) + return NULL; + mem_loc_result = new_loc_descr (DW_OP_GNU_const_type, 0, 0); + mem_loc_result->dw_loc_oprnd1.val_class = dw_val_class_die_ref; + mem_loc_result->dw_loc_oprnd1.v.val_die_ref.die = type_die; + mem_loc_result->dw_loc_oprnd1.v.val_die_ref.external = 0; + mem_loc_result->dw_loc_oprnd2.val_class + = dw_val_class_wide_int; + mem_loc_result->dw_loc_oprnd2.v.val_wide = ggc_alloc_cleared_wide_int (); + *mem_loc_result->dw_loc_oprnd2.v.val_wide = std::make_pair (rtl, mode); } break; @@ -13133,7 +13286,15 @@ loc_descriptor (rtx rtl, enum machine_mode mode, adequately represented. We output CONST_DOUBLEs as blocks. */ loc_result = new_loc_descr (DW_OP_implicit_value, GET_MODE_SIZE (mode), 0); - if (SCALAR_FLOAT_MODE_P (mode)) +#if TARGET_SUPPORTS_WIDE_INT == 0 + if (!SCALAR_FLOAT_MODE_P (mode)) + { + loc_result->dw_loc_oprnd2.val_class = dw_val_class_const_double; + loc_result->dw_loc_oprnd2.v.val_double + = rtx_to_double_int (rtl); + } + else +#endif { unsigned int length = GET_MODE_SIZE (mode); unsigned char *array @@ -13145,12 +13306,26 @@ loc_descriptor (rtx rtl, enum machine_mode mode, loc_result->dw_loc_oprnd2.v.val_vec.elt_size = 4; loc_result->dw_loc_oprnd2.v.val_vec.array = array; } - else - { - loc_result->dw_loc_oprnd2.val_class = dw_val_class_const_double; - loc_result->dw_loc_oprnd2.v.val_double - = rtx_to_double_int (rtl); - } + } + break; + + case CONST_WIDE_INT: + if (mode == VOIDmode) + mode = GET_MODE (rtl); + + if (mode != VOIDmode && (dwarf_version >= 4 || !dwarf_strict)) + { + gcc_assert (mode == GET_MODE (rtl) || VOIDmode == GET_MODE (rtl)); + + /* A CONST_WIDE_INT represents an integer constant that requires + more than one word in order to be adequately represented, so, + like a CONST_DOUBLE, we output it as a block.
*/ + loc_result = new_loc_descr (DW_OP_implicit_value, + GET_MODE_SIZE (mode), 0); + loc_result->dw_loc_oprnd2.val_class = dw_val_class_wide_int; + loc_result->dw_loc_oprnd2.v.val_wide = ggc_alloc_cleared_wide_int (); + *loc_result->dw_loc_oprnd2.v.val_wide = std::make_pair (rtl, mode); } break; @@ -13166,6 +13341,7 @@ loc_descriptor (rtx rtl, enum machine_mode mode, ggc_alloc_atomic (length * elt_size); unsigned int i; unsigned char *p; + enum machine_mode imode = GET_MODE_INNER (mode); gcc_assert (mode == GET_MODE (rtl) || VOIDmode == GET_MODE (rtl)); switch (GET_MODE_CLASS (mode)) @@ -13174,15 +13350,8 @@ loc_descriptor (rtx rtl, enum machine_mode mode, for (i = 0, p = array; i < length; i++, p += elt_size) { rtx elt = CONST_VECTOR_ELT (rtl, i); - double_int val = rtx_to_double_int (elt); - - if (elt_size <= sizeof (HOST_WIDE_INT)) - insert_int (val.to_shwi (), elt_size, p); - else - { - gcc_assert (elt_size == 2 * sizeof (HOST_WIDE_INT)); - insert_double (val, p); - } + wide_int val = std::make_pair (elt, imode); + insert_wide_int (val, p); } break; @@ -13428,10 +13597,10 @@ dw_sra_loc_expr (tree decl, rtx loc) enum var_init_status initialized; if (DECL_SIZE (decl) == NULL - || !host_integerp (DECL_SIZE (decl), 1)) + || !tree_fits_uhwi_p (DECL_SIZE (decl))) return NULL; - decl_size = tree_low_cst (DECL_SIZE (decl), 1); + decl_size = tree_to_uhwi (DECL_SIZE (decl)); descr = NULL; descr_tail = &descr; @@ -14120,17 +14289,17 @@ loc_list_from_tree (tree loc, int want_address) } case INTEGER_CST: - if ((want_address || !host_integerp (loc, 0)) + if ((want_address || !tree_fits_shwi_p (loc)) && (ret = cst_pool_loc_descr (loc))) have_address = 1; else if (want_address == 2 - && host_integerp (loc, 0) + && tree_fits_shwi_p (loc) && (ret = address_of_int_loc_descriptor (int_size_in_bytes (TREE_TYPE (loc)), - tree_low_cst (loc, 0)))) + tree_to_shwi (loc)))) have_address = 1; - else if (host_integerp (loc, 0)) - ret = int_loc_descriptor (tree_low_cst (loc, 0)); + else if (tree_fits_shwi_p (loc)) + ret = int_loc_descriptor (tree_to_shwi (loc)); else { expansion_failed (loc, NULL_RTX, @@ -14219,13 +14388,13 @@ loc_list_from_tree (tree loc, int want_address) case POINTER_PLUS_EXPR: case PLUS_EXPR: - if (host_integerp (TREE_OPERAND (loc, 1), 0)) + if (tree_fits_shwi_p (TREE_OPERAND (loc, 1))) { list_ret = loc_list_from_tree (TREE_OPERAND (loc, 0), 0); if (list_ret == 0) return 0; - loc_list_plus_const (list_ret, tree_low_cst (TREE_OPERAND (loc, 1), 0)); + loc_list_plus_const (list_ret, tree_to_shwi (TREE_OPERAND (loc, 1))); break; } @@ -14490,14 +14659,12 @@ simple_decl_align_in_bits (const_tree decl) /* Return the result of rounding T up to ALIGN. 
*/ -static inline double_int -round_up_to_align (double_int t, unsigned int align) +static inline addr_wide_int +round_up_to_align (addr_wide_int t, unsigned int align) { - double_int alignd = double_int::from_uhwi (align); - t += alignd; - t += double_int_minus_one; - t = t.div (alignd, true, TRUNC_DIV_EXPR); - t *= alignd; + t += align - 1; + t = t.udiv_trunc (align); + t *= align; return t; } @@ -14511,9 +14678,9 @@ round_up_to_align (double_int t, unsigned int align) static HOST_WIDE_INT field_byte_offset (const_tree decl) { - double_int object_offset_in_bits; - double_int object_offset_in_bytes; - double_int bitpos_int; + addr_wide_int object_offset_in_bits; + addr_wide_int object_offset_in_bytes; + addr_wide_int bitpos_int; if (TREE_CODE (decl) == ERROR_MARK) return 0; @@ -14526,21 +14693,21 @@ field_byte_offset (const_tree decl) if (TREE_CODE (bit_position (decl)) != INTEGER_CST) return 0; - bitpos_int = tree_to_double_int (bit_position (decl)); + bitpos_int = bit_position (decl); #ifdef PCC_BITFIELD_TYPE_MATTERS if (PCC_BITFIELD_TYPE_MATTERS) { tree type; tree field_size_tree; - double_int deepest_bitpos; - double_int field_size_in_bits; + addr_wide_int deepest_bitpos; + addr_wide_int field_size_in_bits; unsigned int type_align_in_bits; unsigned int decl_align_in_bits; - double_int type_size_in_bits; + addr_wide_int type_size_in_bits; type = field_type (decl); - type_size_in_bits = double_int_type_size_in_bits (type); + type_size_in_bits = wide_int_type_size_in_bits (type); type_align_in_bits = simple_type_align_in_bits (type); field_size_tree = DECL_SIZE (decl); @@ -14552,7 +14719,7 @@ field_byte_offset (const_tree decl) /* If the size of the field is not constant, use the type size. */ if (TREE_CODE (field_size_tree) == INTEGER_CST) - field_size_in_bits = tree_to_double_int (field_size_tree); + field_size_in_bits = field_size_tree; else field_size_in_bits = type_size_in_bits; @@ -14616,7 +14783,7 @@ field_byte_offset (const_tree decl) object_offset_in_bits = round_up_to_align (object_offset_in_bits, type_align_in_bits); - if (object_offset_in_bits.ugt (bitpos_int)) + if (object_offset_in_bits.gtu_p (bitpos_int)) { object_offset_in_bits = deepest_bitpos - type_size_in_bits; @@ -14630,8 +14797,7 @@ field_byte_offset (const_tree decl) object_offset_in_bits = bitpos_int; object_offset_in_bytes - = object_offset_in_bits.div (double_int::from_uhwi (BITS_PER_UNIT), - true, TRUNC_DIV_EXPR); + = object_offset_in_bits.udiv_trunc (BITS_PER_UNIT); return object_offset_in_bytes.to_shwi (); } @@ -14729,7 +14895,7 @@ add_data_member_location_attribute (dw_die_ref die, tree decl) add_loc_descr (&loc_descr, tmp); /* Calculate the address of the offset. */ - offset = tree_low_cst (BINFO_VPTR_FIELD (decl), 0); + offset = tree_to_shwi (BINFO_VPTR_FIELD (decl)); gcc_assert (offset < 0); tmp = int_loc_descriptor (-offset); @@ -14746,7 +14912,7 @@ add_data_member_location_attribute (dw_die_ref die, tree decl) add_loc_descr (&loc_descr, tmp); } else - offset = tree_low_cst (BINFO_OFFSET (decl), 0); + offset = tree_to_shwi (BINFO_OFFSET (decl)); } else offset = field_byte_offset (decl); @@ -14807,22 +14973,27 @@ extract_int (const unsigned char *src, unsigned int size) return val; } -/* Writes double_int values to dw_vec_const array. */ +/* Writes wide_int values to dw_vec_const array. 
*/ static void -insert_double (double_int val, unsigned char *dest) +insert_wide_int (const wide_int &val, unsigned char *dest) { - unsigned char *p0 = dest; - unsigned char *p1 = dest + sizeof (HOST_WIDE_INT); + int i; if (WORDS_BIG_ENDIAN) - { - p0 = p1; - p1 = dest; - } - - insert_int ((HOST_WIDE_INT) val.low, sizeof (HOST_WIDE_INT), p0); - insert_int ((HOST_WIDE_INT) val.high, sizeof (HOST_WIDE_INT), p1); + for (i = (int)get_full_len (val) - 1; i >= 0; i--) + { + insert_int ((HOST_WIDE_INT) val.elt (i), + sizeof (HOST_WIDE_INT), dest); + dest += sizeof (HOST_WIDE_INT); + } + else + for (i = 0; i < (int)get_full_len (val); i++) + { + insert_int ((HOST_WIDE_INT) val.elt (i), + sizeof (HOST_WIDE_INT), dest); + dest += sizeof (HOST_WIDE_INT); + } } /* Writes floating point values to dw_vec_const array. */ @@ -14867,6 +15038,11 @@ add_const_value_attribute (dw_die_ref die, rtx rtl) } return true; + case CONST_WIDE_INT: + add_AT_wide (die, DW_AT_const_value, + std::make_pair (rtl, GET_MODE (rtl))); + return true; + case CONST_DOUBLE: /* Note that a CONST_DOUBLE rtx could represent either an integer or a floating-point constant. A CONST_DOUBLE is used whenever the @@ -14875,7 +15051,10 @@ add_const_value_attribute (dw_die_ref die, rtx rtl) { enum machine_mode mode = GET_MODE (rtl); - if (SCALAR_FLOAT_MODE_P (mode)) + if (TARGET_SUPPORTS_WIDE_INT == 0 && !SCALAR_FLOAT_MODE_P (mode)) + add_AT_double (die, DW_AT_const_value, + CONST_DOUBLE_HIGH (rtl), CONST_DOUBLE_LOW (rtl)); + else { unsigned int length = GET_MODE_SIZE (mode); unsigned char *array = (unsigned char *) ggc_alloc_atomic (length); @@ -14883,9 +15062,6 @@ add_const_value_attribute (dw_die_ref die, rtx rtl) insert_float (rtl, array); add_AT_vec (die, DW_AT_const_value, length / 4, 4, array); } - else - add_AT_double (die, DW_AT_const_value, - CONST_DOUBLE_HIGH (rtl), CONST_DOUBLE_LOW (rtl)); } return true; @@ -14898,6 +15074,7 @@ add_const_value_attribute (dw_die_ref die, rtx rtl) (length * elt_size); unsigned int i; unsigned char *p; + enum machine_mode imode = GET_MODE_INNER (mode); switch (GET_MODE_CLASS (mode)) { @@ -14905,15 +15082,8 @@ add_const_value_attribute (dw_die_ref die, rtx rtl) for (i = 0, p = array; i < length; i++, p += elt_size) { rtx elt = CONST_VECTOR_ELT (rtl, i); - double_int val = rtx_to_double_int (elt); - - if (elt_size <= sizeof (HOST_WIDE_INT)) - insert_int (val.to_shwi (), elt_size, p); - else - { - gcc_assert (elt_size == 2 * sizeof (HOST_WIDE_INT)); - insert_double (val, p); - } + wide_int val = std::make_pair (elt, imode); + insert_wide_int (val, p); } break; @@ -15400,9 +15570,9 @@ fortran_common (tree decl, HOST_WIDE_INT *value) *value = 0; if (offset != NULL) { - if (!host_integerp (offset, 0)) + if (!tree_fits_shwi_p (offset)) return NULL_TREE; - *value = tree_low_cst (offset, 0); + *value = tree_to_shwi (offset); } if (bitpos != 0) *value += bitpos / BITS_PER_UNIT; @@ -15568,14 +15738,14 @@ native_encode_initializer (tree init, unsigned char *array, int size) constructor_elt *ce; if (TYPE_DOMAIN (type) == NULL_TREE - || !host_integerp (TYPE_MIN_VALUE (TYPE_DOMAIN (type)), 0)) + || !tree_fits_shwi_p (TYPE_MIN_VALUE (TYPE_DOMAIN (type)))) return false; fieldsize = int_size_in_bytes (TREE_TYPE (type)); if (fieldsize <= 0) return false; - min_index = tree_low_cst (TYPE_MIN_VALUE (TYPE_DOMAIN (type)), 0); + min_index = tree_to_shwi (TYPE_MIN_VALUE (TYPE_DOMAIN (type))); memset (array, '\0', size); FOR_EACH_VEC_SAFE_ELT (CONSTRUCTOR_ELTS (init), cnt, ce) { @@ -15583,10 +15753,10 @@ 
native_encode_initializer (tree init, unsigned char *array, int size) tree index = ce->index; int pos = curpos; if (index && TREE_CODE (index) == RANGE_EXPR) - pos = (tree_low_cst (TREE_OPERAND (index, 0), 0) - min_index) + pos = (tree_to_shwi (TREE_OPERAND (index, 0)) - min_index) * fieldsize; else if (index) - pos = (tree_low_cst (index, 0) - min_index) * fieldsize; + pos = (tree_to_shwi (index) - min_index) * fieldsize; if (val) { @@ -15597,8 +15767,8 @@ native_encode_initializer (tree init, unsigned char *array, int size) curpos = pos + fieldsize; if (index && TREE_CODE (index) == RANGE_EXPR) { - int count = tree_low_cst (TREE_OPERAND (index, 1), 0) - - tree_low_cst (TREE_OPERAND (index, 0), 0); + int count = tree_to_shwi (TREE_OPERAND (index, 1)) + - tree_to_shwi (TREE_OPERAND (index, 0)); while (count-- > 0) { if (val) @@ -15642,9 +15812,9 @@ native_encode_initializer (tree init, unsigned char *array, int size) && ! TYPE_MAX_VALUE (TYPE_DOMAIN (TREE_TYPE (field)))) return false; else if (DECL_SIZE_UNIT (field) == NULL_TREE - || !host_integerp (DECL_SIZE_UNIT (field), 0)) + || !tree_fits_shwi_p (DECL_SIZE_UNIT (field))) return false; - fieldsize = tree_low_cst (DECL_SIZE_UNIT (field), 0); + fieldsize = tree_to_shwi (DECL_SIZE_UNIT (field)); pos = int_byte_position (field); gcc_assert (pos + fieldsize <= size); if (val @@ -16034,9 +16204,9 @@ add_bound_info (dw_die_ref subrange_die, enum dwarf_attribute bound_attr, tree b /* Use the default if possible. */ if (bound_attr == DW_AT_lower_bound - && host_integerp (bound, 0) + && tree_fits_shwi_p (bound) && (dflt = lower_bound_default ()) != -1 - && tree_low_cst (bound, 0) == dflt) + && tree_to_shwi (bound) == dflt) ; /* Otherwise represent the bound as an unsigned value with the @@ -16044,18 +16214,14 @@ add_bound_info (dw_die_ref subrange_die, enum dwarf_attribute bound_attr, tree b type will be necessary to re-interpret it unambiguously. */ else if (prec < HOST_BITS_PER_WIDE_INT) { - unsigned HOST_WIDE_INT mask - = ((unsigned HOST_WIDE_INT) 1 << prec) - 1; add_AT_unsigned (subrange_die, bound_attr, - TREE_INT_CST_LOW (bound) & mask); + zext_hwi (tree_to_hwi (bound), prec)); } - else if (prec == HOST_BITS_PER_WIDE_INT - || TREE_INT_CST_HIGH (bound) == 0) - add_AT_unsigned (subrange_die, bound_attr, - TREE_INT_CST_LOW (bound)); + else if (prec == HOST_BITS_PER_WIDE_INT + || (cst_fits_uhwi_p (bound) && wide_int (bound).ges_p (0))) + add_AT_unsigned (subrange_die, bound_attr, tree_to_hwi (bound)); else - add_AT_double (subrange_die, bound_attr, TREE_INT_CST_HIGH (bound), - TREE_INT_CST_LOW (bound)); + add_AT_wide (subrange_die, bound_attr, wide_int (bound)); } break; @@ -16265,8 +16431,8 @@ add_bit_offset_attribute (dw_die_ref die, tree decl) /* We can't yet handle bit-fields whose offsets are variable, so if we encounter such things, just return without generating any attribute whatsoever. Likewise for variable or too large size. */ - if (! host_integerp (bit_position (decl), 0) - || ! host_integerp (DECL_SIZE (decl), 1)) + if (! tree_fits_shwi_p (bit_position (decl)) + || ! tree_fits_uhwi_p (DECL_SIZE (decl))) return; bitpos_int = int_bit_position (decl); @@ -16281,7 +16447,7 @@ add_bit_offset_attribute (dw_die_ref die, tree decl) if (! 
BYTES_BIG_ENDIAN) { - highest_order_field_bit_offset += tree_low_cst (DECL_SIZE (decl), 0); + highest_order_field_bit_offset += tree_to_shwi (DECL_SIZE (decl)); highest_order_object_bit_offset += simple_type_size_in_bits (type); } @@ -16306,8 +16472,8 @@ add_bit_size_attribute (dw_die_ref die, tree decl) gcc_assert (TREE_CODE (decl) == FIELD_DECL && DECL_BIT_FIELD_TYPE (decl)); - if (host_integerp (DECL_SIZE (decl), 1)) - add_AT_unsigned (die, DW_AT_bit_size, tree_low_cst (DECL_SIZE (decl), 1)); + if (tree_fits_uhwi_p (DECL_SIZE (decl))) + add_AT_unsigned (die, DW_AT_bit_size, tree_to_uhwi (DECL_SIZE (decl))); } /* If the compiled language is ANSI C, then add a 'prototyped' @@ -16376,10 +16542,10 @@ add_pure_or_virtual_attribute (dw_die_ref die, tree func_decl) { add_AT_unsigned (die, DW_AT_virtuality, DW_VIRTUALITY_virtual); - if (host_integerp (DECL_VINDEX (func_decl), 0)) + if (tree_fits_shwi_p (DECL_VINDEX (func_decl))) add_AT_loc (die, DW_AT_vtable_elem_location, new_loc_descr (DW_OP_constu, - tree_low_cst (DECL_VINDEX (func_decl), 0), + tree_to_shwi (DECL_VINDEX (func_decl)), 0)); /* GNU extension: Record what type this method came from originally. */ @@ -16926,8 +17092,8 @@ descr_info_loc (tree val, tree base_decl) case VAR_DECL: return loc_descriptor_from_tree (val, 0); case INTEGER_CST: - if (host_integerp (val, 0)) - return int_loc_descriptor (tree_low_cst (val, 0)); + if (tree_fits_shwi_p (val)) + return int_loc_descriptor (tree_to_shwi (val)); break; case INDIRECT_REF: size = int_size_in_bytes (TREE_TYPE (val)); @@ -16943,14 +17109,14 @@ descr_info_loc (tree val, tree base_decl) return loc; case POINTER_PLUS_EXPR: case PLUS_EXPR: - if (host_integerp (TREE_OPERAND (val, 1), 1) - && (unsigned HOST_WIDE_INT) tree_low_cst (TREE_OPERAND (val, 1), 1) + if (tree_fits_uhwi_p (TREE_OPERAND (val, 1)) + && (unsigned HOST_WIDE_INT) tree_to_uhwi (TREE_OPERAND (val, 1)) < 16384) { loc = descr_info_loc (TREE_OPERAND (val, 0), base_decl); if (!loc) break; - loc_descr_plus_const (&loc, tree_low_cst (TREE_OPERAND (val, 1), 0)); + loc_descr_plus_const (&loc, tree_to_shwi (TREE_OPERAND (val, 1))); } else { @@ -16990,9 +17156,9 @@ add_descr_info_field (dw_die_ref die, enum dwarf_attribute attr, { dw_loc_descr_ref loc; - if (host_integerp (val, 0)) + if (tree_fits_shwi_p (val)) { - add_AT_unsigned (die, attr, tree_low_cst (val, 0)); + add_AT_unsigned (die, attr, tree_to_shwi (val)); return; } @@ -17043,9 +17209,9 @@ gen_descr_array_type_die (tree type, struct array_descr_info *info, /* If it is the default value, omit it. */ int dflt; - if (host_integerp (info->dimen[dim].lower_bound, 0) + if (tree_fits_shwi_p (info->dimen[dim].lower_bound) && (dflt = lower_bound_default ()) != -1 - && tree_low_cst (info->dimen[dim].lower_bound, 0) == dflt) + && tree_to_shwi (info->dimen[dim].lower_bound) == dflt) ; else add_descr_info_field (subrange_die, DW_AT_lower_bound, @@ -17192,9 +17358,9 @@ gen_enumeration_type_die (tree type, dw_die_ref context_die) if (TREE_CODE (value) == CONST_DECL) value = DECL_INITIAL (value); - if (host_integerp (value, TYPE_UNSIGNED (TREE_TYPE (value))) + if (tree_fits_hwi_p (value) && (simple_type_size_in_bits (TREE_TYPE (value)) - <= HOST_BITS_PER_WIDE_INT || host_integerp (value, 0))) + <= HOST_BITS_PER_WIDE_INT || tree_fits_shwi_p (value))) /* DWARF2 does not provide a way of indicating whether or not enumeration constants are signed or unsigned. 
GDB always assumes the values are signed, so we output all @@ -17207,12 +17373,11 @@ gen_enumeration_type_die (tree type, dw_die_ref context_die) This should be re-worked to use correct signed/unsigned int/double tags for all cases, instead of always treating as signed. */ - add_AT_int (enum_die, DW_AT_const_value, TREE_INT_CST_LOW (value)); + add_AT_int (enum_die, DW_AT_const_value, tree_to_hwi (value)); else /* Enumeration constants may be wider than HOST_WIDE_INT. Handle that here. */ - add_AT_double (enum_die, DW_AT_const_value, - TREE_INT_CST_HIGH (value), TREE_INT_CST_LOW (value)); + add_AT_wide (enum_die, DW_AT_const_value, wide_int (value)); } add_gnat_descriptive_type_attribute (type_die, type, context_die); @@ -22984,9 +23149,9 @@ optimize_location_into_implicit_ptr (dw_die_ref die, tree decl) we can add DW_OP_GNU_implicit_pointer. */ STRIP_NOPS (init); if (TREE_CODE (init) == POINTER_PLUS_EXPR - && host_integerp (TREE_OPERAND (init, 1), 0)) + && tree_fits_shwi_p (TREE_OPERAND (init, 1))) { - offset = tree_low_cst (TREE_OPERAND (init, 1), 0); + offset = tree_to_shwi (TREE_OPERAND (init, 1)); init = TREE_OPERAND (init, 0); STRIP_NOPS (init); } @@ -23274,6 +23439,9 @@ hash_loc_operands (dw_loc_descr_ref loc, hashval_t hash) hash = iterative_hash_object (val2->v.val_double.low, hash); hash = iterative_hash_object (val2->v.val_double.high, hash); break; + case dw_val_class_wide_int: + hash = iterative_hash_object (*val2->v.val_wide, hash); + break; case dw_val_class_addr: hash = iterative_hash_rtx (val2->v.val_addr, hash); break; @@ -23363,6 +23531,9 @@ hash_loc_operands (dw_loc_descr_ref loc, hashval_t hash) hash = iterative_hash_object (val2->v.val_double.low, hash); hash = iterative_hash_object (val2->v.val_double.high, hash); break; + case dw_val_class_wide_int: + hash = iterative_hash_object (*val2->v.val_wide, hash); + break; default: gcc_unreachable (); } @@ -23511,6 +23682,8 @@ compare_loc_operands (dw_loc_descr_ref x, dw_loc_descr_ref y) case dw_val_class_const_double: return valx2->v.val_double.low == valy2->v.val_double.low && valx2->v.val_double.high == valy2->v.val_double.high; + case dw_val_class_wide_int: + return *valx2->v.val_wide == *valy2->v.val_wide; case dw_val_class_addr: return rtx_equal_p (valx2->v.val_addr, valy2->v.val_addr); default: @@ -23554,6 +23727,8 @@ compare_loc_operands (dw_loc_descr_ref x, dw_loc_descr_ref y) case dw_val_class_const_double: return valx2->v.val_double.low == valy2->v.val_double.low && valx2->v.val_double.high == valy2->v.val_double.high; + case dw_val_class_wide_int: + return *valx2->v.val_wide == *valy2->v.val_wide; default: gcc_unreachable (); } diff --git a/gcc/dwarf2out.h b/gcc/dwarf2out.h index ad03a34021d..d6af85befb3 100644 --- a/gcc/dwarf2out.h +++ b/gcc/dwarf2out.h @@ -21,6 +21,7 @@ along with GCC; see the file COPYING3. If not see #define GCC_DWARF2OUT_H 1 #include "dwarf2.h" /* ??? Remove this once only used by dwarf2foo.c. 
*/ +#include "wide-int.h" typedef struct die_struct *dw_die_ref; typedef const struct die_struct *const_dw_die_ref; @@ -29,6 +30,7 @@ typedef struct dw_val_struct *dw_val_ref; typedef struct dw_cfi_struct *dw_cfi_ref; typedef struct dw_loc_descr_struct *dw_loc_descr_ref; typedef struct dw_loc_list_struct *dw_loc_list_ref; +typedef struct wide_int *wide_int_ref; /* Call frames are described using a sequence of Call Frame @@ -139,6 +141,7 @@ enum dw_val_class dw_val_class_const, dw_val_class_unsigned_const, dw_val_class_const_double, + dw_val_class_wide_int, dw_val_class_vec, dw_val_class_flag, dw_val_class_die_ref, @@ -180,6 +183,7 @@ typedef struct GTY(()) dw_val_struct { HOST_WIDE_INT GTY ((default)) val_int; unsigned HOST_WIDE_INT GTY ((tag ("dw_val_class_unsigned_const"))) val_unsigned; double_int GTY ((tag ("dw_val_class_const_double"))) val_double; + wide_int_ref GTY ((tag ("dw_val_class_wide_int"))) val_wide; dw_vec_const GTY ((tag ("dw_val_class_vec"))) val_vec; struct dw_val_die_union { diff --git a/gcc/emit-rtl.c b/gcc/emit-rtl.c index 8a7b8a563d5..ce403475984 100644 --- a/gcc/emit-rtl.c +++ b/gcc/emit-rtl.c @@ -124,6 +124,9 @@ rtx cc0_rtx; static GTY ((if_marked ("ggc_marked_p"), param_is (struct rtx_def))) htab_t const_int_htab; +static GTY ((if_marked ("ggc_marked_p"), param_is (struct rtx_def))) + htab_t const_wide_int_htab; + /* A hash table storing memory attribute structures. */ static GTY ((if_marked ("ggc_marked_p"), param_is (struct mem_attrs))) htab_t mem_attrs_htab; @@ -149,6 +152,11 @@ static void set_used_decls (tree); static void mark_label_nuses (rtx); static hashval_t const_int_htab_hash (const void *); static int const_int_htab_eq (const void *, const void *); +#if TARGET_SUPPORTS_WIDE_INT +static hashval_t const_wide_int_htab_hash (const void *); +static int const_wide_int_htab_eq (const void *, const void *); +static rtx lookup_const_wide_int (rtx); +#endif static hashval_t const_double_htab_hash (const void *); static int const_double_htab_eq (const void *, const void *); static rtx lookup_const_double (rtx); @@ -185,6 +193,43 @@ const_int_htab_eq (const void *x, const void *y) return (INTVAL ((const_rtx) x) == *((const HOST_WIDE_INT *) y)); } +#if TARGET_SUPPORTS_WIDE_INT +/* Returns a hash code for X (which is really a CONST_WIDE_INT). */ + +static hashval_t +const_wide_int_htab_hash (const void *x) +{ + int i; + HOST_WIDE_INT hash = 0; + const_rtx xr = (const_rtx) x; + + for (i = 0; i < CONST_WIDE_INT_NUNITS (xr); i++) + hash += CONST_WIDE_INT_ELT (xr, i); + + return (hashval_t) hash; +} + +/* Returns nonzero if the value represented by X (which is really a + CONST_WIDE_INT) is the same as that given by Y (which is really a + CONST_WIDE_INT). */ + +static int +const_wide_int_htab_eq (const void *x, const void *y) +{ + int i; + const_rtx xr = (const_rtx) x; + const_rtx yr = (const_rtx) y; + if (CONST_WIDE_INT_NUNITS (xr) != CONST_WIDE_INT_NUNITS (yr)) + return 0; + + for (i = 0; i < CONST_WIDE_INT_NUNITS (xr); i++) + if (CONST_WIDE_INT_ELT (xr, i) != CONST_WIDE_INT_ELT (yr, i)) + return 0; + + return 1; +} +#endif + /* Returns a hash code for X (which is really a CONST_DOUBLE).
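The CONST_WIDE_INT tables above use a simpler scheme: the hash is the wrapping sum of the HOST_WIDE_INT elements, and equality is element-wise. A standalone model of that scheme (plain C, with a hypothetical wide_value type standing in for the rtx representation; an illustration, not the GCC API):

    #include <stddef.h>
    #include <stdint.h>

    typedef struct { size_t nunits; int64_t elt[4]; } wide_value;

    static uint32_t
    wide_value_hash (const wide_value *v)
    {
      uint64_t hash = 0;   /* unsigned sum, so wraparound is defined */
      for (size_t i = 0; i < v->nunits; i++)
        hash += (uint64_t) v->elt[i];
      return (uint32_t) hash;
    }

    static int
    wide_value_eq (const wide_value *a, const wide_value *b)
    {
      if (a->nunits != b->nunits)
        return 0;
      for (size_t i = 0; i < a->nunits; i++)
        if (a->elt[i] != b->elt[i])
          return 0;
      return 1;
    }

Values of different compact lengths never compare equal, mirroring the CONST_WIDE_INT_NUNITS check above.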
*/ static hashval_t const_double_htab_hash (const void *x) @@ -192,7 +237,7 @@ const_double_htab_hash (const void *x) const_rtx const value = (const_rtx) x; hashval_t h; - if (GET_MODE (value) == VOIDmode) + if (TARGET_SUPPORTS_WIDE_INT == 0 && GET_MODE (value) == VOIDmode) h = CONST_DOUBLE_LOW (value) ^ CONST_DOUBLE_HIGH (value); else { @@ -212,7 +257,7 @@ const_double_htab_eq (const void *x, const void *y) if (GET_MODE (a) != GET_MODE (b)) return 0; - if (GET_MODE (a) == VOIDmode) + if (TARGET_SUPPORTS_WIDE_INT == 0 && GET_MODE (a) == VOIDmode) return (CONST_DOUBLE_LOW (a) == CONST_DOUBLE_LOW (b) && CONST_DOUBLE_HIGH (a) == CONST_DOUBLE_HIGH (b)); else @@ -478,6 +523,7 @@ const_fixed_from_fixed_value (FIXED_VALUE_TYPE value, enum machine_mode mode) return lookup_const_fixed (fixed); } +#if TARGET_SUPPORTS_WIDE_INT == 0 /* Constructs double_int from rtx CST. */ double_int @@ -497,17 +543,70 @@ rtx_to_double_int (const_rtx cst) return r; } +#endif + +#if TARGET_SUPPORTS_WIDE_INT +/* Determine whether WINT, a CONST_WIDE_INT, already exists in the + hash table. If so, return its counterpart; otherwise add it to + the hash table and return it. */ +static rtx +lookup_const_wide_int (rtx wint) +{ + void **slot = htab_find_slot (const_wide_int_htab, wint, INSERT); + if (*slot == 0) + *slot = wint; -/* Return a CONST_DOUBLE or CONST_INT for a value specified as - a double_int. */ + return (rtx) *slot; +} +#endif +/* V contains a wide_int. Produce a CONST_INT (if one word is + enough), a CONST_WIDE_INT (if TARGET_SUPPORTS_WIDE_INT is + defined) or a CONST_DOUBLE (if it is not), based on the number of + HOST_WIDE_INTs that are necessary to represent the value + in compact form. */ rtx -immed_double_int_const (double_int i, enum machine_mode mode) +immed_wide_int_const (const wide_int &v, enum machine_mode mode) { - return immed_double_const (i.low, i.high, mode); + unsigned int len = v.get_len (); + unsigned int prec = GET_MODE_PRECISION (mode); + + /* Allow truncation but not extension since we do not know if the + number is signed or unsigned. */ + gcc_assert (prec <= v.get_precision ()); + + if (len < 2 || prec <= HOST_BITS_PER_WIDE_INT) + return gen_int_mode (v.elt (0), mode); + +#if TARGET_SUPPORTS_WIDE_INT + { + unsigned int i; + rtx value; + unsigned int blocks_needed + = (prec + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT; + + if (len > blocks_needed) + len = blocks_needed; + + value = const_wide_int_alloc (len); + + /* It is so tempting to just put the mode in here. Must control + myself ... */ + PUT_MODE (value, VOIDmode); + HWI_PUT_NUM_ELEM (CONST_WIDE_INT_VEC (value), len); + + for (i = 0; i < len; i++) + CONST_WIDE_INT_ELT (value, i) = v.elt (i); + + return lookup_const_wide_int (value); + } +#else + return immed_double_const (v.elt (0), v.elt (1), mode); +#endif } +#if TARGET_SUPPORTS_WIDE_INT == 0 /* Return a CONST_DOUBLE or CONST_INT for a value specified as a pair of ints: I0 is the low-order word and I1 is the high-order word.
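(For the wide-int path, immed_wide_int_const above picks the representation by a simple rule; a standalone model with 64-bit words and hypothetical names, an illustration only:

    enum rep { REP_CONST_INT, REP_CONST_WIDE_INT };

    /* LEN is the compact length of the value and PREC the mode's
       precision in bits; the trimmed length goes to *OUT_LEN.  */
    static enum rep
    choose_rep (unsigned int len, unsigned int prec, unsigned int *out_len)
    {
      unsigned int blocks_needed = (prec + 63) / 64;
      if (len < 2 || prec <= 64)
        {
          *out_len = 1;
          return REP_CONST_INT;
        }
      *out_len = len < blocks_needed ? len : blocks_needed;
      return REP_CONST_WIDE_INT;   /* CONST_DOUBLE without
                                      TARGET_SUPPORTS_WIDE_INT */
    }

So a 128-bit value of 5 still becomes a CONST_INT, while 2^64 needs two elements.)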
For values that are larger than HOST_BITS_PER_DOUBLE_INT, the @@ -559,6 +658,7 @@ immed_double_const (HOST_WIDE_INT i0, HOST_WIDE_INT i1, enum machine_mode mode) return lookup_const_double (value); } +#endif rtx gen_rtx_REG (enum machine_mode mode, unsigned int regno) @@ -1540,12 +1640,12 @@ get_mem_align_offset (rtx mem, unsigned int align) tree bit_offset = DECL_FIELD_BIT_OFFSET (field); if (!byte_offset - || !host_integerp (byte_offset, 1) - || !host_integerp (bit_offset, 1)) + || !tree_fits_uhwi_p (byte_offset) + || !tree_fits_uhwi_p (bit_offset)) return -1; - offset += tree_low_cst (byte_offset, 1); - offset += tree_low_cst (bit_offset, 1) / BITS_PER_UNIT; + offset += tree_to_uhwi (byte_offset); + offset += tree_to_uhwi (bit_offset) / BITS_PER_UNIT; if (inner == NULL_TREE) { @@ -1769,10 +1869,10 @@ set_mem_attributes_minus_bitpos (rtx ref, tree t, int objectp, { attrs.expr = t2; attrs.offset_known_p = false; - if (host_integerp (off_tree, 1)) + if (tree_fits_uhwi_p (off_tree)) { attrs.offset_known_p = true; - attrs.offset = tree_low_cst (off_tree, 1); + attrs.offset = tree_to_uhwi (off_tree); apply_bitpos = bitpos; } } @@ -1799,10 +1899,10 @@ set_mem_attributes_minus_bitpos (rtx ref, tree t, int objectp, attrs.align = MAX (attrs.align, obj_align); } - if (host_integerp (new_size, 1)) + if (tree_fits_uhwi_p (new_size)) { attrs.size_known_p = true; - attrs.size = tree_low_cst (new_size, 1); + attrs.size = tree_to_uhwi (new_size); } /* If we modified OFFSET based on T, then subtract the outstanding @@ -2272,15 +2372,15 @@ widen_memory_access (rtx memref, enum machine_mode mode, HOST_WIDE_INT offset) && attrs.offset >= 0) break; - if (! host_integerp (offset, 1)) + if (! tree_fits_uhwi_p (offset)) { attrs.expr = NULL_TREE; break; } attrs.expr = TREE_OPERAND (attrs.expr, 0); - attrs.offset += tree_low_cst (offset, 1); - attrs.offset += (tree_low_cst (DECL_FIELD_BIT_OFFSET (field), 1) + attrs.offset += tree_to_uhwi (offset); + attrs.offset += (tree_to_uhwi (DECL_FIELD_BIT_OFFSET (field)) / BITS_PER_UNIT); } /* Similarly for the decl. */ @@ -5647,11 +5747,15 @@ init_emit_once (void) enum machine_mode mode; enum machine_mode double_mode; - /* Initialize the CONST_INT, CONST_DOUBLE, CONST_FIXED, and memory attribute - hash tables. */ + /* Initialize the CONST_INT, CONST_WIDE_INT, CONST_DOUBLE, + CONST_FIXED, and memory attribute hash tables. */ const_int_htab = htab_create_ggc (37, const_int_htab_hash, const_int_htab_eq, NULL); +#if TARGET_SUPPORTS_WIDE_INT + const_wide_int_htab = htab_create_ggc (37, const_wide_int_htab_hash, + const_wide_int_htab_eq, NULL); +#endif const_double_htab = htab_create_ggc (37, const_double_htab_hash, const_double_htab_eq, NULL); @@ -5715,9 +5819,9 @@ init_emit_once (void) else const_true_rtx = gen_rtx_CONST_INT (VOIDmode, STORE_FLAG_VALUE); - REAL_VALUE_FROM_INT (dconst0, 0, 0, double_mode); - REAL_VALUE_FROM_INT (dconst1, 1, 0, double_mode); - REAL_VALUE_FROM_INT (dconst2, 2, 0, double_mode); + REAL_VALUE_FROM_INT (dconst0, 0, double_mode); + REAL_VALUE_FROM_INT (dconst1, 1, double_mode); + REAL_VALUE_FROM_INT (dconst2, 2, double_mode); dconstm1 = dconst1; dconstm1.sign = 1; diff --git a/gcc/except.c b/gcc/except.c index 2d41d7b332d..c1d33c647aa 100644 --- a/gcc/except.c +++ b/gcc/except.c @@ -313,20 +313,20 @@ init_eh (void) /* Cache the interesting field offsets so that we have easy access from rtl. 
*/ sjlj_fc_call_site_ofs - = (tree_low_cst (DECL_FIELD_OFFSET (f_cs), 1) - + tree_low_cst (DECL_FIELD_BIT_OFFSET (f_cs), 1) / BITS_PER_UNIT); + = (tree_to_uhwi (DECL_FIELD_OFFSET (f_cs)) + + tree_to_uhwi (DECL_FIELD_BIT_OFFSET (f_cs)) / BITS_PER_UNIT); sjlj_fc_data_ofs - = (tree_low_cst (DECL_FIELD_OFFSET (f_data), 1) - + tree_low_cst (DECL_FIELD_BIT_OFFSET (f_data), 1) / BITS_PER_UNIT); + = (tree_to_uhwi (DECL_FIELD_OFFSET (f_data)) + + tree_to_uhwi (DECL_FIELD_BIT_OFFSET (f_data)) / BITS_PER_UNIT); sjlj_fc_personality_ofs - = (tree_low_cst (DECL_FIELD_OFFSET (f_per), 1) - + tree_low_cst (DECL_FIELD_BIT_OFFSET (f_per), 1) / BITS_PER_UNIT); + = (tree_to_uhwi (DECL_FIELD_OFFSET (f_per)) + + tree_to_uhwi (DECL_FIELD_BIT_OFFSET (f_per)) / BITS_PER_UNIT); sjlj_fc_lsda_ofs - = (tree_low_cst (DECL_FIELD_OFFSET (f_lsda), 1) - + tree_low_cst (DECL_FIELD_BIT_OFFSET (f_lsda), 1) / BITS_PER_UNIT); + = (tree_to_uhwi (DECL_FIELD_OFFSET (f_lsda)) + + tree_to_uhwi (DECL_FIELD_BIT_OFFSET (f_lsda)) / BITS_PER_UNIT); sjlj_fc_jbuf_ofs - = (tree_low_cst (DECL_FIELD_OFFSET (f_jbuf), 1) - + tree_low_cst (DECL_FIELD_BIT_OFFSET (f_jbuf), 1) / BITS_PER_UNIT); + = (tree_to_uhwi (DECL_FIELD_OFFSET (f_jbuf)) + + tree_to_uhwi (DECL_FIELD_BIT_OFFSET (f_jbuf)) / BITS_PER_UNIT); } } @@ -2050,8 +2050,8 @@ expand_builtin_eh_common (tree region_nr_t) HOST_WIDE_INT region_nr; eh_region region; - gcc_assert (host_integerp (region_nr_t, 0)); - region_nr = tree_low_cst (region_nr_t, 0); + gcc_assert (tree_fits_shwi_p (region_nr_t)); + region_nr = tree_to_shwi (region_nr_t); region = (*cfun->eh->region_array)[region_nr]; @@ -2145,7 +2145,7 @@ expand_builtin_eh_return_data_regno (tree exp) return constm1_rtx; } - iwhich = tree_low_cst (which, 1); + iwhich = tree_to_uhwi (which); iwhich = EH_RETURN_DATA_REGNO (iwhich); if (iwhich == INVALID_REGNUM) return constm1_rtx; @@ -2381,7 +2381,7 @@ collect_one_action_chain (action_hash_type ar_hash, eh_region region) { /* Retrieve the filter from the head of the filter list where we have stored it (see assign_filter_values). */ - int filter = TREE_INT_CST_LOW (TREE_VALUE (c->filter_list)); + int filter = tree_to_hwi (TREE_VALUE (c->filter_list)); next = add_action_record (ar_hash, filter, 0); } else @@ -2408,7 +2408,7 @@ collect_one_action_chain (action_hash_type ar_hash, eh_region region) flt_node = c->filter_list; for (; flt_node; flt_node = TREE_CHAIN (flt_node)) { - int filter = TREE_INT_CST_LOW (TREE_VALUE (flt_node)); + int filter = tree_to_hwi (TREE_VALUE (flt_node)); next = add_action_record (ar_hash, filter, next); } } diff --git a/gcc/explow.c b/gcc/explow.c index 7da8bc75f19..aa3d971cae7 100644 --- a/gcc/explow.c +++ b/gcc/explow.c @@ -95,38 +95,8 @@ plus_constant (enum machine_mode mode, rtx x, HOST_WIDE_INT c) switch (code) { - case CONST_INT: - if (GET_MODE_BITSIZE (mode) > HOST_BITS_PER_WIDE_INT) - { - double_int di_x = double_int::from_shwi (INTVAL (x)); - double_int di_c = double_int::from_shwi (c); - - bool overflow; - double_int v = di_x.add_with_sign (di_c, false, &overflow); - if (overflow) - gcc_unreachable (); - - return immed_double_int_const (v, mode); - } - - return gen_int_mode (INTVAL (x) + c, mode); - - case CONST_DOUBLE: - { - double_int di_x = double_int::from_pair (CONST_DOUBLE_HIGH (x), - CONST_DOUBLE_LOW (x)); - double_int di_c = double_int::from_shwi (c); - - bool overflow; - double_int v = di_x.add_with_sign (di_c, false, &overflow); - if (overflow) - /* Sorry, we have no way to represent overflows this wide. 
- To fix, add constant support wider than CONST_DOUBLE. */ - gcc_assert (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_DOUBLE_INT); - - return immed_double_int_const (v, mode); - } - + CASE_CONST_SCALAR_INT: + return immed_wide_int_const (wide_int (std::make_pair (x, mode)) + c, mode); case MEM: /* If this is a reference to the constant pool, try replacing it with a reference to a new constant. If the resulting address isn't @@ -270,10 +240,10 @@ int_expr_size (tree exp) gcc_assert (size); } - if (size == 0 || !host_integerp (size, 0)) + if (size == 0 || !tree_fits_shwi_p (size)) return -1; - return tree_low_cst (size, 0); + return tree_to_shwi (size); } /* Return a copy of X in which all memory references diff --git a/gcc/expmed.c b/gcc/expmed.c index 79f3424961d..6d69e4135b2 100644 --- a/gcc/expmed.c +++ b/gcc/expmed.c @@ -55,7 +55,6 @@ static void store_split_bit_field (rtx, unsigned HOST_WIDE_INT, static rtx extract_fixed_bit_field (enum machine_mode, rtx, unsigned HOST_WIDE_INT, unsigned HOST_WIDE_INT, rtx, int, bool); -static rtx mask_rtx (enum machine_mode, int, int, int); static rtx lshift_value (enum machine_mode, rtx, int, int); static rtx extract_split_bit_field (rtx, unsigned HOST_WIDE_INT, unsigned HOST_WIDE_INT, int); @@ -63,6 +62,19 @@ static void do_cmp_and_jump (rtx, rtx, enum rtx_code, enum machine_mode, rtx); static rtx expand_smod_pow2 (enum machine_mode, rtx, HOST_WIDE_INT); static rtx expand_sdiv_pow2 (enum machine_mode, rtx, HOST_WIDE_INT); +/* Return a constant integer mask value of mode MODE with BITSIZE ones + followed by BITPOS zeros, or the complement of that if COMPLEMENT. + The mask is truncated if necessary to the width of mode MODE. The + mask is zero-extended if BITSIZE+BITPOS is too small for MODE. */ + +static inline rtx +mask_rtx (enum machine_mode mode, int bitpos, int bitsize, bool complement) +{ + return immed_wide_int_const + (wide_int::shifted_mask (bitpos, bitsize, complement, + GET_MODE_PRECISION (mode)), mode); +} + /* Test whether a value is zero of a power of two. */ #define EXACT_POWER_OF_2_OR_ZERO_P(x) \ (((x) & ((x) - (unsigned HOST_WIDE_INT) 1)) == 0) @@ -1840,39 +1852,15 @@ extract_fixed_bit_field (enum machine_mode tmode, rtx op0, return expand_shift (RSHIFT_EXPR, mode, op0, GET_MODE_BITSIZE (mode) - bitsize, target, 0); } - -/* Return a constant integer (CONST_INT or CONST_DOUBLE) mask value - of mode MODE with BITSIZE ones followed by BITPOS zeros, or the - complement of that if COMPLEMENT. The mask is truncated if - necessary to the width of mode MODE. The mask is zero-extended if - BITSIZE+BITPOS is too small for MODE. */ - -static rtx -mask_rtx (enum machine_mode mode, int bitpos, int bitsize, int complement) -{ - double_int mask; - - mask = double_int::mask (bitsize); - mask = mask.llshift (bitpos, HOST_BITS_PER_DOUBLE_INT); - - if (complement) - mask = ~mask; - - return immed_double_int_const (mask, mode); -} - -/* Return a constant integer (CONST_INT or CONST_DOUBLE) rtx with the value - VALUE truncated to BITSIZE bits and then shifted left BITPOS bits. */ +/* Return a constant integer rtx with the value VALUE truncated to + BITSIZE bits and then shifted left BITPOS bits. 
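For example, VALUE 0xab with BITSIZE 4 and BITPOS 8 yields 0xb00. A standalone model of the zext-then-shift chain used below (plain C, hypothetical name, assuming bitpos + bitsize <= 32):

    #include <stdint.h>

    static uint32_t
    lshift_value_model (uint32_t value, int bitpos, int bitsize)
    {
      /* Zero-extend VALUE from BITSIZE bits, then shift left.  */
      uint64_t zext = (uint64_t) value & ((1ull << bitsize) - 1);
      return (uint32_t) (zext << bitpos);
    }

lshift_value_model (0xab, 8, 4) == 0xb00, matching the wide_int zext (bitsize).lshift (bitpos) sequence in the new body.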
*/ static rtx lshift_value (enum machine_mode mode, rtx value, int bitpos, int bitsize) { - double_int val; - - val = double_int::from_uhwi (INTVAL (value)).zext (bitsize); - val = val.llshift (bitpos, HOST_BITS_PER_DOUBLE_INT); - - return immed_double_int_const (val, mode); + return + immed_wide_int_const (wide_int (std::make_pair (value, mode)) + .zext (bitsize).lshift (bitpos), mode); } /* Extract a bit field that is split across two words @@ -3100,37 +3088,41 @@ expand_mult (enum machine_mode mode, rtx op0, rtx op1, rtx target, only if the constant value exactly fits in an `unsigned int' without any truncation. This means that multiplying by negative values does not work; results are off by 2^32 on a 32 bit machine. */ - if (CONST_INT_P (scalar_op1)) { coeff = INTVAL (scalar_op1); is_neg = coeff < 0; } +#if TARGET_SUPPORTS_WIDE_INT + else if (CONST_WIDE_INT_P (scalar_op1)) +#else else if (CONST_DOUBLE_AS_INT_P (scalar_op1)) +#endif { - /* If we are multiplying in DImode, it may still be a win - to try to work with shifts and adds. */ - if (CONST_DOUBLE_HIGH (scalar_op1) == 0 - && (CONST_DOUBLE_LOW (scalar_op1) > 0 - || (CONST_DOUBLE_LOW (scalar_op1) < 0 - && EXACT_POWER_OF_2_OR_ZERO_P - (CONST_DOUBLE_LOW (scalar_op1))))) + int p = GET_MODE_PRECISION (mode); + wide_int val = std::make_pair (scalar_op1, mode); + int shift = val.exact_log2 ().to_shwi (); /* Perfect power of 2. */ + is_neg = false; + if (shift > 0) { - coeff = CONST_DOUBLE_LOW (scalar_op1); - is_neg = false; + /* Do the shift count truncation against the bitsize, not + the precision. See the comment above + wide-int.c:trunc_shift for details. */ + if (SHIFT_COUNT_TRUNCATED) + shift &= GET_MODE_BITSIZE (mode) - 1; + /* We could consider adding just a move of 0 to target + if the shift >= p. */ + if (shift < p) + return expand_shift (LSHIFT_EXPR, mode, op0, + shift, target, unsignedp); + /* Any positive number that fits in a word. */ + coeff = CONST_WIDE_INT_ELT (scalar_op1, 0); } - else if (CONST_DOUBLE_LOW (scalar_op1) == 0) + else if (val.sign_mask () == 0) { - coeff = CONST_DOUBLE_HIGH (scalar_op1); - if (EXACT_POWER_OF_2_OR_ZERO_P (coeff)) - { - int shift = floor_log2 (coeff) + HOST_BITS_PER_WIDE_INT; - if (shift < HOST_BITS_PER_DOUBLE_INT - 1 - || mode_bitsize <= HOST_BITS_PER_DOUBLE_INT) - return expand_shift (LSHIFT_EXPR, mode, op0, - shift, target, unsignedp); - } - goto skip_synth; + /* Any positive number that fits in a word. */ + coeff = CONST_WIDE_INT_ELT (scalar_op1, 0); } else goto skip_synth; @@ -3308,7 +3300,7 @@ choose_multiplier (unsigned HOST_WIDE_INT d, int n, int precision, unsigned HOST_WIDE_INT *multiplier_ptr, int *post_shift_ptr, int *lgup_ptr) { - double_int mhigh, mlow; + wide_int mhigh, mlow; int lgup, post_shift; int pow, pow2; @@ -3320,23 +3312,13 @@ choose_multiplier (unsigned HOST_WIDE_INT d, int n, int precision, pow = n + lgup; pow2 = n + lgup - precision; - /* We could handle this with some effort, but this case is much - better handled directly with a scc insn, so rely on caller using - that.
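For reference, the two magic values computed below are mlow = 2^(N+lgup)/d and mhigh = (2^(N+lgup) + 2^(N+lgup-precision))/d. A standalone sketch with a 128-bit intermediate (hypothetical name; GCC/Clang __int128, d > 1 and n + lgup < 128 assumed):

    #include <stdint.h>

    static void
    magic_values (uint64_t d, int n, int precision,
                  unsigned __int128 *mlow, unsigned __int128 *mhigh)
    {
      int lgup = 64 - __builtin_clzll (d - 1);   /* ceil (log2 (d)) */
      unsigned __int128 val = (unsigned __int128) 1 << (n + lgup);
      *mlow = val / d;
      val |= (unsigned __int128) 1 << (n + lgup - precision);
      *mhigh = val / d;
    }

E.g. d = 3 with n = precision = 32 gives lgup = 2 and mlow = 0x155555555.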
*/ - gcc_assert (pow != HOST_BITS_PER_DOUBLE_INT); - /* mlow = 2^(N + lgup)/d */ - double_int val = double_int_zero.set_bit (pow); - mlow = val.div (double_int::from_uhwi (d), true, TRUNC_DIV_EXPR); + wide_int val = wide_int::set_bit_in_zero (pow, HOST_BITS_PER_DOUBLE_INT); + mlow = val.udiv_trunc (d); /* mhigh = (2^(N + lgup) + 2^(N + lgup - precision))/d */ - val |= double_int_zero.set_bit (pow2); - mhigh = val.div (double_int::from_uhwi (d), true, TRUNC_DIV_EXPR); - - gcc_assert (!mhigh.high || val.high - d < d); - gcc_assert (mhigh.high <= 1 && mlow.high <= 1); - /* Assert that mlow < mhigh. */ - gcc_assert (mlow.ult (mhigh)); + val |= wide_int::set_bit_in_zero(pow2, HOST_BITS_PER_DOUBLE_INT); + mhigh = val.udiv_trunc (d); /* If precision == N, then mlow, mhigh exceed 2^N (but they do not exceed 2^(N+1)). */ @@ -3344,14 +3326,13 @@ choose_multiplier (unsigned HOST_WIDE_INT d, int n, int precision, /* Reduce to lowest terms. */ for (post_shift = lgup; post_shift > 0; post_shift--) { - int shft = HOST_BITS_PER_WIDE_INT - 1; - unsigned HOST_WIDE_INT ml_lo = (mlow.high << shft) | (mlow.low >> 1); - unsigned HOST_WIDE_INT mh_lo = (mhigh.high << shft) | (mhigh.low >> 1); + unsigned HOST_WIDE_INT ml_lo = mlow.extract_to_hwi (1, HOST_BITS_PER_WIDE_INT); + unsigned HOST_WIDE_INT mh_lo = mhigh.extract_to_hwi (1, HOST_BITS_PER_WIDE_INT); if (ml_lo >= mh_lo) break; - mlow = double_int::from_uhwi (ml_lo); - mhigh = double_int::from_uhwi (mh_lo); + mlow = wide_int::from_uhwi (ml_lo, HOST_BITS_PER_DOUBLE_INT); + mhigh = wide_int::from_uhwi (mh_lo, HOST_BITS_PER_DOUBLE_INT); } *post_shift_ptr = post_shift; @@ -3359,13 +3340,13 @@ choose_multiplier (unsigned HOST_WIDE_INT d, int n, int precision, if (n < HOST_BITS_PER_WIDE_INT) { unsigned HOST_WIDE_INT mask = ((unsigned HOST_WIDE_INT) 1 << n) - 1; - *multiplier_ptr = mhigh.low & mask; - return mhigh.low >= mask; + *multiplier_ptr = mhigh.to_uhwi () & mask; + return mhigh.to_uhwi () >= mask; } else { - *multiplier_ptr = mhigh.low; - return mhigh.high; + *multiplier_ptr = mhigh.to_uhwi (); + return mhigh.extract_to_hwi (HOST_BITS_PER_WIDE_INT, 1); } } @@ -3632,9 +3613,10 @@ expmed_mult_highpart (enum machine_mode mode, rtx op0, rtx op1, static rtx expand_smod_pow2 (enum machine_mode mode, rtx op0, HOST_WIDE_INT d) { - unsigned HOST_WIDE_INT masklow, maskhigh; rtx result, temp, shift, label; int logd; + wide_int mask; + int prec = GET_MODE_PRECISION (mode); logd = floor_log2 (d); result = gen_reg_rtx (mode); @@ -3647,8 +3629,8 @@ expand_smod_pow2 (enum machine_mode mode, rtx op0, HOST_WIDE_INT d) mode, 0, -1); if (signmask) { + HOST_WIDE_INT masklow = ((HOST_WIDE_INT) 1 << logd) - 1; signmask = force_reg (mode, signmask); - masklow = ((HOST_WIDE_INT) 1 << logd) - 1; shift = GEN_INT (GET_MODE_BITSIZE (mode) - logd); /* Use the rtx_cost of a LSHIFTRT instruction to determine @@ -3693,19 +3675,11 @@ expand_smod_pow2 (enum machine_mode mode, rtx op0, HOST_WIDE_INT d) modulus. By including the signbit in the operation, many targets can avoid an explicit compare operation in the following comparison against zero. 
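Concretely, for a 32-bit mode and d = 8 (logd = 3) the mask built here is 0x80000007, the low three bits of the modulus plus the sign bit. A minimal model (plain C, hypothetical name, assuming 0 < logd < 31):

    #include <stdint.h>

    static uint32_t
    smod_pow2_mask (int logd)
    {
      /* Low LOGD bits of the modulus, plus the sign bit.  */
      return ((1u << logd) - 1) | (1u << 31);
    }

A negative operand keeps its sign bit through the AND, so the test against zero below still separates the negative path.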
*/ - - masklow = ((HOST_WIDE_INT) 1 << logd) - 1; - if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT) - { - masklow |= (HOST_WIDE_INT) -1 << (GET_MODE_BITSIZE (mode) - 1); - maskhigh = -1; - } - else - maskhigh = (HOST_WIDE_INT) -1 - << (GET_MODE_BITSIZE (mode) - HOST_BITS_PER_WIDE_INT - 1); + mask = wide_int::mask (logd, false, GET_MODE_PRECISION (mode)); + mask = mask.set_bit (prec - 1); temp = expand_binop (mode, and_optab, op0, - immed_double_const (masklow, maskhigh, mode), + immed_wide_int_const (mask, mode), result, 1, OPTAB_LIB_WIDEN); if (temp != result) emit_move_insn (result, temp); @@ -3715,10 +3689,10 @@ expand_smod_pow2 (enum machine_mode mode, rtx op0, HOST_WIDE_INT d) temp = expand_binop (mode, sub_optab, result, const1_rtx, result, 0, OPTAB_LIB_WIDEN); - masklow = (HOST_WIDE_INT) -1 << logd; - maskhigh = -1; + + mask = wide_int::mask (logd, true, GET_MODE_PRECISION (mode)); temp = expand_binop (mode, ior_optab, temp, - immed_double_const (masklow, maskhigh, mode), + immed_wide_int_const (mask, mode), result, 1, OPTAB_LIB_WIDEN); temp = expand_binop (mode, add_optab, temp, const1_rtx, result, 0, OPTAB_LIB_WIDEN); @@ -4957,24 +4931,14 @@ make_tree (tree type, rtx x) switch (GET_CODE (x)) { case CONST_INT: - { - HOST_WIDE_INT hi = 0; - - if (INTVAL (x) < 0 - && !(TYPE_UNSIGNED (type) - && (GET_MODE_BITSIZE (TYPE_MODE (type)) - < HOST_BITS_PER_WIDE_INT))) - hi = -1; - - t = build_int_cst_wide (type, INTVAL (x), hi); - - return t; - } + case CONST_WIDE_INT: + t = wide_int_to_tree (type, std::make_pair (x, TYPE_MODE (type))); + return t; case CONST_DOUBLE: - if (GET_MODE (x) == VOIDmode) - t = build_int_cst_wide (type, - CONST_DOUBLE_LOW (x), CONST_DOUBLE_HIGH (x)); + if (TARGET_SUPPORTS_WIDE_INT == 0 && GET_MODE (x) == VOIDmode) + t = wide_int_to_tree (type, wide_int::from_array (&CONST_DOUBLE_LOW (x), 2, + HOST_BITS_PER_WIDE_INT * 2)); else { REAL_VALUE_TYPE d; diff --git a/gcc/expr.c b/gcc/expr.c index bbe0401291b..5603c5a1966 100644 --- a/gcc/expr.c +++ b/gcc/expr.c @@ -710,23 +710,23 @@ convert_modes (enum machine_mode mode, enum machine_mode oldmode, rtx x, int uns if (mode == oldmode) return x; - /* There is one case that we must handle specially: If we are converting - a CONST_INT into a mode whose size is twice HOST_BITS_PER_WIDE_INT and - we are to interpret the constant as unsigned, gen_lowpart will do - the wrong if the constant appears negative. What we want to do is - make the high-order word of the constant zero, not all ones. */ + /* There is one case that we must handle specially: If we are + converting a CONST_INT into a mode whose size is larger than + HOST_BITS_PER_WIDE_INT and we are to interpret the constant as + unsigned, gen_lowpart will do the wrong thing if the constant + appears negative. What we want to do is make the high-order word + of the constant zero, not all ones. */ if (unsignedp && GET_MODE_CLASS (mode) == MODE_INT - && GET_MODE_BITSIZE (mode) == HOST_BITS_PER_DOUBLE_INT + && GET_MODE_BITSIZE (mode) > HOST_BITS_PER_WIDE_INT && CONST_INT_P (x) && INTVAL (x) < 0) { - double_int val = double_int::from_uhwi (INTVAL (x)); - + wide_int val = std::make_pair (x, mode); /* We need to zero extend VAL.
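For example, (const_int -1) taken from SImode must become the low 32 one bits in the wider mode, not all ones. A standalone model of the extension (plain C, hypothetical name, oldprec <= 64 assumed):

    #include <stdint.h>

    static uint64_t
    zext_model (int64_t val, int oldprec)
    {
      uint64_t mask = oldprec >= 64 ? ~0ull : (1ull << oldprec) - 1;
      return (uint64_t) val & mask;
    }

zext_model (-1, 32) == 0xffffffff, which is what the val.zext call below produces for the old mode's precision.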
*/ if (oldmode != VOIDmode) - val = val.zext (GET_MODE_BITSIZE (oldmode)); + val = val.zext (GET_MODE_PRECISION (oldmode)); - return immed_double_int_const (val, mode); + return immed_wide_int_const (val, mode); } /* We can do this with a gen_lowpart if both desired and current modes @@ -738,7 +738,7 @@ convert_modes (enum machine_mode mode, enum machine_mode oldmode, rtx x, int uns && GET_MODE_PRECISION (mode) <= HOST_BITS_PER_WIDE_INT) || (GET_MODE_CLASS (mode) == MODE_INT && GET_MODE_CLASS (oldmode) == MODE_INT - && (CONST_DOUBLE_AS_INT_P (x) + && (CONST_SCALAR_INT_P (x) || (GET_MODE_PRECISION (mode) <= GET_MODE_PRECISION (oldmode) && ((MEM_P (x) && ! MEM_VOLATILE_P (x) && direct_load[(int) mode]) @@ -1743,6 +1743,7 @@ emit_group_load_1 (rtx *tmps, rtx dst, rtx orig_src, tree type, int ssize) { rtx first, second; + /* TODO: const_wide_int can have sizes other than this... */ gcc_assert (2 * len == ssize); split_double (src, &first, &second); if (i) @@ -4541,14 +4542,14 @@ get_bit_range (unsigned HOST_WIDE_INT *bitstart, relative to the representative. DECL_FIELD_OFFSET of field and repr are the same by construction if they are not constants, see finish_bitfield_layout. */ - if (host_integerp (DECL_FIELD_OFFSET (field), 1) - && host_integerp (DECL_FIELD_OFFSET (repr), 1)) - bitoffset = (tree_low_cst (DECL_FIELD_OFFSET (field), 1) - - tree_low_cst (DECL_FIELD_OFFSET (repr), 1)) * BITS_PER_UNIT; + if (tree_fits_uhwi_p (DECL_FIELD_OFFSET (field)) + && tree_fits_uhwi_p (DECL_FIELD_OFFSET (repr))) + bitoffset = (tree_to_uhwi (DECL_FIELD_OFFSET (field)) + - tree_to_uhwi (DECL_FIELD_OFFSET (repr))) * BITS_PER_UNIT; else bitoffset = 0; - bitoffset += (tree_low_cst (DECL_FIELD_BIT_OFFSET (field), 1) - - tree_low_cst (DECL_FIELD_BIT_OFFSET (repr), 1)); + bitoffset += (tree_to_uhwi (DECL_FIELD_BIT_OFFSET (field)) + - tree_to_uhwi (DECL_FIELD_BIT_OFFSET (repr))); /* If the adjustment is larger than bitpos, we would have a negative bit position for the lower bound and this may wreak havoc later. This can @@ -4569,7 +4570,7 @@ get_bit_range (unsigned HOST_WIDE_INT *bitstart, else *bitstart = *bitpos - bitoffset; - *bitend = *bitstart + tree_low_cst (DECL_SIZE (repr), 1) - 1; + *bitend = *bitstart + tree_to_uhwi (DECL_SIZE (repr)) - 1; } /* Returns true if ADDR is an ADDR_EXPR of a DECL that does not reside @@ -5239,10 +5240,10 @@ store_expr (tree exp, rtx target, int call_param_p, bool nontemporal) &alt_rtl); } - /* If TEMP is a VOIDmode constant and the mode of the type of EXP is not - the same as that of TARGET, adjust the constant. This is needed, for - example, in case it is a CONST_DOUBLE and we want only a word-sized - value. */ + /* If TEMP is a VOIDmode constant and the mode of the type of EXP is + not the same as that of TARGET, adjust the constant. This is + needed, for example, in case it is a CONST_DOUBLE or + CONST_WIDE_INT and we want only a word-sized value. 
*/ if (CONSTANT_P (temp) && GET_MODE (temp) == VOIDmode && TREE_CODE (exp) != ERROR_MARK && GET_MODE (target) != TYPE_MODE (TREE_TYPE (exp))) @@ -5435,11 +5436,11 @@ count_type_elements (const_tree type, bool for_ctor_p) tree nelts; nelts = array_type_nelts (type); - if (nelts && host_integerp (nelts, 1)) + if (nelts && tree_fits_uhwi_p (nelts)) { unsigned HOST_WIDE_INT n; - n = tree_low_cst (nelts, 1) + 1; + n = tree_to_uhwi (nelts) + 1; if (n == 0 || for_ctor_p) return n; else @@ -5554,9 +5555,9 @@ categorize_ctor_elements_1 (const_tree ctor, HOST_WIDE_INT *p_nz_elts, tree lo_index = TREE_OPERAND (purpose, 0); tree hi_index = TREE_OPERAND (purpose, 1); - if (host_integerp (lo_index, 1) && host_integerp (hi_index, 1)) - mult = (tree_low_cst (hi_index, 1) - - tree_low_cst (lo_index, 1) + 1); + if (tree_fits_uhwi_p (lo_index) && tree_fits_uhwi_p (hi_index)) + mult = (tree_to_uhwi (hi_index) + - tree_to_uhwi (lo_index) + 1); } num_fields += mult; elt_type = TREE_TYPE (value); @@ -5856,8 +5857,8 @@ store_constructor (tree exp, rtx target, int cleared, HOST_WIDE_INT size) if (cleared && initializer_zerop (value)) continue; - if (host_integerp (DECL_SIZE (field), 1)) - bitsize = tree_low_cst (DECL_SIZE (field), 1); + if (tree_fits_uhwi_p (DECL_SIZE (field))) + bitsize = tree_to_uhwi (DECL_SIZE (field)); else bitsize = -1; @@ -5866,14 +5867,14 @@ store_constructor (tree exp, rtx target, int cleared, HOST_WIDE_INT size) mode = VOIDmode; offset = DECL_FIELD_OFFSET (field); - if (host_integerp (offset, 0) - && host_integerp (bit_position (field), 0)) + if (tree_fits_shwi_p (offset) + && tree_fits_shwi_p (bit_position (field))) { bitpos = int_bit_position (field); offset = 0; } else - bitpos = tree_low_cst (DECL_FIELD_BIT_OFFSET (field), 0); + bitpos = tree_to_shwi (DECL_FIELD_BIT_OFFSET (field)); if (offset) { @@ -5956,14 +5957,14 @@ store_constructor (tree exp, rtx target, int cleared, HOST_WIDE_INT size) domain = TYPE_DOMAIN (type); const_bounds_p = (TYPE_MIN_VALUE (domain) && TYPE_MAX_VALUE (domain) - && host_integerp (TYPE_MIN_VALUE (domain), 0) - && host_integerp (TYPE_MAX_VALUE (domain), 0)); + && tree_fits_shwi_p (TYPE_MIN_VALUE (domain)) + && tree_fits_shwi_p (TYPE_MAX_VALUE (domain))); /* If we have constant bounds for the range of the type, get them. */ if (const_bounds_p) { - minelt = tree_low_cst (TYPE_MIN_VALUE (domain), 0); - maxelt = tree_low_cst (TYPE_MAX_VALUE (domain), 0); + minelt = tree_to_shwi (TYPE_MIN_VALUE (domain)); + maxelt = tree_to_shwi (TYPE_MAX_VALUE (domain)); } /* If the constructor has fewer elements than the array, clear @@ -5995,15 +5996,15 @@ store_constructor (tree exp, rtx target, int cleared, HOST_WIDE_INT size) tree lo_index = TREE_OPERAND (index, 0); tree hi_index = TREE_OPERAND (index, 1); - if (! host_integerp (lo_index, 1) - || ! host_integerp (hi_index, 1)) + if (! tree_fits_uhwi_p (lo_index) + || ! tree_fits_uhwi_p (hi_index)) { need_to_clear = 1; break; } - this_node_count = (tree_low_cst (hi_index, 1) - - tree_low_cst (lo_index, 1) + 1); + this_node_count = (tree_to_uhwi (hi_index) + - tree_to_uhwi (lo_index) + 1); } else this_node_count = 1; @@ -6050,8 +6051,8 @@ store_constructor (tree exp, rtx target, int cleared, HOST_WIDE_INT size) mode = TYPE_MODE (elttype); if (mode == BLKmode) - bitsize = (host_integerp (TYPE_SIZE (elttype), 1) - ? tree_low_cst (TYPE_SIZE (elttype), 1) + bitsize = (tree_fits_uhwi_p (TYPE_SIZE (elttype)) + ? 
tree_to_uhwi (TYPE_SIZE (elttype)) : -1); else bitsize = GET_MODE_BITSIZE (mode); @@ -6066,21 +6067,21 @@ store_constructor (tree exp, rtx target, int cleared, HOST_WIDE_INT size) /* If the range is constant and "small", unroll the loop. */ if (const_bounds_p - && host_integerp (lo_index, 0) - && host_integerp (hi_index, 0) - && (lo = tree_low_cst (lo_index, 0), - hi = tree_low_cst (hi_index, 0), + && tree_fits_shwi_p (lo_index) + && tree_fits_shwi_p (hi_index) + && (lo = tree_to_shwi (lo_index), + hi = tree_to_shwi (hi_index), count = hi - lo + 1, (!MEM_P (target) || count <= 2 - || (host_integerp (TYPE_SIZE (elttype), 1) - && (tree_low_cst (TYPE_SIZE (elttype), 1) * count + || (tree_fits_uhwi_p (TYPE_SIZE (elttype)) + && (tree_to_uhwi (TYPE_SIZE (elttype)) * count <= 40 * 8))))) { lo -= minelt; hi -= minelt; for (; lo <= hi; lo++) { - bitpos = lo * tree_low_cst (TYPE_SIZE (elttype), 0); + bitpos = lo * tree_to_shwi (TYPE_SIZE (elttype)); if (MEM_P (target) && !MEM_KEEP_ALIAS_SET_P (target) @@ -6155,8 +6156,8 @@ store_constructor (tree exp, rtx target, int cleared, HOST_WIDE_INT size) emit_label (loop_end); } } - else if ((index != 0 && ! host_integerp (index, 0)) - || ! host_integerp (TYPE_SIZE (elttype), 1)) + else if ((index != 0 && ! tree_fits_shwi_p (index)) + || ! tree_fits_uhwi_p (TYPE_SIZE (elttype))) { tree position; @@ -6183,10 +6184,10 @@ store_constructor (tree exp, rtx target, int cleared, HOST_WIDE_INT size) else { if (index != 0) - bitpos = ((tree_low_cst (index, 0) - minelt) - * tree_low_cst (TYPE_SIZE (elttype), 1)); + bitpos = ((tree_to_shwi (index) - minelt) + * tree_to_uhwi (TYPE_SIZE (elttype))); else - bitpos = (i * tree_low_cst (TYPE_SIZE (elttype), 1)); + bitpos = (i * tree_to_uhwi (TYPE_SIZE (elttype))); if (MEM_P (target) && !MEM_KEEP_ALIAS_SET_P (target) && TREE_CODE (type) == ARRAY_TYPE @@ -6210,7 +6211,7 @@ store_constructor (tree exp, rtx target, int cleared, HOST_WIDE_INT size) int need_to_clear; int icode = CODE_FOR_nothing; tree elttype = TREE_TYPE (type); - int elt_size = tree_low_cst (TYPE_SIZE (elttype), 1); + int elt_size = tree_to_uhwi (TYPE_SIZE (elttype)); enum machine_mode eltmode = TYPE_MODE (elttype); HOST_WIDE_INT bitsize; HOST_WIDE_INT bitpos; @@ -6250,10 +6251,10 @@ store_constructor (tree exp, rtx target, int cleared, HOST_WIDE_INT size) FOR_EACH_CONSTRUCTOR_VALUE (CONSTRUCTOR_ELTS (exp), idx, value) { - int n_elts_here = tree_low_cst + int n_elts_here = tree_to_uhwi (int_const_binop (TRUNC_DIV_EXPR, TYPE_SIZE (TREE_TYPE (value)), - TYPE_SIZE (elttype)), 1); + TYPE_SIZE (elttype))); count += n_elts_here; if (mostly_zeros_p (value)) @@ -6292,12 +6293,12 @@ store_constructor (tree exp, rtx target, int cleared, HOST_WIDE_INT size) HOST_WIDE_INT eltpos; tree value = ce->value; - bitsize = tree_low_cst (TYPE_SIZE (TREE_TYPE (value)), 1); + bitsize = tree_to_uhwi (TYPE_SIZE (TREE_TYPE (value))); if (cleared && initializer_zerop (value)) continue; if (ce->index) - eltpos = tree_low_cst (ce->index, 1); + eltpos = tree_to_uhwi (ce->index); else eltpos = i; @@ -6574,7 +6575,7 @@ get_inner_reference (tree exp, HOST_WIDE_INT *pbitsize, enum machine_mode mode = VOIDmode; bool blkmode_bitfield = false; tree offset = size_zero_node; - double_int bit_offset = double_int_zero; + addr_wide_int bit_offset = 0; /* First get the mode, signedness, and size. We do this from just the outermost expression. */ @@ -6622,10 +6623,10 @@ get_inner_reference (tree exp, HOST_WIDE_INT *pbitsize, if (size_tree != 0) { - if (! host_integerp (size_tree, 1)) + if (! 
tree_fits_uhwi_p (size_tree)) mode = BLKmode, *pbitsize = -1; else - *pbitsize = tree_low_cst (size_tree, 1); + *pbitsize = tree_to_uhwi (size_tree); } /* Compute cumulative bit-offset for nested component-refs and array-refs, @@ -6635,7 +6636,7 @@ get_inner_reference (tree exp, HOST_WIDE_INT *pbitsize, switch (TREE_CODE (exp)) { case BIT_FIELD_REF: - bit_offset += tree_to_double_int (TREE_OPERAND (exp, 2)); + bit_offset += TREE_OPERAND (exp, 2); break; case COMPONENT_REF: @@ -6650,7 +6651,7 @@ get_inner_reference (tree exp, HOST_WIDE_INT *pbitsize, break; offset = size_binop (PLUS_EXPR, offset, this_offset); - bit_offset += tree_to_double_int (DECL_FIELD_BIT_OFFSET (field)); + bit_offset += DECL_FIELD_BIT_OFFSET (field); /* ??? Right now we don't do anything with DECL_OFFSET_ALIGN. */ } @@ -6682,7 +6683,7 @@ get_inner_reference (tree exp, HOST_WIDE_INT *pbitsize, break; case IMAGPART_EXPR: - bit_offset += double_int::from_uhwi (*pbitsize); + bit_offset += *pbitsize; break; case VIEW_CONVERT_EXPR: @@ -6703,7 +6704,7 @@ get_inner_reference (tree exp, HOST_WIDE_INT *pbitsize, tree off = TREE_OPERAND (exp, 1); if (!integer_zerop (off)) { - double_int boff, coff = mem_ref_offset (exp); + addr_wide_int boff, coff = mem_ref_offset (exp); boff = coff.lshift (BITS_PER_UNIT == 8 ? 3 : exact_log2 (BITS_PER_UNIT)); bit_offset += boff; @@ -6729,11 +6730,10 @@ get_inner_reference (tree exp, HOST_WIDE_INT *pbitsize, this conversion. */ if (TREE_CODE (offset) == INTEGER_CST) { - double_int tem = tree_to_double_int (offset); - tem = tem.sext (TYPE_PRECISION (sizetype)); + addr_wide_int tem = addr_wide_int (offset).sext (TYPE_PRECISION (sizetype)); tem = tem.lshift (BITS_PER_UNIT == 8 ? 3 : exact_log2 (BITS_PER_UNIT)); tem += bit_offset; - if (tem.fits_shwi ()) + if (tem.fits_shwi_p ()) { *pbitpos = tem.to_shwi (); *poffset = offset = NULL_TREE; @@ -6744,20 +6744,20 @@ get_inner_reference (tree exp, HOST_WIDE_INT *pbitsize, if (offset) { /* Avoid returning a negative bitpos as this may wreak havoc later. */ - if (bit_offset.is_negative ()) + if (bit_offset.neg_p (SIGNED)) { - double_int mask - = double_int::mask (BITS_PER_UNIT == 8 - ? 3 : exact_log2 (BITS_PER_UNIT)); - double_int tem = bit_offset.and_not (mask); + addr_wide_int mask + = addr_wide_int::mask (BITS_PER_UNIT == 8 + ? 3 : exact_log2 (BITS_PER_UNIT), + false); + addr_wide_int tem = bit_offset.and_not (mask); /* TEM is the bitpos rounded to BITS_PER_UNIT towards -Inf. Subtract it to BIT_OFFSET and add it (scaled) to OFFSET. */ bit_offset -= tem; - tem = tem.arshift (BITS_PER_UNIT == 8 - ? 3 : exact_log2 (BITS_PER_UNIT), - HOST_BITS_PER_DOUBLE_INT); + tem = tem.rshifts (BITS_PER_UNIT == 8 + ? 3 : exact_log2 (BITS_PER_UNIT)); offset = size_binop (PLUS_EXPR, offset, - double_int_to_tree (sizetype, tem)); + wide_int_to_tree (sizetype, tem)); } *pbitpos = bit_offset.to_shwi (); @@ -7323,9 +7323,7 @@ highest_pow2_factor (const_tree exp) return BIGGEST_ALIGNMENT; else { - /* Note: tree_low_cst is intentionally not used here, - we don't care about the upper bits. */ - c0 = TREE_INT_CST_LOW (exp); + c0 = tree_to_hwi (exp); c0 &= -c0; return c0 ? 
c0 : BIGGEST_ALIGNMENT; } @@ -7344,10 +7342,10 @@ highest_pow2_factor (const_tree exp) case ROUND_DIV_EXPR: case TRUNC_DIV_EXPR: case FLOOR_DIV_EXPR: case CEIL_DIV_EXPR: if (integer_pow2p (TREE_OPERAND (exp, 1)) - && host_integerp (TREE_OPERAND (exp, 1), 1)) + && tree_fits_uhwi_p (TREE_OPERAND (exp, 1))) { c0 = highest_pow2_factor (TREE_OPERAND (exp, 0)); - c1 = tree_low_cst (TREE_OPERAND (exp, 1), 1); + c1 = tree_to_uhwi (TREE_OPERAND (exp, 1)); return MAX (1, c0 / c1); } break; @@ -7748,11 +7746,12 @@ expand_constructor (tree exp, rtx target, enum expand_modifier modifier, /* All elts simple constants => refer to a constant in memory. But if this is a non-BLKmode mode, let it store a field at a time - since that should make a CONST_INT or CONST_DOUBLE when we - fold. Likewise, if we have a target we can use, it is best to - store directly into the target unless the type is large enough - that memcpy will be used. If we are making an initializer and - all operands are constant, put it in memory as well. + since that should make a CONST_INT, CONST_WIDE_INT or + CONST_DOUBLE when we fold. Likewise, if we have a target we can + use, it is best to store directly into the target unless the type + is large enough that memcpy will be used. If we are making an + initializer and all operands are constant, put it in memory as + well. FIXME: Avoid trying to fill vector constructors piece-meal. Output them with output_constant_def below unless we're sure @@ -7762,9 +7761,9 @@ expand_constructor (tree exp, rtx target, enum expand_modifier modifier, && ((mode == BLKmode && ! (target != 0 && safe_from_p (target, exp, 1))) || TREE_ADDRESSABLE (exp) - || (host_integerp (TYPE_SIZE_UNIT (type), 1) + || (tree_fits_uhwi_p (TYPE_SIZE_UNIT (type)) && (! MOVE_BY_PIECES_P - (tree_low_cst (TYPE_SIZE_UNIT (type), 1), + (tree_to_uhwi (TYPE_SIZE_UNIT (type)), TYPE_ALIGN (type))) && ! mostly_zeros_p (exp)))) || ((modifier == EXPAND_INITIALIZER || modifier == EXPAND_CONST_ADDRESS) @@ -8222,17 +8221,18 @@ expand_expr_real_2 (sepops ops, rtx target, enum machine_mode tmode, && TREE_CONSTANT (treeop1)) { rtx constant_part; + HOST_WIDE_INT wc; + enum machine_mode wmode = TYPE_MODE (TREE_TYPE (treeop1)); op1 = expand_expr (treeop1, subtarget, VOIDmode, EXPAND_SUM); - /* Use immed_double_const to ensure that the constant is + /* Use wide_int::from_shwi to ensure that the constant is truncated according to the mode of OP1, then sign extended to a HOST_WIDE_INT. Using the constant directly can result in non-canonical RTL in a 64x32 cross compile. 
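For instance, the SImode constant 0x80000000 must be carried in a 64-bit HOST_WIDE_INT as 0xffffffff80000000 to be canonical. A standalone model of the truncate-then-sign-extend step (plain C, hypothetical name, assuming the usual arithmetic right shift of signed values and 0 < prec <= 64):

    #include <stdint.h>

    static int64_t
    canonical_hwi (int64_t val, int prec)
    {
      int shift = 64 - prec;
      return (int64_t) ((uint64_t) val << shift) >> shift;
    }

canonical_hwi (0x80000000, 32) == (int64_t) 0xffffffff80000000.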
*/ - constant_part - = immed_double_const (TREE_INT_CST_LOW (treeop0), - (HOST_WIDE_INT) 0, - TYPE_MODE (TREE_TYPE (treeop1))); + wc = tree_to_hwi (treeop0); + constant_part + = immed_wide_int_const (wide_int::from_shwi (wc, wmode), wmode); op1 = plus_constant (mode, op1, INTVAL (constant_part)); if (modifier != EXPAND_SUM && modifier != EXPAND_INITIALIZER) op1 = force_operand (op1, target); @@ -8244,6 +8244,8 @@ expand_expr_real_2 (sepops ops, rtx target, enum machine_mode tmode, && TREE_CONSTANT (treeop0)) { rtx constant_part; + HOST_WIDE_INT wc; + enum machine_mode wmode = TYPE_MODE (TREE_TYPE (treeop0)); op0 = expand_expr (treeop0, subtarget, VOIDmode, (modifier == EXPAND_INITIALIZER @@ -8258,14 +8260,13 @@ expand_expr_real_2 (sepops ops, rtx target, enum machine_mode tmode, return simplify_gen_binary (PLUS, mode, op0, op1); goto binop2; } - /* Use immed_double_const to ensure that the constant is + /* Use wide_int::from_shwi to ensure that the constant is truncated according to the mode of OP1, then sign extended to a HOST_WIDE_INT. Using the constant directly can result in non-canonical RTL in a 64x32 cross compile. */ - constant_part - = immed_double_const (TREE_INT_CST_LOW (treeop1), - (HOST_WIDE_INT) 0, - TYPE_MODE (TREE_TYPE (treeop0))); + wc = tree_to_hwi (treeop1); + constant_part + = immed_wide_int_const (wide_int::from_shwi (wc, wmode), wmode); op0 = plus_constant (mode, op0, INTVAL (constant_part)); if (modifier != EXPAND_SUM && modifier != EXPAND_INITIALIZER) op0 = force_operand (op0, target); @@ -8544,7 +8545,7 @@ expand_expr_real_2 (sepops ops, rtx target, enum machine_mode tmode, indexed address, for machines that support that. */ if (modifier == EXPAND_SUM && mode == ptr_mode - && host_integerp (treeop1, 0)) + && tree_fits_shwi_p (treeop1)) { tree exp1 = treeop1; @@ -8557,8 +8558,7 @@ expand_expr_real_2 (sepops ops, rtx target, enum machine_mode tmode, op0 = copy_to_mode_reg (mode, op0); return REDUCE_BIT_FIELD (gen_rtx_MULT (mode, op0, - gen_int_mode (tree_low_cst (exp1, 0), - TYPE_MODE (TREE_TYPE (exp1))))); + gen_int_mode (tree_to_shwi (exp1), TYPE_MODE (TREE_TYPE (exp1))))); } if (modifier == EXPAND_STACK_PARM) @@ -8794,10 +8794,14 @@ expand_expr_real_2 (sepops ops, rtx target, enum machine_mode tmode, for unsigned bitfield expand this as XOR with a proper constant instead. */ if (reduce_bit_field && TYPE_UNSIGNED (type)) - temp = expand_binop (mode, xor_optab, op0, - immed_double_int_const - (double_int::mask (TYPE_PRECISION (type)), mode), - target, 1, OPTAB_LIB_WIDEN); + { + wide_int mask = wide_int::mask (TYPE_PRECISION (type), + false, GET_MODE_PRECISION (mode)); + + temp = expand_binop (mode, xor_optab, op0, + immed_wide_int_const (mask, mode), + target, 1, OPTAB_LIB_WIDEN); + } else temp = expand_unop (mode, one_cmpl_optab, op0, target, 1); gcc_assert (temp); @@ -9430,11 +9434,18 @@ expand_expr_real_1 (tree exp, rtx target, enum machine_mode tmode, return decl_rtl; case INTEGER_CST: - temp = immed_double_const (TREE_INT_CST_LOW (exp), - TREE_INT_CST_HIGH (exp), mode); - - return temp; - + { + tree type = TREE_TYPE (exp); + /* One could argue that GET_MODE_PRECISION (TYPE_MODE (type)) + should always be the same as TYPE_PRECISION (type). + However, it is not. Since we are converting from tree to + rtl, we have to expose this ugly truth here. 
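A bit-field type is the classic case: the type of a 24-bit field can have TYPE_PRECISION 24 while its TYPE_MODE is SImode with precision 32, so the constant has to be widened with the type's signedness first. A standalone model of that widening (plain C, hypothetical name, assuming 0 < prec < 32):

    #include <stdint.h>

    static uint32_t
    widen_model (uint32_t val, int prec, int is_signed)
    {
      uint32_t low_mask = (1u << prec) - 1;
      uint32_t low = val & low_mask;
      if (is_signed && (low & (1u << (prec - 1))))
        return low | ~low_mask;   /* replicate the sign bit */
      return low;
    }

widen_model (0x800000, 24, 1) == 0xff800000, mirroring what the force_to_size call does for a signed 24-bit value.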
*/ + temp = immed_wide_int_const (wide_int (exp) + .force_to_size (GET_MODE_PRECISION (TYPE_MODE (type)), + TYPE_SIGN (type)), + TYPE_MODE (type)); + return temp; + } case VECTOR_CST: { tree tmp = NULL_TREE; @@ -9620,12 +9631,12 @@ expand_expr_real_1 (tree exp, rtx target, enum machine_mode tmode, might end up in a register. */ if (mem_ref_refers_to_non_mem_p (exp)) { - HOST_WIDE_INT offset = mem_ref_offset (exp).low; + HOST_WIDE_INT offset = mem_ref_offset (exp).to_short_addr (); base = TREE_OPERAND (base, 0); if (offset == 0 - && host_integerp (TYPE_SIZE (type), 1) + && tree_fits_uhwi_p (TYPE_SIZE (type)) && (GET_MODE_BITSIZE (DECL_MODE (base)) - == TREE_INT_CST_LOW (TYPE_SIZE (type)))) + == tree_to_uhwi (TYPE_SIZE (type)))) return expand_expr (build1 (VIEW_CONVERT_EXPR, type, base), target, tmode, modifier); if (TYPE_MODE (type) == BLKmode) @@ -9655,8 +9666,7 @@ expand_expr_real_1 (tree exp, rtx target, enum machine_mode tmode, op0 = memory_address_addr_space (address_mode, op0, as); if (!integer_zerop (TREE_OPERAND (exp, 1))) { - rtx off - = immed_double_int_const (mem_ref_offset (exp), address_mode); + rtx off = immed_wide_int_const (mem_ref_offset (exp), address_mode); op0 = simplify_gen_binary (PLUS, address_mode, op0, off); } op0 = memory_address_addr_space (mode, op0, as); @@ -9804,11 +9814,11 @@ expand_expr_real_1 (tree exp, rtx target, enum machine_mode tmode, { tree type = TREE_TYPE (TREE_TYPE (init)); enum machine_mode mode = TYPE_MODE (type); - + if (GET_MODE_CLASS (mode) == MODE_INT && GET_MODE_SIZE (mode) == 1) return gen_int_mode (TREE_STRING_POINTER (init) - [TREE_INT_CST_LOW (index1)], + [tree_to_hwi (index1)], mode); } } @@ -9845,7 +9855,7 @@ expand_expr_real_1 (tree exp, rtx target, enum machine_mode tmode, op0 = expand_expr (value, target, tmode, modifier); if (DECL_BIT_FIELD (field)) { - HOST_WIDE_INT bitsize = TREE_INT_CST_LOW (DECL_SIZE (field)); + HOST_WIDE_INT bitsize = tree_to_hwi (DECL_SIZE (field)); enum machine_mode imode = TYPE_MODE (TREE_TYPE (field)); if (TYPE_UNSIGNED (TREE_TYPE (field))) @@ -10525,9 +10535,10 @@ reduce_to_bit_field_precision (rtx exp, rtx target, tree type) } else if (TYPE_UNSIGNED (type)) { - rtx mask = immed_double_int_const (double_int::mask (prec), - GET_MODE (exp)); - return expand_and (GET_MODE (exp), exp, mask, target); + enum machine_mode mode = GET_MODE (exp); + rtx mask = immed_wide_int_const + (wide_int::mask (prec, false, GET_MODE_PRECISION (mode)), mode); + return expand_and (mode, exp, mask, target); } else { @@ -10553,10 +10564,10 @@ is_aligning_offset (const_tree offset, const_tree exp) /* We must now have a BIT_AND_EXPR with a constant that is one less than power of 2 and which is larger than BIGGEST_ALIGNMENT. */ if (TREE_CODE (offset) != BIT_AND_EXPR - || !host_integerp (TREE_OPERAND (offset, 1), 1) + || !tree_fits_uhwi_p (TREE_OPERAND (offset, 1)) || compare_tree_int (TREE_OPERAND (offset, 1), BIGGEST_ALIGNMENT / BITS_PER_UNIT) <= 0 - || !exact_log2 (tree_low_cst (TREE_OPERAND (offset, 1), 1) + 1) < 0) + || !exact_log2 (tree_to_uhwi (TREE_OPERAND (offset, 1)) + 1) < 0) return 0; /* Look at the first operand of BIT_AND_EXPR and strip any conversion. @@ -10691,7 +10702,7 @@ string_constant (tree arg, tree *ptr_offset) and inside of the bounds of the string literal. */ offset = fold_convert (sizetype, offset); if (compare_tree_int (DECL_SIZE_UNIT (array), length) > 0 - && (! host_integerp (offset, 1) + && (! 
tree_fits_uhwi_p (offset) || compare_tree_int (offset, length) >= 0)) return 0; @@ -11100,8 +11111,9 @@ const_vector_from_tree (tree exp) RTVEC_ELT (v, i) = CONST_FIXED_FROM_FIXED_VALUE (TREE_FIXED_CST (elt), inner); else - RTVEC_ELT (v, i) = immed_double_int_const (tree_to_double_int (elt), - inner); + RTVEC_ELT (v, i) + = immed_wide_int_const (wide_int (elt), + TYPE_MODE (TREE_TYPE (elt))); } return gen_rtx_CONST_VECTOR (mode, v); diff --git a/gcc/expr.h b/gcc/expr.h index 15fcb471d8d..8dde4b3f98f 100644 --- a/gcc/expr.h +++ b/gcc/expr.h @@ -26,7 +26,7 @@ along with GCC; see the file COPYING3. If not see #include "rtl.h" /* For optimize_size */ #include "flags.h" -/* For host_integerp, tree_low_cst, fold_convert, size_binop, ssize_int, +/* For tree_fits_uhwi_p, tree_to_uhwi, fold_convert, size_binop, ssize_int, TREE_CODE, TYPE_SIZE, int_size_in_bytes, */ #include "tree.h" /* For GET_MODE_BITSIZE, word_mode */ @@ -94,8 +94,8 @@ struct locate_and_pad_arg_data #define ADD_PARM_SIZE(TO, INC) \ do { \ tree inc = (INC); \ - if (host_integerp (inc, 0)) \ - (TO).constant += tree_low_cst (inc, 0); \ + if (tree_fits_shwi_p (inc)) \ + (TO).constant += tree_to_shwi (inc); \ else if ((TO).var == 0) \ (TO).var = fold_convert (ssizetype, inc); \ else \ @@ -106,8 +106,8 @@ do { \ #define SUB_PARM_SIZE(TO, DEC) \ do { \ tree dec = (DEC); \ - if (host_integerp (dec, 0)) \ - (TO).constant -= tree_low_cst (dec, 0); \ + if (tree_fits_shwi_p (dec)) \ + (TO).constant -= tree_to_shwi (dec); \ else if ((TO).var == 0) \ (TO).var = size_binop (MINUS_EXPR, ssize_int (0), \ fold_convert (ssizetype, dec)); \ diff --git a/gcc/final.c b/gcc/final.c index 31ced4f483b..1f1ac1b8b59 100644 --- a/gcc/final.c +++ b/gcc/final.c @@ -78,6 +78,7 @@ along with GCC; see the file COPYING3. If not see #include "cfgloop.h" #include "params.h" #include "tree-pretty-print.h" /* for dump_function_header */ +#include "wide-int-print.h" #ifdef XCOFF_DEBUGGING_INFO #include "xcoffout.h" /* Needed for external data @@ -3865,8 +3866,21 @@ output_addr_const (FILE *file, rtx x) output_addr_const (file, XEXP (x, 0)); break; + case CONST_WIDE_INT: + /* We do not know the mode here so we have to use a round about + way to build a wide-int to get it printed properly. */ + { + wide_int w = wide_int::from_array (&CONST_WIDE_INT_ELT (x, 0), + CONST_WIDE_INT_NUNITS (x), + CONST_WIDE_INT_NUNITS (x) + * HOST_BITS_PER_WIDE_INT, + false); + print_decs (w, file); + } + break; + case CONST_DOUBLE: - if (GET_MODE (x) == VOIDmode) + if (CONST_DOUBLE_AS_INT_P (x)) { /* We can use %d if the number is one word and positive. */ if (CONST_DOUBLE_HIGH (x)) diff --git a/gcc/fixed-value.c b/gcc/fixed-value.c index 8ba78769c79..65a5deb454d 100644 --- a/gcc/fixed-value.c +++ b/gcc/fixed-value.c @@ -23,6 +23,7 @@ along with GCC; see the file COPYING3. If not see #include "tm.h" #include "tree.h" #include "diagnostic-core.h" +#include "wide-int.h" /* Compare two fixed objects for bitwise identity. 
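In the conversions below, real_to_integer now hands back a wide_int whose first two elements are unpacked into the double_int field f->data. A standalone model of the unpacking (plain C, hypothetical name, GCC/Clang __int128 standing in for the two-word value):

    #include <stdint.h>

    static void
    split_low_high (unsigned __int128 v, uint64_t *low, int64_t *high)
    {
      *low = (uint64_t) v;          /* elt (0), least significant word */
      *high = (int64_t) (v >> 64);  /* elt (1), most significant word */
    }

The low-then-high order matches wide_int's least-significant-first element layout.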
*/ @@ -113,6 +114,8 @@ fixed_from_string (FIXED_VALUE_TYPE *f, const char *str, enum machine_mode mode) REAL_VALUE_TYPE real_value, fixed_value, base_value; unsigned int fbit; enum fixed_value_range_code temp; + bool fail; + wide_int w; f->mode = mode; fbit = GET_MODE_FBIT (mode); @@ -127,8 +130,9 @@ fixed_from_string (FIXED_VALUE_TYPE *f, const char *str, enum machine_mode mode) "large fixed-point constant implicitly truncated to fixed-point type"); real_2expN (&base_value, fbit, mode); real_arithmetic (&fixed_value, MULT_EXPR, &real_value, &base_value); - real_to_integer2 ((HOST_WIDE_INT *)&f->data.low, &f->data.high, - &fixed_value); + w = real_to_integer (&fixed_value, &fail, GET_MODE_PRECISION (mode)); + f->data.low = w.elt (0); + f->data.high = w.elt (1); if (temp == FIXED_MAX_EPS && ALL_FRACT_MODE_P (f->mode)) { @@ -154,8 +158,10 @@ fixed_to_decimal (char *str, const FIXED_VALUE_TYPE *f_orig, REAL_VALUE_TYPE real_value, base_value, fixed_value; real_2expN (&base_value, GET_MODE_FBIT (f_orig->mode), f_orig->mode); - real_from_integer (&real_value, VOIDmode, f_orig->data.low, f_orig->data.high, - UNSIGNED_FIXED_POINT_MODE_P (f_orig->mode)); + real_from_integer (&real_value, VOIDmode, + wide_int::from_double_int (f_orig->data, + GET_MODE_PRECISION (f_orig->mode)), + UNSIGNED_FIXED_POINT_MODE_P (f_orig->mode) ? UNSIGNED : SIGNED); real_arithmetic (&fixed_value, RDIV_EXPR, &real_value, &base_value); real_to_decimal (str, &fixed_value, buf_size, 0, 1); } @@ -1041,12 +1047,17 @@ fixed_convert_from_real (FIXED_VALUE_TYPE *f, enum machine_mode mode, int i_f_bits = GET_MODE_IBIT (mode) + GET_MODE_FBIT (mode); unsigned int fbit = GET_MODE_FBIT (mode); enum fixed_value_range_code temp; + bool fail; + wide_int w; real_value = *a; f->mode = mode; real_2expN (&base_value, fbit, mode); real_arithmetic (&fixed_value, MULT_EXPR, &real_value, &base_value); - real_to_integer2 ((HOST_WIDE_INT *)&f->data.low, &f->data.high, &fixed_value); + + w = real_to_integer (&fixed_value, &fail, GET_MODE_PRECISION (mode)); + f->data.low = w.elt (0); + f->data.high = w.elt (1); temp = check_real_for_fixed_mode (&real_value, mode); if (temp == FIXED_UNDERFLOW) /* Minimum. */ { @@ -1092,8 +1103,10 @@ real_convert_from_fixed (REAL_VALUE_TYPE *r, enum machine_mode mode, REAL_VALUE_TYPE base_value, fixed_value, real_value; real_2expN (&base_value, GET_MODE_FBIT (f->mode), f->mode); - real_from_integer (&fixed_value, VOIDmode, f->data.low, f->data.high, - UNSIGNED_FIXED_POINT_MODE_P (f->mode)); + real_from_integer (&fixed_value, VOIDmode, + wide_int::from_double_int (f->data, + GET_MODE_PRECISION (f->mode)), + UNSIGNED_FIXED_POINT_MODE_P (f->mode) ? UNSIGNED : SIGNED); real_arithmetic (&real_value, RDIV_EXPR, &fixed_value, &base_value); real_convert (r, mode, &real_value); } diff --git a/gcc/fold-const.c b/gcc/fold-const.c index 6506ae7bbfb..ed391bb486c 100644 --- a/gcc/fold-const.c +++ b/gcc/fold-const.c @@ -107,7 +107,6 @@ static tree decode_field_reference (location_t, tree, HOST_WIDE_INT *, HOST_WIDE_INT *, enum machine_mode *, int *, int *, tree *, tree *); -static int all_ones_mask_p (const_tree, int); static tree sign_bit_p (tree, const_tree); static int simple_operand_p (const_tree); static bool simple_operand_p_2 (tree); @@ -164,26 +163,41 @@ protected_set_expr_location_unshare (tree x, location_t loc) return x; } -/* If ARG2 divides ARG1 with zero remainder, carries out the division - of type CODE and returns the quotient. - Otherwise returns NULL_TREE. 
*/ +/* If ARG2 divides ARG1 with zero remainder, carries out the exact + division and returns the quotient. Otherwise returns + NULL_TREE. */ tree -div_if_zero_remainder (enum tree_code code, const_tree arg1, const_tree arg2) +div_if_zero_remainder (const_tree arg1, const_tree arg2) { - double_int quo, rem; - int uns; + wide_int quo, rem; + wide_int warg1 = arg1; + wide_int warg2 = arg2; + signop sgn = TYPE_SIGN (TREE_TYPE (arg1)); + signop sgn2 = TYPE_SIGN (TREE_TYPE (arg2)); - /* The sign of the division is according to operand two, that - does the correct thing for POINTER_PLUS_EXPR where we want - a signed division. */ - uns = TYPE_UNSIGNED (TREE_TYPE (arg2)); + if (sgn != sgn2) + { + /* When signedness mismatches, we promote the unsigned value to + a signed value. We preserve the value by extending the + precision by 1 bit, iff the top bit is set. */ + if (sgn == UNSIGNED) + { + if (warg1.neg_p (SIGNED)) + warg1 = warg1.force_to_size (warg1.get_precision () + 1, sgn); + sgn = SIGNED; + } + else + { + if (warg2.neg_p (SIGNED)) + warg2 = warg2.force_to_size (warg2.get_precision () + 1, sgn2); + } + } - quo = tree_to_double_int (arg1).divmod (tree_to_double_int (arg2), - uns, code, &rem); + quo = warg1.divmod_trunc (warg2, &rem, sgn); - if (rem.is_zero ()) - return build_int_cst_wide (TREE_TYPE (arg1), quo.low, quo.high); + if (rem.zero_p ()) + return wide_int_to_tree (TREE_TYPE (arg1), quo); return NULL_TREE; } @@ -357,8 +371,6 @@ negate_mathfn_p (enum built_in_function code) bool may_negate_without_overflow_p (const_tree t) { - unsigned HOST_WIDE_INT val; - unsigned int prec; tree type; gcc_assert (TREE_CODE (t) == INTEGER_CST); @@ -367,19 +379,7 @@ may_negate_without_overflow_p (const_tree t) if (TYPE_UNSIGNED (type)) return false; - prec = TYPE_PRECISION (type); - if (prec > HOST_BITS_PER_WIDE_INT) - { - if (TREE_INT_CST_LOW (t) != 0) - return true; - prec -= HOST_BITS_PER_WIDE_INT; - val = TREE_INT_CST_HIGH (t); - } - else - val = TREE_INT_CST_LOW (t); - if (prec < HOST_BITS_PER_WIDE_INT) - val &= ((unsigned HOST_WIDE_INT) 1 << prec) - 1; - return val != ((unsigned HOST_WIDE_INT) 1 << (prec - 1)); + return !wide_int (t).only_sign_bit_p (); } /* Determine whether an expression T can be cheaply negated using @@ -506,13 +506,11 @@ negate_expr_p (tree t) break; case RSHIFT_EXPR: - /* Optimize -((int)x >> 31) into (unsigned)x >> 31. */ + /* Optimize -((int)x >> 31) into (unsigned)x >> 31 for int. */ if (TREE_CODE (TREE_OPERAND (t, 1)) == INTEGER_CST) { tree op1 = TREE_OPERAND (t, 1); - if (TREE_INT_CST_HIGH (op1) == 0 - && (unsigned HOST_WIDE_INT) (TYPE_PRECISION (type) - 1) - == TREE_INT_CST_LOW (op1)) + if (wide_int::eq_p (op1, TYPE_PRECISION (type) - 1)) return true; } break; @@ -719,13 +717,11 @@ fold_negate_expr (location_t loc, tree t) break; case RSHIFT_EXPR: - /* Optimize -((int)x >> 31) into (unsigned)x >> 31. */ + /* Optimize -((int)x >> 31) into (unsigned)x >> 31 for int. */ if (TREE_CODE (TREE_OPERAND (t, 1)) == INTEGER_CST) { tree op1 = TREE_OPERAND (t, 1); - if (TREE_INT_CST_HIGH (op1) == 0 - && (unsigned HOST_WIDE_INT) (TYPE_PRECISION (type) - 1) - == TREE_INT_CST_LOW (op1)) + if (wide_int::eq_p (op1, TYPE_PRECISION (type) - 1)) { tree ntype = TYPE_UNSIGNED (type) ? signed_type_for (type) @@ -955,153 +951,150 @@ int_binop_types_match_p (enum tree_code code, const_tree type1, const_tree type2 to evaluate CODE at compile-time. 
*/ static tree -int_const_binop_1 (enum tree_code code, const_tree arg1, const_tree arg2, +int_const_binop_1 (enum tree_code code, const_tree arg1, const_tree parg2, int overflowable) { - double_int op1, op2, res, tmp; + wide_int op1, arg2, res; tree t; tree type = TREE_TYPE (arg1); - bool uns = TYPE_UNSIGNED (type); + signop sign = TYPE_SIGN (type); bool overflow = false; - op1 = tree_to_double_int (arg1); - op2 = tree_to_double_int (arg2); + op1 = arg1; + arg2 = wide_int (parg2).force_to_size (TYPE_PRECISION (type), TYPE_SIGN (TREE_TYPE (parg2))); switch (code) { case BIT_IOR_EXPR: - res = op1 | op2; + res = op1 | arg2; break; case BIT_XOR_EXPR: - res = op1 ^ op2; + res = op1 ^ arg2; break; case BIT_AND_EXPR: - res = op1 & op2; + res = op1 & arg2; break; case RSHIFT_EXPR: - res = op1.rshift (op2.to_shwi (), TYPE_PRECISION (type), !uns); - break; - case LSHIFT_EXPR: - /* It's unclear from the C standard whether shifts can overflow. - The following code ignores overflow; perhaps a C standard - interpretation ruling is needed. */ - res = op1.lshift (op2.to_shwi (), TYPE_PRECISION (type), !uns); + if (arg2.neg_p (SIGNED)) + { + arg2 = -arg2; + if (code == RSHIFT_EXPR) + code = LSHIFT_EXPR; + else + code = RSHIFT_EXPR; + } + + if (code == RSHIFT_EXPR) + /* It's unclear from the C standard whether shifts can overflow. + The following code ignores overflow; perhaps a C standard + interpretation ruling is needed. */ + res = op1.rshift (arg2, sign, GET_MODE_BITSIZE (TYPE_MODE (type)), TRUNC); + else + res = op1.lshift (arg2, GET_MODE_BITSIZE (TYPE_MODE (type)), TRUNC); break; - + case RROTATE_EXPR: - res = op1.rrotate (op2.to_shwi (), TYPE_PRECISION (type)); - break; - case LROTATE_EXPR: - res = op1.lrotate (op2.to_shwi (), TYPE_PRECISION (type)); + if (arg2.neg_p (SIGNED)) + { + arg2 = -arg2; + if (code == RROTATE_EXPR) + code = LROTATE_EXPR; + else + code = RROTATE_EXPR; + } + + if (code == RROTATE_EXPR) + res = op1.rrotate (arg2); + else + res = op1.lrotate (arg2); break; case PLUS_EXPR: - res = op1.add_with_sign (op2, false, &overflow); + res = op1.add (arg2, sign, &overflow); break; case MINUS_EXPR: - res = op1.sub_with_overflow (op2, &overflow); + res = op1.sub (arg2, sign, &overflow); break; - + case MULT_EXPR: - res = op1.mul_with_sign (op2, false, &overflow); + res = op1.mul (arg2, sign, &overflow); break; case MULT_HIGHPART_EXPR: - if (TYPE_PRECISION (type) > HOST_BITS_PER_WIDE_INT) - { - bool dummy_overflow; - if (TYPE_PRECISION (type) != 2 * HOST_BITS_PER_WIDE_INT) - return NULL_TREE; - op1.wide_mul_with_sign (op2, uns, &res, &dummy_overflow); - } - else - { - bool dummy_overflow; - /* MULT_HIGHPART_EXPR can't ever oveflow, as the multiplication - is performed in twice the precision of arguments. */ - tmp = op1.mul_with_sign (op2, false, &dummy_overflow); - res = tmp.rshift (TYPE_PRECISION (type), - 2 * TYPE_PRECISION (type), !uns); - } + res = op1.mul_high (arg2, sign); break; case TRUNC_DIV_EXPR: - case FLOOR_DIV_EXPR: case CEIL_DIV_EXPR: case EXACT_DIV_EXPR: - /* This is a shortcut for a common special case. 
*/ - if (op2.high == 0 && (HOST_WIDE_INT) op2.low > 0 - && !TREE_OVERFLOW (arg1) - && !TREE_OVERFLOW (arg2) - && op1.high == 0 && (HOST_WIDE_INT) op1.low >= 0) - { - if (code == CEIL_DIV_EXPR) - op1.low += op2.low - 1; + res = op1.div_trunc (arg2, sign, &overflow); + if (overflow) + return NULL_TREE; + break; - res.low = op1.low / op2.low, res.high = 0; - break; - } + case FLOOR_DIV_EXPR: + res = op1.div_floor (arg2, sign, &overflow); + if (overflow) + return NULL_TREE; + break; - /* ... fall through ... */ + case CEIL_DIV_EXPR: + res = op1.div_ceil (arg2, sign, &overflow); + if (overflow) + return NULL_TREE; + break; case ROUND_DIV_EXPR: - if (op2.is_zero ()) + res = op1.div_round (arg2, sign, &overflow); + if (overflow) return NULL_TREE; - if (op2.is_one ()) - { - res = op1; - break; - } - if (op1 == op2 && !op1.is_zero ()) - { - res = double_int_one; - break; - } - res = op1.divmod_with_overflow (op2, uns, code, &tmp, &overflow); break; case TRUNC_MOD_EXPR: - case FLOOR_MOD_EXPR: case CEIL_MOD_EXPR: - /* This is a shortcut for a common special case. */ - if (op2.high == 0 && (HOST_WIDE_INT) op2.low > 0 - && !TREE_OVERFLOW (arg1) - && !TREE_OVERFLOW (arg2) - && op1.high == 0 && (HOST_WIDE_INT) op1.low >= 0) - { - if (code == CEIL_MOD_EXPR) - op1.low += op2.low - 1; - res.low = op1.low % op2.low, res.high = 0; - break; - } + res = op1.mod_trunc (arg2, sign, &overflow); + if (overflow) + return NULL_TREE; + break; - /* ... fall through ... */ + case FLOOR_MOD_EXPR: + res = op1.mod_floor (arg2, sign, &overflow); + if (overflow) + return NULL_TREE; + break; + + case CEIL_MOD_EXPR: + res = op1.mod_ceil (arg2, sign, &overflow); + if (overflow) + return NULL_TREE; + break; case ROUND_MOD_EXPR: - if (op2.is_zero ()) + res = op1.mod_round (arg2, sign, &overflow); + if (overflow) return NULL_TREE; - tmp = op1.divmod_with_overflow (op2, uns, code, &res, &overflow); break; case MIN_EXPR: - res = op1.min (op2, uns); + res = op1.min (arg2, sign); break; case MAX_EXPR: - res = op1.max (op2, uns); + res = op1.max (arg2, sign); break; default: return NULL_TREE; } - t = force_fit_type_double (TREE_TYPE (arg1), res, overflowable, - (!uns && overflow) - | TREE_OVERFLOW (arg1) | TREE_OVERFLOW (arg2)); + t = force_fit_type (type, res, overflowable, + (((sign == SIGNED || overflowable == -1) + && overflow) + | TREE_OVERFLOW (arg1) | TREE_OVERFLOW (parg2))); return t; } @@ -1229,9 +1222,12 @@ const_binop (enum tree_code code, tree arg1, tree arg2) case LSHIFT_EXPR: case RSHIFT_EXPR: - f2.data.high = TREE_INT_CST_HIGH (arg2); - f2.data.low = TREE_INT_CST_LOW (arg2); - f2.mode = SImode; + { + wide_int w2 = arg2; + f2.data.high = w2.elt (1); + f2.data.low = w2.elt (0); + f2.mode = SImode; + } break; default: @@ -1412,13 +1408,13 @@ const_binop (enum tree_code code, tree arg1, tree arg2) if (code == VEC_LSHIFT_EXPR || code == VEC_RSHIFT_EXPR) { - if (!host_integerp (arg2, 1)) + if (!tree_fits_uhwi_p (arg2)) return NULL_TREE; - unsigned HOST_WIDE_INT shiftc = tree_low_cst (arg2, 1); - unsigned HOST_WIDE_INT outerc = tree_low_cst (TYPE_SIZE (type), 1); + unsigned HOST_WIDE_INT shiftc = tree_to_uhwi (arg2); + unsigned HOST_WIDE_INT outerc = tree_to_uhwi (TYPE_SIZE (type)); unsigned HOST_WIDE_INT innerc - = tree_low_cst (TYPE_SIZE (TREE_TYPE (type)), 1); + = tree_to_uhwi (TYPE_SIZE (TREE_TYPE (type))); if (shiftc >= outerc || (shiftc % innerc) != 0) return NULL_TREE; int offset = shiftc / innerc; @@ -1567,15 +1563,15 @@ static tree fold_convert_const_int_from_int (tree type, const_tree arg1) { tree t; + /* Extend the 
value coming in to something so large that there are no + edge conditions on conversion of unsigned to signed numbers. */ + max_wide_int wt = arg1; /* Given an integer constant, make new constant with new type, appropriately sign-extended or truncated. */ - t = force_fit_type_double (type, tree_to_double_int (arg1), - !POINTER_TYPE_P (TREE_TYPE (arg1)), - (TREE_INT_CST_HIGH (arg1) < 0 - && (TYPE_UNSIGNED (type) - < TYPE_UNSIGNED (TREE_TYPE (arg1)))) - | TREE_OVERFLOW (arg1)); + t = force_fit_type (type, wt, + !POINTER_TYPE_P (TREE_TYPE (arg1)), + TREE_OVERFLOW (arg1)); return t; } @@ -1586,7 +1582,7 @@ fold_convert_const_int_from_int (tree type, const_tree arg1) static tree fold_convert_const_int_from_real (enum tree_code code, tree type, const_tree arg1) { - int overflow = 0; + bool overflow = false; tree t; /* The following code implements the floating point to integer @@ -1598,7 +1594,7 @@ fold_convert_const_int_from_real (enum tree_code code, tree type, const_tree arg C and C++ standards that simply state that the behavior of FP-to-integer conversion is unspecified upon overflow. */ - double_int val; + wide_int val; REAL_VALUE_TYPE r; REAL_VALUE_TYPE x = TREE_REAL_CST (arg1); @@ -1615,8 +1611,8 @@ fold_convert_const_int_from_real (enum tree_code code, tree type, const_tree arg /* If R is NaN, return zero and show we have an overflow. */ if (REAL_VALUE_ISNAN (r)) { - overflow = 1; - val = double_int_zero; + overflow = true; + val = max_wide_int (0); } /* See if R is less than the lower bound or greater than the @@ -1628,8 +1624,8 @@ fold_convert_const_int_from_real (enum tree_code code, tree type, const_tree arg REAL_VALUE_TYPE l = real_value_from_int_cst (NULL_TREE, lt); if (REAL_VALUES_LESS (r, l)) { - overflow = 1; - val = tree_to_double_int (lt); + overflow = true; + val = max_wide_int (lt); } } @@ -1641,16 +1637,16 @@ fold_convert_const_int_from_real (enum tree_code code, tree type, const_tree arg REAL_VALUE_TYPE u = real_value_from_int_cst (NULL_TREE, ut); if (REAL_VALUES_LESS (u, r)) { - overflow = 1; - val = tree_to_double_int (ut); + overflow = true; + val = max_wide_int (ut); } } } if (! overflow) - real_to_integer2 ((HOST_WIDE_INT *) &val.low, &val.high, &r); + val = real_to_integer (&r, &overflow, TYPE_PRECISION (type)); - t = force_fit_type_double (type, val, -1, overflow | TREE_OVERFLOW (arg1)); + t = force_fit_type (type, val, -1, overflow | TREE_OVERFLOW (arg1)); return t; } @@ -1693,12 +1689,14 @@ fold_convert_const_int_from_fixed (tree type, const_tree arg1) /* Given a fixed-point constant, make new constant with new type, appropriately sign-extended or truncated. */ - t = force_fit_type_double (type, temp, -1, - (temp.is_negative () - && (TYPE_UNSIGNED (type) - < TYPE_UNSIGNED (TREE_TYPE (arg1)))) - | TREE_OVERFLOW (arg1)); - + t = force_fit_type (type, wide_int::from_double_int (temp, + TYPE_PRECISION (type)), + -1, + (temp.is_negative () + && (TYPE_UNSIGNED (type) + < TYPE_UNSIGNED (TREE_TYPE (arg1)))) + | TREE_OVERFLOW (arg1)); + return t; } @@ -1780,9 +1778,18 @@ fold_convert_const_fixed_from_int (tree type, const_tree arg1) FIXED_VALUE_TYPE value; tree t; bool overflow_p; + double_int di; + + gcc_assert (TREE_INT_CST_NUNITS (arg1) <= 2); + + di.low = TREE_INT_CST_ELT (arg1, 0); + if (TREE_INT_CST_NUNITS (arg1) == 1) + di.high = (HOST_WIDE_INT)di.low < 0 ? 
(HOST_WIDE_INT)-1 : 0; + else + di.high = TREE_INT_CST_ELT (arg1, 1); overflow_p = fixed_convert_from_int (&value, TYPE_MODE (type), - TREE_INT_CST (arg1), + di, TYPE_UNSIGNED (TREE_TYPE (arg1)), TYPE_SATURATING (type)); t = build_fixed (type, value); @@ -3400,8 +3407,8 @@ make_bit_field_ref (location_t loc, tree inner, tree type, tree size = TYPE_SIZE (TREE_TYPE (inner)); if ((INTEGRAL_TYPE_P (TREE_TYPE (inner)) || POINTER_TYPE_P (TREE_TYPE (inner))) - && host_integerp (size, 0) - && tree_low_cst (size, 0) == bitsize) + && tree_fits_shwi_p (size) + && tree_to_shwi (size) == bitsize) return fold_convert_loc (loc, type, inner); } @@ -3691,23 +3698,24 @@ decode_field_reference (location_t loc, tree exp, HOST_WIDE_INT *pbitsize, } /* Return nonzero if MASK represents a mask of SIZE ones in the low-order - bit positions. */ + bit positions and the type of MASK is signed. */ static int -all_ones_mask_p (const_tree mask, int size) +all_ones_mask_p (const_tree mask, unsigned int size) { tree type = TREE_TYPE (mask); unsigned int precision = TYPE_PRECISION (type); - tree tmask; - tmask = build_int_cst_type (signed_type_for (type), -1); + /* If this function returns true when the type of the mask is + UNSIGNED, then there will be errors. In particular, see + gcc.c-torture/execute/990326-1.c. There does not appear to be + any documentation paper trail as to why this is so. But the + pre-wide-int code worked with that restriction, and it has been + preserved here. */ + if (size > precision || TYPE_SIGN (type) == UNSIGNED) + return false; - return - tree_int_cst_equal (mask, - const_binop (RSHIFT_EXPR, - const_binop (LSHIFT_EXPR, tmask, - size_int (precision - size)), - size_int (precision - size))); + return wide_int::mask (size, false, precision) == mask; } /* Subroutine for fold: determine if VAL is the INTEGER_CONST that @@ -3719,8 +3727,6 @@ all_ones_mask_p (const_tree mask, int size) static tree sign_bit_p (tree exp, const_tree val) { - unsigned HOST_WIDE_INT mask_lo, lo; - HOST_WIDE_INT mask_hi, hi; int width; tree t; @@ -3735,29 +3741,7 @@ sign_bit_p (tree exp, const_tree val) return NULL_TREE; width = TYPE_PRECISION (t); - if (width > HOST_BITS_PER_WIDE_INT) - { - hi = (unsigned HOST_WIDE_INT) 1 << (width - HOST_BITS_PER_WIDE_INT - 1); - lo = 0; - - mask_hi = ((unsigned HOST_WIDE_INT) -1 - >> (HOST_BITS_PER_DOUBLE_INT - width)); - mask_lo = -1; - } - else - { - hi = 0; - lo = (unsigned HOST_WIDE_INT) 1 << (width - 1); - - mask_hi = 0; - mask_lo = ((unsigned HOST_WIDE_INT) -1 - >> (HOST_BITS_PER_WIDE_INT - width)); - } - - /* We mask off those bits beyond TREE_TYPE (exp) so that we can - treat VAL as if it were unsigned. */ - if ((TREE_INT_CST_HIGH (val) & mask_hi) == hi - && (TREE_INT_CST_LOW (val) & mask_lo) == lo) + if (wide_int (val).only_sign_bit_p (width)) return exp; /* Handle extension from a narrower type. */ @@ -4002,7 +3986,7 @@ make_range_step (location_t loc, enum tree_code code, tree arg0, tree arg1, { in_p = ! 
in_p; high = range_binop (MINUS_EXPR, NULL_TREE, low, 0, - integer_one_node, 0); + build_int_cst (TREE_TYPE (low), 1), 0); low = build_int_cst (arg0_type, 0); } } @@ -4072,9 +4056,9 @@ make_range_step (location_t loc, enum tree_code code, tree arg0, tree arg1, if (n_low && n_high && tree_int_cst_lt (n_high, n_low)) { low = range_binop (PLUS_EXPR, arg0_type, n_high, 0, - integer_one_node, 0); + build_int_cst (TREE_TYPE (n_high), 1), 0); high = range_binop (MINUS_EXPR, arg0_type, n_low, 0, - integer_one_node, 0); + build_int_cst (TREE_TYPE (n_low), 1), 0); /* If the range is of the form +/- [ x+1, x ], we won't be able to normalize it. But then, it represents the @@ -4312,23 +4296,10 @@ build_range_check (location_t loc, tree type, tree exp, int in_p, /* Optimize (c>=1) && (c<=127) into (signed char)c > 0. */ if (integer_onep (low) && TREE_CODE (high) == INTEGER_CST) { - unsigned HOST_WIDE_INT lo; - HOST_WIDE_INT hi; - int prec; + int prec = TYPE_PRECISION (etype); + wide_int osb = wide_int::set_bit_in_zero (prec - 1, prec) - 1; - prec = TYPE_PRECISION (etype); - if (prec <= HOST_BITS_PER_WIDE_INT) - { - hi = 0; - lo = ((unsigned HOST_WIDE_INT) 1 << (prec - 1)) - 1; - } - else - { - hi = ((HOST_WIDE_INT) 1 << (prec - HOST_BITS_PER_WIDE_INT - 1)) - 1; - lo = (unsigned HOST_WIDE_INT) -1; - } - - if (TREE_INT_CST_HIGH (high) == hi && TREE_INT_CST_LOW (high) == lo) + if (osb == high) { if (TYPE_UNSIGNED (etype)) { @@ -4362,7 +4333,7 @@ build_range_check (location_t loc, tree type, tree exp, int in_p, utype = unsigned_type_for (etype); maxv = fold_convert_loc (loc, utype, TYPE_MAX_VALUE (etype)); maxv = range_binop (PLUS_EXPR, NULL_TREE, maxv, 1, - integer_one_node, 1); + build_int_cst (TREE_TYPE (maxv), 1), 1); minv = fold_convert_loc (loc, utype, TYPE_MIN_VALUE (etype)); if (integer_zerop (range_binop (NE_EXPR, integer_type_node, @@ -4410,7 +4381,8 @@ range_predecessor (tree val) && operand_equal_p (val, TYPE_MIN_VALUE (type), 0)) return 0; else - return range_binop (MINUS_EXPR, NULL_TREE, val, 0, integer_one_node, 0); + return range_binop (MINUS_EXPR, NULL_TREE, val, 0, + build_int_cst (TREE_TYPE (val), 1), 0); } /* Return the successor of VAL in its type, handling the infinite case. */ @@ -4424,7 +4396,8 @@ range_successor (tree val) && operand_equal_p (val, TYPE_MAX_VALUE (type), 0)) return 0; else - return range_binop (PLUS_EXPR, NULL_TREE, val, 0, integer_one_node, 0); + return range_binop (PLUS_EXPR, NULL_TREE, val, 0, + build_int_cst (TREE_TYPE (val), 1), 0); } /* Given two ranges, see if we can merge them into one. Return 1 if we @@ -4604,7 +4577,8 @@ merge_ranges (int *pin_p, tree *plow, tree *phigh, int in0_p, tree low0, if (TYPE_UNSIGNED (TREE_TYPE (high1)) && integer_zerop (range_binop (PLUS_EXPR, NULL_TREE, high1, 1, - integer_one_node, 1))) + build_int_cst (TREE_TYPE (high1), 1), + 1))) high1 = 0; break; default: @@ -5056,8 +5030,7 @@ unextend (tree c, int p, int unsignedp, tree mask) /* We work by getting just the sign bit into the low-order bit, then into the high-order bit, then sign-extend. We then XOR that value with C. */ - temp = const_binop (RSHIFT_EXPR, c, size_int (p - 1)); - temp = const_binop (BIT_AND_EXPR, temp, size_int (1)); + temp = wide_int_to_tree (TREE_TYPE (c), (wide_int (c).rshiftu (p - 1)) & 1); /* We must use a signed type in order to get an arithmetic right shift. 
However, we must also avoid introducing accidental overflows, so that @@ -5863,8 +5836,7 @@ extract_muldiv_1 (tree t, tree c, enum tree_code code, tree wide_type, && (tcode == RSHIFT_EXPR || TYPE_UNSIGNED (TREE_TYPE (op0))) /* const_binop may not detect overflow correctly, so check for it explicitly here. */ - && TYPE_PRECISION (TREE_TYPE (size_one_node)) > TREE_INT_CST_LOW (op1) - && TREE_INT_CST_HIGH (op1) == 0 + && wide_int::gtu_p (TYPE_PRECISION (TREE_TYPE (size_one_node)), op1) && 0 != (t1 = fold_convert (ctype, const_binop (LSHIFT_EXPR, size_one_node, @@ -6010,21 +5982,17 @@ extract_muldiv_1 (tree t, tree c, enum tree_code code, tree wide_type, assuming no overflow. */ if (tcode == code) { - double_int mul; + wide_int mul; bool overflow_p; - unsigned prec = TYPE_PRECISION (ctype); - bool uns = TYPE_UNSIGNED (ctype); - double_int diop1 = tree_to_double_int (op1).ext (prec, uns); - double_int dic = tree_to_double_int (c).ext (prec, uns); - mul = diop1.mul_with_sign (dic, false, &overflow_p); - overflow_p = ((!uns && overflow_p) - | TREE_OVERFLOW (c) | TREE_OVERFLOW (op1)); - if (!double_int_fits_to_tree_p (ctype, mul) - && ((uns && tcode != MULT_EXPR) || !uns)) - overflow_p = 1; + signop sign = TYPE_SIGN (ctype); + mul = wide_int (op1).mul_full (wide_int (c), sign); + overflow_p = TREE_OVERFLOW (c) | TREE_OVERFLOW (op1); + if (!mul.fits_to_tree_p (ctype) + && ((sign == UNSIGNED && tcode != MULT_EXPR) || sign == SIGNED)) + overflow_p = true; if (!overflow_p) return fold_build2 (tcode, ctype, fold_convert (ctype, op0), - double_int_to_tree (ctype, mul)); + wide_int_to_tree (ctype, mul)); } /* If these operations "cancel" each other, we have the main @@ -6423,29 +6391,27 @@ fold_div_compare (location_t loc, tree prod, tmp, hi, lo; tree arg00 = TREE_OPERAND (arg0, 0); tree arg01 = TREE_OPERAND (arg0, 1); - double_int val; - bool unsigned_p = TYPE_UNSIGNED (TREE_TYPE (arg0)); - bool neg_overflow; + wide_int val; + signop sign = TYPE_SIGN (TREE_TYPE (arg0)); + bool neg_overflow = false; bool overflow; /* We have to do this the hard way to detect unsigned overflow. prod = int_const_binop (MULT_EXPR, arg01, arg1); */ - val = TREE_INT_CST (arg01) - .mul_with_sign (TREE_INT_CST (arg1), unsigned_p, &overflow); - prod = force_fit_type_double (TREE_TYPE (arg00), val, -1, overflow); + val = wide_int (arg01).mul (arg1, sign, &overflow); + prod = force_fit_type (TREE_TYPE (arg00), val, -1, overflow); neg_overflow = false; - if (unsigned_p) + if (sign == UNSIGNED) { tmp = int_const_binop (MINUS_EXPR, arg01, build_int_cst (TREE_TYPE (arg01), 1)); lo = prod; /* Likewise hi = int_const_binop (PLUS_EXPR, prod, tmp). */ - val = TREE_INT_CST (prod) - .add_with_sign (TREE_INT_CST (tmp), unsigned_p, &overflow); - hi = force_fit_type_double (TREE_TYPE (arg00), val, - -1, overflow | TREE_OVERFLOW (prod)); + val = wide_int (prod).add (tmp, sign, &overflow); + hi = force_fit_type (TREE_TYPE (arg00), val, + -1, overflow | TREE_OVERFLOW (prod)); } else if (tree_int_cst_sgn (arg01) >= 0) { @@ -6636,12 +6602,9 @@ fold_single_bit_test (location_t loc, enum tree_code code, not overflow, adjust BITNUM and INNER. 
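(For instance, testing bit 3 of X >> 2 is the same as testing bit 5 of X.)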
*/ if (TREE_CODE (inner) == RSHIFT_EXPR && TREE_CODE (TREE_OPERAND (inner, 1)) == INTEGER_CST - && TREE_INT_CST_HIGH (TREE_OPERAND (inner, 1)) == 0 - && bitnum < TYPE_PRECISION (type) - && 0 > compare_tree_int (TREE_OPERAND (inner, 1), - bitnum - TYPE_PRECISION (type))) + && (wide_int (TREE_OPERAND (inner, 1)) + bitnum).ltu_p (TYPE_PRECISION (type))) { - bitnum += TREE_INT_CST_LOW (TREE_OPERAND (inner, 1)); + bitnum += tree_to_hwi (TREE_OPERAND (inner, 1)); inner = TREE_OPERAND (inner, 0); } @@ -6899,8 +6862,8 @@ fold_sign_changed_comparison (location_t loc, enum tree_code code, tree type, return NULL_TREE; if (TREE_CODE (arg1) == INTEGER_CST) - arg1 = force_fit_type_double (inner_type, tree_to_double_int (arg1), - 0, TREE_OVERFLOW (arg1)); + arg1 = force_fit_type (inner_type, wide_int (arg1), + 0, TREE_OVERFLOW (arg1)); else arg1 = fold_convert_loc (loc, inner_type, arg1); @@ -6988,7 +6951,7 @@ try_move_mult_to_index (location_t loc, tree addr, tree op1) else { /* Try if delta is a multiple of step. */ - tree tmp = div_if_zero_remainder (EXACT_DIV_EXPR, op1, step); + tree tmp = div_if_zero_remainder (op1, step); if (! tmp) goto cont; delta = tmp; @@ -7060,7 +7023,7 @@ cont: else { /* Try if delta is a multiple of step. */ - tree tmp = div_if_zero_remainder (EXACT_DIV_EXPR, op1, step); + tree tmp = div_if_zero_remainder (op1, step); if (! tmp) continue; delta = tmp; @@ -7216,7 +7179,8 @@ fold_plusminus_mult_expr (location_t loc, enum tree_code code, tree type, arg10 = build_one_cst (type); /* As we canonicalize A - 2 to A + -2 get rid of that sign for the purpose of this canonicalization. */ - if (TREE_INT_CST_HIGH (arg1) == -1 + if (TYPE_SIGN (TREE_TYPE (arg1)) == SIGNED + && wide_int (arg1).neg_p (SIGNED) && negate_expr_p (arg1) && code == PLUS_EXPR) { @@ -7248,14 +7212,14 @@ fold_plusminus_mult_expr (location_t loc, enum tree_code code, tree type, /* No identical multiplicands; see if we can find a common power-of-two factor in non-power-of-two multiplies. This can help in multi-dimensional array access. */ - else if (host_integerp (arg01, 0) - && host_integerp (arg11, 0)) + else if (tree_fits_shwi_p (arg01) + && tree_fits_shwi_p (arg11)) { HOST_WIDE_INT int01, int11, tmp; bool swap = false; tree maybe_same; - int01 = TREE_INT_CST_LOW (arg01); - int11 = TREE_INT_CST_LOW (arg11); + int01 = tree_to_shwi (arg01); + int11 = tree_to_shwi (arg11); /* Move min of absolute values to int11. 
*/ if (absu_hwi (int01) < absu_hwi (int11)) @@ -7306,6 +7270,7 @@ native_encode_int (const_tree expr, unsigned char *ptr, int len) int total_bytes = GET_MODE_SIZE (TYPE_MODE (type)); int byte, offset, word, words; unsigned char value; + wide_int wexpr = wide_int (expr); if (total_bytes > len) return 0; @@ -7314,11 +7279,7 @@ native_encode_int (const_tree expr, unsigned char *ptr, int len) for (byte = 0; byte < total_bytes; byte++) { int bitpos = byte * BITS_PER_UNIT; - if (bitpos < HOST_BITS_PER_WIDE_INT) - value = (unsigned char) (TREE_INT_CST_LOW (expr) >> bitpos); - else - value = (unsigned char) (TREE_INT_CST_HIGH (expr) - >> (bitpos - HOST_BITS_PER_WIDE_INT)); + value = wexpr.extract_to_hwi (bitpos, BITS_PER_UNIT); if (total_bytes > UNITS_PER_WORD) { @@ -7481,9 +7442,9 @@ native_encode_string (const_tree expr, unsigned char *ptr, int len) if (TREE_CODE (type) != ARRAY_TYPE || TREE_CODE (TREE_TYPE (type)) != INTEGER_TYPE || GET_MODE_BITSIZE (TYPE_MODE (TREE_TYPE (type))) != BITS_PER_UNIT - || !host_integerp (TYPE_SIZE_UNIT (type), 0)) + || !tree_fits_shwi_p (TYPE_SIZE_UNIT (type))) return 0; - total_bytes = tree_low_cst (TYPE_SIZE_UNIT (type), 0); + total_bytes = tree_to_shwi (TYPE_SIZE_UNIT (type)); if (total_bytes > len) return 0; if (TREE_STRING_LENGTH (expr) < total_bytes) @@ -7540,15 +7501,15 @@ static tree native_interpret_int (tree type, const unsigned char *ptr, int len) { int total_bytes = GET_MODE_SIZE (TYPE_MODE (type)); - double_int result; + wide_int result; if (total_bytes > len || total_bytes * BITS_PER_UNIT > HOST_BITS_PER_DOUBLE_INT) return NULL_TREE; - result = double_int::from_buffer (ptr, total_bytes); + result = wide_int::from_buffer (ptr, total_bytes); - return double_int_to_tree (type, result); + return wide_int_to_tree (type, result); } @@ -8091,11 +8052,11 @@ fold_unary_loc (location_t loc, enum tree_code code, tree type, tree op0) change = 1; else if (TYPE_PRECISION (TREE_TYPE (and1)) <= HOST_BITS_PER_WIDE_INT - && host_integerp (and1, 1)) + && tree_fits_uhwi_p (and1)) { unsigned HOST_WIDE_INT cst; - cst = tree_low_cst (and1, 1); + cst = tree_to_uhwi (and1); cst &= (HOST_WIDE_INT) -1 << (TYPE_PRECISION (TREE_TYPE (and1)) - 1); change = (cst == 0); @@ -8113,8 +8074,8 @@ fold_unary_loc (location_t loc, enum tree_code code, tree type, tree op0) } if (change) { - tem = force_fit_type_double (type, tree_to_double_int (and1), - 0, TREE_OVERFLOW (and1)); + tem = force_fit_type (type, max_wide_int (and1), + 0, TREE_OVERFLOW (and1)); return fold_build2_loc (loc, BIT_AND_EXPR, type, fold_convert_loc (loc, type, and0), tem); } @@ -8896,7 +8857,7 @@ maybe_canonicalize_comparison (location_t loc, enum tree_code code, tree type, static bool pointer_may_wrap_p (tree base, tree offset, HOST_WIDE_INT bitpos) { - double_int di_offset, total; + wide_int wi_offset, total; if (!POINTER_TYPE_P (TREE_TYPE (base))) return true; @@ -8905,19 +8866,19 @@ pointer_may_wrap_p (tree base, tree offset, HOST_WIDE_INT bitpos) return true; if (offset == NULL_TREE) - di_offset = double_int_zero; + wi_offset = wide_int::zero (TYPE_PRECISION (TREE_TYPE (base))); else if (TREE_CODE (offset) != INTEGER_CST || TREE_OVERFLOW (offset)) return true; else - di_offset = TREE_INT_CST (offset); + wi_offset = offset; bool overflow; - double_int units = double_int::from_uhwi (bitpos / BITS_PER_UNIT); - total = di_offset.add_with_sign (units, true, &overflow); + wide_int units = wide_int::from_shwi (bitpos / BITS_PER_UNIT); + total = wi_offset.add (units, UNSIGNED, &overflow); if (overflow) return true; - if 
(total.high != 0) + if (!total.fits_uhwi_p ()) return true; HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (TREE_TYPE (base))); @@ -8935,7 +8896,7 @@ pointer_may_wrap_p (tree base, tree offset, HOST_WIDE_INT bitpos) size = base_size; } - return total.low > (unsigned HOST_WIDE_INT) size; + return total.to_uhwi () > (unsigned HOST_WIDE_INT) size; } /* Subroutine of fold_binary. This routine performs all of the @@ -9071,7 +9032,7 @@ fold_comparison (location_t loc, enum tree_code code, tree type, indirect_base0 = true; } offset0 = TREE_OPERAND (arg0, 1); - if (host_integerp (offset0, 0)) + if (tree_fits_shwi_p (offset0)) { HOST_WIDE_INT off = size_low_cst (offset0); if ((HOST_WIDE_INT) (((unsigned HOST_WIDE_INT) off) @@ -9105,7 +9066,7 @@ fold_comparison (location_t loc, enum tree_code code, tree type, indirect_base1 = true; } offset1 = TREE_OPERAND (arg1, 1); - if (host_integerp (offset1, 0)) + if (tree_fits_shwi_p (offset1)) { HOST_WIDE_INT off = size_low_cst (offset1); if ((HOST_WIDE_INT) (((unsigned HOST_WIDE_INT) off) @@ -9743,7 +9704,7 @@ get_pointer_modulus_and_residue (tree expr, unsigned HOST_WIDE_INT *residue, inner_code = TREE_CODE (op1); if (inner_code == INTEGER_CST) { - *residue += TREE_INT_CST_LOW (op1); + *residue += tree_to_hwi (op1); return modulus; } else if (inner_code == MULT_EXPR) @@ -9754,7 +9715,7 @@ get_pointer_modulus_and_residue (tree expr, unsigned HOST_WIDE_INT *residue, unsigned HOST_WIDE_INT align; /* Compute the greatest power-of-2 divisor of op1. */ - align = TREE_INT_CST_LOW (op1); + align = tree_to_hwi (op1); align &= -align; /* If align is non-zero and less than *modulus, replace @@ -10465,9 +10426,7 @@ fold_binary_loc (location_t loc, code11 = TREE_CODE (tree11); if (code01 == INTEGER_CST && code11 == INTEGER_CST - && TREE_INT_CST_HIGH (tree01) == 0 - && TREE_INT_CST_HIGH (tree11) == 0 - && ((TREE_INT_CST_LOW (tree01) + TREE_INT_CST_LOW (tree11)) + && ((max_wide_int (tree01) + tree11) == element_precision (TREE_TYPE (TREE_OPERAND (arg0, 0))))) { tem = build2_loc (loc, LROTATE_EXPR, @@ -11249,20 +11208,20 @@ fold_binary_loc (location_t loc, && TREE_CODE (arg1) == INTEGER_CST && TREE_CODE (TREE_OPERAND (arg0, 1)) == INTEGER_CST) { - double_int c1, c2, c3, msk; + wide_int c1, c2, c3, msk; int width = TYPE_PRECISION (type), w; - c1 = tree_to_double_int (TREE_OPERAND (arg0, 1)); - c2 = tree_to_double_int (arg1); + c1 = TREE_OPERAND (arg0, 1); + c2 = arg1; /* If (C1&C2) == C1, then (X&C1)|C2 becomes (X,C2). */ if ((c1 & c2) == c1) return omit_one_operand_loc (loc, type, arg1, TREE_OPERAND (arg0, 0)); - msk = double_int::mask (width); + msk = wide_int::mask (width, false, TYPE_PRECISION (TREE_TYPE (arg1))); /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2. 
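(For instance, with 8-bit operands C1 == 0x0f and C2 == 0xf0, C1 | C2 covers every bit, so (X & 0x0f) | 0xf0 is exactly X | 0xf0.)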
*/ - if (msk.and_not (c1 | c2).is_zero ()) + if (msk.and_not (c1 | c2).zero_p ()) return fold_build2_loc (loc, BIT_IOR_EXPR, type, TREE_OPERAND (arg0, 0), arg1); @@ -11272,16 +11231,12 @@ fold_binary_loc, c1 &= msk; c2 &= msk; c3 = c1.and_not (c2); - for (w = BITS_PER_UNIT; - w <= width && w <= HOST_BITS_PER_WIDE_INT; - w <<= 1) + for (w = BITS_PER_UNIT; w <= width; w <<= 1) { - unsigned HOST_WIDE_INT mask - = (unsigned HOST_WIDE_INT) -1 >> (HOST_BITS_PER_WIDE_INT - w); - if (((c1.low | c2.low) & mask) == mask - && (c1.low & ~mask) == 0 && c1.high == 0) + wide_int mask = wide_int::mask (w, false, TYPE_PRECISION (type)); + if (((c1 | c2) & mask) == mask && c1.and_not (mask).zero_p ()) { - c3 = double_int::from_uhwi (mask); + c3 = mask; break; } } @@ -11289,8 +11244,8 @@ fold_binary_loc, return fold_build2_loc (loc, BIT_IOR_EXPR, type, fold_build2_loc (loc, BIT_AND_EXPR, type, TREE_OPERAND (arg0, 0), - double_int_to_tree (type, - c3)), + wide_int_to_tree (type, + c3)), arg1); } @@ -11660,12 +11615,11 @@ fold_binary_loc, multiple of 1 << CST. */ if (TREE_CODE (arg1) == INTEGER_CST) { - double_int cst1 = tree_to_double_int (arg1); - double_int ncst1 = (-cst1).ext(TYPE_PRECISION (TREE_TYPE (arg1)), - TYPE_UNSIGNED (TREE_TYPE (arg1))); + wide_int cst1 = arg1; + wide_int ncst1 = -cst1; if ((cst1 & ncst1) == ncst1 && multiple_of_p (type, arg0, - double_int_to_tree (TREE_TYPE (arg1), ncst1))) + wide_int_to_tree (TREE_TYPE (arg1), ncst1))) return fold_convert_loc (loc, type, arg0); } @@ -11676,20 +11630,18 @@ fold_binary_loc, && TREE_CODE (TREE_OPERAND (arg0, 1)) == INTEGER_CST) { int arg1tz - = tree_to_double_int (TREE_OPERAND (arg0, 1)).trailing_zeros (); + = wide_int (TREE_OPERAND (arg0, 1)).ctz ().to_shwi (); if (arg1tz > 0) { - double_int arg1mask, masked; - arg1mask = ~double_int::mask (arg1tz); - arg1mask = arg1mask.ext (TYPE_PRECISION (type), - TYPE_UNSIGNED (type)); - masked = arg1mask & tree_to_double_int (arg1); - if (masked.is_zero ()) + wide_int arg1mask, masked; + arg1mask = wide_int::mask (arg1tz, true, TYPE_PRECISION (type)); + masked = arg1mask & wide_int (arg1); + if (masked.zero_p ()) return omit_two_operands_loc (loc, type, build_zero_cst (type), arg0, arg1); - else if (masked != tree_to_double_int (arg1)) + else if (masked != arg1) return fold_build2_loc (loc, code, type, op0, - double_int_to_tree (type, masked)); + wide_int_to_tree (type, masked)); } } @@ -11700,10 +11652,10 @@ fold_binary_loc, and for - instead of + (or unary - instead of +) and/or ^ instead of |. If B is constant and (B & M) == 0, fold into A & M. */ - if (host_integerp (arg1, 1)) + if (TREE_CODE (arg1) == INTEGER_CST) { - unsigned HOST_WIDE_INT cst1 = tree_low_cst (arg1, 1); - if (~cst1 && (cst1 & (cst1 + 1)) == 0 + wide_int cst1 = arg1; + if ((~cst1 != 0) && (cst1 & (cst1 + 1)) == 0 && INTEGRAL_TYPE_P (TREE_TYPE (arg0)) && (TREE_CODE (arg0) == PLUS_EXPR || TREE_CODE (arg0) == MINUS_EXPR @@ -11713,8 +11665,8 @@ fold_binary_loc, { tree pmop[2]; int which = 0; - unsigned HOST_WIDE_INT cst0; - + wide_int cst0; + /* Now we know that arg0 is (C + D) or (C - D) or -C and arg1 (M) is == (1LL << cst) - 1. Store C into PMOP[0] and D into PMOP[1]. 
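(For instance, (A + 8) & 7 folds to A & 7: the addend 8 has no bits under the mask and cannot generate carries into them.)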
*/ @@ -11725,12 +11677,10 @@ fold_binary_loc (location_t loc, pmop[1] = TREE_OPERAND (arg0, 1); which = 1; } - - if (!host_integerp (TYPE_MAX_VALUE (TREE_TYPE (arg0)), 1) - || (tree_low_cst (TYPE_MAX_VALUE (TREE_TYPE (arg0)), 1) - & cst1) != cst1) + + if ((wide_int::max_value (TREE_TYPE (arg0)) & cst1) != cst1) which = -1; - + for (; which >= 0; which--) switch (TREE_CODE (pmop[which])) { @@ -11740,9 +11690,7 @@ fold_binary_loc (location_t loc, if (TREE_CODE (TREE_OPERAND (pmop[which], 1)) != INTEGER_CST) break; - /* tree_low_cst not used, because we don't care about - the upper bits. */ - cst0 = TREE_INT_CST_LOW (TREE_OPERAND (pmop[which], 1)); + cst0 = TREE_OPERAND (pmop[which], 1); cst0 &= cst1; if (TREE_CODE (pmop[which]) == BIT_AND_EXPR) { @@ -11761,13 +11709,13 @@ fold_binary_loc (location_t loc, omitted (assumed 0). */ if ((TREE_CODE (arg0) == PLUS_EXPR || (TREE_CODE (arg0) == MINUS_EXPR && which == 0)) - && (TREE_INT_CST_LOW (pmop[which]) & cst1) == 0) + && (wide_int (pmop[which]) & cst1) == 0) pmop[which] = NULL; break; default: break; } - + /* Only build anything new if we optimized one or both arguments above. */ if (pmop[0] != TREE_OPERAND (arg0, 0) @@ -11785,7 +11733,7 @@ fold_binary_loc (location_t loc, if (pmop[1] != NULL) pmop[1] = fold_convert_loc (loc, utype, pmop[1]); } - + if (TREE_CODE (arg0) == NEGATE_EXPR) tem = fold_build1_loc (loc, NEGATE_EXPR, utype, pmop[0]); else if (TREE_CODE (arg0) == PLUS_EXPR) @@ -11812,7 +11760,7 @@ fold_binary_loc (location_t loc, } } } - + t1 = distribute_bit_expr (loc, code, type, arg0, arg1); if (t1 != NULL_TREE) return t1; @@ -11820,11 +11768,11 @@ fold_binary_loc (location_t loc, if (TREE_CODE (arg1) == INTEGER_CST && TREE_CODE (arg0) == NOP_EXPR && TYPE_UNSIGNED (TREE_TYPE (TREE_OPERAND (arg0, 0)))) { + wide_int mask; prec = TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (arg0, 0))); - if (prec < BITS_PER_WORD && prec < HOST_BITS_PER_WIDE_INT - && (~TREE_INT_CST_LOW (arg1) - & (((HOST_WIDE_INT) 1 << prec) - 1)) == 0) + mask = wide_int (arg1).zforce_to_size (prec); + if (mask.minus_one_p ()) return fold_convert_loc (loc, type, TREE_OPERAND (arg0, 0)); } @@ -11849,10 +11797,10 @@ fold_binary_loc (location_t loc, /* If arg0 is derived from the address of an object or function, we may be able to fold this expression using the object or function's alignment. */ - if (POINTER_TYPE_P (TREE_TYPE (arg0)) && host_integerp (arg1, 1)) + if (POINTER_TYPE_P (TREE_TYPE (arg0)) && tree_fits_uhwi_p (arg1)) { unsigned HOST_WIDE_INT modulus, residue; - unsigned HOST_WIDE_INT low = TREE_INT_CST_LOW (arg1); + unsigned HOST_WIDE_INT low = tree_to_uhwi (arg1); modulus = get_pointer_modulus_and_residue (arg0, &residue, integer_onep (arg1)); @@ -11869,16 +11817,16 @@ fold_binary_loc (location_t loc, if the new mask might be further optimized. 
*/ if ((TREE_CODE (arg0) == LSHIFT_EXPR || TREE_CODE (arg0) == RSHIFT_EXPR) - && host_integerp (TREE_OPERAND (arg0, 1), 1) - && host_integerp (arg1, TYPE_UNSIGNED (TREE_TYPE (arg1))) - && tree_low_cst (TREE_OPERAND (arg0, 1), 1) + && tree_fits_uhwi_p (TREE_OPERAND (arg0, 1)) + && tree_fits_hwi_p (arg1) + && tree_to_uhwi (TREE_OPERAND (arg0, 1)) < TYPE_PRECISION (TREE_TYPE (arg0)) && TYPE_PRECISION (TREE_TYPE (arg0)) <= HOST_BITS_PER_WIDE_INT - && tree_low_cst (TREE_OPERAND (arg0, 1), 1) > 0) + && tree_to_uhwi (TREE_OPERAND (arg0, 1)) > 0) { - unsigned int shiftc = tree_low_cst (TREE_OPERAND (arg0, 1), 1); + unsigned int shiftc = tree_to_uhwi (TREE_OPERAND (arg0, 1)); unsigned HOST_WIDE_INT mask - = tree_low_cst (arg1, TYPE_UNSIGNED (TREE_TYPE (arg1))); + = tree_to_hwi (arg1, TYPE_SIGN (TREE_TYPE (arg1))); unsigned HOST_WIDE_INT newmask, zerobits = 0; tree shift_type = TREE_TYPE (arg0); @@ -12228,17 +12176,11 @@ fold_binary_loc (location_t loc, tree sum = fold_binary_loc (loc, PLUS_EXPR, TREE_TYPE (arg1), arg1, TREE_OPERAND (arg0, 1)); if (sum && integer_zerop (sum)) { - unsigned long pow2; - - if (TREE_INT_CST_LOW (arg1)) - pow2 = exact_log2 (TREE_INT_CST_LOW (arg1)); - else - pow2 = exact_log2 (TREE_INT_CST_HIGH (arg1)) - + HOST_BITS_PER_WIDE_INT; - + tree pow2 + = wide_int_to_tree (integer_type_node, + wide_int (arg1).exact_log2 ()); return fold_build2_loc (loc, RSHIFT_EXPR, type, - TREE_OPERAND (arg0, 0), - build_int_cst (integer_type_node, pow2)); + TREE_OPERAND (arg0, 0), pow2); } } @@ -12256,13 +12198,9 @@ fold_binary_loc (location_t loc, if (integer_pow2p (sval) && tree_int_cst_sgn (sval) > 0) { tree sh_cnt = TREE_OPERAND (arg1, 1); - unsigned long pow2; - - if (TREE_INT_CST_LOW (sval)) - pow2 = exact_log2 (TREE_INT_CST_LOW (sval)); - else - pow2 = exact_log2 (TREE_INT_CST_HIGH (sval)) - + HOST_BITS_PER_WIDE_INT; + tree pow2 + = wide_int_to_tree (TREE_TYPE (sh_cnt), + wide_int (sval).exact_log2 ()); if (strict_overflow_p) fold_overflow_warning (("assuming signed overflow does not " @@ -12270,11 +12208,9 @@ fold_binary_loc (location_t loc, WARN_STRICT_OVERFLOW_MISC); sh_cnt = fold_build2_loc (loc, PLUS_EXPR, TREE_TYPE (sh_cnt), - sh_cnt, - build_int_cst (TREE_TYPE (sh_cnt), - pow2)); + sh_cnt, pow2); return fold_build2_loc (loc, RSHIFT_EXPR, type, - fold_convert_loc (loc, type, arg0), sh_cnt); + fold_convert_loc (loc, type, arg0), sh_cnt); } } @@ -12297,8 +12233,7 @@ fold_binary_loc (location_t loc, /* X / -1 is -X. */ if (!TYPE_UNSIGNED (type) && TREE_CODE (arg1) == INTEGER_CST - && TREE_INT_CST_LOW (arg1) == (unsigned HOST_WIDE_INT) -1 - && TREE_INT_CST_HIGH (arg1) == -1) + && wide_int (arg1).minus_one_p ()) return fold_convert_loc (loc, type, negate_expr (arg0)); /* Convert -A / -B to A / B when the type is signed and overflow is @@ -12380,16 +12315,15 @@ fold_binary_loc (location_t loc, /* X % -1 is zero. */ if (!TYPE_UNSIGNED (type) && TREE_CODE (arg1) == INTEGER_CST - && TREE_INT_CST_LOW (arg1) == (unsigned HOST_WIDE_INT) -1 - && TREE_INT_CST_HIGH (arg1) == -1) + && wide_int (arg1).minus_one_p ()) return omit_one_operand_loc (loc, type, integer_zero_node, arg0); /* X % -C is the same as X % C. */ if (code == TRUNC_MOD_EXPR - && !TYPE_UNSIGNED (type) + && TYPE_SIGN (type) == SIGNED && TREE_CODE (arg1) == INTEGER_CST && !TREE_OVERFLOW (arg1) - && TREE_INT_CST_HIGH (arg1) < 0 + && wide_int (arg1).neg_p (SIGNED) && !TYPE_OVERFLOW_TRAPS (type) /* Avoid this transformation if C is INT_MIN, i.e. C == -C. 
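(In an 8-bit type, C == -128 negates to itself, so the rewrite would need the unrepresentable value 128 and must be skipped.)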
*/ && !sign_bit_p (arg1, arg1)) return fold_build2_loc (loc, code, type, fold_convert_loc (loc, type, arg0), fold_convert_loc (loc, type, negate_expr (arg1))); @@ -12483,13 +12417,13 @@ fold_binary_loc, prec = element_precision (type); /* Turn (a OP c1) OP c2 into a OP (c1+c2). */ - if (TREE_CODE (op0) == code && host_integerp (arg1, true) - && TREE_INT_CST_LOW (arg1) < prec - && host_integerp (TREE_OPERAND (arg0, 1), true) - && TREE_INT_CST_LOW (TREE_OPERAND (arg0, 1)) < prec) + if (TREE_CODE (op0) == code && tree_fits_uhwi_p (arg1) + && tree_to_uhwi (arg1) < prec + && tree_fits_uhwi_p (TREE_OPERAND (arg0, 1)) + && tree_to_uhwi (TREE_OPERAND (arg0, 1)) < prec) { - unsigned int low = (TREE_INT_CST_LOW (TREE_OPERAND (arg0, 1)) - + TREE_INT_CST_LOW (arg1)); + HOST_WIDE_INT low = (tree_to_shwi (TREE_OPERAND (arg0, 1)) + + tree_to_shwi (arg1)); /* Deal with a OP (c1 + c2) being undefined but (a OP c1) OP c2 being well defined. */ @@ -12513,13 +12447,13 @@ fold_binary_loc, if (((code == LSHIFT_EXPR && TREE_CODE (arg0) == RSHIFT_EXPR) || (TYPE_UNSIGNED (type) && code == RSHIFT_EXPR && TREE_CODE (arg0) == LSHIFT_EXPR)) - && host_integerp (arg1, false) - && TREE_INT_CST_LOW (arg1) < prec - && host_integerp (TREE_OPERAND (arg0, 1), false) - && TREE_INT_CST_LOW (TREE_OPERAND (arg0, 1)) < prec) + && tree_fits_shwi_p (arg1) + && tree_to_shwi (arg1) < prec + && tree_fits_shwi_p (TREE_OPERAND (arg0, 1)) + && tree_to_shwi (TREE_OPERAND (arg0, 1)) < prec) { - HOST_WIDE_INT low0 = TREE_INT_CST_LOW (TREE_OPERAND (arg0, 1)); - HOST_WIDE_INT low1 = TREE_INT_CST_LOW (arg1); + HOST_WIDE_INT low0 = tree_to_shwi (TREE_OPERAND (arg0, 1)); + HOST_WIDE_INT low1 = tree_to_shwi (arg1); tree lshift; tree arg00; @@ -12557,16 +12491,13 @@ fold_binary_loc, fold_build2_loc (loc, code, type, TREE_OPERAND (arg0, 1), arg1)); - /* Two consecutive rotates adding up to the precision of the - type can be ignored. */ + /* Two consecutive rotates adding up to some integer + multiple of the precision of the type can be ignored. */ if (code == RROTATE_EXPR && TREE_CODE (arg1) == INTEGER_CST && TREE_CODE (arg0) == RROTATE_EXPR && TREE_CODE (TREE_OPERAND (arg0, 1)) == INTEGER_CST - && TREE_INT_CST_HIGH (arg1) == 0 - && TREE_INT_CST_HIGH (TREE_OPERAND (arg0, 1)) == 0 - && ((TREE_INT_CST_LOW (arg1) - + TREE_INT_CST_LOW (TREE_OPERAND (arg0, 1))) - == prec)) + && (wide_int (arg1) + TREE_OPERAND (arg0, 1)) .umod_trunc (prec).zero_p ()) return TREE_OPERAND (arg0, 0); /* Fold (X & C2) << C1 into (X << C1) & (C2 << C1) @@ -12888,7 +12819,7 @@ fold_binary_loc, && operand_equal_p (tree_strip_nop_conversions (TREE_OPERAND (arg0, 1)), arg1, 0) - && (TREE_INT_CST_LOW (TREE_OPERAND (arg0, 0)) & 1) == 1) + && (wide_int (TREE_OPERAND (arg0, 0)) & 1) == 1) { return omit_two_operands_loc (loc, type, code == NE_EXPR @@ -12979,15 +12910,14 @@ fold_binary_loc, prec = TYPE_PRECISION (itype); /* Check for a valid shift count. */ - if (TREE_INT_CST_HIGH (arg001) == 0 - && TREE_INT_CST_LOW (arg001) < prec) + if (wide_int::ltu_p (arg001, prec)) { tree arg01 = TREE_OPERAND (arg0, 1); tree arg000 = TREE_OPERAND (TREE_OPERAND (arg0, 0), 0); unsigned HOST_WIDE_INT log2 = tree_log2 (arg01); /* If (C2 << C1) doesn't overflow, then ((X >> C1) & C2) != 0 can be rewritten as (X & (C2 << C1)) != 0. */ - if ((log2 + TREE_INT_CST_LOW (arg001)) < prec) +
*/ - if ((log2 + TREE_INT_CST_LOW (arg001)) < prec) + if ((log2 + tree_to_uhwi (arg001)) < prec) { tem = fold_build2_loc (loc, LSHIFT_EXPR, itype, arg01, arg001); tem = fold_build2_loc (loc, BIT_AND_EXPR, itype, arg000, tem); @@ -13105,9 +13035,7 @@ fold_binary_loc (location_t loc, tree arg00 = TREE_OPERAND (arg0, 0); tree arg01 = TREE_OPERAND (arg0, 1); tree itype = TREE_TYPE (arg00); - if (TREE_INT_CST_HIGH (arg01) == 0 - && TREE_INT_CST_LOW (arg01) - == (unsigned HOST_WIDE_INT) (TYPE_PRECISION (itype) - 1)) + if (wide_int::eq_p (arg01, TYPE_PRECISION (itype) - 1)) { if (TYPE_UNSIGNED (itype)) { @@ -13509,59 +13437,17 @@ fold_binary_loc (location_t loc, the specified precision will have known values. */ { tree arg1_type = TREE_TYPE (arg1); - unsigned int width = TYPE_PRECISION (arg1_type); + unsigned int prec = TYPE_PRECISION (arg1_type); if (TREE_CODE (arg1) == INTEGER_CST - && width <= HOST_BITS_PER_DOUBLE_INT && (INTEGRAL_TYPE_P (arg1_type) || POINTER_TYPE_P (arg1_type))) { - HOST_WIDE_INT signed_max_hi; - unsigned HOST_WIDE_INT signed_max_lo; - unsigned HOST_WIDE_INT max_hi, max_lo, min_hi, min_lo; - - if (width <= HOST_BITS_PER_WIDE_INT) - { - signed_max_lo = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - - 1; - signed_max_hi = 0; - max_hi = 0; - - if (TYPE_UNSIGNED (arg1_type)) - { - max_lo = ((unsigned HOST_WIDE_INT) 2 << (width - 1)) - 1; - min_lo = 0; - min_hi = 0; - } - else - { - max_lo = signed_max_lo; - min_lo = ((unsigned HOST_WIDE_INT) -1 << (width - 1)); - min_hi = -1; - } - } - else - { - width -= HOST_BITS_PER_WIDE_INT; - signed_max_lo = -1; - signed_max_hi = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - - 1; - max_lo = -1; - min_lo = 0; - - if (TYPE_UNSIGNED (arg1_type)) - { - max_hi = ((unsigned HOST_WIDE_INT) 2 << (width - 1)) - 1; - min_hi = 0; - } - else - { - max_hi = signed_max_hi; - min_hi = ((unsigned HOST_WIDE_INT) -1 << (width - 1)); - } - } + wide_int max = wide_int::max_value (arg1_type); + wide_int signed_max = wide_int::max_value (prec, SIGNED); + wide_int min = wide_int::min_value (arg1_type); + wide_int wi_arg1 = arg1; - if ((unsigned HOST_WIDE_INT) TREE_INT_CST_HIGH (arg1) == max_hi - && TREE_INT_CST_LOW (arg1) == max_lo) + if (wi_arg1 == max) switch (code) { case GT_EXPR: @@ -13582,9 +13468,7 @@ fold_binary_loc (location_t loc, default: break; } - else if ((unsigned HOST_WIDE_INT) TREE_INT_CST_HIGH (arg1) - == max_hi - && TREE_INT_CST_LOW (arg1) == max_lo - 1) + else if (wi_arg1 == (max - 1)) switch (code) { case GT_EXPR: @@ -13604,9 +13488,7 @@ fold_binary_loc (location_t loc, default: break; } - else if ((unsigned HOST_WIDE_INT) TREE_INT_CST_HIGH (arg1) - == min_hi - && TREE_INT_CST_LOW (arg1) == min_lo) + else if (wi_arg1 == min) switch (code) { case LT_EXPR: @@ -13624,19 +13506,19 @@ fold_binary_loc (location_t loc, default: break; } - else if ((unsigned HOST_WIDE_INT) TREE_INT_CST_HIGH (arg1) - == min_hi - && TREE_INT_CST_LOW (arg1) == min_lo + 1) + else if (wi_arg1 == (min + 1)) switch (code) { case GE_EXPR: - arg1 = const_binop (MINUS_EXPR, arg1, integer_one_node); + arg1 = const_binop (MINUS_EXPR, arg1, + build_int_cst (TREE_TYPE (arg1), 1)); return fold_build2_loc (loc, NE_EXPR, type, fold_convert_loc (loc, TREE_TYPE (arg1), arg0), arg1); case LT_EXPR: - arg1 = const_binop (MINUS_EXPR, arg1, integer_one_node); + arg1 = const_binop (MINUS_EXPR, arg1, + build_int_cst (TREE_TYPE (arg1), 1)); return fold_build2_loc (loc, EQ_EXPR, type, fold_convert_loc (loc, TREE_TYPE (arg1), arg0), @@ -13645,14 +13527,17 @@ fold_binary_loc (location_t loc, break; 
} - else if (TREE_INT_CST_HIGH (arg1) == signed_max_hi - && TREE_INT_CST_LOW (arg1) == signed_max_lo + else if (wi_arg1 == signed_max && TYPE_UNSIGNED (arg1_type) + /* KENNY QUESTIONS THE CHECKING OF THE BITSIZE + HERE. HE FEELS THAT THE PRECISION SHOULD BE + CHECKED */ + /* We will flip the signedness of the comparison operator associated with the mode of arg1, so the sign bit is specified by this mode. Check that arg1 is the signed max associated with this sign bit. */ - && width == GET_MODE_BITSIZE (TYPE_MODE (arg1_type)) + && prec == GET_MODE_BITSIZE (TYPE_MODE (arg1_type)) /* signed_type does not work on pointer types. */ && INTEGRAL_TYPE_P (arg1_type)) { @@ -14163,8 +14048,8 @@ fold_ternary_loc (location_t loc, enum tree_code code, tree type, && TYPE_PRECISION (TREE_TYPE (tem)) < TYPE_PRECISION (type)) { - unsigned HOST_WIDE_INT mask_lo; - HOST_WIDE_INT mask_hi; + wide_int mask; + wide_int wi_arg1 = arg1; int inner_width, outer_width; tree tem_type; @@ -14173,36 +14058,16 @@ fold_ternary_loc (location_t loc, enum tree_code code, tree type, if (outer_width > TYPE_PRECISION (type)) outer_width = TYPE_PRECISION (type); - if (outer_width > HOST_BITS_PER_WIDE_INT) - { - mask_hi = ((unsigned HOST_WIDE_INT) -1 - >> (HOST_BITS_PER_DOUBLE_INT - outer_width)); - mask_lo = -1; - } - else - { - mask_hi = 0; - mask_lo = ((unsigned HOST_WIDE_INT) -1 - >> (HOST_BITS_PER_WIDE_INT - outer_width)); - } - if (inner_width > HOST_BITS_PER_WIDE_INT) - { - mask_hi &= ~((unsigned HOST_WIDE_INT) -1 - >> (HOST_BITS_PER_WIDE_INT - inner_width)); - mask_lo = 0; - } - else - mask_lo &= ~((unsigned HOST_WIDE_INT) -1 - >> (HOST_BITS_PER_WIDE_INT - inner_width)); + mask = wide_int::shifted_mask + (inner_width, outer_width - inner_width, false, + TYPE_PRECISION (TREE_TYPE (arg1))); - if ((TREE_INT_CST_HIGH (arg1) & mask_hi) == mask_hi - && (TREE_INT_CST_LOW (arg1) & mask_lo) == mask_lo) + if (wi_arg1 == mask) { tem_type = signed_type_for (TREE_TYPE (tem)); tem = fold_convert_loc (loc, tem_type, tem); } - else if ((TREE_INT_CST_HIGH (arg1) & mask_hi) == 0 - && (TREE_INT_CST_LOW (arg1) & mask_lo) == 0) + else if ((wi_arg1 & mask).zero_p ()) { tem_type = unsigned_type_for (TREE_TYPE (tem)); tem = fold_convert_loc (loc, tem_type, tem); @@ -14231,9 +14096,9 @@ fold_ternary_loc (location_t loc, enum tree_code code, tree type, tree tem = TREE_OPERAND (arg0, 0); STRIP_NOPS (tem); if (TREE_CODE (tem) == RSHIFT_EXPR - && TREE_CODE (TREE_OPERAND (tem, 1)) == INTEGER_CST + && tree_fits_uhwi_p (TREE_OPERAND (tem, 1)) && (unsigned HOST_WIDE_INT) tree_log2 (arg1) == - TREE_INT_CST_LOW (TREE_OPERAND (tem, 1))) + tree_to_uhwi (TREE_OPERAND (tem, 1))) return fold_build2_loc (loc, BIT_AND_EXPR, type, TREE_OPERAND (tem, 0), arg1); } @@ -14326,9 +14191,9 @@ fold_ternary_loc (location_t loc, enum tree_code code, tree type, && TREE_TYPE (type) == TREE_TYPE (TREE_TYPE (arg0))))) { tree eltype = TREE_TYPE (TREE_TYPE (arg0)); - unsigned HOST_WIDE_INT width = tree_low_cst (TYPE_SIZE (eltype), 1); - unsigned HOST_WIDE_INT n = tree_low_cst (arg1, 1); - unsigned HOST_WIDE_INT idx = tree_low_cst (op2, 1); + unsigned HOST_WIDE_INT width = tree_to_uhwi (TYPE_SIZE (eltype)); + unsigned HOST_WIDE_INT n = tree_to_uhwi (arg1); + unsigned HOST_WIDE_INT idx = tree_to_uhwi (op2); if (n != 0 && (idx % width) == 0 @@ -14399,7 +14264,7 @@ fold_ternary_loc (location_t loc, enum tree_code code, tree type, /* A bit-field-ref that referenced the full argument can be stripped. 
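(For instance, a BIT_FIELD_REF of all 32 bits of a 32-bit X starting at bit zero is just X, modulo a conversion to the result type.)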
*/ if (INTEGRAL_TYPE_P (TREE_TYPE (arg0)) - && TYPE_PRECISION (TREE_TYPE (arg0)) == tree_low_cst (arg1, 1) + && TYPE_PRECISION (TREE_TYPE (arg0)) == tree_to_uhwi (arg1) && integer_zerop (op2)) return fold_convert_loc (loc, type, arg0); @@ -14407,17 +14272,17 @@ fold_ternary_loc (location_t loc, enum tree_code code, tree type, fold (nearly) all BIT_FIELD_REFs. */ if (CONSTANT_CLASS_P (arg0) && can_native_interpret_type_p (type) - && host_integerp (TYPE_SIZE_UNIT (TREE_TYPE (arg0)), 1) + && tree_fits_uhwi_p (TYPE_SIZE_UNIT (TREE_TYPE (arg0))) /* This limitation should not be necessary, we just need to round this up to mode size. */ - && tree_low_cst (op1, 1) % BITS_PER_UNIT == 0 + && tree_to_uhwi (op1) % BITS_PER_UNIT == 0 /* Need bit-shifting of the buffer to relax the following. */ - && tree_low_cst (op2, 1) % BITS_PER_UNIT == 0) + && tree_to_uhwi (op2) % BITS_PER_UNIT == 0) { - unsigned HOST_WIDE_INT bitpos = tree_low_cst (op2, 1); - unsigned HOST_WIDE_INT bitsize = tree_low_cst (op1, 1); + unsigned HOST_WIDE_INT bitpos = tree_to_uhwi (op2); + unsigned HOST_WIDE_INT bitsize = tree_to_uhwi (op1); unsigned HOST_WIDE_INT clen; - clen = tree_low_cst (TYPE_SIZE_UNIT (TREE_TYPE (arg0)), 1); + clen = tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (arg0))); /* ??? We cannot tell native_encode_expr to start at some random byte only. So limit us to a reasonable amount of work. */ @@ -14453,29 +14318,35 @@ fold_ternary_loc (location_t loc, enum tree_code code, tree type, case VEC_PERM_EXPR: if (TREE_CODE (arg2) == VECTOR_CST) { - unsigned int nelts = TYPE_VECTOR_SUBPARTS (type), i, mask; + unsigned int nelts = TYPE_VECTOR_SUBPARTS (type), i; unsigned char *sel = XALLOCAVEC (unsigned char, nelts); - tree t; bool need_mask_canon = false; bool all_in_vec0 = true; bool all_in_vec1 = true; bool maybe_identity = true; bool single_arg = (op0 == op1); bool changed = false; + int nelts_cnt = single_arg ? nelts : nelts * 2; - mask = single_arg ? (nelts - 1) : (2 * nelts - 1); gcc_assert (nelts == VECTOR_CST_NELTS (arg2)); for (i = 0; i < nelts; i++) { tree val = VECTOR_CST_ELT (arg2, i); + wide_int t; + if (TREE_CODE (val) != INTEGER_CST) return NULL_TREE; - sel[i] = TREE_INT_CST_LOW (val) & mask; - if (TREE_INT_CST_HIGH (val) - || ((unsigned HOST_WIDE_INT) - TREE_INT_CST_LOW (val) != sel[i])) - need_mask_canon = true; + /* Make sure that the perm value is in an acceptable + range. */ + t = val; + if (t.gtu_p (nelts_cnt - 1)) + { + need_mask_canon = true; + sel[i] = t.to_uhwi () & (nelts_cnt - 1); + } + else + sel[i] = t.to_uhwi (); if (sel[i] < nelts) all_in_vec1 = false; @@ -14509,7 +14380,7 @@ fold_ternary_loc (location_t loc, enum tree_code code, tree type, && (TREE_CODE (op1) == VECTOR_CST || TREE_CODE (op1) == CONSTRUCTOR)) { - t = fold_vec_perm (type, op0, op1, sel); + tree t = fold_vec_perm (type, op0, op1, sel); if (t != NULL_TREE) return t; } @@ -15291,9 +15162,7 @@ multiple_of_p (tree type, const_tree top, const_tree bottom) op1 = TREE_OPERAND (top, 1); /* const_binop may not detect overflow correctly, so check for it explicitly here. */ - if (TYPE_PRECISION (TREE_TYPE (size_one_node)) - > TREE_INT_CST_LOW (op1) - && TREE_INT_CST_HIGH (op1) == 0 + if (wide_int::gtu_p (TYPE_PRECISION (TREE_TYPE (size_one_node)), op1) && 0 != (t1 = fold_convert (type, const_binop (LSHIFT_EXPR, size_one_node, @@ -15498,11 +15367,11 @@ tree_binary_nonnegative_warnv_p (enum tree_code code, tree type, tree op0, && TREE_CODE (inner1) == INTEGER_TYPE && unsigned1) { unsigned int precision0 = (TREE_CODE (op0) == INTEGER_CST) - ?
tree_int_cst_min_precision (op0, /*unsignedp=*/true) + ? tree_int_cst_min_precision (op0, UNSIGNED) : TYPE_PRECISION (inner0); unsigned int precision1 = (TREE_CODE (op1) == INTEGER_CST) - ? tree_int_cst_min_precision (op1, /*unsignedp=*/true) + ? tree_int_cst_min_precision (op1, UNSIGNED) : TYPE_PRECISION (inner1); return precision0 + precision1 < TYPE_PRECISION (type); @@ -15682,7 +15551,7 @@ tree_call_nonnegative_warnv_p (tree type, tree fndecl, /* True if the 1st argument is nonnegative or the second argument is an even integer. */ if (TREE_CODE (arg1) == INTEGER_CST - && (TREE_INT_CST_LOW (arg1) & 1) == 0) + && (tree_to_hwi (arg1) & 1) == 0) return true; return tree_expr_nonnegative_warnv_p (arg0, strict_overflow_p); @@ -15700,8 +15569,7 @@ tree_call_nonnegative_warnv_p (tree type, tree fndecl, if ((n & 1) == 0) { REAL_VALUE_TYPE cint; - real_from_integer (&cint, VOIDmode, n, - n < 0 ? -1 : 0, 0); + real_from_integer (&cint, VOIDmode, n, SIGNED); if (real_identical (&c, &cint)) return true; } @@ -16266,7 +16134,7 @@ fold_read_from_constant_string (tree exp) && (GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (TREE_TYPE (string)))) == 1)) return build_int_cst_type (TREE_TYPE (exp), (TREE_STRING_POINTER (string) - [TREE_INT_CST_LOW (index)])); + [tree_to_uhwi (index)])); } return NULL; } @@ -16285,12 +16153,12 @@ fold_negate_const (tree arg0, tree type) { case INTEGER_CST: { - double_int val = tree_to_double_int (arg0); + wide_int val = arg0; bool overflow; - val = val.neg_with_overflow (&overflow); - t = force_fit_type_double (type, val, 1, - (overflow | TREE_OVERFLOW (arg0)) - && !TYPE_UNSIGNED (type)); + val = val.neg (&overflow); + t = force_fit_type (type, val, 1, + (overflow | TREE_OVERFLOW (arg0)) + && !TYPE_UNSIGNED (type)); break; } @@ -16332,12 +16200,11 @@ fold_abs_const (tree arg0, tree type) { case INTEGER_CST: { - double_int val = tree_to_double_int (arg0); + wide_int val = arg0; /* If the value is unsigned or non-negative, then the absolute value is the same as the ordinary value. 
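The fold_abs_const test just below now asks val.neg_p (TYPE_SIGN (type)) instead of testing TYPE_UNSIGNED separately: under an UNSIGNED signop no value counts as negative, so the old two-clause condition collapses into one call. A standalone model of that collapse (the model_* names are illustrative stand-ins, not GCC code):

#include <assert.h>
#include <stdint.h>

enum model_signop { MODEL_SIGNED, MODEL_UNSIGNED };

/* Model of val.neg_p (sgn): a value is "negative" only when
   interpreted under the SIGNED signop.  Illustrative only.  */
static int
model_neg_p (int64_t val, enum model_signop sgn)
{
  return sgn == MODEL_SIGNED && val < 0;
}

int
main (void)
{
  assert (!model_neg_p (-5, MODEL_UNSIGNED)); /* |x| == x for unsigned */
  assert (model_neg_p (-5, MODEL_SIGNED));    /* negate, track overflow */
  assert (!model_neg_p (5, MODEL_SIGNED));
  return 0;
}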
*/ - if (TYPE_UNSIGNED (type) - || !val.is_negative ()) + if (!val.neg_p (TYPE_SIGN (type))) t = arg0; /* If the value is negative, then the absolute value is @@ -16345,9 +16212,9 @@ fold_abs_const (tree arg0, tree type) else { bool overflow; - val = val.neg_with_overflow (&overflow); - t = force_fit_type_double (type, val, -1, - overflow | TREE_OVERFLOW (arg0)); + val = val.neg (&overflow); + t = force_fit_type (type, val, -1, + overflow | TREE_OVERFLOW (arg0)); } } break; @@ -16372,12 +16239,12 @@ fold_abs_const (tree arg0, tree type) static tree fold_not_const (const_tree arg0, tree type) { - double_int val; + wide_int val; gcc_assert (TREE_CODE (arg0) == INTEGER_CST); - val = ~tree_to_double_int (arg0); - return force_fit_type_double (type, val, 0, TREE_OVERFLOW (arg0)); + val = ~wide_int (arg0); + return force_fit_type (type, val, 0, TREE_OVERFLOW (arg0)); } /* Given CODE, a relational operator, the target type, TYPE and two @@ -16641,9 +16508,10 @@ fold_indirect_ref_1 (location_t loc, tree type, tree op0) if (TREE_CODE (op00type) == VECTOR_TYPE && type == TREE_TYPE (op00type)) { - HOST_WIDE_INT offset = tree_low_cst (op01, 0); + HOST_WIDE_INT offset = tree_to_shwi (op01); tree part_width = TYPE_SIZE (type); - unsigned HOST_WIDE_INT part_widthi = tree_low_cst (part_width, 0)/BITS_PER_UNIT; + unsigned HOST_WIDE_INT part_widthi + = tree_to_shwi (part_width) / BITS_PER_UNIT; unsigned HOST_WIDE_INT indexi = offset * BITS_PER_UNIT; tree index = bitsize_int (indexi); @@ -16781,8 +16649,7 @@ fold_ignored_result (tree t) } } -/* Return the value of VALUE, rounded up to a multiple of DIVISOR. - This can only be applied to objects of a sizetype. */ +/* Return the value of VALUE, rounded up to a multiple of DIVISOR. */ tree round_up_loc (location_t loc, tree value, int divisor) @@ -16810,24 +16677,19 @@ round_up_loc (location_t loc, tree value, int divisor) { if (TREE_CODE (value) == INTEGER_CST) { - double_int val = tree_to_double_int (value); + wide_int val = value; bool overflow_p; - if ((val.low & (divisor - 1)) == 0) + if ((val & (divisor - 1)) == 0) return value; overflow_p = TREE_OVERFLOW (value); - val.low &= ~(divisor - 1); - val.low += divisor; - if (val.low == 0) - { - val.high++; - if (val.high == 0) - overflow_p = true; - } + val &= ~(divisor - 1); + val += divisor; + if (val.zero_p ()) + overflow_p = true; - return force_fit_type_double (TREE_TYPE (value), val, - -1, overflow_p); + return force_fit_type (TREE_TYPE (value), val, -1, overflow_p); } else { @@ -16948,7 +16810,7 @@ ptr_difference_const (tree e1, tree e2, HOST_WIDE_INT *diff) toffset2 = fold_convert (type, toffset2); tdiff = fold_build2 (MINUS_EXPR, type, toffset1, toffset2); - if (!cst_and_fits_in_hwi (tdiff)) + if (!cst_fits_shwi_p (tdiff)) return false; *diff = int_cst_value (tdiff); diff --git a/gcc/fortran/Make-lang.in b/gcc/fortran/Make-lang.in index ee704233bba..12d0deee20d 100644 --- a/gcc/fortran/Make-lang.in +++ b/gcc/fortran/Make-lang.in @@ -338,7 +338,7 @@ GFORTRAN_TRANS_DEPS = fortran/gfortran.h fortran/libgfortran.h \ fortran/trans-const.h fortran/trans-const.h fortran/trans.h \ fortran/trans-stmt.h fortran/trans-types.h \ $(CONFIG_H) $(SYSTEM_H) $(TREE_H) $(TM_H) coretypes.h $(GGC_H) \ - fortran/iso-c-binding.def fortran/iso-fortran-env.def + fortran/iso-c-binding.def fortran/iso-fortran-env.def wide-int.h CFLAGS-fortran/cpp.o += $(TARGET_SYSTEM_ROOT_DEFINE) fortran/f95-lang.o: $(GFORTRAN_TRANS_DEPS) fortran/mathbuiltins.def \ @@ -373,3 +373,4 @@ fortran/cpp.o: fortran/cpp.c incpath.h incpath.o 
cppbuiltin.h fortran/scanner.o: fortran/scanner.h CFLAGS-fortran/module.o += $(ZLIBINC) fortran/module.o: fortran/scanner.h +fortran/target-memory.o: wide-int.h diff --git a/gcc/fortran/target-memory.c b/gcc/fortran/target-memory.c index 21b44ae482f..2c8ba379dad 100644 --- a/gcc/fortran/target-memory.c +++ b/gcc/fortran/target-memory.c @@ -31,6 +31,7 @@ along with GCC; see the file COPYING3. If not see #include "trans-const.h" #include "trans-types.h" #include "target-memory.h" +#include "wide-int.h" /* --------------------------------------------------------------- */ /* Calculate the size of an expression. */ @@ -251,8 +252,8 @@ encode_derived (gfc_expr *source, unsigned char *buffer, size_t buffer_size) gcc_assert (cmp); if (!c->expr) continue; - ptr = TREE_INT_CST_LOW(DECL_FIELD_OFFSET(cmp->backend_decl)) - + TREE_INT_CST_LOW(DECL_FIELD_BIT_OFFSET(cmp->backend_decl))/8; + ptr = tree_to_hwi (DECL_FIELD_OFFSET(cmp->backend_decl)) + + tree_to_hwi (DECL_FIELD_BIT_OFFSET(cmp->backend_decl))/8; if (c->expr->expr_type == EXPR_NULL) { @@ -427,7 +428,7 @@ gfc_interpret_logical (int kind, unsigned char *buffer, size_t buffer_size, { tree t = native_interpret_expr (gfc_get_logical_type (kind), buffer, buffer_size); - *logical = tree_to_double_int (t).is_zero () ? 0 : 1; + *logical = wide_int (t).zero_p () ? 0 : 1; return size_logical (kind); } @@ -545,9 +546,9 @@ gfc_interpret_derived (unsigned char *buffer, size_t buffer_size, gfc_expr *resu i.e. there are, e.g., no bit fields. */ gcc_assert (cmp->backend_decl); - ptr = TREE_INT_CST_LOW (DECL_FIELD_BIT_OFFSET (cmp->backend_decl)); + ptr = tree_to_hwi (DECL_FIELD_BIT_OFFSET (cmp->backend_decl)); gcc_assert (ptr % 8 == 0); - ptr = ptr/8 + TREE_INT_CST_LOW (DECL_FIELD_OFFSET (cmp->backend_decl)); + ptr = ptr/8 + tree_to_hwi (DECL_FIELD_OFFSET (cmp->backend_decl)); gfc_target_interpret_expr (&buffer[ptr], buffer_size - ptr, e, true); } @@ -659,8 +660,8 @@ expr_to_char (gfc_expr *e, unsigned char *data, unsigned char *chk, size_t len) gcc_assert (cmp && cmp->backend_decl); if (!c->expr) continue; - ptr = TREE_INT_CST_LOW(DECL_FIELD_OFFSET(cmp->backend_decl)) - + TREE_INT_CST_LOW(DECL_FIELD_BIT_OFFSET(cmp->backend_decl))/8; + ptr = tree_to_hwi (DECL_FIELD_OFFSET(cmp->backend_decl)) + + tree_to_hwi (DECL_FIELD_BIT_OFFSET(cmp->backend_decl))/8; expr_to_char (c->expr, &data[ptr], &chk[ptr], len); } return len; diff --git a/gcc/fortran/trans-array.c b/gcc/fortran/trans-array.c index 1a2e508b25b..2d2ff3eafe7 100644 --- a/gcc/fortran/trans-array.c +++ b/gcc/fortran/trans-array.c @@ -90,6 +90,7 @@ along with GCC; see the file COPYING3. 
If not see #include "trans-array.h" #include "trans-const.h" #include "dependency.h" +#include "wide-int.h" static bool gfc_get_array_constructor_size (mpz_t *, gfc_constructor_base); @@ -1684,7 +1685,7 @@ gfc_trans_array_constructor_value (stmtblock_t * pblock, tree type, tmp = gfc_build_addr_expr (NULL_TREE, tmp); init = gfc_build_addr_expr (NULL_TREE, init); - size = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (type)); + size = tree_to_hwi (TYPE_SIZE_UNIT (type)); bound = build_int_cst (size_type_node, n * size); tmp = build_call_expr_loc (input_location, builtin_decl_explicit (BUILT_IN_MEMCPY), @@ -5354,9 +5355,8 @@ gfc_conv_array_initializer (tree type, gfc_expr * expr) { gfc_constructor *c; tree tmp; + addr_wide_int wtmp; gfc_se se; - HOST_WIDE_INT hi; - unsigned HOST_WIDE_INT lo; tree index, range; vec<constructor_elt, va_gc> *v = NULL; @@ -5378,20 +5378,13 @@ gfc_conv_array_initializer (tree type, gfc_expr * expr) else gfc_conv_structure (&se, expr, 1); - tmp = TYPE_MAX_VALUE (TYPE_DOMAIN (type)); - gcc_assert (tmp && INTEGER_CST_P (tmp)); - hi = TREE_INT_CST_HIGH (tmp); - lo = TREE_INT_CST_LOW (tmp); - lo++; - if (lo == 0) - hi++; + wtmp = addr_wide_int (TYPE_MAX_VALUE (TYPE_DOMAIN (type))) + 1; + gcc_assert (!wtmp.zero_p ()); /* This will probably eat buckets of memory for large arrays. */ - while (hi != 0 || lo != 0) + while (!wtmp.zero_p ()) { CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, se.expr); - if (lo == 0) - hi--; - lo--; + wtmp -= 1; } break; diff --git a/gcc/fortran/trans-common.c b/gcc/fortran/trans-common.c index e2234b1ae0c..ffe48925ace 100644 --- a/gcc/fortran/trans-common.c +++ b/gcc/fortran/trans-common.c @@ -397,8 +397,8 @@ build_common_decl (gfc_common_head *com, tree union_type, bool is_init) gfc_warning ("Named COMMON block '%s' at %L shall be of the " "same size as elsewhere (%lu vs %lu bytes)", com->name, &com->where, - (unsigned long) TREE_INT_CST_LOW (size), - (unsigned long) TREE_INT_CST_LOW (DECL_SIZE_UNIT (decl))); + (unsigned long) tree_to_uhwi (size), + (unsigned long) tree_to_uhwi (DECL_SIZE_UNIT (decl))); if (tree_int_cst_lt (DECL_SIZE_UNIT (decl), size)) { diff --git a/gcc/fortran/trans-const.c b/gcc/fortran/trans-const.c index a217c471411..a801ce8c60a 100644 --- a/gcc/fortran/trans-const.c +++ b/gcc/fortran/trans-const.c @@ -32,6 +32,7 @@ along with GCC; see the file COPYING3. If not see #include "trans-const.h" #include "trans-types.h" #include "target-memory.h" +#include "wide-int.h" tree gfc_rank_cst[GFC_MAX_DIMENSIONS + 1]; @@ -144,10 +145,9 @@ gfc_conv_string_init (tree length, gfc_expr * expr) gcc_assert (expr->expr_type == EXPR_CONSTANT); gcc_assert (expr->ts.type == BT_CHARACTER); - gcc_assert (INTEGER_CST_P (length)); - gcc_assert (TREE_INT_CST_HIGH (length) == 0); + gcc_assert (cst_fits_uhwi_p (length)); - len = TREE_INT_CST_LOW (length); + len = tree_to_hwi (length); slen = expr->value.character.length; if (len > slen) @@ -200,8 +200,8 @@ gfc_init_constants (void) tree gfc_conv_mpz_to_tree (mpz_t i, int kind) { - double_int val = mpz_get_double_int (gfc_get_int_type (kind), i, true); - return double_int_to_tree (gfc_get_int_type (kind), val); + wide_int val = wide_int::from_mpz (gfc_get_int_type (kind), i, true); + return wide_int_to_tree (gfc_get_int_type (kind), val); } /* Converts a backend tree into a GMP integer. 
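gfc_conv_mpz_to_tree above and gfc_conv_tree_to_mpz below now route GMP values through wide_int::from_mpz and wide_int::to_mpz, with TYPE_SIGN deciding how the stored bits read back. A standalone sketch of that round trip using plain GMP, with a 16-bit integer standing in for the tree constant (an illustrative assumption, not GCC code):

#include <assert.h>
#include <gmp.h>
#include <stdint.h>

int
main (void)
{
  mpz_t m;
  mpz_init_set_si (m, -1);

  /* The from_mpz step, narrowing to a 16-bit "tree constant".  */
  uint16_t payload = (uint16_t) mpz_get_si (m);       /* 0xffff */

  /* The to_mpz step, reading the bits back under each sign.  */
  mpz_set_si (m, (int16_t) payload);                  /* SIGNED: -1 */
  assert (mpz_cmp_si (m, -1) == 0);
  mpz_set_ui (m, payload);                            /* UNSIGNED: 65535 */
  assert (mpz_cmp_ui (m, 65535) == 0);

  mpz_clear (m);
  return 0;
}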
*/ @@ -209,8 +209,8 @@ gfc_conv_mpz_to_tree (mpz_t i, int kind) void gfc_conv_tree_to_mpz (mpz_t i, tree source) { - double_int val = tree_to_double_int (source); - mpz_set_double_int (i, val, TYPE_UNSIGNED (TREE_TYPE (source))); + wide_int val = source; + val.to_mpz (i, TYPE_SIGN (TREE_TYPE (source))); } /* Converts a real constant into backend form. */ diff --git a/gcc/fortran/trans-decl.c b/gcc/fortran/trans-decl.c index 17089316602..a6dbbbe9c65 100644 --- a/gcc/fortran/trans-decl.c +++ b/gcc/fortran/trans-decl.c @@ -405,10 +405,10 @@ gfc_can_put_var_on_stack (tree size) if (gfc_option.flag_max_stack_var_size < 0) return 1; - if (TREE_INT_CST_HIGH (size) != 0) + if (!cst_fits_uhwi_p (size)) return 0; - low = TREE_INT_CST_LOW (size); + low = tree_to_hwi (size); if (low > (unsigned HOST_WIDE_INT) gfc_option.flag_max_stack_var_size) return 0; diff --git a/gcc/fortran/trans-expr.c b/gcc/fortran/trans-expr.c index dd4c8fc62c1..deb066aef1a 100644 --- a/gcc/fortran/trans-expr.c +++ b/gcc/fortran/trans-expr.c @@ -38,7 +38,7 @@ along with GCC; see the file COPYING3. If not see /* Only for gfc_trans_assign and gfc_trans_pointer_assign. */ #include "trans-stmt.h" #include "dependency.h" - +#include "wide-int.h" /* Convert a scalar to an array descriptor. To be used for assumed-rank arrays. */ @@ -2081,13 +2081,14 @@ gfc_conv_cst_int_power (gfc_se * se, tree lhs, tree rhs) HOST_WIDE_INT m; unsigned HOST_WIDE_INT n; int sgn; + wide_int wrhs = rhs; /* If exponent is too large, we won't expand it anyway, so don't bother with large integer values. */ - if (!TREE_INT_CST (rhs).fits_shwi ()) + if (!wrhs.fits_shwi_p ()) return 0; - m = TREE_INT_CST (rhs).to_shwi (); + m = wrhs.to_shwi (); /* There's no ABS for HOST_WIDE_INT, so here we go. It also takes care of the asymmetric range of the integer type. */ n = (unsigned HOST_WIDE_INT) (m < 0 ? -m : m); @@ -2622,11 +2623,11 @@ gfc_string_to_single_character (tree len, tree str, int kind) { if (len == NULL - || !INTEGER_CST_P (len) || TREE_INT_CST_HIGH (len) != 0 + || !cst_fits_uhwi_p (len) || !POINTER_TYPE_P (TREE_TYPE (str))) return NULL_TREE; - if (TREE_INT_CST_LOW (len) == 1) + if (tree_to_hwi (len) == 1) { str = fold_convert (gfc_get_pchar_type (kind), str); return build_fold_indirect_ref_loc (input_location, str); @@ -2638,8 +2639,8 @@ gfc_string_to_single_character (tree len, tree str, int kind) && TREE_CODE (TREE_OPERAND (TREE_OPERAND (str, 0), 0)) == STRING_CST && array_ref_low_bound (TREE_OPERAND (str, 0)) == TREE_OPERAND (TREE_OPERAND (str, 0), 1) - && TREE_INT_CST_LOW (len) > 1 - && TREE_INT_CST_LOW (len) + && tree_to_uhwi (len) > 1 + && tree_to_uhwi (len) == (unsigned HOST_WIDE_INT) TREE_STRING_LENGTH (TREE_OPERAND (TREE_OPERAND (str, 0), 0))) { @@ -2736,8 +2737,9 @@ gfc_optimize_len_trim (tree len, tree str, int kind) && TREE_CODE (TREE_OPERAND (TREE_OPERAND (str, 0), 0)) == STRING_CST && array_ref_low_bound (TREE_OPERAND (str, 0)) == TREE_OPERAND (TREE_OPERAND (str, 0), 1) - && TREE_INT_CST_LOW (len) >= 1 - && TREE_INT_CST_LOW (len) + && tree_fits_uhwi_p (len) + && tree_to_uhwi (len) >= 1 + && tree_to_uhwi (len) == (unsigned HOST_WIDE_INT) TREE_STRING_LENGTH (TREE_OPERAND (TREE_OPERAND (str, 0), 0))) { diff --git a/gcc/fortran/trans-intrinsic.c b/gcc/fortran/trans-intrinsic.c index 6b85b5b78db..bdfc2d78a05 100644 --- a/gcc/fortran/trans-intrinsic.c +++ b/gcc/fortran/trans-intrinsic.c @@ -39,6 +39,7 @@ along with GCC; see the file COPYING3. If not see #include "trans-array.h" /* Only for gfc_trans_assign and gfc_trans_pointer_assign. 
*/ #include "trans-stmt.h" +#include "wide-int.h" /* This maps Fortran intrinsic math functions to external library or GCC builtin functions. */ @@ -983,12 +984,10 @@ trans_this_image (gfc_se * se, gfc_expr *expr) if (INTEGER_CST_P (dim_arg)) { - int hi, co_dim; + wide_int wdim_arg = dim_arg; - hi = TREE_INT_CST_HIGH (dim_arg); - co_dim = TREE_INT_CST_LOW (dim_arg); - if (hi || co_dim < 1 - || co_dim > GFC_TYPE_ARRAY_CORANK (TREE_TYPE (desc))) + if (wdim_arg.ltu_p (1) + || wdim_arg.gtu_p (GFC_TYPE_ARRAY_CORANK (TREE_TYPE (desc)))) gfc_error ("'dim' argument of %s intrinsic at %L is not a valid " "dimension index", expr->value.function.isym->name, &expr->where); @@ -1345,14 +1344,10 @@ gfc_conv_intrinsic_bound (gfc_se * se, gfc_expr * expr, int upper) if (INTEGER_CST_P (bound)) { - int hi, low; - - hi = TREE_INT_CST_HIGH (bound); - low = TREE_INT_CST_LOW (bound); - if (hi || low < 0 - || ((!as || as->type != AS_ASSUMED_RANK) - && low >= GFC_TYPE_ARRAY_RANK (TREE_TYPE (desc))) - || low > GFC_MAX_DIMENSIONS) + wide_int wbound = bound; + if (((!as || as->type != AS_ASSUMED_RANK) + && wbound.geu_p (GFC_TYPE_ARRAY_RANK (TREE_TYPE (desc)))) + || wbound.gtu_p (GFC_MAX_DIMENSIONS)) gfc_error ("'dim' argument of %s intrinsic at %L is not a valid " "dimension index", upper ? "UBOUND" : "LBOUND", &expr->where); @@ -1547,11 +1542,8 @@ conv_intrinsic_cobound (gfc_se * se, gfc_expr * expr) if (INTEGER_CST_P (bound)) { - int hi, low; - - hi = TREE_INT_CST_HIGH (bound); - low = TREE_INT_CST_LOW (bound); - if (hi || low < 1 || low > GFC_TYPE_ARRAY_CORANK (TREE_TYPE (desc))) + wide_int wbound = bound; + if (wbound.ltu_p (1) || wbound.gtu_p (GFC_TYPE_ARRAY_CORANK (TREE_TYPE (desc)))) gfc_error ("'dim' argument of %s intrinsic at %L is not a valid " "dimension index", expr->value.function.isym->name, &expr->where); diff --git a/gcc/fortran/trans-io.c b/gcc/fortran/trans-io.c index ec17dc97c21..fd5642209d2 100644 --- a/gcc/fortran/trans-io.c +++ b/gcc/fortran/trans-io.c @@ -292,8 +292,8 @@ gfc_build_io_library_fndecls (void) = build_pointer_type (gfc_intio_type_node); types[IOPARM_type_parray] = pchar_type_node; types[IOPARM_type_pchar] = pchar_type_node; - pad_size = 16 * TREE_INT_CST_LOW (TYPE_SIZE_UNIT (pchar_type_node)); - pad_size += 32 * TREE_INT_CST_LOW (TYPE_SIZE_UNIT (integer_type_node)); + pad_size = 16 * tree_to_hwi (TYPE_SIZE_UNIT (pchar_type_node)); + pad_size += 32 * tree_to_hwi (TYPE_SIZE_UNIT (integer_type_node)); pad_idx = build_index_type (size_int (pad_size - 1)); types[IOPARM_type_pad] = build_array_type (char_type_node, pad_idx); diff --git a/gcc/fortran/trans-types.c b/gcc/fortran/trans-types.c index 806accc7015..0d85ce1fd1e 100644 --- a/gcc/fortran/trans-types.c +++ b/gcc/fortran/trans-types.c @@ -861,8 +861,6 @@ gfc_init_types (void) int index; tree type; unsigned n; - unsigned HOST_WIDE_INT hi; - unsigned HOST_WIDE_INT lo; /* Create and name the types. */ #define PUSH_TYPE(name, node) \ @@ -954,13 +952,10 @@ gfc_init_types (void) descriptor. 
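The dim/bound checks in the trans-intrinsic.c hunks above replace an explicit high-word test plus signed comparisons with ltu_p/gtu_p: a single pair of unsigned comparisons also rejects negative arguments, because they wrap to huge unsigned values. A standalone model of the trick (illustrative only, not GCC code):

#include <assert.h>
#include <stdint.h>

/* Model of wdim_arg.ltu_p (1) || wdim_arg.gtu_p (corank): valid
   dimension indices are 1..corank, and anything negative wraps
   to a large unsigned value caught by the upper bound.  */
static int
model_dim_out_of_range (int64_t dim, uint64_t corank)
{
  uint64_t u = (uint64_t) dim;
  return u < 1 || u > corank;
}

int
main (void)
{
  assert (model_dim_out_of_range (0, 4));
  assert (model_dim_out_of_range (-3, 4));   /* wraps; caught by > corank */
  assert (!model_dim_out_of_range (2, 4));
  assert (model_dim_out_of_range (5, 4));
  return 0;
}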
*/ n = TYPE_PRECISION (gfc_array_index_type) - GFC_DTYPE_SIZE_SHIFT; - lo = ~ (unsigned HOST_WIDE_INT) 0; - if (n > HOST_BITS_PER_WIDE_INT) - hi = lo >> (2*HOST_BITS_PER_WIDE_INT - n); - else - hi = 0, lo >>= HOST_BITS_PER_WIDE_INT - n; - gfc_max_array_element_size - = build_int_cst_wide (long_unsigned_type_node, lo, hi); + gfc_max_array_element_size + = wide_int_to_tree (long_unsigned_type_node, + wide_int::max_value (n, UNSIGNED, + TYPE_PRECISION (long_unsigned_type_node))); boolean_type_node = gfc_get_logical_type (gfc_default_logical_kind); boolean_true_node = build_int_cst (boolean_type_node, 1); @@ -1449,7 +1444,7 @@ gfc_get_dtype (tree type) if (tree_int_cst_lt (gfc_max_array_element_size, size)) gfc_fatal_error ("Array element size too big at %C"); - i += TREE_INT_CST_LOW (size) << GFC_DTYPE_SIZE_SHIFT; + i += tree_to_hwi (size) << GFC_DTYPE_SIZE_SHIFT; } dtype = build_int_cst (gfc_array_index_type, i); @@ -1887,7 +1882,7 @@ gfc_get_array_type_bounds (tree etype, int dimen, int codimen, tree * lbound, if (stride) rtype = build_range_type (gfc_array_index_type, gfc_index_zero_node, int_const_binop (MINUS_EXPR, stride, - integer_one_node)); + build_int_cst (TREE_TYPE (stride), 1))); else rtype = gfc_array_range_type; arraytype = build_array_type (etype, rtype); diff --git a/gcc/function.c b/gcc/function.c index 4685cb02727..b8ba63923e1 100644 --- a/gcc/function.c +++ b/gcc/function.c @@ -3813,8 +3813,8 @@ locate_and_pad_parm (enum machine_mode passed_mode, tree type, int in_regs, { tree s2 = sizetree; if (where_pad != none - && (!host_integerp (sizetree, 1) - || (tree_low_cst (sizetree, 1) * BITS_PER_UNIT) % round_boundary)) + && (!tree_fits_uhwi_p (sizetree) + || (tree_to_uhwi (sizetree) * BITS_PER_UNIT) % round_boundary)) s2 = round_up (s2, round_boundary / BITS_PER_UNIT); SUB_PARM_SIZE (locate->slot_offset, s2); } @@ -3856,7 +3856,7 @@ locate_and_pad_parm (enum machine_mode passed_mode, tree type, int in_regs, #ifdef PUSH_ROUNDING if (passed_mode != BLKmode) - sizetree = size_int (PUSH_ROUNDING (TREE_INT_CST_LOW (sizetree))); + sizetree = size_int (PUSH_ROUNDING (tree_to_hwi (sizetree))); #endif /* Pad_below needs the pre-rounded size to know how much to pad below @@ -3866,8 +3866,8 @@ locate_and_pad_parm (enum machine_mode passed_mode, tree type, int in_regs, pad_below (&locate->offset, passed_mode, sizetree); if (where_pad != none - && (!host_integerp (sizetree, 1) - || (tree_low_cst (sizetree, 1) * BITS_PER_UNIT) % round_boundary)) + && (!tree_fits_uhwi_p (sizetree) + || (tree_to_uhwi (sizetree) * BITS_PER_UNIT) % round_boundary)) sizetree = round_up (sizetree, round_boundary / BITS_PER_UNIT); ADD_PARM_SIZE (locate->size, sizetree); @@ -3958,7 +3958,7 @@ pad_below (struct args_size *offset_ptr, enum machine_mode passed_mode, tree siz else { if (TREE_CODE (sizetree) != INTEGER_CST - || (TREE_INT_CST_LOW (sizetree) * BITS_PER_UNIT) % PARM_BOUNDARY) + || (tree_to_hwi (sizetree) * BITS_PER_UNIT) % PARM_BOUNDARY) { /* Round the size up to multiple of PARM_BOUNDARY bits. 
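The locate_and_pad_parm and pad_below hunks above only pad when the byte size times BITS_PER_UNIT is not already a multiple of the boundary. A standalone model of that round-up arithmetic (illustrative only; assumes the boundary is a multiple of the unit, not GCC code):

#include <assert.h>
#include <stdint.h>

#define MODEL_BITS_PER_UNIT 8

/* Round a byte size up to a bit boundary, skipping sizes that are
   already aligned, as the tree_to_uhwi tests above do.  */
static uint64_t
model_round_up_bytes (uint64_t size, unsigned boundary_bits)
{
  unsigned boundary_bytes = boundary_bits / MODEL_BITS_PER_UNIT;
  if ((size * MODEL_BITS_PER_UNIT) % boundary_bits == 0)
    return size;
  return (size / boundary_bytes + 1) * boundary_bytes;
}

int
main (void)
{
  assert (model_round_up_bytes (13, 64) == 16);   /* padded up */
  assert (model_round_up_bytes (16, 64) == 16);   /* already aligned */
  return 0;
}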
*/ tree s2 = round_up (sizetree, PARM_BOUNDARY / BITS_PER_UNIT); diff --git a/gcc/gcse.c b/gcc/gcse.c index 422d6f060eb..a899c3f3c2d 100644 --- a/gcc/gcse.c +++ b/gcc/gcse.c @@ -1997,6 +1997,13 @@ prune_insertions_deletions (int n_elems) bitmap_clear_bit (pre_delete_map[i], j); } + if (dump_file) + { + dump_bitmap_vector (dump_file, "pre_insert_map", "", pre_insert_map, n_edges); + dump_bitmap_vector (dump_file, "pre_delete_map", "", pre_delete_map, + last_basic_block); + } + sbitmap_free (prune_exprs); free (insertions); free (deletions); diff --git a/gcc/gdbinit.in b/gcc/gdbinit.in index 503ef24e301..73cabfebc36 100644 --- a/gcc/gdbinit.in +++ b/gcc/gdbinit.in @@ -94,6 +94,15 @@ Print the expression that is $ in C syntax. Works only when an inferior is executing. end +define pmz +set mpz_out_str(stderr, 10, $) +end + +document pmz +Print the mpz value that is $ +Works only when an inferior is executing. +end + define ptc output (enum tree_code) $.common.code echo \n diff --git a/gcc/genemit.c b/gcc/genemit.c index 692ef526abf..7b1e471c769 100644 --- a/gcc/genemit.c +++ b/gcc/genemit.c @@ -204,6 +204,7 @@ gen_exp (rtx x, enum rtx_code subroutine_type, char *used) case CONST_DOUBLE: case CONST_FIXED: + case CONST_WIDE_INT: /* These shouldn't be written in MD files. Instead, the appropriate routines in varasm.c should be called. */ gcc_unreachable (); diff --git a/gcc/gengenrtl.c b/gcc/gengenrtl.c index 5b5a3ca0308..1f93dd5f7ee 100644 --- a/gcc/gengenrtl.c +++ b/gcc/gengenrtl.c @@ -142,6 +142,7 @@ static int excluded_rtx (int idx) { return ((strcmp (defs[idx].enumname, "CONST_DOUBLE") == 0) + || (strcmp (defs[idx].enumname, "CONST_WIDE_INT") == 0) || (strcmp (defs[idx].enumname, "CONST_FIXED") == 0)); } diff --git a/gcc/gengtype-lex.l b/gcc/gengtype-lex.l index f46cd17586c..7ece2ab8e60 100644 --- a/gcc/gengtype-lex.l +++ b/gcc/gengtype-lex.l @@ -57,7 +57,7 @@ ITYPE {IWORD}({WS}{IWORD})* /* Include '::' in identifiers to capture C++ scope qualifiers. */ ID {CID}({HWS}::{HWS}{CID})* EOID [^[:alnum:]_] -CXX_KEYWORD inline|public:|private:|protected:|template|operator|friend +CXX_KEYWORD inline|public:|private:|protected:|template|operator|friend|static %x in_struct in_struct_comment in_comment %option warn noyywrap nounput nodefault perf-report @@ -110,6 +110,7 @@ CXX_KEYWORD inline|public:|private:|protected:|template|operator|friend "const"/{EOID} /* don't care */ {CXX_KEYWORD}/{EOID} | "~" | +"^" | "&" { *yylval = XDUPVAR (const char, yytext, yyleng, yyleng + 1); return IGNORABLE_CXX_KEYWORD; diff --git a/gcc/gengtype-parse.c b/gcc/gengtype-parse.c index 68d372eb684..acd0bb03df1 100644 --- a/gcc/gengtype-parse.c +++ b/gcc/gengtype-parse.c @@ -230,6 +230,17 @@ require_template_declaration (const char *tmpl_name) /* Read the comma-separated list of identifiers. 
*/ while (token () != '>') { + if (token () == ENUM) + { + advance (); + str = concat (str, "enum ", (char *) 0); + continue; + } + if (token () == NUM) + { + str = concat (str, advance (), (char *) 0); + continue; + } const char *id = require2 (ID, ','); if (id == NULL) id = ","; diff --git a/gcc/gengtype.c b/gcc/gengtype.c index 20854966fac..969bbaa2e3e 100644 --- a/gcc/gengtype.c +++ b/gcc/gengtype.c @@ -1717,7 +1717,7 @@ open_base_files (void) static const char *const ifiles[] = { "config.h", "system.h", "coretypes.h", "tm.h", "hashtab.h", "splay-tree.h", "obstack.h", "bitmap.h", "input.h", - "tree.h", "rtl.h", "function.h", "insn-config.h", "expr.h", + "tree.h", "rtl.h", "wide-int.h", "function.h", "insn-config.h", "expr.h", "hard-reg-set.h", "basic-block.h", "cselib.h", "insn-addr.h", "optabs.h", "libfuncs.h", "debug.h", "ggc.h", "cgraph.h", "tree-flow.h", "reload.h", "cpp-id-data.h", "tree-chrec.h", @@ -5445,6 +5445,7 @@ main (int argc, char **argv) POS_HERE (do_scalar_typedef ("REAL_VALUE_TYPE", &pos)); POS_HERE (do_scalar_typedef ("FIXED_VALUE_TYPE", &pos)); POS_HERE (do_scalar_typedef ("double_int", &pos)); + POS_HERE (do_scalar_typedef ("wide_int", &pos)); POS_HERE (do_scalar_typedef ("uint64_t", &pos)); POS_HERE (do_scalar_typedef ("uint8", &pos)); POS_HERE (do_scalar_typedef ("uintptr_t", &pos)); diff --git a/gcc/genpreds.c b/gcc/genpreds.c index 98488e30b8b..29fafbe79ef 100644 --- a/gcc/genpreds.c +++ b/gcc/genpreds.c @@ -612,7 +612,7 @@ write_one_predicate_function (struct pred_data *p) add_mode_tests (p); /* A normal predicate can legitimately not look at enum machine_mode - if it accepts only CONST_INTs and/or CONST_DOUBLEs. */ + if it accepts only CONST_INTs and/or CONST_WIDE_INT and/or CONST_DOUBLEs. */ printf ("int\n%s (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)\n{\n", p->name); write_predicate_stmts (p->exp); @@ -809,8 +809,11 @@ add_constraint (const char *name, const char *regclass, if (is_const_int || is_const_dbl) { enum rtx_code appropriate_code +#if TARGET_SUPPORTS_WIDE_INT + = is_const_int ? CONST_INT : CONST_WIDE_INT; +#else = is_const_int ? CONST_INT : CONST_DOUBLE; - +#endif /* Consider relaxing this requirement in the future. 
*/ if (regclass || GET_CODE (exp) != AND @@ -1075,12 +1078,17 @@ write_tm_constrs_h (void) if (needs_ival) puts (" if (CONST_INT_P (op))\n" " ival = INTVAL (op);"); +#if TARGET_SUPPORTS_WIDE_INT + if (needs_lval || needs_hval) + error ("you can't use lval or hval"); +#else if (needs_hval) puts (" if (GET_CODE (op) == CONST_DOUBLE && mode == VOIDmode)" " hval = CONST_DOUBLE_HIGH (op);"); if (needs_lval) puts (" if (GET_CODE (op) == CONST_DOUBLE && mode == VOIDmode)" " lval = CONST_DOUBLE_LOW (op);"); +#endif if (needs_rval) puts (" if (GET_CODE (op) == CONST_DOUBLE && mode != VOIDmode)" " rval = CONST_DOUBLE_REAL_VALUE (op);"); diff --git a/gcc/gensupport.c b/gcc/gensupport.c index c7a27c42617..f15ac7ff9e5 100644 --- a/gcc/gensupport.c +++ b/gcc/gensupport.c @@ -2791,7 +2791,13 @@ static const struct std_pred_table std_preds[] = { {"scratch_operand", false, false, {SCRATCH, REG}}, {"immediate_operand", false, true, {UNKNOWN}}, {"const_int_operand", false, false, {CONST_INT}}, +#if TARGET_SUPPORTS_WIDE_INT + {"const_wide_int_operand", false, false, {CONST_WIDE_INT}}, + {"const_scalar_int_operand", false, false, {CONST_INT, CONST_WIDE_INT}}, + {"const_double_operand", false, false, {CONST_DOUBLE}}, +#else {"const_double_operand", false, false, {CONST_INT, CONST_DOUBLE}}, +#endif {"nonimmediate_operand", false, false, {SUBREG, REG, MEM}}, {"nonmemory_operand", false, true, {SUBREG, REG}}, {"push_operand", false, false, {MEM}}, diff --git a/gcc/gimple-fold.c b/gcc/gimple-fold.c index 62c71b6843e..1086b0faf61 100644 --- a/gcc/gimple-fold.c +++ b/gcc/gimple-fold.c @@ -1049,7 +1049,7 @@ gimple_extract_devirt_binfo_from_cst (tree cst) continue; pos = int_bit_position (fld); - size = tree_low_cst (DECL_SIZE (fld), 1); + size = tree_to_uhwi (DECL_SIZE (fld)); if (pos <= offset && (pos + size) > offset) break; } @@ -1112,7 +1112,7 @@ gimple_fold_call (gimple_stmt_iterator *gsi, bool inplace) if (binfo) { HOST_WIDE_INT token - = TREE_INT_CST_LOW (OBJ_TYPE_REF_TOKEN (callee)); + = tree_to_hwi (OBJ_TYPE_REF_TOKEN (callee)); tree fndecl = gimple_get_virt_method_for_binfo (token, binfo); if (fndecl) { @@ -2674,9 +2674,9 @@ get_base_constructor (tree base, HOST_WIDE_INT *bit_offset, { if (!integer_zerop (TREE_OPERAND (base, 1))) { - if (!host_integerp (TREE_OPERAND (base, 1), 0)) + if (!tree_fits_shwi_p (TREE_OPERAND (base, 1))) return NULL_TREE; - *bit_offset += (mem_ref_offset (base).low + *bit_offset += (mem_ref_offset (base).to_short_addr () * BITS_PER_UNIT); } @@ -2771,9 +2771,10 @@ fold_array_ctor_reference (tree type, tree ctor, { unsigned HOST_WIDE_INT cnt; tree cfield, cval; - double_int low_bound, elt_size; - double_int index, max_index; - double_int access_index; + addr_wide_int low_bound; + addr_wide_int elt_size; + addr_wide_int index, max_index; + addr_wide_int access_index; tree domain_type = NULL_TREE, index_type = NULL_TREE; HOST_WIDE_INT inner_offset; @@ -2785,31 +2786,28 @@ fold_array_ctor_reference (tree type, tree ctor, /* Static constructors for variably sized objects makes no sense. */ gcc_assert (TREE_CODE (TYPE_MIN_VALUE (domain_type)) == INTEGER_CST); index_type = TREE_TYPE (TYPE_MIN_VALUE (domain_type)); - low_bound = tree_to_double_int (TYPE_MIN_VALUE (domain_type)); + low_bound = TYPE_MIN_VALUE (domain_type); } else - low_bound = double_int_zero; + low_bound = 0; /* Static constructors for variably sized objects makes no sense. 
*/ gcc_assert (TREE_CODE(TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (ctor)))) == INTEGER_CST); - elt_size = - tree_to_double_int (TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (ctor)))); - + elt_size = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (ctor))); /* We can handle only constantly sized accesses that are known to not be larger than size of array element. */ if (!TYPE_SIZE_UNIT (type) || TREE_CODE (TYPE_SIZE_UNIT (type)) != INTEGER_CST - || elt_size.slt (tree_to_double_int (TYPE_SIZE_UNIT (type)))) + || elt_size.lts_p (addr_wide_int (TYPE_SIZE_UNIT (type)))) return NULL_TREE; /* Compute the array index we look for. */ - access_index = double_int::from_uhwi (offset / BITS_PER_UNIT) - .udiv (elt_size, TRUNC_DIV_EXPR); + access_index = addr_wide_int (offset / BITS_PER_UNIT).udiv_trunc (elt_size); access_index += low_bound; if (index_type) access_index = access_index.ext (TYPE_PRECISION (index_type), - TYPE_UNSIGNED (index_type)); + TYPE_SIGN (index_type)); /* And offset within the access. */ inner_offset = offset % (elt_size.to_uhwi () * BITS_PER_UNIT); @@ -2819,9 +2817,9 @@ fold_array_ctor_reference (tree type, tree ctor, if (inner_offset + size > elt_size.to_uhwi () * BITS_PER_UNIT) return NULL_TREE; - index = low_bound - double_int_one; + index = low_bound - 1; if (index_type) - index = index.ext (TYPE_PRECISION (index_type), TYPE_UNSIGNED (index_type)); + index = index.ext (TYPE_PRECISION (index_type), TYPE_SIGN (index_type)); FOR_EACH_CONSTRUCTOR_ELT (CONSTRUCTOR_ELTS (ctor), cnt, cfield, cval) { @@ -2831,26 +2829,26 @@ fold_array_ctor_reference (tree type, tree ctor, if (cfield) { if (TREE_CODE (cfield) == INTEGER_CST) - max_index = index = tree_to_double_int (cfield); + max_index = index = cfield; else { gcc_assert (TREE_CODE (cfield) == RANGE_EXPR); - index = tree_to_double_int (TREE_OPERAND (cfield, 0)); - max_index = tree_to_double_int (TREE_OPERAND (cfield, 1)); + index = TREE_OPERAND (cfield, 0); + max_index = TREE_OPERAND (cfield, 1); } } else { - index += double_int_one; + index += 1; if (index_type) index = index.ext (TYPE_PRECISION (index_type), - TYPE_UNSIGNED (index_type)); + TYPE_SIGN (index_type)); max_index = index; } /* Do we have match? */ - if (access_index.cmp (index, 1) >= 0 - && access_index.cmp (max_index, 1) <= 0) + if (access_index.cmpu (index) >= 0 + && access_index.cmpu (max_index) <= 0) return fold_ctor_reference (type, cval, inner_offset, size, from_decl); } @@ -2877,10 +2875,9 @@ fold_nonarray_ctor_reference (tree type, tree ctor, tree byte_offset = DECL_FIELD_OFFSET (cfield); tree field_offset = DECL_FIELD_BIT_OFFSET (cfield); tree field_size = DECL_SIZE (cfield); - double_int bitoffset; - double_int byte_offset_cst = tree_to_double_int (byte_offset); - double_int bits_per_unit_cst = double_int::from_uhwi (BITS_PER_UNIT); - double_int bitoffset_end, access_end; + addr_wide_int bitoffset; + addr_wide_int byte_offset_cst = byte_offset; + addr_wide_int bitoffset_end, access_end; /* Variable sized objects in static constructors makes no sense, but field_size can be NULL for flexible array members. */ @@ -2891,30 +2888,29 @@ fold_nonarray_ctor_reference (tree type, tree ctor, : TREE_CODE (TREE_TYPE (cfield)) == ARRAY_TYPE)); /* Compute bit offset of the field. */ - bitoffset = tree_to_double_int (field_offset) - + byte_offset_cst * bits_per_unit_cst; + bitoffset = (addr_wide_int (field_offset) + + byte_offset_cst * addr_wide_int (BITS_PER_UNIT)); /* Compute bit offset where the field ends. 
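The interval test that follows folds a constant field access only when [offset, offset+size) overlaps the field's bits [bitoffset, bitoffset_end) and does not spill past either end. A standalone model of the test (illustrative only, not GCC code):

#include <assert.h>
#include <stdint.h>

/* Model of the cmps/lts_p interval checks below: reject accesses
   with no overlap, then reject accesses spanning field boundaries.  */
static int
model_access_within_field (int64_t offset, int64_t size,
                           int64_t bitoffset, int64_t bitoffset_end)
{
  int64_t access_end = offset + size;
  if (access_end <= bitoffset || offset >= bitoffset_end)
    return 0;                             /* no overlap: keep looking */
  return offset >= bitoffset && access_end <= bitoffset_end;
}

int
main (void)
{
  /* A field occupying bits [32, 64).  */
  assert (model_access_within_field (32, 32, 32, 64));
  assert (!model_access_within_field (48, 32, 32, 64)); /* spans fields */
  assert (!model_access_within_field (0, 16, 32, 64));  /* other field */
  return 0;
}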
*/ if (field_size != NULL_TREE) - bitoffset_end = bitoffset + tree_to_double_int (field_size); + bitoffset_end = bitoffset + addr_wide_int (field_size); else - bitoffset_end = double_int_zero; + bitoffset_end = 0; - access_end = double_int::from_uhwi (offset) - + double_int::from_uhwi (size); + access_end = addr_wide_int (offset) + size; /* Is there any overlap between [OFFSET, OFFSET+SIZE) and [BITOFFSET, BITOFFSET_END)? */ - if (access_end.cmp (bitoffset, 0) > 0 + if (access_end.cmps (bitoffset) > 0 && (field_size == NULL_TREE - || double_int::from_uhwi (offset).slt (bitoffset_end))) + || addr_wide_int (offset).lts_p (bitoffset_end))) { - double_int inner_offset = double_int::from_uhwi (offset) - bitoffset; + addr_wide_int inner_offset = addr_wide_int (offset) - bitoffset; /* We do have overlap. Now see if field is large enough to cover the access. Give up for accesses spanning multiple fields. */ - if (access_end.cmp (bitoffset_end, 0) > 0) + if (access_end.cmps (bitoffset_end) > 0) return NULL_TREE; - if (double_int::from_uhwi (offset).slt (bitoffset)) + if (addr_wide_int (offset).lts_p (bitoffset)) return NULL_TREE; return fold_ctor_reference (type, cval, inner_offset.to_uhwi (), size, @@ -3004,38 +3000,42 @@ fold_const_aggregate_ref_1 (tree t, tree (*valueize) (tree)) && (idx = (*valueize) (TREE_OPERAND (t, 1))) && TREE_CODE (idx) == INTEGER_CST) { - tree low_bound, unit_size; - double_int doffset; + tree low_bound = array_ref_low_bound (t); + tree unit_size = array_ref_element_size (t); /* If the resulting bit-offset is constant, track it. */ - if ((low_bound = array_ref_low_bound (t), - TREE_CODE (low_bound) == INTEGER_CST) - && (unit_size = array_ref_element_size (t), - host_integerp (unit_size, 1)) - && (doffset = (TREE_INT_CST (idx) - TREE_INT_CST (low_bound)) - .sext (TYPE_PRECISION (TREE_TYPE (idx))), - doffset.fits_shwi ())) + if (TREE_CODE (low_bound) == INTEGER_CST + && tree_fits_uhwi_p (unit_size)) { - offset = doffset.to_shwi (); - offset *= TREE_INT_CST_LOW (unit_size); - offset *= BITS_PER_UNIT; - - base = TREE_OPERAND (t, 0); - ctor = get_base_constructor (base, &offset, valueize); - /* Empty constructor. Always fold to 0. */ - if (ctor == error_mark_node) - return build_zero_cst (TREE_TYPE (t)); - /* Out of bound array access. Value is undefined, - but don't fold. */ - if (offset < 0) - return NULL_TREE; - /* We can not determine ctor. */ - if (!ctor) - return NULL_TREE; - return fold_ctor_reference (TREE_TYPE (t), ctor, offset, - TREE_INT_CST_LOW (unit_size) - * BITS_PER_UNIT, - base); + addr_wide_int woffset + = (addr_wide_int (idx) - addr_wide_int (low_bound)) + .sext (TYPE_PRECISION (TREE_TYPE (idx))); + + if (woffset.fits_shwi_p ()) + { + offset = woffset.to_shwi (); + /* TODO: This looks wrong: the multiplication happens before + any check that the product fits. */ + offset *= tree_to_hwi (unit_size); + offset *= BITS_PER_UNIT; + + base = TREE_OPERAND (t, 0); + ctor = get_base_constructor (base, &offset, valueize); + /* Empty constructor. Always fold to 0. */ + if (ctor == error_mark_node) + return build_zero_cst (TREE_TYPE (t)); + /* Out of bound array access. Value is undefined, + but don't fold. */ + if (offset < 0) + return NULL_TREE; + /* We cannot determine ctor. */ + if (!ctor) + return NULL_TREE; + return fold_ctor_reference (TREE_TYPE (t), ctor, offset, + tree_to_uhwi (unit_size) + * BITS_PER_UNIT, + base); + } } } /* Fallthru.
*/ @@ -3105,7 +3105,7 @@ gimple_get_virt_method_for_binfo (HOST_WIDE_INT token, tree known_binfo) if (TREE_CODE (v) == POINTER_PLUS_EXPR) { - offset = tree_low_cst (TREE_OPERAND (v, 1), 1) * BITS_PER_UNIT; + offset = tree_to_uhwi (TREE_OPERAND (v, 1)) * BITS_PER_UNIT; v = TREE_OPERAND (v, 0); } else @@ -3121,7 +3121,7 @@ gimple_get_virt_method_for_binfo (HOST_WIDE_INT token, tree known_binfo) || DECL_INITIAL (v) == error_mark_node) return NULL_TREE; gcc_checking_assert (TREE_CODE (TREE_TYPE (v)) == ARRAY_TYPE); - size = tree_low_cst (TYPE_SIZE (TREE_TYPE (TREE_TYPE (v))), 1); + size = tree_to_uhwi (TYPE_SIZE (TREE_TYPE (TREE_TYPE (v)))); offset += token * size; fn = fold_ctor_reference (TREE_TYPE (TREE_TYPE (v)), DECL_INITIAL (v), offset, size, vtable); @@ -3240,7 +3240,7 @@ gimple_val_nonnegative_real_p (tree val) arg1 = gimple_call_arg (def_stmt, 1); if (TREE_CODE (arg1) == INTEGER_CST - && (TREE_INT_CST_LOW (arg1) & 1) == 0) + && (tree_to_hwi (arg1) & 1) == 0) return true; break; @@ -3261,7 +3261,7 @@ gimple_val_nonnegative_real_p (tree val) if ((n & 1) == 0) { REAL_VALUE_TYPE cint; - real_from_integer (&cint, VOIDmode, n, n < 0 ? -1 : 0, 0); + real_from_integer (&cint, VOIDmode, n, SIGNED); if (real_identical (&c, &cint)) return true; } diff --git a/gcc/gimple-pretty-print.c b/gcc/gimple-pretty-print.c index 1d4068079ec..67050d0e77b 100644 --- a/gcc/gimple-pretty-print.c +++ b/gcc/gimple-pretty-print.c @@ -716,7 +716,7 @@ dump_gimple_call (pretty_printer *buffer, gimple gs, int spc, int flags) pp_string (buffer, " [ "); /* Get the transaction code properties. */ - props = TREE_INT_CST_LOW (t); + props = tree_to_hwi (t); if (props & PR_INSTRUMENTEDCODE) pp_string (buffer, "instrumentedCode "); diff --git a/gcc/gimple-ssa-strength-reduction.c b/gcc/gimple-ssa-strength-reduction.c index e85e6293db4..78f0fbab082 100644 --- a/gcc/gimple-ssa-strength-reduction.c +++ b/gcc/gimple-ssa-strength-reduction.c @@ -48,6 +48,8 @@ along with GCC; see the file COPYING3. If not see #include "expmed.h" #include "params.h" #include "hash-table.h" +#include "wide-int-print.h" + /* Information about a strength reduction candidate. Each statement in the candidate table represents an expression of one of the @@ -229,7 +231,7 @@ struct slsr_cand_d tree stride; /* The index constant i. */ - double_int index; + max_wide_int index; /* The type of the candidate. This is normally the type of base_expr, but casts may have occurred when combining feeding instructions. @@ -304,7 +306,7 @@ typedef const struct cand_chain_d *const_cand_chain_t; struct incr_info_d { /* The increment that relates a candidate to its basis. */ - double_int incr; + max_wide_int incr; /* How many times the increment occurs in the candidate tree. */ unsigned count; @@ -552,7 +554,7 @@ record_potential_basis (slsr_cand_t c) static slsr_cand_t alloc_cand_and_find_basis (enum cand_kind kind, gimple gs, tree base, - double_int index, tree stride, tree ctype, + const max_wide_int &index, tree stride, tree ctype, unsigned savings) { slsr_cand_t c = (slsr_cand_t) obstack_alloc (&cand_obstack, @@ -601,8 +603,8 @@ stmt_cost (gimple gs, bool speed) case MULT_EXPR: rhs2 = gimple_assign_rhs2 (gs); - if (host_integerp (rhs2, 0)) - return mult_by_coeff_cost (TREE_INT_CST_LOW (rhs2), lhs_mode, speed); + if (tree_fits_shwi_p (rhs2)) + return mult_by_coeff_cost (tree_to_shwi (rhs2), lhs_mode, speed); gcc_assert (TREE_CODE (rhs1) != INTEGER_CST); return mul_cost (speed, lhs_mode); @@ -743,8 +745,8 @@ slsr_process_phi (gimple phi, bool speed) CAND_PHI. 
*/ base_type = TREE_TYPE (arg0_base); - c = alloc_cand_and_find_basis (CAND_PHI, phi, arg0_base, double_int_zero, - integer_one_node, base_type, savings); + c = alloc_cand_and_find_basis (CAND_PHI, phi, arg0_base, + 0, integer_one_node, base_type, savings); /* Add the candidate to the statement-candidate mapping. */ add_cand_for_stmt (phi, c); @@ -770,38 +772,36 @@ slsr_process_phi (gimple phi, bool speed) *PINDEX: C1 + (C2 * C3) + C4 */ static bool -restructure_reference (tree *pbase, tree *poffset, double_int *pindex, +restructure_reference (tree *pbase, tree *poffset, max_wide_int *pindex, tree *ptype) { tree base = *pbase, offset = *poffset; - double_int index = *pindex; - double_int bpu = double_int::from_uhwi (BITS_PER_UNIT); - tree mult_op0, mult_op1, t1, t2, type; - double_int c1, c2, c3, c4; + max_wide_int index = *pindex; + wide_int bpu = BITS_PER_UNIT; + tree mult_op0, t1, t2, type; + max_wide_int c1, c2, c3, c4; if (!base || !offset || TREE_CODE (base) != MEM_REF || TREE_CODE (offset) != MULT_EXPR || TREE_CODE (TREE_OPERAND (offset, 1)) != INTEGER_CST - || !index.umod (bpu, FLOOR_MOD_EXPR).is_zero ()) + || !index.umod_floor (bpu).zero_p ()) return false; t1 = TREE_OPERAND (base, 0); - c1 = mem_ref_offset (base); + c1 = max_wide_int::from_wide_int (mem_ref_offset (base)); type = TREE_TYPE (TREE_OPERAND (base, 1)); mult_op0 = TREE_OPERAND (offset, 0); - mult_op1 = TREE_OPERAND (offset, 1); - - c3 = tree_to_double_int (mult_op1); + c3 = TREE_OPERAND (offset, 1); if (TREE_CODE (mult_op0) == PLUS_EXPR) if (TREE_CODE (TREE_OPERAND (mult_op0, 1)) == INTEGER_CST) { t2 = TREE_OPERAND (mult_op0, 0); - c2 = tree_to_double_int (TREE_OPERAND (mult_op0, 1)); + c2 = TREE_OPERAND (mult_op0, 1); } else return false; @@ -811,7 +811,7 @@ restructure_reference (tree *pbase, tree *poffset, double_int *pindex, if (TREE_CODE (TREE_OPERAND (mult_op0, 1)) == INTEGER_CST) { t2 = TREE_OPERAND (mult_op0, 0); - c2 = -tree_to_double_int (TREE_OPERAND (mult_op0, 1)); + c2 = -(max_wide_int)TREE_OPERAND (mult_op0, 1); } else return false; @@ -819,14 +819,14 @@ restructure_reference (tree *pbase, tree *poffset, double_int *pindex, else { t2 = mult_op0; - c2 = double_int_zero; + c2 = 0; } - c4 = index.udiv (bpu, FLOOR_DIV_EXPR); + c4 = index.udiv_floor (bpu); *pbase = t1; *poffset = fold_build2 (MULT_EXPR, sizetype, t2, - double_int_to_tree (sizetype, c3)); + wide_int_to_tree (sizetype, c3)); *pindex = c1 + c2 * c3 + c4; *ptype = type; @@ -843,7 +843,7 @@ slsr_process_ref (gimple gs) HOST_WIDE_INT bitsize, bitpos; enum machine_mode mode; int unsignedp, volatilep; - double_int index; + max_wide_int index; slsr_cand_t c; if (gimple_vdef (gs)) @@ -859,7 +859,7 @@ slsr_process_ref (gimple gs) base = get_inner_reference (ref_expr, &bitsize, &bitpos, &offset, &mode, &unsignedp, &volatilep, false); - index = double_int::from_uhwi (bitpos); + index = bitpos; if (!restructure_reference (&base, &offset, &index, &type)) return; @@ -880,7 +880,7 @@ static slsr_cand_t create_mul_ssa_cand (gimple gs, tree base_in, tree stride_in, bool speed) { tree base = NULL_TREE, stride = NULL_TREE, ctype = NULL_TREE; - double_int index; + max_wide_int index; unsigned savings = 0; slsr_cand_t c; slsr_cand_t base_cand = base_cand_from_table (base_in); @@ -912,7 +912,7 @@ create_mul_ssa_cand (gimple gs, tree base_in, tree stride_in, bool speed) ============================ X = B + ((i' * S) * Z) */ base = base_cand->base_expr; - index = base_cand->index * tree_to_double_int (base_cand->stride); + index = base_cand->index * base_cand->stride; 
stride = stride_in; ctype = base_cand->cand_type; if (has_single_use (base_in)) @@ -931,7 +931,7 @@ create_mul_ssa_cand (gimple gs, tree base_in, tree stride_in, bool speed) /* No interpretations had anything useful to propagate, so produce X = (Y + 0) * Z. */ base = base_in; - index = double_int_zero; + index = 0; stride = stride_in; ctype = TREE_TYPE (base_in); } @@ -950,7 +950,7 @@ static slsr_cand_t create_mul_imm_cand (gimple gs, tree base_in, tree stride_in, bool speed) { tree base = NULL_TREE, stride = NULL_TREE, ctype = NULL_TREE; - double_int index, temp; + max_wide_int index, temp; unsigned savings = 0; slsr_cand_t c; slsr_cand_t base_cand = base_cand_from_table (base_in); @@ -968,9 +968,8 @@ create_mul_imm_cand (gimple gs, tree base_in, tree stride_in, bool speed) X = (B + i') * (S * c) */ base = base_cand->base_expr; index = base_cand->index; - temp = tree_to_double_int (base_cand->stride) - * tree_to_double_int (stride_in); - stride = double_int_to_tree (TREE_TYPE (stride_in), temp); + temp = (max_wide_int (base_cand->stride) * stride_in); + stride = wide_int_to_tree (TREE_TYPE (stride_in), temp); ctype = base_cand->cand_type; if (has_single_use (base_in)) savings = (base_cand->dead_savings @@ -991,7 +990,7 @@ create_mul_imm_cand (gimple gs, tree base_in, tree stride_in, bool speed) + stmt_cost (base_cand->cand_stmt, speed)); } else if (base_cand->kind == CAND_ADD - && base_cand->index.is_one () + && base_cand->index.one_p () && TREE_CODE (base_cand->stride) == INTEGER_CST) { /* Y = B + (1 * S), S constant @@ -999,7 +998,7 @@ create_mul_imm_cand (gimple gs, tree base_in, tree stride_in, bool speed) =========================== X = (B + S) * c */ base = base_cand->base_expr; - index = tree_to_double_int (base_cand->stride); + index = base_cand->stride; stride = stride_in; ctype = base_cand->cand_type; if (has_single_use (base_in)) @@ -1018,7 +1017,7 @@ create_mul_imm_cand (gimple gs, tree base_in, tree stride_in, bool speed) /* No interpretations had anything useful to propagate, so produce X = (Y + 0) * c. 
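create_mul_imm_cand above records X = Y * c for Y = (B + i') * S, S constant, as the equivalent candidate (B + i') * (S * c); only the stride picks up the new factor, which is exactly the max_wide_int multiplication in the hunk. A standalone check of that identity (illustrative only, not GCC code):

#include <assert.h>
#include <stdint.h>

int
main (void)
{
  int64_t B = 7, i = 3, S = 4, c = 5;
  int64_t Y = (B + i) * S;              /* existing candidate */
  int64_t X = Y * c;                    /* statement being recorded */
  assert (X == (B + i) * (S * c));      /* recorded interpretation */
  return 0;
}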
*/ base = base_in; - index = double_int_zero; + index = 0; stride = stride_in; ctype = TREE_TYPE (base_in); } @@ -1081,7 +1080,7 @@ create_add_ssa_cand (gimple gs, tree base_in, tree addend_in, bool subtract_p, bool speed) { tree base = NULL_TREE, stride = NULL_TREE, ctype = NULL; - double_int index; + max_wide_int index; unsigned savings = 0; slsr_cand_t c; slsr_cand_t base_cand = base_cand_from_table (base_in); @@ -1092,7 +1091,7 @@ create_add_ssa_cand (gimple gs, tree base_in, tree addend_in, while (addend_cand && !base && addend_cand->kind != CAND_PHI) { if (addend_cand->kind == CAND_MULT - && addend_cand->index.is_zero () + && addend_cand->index.zero_p () && TREE_CODE (addend_cand->stride) == INTEGER_CST) { /* Z = (B + 0) * S, S constant @@ -1100,7 +1099,7 @@ create_add_ssa_cand (gimple gs, tree base_in, tree addend_in, =========================== X = Y + ((+/-1 * S) * B) */ base = base_in; - index = tree_to_double_int (addend_cand->stride); + index = addend_cand->stride; if (subtract_p) index = -index; stride = addend_cand->base_expr; @@ -1119,7 +1118,7 @@ create_add_ssa_cand (gimple gs, tree base_in, tree addend_in, while (base_cand && !base && base_cand->kind != CAND_PHI) { if (base_cand->kind == CAND_ADD - && (base_cand->index.is_zero () + && (base_cand->index.zero_p () || operand_equal_p (base_cand->stride, integer_zero_node, 0))) { @@ -1128,7 +1127,7 @@ create_add_ssa_cand (gimple gs, tree base_in, tree addend_in, ============================ X = B + (+/-1 * Z) */ base = base_cand->base_expr; - index = subtract_p ? double_int_minus_one : double_int_one; + index = subtract_p ? -1 : 1; stride = addend_in; ctype = base_cand->cand_type; if (has_single_use (base_in)) @@ -1142,7 +1141,7 @@ create_add_ssa_cand (gimple gs, tree base_in, tree addend_in, while (subtrahend_cand && !base && subtrahend_cand->kind != CAND_PHI) { if (subtrahend_cand->kind == CAND_MULT - && subtrahend_cand->index.is_zero () + && subtrahend_cand->index.zero_p () && TREE_CODE (subtrahend_cand->stride) == INTEGER_CST) { /* Z = (B + 0) * S, S constant @@ -1150,7 +1149,7 @@ create_add_ssa_cand (gimple gs, tree base_in, tree addend_in, =========================== Value: X = Y + ((-1 * S) * B) */ base = base_in; - index = tree_to_double_int (subtrahend_cand->stride); + index = subtrahend_cand->stride; index = -index; stride = subtrahend_cand->base_expr; ctype = TREE_TYPE (base_in); @@ -1177,7 +1176,7 @@ create_add_ssa_cand (gimple gs, tree base_in, tree addend_in, /* No interpretations had anything useful to propagate, so produce X = Y + (1 * Z). */ base = base_in; - index = subtract_p ? double_int_minus_one : double_int_one; + index = subtract_p ? -1 : 1; stride = addend_in; ctype = TREE_TYPE (base_in); } @@ -1192,22 +1191,21 @@ create_add_ssa_cand (gimple gs, tree base_in, tree addend_in, about BASE_IN into the new candidate. Return the new candidate. 
*/ static slsr_cand_t -create_add_imm_cand (gimple gs, tree base_in, double_int index_in, bool speed) +create_add_imm_cand (gimple gs, tree base_in, max_wide_int index_in, bool speed) { enum cand_kind kind = CAND_ADD; tree base = NULL_TREE, stride = NULL_TREE, ctype = NULL_TREE; - double_int index, multiple; + max_wide_int index, multiple; unsigned savings = 0; slsr_cand_t c; slsr_cand_t base_cand = base_cand_from_table (base_in); while (base_cand && !base && base_cand->kind != CAND_PHI) { - bool unsigned_p = TYPE_UNSIGNED (TREE_TYPE (base_cand->stride)); + signop sign = TYPE_SIGN (TREE_TYPE (base_cand->stride)); if (TREE_CODE (base_cand->stride) == INTEGER_CST - && index_in.multiple_of (tree_to_double_int (base_cand->stride), - unsigned_p, &multiple)) + && index_in.multiple_of_p (base_cand->stride, sign, &multiple)) { /* Y = (B + i') * S, S constant, c = kS for some integer k X = Y + c @@ -1292,10 +1290,10 @@ slsr_process_add (gimple gs, tree rhs1, tree rhs2, bool speed) } else { - double_int index; + max_wide_int index; /* Record an interpretation for the add-immediate. */ - index = tree_to_double_int (rhs2); + index = rhs2; if (subtract_p) index = -index; @@ -1443,10 +1441,10 @@ slsr_process_cast (gimple gs, tree rhs1, bool speed) The first of these is somewhat arbitrary, but the choice of 1 for the stride simplifies the logic for propagating casts into their uses. */ - c = alloc_cand_and_find_basis (CAND_ADD, gs, rhs1, double_int_zero, - integer_one_node, ctype, 0); - c2 = alloc_cand_and_find_basis (CAND_MULT, gs, rhs1, double_int_zero, - integer_one_node, ctype, 0); + c = alloc_cand_and_find_basis (CAND_ADD, gs, rhs1, + 0, integer_one_node, ctype, 0); + c2 = alloc_cand_and_find_basis (CAND_MULT, gs, rhs1, + 0, integer_one_node, ctype, 0); c->next_interp = c2->cand_num; } @@ -1500,10 +1498,10 @@ slsr_process_copy (gimple gs, tree rhs1, bool speed) The first of these is somewhat arbitrary, but the choice of 1 for the stride simplifies the logic for propagating casts into their uses. 
*/ - c = alloc_cand_and_find_basis (CAND_ADD, gs, rhs1, double_int_zero, - integer_one_node, TREE_TYPE (rhs1), 0); - c2 = alloc_cand_and_find_basis (CAND_MULT, gs, rhs1, double_int_zero, - integer_one_node, TREE_TYPE (rhs1), 0); + c = alloc_cand_and_find_basis (CAND_ADD, gs, rhs1, + 0, integer_one_node, TREE_TYPE (rhs1), 0); + c2 = alloc_cand_and_find_basis (CAND_MULT, gs, rhs1, + 0, integer_one_node, TREE_TYPE (rhs1), 0); c->next_interp = c2->cand_num; } @@ -1613,7 +1611,7 @@ dump_candidate (slsr_cand_t c) fputs (" MULT : (", dump_file); print_generic_expr (dump_file, c->base_expr, 0); fputs (" + ", dump_file); - dump_double_int (dump_file, c->index, false); + print_decs (c->index, dump_file); fputs (") * ", dump_file); print_generic_expr (dump_file, c->stride, 0); fputs (" : ", dump_file); @@ -1622,7 +1620,7 @@ dump_candidate (slsr_cand_t c) fputs (" ADD : ", dump_file); print_generic_expr (dump_file, c->base_expr, 0); fputs (" + (", dump_file); - dump_double_int (dump_file, c->index, false); + print_decs (c->index, dump_file); fputs (" * ", dump_file); print_generic_expr (dump_file, c->stride, 0); fputs (") : ", dump_file); @@ -1633,7 +1631,7 @@ dump_candidate (slsr_cand_t c) fputs (" + (", dump_file); print_generic_expr (dump_file, c->stride, 0); fputs (") + ", dump_file); - dump_double_int (dump_file, c->index, false); + print_decs (c->index, dump_file); fputs (" : ", dump_file); break; case CAND_PHI: @@ -1712,7 +1710,7 @@ dump_incr_vec (void) for (i = 0; i < incr_vec_len; i++) { fprintf (dump_file, "%3d increment: ", i); - dump_double_int (dump_file, incr_vec[i].incr, false); + print_decs (incr_vec[i].incr, dump_file); fprintf (dump_file, "\n count: %d", incr_vec[i].count); fprintf (dump_file, "\n cost: %d", incr_vec[i].cost); fputs ("\n initializer: ", dump_file); @@ -1728,23 +1726,11 @@ dump_incr_vec (void) static void replace_ref (tree *expr, slsr_cand_t c) { - tree add_expr, mem_ref, acc_type = TREE_TYPE (*expr); - unsigned HOST_WIDE_INT misalign; - unsigned align; - - /* Ensure the memory reference carries the minimum alignment - requirement for the data type. See PR58041. */ - get_object_alignment_1 (*expr, &align, &misalign); - if (misalign != 0) - align = (misalign & -misalign); - if (align < TYPE_ALIGN (acc_type)) - acc_type = build_aligned_type (acc_type, align); - - add_expr = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (c->base_expr), - c->base_expr, c->stride); - mem_ref = fold_build2 (MEM_REF, acc_type, add_expr, - double_int_to_tree (c->cand_type, c->index)); - + tree add_expr = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (c->base_expr), + c->base_expr, c->stride); + tree mem_ref = fold_build2 (MEM_REF, TREE_TYPE (*expr), add_expr, + wide_int_to_tree (c->cand_type, c->index)); + /* Gimplify the base addressing expression for the new MEM_REF tree. */ gimple_stmt_iterator gsi = gsi_for_stmt (c->cand_stmt); TREE_OPERAND (mem_ref, 0) @@ -1798,7 +1784,7 @@ phi_dependent_cand_p (slsr_cand_t c) /* Calculate the increment required for candidate C relative to its basis. */ -static double_int +static max_wide_int cand_increment (slsr_cand_t c) { slsr_cand_t basis; @@ -1821,12 +1807,12 @@ cand_increment (slsr_cand_t c) for this candidate, return the absolute value of that increment instead. 
*/ -static inline double_int +static inline max_wide_int cand_abs_increment (slsr_cand_t c) { - double_int increment = cand_increment (c); + max_wide_int increment = cand_increment (c); - if (!address_arithmetic_p && increment.is_negative ()) + if (!address_arithmetic_p && increment.neg_p (SIGNED)) increment = -increment; return increment; @@ -1845,17 +1831,18 @@ cand_already_replaced (slsr_cand_t c) replace_conditional_candidate. */ static void -replace_mult_candidate (slsr_cand_t c, tree basis_name, double_int bump) +replace_mult_candidate (slsr_cand_t c, tree basis_name, const max_wide_int &bump_in) { tree target_type = TREE_TYPE (gimple_assign_lhs (c->cand_stmt)); enum tree_code cand_code = gimple_assign_rhs_code (c->cand_stmt); + max_wide_int bump = bump_in; /* It is highly unlikely, but possible, that the resulting bump doesn't fit in a HWI. Abandon the replacement in this case. This does not affect siblings or dependents of C. Restriction to signed HWI is conservative for unsigned types but allows for safe negation without twisted logic. */ - if (bump.fits_shwi () + if (bump.fits_shwi_p () && bump.to_shwi () != HOST_WIDE_INT_MIN /* It is not useful to replace casts, copies, or adds of an SSA name and a constant. */ @@ -1873,13 +1860,13 @@ replace_mult_candidate (slsr_cand_t c, tree basis_name, double_int bump) types, introduce a cast. */ if (!useless_type_conversion_p (target_type, TREE_TYPE (basis_name))) basis_name = introduce_cast_before_cand (c, target_type, basis_name); - if (bump.is_negative ()) + if (bump.neg_p (SIGNED)) { code = MINUS_EXPR; bump = -bump; } - bump_tree = double_int_to_tree (target_type, bump); + bump_tree = wide_int_to_tree (target_type, bump); if (dump_file && (dump_flags & TDF_DETAILS)) { @@ -1887,7 +1874,7 @@ replace_mult_candidate (slsr_cand_t c, tree basis_name, double_int bump) print_gimple_stmt (dump_file, c->cand_stmt, 0, 0); } - if (bump.is_zero ()) + if (bump.zero_p ()) { tree lhs = gimple_assign_lhs (c->cand_stmt); gimple copy_stmt = gimple_build_assign (lhs, basis_name); @@ -1948,14 +1935,13 @@ static void replace_unconditional_candidate (slsr_cand_t c) { slsr_cand_t basis; - double_int stride, bump; + max_wide_int bump; if (cand_already_replaced (c)) return; basis = lookup_cand (c->basis); - stride = tree_to_double_int (c->stride); - bump = cand_increment (c) * stride; + bump = cand_increment (c) * c->stride; replace_mult_candidate (c, gimple_assign_lhs (basis->cand_stmt), bump); } @@ -1965,7 +1951,7 @@ replace_unconditional_candidate (slsr_cand_t c) MAX_INCR_VEC_LEN increments have been found. */ static inline int -incr_vec_index (double_int increment) +incr_vec_index (max_wide_int increment) { unsigned i; @@ -1985,7 +1971,7 @@ incr_vec_index (double_int increment) static tree create_add_on_incoming_edge (slsr_cand_t c, tree basis_name, - double_int increment, edge e, location_t loc, + max_wide_int increment, edge e, location_t loc, bool known_stride) { basic_block insert_bb; @@ -1996,7 +1982,7 @@ create_add_on_incoming_edge (slsr_cand_t c, tree basis_name, /* If the add candidate along this incoming edge has the same index as C's hidden basis, the hidden basis represents this edge correctly. 
*/ - if (increment.is_zero ()) + if (increment.zero_p ()) return basis_name; basis_type = TREE_TYPE (basis_name); @@ -2006,21 +1992,21 @@ create_add_on_incoming_edge (slsr_cand_t c, tree basis_name, { tree bump_tree; enum tree_code code = PLUS_EXPR; - double_int bump = increment * tree_to_double_int (c->stride); - if (bump.is_negative ()) + max_wide_int bump = increment * c->stride; + if (bump.neg_p (SIGNED)) { code = MINUS_EXPR; bump = -bump; } - bump_tree = double_int_to_tree (basis_type, bump); + bump_tree = wide_int_to_tree (basis_type, bump); new_stmt = gimple_build_assign_with_ops (code, lhs, basis_name, bump_tree); } else { int i; - bool negate_incr = (!address_arithmetic_p && increment.is_negative ()); + bool negate_incr = (!address_arithmetic_p && increment.neg_p (SIGNED)); i = incr_vec_index (negate_incr ? -increment : increment); gcc_assert (i >= 0); @@ -2030,10 +2016,10 @@ create_add_on_incoming_edge (slsr_cand_t c, tree basis_name, new_stmt = gimple_build_assign_with_ops (code, lhs, basis_name, incr_vec[i].initializer); } - else if (increment.is_one ()) + else if (increment.one_p ()) new_stmt = gimple_build_assign_with_ops (PLUS_EXPR, lhs, basis_name, c->stride); - else if (increment.is_minus_one ()) + else if (increment.minus_one_p ()) new_stmt = gimple_build_assign_with_ops (MINUS_EXPR, lhs, basis_name, c->stride); else @@ -2094,11 +2080,11 @@ create_phi_basis (slsr_cand_t c, gimple from_phi, tree basis_name, /* If the phi argument is the base name of the CAND_PHI, then this incoming arc should use the hidden basis. */ if (operand_equal_p (arg, phi_cand->base_expr, 0)) - if (basis->index.is_zero ()) + if (basis->index.zero_p ()) feeding_def = gimple_assign_lhs (basis->cand_stmt); else { - double_int incr = -basis->index; + max_wide_int incr = -basis->index; feeding_def = create_add_on_incoming_edge (c, basis_name, incr, e, loc, known_stride); } @@ -2115,7 +2101,7 @@ create_phi_basis (slsr_cand_t c, gimple from_phi, tree basis_name, else { slsr_cand_t arg_cand = base_cand_from_table (arg); - double_int diff = arg_cand->index - basis->index; + max_wide_int diff = arg_cand->index - basis->index; feeding_def = create_add_on_incoming_edge (c, basis_name, diff, e, loc, known_stride); } @@ -2161,7 +2147,7 @@ replace_conditional_candidate (slsr_cand_t c) tree basis_name, name; slsr_cand_t basis; location_t loc; - double_int stride, bump; + max_wide_int bump; /* Look up the LHS SSA name from C's basis. This will be the RHS1 of the adds we will introduce to create new phi arguments. */ @@ -2174,8 +2160,7 @@ replace_conditional_candidate (slsr_cand_t c) name = create_phi_basis (c, lookup_cand (c->def_phi)->cand_stmt, basis_name, loc, KNOWN_STRIDE); /* Replace C with an add of the new basis phi and a constant. */ - stride = tree_to_double_int (c->stride); - bump = c->index * stride; + bump = c->index * c->stride; replace_mult_candidate (c, name, bump); } @@ -2307,14 +2292,15 @@ count_candidates (slsr_cand_t c) candidates with the same increment, also record T_0 for subsequent use. */ static void -record_increment (slsr_cand_t c, double_int increment, bool is_phi_adjust) +record_increment (slsr_cand_t c, const max_wide_int &increment_in, bool is_phi_adjust) { bool found = false; unsigned i; + max_wide_int increment = increment_in; /* Treat increments that differ only in sign as identical so as to share initializers, unless we are generating pointer arithmetic. 
*/ - if (!address_arithmetic_p && increment.is_negative ()) + if (!address_arithmetic_p && increment.neg_p (SIGNED)) increment = -increment; for (i = 0; i < incr_vec_len; i++) @@ -2358,8 +2344,8 @@ record_increment (slsr_cand_t c, double_int increment, bool is_phi_adjust) if (c->kind == CAND_ADD && !is_phi_adjust && c->index == increment - && (increment.sgt (double_int_one) - || increment.slt (double_int_minus_one)) + && (increment.gts_p (1) + || increment.lts_p (-1)) && (gimple_assign_rhs_code (c->cand_stmt) == PLUS_EXPR || gimple_assign_rhs_code (c->cand_stmt) == POINTER_PLUS_EXPR)) { @@ -2417,7 +2403,7 @@ record_phi_increments (slsr_cand_t basis, gimple phi) else { slsr_cand_t arg_cand = base_cand_from_table (arg); - double_int diff = arg_cand->index - basis->index; + max_wide_int diff = arg_cand->index - basis->index; record_increment (arg_cand, diff, PHI_ADJUST); } } @@ -2468,7 +2454,7 @@ record_increments (slsr_cand_t c) uses. */ static int -phi_incr_cost (slsr_cand_t c, double_int incr, gimple phi, int *savings) +phi_incr_cost (slsr_cand_t c, const max_wide_int &incr, gimple phi, int *savings) { unsigned i; int cost = 0; @@ -2493,7 +2479,7 @@ phi_incr_cost (slsr_cand_t c, double_int incr, gimple phi, int *savings) else { slsr_cand_t arg_cand = base_cand_from_table (arg); - double_int diff = arg_cand->index - basis->index; + max_wide_int diff = arg_cand->index - basis->index; if (incr == diff) { @@ -2558,10 +2544,10 @@ optimize_cands_for_speed_p (slsr_cand_t c) static int lowest_cost_path (int cost_in, int repl_savings, slsr_cand_t c, - double_int incr, bool count_phis) + const max_wide_int &incr, bool count_phis) { int local_cost, sib_cost, savings = 0; - double_int cand_incr = cand_abs_increment (c); + max_wide_int cand_incr = cand_abs_increment (c); if (cand_already_replaced (c)) local_cost = cost_in; @@ -2604,11 +2590,11 @@ lowest_cost_path (int cost_in, int repl_savings, slsr_cand_t c, would go dead. */ static int -total_savings (int repl_savings, slsr_cand_t c, double_int incr, +total_savings (int repl_savings, slsr_cand_t c, const max_wide_int &incr, bool count_phis) { int savings = 0; - double_int cand_incr = cand_abs_increment (c); + max_wide_int cand_incr = cand_abs_increment (c); if (incr == cand_incr && !cand_already_replaced (c)) savings += repl_savings + c->dead_savings; @@ -2658,7 +2644,7 @@ analyze_increments (slsr_cand_t first_dep, enum machine_mode mode, bool speed) /* If somehow this increment is bigger than a HWI, we won't be optimizing candidates that use it. And if the increment has a count of zero, nothing will be done with it. */ - if (!incr_vec[i].incr.fits_shwi () || !incr_vec[i].count) + if (!incr_vec[i].incr.fits_shwi_p () || !incr_vec[i].count) incr_vec[i].cost = COST_INFINITE; /* Increments of 0, 1, and -1 are always profitable to replace, @@ -2812,7 +2798,7 @@ ncd_for_two_cands (basic_block bb1, basic_block bb2, candidates, return the earliest candidate in the block in *WHERE. 
*/ static basic_block -ncd_with_phi (slsr_cand_t c, double_int incr, gimple phi, +ncd_with_phi (slsr_cand_t c, const max_wide_int &incr, gimple phi, basic_block ncd, slsr_cand_t *where) { unsigned i; @@ -2832,7 +2818,7 @@ ncd_with_phi (slsr_cand_t c, double_int incr, gimple phi, else { slsr_cand_t arg_cand = base_cand_from_table (arg); - double_int diff = arg_cand->index - basis->index; + max_wide_int diff = arg_cand->index - basis->index; if ((incr == diff) || (!address_arithmetic_p && incr == -diff)) ncd = ncd_for_two_cands (ncd, gimple_bb (arg_cand->cand_stmt), @@ -2851,7 +2837,7 @@ ncd_with_phi (slsr_cand_t c, double_int incr, gimple phi, return the earliest candidate in the block in *WHERE. */ static basic_block -ncd_of_cand_and_phis (slsr_cand_t c, double_int incr, slsr_cand_t *where) +ncd_of_cand_and_phis (slsr_cand_t c, const max_wide_int &incr, slsr_cand_t *where) { basic_block ncd = NULL; @@ -2876,7 +2862,7 @@ ncd_of_cand_and_phis (slsr_cand_t c, double_int incr, slsr_cand_t *where) *WHERE. */ static basic_block -nearest_common_dominator_for_cands (slsr_cand_t c, double_int incr, +nearest_common_dominator_for_cands (slsr_cand_t c, const max_wide_int &incr, slsr_cand_t *where) { basic_block sib_ncd = NULL, dep_ncd = NULL, this_ncd = NULL, ncd; @@ -2952,13 +2938,13 @@ insert_initializers (slsr_cand_t c) slsr_cand_t where = NULL; gimple init_stmt; tree stride_type, new_name, incr_tree; - double_int incr = incr_vec[i].incr; + max_wide_int incr = incr_vec[i].incr; if (!profitable_increment_p (i) - || incr.is_one () - || (incr.is_minus_one () + || incr.one_p () + || (incr.minus_one_p () && gimple_assign_rhs_code (c->cand_stmt) != POINTER_PLUS_EXPR) - || incr.is_zero ()) + || incr.zero_p ()) continue; /* We may have already identified an existing initializer that @@ -2987,7 +2973,7 @@ insert_initializers (slsr_cand_t c) /* Create the initializer and insert it in the latest possible dominating position. */ - incr_tree = double_int_to_tree (stride_type, incr); + incr_tree = wide_int_to_tree (stride_type, incr); init_stmt = gimple_build_assign_with_ops (MULT_EXPR, new_name, c->stride, incr_tree); if (where) @@ -3044,9 +3030,9 @@ all_phi_incrs_profitable (slsr_cand_t c, gimple phi) { int j; slsr_cand_t arg_cand = base_cand_from_table (arg); - double_int increment = arg_cand->index - basis->index; + max_wide_int increment = arg_cand->index - basis->index; - if (!address_arithmetic_p && increment.is_negative ()) + if (!address_arithmetic_p && increment.neg_p (SIGNED)) increment = -increment; j = incr_vec_index (increment); @@ -3057,7 +3043,7 @@ all_phi_incrs_profitable (slsr_cand_t c, gimple phi) c->cand_num); print_gimple_stmt (dump_file, phi, 0, 0); fputs (" increment: ", dump_file); - dump_double_int (dump_file, increment, false); + print_decs (increment, dump_file); if (j < 0) fprintf (dump_file, "\n Not replaced; incr_vec overflow.\n"); @@ -3152,7 +3138,7 @@ replace_one_candidate (slsr_cand_t c, unsigned i, tree basis_name) tree orig_rhs1, orig_rhs2; tree rhs2; enum tree_code orig_code, repl_code; - double_int cand_incr; + max_wide_int cand_incr; orig_code = gimple_assign_rhs_code (c->cand_stmt); orig_rhs1 = gimple_assign_rhs1 (c->cand_stmt); @@ -3200,7 +3186,7 @@ replace_one_candidate (slsr_cand_t c, unsigned i, tree basis_name) from the basis name, or an add of the stride to the basis name, respectively. It may be necessary to introduce a cast (or reuse an existing cast). 
*/ - else if (cand_incr.is_one ()) + else if (cand_incr.one_p ()) { tree stride_type = TREE_TYPE (c->stride); tree orig_type = TREE_TYPE (orig_rhs2); @@ -3215,7 +3201,7 @@ replace_one_candidate (slsr_cand_t c, unsigned i, tree basis_name) c); } - else if (cand_incr.is_minus_one ()) + else if (cand_incr.minus_one_p ()) { tree stride_type = TREE_TYPE (c->stride); tree orig_type = TREE_TYPE (orig_rhs2); @@ -3242,7 +3228,7 @@ replace_one_candidate (slsr_cand_t c, unsigned i, tree basis_name) fputs (" (duplicate, not actually replacing)\n", dump_file); } - else if (cand_incr.is_zero ()) + else if (cand_incr.zero_p ()) { tree lhs = gimple_assign_lhs (c->cand_stmt); tree lhs_type = TREE_TYPE (lhs); @@ -3292,7 +3278,7 @@ replace_profitable_candidates (slsr_cand_t c) { if (!cand_already_replaced (c)) { - double_int increment = cand_abs_increment (c); + max_wide_int increment = cand_abs_increment (c); enum tree_code orig_code = gimple_assign_rhs_code (c->cand_stmt); int i; diff --git a/gcc/gimple.c b/gcc/gimple.c index f5074199381..93be1781687 100644 --- a/gcc/gimple.c +++ b/gcc/gimple.c @@ -3034,16 +3034,16 @@ gimple_compare_field_offset (tree f1, tree f2) /* Fortran and C do not always agree on what DECL_OFFSET_ALIGN should be, so handle differing ones specially by decomposing the offset into a byte and bit offset manually. */ - if (host_integerp (DECL_FIELD_OFFSET (f1), 0) - && host_integerp (DECL_FIELD_OFFSET (f2), 0)) + if (tree_fits_shwi_p (DECL_FIELD_OFFSET (f1)) + && tree_fits_shwi_p (DECL_FIELD_OFFSET (f2))) { unsigned HOST_WIDE_INT byte_offset1, byte_offset2; unsigned HOST_WIDE_INT bit_offset1, bit_offset2; - bit_offset1 = TREE_INT_CST_LOW (DECL_FIELD_BIT_OFFSET (f1)); - byte_offset1 = (TREE_INT_CST_LOW (DECL_FIELD_OFFSET (f1)) + bit_offset1 = tree_to_hwi (DECL_FIELD_BIT_OFFSET (f1)); + byte_offset1 = (tree_to_hwi (DECL_FIELD_OFFSET (f1)) + bit_offset1 / BITS_PER_UNIT); - bit_offset2 = TREE_INT_CST_LOW (DECL_FIELD_BIT_OFFSET (f2)); - byte_offset2 = (TREE_INT_CST_LOW (DECL_FIELD_OFFSET (f2)) + bit_offset2 = tree_to_hwi (DECL_FIELD_BIT_OFFSET (f2)); + byte_offset2 = (tree_to_hwi (DECL_FIELD_OFFSET (f2)) + bit_offset2 / BITS_PER_UNIT); if (byte_offset1 != byte_offset2) return false; diff --git a/gcc/gimplify.c b/gcc/gimplify.c index 4d39d539f2d..aadf055a1af 100644 --- a/gcc/gimplify.c +++ b/gcc/gimplify.c @@ -695,7 +695,7 @@ gimple_add_tmp_var (tree tmp) /* Later processing assumes that the object size is constant, which might not be true at this point. Force the use of a constant upper bound in this case. 
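
The gimple_compare_field_offset hunk above keeps the existing strategy of comparing field positions as a (byte, bit) pair when the two FIELD_DECLs disagree on DECL_OFFSET_ALIGN; only the accessors changed. A stand-alone sketch of the normalization it performs (tree accessors replaced by plain integers; assumes 8-bit units):

  #include <cassert>
  #include <cstdint>

  static const unsigned BITS_PER_UNIT = 8;  /* as on current GCC targets */

  /* Fold whole bytes of the bit offset into the byte offset, as
     gimple_compare_field_offset does before comparing, so two encodings
     of the same position compare equal.  */
  static void
  normalize (uint64_t &byte_off, uint64_t &bit_off)
  {
    byte_off += bit_off / BITS_PER_UNIT;
    bit_off %= BITS_PER_UNIT;
  }

  int
  main ()
  {
    /* 2 bytes + 20 bits and 4 bytes + 4 bits name the same bit.  */
    uint64_t b1 = 2, o1 = 20, b2 = 4, o2 = 4;
    normalize (b1, o1);
    normalize (b2, o2);
    assert (b1 == b2 && o1 == o2);
  }
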
*/ - if (!host_integerp (DECL_SIZE_UNIT (tmp), 1)) + if (!tree_fits_uhwi_p (DECL_SIZE_UNIT (tmp))) force_constant_size (tmp); DECL_CONTEXT (tmp) = current_function_decl; @@ -1711,11 +1711,7 @@ preprocess_case_label_vec_for_gimple (vec<tree> labels, low = CASE_HIGH (labels[i - 1]); if (!low) low = CASE_LOW (labels[i - 1]); - if ((TREE_INT_CST_LOW (low) + 1 - != TREE_INT_CST_LOW (high)) - || (TREE_INT_CST_HIGH (low) - + (TREE_INT_CST_LOW (high) == 0) - != TREE_INT_CST_HIGH (high))) + if ((wide_int (low) + 1) != high) break; } if (i == len) @@ -4320,12 +4316,12 @@ gimple_fold_indirect_ref (tree t) if (TREE_CODE (addr) == ADDR_EXPR && TREE_CODE (TREE_TYPE (addrtype)) == VECTOR_TYPE && useless_type_conversion_p (type, TREE_TYPE (TREE_TYPE (addrtype))) - && host_integerp (off, 1)) + && tree_fits_uhwi_p (off)) { - unsigned HOST_WIDE_INT offset = tree_low_cst (off, 1); + unsigned HOST_WIDE_INT offset = tree_to_uhwi (off); tree part_width = TYPE_SIZE (type); unsigned HOST_WIDE_INT part_widthi - = tree_low_cst (part_width, 0) / BITS_PER_UNIT; + = tree_to_shwi (part_width) / BITS_PER_UNIT; unsigned HOST_WIDE_INT indexi = offset * BITS_PER_UNIT; tree index = bitsize_int (indexi); if (offset / part_widthi @@ -4349,9 +4345,7 @@ gimple_fold_indirect_ref (tree t) || DECL_P (TREE_OPERAND (addr, 0))) return fold_build2 (MEM_REF, type, addr, - build_int_cst_wide (ptype, - TREE_INT_CST_LOW (off), - TREE_INT_CST_HIGH (off))); + wide_int_to_tree (ptype, off)); } /* *(foo *)fooarrptr => (*fooarrptr)[0] */ diff --git a/gcc/go/go-gcc.cc b/gcc/go/go-gcc.cc index 1ecfaffd73a..c88844612e0 100644 --- a/gcc/go/go-gcc.cc +++ b/gcc/go/go-gcc.cc @@ -782,9 +782,8 @@ Gcc_backend::type_size(Btype* btype) if (t == error_mark_node) return 1; t = TYPE_SIZE_UNIT(t); - gcc_assert(TREE_CODE(t) == INTEGER_CST); - gcc_assert(TREE_INT_CST_HIGH(t) == 0); - unsigned HOST_WIDE_INT val_wide = TREE_INT_CST_LOW(t); + gcc_assert(cst_fits_uhwi_p (t)); + unsigned HOST_WIDE_INT val_wide = tree_to_hwi (t); size_t ret = static_cast<size_t>(val_wide); gcc_assert(ret == val_wide); return ret; diff --git a/gcc/go/gofrontend/expressions.cc b/gcc/go/gofrontend/expressions.cc index 2b60d90a5dc..7e8e6e88eb2 100644 --- a/gcc/go/gofrontend/expressions.cc +++ b/gcc/go/gofrontend/expressions.cc @@ -3356,9 +3356,9 @@ Type_conversion_expression::do_get_tree(Translate_context* context) tree int_type_tree = type_to_tree(int_type->get_backend(gogo)); expr_tree = fold_convert(int_type_tree, expr_tree); - if (host_integerp(expr_tree, 0)) + if (tree_fits_shwi_p(expr_tree)) { - HOST_WIDE_INT intval = tree_low_cst(expr_tree, 0); + HOST_WIDE_INT intval = tree_to_shwi(expr_tree); std::string s; Lex::append_char(intval, true, &s, this->location()); Expression* se = Expression::make_string(s, this->location()); diff --git a/gcc/godump.c b/gcc/godump.c index 81efa2877cb..327d023ec0a 100644 --- a/gcc/godump.c +++ b/gcc/godump.c @@ -36,6 +36,7 @@ along with GCC; see the file COPYING3. If not see #include "pointer-set.h" #include "obstack.h" #include "debug.h" +#include "wide-int-print.h" /* We dump this information from the debug hooks. This gives us a stable and maintainable API to hook into. 
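
The preprocess_case_label_vec_for_gimple hunk above shows what the conversion buys: the old code tested that two case ranges were adjacent with hand-written carry propagation across the (low, high) word pair, while the new code simply asks whether low + 1 == high at full precision. A sketch contrasting the two, with unsigned __int128 (a GCC extension) standing in for wide_int:

  #include <cassert>
  #include <cstdint>

  struct dbl { uint64_t low; int64_t high; };  /* old double_int layout */

  /* Old style: propagate the carry out of LOW into HIGH by hand; the
     (b.low == 0) term is the carry, exactly as in the deleted code.  */
  static bool
  adjacent_manual (dbl a, dbl b)
  {
    return a.low + 1 == b.low
           && a.high + (b.low == 0 ? 1 : 0) == b.high;
  }

  /* New style: widen, add once, compare once.  */
  static bool
  adjacent_wide (dbl a, dbl b)
  {
    unsigned __int128 wa = ((unsigned __int128) (uint64_t) a.high << 64) | a.low;
    unsigned __int128 wb = ((unsigned __int128) (uint64_t) b.high << 64) | b.low;
    return wa + 1 == wb;
  }

  int
  main ()
  {
    dbl a = { UINT64_MAX, 0 };  /* 2^64 - 1 */
    dbl b = { 0, 1 };           /* 2^64: the carry case */
    assert (adjacent_manual (a, b) && adjacent_wide (a, b));
  }
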
In order to work @@ -728,12 +729,12 @@ go_format_type (struct godump_container *container, tree type, && tree_int_cst_sgn (TYPE_MIN_VALUE (TYPE_DOMAIN (type))) == 0 && TYPE_MAX_VALUE (TYPE_DOMAIN (type)) != NULL_TREE && TREE_CODE (TYPE_MAX_VALUE (TYPE_DOMAIN (type))) == INTEGER_CST - && host_integerp (TYPE_MAX_VALUE (TYPE_DOMAIN (type)), 0)) + && tree_fits_shwi_p (TYPE_MAX_VALUE (TYPE_DOMAIN (type)))) { char buf[100]; snprintf (buf, sizeof buf, HOST_WIDE_INT_PRINT_DEC "+1", - tree_low_cst (TYPE_MAX_VALUE (TYPE_DOMAIN (type)), 0)); + tree_to_shwi (TYPE_MAX_VALUE (TYPE_DOMAIN (type)))); obstack_grow (ob, buf, strlen (buf)); } obstack_1grow (ob, ']'); @@ -967,7 +968,7 @@ go_output_typedef (struct godump_container *container, tree decl) const char *name; struct macro_hash_value *mhval; void **slot; - char buf[100]; + char buf[WIDE_INT_PRINT_BUFFER_SIZE]; name = IDENTIFIER_POINTER (TREE_PURPOSE (element)); @@ -981,18 +982,15 @@ go_output_typedef (struct godump_container *container, tree decl) if (*slot != NULL) macro_hash_del (*slot); - if (host_integerp (TREE_VALUE (element), 0)) + if (tree_fits_shwi_p (TREE_VALUE (element))) snprintf (buf, sizeof buf, HOST_WIDE_INT_PRINT_DEC, - tree_low_cst (TREE_VALUE (element), 0)); - else if (host_integerp (TREE_VALUE (element), 1)) + tree_to_shwi (TREE_VALUE (element))); + else if (tree_fits_uhwi_p (TREE_VALUE (element))) snprintf (buf, sizeof buf, HOST_WIDE_INT_PRINT_UNSIGNED, ((unsigned HOST_WIDE_INT) - tree_low_cst (TREE_VALUE (element), 1))); + tree_to_uhwi (TREE_VALUE (element)))); else - snprintf (buf, sizeof buf, HOST_WIDE_INT_PRINT_DOUBLE_HEX, - ((unsigned HOST_WIDE_INT) - TREE_INT_CST_HIGH (TREE_VALUE (element))), - TREE_INT_CST_LOW (TREE_VALUE (element))); + print_hex (wide_int (TREE_VALUE (element)), buf); mhval->value = xstrdup (buf); *slot = mhval; diff --git a/gcc/graphite-clast-to-gimple.c b/gcc/graphite-clast-to-gimple.c index 663cc825908..aed82a7005d 100644 --- a/gcc/graphite-clast-to-gimple.c +++ b/gcc/graphite-clast-to-gimple.c @@ -63,14 +63,14 @@ gmp_cst_to_tree (tree type, mpz_t val) { tree t = type ? type : integer_type_node; mpz_t tmp; - double_int di; + wide_int wi; mpz_init (tmp); mpz_set (tmp, val); - di = mpz_get_double_int (t, tmp, true); + wi = wide_int::from_mpz (t, tmp, true); mpz_clear (tmp); - return double_int_to_tree (t, di); + return wide_int_to_tree (t, wi); } /* Sets RES to the min of V1 and V2. */ diff --git a/gcc/graphite-scop-detection.c b/gcc/graphite-scop-detection.c index ab2897d5c43..78510f3e052 100644 --- a/gcc/graphite-scop-detection.c +++ b/gcc/graphite-scop-detection.c @@ -159,10 +159,10 @@ graphite_can_represent_init (tree e) case MULT_EXPR: if (chrec_contains_symbols (TREE_OPERAND (e, 0))) return graphite_can_represent_init (TREE_OPERAND (e, 0)) - && host_integerp (TREE_OPERAND (e, 1), 0); + && tree_fits_shwi_p (TREE_OPERAND (e, 1)); else return graphite_can_represent_init (TREE_OPERAND (e, 1)) - && host_integerp (TREE_OPERAND (e, 0), 0); + && tree_fits_shwi_p (TREE_OPERAND (e, 0)); case PLUS_EXPR: case POINTER_PLUS_EXPR: diff --git a/gcc/graphite-sese-to-poly.c b/gcc/graphite-sese-to-poly.c index c4c3eb40449..56be4caf0dd 100644 --- a/gcc/graphite-sese-to-poly.c +++ b/gcc/graphite-sese-to-poly.c @@ -52,8 +52,8 @@ along with GCC; see the file COPYING3.
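
gmp_cst_to_tree above (and tree_int_to_gmp just below) now reduce to one round-trip between the compiler's integers and GMP. For reference, a stand-alone sketch of what such an export has to do, using plain libgmp over a little-endian word array shaped like wide_int's element buffer (magnitude only; sign handling elided):

  #include <cstdint>
  #include <gmp.h>

  int
  main ()
  {
    /* A 128-bit magnitude as two 64-bit words, least significant first.  */
    uint64_t words[2] = { 0x0123456789abcdefULL, 0x1ULL };

    mpz_t z;
    mpz_init (z);
    /* count = 2 words, order = -1 (least significant word first),
       size = 8 bytes per word, endian = 0 (native), nails = 0.  */
    mpz_import (z, 2, -1, sizeof (words[0]), 0, 0, words);
    gmp_printf ("%Zx\n", z);  /* prints 10123456789abcdef */
    mpz_clear (z);
  }
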
If not see static inline void tree_int_to_gmp (tree t, mpz_t res) { - double_int di = tree_to_double_int (t); - mpz_set_double_int (res, di, TYPE_UNSIGNED (TREE_TYPE (t))); + wide_int wi = t; + wi.to_mpz (res, TYPE_SIGN (TREE_TYPE (t))); } /* Returns the index of the PHI argument defined in the outermost @@ -1005,7 +1005,7 @@ build_loop_iteration_domains (scop_p scop, struct loop *loop, /* loop_i <= expr_nb_iters */ else if (!chrec_contains_undetermined (nb_iters)) { - double_int nit; + max_wide_int nit; isl_pw_aff *aff; isl_set *valid; isl_local_space *ls; @@ -1041,7 +1041,7 @@ build_loop_iteration_domains (scop_p scop, struct loop *loop, isl_constraint *c; mpz_init (g); - mpz_set_double_int (g, nit, false); + nit.to_mpz (g, SIGNED); mpz_sub_ui (g, g, 1); approx = extract_affine_gmp (g, isl_set_get_space (inner)); x = isl_pw_aff_ge_set (approx, aff); @@ -1525,9 +1525,9 @@ pdr_add_data_dimensions (isl_set *extent, scop_p scop, data_reference_p dr) subscript - low >= 0 and high - subscript >= 0 in case one of the two bounds isn't known. Do the same here? */ - if (host_integerp (low, 0) + if (tree_fits_shwi_p (low) && high - && host_integerp (high, 0) + && tree_fits_shwi_p (high) /* 1-element arrays at end of structures may extend over their declared size. */ && !(array_at_struct_end_p (ref) diff --git a/gcc/ipa-cp.c b/gcc/ipa-cp.c index a52bf7f5524..d37d15ff5c1 100644 --- a/gcc/ipa-cp.c +++ b/gcc/ipa-cp.c @@ -1384,7 +1384,7 @@ propagate_aggs_accross_jump_function (struct cgraph_edge *cs, if (item->offset < 0) continue; gcc_checking_assert (is_gimple_ip_invariant (item->value)); - val_size = tree_low_cst (TYPE_SIZE (TREE_TYPE (item->value)), 1); + val_size = tree_to_uhwi (TYPE_SIZE (TREE_TYPE (item->value))); if (merge_agg_lats_step (dest_plats, item->offset, val_size, &aglat, pre_existing, &ret)) diff --git a/gcc/ipa-prop.c b/gcc/ipa-prop.c index b06f640b3f8..e833bf796db 100644 --- a/gcc/ipa-prop.c +++ b/gcc/ipa-prop.c @@ -284,7 +284,7 @@ ipa_print_node_jump_functions_for_edge (FILE *f, struct cgraph_edge *cs) item->offset); if (TYPE_P (item->value)) fprintf (f, "clobber of " HOST_WIDE_INT_PRINT_DEC " bits", - tree_low_cst (TYPE_SIZE (item->value), 1)); + tree_to_uhwi (TYPE_SIZE (item->value))); else { fprintf (f, "cst: "); @@ -1026,7 +1026,7 @@ compute_complex_assign_jump_func (struct ipa_node_params *info, || max_size == -1 || max_size != size) return; - offset += mem_ref_offset (base).low * BITS_PER_UNIT; + offset += mem_ref_offset (base).to_short_addr () * BITS_PER_UNIT; ssa = TREE_OPERAND (base, 0); if (TREE_CODE (ssa) != SSA_NAME || !SSA_NAME_IS_DEFAULT_DEF (ssa) @@ -1081,7 +1081,7 @@ get_ancestor_addr_info (gimple assign, tree *obj_p, HOST_WIDE_INT *offset) || TREE_CODE (SSA_NAME_VAR (parm)) != PARM_DECL) return NULL_TREE; - *offset += mem_ref_offset (expr).low * BITS_PER_UNIT; + *offset += mem_ref_offset (expr).to_short_addr () * BITS_PER_UNIT; *obj_p = obj; return expr; } @@ -1217,7 +1217,7 @@ type_like_member_ptr_p (tree type, tree *method_ptr, tree *delta) fld = TYPE_FIELDS (type); if (!fld || !POINTER_TYPE_P (TREE_TYPE (fld)) || TREE_CODE (TREE_TYPE (TREE_TYPE (fld))) != METHOD_TYPE - || !host_integerp (DECL_FIELD_OFFSET (fld), 1)) + || !tree_fits_uhwi_p (DECL_FIELD_OFFSET (fld))) return false; if (method_ptr) @@ -1225,7 +1225,7 @@ type_like_member_ptr_p (tree type, tree *method_ptr, tree *delta) fld = DECL_CHAIN (fld); if (!fld || INTEGRAL_TYPE_P (fld) - || !host_integerp (DECL_FIELD_OFFSET (fld), 1)) + || !tree_fits_uhwi_p (DECL_FIELD_OFFSET (fld))) return false; if (delta) 
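
tree_int_to_gmp above now derives the export signedness from TYPE_SIGN instead of a hard-coded flag. The distinction is observable: the same bit pattern exports to different mpz values depending on sign. A small illustration (libgmp; assumes the usual two's-complement host):

  #include <gmp.h>

  int
  main ()
  {
    mpz_t z;
    mpz_init (z);

    /* The 8-bit pattern 0xff under an unsigned type is 255...  */
    mpz_set_ui (z, 0xffu);
    gmp_printf ("unsigned: %Zd\n", z);  /* 255 */

    /* ...but under a signed type it is -1, which is what an export
       keyed off TYPE_SIGN (t) == SIGNED must produce.  */
    mpz_set_si (z, (signed char) 0xff);
    gmp_printf ("signed: %Zd\n", z);    /* -1 */

    mpz_clear (z);
  }
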
*delta = fld; @@ -1295,13 +1295,13 @@ determine_known_aggregate_parts (gimple call, tree arg, if (TREE_CODE (arg) == SSA_NAME) { tree type_size; - if (!host_integerp (TYPE_SIZE (TREE_TYPE (TREE_TYPE (arg))), 1)) + if (!tree_fits_uhwi_p (TYPE_SIZE (TREE_TYPE (TREE_TYPE (arg))))) return; check_ref = true; arg_base = arg; arg_offset = 0; type_size = TYPE_SIZE (TREE_TYPE (TREE_TYPE (arg))); - arg_size = tree_low_cst (type_size, 1); + arg_size = tree_to_uhwi (type_size); ao_ref_init_from_ptr_and_size (&r, arg_base, NULL_TREE); } else if (TREE_CODE (arg) == ADDR_EXPR) @@ -1902,7 +1902,7 @@ ipa_analyze_virtual_call_uses (struct cgraph_node *node, cs = ipa_note_param_call (node, index, call); ii = cs->indirect_info; ii->offset = anc_offset; - ii->otr_token = tree_low_cst (OBJ_TYPE_REF_TOKEN (target), 1); + ii->otr_token = tree_to_uhwi (OBJ_TYPE_REF_TOKEN (target)); ii->otr_type = TREE_TYPE (TREE_TYPE (OBJ_TYPE_REF_OBJECT (target))); ii->polymorphic = 1; } @@ -2114,7 +2114,7 @@ ipa_intraprocedural_devirtualization (gimple call) if (!binfo) return NULL_TREE; token = OBJ_TYPE_REF_TOKEN (otr); - fndecl = gimple_get_virt_method_for_binfo (tree_low_cst (token, 1), + fndecl = gimple_get_virt_method_for_binfo (tree_to_uhwi (token), binfo); return fndecl; } @@ -3410,9 +3410,8 @@ ipa_modify_call_arguments (struct cgraph_edge *cs, gimple stmt, if (TYPE_ALIGN (type) > align) align = TYPE_ALIGN (type); } - misalign += (tree_to_double_int (off) - .sext (TYPE_PRECISION (TREE_TYPE (off))).low - * BITS_PER_UNIT); + misalign += (addr_wide_int (off).sext (TYPE_PRECISION (TREE_TYPE (off))) + * BITS_PER_UNIT).to_short_addr (); misalign = misalign & (align - 1); if (misalign != 0) align = (misalign & -misalign); diff --git a/gcc/java/Make-lang.in b/gcc/java/Make-lang.in index 8a6210fea3b..1fad1798bbd 100644 --- a/gcc/java/Make-lang.in +++ b/gcc/java/Make-lang.in @@ -270,7 +270,7 @@ java/jcf-dump.o: $(CONFIG_H) $(SYSTEM_H) coretypes.h $(JAVA_TREE_H) \ java/jcf-dump.c java/jcf-reader.c java/jcf.h java/javaop.h java/javaop.def \ version.h $(GGC_H) intl.h java/zipfile.h $(DIAGNOSTIC_H) java/boehm.o: java/boehm.c $(CONFIG_H) $(SYSTEM_H) coretypes.h $(TM_H) \ - $(TREE_H) $(JAVA_TREE_H) java/parse.h + $(TREE_H) $(JAVA_TREE_H) java/parse.h wide-int.h java/builtins.o: java/builtins.c $(CONFIG_H) $(SYSTEM_H) coretypes.h $(TM_H) \ $(JAVA_TREE_H) $(GGC_H) $(FLAGS_H) $(OPTABS_H) $(EXPR_H) langhooks.h \ gt-java-builtins.h @@ -290,13 +290,14 @@ java/expr.o: java/expr.c $(CONFIG_H) $(JAVA_TREE_H) java/jcf.h $(REAL_H) \ java/javaop.h java/java-opcodes.h \ java/java-except.h java/java-except.h java/parse.h \ $(SYSTEM_H) coretypes.h $(TM_H) $(GGC_H) gt-java-expr.h $(TARGET_H) \ - tree-iterator.h + tree-iterator.h wide-int.h java/jcf-depend.o: java/jcf-depend.c $(CONFIG_H) $(SYSTEM_H) coretypes.h \ java/jcf.h java/jcf-parse.o: java/jcf-parse.c $(CONFIG_H) $(JAVA_TREE_H) $(FLAGS_H) \ input.h java/java-except.h $(SYSTEM_H) coretypes.h \ java/parse.h $(GGC_H) debug.h $(REAL_H) gt-java-jcf-parse.h \ - java/jcf-reader.c java/zipfile.h java/jcf.h $(BITMAP_H) + java/jcf-reader.c java/zipfile.h java/jcf.h $(BITMAP_H) \ + wide-int.h java/jvgenmain.o: java/jvgenmain.c $(CONFIG_H) $(JAVA_TREE_H) $(SYSTEM_H) \ coretypes.h $(TM_H) intl.h $(DIAGNOSTIC_H) java/lang.o: java/lang.c $(CONFIG_H) $(JAVA_TREE_H) java/jcf.h input.h \ diff --git a/gcc/java/boehm.c b/gcc/java/boehm.c index 5910f0322dc..7e7bbd1b6da 100644 --- a/gcc/java/boehm.c +++ b/gcc/java/boehm.c @@ -32,8 +32,9 @@ The Free Software Foundation is independent of Sun Microsystems, Inc. 
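
On the ipa_modify_call_arguments hunk above: after the sign-extended offset is folded into misalign, the unchanged context line align = (misalign & -misalign) recovers the strongest alignment the offset still permits, because x & -x isolates the lowest set bit of x. A quick demonstration of that identity:

  #include <cassert>
  #include <cstdint>

  /* For x != 0, x & -x is the largest power of two dividing x, i.e. the
     best alignment guarantee a misaligned offset leaves intact.  */
  static uint64_t
  lowest_set_bit (uint64_t x)
  {
    return x & (0 - x);
  }

  int
  main ()
  {
    assert (lowest_set_bit (24) == 8);  /* offset 24 keeps 8-byte alignment */
    assert (lowest_set_bit (20) == 4);
    assert (lowest_set_bit (6) == 2);
  }
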
*/ #include "java-tree.h" #include "parse.h" #include "diagnostic-core.h" +#include "wide-int.h" -static void mark_reference_fields (tree, double_int *, unsigned int, +static void mark_reference_fields (tree, wide_int *, unsigned int, int *, int *, int *, HOST_WIDE_INT *); /* A procedure-based object descriptor. We know that our @@ -47,7 +48,7 @@ static void mark_reference_fields (tree, double_int *, unsigned int, /* Recursively mark reference fields. */ static void mark_reference_fields (tree field, - double_int *mask, + wide_int *mask, unsigned int ubit, int *pointer_after_end, int *all_bits_set, @@ -136,16 +137,17 @@ get_boehm_type_descriptor (tree type) int last_set_index = 0; HOST_WIDE_INT last_view_index = -1; int pointer_after_end = 0; - double_int mask; + wide_int mask; tree field, value, value_type; - mask = double_int_zero; - /* If the GC wasn't requested, just use a null pointer. */ if (! flag_use_boehm_gc) return null_pointer_node; value_type = java_type_for_mode (ptr_mode, 1); + + mask = wide_int::zero (TYPE_PRECISION (value_type)); + /* If we have a type of unknown size, use a proc. */ if (int_size_in_bytes (type) == -1) goto procedure_object_descriptor; @@ -194,7 +196,7 @@ get_boehm_type_descriptor (tree type) that we don't have to emit reflection data for run time marking. */ count = 0; - mask = double_int_zero; + mask = wide_int::zero (TYPE_PRECISION (value_type)); ++last_set_index; while (last_set_index) { @@ -203,13 +205,13 @@ get_boehm_type_descriptor (tree type) last_set_index >>= 1; ++count; } - value = double_int_to_tree (value_type, mask); + value = wide_int_to_tree (value_type, mask); } else if (! pointer_after_end) { /* Bottom two bits for bitmap mark type are 01. */ mask = mask.set_bit (0); - value = double_int_to_tree (value_type, mask); + value = wide_int_to_tree (value_type, mask); } else { @@ -233,5 +235,5 @@ uses_jv_markobj_p (tree dtable) point in asserting unless we hit the bad case. */ gcc_assert (!flag_reduced_reflection || TARGET_VTABLE_USES_DESCRIPTORS == 0); v = (*CONSTRUCTOR_ELTS (dtable))[3].value; - return (PROCEDURE_OBJECT_DESCRIPTOR == TREE_INT_CST_LOW (v)); + return (PROCEDURE_OBJECT_DESCRIPTOR == tree_to_hwi (v)); } diff --git a/gcc/java/class.c b/gcc/java/class.c index cb6789643d3..251873e0811 100644 --- a/gcc/java/class.c +++ b/gcc/java/class.c @@ -1576,14 +1576,14 @@ get_dispatch_vector (tree type) HOST_WIDE_INT i; tree method; tree super = CLASSTYPE_SUPER (type); - HOST_WIDE_INT nvirtuals = tree_low_cst (TYPE_NVIRTUALS (type), 0); + HOST_WIDE_INT nvirtuals = tree_to_shwi (TYPE_NVIRTUALS (type)); vtable = make_tree_vec (nvirtuals); TYPE_VTABLE (type) = vtable; if (super != NULL_TREE) { tree super_vtable = get_dispatch_vector (super); - for (i = tree_low_cst (TYPE_NVIRTUALS (super), 0); --i >= 0; ) + for (i = tree_to_shwi (TYPE_NVIRTUALS (super)); --i >= 0; ) TREE_VEC_ELT (vtable, i) = TREE_VEC_ELT (super_vtable, i); } @@ -1592,8 +1592,8 @@ get_dispatch_vector (tree type) { tree method_index = get_method_index (method); if (method_index != NULL_TREE - && host_integerp (method_index, 0)) - TREE_VEC_ELT (vtable, tree_low_cst (method_index, 0)) = method; + && tree_fits_shwi_p (method_index)) + TREE_VEC_ELT (vtable, tree_to_shwi (method_index)) = method; } } diff --git a/gcc/java/expr.c b/gcc/java/expr.c index a434913d475..068ac29a9fa 100644 --- a/gcc/java/expr.c +++ b/gcc/java/expr.c @@ -44,6 +44,7 @@ The Free Software Foundation is independent of Sun Microsystems, Inc. 
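
mark_reference_fields above now accumulates the Boehm GC descriptor in a wide_int created at pointer precision (wide_int::zero (TYPE_PRECISION (value_type))) rather than a fixed double_int. A simplified stand-alone model of the bitmap construction, with uint64_t in place of the precision-sized wide_int and an assumed bit order (the real descriptor also packs a two-bit tag, as the set_bit (0) call above shows):

  #include <cassert>
  #include <cstdint>

  /* Bit i set means "word i of the object is a pointer to trace".  */
  static uint64_t
  make_descriptor (const bool *is_pointer_word, unsigned nwords)
  {
    uint64_t mask = 0;                /* wide_int::zero (precision) */
    for (unsigned i = 0; i < nwords; i++)
      if (is_pointer_word[i])
        mask |= uint64_t (1) << i;    /* mask.set_bit (i) */
    return mask;
  }

  int
  main ()
  {
    bool layout[4] = { true, false, true, true };
    assert (make_descriptor (layout, 4) == 0xd);
  }
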
*/ #include "ggc.h" #include "tree-iterator.h" #include "target.h" +#include "wide-int.h" static void flush_quick_stack (void); static void push_value (tree); @@ -1049,8 +1050,8 @@ build_newarray (int atype_value, tree length) tree prim_type = decode_newarray_type (atype_value); tree type = build_java_array_type (prim_type, - host_integerp (length, 0) == INTEGER_CST - ? tree_low_cst (length, 0) : -1); + tree_fits_shwi_p (length) + ? tree_to_shwi (length) : -1); /* Pass a reference to the primitive type class and save the runtime some work. */ @@ -1069,8 +1070,8 @@ build_anewarray (tree class_type, tree length) { tree type = build_java_array_type (class_type, - host_integerp (length, 0) - ? tree_low_cst (length, 0) : -1); + tree_fits_shwi_p (length) + ? tree_to_shwi (length) : -1); return build_call_nary (promote_type (type), build_address_of (soft_anewarray_node), @@ -1258,7 +1259,7 @@ expand_java_pushc (int ival, tree type) else if (type == float_type_node || type == double_type_node) { REAL_VALUE_TYPE x; - REAL_VALUE_FROM_INT (x, ival, 0, TYPE_MODE (type)); + REAL_VALUE_FROM_INT (x, ival, TYPE_MODE (type)); value = build_real (type, x); } else @@ -2672,7 +2673,7 @@ build_jni_stub (tree method) special way, we would do that here. */ for (tem = method_args; tem != NULL_TREE; tem = DECL_CHAIN (tem)) { - int arg_bits = TREE_INT_CST_LOW (TYPE_SIZE (TREE_TYPE (tem))); + int arg_bits = tree_to_hwi (TYPE_SIZE (TREE_TYPE (tem))); #ifdef PARM_BOUNDARY arg_bits = (((arg_bits + PARM_BOUNDARY - 1) / PARM_BOUNDARY) * PARM_BOUNDARY); diff --git a/gcc/java/jcf-parse.c b/gcc/java/jcf-parse.c index fbd4e00e029..e217f24852c 100644 --- a/gcc/java/jcf-parse.c +++ b/gcc/java/jcf-parse.c @@ -40,6 +40,7 @@ The Free Software Foundation is independent of Sun Microsystems, Inc. */ #include "cgraph.h" #include "bitmap.h" #include "target.h" +#include "wide-int.h" #ifdef HAVE_LOCALE_H #include <locale.h> @@ -1039,14 +1040,14 @@ get_constant (JCF *jcf, int index) case CONSTANT_Long: { unsigned HOST_WIDE_INT num; - double_int val; + wide_int val; num = JPOOL_UINT (jcf, index); - val = double_int::from_uhwi (num).llshift (32, 64); + val = wide_int (num).sforce_to_size (32).lshift_widen (32, 64); num = JPOOL_UINT (jcf, index + 1); - val |= double_int::from_uhwi (num); + val |= wide_int (num); - value = double_int_to_tree (long_type_node, val); + value = wide_int_to_tree (long_type_node, val); break; } diff --git a/gcc/java/typeck.c b/gcc/java/typeck.c index 9dbb3f0291b..004ebf151b7 100644 --- a/gcc/java/typeck.c +++ b/gcc/java/typeck.c @@ -217,7 +217,7 @@ java_array_type_length (tree array_type) { tree high = TYPE_MAX_VALUE (index_type); if (TREE_CODE (high) == INTEGER_CST) - return TREE_INT_CST_LOW (high) + 1; + return tree_to_uhwi (high) + 1; } } return -1; diff --git a/gcc/lcm.c b/gcc/lcm.c index c13d2a6aa51..7471b0e4c38 100644 --- a/gcc/lcm.c +++ b/gcc/lcm.c @@ -64,6 +64,7 @@ along with GCC; see the file COPYING3. If not see #include "sbitmap.h" #include "dumpfile.h" +#define LCM_DEBUG_INFO 1 /* Edge based LCM routines. */ static void compute_antinout_edge (sbitmap *, sbitmap *, sbitmap *, sbitmap *); static void compute_earliest (struct edge_list *, int, sbitmap *, sbitmap *, @@ -106,6 +107,7 @@ compute_antinout_edge (sbitmap *antloc, sbitmap *transp, sbitmap *antin, /* We want a maximal solution, so make an optimistic initialization of ANTIN. 
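
In the jcf-parse.c hunk above, a CONSTANT_Long is rebuilt from two 32-bit constant-pool words: the first (most significant) word is widened and shifted into the high half, then the second is ORed in. The same computation over fixed-width types, assuming the usual two's-complement narrowing:

  #include <cassert>
  #include <cstdint>

  /* Model of get_constant's CONSTANT_Long case: pool words are 32 bits,
     most significant word first; the result is a signed 64-bit long.  */
  static int64_t
  java_long_from_pool (uint32_t w0, uint32_t w1)
  {
    return (int64_t) (((uint64_t) w0 << 32) | w1);
  }

  int
  main ()
  {
    /* -1L is stored as the word pair 0xffffffff, 0xffffffff.  */
    assert (java_long_from_pool (0xffffffffu, 0xffffffffu) == -1);
    assert (java_long_from_pool (1u, 0u) == (int64_t) 1 << 32);
  }
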
*/ bitmap_vector_ones (antin, last_basic_block); + bitmap_vector_clear (antout, last_basic_block); /* Put every block on the worklist; this is necessary because of the optimistic initialization of ANTIN above. */ @@ -432,6 +434,7 @@ pre_edge_lcm (int n_exprs, sbitmap *transp, /* Allocate an extra element for the exit block in the laterin vector. */ laterin = sbitmap_vector_alloc (last_basic_block + 1, n_exprs); + bitmap_vector_clear (laterin, last_basic_block); compute_laterin (edge_list, earliest, antloc, later, laterin); #ifdef LCM_DEBUG_INFO diff --git a/gcc/loop-doloop.c b/gcc/loop-doloop.c index b47901b69b7..17e4cb1c7fa 100644 --- a/gcc/loop-doloop.c +++ b/gcc/loop-doloop.c @@ -409,7 +409,7 @@ doloop_modify (struct loop *loop, struct niter_desc *desc, basic_block loop_end = desc->out_edge->src; enum machine_mode mode; rtx true_prob_val; - double_int iterations; + max_wide_int iterations; jump_insn = BB_END (loop_end); @@ -461,9 +461,9 @@ doloop_modify (struct loop *loop, struct niter_desc *desc, /* Determine if the iteration counter will be non-negative. Note that the maximum value loaded is iterations_max - 1. */ if (max_loop_iterations (loop, &iterations) - && (iterations.ule (double_int_one.llshift - (GET_MODE_PRECISION (mode) - 1, - GET_MODE_PRECISION (mode))))) + && (iterations.leu_p (wide_int::set_bit_in_zero + (GET_MODE_PRECISION (mode) - 1, + GET_MODE_PRECISION (mode))))) nonneg = 1; break; @@ -549,14 +549,14 @@ doloop_modify (struct loop *loop, struct niter_desc *desc, { rtx init; unsigned level = get_loop_level (loop) + 1; - double_int iter; + max_wide_int iter; rtx iter_rtx; if (!max_loop_iterations (loop, &iter) - || !iter.fits_shwi ()) + || !iter.fits_shwi_p ()) iter_rtx = const0_rtx; else - iter_rtx = GEN_INT (iter.to_shwi()); + iter_rtx = GEN_INT (iter.to_shwi ()); init = gen_doloop_begin (counter_reg, desc->const_iter ? desc->niter_expr : const0_rtx, iter_rtx, @@ -618,7 +618,7 @@ doloop_optimize (struct loop *loop) struct niter_desc *desc; unsigned word_mode_size; unsigned HOST_WIDE_INT word_mode_max; - double_int iter; + max_wide_int iter; int entered_at_top; if (dump_file) @@ -671,10 +671,10 @@ doloop_optimize (struct loop *loop) count = copy_rtx (desc->niter_expr); iterations = desc->const_iter ? desc->niter_expr : const0_rtx; if (!max_loop_iterations (loop, &iter) - || !iter.fits_shwi ()) + || !iter.fits_shwi_p ()) iterations_max = const0_rtx; else - iterations_max = GEN_INT (iter.to_shwi()); + iterations_max = GEN_INT (iter.to_shwi ()); level = get_loop_level (loop) + 1; /* Generate looping insn. If the pattern FAILs then give up trying @@ -697,7 +697,7 @@ doloop_optimize (struct loop *loop) computed, we must be sure that the number of iterations fits into the new mode.
*/ && (word_mode_size >= GET_MODE_PRECISION (mode) - || iter.ule (double_int::from_shwi (word_mode_max)))) + || iter.leu_p (word_mode_max))) { if (word_mode_size > GET_MODE_PRECISION (mode)) { diff --git a/gcc/loop-iv.c b/gcc/loop-iv.c index 3248b56c0a5..2d8e918c43e 100644 --- a/gcc/loop-iv.c +++ b/gcc/loop-iv.c @@ -2614,8 +2614,7 @@ iv_number_of_iterations (struct loop *loop, rtx insn, rtx condition, max = (up - down) / inc + 1; if (!desc->infinite && !desc->assumptions) - record_niter_bound (loop, double_int::from_uhwi (max), - false, true); + record_niter_bound (loop, max, false, true); if (iv0.step == const0_rtx) { @@ -2654,8 +2653,8 @@ iv_number_of_iterations (struct loop *loop, rtx insn, rtx condition, iv1.step = const0_rtx; if (INTVAL (iv0.step) < 0) { - iv0.step = simplify_gen_unary (NEG, comp_mode, iv0.step, mode); - iv1.base = simplify_gen_unary (NEG, comp_mode, iv1.base, mode); + iv0.step = simplify_gen_unary (NEG, comp_mode, iv0.step, comp_mode); + iv1.base = simplify_gen_unary (NEG, comp_mode, iv1.base, comp_mode); } iv0.step = lowpart_subreg (mode, iv0.step, comp_mode); @@ -2829,8 +2828,7 @@ iv_number_of_iterations (struct loop *loop, rtx insn, rtx condition, desc->niter = val & GET_MODE_MASK (desc->mode); if (!desc->infinite && !desc->assumptions) - record_niter_bound (loop, double_int::from_uhwi (desc->niter), - false, true); + record_niter_bound (loop, desc->niter, false, true); } else { @@ -2839,8 +2837,7 @@ iv_number_of_iterations (struct loop *loop, rtx insn, rtx condition, goto zero_iter_simplify; if (!desc->infinite && !desc->assumptions) - record_niter_bound (loop, double_int::from_uhwi (max), - false, true); + record_niter_bound (loop, max, false, true); /* simplify_using_initial_values does a copy propagation on the registers in the expression for the number of iterations. 
This prolongs life @@ -2865,8 +2862,7 @@ zero_iter_simplify: zero_iter: desc->const_iter = true; desc->niter = 0; - record_niter_bound (loop, double_int_zero, - true, true); + record_niter_bound (loop, 0, true, true); desc->noloop_assumptions = NULL_RTX; desc->niter_expr = const0_rtx; return; diff --git a/gcc/loop-unroll.c b/gcc/loop-unroll.c index fa0260175d8..a23cf2fe5db 100644 --- a/gcc/loop-unroll.c +++ b/gcc/loop-unroll.c @@ -643,7 +643,7 @@ decide_unroll_constant_iterations (struct loop *loop, int flags) { unsigned nunroll, nunroll_by_av, best_copies, best_unroll = 0, n_copies, i; struct niter_desc *desc; - double_int iterations; + max_wide_int iterations; if (!(flags & UAP_UNROLL)) { @@ -693,7 +693,7 @@ decide_unroll_constant_iterations (struct loop *loop, int flags) if (desc->niter < 2 * nunroll || ((estimated_loop_iterations (loop, &iterations) || max_loop_iterations (loop, &iterations)) - && iterations.ult (double_int::from_shwi (2 * nunroll)))) + && iterations.ltu_p (2 * nunroll))) { if (dump_file) fprintf (dump_file, ";; Not unrolling loop, doesn't roll\n"); @@ -814,11 +814,11 @@ unroll_loop_constant_iterations (struct loop *loop) desc->noloop_assumptions = NULL_RTX; desc->niter -= exit_mod; - loop->nb_iterations_upper_bound -= double_int::from_uhwi (exit_mod); + loop->nb_iterations_upper_bound -= exit_mod; if (loop->any_estimate - && double_int::from_uhwi (exit_mod).ule + && wide_int (exit_mod).leu_p (loop->nb_iterations_estimate)) - loop->nb_iterations_estimate -= double_int::from_uhwi (exit_mod); + loop->nb_iterations_estimate -= exit_mod; else loop->any_estimate = false; } @@ -858,11 +858,11 @@ unroll_loop_constant_iterations (struct loop *loop) apply_opt_in_copies (opt_info, exit_mod + 1, false, false); desc->niter -= exit_mod + 1; - loop->nb_iterations_upper_bound -= double_int::from_uhwi (exit_mod + 1); + loop->nb_iterations_upper_bound -= exit_mod + 1; if (loop->any_estimate - && double_int::from_uhwi (exit_mod + 1).ule + && wide_int (exit_mod + 1).leu_p (loop->nb_iterations_estimate)) - loop->nb_iterations_estimate -= double_int::from_uhwi (exit_mod + 1); + loop->nb_iterations_estimate -= exit_mod + 1; else loop->any_estimate = false; desc->noloop_assumptions = NULL_RTX; @@ -914,14 +914,10 @@ unroll_loop_constant_iterations (struct loop *loop) desc->niter /= max_unroll + 1; loop->nb_iterations_upper_bound - = loop->nb_iterations_upper_bound.udiv (double_int::from_uhwi (max_unroll - + 1), - TRUNC_DIV_EXPR); + = loop->nb_iterations_upper_bound.udiv_trunc (max_unroll + 1); if (loop->any_estimate) loop->nb_iterations_estimate - = loop->nb_iterations_estimate.udiv (double_int::from_uhwi (max_unroll - + 1), - TRUNC_DIV_EXPR); + = loop->nb_iterations_estimate.udiv_trunc (max_unroll + 1); desc->niter_expr = GEN_INT (desc->niter); /* Remove the edges. */ @@ -942,7 +938,7 @@ decide_unroll_runtime_iterations (struct loop *loop, int flags) { unsigned nunroll, nunroll_by_av, i; struct niter_desc *desc; - double_int iterations; + max_wide_int iterations; if (!(flags & UAP_UNROLL)) { @@ -998,7 +994,7 @@ decide_unroll_runtime_iterations (struct loop *loop, int flags) /* Check whether the loop rolls. 
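
The unroll_loop_constant_iterations hunks above keep the recorded bounds consistent with the transformed loop: peeled iterations are subtracted, and unrolling by max_unroll + 1 divides the bounds with truncation (udiv_trunc). A worked model of the arithmetic (the real code also rewires exit edges, which is elided here):

  #include <cassert>
  #include <cstdint>

  /* After peeling PEEL iterations and unrolling the rest by FACTOR, a
     loop bounded by UPPER executions runs the unrolled body at most
     (UPPER - PEEL) / FACTOR times; truncation models udiv_trunc.  */
  static uint64_t
  unrolled_upper_bound (uint64_t upper, uint64_t peel, uint64_t factor)
  {
    return (upper - peel) / factor;
  }

  int
  main ()
  {
    /* niter = 10, exit_mod = 10 % 4 = 2 peeled first, then 8 / 4 = 2
       executions of the 4x body.  */
    assert (unrolled_upper_bound (10, 2, 4) == 2);
  }
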
*/ if ((estimated_loop_iterations (loop, &iterations) || max_loop_iterations (loop, &iterations)) - && iterations.ult (double_int::from_shwi (2 * nunroll))) + && iterations.ltu_p (2 * nunroll)) { if (dump_file) fprintf (dump_file, ";; Not unrolling loop, doesn't roll\n"); @@ -1309,14 +1305,10 @@ unroll_loop_runtime_iterations (struct loop *loop) simplify_gen_binary (UDIV, desc->mode, old_niter, GEN_INT (max_unroll + 1)); loop->nb_iterations_upper_bound - = loop->nb_iterations_upper_bound.udiv (double_int::from_uhwi (max_unroll - + 1), - TRUNC_DIV_EXPR); + = loop->nb_iterations_upper_bound.udiv_trunc (max_unroll + 1); if (loop->any_estimate) loop->nb_iterations_estimate - = loop->nb_iterations_estimate.udiv (double_int::from_uhwi (max_unroll - + 1), - TRUNC_DIV_EXPR); + = loop->nb_iterations_estimate.udiv_trunc (max_unroll + 1); if (exit_at_end) { desc->niter_expr = @@ -1324,7 +1316,7 @@ unroll_loop_runtime_iterations (struct loop *loop) desc->noloop_assumptions = NULL_RTX; --loop->nb_iterations_upper_bound; if (loop->any_estimate - && loop->nb_iterations_estimate != double_int_zero) + && loop->nb_iterations_estimate != 0) --loop->nb_iterations_estimate; else loop->any_estimate = false; @@ -1344,7 +1336,7 @@ static void decide_peel_simple (struct loop *loop, int flags) { unsigned npeel; - double_int iterations; + max_wide_int iterations; if (!(flags & UAP_PEEL)) { @@ -1388,7 +1380,8 @@ decide_peel_simple (struct loop *loop, int flags) /* If we have realistic estimate on number of iterations, use it. */ if (estimated_loop_iterations (loop, &iterations)) { - if (double_int::from_shwi (npeel).ule (iterations)) + /* TODO: unsigned/signed confusion */ + if (wide_int::from_shwi (npeel).leu_p (iterations)) { if (dump_file) { @@ -1405,7 +1398,7 @@ decide_peel_simple (struct loop *loop, int flags) /* If we have small enough bound on iterations, we can still peel (completely unroll). */ else if (max_loop_iterations (loop, &iterations) - && iterations.ult (double_int::from_shwi (npeel))) + && iterations.ltu_p (npeel)) npeel = iterations.to_shwi () + 1; else { @@ -1499,7 +1492,7 @@ decide_unroll_stupid (struct loop *loop, int flags) { unsigned nunroll, nunroll_by_av, i; struct niter_desc *desc; - double_int iterations; + max_wide_int iterations; if (!(flags & UAP_UNROLL_ALL)) { @@ -1556,7 +1549,7 @@ decide_unroll_stupid (struct loop *loop, int flags) /* Check whether the loop rolls. 
*/ if ((estimated_loop_iterations (loop, &iterations) || max_loop_iterations (loop, &iterations)) - && iterations.ult (double_int::from_shwi (2 * nunroll))) + && iterations.ltu_p (2 * nunroll)) { if (dump_file) fprintf (dump_file, ";; Not unrolling loop, doesn't roll\n"); diff --git a/gcc/lto-streamer-in.c b/gcc/lto-streamer-in.c index 64303593272..63cd17f2ffa 100644 --- a/gcc/lto-streamer-in.c +++ b/gcc/lto-streamer-in.c @@ -695,14 +695,28 @@ input_cfg (struct lto_input_block *ib, struct function *fn, loop->any_upper_bound = streamer_read_hwi (ib); if (loop->any_upper_bound) { - loop->nb_iterations_upper_bound.low = streamer_read_uhwi (ib); - loop->nb_iterations_upper_bound.high = streamer_read_hwi (ib); + HOST_WIDE_INT a[WIDE_INT_MAX_ELTS]; + int i; + int prec ATTRIBUTE_UNUSED = streamer_read_uhwi (ib); + int len = streamer_read_uhwi (ib); + for (i = 0; i < len; i++) + a[i] = streamer_read_hwi (ib); + + loop->nb_iterations_upper_bound + = max_wide_int::from_array (a, len); } loop->any_estimate = streamer_read_hwi (ib); if (loop->any_estimate) { - loop->nb_iterations_estimate.low = streamer_read_uhwi (ib); - loop->nb_iterations_estimate.high = streamer_read_hwi (ib); + HOST_WIDE_INT a[WIDE_INT_MAX_ELTS]; + int i; + int prec ATTRIBUTE_UNUSED = streamer_read_uhwi (ib); + int len = streamer_read_uhwi (ib); + for (i = 0; i < len; i++) + a[i] = streamer_read_hwi (ib); + + loop->nb_iterations_estimate + = max_wide_int::from_array (a, len); } place_new_loop (fn, loop); @@ -1251,12 +1265,17 @@ lto_input_tree_1 (struct lto_input_block *ib, struct data_in *data_in, } else if (tag == LTO_integer_cst) { - /* For shared integer constants in singletons we can use the existing - tree integer constant merging code. */ + /* For shared integer constants in singletons we can use the + existing tree integer constant merging code. 
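
input_cfg above reads the loop bounds in the new serialized shape, mirrored by output_cfg further down: a precision (currently read and ignored), an element count, then that many HOST_WIDE_INT elements, least significant first. A sketch of the wire-format round trip over a plain vector (the real streamer writes these through its compact integer encoders):

  #include <cassert>
  #include <cstdint>
  #include <vector>

  struct wire { std::vector<int64_t> words; size_t pos = 0; };

  static void
  write_wide (wire &w, uint32_t prec, const std::vector<int64_t> &elts)
  {
    w.words.push_back (prec);
    w.words.push_back ((int64_t) elts.size ());
    for (int64_t e : elts)
      w.words.push_back (e);
  }

  static std::vector<int64_t>
  read_wide (wire &w)
  {
    w.pos++;                            /* precision: skipped, as above */
    size_t len = (size_t) w.words[w.pos++];
    std::vector<int64_t> elts;
    for (size_t i = 0; i < len; i++)
      elts.push_back (w.words[w.pos++]);
    return elts;
  }

  int
  main ()
  {
    wire w;
    write_wide (w, 128, { -1, 42 });    /* a two-element value */
    assert (read_wide (w) == std::vector<int64_t> ({ -1, 42 }));
  }
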
*/ tree type = stream_read_tree (ib, data_in); - unsigned HOST_WIDE_INT low = streamer_read_uhwi (ib); - HOST_WIDE_INT high = streamer_read_hwi (ib); - result = build_int_cst_wide (type, low, high); + unsigned HOST_WIDE_INT len = streamer_read_uhwi (ib); + unsigned HOST_WIDE_INT i; + HOST_WIDE_INT a[WIDE_INT_MAX_ELTS]; + + for (i = 0; i < len; i++) + a[i] = streamer_read_hwi (ib); + result = wide_int_to_tree (type, wide_int::from_array + (a, len, TYPE_PRECISION (type), false)); streamer_tree_cache_append (data_in->reader_cache, result, hash); } else if (tag == LTO_tree_scc) diff --git a/gcc/lto-streamer-out.c b/gcc/lto-streamer-out.c index ea0ff177c47..3aa1f7c5cd3 100644 --- a/gcc/lto-streamer-out.c +++ b/gcc/lto-streamer-out.c @@ -708,8 +708,10 @@ hash_tree (struct streamer_tree_cache_d *cache, tree t) if (CODE_CONTAINS_STRUCT (code, TS_INT_CST)) { - v = iterative_hash_host_wide_int (TREE_INT_CST_LOW (t), v); - v = iterative_hash_host_wide_int (TREE_INT_CST_HIGH (t), v); + int i; + v = iterative_hash_host_wide_int (TREE_INT_CST_NUNITS (t), v); + for (i = 0; i < TREE_INT_CST_NUNITS (t); i++) + v = iterative_hash_host_wide_int (TREE_INT_CST_ELT (t, i), v); } if (CODE_CONTAINS_STRUCT (code, TS_REAL_CST)) @@ -1620,14 +1622,24 @@ output_cfg (struct output_block *ob, struct function *fn) streamer_write_hwi (ob, loop->any_upper_bound); if (loop->any_upper_bound) { - streamer_write_uhwi (ob, loop->nb_iterations_upper_bound.low); - streamer_write_hwi (ob, loop->nb_iterations_upper_bound.high); + int len = loop->nb_iterations_upper_bound.get_len (); + int i; + + streamer_write_uhwi (ob, loop->nb_iterations_upper_bound.get_precision ()); + streamer_write_uhwi (ob, len); + for (i = 0; i < len; i++) + streamer_write_hwi (ob, loop->nb_iterations_upper_bound.elt (i)); } streamer_write_hwi (ob, loop->any_estimate); if (loop->any_estimate) { - streamer_write_uhwi (ob, loop->nb_iterations_estimate.low); - streamer_write_hwi (ob, loop->nb_iterations_estimate.high); + int len = loop->nb_iterations_estimate.get_len (); + int i; + + streamer_write_uhwi (ob, loop->nb_iterations_estimate.get_precision ()); + streamer_write_uhwi (ob, len); + for (i = 0; i < len; i++) + streamer_write_hwi (ob, loop->nb_iterations_estimate.elt (i)); } } @@ -2261,7 +2273,7 @@ write_symbol (struct streamer_tree_cache_d *cache, if (kind == GCCPK_COMMON && DECL_SIZE_UNIT (t) && TREE_CODE (DECL_SIZE_UNIT (t)) == INTEGER_CST) - size = TREE_INT_CST_LOW (DECL_SIZE_UNIT (t)); + size = tree_to_hwi (DECL_SIZE_UNIT (t)); else size = 0; diff --git a/gcc/lto/lto-lang.c b/gcc/lto/lto-lang.c index 87a756d5763..aeedb18d528 100644 --- a/gcc/lto/lto-lang.c +++ b/gcc/lto/lto-lang.c @@ -313,11 +313,10 @@ static bool get_nonnull_operand (tree arg_num_expr, unsigned HOST_WIDE_INT *valp) { /* Verify the arg number is a constant. 
*/ - if (TREE_CODE (arg_num_expr) != INTEGER_CST - || TREE_INT_CST_HIGH (arg_num_expr) != 0) + if (!cst_fits_uhwi_p (arg_num_expr)) return false; - *valp = TREE_INT_CST_LOW (arg_num_expr); + *valp = tree_to_hwi (arg_num_expr); return true; } diff --git a/gcc/lto/lto.c b/gcc/lto/lto.c index c854589c673..07a009b2d21 100644 --- a/gcc/lto/lto.c +++ b/gcc/lto/lto.c @@ -1778,8 +1778,8 @@ compare_tree_sccs_1 (tree t1, tree t2, tree **map) if (CODE_CONTAINS_STRUCT (code, TS_INT_CST)) { - compare_values (TREE_INT_CST_LOW); - compare_values (TREE_INT_CST_HIGH); + if (!wide_int::eq_p (t1, t2)) + return false; } if (CODE_CONTAINS_STRUCT (code, TS_REAL_CST)) diff --git a/gcc/objc/Make-lang.in b/gcc/objc/Make-lang.in index f04d60686c8..349a4fd25c3 100644 --- a/gcc/objc/Make-lang.in +++ b/gcc/objc/Make-lang.in @@ -126,7 +126,7 @@ objc/objc-act.o : objc/objc-act.c \ $(START_HDRS) \ $(GGC_H) $(DIAGNOSTIC_CORE_H) $(FLAGS_H) input.h \ toplev.h $(FUNCTION_H) debug.h $(LANGHOOKS_DEF_H) \ - $(HASH_TABLE_H) $(GIMPLE_H) \ + $(HASH_TABLE_H) $(GIMPLE_H) wide-int.h \ $(C_PRAGMA_H) $(C_TARGET_H) \ objc/objc-encoding.h \ objc/objc-map.h \ diff --git a/gcc/objc/objc-act.c b/gcc/objc/objc-act.c index 95ec4ecd40f..21bac389f89 100644 --- a/gcc/objc/objc-act.c +++ b/gcc/objc/objc-act.c @@ -50,6 +50,7 @@ along with GCC; see the file COPYING3. If not see #include "cgraph.h" #include "tree-iterator.h" #include "hash-table.h" +#include "wide-int.h" #include "langhooks-def.h" /* Different initialization, code gen and meta data generation for each runtime. */ @@ -3021,8 +3022,8 @@ check_string_class_template (void) #define AT_LEAST_AS_LARGE_AS(F, T) \ (F && TREE_CODE (F) == FIELD_DECL \ - && (TREE_INT_CST_LOW (TYPE_SIZE (TREE_TYPE (F))) \ - >= TREE_INT_CST_LOW (TYPE_SIZE (T)))) + && (tree_to_hwi (TYPE_SIZE (TREE_TYPE (F))) \ + >= tree_to_hwi (TYPE_SIZE (T)))) if (!AT_LEAST_AS_LARGE_AS (field_decl, ptr_type_node)) return 0; @@ -4878,14 +4879,9 @@ objc_decl_method_attributes (tree *node, tree attributes, int flags) which specifies the index of the format string argument. Add 2. */ number = TREE_VALUE (second_argument); - if (number - && TREE_CODE (number) == INTEGER_CST - && TREE_INT_CST_HIGH (number) == 0) - { - TREE_VALUE (second_argument) - = build_int_cst (integer_type_node, - TREE_INT_CST_LOW (number) + 2); - } + if (number && TREE_CODE (number) == INTEGER_CST) + TREE_VALUE (second_argument) + = wide_int_to_tree (TREE_TYPE (number), wide_int (number) + 2); /* This is the third argument, the "first-to-check", which specifies the index of the first argument to check. This could be 0, in which case we don't need to add 2. Add 2 if not 0. */ number = TREE_VALUE (third_argument); - if (number - && TREE_CODE (number) == INTEGER_CST - && TREE_INT_CST_HIGH (number) == 0 - && TREE_INT_CST_LOW (number) != 0) - { - TREE_VALUE (third_argument) - = build_int_cst (integer_type_node, - TREE_INT_CST_LOW (number) + 2); - } + if (number && TREE_CODE (number) == INTEGER_CST + && !integer_zerop (number)) + TREE_VALUE (third_argument) + = wide_int_to_tree (TREE_TYPE (number), wide_int (number) + 2); } filtered_attributes = chainon (filtered_attributes, new_attribute); @@ -4933,15 +4923,9 @@ objc_decl_method_attributes (tree *node, tree attributes, int flags) { /* Get the value of the argument and add 2.
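
For context on the repeated "+ 2" above: an Objective-C method lowers to a C function whose first two parameters are the hidden self and _cmd, so user-visible argument indices in format-style attributes shift by two, except that a first-to-check of 0 means "no checking" and must stay 0 (hence the zero test retained above). A tiny model of the remapping:

  #include <cassert>

  /* Map a user-visible ObjC argument index to the index the middle end
     sees; 0 is a sentinel and must not be shifted.  */
  static int
  remap_objc_attr_index (int idx)
  {
    return idx == 0 ? 0 : idx + 2;
  }

  int
  main ()
  {
    assert (remap_objc_attr_index (1) == 3);  /* first user argument */
    assert (remap_objc_attr_index (0) == 0);  /* "don't check" stays 0 */
  }
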
*/ tree number = TREE_VALUE (argument); - if (number - && TREE_CODE (number) == INTEGER_CST - && TREE_INT_CST_HIGH (number) == 0 - && TREE_INT_CST_LOW (number) != 0) - { - TREE_VALUE (argument) - = build_int_cst (integer_type_node, - TREE_INT_CST_LOW (number) + 2); - } + if (number && TREE_CODE (number) == INTEGER_CST + && !integer_zerop (number)) + TREE_VALUE (argument) + = wide_int_to_tree (TREE_TYPE (number), wide_int (number) + 2); argument = TREE_CHAIN (argument); } @@ -8893,7 +8877,7 @@ gen_declaration (tree decl) if (DECL_INITIAL (decl) && TREE_CODE (DECL_INITIAL (decl)) == INTEGER_CST) sprintf (errbuf + strlen (errbuf), ": " HOST_WIDE_INT_PRINT_DEC, - TREE_INT_CST_LOW (DECL_INITIAL (decl))); + tree_to_hwi (DECL_INITIAL (decl))); } return errbuf; @@ -8933,7 +8917,7 @@ gen_type_name_0 (tree type) char sz[20]; sprintf (sz, HOST_WIDE_INT_PRINT_DEC, - (TREE_INT_CST_LOW + (tree_to_hwi (TYPE_MAX_VALUE (TYPE_DOMAIN (type))) + 1)); strcat (errbuf, sz); } diff --git a/gcc/objc/objc-encoding.c b/gcc/objc/objc-encoding.c index c2f7444c448..1c7ba105fa8 100644 --- a/gcc/objc/objc-encoding.c +++ b/gcc/objc/objc-encoding.c @@ -393,12 +393,12 @@ encode_array (tree type, int curtype, int format) array. */ sprintf (buffer, "[" HOST_WIDE_INT_PRINT_DEC, (HOST_WIDE_INT)0); } - else if (TREE_INT_CST_LOW (TYPE_SIZE (array_of)) == 0) + else if (tree_to_hwi (TYPE_SIZE (array_of)) == 0) sprintf (buffer, "[" HOST_WIDE_INT_PRINT_DEC, (HOST_WIDE_INT)0); else sprintf (buffer, "[" HOST_WIDE_INT_PRINT_DEC, - TREE_INT_CST_LOW (an_int_cst) - / TREE_INT_CST_LOW (TYPE_SIZE (array_of))); + tree_to_hwi (an_int_cst) + / tree_to_hwi (TYPE_SIZE (array_of))); obstack_grow (&util_obstack, buffer, strlen (buffer)); encode_type (array_of, curtype, format); @@ -425,7 +425,7 @@ encode_vector (tree type, int curtype, int format) sprintf (buffer, "![" HOST_WIDE_INT_PRINT_DEC ",%d", /* We want to compute the equivalent of sizeof (<vector>). Code inspired by c_sizeof_or_alignof_type. */ - ((TREE_INT_CST_LOW (TYPE_SIZE_UNIT (type)) + ((tree_to_hwi (TYPE_SIZE_UNIT (type)) / (TYPE_PRECISION (char_type_node) / BITS_PER_UNIT))), /* We want to compute the equivalent of __alignof__ (<vector>). Code inspired by @@ -820,7 +820,7 @@ encode_field (tree field_decl, int curtype, int format) between GNU and NeXT runtimes. */ if (DECL_BIT_FIELD_TYPE (field_decl)) { - int size = tree_low_cst (DECL_SIZE (field_decl), 1); + int size = tree_to_uhwi (DECL_SIZE (field_decl)); if (flag_next_runtime) encode_next_bitfield (size); diff --git a/gcc/objc/objc-next-runtime-abi-01.c b/gcc/objc/objc-next-runtime-abi-01.c index 27ba615ca0e..0a042ac4bf0 100644 --- a/gcc/objc/objc-next-runtime-abi-01.c +++ b/gcc/objc/objc-next-runtime-abi-01.c @@ -1199,7 +1199,7 @@ generate_v1_objc_protocol_extension (tree proto_interface, build_v1_objc_protocol_extension_template (); /* uint32_t size */ - size = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (objc_protocol_extension_template)); + size = tree_to_hwi (TYPE_SIZE_UNIT (objc_protocol_extension_template)); CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, build_int_cst (NULL_TREE, size)); /* Try for meaningful diagnostics. */ @@ -1343,7 +1343,7 @@ generate_v1_property_table (tree context, tree klass_ctxt) is_proto ?
context : klass_ctxt); - init_val = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (objc_v1_property_template)); + init_val = tree_to_hwi (TYPE_SIZE_UNIT (objc_v1_property_template)); if (is_proto) snprintf (buf, BUFSIZE, "_OBJC_ProtocolPropList_%s", IDENTIFIER_POINTER (PROTOCOL_NAME (context))); @@ -1723,7 +1723,7 @@ build_v1_category_initializer (tree type, tree cat_name, tree class_name, if (flag_objc_abi >= 1) { - int val = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (objc_category_template)); + int val = tree_to_hwi (TYPE_SIZE_UNIT (objc_category_template)); expr = build_int_cst (NULL_TREE, val); CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, expr); ltyp = objc_prop_list_ptr; @@ -1825,7 +1825,7 @@ generate_objc_class_ext (tree property_list, tree context) build_objc_class_ext_template (); /* uint32_t size */ - size = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (objc_class_ext_template)); + size = tree_to_hwi (TYPE_SIZE_UNIT (objc_class_ext_template)); CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, build_int_cst (NULL_TREE, size)); ltyp = const_string_type_node; diff --git a/gcc/objc/objc-next-runtime-abi-02.c b/gcc/objc/objc-next-runtime-abi-02.c index 4bb02c76737..0138650d7db 100644 --- a/gcc/objc/objc-next-runtime-abi-02.c +++ b/gcc/objc/objc-next-runtime-abi-02.c @@ -2318,7 +2318,7 @@ generate_v2_meth_descriptor_table (tree chain, tree protocol, decl = start_var_decl (method_list_template, buf); - entsize = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (objc_method_template)); + entsize = tree_to_hwi (TYPE_SIZE_UNIT (objc_method_template)); CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, build_int_cst (NULL_TREE, entsize)); CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, build_int_cst (NULL_TREE, size)); initlist = @@ -2432,7 +2432,7 @@ generate_v2_property_table (tree context, tree klass_ctxt) is_proto ? context : klass_ctxt); - init_val = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (objc_v2_property_template)); + init_val = tree_to_hwi (TYPE_SIZE_UNIT (objc_v2_property_template)); if (is_proto) snprintf (buf, BUFSIZE, "_OBJC_ProtocolPropList_%s", IDENTIFIER_POINTER (PROTOCOL_NAME (context))); @@ -2507,7 +2507,7 @@ build_v2_protocol_initializer (tree type, tree protocol_name, tree protocol_list /* const uint32_t size; = sizeof(struct protocol_t) */ expr = build_int_cst (integer_type_node, - TREE_INT_CST_LOW (TYPE_SIZE_UNIT (objc_v2_protocol_template))); + tree_to_hwi (TYPE_SIZE_UNIT (objc_v2_protocol_template))); CONSTRUCTOR_APPEND_ELT (inits, NULL_TREE, expr); /* const uint32_t flags; = 0 */ CONSTRUCTOR_APPEND_ELT (inits, NULL_TREE, integer_zero_node); @@ -2621,7 +2621,7 @@ generate_v2_dispatch_table (tree chain, const char *name, tree attr) decl = start_var_decl (method_list_template, name); - init_val = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (objc_method_template)); + init_val = tree_to_hwi (TYPE_SIZE_UNIT (objc_method_template)); CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, build_int_cst (integer_type_node, init_val)); CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, @@ -2848,7 +2848,7 @@ build_v2_ivar_list_initializer (tree class_name, tree type, tree field_decl) build_int_cst (integer_type_node, val)); /* Set size. 
*/ - val = TREE_INT_CST_LOW (DECL_SIZE_UNIT (field_decl)); + val = tree_to_hwi (DECL_SIZE_UNIT (field_decl)); CONSTRUCTOR_APPEND_ELT (ivar, NULL_TREE, build_int_cst (integer_type_node, val)); @@ -2917,7 +2917,7 @@ generate_v2_ivars_list (tree chain, const char *name, tree attr, tree templ) initlist = build_v2_ivar_list_initializer (CLASS_NAME (templ), objc_v2_ivar_template, chain); - ivar_t_size = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (objc_v2_ivar_template)); + ivar_t_size = tree_to_hwi (TYPE_SIZE_UNIT (objc_v2_ivar_template)); decl = start_var_decl (ivar_list_template, name); CONSTRUCTOR_APPEND_ELT (inits, NULL_TREE, @@ -3175,7 +3175,7 @@ generate_v2_class_structs (struct imp_entry *impent) buf, meta_clac_meth); } - instanceStart = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (objc_v2_class_template)); + instanceStart = tree_to_hwi (TYPE_SIZE_UNIT (objc_v2_class_template)); /* Currently there are no class ivars and generation of class variables for the root of the inheritance has been removed. It @@ -3185,7 +3185,7 @@ generate_v2_class_structs (struct imp_entry *impent) class_ivars = NULL_TREE; /* TODO: Add total size of class variables when implemented. */ - instanceSize = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (objc_v2_class_template)); + instanceSize = tree_to_hwi (TYPE_SIZE_UNIT (objc_v2_class_template)); /* So now build the META CLASS structs. */ /* static struct class_ro_t _OBJC_METACLASS_Foo = { ... }; */ @@ -3267,7 +3267,7 @@ generate_v2_class_structs (struct imp_entry *impent) if (field && TREE_CODE (field) == FIELD_DECL) instanceSize = int_byte_position (field) * BITS_PER_UNIT - + tree_low_cst (DECL_SIZE (field), 0); + + tree_to_shwi (DECL_SIZE (field)); else instanceSize = 0; instanceSize /= BITS_PER_UNIT; diff --git a/gcc/objcp/Make-lang.in b/gcc/objcp/Make-lang.in index ec10fc890c2..4c2f8b66492 100644 --- a/gcc/objcp/Make-lang.in +++ b/gcc/objcp/Make-lang.in @@ -145,7 +145,7 @@ objcp/objcp-act.o : objc/objc-act.c \ $(START_HDRS) \ $(GGC_H) $(DIAGNOSTIC_H) $(FLAGS_H) input.h \ toplev.h $(FUNCTION_H) output.h debug.h $(LANGHOOKS_DEF_H) \ - $(HASHTAB_H) $(GIMPLE_H) \ + $(HASHTAB_H) $(GIMPLE_H) wide-int.h \ $(RTL_H) $(EXPR_H) $(TARGET_H) \ objcp/objcp-decl.h \ objc/objc-encoding.h \ diff --git a/gcc/omp-low.c b/gcc/omp-low.c index e5eaddb51fc..d00c1880be4 100644 --- a/gcc/omp-low.c +++ b/gcc/omp-low.c @@ -1962,9 +1962,7 @@ scan_omp_1_op (tree *tp, int *walk_subtrees, void *data) if (tem != TREE_TYPE (t)) { if (TREE_CODE (t) == INTEGER_CST) - *tp = build_int_cst_wide (tem, - TREE_INT_CST_LOW (t), - TREE_INT_CST_HIGH (t)); + *tp = wide_int_to_tree (tem, t); else TREE_TYPE (t) = tem; } @@ -5753,7 +5751,7 @@ expand_omp_atomic (struct omp_region *region) HOST_WIDE_INT index; /* Make sure the type is one of the supported sizes. */ - index = tree_low_cst (TYPE_SIZE_UNIT (type), 1); + index = tree_to_uhwi (TYPE_SIZE_UNIT (type)); index = exact_log2 (index); if (index >= 0 && index <= 4) { @@ -6511,9 +6509,9 @@ lower_omp_for_lastprivate (struct omp_for_data *fd, gimple_seq *body_p, /* When possible, use a strict equality expression. This can let VRP type optimizations deduce the value and remove a copy. */ - if (host_integerp (fd->loop.step, 0)) + if (tree_fits_shwi_p (fd->loop.step)) { - HOST_WIDE_INT step = TREE_INT_CST_LOW (fd->loop.step); + HOST_WIDE_INT step = tree_to_shwi (fd->loop.step); if (step == 1 || step == -1) cond_code = EQ_EXPR; } @@ -6531,7 +6529,7 @@ lower_omp_for_lastprivate (struct omp_for_data *fd, gimple_seq *body_p, /* Optimize: v = 0; is usually cheaper than v = some_other_constant. 
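
In expand_omp_atomic above, the operand size in bytes is mapped to a small table index with exact_log2: sizes 1, 2, 4, 8 and 16 give indices 0..4 and everything else is rejected. A stand-alone equivalent of that check:

  #include <cassert>

  /* Same contract as GCC's exact_log2: log2 (n) if n is a power of two,
     otherwise -1.  */
  static int
  exact_log2 (unsigned long long n)
  {
    if (n == 0 || (n & (n - 1)) != 0)
      return -1;
    int l = 0;
    while (n >>= 1)
      l++;
    return l;
  }

  int
  main ()
  {
    assert (exact_log2 (8) == 3);   /* 8-byte atomics -> index 3 */
    assert (exact_log2 (12) == -1); /* unsupported size */
    for (int i = 0; i <= 4; i++)
      assert (exact_log2 (1ull << i) == i);  /* the accepted range */
  }
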
*/ vinit = fd->loop.n1; if (cond_code == EQ_EXPR - && host_integerp (fd->loop.n2, 0) + && tree_fits_shwi_p (fd->loop.n2) && ! integer_zerop (fd->loop.n2)) vinit = build_int_cst (TREE_TYPE (fd->loop.v), 0); diff --git a/gcc/optabs.c b/gcc/optabs.c index a3051ad9d9a..05d9d76a5b0 100644 --- a/gcc/optabs.c +++ b/gcc/optabs.c @@ -851,7 +851,8 @@ expand_subword_shift (enum machine_mode op1_mode, optab binoptab, if (CONSTANT_P (op1) || shift_mask >= BITS_PER_WORD) { carries = outof_input; - tmp = immed_double_const (BITS_PER_WORD, 0, op1_mode); + tmp = immed_wide_int_const (wide_int::from_shwi (BITS_PER_WORD, + op1_mode), op1_mode); tmp = simplify_expand_binop (op1_mode, sub_optab, tmp, op1, 0, true, methods); } @@ -866,13 +867,14 @@ expand_subword_shift (enum machine_mode op1_mode, optab binoptab, outof_input, const1_rtx, 0, unsignedp, methods); if (shift_mask == BITS_PER_WORD - 1) { - tmp = immed_double_const (-1, -1, op1_mode); + tmp = immed_wide_int_const (wide_int::minus_one (op1_mode), op1_mode); tmp = simplify_expand_binop (op1_mode, xor_optab, op1, tmp, 0, true, methods); } else { - tmp = immed_double_const (BITS_PER_WORD - 1, 0, op1_mode); + tmp = immed_wide_int_const (wide_int::from_shwi (BITS_PER_WORD - 1, + op1_mode), op1_mode); tmp = simplify_expand_binop (op1_mode, sub_optab, tmp, op1, 0, true, methods); } @@ -1035,7 +1037,8 @@ expand_doubleword_shift (enum machine_mode op1_mode, optab binoptab, is true when the effective shift value is less than BITS_PER_WORD. Set SUPERWORD_OP1 to the shift count that should be used to shift OUTOF_INPUT into INTO_TARGET when the condition is false. */ - tmp = immed_double_const (BITS_PER_WORD, 0, op1_mode); + tmp = immed_wide_int_const (wide_int::from_shwi (BITS_PER_WORD, op1_mode), + op1_mode); if (!CONSTANT_P (op1) && shift_mask == BITS_PER_WORD - 1) { /* Set CMP1 to OP1 & BITS_PER_WORD. The result is zero iff OP1 @@ -2885,7 +2888,7 @@ expand_absneg_bit (enum rtx_code code, enum machine_mode mode, const struct real_format *fmt; int bitpos, word, nwords, i; enum machine_mode imode; - double_int mask; + wide_int mask; rtx temp, insns; /* The format has to have a simple sign bit. */ @@ -2921,7 +2924,7 @@ expand_absneg_bit (enum rtx_code code, enum machine_mode mode, nwords = (GET_MODE_BITSIZE (mode) + BITS_PER_WORD - 1) / BITS_PER_WORD; } - mask = double_int_zero.set_bit (bitpos); + mask = wide_int::set_bit_in_zero (bitpos, GET_MODE_PRECISION (imode)); if (code == ABS) mask = ~mask; @@ -2943,7 +2946,7 @@ expand_absneg_bit (enum rtx_code code, enum machine_mode mode, { temp = expand_binop (imode, code == ABS ? and_optab : xor_optab, op0_piece, - immed_double_int_const (mask, imode), + immed_wide_int_const (mask, imode), targ_piece, 1, OPTAB_LIB_WIDEN); if (temp != targ_piece) emit_move_insn (targ_piece, temp); @@ -2961,7 +2964,7 @@ expand_absneg_bit (enum rtx_code code, enum machine_mode mode, { temp = expand_binop (imode, code == ABS ? 
and_optab : xor_optab, gen_lowpart (imode, op0), - immed_double_int_const (mask, imode), + immed_wide_int_const (mask, imode), gen_lowpart (imode, target), 1, OPTAB_LIB_WIDEN); target = lowpart_subreg_maybe_copy (mode, temp, imode); @@ -3560,7 +3563,7 @@ expand_copysign_absneg (enum machine_mode mode, rtx op0, rtx op1, rtx target, } else { - double_int mask; + wide_int mask; if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD) { @@ -3582,10 +3585,9 @@ expand_copysign_absneg (enum machine_mode mode, rtx op0, rtx op1, rtx target, op1 = operand_subword_force (op1, word, mode); } - mask = double_int_zero.set_bit (bitpos); - + mask = wide_int::set_bit_in_zero (bitpos, GET_MODE_PRECISION (imode)); sign = expand_binop (imode, and_optab, op1, - immed_double_int_const (mask, imode), + immed_wide_int_const (mask, imode), NULL_RTX, 1, OPTAB_LIB_WIDEN); } @@ -3629,7 +3631,7 @@ expand_copysign_bit (enum machine_mode mode, rtx op0, rtx op1, rtx target, int bitpos, bool op0_is_abs) { enum machine_mode imode; - double_int mask; + wide_int mask, nmask; int word, nwords, i; rtx temp, insns; @@ -3653,7 +3655,7 @@ expand_copysign_bit (enum machine_mode mode, rtx op0, rtx op1, rtx target, nwords = (GET_MODE_BITSIZE (mode) + BITS_PER_WORD - 1) / BITS_PER_WORD; } - mask = double_int_zero.set_bit (bitpos); + mask = wide_int::set_bit_in_zero (bitpos, GET_MODE_PRECISION (imode)); if (target == 0 || target == op0 @@ -3673,14 +3675,16 @@ expand_copysign_bit (enum machine_mode mode, rtx op0, rtx op1, rtx target, if (i == word) { if (!op0_is_abs) - op0_piece - = expand_binop (imode, and_optab, op0_piece, - immed_double_int_const (~mask, imode), - NULL_RTX, 1, OPTAB_LIB_WIDEN); - + { + nmask = ~mask; + op0_piece + = expand_binop (imode, and_optab, op0_piece, + immed_wide_int_const (nmask, imode), + NULL_RTX, 1, OPTAB_LIB_WIDEN); + } op1 = expand_binop (imode, and_optab, operand_subword_force (op1, i, mode), - immed_double_int_const (mask, imode), + immed_wide_int_const (mask, imode), NULL_RTX, 1, OPTAB_LIB_WIDEN); temp = expand_binop (imode, ior_optab, op0_piece, op1, @@ -3700,15 +3704,17 @@ expand_copysign_bit (enum machine_mode mode, rtx op0, rtx op1, rtx target, else { op1 = expand_binop (imode, and_optab, gen_lowpart (imode, op1), - immed_double_int_const (mask, imode), + immed_wide_int_const (mask, imode), NULL_RTX, 1, OPTAB_LIB_WIDEN); op0 = gen_lowpart (imode, op0); if (!op0_is_abs) - op0 = expand_binop (imode, and_optab, op0, - immed_double_int_const (~mask, imode), - NULL_RTX, 1, OPTAB_LIB_WIDEN); - + { + nmask = ~mask; + op0 = expand_binop (imode, and_optab, op0, + immed_wide_int_const (nmask, imode), + NULL_RTX, 1, OPTAB_LIB_WIDEN); + } temp = expand_binop (imode, ior_optab, op0, op1, gen_lowpart (imode, target), 1, OPTAB_LIB_WIDEN); target = lowpart_subreg_maybe_copy (mode, temp, imode); diff --git a/gcc/postreload.c b/gcc/postreload.c index 97d7e5d0332..82a0aa32201 100644 --- a/gcc/postreload.c +++ b/gcc/postreload.c @@ -295,27 +295,25 @@ reload_cse_simplify_set (rtx set, rtx insn) #ifdef LOAD_EXTEND_OP if (extend_op != UNKNOWN) { - HOST_WIDE_INT this_val; + wide_int result; - /* ??? I'm lazy and don't wish to handle CONST_DOUBLE. Other - constants, such as SYMBOL_REF, cannot be extended. */ - if (!CONST_INT_P (this_rtx)) + if (!CONST_SCALAR_INT_P (this_rtx)) continue; - this_val = INTVAL (this_rtx); switch (extend_op) { case ZERO_EXTEND: - this_val &= GET_MODE_MASK (GET_MODE (src)); + result = wide_int (std::make_pair (this_rtx, GET_MODE (src))) + .zext (word_mode); break; case SIGN_EXTEND: - /* ??? 
In theory we're already extended. */ - if (this_val == trunc_int_for_mode (this_val, GET_MODE (src))) - break; + result = wide_int (std::make_pair (this_rtx, GET_MODE (src))) + .sext (word_mode); + break; default: gcc_unreachable (); } - this_rtx = GEN_INT (this_val); + this_rtx = immed_wide_int_const (result, GET_MODE (src)); } #endif this_cost = set_src_cost (this_rtx, speed); diff --git a/gcc/predict.c b/gcc/predict.c index ec793382658..22da3ad55df 100644 --- a/gcc/predict.c +++ b/gcc/predict.c @@ -994,15 +994,15 @@ strips_small_constant (tree t1, tree t2) return NULL; else if (TREE_CODE (t1) == SSA_NAME) ret = t1; - else if (host_integerp (t1, 0)) - value = tree_low_cst (t1, 0); + else if (tree_fits_shwi_p (t1)) + value = tree_to_shwi (t1); else return NULL; if (!t2) return ret; - else if (host_integerp (t2, 0)) - value = tree_low_cst (t2, 0); + else if (tree_fits_shwi_p (t2)) + value = tree_to_shwi (t2); else if (TREE_CODE (t2) == SSA_NAME) { if (ret) @@ -1098,7 +1098,7 @@ is_comparison_with_loop_invariant_p (gimple stmt, struct loop *loop, code = invert_tree_comparison (code, false); bound = iv0.base; base = iv1.base; - if (host_integerp (iv1.step, 0)) + if (tree_fits_shwi_p (iv1.step)) step = iv1.step; else return false; @@ -1107,7 +1107,7 @@ is_comparison_with_loop_invariant_p (gimple stmt, struct loop *loop, { bound = iv1.base; base = iv0.base; - if (host_integerp (iv0.step, 0)) + if (tree_fits_shwi_p (iv0.step)) step = iv0.step; else return false; @@ -1241,81 +1241,62 @@ predict_iv_comparison (struct loop *loop, basic_block bb, /* If loop bound, base and compare bound are all constants, we can calculate the probability directly. */ - if (host_integerp (loop_bound_var, 0) - && host_integerp (compare_var, 0) - && host_integerp (compare_base, 0)) + if (tree_fits_shwi_p (loop_bound_var) + && tree_fits_shwi_p (compare_var) + && tree_fits_shwi_p (compare_base)) { int probability; - bool of, overflow = false; - double_int mod, compare_count, tem, loop_count; + bool overflow, overall_overflow = false; + max_wide_int compare_count, tem, loop_count; - double_int loop_bound = tree_to_double_int (loop_bound_var); - double_int compare_bound = tree_to_double_int (compare_var); - double_int base = tree_to_double_int (compare_base); - double_int compare_step = tree_to_double_int (compare_step_var); + max_wide_int loop_bound = loop_bound_var; + max_wide_int compare_bound = compare_var; + max_wide_int base = compare_base; + max_wide_int compare_step = compare_step_var; /* (loop_bound - base) / compare_step */ - tem = loop_bound.sub_with_overflow (base, &of); - overflow |= of; - loop_count = tem.divmod_with_overflow (compare_step, - 0, TRUNC_DIV_EXPR, - &mod, &of); - overflow |= of; - - if ((!compare_step.is_negative ()) + tem = loop_bound.sub (base, SIGNED, &overflow); + overall_overflow |= overflow; + loop_count = tem.div_trunc (compare_step, SIGNED, &overflow); + overall_overflow |= overflow; + + if ((!compare_step.neg_p (SIGNED)) ^ (compare_code == LT_EXPR || compare_code == LE_EXPR)) { /* (loop_bound - compare_bound) / compare_step */ - tem = loop_bound.sub_with_overflow (compare_bound, &of); - overflow |= of; - compare_count = tem.divmod_with_overflow (compare_step, - 0, TRUNC_DIV_EXPR, - &mod, &of); - overflow |= of; + tem = loop_bound.sub (compare_bound, SIGNED, &overflow); + overall_overflow |= overflow; + compare_count = tem.div_trunc (compare_step, SIGNED, &overflow); + overall_overflow |= overflow; } else { /* (compare_bound - base) / compare_step */ - tem = 
compare_bound.sub_with_overflow (base, &of); - overflow |= of; - compare_count = tem.divmod_with_overflow (compare_step, - 0, TRUNC_DIV_EXPR, - &mod, &of); - overflow |= of; + tem = compare_bound.sub (base, SIGNED, &overflow); + overall_overflow |= overflow; + compare_count = tem.div_trunc (compare_step, SIGNED, &overflow); + overall_overflow |= overflow; } if (compare_code == LE_EXPR || compare_code == GE_EXPR) ++compare_count; if (loop_bound_code == LE_EXPR || loop_bound_code == GE_EXPR) ++loop_count; - if (compare_count.is_negative ()) - compare_count = double_int_zero; - if (loop_count.is_negative ()) - loop_count = double_int_zero; - if (loop_count.is_zero ()) + if (compare_count.neg_p (SIGNED)) + compare_count = 0; + if (loop_count.neg_p (SIGNED)) + loop_count = 0; + if (loop_count.zero_p ()) probability = 0; - else if (compare_count.scmp (loop_count) == 1) + else if (compare_count.cmps (loop_count) == 1) probability = REG_BR_PROB_BASE; else { - /* If loop_count is too big, such that REG_BR_PROB_BASE * loop_count - could overflow, shift both loop_count and compare_count right - a bit so that it doesn't overflow. Note both counts are known not - to be negative at this point. */ - int clz_bits = clz_hwi (loop_count.high); - gcc_assert (REG_BR_PROB_BASE < 32768); - if (clz_bits < 16) - { - loop_count.arshift (16 - clz_bits, HOST_BITS_PER_DOUBLE_INT); - compare_count.arshift (16 - clz_bits, HOST_BITS_PER_DOUBLE_INT); - } - tem = compare_count.mul_with_sign (double_int::from_shwi - (REG_BR_PROB_BASE), true, &of); - gcc_assert (!of); - tem = tem.divmod (loop_count, true, TRUNC_DIV_EXPR, &mod); + tem = compare_count * REG_BR_PROB_BASE; + tem = tem.udiv_trunc (loop_count); probability = tem.to_uhwi (); } - if (!overflow) + if (!overall_overflow) predict_edge (then_edge, PRED_LOOP_IV_COMPARE, probability); return; @@ -1498,10 +1479,10 @@ predict_loops (void) if (TREE_CODE (niter) == INTEGER_CST) { - if (host_integerp (niter, 1) + if (tree_fits_uhwi_p (niter) && max && compare_tree_int (niter, max - 1) == -1) - nitercst = tree_low_cst (niter, 1) + 1; + nitercst = tree_to_uhwi (niter) + 1; else nitercst = max; predictor = PRED_LOOP_ITERATIONS; @@ -1615,7 +1596,7 @@ predict_loops (void) if (loop_bound_var) predict_iv_comparison (loop, bb, loop_bound_var, loop_iv_base, loop_bound_code, - tree_low_cst (loop_bound_step, 0)); + tree_to_shwi (loop_bound_step)); } /* Free basic blocks from get_loop_body. */ diff --git a/gcc/print-rtl.c b/gcc/print-rtl.c index d2bda9ec84c..3620bd6e6c2 100644 --- a/gcc/print-rtl.c +++ b/gcc/print-rtl.c @@ -612,6 +612,12 @@ print_rtx (const_rtx in_rtx) fprintf (outfile, " [%s]", s); } break; + + case CONST_WIDE_INT: + if (! flag_simple) + fprintf (outfile, " "); + hwivec_output_hex (outfile, CONST_WIDE_INT_VEC (in_rtx)); + break; #endif case CODE_LABEL: diff --git a/gcc/print-tree.c b/gcc/print-tree.c index 029c3a25e6d..fcbd0fc6387 100644 --- a/gcc/print-tree.c +++ b/gcc/print-tree.c @@ -31,6 +31,7 @@ along with GCC; see the file COPYING3. If not see #include "tree-flow.h" #include "tree-dump.h" #include "dumpfile.h" +#include "wide-int-print.h" /* Define the hash table of nodes already seen. Such nodes are not repeated; brief cross-references are used. 
*/ @@ -121,16 +122,7 @@ print_node_brief (FILE *file, const char *prefix, const_tree node, int indent) fprintf (file, " overflow"); fprintf (file, " "); - if (TREE_INT_CST_HIGH (node) == 0) - fprintf (file, HOST_WIDE_INT_PRINT_UNSIGNED, TREE_INT_CST_LOW (node)); - else if (TREE_INT_CST_HIGH (node) == -1 - && TREE_INT_CST_LOW (node) != 0) - fprintf (file, "-" HOST_WIDE_INT_PRINT_UNSIGNED, - -TREE_INT_CST_LOW (node)); - else - fprintf (file, HOST_WIDE_INT_PRINT_DOUBLE_HEX, - (unsigned HOST_WIDE_INT) TREE_INT_CST_HIGH (node), - (unsigned HOST_WIDE_INT) TREE_INT_CST_LOW (node)); + print_dec (wide_int (node), file, TYPE_SIGN (TREE_TYPE (node))); } if (TREE_CODE (node) == REAL_CST) { @@ -335,7 +327,7 @@ print_node (FILE *file, const char *prefix, tree node, int indent) if (TREE_VISITED (node)) fputs (" visited", file); - if (code != TREE_VEC && code != SSA_NAME) + if (code != TREE_VEC && code != INTEGER_CST && code != SSA_NAME) { if (TREE_LANG_FLAG_0 (node)) fputs (" tree_0", file); @@ -743,17 +735,7 @@ print_node (FILE *file, const char *prefix, tree node, int indent) fprintf (file, " overflow"); fprintf (file, " "); - if (TREE_INT_CST_HIGH (node) == 0) - fprintf (file, HOST_WIDE_INT_PRINT_UNSIGNED, - TREE_INT_CST_LOW (node)); - else if (TREE_INT_CST_HIGH (node) == -1 - && TREE_INT_CST_LOW (node) != 0) - fprintf (file, "-" HOST_WIDE_INT_PRINT_UNSIGNED, - -TREE_INT_CST_LOW (node)); - else - fprintf (file, HOST_WIDE_INT_PRINT_DOUBLE_HEX, - (unsigned HOST_WIDE_INT) TREE_INT_CST_HIGH (node), - (unsigned HOST_WIDE_INT) TREE_INT_CST_LOW (node)); + print_dec (wide_int (node), file, TYPE_SIGN (TREE_TYPE (node))); break; case REAL_CST: diff --git a/gcc/read-rtl.c b/gcc/read-rtl.c index 10adf472a08..707ef3fd1cd 100644 --- a/gcc/read-rtl.c +++ b/gcc/read-rtl.c @@ -811,6 +811,29 @@ validate_const_int (const char *string) fatal_with_file_and_line ("invalid decimal constant \"%s\"\n", string); } +static void +validate_const_wide_int (const char *string) +{ + const char *cp; + int valid = 1; + + cp = string; + while (*cp && ISSPACE (*cp)) + cp++; + /* Skip the leading 0x. */ + if (cp[0] == '0' && cp[1] == 'x') + cp += 2; + else + valid = 0; + if (*cp == 0) + valid = 0; + for (; *cp; cp++) + if (! ISXDIGIT (*cp)) + valid = 0; + if (!valid) + fatal_with_file_and_line ("invalid hex constant \"%s\"\n", string); +} + /* Record that PTR uses iterator ITERATOR. */ static void @@ -1324,6 +1347,56 @@ read_rtx_code (const char *code_name) gcc_unreachable (); } + if (CONST_WIDE_INT_P (return_rtx)) + { + read_name (&name); + validate_const_wide_int (name.string); + { + hwivec hwiv; + const char *s = name.string; + int len; + int index = 0; + int gs = HOST_BITS_PER_WIDE_INT/4; + int pos; + char * buf = XALLOCAVEC (char, gs + 1); + unsigned HOST_WIDE_INT wi; + int wlen; + + /* Skip the leading spaces. */ + while (*s && ISSPACE (*s)) + s++; + + /* Skip the leading 0x. 
*/ + gcc_assert (s[0] == '0'); + gcc_assert (s[1] == 'x'); + s += 2; + + len = strlen (s); + pos = len - gs; + wlen = (len + gs - 1) / gs; /* Number of words needed. */ + + return_rtx = const_wide_int_alloc (wlen); + + hwiv = CONST_WIDE_INT_VEC (return_rtx); + while (pos > 0) + { +#if HOST_BITS_PER_WIDE_INT == 64 + sscanf (s + pos, "%16" HOST_WIDE_INT_PRINT "x", &wi); +#else + sscanf (s + pos, "%8" HOST_WIDE_INT_PRINT "x", &wi); +#endif + XHWIVEC_ELT (hwiv, index++) = wi; + pos -= gs; + } + strncpy (buf, s, gs + pos); + buf [gs + pos] = 0; + sscanf (buf, "%" HOST_WIDE_INT_PRINT "x", &wi); + XHWIVEC_ELT (hwiv, index++) = wi; + /* TODO: After reading, do we want to canonicalize with: + value = lookup_const_wide_int (value); ? */ + } + } + c = read_skip_spaces (); /* Syntactic sugar for AND and IOR, allowing Lisp-like arbitrary number of arguments for them. */ diff --git a/gcc/real.c b/gcc/real.c index b80aeac843f..559670a634f 100644 --- a/gcc/real.c +++ b/gcc/real.c @@ -29,6 +29,7 @@ #include "realmpfr.h" #include "tm_p.h" #include "dfp.h" +#include "wide-int.h" /* The floating point model used internally is not exactly IEEE 754 compliant, and close to the description in the ISO C99 standard, @@ -1377,42 +1378,38 @@ real_to_integer (const REAL_VALUE_TYPE *r) } } -/* Likewise, but to an integer pair, HI+LOW. */ +/* Likewise, but producing a wide-int of PRECISION. If + the value cannot be represented in PRECISION, FAIL is set to + TRUE. */ -void -real_to_integer2 (HOST_WIDE_INT *plow, HOST_WIDE_INT *phigh, - const REAL_VALUE_TYPE *r) +wide_int +real_to_integer (const REAL_VALUE_TYPE *r, bool *fail, int precision) { - REAL_VALUE_TYPE t; - HOST_WIDE_INT low, high; + HOST_WIDE_INT val[2 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_WIDE_INT]; int exp; + int words; + wide_int result; + int w; switch (r->cl) { case rvc_zero: underflow: - low = high = 0; - break; + return wide_int::zero (precision); case rvc_inf: case rvc_nan: overflow: - high = (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1); + *fail = true; + if (r->sign) - low = 0; + return wide_int::set_bit_in_zero (precision - 1, precision); else - { - high--; - low = -1; - } - break; + return ~wide_int::set_bit_in_zero (precision - 1, precision); case rvc_normal: if (r->decimal) - { - decimal_real_to_integer2 (plow, phigh, r); - return; - } + return decimal_real_to_integer (r, fail, precision); exp = REAL_EXP (r); if (exp <= 0) @@ -1421,42 +1418,49 @@ real_to_integer2 (HOST_WIDE_INT *plow, HOST_WIDE_INT *phigh, undefined, so it doesn't matter what we return, and some callers expect to be able to use this routine for both signed and unsigned conversions. */ - if (exp > HOST_BITS_PER_DOUBLE_INT) + if (exp > precision) goto overflow; - rshift_significand (&t, r, HOST_BITS_PER_DOUBLE_INT - exp); - if (HOST_BITS_PER_WIDE_INT == HOST_BITS_PER_LONG) + words = (precision + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT; + + for (int i = 0; i < 2 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_WIDE_INT; i++) + val[i] = 0; + +#if (HOST_BITS_PER_WIDE_INT == HOST_BITS_PER_LONG) + for (int i = 0; i < words; i++) { - high = t.sig[SIGSZ-1]; - low = t.sig[SIGSZ-2]; + int j = SIGSZ - words + i; + val[i] = (j < 0) ? 
0 : r->sig[j]; } - else +#else + gcc_assert (HOST_BITS_PER_WIDE_INT == 2*HOST_BITS_PER_LONG); + for (int i = 0; i < words; i++) { - gcc_assert (HOST_BITS_PER_WIDE_INT == 2*HOST_BITS_PER_LONG); - high = t.sig[SIGSZ-1]; - high = high << (HOST_BITS_PER_LONG - 1) << 1; - high |= t.sig[SIGSZ-2]; - - low = t.sig[SIGSZ-3]; - low = low << (HOST_BITS_PER_LONG - 1) << 1; - low |= t.sig[SIGSZ-4]; + int j = SIGSZ - (words * 2) + (i * 2) + 1; + if (j < 0) + val[i] = 0; + else + { + val[i] = r->sig[j]; + val[i] <<= HOST_BITS_PER_LONG; + val[i] |= r->sig[j - 1]; + } } +#endif + w = SIGSZ * HOST_BITS_PER_LONG + words * HOST_BITS_PER_WIDE_INT; + result = wide_int::from_array (val, + (w + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT, w, w); + result = result.rshiftu ((words * HOST_BITS_PER_WIDE_INT) - exp); + result = result.zforce_to_size (precision); if (r->sign) - { - if (low == 0) - high = -high; - else - low = -low, high = ~high; - } - break; + return -result; + else + return result; default: gcc_unreachable (); } - - *plow = low; - *phigh = high; } /* A subroutine of real_to_decimal. Compute the quotient and remainder @@ -2144,43 +2148,131 @@ real_from_string3 (REAL_VALUE_TYPE *r, const char *s, enum machine_mode mode) real_convert (r, mode, r); } -/* Initialize R from the integer pair HIGH+LOW. */ +/* Initialize R from a HOST_WIDE_INT. */ void real_from_integer (REAL_VALUE_TYPE *r, enum machine_mode mode, - unsigned HOST_WIDE_INT low, HOST_WIDE_INT high, - int unsigned_p) + HOST_WIDE_INT val, + signop sgn) { - if (low == 0 && high == 0) + if (val == 0) get_zero (r, 0); else { memset (r, 0, sizeof (*r)); r->cl = rvc_normal; - r->sign = high < 0 && !unsigned_p; - SET_REAL_EXP (r, HOST_BITS_PER_DOUBLE_INT); + r->sign = val < 0 && sgn == SIGNED; + SET_REAL_EXP (r, HOST_BITS_PER_WIDE_INT); + /* TODO: This fails for -MAXHOSTWIDEINT, wide_int version would + have worked. */ if (r->sign) + val = -val; + + if (HOST_BITS_PER_LONG == HOST_BITS_PER_WIDE_INT) + r->sig[SIGSZ-1] = val; + else { - high = ~high; - if (low == 0) - high += 1; - else - low = -low; + gcc_assert (HOST_BITS_PER_LONG*2 == HOST_BITS_PER_WIDE_INT); + r->sig[SIGSZ-1] = val >> (HOST_BITS_PER_LONG - 1) >> 1; + r->sig[SIGSZ-2] = val; } - if (HOST_BITS_PER_LONG == HOST_BITS_PER_WIDE_INT) + normalize (r); + } + + if (DECIMAL_FLOAT_MODE_P (mode)) + decimal_from_integer (r); + else if (mode != VOIDmode) + real_convert (r, mode, r); } + +/* Initialize R from a wide_int. */ + +void +real_from_integer (REAL_VALUE_TYPE *r, enum machine_mode mode, + wide_int val, signop sgn) +{ + if (val.zero_p ()) + get_zero (r, 0); + else + { + unsigned int len = val.get_precision (); + int i, j, e=0; + int maxbitlen = MAX_BITSIZE_MODE_ANY_INT + HOST_BITS_PER_WIDE_INT; + const unsigned int realmax = SIGNIFICAND_BITS/HOST_BITS_PER_WIDE_INT * HOST_BITS_PER_WIDE_INT; + + memset (r, 0, sizeof (*r)); + r->cl = rvc_normal; + r->sign = val.neg_p (sgn); + + if (len == 0) + len = 1; + + /* We have to ensure we can negate the largest negative number. */ + val = val.force_to_size (maxbitlen, sgn); + + if (r->sign) + val = -val; + + /* Ensure a multiple of HOST_BITS_PER_WIDE_INT, ceiling, as elt + won't work with precisions that are not a multiple of + HOST_BITS_PER_WIDE_INT. */ + len += HOST_BITS_PER_WIDE_INT - 1; + + /* Ensure we can represent the largest negative number. */ + len += 1; + + len = len/HOST_BITS_PER_WIDE_INT * HOST_BITS_PER_WIDE_INT; + + /* Cap the size to the size allowed by real.h. 
*/ + if (len > realmax) { - r->sig[SIGSZ-1] = high; - r->sig[SIGSZ-2] = low; + HOST_WIDE_INT cnt_l_z; + cnt_l_z = val.clz ().to_shwi (); + + if (maxbitlen - cnt_l_z > realmax) + { + e = maxbitlen - cnt_l_z - realmax; + + /* This value is too large, we must shift it right to + preserve all the bits we can, and then bump the + exponent up by that amount. */ + val = val.rshiftu (e); + } + len = realmax; } + + /* Clear out top bits so elt will work with precisions that aren't + a multiple of HOST_BITS_PER_WIDE_INT. */ + val = val.force_to_size (len, sgn); + len = len / HOST_BITS_PER_WIDE_INT; + + SET_REAL_EXP (r, len * HOST_BITS_PER_WIDE_INT + e); + + j = SIGSZ - 1; + if (HOST_BITS_PER_LONG == HOST_BITS_PER_WIDE_INT) + for (i = len - 1; i >= 0; i--) + { + r->sig[j--] = val.elt (i); + if (j < 0) + break; + } else { gcc_assert (HOST_BITS_PER_LONG*2 == HOST_BITS_PER_WIDE_INT); - r->sig[SIGSZ-1] = high >> (HOST_BITS_PER_LONG - 1) >> 1; - r->sig[SIGSZ-2] = high; - r->sig[SIGSZ-3] = low >> (HOST_BITS_PER_LONG - 1) >> 1; - r->sig[SIGSZ-4] = low; + for (i = len - 1; i >= 0; i--) + { + HOST_WIDE_INT e = val.elt (i); + r->sig[j--] = e >> (HOST_BITS_PER_LONG - 1) >> 1; + if (j < 0) + break; + r->sig[j--] = e; + if (j < 0) + break; + } } normalize (r); @@ -2270,7 +2362,7 @@ ten_to_ptwo (int n) for (i = 0; i < n; ++i) t *= t; - real_from_integer (&tens[n], VOIDmode, t, 0, 1); + real_from_integer (&tens[n], VOIDmode, t, UNSIGNED); } else { @@ -2309,7 +2401,7 @@ real_digit (int n) gcc_assert (n <= 9); if (n > 0 && num[n].cl == rvc_zero) - real_from_integer (&num[n], VOIDmode, n, 0, 1); + real_from_integer (&num[n], VOIDmode, wide_int (n), UNSIGNED); return &num[n]; } diff --git a/gcc/real.h b/gcc/real.h index 2ff84f6d295..9d93989fa49 100644 --- a/gcc/real.h +++ b/gcc/real.h @@ -21,6 +21,7 @@ #define GCC_REAL_H #include "machmode.h" +#include "signop.h" /* An expanded form of the represented number. */ @@ -267,8 +268,6 @@ extern void real_to_hexadecimal (char *, const REAL_VALUE_TYPE *, /* Render R as an integer. */ extern HOST_WIDE_INT real_to_integer (const REAL_VALUE_TYPE *); -extern void real_to_integer2 (HOST_WIDE_INT *, HOST_WIDE_INT *, - const REAL_VALUE_TYPE *); /* Initialize R from a decimal or hexadecimal string. Return -1 if the value underflows, +1 if overflows, and 0 otherwise. */ @@ -276,9 +275,9 @@ extern int real_from_string (REAL_VALUE_TYPE *, const char *); /* Wrapper to allow different internal representation for decimal floats. */ extern void real_from_string3 (REAL_VALUE_TYPE *, const char *, enum machine_mode); -/* Initialize R from an integer pair HIGH/LOW. */ +/* Initialize R from an integer. */ extern void real_from_integer (REAL_VALUE_TYPE *, enum machine_mode, - unsigned HOST_WIDE_INT, HOST_WIDE_INT, int); + HOST_WIDE_INT, signop); extern long real_to_target_fmt (long *, const REAL_VALUE_TYPE *, const struct real_format *); @@ -361,11 +360,8 @@ extern const struct real_format arm_half_format; #define REAL_VALUE_TO_TARGET_SINGLE(IN, OUT) \ ((OUT) = real_to_target (NULL, &(IN), mode_for_size (32, MODE_FLOAT, 0))) -#define REAL_VALUE_FROM_INT(r, lo, hi, mode) \ - real_from_integer (&(r), mode, lo, hi, 0) - -#define REAL_VALUE_FROM_UNSIGNED_INT(r, lo, hi, mode) \ - real_from_integer (&(r), mode, lo, hi, 1) +#define REAL_VALUE_FROM_INT(r, val, mode) \ + real_from_integer (&(r), mode, val, SIGNED) /* Real values to IEEE 754 decimal floats. 
*/ @@ -383,9 +379,6 @@ extern const struct real_format arm_half_format; extern REAL_VALUE_TYPE real_value_truncate (enum machine_mode, REAL_VALUE_TYPE); -#define REAL_VALUE_TO_INT(plow, phigh, r) \ - real_to_integer2 (plow, phigh, &(r)) - extern REAL_VALUE_TYPE real_value_negate (const REAL_VALUE_TYPE *); extern REAL_VALUE_TYPE real_value_abs (const REAL_VALUE_TYPE *); diff --git a/gcc/recog.c b/gcc/recog.c index 2d44416892e..fe33163376b 100644 --- a/gcc/recog.c +++ b/gcc/recog.c @@ -1145,7 +1145,7 @@ immediate_operand (rtx op, enum machine_mode mode) : mode, op)); } -/* Returns 1 if OP is an operand that is a CONST_INT. */ +/* Returns 1 if OP is an operand that is a CONST_INT of mode MODE. */ int const_int_operand (rtx op, enum machine_mode mode) @@ -1160,8 +1160,64 @@ const_int_operand (rtx op, enum machine_mode mode) return 1; } +#if TARGET_SUPPORTS_WIDE_INT +/* Returns 1 if OP is an operand that is a CONST_INT or CONST_WIDE_INT + of mode MODE. */ +int +const_scalar_int_operand (rtx op, enum machine_mode mode) +{ + if (!CONST_SCALAR_INT_P (op)) + return 0; + + if (CONST_INT_P (op)) + return const_int_operand (op, mode); + + if (mode != VOIDmode) + { + int prec = GET_MODE_PRECISION (mode); + int bitsize = GET_MODE_BITSIZE (mode); + + if (CONST_WIDE_INT_NUNITS (op) * HOST_BITS_PER_WIDE_INT > bitsize) + return 0; + + if (prec == bitsize) + return 1; + else + { + /* Multiword partial int. */ + HOST_WIDE_INT x + = CONST_WIDE_INT_ELT (op, CONST_WIDE_INT_NUNITS (op) - 1); + return (wide_int::sext (x, prec & (HOST_BITS_PER_WIDE_INT - 1)) + == x); + } + } + return 1; +} + +/* Returns 1 if OP is an operand that is a CONST_WIDE_INT of mode + MODE. This most likely is not as useful as + const_scalar_int_operand, but is here for consistency. */ +int +const_wide_int_operand (rtx op, enum machine_mode mode) +{ + if (!CONST_WIDE_INT_P (op)) + return 0; + + return const_scalar_int_operand (op, mode); +} + /* Returns 1 if OP is an operand that is a constant integer or constant - floating-point number. */ + floating-point number of MODE. */ + +int +const_double_operand (rtx op, enum machine_mode mode) +{ + return (GET_CODE (op) == CONST_DOUBLE) + && (GET_MODE (op) == mode || mode == VOIDmode); +} +#else +/* Returns 1 if OP is an operand that is a constant integer or constant + floating-point number of MODE. */ int const_double_operand (rtx op, enum machine_mode mode) @@ -1177,8 +1233,9 @@ const_double_operand (rtx op, enum machine_mode mode) && (mode == VOIDmode || GET_MODE (op) == mode || GET_MODE (op) == VOIDmode)); } - -/* Return 1 if OP is a general operand that is not an immediate operand. */ +#endif +/* Return 1 if OP is a general operand that is not an immediate + operand of mode MODE. */ int nonimmediate_operand (rtx op, enum machine_mode mode) @@ -1186,7 +1243,8 @@ nonimmediate_operand (rtx op, enum machine_mode mode) return (general_operand (op, mode) && ! CONSTANT_P (op)); } -/* Return 1 if OP is a register reference or immediate value of mode MODE. */ +/* Return 1 if OP is a register reference or immediate value of mode + MODE. */ int nonmemory_operand (rtx op, enum machine_mode mode) diff --git a/gcc/rtl.c b/gcc/rtl.c index b2d88f783b7..074e4252999 100644 --- a/gcc/rtl.c +++ b/gcc/rtl.c @@ -109,7 +109,7 @@ const enum rtx_class rtx_class[NUM_RTX_CODE] = { const unsigned char rtx_code_size[NUM_RTX_CODE] = { #define DEF_RTL_EXPR(ENUM, NAME, FORMAT, CLASS) \ (((ENUM) == CONST_INT || (ENUM) == CONST_DOUBLE \ - || (ENUM) == CONST_FIXED) \ + || (ENUM) == CONST_FIXED || (ENUM) == CONST_WIDE_INT) \ ? 
RTX_HDR_SIZE + (sizeof FORMAT - 1) * sizeof (HOST_WIDE_INT) \ : RTX_HDR_SIZE + (sizeof FORMAT - 1) * sizeof (rtunion)), @@ -181,18 +181,24 @@ shallow_copy_rtvec (rtvec vec) unsigned int rtx_size (const_rtx x) { + if (CONST_WIDE_INT_P (x)) + return (RTX_HDR_SIZE + + sizeof (struct hwivec_def) + + ((CONST_WIDE_INT_NUNITS (x) - 1) + * sizeof (HOST_WIDE_INT))); if (GET_CODE (x) == SYMBOL_REF && SYMBOL_REF_HAS_BLOCK_INFO_P (x)) return RTX_HDR_SIZE + sizeof (struct block_symbol); return RTX_CODE_SIZE (GET_CODE (x)); } -/* Allocate an rtx of code CODE. The CODE is stored in the rtx; - all the rest is initialized to zero. */ +/* Allocate an rtx of code CODE with EXTRA bytes in it. The CODE is + stored in the rtx; all the rest is initialized to zero. */ rtx -rtx_alloc_stat (RTX_CODE code MEM_STAT_DECL) +rtx_alloc_stat_v (RTX_CODE code MEM_STAT_DECL, int extra) { - rtx rt = ggc_alloc_rtx_def_stat (RTX_CODE_SIZE (code) PASS_MEM_STAT); + rtx rt = ggc_alloc_rtx_def_stat (RTX_CODE_SIZE (code) + extra + PASS_MEM_STAT); /* We want to clear everything up to the FLD array. Normally, this is one int, but we don't want to assume that and it isn't very @@ -210,6 +216,29 @@ rtx_alloc_stat (RTX_CODE code MEM_STAT_DECL) return rt; } +/* Allocate an rtx of code CODE. The CODE is stored in the rtx; + all the rest is initialized to zero. */ + +rtx +rtx_alloc_stat (RTX_CODE code MEM_STAT_DECL) +{ + return rtx_alloc_stat_v (code PASS_MEM_STAT, 0); +} + +/* Write the wide constant OP0 to OUTFILE. */ + +void +hwivec_output_hex (FILE *outfile, const_hwivec op0) +{ + int i = HWI_GET_NUM_ELEM (op0); + gcc_assert (i > 0); + if (XHWIVEC_ELT (op0, i-1) == 0) + fprintf (outfile, "0x"); + fprintf (outfile, HOST_WIDE_INT_PRINT_HEX, XHWIVEC_ELT (op0, --i)); + while (--i >= 0) + fprintf (outfile, HOST_WIDE_INT_PRINT_PADDED_HEX, XHWIVEC_ELT (op0, i)); +} + /* Return true if ORIG is a sharable CONST. */ @@ -428,7 +457,6 @@ rtx_equal_p_cb (const_rtx x, const_rtx y, rtx_equal_p_callback_function cb) if (XWINT (x, i) != XWINT (y, i)) return 0; break; - case 'n': case 'i': if (XINT (x, i) != XINT (y, i)) @@ -646,6 +674,10 @@ iterative_hash_rtx (const_rtx x, hashval_t hash) return iterative_hash_object (i, hash); case CONST_INT: return iterative_hash_object (INTVAL (x), hash); + case CONST_WIDE_INT: + for (i = 0; i < CONST_WIDE_INT_NUNITS (x); i++) + hash = iterative_hash_object (CONST_WIDE_INT_ELT (x, i), hash); + return hash; case SYMBOL_REF: if (XSTR (x, 0)) return iterative_hash (XSTR (x, 0), strlen (XSTR (x, 0)) + 1, @@ -811,6 +843,16 @@ rtl_check_failed_block_symbol (const char *file, int line, const char *func) /* XXX Maybe print the vector? */ void +hwivec_check_failed_bounds (const_hwivec r, int n, const char *file, int line, + const char *func) +{ + internal_error + ("RTL check: access of hwi elt %d of vector with last elt %d in %s, at %s:%d", + n, GET_NUM_ELEM (r) - 1, func, trim_filename (file), line); +} + +/* XXX Maybe print the vector? 
*/ +void rtvec_check_failed_bounds (const_rtvec r, int n, const char *file, int line, const char *func) { diff --git a/gcc/rtl.def b/gcc/rtl.def index b4ce1b99f3f..5259eb9a716 100644 --- a/gcc/rtl.def +++ b/gcc/rtl.def @@ -342,6 +342,9 @@ DEF_RTL_EXPR(TRAP_IF, "trap_if", "ee", RTX_EXTRA) /* numeric integer constant */ DEF_RTL_EXPR(CONST_INT, "const_int", "w", RTX_CONST_OBJ) +/* numeric integer constant */ +DEF_RTL_EXPR(CONST_WIDE_INT, "const_wide_int", "", RTX_CONST_OBJ) + /* fixed-point constant */ DEF_RTL_EXPR(CONST_FIXED, "const_fixed", "www", RTX_CONST_OBJ) diff --git a/gcc/rtl.h b/gcc/rtl.h index b5bfdffebc1..1d6035bc050 100644 --- a/gcc/rtl.h +++ b/gcc/rtl.h @@ -20,6 +20,7 @@ along with GCC; see the file COPYING3. If not see #ifndef GCC_RTL_H #define GCC_RTL_H +#include <utility> #include "statistics.h" #include "machmode.h" #include "input.h" @@ -28,6 +29,7 @@ along with GCC; see the file COPYING3. If not see #include "fixed-value.h" #include "alias.h" #include "hashtab.h" +#include "wide-int.h" #include "flags.h" /* Value used by some passes to "recognize" noop moves as valid @@ -249,6 +251,14 @@ struct GTY(()) object_block { vec<rtx, va_gc> *anchors; }; +struct GTY((variable_size)) hwivec_def { + int num_elem; /* number of elements */ + HOST_WIDE_INT elem[1]; +}; + +#define HWI_GET_NUM_ELEM(HWIVEC) ((HWIVEC)->num_elem) +#define HWI_PUT_NUM_ELEM(HWIVEC, NUM) ((HWIVEC)->num_elem = (NUM)) + /* RTL expression ("rtx"). */ struct GTY((chain_next ("RTX_NEXT (&%h)"), @@ -344,6 +354,7 @@ struct GTY((chain_next ("RTX_NEXT (&%h)"), struct block_symbol block_sym; struct real_value rv; struct fixed_value fv; + struct hwivec_def hwiv; } GTY ((special ("rtx_def"), desc ("GET_CODE (&%0)"))) u; }; @@ -383,13 +394,13 @@ struct GTY((chain_next ("RTX_NEXT (&%h)"), for a variable number of things. The principle use is inside PARALLEL expressions. */ +#define NULL_RTVEC (rtvec) 0 + struct GTY((variable_size)) rtvec_def { int num_elem; /* number of elements */ rtx GTY ((length ("%h.num_elem"))) elem[1]; }; -#define NULL_RTVEC (rtvec) 0 - #define GET_NUM_ELEM(RTVEC) ((RTVEC)->num_elem) #define PUT_NUM_ELEM(RTVEC, NUM) ((RTVEC)->num_elem = (NUM)) @@ -399,12 +410,38 @@ struct GTY((variable_size)) rtvec_def { /* Predicate yielding nonzero iff X is an rtx for a memory location. */ #define MEM_P(X) (GET_CODE (X) == MEM) +#if TARGET_SUPPORTS_WIDE_INT + +/* Match CONST_*s that can represent compile-time constant integers. */ +#define CASE_CONST_SCALAR_INT \ + case CONST_INT: \ + case CONST_WIDE_INT + +/* Match CONST_*s for which pointer equality corresponds to value + equality. */ +#define CASE_CONST_UNIQUE \ + case CONST_INT: \ + case CONST_WIDE_INT: \ + case CONST_DOUBLE: \ + case CONST_FIXED + +/* Match all CONST_* rtxes. */ +#define CASE_CONST_ANY \ + case CONST_INT: \ + case CONST_WIDE_INT: \ + case CONST_DOUBLE: \ + case CONST_FIXED: \ + case CONST_VECTOR + +#else + /* Match CONST_*s that can represent compile-time constant integers. */ #define CASE_CONST_SCALAR_INT \ case CONST_INT: \ case CONST_DOUBLE -/* Match CONST_*s for which pointer equality corresponds to value equality. */ +/* Match CONST_*s for which pointer equality corresponds to value +equality. */ #define CASE_CONST_UNIQUE \ case CONST_INT: \ case CONST_DOUBLE: \ @@ -416,10 +453,17 @@ struct GTY((variable_size)) rtvec_def { case CONST_DOUBLE: \ case CONST_FIXED: \ case CONST_VECTOR +#endif + + + /* Predicate yielding nonzero iff X is an rtx for a constant integer. 
*/ #define CONST_INT_P(X) (GET_CODE (X) == CONST_INT) +/* Predicate yielding nonzero iff X is an rtx for a constant integer. */ +#define CONST_WIDE_INT_P(X) (GET_CODE (X) == CONST_WIDE_INT) + /* Predicate yielding nonzero iff X is an rtx for a constant fixed-point. */ #define CONST_FIXED_P(X) (GET_CODE (X) == CONST_FIXED) @@ -432,8 +476,13 @@ struct GTY((variable_size)) rtvec_def { (GET_CODE (X) == CONST_DOUBLE && GET_MODE (X) == VOIDmode) /* Predicate yielding true iff X is an rtx for a integer const. */ +#if TARGET_SUPPORTS_WIDE_INT +#define CONST_SCALAR_INT_P(X) \ + (CONST_INT_P (X) || CONST_WIDE_INT_P (X)) +#else #define CONST_SCALAR_INT_P(X) \ (CONST_INT_P (X) || CONST_DOUBLE_AS_INT_P (X)) +#endif /* Predicate yielding true iff X is an rtx for a double-int. */ #define CONST_DOUBLE_AS_FLOAT_P(X) \ @@ -594,6 +643,13 @@ struct GTY((variable_size)) rtvec_def { __FUNCTION__); \ &_rtx->u.hwint[_n]; })) +#define XHWIVEC_ELT(HWIVEC, I) __extension__ \ +(*({ __typeof (HWIVEC) const _hwivec = (HWIVEC); const int _i = (I); \ + if (_i < 0 || _i >= HWI_GET_NUM_ELEM (_hwivec)) \ + hwivec_check_failed_bounds (_hwivec, _i, __FILE__, __LINE__, \ + __FUNCTION__); \ + &_hwivec->elem[_i]; })) + #define XCWINT(RTX, N, C) __extension__ \ (*({ __typeof (RTX) const _rtx = (RTX); \ if (GET_CODE (_rtx) != (C)) \ @@ -630,6 +686,11 @@ struct GTY((variable_size)) rtvec_def { __FUNCTION__); \ &_symbol->u.block_sym; }) +#define HWIVEC_CHECK(RTX,C) __extension__ \ +({ __typeof (RTX) const _symbol = (RTX); \ + RTL_CHECKC1 (_symbol, 0, C); \ + &_symbol->u.hwiv; }) + extern void rtl_check_failed_bounds (const_rtx, int, const char *, int, const char *) ATTRIBUTE_NORETURN; @@ -650,6 +711,9 @@ extern void rtl_check_failed_code_mode (const_rtx, enum rtx_code, enum machine_m ATTRIBUTE_NORETURN; extern void rtl_check_failed_block_symbol (const char *, int, const char *) ATTRIBUTE_NORETURN; +extern void hwivec_check_failed_bounds (const_rtvec, int, const char *, int, + const char *) + ATTRIBUTE_NORETURN; extern void rtvec_check_failed_bounds (const_rtvec, int, const char *, int, const char *) ATTRIBUTE_NORETURN; @@ -662,12 +726,14 @@ extern void rtvec_check_failed_bounds (const_rtvec, int, const char *, int, #define RTL_CHECKC2(RTX, N, C1, C2) ((RTX)->u.fld[N]) #define RTVEC_ELT(RTVEC, I) ((RTVEC)->elem[I]) #define XWINT(RTX, N) ((RTX)->u.hwint[N]) +#define XHWIVEC_ELT(HWIVEC, I) ((HWIVEC)->elem[I]) #define XCWINT(RTX, N, C) ((RTX)->u.hwint[N]) #define XCMWINT(RTX, N, C, M) ((RTX)->u.hwint[N]) #define XCNMWINT(RTX, N, C, M) ((RTX)->u.hwint[N]) #define XCNMPRV(RTX, C, M) (&(RTX)->u.rv) #define XCNMPFV(RTX, C, M) (&(RTX)->u.fv) #define BLOCK_SYMBOL_CHECK(RTX) (&(RTX)->u.block_sym) +#define HWIVEC_CHECK(RTX,C) (&(RTX)->u.hwiv) #endif @@ -810,8 +876,8 @@ extern void rtl_check_failed_flag (const char *, const_rtx, const char *, #define XCCFI(RTX, N, C) (RTL_CHECKC1 (RTX, N, C).rt_cfi) #define XCCSELIB(RTX, N, C) (RTL_CHECKC1 (RTX, N, C).rt_cselib) -#define XCVECEXP(RTX, N, M, C) RTVEC_ELT (XCVEC (RTX, N, C), M) -#define XCVECLEN(RTX, N, C) GET_NUM_ELEM (XCVEC (RTX, N, C)) +#define XCVECEXP(RTX, N, M, C) RTVEC_ELT (XCVEC (RTX, N, C), M) +#define XCVECLEN(RTX, N, C) GET_NUM_ELEM (XCVEC (RTX, N, C)) #define XC2EXP(RTX, N, C1, C2) (RTL_CHECKC2 (RTX, N, C1, C2).rt_rtx) @@ -1152,9 +1218,19 @@ rhs_regno (const_rtx x) #define INTVAL(RTX) XCWINT(RTX, 0, CONST_INT) #define UINTVAL(RTX) ((unsigned HOST_WIDE_INT) INTVAL (RTX)) +/* For a CONST_WIDE_INT, CONST_WIDE_INT_NUNITS is the number of + elements actually needed to represent the 
constant. + CONST_WIDE_INT_ELT gets one of the elements. 0 is the least + significant HOST_WIDE_INT. */ +#define CONST_WIDE_INT_VEC(RTX) HWIVEC_CHECK (RTX, CONST_WIDE_INT) +#define CONST_WIDE_INT_NUNITS(RTX) HWI_GET_NUM_ELEM (CONST_WIDE_INT_VEC (RTX)) +#define CONST_WIDE_INT_ELT(RTX, N) XHWIVEC_ELT (CONST_WIDE_INT_VEC (RTX), N) + /* For a CONST_DOUBLE: +#if TARGET_SUPPORTS_WIDE_INT == 0 For a VOIDmode, there are two integers CONST_DOUBLE_LOW is the low-order word and ..._HIGH the high-order. +#endif For a float, there is a REAL_VALUE_TYPE structure, and CONST_DOUBLE_REAL_VALUE(r) is a pointer to it. */ #define CONST_DOUBLE_LOW(r) XCMWINT (r, 0, CONST_DOUBLE, VOIDmode) @@ -1309,6 +1385,83 @@ struct address_info { bool autoinc_p; }; +#ifndef GENERATOR_FILE + +/* Accessors for rtx_mode. */ +static inline rtx +get_rtx (const rtx_mode_t p) +{ + return p.first; +} + +static inline enum machine_mode +get_mode (const rtx_mode_t p) +{ + return p.second; +} + +/* Specialization of to_shwi1 function in wide-int.h for rtl. This + cannot be in wide-int.h because of circular includes. */ +template<> +inline const HOST_WIDE_INT* +wide_int_ro::to_shwi1 (HOST_WIDE_INT *s ATTRIBUTE_UNUSED, + unsigned int *l, unsigned int *p, const rtx_mode_t& rp) +{ + const rtx rcst = get_rtx (rp); + enum machine_mode mode = get_mode (rp); + + *p = GET_MODE_PRECISION (mode); + + switch (GET_CODE (rcst)) + { + case CONST_INT: + *l = 1; + return &INTVAL (rcst); + + case CONST_WIDE_INT: + *l = CONST_WIDE_INT_NUNITS (rcst); + return &CONST_WIDE_INT_ELT (rcst, 0); + + case CONST_DOUBLE: + *l = 2; + return &CONST_DOUBLE_LOW (rcst); + + default: + gcc_unreachable (); + } +} + +/* Specialization of to_shwi2 function in wide-int.h for rtl. This + cannot be in wide-int.h because of circular includes. 
*/ +template<> +inline const HOST_WIDE_INT* +wide_int_ro::to_shwi2 (HOST_WIDE_INT *s ATTRIBUTE_UNUSED, + unsigned int *l, const rtx_mode_t& rp) +{ + const rtx rcst = get_rtx (rp); + + switch (GET_CODE (rcst)) + { + case CONST_INT: + *l = 1; + return &INTVAL (rcst); + + case CONST_WIDE_INT: + *l = CONST_WIDE_INT_NUNITS (rcst); + return &CONST_WIDE_INT_ELT (rcst, 0); + + case CONST_DOUBLE: + *l = 2; + return &CONST_DOUBLE_LOW (rcst); + + default: + gcc_unreachable (); + } +} + +#endif + + extern void init_rtlanal (void); extern int rtx_cost (rtx, enum rtx_code, int, bool); extern int address_cost (rtx, enum machine_mode, addr_space_t, bool); @@ -1764,6 +1917,12 @@ extern rtx plus_constant (enum machine_mode, rtx, HOST_WIDE_INT); /* In rtl.c */ extern rtx rtx_alloc_stat (RTX_CODE MEM_STAT_DECL); #define rtx_alloc(c) rtx_alloc_stat (c MEM_STAT_INFO) +extern rtx rtx_alloc_stat_v (RTX_CODE MEM_STAT_DECL, int); +#define rtx_alloc_v(c, SZ) rtx_alloc_stat_v (c MEM_STAT_INFO, SZ) +#define const_wide_int_alloc(NWORDS) \ + rtx_alloc_v (CONST_WIDE_INT, \ + (sizeof (struct hwivec_def) \ + + ((NWORDS)-1) * sizeof (HOST_WIDE_INT))) \ extern rtvec rtvec_alloc (int); extern rtvec shallow_copy_rtvec (rtvec); @@ -1820,10 +1979,17 @@ extern void start_sequence (void); extern void push_to_sequence (rtx); extern void push_to_sequence2 (rtx, rtx); extern void end_sequence (void); +#if TARGET_SUPPORTS_WIDE_INT == 0 extern double_int rtx_to_double_int (const_rtx); -extern rtx immed_double_int_const (double_int, enum machine_mode); +#endif +extern void hwivec_output_hex (FILE *, const_hwivec); +#ifndef GENERATOR_FILE +extern rtx immed_wide_int_const (const wide_int &cst, enum machine_mode mode); +#endif +#if TARGET_SUPPORTS_WIDE_INT == 0 extern rtx immed_double_const (HOST_WIDE_INT, HOST_WIDE_INT, enum machine_mode); +#endif /* In loop-iv.c */ diff --git a/gcc/rtlanal.c b/gcc/rtlanal.c index 95a314f0f3f..0bf12bb8c35 100644 --- a/gcc/rtlanal.c +++ b/gcc/rtlanal.c @@ -279,8 +279,8 @@ rtx_addr_can_trap_p_1 (const_rtx x, HOST_WIDE_INT offset, HOST_WIDE_INT size, if (!decl) decl_size = -1; else if (DECL_P (decl) && DECL_SIZE_UNIT (decl)) - decl_size = (host_integerp (DECL_SIZE_UNIT (decl), 0) - ? tree_low_cst (DECL_SIZE_UNIT (decl), 0) + decl_size = (tree_fits_shwi_p (DECL_SIZE_UNIT (decl)) + ? tree_to_shwi (DECL_SIZE_UNIT (decl)) : -1); else if (TREE_CODE (decl) == STRING_CST) decl_size = TREE_STRING_LENGTH (decl); @@ -3099,6 +3099,8 @@ commutative_operand_precedence (rtx op) /* Constants always come the second operand. Prefer "nice" constants. */ if (code == CONST_INT) return -8; + if (code == CONST_WIDE_INT) + return -8; if (code == CONST_DOUBLE) return -7; if (code == CONST_FIXED) @@ -3111,6 +3113,8 @@ commutative_operand_precedence (rtx op) case RTX_CONST_OBJ: if (code == CONST_INT) return -6; + if (code == CONST_WIDE_INT) + return -6; if (code == CONST_DOUBLE) return -5; if (code == CONST_FIXED) @@ -5297,7 +5301,10 @@ get_address_mode (rtx mem) /* Split up a CONST_DOUBLE or integer constant rtx into two rtx's for single words, storing in *FIRST the word that comes first in memory in the target - and in *SECOND the other. */ + and in *SECOND the other. + + TODO: This function needs to be rewritten to work on any size + integer. */ void split_double (rtx value, rtx *first, rtx *second) @@ -5374,6 +5381,22 @@ split_double (rtx value, rtx *first, rtx *second) } } } + else if (GET_CODE (value) == CONST_WIDE_INT) + { + /* All of this is scary code and needs to be converted to + properly work with any size integer. 
*/ + gcc_assert (CONST_WIDE_INT_NUNITS (value) == 2); + if (WORDS_BIG_ENDIAN) + { + *first = GEN_INT (CONST_WIDE_INT_ELT (value, 1)); + *second = GEN_INT (CONST_WIDE_INT_ELT (value, 0)); + } + else + { + *first = GEN_INT (CONST_WIDE_INT_ELT (value, 0)); + *second = GEN_INT (CONST_WIDE_INT_ELT (value, 1)); + } + } else if (!CONST_DOUBLE_P (value)) { if (WORDS_BIG_ENDIAN) diff --git a/gcc/sched-vis.c b/gcc/sched-vis.c index 2cf3d102728..ccc0f0685fa 100644 --- a/gcc/sched-vis.c +++ b/gcc/sched-vis.c @@ -432,6 +432,23 @@ print_value (pretty_printer *pp, const_rtx x, int verbose) pp_scalar (pp, HOST_WIDE_INT_PRINT_HEX, (unsigned HOST_WIDE_INT) INTVAL (x)); break; + + case CONST_WIDE_INT: + { + const char *sep = "<"; + int i; + for (i = CONST_WIDE_INT_NUNITS (x) - 1; i >= 0; i--) + { + pp_string (pp, sep); + sep = ","; + sprintf (tmp, HOST_WIDE_INT_PRINT_HEX, + (unsigned HOST_WIDE_INT) CONST_WIDE_INT_ELT (x, i)); + pp_string (pp, tmp); + } + pp_greater (pp); + } + break; + case CONST_DOUBLE: if (FLOAT_MODE_P (GET_MODE (x))) { diff --git a/gcc/sdbout.c b/gcc/sdbout.c index 13c11c29682..274febf01c8 100644 --- a/gcc/sdbout.c +++ b/gcc/sdbout.c @@ -534,10 +534,10 @@ plain_type_1 (tree type, int level) = (TYPE_DOMAIN (type) && TYPE_MIN_VALUE (TYPE_DOMAIN (type)) != 0 && TYPE_MAX_VALUE (TYPE_DOMAIN (type)) != 0 - && host_integerp (TYPE_MAX_VALUE (TYPE_DOMAIN (type)), 0) - && host_integerp (TYPE_MIN_VALUE (TYPE_DOMAIN (type)), 0) - ? (tree_low_cst (TYPE_MAX_VALUE (TYPE_DOMAIN (type)), 0) - - tree_low_cst (TYPE_MIN_VALUE (TYPE_DOMAIN (type)), 0) + 1) + && tree_fits_shwi_p (TYPE_MAX_VALUE (TYPE_DOMAIN (type))) + && tree_fits_shwi_p (TYPE_MIN_VALUE (TYPE_DOMAIN (type))) + ? (tree_to_shwi (TYPE_MAX_VALUE (TYPE_DOMAIN (type))) + - tree_to_shwi (TYPE_MIN_VALUE (TYPE_DOMAIN (type))) + 1) : 0); return PUSH_DERIVED_LEVEL (DT_ARY, m); @@ -993,8 +993,8 @@ sdbout_field_types (tree type) if (TREE_CODE (tail) == FIELD_DECL && DECL_NAME (tail) && DECL_SIZE (tail) - && host_integerp (DECL_SIZE (tail), 1) - && host_integerp (bit_position (tail), 0)) + && tree_fits_uhwi_p (DECL_SIZE (tail)) + && tree_fits_shwi_p (bit_position (tail))) { if (POINTER_TYPE_P (TREE_TYPE (tail))) sdbout_one_type (TREE_TYPE (TREE_TYPE (tail))); @@ -1133,7 +1133,7 @@ sdbout_one_type (tree type) continue; PUT_SDB_DEF (IDENTIFIER_POINTER (child_type_name)); - PUT_SDB_INT_VAL (tree_low_cst (BINFO_OFFSET (child), 0)); + PUT_SDB_INT_VAL (tree_to_shwi (BINFO_OFFSET (child))); PUT_SDB_SCL (member_scl); sdbout_type (BINFO_TYPE (child)); PUT_SDB_ENDEF; @@ -1151,10 +1151,10 @@ sdbout_one_type (tree type) if (TREE_CODE (value) == CONST_DECL) value = DECL_INITIAL (value); - if (host_integerp (value, 0)) + if (tree_fits_hwi_p (value)) { PUT_SDB_DEF (IDENTIFIER_POINTER (TREE_PURPOSE (tem))); - PUT_SDB_INT_VAL (tree_low_cst (value, 0)); + PUT_SDB_INT_VAL (tree_to_shwi (value)); PUT_SDB_SCL (C_MOE); PUT_SDB_TYPE (T_MOE); PUT_SDB_ENDEF; @@ -1172,8 +1172,8 @@ sdbout_one_type (tree type) if (TREE_CODE (tem) == FIELD_DECL && DECL_NAME (tem) && DECL_SIZE (tem) - && host_integerp (DECL_SIZE (tem), 1) - && host_integerp (bit_position (tem), 0)) + && tree_fits_uhwi_p (DECL_SIZE (tem)) + && tree_fits_shwi_p (bit_position (tem))) { const char *name; @@ -1184,7 +1184,7 @@ sdbout_one_type (tree type) PUT_SDB_INT_VAL (int_bit_position (tem)); PUT_SDB_SCL (C_FIELD); sdbout_type (DECL_BIT_FIELD_TYPE (tem)); - PUT_SDB_SIZE (tree_low_cst (DECL_SIZE (tem), 1)); + PUT_SDB_SIZE (tree_to_uhwi (DECL_SIZE (tem))); } else { diff --git a/gcc/sel-sched-ir.c b/gcc/sel-sched-ir.c index 
47e769534db..828cac3b449 100644 --- a/gcc/sel-sched-ir.c +++ b/gcc/sel-sched-ir.c @@ -1141,10 +1141,10 @@ lhs_and_rhs_separable_p (rtx lhs, rtx rhs) if (lhs == NULL || rhs == NULL) return false; - /* Do not schedule CONST, CONST_INT and CONST_DOUBLE etc as rhs: no point - to use reg, if const can be used. Moreover, scheduling const as rhs may - lead to mode mismatch cause consts don't have modes but they could be - merged from branches where the same const used in different modes. */ + /* Do not schedule constants as rhs: no point in using a reg if a + const can be used. Moreover, scheduling a const as rhs may lead + to a mode mismatch because consts don't have modes but they could + be merged from branches where the same const is used in different + modes. */ if (CONSTANT_P (rhs)) return false; diff --git a/gcc/signop.h b/gcc/signop.h new file mode 100644 index 00000000000..05dac902df5 --- /dev/null +++ b/gcc/signop.h @@ -0,0 +1,35 @@ +/* Operations with SIGNED and UNSIGNED. -*- C++ -*- + Copyright (C) 2012-2013 Free Software Foundation, Inc. + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify it +under the terms of the GNU General Public License as published by the +Free Software Foundation; either version 3, or (at your option) any +later version. + +GCC is distributed in the hope that it will be useful, but WITHOUT +ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING3. If not see +<http://www.gnu.org/licenses/>. */ + +#ifndef SIGNOP_H +#define SIGNOP_H + +/* This type is used for the large number of functions that produce + different results depending on whether the operands are signed types + or unsigned types. The signedness of a tree type can be found by + using the TYPE_SIGN macro. */ + +enum signop_e { + SIGNED, + UNSIGNED +}; + +typedef enum signop_e signop; + +#endif diff --git a/gcc/simplify-rtx.c b/gcc/simplify-rtx.c index 17a3d12e076..8fa2866aa05 100644 --- a/gcc/simplify-rtx.c +++ b/gcc/simplify-rtx.c @@ -86,6 +86,22 @@ mode_signbit_p (enum machine_mode mode, const_rtx x) if (width <= HOST_BITS_PER_WIDE_INT && CONST_INT_P (x)) val = INTVAL (x); +#if TARGET_SUPPORTS_WIDE_INT + else if (CONST_WIDE_INT_P (x)) + { + unsigned int i; + unsigned int elts = CONST_WIDE_INT_NUNITS (x); + if (elts != (width + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT) + return false; + for (i = 0; i < elts - 1; i++) + if (CONST_WIDE_INT_ELT (x, i) != 0) + return false; + val = CONST_WIDE_INT_ELT (x, elts - 1); + width %= HOST_BITS_PER_WIDE_INT; + if (width == 0) + width = HOST_BITS_PER_WIDE_INT; + } +#else else if (width <= HOST_BITS_PER_DOUBLE_INT && CONST_DOUBLE_AS_INT_P (x) && CONST_DOUBLE_LOW (x) == 0) @@ -93,8 +109,9 @@ mode_signbit_p (enum machine_mode mode, const_rtx x) { val = CONST_DOUBLE_HIGH (x); width -= HOST_BITS_PER_WIDE_INT; } +#endif else - /* FIXME: We don't yet have a representation for wider modes. */ + /* X is not an integer constant. 
*/ return false; if (width < HOST_BITS_PER_WIDE_INT) @@ -298,13 +315,13 @@ delegitimize_mem_from_attrs (rtx x) &mode, &unsignedp, &volatilep, false); if (bitsize != GET_MODE_BITSIZE (mode) || (bitpos % BITS_PER_UNIT) - || (toffset && !host_integerp (toffset, 0))) + || (toffset && !tree_fits_shwi_p (toffset))) decl = NULL; else { offset += bitpos / BITS_PER_UNIT; if (toffset) - offset += TREE_INT_CST_LOW (toffset); + offset += tree_to_hwi (toffset); } break; } @@ -1524,7 +1541,6 @@ simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode, rtx op, enum machine_mode op_mode) { unsigned int width = GET_MODE_PRECISION (mode); - unsigned int op_width = GET_MODE_PRECISION (op_mode); if (code == VEC_DUPLICATE) { @@ -1592,336 +1608,117 @@ simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode, if (code == FLOAT && CONST_SCALAR_INT_P (op)) { - HOST_WIDE_INT hv, lv; REAL_VALUE_TYPE d; - if (CONST_INT_P (op)) - lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv); - else - lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op); + if (op_mode == VOIDmode) + { + /* CONST_INTs have VOIDmode as their mode. We assume that + all the bits of the constant are significant, though this + is a dangerous assumption, as CONST_INTs are often created + and used with garbage in the bits outside of the precision + of the implied mode of the const_int. */ + op_mode = mode_for_size (MAX_BITSIZE_MODE_ANY_INT, MODE_INT, 0); + } - REAL_VALUE_FROM_INT (d, lv, hv, mode); + real_from_integer (&d, mode, std::make_pair (op, op_mode), SIGNED); d = real_value_truncate (mode, d); return CONST_DOUBLE_FROM_REAL_VALUE (d, mode); } else if (code == UNSIGNED_FLOAT && CONST_SCALAR_INT_P (op)) { - HOST_WIDE_INT hv, lv; REAL_VALUE_TYPE d; - if (CONST_INT_P (op)) - lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv); - else - lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op); - - if (op_mode == VOIDmode - || GET_MODE_PRECISION (op_mode) > HOST_BITS_PER_DOUBLE_INT) - /* We should never get a negative number. */ - gcc_assert (hv >= 0); - else if (GET_MODE_PRECISION (op_mode) <= HOST_BITS_PER_WIDE_INT) - hv = 0, lv &= GET_MODE_MASK (op_mode); + if (op_mode == VOIDmode) + { + /* CONST_INTs have VOIDmode as their mode. We assume that + all the bits of the constant are significant, though this + is a dangerous assumption, as CONST_INTs are often created + and used with garbage in the bits outside of the precision + of the implied mode of the const_int. */ + op_mode = mode_for_size (MAX_BITSIZE_MODE_ANY_INT, MODE_INT, 0); + } - REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode); + real_from_integer (&d, mode, std::make_pair (op, op_mode), UNSIGNED); d = real_value_truncate (mode, d); return CONST_DOUBLE_FROM_REAL_VALUE (d, mode); } - if (CONST_INT_P (op) - && width <= HOST_BITS_PER_WIDE_INT && width > 0) + if (CONST_SCALAR_INT_P (op) && width > 0) { - HOST_WIDE_INT arg0 = INTVAL (op); - HOST_WIDE_INT val; + wide_int result; + enum machine_mode imode = op_mode == VOIDmode ? mode : op_mode; + wide_int op0 = std::make_pair (op, imode); + +#if TARGET_SUPPORTS_WIDE_INT == 0 + /* This assert keeps the simplification from producing a result + that cannot be represented in a CONST_DOUBLE, but a lot of + upstream callers expect that this function never fails to + simplify something, so if you added this to the test above, + the code would die later anyway. If this assert happens, you + just need to make the port support wide int. 
*/ + gcc_assert (width <= HOST_BITS_PER_DOUBLE_INT); +#endif switch (code) { case NOT: - val = ~ arg0; + result = ~op0; break; case NEG: - val = - arg0; + result = -op0; break; case ABS: - val = (arg0 >= 0 ? arg0 : - arg0); + result = op0.abs (); break; case FFS: - arg0 &= GET_MODE_MASK (mode); - val = ffs_hwi (arg0); + result = op0.ffs (); break; case CLZ: - arg0 &= GET_MODE_MASK (mode); - if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val)) - ; - else - val = GET_MODE_PRECISION (mode) - floor_log2 (arg0) - 1; + result = op0.clz (); break; case CLRSB: - arg0 &= GET_MODE_MASK (mode); - if (arg0 == 0) - val = GET_MODE_PRECISION (mode) - 1; - else if (arg0 >= 0) - val = GET_MODE_PRECISION (mode) - floor_log2 (arg0) - 2; - else if (arg0 < 0) - val = GET_MODE_PRECISION (mode) - floor_log2 (~arg0) - 2; + result = op0.clrsb (); break; - + case CTZ: - arg0 &= GET_MODE_MASK (mode); - if (arg0 == 0) - { - /* Even if the value at zero is undefined, we have to come - up with some replacement. Seems good enough. */ - if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val)) - val = GET_MODE_PRECISION (mode); - } - else - val = ctz_hwi (arg0); + result = op0.ctz (); break; case POPCOUNT: - arg0 &= GET_MODE_MASK (mode); - val = 0; - while (arg0) - val++, arg0 &= arg0 - 1; + result = op0.popcount (); break; case PARITY: - arg0 &= GET_MODE_MASK (mode); - val = 0; - while (arg0) - val++, arg0 &= arg0 - 1; - val &= 1; + result = op0.parity (); break; case BSWAP: - { - unsigned int s; - - val = 0; - for (s = 0; s < width; s += 8) - { - unsigned int d = width - s - 8; - unsigned HOST_WIDE_INT byte; - byte = (arg0 >> s) & 0xff; - val |= byte << d; - } - } + result = op0.bswap (); break; case TRUNCATE: - val = arg0; + result = op0.zforce_to_size (width); break; case ZERO_EXTEND: - /* When zero-extending a CONST_INT, we need to know its - original mode. */ - gcc_assert (op_mode != VOIDmode); - if (op_width == HOST_BITS_PER_WIDE_INT) - { - /* If we were really extending the mode, - we would have to distinguish between zero-extension - and sign-extension. */ - gcc_assert (width == op_width); - val = arg0; - } - else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT) - val = arg0 & GET_MODE_MASK (op_mode); - else - return 0; + result = op0.zforce_to_size (width); break; case SIGN_EXTEND: - if (op_mode == VOIDmode) - op_mode = mode; - op_width = GET_MODE_PRECISION (op_mode); - if (op_width == HOST_BITS_PER_WIDE_INT) - { - /* If we were really extending the mode, - we would have to distinguish between zero-extension - and sign-extension. */ - gcc_assert (width == op_width); - val = arg0; - } - else if (op_width < HOST_BITS_PER_WIDE_INT) - { - val = arg0 & GET_MODE_MASK (op_mode); - if (val_signbit_known_set_p (op_mode, val)) - val |= ~GET_MODE_MASK (op_mode); - } - else - return 0; + result = op0.sforce_to_size (width); break; case SQRT: - case FLOAT_EXTEND: - case FLOAT_TRUNCATE: - case SS_TRUNCATE: - case US_TRUNCATE: - case SS_NEG: - case US_NEG: - case SS_ABS: - return 0; - - default: - gcc_unreachable (); - } - - return gen_int_mode (val, mode); - } - - /* We can do some operations on integer CONST_DOUBLEs. Also allow - for a DImode operation on a CONST_INT. 
*/ - else if (width <= HOST_BITS_PER_DOUBLE_INT - && (CONST_DOUBLE_AS_INT_P (op) || CONST_INT_P (op))) - { - double_int first, value; - - if (CONST_DOUBLE_AS_INT_P (op)) - first = double_int::from_pair (CONST_DOUBLE_HIGH (op), - CONST_DOUBLE_LOW (op)); - else - first = double_int::from_shwi (INTVAL (op)); - - switch (code) - { - case NOT: - value = ~first; - break; - - case NEG: - value = -first; - break; - - case ABS: - if (first.is_negative ()) - value = -first; - else - value = first; - break; - - case FFS: - value.high = 0; - if (first.low != 0) - value.low = ffs_hwi (first.low); - else if (first.high != 0) - value.low = HOST_BITS_PER_WIDE_INT + ffs_hwi (first.high); - else - value.low = 0; - break; - - case CLZ: - value.high = 0; - if (first.high != 0) - value.low = GET_MODE_PRECISION (mode) - floor_log2 (first.high) - 1 - - HOST_BITS_PER_WIDE_INT; - else if (first.low != 0) - value.low = GET_MODE_PRECISION (mode) - floor_log2 (first.low) - 1; - else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, value.low)) - value.low = GET_MODE_PRECISION (mode); - break; - - case CTZ: - value.high = 0; - if (first.low != 0) - value.low = ctz_hwi (first.low); - else if (first.high != 0) - value.low = HOST_BITS_PER_WIDE_INT + ctz_hwi (first.high); - else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, value.low)) - value.low = GET_MODE_PRECISION (mode); - break; - - case POPCOUNT: - value = double_int_zero; - while (first.low) - { - value.low++; - first.low &= first.low - 1; - } - while (first.high) - { - value.low++; - first.high &= first.high - 1; - } - break; - - case PARITY: - value = double_int_zero; - while (first.low) - { - value.low++; - first.low &= first.low - 1; - } - while (first.high) - { - value.low++; - first.high &= first.high - 1; - } - value.low &= 1; - break; - - case BSWAP: - { - unsigned int s; - - value = double_int_zero; - for (s = 0; s < width; s += 8) - { - unsigned int d = width - s - 8; - unsigned HOST_WIDE_INT byte; - - if (s < HOST_BITS_PER_WIDE_INT) - byte = (first.low >> s) & 0xff; - else - byte = (first.high >> (s - HOST_BITS_PER_WIDE_INT)) & 0xff; - - if (d < HOST_BITS_PER_WIDE_INT) - value.low |= byte << d; - else - value.high |= byte << (d - HOST_BITS_PER_WIDE_INT); - } - } - break; - - case TRUNCATE: - /* This is just a change-of-mode, so do nothing. 
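
Illustration: the block being deleted here spelled every operation out over an explicit (low, high) pair of host words; ffs, for instance, probes the low word first and biases the high word's answer by HOST_BITS_PER_WIDE_INT. A toy reconstruction of just that case (toy_double_int is a stand-in, not GCC's double_int; __builtin_ctzll is the GCC builtin):

    #include <cassert>
    #include <cstdint>

    struct toy_double_int { uint64_t low, high; };

    /* 1-based index of the least significant set bit of the 128-bit pair,
       0 if no bit is set; the logic wide_int's ffs () now hides.  */
    static int toy_ffs (toy_double_int v)
    {
      if (v.low)
        return __builtin_ctzll (v.low) + 1;
      if (v.high)
        return 64 + __builtin_ctzll (v.high) + 1;
      return 0;
    }

    int main ()
    {
      toy_double_int a = { 0, 1 };     /* only bit 64 set */
      assert (toy_ffs (a) == 65);
      toy_double_int b = { 8, 0 };
      assert (toy_ffs (b) == 4);
      return 0;
    }
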
*/ - value = first; - break; - - case ZERO_EXTEND: - gcc_assert (op_mode != VOIDmode); - - if (op_width > HOST_BITS_PER_WIDE_INT) - return 0; - - value = double_int::from_uhwi (first.low & GET_MODE_MASK (op_mode)); - break; - - case SIGN_EXTEND: - if (op_mode == VOIDmode - || op_width > HOST_BITS_PER_WIDE_INT) - return 0; - else - { - value.low = first.low & GET_MODE_MASK (op_mode); - if (val_signbit_known_set_p (op_mode, value.low)) - value.low |= ~GET_MODE_MASK (op_mode); - - value.high = HWI_SIGN_EXTEND (value.low); - } - break; - - case SQRT: - return 0; - default: return 0; } - return immed_double_int_const (value, mode); + return immed_wide_int_const (result, mode); } else if (CONST_DOUBLE_AS_FLOAT_P (op) @@ -1973,11 +1770,10 @@ simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode, } return CONST_DOUBLE_FROM_REAL_VALUE (d, mode); } - else if (CONST_DOUBLE_AS_FLOAT_P (op) && SCALAR_FLOAT_MODE_P (GET_MODE (op)) && GET_MODE_CLASS (mode) == MODE_INT - && width <= HOST_BITS_PER_DOUBLE_INT && width > 0) + && width > 0) { /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX operators are intentionally left unspecified (to ease implementation @@ -1986,9 +1782,13 @@ simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode, /* This was formerly used only for non-IEEE float. eggert@twinsun.com says it is safe for IEEE also. */ - HOST_WIDE_INT xh, xl, th, tl; REAL_VALUE_TYPE x, t; REAL_VALUE_FROM_CONST_DOUBLE (x, op); + wide_int wmax, wmin; + /* This is part of the abi to real_to_integer, but we check + things before making this call. */ + bool fail; + switch (code) { case FIX: @@ -1996,45 +1796,18 @@ simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode, return const0_rtx; /* Test against the signed upper bound. */ - if (width > HOST_BITS_PER_WIDE_INT) - { - th = ((unsigned HOST_WIDE_INT) 1 - << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1; - tl = -1; - } - else - { - th = 0; - tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1; - } - real_from_integer (&t, VOIDmode, tl, th, 0); + wmax = wide_int::max_value (width, SIGNED); + real_from_integer (&t, VOIDmode, wmax, SIGNED); if (REAL_VALUES_LESS (t, x)) - { - xh = th; - xl = tl; - break; - } + return immed_wide_int_const (wmax, mode); /* Test against the signed lower bound. */ - if (width > HOST_BITS_PER_WIDE_INT) - { - th = (unsigned HOST_WIDE_INT) (-1) - << (width - HOST_BITS_PER_WIDE_INT - 1); - tl = 0; - } - else - { - th = -1; - tl = (unsigned HOST_WIDE_INT) (-1) << (width - 1); - } - real_from_integer (&t, VOIDmode, tl, th, 0); + wmin = wide_int::min_value (width, SIGNED); + real_from_integer (&t, VOIDmode, wmin, SIGNED); if (REAL_VALUES_LESS (x, t)) - { - xh = th; - xl = tl; - break; - } - REAL_VALUE_TO_INT (&xl, &xh, x); + return immed_wide_int_const (wmin, mode); + + return immed_wide_int_const (real_to_integer (&x, &fail, width), mode); break; case UNSIGNED_FIX: @@ -2042,37 +1815,17 @@ simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode, return const0_rtx; /* Test against the unsigned upper bound. 
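
Illustration: the FIX case above now saturates to wide_int::max_value / wide_int::min_value of the result width before converting, and real_to_integer truncates toward zero. A standalone sketch for widths up to 63 bits (model_fix is an invented name):

    #include <cassert>
    #include <cstdint>

    /* Saturating float-to-signed conversion at WIDTH bits (WIDTH <= 63),
       modelling the wmax/wmin clamping in the FIX case.  */
    static int64_t model_fix (double x, unsigned width)
    {
      int64_t smax = ((int64_t) 1 << (width - 1)) - 1;
      int64_t smin = -smax - 1;
      if (x > (double) smax) return smax;
      if (x < (double) smin) return smin;
      return (int64_t) x;   /* truncates toward zero, like real_to_integer */
    }

    int main ()
    {
      assert (model_fix (1e9, 16) == 32767);
      assert (model_fix (-1e9, 16) == -32768);
      assert (model_fix (-12.9, 16) == -12);
      return 0;
    }
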
*/ - if (width == HOST_BITS_PER_DOUBLE_INT) - { - th = -1; - tl = -1; - } - else if (width >= HOST_BITS_PER_WIDE_INT) - { - th = ((unsigned HOST_WIDE_INT) 1 - << (width - HOST_BITS_PER_WIDE_INT)) - 1; - tl = -1; - } - else - { - th = 0; - tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1; - } - real_from_integer (&t, VOIDmode, tl, th, 1); + wmax = wide_int::max_value (width, UNSIGNED); + real_from_integer (&t, VOIDmode, wmax, UNSIGNED); if (REAL_VALUES_LESS (t, x)) - { - xh = th; - xl = tl; - break; - } + return immed_wide_int_const (wmax, mode); - REAL_VALUE_TO_INT (&xl, &xh, x); + return immed_wide_int_const (real_to_integer (&x, &fail, width), mode); break; default: gcc_unreachable (); } - return immed_double_const (xl, xh, mode); } return NULL_RTX; @@ -2261,49 +2014,52 @@ simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode, if (SCALAR_INT_MODE_P (mode)) { - double_int coeff0, coeff1; + wide_int coeff0; + wide_int coeff1; rtx lhs = op0, rhs = op1; - coeff0 = double_int_one; - coeff1 = double_int_one; + coeff0 = wide_int::one (GET_MODE_PRECISION (mode)); + coeff1 = wide_int::one (GET_MODE_PRECISION (mode)); if (GET_CODE (lhs) == NEG) { - coeff0 = double_int_minus_one; + coeff0 = wide_int::minus_one (GET_MODE_PRECISION (mode)); lhs = XEXP (lhs, 0); } else if (GET_CODE (lhs) == MULT - && CONST_INT_P (XEXP (lhs, 1))) + && CONST_SCALAR_INT_P (XEXP (lhs, 1))) { - coeff0 = double_int::from_shwi (INTVAL (XEXP (lhs, 1))); + coeff0 = std::make_pair (XEXP (lhs, 1), mode); lhs = XEXP (lhs, 0); } else if (GET_CODE (lhs) == ASHIFT && CONST_INT_P (XEXP (lhs, 1)) && INTVAL (XEXP (lhs, 1)) >= 0 - && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT) + && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (mode)) { - coeff0 = double_int_zero.set_bit (INTVAL (XEXP (lhs, 1))); + coeff0 = wide_int::set_bit_in_zero (INTVAL (XEXP (lhs, 1)), + GET_MODE_PRECISION (mode)); lhs = XEXP (lhs, 0); } if (GET_CODE (rhs) == NEG) { - coeff1 = double_int_minus_one; + coeff1 = wide_int::minus_one (GET_MODE_PRECISION (mode)); rhs = XEXP (rhs, 0); } else if (GET_CODE (rhs) == MULT && CONST_INT_P (XEXP (rhs, 1))) { - coeff1 = double_int::from_shwi (INTVAL (XEXP (rhs, 1))); + coeff1 = std::make_pair (XEXP (rhs, 1), mode); rhs = XEXP (rhs, 0); } else if (GET_CODE (rhs) == ASHIFT && CONST_INT_P (XEXP (rhs, 1)) && INTVAL (XEXP (rhs, 1)) >= 0 - && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT) + && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (mode)) { - coeff1 = double_int_zero.set_bit (INTVAL (XEXP (rhs, 1))); + coeff1 = wide_int::set_bit_in_zero (INTVAL (XEXP (rhs, 1)), + GET_MODE_PRECISION (mode)); rhs = XEXP (rhs, 0); } @@ -2311,11 +2067,9 @@ simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode, { rtx orig = gen_rtx_PLUS (mode, op0, op1); rtx coeff; - double_int val; bool speed = optimize_function_for_speed_p (cfun); - val = coeff0 + coeff1; - coeff = immed_double_int_const (val, mode); + coeff = immed_wide_int_const (coeff0 + coeff1, mode); tem = simplify_gen_binary (MULT, mode, lhs, coeff); return set_src_cost (tem, speed) <= set_src_cost (orig, speed) @@ -2437,49 +2191,52 @@ simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode, if (SCALAR_INT_MODE_P (mode)) { - double_int coeff0, negcoeff1; + wide_int coeff0; + wide_int negcoeff1; rtx lhs = op0, rhs = op1; - coeff0 = double_int_one; - negcoeff1 = double_int_minus_one; + coeff0 = wide_int::one (GET_MODE_PRECISION (mode)); + negcoeff1 = wide_int::minus_one (GET_MODE_PRECISION (mode)); if (GET_CODE (lhs) == NEG) { - coeff0 =
double_int_minus_one; + coeff0 = wide_int::minus_one (GET_MODE_PRECISION (mode)); lhs = XEXP (lhs, 0); } else if (GET_CODE (lhs) == MULT - && CONST_INT_P (XEXP (lhs, 1))) + && CONST_SCALAR_INT_P (XEXP (lhs, 1))) { - coeff0 = double_int::from_shwi (INTVAL (XEXP (lhs, 1))); + coeff0 = std::make_pair (XEXP (lhs, 1), mode); lhs = XEXP (lhs, 0); } else if (GET_CODE (lhs) == ASHIFT && CONST_INT_P (XEXP (lhs, 1)) && INTVAL (XEXP (lhs, 1)) >= 0 - && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT) + && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (mode)) { - coeff0 = double_int_zero.set_bit (INTVAL (XEXP (lhs, 1))); + coeff0 = wide_int::set_bit_in_zero (INTVAL (XEXP (lhs, 1)), + GET_MODE_PRECISION (mode)); lhs = XEXP (lhs, 0); } if (GET_CODE (rhs) == NEG) { - negcoeff1 = double_int_one; + negcoeff1 = wide_int::one (GET_MODE_PRECISION (mode)); rhs = XEXP (rhs, 0); } else if (GET_CODE (rhs) == MULT && CONST_INT_P (XEXP (rhs, 1))) { - negcoeff1 = double_int::from_shwi (-INTVAL (XEXP (rhs, 1))); + negcoeff1 = -wide_int (std::make_pair (XEXP (rhs, 1), mode)); rhs = XEXP (rhs, 0); } else if (GET_CODE (rhs) == ASHIFT && CONST_INT_P (XEXP (rhs, 1)) && INTVAL (XEXP (rhs, 1)) >= 0 - && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT) + && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (mode)) { - negcoeff1 = double_int_zero.set_bit (INTVAL (XEXP (rhs, 1))); + negcoeff1 = wide_int::set_bit_in_zero (INTVAL (XEXP (rhs, 1)), + GET_MODE_PRECISION (mode)); negcoeff1 = -negcoeff1; rhs = XEXP (rhs, 0); } @@ -2488,11 +2245,9 @@ simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode, { rtx orig = gen_rtx_MINUS (mode, op0, op1); rtx coeff; - double_int val; bool speed = optimize_function_for_speed_p (cfun); - val = coeff0 + negcoeff1; - coeff = immed_double_int_const (val, mode); + coeff = immed_wide_int_const (coeff0 + negcoeff1, mode); tem = simplify_gen_binary (MULT, mode, lhs, coeff); return set_src_cost (tem, speed) <= set_src_cost (orig, speed) @@ -2644,26 +2399,14 @@ simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode, && trueop1 == CONST1_RTX (mode)) return op0; - /* Convert multiply by constant power of two into shift unless - we are still generating RTL. This test is a kludge. */ - if (CONST_INT_P (trueop1) - && (val = exact_log2 (UINTVAL (trueop1))) >= 0 - /* If the mode is larger than the host word size, and the - uppermost bit is set, then this isn't a power of two due - to implicit sign extension. */ - && (width <= HOST_BITS_PER_WIDE_INT - || val != HOST_BITS_PER_WIDE_INT - 1)) - return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val)); - - /* Likewise for multipliers wider than a word. */ - if (CONST_DOUBLE_AS_INT_P (trueop1) - && GET_MODE (op0) == mode - && CONST_DOUBLE_LOW (trueop1) == 0 - && (val = exact_log2 (CONST_DOUBLE_HIGH (trueop1))) >= 0 - && (val < HOST_BITS_PER_DOUBLE_INT - 1 - || GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_DOUBLE_INT)) - return simplify_gen_binary (ASHIFT, mode, op0, - GEN_INT (val + HOST_BITS_PER_WIDE_INT)); + /* Convert multiply by constant power of two into shift. 
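
Illustration: the test that follows replaces two separate power-of-two checks (one for CONST_INT, one for a CONST_DOUBLE whose low word is zero) with a single exact_log2 over the full-precision value. A toy 64-bit version of the predicate (model_exact_log2 is an invented name; __builtin_ctzll is the GCC builtin):

    #include <cassert>
    #include <cstdint>

    /* Base-2 log of V if V is a power of two, -1 otherwise.  */
    static int model_exact_log2 (uint64_t v)
    {
      if (v == 0 || (v & (v - 1)) != 0)
        return -1;
      return __builtin_ctzll (v);
    }

    int main ()
    {
      assert (model_exact_log2 (8) == 3);      /* x * 8 becomes x << 3 */
      assert (model_exact_log2 (12) == -1);    /* not a shift */
      assert (model_exact_log2 (0) == -1);
      return 0;
    }
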
*/ + if (CONST_SCALAR_INT_P (trueop1)) + { + val = wide_int (std::make_pair (trueop1, mode)) + .exact_log2 ().to_shwi (); + if (val >= 0 && val < GET_MODE_BITSIZE (mode)) + return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val)); + } /* x*2 is x+x and x*(-1) is -x */ if (CONST_DOUBLE_AS_FLOAT_P (trueop1) @@ -3764,9 +3507,9 @@ rtx simplify_const_binary_operation (enum rtx_code code, enum machine_mode mode, rtx op0, rtx op1) { - HOST_WIDE_INT arg0, arg1, arg0s, arg1s; - HOST_WIDE_INT val; +#if TARGET_SUPPORTS_WIDE_INT == 0 unsigned int width = GET_MODE_PRECISION (mode); +#endif if (VECTOR_MODE_P (mode) && code != VEC_CONCAT @@ -3959,299 +3702,129 @@ simplify_const_binary_operation (enum rtx_code code, enum machine_mode mode, /* We can fold some multi-word operations. */ if (GET_MODE_CLASS (mode) == MODE_INT - && width == HOST_BITS_PER_DOUBLE_INT - && (CONST_DOUBLE_AS_INT_P (op0) || CONST_INT_P (op0)) - && (CONST_DOUBLE_AS_INT_P (op1) || CONST_INT_P (op1))) + && CONST_SCALAR_INT_P (op0) + && CONST_SCALAR_INT_P (op1)) { - double_int o0, o1, res, tmp; + wide_int result; + wide_int wop0 = std::make_pair (op0, mode); bool overflow; - - o0 = rtx_to_double_int (op0); - o1 = rtx_to_double_int (op1); - + unsigned int bitsize = GET_MODE_BITSIZE (mode); + rtx_mode_t pop1 = std::make_pair (op1, mode); + +#if TARGET_SUPPORTS_WIDE_INT == 0 + /* This assert keeps the simplification from producing a result + that cannot be represented in a CONST_DOUBLE. A lot of upstream + callers expect that this function never fails to simplify + something, so if this condition were folded into the test above, + the code would just die later anyway. If this assert fires, + you need to make the port support wide int. */ + gcc_assert (width <= HOST_BITS_PER_DOUBLE_INT); +#endif switch (code) { case MINUS: - /* A - B == A + (-B). */ - o1 = -o1; - - /* Fall through....
*/ + result = wop0 - pop1; + break; case PLUS: - res = o0 + o1; + result = wop0 + pop1; break; case MULT: - res = o0 * o1; + result = wop0 * pop1; break; case DIV: - res = o0.divmod_with_overflow (o1, false, TRUNC_DIV_EXPR, - &tmp, &overflow); + result = wop0.div_trunc (pop1, SIGNED, &overflow); if (overflow) - return 0; + return NULL_RTX; break; - + case MOD: - tmp = o0.divmod_with_overflow (o1, false, TRUNC_DIV_EXPR, - &res, &overflow); + result = wop0.mod_trunc (pop1, SIGNED, &overflow); if (overflow) - return 0; + return NULL_RTX; break; case UDIV: - res = o0.divmod_with_overflow (o1, true, TRUNC_DIV_EXPR, - &tmp, &overflow); + result = wop0.div_trunc (pop1, UNSIGNED, &overflow); if (overflow) - return 0; + return NULL_RTX; break; case UMOD: - tmp = o0.divmod_with_overflow (o1, true, TRUNC_DIV_EXPR, - &res, &overflow); + result = wop0.mod_trunc (pop1, UNSIGNED, &overflow); if (overflow) - return 0; + return NULL_RTX; break; case AND: - res = o0 & o1; + result = wop0 & pop1; break; case IOR: - res = o0 | o1; + result = wop0 | pop1; break; case XOR: - res = o0 ^ o1; + result = wop0 ^ pop1; break; case SMIN: - res = o0.smin (o1); + result = wop0.smin (pop1); break; case SMAX: - res = o0.smax (o1); + result = wop0.smax (pop1); break; case UMIN: - res = o0.umin (o1); + result = wop0.umin (pop1); break; case UMAX: - res = o0.umax (o1); - break; - - case LSHIFTRT: case ASHIFTRT: - case ASHIFT: - case ROTATE: case ROTATERT: - { - unsigned HOST_WIDE_INT cnt; - - if (SHIFT_COUNT_TRUNCATED) - { - o1.high = 0; - o1.low &= GET_MODE_PRECISION (mode) - 1; - } - - if (!o1.fits_uhwi () - || o1.to_uhwi () >= GET_MODE_PRECISION (mode)) - return 0; - - cnt = o1.to_uhwi (); - unsigned short prec = GET_MODE_PRECISION (mode); - - if (code == LSHIFTRT || code == ASHIFTRT) - res = o0.rshift (cnt, prec, code == ASHIFTRT); - else if (code == ASHIFT) - res = o0.alshift (cnt, prec); - else if (code == ROTATE) - res = o0.lrotate (cnt, prec); - else /* code == ROTATERT */ - res = o0.rrotate (cnt, prec); - } + result = wop0.umax (pop1); break; - default: - return 0; - } - - return immed_double_int_const (res, mode); - } - - if (CONST_INT_P (op0) && CONST_INT_P (op1) - && width <= HOST_BITS_PER_WIDE_INT && width != 0) - { - /* Get the integer argument values in two forms: - zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */ - - arg0 = INTVAL (op0); - arg1 = INTVAL (op1); - - if (width < HOST_BITS_PER_WIDE_INT) - { - arg0 &= GET_MODE_MASK (mode); - arg1 &= GET_MODE_MASK (mode); - - arg0s = arg0; - if (val_signbit_known_set_p (mode, arg0s)) - arg0s |= ~GET_MODE_MASK (mode); - - arg1s = arg1; - if (val_signbit_known_set_p (mode, arg1s)) - arg1s |= ~GET_MODE_MASK (mode); - } - else - { - arg0s = arg0; - arg1s = arg1; - } - - /* Compute the value of the arithmetic. 
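
Illustration: in the switch above, div_trunc and mod_trunc report failure through *overflow and the caller answers NULL_RTX; for truncating signed division the classic case is the most negative value divided by minus one. A 64-bit toy, which also treats a zero divisor as a failure:

    #include <cassert>
    #include <cstdint>

    /* Truncating signed division with an overflow flag, modelling
       wop0.div_trunc (pop1, SIGNED, &overflow) at 64-bit precision.  */
    static int64_t model_div_trunc (int64_t a, int64_t b, bool *overflow)
    {
      *overflow = (b == 0) || (a == INT64_MIN && b == -1);
      if (*overflow)
        return 0;           /* caller gives up and returns NULL_RTX */
      return a / b;         /* C++ division already truncates toward zero */
    }

    int main ()
    {
      bool ovf;
      assert (model_div_trunc (-7, 2, &ovf) == -3 && !ovf);
      model_div_trunc (INT64_MIN, -1, &ovf);
      assert (ovf);
      return 0;
    }
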
*/ - - switch (code) - { - case PLUS: - val = arg0s + arg1s; - break; - - case MINUS: - val = arg0s - arg1s; - break; - - case MULT: - val = arg0s * arg1s; - break; - - case DIV: - if (arg1s == 0 - || ((unsigned HOST_WIDE_INT) arg0s - == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1) - && arg1s == -1)) - return 0; - val = arg0s / arg1s; - break; - - case MOD: - if (arg1s == 0 - || ((unsigned HOST_WIDE_INT) arg0s - == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1) - && arg1s == -1)) - return 0; - val = arg0s % arg1s; - break; - - case UDIV: - if (arg1 == 0 - || ((unsigned HOST_WIDE_INT) arg0s - == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1) - && arg1s == -1)) - return 0; - val = (unsigned HOST_WIDE_INT) arg0 / arg1; - break; - - case UMOD: - if (arg1 == 0 - || ((unsigned HOST_WIDE_INT) arg0s - == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1) - && arg1s == -1)) - return 0; - val = (unsigned HOST_WIDE_INT) arg0 % arg1; - break; - - case AND: - val = arg0 & arg1; - break; - - case IOR: - val = arg0 | arg1; - break; + case LSHIFTRT: + if (wide_int (std::make_pair (op1, mode)).neg_p (SIGNED)) + return NULL_RTX; - case XOR: - val = arg0 ^ arg1; + result = wop0.rshiftu (pop1, bitsize, TRUNC); break; - - case LSHIFTRT: - case ASHIFT: + case ASHIFTRT: - /* Truncate the shift if SHIFT_COUNT_TRUNCATED, otherwise make sure - the value is in range. We can't return any old value for - out-of-range arguments because either the middle-end (via - shift_truncation_mask) or the back-end might be relying on - target-specific knowledge. Nor can we rely on - shift_truncation_mask, since the shift might not be part of an - ashlM3, lshrM3 or ashrM3 instruction. */ - if (SHIFT_COUNT_TRUNCATED) - arg1 = (unsigned HOST_WIDE_INT) arg1 % width; - else if (arg1 < 0 || arg1 >= GET_MODE_BITSIZE (mode)) - return 0; - - val = (code == ASHIFT - ? ((unsigned HOST_WIDE_INT) arg0) << arg1 - : ((unsigned HOST_WIDE_INT) arg0) >> arg1); + if (wide_int (std::make_pair (op1, mode)).neg_p (SIGNED)) + return NULL_RTX; - /* Sign-extend the result for arithmetic right shifts. */ - if (code == ASHIFTRT && arg0s < 0 && arg1 > 0) - val |= ((unsigned HOST_WIDE_INT) (-1)) << (width - arg1); + result = wop0.rshifts (pop1, bitsize, TRUNC); break; + + case ASHIFT: + if (wide_int (std::make_pair (op1, mode)).neg_p (SIGNED)) + return NULL_RTX; - case ROTATERT: - if (arg1 < 0) - return 0; - - arg1 %= width; - val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1)) - | (((unsigned HOST_WIDE_INT) arg0) >> arg1)); + result = wop0.lshift (pop1, bitsize, TRUNC); break; - + case ROTATE: - if (arg1 < 0) - return 0; - - arg1 %= width; - val = ((((unsigned HOST_WIDE_INT) arg0) << arg1) - | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1))); - break; - - case COMPARE: - /* Do nothing here. */ - return 0; - - case SMIN: - val = arg0s <= arg1s ? arg0s : arg1s; - break; - - case UMIN: - val = ((unsigned HOST_WIDE_INT) arg0 - <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1); - break; + if (wide_int (std::make_pair (op1, mode)).neg_p (SIGNED)) + return NULL_RTX; - case SMAX: - val = arg0s > arg1s ? arg0s : arg1s; + result = wop0.lrotate (pop1); break; + + case ROTATERT: + if (wide_int (std::make_pair (op1, mode)).neg_p (SIGNED)) + return NULL_RTX; - case UMAX: - val = ((unsigned HOST_WIDE_INT) arg0 - > (unsigned HOST_WIDE_INT) arg1 ? 
arg0 : arg1); break; - case SS_PLUS: - case US_PLUS: - case SS_MINUS: - case US_MINUS: - case SS_MULT: - case US_MULT: - case SS_DIV: - case US_DIV: - case SS_ASHIFT: - case US_ASHIFT: - /* ??? There are simplifications that can be done. */ - return 0; - default: - gcc_unreachable (); + return NULL_RTX; } - - return gen_int_mode (val, mode); + return immed_wide_int_const (result, mode); } return NULL_RTX; @@ -4934,10 +4507,11 @@ comparison_result (enum rtx_code code, int known_results) } } -/* Check if the given comparison (done in the given MODE) is actually a - tautology or a contradiction. - If no simplification is possible, this function returns zero. - Otherwise, it returns either const_true_rtx or const0_rtx. */ +/* Check if the given comparison (done in the given MODE) is actually + a tautology or a contradiction. If the mode is VOIDmode, the + comparison is done in "infinite precision". If no simplification + is possible, this function returns zero. Otherwise, it returns + either const_true_rtx or const0_rtx. */ rtx simplify_const_relational_operation (enum rtx_code code, @@ -5061,59 +4635,22 @@ simplify_const_relational_operation (enum rtx_code code, /* Otherwise, see if the operands are both integers. */ if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode) - && (CONST_DOUBLE_AS_INT_P (trueop0) || CONST_INT_P (trueop0)) - && (CONST_DOUBLE_AS_INT_P (trueop1) || CONST_INT_P (trueop1))) + && CONST_SCALAR_INT_P (trueop0) && CONST_SCALAR_INT_P (trueop1)) { - int width = GET_MODE_PRECISION (mode); - HOST_WIDE_INT l0s, h0s, l1s, h1s; - unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u; - - /* Get the two words comprising each integer constant. */ - if (CONST_DOUBLE_AS_INT_P (trueop0)) - { - l0u = l0s = CONST_DOUBLE_LOW (trueop0); - h0u = h0s = CONST_DOUBLE_HIGH (trueop0); - } - else - { - l0u = l0s = INTVAL (trueop0); - h0u = h0s = HWI_SIGN_EXTEND (l0s); - } - - if (CONST_DOUBLE_AS_INT_P (trueop1)) - { - l1u = l1s = CONST_DOUBLE_LOW (trueop1); - h1u = h1s = CONST_DOUBLE_HIGH (trueop1); - } - else - { - l1u = l1s = INTVAL (trueop1); - h1u = h1s = HWI_SIGN_EXTEND (l1s); - } - - /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT, - we have to sign or zero-extend the values. */ - if (width != 0 && width < HOST_BITS_PER_WIDE_INT) - { - l0u &= GET_MODE_MASK (mode); - l1u &= GET_MODE_MASK (mode); - - if (val_signbit_known_set_p (mode, l0s)) - l0s |= ~GET_MODE_MASK (mode); - - if (val_signbit_known_set_p (mode, l1s)) - l1s |= ~GET_MODE_MASK (mode); - } - if (width != 0 && width <= HOST_BITS_PER_WIDE_INT) - h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s); - - if (h0u == h1u && l0u == l1u) + /* It would be nice if we really had a mode here. However, the + largest int representable on the target is as good as + infinite. */ + enum machine_mode cmode = (mode == VOIDmode) ? MAX_MODE_INT : mode; + wide_int wo0; + rtx_mode_t ptrueop1 = std::make_pair (trueop1, cmode); + + wo0 = std::make_pair (trueop0, cmode); + if (wo0 == ptrueop1) return comparison_result (code, CMP_EQ); else { - int cr; - cr = (h0s < h1s || (h0s == h1s && l0u < l1u)) ? CMP_LT : CMP_GT; - cr |= (h0u < h1u || (h0u == h1u && l0u < l1u)) ? CMP_LTU : CMP_GTU; + int cr = wo0.lts_p (ptrueop1) ? CMP_LT : CMP_GT; + cr |= wo0.ltu_p (ptrueop1) ?
CMP_LTU : CMP_GTU; return comparison_result (code, cr); } } @@ -5569,9 +5106,9 @@ simplify_ternary_operation (enum rtx_code code, enum machine_mode mode, return 0; } -/* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_FIXED - or CONST_VECTOR, - returning another CONST_INT or CONST_DOUBLE or CONST_FIXED or CONST_VECTOR. +/* Evaluate a SUBREG of a CONST_INT or CONST_WIDE_INT or CONST_DOUBLE + or CONST_FIXED or CONST_VECTOR, returning another CONST_INT or + CONST_WIDE_INT or CONST_DOUBLE or CONST_FIXED or CONST_VECTOR. Works by unpacking OP into a collection of 8-bit values represented as a little-endian array of 'unsigned char', selecting by BYTE, @@ -5581,13 +5118,11 @@ static rtx simplify_immed_subreg (enum machine_mode outermode, rtx op, enum machine_mode innermode, unsigned int byte) { - /* We support up to 512-bit values (for V8DFmode). */ enum { - max_bitsize = 512, value_bit = 8, value_mask = (1 << value_bit) - 1 }; - unsigned char value[max_bitsize / value_bit]; + unsigned char value[MAX_BITSIZE_MODE_ANY_MODE/value_bit]; int value_start; int i; int elem; @@ -5599,6 +5134,7 @@ simplify_immed_subreg (enum machine_mode outermode, rtx op, rtvec result_v = NULL; enum mode_class outer_class; enum machine_mode outer_submode; + int max_bitsize; /* Some ports misuse CCmode. */ if (GET_MODE_CLASS (outermode) == MODE_CC && CONST_INT_P (op)) @@ -5608,6 +5144,10 @@ simplify_immed_subreg (enum machine_mode outermode, rtx op, if (COMPLEX_MODE_P (outermode)) return NULL_RTX; + /* We support any size mode. */ + max_bitsize = MAX (GET_MODE_BITSIZE (outermode), + GET_MODE_BITSIZE (innermode)); + /* Unpack the value. */ if (GET_CODE (op) == CONST_VECTOR) @@ -5657,8 +5197,20 @@ simplify_immed_subreg (enum machine_mode outermode, rtx op, *vp++ = INTVAL (el) < 0 ? -1 : 0; break; + case CONST_WIDE_INT: + { + wide_int val = std::make_pair (el, innermode); + unsigned char extend = val.sign_mask (); + + for (i = 0; i < elem_bitsize; i += value_bit) + *vp++ = val.extract_to_hwi (i, value_bit); + for (; i < elem_bitsize; i += value_bit) + *vp++ = extend; + } + break; + case CONST_DOUBLE: - if (GET_MODE (el) == VOIDmode) + if (TARGET_SUPPORTS_WIDE_INT == 0 && GET_MODE (el) == VOIDmode) { unsigned char extend = 0; /* If this triggers, someone should have generated a @@ -5681,7 +5233,8 @@ simplify_immed_subreg (enum machine_mode outermode, rtx op, } else { - long tmp[max_bitsize / 32]; + /* This is big enough for anything on the platform. */ + long tmp[MAX_BITSIZE_MODE_ANY_MODE / 32]; int bitsize = GET_MODE_BITSIZE (GET_MODE (el)); gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el))); @@ -5801,24 +5354,27 @@ simplify_immed_subreg (enum machine_mode outermode, rtx op, case MODE_INT: case MODE_PARTIAL_INT: { - unsigned HOST_WIDE_INT hi = 0, lo = 0; - - for (i = 0; - i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize; - i += value_bit) - lo |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i; - for (; i < elem_bitsize; i += value_bit) - hi |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) - << (i - HOST_BITS_PER_WIDE_INT); - - /* immed_double_const doesn't call trunc_int_for_mode. I don't - know why. 
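
Illustration: the relational fold just above derives a signed verdict (lts_p) and an unsigned one (ltu_p) from the same pair of constants, extended to MAX_MODE_INT when no mode is known. A standalone 64-bit model of the cr computation:

    #include <cassert>
    #include <cstdint>

    enum { CMP_EQ = 1, CMP_LT = 2, CMP_GT = 4, CMP_LTU = 8, CMP_GTU = 16 };

    /* Both orderings of the same pair of constants; the signed and
       unsigned verdicts differ when a value has its top bit set.  */
    static int model_cmp (uint64_t a, uint64_t b)
    {
      if (a == b)
        return CMP_EQ;
      int cr = ((int64_t) a < (int64_t) b) ? CMP_LT : CMP_GT;
      cr |= (a < b) ? CMP_LTU : CMP_GTU;
      return cr;
    }

    int main ()
    {
      /* -1 is signed-less than 1 but unsigned-greater.  */
      assert (model_cmp ((uint64_t) -1, 1) == (CMP_LT | CMP_GTU));
      assert (model_cmp (2, 2) == CMP_EQ);
      return 0;
    }
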
*/ - if (elem_bitsize <= HOST_BITS_PER_WIDE_INT) - elems[elem] = gen_int_mode (lo, outer_submode); - else if (elem_bitsize <= HOST_BITS_PER_DOUBLE_INT) - elems[elem] = immed_double_const (lo, hi, outer_submode); - else - return NULL_RTX; + int u; + int base = 0; + int units + = (GET_MODE_BITSIZE (outer_submode) + HOST_BITS_PER_WIDE_INT - 1) + / HOST_BITS_PER_WIDE_INT; + HOST_WIDE_INT tmp[MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_WIDE_INT]; + wide_int r; + + for (u = 0; u < units; u++) + { + unsigned HOST_WIDE_INT buf = 0; + for (i = 0; + i < HOST_BITS_PER_WIDE_INT && base + i < elem_bitsize; + i += value_bit) + buf |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i; + + tmp[u] = buf; + base += HOST_BITS_PER_WIDE_INT; + } + r = wide_int::from_array (tmp, units, GET_MODE_PRECISION (outer_submode)); + elems[elem] = immed_wide_int_const (r, outer_submode); } break; @@ -5826,7 +5382,7 @@ simplify_immed_subreg (enum machine_mode outermode, rtx op, case MODE_DECIMAL_FLOAT: { REAL_VALUE_TYPE r; - long tmp[max_bitsize / 32]; + long tmp[MAX_BITSIZE_MODE_ANY_INT / 32]; /* real_from_target wants its input in words affected by FLOAT_WORDS_BIG_ENDIAN. However, we ignore this, diff --git a/gcc/stmt.c b/gcc/stmt.c index 1dc447a5c85..10be545e543 100644 --- a/gcc/stmt.c +++ b/gcc/stmt.c @@ -1633,8 +1633,8 @@ dump_case_nodes (FILE *f, struct case_node *root, dump_case_nodes (f, root->left, indent_step, indent_level); - low = tree_low_cst (root->low, 0); - high = tree_low_cst (root->high, 0); + low = tree_to_shwi (root->low); + high = tree_to_shwi (root->high); fputs (";; ", f); if (high == low) @@ -1711,7 +1711,7 @@ expand_switch_as_decision_tree_p (tree range, who knows... */ max_ratio = optimize_insn_for_size_p () ? 3 : 10; if (count < case_values_threshold () - || ! host_integerp (range, /*pos=*/1) + || ! tree_fits_uhwi_p (range) || compare_tree_int (range, max_ratio * count) > 0) return true; @@ -1876,7 +1876,7 @@ emit_case_dispatch_table (tree index_expr, tree index_type, /* Get table of labels to jump to, in order of case index. */ - ncases = tree_low_cst (range, 0) + 1; + ncases = tree_to_shwi (range) + 1; labelvec = XALLOCAVEC (rtx, ncases); memset (labelvec, 0, ncases * sizeof (rtx)); @@ -1886,11 +1886,11 @@ emit_case_dispatch_table (tree index_expr, tree index_type, value since that should fit in a HOST_WIDE_INT while the actual values may not. */ HOST_WIDE_INT i_low - = tree_low_cst (fold_build2 (MINUS_EXPR, index_type, - n->low, minval), 1); + = tree_to_uhwi (fold_build2 (MINUS_EXPR, index_type, + n->low, minval)); HOST_WIDE_INT i_high - = tree_low_cst (fold_build2 (MINUS_EXPR, index_type, - n->high, minval), 1); + = tree_to_uhwi (fold_build2 (MINUS_EXPR, index_type, + n->high, minval)); HOST_WIDE_INT i; for (i = i_low; i <= i_high; i ++) @@ -2088,9 +2088,7 @@ expand_case (gimple stmt) original type. Make sure to drop overflow flags. */ low = fold_convert (index_type, low); if (TREE_OVERFLOW (low)) - low = build_int_cst_wide (index_type, - TREE_INT_CST_LOW (low), - TREE_INT_CST_HIGH (low)); + low = wide_int_to_tree (index_type, low); /* The canonical from of a case label in GIMPLE is that a simple case has an empty CASE_HIGH. 
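
Illustration: the MODE_INT branch earlier in this hunk rebuilds host words from the little-endian 8-bit value units and hands the array to wide_int::from_array. A standalone model of the unpack/repack round trip, including picking a subreg slice by byte offset:

    #include <cassert>
    #include <cstdint>
    #include <vector>

    int main ()
    {
      uint64_t op = 0x1122334455667788ull;

      /* Explode the constant into little-endian 8-bit units.  */
      std::vector<unsigned char> value;
      for (int i = 0; i < 64; i += 8)
        value.push_back ((op >> i) & 0xff);

      /* Reassemble a host word from the units, as the repacking loop does.  */
      uint64_t buf = 0;
      for (int i = 0; i < 64; i += 8)
        buf |= (uint64_t) value[i / 8] << i;
      assert (buf == op);

      /* A 4-byte subreg at byte offset 4 just selects a slice of VALUE.  */
      uint32_t hi = 0;
      for (int i = 0; i < 32; i += 8)
        hi |= (uint32_t) value[4 + i / 8] << i;
      assert (hi == 0x11223344);
      return 0;
    }
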
For the casesi and tablejump expanders, @@ -2099,9 +2097,7 @@ expand_case (gimple stmt) high = low; high = fold_convert (index_type, high); if (TREE_OVERFLOW (high)) - high = build_int_cst_wide (index_type, - TREE_INT_CST_LOW (high), - TREE_INT_CST_HIGH (high)); + high = wide_int_to_tree (index_type, high); basic_block case_bb = label_to_block_fn (cfun, lab); edge case_edge = find_edge (bb, case_bb); diff --git a/gcc/stor-layout.c b/gcc/stor-layout.c index 6f6b3107841..a9ee566d604 100644 --- a/gcc/stor-layout.c +++ b/gcc/stor-layout.c @@ -331,9 +331,9 @@ mode_for_size_tree (const_tree size, enum mode_class mclass, int limit) unsigned HOST_WIDE_INT uhwi; unsigned int ui; - if (!host_integerp (size, 1)) + if (!tree_fits_uhwi_p (size)) return BLKmode; - uhwi = tree_low_cst (size, 1); + uhwi = tree_to_uhwi (size); ui = uhwi; if (uhwi != ui) return BLKmode; @@ -481,10 +481,10 @@ mode_for_array (tree elem_type, tree size) return TYPE_MODE (elem_type); limit_p = true; - if (host_integerp (size, 1) && host_integerp (elem_size, 1)) + if (tree_fits_uhwi_p (size) && tree_fits_uhwi_p (elem_size)) { - int_size = tree_low_cst (size, 1); - int_elem_size = tree_low_cst (elem_size, 1); + int_size = tree_to_uhwi (size); + int_elem_size = tree_to_uhwi (elem_size); if (int_elem_size > 0 && int_size % int_elem_size == 0 && targetm.array_mode_supported_p (TYPE_MODE (elem_type), @@ -690,7 +690,7 @@ layout_decl (tree decl, unsigned int known_align) if (size != 0 && TREE_CODE (size) == INTEGER_CST && compare_tree_int (size, larger_than_size) > 0) { - int size_as_int = TREE_INT_CST_LOW (size); + int size_as_int = tree_to_hwi (size); if (compare_tree_int (size, size_as_int) == 0) warning (OPT_Wlarger_than_, "size of %q+D is %d bytes", decl, size_as_int); @@ -1055,7 +1055,7 @@ excess_unit_span (HOST_WIDE_INT byte_offset, HOST_WIDE_INT bit_offset, offset = offset % align; return ((offset + size + align - 1) / align - > ((unsigned HOST_WIDE_INT) tree_low_cst (TYPE_SIZE (type), 1) + > ((unsigned HOST_WIDE_INT) tree_to_uhwi (TYPE_SIZE (type)) / align)); } #endif @@ -1115,14 +1115,14 @@ place_field (record_layout_info rli, tree field) /* Work out the known alignment so far. Note that A & (-A) is the value of the least-significant bit in A that is one. */ if (! integer_zerop (rli->bitpos)) - known_align = (tree_low_cst (rli->bitpos, 1) - & - tree_low_cst (rli->bitpos, 1)); + known_align = (tree_to_uhwi (rli->bitpos) + & - tree_to_uhwi (rli->bitpos)); else if (integer_zerop (rli->offset)) known_align = 0; - else if (host_integerp (rli->offset, 1)) + else if (tree_fits_uhwi_p (rli->offset)) known_align = (BITS_PER_UNIT - * (tree_low_cst (rli->offset, 1) - & - tree_low_cst (rli->offset, 1))); + * (tree_to_uhwi (rli->offset) + & - tree_to_uhwi (rli->offset))); else known_align = rli->offset_align; @@ -1196,15 +1196,16 @@ place_field (record_layout_info rli, tree field) || TYPE_ALIGN (type) <= BITS_PER_UNIT) && maximum_field_alignment == 0 && ! integer_zerop (DECL_SIZE (field)) - && host_integerp (DECL_SIZE (field), 1) - && host_integerp (rli->offset, 1) - && host_integerp (TYPE_SIZE (type), 1)) + && tree_fits_uhwi_p (DECL_SIZE (field)) + /* BUG!!! rli->offset is checked as unsigned but used as signed. 
*/ + && tree_fits_uhwi_p (rli->offset) + && tree_fits_uhwi_p (TYPE_SIZE (type))) { unsigned int type_align = TYPE_ALIGN (type); tree dsize = DECL_SIZE (field); - HOST_WIDE_INT field_size = tree_low_cst (dsize, 1); - HOST_WIDE_INT offset = tree_low_cst (rli->offset, 0); - HOST_WIDE_INT bit_offset = tree_low_cst (rli->bitpos, 0); + HOST_WIDE_INT field_size = tree_to_uhwi (dsize); + HOST_WIDE_INT offset = tree_to_shwi (rli->offset); + HOST_WIDE_INT bit_offset = tree_to_shwi (rli->bitpos); #ifdef ADJUST_FIELD_ALIGN if (! TYPE_USER_ALIGN (type)) @@ -1240,15 +1241,16 @@ place_field (record_layout_info rli, tree field) && DECL_BIT_FIELD_TYPE (field) && ! DECL_PACKED (field) && ! integer_zerop (DECL_SIZE (field)) - && host_integerp (DECL_SIZE (field), 1) - && host_integerp (rli->offset, 1) - && host_integerp (TYPE_SIZE (type), 1)) + && tree_fits_uhwi_p (DECL_SIZE (field)) + /* BUG!!! rli->offset is checked as unsigned but used as signed. */ + && tree_fits_shwi_p (rli->offset) + && tree_fits_uhwi_p (TYPE_SIZE (type))) { unsigned int type_align = TYPE_ALIGN (type); tree dsize = DECL_SIZE (field); - HOST_WIDE_INT field_size = tree_low_cst (dsize, 1); - HOST_WIDE_INT offset = tree_low_cst (rli->offset, 0); - HOST_WIDE_INT bit_offset = tree_low_cst (rli->bitpos, 0); + HOST_WIDE_INT field_size = tree_to_uhwi (dsize); + HOST_WIDE_INT offset = tree_to_shwi (rli->offset); + HOST_WIDE_INT bit_offset = tree_to_shwi (rli->bitpos); #ifdef ADJUST_FIELD_ALIGN if (! TYPE_USER_ALIGN (type)) @@ -1302,18 +1304,19 @@ place_field (record_layout_info rli, tree field) if (DECL_BIT_FIELD_TYPE (field) && !integer_zerop (DECL_SIZE (field)) && !integer_zerop (DECL_SIZE (rli->prev_field)) - && host_integerp (DECL_SIZE (rli->prev_field), 0) - && host_integerp (TYPE_SIZE (type), 0) + && tree_fits_shwi_p (DECL_SIZE (rli->prev_field)) + /* BUG!!! TYPE_SIZE (type) is checked as unsigned but used as signed. */ + && tree_fits_shwi_p (TYPE_SIZE (type)) && simple_cst_equal (TYPE_SIZE (type), TYPE_SIZE (prev_type))) { /* We're in the middle of a run of equal type size fields; make sure we realign if we run out of bits. (Not decl size, type size!) */ - HOST_WIDE_INT bitsize = tree_low_cst (DECL_SIZE (field), 1); + HOST_WIDE_INT bitsize = tree_to_uhwi (DECL_SIZE (field)); if (rli->remaining_in_alignment < bitsize) { - HOST_WIDE_INT typesize = tree_low_cst (TYPE_SIZE (type), 1); + HOST_WIDE_INT typesize = tree_to_uhwi (TYPE_SIZE (type)); /* out of bits; bump up to next 'word'. */ rli->bitpos @@ -1385,13 +1388,13 @@ place_field (record_layout_info rli, tree field) until we see a bitfield (and come by here again) we just skip calculating it. */ if (DECL_SIZE (field) != NULL - && host_integerp (TYPE_SIZE (TREE_TYPE (field)), 1) - && host_integerp (DECL_SIZE (field), 1)) + && tree_fits_uhwi_p (TYPE_SIZE (TREE_TYPE (field))) + && tree_fits_uhwi_p (DECL_SIZE (field))) { unsigned HOST_WIDE_INT bitsize - = tree_low_cst (DECL_SIZE (field), 1); + = tree_to_uhwi (DECL_SIZE (field)); unsigned HOST_WIDE_INT typesize - = tree_low_cst (TYPE_SIZE (TREE_TYPE (field)), 1); + = tree_to_uhwi (TYPE_SIZE (TREE_TYPE (field))); if (typesize < bitsize) rli->remaining_in_alignment = 0; @@ -1423,14 +1426,14 @@ place_field (record_layout_info rli, tree field) approximate this by seeing if its position changed), lay out the field again; perhaps we can use an integral mode for it now. */ if (! 
integer_zerop (DECL_FIELD_BIT_OFFSET (field))) - actual_align = (tree_low_cst (DECL_FIELD_BIT_OFFSET (field), 1) - & - tree_low_cst (DECL_FIELD_BIT_OFFSET (field), 1)); + actual_align = (tree_to_uhwi (DECL_FIELD_BIT_OFFSET (field)) + & - tree_to_uhwi (DECL_FIELD_BIT_OFFSET (field))); else if (integer_zerop (DECL_FIELD_OFFSET (field))) actual_align = MAX (BIGGEST_ALIGNMENT, rli->record_align); - else if (host_integerp (DECL_FIELD_OFFSET (field), 1)) + else if (tree_fits_uhwi_p (DECL_FIELD_OFFSET (field))) actual_align = (BITS_PER_UNIT - * (tree_low_cst (DECL_FIELD_OFFSET (field), 1) - & - tree_low_cst (DECL_FIELD_OFFSET (field), 1))); + * (tree_to_uhwi (DECL_FIELD_OFFSET (field)) + & - tree_to_uhwi (DECL_FIELD_OFFSET (field)))); else actual_align = DECL_OFFSET_ALIGN (field); /* ACTUAL_ALIGN is still the actual alignment *within the record* . @@ -1586,7 +1589,7 @@ compute_record_mode (tree type) line. */ SET_TYPE_MODE (type, BLKmode); - if (! host_integerp (TYPE_SIZE (type), 1)) + if (! tree_fits_uhwi_p (TYPE_SIZE (type))) return; /* A record which has any BLKmode members must itself be @@ -1602,9 +1605,9 @@ compute_record_mode (tree type) && ! TYPE_NO_FORCE_BLK (TREE_TYPE (field)) && !(TYPE_SIZE (TREE_TYPE (field)) != 0 && integer_zerop (TYPE_SIZE (TREE_TYPE (field))))) - || ! host_integerp (bit_position (field), 1) + || ! tree_fits_uhwi_p (bit_position (field)) || DECL_SIZE (field) == 0 - || ! host_integerp (DECL_SIZE (field), 1)) + || ! tree_fits_uhwi_p (DECL_SIZE (field))) return; /* If this field is the whole struct, remember its mode so @@ -1623,8 +1626,8 @@ compute_record_mode (tree type) matches the type's size. This only applies to RECORD_TYPE. This does not apply to unions. */ if (TREE_CODE (type) == RECORD_TYPE && mode != VOIDmode - && host_integerp (TYPE_SIZE (type), 1) - && GET_MODE_BITSIZE (mode) == TREE_INT_CST_LOW (TYPE_SIZE (type))) + && tree_fits_uhwi_p (TYPE_SIZE (type)) + && GET_MODE_BITSIZE (mode) == tree_to_uhwi (TYPE_SIZE (type))) SET_TYPE_MODE (type, mode); else SET_TYPE_MODE (type, mode_for_size_tree (TYPE_SIZE (type), MODE_INT, 1)); @@ -1765,11 +1768,11 @@ finish_bitfield_representative (tree repr, tree field) size = size_diffop (DECL_FIELD_OFFSET (field), DECL_FIELD_OFFSET (repr)); - gcc_assert (host_integerp (size, 1)); - bitsize = (tree_low_cst (size, 1) * BITS_PER_UNIT - + tree_low_cst (DECL_FIELD_BIT_OFFSET (field), 1) - - tree_low_cst (DECL_FIELD_BIT_OFFSET (repr), 1) - + tree_low_cst (DECL_SIZE (field), 1)); + gcc_assert (tree_fits_uhwi_p (size)); + bitsize = (tree_to_uhwi (size) * BITS_PER_UNIT + + tree_to_uhwi (DECL_FIELD_BIT_OFFSET (field)) + - tree_to_uhwi (DECL_FIELD_BIT_OFFSET (repr)) + + tree_to_uhwi (DECL_SIZE (field))); /* Round up bitsize to multiples of BITS_PER_UNIT. */ bitsize = (bitsize + BITS_PER_UNIT - 1) & ~(BITS_PER_UNIT - 1); @@ -1787,11 +1790,11 @@ finish_bitfield_representative (tree repr, tree field) return; maxsize = size_diffop (DECL_FIELD_OFFSET (nextf), DECL_FIELD_OFFSET (repr)); - if (host_integerp (maxsize, 1)) + if (tree_fits_uhwi_p (maxsize)) { - maxbitsize = (tree_low_cst (maxsize, 1) * BITS_PER_UNIT - + tree_low_cst (DECL_FIELD_BIT_OFFSET (nextf), 1) - - tree_low_cst (DECL_FIELD_BIT_OFFSET (repr), 1)); + maxbitsize = (tree_to_uhwi (maxsize) * BITS_PER_UNIT + + tree_to_uhwi (DECL_FIELD_BIT_OFFSET (nextf)) + - tree_to_uhwi (DECL_FIELD_BIT_OFFSET (repr))); /* If the group ends within a bitfield nextf does not need to be aligned to BITS_PER_UNIT. Thus round up. 
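
Illustration: the maxbitsize computation here rounds a bit count up to whole bytes with the usual (x + align - 1) & ~(align - 1) idiom; a tiny standalone check:

    #include <cassert>

    /* Round BITS up to a multiple of BITS_PER_UNIT (8 in this toy).  */
    static unsigned round_up_bits (unsigned bits)
    {
      return (bits + 8 - 1) & ~(8 - 1);
    }

    int main ()
    {
      assert (round_up_bits (1) == 8);
      assert (round_up_bits (8) == 8);
      assert (round_up_bits (17) == 24);
      return 0;
    }
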
*/ maxbitsize = (maxbitsize + BITS_PER_UNIT - 1) & ~(BITS_PER_UNIT - 1); @@ -1808,9 +1811,9 @@ finish_bitfield_representative (tree repr, tree field) use bitsize as fallback for this case. */ tree maxsize = size_diffop (TYPE_SIZE_UNIT (DECL_CONTEXT (field)), DECL_FIELD_OFFSET (repr)); - if (host_integerp (maxsize, 1)) - maxbitsize = (tree_low_cst (maxsize, 1) * BITS_PER_UNIT - - tree_low_cst (DECL_FIELD_BIT_OFFSET (repr), 1)); + if (tree_fits_uhwi_p (maxsize)) + maxbitsize = (tree_to_uhwi (maxsize) * BITS_PER_UNIT + - tree_to_uhwi (DECL_FIELD_BIT_OFFSET (repr))); else maxbitsize = bitsize; } @@ -1921,8 +1924,8 @@ finish_bitfield_layout (record_layout_info rli) representative to be generated. That will at most generate worse code but still maintain correctness with respect to the C++ memory model. */ - else if (!((host_integerp (DECL_FIELD_OFFSET (repr), 1) - && host_integerp (DECL_FIELD_OFFSET (field), 1)) + else if (!((tree_fits_uhwi_p (DECL_FIELD_OFFSET (repr)) + && tree_fits_uhwi_p (DECL_FIELD_OFFSET (field))) || operand_equal_p (DECL_FIELD_OFFSET (repr), DECL_FIELD_OFFSET (field), 0))) { @@ -2205,12 +2208,8 @@ layout_type (tree type) && tree_int_cst_lt (ub, lb)) { unsigned prec = TYPE_PRECISION (TREE_TYPE (lb)); - lb = double_int_to_tree - (ssizetype, - tree_to_double_int (lb).sext (prec)); - ub = double_int_to_tree - (ssizetype, - tree_to_double_int (ub).sext (prec)); + lb = wide_int_to_tree (ssizetype, addr_wide_int (lb).sext (prec)); + ub = wide_int_to_tree (ssizetype, addr_wide_int (ub).sext (prec)); } length = fold_convert (sizetype, @@ -2486,16 +2485,14 @@ initialize_sizetypes (void) TYPE_ALIGN (sizetype) = GET_MODE_ALIGNMENT (TYPE_MODE (sizetype)); TYPE_SIZE (sizetype) = bitsize_int (precision); TYPE_SIZE_UNIT (sizetype) = size_int (GET_MODE_SIZE (TYPE_MODE (sizetype))); - set_min_and_max_values_for_integral_type (sizetype, precision, - /*is_unsigned=*/true); + set_min_and_max_values_for_integral_type (sizetype, precision, UNSIGNED); SET_TYPE_MODE (bitsizetype, smallest_mode_for_size (bprecision, MODE_INT)); TYPE_ALIGN (bitsizetype) = GET_MODE_ALIGNMENT (TYPE_MODE (bitsizetype)); TYPE_SIZE (bitsizetype) = bitsize_int (bprecision); TYPE_SIZE_UNIT (bitsizetype) = size_int (GET_MODE_SIZE (TYPE_MODE (bitsizetype))); - set_min_and_max_values_for_integral_type (bitsizetype, bprecision, - /*is_unsigned=*/true); + set_min_and_max_values_for_integral_type (bitsizetype, bprecision, UNSIGNED); /* Create the signed variants of *sizetype. */ ssizetype = make_signed_type (TYPE_PRECISION (sizetype)); @@ -2515,52 +2512,10 @@ initialize_sizetypes (void) void set_min_and_max_values_for_integral_type (tree type, int precision, - bool is_unsigned) + signop sgn) { - tree min_value; - tree max_value; - - if (is_unsigned) - { - min_value = build_int_cst (type, 0); - max_value - = build_int_cst_wide (type, precision - HOST_BITS_PER_WIDE_INT >= 0 - ? -1 - : ((HOST_WIDE_INT) 1 << precision) - 1, - precision - HOST_BITS_PER_WIDE_INT > 0 - ? ((unsigned HOST_WIDE_INT) ~0 - >> (HOST_BITS_PER_WIDE_INT - - (precision - HOST_BITS_PER_WIDE_INT))) - : 0); - } - else - { - min_value - = build_int_cst_wide (type, - (precision - HOST_BITS_PER_WIDE_INT > 0 - ? 0 - : (HOST_WIDE_INT) (-1) << (precision - 1)), - (((HOST_WIDE_INT) (-1) - << (precision - HOST_BITS_PER_WIDE_INT - 1 > 0 - ? precision - HOST_BITS_PER_WIDE_INT - 1 - : 0)))); - max_value - = build_int_cst_wide (type, - (precision - HOST_BITS_PER_WIDE_INT > 0 - ? 
-1 - : (HOST_WIDE_INT) - (((unsigned HOST_WIDE_INT) 1 - << (precision - 1)) - 1)), - (precision - HOST_BITS_PER_WIDE_INT - 1 > 0 - ? (HOST_WIDE_INT) - ((((unsigned HOST_WIDE_INT) 1 - << (precision - HOST_BITS_PER_WIDE_INT - - 1))) - 1) - : 0)); - } - - TYPE_MIN_VALUE (type) = min_value; - TYPE_MAX_VALUE (type) = max_value; + TYPE_MIN_VALUE (type) = wide_int_to_tree (type, wide_int::min_value (precision, sgn)); + TYPE_MAX_VALUE (type) = wide_int_to_tree (type, wide_int::max_value (precision, sgn)); } /* Set the extreme values of TYPE based on its precision in bits, @@ -2573,14 +2528,7 @@ fixup_signed_type (tree type) { int precision = TYPE_PRECISION (type); - /* We can not represent properly constants greater then - HOST_BITS_PER_DOUBLE_INT, still we need the types - as they are used by i386 vector extensions and friends. */ - if (precision > HOST_BITS_PER_DOUBLE_INT) - precision = HOST_BITS_PER_DOUBLE_INT; - - set_min_and_max_values_for_integral_type (type, precision, - /*is_unsigned=*/false); + set_min_and_max_values_for_integral_type (type, precision, SIGNED); /* Lay out the type: set its alignment, size, etc. */ layout_type (type); @@ -2595,16 +2543,9 @@ fixup_unsigned_type (tree type) { int precision = TYPE_PRECISION (type); - /* We can not represent properly constants greater then - HOST_BITS_PER_DOUBLE_INT, still we need the types - as they are used by i386 vector extensions and friends. */ - if (precision > HOST_BITS_PER_DOUBLE_INT) - precision = HOST_BITS_PER_DOUBLE_INT; - TYPE_UNSIGNED (type) = 1; - set_min_and_max_values_for_integral_type (type, precision, - /*is_unsigned=*/true); + set_min_and_max_values_for_integral_type (type, precision, UNSIGNED); /* Lay out the type: set its alignment, size, etc. */ layout_type (type); diff --git a/gcc/targhooks.c b/gcc/targhooks.c index d3a3f5fdd42..396f4518731 100644 --- a/gcc/targhooks.c +++ b/gcc/targhooks.c @@ -979,7 +979,7 @@ tree default_mangle_decl_assembler_name (tree decl ATTRIBUTE_UNUSED, HOST_WIDE_INT default_vector_alignment (const_tree type) { - return tree_low_cst (TYPE_SIZE (type), 0); + return tree_to_shwi (TYPE_SIZE (type)); } bool diff --git a/gcc/testsuite/gcc.dg/20020219-1.c b/gcc/testsuite/gcc.dg/20020219-1.c index d2ba755f50f..e3d22a76af1 100644 --- a/gcc/testsuite/gcc.dg/20020219-1.c +++ b/gcc/testsuite/gcc.dg/20020219-1.c @@ -1,5 +1,5 @@ /* PR c/4389 - This testcase failed because host_integerp (x, 0) was returning + This testcase failed because tree_fits_shwi_p (x) was returning 1 even for constants bigger than 2^31. It fails under under hppa hpux without -mdisable-indexing because the pointer x - 1 is used as the base address of an indexed load. 
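
Illustration: everything deleted from set_min_and_max_values_for_integral_type above collapses into wide_int::min_value / wide_int::max_value at the type's precision. A toy of the bounds they produce, for precisions up to 64 (model_bounds is an invented name):

    #include <cassert>
    #include <cstdint>

    /* Bounds of a PREC-bit integer type, SIGNED_P selecting the signop.  */
    static void model_bounds (unsigned prec, bool signed_p,
                              int64_t *min, uint64_t *max)
    {
      if (signed_p)
        {
          *max = ((uint64_t) 1 << (prec - 1)) - 1;
          *min = -(int64_t) *max - 1;
        }
      else
        {
          *min = 0;
          *max = prec == 64 ? ~(uint64_t) 0 : ((uint64_t) 1 << prec) - 1;
        }
    }

    int main ()
    {
      int64_t mn; uint64_t mx;
      model_bounds (24, true, &mn, &mx);
      assert (mn == -8388608 && mx == 8388607);
      model_bounds (24, false, &mn, &mx);
      assert (mn == 0 && mx == 16777215);
      return 0;
    }
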
Because the struct A is not diff --git a/gcc/testsuite/gcc.dg/tree-ssa/pr45427.c b/gcc/testsuite/gcc.dg/tree-ssa/pr45427.c index 0952b5a04f8..0bd1a188278 100644 --- a/gcc/testsuite/gcc.dg/tree-ssa/pr45427.c +++ b/gcc/testsuite/gcc.dg/tree-ssa/pr45427.c @@ -25,5 +25,5 @@ int main() return 0; } -/* { dg-final { scan-tree-dump-times "bounded by 0" 0 "cunrolli"} } */ +/* { dg-final { scan-tree-dump-times "bounded by 0x0\[^0-9a-f\]" 0 "cunrolli"} } */ /* { dg-final { cleanup-tree-dump "cunrolli" } } */ diff --git a/gcc/trans-mem.c b/gcc/trans-mem.c index 220ded277b5..9d5155d6c7c 100644 --- a/gcc/trans-mem.c +++ b/gcc/trans-mem.c @@ -1072,8 +1072,8 @@ tm_log_add (basic_block entry_block, tree addr, gimple stmt) if (entry_block && transaction_invariant_address_p (lp->addr, entry_block) && TYPE_SIZE_UNIT (type) != NULL - && host_integerp (TYPE_SIZE_UNIT (type), 1) - && (tree_low_cst (TYPE_SIZE_UNIT (type), 1) + && tree_fits_uhwi_p (TYPE_SIZE_UNIT (type)) + && ((HOST_WIDE_INT)(tree_to_uhwi (TYPE_SIZE_UNIT (type))) < PARAM_VALUE (PARAM_TM_MAX_AGGREGATE_SIZE)) /* We must be able to copy this type normally. I.e., no special constructors and the like. */ @@ -1156,9 +1156,9 @@ tm_log_emit_stmt (tree addr, gimple stmt) code = BUILT_IN_TM_LOG_DOUBLE; else if (type == long_double_type_node) code = BUILT_IN_TM_LOG_LDOUBLE; - else if (host_integerp (size, 1)) + else if (tree_fits_uhwi_p (size)) { - unsigned int n = tree_low_cst (size, 1); + unsigned int n = tree_to_uhwi (size); switch (n) { case 1: @@ -2074,9 +2074,9 @@ build_tm_load (location_t loc, tree lhs, tree rhs, gimple_stmt_iterator *gsi) else if (type == long_double_type_node) code = BUILT_IN_TM_LOAD_LDOUBLE; else if (TYPE_SIZE_UNIT (type) != NULL - && host_integerp (TYPE_SIZE_UNIT (type), 1)) + && tree_fits_uhwi_p (TYPE_SIZE_UNIT (type))) { - switch (tree_low_cst (TYPE_SIZE_UNIT (type), 1)) + switch (tree_to_uhwi (TYPE_SIZE_UNIT (type))) { case 1: code = BUILT_IN_TM_LOAD_1; @@ -2146,9 +2146,9 @@ build_tm_store (location_t loc, tree lhs, tree rhs, gimple_stmt_iterator *gsi) else if (type == long_double_type_node) code = BUILT_IN_TM_STORE_LDOUBLE; else if (TYPE_SIZE_UNIT (type) != NULL - && host_integerp (TYPE_SIZE_UNIT (type), 1)) + && tree_fits_uhwi_p (TYPE_SIZE_UNIT (type))) { - switch (tree_low_cst (TYPE_SIZE_UNIT (type), 1)) + switch (tree_to_uhwi (TYPE_SIZE_UNIT (type))) { case 1: code = BUILT_IN_TM_STORE_1; @@ -3087,7 +3087,7 @@ expand_block_edges (struct tm_region *const region, basic_block bb) // TM_ABORT directly get what they deserve. tree arg = gimple_call_arg (stmt, 0); if (TREE_CODE (arg) == INTEGER_CST - && (TREE_INT_CST_LOW (arg) & AR_OUTERABORT) != 0 + && (tree_to_hwi (arg) & AR_OUTERABORT) != 0 && !decl_is_tm_clone (current_function_decl)) { // Find the GTMA_IS_OUTER transaction. diff --git a/gcc/tree-affine.c b/gcc/tree-affine.c index 46a183a07b4..3da68137151 100644 --- a/gcc/tree-affine.c +++ b/gcc/tree-affine.c @@ -27,33 +27,37 @@ along with GCC; see the file COPYING3. If not see #include "gimple.h" #include "flags.h" #include "dumpfile.h" +#include "wide-int-print.h" + /* Extends CST as appropriate for the affine combinations COMB. */ -double_int -double_int_ext_for_comb (double_int cst, aff_tree *comb) +max_wide_int +wide_int_ext_for_comb (max_wide_int cst, aff_tree *comb) { return cst.sext (TYPE_PRECISION (comb->type)); } - /* Initializes affine combination COMB so that its value is zero in TYPE. 
*/ static void aff_combination_zero (aff_tree *comb, tree type) { + int i; comb->type = type; - comb->offset = double_int_zero; + comb->offset = 0; comb->n = 0; + for (i = 0; i < MAX_AFF_ELTS; i++) + comb->elts[i].coef = 0; comb->rest = NULL_TREE; } /* Sets COMB to CST. */ void -aff_combination_const (aff_tree *comb, tree type, double_int cst) +aff_combination_const (aff_tree *comb, tree type, const max_wide_int &cst) { aff_combination_zero (comb, type); - comb->offset = double_int_ext_for_comb (cst, comb); + comb->offset = wide_int_ext_for_comb (cst, comb); } /* Sets COMB to single element ELT. */ @@ -65,37 +69,35 @@ aff_combination_elt (aff_tree *comb, tree type, tree elt) comb->n = 1; comb->elts[0].val = elt; - comb->elts[0].coef = double_int_one; + comb->elts[0].coef = 1; } /* Scales COMB by SCALE. */ void -aff_combination_scale (aff_tree *comb, double_int scale) +aff_combination_scale (aff_tree *comb, max_wide_int scale) { unsigned i, j; - scale = double_int_ext_for_comb (scale, comb); - if (scale.is_one ()) + scale = wide_int_ext_for_comb (scale, comb); + if (scale.one_p ()) return; - if (scale.is_zero ()) + if (scale.zero_p ()) { aff_combination_zero (comb, comb->type); return; } - comb->offset - = double_int_ext_for_comb (scale * comb->offset, comb); + comb->offset = wide_int_ext_for_comb (scale * comb->offset, comb); for (i = 0, j = 0; i < comb->n; i++) { - double_int new_coef; + max_wide_int new_coef; - new_coef - = double_int_ext_for_comb (scale * comb->elts[i].coef, comb); + new_coef = wide_int_ext_for_comb (scale * comb->elts[i].coef, comb); /* A coefficient may become zero due to overflow. Remove the zero elements. */ - if (new_coef.is_zero ()) + if (new_coef.zero_p ()) continue; comb->elts[j].coef = new_coef; comb->elts[j].val = comb->elts[i].val; @@ -117,30 +119,29 @@ aff_combination_scale (aff_tree *comb, double_int scale) } else comb->rest = fold_build2 (MULT_EXPR, type, comb->rest, - double_int_to_tree (type, scale)); + wide_int_to_tree (type, scale)); } } /* Adds ELT * SCALE to COMB.
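
Illustration: wide_int_ext_for_comb sign-extends every intermediate result to the combination type's precision, which is why aff_combination_scale above can watch a nonzero coefficient wrap to zero and drop the element. A standalone model:

    #include <cassert>
    #include <cstdint>

    /* Multiply then sign-extend to PREC bits, modelling
       wide_int_ext_for_comb (scale * coef, comb).  */
    static int64_t model_scale (int64_t coef, int64_t scale, unsigned prec)
    {
      uint64_t prod = (uint64_t) coef * (uint64_t) scale;
      uint64_t m = (uint64_t) 1 << (prec - 1);
      prod &= ((uint64_t) 1 << prec) - 1;
      return (int64_t) ((prod ^ m) - m);
    }

    int main ()
    {
      /* At 8 bits of precision, 16 * 16 == 256 wraps to 0: the element
         would be removed from the combination.  */
      assert (model_scale (16, 16, 8) == 0);
      assert (model_scale (-3, 5, 8) == -15);
      return 0;
    }
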
*/ void -aff_combination_add_elt (aff_tree *comb, tree elt, double_int scale) +aff_combination_add_elt (aff_tree *comb, tree elt, max_wide_int scale) { unsigned i; tree type; - scale = double_int_ext_for_comb (scale, comb); - if (scale.is_zero ()) + scale = wide_int_ext_for_comb (scale, comb); + if (scale.zero_p ()) return; for (i = 0; i < comb->n; i++) if (operand_equal_p (comb->elts[i].val, elt, 0)) { - double_int new_coef; + max_wide_int new_coef; - new_coef = comb->elts[i].coef + scale; - new_coef = double_int_ext_for_comb (new_coef, comb); - if (!new_coef.is_zero ()) + new_coef = wide_int_ext_for_comb (comb->elts[i].coef + scale, comb); + if (!new_coef.zero_p ()) { comb->elts[i].coef = new_coef; return; @@ -152,7 +153,7 @@ aff_combination_add_elt (aff_tree *comb, tree elt, double_int scale) if (comb->rest) { gcc_assert (comb->n == MAX_AFF_ELTS - 1); - comb->elts[comb->n].coef = double_int_one; + comb->elts[comb->n].coef = 1; comb->elts[comb->n].val = comb->rest; comb->rest = NULL_TREE; comb->n++; @@ -171,12 +172,12 @@ aff_combination_add_elt (aff_tree *comb, tree elt, double_int scale) if (POINTER_TYPE_P (type)) type = sizetype; - if (scale.is_one ()) + if (scale.one_p ()) elt = fold_convert (type, elt); else elt = fold_build2 (MULT_EXPR, type, fold_convert (type, elt), - double_int_to_tree (type, scale)); + wide_int_to_tree (type, scale)); if (comb->rest) comb->rest = fold_build2 (PLUS_EXPR, type, comb->rest, @@ -188,9 +189,9 @@ aff_combination_add_elt (aff_tree *comb, tree elt, double_int scale) /* Adds CST to C. */ static void -aff_combination_add_cst (aff_tree *c, double_int cst) +aff_combination_add_cst (aff_tree *c, const max_wide_int &cst) { - c->offset = double_int_ext_for_comb (c->offset + cst, c); + c->offset = wide_int_ext_for_comb (c->offset + cst, c); } /* Adds COMB2 to COMB1. */ @@ -204,7 +205,7 @@ aff_combination_add (aff_tree *comb1, aff_tree *comb2) for (i = 0; i < comb2->n; i++) aff_combination_add_elt (comb1, comb2->elts[i].val, comb2->elts[i].coef); if (comb2->rest) - aff_combination_add_elt (comb1, comb2->rest, double_int_one); + aff_combination_add_elt (comb1, comb2->rest, 1); } /* Converts affine combination COMB to TYPE. 
*/ @@ -229,11 +230,11 @@ aff_combination_convert (aff_tree *comb, tree type) if (TYPE_PRECISION (type) == TYPE_PRECISION (comb_type)) return; - comb->offset = double_int_ext_for_comb (comb->offset, comb); + comb->offset = wide_int_ext_for_comb (comb->offset, comb); for (i = j = 0; i < comb->n; i++) { - double_int new_coef = double_int_ext_for_comb (comb->elts[i].coef, comb); - if (new_coef.is_zero ()) + max_wide_int new_coef = comb->elts[i].coef; + if (new_coef.zero_p ()) continue; comb->elts[j].coef = new_coef; comb->elts[j].val = fold_convert (type, comb->elts[i].val); @@ -243,7 +244,7 @@ aff_combination_convert (aff_tree *comb, tree type) comb->n = j; if (comb->n < MAX_AFF_ELTS && comb->rest) { - comb->elts[comb->n].coef = double_int_one; + comb->elts[comb->n].coef = 1; comb->elts[comb->n].val = comb->rest; comb->rest = NULL_TREE; comb->n++; @@ -268,7 +269,7 @@ tree_to_aff_combination (tree expr, tree type, aff_tree *comb) switch (code) { case INTEGER_CST: - aff_combination_const (comb, type, tree_to_double_int (expr)); + aff_combination_const (comb, type, expr); return; case POINTER_PLUS_EXPR: @@ -282,7 +283,7 @@ tree_to_aff_combination (tree expr, tree type, aff_tree *comb) tree_to_aff_combination (TREE_OPERAND (expr, 0), type, comb); tree_to_aff_combination (TREE_OPERAND (expr, 1), type, &tmp); if (code == MINUS_EXPR) - aff_combination_scale (&tmp, double_int_minus_one); + aff_combination_scale (&tmp, -1); aff_combination_add (comb, &tmp); return; @@ -291,19 +292,19 @@ tree_to_aff_combination (tree expr, tree type, aff_tree *comb) if (TREE_CODE (cst) != INTEGER_CST) break; tree_to_aff_combination (TREE_OPERAND (expr, 0), type, comb); - aff_combination_scale (comb, tree_to_double_int (cst)); + aff_combination_scale (comb, cst); return; case NEGATE_EXPR: tree_to_aff_combination (TREE_OPERAND (expr, 0), type, comb); - aff_combination_scale (comb, double_int_minus_one); + aff_combination_scale (comb, -1); return; case BIT_NOT_EXPR: /* ~x = -x - 1 */ tree_to_aff_combination (TREE_OPERAND (expr, 0), type, comb); - aff_combination_scale (comb, double_int_minus_one); - aff_combination_add_cst (comb, double_int_minus_one); + aff_combination_scale (comb, -1); + aff_combination_add_cst (comb, -1); return; case ADDR_EXPR: @@ -321,11 +322,10 @@ tree_to_aff_combination (tree expr, tree type, aff_tree *comb) false); if (bitpos % BITS_PER_UNIT != 0) break; - aff_combination_const (comb, type, - double_int::from_uhwi (bitpos / BITS_PER_UNIT)); + aff_combination_const (comb, type, bitpos / BITS_PER_UNIT); core = build_fold_addr_expr (core); if (TREE_CODE (core) == ADDR_EXPR) - aff_combination_add_elt (comb, core, double_int_one); + aff_combination_add_elt (comb, core, 1); else { tree_to_aff_combination (core, type, &tmp); @@ -368,18 +368,18 @@ tree_to_aff_combination (tree expr, tree type, aff_tree *comb) combination COMB. 
*/ static tree -add_elt_to_tree (tree expr, tree type, tree elt, double_int scale, - aff_tree *comb) +add_elt_to_tree (tree expr, tree type, tree elt, max_wide_int scale, + aff_tree *comb ATTRIBUTE_UNUSED) { enum tree_code code; tree type1 = type; if (POINTER_TYPE_P (type)) type1 = sizetype; - scale = double_int_ext_for_comb (scale, comb); + scale = wide_int_ext_for_comb (scale, comb); elt = fold_convert (type1, elt); - if (scale.is_one ()) + if (scale.one_p ()) { if (!expr) return fold_convert (type, elt); @@ -389,7 +389,7 @@ add_elt_to_tree (tree expr, tree type, tree elt, double_int scale, return fold_build2 (PLUS_EXPR, type, expr, elt); } - if (scale.is_minus_one ()) + if (scale.minus_one_p ()) { if (!expr) return fold_convert (type, fold_build1 (NEGATE_EXPR, type1, elt)); @@ -405,9 +405,9 @@ add_elt_to_tree (tree expr, tree type, tree elt, double_int scale, if (!expr) return fold_convert (type, fold_build2 (MULT_EXPR, type1, elt, - double_int_to_tree (type1, scale))); + wide_int_to_tree (type1, scale))); - if (scale.is_negative ()) + if (scale.neg_p (SIGNED)) { code = MINUS_EXPR; scale = -scale; @@ -416,7 +416,7 @@ add_elt_to_tree (tree expr, tree type, tree elt, double_int scale, code = PLUS_EXPR; elt = fold_build2 (MULT_EXPR, type1, elt, - double_int_to_tree (type1, scale)); + wide_int_to_tree (type1, scale)); if (POINTER_TYPE_P (type)) { if (code == MINUS_EXPR) @@ -434,7 +434,7 @@ aff_combination_to_tree (aff_tree *comb) tree type = comb->type; tree expr = NULL_TREE; unsigned i; - double_int off, sgn; + max_wide_int off, sgn; tree type1 = type; if (POINTER_TYPE_P (type)) type1 = sizetype; @@ -446,21 +446,21 @@ aff_combination_to_tree (aff_tree *comb) comb); if (comb->rest) - expr = add_elt_to_tree (expr, type, comb->rest, double_int_one, comb); + expr = add_elt_to_tree (expr, type, comb->rest, 1, comb); /* Ensure that we get x - 1, not x + (-1) or x + 0xff..f if x is unsigned. */ - if (comb->offset.is_negative ()) + if (comb->offset.neg_p (SIGNED)) { off = -comb->offset; - sgn = double_int_minus_one; + sgn = -1; } else { off = comb->offset; - sgn = double_int_one; + sgn = 1; } - return add_elt_to_tree (expr, type, double_int_to_tree (type1, off), sgn, + return add_elt_to_tree (expr, type, wide_int_to_tree (type1, off), sgn, comb); } @@ -487,7 +487,7 @@ aff_combination_remove_elt (aff_tree *comb, unsigned m) comb->elts[m] = comb->elts[comb->n]; if (comb->rest) { - comb->elts[comb->n].coef = double_int_one; + comb->elts[comb->n].coef = 1; comb->elts[comb->n].val = comb->rest; comb->rest = NULL_TREE; comb->n++; @@ -499,7 +499,7 @@ aff_combination_remove_elt (aff_tree *comb, unsigned m) static void -aff_combination_add_product (aff_tree *c, double_int coef, tree val, +aff_combination_add_product (aff_tree *c, const max_wide_int &coef, tree val, aff_tree *r) { unsigned i; @@ -550,7 +550,7 @@ aff_combination_mult (aff_tree *c1, aff_tree *c2, aff_tree *r) for (i = 0; i < c2->n; i++) aff_combination_add_product (c1, c2->elts[i].coef, c2->elts[i].val, r); if (c2->rest) - aff_combination_add_product (c1, double_int_one, c2->rest, r); + aff_combination_add_product (c1, 1, c2->rest, r); aff_combination_add_product (c1, c2->offset, NULL, r); } @@ -597,7 +597,7 @@ aff_combination_expand (aff_tree *comb ATTRIBUTE_UNUSED, aff_tree to_add, current, curre; tree e, rhs; gimple def; - double_int scale; + max_wide_int scale; void **slot; struct name_expansion *exp; @@ -742,25 +742,25 @@ free_affine_expand_cache (struct pointer_map_t **cache) is set to true. 
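
Illustration: wide_int_constant_multiple_p, defined just below, decides whether VAL is exactly MULT * DIV by doing a floor divmod and insisting on a zero remainder. A 64-bit toy with the same floor adjustment sdivmod_floor performs (model_constant_multiple_p is an invented name):

    #include <cassert>
    #include <cstdint>

    static bool model_constant_multiple_p (int64_t val, int64_t div,
                                           int64_t *mult)
    {
      if (val == 0) { *mult = 0; return true; }
      if (div == 0) return false;
      int64_t q = val / div, r = val % div;
      /* C++ truncates toward zero; adjust to floor semantics.  */
      if (r != 0 && ((r < 0) != (div < 0)))
        q -= 1, r += div;
      if (r != 0)
        return false;
      *mult = q;
      return true;
    }

    int main ()
    {
      int64_t m;
      assert (model_constant_multiple_p (12, 4, &m) && m == 3);
      assert (model_constant_multiple_p (-12, 4, &m) && m == -3);
      assert (!model_constant_multiple_p (13, 4, &m));
      return 0;
    }
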
*/ static bool -double_int_constant_multiple_p (double_int val, double_int div, - bool *mult_set, double_int *mult) +wide_int_constant_multiple_p (max_wide_int val, max_wide_int div, + bool *mult_set, max_wide_int *mult) { - double_int rem, cst; + max_wide_int rem, cst; - if (val.is_zero ()) + if (val.zero_p ()) { - if (*mult_set && !mult->is_zero ()) + if (*mult_set && !mult->zero_p ()) return false; *mult_set = true; - *mult = double_int_zero; + *mult = 0; return true; } - if (div.is_zero ()) + if (div.zero_p ()) return false; - cst = val.sdivmod (div, FLOOR_DIV_EXPR, &rem); - if (!rem.is_zero ()) + cst = val.sdivmod_floor (div, &rem); + if (!rem.zero_p ()) return false; if (*mult_set && *mult != cst) @@ -776,14 +776,14 @@ double_int_constant_multiple_p (double_int val, double_int div, bool aff_combination_constant_multiple_p (aff_tree *val, aff_tree *div, - double_int *mult) + max_wide_int *mult) { bool mult_set = false; unsigned i; - if (val->n == 0 && val->offset.is_zero ()) + if (val->n == 0 && val->offset.zero_p ()) { - *mult = double_int_zero; + *mult = 0; return true; } if (val->n != div->n) @@ -792,8 +792,8 @@ aff_combination_constant_multiple_p (aff_tree *val, aff_tree *div, if (val->rest || div->rest) return false; - if (!double_int_constant_multiple_p (val->offset, div->offset, - &mult_set, mult)) + if (!wide_int_constant_multiple_p (val->offset, div->offset, + &mult_set, mult)) return false; for (i = 0; i < div->n; i++) @@ -802,8 +802,8 @@ aff_combination_constant_multiple_p (aff_tree *val, aff_tree *div, = aff_combination_find_elt (val, div->elts[i].val, NULL); if (!elt) return false; - if (!double_int_constant_multiple_p (elt->coef, div->elts[i].coef, - &mult_set, mult)) + if (!wide_int_constant_multiple_p (elt->coef, div->elts[i].coef, + &mult_set, mult)) return false; } @@ -817,13 +817,13 @@ static void print_aff (FILE *file, aff_tree *val) { unsigned i; - bool uns = TYPE_UNSIGNED (val->type); + signop sgn = TYPE_SIGN (val->type); if (POINTER_TYPE_P (val->type)) - uns = false; + sgn = SIGNED; fprintf (file, "{\n type = "); print_generic_expr (file, val->type, TDF_VOPS|TDF_MEMSYMS); fprintf (file, "\n offset = "); - dump_double_int (file, val->offset, uns); + print_dec (val->offset, file, sgn); if (val->n > 0) { fprintf (file, "\n elements = {\n"); @@ -833,7 +833,7 @@ print_aff (FILE *file, aff_tree *val) print_generic_expr (file, val->elts[i].val, TDF_VOPS|TDF_MEMSYMS); fprintf (file, " * "); - dump_double_int (file, val->elts[i].coef, uns); + print_dec (val->elts[i].coef, file, sgn); if (i != val->n - 1) fprintf (file, ", \n"); } @@ -860,7 +860,7 @@ debug_aff (aff_tree *val) location is stored to SIZE. */ void -get_inner_reference_aff (tree ref, aff_tree *addr, double_int *size) +get_inner_reference_aff (tree ref, aff_tree *addr, max_wide_int *size) { HOST_WIDE_INT bitsize, bitpos; tree toff; @@ -882,36 +882,36 @@ get_inner_reference_aff (tree ref, aff_tree *addr, double_int *size) } aff_combination_const (&tmp, sizetype, - double_int::from_shwi (bitpos / BITS_PER_UNIT)); + max_wide_int (bitpos / BITS_PER_UNIT)); aff_combination_add (addr, &tmp); - *size = double_int::from_shwi ((bitsize + BITS_PER_UNIT - 1) / BITS_PER_UNIT); + *size = (bitsize + BITS_PER_UNIT - 1) / BITS_PER_UNIT; } /* Returns true if a region of size SIZE1 at position 0 and a region of size SIZE2 at position DIFF cannot overlap. 
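wide_int_constant_multiple_p above accepts VAL only when it is an exact multiple of DIV and the quotient agrees with any multiplier recorded so far; that is what lets aff_combination_constant_multiple_p demand one common MULT across the offset and every element. A host-integer model under those assumptions (illustrative names; floor and truncating division agree here because only exact divisions pass the remainder test):

#include <cstdlib>

static bool
constant_multiple_p (long long val, long long div,
                     bool *mult_set, long long *mult)
{
  if (val == 0)
    {
      /* Zero is a multiple of anything, but only with multiplier 0.  */
      if (*mult_set && *mult != 0)
        return false;
      *mult_set = true;
      *mult = 0;
      return true;
    }
  if (div == 0)
    return false;
  lldiv_t qr = lldiv (val, div);
  if (qr.rem != 0)
    return false;                     /* not an exact multiple */
  if (*mult_set && *mult != qr.quot)
    return false;                     /* conflicts with earlier parts */
  *mult_set = true;
  *mult = qr.quot;
  return true;
}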
*/ bool -aff_comb_cannot_overlap_p (aff_tree *diff, double_int size1, double_int size2) +aff_comb_cannot_overlap_p (aff_tree *diff, const max_wide_int &size1, const max_wide_int &size2) { - double_int d, bound; + max_wide_int d, bound; /* Unless the difference is a constant, we fail. */ if (diff->n != 0) return false; d = diff->offset; - if (d.is_negative ()) + if (d.neg_p (SIGNED)) { /* The second object is before the first one, we succeed if the last element of the second object is before the start of the first one. */ - bound = d + size2 + double_int_minus_one; - return bound.is_negative (); + bound = d + size2 - 1; + return bound.neg_p (SIGNED); } else { /* We succeed if the second object starts after the first one ends. */ - return size1.sle (d); + return size1.les_p (d); } } diff --git a/gcc/tree-affine.h b/gcc/tree-affine.h index b2558f7a4bd..be91ff6969e 100644 --- a/gcc/tree-affine.h +++ b/gcc/tree-affine.h @@ -20,6 +20,8 @@ along with GCC; see the file COPYING3. If not see /* Affine combination of trees. We keep track of at most MAX_AFF_ELTS elements to make things simpler; this is sufficient in most cases. */ +#include "wide-int.h" + #define MAX_AFF_ELTS 8 /* Element of an affine combination. */ @@ -30,7 +32,7 @@ struct aff_comb_elt tree val; /* Its coefficient in the combination. */ - double_int coef; + max_wide_int coef; }; typedef struct affine_tree_combination { @@ -39,7 +41,7 @@ typedef struct affine_tree_combination tree type; /* Constant offset. */ - double_int offset; + max_wide_int offset; /* Number of elements of the combination. */ unsigned n; @@ -58,25 +60,25 @@ typedef struct affine_tree_combination tree rest; } aff_tree; -double_int double_int_ext_for_comb (double_int, aff_tree *); -void aff_combination_const (aff_tree *, tree, double_int); +max_wide_int wide_int_ext_for_comb (max_wide_int, aff_tree *); +void aff_combination_const (aff_tree *, tree, const max_wide_int &); void aff_combination_elt (aff_tree *, tree, tree); -void aff_combination_scale (aff_tree *, double_int); +void aff_combination_scale (aff_tree *, max_wide_int); void aff_combination_mult (aff_tree *, aff_tree *, aff_tree *); void aff_combination_add (aff_tree *, aff_tree *); -void aff_combination_add_elt (aff_tree *, tree, double_int); +void aff_combination_add_elt (aff_tree *, tree, max_wide_int); void aff_combination_remove_elt (aff_tree *, unsigned); void aff_combination_convert (aff_tree *, tree); void tree_to_aff_combination (tree, tree, aff_tree *); tree aff_combination_to_tree (aff_tree *); void unshare_aff_combination (aff_tree *); -bool aff_combination_constant_multiple_p (aff_tree *, aff_tree *, double_int *); +bool aff_combination_constant_multiple_p (aff_tree *, aff_tree *, max_wide_int *); void aff_combination_expand (aff_tree *, struct pointer_map_t **); void tree_to_aff_combination_expand (tree, tree, aff_tree *, struct pointer_map_t **); -void get_inner_reference_aff (tree, aff_tree *, double_int *); +void get_inner_reference_aff (tree, aff_tree *, max_wide_int *); void free_affine_expand_cache (struct pointer_map_t **); -bool aff_comb_cannot_overlap_p (aff_tree *, double_int, double_int); +bool aff_comb_cannot_overlap_p (aff_tree *, const max_wide_int &, const max_wide_int &); /* Debugging functions. 
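The disjointness test in aff_comb_cannot_overlap_p above compares region A = [0, size1) with region B = [d, d + size2), where d is the constant offset difference. The same two branches with plain signed words (illustrative sketch, assuming everything fits in 64 bits):

#include <cstdint>

static bool
cannot_overlap_p (int64_t d, int64_t size1, int64_t size2)
{
  if (d < 0)
    /* B starts before A: disjoint iff B's last byte, d + size2 - 1,
       still lies below offset 0.  */
    return d + size2 - 1 < 0;
  /* B starts at or after A's start: disjoint iff A has ended by d.  */
  return size1 <= d;
}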
*/ void debug_aff (aff_tree *); diff --git a/gcc/tree-call-cdce.c b/gcc/tree-call-cdce.c index 8edcad93b8b..65db1ecf5dc 100644 --- a/gcc/tree-call-cdce.c +++ b/gcc/tree-call-cdce.c @@ -195,7 +195,7 @@ check_pow (gimple pow_call) return false; if (REAL_VALUES_LESS (bcv, dconst1)) return false; - real_from_integer (&mv, TYPE_MODE (TREE_TYPE (base)), 256, 0, 1); + real_from_integer (&mv, TYPE_MODE (TREE_TYPE (base)), 256, UNSIGNED); if (REAL_VALUES_LESS (mv, bcv)) return false; return true; @@ -412,7 +412,7 @@ gen_conditions_for_pow_cst_base (tree base, tree expn, REAL_VALUE_TYPE bcv = TREE_REAL_CST (base); gcc_assert (!REAL_VALUES_EQUAL (bcv, dconst1) && !REAL_VALUES_LESS (bcv, dconst1)); - real_from_integer (&mv, TYPE_MODE (TREE_TYPE (base)), 256, 0, 1); + real_from_integer (&mv, TYPE_MODE (TREE_TYPE (base)), 256, UNSIGNED); gcc_assert (!REAL_VALUES_LESS (mv, bcv)); exp_domain = get_domain (0, false, false, diff --git a/gcc/tree-cfg.c b/gcc/tree-cfg.c index af8685c7a75..ee5f8e684ca 100644 --- a/gcc/tree-cfg.c +++ b/gcc/tree-cfg.c @@ -41,6 +41,8 @@ along with GCC; see the file COPYING3. If not see #include "pointer-set.h" #include "tree-inline.h" #include "target.h" +#include "wide-int.h" +#include "wide-int-print.h" /* This file contains functions for building the Control Flow Graph (CFG) for a function tree. */ @@ -1379,12 +1381,12 @@ group_case_labels_stmt (gimple stmt) { tree merge_case = gimple_switch_label (stmt, i); basic_block merge_bb = label_to_block (CASE_LABEL (merge_case)); - double_int bhp1 = tree_to_double_int (base_high) + double_int_one; + wide_int bhp1 = wide_int (base_high) + 1; /* Merge the cases if they jump to the same place, and their ranges are consecutive. */ if (merge_bb == base_bb - && tree_to_double_int (CASE_LOW (merge_case)) == bhp1) + && wide_int (CASE_LOW (merge_case)) == bhp1) { base_high = CASE_HIGH (merge_case) ? CASE_HIGH (merge_case) : CASE_LOW (merge_case); @@ -2694,24 +2696,25 @@ verify_expr (tree *tp, int *walk_subtrees, void *data ATTRIBUTE_UNUSED) if (TREE_CODE (t) == BIT_FIELD_REF) { - if (!host_integerp (TREE_OPERAND (t, 1), 1) - || !host_integerp (TREE_OPERAND (t, 2), 1)) + if (!tree_fits_uhwi_p (TREE_OPERAND (t, 1)) + || !tree_fits_uhwi_p (TREE_OPERAND (t, 2))) { error ("invalid position or size operand to BIT_FIELD_REF"); return t; } if (INTEGRAL_TYPE_P (TREE_TYPE (t)) && (TYPE_PRECISION (TREE_TYPE (t)) - != TREE_INT_CST_LOW (TREE_OPERAND (t, 1)))) + != tree_to_uhwi (TREE_OPERAND (t, 1)))) { error ("integral result type precision does not match " "field size of BIT_FIELD_REF"); return t; } else if (!INTEGRAL_TYPE_P (TREE_TYPE (t)) + && !AGGREGATE_TYPE_P (TREE_TYPE (t)) && TYPE_MODE (TREE_TYPE (t)) != BLKmode && (GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (t))) - != TREE_INT_CST_LOW (TREE_OPERAND (t, 1)))) + != tree_to_uhwi (TREE_OPERAND (t, 1)))) { error ("mode precision of non-integral result does not " "match field size of BIT_FIELD_REF"); @@ -3520,7 +3523,7 @@ verify_gimple_assign_binary (gimple stmt) only allow shifting by a constant multiple of the element size. 
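For reference while reading this and the following files, the tree accessor renames in the patch are mechanical, and each new predicate guards its matching accessor (all pairs taken from hunks in this patch):

/* old interface               ->  new interface
   host_integerp (t, 1)        ->  tree_fits_uhwi_p (t)
   host_integerp (t, 0)        ->  tree_fits_shwi_p (t)
   tree_low_cst (t, 1)         ->  tree_to_uhwi (t)
   tree_low_cst (t, 0)         ->  tree_to_shwi (t)
   TREE_INT_CST_LOW (t)        ->  tree_to_hwi (t)
   tree_to_double_int (t)      ->  wide_int (t) / max_wide_int (t)  */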
*/ if (!INTEGRAL_TYPE_P (TREE_TYPE (rhs1_type)) && (TREE_CODE (rhs2) != INTEGER_CST - || !div_if_zero_remainder (EXACT_DIV_EXPR, rhs2, + || !div_if_zero_remainder (rhs2, TYPE_SIZE (TREE_TYPE (rhs1_type))))) { error ("non-element sized vector shift of floating point vector"); @@ -6270,7 +6273,7 @@ move_stmt_eh_region_tree_nr (tree old_t_nr, struct move_stmt_d *p) { int old_nr, new_nr; - old_nr = tree_low_cst (old_t_nr, 0); + old_nr = tree_to_shwi (old_t_nr); new_nr = move_stmt_eh_region_nr (old_nr, p); return build_int_cst (integer_type_node, new_nr); @@ -7160,13 +7163,13 @@ print_loop (FILE *file, struct loop *loop, int indent, int verbosity) if (loop->any_upper_bound) { fprintf (file, ", upper_bound = "); - dump_double_int (file, loop->nb_iterations_upper_bound, true); + print_decu (loop->nb_iterations_upper_bound, file); } if (loop->any_estimate) { fprintf (file, ", estimate = "); - dump_double_int (file, loop->nb_iterations_estimate, true); + print_decu (loop->nb_iterations_estimate, file); } fprintf (file, ")\n"); diff --git a/gcc/tree-chrec.c b/gcc/tree-chrec.c index c18ccd3f933..8aeefb90072 100644 --- a/gcc/tree-chrec.c +++ b/gcc/tree-chrec.c @@ -460,7 +460,7 @@ chrec_fold_multiply (tree type, static tree tree_fold_binomial (tree type, tree n, unsigned int k) { - double_int num, denom, idx, di_res; + wide_int num, denom, idx, di_res; bool overflow; unsigned int i; tree res; @@ -472,20 +472,20 @@ tree_fold_binomial (tree type, tree n, unsigned int k) return fold_convert (type, n); /* Numerator = n. */ - num = TREE_INT_CST (n); + num = n; /* Check that k <= n. */ - if (num.ult (double_int::from_uhwi (k))) + if (num.ltu_p (k)) return NULL_TREE; /* Denominator = 2. */ - denom = double_int::from_uhwi (2); + denom = wide_int::two (TYPE_PRECISION (TREE_TYPE (n))); /* Index = Numerator-1. */ - idx = num - double_int_one; + idx = num - 1; /* Numerator = Numerator*Index = n*(n-1). */ - num = num.mul_with_sign (idx, false, &overflow); + num = num.smul (idx, &overflow); if (overflow) return NULL_TREE; @@ -495,17 +495,17 @@ tree_fold_binomial (tree type, tree n, unsigned int k) --idx; /* Numerator *= Index. */ - num = num.mul_with_sign (idx, false, &overflow); + num = num.smul (idx, &overflow); if (overflow) return NULL_TREE; /* Denominator *= i. */ - denom *= double_int::from_uhwi (i); + denom *= i; } /* Result = Numerator / Denominator. */ - di_res = num.div (denom, true, EXACT_DIV_EXPR); - res = build_int_cst_wide (type, di_res.low, di_res.high); + di_res = num.udiv_trunc (denom); + res = wide_int_to_tree (type, di_res); return int_fits_type_p (res, type) ? 
res : NULL_TREE; } diff --git a/gcc/tree-data-ref.c b/gcc/tree-data-ref.c index 10431c09237..13cca6f7867 100644 --- a/gcc/tree-data-ref.c +++ b/gcc/tree-data-ref.c @@ -772,8 +772,8 @@ dr_analyze_innermost (struct data_reference *dr, struct loop *nest) { if (!integer_zerop (TREE_OPERAND (base, 1))) { - double_int moff = mem_ref_offset (base); - tree mofft = double_int_to_tree (sizetype, moff); + addr_wide_int moff = mem_ref_offset (base); + tree mofft = wide_int_to_tree (sizetype, moff); if (!poffset) poffset = mofft; else @@ -1369,10 +1369,10 @@ dr_may_alias_p (const struct data_reference *a, const struct data_reference *b, if (!loop_nest) { aff_tree off1, off2; - double_int size1, size2; + max_wide_int size1, size2; get_inner_reference_aff (DR_REF (a), &off1, &size1); get_inner_reference_aff (DR_REF (b), &off2, &size2); - aff_combination_scale (&off1, double_int_minus_one); + aff_combination_scale (&off1, -1); aff_combination_add (&off2, &off1); if (aff_comb_cannot_overlap_p (&off2, size1, size2)) return false; @@ -1747,15 +1747,15 @@ analyze_ziv_subscript (tree chrec_a, static tree max_stmt_executions_tree (struct loop *loop) { - double_int nit; + max_wide_int nit; if (!max_stmt_executions (loop, &nit)) return chrec_dont_know; - if (!double_int_fits_to_tree_p (unsigned_type_node, nit)) + if (!nit.fits_to_tree_p (unsigned_type_node)) return chrec_dont_know; - return double_int_to_tree (unsigned_type_node, nit); + return wide_int_to_tree (unsigned_type_node, nit); } /* Determine whether the CHREC is always positive/negative. If the expression @@ -2833,16 +2833,16 @@ gcd_of_steps_may_divide_p (const_tree chrec, const_tree cst) HOST_WIDE_INT cd = 0, val; tree step; - if (!host_integerp (cst, 0)) + if (!tree_fits_shwi_p (cst)) return true; - val = tree_low_cst (cst, 0); + val = tree_to_shwi (cst); while (TREE_CODE (chrec) == POLYNOMIAL_CHREC) { step = CHREC_RIGHT (chrec); - if (!host_integerp (step, 0)) + if (!tree_fits_shwi_p (step)) return true; - cd = gcd (cd, tree_low_cst (step, 0)); + cd = gcd (cd, tree_to_shwi (step)); chrec = CHREC_LEFT (chrec); } diff --git a/gcc/tree-dfa.c b/gcc/tree-dfa.c index 57aae95a074..204b57c9202 100644 --- a/gcc/tree-dfa.c +++ b/gcc/tree-dfa.c @@ -383,7 +383,7 @@ get_ref_base_and_extent (tree exp, HOST_WIDE_INT *poffset, HOST_WIDE_INT bitsize = -1; HOST_WIDE_INT maxsize = -1; tree size_tree = NULL_TREE; - double_int bit_offset = double_int_zero; + addr_wide_int bit_offset = 0; HOST_WIDE_INT hbit_offset; bool seen_variable_array_ref = false; tree base_type; @@ -403,10 +403,10 @@ get_ref_base_and_extent (tree exp, HOST_WIDE_INT *poffset, } if (size_tree != NULL_TREE) { - if (! host_integerp (size_tree, 1)) + if (! tree_fits_uhwi_p (size_tree)) bitsize = -1; else - bitsize = TREE_INT_CST_LOW (size_tree); + bitsize = tree_to_uhwi (size_tree); } /* Initially, maxsize is the same as the accessed element size. @@ -422,7 +422,7 @@ get_ref_base_and_extent (tree exp, HOST_WIDE_INT *poffset, switch (TREE_CODE (exp)) { case BIT_FIELD_REF: - bit_offset += tree_to_double_int (TREE_OPERAND (exp, 2)); + bit_offset += TREE_OPERAND (exp, 2); break; case COMPONENT_REF: @@ -432,11 +432,11 @@ get_ref_base_and_extent (tree exp, HOST_WIDE_INT *poffset, if (this_offset && TREE_CODE (this_offset) == INTEGER_CST) { - double_int doffset = tree_to_double_int (this_offset); - doffset = doffset.lshift (BITS_PER_UNIT == 8 + addr_wide_int woffset = this_offset; + woffset = woffset.lshift (BITS_PER_UNIT == 8 ? 
3 : exact_log2 (BITS_PER_UNIT)); - doffset += tree_to_double_int (DECL_FIELD_BIT_OFFSET (field)); - bit_offset = bit_offset + doffset; + woffset += DECL_FIELD_BIT_OFFSET (field); + bit_offset += woffset; /* If we had seen a variable array ref already and we just referenced the last field of a struct or a union member @@ -453,13 +453,13 @@ get_ref_base_and_extent (tree exp, HOST_WIDE_INT *poffset, { tree fsize = DECL_SIZE_UNIT (field); tree ssize = TYPE_SIZE_UNIT (stype); - if (host_integerp (fsize, 0) - && host_integerp (ssize, 0) - && doffset.fits_shwi ()) - maxsize += ((TREE_INT_CST_LOW (ssize) - - TREE_INT_CST_LOW (fsize)) + if (tree_fits_shwi_p (fsize) + && tree_fits_shwi_p (ssize) + && woffset.fits_shwi_p ()) + maxsize += ((tree_to_shwi (ssize) + - tree_to_shwi (fsize)) * BITS_PER_UNIT - - doffset.to_shwi ()); + - woffset.to_shwi ()); else maxsize = -1; } @@ -473,9 +473,9 @@ get_ref_base_and_extent (tree exp, HOST_WIDE_INT *poffset, because that would get us out of the structure otherwise. */ if (maxsize != -1 && csize - && host_integerp (csize, 1) - && bit_offset.fits_shwi ()) - maxsize = TREE_INT_CST_LOW (csize) + && tree_fits_uhwi_p (csize) + && bit_offset.fits_shwi_p ()) + maxsize = tree_to_shwi (csize) - bit_offset.to_shwi (); else maxsize = -1; @@ -496,13 +496,13 @@ get_ref_base_and_extent (tree exp, HOST_WIDE_INT *poffset, && (unit_size = array_ref_element_size (exp), TREE_CODE (unit_size) == INTEGER_CST)) { - double_int doffset - = (TREE_INT_CST (index) - TREE_INT_CST (low_bound)) - .sext (TYPE_PRECISION (TREE_TYPE (index))); - doffset *= tree_to_double_int (unit_size); - doffset = doffset.lshift (BITS_PER_UNIT == 8 + addr_wide_int woffset + = (addr_wide_int (index) - low_bound) + .sext (TYPE_PRECISION (TREE_TYPE (index))); + woffset *= addr_wide_int (unit_size); + woffset = woffset.lshift (BITS_PER_UNIT == 8 ? 3 : exact_log2 (BITS_PER_UNIT)); - bit_offset = bit_offset + doffset; + bit_offset += woffset; /* An array ref with a constant index up in the structure hierarchy will constrain the size of any variable array ref @@ -517,10 +517,9 @@ get_ref_base_and_extent (tree exp, HOST_WIDE_INT *poffset, because that would get us outside of the array otherwise. */ if (maxsize != -1 && asize - && host_integerp (asize, 1) - && bit_offset.fits_shwi ()) - maxsize = TREE_INT_CST_LOW (asize) - - bit_offset.to_shwi (); + && tree_fits_uhwi_p (asize) + && bit_offset.fits_shwi_p ()) + maxsize = tree_to_uhwi (asize) - bit_offset.to_shwi (); else maxsize = -1; @@ -535,7 +534,7 @@ get_ref_base_and_extent (tree exp, HOST_WIDE_INT *poffset, break; case IMAGPART_EXPR: - bit_offset += double_int::from_uhwi (bitsize); + bit_offset += bitsize; break; case VIEW_CONVERT_EXPR: @@ -549,11 +548,11 @@ get_ref_base_and_extent (tree exp, HOST_WIDE_INT *poffset, exp = TREE_OPERAND (TREE_OPERAND (exp, 0), 0); else { - double_int off = mem_ref_offset (exp); + addr_wide_int off = mem_ref_offset (exp); off = off.lshift (BITS_PER_UNIT == 8 ? 
3 : exact_log2 (BITS_PER_UNIT)); - off = off + bit_offset; - if (off.fits_shwi ()) + off += bit_offset; + if (off.fits_shwi_p ()) { bit_offset = off; exp = TREE_OPERAND (TREE_OPERAND (exp, 0), 0); @@ -571,7 +570,7 @@ get_ref_base_and_extent (tree exp, HOST_WIDE_INT *poffset, if (TMR_INDEX (exp) || TMR_INDEX2 (exp)) { exp = TREE_OPERAND (TMR_BASE (exp), 0); - bit_offset = double_int_zero; + bit_offset = 0; maxsize = -1; goto done; } @@ -579,11 +578,11 @@ get_ref_base_and_extent (tree exp, HOST_WIDE_INT *poffset, exp = TREE_OPERAND (TMR_BASE (exp), 0); else { - double_int off = mem_ref_offset (exp); + addr_wide_int off = mem_ref_offset (exp); off = off.lshift (BITS_PER_UNIT == 8 ? 3 : exact_log2 (BITS_PER_UNIT)); off += bit_offset; - if (off.fits_shwi ()) + if (off.fits_shwi_p ()) { bit_offset = off; exp = TREE_OPERAND (TMR_BASE (exp), 0); @@ -600,7 +599,7 @@ get_ref_base_and_extent (tree exp, HOST_WIDE_INT *poffset, } done: - if (!bit_offset.fits_shwi ()) + if (!bit_offset.fits_shwi_p ()) { *poffset = 0; *psize = bitsize; @@ -624,9 +623,9 @@ get_ref_base_and_extent (tree exp, HOST_WIDE_INT *poffset, if (seen_variable_array_ref && maxsize != -1 - && (!host_integerp (TYPE_SIZE (base_type), 1) + && (!tree_fits_uhwi_p (TYPE_SIZE (base_type)) || (hbit_offset + maxsize - == (signed) TREE_INT_CST_LOW (TYPE_SIZE (base_type))))) + == (signed) tree_to_uhwi (TYPE_SIZE (base_type))))) maxsize = -1; /* In case of a decl or constant base object we can do better. */ @@ -636,16 +635,16 @@ get_ref_base_and_extent (tree exp, HOST_WIDE_INT *poffset, /* If maxsize is unknown adjust it according to the size of the base decl. */ if (maxsize == -1 - && host_integerp (DECL_SIZE (exp), 1)) - maxsize = TREE_INT_CST_LOW (DECL_SIZE (exp)) - hbit_offset; + && tree_fits_uhwi_p (DECL_SIZE (exp))) + maxsize = tree_to_uhwi (DECL_SIZE (exp)) - hbit_offset; } else if (CONSTANT_CLASS_P (exp)) { /* If maxsize is unknown adjust it according to the size of the base type constant. */ if (maxsize == -1 - && host_integerp (TYPE_SIZE (TREE_TYPE (exp)), 1)) - maxsize = TREE_INT_CST_LOW (TYPE_SIZE (TREE_TYPE (exp))) - hbit_offset; + && tree_fits_uhwi_p (TYPE_SIZE (TREE_TYPE (exp)))) + maxsize = tree_to_uhwi (TYPE_SIZE (TREE_TYPE (exp))) - hbit_offset; } /* ??? Due to negative offsets in ARRAY_REF we can end up with diff --git a/gcc/tree-dump.c b/gcc/tree-dump.c index 12a2be7e49e..7f9428dde94 100644 --- a/gcc/tree-dump.c +++ b/gcc/tree-dump.c @@ -29,6 +29,8 @@ along with GCC; see the file COPYING3. 
If not see #include "langhooks.h" #include "tree-iterator.h" #include "tree-pretty-print.h" +#include "wide-int.h" +#include "wide-int-print.h" static unsigned int queue (dump_info_p, const_tree, int); static void dump_index (dump_info_p, unsigned int); @@ -560,9 +562,8 @@ dequeue_and_dump (dump_info_p di) break; case INTEGER_CST: - if (TREE_INT_CST_HIGH (t)) - dump_int (di, "high", TREE_INT_CST_HIGH (t)); - dump_int (di, "low", TREE_INT_CST_LOW (t)); + fprintf (di->stream, "int: "); + print_decs (wide_int (t), di->stream); break; case STRING_CST: diff --git a/gcc/tree-flow-inline.h b/gcc/tree-flow-inline.h index d5b2185102c..59505d05c50 100644 --- a/gcc/tree-flow-inline.h +++ b/gcc/tree-flow-inline.h @@ -1247,7 +1247,7 @@ get_addr_base_and_unit_offset_1 (tree exp, HOST_WIDE_INT *poffset, { case BIT_FIELD_REF: { - HOST_WIDE_INT this_off = TREE_INT_CST_LOW (TREE_OPERAND (exp, 2)); + HOST_WIDE_INT this_off = tree_to_hwi (TREE_OPERAND (exp, 2)); if (this_off % BITS_PER_UNIT) return NULL_TREE; byte_offset += this_off / BITS_PER_UNIT; @@ -1262,12 +1262,12 @@ get_addr_base_and_unit_offset_1 (tree exp, HOST_WIDE_INT *poffset, if (!this_offset || TREE_CODE (this_offset) != INTEGER_CST - || (TREE_INT_CST_LOW (DECL_FIELD_BIT_OFFSET (field)) + || (tree_to_hwi (DECL_FIELD_BIT_OFFSET (field)) % BITS_PER_UNIT)) return NULL_TREE; - hthis_offset = TREE_INT_CST_LOW (this_offset); - hthis_offset += (TREE_INT_CST_LOW (DECL_FIELD_BIT_OFFSET (field)) + hthis_offset = tree_to_hwi (this_offset); + hthis_offset += (tree_to_hwi (DECL_FIELD_BIT_OFFSET (field)) / BITS_PER_UNIT); byte_offset += hthis_offset; } @@ -1290,10 +1290,10 @@ get_addr_base_and_unit_offset_1 (tree exp, HOST_WIDE_INT *poffset, && (unit_size = array_ref_element_size (exp), TREE_CODE (unit_size) == INTEGER_CST)) { - HOST_WIDE_INT hindex = TREE_INT_CST_LOW (index); + HOST_WIDE_INT hindex = tree_to_hwi (index); - hindex -= TREE_INT_CST_LOW (low_bound); - hindex *= TREE_INT_CST_LOW (unit_size); + hindex -= tree_to_hwi (low_bound); + hindex *= tree_to_hwi (unit_size); byte_offset += hindex; } else @@ -1305,7 +1305,7 @@ get_addr_base_and_unit_offset_1 (tree exp, HOST_WIDE_INT *poffset, break; case IMAGPART_EXPR: - byte_offset += TREE_INT_CST_LOW (TYPE_SIZE_UNIT (TREE_TYPE (exp))); + byte_offset += tree_to_hwi (TYPE_SIZE_UNIT (TREE_TYPE (exp))); break; case VIEW_CONVERT_EXPR: @@ -1323,9 +1323,8 @@ get_addr_base_and_unit_offset_1 (tree exp, HOST_WIDE_INT *poffset, { if (!integer_zerop (TREE_OPERAND (exp, 1))) { - double_int off = mem_ref_offset (exp); - gcc_assert (off.high == -1 || off.high == 0); - byte_offset += off.to_shwi (); + addr_wide_int off = mem_ref_offset (exp); + byte_offset += off.to_short_addr (); } exp = TREE_OPERAND (base, 0); } @@ -1346,9 +1345,8 @@ get_addr_base_and_unit_offset_1 (tree exp, HOST_WIDE_INT *poffset, return NULL_TREE; if (!integer_zerop (TMR_OFFSET (exp))) { - double_int off = mem_ref_offset (exp); - gcc_assert (off.high == -1 || off.high == 0); - byte_offset += off.to_shwi (); + addr_wide_int off = mem_ref_offset (exp); + byte_offset += off.to_short_addr (); } exp = TREE_OPERAND (base, 0); } diff --git a/gcc/tree-flow.h b/gcc/tree-flow.h index caa8d7457fb..47d6680be18 100644 --- a/gcc/tree-flow.h +++ b/gcc/tree-flow.h @@ -30,6 +30,7 @@ along with GCC; see the file COPYING3. 
If not see #include "cgraph.h" #include "ipa-reference.h" #include "tree-ssa-alias.h" +#include "wide-int.h" /* This structure is used to map a gimple statement to a label, @@ -591,7 +592,7 @@ struct tree_niter_desc a loop (provided that assumptions == true and may_be_zero == false), more precisely the number of executions of the latch of the loop. */ - double_int max; /* The upper bound on the number of iterations of + max_wide_int max; /* The upper bound on the number of iterations of the loop. */ /* The simplified shape of the exit condition. The loop exits if diff --git a/gcc/tree-inline.c b/gcc/tree-inline.c index 55e527014f1..1394b50b55a 100644 --- a/gcc/tree-inline.c +++ b/gcc/tree-inline.c @@ -825,8 +825,7 @@ remap_gimple_op_r (tree *tp, int *walk_subtrees, void *data) *walk_subtrees = 0; else if (TREE_CODE (*tp) == INTEGER_CST) - *tp = build_int_cst_wide (new_type, TREE_INT_CST_LOW (*tp), - TREE_INT_CST_HIGH (*tp)); + *tp = wide_int_to_tree (new_type, *tp); else { *tp = copy_node (*tp); @@ -1000,8 +999,7 @@ copy_tree_body_r (tree *tp, int *walk_subtrees, void *data) *walk_subtrees = 0; else if (TREE_CODE (*tp) == INTEGER_CST) - *tp = build_int_cst_wide (new_type, TREE_INT_CST_LOW (*tp), - TREE_INT_CST_HIGH (*tp)); + *tp = wide_int_to_tree (new_type, *tp); else { *tp = copy_node (*tp); @@ -1192,7 +1190,7 @@ remap_eh_region_tree_nr (tree old_t_nr, copy_body_data *id) { int old_nr, new_nr; - old_nr = tree_low_cst (old_t_nr, 0); + old_nr = tree_to_shwi (old_t_nr); new_nr = remap_eh_region_nr (old_nr, id); return build_int_cst (integer_type_node, new_nr); diff --git a/gcc/tree-mudflap.c b/gcc/tree-mudflap.c index adf2f7bd7d2..fe566f32d67 100644 --- a/gcc/tree-mudflap.c +++ b/gcc/tree-mudflap.c @@ -861,10 +861,10 @@ mf_xform_derefs_1 (gimple_stmt_iterator *iter, tree *tp, addr = build1 (ADDR_EXPR, build_pointer_type (type), t); limit = fold_build2_loc (location, MINUS_EXPR, mf_uintptr_type, - fold_build2_loc (location, PLUS_EXPR, mf_uintptr_type, - fold_convert (mf_uintptr_type, addr), - size), - integer_one_node); + fold_build2_loc (location, PLUS_EXPR, mf_uintptr_type, + fold_convert (mf_uintptr_type, addr), + size), + integer_one_node); } break; diff --git a/gcc/tree-object-size.c b/gcc/tree-object-size.c index 1a52a416a63..7066c56c45e 100644 --- a/gcc/tree-object-size.c +++ b/gcc/tree-object-size.c @@ -77,8 +77,8 @@ static unsigned HOST_WIDE_INT offset_limit; static void init_offset_limit (void) { - if (host_integerp (TYPE_MAX_VALUE (sizetype), 1)) - offset_limit = tree_low_cst (TYPE_MAX_VALUE (sizetype), 1); + if (tree_fits_uhwi_p (TYPE_MAX_VALUE (sizetype))) + offset_limit = tree_to_uhwi (TYPE_MAX_VALUE (sizetype)); else offset_limit = -1; offset_limit /= 2; @@ -106,7 +106,7 @@ compute_object_offset (const_tree expr, const_tree var) t = TREE_OPERAND (expr, 1); off = size_binop (PLUS_EXPR, DECL_FIELD_OFFSET (t), - size_int (tree_low_cst (DECL_FIELD_BIT_OFFSET (t), 1) + size_int (tree_to_uhwi (DECL_FIELD_BIT_OFFSET (t)) / BITS_PER_UNIT)); break; @@ -141,7 +141,7 @@ compute_object_offset (const_tree expr, const_tree var) case MEM_REF: gcc_assert (TREE_CODE (TREE_OPERAND (expr, 0)) == ADDR_EXPR); - return double_int_to_tree (sizetype, mem_ref_offset (expr)); + return wide_int_to_tree (sizetype, mem_ref_offset (expr)); default: return error_mark_node; @@ -191,10 +191,10 @@ addr_object_size (struct object_size_info *osi, const_tree ptr, } if (sz != unknown[object_size_type]) { - double_int dsz = double_int::from_uhwi (sz) - mem_ref_offset (pt_var); - if (dsz.is_negative ()) + 
addr_wide_int dsz = addr_wide_int (sz) - mem_ref_offset (pt_var); + if (dsz.neg_p (SIGNED)) sz = 0; - else if (dsz.fits_uhwi ()) + else if (dsz.fits_uhwi_p ()) sz = dsz.to_uhwi (); else sz = unknown[object_size_type]; @@ -205,16 +205,16 @@ addr_object_size (struct object_size_info *osi, const_tree ptr, } else if (pt_var && DECL_P (pt_var) - && host_integerp (DECL_SIZE_UNIT (pt_var), 1) + && tree_fits_uhwi_p (DECL_SIZE_UNIT (pt_var)) && (unsigned HOST_WIDE_INT) - tree_low_cst (DECL_SIZE_UNIT (pt_var), 1) < offset_limit) + tree_to_uhwi (DECL_SIZE_UNIT (pt_var)) < offset_limit) pt_var_size = DECL_SIZE_UNIT (pt_var); else if (pt_var && TREE_CODE (pt_var) == STRING_CST && TYPE_SIZE_UNIT (TREE_TYPE (pt_var)) - && host_integerp (TYPE_SIZE_UNIT (TREE_TYPE (pt_var)), 1) + && tree_fits_uhwi_p (TYPE_SIZE_UNIT (TREE_TYPE (pt_var))) && (unsigned HOST_WIDE_INT) - tree_low_cst (TYPE_SIZE_UNIT (TREE_TYPE (pt_var)), 1) + tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (pt_var))) < offset_limit) pt_var_size = TYPE_SIZE_UNIT (TREE_TYPE (pt_var)); else @@ -239,7 +239,7 @@ addr_object_size (struct object_size_info *osi, const_tree ptr, if (var != pt_var && TREE_CODE (var) == ARRAY_REF) var = TREE_OPERAND (var, 0); if (! TYPE_SIZE_UNIT (TREE_TYPE (var)) - || ! host_integerp (TYPE_SIZE_UNIT (TREE_TYPE (var)), 1) + || ! tree_fits_uhwi_p (TYPE_SIZE_UNIT (TREE_TYPE (var))) || (pt_var_size && tree_int_cst_lt (pt_var_size, TYPE_SIZE_UNIT (TREE_TYPE (var))))) @@ -367,8 +367,8 @@ addr_object_size (struct object_size_info *osi, const_tree ptr, else bytes = pt_var_size; - if (host_integerp (bytes, 1)) - return tree_low_cst (bytes, 1); + if (tree_fits_uhwi_p (bytes)) + return tree_to_uhwi (bytes); return unknown[object_size_type]; } @@ -397,9 +397,9 @@ alloc_object_size (const_gimple call, int object_size_type) { tree p = TREE_VALUE (alloc_size); - arg1 = TREE_INT_CST_LOW (TREE_VALUE (p))-1; + arg1 = tree_to_hwi (TREE_VALUE (p))-1; if (TREE_CHAIN (p)) - arg2 = TREE_INT_CST_LOW (TREE_VALUE (TREE_CHAIN (p)))-1; + arg2 = tree_to_hwi (TREE_VALUE (TREE_CHAIN (p)))-1; } if (DECL_BUILT_IN_CLASS (callee) == BUILT_IN_NORMAL) @@ -430,8 +430,8 @@ alloc_object_size (const_gimple call, int object_size_type) else if (arg1 >= 0) bytes = fold_convert (sizetype, gimple_call_arg (call, arg1)); - if (bytes && host_integerp (bytes, 1)) - return tree_low_cst (bytes, 1); + if (bytes && tree_fits_uhwi_p (bytes)) + return tree_to_uhwi (bytes); return unknown[object_size_type]; } @@ -791,13 +791,13 @@ plus_stmt_object_size (struct object_size_info *osi, tree var, gimple stmt) && (TREE_CODE (op0) == SSA_NAME || TREE_CODE (op0) == ADDR_EXPR)) { - if (! host_integerp (op1, 1)) + if (! tree_fits_uhwi_p (op1)) bytes = unknown[object_size_type]; else if (TREE_CODE (op0) == SSA_NAME) - return merge_object_sizes (osi, var, op0, tree_low_cst (op1, 1)); + return merge_object_sizes (osi, var, op0, tree_to_uhwi (op1)); else { - unsigned HOST_WIDE_INT off = tree_low_cst (op1, 1); + unsigned HOST_WIDE_INT off = tree_to_uhwi (op1); /* op0 will be ADDR_EXPR here. 
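The dsz computation in addr_object_size above is a guarded subtraction: the bytes that remain in an object once the MEM_REF offset is applied, clamped to zero when the offset points past the end. A one-word model (illustrative helper; the patch uses addr_wide_int precisely so this cast cannot overflow):

#include <cstdint>

static uint64_t
remaining_size (uint64_t sz, int64_t off)
{
  int64_t d = (int64_t) sz - off;   /* assumes sz fits a signed word */
  return d < 0 ? 0 : (uint64_t) d;  /* offset past the end: 0 bytes */
}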
*/ bytes = addr_object_size (osi, op0, object_size_type); @@ -1223,10 +1223,10 @@ compute_object_sizes (void) { tree ost = gimple_call_arg (call, 1); - if (host_integerp (ost, 1)) + if (tree_fits_uhwi_p (ost)) { unsigned HOST_WIDE_INT object_size_type - = tree_low_cst (ost, 1); + = tree_to_uhwi (ost); if (object_size_type < 2) result = fold_convert (size_type_node, diff --git a/gcc/tree-predcom.c b/gcc/tree-predcom.c index dceea8cc89a..dbc3bf48098 100644 --- a/gcc/tree-predcom.c +++ b/gcc/tree-predcom.c @@ -201,6 +201,7 @@ along with GCC; see the file COPYING3. If not see #include "tree-pass.h" #include "tree-affine.h" #include "tree-inline.h" +#include "wide-int-print.h" /* The maximum number of iterations between the considered memory references. */ @@ -228,7 +229,7 @@ typedef struct dref_d unsigned distance; /* Number of iterations offset from the first reference in the component. */ - double_int offset; + max_wide_int offset; /* Number of the reference in a component, in dominance ordering. */ unsigned pos; @@ -344,7 +345,7 @@ dump_dref (FILE *file, dref ref) DR_IS_READ (ref->ref) ? "" : ", write"); fprintf (file, " offset "); - dump_double_int (file, ref->offset, false); + print_decs (ref->offset, file); fprintf (file, "\n"); fprintf (file, " distance %u\n", ref->distance); @@ -617,7 +618,7 @@ aff_combination_dr_offset (struct data_reference *dr, aff_tree *offset) tree_to_aff_combination_expand (DR_OFFSET (dr), type, offset, &name_expansions); - aff_combination_const (&delta, type, tree_to_double_int (DR_INIT (dr))); + aff_combination_const (&delta, type, max_wide_int (DR_INIT (dr))); aff_combination_add (offset, &delta); } @@ -629,7 +630,7 @@ aff_combination_dr_offset (struct data_reference *dr, aff_tree *offset) static bool determine_offset (struct data_reference *a, struct data_reference *b, - double_int *off) + max_wide_int *off) { aff_tree diff, baseb, step; tree typea, typeb; @@ -650,7 +651,7 @@ determine_offset (struct data_reference *a, struct data_reference *b, { /* If the references have loop invariant address, check that they access exactly the same location. */ - *off = double_int_zero; + *off = 0; return (operand_equal_p (DR_OFFSET (a), DR_OFFSET (b), 0) && operand_equal_p (DR_INIT (a), DR_INIT (b), 0)); } @@ -659,7 +660,7 @@ determine_offset (struct data_reference *a, struct data_reference *b, is a multiple of step. 
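A worked instance of the determine_offset test above, with invented numbers: two references share a base and have step 4, with offset expressions 4*i + 8 and 4*i + 16. The difference is 16 - 8 = 8 = 2 * step, so *off becomes 2, meaning the second reference touches the first one's location two iterations later. A difference such as 6 is not a constant multiple of the step and the test fails.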
*/ aff_combination_dr_offset (a, &diff); aff_combination_dr_offset (b, &baseb); - aff_combination_scale (&baseb, double_int_minus_one); + aff_combination_scale (&baseb, -1); aff_combination_add (&diff, &baseb); tree_to_aff_combination_expand (DR_STEP (a), TREE_TYPE (DR_STEP (a)), @@ -733,7 +734,7 @@ split_data_refs_to_components (struct loop *loop, FOR_EACH_VEC_ELT (depends, i, ddr) { - double_int dummy_off; + max_wide_int dummy_off; if (DDR_ARE_DEPENDENT (ddr) == chrec_known) continue; @@ -776,7 +777,7 @@ split_data_refs_to_components (struct loop *loop, dataref = XCNEW (struct dref_d); dataref->ref = dr; dataref->stmt = DR_STMT (dr); - dataref->offset = double_int_zero; + dataref->offset = 0; dataref->distance = 0; dataref->always_accessed @@ -832,7 +833,7 @@ suitable_component_p (struct loop *loop, struct component *comp) first = comp->refs[0]; ok = suitable_reference_p (first->ref, &comp->comp_step); gcc_assert (ok); - first->offset = double_int_zero; + first->offset = 0; for (i = 1; comp->refs.iterate (i, &a); i++) { @@ -896,7 +897,7 @@ order_drefs (const void *a, const void *b) { const dref *const da = (const dref *) a; const dref *const db = (const dref *) b; - int offcmp = (*da)->offset.scmp ((*db)->offset); + int offcmp = (*da)->offset.cmps ((*db)->offset); if (offcmp != 0) return offcmp; @@ -918,16 +919,16 @@ static void add_ref_to_chain (chain_p chain, dref ref) { dref root = get_chain_root (chain); - double_int dist; + max_wide_int dist; - gcc_assert (root->offset.sle (ref->offset)); + gcc_assert (root->offset.les_p (ref->offset)); dist = ref->offset - root->offset; - if (double_int::from_uhwi (MAX_DISTANCE).ule (dist)) + if (max_wide_int::from_uhwi (MAX_DISTANCE).leu_p (dist)) { free (ref); return; } - gcc_assert (dist.fits_uhwi ()); + gcc_assert (dist.fits_uhwi_p ()); chain->refs.safe_push (ref); @@ -1022,7 +1023,7 @@ valid_initializer_p (struct data_reference *ref, unsigned distance, struct data_reference *root) { aff_tree diff, base, step; - double_int off; + max_wide_int off; /* Both REF and ROOT must be accessing the same object. */ if (!operand_equal_p (DR_BASE_ADDRESS (ref), DR_BASE_ADDRESS (root), 0)) @@ -1042,7 +1043,7 @@ valid_initializer_p (struct data_reference *ref, -DISTANCE-th iteration. */ aff_combination_dr_offset (root, &diff); aff_combination_dr_offset (ref, &base); - aff_combination_scale (&base, double_int_minus_one); + aff_combination_scale (&base, -1); aff_combination_add (&diff, &base); tree_to_aff_combination_expand (DR_STEP (root), TREE_TYPE (DR_STEP (root)), @@ -1050,7 +1051,7 @@ valid_initializer_p (struct data_reference *ref, if (!aff_combination_constant_multiple_p (&diff, &step, &off)) return false; - if (off != double_int::from_uhwi (distance)) + if (off != distance) return false; return true; @@ -1178,7 +1179,7 @@ determine_roots_comp (struct loop *loop, unsigned i; dref a; chain_p chain = NULL; - double_int last_ofs = double_int_zero; + max_wide_int last_ofs = 0; /* Invariants are handled specially. 
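add_ref_to_chain above gates chain membership on iteration distance: a reference joins only when its offset is within MAX_DISTANCE of the chain root's offset. The gate in miniature (illustrative name; the caller has already asserted root_offset <= ref_offset):

#include <cstdint>

static bool
joins_chain_p (uint64_t root_offset, uint64_t ref_offset,
               uint64_t max_distance)
{
  return ref_offset - root_offset < max_distance;
}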
*/ if (comp->comp_step == RS_INVARIANT) @@ -1193,7 +1194,7 @@ determine_roots_comp (struct loop *loop, FOR_EACH_VEC_ELT (comp->refs, i, a) { if (!chain || DR_IS_WRITE (a->ref) - || double_int::from_uhwi (MAX_DISTANCE).ule (a->offset - last_ofs)) + || max_wide_int (MAX_DISTANCE).leu_p (a->offset - last_ofs)) { if (nontrivial_chain_p (chain)) { diff --git a/gcc/tree-pretty-print.c b/gcc/tree-pretty-print.c index f00ac4cdcac..498763058aa 100644 --- a/gcc/tree-pretty-print.c +++ b/gcc/tree-pretty-print.c @@ -32,6 +32,7 @@ along with GCC; see the file COPYING3. If not see #include "dumpfile.h" #include "value-prof.h" #include "predict.h" +#include "wide-int-print.h" /* Local functions, macros and variables. */ static const char *op_symbol (const_tree); @@ -269,8 +270,8 @@ dump_array_domain (pretty_printer *buffer, tree domain, int spc, int flags) if (min && max && integer_zerop (min) - && host_integerp (max, 0)) - pp_wide_integer (buffer, TREE_INT_CST_LOW (max) + 1); + && tree_fits_shwi_p (max)) + pp_wide_integer (buffer, tree_to_shwi (max) + 1); else { if (min) @@ -1028,32 +1029,25 @@ dump_generic_node (pretty_printer *buffer, tree node, int spc, int flags, NB: Neither of the following divisors can be trivially used to recover the original literal: - TREE_INT_CST_LOW (TYPE_SIZE_UNIT (TREE_TYPE (node))) + tree_to_hwi (TYPE_SIZE_UNIT (TREE_TYPE (node))) TYPE_PRECISION (TREE_TYPE (TREE_TYPE (node))) */ - pp_wide_integer (buffer, TREE_INT_CST_LOW (node)); + pp_wide_integer (buffer, tree_to_hwi (node)); pp_string (buffer, "B"); /* pseudo-unit */ } - else if (host_integerp (node, 0)) - pp_wide_integer (buffer, TREE_INT_CST_LOW (node)); - else if (host_integerp (node, 1)) - pp_unsigned_wide_integer (buffer, TREE_INT_CST_LOW (node)); + else if (tree_fits_shwi_p (node)) + pp_wide_integer (buffer, tree_to_shwi (node)); + else if (tree_fits_uhwi_p (node)) + pp_unsigned_wide_integer (buffer, tree_to_uhwi (node)); else { - tree val = node; - unsigned HOST_WIDE_INT low = TREE_INT_CST_LOW (val); - HOST_WIDE_INT high = TREE_INT_CST_HIGH (val); + wide_int val = node; - if (tree_int_cst_sgn (val) < 0) + if (val.neg_p (TYPE_SIGN (TREE_TYPE (node)))) { pp_minus (buffer); - high = ~high + !low; - low = -low; + val = -val; } - /* Would "%x%0*x" or "%x%*0x" get zero-padding on all - systems? 
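The zero-padding question in the deleted comment is what print_hex settles in the replacement code just below: a wide value is a sequence of host words, so the most significant word prints unpadded and every lower word prints at full width with leading zeros. A sketch under that assumption (hypothetical word-array layout, not the wide-int internals):

#include <cstdio>
#include <cstdint>
#include <cinttypes>

/* Print LEN words (least significant first in VAL) as one hex number.
   Assumes LEN >= 1.  */
static void
print_hex_words (const uint64_t *val, int len, FILE *f)
{
  fprintf (f, "0x%" PRIx64, val[len - 1]);  /* top word, no padding */
  for (int i = len - 2; i >= 0; i--)
    fprintf (f, "%016" PRIx64, val[i]);     /* lower words, padded */
}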
*/ - sprintf (pp_buffer (buffer)->digit_buffer, - HOST_WIDE_INT_PRINT_DOUBLE_HEX, - (unsigned HOST_WIDE_INT) high, low); + print_hex (val, pp_buffer (buffer)->digit_buffer); pp_string (buffer, pp_buffer (buffer)->digit_buffer); } break; @@ -1301,7 +1295,7 @@ dump_generic_node (pretty_printer *buffer, tree node, int spc, int flags, tree field, val; bool is_struct_init = false; bool is_array_init = false; - double_int curidx = double_int_zero; + wide_int curidx = 0; pp_left_brace (buffer); if (TREE_CLOBBER_P (node)) pp_string (buffer, "CLOBBER"); @@ -1316,7 +1310,7 @@ dump_generic_node (pretty_printer *buffer, tree node, int spc, int flags, { tree minv = TYPE_MIN_VALUE (TYPE_DOMAIN (TREE_TYPE (node))); is_array_init = true; - curidx = tree_to_double_int (minv); + curidx = max_wide_int (minv); } FOR_EACH_CONSTRUCTOR_ELT (CONSTRUCTOR_ELTS (node), ix, field, val) { @@ -1330,7 +1324,7 @@ dump_generic_node (pretty_printer *buffer, tree node, int spc, int flags, } else if (is_array_init && (TREE_CODE (field) != INTEGER_CST - || tree_to_double_int (field) != curidx)) + || max_wide_int (field) != curidx)) { pp_left_bracket (buffer); if (TREE_CODE (field) == RANGE_EXPR) @@ -1341,17 +1335,17 @@ dump_generic_node (pretty_printer *buffer, tree node, int spc, int flags, dump_generic_node (buffer, TREE_OPERAND (field, 1), spc, flags, false); if (TREE_CODE (TREE_OPERAND (field, 1)) == INTEGER_CST) - curidx = tree_to_double_int (TREE_OPERAND (field, 1)); + curidx = TREE_OPERAND (field, 1); } else dump_generic_node (buffer, field, spc, flags, false); if (TREE_CODE (field) == INTEGER_CST) - curidx = tree_to_double_int (field); + curidx = field; pp_string (buffer, "]="); } } if (is_array_init) - curidx += double_int_one; + curidx += 1; if (val && TREE_CODE (val) == ADDR_EXPR) if (TREE_CODE (TREE_OPERAND (val, 0)) == FUNCTION_DECL) val = TREE_OPERAND (val, 0); diff --git a/gcc/tree-sra.c b/gcc/tree-sra.c index 2bce84843b9..87840091cd8 100644 --- a/gcc/tree-sra.c +++ b/gcc/tree-sra.c @@ -731,17 +731,17 @@ type_internals_preclude_sra_p (tree type, const char **msg) *msg = "zero structure field size"; return true; } - if (!host_integerp (DECL_FIELD_OFFSET (fld), 1)) + if (!tree_fits_uhwi_p (DECL_FIELD_OFFSET (fld))) { *msg = "structure field offset not fixed"; return true; } - if (!host_integerp (DECL_SIZE (fld), 1)) + if (!tree_fits_uhwi_p (DECL_SIZE (fld))) { *msg = "structure field size not fixed"; return true; } - if (!host_integerp (bit_position (fld), 0)) + if (!tree_fits_shwi_p (bit_position (fld))) { *msg = "structure field size too big"; return true; @@ -978,7 +978,7 @@ completely_scalarize_record (tree base, tree decl, HOST_WIDE_INT offset, struct access *access; HOST_WIDE_INT size; - size = tree_low_cst (DECL_SIZE (fld), 1); + size = tree_to_uhwi (DECL_SIZE (fld)); access = create_access_1 (base, pos, size); access->expr = nref; access->type = ft; @@ -997,7 +997,7 @@ completely_scalarize_record (tree base, tree decl, HOST_WIDE_INT offset, static void completely_scalarize_var (tree var) { - HOST_WIDE_INT size = tree_low_cst (DECL_SIZE (var), 1); + HOST_WIDE_INT size = tree_to_uhwi (DECL_SIZE (var)); struct access *access; access = create_access_1 (var, 0, size); @@ -1345,11 +1345,11 @@ compare_access_positions (const void *a, const void *b) return TYPE_PRECISION (f2->type) - TYPE_PRECISION (f1->type); /* Put any integral type with non-full precision last. 
*/ else if (INTEGRAL_TYPE_P (f1->type) - && (TREE_INT_CST_LOW (TYPE_SIZE (f1->type)) + && (tree_to_hwi (TYPE_SIZE (f1->type)) != TYPE_PRECISION (f1->type))) return 1; else if (INTEGRAL_TYPE_P (f2->type) - && (TREE_INT_CST_LOW (TYPE_SIZE (f2->type)) + && (tree_to_hwi (TYPE_SIZE (f2->type)) != TYPE_PRECISION (f2->type))) return -1; /* Stabilize the sort. */ @@ -1411,7 +1411,7 @@ make_fancy_name_1 (tree expr) index = TREE_OPERAND (expr, 1); if (TREE_CODE (index) != INTEGER_CST) break; - sprintf (buffer, HOST_WIDE_INT_PRINT_DEC, TREE_INT_CST_LOW (index)); + sprintf (buffer, HOST_WIDE_INT_PRINT_DEC, tree_to_hwi (index)); obstack_grow (&name_obstack, buffer, strlen (buffer)); break; @@ -1425,7 +1425,7 @@ make_fancy_name_1 (tree expr) { obstack_1grow (&name_obstack, '$'); sprintf (buffer, HOST_WIDE_INT_PRINT_DEC, - TREE_INT_CST_LOW (TREE_OPERAND (expr, 1))); + tree_to_hwi (TREE_OPERAND (expr, 1))); obstack_grow (&name_obstack, buffer, strlen (buffer)); } break; @@ -1620,14 +1620,14 @@ build_user_friendly_ref_for_offset (tree *res, tree type, HOST_WIDE_INT offset, continue; tr_pos = bit_position (fld); - if (!tr_pos || !host_integerp (tr_pos, 1)) + if (!tr_pos || !tree_fits_uhwi_p (tr_pos)) continue; - pos = TREE_INT_CST_LOW (tr_pos); + pos = tree_to_uhwi (tr_pos); gcc_assert (TREE_CODE (type) == RECORD_TYPE || pos == 0); tr_size = DECL_SIZE (fld); - if (!tr_size || !host_integerp (tr_size, 1)) + if (!tr_size || !tree_fits_uhwi_p (tr_size)) continue; - size = TREE_INT_CST_LOW (tr_size); + size = tree_to_uhwi (tr_size); if (size == 0) { if (pos != offset) @@ -1650,9 +1650,9 @@ build_user_friendly_ref_for_offset (tree *res, tree type, HOST_WIDE_INT offset, case ARRAY_TYPE: tr_size = TYPE_SIZE (TREE_TYPE (type)); - if (!tr_size || !host_integerp (tr_size, 1)) + if (!tr_size || !tree_fits_uhwi_p (tr_size)) return false; - el_size = tree_low_cst (tr_size, 1); + el_size = tree_to_uhwi (tr_size); minidx = TYPE_MIN_VALUE (TYPE_DOMAIN (type)); if (TREE_CODE (minidx) != INTEGER_CST || el_size == 0) @@ -1728,12 +1728,12 @@ maybe_add_sra_candidate (tree var) reject (var, "has incomplete type"); return false; } - if (!host_integerp (TYPE_SIZE (type), 1)) + if (!tree_fits_uhwi_p (TYPE_SIZE (type))) { reject (var, "type size not fixed"); return false; } - if (tree_low_cst (TYPE_SIZE (type), 1) == 0) + if (tree_to_uhwi (TYPE_SIZE (type)) == 0) { reject (var, "type size is zero"); return false; @@ -2088,7 +2088,7 @@ expr_with_var_bounded_array_refs_p (tree expr) while (handled_component_p (expr)) { if (TREE_CODE (expr) == ARRAY_REF - && !host_integerp (array_ref_low_bound (expr), 0)) + && !tree_fits_shwi_p (array_ref_low_bound (expr))) return true; expr = TREE_OPERAND (expr, 0); } @@ -2457,7 +2457,7 @@ analyze_all_variable_accesses (void) if (TREE_CODE (var) == VAR_DECL && type_consists_of_records_p (TREE_TYPE (var))) { - if ((unsigned) tree_low_cst (TYPE_SIZE (TREE_TYPE (var)), 1) + if ((unsigned) tree_to_uhwi (TYPE_SIZE (TREE_TYPE (var))) <= max_total_scalarization_size) { completely_scalarize_var (var); @@ -2768,12 +2768,12 @@ sra_modify_expr (tree *expr, gimple_stmt_iterator *gsi, bool write) { HOST_WIDE_INT start_offset, chunk_size; if (bfr - && host_integerp (TREE_OPERAND (bfr, 1), 1) - && host_integerp (TREE_OPERAND (bfr, 2), 1)) + && tree_fits_uhwi_p (TREE_OPERAND (bfr, 1)) + && tree_fits_uhwi_p (TREE_OPERAND (bfr, 2))) { - chunk_size = tree_low_cst (TREE_OPERAND (bfr, 1), 1); + chunk_size = tree_to_uhwi (TREE_OPERAND (bfr, 1)); start_offset = access->offset - + tree_low_cst (TREE_OPERAND (bfr, 2), 1); + + 
tree_to_uhwi (TREE_OPERAND (bfr, 2)); } else start_offset = chunk_size = 0; @@ -3662,8 +3662,8 @@ find_param_candidates (void) continue; if (!COMPLETE_TYPE_P (type) - || !host_integerp (TYPE_SIZE (type), 1) - || tree_low_cst (TYPE_SIZE (type), 1) == 0 + || !tree_fits_uhwi_p (TYPE_SIZE (type)) + || tree_to_uhwi (TYPE_SIZE (type)) == 0 || (AGGREGATE_TYPE_P (type) && type_internals_preclude_sra_p (type, &msg))) continue; @@ -4036,9 +4036,9 @@ splice_param_accesses (tree parm, bool *ro_grp) } if (POINTER_TYPE_P (TREE_TYPE (parm))) - agg_size = tree_low_cst (TYPE_SIZE (TREE_TYPE (TREE_TYPE (parm))), 1); + agg_size = tree_to_uhwi (TYPE_SIZE (TREE_TYPE (TREE_TYPE (parm)))); else - agg_size = tree_low_cst (TYPE_SIZE (TREE_TYPE (parm)), 1); + agg_size = tree_to_uhwi (TYPE_SIZE (TREE_TYPE (parm))); if (total_size >= agg_size) return NULL; @@ -4057,13 +4057,13 @@ decide_one_param_reduction (struct access *repr) tree parm; parm = repr->base; - cur_parm_size = tree_low_cst (TYPE_SIZE (TREE_TYPE (parm)), 1); + cur_parm_size = tree_to_uhwi (TYPE_SIZE (TREE_TYPE (parm))); gcc_assert (cur_parm_size > 0); if (POINTER_TYPE_P (TREE_TYPE (parm))) { by_ref = true; - agg_size = tree_low_cst (TYPE_SIZE (TREE_TYPE (TREE_TYPE (parm))), 1); + agg_size = tree_to_uhwi (TYPE_SIZE (TREE_TYPE (TREE_TYPE (parm)))); } else { @@ -4490,7 +4490,7 @@ sra_ipa_modify_expr (tree *expr, bool convert, if (TREE_CODE (base) == MEM_REF) { - offset += mem_ref_offset (base).low * BITS_PER_UNIT; + offset += mem_ref_offset (base).to_short_addr () * BITS_PER_UNIT; base = TREE_OPERAND (base, 0); } diff --git a/gcc/tree-ssa-address.c b/gcc/tree-ssa-address.c index cfd42ad21d5..94fe59f6ce7 100644 --- a/gcc/tree-ssa-address.c +++ b/gcc/tree-ssa-address.c @@ -189,15 +189,16 @@ addr_for_mem_ref (struct mem_address *addr, addr_space_t as, struct mem_addr_template *templ; if (addr->step && !integer_onep (addr->step)) - st = immed_double_int_const (tree_to_double_int (addr->step), pointer_mode); + st = immed_wide_int_const (wide_int (addr->step), pointer_mode); else st = NULL_RTX; if (addr->offset && !integer_zerop (addr->offset)) - off = immed_double_int_const - (tree_to_double_int (addr->offset) - .sext (TYPE_PRECISION (TREE_TYPE (addr->offset))), - pointer_mode); + { + addr_wide_int dc = addr_wide_int (addr->offset) + .sext (TYPE_PRECISION (TREE_TYPE (addr->offset))); + off = immed_wide_int_const (dc, pointer_mode); + } else off = NULL_RTX; @@ -394,7 +395,7 @@ move_fixed_address_to_symbol (struct mem_address *parts, aff_tree *addr) for (i = 0; i < addr->n; i++) { - if (!addr->elts[i].coef.is_one ()) + if (!addr->elts[i].coef.one_p ()) continue; val = addr->elts[i].val; @@ -422,7 +423,7 @@ move_hint_to_base (tree type, struct mem_address *parts, tree base_hint, for (i = 0; i < addr->n; i++) { - if (!addr->elts[i].coef.is_one ()) + if (!addr->elts[i].coef.one_p ()) continue; val = addr->elts[i].val; @@ -454,7 +455,7 @@ move_pointer_to_base (struct mem_address *parts, aff_tree *addr) for (i = 0; i < addr->n; i++) { - if (!addr->elts[i].coef.is_one ()) + if (!addr->elts[i].coef.one_p ()) continue; val = addr->elts[i].val; @@ -490,7 +491,7 @@ move_variant_to_index (struct mem_address *parts, aff_tree *addr, tree v) return; parts->index = fold_convert (sizetype, val); - parts->step = double_int_to_tree (sizetype, addr->elts[i].coef); + parts->step = wide_int_to_tree (sizetype, addr->elts[i].coef); aff_combination_remove_elt (addr, i); } @@ -533,16 +534,16 @@ most_expensive_mult_to_index (tree type, struct mem_address *parts, addr_space_t as = 
TYPE_ADDR_SPACE (type); enum machine_mode address_mode = targetm.addr_space.address_mode (as); HOST_WIDE_INT coef; - double_int best_mult, amult, amult_neg; + addr_wide_int best_mult, amult, amult_neg; unsigned best_mult_cost = 0, acost; tree mult_elt = NULL_TREE, elt; unsigned i, j; enum tree_code op_code; - best_mult = double_int_zero; + best_mult = 0; for (i = 0; i < addr->n; i++) { - if (!addr->elts[i].coef.fits_shwi ()) + if (!addr->elts[i].coef.fits_shwi_p ()) continue; coef = addr->elts[i].coef.to_shwi (); @@ -555,7 +556,7 @@ most_expensive_mult_to_index (tree type, struct mem_address *parts, if (acost > best_mult_cost) { best_mult_cost = acost; - best_mult = addr->elts[i].coef; + best_mult = addr_wide_int::from_wide_int (addr->elts[i].coef); } } @@ -565,8 +566,8 @@ most_expensive_mult_to_index (tree type, struct mem_address *parts, /* Collect elements multiplied by best_mult. */ for (i = j = 0; i < addr->n; i++) { - amult = addr->elts[i].coef; - amult_neg = double_int_ext_for_comb (-amult, addr); + amult = addr_wide_int::from_wide_int (addr->elts[i].coef); + amult_neg = -amult.sext (TYPE_PRECISION (addr->type)); if (amult == best_mult) op_code = PLUS_EXPR; @@ -590,7 +591,7 @@ most_expensive_mult_to_index (tree type, struct mem_address *parts, addr->n = j; parts->index = mult_elt; - parts->step = double_int_to_tree (sizetype, best_mult); + parts->step = wide_int_to_tree (sizetype, best_mult); } /* Splits address ADDR for a memory access of type TYPE into PARTS. @@ -618,8 +619,8 @@ addr_to_parts (tree type, aff_tree *addr, tree iv_cand, parts->index = NULL_TREE; parts->step = NULL_TREE; - if (!addr->offset.is_zero ()) - parts->offset = double_int_to_tree (sizetype, addr->offset); + if (!addr->offset.zero_p ()) + parts->offset = wide_int_to_tree (sizetype, addr->offset); else parts->offset = NULL_TREE; @@ -650,9 +651,9 @@ addr_to_parts (tree type, aff_tree *addr, tree iv_cand, for (i = 0; i < addr->n; i++) { part = fold_convert (sizetype, addr->elts[i].val); - if (!addr->elts[i].coef.is_one ()) + if (!addr->elts[i].coef.one_p ()) part = fold_build2 (MULT_EXPR, sizetype, part, - double_int_to_tree (sizetype, addr->elts[i].coef)); + wide_int_to_tree (sizetype, addr->elts[i].coef)); add_to_parts (parts, part); } if (addr->rest) @@ -857,11 +858,11 @@ copy_ref_info (tree new_ref, tree old_ref) && !(TREE_CODE (new_ref) == TARGET_MEM_REF && (TMR_INDEX2 (new_ref) || (TMR_STEP (new_ref) - && (TREE_INT_CST_LOW (TMR_STEP (new_ref)) + && (tree_to_hwi (TMR_STEP (new_ref)) < align))))) { - unsigned int inc = (mem_ref_offset (old_ref) - - mem_ref_offset (new_ref)).low; + unsigned int inc = mem_ref_offset (old_ref).to_uhwi () + - mem_ref_offset (new_ref).to_uhwi (); adjust_ptr_info_misalignment (new_pi, inc); } else diff --git a/gcc/tree-ssa-alias.c b/gcc/tree-ssa-alias.c index 2ecd13915bc..d90992fed57 100644 --- a/gcc/tree-ssa-alias.c +++ b/gcc/tree-ssa-alias.c @@ -575,9 +575,9 @@ ao_ref_init_from_ptr_and_size (ao_ref *ref, tree ptr, tree size) ref->offset = 0; } if (size - && host_integerp (size, 0) - && TREE_INT_CST_LOW (size) * 8 / 8 == TREE_INT_CST_LOW (size)) - ref->max_size = ref->size = TREE_INT_CST_LOW (size) * 8; + && tree_fits_shwi_p (size) + && tree_to_shwi (size) * 8 / 8 == tree_to_shwi (size)) + ref->max_size = ref->size = tree_to_shwi (size) * 8; else ref->max_size = ref->size = -1; ref->ref_alias_set = 0; @@ -871,7 +871,7 @@ indirect_ref_may_alias_decl_p (tree ref1 ATTRIBUTE_UNUSED, tree base1, tree ptrtype1, dbase2; HOST_WIDE_INT offset1p = offset1, offset2p = offset2; HOST_WIDE_INT 
doffset1, doffset2; - double_int moff; + addr_wide_int moff; gcc_checking_assert ((TREE_CODE (base1) == MEM_REF || TREE_CODE (base1) == TARGET_MEM_REF) @@ -883,10 +883,10 @@ indirect_ref_may_alias_decl_p (tree ref1 ATTRIBUTE_UNUSED, tree base1, so that the resulting offset adjustment is positive. */ moff = mem_ref_offset (base1); moff = moff.lshift (BITS_PER_UNIT == 8 ? 3 : exact_log2 (BITS_PER_UNIT)); - if (moff.is_negative ()) - offset2p += (-moff).low; + if (moff.neg_p (SIGNED)) + offset2p += (-moff).to_short_addr (); else - offset1p += moff.low; + offset1p += moff.to_short_addr (); /* If only one reference is based on a variable, they cannot alias if the pointer access is beyond the extent of the variable access. @@ -957,12 +957,12 @@ indirect_ref_may_alias_decl_p (tree ref1 ATTRIBUTE_UNUSED, tree base1, if (TREE_CODE (dbase2) == MEM_REF || TREE_CODE (dbase2) == TARGET_MEM_REF) { - double_int moff = mem_ref_offset (dbase2); + addr_wide_int moff = mem_ref_offset (dbase2); moff = moff.lshift (BITS_PER_UNIT == 8 ? 3 : exact_log2 (BITS_PER_UNIT)); - if (moff.is_negative ()) - doffset1 -= (-moff).low; + if (moff.neg_p (SIGNED)) + doffset1 -= (-moff).to_short_addr (); else - doffset2 -= moff.low; + doffset2 -= moff.to_short_addr (); } /* If either reference is view-converted, give up now. */ @@ -1048,21 +1048,21 @@ indirect_refs_may_alias_p (tree ref1 ATTRIBUTE_UNUSED, tree base1, && operand_equal_p (TMR_INDEX2 (base1), TMR_INDEX2 (base2), 0)))))) { - double_int moff; + addr_wide_int moff; /* The offset embedded in MEM_REFs can be negative. Bias them so that the resulting offset adjustment is positive. */ moff = mem_ref_offset (base1); moff = moff.lshift (BITS_PER_UNIT == 8 ? 3 : exact_log2 (BITS_PER_UNIT)); - if (moff.is_negative ()) - offset2 += (-moff).low; + if (moff.neg_p (SIGNED)) + offset2 += (-moff).to_short_addr (); else - offset1 += moff.low; + offset1 += moff.to_short_addr (); moff = mem_ref_offset (base2); moff = moff.lshift (BITS_PER_UNIT == 8 ? 3 : exact_log2 (BITS_PER_UNIT)); - if (moff.is_negative ()) - offset1 += (-moff).low; + if (moff.neg_p (SIGNED)) + offset1 += (-moff).to_short_addr (); else - offset2 += moff.low; + offset2 += moff.to_short_addr (); return ranges_overlap_p (offset1, max_size1, offset2, max_size2); } if (!ptr_derefs_may_alias_p (ptr1, ptr2)) @@ -2005,15 +2005,15 @@ stmt_kills_ref_p_1 (gimple stmt, ao_ref *ref) if (!tree_int_cst_equal (TREE_OPERAND (base, 1), TREE_OPERAND (ref->base, 1))) { - double_int off1 = mem_ref_offset (base); + addr_wide_int off1 = mem_ref_offset (base); off1 = off1.lshift (BITS_PER_UNIT == 8 ? 3 : exact_log2 (BITS_PER_UNIT)); - off1 = off1 + double_int::from_shwi (offset); - double_int off2 = mem_ref_offset (ref->base); + off1 += offset; + addr_wide_int off2 = mem_ref_offset (ref->base); off2 = off2.lshift (BITS_PER_UNIT == 8 ? 
3 : exact_log2 (BITS_PER_UNIT)); - off2 = off2 + double_int::from_shwi (ref_offset); - if (off1.fits_shwi () && off2.fits_shwi ()) + off2 += ref_offset; + if (off1.fits_shwi_p () && off2.fits_shwi_p ()) { offset = off1.to_shwi (); ref_offset = off2.to_shwi (); @@ -2055,7 +2055,7 @@ stmt_kills_ref_p_1 (gimple stmt, ao_ref *ref) tree len = gimple_call_arg (stmt, 2); tree base = NULL_TREE; HOST_WIDE_INT offset = 0; - if (!host_integerp (len, 0)) + if (!tree_fits_shwi_p (len)) return false; if (TREE_CODE (dest) == ADDR_EXPR) base = get_addr_base_and_unit_offset (TREE_OPERAND (dest, 0), @@ -2065,7 +2065,7 @@ stmt_kills_ref_p_1 (gimple stmt, ao_ref *ref) if (base && base == ao_ref_base (ref)) { - HOST_WIDE_INT size = TREE_INT_CST_LOW (len); + HOST_WIDE_INT size = tree_to_hwi (len); if (offset <= ref->offset / BITS_PER_UNIT && (offset + size >= ((ref->offset + ref->max_size + BITS_PER_UNIT - 1) diff --git a/gcc/tree-ssa-ccp.c b/gcc/tree-ssa-ccp.c index 6472f484e33..7780a1e8734 100644 --- a/gcc/tree-ssa-ccp.c +++ b/gcc/tree-ssa-ccp.c @@ -98,6 +98,15 @@ along with GCC; see the file COPYING3. If not see array CONST_VAL[i].VALUE. That is fed into substitute_and_fold for final substitution and folding. + This algorithm uses wide-ints at the max precision of the target. + This means that, with one uninteresting exception, variables with + UNSIGNED types never go to VARYING because the bits above the + precision of the type of the variable are always zero. The + uninteresting case is a variable of UNSIGNED type that has the + maximum precision of the target. Such variables can go to VARYING, + but this causes no loss of information since these variables will + never be extended. + References: Constant propagation with conditional branches, @@ -130,7 +139,7 @@ along with GCC; see the file COPYING3. If not see #include "gimple-fold.h" #include "params.h" #include "hash-table.h" - +#include "wide-int-print.h" /* Possible lattice values. */ typedef enum @@ -148,9 +157,11 @@ struct prop_value_d { /* Propagated value. */ tree value; - /* Mask that applies to the propagated value during CCP. For - X with a CONSTANT lattice value X & ~mask == value & ~mask. */ - double_int mask; + /* Mask that applies to the propagated value during CCP. For X + with a CONSTANT lattice value X & ~mask == value & ~mask. The + zero bits in the mask cover constant values. The ones mean no + information. 
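The mask convention just described is directly testable: a concrete X satisfies a CONSTANT lattice element exactly when it agrees with the recorded value on every known bit, that is, on every zero bit of the mask. In one-word form (illustrative struct; max_wide_int is wider in the patch):

#include <cstdint>

struct bitlat { uint64_t value; uint64_t mask; };  /* mask bit set = unknown */

static bool
matches_p (bitlat l, uint64_t x)
{
  return (x & ~l.mask) == (l.value & ~l.mask);
}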
*/ + max_wide_int mask; }; typedef struct prop_value_d prop_value_t; @@ -185,18 +196,20 @@ dump_lattice_value (FILE *outf, const char *prefix, prop_value_t val) break; case CONSTANT: if (TREE_CODE (val.value) != INTEGER_CST - || val.mask.is_zero ()) + || val.mask.zero_p ()) { fprintf (outf, "%sCONSTANT ", prefix); print_generic_expr (outf, val.value, dump_flags); } else { - double_int cval = tree_to_double_int (val.value).and_not (val.mask); - fprintf (outf, "%sCONSTANT " HOST_WIDE_INT_PRINT_DOUBLE_HEX, - prefix, cval.high, cval.low); - fprintf (outf, " (" HOST_WIDE_INT_PRINT_DOUBLE_HEX ")", - val.mask.high, val.mask.low); + wide_int cval = (max_wide_int (val.value) + .and_not (val.mask)); + fprintf (outf, "%sCONSTANT ", prefix); + print_hex (cval, outf); + fprintf (outf, " ("); + print_hex (val.mask, outf); + fprintf (outf, ")"); } break; default: @@ -238,7 +251,7 @@ debug_lattice_value (prop_value_t val) static prop_value_t get_default_value (tree var) { - prop_value_t val = { UNINITIALIZED, NULL_TREE, { 0, 0 } }; + prop_value_t val = { UNINITIALIZED, NULL_TREE, 0 }; gimple stmt; stmt = SSA_NAME_DEF_STMT (var); @@ -255,7 +268,7 @@ get_default_value (tree var) else { val.lattice_val = VARYING; - val.mask = double_int_minus_one; + val.mask = -1; } } else if (is_gimple_assign (stmt) @@ -282,7 +295,7 @@ get_default_value (tree var) { /* Otherwise, VAR will never take on a constant value. */ val.lattice_val = VARYING; - val.mask = double_int_minus_one; + val.mask = -1; } return val; @@ -325,7 +338,7 @@ get_constant_value (tree var) if (val && val->lattice_val == CONSTANT && (TREE_CODE (val->value) != INTEGER_CST - || val->mask.is_zero ())) + || val->mask.zero_p ())) return val->value; return NULL_TREE; } @@ -339,7 +352,7 @@ set_value_varying (tree var) val->lattice_val = VARYING; val->value = NULL_TREE; - val->mask = double_int_minus_one; + val->mask = -1; } /* For float types, modify the value of VAL to make ccp work correctly @@ -416,8 +429,8 @@ valid_lattice_transition (prop_value_t old_val, prop_value_t new_val) /* Bit-lattices have to agree in the still valid bits. */ if (TREE_CODE (old_val.value) == INTEGER_CST && TREE_CODE (new_val.value) == INTEGER_CST) - return tree_to_double_int (old_val.value).and_not (new_val.mask) - == tree_to_double_int (new_val.value).and_not (new_val.mask); + return (max_wide_int (old_val.value).and_not (new_val.mask) + == max_wide_int (new_val.value).and_not (new_val.mask)); /* Otherwise constant values have to agree. */ return operand_equal_p (old_val.value, new_val.value, 0); @@ -442,9 +455,7 @@ set_lattice_value (tree var, prop_value_t new_val) && TREE_CODE (new_val.value) == INTEGER_CST && TREE_CODE (old_val->value) == INTEGER_CST) { - double_int diff; - diff = tree_to_double_int (new_val.value) - ^ tree_to_double_int (old_val->value); + max_wide_int diff = max_wide_int (new_val.value) ^ old_val->value; new_val.mask = new_val.mask | old_val->mask | diff; } @@ -456,7 +467,8 @@ set_lattice_value (tree var, prop_value_t new_val) || (new_val.lattice_val == CONSTANT && TREE_CODE (new_val.value) == INTEGER_CST && (TREE_CODE (old_val->value) != INTEGER_CST - || new_val.mask != old_val->mask))) + || new_val.mask + != old_val->mask))) { /* ??? We would like to delay creation of INTEGER_CSTs from partially constants here. 
*/ @@ -478,21 +490,21 @@ set_lattice_value (tree var, prop_value_t new_val) static prop_value_t get_value_for_expr (tree, bool); static prop_value_t bit_value_binop (enum tree_code, tree, tree, tree); -static void bit_value_binop_1 (enum tree_code, tree, double_int *, double_int *, - tree, double_int, double_int, - tree, double_int, double_int); +static void bit_value_binop_1 (enum tree_code, tree, max_wide_int *, max_wide_int *, + tree, max_wide_int, max_wide_int, + tree, max_wide_int, max_wide_int); -/* Return a double_int that can be used for bitwise simplifications +/* Return a max_wide_int that can be used for bitwise simplifications from VAL. */ -static double_int -value_to_double_int (prop_value_t val) +static max_wide_int +value_to_wide_int (prop_value_t val) { if (val.value && TREE_CODE (val.value) == INTEGER_CST) - return tree_to_double_int (val.value); - else - return double_int_zero; + return val.value; + + return 0; } /* Return the value for the address expression EXPR based on alignment @@ -510,14 +522,11 @@ get_value_from_alignment (tree expr) get_pointer_alignment_1 (expr, &align, &bitpos); val.mask = (POINTER_TYPE_P (type) || TYPE_UNSIGNED (type) - ? double_int::mask (TYPE_PRECISION (type)) - : double_int_minus_one) - .and_not (double_int::from_uhwi (align / BITS_PER_UNIT - 1)); - val.lattice_val = val.mask.is_minus_one () ? VARYING : CONSTANT; + ? max_wide_int::mask (TYPE_PRECISION (type), false) + : -1).and_not (align / BITS_PER_UNIT - 1); + val.lattice_val = val.mask.minus_one_p () ? VARYING : CONSTANT; if (val.lattice_val == CONSTANT) - val.value - = double_int_to_tree (type, - double_int::from_uhwi (bitpos / BITS_PER_UNIT)); + val.value = wide_int_to_tree (type, bitpos / BITS_PER_UNIT); else val.value = NULL_TREE; @@ -546,7 +555,7 @@ get_value_for_expr (tree expr, bool for_bits_p) { val.lattice_val = CONSTANT; val.value = expr; - val.mask = double_int_zero; + val.mask = 0; canonicalize_float_value (&val); } else if (TREE_CODE (expr) == ADDR_EXPR) @@ -554,7 +563,7 @@ get_value_for_expr (tree expr, bool for_bits_p) else { val.lattice_val = VARYING; - val.mask = double_int_minus_one; + val.mask = -1; val.value = NULL_TREE; } return val; @@ -782,7 +791,7 @@ do_dbg_cnt (void) if (!dbg_cnt (ccp)) { const_val[i].lattice_val = VARYING; - const_val[i].mask = double_int_minus_one; + const_val[i].mask = -1; const_val[i].value = NULL_TREE; } } @@ -821,11 +830,11 @@ ccp_finalize (void) /* Trailing constant bits specify the alignment, trailing value bits the misalignment. */ - tem = val->mask.low; + tem = val->mask.to_uhwi (); align = (tem & -tem); if (align > 1) set_ptr_info_alignment (get_ptr_info (name), align, - TREE_INT_CST_LOW (val->value) & (align - 1)); + tree_to_hwi (val->value) & (align - 1)); } /* Perform substitutions based on the known constant values. */ @@ -866,7 +875,7 @@ ccp_lattice_meet (prop_value_t *val1, prop_value_t *val2) { /* any M VARYING = VARYING. */ val1->lattice_val = VARYING; - val1->mask = double_int_minus_one; + val1->mask = -1; val1->value = NULL_TREE; } else if (val1->lattice_val == CONSTANT @@ -879,10 +888,10 @@ ccp_lattice_meet (prop_value_t *val1, prop_value_t *val2) For INTEGER_CSTs mask unequal bits. If no equal bits remain, drop to varying.
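In the stand-in terms of the earlier sketch, the INTEGER_CST meet that follows is one line: a result bit is unknown if it is unknown on either side or if the two known values disagree there. A sketch with invented names, not the pass's code:

#include <stdint.h>

/* Meet of two CONSTANT bit-lattice elements (V1, M1) and (V2, M2),
   uint64_t standing in for max_wide_int.  An all-ones *MASK means
   the meet degraded to VARYING, matching the minus_one_p test in
   the hunk below.  */
static void
meet_bits (uint64_t v1, uint64_t m1, uint64_t v2, uint64_t m2,
           uint64_t *val, uint64_t *mask)
{
  *mask = m1 | m2 | (v1 ^ v2);  /* unknown on either side, or unequal */
  *val = v1 & ~*mask;           /* the surviving known bits */
}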
*/ - val1->mask = val1->mask | val2->mask - | (tree_to_double_int (val1->value) - ^ tree_to_double_int (val2->value)); - if (val1->mask.is_minus_one ()) + val1->mask = (val1->mask | val2->mask + | (max_wide_int (val1->value) + ^ val2->value)); + if (val1->mask.minus_one_p ()) { val1->lattice_val = VARYING; val1->value = NULL_TREE; @@ -915,7 +924,7 @@ ccp_lattice_meet (prop_value_t *val1, prop_value_t *val2) { /* Any other combination is VARYING. */ val1->lattice_val = VARYING; - val1->mask = double_int_minus_one; + val1->mask = -1; val1->value = NULL_TREE; } } @@ -1070,8 +1079,8 @@ ccp_fold (gimple stmt) static void bit_value_unop_1 (enum tree_code code, tree type, - double_int *val, double_int *mask, - tree rtype, double_int rval, double_int rmask) + max_wide_int *val, max_wide_int *mask, + tree rtype, const max_wide_int &rval, const max_wide_int &rmask) { switch (code) { @@ -1082,33 +1091,32 @@ bit_value_unop_1 (enum tree_code code, tree type, case NEGATE_EXPR: { - double_int temv, temm; + max_wide_int temv, temm; /* Return ~rval + 1. */ bit_value_unop_1 (BIT_NOT_EXPR, type, &temv, &temm, type, rval, rmask); bit_value_binop_1 (PLUS_EXPR, type, val, mask, - type, temv, temm, - type, double_int_one, double_int_zero); + type, temv, temm, type, 1, 0); break; } CASE_CONVERT: { - bool uns; + signop sgn; /* First extend mask and value according to the original type. */ - uns = TYPE_UNSIGNED (rtype); - *mask = rmask.ext (TYPE_PRECISION (rtype), uns); - *val = rval.ext (TYPE_PRECISION (rtype), uns); + sgn = TYPE_SIGN (rtype); + *mask = rmask.ext (TYPE_PRECISION (rtype), sgn); + *val = rval.ext (TYPE_PRECISION (rtype), sgn); /* Then extend mask and value according to the target type. */ - uns = TYPE_UNSIGNED (type); - *mask = (*mask).ext (TYPE_PRECISION (type), uns); - *val = (*val).ext (TYPE_PRECISION (type), uns); + sgn = TYPE_SIGN (type); + *mask = (*mask).ext (TYPE_PRECISION (type), sgn); + *val = (*val).ext (TYPE_PRECISION (type), sgn); break; } default: - *mask = double_int_minus_one; + *mask = -1; break; } } @@ -1119,14 +1127,17 @@ bit_value_unop_1 (enum tree_code code, tree type, static void bit_value_binop_1 (enum tree_code code, tree type, - double_int *val, double_int *mask, - tree r1type, double_int r1val, double_int r1mask, - tree r2type, double_int r2val, double_int r2mask) + max_wide_int *val, max_wide_int *mask, + tree r1type, max_wide_int r1val, max_wide_int r1mask, + tree r2type, max_wide_int r2val, max_wide_int r2mask) { - bool uns = TYPE_UNSIGNED (type); - /* Assume we'll get a constant result. Use an initial varying value, - we fall back to varying in the end if necessary. */ - *mask = double_int_minus_one; + signop sgn = TYPE_SIGN (type); + int width = TYPE_PRECISION (type); + + /* Assume we'll get a constant result. Use an initial varying + value, we fall back to varying in the end if necessary.
*/ + *mask = -1; + switch (code) { case BIT_AND_EXPR: @@ -1152,13 +1163,35 @@ bit_value_binop_1 (enum tree_code code, tree type, case LROTATE_EXPR: case RROTATE_EXPR: - if (r2mask.is_zero ()) + if (r2mask.zero_p ()) { - HOST_WIDE_INT shift = r2val.low; - if (code == RROTATE_EXPR) - shift = -shift; - *mask = r1mask.lrotate (shift, TYPE_PRECISION (type)); - *val = r1val.lrotate (shift, TYPE_PRECISION (type)); + wide_int shift = r2val; + if (shift.zero_p ()) + { + *mask = r1mask; + *val = r1val; + } + else + { + if (shift.neg_p (SIGNED)) + { + shift = -shift; + if (code == RROTATE_EXPR) + code = LROTATE_EXPR; + else + code = RROTATE_EXPR; + } + if (code == RROTATE_EXPR) + { + *mask = r1mask.rrotate (shift, width); + *val = r1val.rrotate (shift, width); + } + else + { + *mask = r1mask.lrotate (shift, width); + *val = r1val.lrotate (shift, width); + } + } } break; @@ -1167,53 +1200,56 @@ bit_value_binop_1 (enum tree_code code, tree type, /* ??? We can handle partially known shift counts if we know its sign. That way we can tell that (x << (y | 8)) & 255 is zero. */ - if (r2mask.is_zero ()) + if (r2mask.zero_p ()) { - HOST_WIDE_INT shift = r2val.low; - if (code == RSHIFT_EXPR) - shift = -shift; - /* We need to know if we are doing a left or a right shift - to properly shift in zeros for left shift and unsigned - right shifts and the sign bit for signed right shifts. - For signed right shifts we shift in varying in case - the sign bit was varying. */ - if (shift > 0) - { - *mask = r1mask.llshift (shift, TYPE_PRECISION (type)); - *val = r1val.llshift (shift, TYPE_PRECISION (type)); - } - else if (shift < 0) - { - shift = -shift; - *mask = r1mask.rshift (shift, TYPE_PRECISION (type), !uns); - *val = r1val.rshift (shift, TYPE_PRECISION (type), !uns); - } - else + wide_int shift = r2val; + if (shift.zero_p ()) { *mask = r1mask; *val = r1val; } + else + { + if (shift.neg_p (SIGNED)) + { + shift = -shift; + if (code == RSHIFT_EXPR) + code = LSHIFT_EXPR; + else + code = RSHIFT_EXPR; + } + if (code == RSHIFT_EXPR) + { + *mask = r1mask.ext (width, sgn).rshift (shift, sgn); + *val = r1val.ext (width, sgn).rshift (shift, sgn); + } + else + { + *mask = r1mask.lshift (shift).sext (width); + *val = r1val.lshift (shift).sext (width); + } + } } break; case PLUS_EXPR: case POINTER_PLUS_EXPR: { - double_int lo, hi; + max_wide_int lo, hi; /* Do the addition with unknown bits set to zero, to give carry-ins of zero wherever possible. */ lo = r1val.and_not (r1mask) + r2val.and_not (r2mask); - lo = lo.ext (TYPE_PRECISION (type), uns); + lo = lo.ext (width, sgn); /* Do the addition with unknown bits set to one, to give carry-ins of one wherever possible. */ hi = (r1val | r1mask) + (r2val | r2mask); - hi = hi.ext (TYPE_PRECISION (type), uns); + hi = hi.ext (width, sgn); /* Each bit in the result is known if (a) the corresponding bits in both inputs are known, and (b) the carry-in to that bit position is known. We can check condition (b) by seeing if we got the same result with minimised carries as with maximised carries. */ *mask = r1mask | r2mask | (lo ^ hi); - *mask = (*mask).ext (TYPE_PRECISION (type), uns); + *mask = (*mask).ext (width, sgn); /* It shouldn't matter whether we choose lo or hi here. 
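The PLUS_EXPR case just above deserves a stand-alone restatement: add once with every unknown bit cleared (minimal carries) and once with every unknown bit set (maximal carries); a result bit is known only where both inputs are known and the two sums agree, i.e. where the carry into that position is stable. The same computation over uint64_t, with invented names:

#include <stdint.h>

/* Bitwise-known addition over (value, mask) pairs, uint64_t standing
   in for max_wide_int; VAL/MASK are out-parameters in the style of
   bit_value_binop_1.  */
static void
add_bits (uint64_t v1, uint64_t m1, uint64_t v2, uint64_t m2,
          uint64_t *val, uint64_t *mask)
{
  uint64_t lo = (v1 & ~m1) + (v2 & ~m2);  /* carry-ins of 0 everywhere */
  uint64_t hi = (v1 | m1) + (v2 | m2);    /* carry-ins of 1 everywhere */
  *mask = m1 | m2 | (lo ^ hi);  /* unknown inputs or unstable carries */
  *val = lo;                    /* hi agrees wherever *mask is 0 */
}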
*/ *val = lo; break; @@ -1221,7 +1257,7 @@ bit_value_binop_1 (enum tree_code code, tree type, case MINUS_EXPR: { - double_int temv, temm; + max_wide_int temv, temm; bit_value_unop_1 (NEGATE_EXPR, r2type, &temv, &temm, r2type, r2val, r2mask); bit_value_binop_1 (PLUS_EXPR, type, val, mask, @@ -1234,18 +1270,17 @@ bit_value_binop_1 (enum tree_code code, tree type, { /* Just track trailing zeros in both operands and transfer them to the other. */ - int r1tz = (r1val | r1mask).trailing_zeros (); - int r2tz = (r2val | r2mask).trailing_zeros (); - if (r1tz + r2tz >= HOST_BITS_PER_DOUBLE_INT) + int r1tz = (r1val | r1mask).ctz ().to_shwi (); + int r2tz = (r2val | r2mask).ctz ().to_shwi (); + if (r1tz + r2tz >= width) { - *mask = double_int_zero; - *val = double_int_zero; + *mask = 0; + *val = 0; } else if (r1tz + r2tz > 0) { - *mask = ~double_int::mask (r1tz + r2tz); - *mask = (*mask).ext (TYPE_PRECISION (type), uns); - *val = double_int_zero; + *mask = max_wide_int::mask (r1tz + r2tz, true).ext (width, sgn); + *val = 0; } break; } @@ -1253,71 +1288,78 @@ bit_value_binop_1 (enum tree_code code, tree type, case EQ_EXPR: case NE_EXPR: { - double_int m = r1mask | r2mask; + max_wide_int m = r1mask | r2mask; if (r1val.and_not (m) != r2val.and_not (m)) { - *mask = double_int_zero; - *val = ((code == EQ_EXPR) ? double_int_zero : double_int_one); + *mask = 0; + *val = ((code == EQ_EXPR) ? 0 : 1); } else { /* We know the result of a comparison is always one or zero. */ - *mask = double_int_one; - *val = double_int_zero; + *mask = 1; + *val = 0; } break; } case GE_EXPR: case GT_EXPR: - { - double_int tem = r1val; - r1val = r2val; - r2val = tem; - tem = r1mask; - r1mask = r2mask; - r2mask = tem; - code = swap_tree_comparison (code); - } - /* Fallthru. */ case LT_EXPR: case LE_EXPR: { + max_wide_int o1val, o2val, o1mask, o2mask; int minmax, maxmin; + + if ((code == GE_EXPR) || (code == GT_EXPR)) + { + o1val = r2val; + o1mask = r2mask; + o2val = r1val; + o2mask = r1mask; + code = swap_tree_comparison (code); + } + else + { + o1val = r1val; + o1mask = r1mask; + o2val = r2val; + o2mask = r2mask; + } /* If the most significant bits are not known we know nothing. */ - if (r1mask.is_negative () || r2mask.is_negative ()) + if (o1mask.neg_p (SIGNED) || o2mask.neg_p (SIGNED)) break; /* For comparisons the signedness is in the comparison operands. */ - uns = TYPE_UNSIGNED (r1type); + sgn = TYPE_SIGN (r1type); /* If we know the most significant bits we know the values value ranges by means of treating varying bits as zero or one. Do a cross comparison of the max/min pairs. */ - maxmin = (r1val | r1mask).cmp (r2val.and_not (r2mask), uns); - minmax = r1val.and_not (r1mask).cmp (r2val | r2mask, uns); - if (maxmin < 0) /* r1 is less than r2. */ + maxmin = (o1val | o1mask).cmp (o2val.and_not (o2mask), sgn); + minmax = o1val.and_not (o1mask).cmp (o2val | o2mask, sgn); + if (maxmin < 0) /* o1 is less than o2. */ { - *mask = double_int_zero; - *val = double_int_one; + *mask = 0; + *val = 1; } - else if (minmax > 0) /* r1 is not less or equal to r2. */ + else if (minmax > 0) /* o1 is not less or equal to o2. */ { - *mask = double_int_zero; - *val = double_int_zero; + *mask = 0; + *val = 0; } - else if (maxmin == minmax) /* r1 and r2 are equal. */ + else if (maxmin == minmax) /* o1 and o2 are equal. */ { /* This probably should never happen as we'd have folded the thing during fully constant value folding. */ - *mask = double_int_zero; - *val = (code == LE_EXPR ? 
double_int_one : double_int_zero); + *mask = 0; + *val = (code == LE_EXPR ? 1 : 0); } else { /* We know the result of a comparison is always one or zero. */ - *mask = double_int_one; - *val = double_int_zero; + *mask = 1; + *val = 0; } break; } @@ -1333,7 +1375,7 @@ static prop_value_t bit_value_unop (enum tree_code code, tree type, tree rhs) { prop_value_t rval = get_value_for_expr (rhs, true); - double_int value, mask; + max_wide_int value, mask; prop_value_t val; if (rval.lattice_val == UNDEFINED) @@ -1341,21 +1383,21 @@ bit_value_unop (enum tree_code code, tree type, tree rhs) gcc_assert ((rval.lattice_val == CONSTANT && TREE_CODE (rval.value) == INTEGER_CST) - || rval.mask.is_minus_one ()); + || rval.mask.minus_one_p ()); bit_value_unop_1 (code, type, &value, &mask, - TREE_TYPE (rhs), value_to_double_int (rval), rval.mask); - if (!mask.is_minus_one ()) + TREE_TYPE (rhs), value_to_wide_int (rval), rval.mask); + if (!mask.minus_one_p ()) { val.lattice_val = CONSTANT; val.mask = mask; /* ??? Delay building trees here. */ - val.value = double_int_to_tree (type, value); + val.value = wide_int_to_tree (type, value); } else { val.lattice_val = VARYING; val.value = NULL_TREE; - val.mask = double_int_minus_one; + val.mask = -1; } return val; } @@ -1368,7 +1410,7 @@ bit_value_binop (enum tree_code code, tree type, tree rhs1, tree rhs2) { prop_value_t r1val = get_value_for_expr (rhs1, true); prop_value_t r2val = get_value_for_expr (rhs2, true); - double_int value, mask; + max_wide_int value, mask; prop_value_t val; if (r1val.lattice_val == UNDEFINED @@ -1376,31 +1418,31 @@ bit_value_binop (enum tree_code code, tree type, tree rhs1, tree rhs2) { val.lattice_val = VARYING; val.value = NULL_TREE; - val.mask = double_int_minus_one; + val.mask = -1; return val; } gcc_assert ((r1val.lattice_val == CONSTANT && TREE_CODE (r1val.value) == INTEGER_CST) - || r1val.mask.is_minus_one ()); + || r1val.mask.minus_one_p ()); gcc_assert ((r2val.lattice_val == CONSTANT && TREE_CODE (r2val.value) == INTEGER_CST) - || r2val.mask.is_minus_one ()); + || r2val.mask.minus_one_p ()); bit_value_binop_1 (code, type, &value, &mask, - TREE_TYPE (rhs1), value_to_double_int (r1val), r1val.mask, - TREE_TYPE (rhs2), value_to_double_int (r2val), r2val.mask); - if (!mask.is_minus_one ()) + TREE_TYPE (rhs1), value_to_wide_int (r1val), r1val.mask, + TREE_TYPE (rhs2), value_to_wide_int (r2val), r2val.mask); + if (mask != -1) { val.lattice_val = CONSTANT; val.mask = mask; /* ??? Delay building trees here. 
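Stepping back to the LT/LE folding above: it is interval reasoning. For an unsigned comparison, value & ~mask is the smallest value an operand can take and value | mask the largest, so a cross comparison of the two intervals decides the result whenever they do not overlap. A scalar sketch (the real code first bails out when a sign bit is unknown, since the comparison may be signed):

#include <stdint.h>

/* Fold  op1 < op2  over (value, mask) pairs, unsigned only.
   Returns 1 (known true), 0 (known false) or -1 (unknown).  */
static int
lt_bits (uint64_t v1, uint64_t m1, uint64_t v2, uint64_t m2)
{
  if ((v1 | m1) < (v2 & ~m2))   /* max of op1 below min of op2 */
    return 1;
  if ((v1 & ~m1) >= (v2 | m2))  /* min of op1 not below max of op2 */
    return 0;
  return -1;
}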
*/ - val.value = double_int_to_tree (type, value); + val.value = wide_int_to_tree (type, value); } else { val.lattice_val = VARYING; val.value = NULL_TREE; - val.mask = double_int_minus_one; + val.mask = -1; } return val; } @@ -1416,49 +1458,50 @@ bit_value_assume_aligned (gimple stmt) unsigned HOST_WIDE_INT aligni, misaligni = 0; prop_value_t ptrval = get_value_for_expr (ptr, true); prop_value_t alignval; - double_int value, mask; + max_wide_int value, mask; prop_value_t val; + if (ptrval.lattice_val == UNDEFINED) return ptrval; gcc_assert ((ptrval.lattice_val == CONSTANT && TREE_CODE (ptrval.value) == INTEGER_CST) - || ptrval.mask.is_minus_one ()); + || ptrval.mask.minus_one_p ()); align = gimple_call_arg (stmt, 1); - if (!host_integerp (align, 1)) + if (!tree_fits_uhwi_p (align)) return ptrval; - aligni = tree_low_cst (align, 1); + aligni = tree_to_uhwi (align); if (aligni <= 1 || (aligni & (aligni - 1)) != 0) return ptrval; if (gimple_call_num_args (stmt) > 2) { misalign = gimple_call_arg (stmt, 2); - if (!host_integerp (misalign, 1)) + if (!tree_fits_uhwi_p (misalign)) return ptrval; - misaligni = tree_low_cst (misalign, 1); + misaligni = tree_to_uhwi (misalign); if (misaligni >= aligni) return ptrval; } align = build_int_cst_type (type, -aligni); alignval = get_value_for_expr (align, true); bit_value_binop_1 (BIT_AND_EXPR, type, &value, &mask, - type, value_to_double_int (ptrval), ptrval.mask, - type, value_to_double_int (alignval), alignval.mask); - if (!mask.is_minus_one ()) + type, value_to_wide_int (ptrval), ptrval.mask, + type, value_to_wide_int (alignval), alignval.mask); + if (!mask.minus_one_p ()) { val.lattice_val = CONSTANT; val.mask = mask; - gcc_assert ((mask.low & (aligni - 1)) == 0); - gcc_assert ((value.low & (aligni - 1)) == 0); - value.low |= misaligni; + gcc_assert ((mask.to_uhwi () & (aligni - 1)) == 0); + gcc_assert ((value.to_uhwi () & (aligni - 1)) == 0); + value |= misaligni; /* ??? Delay building trees here. */ - val.value = double_int_to_tree (type, value); + val.value = wide_int_to_tree (type, value); } else { val.lattice_val = VARYING; val.value = NULL_TREE; - val.mask = double_int_minus_one; + val.mask = -1; } return val; } @@ -1510,7 +1553,7 @@ evaluate_stmt (gimple stmt) /* The statement produced a constant value. */ val.lattice_val = CONSTANT; val.value = simplified; - val.mask = double_int_zero; + val.mask = 0; } } /* If the statement is likely to have a VARYING result, then do not @@ -1538,7 +1581,7 @@ evaluate_stmt (gimple stmt) /* The statement produced a constant value. */ val.lattice_val = CONSTANT; val.value = simplified; - val.mask = double_int_zero; + val.mask = 0; } } @@ -1550,7 +1593,7 @@ evaluate_stmt (gimple stmt) enum gimple_code code = gimple_code (stmt); val.lattice_val = VARYING; val.value = NULL_TREE; - val.mask = double_int_minus_one; + val.mask = -1; if (code == GIMPLE_ASSIGN) { enum tree_code subcode = gimple_assign_rhs_code (stmt); @@ -1606,20 +1649,19 @@ evaluate_stmt (gimple stmt) case BUILT_IN_STRNDUP: val.lattice_val = CONSTANT; val.value = build_int_cst (TREE_TYPE (gimple_get_lhs (stmt)), 0); - val.mask = double_int::from_shwi - (~(((HOST_WIDE_INT) MALLOC_ABI_ALIGNMENT) - / BITS_PER_UNIT - 1)); + val.mask = max_wide_int (~(((HOST_WIDE_INT) MALLOC_ABI_ALIGNMENT) + / BITS_PER_UNIT - 1)); break; case BUILT_IN_ALLOCA: case BUILT_IN_ALLOCA_WITH_ALIGN: align = (DECL_FUNCTION_CODE (fndecl) == BUILT_IN_ALLOCA_WITH_ALIGN - ? TREE_INT_CST_LOW (gimple_call_arg (stmt, 1)) + ? 
tree_to_hwi (gimple_call_arg (stmt, 1)) : BIGGEST_ALIGNMENT); val.lattice_val = CONSTANT; val.value = build_int_cst (TREE_TYPE (gimple_get_lhs (stmt)), 0); - val.mask = double_int::from_shwi (~(((HOST_WIDE_INT) align) - / BITS_PER_UNIT - 1)); + val.mask = max_wide_int (~(((HOST_WIDE_INT) align) + / BITS_PER_UNIT - 1)); break; /* These builtins return their first argument, unmodified. */ @@ -1654,12 +1696,12 @@ evaluate_stmt (gimple stmt) if (likelyvalue == UNDEFINED) { val.lattice_val = likelyvalue; - val.mask = double_int_zero; + val.mask = 0; } else { val.lattice_val = VARYING; - val.mask = double_int_minus_one; + val.mask = -1; } val.value = NULL_TREE; @@ -1782,10 +1824,10 @@ fold_builtin_alloca_with_align (gimple stmt) arg = get_constant_value (gimple_call_arg (stmt, 0)); if (arg == NULL_TREE || TREE_CODE (arg) != INTEGER_CST - || !host_integerp (arg, 1)) + || !tree_fits_uhwi_p (arg)) return NULL_TREE; - size = TREE_INT_CST_LOW (arg); + size = tree_to_hwi (arg); /* Heuristic: don't fold large allocas. */ threshold = (unsigned HOST_WIDE_INT)PARAM_VALUE (PARAM_LARGE_STACK_FRAME); @@ -1803,7 +1845,7 @@ fold_builtin_alloca_with_align (gimple stmt) n_elem = size * 8 / BITS_PER_UNIT; array_type = build_array_type_nelts (elem_type, n_elem); var = create_tmp_var (array_type, NULL); - DECL_ALIGN (var) = TREE_INT_CST_LOW (gimple_call_arg (stmt, 1)); + DECL_ALIGN (var) = tree_to_hwi (gimple_call_arg (stmt, 1)); { struct ptr_info_def *pi = SSA_NAME_PTR_INFO (lhs); if (pi != NULL && !pi->pt.anything) @@ -1838,7 +1880,7 @@ ccp_fold_stmt (gimple_stmt_iterator *gsi) fold more conditionals here. */ val = evaluate_stmt (stmt); if (val.lattice_val != CONSTANT - || !val.mask.is_zero ()) + || !val.mask.zero_p ()) return false; if (dump_file) @@ -2018,7 +2060,7 @@ visit_cond_stmt (gimple stmt, edge *taken_edge_p) block = gimple_bb (stmt); val = evaluate_stmt (stmt); if (val.lattice_val != CONSTANT - || !val.mask.is_zero ()) + || !val.mask.zero_p ()) return SSA_PROP_VARYING; /* Find which edge out of the conditional block will be taken and add it @@ -2090,7 +2132,7 @@ ccp_visit_stmt (gimple stmt, edge *taken_edge_p, tree *output_p) Mark them VARYING. */ FOR_EACH_SSA_TREE_OPERAND (def, stmt, iter, SSA_OP_ALL_DEFS) { - prop_value_t v = { VARYING, NULL_TREE, { -1, (HOST_WIDE_INT) -1 } }; + prop_value_t v = { VARYING, NULL_TREE, -1 }; set_lattice_value (def, v); } diff --git a/gcc/tree-ssa-forwprop.c b/gcc/tree-ssa-forwprop.c index c3e0fac0775..863a3ef18af 100644 --- a/gcc/tree-ssa-forwprop.c +++ b/gcc/tree-ssa-forwprop.c @@ -801,9 +801,9 @@ forward_propagate_addr_expr_1 (tree name, tree def_rhs, if ((def_rhs_base = get_addr_base_and_unit_offset (TREE_OPERAND (def_rhs, 0), &def_rhs_offset))) { - double_int off = mem_ref_offset (lhs); + addr_wide_int off = mem_ref_offset (lhs); tree new_ptr; - off += double_int::from_shwi (def_rhs_offset); + off += def_rhs_offset; if (TREE_CODE (def_rhs_base) == MEM_REF) { off += mem_ref_offset (def_rhs_base); @@ -813,7 +813,7 @@ forward_propagate_addr_expr_1 (tree name, tree def_rhs, new_ptr = build_fold_addr_expr (def_rhs_base); TREE_OPERAND (lhs, 0) = new_ptr; TREE_OPERAND (lhs, 1) - = double_int_to_tree (TREE_TYPE (TREE_OPERAND (lhs, 1)), off); + = wide_int_to_tree (TREE_TYPE (TREE_OPERAND (lhs, 1)), off); tidy_after_forward_propagate_addr (use_stmt); /* Continue propagating into the RHS if this was not the only use. 
*/ if (single_use_p) @@ -889,9 +889,9 @@ forward_propagate_addr_expr_1 (tree name, tree def_rhs, if ((def_rhs_base = get_addr_base_and_unit_offset (TREE_OPERAND (def_rhs, 0), &def_rhs_offset))) { - double_int off = mem_ref_offset (rhs); + addr_wide_int off = mem_ref_offset (rhs); tree new_ptr; - off += double_int::from_shwi (def_rhs_offset); + off += def_rhs_offset; if (TREE_CODE (def_rhs_base) == MEM_REF) { off += mem_ref_offset (def_rhs_base); @@ -901,7 +901,7 @@ forward_propagate_addr_expr_1 (tree name, tree def_rhs, new_ptr = build_fold_addr_expr (def_rhs_base); TREE_OPERAND (rhs, 0) = new_ptr; TREE_OPERAND (rhs, 1) - = double_int_to_tree (TREE_TYPE (TREE_OPERAND (rhs, 1)), off); + = wide_int_to_tree (TREE_TYPE (TREE_OPERAND (rhs, 1)), off); fold_stmt_inplace (use_stmt_gsi); tidy_after_forward_propagate_addr (use_stmt); return res; @@ -1429,8 +1429,8 @@ constant_pointer_difference (tree p1, tree p2) { p = TREE_OPERAND (q, 0); off = size_binop (PLUS_EXPR, off, - double_int_to_tree (sizetype, - mem_ref_offset (q))); + wide_int_to_tree (sizetype, + mem_ref_offset (q))); } else { @@ -1512,8 +1512,8 @@ simplify_builtin_call (gimple_stmt_iterator *gsi_p, tree callee2) char *src_buf; use_operand_p use_p; - if (!host_integerp (val2, 0) - || !host_integerp (len2, 1)) + if (!tree_fits_shwi_p (val2) + || !tree_fits_uhwi_p (len2)) break; if (is_gimple_call (stmt1)) { @@ -1532,15 +1532,15 @@ simplify_builtin_call (gimple_stmt_iterator *gsi_p, tree callee2) src1 = gimple_call_arg (stmt1, 1); len1 = gimple_call_arg (stmt1, 2); lhs1 = gimple_call_lhs (stmt1); - if (!host_integerp (len1, 1)) + if (!tree_fits_uhwi_p (len1)) break; str1 = string_constant (src1, &off1); if (str1 == NULL_TREE) break; - if (!host_integerp (off1, 1) + if (!tree_fits_uhwi_p (off1) || compare_tree_int (off1, TREE_STRING_LENGTH (str1) - 1) > 0 || compare_tree_int (len1, TREE_STRING_LENGTH (str1) - - tree_low_cst (off1, 1)) > 0 + - tree_to_uhwi (off1)) > 0 || TREE_CODE (TREE_TYPE (str1)) != ARRAY_TYPE || TYPE_MODE (TREE_TYPE (TREE_TYPE (str1))) != TYPE_MODE (char_type_node)) @@ -1554,7 +1554,7 @@ simplify_builtin_call (gimple_stmt_iterator *gsi_p, tree callee2) src1 = gimple_assign_rhs1 (stmt1); if (TREE_CODE (ptr1) != MEM_REF || TYPE_MODE (TREE_TYPE (ptr1)) != TYPE_MODE (char_type_node) - || !host_integerp (src1, 0)) + || !tree_fits_shwi_p (src1)) break; ptr1 = build_fold_addr_expr (ptr1); callee1 = NULL_TREE; @@ -1578,16 +1578,16 @@ simplify_builtin_call (gimple_stmt_iterator *gsi_p, tree callee2) /* If the difference between the second and first destination pointer is not constant, or is bigger than memcpy length, bail out. */ if (diff == NULL - || !host_integerp (diff, 1) + || !tree_fits_uhwi_p (diff) || tree_int_cst_lt (len1, diff)) break; /* Use maximum of difference plus memset length and memcpy length as the new memcpy length, if it is too big, bail out. 
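The computation that follows is easiest to check as plain interval arithmetic: the memset starts diff bytes into the memcpy destination, so the merged store covers max (diff + len2, len1) bytes. A scalar sketch of the same bail-out logic, with function and parameter names invented here:

#include <stdbool.h>

/* Merged store length for  memcpy (p, src, LEN1)  followed by
   memset (p + DIFF, c, LEN2).  The pass has already given up when
   DIFF > LEN1 (a gap between the stores), and it refuses to build
   oversized replacement strings.  */
static bool
merged_store_len (unsigned long len1, unsigned long diff,
                  unsigned long len2, unsigned long *src_len)
{
  if (diff > len1)
    return false;
  *src_len = diff + len2;
  if (*src_len < len1)
    *src_len = len1;        /* the memcpy tail survives past the memset */
  return *src_len <= 1024;  /* same size cap the pass applies */
}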
*/ - src_len = tree_low_cst (diff, 1); - src_len += tree_low_cst (len2, 1); - if (src_len < (unsigned HOST_WIDE_INT) tree_low_cst (len1, 1)) - src_len = tree_low_cst (len1, 1); + src_len = tree_to_uhwi (diff); + src_len += tree_to_uhwi (len2); + if (src_len < (unsigned HOST_WIDE_INT) tree_to_uhwi (len1)) + src_len = tree_to_uhwi (len1); if (src_len > 1024) break; @@ -1613,12 +1613,12 @@ simplify_builtin_call (gimple_stmt_iterator *gsi_p, tree callee2) src_buf = XALLOCAVEC (char, src_len + 1); if (callee1) memcpy (src_buf, - TREE_STRING_POINTER (str1) + tree_low_cst (off1, 1), - tree_low_cst (len1, 1)); + TREE_STRING_POINTER (str1) + tree_to_uhwi (off1), + tree_to_uhwi (len1)); else - src_buf[0] = tree_low_cst (src1, 0); - memset (src_buf + tree_low_cst (diff, 1), - tree_low_cst (val2, 0), tree_low_cst (len2, 1)); + src_buf[0] = tree_to_shwi (src1); + memset (src_buf + tree_to_uhwi (diff), + tree_to_shwi (val2), tree_to_uhwi (len2)); src_buf[src_len] = '\0'; /* Neither builtin_strncpy_read_str nor builtin_memcpy_read_str handle embedded '\0's. */ @@ -2302,10 +2302,10 @@ simplify_rotate (gimple_stmt_iterator *gsi) return false; /* CNT1 + CNT2 == B case above. */ - if (host_integerp (def_arg2[0], 1) - && host_integerp (def_arg2[1], 1) - && (unsigned HOST_WIDE_INT) tree_low_cst (def_arg2[0], 1) - + tree_low_cst (def_arg2[1], 1) == TYPE_PRECISION (rtype)) + if (tree_fits_uhwi_p (def_arg2[0]) + && tree_fits_uhwi_p (def_arg2[1]) + && (unsigned HOST_WIDE_INT) tree_to_uhwi (def_arg2[0]) + + tree_to_uhwi (def_arg2[1]) == TYPE_PRECISION (rtype)) rotcnt = def_arg2[0]; else if (TREE_CODE (def_arg2[0]) != SSA_NAME || TREE_CODE (def_arg2[1]) != SSA_NAME) @@ -2339,8 +2339,8 @@ simplify_rotate (gimple_stmt_iterator *gsi) /* Check for one shift count being Y and the other B - Y, with optional casts. */ if (cdef_code[i] == MINUS_EXPR - && host_integerp (cdef_arg1[i], 0) - && tree_low_cst (cdef_arg1[i], 0) == TYPE_PRECISION (rtype) + && tree_fits_shwi_p (cdef_arg1[i]) + && tree_to_shwi (cdef_arg1[i]) == TYPE_PRECISION (rtype) && TREE_CODE (cdef_arg2[i]) == SSA_NAME) { tree tem; @@ -2371,8 +2371,8 @@ simplify_rotate (gimple_stmt_iterator *gsi) This alternative is safe even for rotation count of 0. One shift count is Y and the other (-Y) & (B - 1). 
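The shape named here, one count Y and the other (-Y) & (B - 1), is the standard well-defined rotate idiom that simplify_rotate matches; as ordinary user code (not GCC internals) it reads:

#include <stdint.h>

/* Rotate left by y, well defined even for y == 0:  -y & 31  equals
   32 - y for y in 1..31 and 0 for y == 0, so the right shift never
   becomes the undefined shift-by-32.  */
static uint32_t
rotl32 (uint32_t x, unsigned int y)
{
  return (x << (y & 31)) | (x >> (-y & 31));
}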
*/ else if (cdef_code[i] == BIT_AND_EXPR - && host_integerp (cdef_arg2[i], 0) - && tree_low_cst (cdef_arg2[i], 0) + && tree_fits_shwi_p (cdef_arg2[i]) + && tree_to_shwi (cdef_arg2[i]) == TYPE_PRECISION (rtype) - 1 && TREE_CODE (cdef_arg1[i]) == SSA_NAME && gimple_assign_rhs_code (stmt) == BIT_IOR_EXPR) @@ -2773,7 +2773,7 @@ associate_pointerplus (gimple_stmt_iterator *gsi) if (gimple_assign_rhs1 (def_stmt) != ptr) return false; - algn = double_int_to_tree (TREE_TYPE (ptr), ~tree_to_double_int (algn)); + algn = wide_int_to_tree (TREE_TYPE (ptr), ~wide_int (algn)); gimple_assign_set_rhs_with_ops (gsi, BIT_AND_EXPR, ptr, algn); fold_stmt_inplace (gsi); update_stmt (stmt); @@ -2935,8 +2935,9 @@ combine_conversions (gimple_stmt_iterator *gsi) tree tem; tem = fold_build2 (BIT_AND_EXPR, inside_type, defop0, - double_int_to_tree - (inside_type, double_int::mask (inter_prec))); + wide_int_to_tree + (inside_type, wide_int::mask (inter_prec, false, + TYPE_PRECISION (inside_type)))); if (!useless_type_conversion_p (type, inside_type)) { tem = force_gimple_operand_gsi (gsi, tem, true, NULL_TREE, true, @@ -3020,11 +3021,11 @@ simplify_bitfield_ref (gimple_stmt_iterator *gsi) if (TREE_TYPE (op) != elem_type) return false; - size = TREE_INT_CST_LOW (TYPE_SIZE (elem_type)); - n = TREE_INT_CST_LOW (op1) / size; + size = tree_to_hwi (TYPE_SIZE (elem_type)); + n = tree_to_hwi (op1) / size; if (n != 1) return false; - idx = TREE_INT_CST_LOW (op2) / size; + idx = tree_to_hwi (op2) / size; if (code == VEC_PERM_EXPR) { @@ -3034,7 +3035,7 @@ simplify_bitfield_ref (gimple_stmt_iterator *gsi) if (TREE_CODE (m) != VECTOR_CST) return false; nelts = VECTOR_CST_NELTS (m); - idx = TREE_INT_CST_LOW (VECTOR_CST_ELT (m, idx)); + idx = tree_to_hwi (VECTOR_CST_ELT (m, idx)); idx %= 2 * nelts; if (idx < nelts) { @@ -3078,7 +3079,7 @@ is_combined_permutation_identity (tree mask1, tree mask2) { tree val = VECTOR_CST_ELT (mask, i); gcc_assert (TREE_CODE (val) == INTEGER_CST); - j = TREE_INT_CST_LOW (val) & (2 * nelts - 1); + j = tree_to_hwi (val) & (2 * nelts - 1); if (j == i) maybe_identity2 = false; else if (j == i + nelts) @@ -3223,7 +3224,7 @@ simplify_vector_constructor (gimple_stmt_iterator *gsi) nelts = TYPE_VECTOR_SUBPARTS (type); elem_type = TREE_TYPE (type); - elem_size = TREE_INT_CST_LOW (TYPE_SIZE (elem_type)); + elem_size = tree_to_hwi (TYPE_SIZE (elem_type)); sel = XALLOCAVEC (unsigned char, nelts); orig = NULL; @@ -3258,9 +3259,9 @@ simplify_vector_constructor (gimple_stmt_iterator *gsi) return false; orig = ref; } - if (TREE_INT_CST_LOW (TREE_OPERAND (op1, 1)) != elem_size) + if (tree_to_hwi (TREE_OPERAND (op1, 1)) != elem_size) return false; - sel[i] = TREE_INT_CST_LOW (TREE_OPERAND (op1, 2)) / elem_size; + sel[i] = tree_to_hwi (TREE_OPERAND (op1, 2)) / elem_size; if (sel[i] != i) maybe_ident = false; } if (i < nelts) diff --git a/gcc/tree-ssa-loop-im.c b/gcc/tree-ssa-loop-im.c index e5e502b2901..e467afd1ec6 100644 --- a/gcc/tree-ssa-loop-im.c +++ b/gcc/tree-ssa-loop-im.c @@ -1647,7 +1647,7 @@ mem_refs_may_alias_p (mem_ref_p mem1, mem_ref_p mem2, /* Perform BASE + OFFSET analysis -- if MEM1 and MEM2 are based on the same object and their offset differ in such a way that the locations cannot overlap, then they cannot alias. */ - double_int size1, size2; + max_wide_int size1, size2; aff_tree off1, off2; /* Perform basic offset and type-based disambiguation. 
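The BASE+OFFSET disambiguation below forms off2 - off1 by scaling one affine combination by -1 and adding, then asks whether the access ranges can overlap. With constant offsets the test collapses to disjoint intervals; a scalar sketch (a hypothetical helper, not aff_comb_cannot_overlap_p itself):

#include <stdbool.h>

/* Accesses [OFF1, OFF1 + SIZE1) and [OFF2, OFF2 + SIZE2) off the
   same base cannot overlap iff one ends before the other starts.  */
static bool
cannot_overlap_p (long off1, long size1, long off2, long size2)
{
  long diff = off2 - off1;   /* what the scale-by--1-and-add builds */
  return diff >= size1 || -diff >= size2;
}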
*/ @@ -1663,7 +1663,7 @@ mem_refs_may_alias_p (mem_ref_p mem1, mem_ref_p mem2, get_inner_reference_aff (mem2->mem.ref, &off2, &size2); aff_combination_expand (&off1, ttae_cache); aff_combination_expand (&off2, ttae_cache); - aff_combination_scale (&off1, double_int_minus_one); + aff_combination_scale (&off1, -1); aff_combination_add (&off2, &off1); if (aff_comb_cannot_overlap_p (&off2, size1, size2)) diff --git a/gcc/tree-ssa-loop-ivcanon.c b/gcc/tree-ssa-loop-ivcanon.c index 735403a0284..8455558ebd9 100644 --- a/gcc/tree-ssa-loop-ivcanon.c +++ b/gcc/tree-ssa-loop-ivcanon.c @@ -488,7 +488,7 @@ remove_exits_and_undefined_stmts (struct loop *loop, unsigned int npeeled) into unreachable (or trap when debugging experience is supposed to be good). */ if (!elt->is_exit - && elt->bound.ult (double_int::from_uhwi (npeeled))) + && elt->bound.ltu_p (max_wide_int (npeeled))) { gimple_stmt_iterator gsi = gsi_for_stmt (elt->stmt); gimple stmt = gimple_build_call @@ -505,7 +505,7 @@ remove_exits_and_undefined_stmts (struct loop *loop, unsigned int npeeled) } /* If we know the exit will be taken after peeling, update. */ else if (elt->is_exit - && elt->bound.ule (double_int::from_uhwi (npeeled))) + && elt->bound.leu_p (max_wide_int (npeeled))) { basic_block bb = gimple_bb (elt->stmt); edge exit_edge = EDGE_SUCC (bb, 0); @@ -545,7 +545,7 @@ remove_redundant_iv_tests (struct loop *loop) /* Exit is pointless if it won't be taken before loop reaches upper bound. */ if (elt->is_exit && loop->any_upper_bound - && loop->nb_iterations_upper_bound.ult (elt->bound)) + && loop->nb_iterations_upper_bound.ltu_p (elt->bound)) { basic_block bb = gimple_bb (elt->stmt); edge exit_edge = EDGE_SUCC (bb, 0); @@ -562,8 +562,7 @@ remove_redundant_iv_tests (struct loop *loop) || !integer_zerop (niter.may_be_zero) || !niter.niter || TREE_CODE (niter.niter) != INTEGER_CST - || !loop->nb_iterations_upper_bound.ult - (tree_to_double_int (niter.niter))) + || !loop->nb_iterations_upper_bound.ltu_p (niter.niter)) continue; if (dump_file && (dump_flags & TDF_DETAILS)) @@ -672,9 +671,9 @@ try_unroll_loop_completely (struct loop *loop, If the number of execution of loop is determined by standard induction variable test, then EXIT and EDGE_TO_CANCEL are the two edges leaving from the iv test. */ - if (host_integerp (niter, 1)) + if (tree_fits_uhwi_p (niter)) { - n_unroll = tree_low_cst (niter, 1); + n_unroll = tree_to_uhwi (niter); n_unroll_found = true; edge_to_cancel = EDGE_SUCC (exit->src, 0); if (edge_to_cancel == exit) @@ -943,7 +942,7 @@ canonicalize_loop_induction_variables (struct loop *loop, by find_loop_niter_by_eval. Be sure to keep it for future. */ if (niter && TREE_CODE (niter) == INTEGER_CST) { - record_niter_bound (loop, tree_to_double_int (niter), + record_niter_bound (loop, niter, exit == single_likely_exit (loop), true); } diff --git a/gcc/tree-ssa-loop-ivopts.c b/gcc/tree-ssa-loop-ivopts.c index 7cfe80d9213..f2a24d76681 100644 --- a/gcc/tree-ssa-loop-ivopts.c +++ b/gcc/tree-ssa-loop-ivopts.c @@ -1589,19 +1589,19 @@ idx_record_use (tree base, tree *idx, signedness of TOP and BOT. 
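constant_multiple_of, next, decides whether TOP = MUL * BOT for a constant MUL, recursing through MULT_EXPR and PLUS/MINUS down to the divide-and-check-remainder leaf visible in the sdivmod_floor call. That leaf over plain 64-bit integers (a sketch; the real code divides trees at the type's precision):

#include <stdbool.h>
#include <stdint.h>

/* Leaf case: both operands are constants.  TOP is a constant
   multiple of BOT iff division leaves no remainder; with a zero
   remainder, floor and truncating division agree, so C's operators
   suffice here.  */
static bool
constant_multiple_of_cst (int64_t top, int64_t bot, int64_t *mul)
{
  if (bot == 0 || (top == INT64_MIN && bot == -1))
    return false;            /* no divisor, or the quotient overflows */
  *mul = top / bot;
  return top % bot == 0;
}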
static bool -constant_multiple_of (tree top, tree bot, double_int *mul) +constant_multiple_of (tree top, tree bot, max_wide_int *mul) { tree mby; enum tree_code code; - double_int res, p0, p1; unsigned precision = TYPE_PRECISION (TREE_TYPE (top)); + max_wide_int res, p0, p1; STRIP_NOPS (top); STRIP_NOPS (bot); if (operand_equal_p (top, bot, 0)) { - *mul = double_int_one; + *mul = 1; return true; } @@ -1616,7 +1616,7 @@ constant_multiple_of (tree top, tree bot, double_int *mul) if (!constant_multiple_of (TREE_OPERAND (top, 0), bot, &res)) return false; - *mul = (res * tree_to_double_int (mby)).sext (precision); + *mul = (res * mby).sext (precision); return true; case PLUS_EXPR: @@ -1634,12 +1634,12 @@ constant_multiple_of (tree top, tree bot, double_int *mul) if (TREE_CODE (bot) != INTEGER_CST) return false; - p0 = tree_to_double_int (top).sext (precision); - p1 = tree_to_double_int (bot).sext (precision); - if (p1.is_zero ()) + p0 = max_wide_int (top).sext (precision); + p1 = max_wide_int (bot).sext (precision); + if (p1.zero_p ()) return false; - *mul = p0.sdivmod (p1, FLOOR_DIV_EXPR, &res).sext (precision); - return res.is_zero (); + *mul = p0.sdivmod_floor (p1, &res).sext (precision); + return res.zero_p (); default: return false; @@ -2054,7 +2054,7 @@ strip_offset_1 (tree expr, bool inside_addr, bool top_compref, switch (code) { case INTEGER_CST: - if (!cst_and_fits_in_hwi (expr) + if (!cst_fits_shwi_p (expr) || integer_zerop (expr)) return orig_expr; @@ -2091,7 +2091,7 @@ strip_offset_1 (tree expr, bool inside_addr, bool top_compref, case MULT_EXPR: op1 = TREE_OPERAND (expr, 1); - if (!cst_and_fits_in_hwi (op1)) + if (!cst_fits_shwi_p (op1)) return orig_expr; op0 = TREE_OPERAND (expr, 0); @@ -2113,7 +2113,7 @@ strip_offset_1 (tree expr, bool inside_addr, bool top_compref, return orig_expr; step = array_ref_element_size (expr); - if (!cst_and_fits_in_hwi (step)) + if (!cst_fits_shwi_p (step)) break; st = int_cst_value (step); @@ -2138,7 +2138,7 @@ strip_offset_1 (tree expr, bool inside_addr, bool top_compref, tmp = component_ref_field_offset (expr); if (top_compref - && cst_and_fits_in_hwi (tmp)) + && cst_fits_shwi_p (tmp)) { /* Strip the component reference completely. */ op0 = TREE_OPERAND (expr, 0); @@ -2390,7 +2390,7 @@ add_autoinc_candidates (struct ivopts_data *data, tree base, tree step, if (use_bb->loop_father != data->current_loop || !dominated_by_p (CDI_DOMINATORS, data->current_loop->latch, use_bb) || stmt_could_throw_p (use->stmt) - || !cst_and_fits_in_hwi (step)) + || !cst_fits_shwi_p (step)) return; cstepi = int_cst_value (step); @@ -2992,7 +2995,7 @@ get_computation_aff (struct loop *loop, tree common_type, var; tree uutype; aff_tree cbase_aff, var_aff; - double_int rat; + max_wide_int rat; if (TYPE_PRECISION (utype) > TYPE_PRECISION (ctype)) { @@ -3493,6 +3496,7 @@ get_shiftadd_cost (tree expr, enum machine_mode mode, comp_cost cost0, res = add_costs (res, force_expr_to_var_cost (multop, speed)); *cost = res; + return true; } @@ -3607,6 +3611,7 @@ force_expr_to_var_cost (tree expr, bool speed) break; default: + /* Just an arbitrary value, FIXME.
*/ return new_cost (target_spill_cost[speed], 0); } @@ -3629,7 +3634,7 @@ force_expr_to_var_cost (tree expr, bool speed) mult = op0; if (mult != NULL_TREE - && cst_and_fits_in_hwi (TREE_OPERAND (mult, 1)) + && cst_fits_shwi_p (TREE_OPERAND (mult, 1)) && get_shiftadd_cost (expr, mode, cost0, cost1, mult, speed, &sa_cost)) return sa_cost; @@ -3637,10 +3642,10 @@ force_expr_to_var_cost (tree expr, bool speed) break; case MULT_EXPR: - if (cst_and_fits_in_hwi (op0)) + if (cst_fits_shwi_p (op0)) cost = new_cost (mult_by_coeff_cost (int_cst_value (op0), mode, speed), 0); - else if (cst_and_fits_in_hwi (op1)) + else if (cst_fits_shwi_p (op1)) cost = new_cost (mult_by_coeff_cost (int_cst_value (op1), mode, speed), 0); else @@ -3760,7 +3765,7 @@ ptr_difference_cost (struct ivopts_data *data, type = signed_type_for (TREE_TYPE (e1)); tree_to_aff_combination (e1, type, &aff_e1); tree_to_aff_combination (e2, type, &aff_e2); - aff_combination_scale (&aff_e2, double_int_minus_one); + aff_combination_scale (&aff_e2, -1); aff_combination_add (&aff_e1, &aff_e2); return force_var_cost (data, aff_combination_to_tree (&aff_e1), depends_on); @@ -3815,7 +3820,7 @@ difference_cost (struct ivopts_data *data, type = signed_type_for (TREE_TYPE (e1)); tree_to_aff_combination (e1, type, &aff_e1); tree_to_aff_combination (e2, type, &aff_e2); - aff_combination_scale (&aff_e2, double_int_minus_one); + aff_combination_scale (&aff_e2, -1); aff_combination_add (&aff_e1, &aff_e2); return force_var_cost (data, aff_combination_to_tree (&aff_e1), depends_on); @@ -3934,16 +3939,16 @@ get_loop_invariant_expr_id (struct ivopts_data *data, tree ubase, { tree ind = TREE_OPERAND (usym, 1); if (TREE_CODE (ind) == INTEGER_CST - && host_integerp (ind, 0) - && TREE_INT_CST_LOW (ind) == 0) + && tree_fits_shwi_p (ind) + && tree_to_shwi (ind) == 0) usym = TREE_OPERAND (usym, 0); } if (TREE_CODE (csym) == ARRAY_REF) { tree ind = TREE_OPERAND (csym, 1); if (TREE_CODE (ind) == INTEGER_CST - && host_integerp (ind, 0) - && TREE_INT_CST_LOW (ind) == 0) + && tree_fits_shwi_p (ind) + && tree_to_shwi (ind) == 0) csym = TREE_OPERAND (csym, 0); } if (operand_equal_p (usym, csym, 0)) @@ -3959,7 +3964,7 @@ get_loop_invariant_expr_id (struct ivopts_data *data, tree ubase, tree_to_aff_combination (ub, TREE_TYPE (ub), &ubase_aff); tree_to_aff_combination (cb, TREE_TYPE (cb), &cbase_aff); - aff_combination_scale (&cbase_aff, double_int::from_shwi (-1 * ratio)); + aff_combination_scale (&cbase_aff, -1 * ratio); aff_combination_add (&ubase_aff, &cbase_aff); expr = aff_combination_to_tree (&ubase_aff); return get_expr_id (data, expr); @@ -3989,7 +3994,7 @@ get_computation_cost_at (struct ivopts_data *data, HOST_WIDE_INT ratio, aratio; bool var_present, symbol_present, stmt_is_after_inc; comp_cost cost; - double_int rat; + max_wide_int rat; bool speed = optimize_bb_for_speed_p (gimple_bb (at)); enum machine_mode mem_mode = (address_p ? TYPE_MODE (TREE_TYPE (*use->op_p)) @@ -4040,7 +4045,7 @@ get_computation_cost_at (struct ivopts_data *data, redundancy elimination is likely to transform the code so that it uses value of the variable before increment anyway, so it is not that much unrealistic. 
- if (cst_and_fits_in_hwi (cstep)) + if (cst_fits_shwi_p (cstep)) cstepi = int_cst_value (cstep); else cstepi = 0; @@ -4048,7 +4053,7 @@ get_computation_cost_at (struct ivopts_data *data, if (!constant_multiple_of (ustep, cstep, &rat)) return infinite_cost; - if (rat.fits_shwi ()) + if (rat.fits_shwi_p ()) ratio = rat.to_shwi (); else return infinite_cost; @@ -4065,7 +4070,7 @@ get_computation_cost_at (struct ivopts_data *data, (also holds in the case ratio == -1, TODO. */ - if (cst_and_fits_in_hwi (cbase)) + if (cst_fits_shwi_p (cbase)) { offset = - ratio * int_cst_value (cbase); cost = difference_cost (data, @@ -4320,7 +4325,7 @@ iv_period (struct iv *iv) period = build_low_bits_mask (type, (TYPE_PRECISION (type) - - tree_low_cst (pow2div, 1))); + - tree_to_uhwi (pow2div))); return period; } @@ -4518,7 +4523,7 @@ iv_elimination_compare_lt (struct ivopts_data *data, /* We need to be able to decide whether candidate is increasing or decreasing in order to choose the right comparison operator. */ - if (!cst_and_fits_in_hwi (cand->iv->step)) + if (!cst_fits_shwi_p (cand->iv->step)) return false; step = int_cst_value (cand->iv->step); @@ -4558,11 +4563,11 @@ iv_elimination_compare_lt (struct ivopts_data *data, tree_to_aff_combination (niter->niter, nit_type, &nit); tree_to_aff_combination (fold_convert (nit_type, a), nit_type, &tmpa); tree_to_aff_combination (fold_convert (nit_type, b), nit_type, &tmpb); - aff_combination_scale (&nit, double_int_minus_one); - aff_combination_scale (&tmpa, double_int_minus_one); + aff_combination_scale (&nit, -1); + aff_combination_scale (&tmpa, -1); aff_combination_add (&tmpb, &tmpa); aff_combination_add (&tmpb, &nit); - if (tmpb.n != 0 || tmpb.offset != double_int_one) + if (tmpb.n != 0 || !tmpb.offset.one_p ()) return false; /* Finally, check that CAND->IV->BASE - CAND->IV->STEP * A does not @@ -4648,13 +4653,13 @@ may_eliminate_iv (struct ivopts_data *data, entire loop and compare against that instead. */ else { - double_int period_value, max_niter; + max_wide_int period_value, max_niter; max_niter = desc->max; if (stmt_after_increment (loop, cand, use->stmt)) - max_niter += double_int_one; - period_value = tree_to_double_int (period); - if (max_niter.ugt (period_value)) + max_niter += 1; + period_value = period; + if (max_niter.gtu_p (period_value)) { /* See if we can take advantage of inferred loop bound information. */ if (data->loop_single_exit_p) { if (!max_loop_iterations (loop, &max_niter)) return false; /* The loop bound is already adjusted by adding 1. */ - if (max_niter.ugt (period_value)) + if (max_niter.gtu_p (period_value)) return false; } else diff --git a/gcc/tree-ssa-loop-niter.c b/gcc/tree-ssa-loop-niter.c index 9d6f9efb089..fd2a98729b7 100644 --- a/gcc/tree-ssa-loop-niter.c +++ b/gcc/tree-ssa-loop-niter.c @@ -38,6 +38,7 @@ along with GCC; see the file COPYING3.
If not see #include "diagnostic-core.h" #include "tree-inline.h" #include "tree-pass.h" +#include "wide-int-print.h" #define SWAP(X, Y) do { affine_iv *tmp = (X); (X) = (Y); (Y) = tmp; } while (0) @@ -67,7 +68,7 @@ split_to_var_and_offset (tree expr, tree *var, mpz_t offset) { tree type = TREE_TYPE (expr); tree op0, op1; - double_int off; + max_wide_int off; bool negate = false; *var = expr; @@ -88,18 +89,18 @@ split_to_var_and_offset (tree expr, tree *var, mpz_t offset) break; *var = op0; + off = op1; /* Always sign extend the offset. */ - off = tree_to_double_int (op1); off = off.sext (TYPE_PRECISION (type)); - mpz_set_double_int (offset, off, false); + off.to_mpz (offset, SIGNED); if (negate) mpz_neg (offset, offset); break; case INTEGER_CST: *var = build_int_cst_type (type, 0); - off = tree_to_double_int (expr); - mpz_set_double_int (offset, off, TYPE_UNSIGNED (type)); + off = expr; + off.to_mpz (offset, TYPE_SIGN (type)); break; default: @@ -169,7 +170,7 @@ bound_difference_of_offsetted_base (tree type, mpz_t x, mpz_t y, } mpz_init (m); - mpz_set_double_int (m, double_int::mask (TYPE_PRECISION (type)), true); + wide_int::minus_one (TYPE_PRECISION (type)).to_mpz (m, UNSIGNED); mpz_add_ui (m, m, 1); mpz_sub (bnds->up, x, y); mpz_set (bnds->below, bnds->up); @@ -448,15 +449,15 @@ end: difference of two values in TYPE. */ static void -bounds_add (bounds *bnds, double_int delta, tree type) +bounds_add (bounds *bnds, max_wide_int delta, tree type) { mpz_t mdelta, max; mpz_init (mdelta); - mpz_set_double_int (mdelta, delta, false); + delta.to_mpz (mdelta, SIGNED); mpz_init (max); - mpz_set_double_int (max, double_int::mask (TYPE_PRECISION (type)), true); + max_wide_int::minus_one (TYPE_PRECISION (type)).to_mpz (max, UNSIGNED); mpz_add (bnds->up, bnds->up, mdelta); mpz_add (bnds->below, bnds->below, mdelta); @@ -501,8 +502,8 @@ inverse (tree x, tree mask) unsigned HOST_WIDE_INT imask; unsigned HOST_WIDE_INT irslt = 1; - gcc_assert (cst_and_fits_in_hwi (x)); - gcc_assert (cst_and_fits_in_hwi (mask)); + gcc_assert (cst_fits_shwi_p (x)); + gcc_assert (cst_fits_shwi_p (mask)); ix = int_cst_value (x); imask = int_cst_value (mask); @@ -550,7 +551,7 @@ static void number_of_iterations_ne_max (mpz_t bnd, bool no_overflow, tree c, tree s, bounds *bnds, bool exit_must_be_taken) { - double_int max; + max_wide_int max; mpz_t d; tree type = TREE_TYPE (c); bool bnds_u_valid = ((no_overflow && exit_must_be_taken) @@ -559,10 +560,8 @@ number_of_iterations_ne_max (mpz_t bnd, bool no_overflow, tree c, tree s, if (integer_onep (s) || (TREE_CODE (c) == INTEGER_CST && TREE_CODE (s) == INTEGER_CST - && tree_to_double_int (c).mod (tree_to_double_int (s), - TYPE_UNSIGNED (type), - EXACT_DIV_EXPR).is_zero ()) - || (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (c)) + && wide_int (c).mod_trunc (s, TYPE_SIGN (type)).zero_p ()) + || (TYPE_OVERFLOW_UNDEFINED (type) && multiple_of_p (type, c, s))) { /* If C is an exact multiple of S, then its value will be reached before @@ -580,15 +579,16 @@ number_of_iterations_ne_max (mpz_t bnd, bool no_overflow, tree c, tree s, the whole # of iterations analysis will fail). */ if (!no_overflow) { - max = double_int::mask (TYPE_PRECISION (type) - - tree_low_cst (num_ending_zeros (s), 1)); - mpz_set_double_int (bnd, max, true); + max = max_wide_int::mask (TYPE_PRECISION (type) + - max_wide_int (s).ctz ().to_uhwi (), + false); + max.to_mpz (bnd, UNSIGNED); return; } /* Now we know that the induction variable does not overflow, so the loop iterates at most (range of type / S) times. 
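The bound built right after this comment follows from modular arithmetic: each step adds S, so the IV stays in a cycle of length 2^(prec - ctz(S)), and a reachable final value is hit within that many iterations minus one. With uint64_t standing in for a prec-bit type (a sketch only, using GCC's __builtin_ctzll):

#include <stdint.h>

/* Upper bound on iterations of  v += s (mod 2^prec)  before v can
   first equal a reachable constant: 2^(prec - ctz (s)) - 1, the mask
   of prec - ctz (s) bits that the patch builds.  Assumes s is
   nonzero modulo 2^prec and 0 < prec <= 64.  */
static uint64_t
ne_iteration_bound (uint64_t s, unsigned int prec)
{
  unsigned int bits = prec - (unsigned int) __builtin_ctzll (s);
  return bits >= 64 ? ~UINT64_C (0) : (UINT64_C (1) << bits) - 1;
}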
*/ - mpz_set_double_int (bnd, double_int::mask (TYPE_PRECISION (type)), true); + wide_int::minus_one (TYPE_PRECISION (type)).to_mpz (bnd, UNSIGNED); /* If the induction variable is guaranteed to reach the value of C before overflow, ... */ @@ -597,13 +597,13 @@ number_of_iterations_ne_max (mpz_t bnd, bool no_overflow, tree c, tree s, /* ... then we can strengthen this to C / S, and possibly we can use the upper bound on C given by BNDS. */ if (TREE_CODE (c) == INTEGER_CST) - mpz_set_double_int (bnd, tree_to_double_int (c), true); + wide_int (c).to_mpz (bnd, UNSIGNED); else if (bnds_u_valid) mpz_set (bnd, bnds->up); } mpz_init (d); - mpz_set_double_int (d, tree_to_double_int (s), true); + wide_int (s).to_mpz (d, UNSIGNED); mpz_fdiv_q (bnd, bnd, d); mpz_clear (d); } @@ -654,7 +654,7 @@ number_of_iterations_ne (tree type, affine_iv *iv, tree final, mpz_init (max); number_of_iterations_ne_max (max, iv->no_overflow, c, s, bnds, exit_must_be_taken); - niter->max = mpz_get_double_int (niter_type, max, false); + niter->max = max_wide_int::from_mpz (niter_type, max, false); mpz_clear (max); /* First the trivial cases -- when the step is 1. */ @@ -670,7 +670,7 @@ number_of_iterations_ne (tree type, affine_iv *iv, tree final, bits = num_ending_zeros (s); bound = build_low_bits_mask (niter_type, (TYPE_PRECISION (niter_type) - - tree_low_cst (bits, 1))); + - tree_to_uhwi (bits))); d = fold_binary_to_constant (LSHIFT_EXPR, niter_type, build_int_cst (niter_type, 1), bits); @@ -727,7 +727,7 @@ number_of_iterations_lt_to_ne (tree type, affine_iv *iv0, affine_iv *iv1, tmod = fold_convert (type1, mod); mpz_init (mmod); - mpz_set_double_int (mmod, tree_to_double_int (mod), true); + wide_int (mod).to_mpz (mmod, UNSIGNED); mpz_neg (mmod, mmod); /* If the induction variable does not overflow and the exit is taken, @@ -809,7 +809,7 @@ number_of_iterations_lt_to_ne (tree type, affine_iv *iv0, affine_iv *iv1, niter->may_be_zero = fold_build2 (TRUTH_OR_EXPR, boolean_type_node, niter->may_be_zero, noloop); - bounds_add (bnds, tree_to_double_int (mod), type); + bounds_add (bnds, max_wide_int (mod), type); *delta = fold_build2 (PLUS_EXPR, niter_type, *delta, mod); ret = true; @@ -899,7 +899,7 @@ assert_loop_rolls_lt (tree type, affine_iv *iv0, affine_iv *iv1, tree assumption = boolean_true_node, bound, diff; tree mbz, mbzl, mbzr, type1; bool rolls_p, no_overflow_p; - double_int dstep; + max_wide_int dstep; mpz_t mstep, max; /* We are going to compute the number of iterations as @@ -925,22 +925,22 @@ assert_loop_rolls_lt (tree type, affine_iv *iv0, affine_iv *iv1, /* First check whether the answer does not follow from the bounds we gathered before. 
*/ if (integer_nonzerop (iv0->step)) - dstep = tree_to_double_int (iv0->step); + dstep = iv0->step; else { - dstep = tree_to_double_int (iv1->step).sext (TYPE_PRECISION (type)); + dstep = max_wide_int (iv1->step).sext (TYPE_PRECISION (type)); dstep = -dstep; } mpz_init (mstep); - mpz_set_double_int (mstep, dstep, true); + dstep.to_mpz (mstep, UNSIGNED); mpz_neg (mstep, mstep); mpz_add_ui (mstep, mstep, 1); rolls_p = mpz_cmp (mstep, bnds->below) <= 0; mpz_init (max); - mpz_set_double_int (max, double_int::mask (TYPE_PRECISION (type)), true); + wide_int::minus_one (TYPE_PRECISION (type)).to_mpz (max, UNSIGNED); mpz_add (max, max, mstep); no_overflow_p = (mpz_cmp (bnds->up, max) <= 0 /* For pointers, only values lying inside a single object @@ -1067,7 +1067,7 @@ number_of_iterations_lt (tree type, affine_iv *iv0, affine_iv *iv1, niter->may_be_zero = fold_build2 (LT_EXPR, boolean_type_node, iv1->base, iv0->base); niter->niter = delta; - niter->max = mpz_get_double_int (niter_type, bnds->up, false); + niter->max = max_wide_int::from_mpz (niter_type, bnds->up, false); return true; } @@ -1110,11 +1110,11 @@ number_of_iterations_lt (tree type, affine_iv *iv0, affine_iv *iv1, mpz_init (mstep); mpz_init (tmp); - mpz_set_double_int (mstep, tree_to_double_int (step), true); + wide_int (step).to_mpz (mstep, UNSIGNED); mpz_add (tmp, bnds->up, mstep); mpz_sub_ui (tmp, tmp, 1); mpz_fdiv_q (tmp, tmp, mstep); - niter->max = mpz_get_double_int (niter_type, tmp, false); + niter->max = max_wide_int::from_mpz (niter_type, tmp, false); mpz_clear (mstep); mpz_clear (tmp); @@ -1177,7 +1177,7 @@ number_of_iterations_le (tree type, affine_iv *iv0, affine_iv *iv1, iv0->base = fold_build2 (MINUS_EXPR, type1, iv0->base, build_int_cst (type1, 1)); - bounds_add (bnds, double_int_one, type1); + bounds_add (bnds, 1, type1); return number_of_iterations_lt (type, iv0, iv1, niter, exit_must_be_taken, bnds); @@ -1249,8 +1249,7 @@ number_of_iterations_cond (struct loop *loop, niter->assumptions = boolean_true_node; niter->may_be_zero = boolean_false_node; niter->niter = NULL_TREE; - niter->max = double_int_zero; - + niter->max = 0; niter->bound = NULL_TREE; niter->cmp = ERROR_MARK; @@ -1322,7 +1321,7 @@ number_of_iterations_cond (struct loop *loop, if (tem && integer_zerop (tem)) { niter->niter = build_int_cst (unsigned_type_for (type), 0); - niter->max = double_int_zero; + niter->max = 0; return true; } @@ -1398,7 +1397,7 @@ number_of_iterations_cond (struct loop *loop, fprintf (dump_file, " # of iterations "); print_generic_expr (dump_file, niter->niter, TDF_SLIM); fprintf (dump_file, ", bounded by "); - dump_double_int (dump_file, niter->max, true); + print_decu (niter->max, dump_file); fprintf (dump_file, "\n"); } else @@ -1910,7 +1909,7 @@ number_of_iterations_exit (struct loop *loop, edge exit, /* If NITER has simplified into a constant, update MAX. 
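As a reading aid for the mpz sequence in number_of_iterations_lt above (add the step, subtract one, floor-divide): it is ceiling division of the maximal remaining distance by the step. In scalar form:

/* Bound for  iv0 < iv1  with positive STEP: the initial distance is
   at most UP and every iteration closes STEP of it, so the exit is
   taken within ceil (UP / STEP) iterations.  Assumes STEP > 0 and
   that UP + STEP does not wrap.  */
static unsigned long
lt_niter_bound (unsigned long up, unsigned long step)
{
  return (up + step - 1) / step;
}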
*/ if (TREE_CODE (niter->niter) == INTEGER_CST) - niter->max = tree_to_double_int (niter->niter); + niter->max = niter->niter; if (integer_onep (niter->assumptions)) return true; @@ -2022,7 +2021,7 @@ find_loop_niter (struct loop *loop, edge *exit) bool finite_loop_p (struct loop *loop) { - double_int nit; + max_wide_int nit; int flags; if (flag_unsafe_loop_optimizations) @@ -2336,13 +2335,13 @@ find_loop_niter_by_eval (struct loop *loop, edge *exit) */ -static double_int derive_constant_upper_bound_ops (tree, tree, - enum tree_code, tree); +static max_wide_int derive_constant_upper_bound_ops (tree, tree, + enum tree_code, tree); /* Returns a constant upper bound on the value of the right-hand side of an assignment statement STMT. */ -static double_int +static max_wide_int derive_constant_upper_bound_assign (gimple stmt) { enum tree_code code = gimple_assign_rhs_code (stmt); @@ -2357,7 +2356,7 @@ derive_constant_upper_bound_assign (gimple stmt) is considered to be unsigned. If its type is signed, its value must be nonnegative. */ -static double_int +static max_wide_int derive_constant_upper_bound (tree val) { enum tree_code code; @@ -2371,12 +2370,12 @@ derive_constant_upper_bound (tree val) whose type is TYPE. The expression is considered to be unsigned. If its type is signed, its value must be nonnegative. */ -static double_int +static max_wide_int derive_constant_upper_bound_ops (tree type, tree op0, enum tree_code code, tree op1) { tree subtype, maxt; - double_int bnd, max, mmax, cst; + max_wide_int bnd, max, mmax, cst; gimple stmt; if (INTEGRAL_TYPE_P (type)) @@ -2384,12 +2383,12 @@ derive_constant_upper_bound_ops (tree type, tree op0, else maxt = upper_bound_in_type (type, type); - max = tree_to_double_int (maxt); + max = maxt; switch (code) { case INTEGER_CST: - return tree_to_double_int (op0); + return max_wide_int (op0); CASE_CONVERT: subtype = TREE_TYPE (op0); @@ -2411,7 +2410,7 @@ derive_constant_upper_bound_ops (tree type, tree op0, /* If the bound does not fit in TYPE, max. value of TYPE could be attained. */ - if (max.ult (bnd)) + if (max.ltu_p (bnd)) return max; return bnd; @@ -2426,25 +2425,25 @@ derive_constant_upper_bound_ops (tree type, tree op0, /* Canonicalize to OP0 - CST. Consider CST to be signed, in order to choose the most logical way how to treat this constant regardless of the signedness of the type. */ - cst = tree_to_double_int (op1); + cst = op1; cst = cst.sext (TYPE_PRECISION (type)); if (code != MINUS_EXPR) cst = -cst; bnd = derive_constant_upper_bound (op0); - if (cst.is_negative ()) + if (cst.neg_p (SIGNED)) { cst = -cst; /* Avoid CST == 0x80000... */ - if (cst.is_negative ()) + if (cst.neg_p (SIGNED)) return max;; /* OP0 + CST. We need to check that BND <= MAX (type) - CST. */ mmax -= cst; - if (bnd.ugt (mmax)) + if (bnd.gtu_p (mmax)) return max; return bnd + cst; @@ -2464,13 +2463,13 @@ derive_constant_upper_bound_ops (tree type, tree op0, /* This should only happen if the type is unsigned; however, for buggy programs that use overflowing signed arithmetics even with -fno-wrapv, this condition may also be true for signed values.
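The PLUS/MINUS case above is guarded interval arithmetic on an unsigned bound: canonicalize to OP0 - CST with CST signed, then either subtract with an underflow check or add with the BND <= MAX - CST overflow check. A scalar shadow of it, with invented names; returning MAX means no useful bound:

#include <stdint.h>

/* Upper bound for the canonicalized  OP0 - CST,  given BND, an upper
   bound on OP0, and MAX, the type's maximum value.  */
static uint64_t
upper_bound_sub (uint64_t bnd, int64_t cst, uint64_t max)
{
  uint64_t ucst;
  if (cst < 0)
    {
      /* Really OP0 + |CST|: require BND <= MAX - |CST| so the
         addition cannot wrap.  */
      if (cst == INT64_MIN)   /* negating CST would overflow; give up */
        return max;
      ucst = (uint64_t) -cst;
      return bnd > max - ucst ? max : bnd + ucst;
    }
  /* OP0 - CST: useless unless OP0 >= CST can be shown (the real code
     insists on a proof for unsigned types), since OP0 could wrap.  */
  ucst = (uint64_t) cst;
  return bnd < ucst ? max : bnd - ucst;
}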
*/ - if (bnd.ult (cst)) + if (bnd.ltu_p (cst)) return max; if (TYPE_UNSIGNED (type)) { tree tem = fold_binary (GE_EXPR, boolean_type_node, op0, - double_int_to_tree (type, cst)); + wide_int_to_tree (type, cst)); if (!tem || integer_nonzerop (tem)) return max; } @@ -2487,13 +2486,13 @@ derive_constant_upper_bound_ops (tree type, tree op0, return max; bnd = derive_constant_upper_bound (op0); - return bnd.udiv (tree_to_double_int (op1), FLOOR_DIV_EXPR); + return bnd.udiv_floor (max_wide_int (op1)); case BIT_AND_EXPR: if (TREE_CODE (op1) != INTEGER_CST || tree_int_cst_sign_bit (op1)) return max; - return tree_to_double_int (op1); + return op1; case SSA_NAME: stmt = SSA_NAME_DEF_STMT (op0); @@ -2513,21 +2512,21 @@ derive_constant_upper_bound_ops (tree type, tree op0, I_BOUND times. */ void -record_niter_bound (struct loop *loop, double_int i_bound, bool realistic, - bool upper) +record_niter_bound (struct loop *loop, const max_wide_int &i_bound, + bool realistic, bool upper) { /* Update the bounds only when there is no previous estimation, or when the current estimation is smaller. */ if (upper && (!loop->any_upper_bound - || i_bound.ult (loop->nb_iterations_upper_bound))) + || i_bound.ltu_p (loop->nb_iterations_upper_bound))) { loop->any_upper_bound = true; loop->nb_iterations_upper_bound = i_bound; } if (realistic && (!loop->any_estimate - || i_bound.ult (loop->nb_iterations_estimate))) + || i_bound.ltu_p (loop->nb_iterations_estimate))) { loop->any_estimate = true; loop->nb_iterations_estimate = i_bound; @@ -2537,7 +2536,7 @@ record_niter_bound (struct loop *loop, double_int i_bound, bool realistic, number of iterations, use the upper bound instead. */ if (loop->any_upper_bound && loop->any_estimate - && loop->nb_iterations_upper_bound.ult (loop->nb_iterations_estimate)) + && loop->nb_iterations_upper_bound.ltu_p (loop->nb_iterations_estimate)) loop->nb_iterations_estimate = loop->nb_iterations_upper_bound; } @@ -2545,7 +2544,7 @@ record_niter_bound (struct loop *loop, double_int i_bound, bool realistic, static void do_warn_aggressive_loop_optimizations (struct loop *loop, - double_int i_bound, gimple stmt) + max_wide_int i_bound, gimple stmt) { /* Don't warn if the loop doesn't have known constant bound. */ if (!loop->nb_iterations @@ -2558,7 +2557,7 @@ do_warn_aggressive_loop_optimizations (struct loop *loop, || loop->warned_aggressive_loop_optimizations /* Only warn if undefined behavior gives us lower estimate than the known constant bound. */ - || i_bound.ucmp (tree_to_double_int (loop->nb_iterations)) >= 0 + || i_bound.cmpu (loop->nb_iterations) >= 0 /* And undefined behavior happens unconditionally. */ || !dominated_by_p (CDI_DOMINATORS, loop->latch, gimple_bb (stmt))) return; @@ -2570,8 +2569,8 @@ do_warn_aggressive_loop_optimizations (struct loop *loop, gimple estmt = last_stmt (e->src); if (warning_at (gimple_location (stmt), OPT_Waggressive_loop_optimizations, "iteration %E invokes undefined behavior", - double_int_to_tree (TREE_TYPE (loop->nb_iterations), - i_bound))) + wide_int_to_tree (TREE_TYPE (loop->nb_iterations), + i_bound))) inform (gimple_location (estmt), "containing loop"); loop->warned_aggressive_loop_optimizations = true; } @@ -2581,13 +2580,13 @@ do_warn_aggressive_loop_optimizations (struct loop *loop, is taken at last when the STMT is executed BOUND + 1 times. REALISTIC is true if BOUND is expected to be close to the real number of iterations. UPPER is true if we are sure the loop iterates at most - BOUND times. 
I_BOUND is an unsigned double_int upper estimate on BOUND. */ + BOUND times. I_BOUND is an unsigned wide_int upper estimate on BOUND. */ static void -record_estimate (struct loop *loop, tree bound, double_int i_bound, +record_estimate (struct loop *loop, tree bound, max_wide_int i_bound, gimple at_stmt, bool is_exit, bool realistic, bool upper) { - double_int delta; + max_wide_int delta; if (dump_file && (dump_flags & TDF_DETAILS)) { @@ -2597,7 +2596,7 @@ record_estimate (struct loop *loop, tree bound, double_int i_bound, upper ? "" : "probably "); print_generic_expr (dump_file, bound, TDF_SLIM); fprintf (dump_file, " (bounded by "); - dump_double_int (dump_file, i_bound, true); + print_decu (i_bound, dump_file); fprintf (dump_file, ") + 1 times in loop %d.\n", loop->num); } @@ -2606,7 +2605,7 @@ record_estimate (struct loop *loop, tree bound, double_int i_bound, if (TREE_CODE (bound) != INTEGER_CST) realistic = false; else - gcc_checking_assert (i_bound == tree_to_double_int (bound)); + gcc_checking_assert (i_bound == max_wide_int (bound)); if (!upper && !realistic) return; @@ -2637,13 +2636,13 @@ record_estimate (struct loop *loop, tree bound, double_int i_bound, otherwise it can be executed BOUND + 1 times. We will lower the estimate later if such statement must be executed on last iteration */ if (is_exit) - delta = double_int_zero; + delta = 0; else - delta = double_int_one; + delta = 1; i_bound += delta; /* If an overflow occurred, ignore the result. */ - if (i_bound.ult (delta)) + if (i_bound.ltu_p (delta)) return; if (upper && !is_exit) @@ -2663,7 +2662,7 @@ record_nonwrapping_iv (struct loop *loop, tree base, tree step, gimple stmt, { tree niter_bound, extreme, delta; tree type = TREE_TYPE (base), unsigned_type; - double_int max; + max_wide_int max; if (TREE_CODE (step) != INTEGER_CST || integer_zerop (step)) return; @@ -3008,42 +3007,38 @@ infer_loop_bounds_from_undefined (struct loop *loop) free (bbs); } -/* Converts VAL to double_int. */ +/* Converts VAL to max_wide_int. */ -static double_int -gcov_type_to_double_int (gcov_type val) +static max_wide_int +gcov_type_to_wide_int (gcov_type val) { - double_int ret; + HOST_WIDE_INT a[2]; - ret.low = (unsigned HOST_WIDE_INT) val; + a[0] = (unsigned HOST_WIDE_INT) val; /* If HOST_BITS_PER_WIDE_INT == HOST_BITS_PER_WIDEST_INT, avoid shifting by the size of type. */ val >>= HOST_BITS_PER_WIDE_INT - 1; val >>= 1; - ret.high = (unsigned HOST_WIDE_INT) val; + a[1] = (unsigned HOST_WIDE_INT) val; - return ret; + return max_wide_int::from_array (a, 2); } -/* Compare double ints, callback for qsort. */ +/* Compare wide ints, callback for qsort. */ int -double_int_cmp (const void *p1, const void *p2) +wide_int_cmp (const void *p1, const void *p2) { - const double_int *d1 = (const double_int *)p1; - const double_int *d2 = (const double_int *)p2; - if (*d1 == *d2) - return 0; - if (d1->ult (*d2)) - return -1; - return 1; + const max_wide_int *d1 = (const max_wide_int *)p1; + const max_wide_int *d2 = (const max_wide_int *)p2; + return (*d1).cmpu (*d2); } /* Return index of BOUND in BOUNDS array sorted in increasing order. Lookup by binary search. 
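   A minimal usage sketch, assuming the vec API and the implicit
   int -> max_wide_int conversion used elsewhere in this file:

      vec<max_wide_int> b = vNULL;
      b.safe_push (2); b.safe_push (5); b.safe_push (9);
      bound_index (b, 5);    -- returns 1
      bound_index (b, 9);    -- returns 2

   BOUNDS must already be sorted by the unsigned order that wide_int_cmp
   above establishes.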
*/ int -bound_index (vec<double_int> bounds, double_int bound) +bound_index (vec<max_wide_int> bounds, const max_wide_int &bound) { unsigned int end = bounds.length (); unsigned int begin = 0; @@ -3052,11 +3047,11 @@ bound_index (vec<double_int> bounds, double_int bound) while (begin != end) { unsigned int middle = (begin + end) / 2; - double_int index = bounds[middle]; + max_wide_int index = bounds[middle]; if (index == bound) return middle; - else if (index.ult (bound)) + else if (index.ltu_p (bound)) begin = middle + 1; else end = middle; @@ -3075,7 +3070,7 @@ discover_iteration_bound_by_body_walk (struct loop *loop) { pointer_map_t *bb_bounds; struct nb_iter_bound *elt; - vec<double_int> bounds = vNULL; + vec<max_wide_int> bounds = vNULL; vec<vec<basic_block> > queues = vNULL; vec<basic_block> queue = vNULL; ptrdiff_t queue_index; @@ -3085,20 +3080,20 @@ discover_iteration_bound_by_body_walk (struct loop *loop) /* Discover what bounds may interest us. */ for (elt = loop->bounds; elt; elt = elt->next) { - double_int bound = elt->bound; + max_wide_int bound = elt->bound; /* Exit terminates loop at given iteration, while non-exits produce undefined effect on the next iteration. */ if (!elt->is_exit) { - bound += double_int_one; + bound += 1; /* If an overflow occurred, ignore the result. */ - if (bound.is_zero ()) + if (bound.zero_p ()) continue; } if (!loop->any_upper_bound - || bound.ult (loop->nb_iterations_upper_bound)) + || bound.ltu_p (loop->nb_iterations_upper_bound)) bounds.safe_push (bound); } @@ -3111,7 +3106,7 @@ discover_iteration_bound_by_body_walk (struct loop *loop) /* Sort the bounds in decreasing order. */ qsort (bounds.address (), bounds.length (), - sizeof (double_int), double_int_cmp); + sizeof (max_wide_int), wide_int_cmp); /* For every basic block record the lowest bound that is guaranteed to terminate the loop. */ @@ -3119,17 +3114,17 @@ discover_iteration_bound_by_body_walk (struct loop *loop) bb_bounds = pointer_map_create (); for (elt = loop->bounds; elt; elt = elt->next) { - double_int bound = elt->bound; + max_wide_int bound = elt->bound; if (!elt->is_exit) { - bound += double_int_one; + bound += 1; /* If an overflow occurred, ignore the result. 
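   (Within the fixed max_wide_int precision, BOUND + 1 can only become
   zero by wrapping around from the all-ones value; a sketch, using only
   the construction and compound assignment seen just above:

      max_wide_int b = 0;
      b -= 1;          -- the maximal, all-ones bound
      b += 1;          -- wraps around
      b.zero_p ();     -- true, so such a bound is skipped)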
*/ - if (bound.is_zero ()) + if (bound.zero_p ()) continue; } if (!loop->any_upper_bound - || bound.ult (loop->nb_iterations_upper_bound)) + || bound.ltu_p (loop->nb_iterations_upper_bound)) { ptrdiff_t index = bound_index (bounds, bound); void **entry = pointer_map_contains (bb_bounds, @@ -3229,7 +3224,7 @@ discover_iteration_bound_by_body_walk (struct loop *loop) if (dump_file && (dump_flags & TDF_DETAILS)) { fprintf (dump_file, "Found better loop bound "); - dump_double_int (dump_file, bounds[latch_index], true); + print_decu (bounds[latch_index], dump_file); fprintf (dump_file, "\n"); } record_niter_bound (loop, bounds[latch_index], false, true); @@ -3264,7 +3259,7 @@ maybe_lower_iteration_bound (struct loop *loop) for (elt = loop->bounds; elt; elt = elt->next) { if (!elt->is_exit - && elt->bound.ult (loop->nb_iterations_upper_bound)) + && elt->bound.ltu_p (loop->nb_iterations_upper_bound)) { if (!not_executed_last_iteration) not_executed_last_iteration = pointer_set_create (); @@ -3338,7 +3333,7 @@ maybe_lower_iteration_bound (struct loop *loop) if (dump_file && (dump_flags & TDF_DETAILS)) fprintf (dump_file, "Reducing loop iteration estimate by 1; " "undefined statement must be executed at the last iteration.\n"); - record_niter_bound (loop, loop->nb_iterations_upper_bound - double_int_one, + record_niter_bound (loop, loop->nb_iterations_upper_bound - 1, false, true); } BITMAP_FREE (visited); @@ -3357,7 +3352,7 @@ estimate_numbers_of_iterations_loop (struct loop *loop) unsigned i; struct tree_niter_desc niter_desc; edge ex; - double_int bound; + max_wide_int bound; edge likely_exit; /* Give up if we already have tried to compute an estimation. */ @@ -3404,7 +3399,7 @@ estimate_numbers_of_iterations_loop (struct loop *loop) if (loop->header->count != 0) { gcov_type nit = expected_loop_iterations_unbounded (loop) + 1; - bound = gcov_type_to_double_int (nit); + bound = gcov_type_to_wide_int (nit); record_niter_bound (loop, bound, true, false); } @@ -3415,8 +3410,7 @@ estimate_numbers_of_iterations_loop (struct loop *loop) && TREE_CODE (loop->nb_iterations) == INTEGER_CST) { loop->any_upper_bound = true; - loop->nb_iterations_upper_bound - = tree_to_double_int (loop->nb_iterations); + loop->nb_iterations_upper_bound = loop->nb_iterations; } } @@ -3426,7 +3420,7 @@ estimate_numbers_of_iterations_loop (struct loop *loop) the function returns false, otherwise returns true. */ bool -estimated_loop_iterations (struct loop *loop, double_int *nit) +estimated_loop_iterations (struct loop *loop, max_wide_int *nit) { /* When SCEV information is available, try to update loop iterations estimate. Otherwise just return whatever we recorded earlier. */ @@ -3439,7 +3433,7 @@ estimated_loop_iterations (struct loop *loop, double_int *nit) { if (loop->header->count) { - *nit = gcov_type_to_double_int + *nit = gcov_type_to_wide_int (expected_loop_iterations_unbounded (loop) + 1); return true; } @@ -3455,7 +3449,7 @@ estimated_loop_iterations (struct loop *loop, double_int *nit) false, otherwise returns true. */ bool -max_loop_iterations (struct loop *loop, double_int *nit) +max_loop_iterations (struct loop *loop, max_wide_int *nit) { /* When SCEV information is available, try to update loop iterations estimate. Otherwise just return whatever we recorded earlier. 
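   A typical caller, sketched after max_loop_iterations_int further below
   and using only accessors that appear in this patch; process () is a
   placeholder:

      max_wide_int nit;
      if (max_loop_iterations (loop, &nit)
          && nit.fits_shwi_p ())
        process (nit.to_shwi ());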
*/ @@ -3475,13 +3469,13 @@ max_loop_iterations (struct loop *loop, double_int *nit) HOST_WIDE_INT estimated_loop_iterations_int (struct loop *loop) { - double_int nit; + max_wide_int nit; HOST_WIDE_INT hwi_nit; if (!estimated_loop_iterations (loop, &nit)) return -1; - if (!nit.fits_shwi ()) + if (!nit.fits_shwi_p ()) return -1; hwi_nit = nit.to_shwi (); @@ -3495,13 +3489,13 @@ estimated_loop_iterations_int (struct loop *loop) HOST_WIDE_INT max_loop_iterations_int (struct loop *loop) { - double_int nit; + max_wide_int nit; HOST_WIDE_INT hwi_nit; if (!max_loop_iterations (loop, &nit)) return -1; - if (!nit.fits_shwi ()) + if (!nit.fits_shwi_p ()) return -1; hwi_nit = nit.to_shwi (); @@ -3551,18 +3545,18 @@ estimated_stmt_executions_int (struct loop *loop) false, otherwise returns true. */ bool -max_stmt_executions (struct loop *loop, double_int *nit) +max_stmt_executions (struct loop *loop, max_wide_int *nit) { - double_int nit_minus_one; + max_wide_int nit_minus_one; if (!max_loop_iterations (loop, nit)) return false; nit_minus_one = *nit; - *nit += double_int_one; + *nit += 1; - return (*nit).ugt (nit_minus_one); + return (*nit).gtu_p (nit_minus_one); } /* Sets NIT to the estimated number of executions of the latch of the @@ -3570,18 +3564,18 @@ max_stmt_executions (struct loop *loop, double_int *nit) false, otherwise returns true. */ bool -estimated_stmt_executions (struct loop *loop, double_int *nit) +estimated_stmt_executions (struct loop *loop, max_wide_int *nit) { - double_int nit_minus_one; + max_wide_int nit_minus_one; if (!estimated_loop_iterations (loop, nit)) return false; nit_minus_one = *nit; - *nit += double_int_one; + *nit += 1; - return (*nit).ugt (nit_minus_one); + return (*nit).gtu_p (nit_minus_one); } /* Records estimates on numbers of iterations of loops. */ @@ -3653,7 +3647,7 @@ n_of_executions_at_most (gimple stmt, struct nb_iter_bound *niter_bound, tree niter) { - double_int bound = niter_bound->bound; + max_wide_int bound = niter_bound->bound; tree nit_type = TREE_TYPE (niter), e; enum tree_code cmp; @@ -3661,7 +3655,7 @@ n_of_executions_at_most (gimple stmt, /* If the bound does not even fit into NIT_TYPE, it cannot tell us that the number of iterations is small. 
*/ - if (!double_int_fits_to_tree_p (nit_type, bound)) + if (!bound.fits_to_tree_p (nit_type)) return false; /* We know that NITER_BOUND->stmt is executed at most NITER_BOUND->bound + 1 @@ -3704,16 +3698,16 @@ n_of_executions_at_most (gimple stmt, gsi_next (&bsi)) if (gimple_has_side_effects (gsi_stmt (bsi))) return false; - bound += double_int_one; - if (bound.is_zero () - || !double_int_fits_to_tree_p (nit_type, bound)) + bound += 1; + if (bound.zero_p () + || !bound.fits_to_tree_p (nit_type)) return false; } cmp = GT_EXPR; } e = fold_binary (cmp, boolean_type_node, - niter, double_int_to_tree (nit_type, bound)); + niter, wide_int_to_tree (nit_type, bound)); return e && integer_nonzerop (e); } @@ -3751,7 +3745,7 @@ scev_probably_wraps_p (tree base, tree step, tree unsigned_type, valid_niter; tree type = TREE_TYPE (step); tree e; - double_int niter; + max_wide_int niter; struct nb_iter_bound *bound; /* FIXME: We really need something like @@ -3817,10 +3811,10 @@ scev_probably_wraps_p (tree base, tree step, estimate_numbers_of_iterations_loop (loop); if (max_loop_iterations (loop, &niter) - && double_int_fits_to_tree_p (TREE_TYPE (valid_niter), niter) + && niter.fits_to_tree_p (TREE_TYPE (valid_niter)) && (e = fold_binary (GT_EXPR, boolean_type_node, valid_niter, - double_int_to_tree (TREE_TYPE (valid_niter), - niter))) != NULL + wide_int_to_tree (TREE_TYPE (valid_niter), + niter))) != NULL && integer_nonzerop (e)) { fold_undefer_and_ignore_overflow_warnings (); diff --git a/gcc/tree-ssa-loop-prefetch.c b/gcc/tree-ssa-loop-prefetch.c index 4d313040fad..a3ff32a2155 100644 --- a/gcc/tree-ssa-loop-prefetch.c +++ b/gcc/tree-ssa-loop-prefetch.c @@ -285,7 +285,7 @@ dump_mem_details (FILE *file, tree base, tree step, fprintf (file, "(base "); print_generic_expr (file, base, TDF_SLIM); fprintf (file, ", step "); - if (cst_and_fits_in_hwi (step)) + if (cst_fits_shwi_p (step)) fprintf (file, HOST_WIDE_INT_PRINT_DEC, int_cst_value (step)); else print_generic_expr (file, step, TDF_TREE); @@ -326,7 +326,7 @@ find_or_create_group (struct mem_ref_group **groups, tree base, tree step) /* If step is an integer constant, keep the list of groups sorted by decreasing step. 
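   (cst_fits_shwi_p below is this patch's rename of cst_and_fits_in_hwi,
   part of the systematic replacement of host_integerp (t, 1) /
   tree_low_cst (t, 1) by tree_fits_uhwi_p (t) / tree_to_uhwi (t), and of
   the signed variants host_integerp (t, 0) / tree_low_cst (t, 0) by
   tree_fits_shwi_p (t) / tree_to_shwi (t); a sketch, assuming an
   INTEGER_CST tree STEP:

      if (cst_fits_shwi_p (step))
        {
          HOST_WIDE_INT s = int_cst_value (step);  -- known to fit
          ...
        })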
*/ - if (cst_and_fits_in_hwi ((*groups)->step) && cst_and_fits_in_hwi (step) + if (cst_fits_shwi_p ((*groups)->step) && cst_fits_shwi_p (step) && int_cst_value ((*groups)->step) < int_cst_value (step)) break; } @@ -434,12 +434,12 @@ idx_analyze_ref (tree base, tree *index, void *data) step = iv.step; if (TREE_CODE (ibase) == POINTER_PLUS_EXPR - && cst_and_fits_in_hwi (TREE_OPERAND (ibase, 1))) + && cst_fits_shwi_p (TREE_OPERAND (ibase, 1))) { idelta = int_cst_value (TREE_OPERAND (ibase, 1)); ibase = TREE_OPERAND (ibase, 0); } - if (cst_and_fits_in_hwi (ibase)) + if (cst_fits_shwi_p (ibase)) { idelta += int_cst_value (ibase); ibase = build_int_cst (TREE_TYPE (ibase), 0); @@ -448,7 +448,7 @@ idx_analyze_ref (tree base, tree *index, void *data) if (TREE_CODE (base) == ARRAY_REF) { stepsize = array_ref_element_size (base); - if (!cst_and_fits_in_hwi (stepsize)) + if (!cst_fits_shwi_p (stepsize)) return false; imult = int_cst_value (stepsize); step = fold_build2 (MULT_EXPR, sizetype, @@ -505,7 +505,7 @@ analyze_ref (struct loop *loop, tree *ref_p, tree *base, for (; TREE_CODE (ref) == COMPONENT_REF; ref = TREE_OPERAND (ref, 0)) { off = DECL_FIELD_BIT_OFFSET (TREE_OPERAND (ref, 1)); - bit_offset = TREE_INT_CST_LOW (off); + bit_offset = tree_to_hwi (off); gcc_assert (bit_offset % BITS_PER_UNIT == 0); *delta += bit_offset / BITS_PER_UNIT; @@ -546,7 +546,7 @@ gather_memory_references_ref (struct loop *loop, struct mem_ref_group **refs, /* Limit non-constant step prefetching only to the innermost loops and only when the step is loop invariant in the entire loop nest. */ - if (!cst_and_fits_in_hwi (step)) + if (!cst_fits_shwi_p (step)) { if (loop->inner != NULL) { @@ -660,7 +660,7 @@ prune_ref_by_self_reuse (struct mem_ref *ref) bool backward; /* If the step size is non constant, we cannot calculate prefetch_mod. */ - if (!cst_and_fits_in_hwi (ref->group->step)) + if (!cst_fits_shwi_p (ref->group->step)) return; step = int_cst_value (ref->group->step); @@ -770,7 +770,7 @@ prune_ref_by_group_reuse (struct mem_ref *ref, struct mem_ref *by, int align_unit; /* If the step is non constant we cannot calculate prefetch_before. */ - if (!cst_and_fits_in_hwi (ref->group->step)) { + if (!cst_fits_shwi_p (ref->group->step)) { return; } @@ -1135,7 +1135,7 @@ issue_prefetch_ref (struct mem_ref *ref, unsigned unroll_factor, unsigned ahead) for (ap = 0; ap < n_prefetches; ap++) { - if (cst_and_fits_in_hwi (ref->group->step)) + if (cst_fits_shwi_p (ref->group->step)) { /* Determine the address to prefetch. */ delta = (ahead + ap * ref->prefetch_mod) * @@ -1449,8 +1449,8 @@ add_subscript_strides (tree access_fn, unsigned stride, if ((unsigned) loop_depth (aloop) <= min_depth) continue; - if (host_integerp (step, 0)) - astep = tree_low_cst (step, 0); + if (tree_fits_shwi_p (step)) + astep = tree_to_shwi (step); else astep = L1_CACHE_LINE_SIZE; @@ -1499,8 +1499,8 @@ self_reuse_distance (data_reference_p dr, unsigned *loop_sizes, unsigned n, if (TREE_CODE (ref) == ARRAY_REF) { stride = TYPE_SIZE_UNIT (TREE_TYPE (ref)); - if (host_integerp (stride, 1)) - astride = tree_low_cst (stride, 1); + if (tree_fits_uhwi_p (stride)) + astride = tree_to_uhwi (stride); else astride = L1_CACHE_LINE_SIZE; diff --git a/gcc/tree-ssa-math-opts.c b/gcc/tree-ssa-math-opts.c index d1691478c4b..7743b9557be 100644 --- a/gcc/tree-ssa-math-opts.c +++ b/gcc/tree-ssa-math-opts.c @@ -1139,7 +1139,7 @@ gimple_expand_builtin_pow (gimple_stmt_iterator *gsi, location_t loc, multiplication sequence when profitable. 
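   (real_from_integer now takes a signop in place of the old explicit
   high word and unsignedness flag: callers used to spell the sign
   extension by hand as "n, n < 0 ? -1 : 0, 0", which the new interface
   folds into SIGNED; a sketch, assuming UNSIGNED reads the
   HOST_WIDE_INT argument as an unsigned value:

      real_from_integer (&cint, VOIDmode, -3, SIGNED);    -- cint == -3.0
      real_from_integer (&cint, VOIDmode, -3, UNSIGNED);  -- large positive)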
*/ c = TREE_REAL_CST (arg1); n = real_to_integer (&c); - real_from_integer (&cint, VOIDmode, n, n < 0 ? -1 : 0, 0); + real_from_integer (&cint, VOIDmode, n, SIGNED); c_is_int = real_identical (&c, &cint); if (c_is_int @@ -1185,7 +1185,7 @@ gimple_expand_builtin_pow (gimple_stmt_iterator *gsi, location_t loc, /* Optimize pow(x,0.75) = sqrt(x) * sqrt(sqrt(x)) unless we are optimizing for space. Don't do this optimization if we don't have a hardware sqrt insn. */ - real_from_integer (&dconst3_4, VOIDmode, 3, 0, 0); + real_from_integer (&dconst3_4, VOIDmode, 3, SIGNED); SET_REAL_EXP (&dconst3_4, REAL_EXP (&dconst3_4) - 2); if (flag_unsafe_math_optimizations @@ -1249,7 +1249,7 @@ gimple_expand_builtin_pow (gimple_stmt_iterator *gsi, location_t loc, Do not calculate the powi factor when n/2 = 0. */ real_arithmetic (&c2, MULT_EXPR, &c, &dconst2); n = real_to_integer (&c2); - real_from_integer (&cint, VOIDmode, n, n < 0 ? -1 : 0, 0); + real_from_integer (&cint, VOIDmode, n, SIGNED); c2_is_int = real_identical (&c2, &cint); if (flag_unsafe_math_optimizations @@ -1297,11 +1297,11 @@ gimple_expand_builtin_pow (gimple_stmt_iterator *gsi, location_t loc, different from pow(x, 1./3.) due to rounding and behavior with negative x, we need to constrain this transformation to unsafe math and positive x or finite math. */ - real_from_integer (&dconst3, VOIDmode, 3, 0, 0); + real_from_integer (&dconst3, VOIDmode, 3, SIGNED); real_arithmetic (&c2, MULT_EXPR, &c, &dconst3); real_round (&c2, mode, &c2); n = real_to_integer (&c2); - real_from_integer (&cint, VOIDmode, n, n < 0 ? -1 : 0, 0); + real_from_integer (&cint, VOIDmode, n, SIGNED); real_arithmetic (&c2, RDIV_EXPR, &cint, &dconst3); real_convert (&c2, mode, &c2); @@ -1492,10 +1492,10 @@ execute_cse_sincos (void) } else { - if (!host_integerp (arg1, 0)) + if (!tree_fits_shwi_p (arg1)) break; - - n = TREE_INT_CST_LOW (arg1); + + n = tree_to_shwi (arg1); result = gimple_expand_builtin_powi (&gsi, loc, arg0, n); } @@ -1753,7 +1753,7 @@ find_bswap_1 (gimple stmt, struct symbolic_number *n, int limit) case RSHIFT_EXPR: case LROTATE_EXPR: case RROTATE_EXPR: - if (!do_shift_rotate (code, n, (int)TREE_INT_CST_LOW (rhs2))) + if (!do_shift_rotate (code, n, (int)tree_to_hwi (rhs2))) return NULL_TREE; break; CASE_CONVERT: @@ -1846,7 +1846,7 @@ find_bswap (gimple stmt) increase that number by three here in order to also cover signed -> unsigned converions of the src operand as can be seen in libgcc, and for initial shift/and operation of the src operand. 
*/ - limit = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (gimple_expr_type (stmt))); + limit = tree_to_hwi (TYPE_SIZE_UNIT (gimple_expr_type (stmt))); limit += 1 + (int) ceil_log2 ((unsigned HOST_WIDE_INT) limit); source_expr = find_bswap_1 (stmt, &n, limit); diff --git a/gcc/tree-ssa-phiopt.c b/gcc/tree-ssa-phiopt.c index ddcd040ba7c..8418648abb1 100644 --- a/gcc/tree-ssa-phiopt.c +++ b/gcc/tree-ssa-phiopt.c @@ -712,7 +712,7 @@ jump_function_from_stmt (tree *arg, gimple stmt) &offset); if (tem && TREE_CODE (tem) == MEM_REF - && (mem_ref_offset (tem) + double_int::from_shwi (offset)).is_zero ()) + && (mem_ref_offset (tem) + offset).zero_p ()) { *arg = TREE_OPERAND (tem, 0); return true; @@ -1303,7 +1303,7 @@ add_or_mark_expr (basic_block bb, tree exp, if (TREE_CODE (exp) == MEM_REF && TREE_CODE (TREE_OPERAND (exp, 0)) == SSA_NAME - && host_integerp (TREE_OPERAND (exp, 1), 0) + && tree_fits_shwi_p (TREE_OPERAND (exp, 1)) && (size = int_size_in_bytes (TREE_TYPE (exp))) > 0) { tree name = TREE_OPERAND (exp, 0); @@ -1318,7 +1318,7 @@ add_or_mark_expr (basic_block bb, tree exp, map.phase = 0; map.bb = 0; map.store = store; - map.offset = tree_low_cst (TREE_OPERAND (exp, 1), 0); + map.offset = tree_to_shwi (TREE_OPERAND (exp, 1)); map.size = size; slot = seen_ssa_names.find_slot (&map, INSERT); @@ -1951,14 +1951,14 @@ hoist_adjacent_loads (basic_block bb0, basic_block bb1, tree_offset2 = bit_position (field2); tree_size2 = DECL_SIZE (field2); - if (!host_integerp (tree_offset1, 1) - || !host_integerp (tree_offset2, 1) - || !host_integerp (tree_size2, 1)) + if (!tree_fits_uhwi_p (tree_offset1) + || !tree_fits_uhwi_p (tree_offset2) + || !tree_fits_uhwi_p (tree_size2)) continue; - offset1 = TREE_INT_CST_LOW (tree_offset1); - offset2 = TREE_INT_CST_LOW (tree_offset2); - size2 = TREE_INT_CST_LOW (tree_size2); + offset1 = tree_to_uhwi (tree_offset1); + offset2 = tree_to_uhwi (tree_offset2); + size2 = tree_to_uhwi (tree_size2); align1 = DECL_ALIGN (field1) % param_align_bits; if (offset1 % BITS_PER_UNIT != 0) diff --git a/gcc/tree-ssa-pre.c b/gcc/tree-ssa-pre.c index f6928a8c0b3..db56f954568 100644 --- a/gcc/tree-ssa-pre.c +++ b/gcc/tree-ssa-pre.c @@ -1602,11 +1602,11 @@ phi_translate_1 (pre_expr expr, bitmap_set_t set1, bitmap_set_t set2, && TREE_CODE (op[1]) == INTEGER_CST && TREE_CODE (op[2]) == INTEGER_CST) { - double_int off = tree_to_double_int (op[0]); - off += -tree_to_double_int (op[1]); - off *= tree_to_double_int (op[2]); - if (off.fits_shwi ()) - newop.off = off.low; + addr_wide_int off = op[0]; + off += -addr_wide_int (op[1]); + off *= addr_wide_int (op[2]); + if (off.fits_shwi_p ()) + newop.off = off.to_shwi (); } newoperands[j] = newop; /* If it transforms from an SSA_NAME to an address, fold with diff --git a/gcc/tree-ssa-reassoc.c b/gcc/tree-ssa-reassoc.c index 7e1d8c77a3f..35b5b153953 100644 --- a/gcc/tree-ssa-reassoc.c +++ b/gcc/tree-ssa-reassoc.c @@ -1036,13 +1036,13 @@ decrement_power (gimple stmt) arg1 = gimple_call_arg (stmt, 1); c = TREE_REAL_CST (arg1); power = real_to_integer (&c) - 1; - real_from_integer (&cint, VOIDmode, power, 0, 0); + real_from_integer (&cint, VOIDmode, power, SIGNED); gimple_call_set_arg (stmt, 1, build_real (TREE_TYPE (arg1), cint)); return power; CASE_FLT_FN (BUILT_IN_POWI): arg1 = gimple_call_arg (stmt, 1); - power = TREE_INT_CST_LOW (arg1) - 1; + power = tree_to_hwi (arg1) - 1; gimple_call_set_arg (stmt, 1, build_int_cst (TREE_TYPE (arg1), power)); return power; @@ -3460,8 +3460,7 @@ acceptable_pow_call (gimple stmt, tree *base, HOST_WIDE_INT *exponent) return 
false; *exponent = real_to_integer (&c); - real_from_integer (&cint, VOIDmode, *exponent, - *exponent < 0 ? -1 : 0, 0); + real_from_integer (&cint, VOIDmode, *exponent, SIGNED); if (!real_identical (&c, &cint)) return false; @@ -3471,10 +3470,10 @@ acceptable_pow_call (gimple stmt, tree *base, HOST_WIDE_INT *exponent) *base = gimple_call_arg (stmt, 0); arg1 = gimple_call_arg (stmt, 1); - if (!host_integerp (arg1, 0)) + if (!tree_fits_shwi_p (arg1)) return false; - *exponent = TREE_INT_CST_LOW (arg1); + *exponent = tree_to_shwi (arg1); break; default: diff --git a/gcc/tree-ssa-sccvn.c b/gcc/tree-ssa-sccvn.c index 6886efbe3eb..508042fd4ae 100644 --- a/gcc/tree-ssa-sccvn.c +++ b/gcc/tree-ssa-sccvn.c @@ -654,11 +654,11 @@ vn_reference_eq (const_vn_reference_t const vr1, const_vn_reference_t const vr2) } else if (INTEGRAL_TYPE_P (vr1->type) && (TYPE_PRECISION (vr1->type) - != TREE_INT_CST_LOW (TYPE_SIZE (vr1->type)))) + != tree_to_hwi (TYPE_SIZE (vr1->type)))) return false; else if (INTEGRAL_TYPE_P (vr2->type) && (TYPE_PRECISION (vr2->type) - != TREE_INT_CST_LOW (TYPE_SIZE (vr2->type)))) + != tree_to_hwi (TYPE_SIZE (vr2->type)))) return false; i = 0; @@ -778,8 +778,8 @@ copy_reference_ops_from_ref (tree ref, vec<vn_reference_op_s> *result) case MEM_REF: /* The base address gets its own vn_reference_op_s structure. */ temp.op0 = TREE_OPERAND (ref, 1); - if (host_integerp (TREE_OPERAND (ref, 1), 0)) - temp.off = TREE_INT_CST_LOW (TREE_OPERAND (ref, 1)); + if (tree_fits_shwi_p (TREE_OPERAND (ref, 1))) + temp.off = tree_to_shwi (TREE_OPERAND (ref, 1)); break; case BIT_FIELD_REF: /* Record bits and position. */ @@ -799,15 +799,15 @@ copy_reference_ops_from_ref (tree ref, vec<vn_reference_op_s> *result) && TREE_CODE (this_offset) == INTEGER_CST) { tree bit_offset = DECL_FIELD_BIT_OFFSET (TREE_OPERAND (ref, 1)); - if (TREE_INT_CST_LOW (bit_offset) % BITS_PER_UNIT == 0) + if (tree_to_hwi (bit_offset) % BITS_PER_UNIT == 0) { - double_int off - = tree_to_double_int (this_offset) - + tree_to_double_int (bit_offset) - .rshift (BITS_PER_UNIT == 8 - ? 3 : exact_log2 (BITS_PER_UNIT)); - if (off.fits_shwi ()) - temp.off = off.low; + addr_wide_int off + = (addr_wide_int (this_offset) + + addr_wide_int (bit_offset) + .rshiftu (BITS_PER_UNIT == 8 + ? 3 : exact_log2 (BITS_PER_UNIT))); + if (off.fits_shwi_p ()) + temp.off = off.to_shwi (); } } } @@ -823,11 +823,11 @@ copy_reference_ops_from_ref (tree ref, vec<vn_reference_op_s> *result) && TREE_CODE (temp.op1) == INTEGER_CST && TREE_CODE (temp.op2) == INTEGER_CST) { - double_int off = tree_to_double_int (temp.op0); - off += -tree_to_double_int (temp.op1); - off *= tree_to_double_int (temp.op2); - if (off.fits_shwi ()) - temp.off = off.low; + addr_wide_int off = temp.op0; + off += -addr_wide_int (temp.op1); + off *= addr_wide_int (temp.op2); + if (off.fits_shwi_p ()) + temp.off = off.to_shwi (); } break; case VAR_DECL: @@ -879,7 +879,7 @@ copy_reference_ops_from_ref (tree ref, vec<vn_reference_op_s> *result) break; case IMAGPART_EXPR: /* This is only interesting for its constant offset.
*/ - temp.off = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (TREE_TYPE (ref))); + temp.off = tree_to_hwi (TYPE_SIZE_UNIT (TREE_TYPE (ref))); break; default: gcc_unreachable (); @@ -932,10 +932,10 @@ ao_ref_init_from_vn_reference (ao_ref *ref, } if (size_tree != NULL_TREE) { - if (!host_integerp (size_tree, 1)) + if (!tree_fits_uhwi_p (size_tree)) size = -1; else - size = TREE_INT_CST_LOW (size_tree); + size = tree_to_uhwi (size_tree); } /* Initially, maxsize is the same as the accessed element size. @@ -991,7 +991,7 @@ ao_ref_init_from_vn_reference (ao_ref *ref, /* And now the usual component-reference style ops. */ case BIT_FIELD_REF: - offset += tree_low_cst (op->op1, 0); + offset += tree_to_shwi (op->op1); break; case COMPONENT_REF: @@ -1002,13 +1002,13 @@ ao_ref_init_from_vn_reference (ao_ref *ref, parts manually. */ if (op->op1 - || !host_integerp (DECL_FIELD_OFFSET (field), 1)) + || !tree_fits_uhwi_p (DECL_FIELD_OFFSET (field))) max_size = -1; else { - offset += (TREE_INT_CST_LOW (DECL_FIELD_OFFSET (field)) + offset += (tree_to_uhwi (DECL_FIELD_OFFSET (field)) * BITS_PER_UNIT); - offset += TREE_INT_CST_LOW (DECL_FIELD_BIT_OFFSET (field)); + offset += tree_to_hwi (DECL_FIELD_BIT_OFFSET (field)); } break; } @@ -1016,15 +1016,15 @@ ao_ref_init_from_vn_reference (ao_ref *ref, case ARRAY_RANGE_REF: case ARRAY_REF: /* We recorded the lower bound and the element size. */ - if (!host_integerp (op->op0, 0) - || !host_integerp (op->op1, 0) - || !host_integerp (op->op2, 0)) + if (!tree_fits_shwi_p (op->op0) + || !tree_fits_shwi_p (op->op1) + || !tree_fits_shwi_p (op->op2)) max_size = -1; else { - HOST_WIDE_INT hindex = TREE_INT_CST_LOW (op->op0); - hindex -= TREE_INT_CST_LOW (op->op1); - hindex *= TREE_INT_CST_LOW (op->op2); + HOST_WIDE_INT hindex = tree_to_shwi (op->op0); + hindex -= tree_to_shwi (op->op1); + hindex *= tree_to_shwi (op->op2); hindex *= BITS_PER_UNIT; offset += hindex; } @@ -1147,13 +1147,13 @@ vn_reference_fold_indirect (vec<vn_reference_op_s> *ops, gcc_checking_assert (addr_base && TREE_CODE (addr_base) != MEM_REF); if (addr_base != TREE_OPERAND (op->op0, 0)) { - double_int off = tree_to_double_int (mem_op->op0); - off = off.sext (TYPE_PRECISION (TREE_TYPE (mem_op->op0))); - off += double_int::from_shwi (addr_offset); - mem_op->op0 = double_int_to_tree (TREE_TYPE (mem_op->op0), off); + addr_wide_int off = addr_wide_int (mem_op->op0) + .sext (TYPE_PRECISION (TREE_TYPE (mem_op->op0))); + off += addr_offset; + mem_op->op0 = wide_int_to_tree (TREE_TYPE (mem_op->op0), off); op->op0 = build_fold_addr_expr (addr_base); - if (host_integerp (mem_op->op0, 0)) - mem_op->off = TREE_INT_CST_LOW (mem_op->op0); + if (tree_fits_shwi_p (mem_op->op0)) + mem_op->off = tree_to_shwi (mem_op->op0); else mem_op->off = -1; } @@ -1170,7 +1170,7 @@ vn_reference_maybe_forwprop_address (vec<vn_reference_op_s> *ops, vn_reference_op_t mem_op = &(*ops)[i - 1]; gimple def_stmt; enum tree_code code; - double_int off; + addr_wide_int off; def_stmt = SSA_NAME_DEF_STMT (op->op0); if (!is_gimple_assign (def_stmt)) @@ -1181,8 +1181,8 @@ vn_reference_maybe_forwprop_address (vec<vn_reference_op_s> *ops, && code != POINTER_PLUS_EXPR) return; - off = tree_to_double_int (mem_op->op0); - off = off.sext (TYPE_PRECISION (TREE_TYPE (mem_op->op0))); + off = addr_wide_int (mem_op->op0) + .sext (TYPE_PRECISION (TREE_TYPE (mem_op->op0))); /* The only thing we have to do is from &OBJ.foo.bar add the offset from .foo.bar to the preceding MEM_REF offset and replace the @@ -1199,7 +1199,7 @@ vn_reference_maybe_forwprop_address 
(vec<vn_reference_op_s> *ops, || TREE_CODE (addr_base) != MEM_REF) return; - off += double_int::from_shwi (addr_offset); + off += addr_offset; off += mem_ref_offset (addr_base); op->op0 = TREE_OPERAND (addr_base, 0); } @@ -1212,13 +1212,13 @@ vn_reference_maybe_forwprop_address (vec<vn_reference_op_s> *ops, || TREE_CODE (ptroff) != INTEGER_CST) return; - off += tree_to_double_int (ptroff); + off += ptroff; op->op0 = ptr; } - mem_op->op0 = double_int_to_tree (TREE_TYPE (mem_op->op0), off); - if (host_integerp (mem_op->op0, 0)) - mem_op->off = TREE_INT_CST_LOW (mem_op->op0); + mem_op->op0 = wide_int_to_tree (TREE_TYPE (mem_op->op0), off); + if (tree_fits_shwi_p (mem_op->op0)) + mem_op->off = tree_to_shwi (mem_op->op0); else mem_op->off = -1; if (TREE_CODE (op->op0) == SSA_NAME) @@ -1298,7 +1298,7 @@ fully_constant_vn_reference_p (vn_reference_t ref) && compare_tree_int (op->op0, TREE_STRING_LENGTH (arg0->op0)) < 0) return build_int_cst_type (op->type, (TREE_STRING_POINTER (arg0->op0) - [TREE_INT_CST_LOW (op->op0)])); + [tree_to_hwi (op->op0)])); } return NULL_TREE; @@ -1370,11 +1370,11 @@ valueize_refs_1 (vec<vn_reference_op_s> orig, bool *valueized_anything) && TREE_CODE (vro->op1) == INTEGER_CST && TREE_CODE (vro->op2) == INTEGER_CST) { - double_int off = tree_to_double_int (vro->op0); - off += -tree_to_double_int (vro->op1); - off *= tree_to_double_int (vro->op2); - if (off.fits_shwi ()) - vro->off = off.low; + addr_wide_int off = vro->op0; + off += -addr_wide_int (vro->op1); + off *= addr_wide_int (vro->op2); + if (off.fits_shwi_p ()) + vro->off = off.to_shwi (); } } @@ -1582,16 +1582,16 @@ vn_reference_lookup_3 (ao_ref *ref, tree vuse, void *vr_) if (is_gimple_reg_type (vr->type) && gimple_call_builtin_p (def_stmt, BUILT_IN_MEMSET) && integer_zerop (gimple_call_arg (def_stmt, 1)) - && host_integerp (gimple_call_arg (def_stmt, 2), 1) + && tree_fits_uhwi_p (gimple_call_arg (def_stmt, 2)) && TREE_CODE (gimple_call_arg (def_stmt, 0)) == ADDR_EXPR) { tree ref2 = TREE_OPERAND (gimple_call_arg (def_stmt, 0), 0); tree base2; HOST_WIDE_INT offset2, size2, maxsize2; base2 = get_ref_base_and_extent (ref2, &offset2, &size2, &maxsize2); - size2 = TREE_INT_CST_LOW (gimple_call_arg (def_stmt, 2)) * 8; + size2 = tree_to_uhwi (gimple_call_arg (def_stmt, 2)) * 8; if ((unsigned HOST_WIDE_INT)size2 / 8 - == TREE_INT_CST_LOW (gimple_call_arg (def_stmt, 2)) + == tree_to_uhwi (gimple_call_arg (def_stmt, 2)) && maxsize2 != -1 && operand_equal_p (base, base2, 0) && offset2 <= offset @@ -1694,7 +1694,7 @@ vn_reference_lookup_3 (ao_ref *ref, tree vuse, void *vr_) { tree val = NULL_TREE; HOST_WIDE_INT elsz - = TREE_INT_CST_LOW (TYPE_SIZE (TREE_TYPE (TREE_TYPE (rhs1)))); + = tree_to_hwi (TYPE_SIZE (TREE_TYPE (TREE_TYPE (rhs1)))); if (gimple_assign_rhs_code (def_stmt2) == COMPLEX_EXPR) { if (off == 0) @@ -1831,7 +1831,7 @@ vn_reference_lookup_3 (ao_ref *ref, tree vuse, void *vr_) || TREE_CODE (gimple_call_arg (def_stmt, 0)) == SSA_NAME) && (TREE_CODE (gimple_call_arg (def_stmt, 1)) == ADDR_EXPR || TREE_CODE (gimple_call_arg (def_stmt, 1)) == SSA_NAME) - && host_integerp (gimple_call_arg (def_stmt, 2), 1)) + && tree_fits_uhwi_p (gimple_call_arg (def_stmt, 2))) { tree lhs, rhs; ao_ref r; @@ -1858,10 +1858,10 @@ vn_reference_lookup_3 (ao_ref *ref, tree vuse, void *vr_) if (!tem) return (void *)-1; if (TREE_CODE (tem) == MEM_REF - && host_integerp (TREE_OPERAND (tem, 1), 1)) + && tree_fits_uhwi_p (TREE_OPERAND (tem, 1))) { lhs = TREE_OPERAND (tem, 0); - lhs_offset += TREE_INT_CST_LOW (TREE_OPERAND (tem, 1)); + 
lhs_offset += tree_to_uhwi (TREE_OPERAND (tem, 1)); } else if (DECL_P (tem)) lhs = build_fold_addr_expr (tem); @@ -1884,10 +1884,10 @@ vn_reference_lookup_3 (ao_ref *ref, tree vuse, void *vr_) if (!tem) return (void *)-1; if (TREE_CODE (tem) == MEM_REF - && host_integerp (TREE_OPERAND (tem, 1), 1)) + && tree_fits_uhwi_p (TREE_OPERAND (tem, 1))) { rhs = TREE_OPERAND (tem, 0); - rhs_offset += TREE_INT_CST_LOW (TREE_OPERAND (tem, 1)); + rhs_offset += tree_to_uhwi (TREE_OPERAND (tem, 1)); } else if (DECL_P (tem)) rhs = build_fold_addr_expr (tem); @@ -1898,14 +1898,14 @@ vn_reference_lookup_3 (ao_ref *ref, tree vuse, void *vr_) && TREE_CODE (rhs) != ADDR_EXPR) return (void *)-1; - copy_size = TREE_INT_CST_LOW (gimple_call_arg (def_stmt, 2)); + copy_size = tree_to_hwi (gimple_call_arg (def_stmt, 2)); /* The bases of the destination and the references have to agree. */ if ((TREE_CODE (base) != MEM_REF && !DECL_P (base)) || (TREE_CODE (base) == MEM_REF && (TREE_OPERAND (base, 0) != lhs - || !host_integerp (TREE_OPERAND (base, 1), 1))) + || !tree_fits_uhwi_p (TREE_OPERAND (base, 1)))) || (DECL_P (base) && (TREE_CODE (lhs) != ADDR_EXPR || TREE_OPERAND (lhs, 0) != base))) @@ -1914,7 +1914,7 @@ vn_reference_lookup_3 (ao_ref *ref, tree vuse, void *vr_) /* And the access has to be contained within the memcpy destination. */ at = offset / BITS_PER_UNIT; if (TREE_CODE (base) == MEM_REF) - at += TREE_INT_CST_LOW (TREE_OPERAND (base, 1)); + at += tree_to_hwi (TREE_OPERAND (base, 1)); if (lhs_offset > at || lhs_offset + copy_size < at + maxsize / BITS_PER_UNIT) return (void *)-1; @@ -3217,12 +3217,12 @@ simplify_binary_expression (gimple stmt) /* Pointer plus constant can be represented as invariant address. Do so to allow further propatation, see also tree forwprop. */ if (code == POINTER_PLUS_EXPR - && host_integerp (op1, 1) + && tree_fits_uhwi_p (op1) && TREE_CODE (op0) == ADDR_EXPR && is_gimple_min_invariant (op0)) return build_invariant_address (TREE_TYPE (op0), TREE_OPERAND (op0, 0), - TREE_INT_CST_LOW (op1)); + tree_to_uhwi (op1)); /* Avoid folding if nothing changed. */ if (op0 == gimple_assign_rhs1 (stmt) diff --git a/gcc/tree-ssa-strlen.c b/gcc/tree-ssa-strlen.c index 75d27f1989b..1f16f78f367 100644 --- a/gcc/tree-ssa-strlen.c +++ b/gcc/tree-ssa-strlen.c @@ -205,10 +205,10 @@ get_stridx (tree exp) s = string_constant (exp, &o); if (s != NULL_TREE - && (o == NULL_TREE || host_integerp (o, 0)) + && (o == NULL_TREE || tree_fits_shwi_p (o)) && TREE_STRING_LENGTH (s) > 0) { - HOST_WIDE_INT offset = o ? tree_low_cst (o, 0) : 0; + HOST_WIDE_INT offset = o ? tree_to_shwi (o) : 0; const char *p = TREE_STRING_POINTER (s); int max = TREE_STRING_LENGTH (s) - 1; @@ -836,16 +836,16 @@ adjust_last_stmt (strinfo si, gimple stmt, bool is_strcat) } len = gimple_call_arg (last.stmt, 2); - if (host_integerp (len, 1)) + if (tree_fits_uhwi_p (len)) { - if (!host_integerp (last.len, 1) + if (!tree_fits_uhwi_p (last.len) || integer_zerop (len) - || (unsigned HOST_WIDE_INT) tree_low_cst (len, 1) - != (unsigned HOST_WIDE_INT) tree_low_cst (last.len, 1) + 1) + || (unsigned HOST_WIDE_INT) tree_to_uhwi (len) + != (unsigned HOST_WIDE_INT) tree_to_uhwi (last.len) + 1) return; /* Don't adjust the length if it is divisible by 4, it is more efficient to store the extra '\0' in that case. 
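   (A worked example of the check below: a length of 8 gives
   (8 & 3) == 0, so the earlier statement is left alone and the extra
   '\0' becomes a cheap aligned store; a length of 7 gives (7 & 3) == 3,
   and the earlier statement is adjusted instead.)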
*/ - if ((((unsigned HOST_WIDE_INT) tree_low_cst (len, 1)) & 3) == 0) + if ((((unsigned HOST_WIDE_INT) tree_to_uhwi (len)) & 3) == 0) return; } else if (TREE_CODE (len) == SSA_NAME) @@ -1300,7 +1300,7 @@ handle_builtin_memcpy (enum built_in_function bcode, gimple_stmt_iterator *gsi) return; if (olddsi != NULL - && host_integerp (len, 1) + && tree_fits_uhwi_p (len) && !integer_zerop (len)) adjust_last_stmt (olddsi, stmt, false); @@ -1326,8 +1326,8 @@ handle_builtin_memcpy (enum built_in_function bcode, gimple_stmt_iterator *gsi) si = NULL; /* Handle memcpy (x, "abcd", 5) or memcpy (x, "abc\0uvw", 7). */ - if (!host_integerp (len, 1) - || (unsigned HOST_WIDE_INT) tree_low_cst (len, 1) + if (!tree_fits_uhwi_p (len) + || (unsigned HOST_WIDE_INT) tree_to_uhwi (len) <= (unsigned HOST_WIDE_INT) ~idx) return; } @@ -1616,11 +1616,11 @@ handle_pointer_plus (gimple_stmt_iterator *gsi) if (idx < 0) { tree off = gimple_assign_rhs2 (stmt); - if (host_integerp (off, 1) - && (unsigned HOST_WIDE_INT) tree_low_cst (off, 1) + if (tree_fits_uhwi_p (off) + && (unsigned HOST_WIDE_INT) tree_to_uhwi (off) <= (unsigned HOST_WIDE_INT) ~idx) ssa_ver_to_stridx[SSA_NAME_VERSION (lhs)] - = ~(~idx - (int) tree_low_cst (off, 1)); + = ~(~idx - (int) tree_to_uhwi (off)); return; } diff --git a/gcc/tree-ssa-structalias.c b/gcc/tree-ssa-structalias.c index ff6babb14a3..3a475869e00 100644 --- a/gcc/tree-ssa-structalias.c +++ b/gcc/tree-ssa-structalias.c @@ -2973,12 +2973,12 @@ process_constraint (constraint_t t) static HOST_WIDE_INT bitpos_of_field (const tree fdecl) { - if (!host_integerp (DECL_FIELD_OFFSET (fdecl), 0) - || !host_integerp (DECL_FIELD_BIT_OFFSET (fdecl), 0)) + if (!tree_fits_shwi_p (DECL_FIELD_OFFSET (fdecl)) + || !tree_fits_shwi_p (DECL_FIELD_BIT_OFFSET (fdecl))) return -1; - return (TREE_INT_CST_LOW (DECL_FIELD_OFFSET (fdecl)) * BITS_PER_UNIT - + TREE_INT_CST_LOW (DECL_FIELD_BIT_OFFSET (fdecl))); + return (tree_to_shwi (DECL_FIELD_OFFSET (fdecl)) * BITS_PER_UNIT + + tree_to_shwi (DECL_FIELD_BIT_OFFSET (fdecl))); } @@ -3011,14 +3011,14 @@ get_constraint_for_ptr_offset (tree ptr, tree offset, else { /* Sign-extend the offset. */ - double_int soffset = tree_to_double_int (offset) - .sext (TYPE_PRECISION (TREE_TYPE (offset))); - if (!soffset.fits_shwi ()) + addr_wide_int soffset = addr_wide_int (offset) + .sext (TYPE_PRECISION (TREE_TYPE (offset))); + if (!soffset.fits_shwi_p ()) rhsoffset = UNKNOWN_OFFSET; else { /* Make sure the bit-offset also fits. */ - HOST_WIDE_INT rhsunitoffset = soffset.low; + HOST_WIDE_INT rhsunitoffset = soffset.to_shwi (); rhsoffset = rhsunitoffset * BITS_PER_UNIT; if (rhsunitoffset != rhsoffset / BITS_PER_UNIT) rhsoffset = UNKNOWN_OFFSET; @@ -3408,8 +3408,8 @@ get_constraint_for_1 (tree t, vec<ce_s> *results, bool address_p, && curr) { unsigned HOST_WIDE_INT size; - if (host_integerp (TYPE_SIZE (TREE_TYPE (t)), 1)) - size = TREE_INT_CST_LOW (TYPE_SIZE (TREE_TYPE (t))); + if (tree_fits_uhwi_p (TYPE_SIZE (TREE_TYPE (t)))) + size = tree_to_uhwi (TYPE_SIZE (TREE_TYPE (t))); else size = -1; for (; curr; curr = vi_next (curr)) @@ -5327,7 +5327,7 @@ push_fields_onto_fieldstack (tree type, vec<fieldoff_s> *fieldstack, } if (!DECL_SIZE (field) - || !host_integerp (DECL_SIZE (field), 1)) + || !tree_fits_uhwi_p (DECL_SIZE (field))) has_unknown_size = true; /* If adjacent fields do not contain pointers merge them. 
*/ @@ -5339,7 +5339,7 @@ push_fields_onto_fieldstack (tree type, vec<fieldoff_s> *fieldstack, && !pair->has_unknown_size && pair->offset + (HOST_WIDE_INT)pair->size == offset + foff) { - pair->size += TREE_INT_CST_LOW (DECL_SIZE (field)); + pair->size += tree_to_hwi (DECL_SIZE (field)); } else { @@ -5347,7 +5347,7 @@ push_fields_onto_fieldstack (tree type, vec<fieldoff_s> *fieldstack, e.offset = offset + foff; e.has_unknown_size = has_unknown_size; if (!has_unknown_size) - e.size = TREE_INT_CST_LOW (DECL_SIZE (field)); + e.size = tree_to_hwi (DECL_SIZE (field)); else e.size = -1; e.must_have_pointers = must_have_pointers_p; @@ -5603,7 +5603,7 @@ create_variable_info_for_1 (tree decl, const char *name) unsigned int i; if (!declsize - || !host_integerp (declsize, 1)) + || !tree_fits_uhwi_p (declsize)) { vi = new_var_info (decl, name); vi->offset = 0; @@ -5664,7 +5664,7 @@ create_variable_info_for_1 (tree decl, const char *name) vi = new_var_info (decl, name); vi->offset = 0; vi->may_have_pointers = true; - vi->fullsize = TREE_INT_CST_LOW (declsize); + vi->fullsize = tree_to_hwi (declsize); vi->size = vi->fullsize; vi->is_full_var = true; fieldstack.release (); @@ -5672,7 +5672,7 @@ create_variable_info_for_1 (tree decl, const char *name) } vi = new_var_info (decl, name); - vi->fullsize = TREE_INT_CST_LOW (declsize); + vi->fullsize = tree_to_hwi (declsize); for (i = 0, newvi = vi; fieldstack.iterate (i, &fo); ++i, newvi = vi_next (newvi)) diff --git a/gcc/tree-ssa.c b/gcc/tree-ssa.c index 83a265c43ac..aa3bff6699f 100644 --- a/gcc/tree-ssa.c +++ b/gcc/tree-ssa.c @@ -1828,9 +1828,9 @@ non_rewritable_mem_ref_base (tree ref) || TREE_CODE (TREE_TYPE (decl)) == COMPLEX_TYPE) && useless_type_conversion_p (TREE_TYPE (base), TREE_TYPE (TREE_TYPE (decl))) - && mem_ref_offset (base).fits_uhwi () - && tree_to_double_int (TYPE_SIZE_UNIT (TREE_TYPE (decl))) - .ugt (mem_ref_offset (base)) + && mem_ref_offset (base).fits_uhwi_p () + && addr_wide_int (TYPE_SIZE_UNIT (TREE_TYPE (decl))) + .gtu_p (mem_ref_offset (base)) && multiple_of_p (sizetype, TREE_OPERAND (base, 1), TYPE_SIZE_UNIT (TREE_TYPE (base)))) return NULL_TREE; diff --git a/gcc/tree-stdarg.c b/gcc/tree-stdarg.c index 50592c03a36..c7060457a2b 100644 --- a/gcc/tree-stdarg.c +++ b/gcc/tree-stdarg.c @@ -164,9 +164,9 @@ va_list_counter_bump (struct stdarg_info *si, tree counter, tree rhs, if ((rhs_code == POINTER_PLUS_EXPR || rhs_code == PLUS_EXPR) && TREE_CODE (rhs1) == SSA_NAME - && host_integerp (gimple_assign_rhs2 (stmt), 1)) + && tree_fits_uhwi_p (gimple_assign_rhs2 (stmt))) { - ret += tree_low_cst (gimple_assign_rhs2 (stmt), 1); + ret += tree_to_uhwi (gimple_assign_rhs2 (stmt)); lhs = rhs1; continue; } @@ -174,9 +174,9 @@ va_list_counter_bump (struct stdarg_info *si, tree counter, tree rhs, if (rhs_code == ADDR_EXPR && TREE_CODE (TREE_OPERAND (rhs1, 0)) == MEM_REF && TREE_CODE (TREE_OPERAND (TREE_OPERAND (rhs1, 0), 0)) == SSA_NAME - && host_integerp (TREE_OPERAND (TREE_OPERAND (rhs1, 0), 1), 1)) + && tree_fits_uhwi_p (TREE_OPERAND (TREE_OPERAND (rhs1, 0), 1))) { - ret += tree_low_cst (TREE_OPERAND (TREE_OPERAND (rhs1, 0), 1), 1); + ret += tree_to_uhwi (TREE_OPERAND (TREE_OPERAND (rhs1, 0), 1)); lhs = TREE_OPERAND (TREE_OPERAND (rhs1, 0), 0); continue; } @@ -231,9 +231,9 @@ va_list_counter_bump (struct stdarg_info *si, tree counter, tree rhs, if ((rhs_code == POINTER_PLUS_EXPR || rhs_code == PLUS_EXPR) && TREE_CODE (rhs1) == SSA_NAME - && host_integerp (gimple_assign_rhs2 (stmt), 1)) + && tree_fits_uhwi_p (gimple_assign_rhs2 (stmt))) { - val -= 
tree_low_cst (gimple_assign_rhs2 (stmt), 1); + val -= tree_to_uhwi (gimple_assign_rhs2 (stmt)); lhs = rhs1; continue; } @@ -241,9 +241,9 @@ va_list_counter_bump (struct stdarg_info *si, tree counter, tree rhs, if (rhs_code == ADDR_EXPR && TREE_CODE (TREE_OPERAND (rhs1, 0)) == MEM_REF && TREE_CODE (TREE_OPERAND (TREE_OPERAND (rhs1, 0), 0)) == SSA_NAME - && host_integerp (TREE_OPERAND (TREE_OPERAND (rhs1, 0), 1), 1)) + && tree_fits_uhwi_p (TREE_OPERAND (TREE_OPERAND (rhs1, 0), 1))) { - val -= tree_low_cst (TREE_OPERAND (TREE_OPERAND (rhs1, 0), 1), 1); + val -= tree_to_uhwi (TREE_OPERAND (TREE_OPERAND (rhs1, 0), 1)); lhs = TREE_OPERAND (TREE_OPERAND (rhs1, 0), 0); continue; } @@ -581,15 +581,15 @@ check_all_va_list_escapes (struct stdarg_info *si) if (rhs_code == MEM_REF && TREE_OPERAND (rhs, 0) == use && TYPE_SIZE_UNIT (TREE_TYPE (rhs)) - && host_integerp (TYPE_SIZE_UNIT (TREE_TYPE (rhs)), 1) + && tree_fits_uhwi_p (TYPE_SIZE_UNIT (TREE_TYPE (rhs))) && si->offsets[SSA_NAME_VERSION (use)] != -1) { unsigned HOST_WIDE_INT gpr_size; tree access_size = TYPE_SIZE_UNIT (TREE_TYPE (rhs)); gpr_size = si->offsets[SSA_NAME_VERSION (use)] - + tree_low_cst (TREE_OPERAND (rhs, 1), 0) - + tree_low_cst (access_size, 1); + + tree_to_shwi (TREE_OPERAND (rhs, 1)) + + tree_to_uhwi (access_size); if (gpr_size >= VA_LIST_MAX_GPR_SIZE) cfun->va_list_gpr_size = VA_LIST_MAX_GPR_SIZE; else if (gpr_size > cfun->va_list_gpr_size) diff --git a/gcc/tree-streamer-in.c b/gcc/tree-streamer-in.c index 9efd0991041..85c909965df 100644 --- a/gcc/tree-streamer-in.c +++ b/gcc/tree-streamer-in.c @@ -146,8 +146,9 @@ unpack_ts_base_value_fields (struct bitpack_d *bp, tree expr) static void unpack_ts_int_cst_value_fields (struct bitpack_d *bp, tree expr) { - TREE_INT_CST_LOW (expr) = bp_unpack_var_len_unsigned (bp); - TREE_INT_CST_HIGH (expr) = bp_unpack_var_len_int (bp); + int i; + for (i = 0; i < TREE_INT_CST_NUNITS (expr); i++) + TREE_INT_CST_ELT (expr, i) = bp_unpack_var_len_int (bp); } @@ -561,6 +562,12 @@ streamer_alloc_tree (struct lto_input_block *ib, struct data_in *data_in, unsigned HOST_WIDE_INT len = streamer_read_uhwi (ib); result = make_tree_binfo (len); } + else if (CODE_CONTAINS_STRUCT (code, TS_INT_CST)) + { + unsigned HOST_WIDE_INT len = streamer_read_uhwi (ib); + result = make_int_cst (len); + TREE_INT_CST_NUNITS (result) = len; + } else if (code == CALL_EXPR) { unsigned HOST_WIDE_INT nargs = streamer_read_uhwi (ib); diff --git a/gcc/tree-streamer-out.c b/gcc/tree-streamer-out.c index 692a46aae85..e6ddc7566ab 100644 --- a/gcc/tree-streamer-out.c +++ b/gcc/tree-streamer-out.c @@ -117,10 +117,14 @@ pack_ts_base_value_fields (struct bitpack_d *bp, tree expr) expression EXPR into bitpack BP. */ static void -pack_ts_int_cst_value_fields (struct bitpack_d *bp, tree expr) +pack_ts_int_cst_value_fields (struct bitpack_d *bp, + tree expr ATTRIBUTE_UNUSED) { - bp_pack_var_len_unsigned (bp, TREE_INT_CST_LOW (expr)); - bp_pack_var_len_int (bp, TREE_INT_CST_HIGH (expr)); + int i; + /* Note that the number of elements has already been written out in + streamer_write_tree_header. 
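   As a sketch, the bitpack layout for the one-element constant 42 is
   therefore:

      streamer_write_tree_header:   uhwi 1          -- TREE_INT_CST_NUNITS
      pack_ts_int_cst_value_fields: var-len int 42  -- element 0

   and the reader, unpack_ts_int_cst_value_fields in tree-streamer-in.c,
   loops over the same count.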
*/ + for (i = 0; i < TREE_INT_CST_NUNITS (expr); i++) + bp_pack_var_len_int (bp, TREE_INT_CST_ELT (expr, i)); } @@ -949,6 +953,11 @@ streamer_write_tree_header (struct output_block *ob, tree expr) streamer_write_uhwi (ob, BINFO_N_BASE_BINFOS (expr)); else if (TREE_CODE (expr) == CALL_EXPR) streamer_write_uhwi (ob, call_expr_nargs (expr)); + else if (CODE_CONTAINS_STRUCT (code, TS_INT_CST)) + { + gcc_assert (TREE_INT_CST_NUNITS (expr)); + streamer_write_uhwi (ob, TREE_INT_CST_NUNITS (expr)); + } } @@ -958,9 +967,12 @@ streamer_write_tree_header (struct output_block *ob, tree expr) void streamer_write_integer_cst (struct output_block *ob, tree cst, bool ref_p) { + int i; + int len = TREE_INT_CST_NUNITS (cst); gcc_assert (!TREE_OVERFLOW (cst)); streamer_write_record_start (ob, LTO_integer_cst); stream_write_tree (ob, TREE_TYPE (cst), ref_p); - streamer_write_uhwi (ob, TREE_INT_CST_LOW (cst)); - streamer_write_hwi (ob, TREE_INT_CST_HIGH (cst)); + streamer_write_uhwi (ob, len); + for (i = 0; i < len; i++) + streamer_write_hwi (ob, TREE_INT_CST_ELT (cst, i)); } diff --git a/gcc/tree-switch-conversion.c b/gcc/tree-switch-conversion.c index 0d128981e38..49eb07fc0f2 100644 --- a/gcc/tree-switch-conversion.c +++ b/gcc/tree-switch-conversion.c @@ -348,15 +348,13 @@ emit_case_bit_tests (gimple swtch, tree index_expr, else test[k].bits++; - lo = tree_low_cst (int_const_binop (MINUS_EXPR, - CASE_LOW (cs), minval), - 1); + lo = tree_to_uhwi (int_const_binop (MINUS_EXPR, + CASE_LOW (cs), minval)); if (CASE_HIGH (cs) == NULL_TREE) hi = lo; else - hi = tree_low_cst (int_const_binop (MINUS_EXPR, - CASE_HIGH (cs), minval), - 1); + hi = tree_to_uhwi (int_const_binop (MINUS_EXPR, + CASE_HIGH (cs), minval)); for (j = lo; j <= hi; j++) if (j >= HOST_BITS_PER_WIDE_INT) @@ -438,7 +436,13 @@ emit_case_bit_tests (gimple swtch, tree index_expr, if (const & csui) goto target */ for (k = 0; k < count; k++) { - tmp = build_int_cst_wide (word_type_node, test[k].lo, test[k].hi); + HOST_WIDE_INT a[2]; + + a[0] = test[k].lo; + a[1] = test[k].hi; + tmp = wide_int_to_tree (word_type_node, + wide_int::from_array (a, 2, + TYPE_PRECISION (word_type_node))); tmp = fold_build2 (BIT_AND_EXPR, word_type_node, csui, tmp); tmp = force_gimple_operand_gsi (&gsi, tmp, /*simple=*/true, NULL_TREE, @@ -691,13 +695,13 @@ static bool check_range (struct switch_conv_info *info) { gcc_assert (info->range_size); - if (!host_integerp (info->range_size, 1)) + if (!tree_fits_uhwi_p (info->range_size)) { info->reason = "index range way too large or otherwise unusable"; return false; } - if ((unsigned HOST_WIDE_INT) tree_low_cst (info->range_size, 1) + if ((unsigned HOST_WIDE_INT) tree_to_uhwi (info->range_size) > ((unsigned) info->count * SWITCH_CONVERSION_BRANCH_RATIO)) { info->reason = "the maximum range-branch ratio exceeded"; @@ -799,7 +803,7 @@ create_temp_arrays (struct switch_conv_info *info) info->target_inbound_names = info->default_values + info->phi_count; info->target_outbound_names = info->target_inbound_names + info->phi_count; for (i = 0; i < info->phi_count; i++) - vec_alloc (info->constructors[i], tree_low_cst (info->range_size, 1) + 1); + vec_alloc (info->constructors[i], tree_to_uhwi (info->range_size) + 1); } /* Free the arrays created by create_temp_arrays(). 
The vectors that are @@ -878,7 +882,7 @@ build_constructors (gimple swtch, struct switch_conv_info *info) info->constructors[k]->quick_push (elt); } - pos = int_const_binop (PLUS_EXPR, pos, integer_one_node); + pos = int_const_binop (PLUS_EXPR, pos, build_int_cst (TREE_TYPE (pos), 1)); } gcc_assert (tree_int_cst_equal (pos, CASE_LOW (cs))); @@ -903,7 +907,7 @@ build_constructors (gimple swtch, struct switch_conv_info *info) elt.value = unshare_expr_without_location (val); info->constructors[j]->quick_push (elt); - pos = int_const_binop (PLUS_EXPR, pos, integer_one_node); + pos = int_const_binop (PLUS_EXPR, pos, build_int_cst (TREE_TYPE (pos), 1)); } while (!tree_int_cst_lt (high, pos) && tree_int_cst_lt (low, pos)); j++; @@ -958,12 +962,12 @@ array_value_type (gimple swtch, tree type, int num, FOR_EACH_VEC_SAFE_ELT (info->constructors[num], i, elt) { - double_int cst; + wide_int cst; if (TREE_CODE (elt->value) != INTEGER_CST) return type; - cst = TREE_INT_CST (elt->value); + cst = elt->value; while (1) { unsigned int prec = GET_MODE_BITSIZE (mode); diff --git a/gcc/tree-vect-data-refs.c b/gcc/tree-vect-data-refs.c index 4e99747b73c..acd7e107d7e 100644 --- a/gcc/tree-vect-data-refs.c +++ b/gcc/tree-vect-data-refs.c @@ -108,7 +108,7 @@ vect_get_smallest_scalar_type (gimple stmt, HOST_WIDE_INT *lhs_size_unit, tree scalar_type = gimple_expr_type (stmt); HOST_WIDE_INT lhs, rhs; - lhs = rhs = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (scalar_type)); + lhs = rhs = tree_to_hwi (TYPE_SIZE_UNIT (scalar_type)); if (is_gimple_assign (stmt) && (gimple_assign_cast_p (stmt) @@ -118,7 +118,7 @@ vect_get_smallest_scalar_type (gimple stmt, HOST_WIDE_INT *lhs_size_unit, { tree rhs_type = TREE_TYPE (gimple_assign_rhs1 (stmt)); - rhs = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (rhs_type)); + rhs = tree_to_hwi (TYPE_SIZE_UNIT (rhs_type)); if (rhs < lhs) scalar_type = rhs_type; } @@ -541,16 +541,16 @@ vect_slp_analyze_data_ref_dependence (struct data_dependence_relation *ddr) return true; /* Check the types. */ - type_size_a = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (dra)))); - type_size_b = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (drb)))); + type_size_a = tree_to_hwi (TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (dra)))); + type_size_b = tree_to_hwi (TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (drb)))); if (type_size_a != type_size_b || !types_compatible_p (TREE_TYPE (DR_REF (dra)), TREE_TYPE (DR_REF (drb)))) return true; - init_a = TREE_INT_CST_LOW (DR_INIT (dra)); - init_b = TREE_INT_CST_LOW (DR_INIT (drb)); + init_a = tree_to_hwi (DR_INIT (dra)); + init_b = tree_to_hwi (DR_INIT (drb)); /* Two different locations - no dependence. */ if (init_a != init_b) @@ -653,7 +653,7 @@ vect_compute_data_ref_alignment (struct data_reference *dr) if (loop && nested_in_vect_loop_p (loop, stmt)) { tree step = DR_STEP (dr); - HOST_WIDE_INT dr_step = TREE_INT_CST_LOW (step); + HOST_WIDE_INT dr_step = tree_to_hwi (step); if (dr_step % GET_MODE_SIZE (TYPE_MODE (vectype)) == 0) { @@ -681,7 +681,7 @@ vect_compute_data_ref_alignment (struct data_reference *dr) if (!loop) { tree step = DR_STEP (dr); - HOST_WIDE_INT dr_step = TREE_INT_CST_LOW (step); + HOST_WIDE_INT dr_step = tree_to_hwi (step); if (dr_step % GET_MODE_SIZE (TYPE_MODE (vectype)) != 0) { @@ -771,7 +771,7 @@ vect_compute_data_ref_alignment (struct data_reference *dr) /* Modulo alignment. */ misalign = size_binop (FLOOR_MOD_EXPR, misalign, alignment); - if (!host_integerp (misalign, 1)) + if (!tree_fits_uhwi_p (misalign)) { /* Negative or overflowed misalignment value. 
*/ if (dump_enabled_p ()) @@ -780,7 +780,7 @@ vect_compute_data_ref_alignment (struct data_reference *dr) return false; } - SET_DR_MISALIGNMENT (dr, TREE_INT_CST_LOW (misalign)); + SET_DR_MISALIGNMENT (dr, tree_to_hwi (misalign)); if (dump_enabled_p ()) { @@ -957,10 +957,10 @@ vect_verify_datarefs_alignment (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo) static bool not_size_aligned (tree exp) { - if (!host_integerp (TYPE_SIZE (TREE_TYPE (exp)), 1)) + if (!tree_fits_uhwi_p (TYPE_SIZE (TREE_TYPE (exp)))) return true; - return (TREE_INT_CST_LOW (TYPE_SIZE (TREE_TYPE (exp))) + return (tree_to_uhwi (TYPE_SIZE (TREE_TYPE (exp))) > get_object_alignment (exp)); } @@ -1979,12 +1979,12 @@ vect_analyze_group_access (struct data_reference *dr) { tree step = DR_STEP (dr); tree scalar_type = TREE_TYPE (DR_REF (dr)); - HOST_WIDE_INT type_size = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (scalar_type)); + HOST_WIDE_INT type_size = tree_to_hwi (TYPE_SIZE_UNIT (scalar_type)); gimple stmt = DR_STMT (dr); stmt_vec_info stmt_info = vinfo_for_stmt (stmt); loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info); bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info); - HOST_WIDE_INT dr_step = TREE_INT_CST_LOW (step); + HOST_WIDE_INT dr_step = tree_to_hwi (step); HOST_WIDE_INT groupsize, last_accessed_element = 1; bool slp_impossible = false; struct loop *loop = NULL; @@ -2103,8 +2103,8 @@ vect_analyze_group_access (struct data_reference *dr) /* Check that the distance between two accesses is equal to the type size. Otherwise, we have gaps. */ - diff = (TREE_INT_CST_LOW (DR_INIT (data_ref)) - - TREE_INT_CST_LOW (prev_init)) / type_size; + diff = (tree_to_hwi (DR_INIT (data_ref)) + - tree_to_hwi (prev_init)) / type_size; if (diff != 1) { /* FORNOW: SLP of accesses with gaps is not supported. */ @@ -2284,7 +2284,7 @@ vect_analyze_data_ref_access (struct data_reference *dr) /* Consecutive? */ if (TREE_CODE (step) == INTEGER_CST) { - HOST_WIDE_INT dr_step = TREE_INT_CST_LOW (step); + HOST_WIDE_INT dr_step = tree_to_hwi (step); if (!tree_int_cst_compare (step, TYPE_SIZE_UNIT (scalar_type)) || (dr_step < 0 && !compare_tree_int (TYPE_SIZE_UNIT (scalar_type), -dr_step))) @@ -2506,11 +2506,11 @@ vect_analyze_data_ref_accesses (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo) /* Check that the data-refs have the same constant size and step. */ tree sza = TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (dra))); tree szb = TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (drb))); - if (!host_integerp (sza, 1) - || !host_integerp (szb, 1) + if (!tree_fits_uhwi_p (sza) + || !tree_fits_uhwi_p (szb) || !tree_int_cst_equal (sza, szb) - || !host_integerp (DR_STEP (dra), 0) - || !host_integerp (DR_STEP (drb), 0) + || !tree_fits_shwi_p (DR_STEP (dra)) + || !tree_fits_shwi_p (DR_STEP (drb)) || !tree_int_cst_equal (DR_STEP (dra), DR_STEP (drb))) break; @@ -2525,19 +2525,19 @@ vect_analyze_data_ref_accesses (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo) break; /* Sorting has ensured that DR_INIT (dra) <= DR_INIT (drb). */ - HOST_WIDE_INT init_a = TREE_INT_CST_LOW (DR_INIT (dra)); - HOST_WIDE_INT init_b = TREE_INT_CST_LOW (DR_INIT (drb)); + HOST_WIDE_INT init_a = tree_to_hwi (DR_INIT (dra)); + HOST_WIDE_INT init_b = tree_to_hwi (DR_INIT (drb)); gcc_assert (init_a < init_b); /* If init_b == init_a + the size of the type * k, we have an interleaving, and DRA is accessed before DRB. 
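   A worked example of the test below: with 4-byte elements, init_a == 0
   and init_b == 8 give (init_b - init_a) % type_size_a == 0, an
   interleaving with k == 2, while init_b == 6 leaves remainder 2 and
   the two refs cannot be grouped.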
*/ - HOST_WIDE_INT type_size_a = TREE_INT_CST_LOW (sza); + HOST_WIDE_INT type_size_a = tree_to_hwi (sza); if ((init_b - init_a) % type_size_a != 0) break; /* The step (if not zero) is greater than the difference between data-refs' inits. This splits groups into suitable sizes. */ - HOST_WIDE_INT step = TREE_INT_CST_LOW (DR_STEP (dra)); + HOST_WIDE_INT step = tree_to_hwi (DR_STEP (dra)); if (step != 0 && step <= (init_b - init_a)) break; @@ -2695,8 +2695,8 @@ vect_check_gather (gimple stmt, loop_vec_info loop_vinfo, tree *basep, { if (off == NULL_TREE) { - double_int moff = mem_ref_offset (base); - off = double_int_to_tree (sizetype, moff); + addr_wide_int moff = mem_ref_offset (base); + off = wide_int_to_tree (sizetype, moff); } else off = size_binop (PLUS_EXPR, off, @@ -2792,9 +2792,9 @@ vect_check_gather (gimple stmt, loop_vec_info loop_vinfo, tree *basep, } break; case MULT_EXPR: - if (scale == 1 && host_integerp (op1, 0)) + if (scale == 1 && tree_fits_shwi_p (op1)) { - scale = tree_low_cst (op1, 0); + scale = tree_to_shwi (op1); off = op0; continue; } @@ -4790,7 +4790,7 @@ vect_supportable_dr_alignment (struct data_reference *dr, { tree vectype = STMT_VINFO_VECTYPE (stmt_info); if ((nested_in_vect_loop - && (TREE_INT_CST_LOW (DR_STEP (dr)) + && (tree_to_hwi (DR_STEP (dr)) != GET_MODE_SIZE (TYPE_MODE (vectype)))) || !loop_vinfo) return dr_explicit_realign; diff --git a/gcc/tree-vect-generic.c b/gcc/tree-vect-generic.c index b2a6944687c..a23ea430d36 100644 --- a/gcc/tree-vect-generic.c +++ b/gcc/tree-vect-generic.c @@ -45,11 +45,11 @@ static void expand_vector_operations_1 (gimple_stmt_iterator *); static tree build_replicated_const (tree type, tree inner_type, HOST_WIDE_INT value) { - int width = tree_low_cst (TYPE_SIZE (inner_type), 1); - int n = HOST_BITS_PER_WIDE_INT / width; - unsigned HOST_WIDE_INT low, high, mask; - tree ret; - + int width = tree_to_uhwi (TYPE_SIZE (inner_type)); + int n = TYPE_PRECISION (type) / width; + unsigned HOST_WIDE_INT low, mask; + HOST_WIDE_INT a[WIDE_INT_MAX_ELTS]; + int i; gcc_assert (n); if (width == HOST_BITS_PER_WIDE_INT) @@ -60,17 +60,11 @@ build_replicated_const (tree type, tree inner_type, HOST_WIDE_INT value) low = (unsigned HOST_WIDE_INT) ~0 / mask * (value & mask); } - if (TYPE_PRECISION (type) < HOST_BITS_PER_WIDE_INT) - low &= ((HOST_WIDE_INT)1 << TYPE_PRECISION (type)) - 1, high = 0; - else if (TYPE_PRECISION (type) == HOST_BITS_PER_WIDE_INT) - high = 0; - else if (TYPE_PRECISION (type) == HOST_BITS_PER_DOUBLE_INT) - high = low; - else - gcc_unreachable (); + for (i = 0; i < n; i++) + a[i] = low; - ret = build_int_cst_wide (type, low, high); - return ret; + return wide_int_to_tree + (type, wide_int::from_array (a, n, TYPE_PRECISION (type), false)); } static GTY(()) tree vector_inner_type; @@ -234,8 +228,8 @@ expand_vector_piecewise (gimple_stmt_iterator *gsi, elem_op_func f, tree part_width = TYPE_SIZE (inner_type); tree index = bitsize_int (0); int nunits = TYPE_VECTOR_SUBPARTS (type); - int delta = tree_low_cst (part_width, 1) - / tree_low_cst (TYPE_SIZE (TREE_TYPE (type)), 1); + int delta = tree_to_uhwi (part_width) + / tree_to_uhwi (TYPE_SIZE (TREE_TYPE (type))); int i; location_t loc = gimple_location (gsi_stmt (*gsi)); @@ -268,7 +262,7 @@ expand_vector_parallel (gimple_stmt_iterator *gsi, elem_op_func f, tree type, { tree result, compute_type; enum machine_mode mode; - int n_words = tree_low_cst (TYPE_SIZE_UNIT (type), 1) / UNITS_PER_WORD; + int n_words = tree_to_uhwi (TYPE_SIZE_UNIT (type)) / UNITS_PER_WORD; location_t loc = 
gimple_location (gsi_stmt (*gsi)); /* We have three strategies. If the type is already correct, just do @@ -291,7 +285,7 @@ expand_vector_parallel (gimple_stmt_iterator *gsi, elem_op_func f, tree type, else { /* Use a single scalar operation with a mode no wider than word_mode. */ - mode = mode_for_size (tree_low_cst (TYPE_SIZE (type), 1), MODE_INT, 0); + mode = mode_for_size (tree_to_uhwi (TYPE_SIZE (type)), MODE_INT, 0); compute_type = lang_hooks.types.type_for_mode (mode, 1); result = f (gsi, compute_type, a, b, NULL_TREE, NULL_TREE, code); warning_at (loc, OPT_Wvector_operation_performance, @@ -313,7 +307,7 @@ expand_vector_addition (gimple_stmt_iterator *gsi, tree type, tree a, tree b, enum tree_code code) { int parts_per_word = UNITS_PER_WORD - / tree_low_cst (TYPE_SIZE_UNIT (TREE_TYPE (type)), 1); + / tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (type))); if (INTEGRAL_TYPE_P (TREE_TYPE (type)) && parts_per_word >= 4 @@ -404,7 +398,8 @@ expand_vector_divmod (gimple_stmt_iterator *gsi, tree type, tree op0, unsigned HOST_WIDE_INT *mulc = XALLOCAVEC (unsigned HOST_WIDE_INT, nunits); int prec = TYPE_PRECISION (TREE_TYPE (type)); int dummy_int; - unsigned int i, unsignedp = TYPE_UNSIGNED (TREE_TYPE (type)); + unsigned int i; + signop sign_p = TYPE_SIGN (TREE_TYPE (type)); unsigned HOST_WIDE_INT mask = GET_MODE_MASK (TYPE_MODE (TREE_TYPE (type))); tree *vec; tree cur_op, mulcst, tem; @@ -428,7 +423,7 @@ expand_vector_divmod (gimple_stmt_iterator *gsi, tree type, tree op0, tree cst = VECTOR_CST_ELT (op1, i); unsigned HOST_WIDE_INT ml; - if (!host_integerp (cst, unsignedp) || integer_zerop (cst)) + if (!tree_fits_hwi_p (cst, sign_p) || integer_zerop (cst)) return NULL_TREE; pre_shifts[i] = 0; post_shifts[i] = 0; @@ -446,10 +441,10 @@ expand_vector_divmod (gimple_stmt_iterator *gsi, tree type, tree op0, } if (mode == -2) continue; - if (unsignedp) + if (sign_p == UNSIGNED) { unsigned HOST_WIDE_INT mh; - unsigned HOST_WIDE_INT d = tree_low_cst (cst, 1) & mask; + unsigned HOST_WIDE_INT d = tree_to_uhwi (cst) & mask; if (d >= ((unsigned HOST_WIDE_INT) 1 << (prec - 1))) /* FIXME: Can transform this into op0 >= op1 ? 1 : 0. 
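The tree_fits_hwi_p call above takes a signop where host_integerp took a bool. Presumably it simply dispatches on the sign; the following is a model of that assumption, not a quote of the real implementation:

static bool
fits_hwi_model_p (tree cst, signop sgn)
{
  /* Assumed behaviour: unsigned query checks the uhwi range,
     signed query checks the shwi range.  */
  return sgn == UNSIGNED ? tree_fits_uhwi_p (cst) : tree_fits_shwi_p (cst);
}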
*/ @@ -481,9 +476,9 @@ expand_vector_divmod (gimple_stmt_iterator *gsi, tree type, tree op0, unsigned HOST_WIDE_INT d2; int this_pre_shift; - if (!host_integerp (cst2, 1)) + if (!tree_fits_uhwi_p (cst2)) return NULL_TREE; - d2 = tree_low_cst (cst2, 1) & mask; + d2 = tree_to_uhwi (cst2) & mask; if (d2 == 0) return NULL_TREE; this_pre_shift = floor_log2 (d2 & -d2); @@ -519,7 +514,7 @@ expand_vector_divmod (gimple_stmt_iterator *gsi, tree type, tree op0, } else { - HOST_WIDE_INT d = tree_low_cst (cst, 0); + HOST_WIDE_INT d = tree_to_shwi (cst); unsigned HOST_WIDE_INT abs_d; if (d == -1) @@ -575,7 +570,7 @@ expand_vector_divmod (gimple_stmt_iterator *gsi, tree type, tree op0, if (use_pow2) { tree addend = NULL_TREE; - if (!unsignedp) + if (sign_p == SIGNED) { tree uns_type; @@ -627,7 +622,7 @@ expand_vector_divmod (gimple_stmt_iterator *gsi, tree type, tree op0, } if (code == TRUNC_DIV_EXPR) { - if (unsignedp) + if (sign_p == UNSIGNED) { /* q = op0 >> shift; */ cur_op = add_rshift (gsi, type, op0, shifts); @@ -661,7 +656,7 @@ expand_vector_divmod (gimple_stmt_iterator *gsi, tree type, tree op0, if (op != unknown_optab && optab_handler (op, TYPE_MODE (type)) != CODE_FOR_nothing) { - if (unsignedp) + if (sign_p == UNSIGNED) /* r = op0 & mask; */ return gimplify_build2 (gsi, BIT_AND_EXPR, type, op0, mask); else if (addend != NULL_TREE) @@ -702,7 +697,7 @@ expand_vector_divmod (gimple_stmt_iterator *gsi, tree type, tree op0, switch (mode) { case 0: - gcc_assert (unsignedp); + gcc_assert (sign_p == UNSIGNED); /* t1 = oprnd0 >> pre_shift; t2 = t1 h* ml; q = t2 >> post_shift; */ @@ -711,7 +706,7 @@ expand_vector_divmod (gimple_stmt_iterator *gsi, tree type, tree op0, return NULL_TREE; break; case 1: - gcc_assert (unsignedp); + gcc_assert (sign_p == UNSIGNED); for (i = 0; i < nunits; i++) { shift_temps[i] = 1; @@ -722,7 +717,7 @@ expand_vector_divmod (gimple_stmt_iterator *gsi, tree type, tree op0, case 3: case 4: case 5: - gcc_assert (!unsignedp); + gcc_assert (sign_p == SIGNED); for (i = 0; i < nunits; i++) shift_temps[i] = prec - 1; break; @@ -1049,8 +1044,8 @@ vector_element (gimple_stmt_iterator *gsi, tree vect, tree idx, tree *ptmpvec) /* Given that we're about to compute a binary modulus, we don't care about the high bits of the value. 
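The masking that follows is the usual power-of-two modulus: vector lane counts are powers of two, so an AND with elements - 1 computes index % elements, and the high bits of an oversized index simply fall away. A standalone sketch:

static unsigned HOST_WIDE_INT
wrap_index (unsigned HOST_WIDE_INT index, unsigned HOST_WIDE_INT elements)
{
  /* Assumes ELEMENTS is a power of two, as vector subpart counts are.  */
  return index & (elements - 1);   /* == index % elements */
}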
*/ - index = TREE_INT_CST_LOW (idx); - if (!host_integerp (idx, 1) || index >= elements) + index = tree_to_hwi (idx); + if (!tree_fits_uhwi_p (idx) || index >= elements) { index &= elements - 1; idx = build_int_cst (TREE_TYPE (idx), index); @@ -1155,7 +1150,7 @@ lower_vec_perm (gimple_stmt_iterator *gsi) unsigned char *sel_int = XALLOCAVEC (unsigned char, elements); for (i = 0; i < elements; ++i) - sel_int[i] = (TREE_INT_CST_LOW (VECTOR_CST_ELT (mask, i)) + sel_int[i] = (tree_to_hwi (VECTOR_CST_ELT (mask, i)) & (2 * elements - 1)); if (can_vec_perm_p (TYPE_MODE (vect_type), false, sel_int)) @@ -1181,8 +1176,8 @@ lower_vec_perm (gimple_stmt_iterator *gsi) { unsigned HOST_WIDE_INT index; - index = TREE_INT_CST_LOW (i_val); - if (!host_integerp (i_val, 1) || index >= elements) + index = tree_to_hwi (i_val); + if (!tree_fits_uhwi_p (i_val) || index >= elements) i_val = build_int_cst (mask_elt_type, index & (elements - 1)); if (two_operand_p && (index & elements) != 0) diff --git a/gcc/tree-vect-loop-manip.c b/gcc/tree-vect-loop-manip.c index 12f70ee002a..ffb67009636 100644 --- a/gcc/tree-vect-loop-manip.c +++ b/gcc/tree-vect-loop-manip.c @@ -1814,7 +1814,7 @@ vect_do_peeling_for_loop_bound (loop_vec_info loop_vinfo, tree *ratio, : LOOP_VINFO_VECT_FACTOR (loop_vinfo)) - 2; if (check_profitability) max_iter = MAX (max_iter, (int) th - 1); - record_niter_bound (new_loop, double_int::from_shwi (max_iter), false, true); + record_niter_bound (new_loop, max_iter, false, true); dump_printf (MSG_NOTE, "Setting upper bound of nb iterations for epilogue " "loop to %d\n", max_iter); @@ -2048,7 +2048,7 @@ vect_do_peeling_for_alignment (loop_vec_info loop_vinfo, max_iter = LOOP_VINFO_VECT_FACTOR (loop_vinfo) - 2; if (check_profitability) max_iter = MAX (max_iter, (int) th - 1); - record_niter_bound (new_loop, double_int::from_shwi (max_iter), false, true); + record_niter_bound (new_loop, max_iter, false, true); dump_printf (MSG_NOTE, "Setting upper bound of nb iterations for prologue " "loop to %d\n", max_iter); diff --git a/gcc/tree-vect-loop.c b/gcc/tree-vect-loop.c index 41eac972a97..a71194f1670 100644 --- a/gcc/tree-vect-loop.c +++ b/gcc/tree-vect-loop.c @@ -1241,7 +1241,7 @@ vect_analyze_loop_form (struct loop *loop) dump_generic_expr (MSG_NOTE, TDF_DETAILS, number_of_iterations); } } - else if (TREE_INT_CST_LOW (number_of_iterations) == 0) + else if (tree_to_hwi (number_of_iterations) == 0) { if (dump_enabled_p ()) dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, @@ -3046,10 +3046,10 @@ vect_model_reduction_cost (stmt_vec_info stmt_info, enum tree_code reduc_code, } else { - int vec_size_in_bits = tree_low_cst (TYPE_SIZE (vectype), 1); + int vec_size_in_bits = tree_to_uhwi (TYPE_SIZE (vectype)); tree bitsize = TYPE_SIZE (TREE_TYPE (gimple_assign_lhs (orig_stmt))); - int element_bitsize = tree_low_cst (bitsize, 1); + int element_bitsize = tree_to_uhwi (bitsize); int nelements = vec_size_in_bits / element_bitsize; optab = optab_for_tree_code (code, vectype, optab_default); @@ -3558,7 +3558,7 @@ get_initial_def_for_reduction (gimple stmt, tree init_val, if (SCALAR_FLOAT_TYPE_P (scalar_type)) init_value = build_real (scalar_type, TREE_REAL_CST (init_val)); else - init_value = build_int_cst (scalar_type, TREE_INT_CST_LOW (init_val)); + init_value = build_int_cst (scalar_type, tree_to_hwi (init_val)); } else init_value = init_val; @@ -4058,8 +4058,8 @@ vect_create_epilog_for_reduction (vec<tree> vect_defs, gimple stmt, enum tree_code shift_code = ERROR_MARK; bool have_whole_vector_shift = true; int 
bit_offset; - int element_bitsize = tree_low_cst (bitsize, 1); - int vec_size_in_bits = tree_low_cst (TYPE_SIZE (vectype), 1); + int element_bitsize = tree_to_uhwi (bitsize); + int vec_size_in_bits = tree_to_uhwi (TYPE_SIZE (vectype)); tree vec_temp; if (optab_handler (vec_shr_optab, mode) != CODE_FOR_nothing) @@ -4136,7 +4136,7 @@ vect_create_epilog_for_reduction (vec<tree> vect_defs, gimple stmt, dump_printf_loc (MSG_NOTE, vect_location, "Reduce using scalar code. "); - vec_size_in_bits = tree_low_cst (TYPE_SIZE (vectype), 1); + vec_size_in_bits = tree_to_uhwi (TYPE_SIZE (vectype)); FOR_EACH_VEC_ELT (new_phis, i, new_phi) { if (gimple_code (new_phi) == GIMPLE_PHI) @@ -5815,19 +5815,17 @@ vect_transform_loop (loop_vec_info loop_vinfo) scale_loop_profile (loop, GCOV_COMPUTE_SCALE (1, vectorization_factor), expected_iterations / vectorization_factor); loop->nb_iterations_upper_bound - = loop->nb_iterations_upper_bound.udiv (double_int::from_uhwi (vectorization_factor), - FLOOR_DIV_EXPR); + = loop->nb_iterations_upper_bound.udiv_floor (vectorization_factor); if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo) - && loop->nb_iterations_upper_bound != double_int_zero) - loop->nb_iterations_upper_bound = loop->nb_iterations_upper_bound - double_int_one; + && !loop->nb_iterations_upper_bound.zero_p ()) + loop->nb_iterations_upper_bound = loop->nb_iterations_upper_bound - 1; if (loop->any_estimate) { loop->nb_iterations_estimate - = loop->nb_iterations_estimate.udiv (double_int::from_uhwi (vectorization_factor), - FLOOR_DIV_EXPR); + = loop->nb_iterations_estimate.udiv_floor (vectorization_factor); if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo) - && loop->nb_iterations_estimate != double_int_zero) - loop->nb_iterations_estimate = loop->nb_iterations_estimate - double_int_one; + && !loop->nb_iterations_estimate.zero_p ()) + loop->nb_iterations_estimate = loop->nb_iterations_estimate - 1; } if (dump_enabled_p ()) diff --git a/gcc/tree-vect-patterns.c b/gcc/tree-vect-patterns.c index 0a48727821a..37cf077e626 100644 --- a/gcc/tree-vect-patterns.c +++ b/gcc/tree-vect-patterns.c @@ -776,8 +776,8 @@ vect_recog_pow_pattern (vec<gimple> *stmts, tree *type_in, *type_out = NULL_TREE; /* Catch squaring. 
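Worth pausing on the vect_transform_loop hunk above: dropping the double_int wrappers leaves the bound bookkeeping readable at a glance. A worked instance with hypothetical values (upper bound 17, vectorization factor vf == 4, peeling for gaps enabled; vf and peel_for_gaps_p are our names):

loop->nb_iterations_upper_bound
  = loop->nb_iterations_upper_bound.udiv_floor (vf);   /* 17 -> 4 */
if (peel_for_gaps_p && !loop->nb_iterations_upper_bound.zero_p ())
  loop->nb_iterations_upper_bound
    = loop->nb_iterations_upper_bound - 1;             /* 4 -> 3 */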
*/ - if ((host_integerp (exp, 0) - && tree_low_cst (exp, 0) == 2) + if ((tree_fits_shwi_p (exp) + && tree_to_shwi (exp) == 2) || (TREE_CODE (exp) == REAL_CST && REAL_VALUES_EQUAL (TREE_REAL_CST (exp), dconst2))) { @@ -1625,14 +1625,14 @@ vect_recog_rotate_pattern (vec<gimple> *stmts, tree *type_in, tree *type_out) if (TREE_CODE (def) == INTEGER_CST) { - if (!host_integerp (def, 1) - || (unsigned HOST_WIDE_INT) tree_low_cst (def, 1) + if (!tree_fits_uhwi_p (def) + || (unsigned HOST_WIDE_INT) tree_to_uhwi (def) >= GET_MODE_PRECISION (TYPE_MODE (type)) || integer_zerop (def)) return NULL; def2 = build_int_cst (stype, GET_MODE_PRECISION (TYPE_MODE (type)) - - tree_low_cst (def, 1)); + - tree_to_uhwi (def)); } else { @@ -2055,7 +2055,7 @@ vect_recog_divmod_pattern (vec<gimple> *stmts, return pattern_stmt; } - if (!host_integerp (oprnd1, TYPE_UNSIGNED (itype)) + if (!tree_fits_hwi_p (oprnd1, TYPE_SIGN (itype)) || integer_zerop (oprnd1) || prec > HOST_BITS_PER_WIDE_INT) return NULL; @@ -2069,7 +2069,7 @@ vect_recog_divmod_pattern (vec<gimple> *stmts, { unsigned HOST_WIDE_INT mh, ml; int pre_shift, post_shift; - unsigned HOST_WIDE_INT d = tree_low_cst (oprnd1, 1) + unsigned HOST_WIDE_INT d = tree_to_uhwi (oprnd1) & GET_MODE_MASK (TYPE_MODE (itype)); tree t1, t2, t3, t4; @@ -2186,7 +2186,7 @@ vect_recog_divmod_pattern (vec<gimple> *stmts, { unsigned HOST_WIDE_INT ml; int post_shift; - HOST_WIDE_INT d = tree_low_cst (oprnd1, 0); + HOST_WIDE_INT d = tree_to_shwi (oprnd1); unsigned HOST_WIDE_INT abs_d; bool add = false; tree t1, t2, t3, t4; diff --git a/gcc/tree-vect-stmts.c b/gcc/tree-vect-stmts.c index 0580f7dfadc..cd7ac526417 100644 --- a/gcc/tree-vect-stmts.c +++ b/gcc/tree-vect-stmts.c @@ -4911,7 +4911,7 @@ vectorizable_load (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt, nested within an outer-loop that is being vectorized. */ if (nested_in_vect_loop - && (TREE_INT_CST_LOW (DR_STEP (dr)) + && (tree_to_hwi (DR_STEP (dr)) % GET_MODE_SIZE (TYPE_MODE (vectype)) != 0)) { gcc_assert (alignment_support_scheme != dr_explicit_realign_optimized); diff --git a/gcc/tree-vectorizer.h b/gcc/tree-vectorizer.h index 7c5dfe884df..22a7dbb6f5e 100644 --- a/gcc/tree-vectorizer.h +++ b/gcc/tree-vectorizer.h @@ -330,7 +330,7 @@ typedef struct _loop_vec_info { #define LOOP_VINFO_LOOP_NEST(L) (L)->loop_nest #define LOOP_VINFO_DATAREFS(L) (L)->datarefs #define LOOP_VINFO_DDRS(L) (L)->ddrs -#define LOOP_VINFO_INT_NITERS(L) (TREE_INT_CST_LOW ((L)->num_iters)) +#define LOOP_VINFO_INT_NITERS(L) (tree_to_hwi ((L)->num_iters)) #define LOOP_PEELING_FOR_ALIGNMENT(L) (L)->peeling_for_alignment #define LOOP_VINFO_UNALIGNED_DR(L) (L)->unaligned_dr #define LOOP_VINFO_MAY_MISALIGN_STMTS(L) (L)->may_misalign_stmts @@ -352,8 +352,8 @@ typedef struct _loop_vec_info { (L)->may_alias_ddrs.length () > 0 #define NITERS_KNOWN_P(n) \ -(host_integerp ((n),0) \ -&& TREE_INT_CST_LOW ((n)) > 0) +(tree_fits_shwi_p ((n)) \ +&& tree_to_shwi ((n)) > 0) #define LOOP_VINFO_NITERS_KNOWN_P(L) \ NITERS_KNOWN_P((L)->num_iters) diff --git a/gcc/tree-vrp.c b/gcc/tree-vrp.c index ff82591f5f4..d339442ea3c 100644 --- a/gcc/tree-vrp.c +++ b/gcc/tree-vrp.c @@ -1614,10 +1614,14 @@ extract_range_from_assert (value_range_t *vr_p, tree expr) /* Make sure to not set TREE_OVERFLOW on the final type conversion. We are willingly interpreting large positive unsigned values as negative signed values here. 
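The force_fit_type calls just below re-extend each bound to the variable's precision using the source type's sign. The same pairing as a reusable sketch (helper name is ours; the 0/false arguments mean no overflowed bits to record, exactly as in the patch's calls):

static tree
refit_bound (tree to_type, tree bound)
{
  /* Resize at the source's sign, then rebuild the INTEGER_CST in
     the destination type.  */
  wide_int w = wide_int (bound).force_to_size (TYPE_PRECISION (to_type),
                                               TYPE_SIGN (TREE_TYPE (bound)));
  return force_fit_type (to_type, w, 0, false);
}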
*/ - min = force_fit_type_double (TREE_TYPE (var), tree_to_double_int (min), - 0, false); - max = force_fit_type_double (TREE_TYPE (var), tree_to_double_int (max), - 0, false); + min = force_fit_type (TREE_TYPE (var), + wide_int (min).force_to_size (TYPE_PRECISION (TREE_TYPE (var)), + TYPE_SIGN (TREE_TYPE (min))), + 0, false); + max = force_fit_type (TREE_TYPE (var), + wide_int (max).force_to_size (TYPE_PRECISION (TREE_TYPE (var)), + TYPE_SIGN (TREE_TYPE (max))), + 0, false); /* We can transform a max, min range to an anti-range or vice-versa. Use set_and_canonicalize_value_range which does @@ -1873,6 +1877,10 @@ vrp_int_const_binop (enum tree_code code, tree val1, tree val2) /* If the signed operation wraps then int_const_binop has done everything we want. */ ; + /* Signed division of -1/0 overflows and by the time it gets here + returns NULL_TREE. */ + else if (!res) + return NULL_TREE; else if ((TREE_OVERFLOW (res) && !TREE_OVERFLOW (val1) && !TREE_OVERFLOW (val2)) @@ -1964,19 +1972,20 @@ vrp_int_const_binop (enum tree_code code, tree val1, tree val2) } -/* For range VR compute two double_int bitmasks. In *MAY_BE_NONZERO +/* For range VR compute two wide_int bitmasks. In *MAY_BE_NONZERO bitmask if some bit is unset, it means for all numbers in the range the bit is 0, otherwise it might be 0 or 1. In *MUST_BE_NONZERO bitmask if some bit is set, it means for all numbers in the range the bit is 1, otherwise it might be 0 or 1. */ static bool -zero_nonzero_bits_from_vr (value_range_t *vr, - double_int *may_be_nonzero, - double_int *must_be_nonzero) +zero_nonzero_bits_from_vr (const tree expr_type, + value_range_t *vr, + wide_int *may_be_nonzero, + wide_int *must_be_nonzero) { - *may_be_nonzero = double_int_minus_one; - *must_be_nonzero = double_int_zero; + *may_be_nonzero = wide_int::minus_one (TYPE_PRECISION (expr_type)); + *must_be_nonzero = wide_int::zero (TYPE_PRECISION (expr_type)); if (!range_int_cst_p (vr) || TREE_OVERFLOW (vr->min) || TREE_OVERFLOW (vr->max)) @@ -1984,34 +1993,24 @@ zero_nonzero_bits_from_vr (value_range_t *vr, if (range_int_cst_singleton_p (vr)) { - *may_be_nonzero = tree_to_double_int (vr->min); + *may_be_nonzero = vr->min; *must_be_nonzero = *may_be_nonzero; } else if (tree_int_cst_sgn (vr->min) >= 0 || tree_int_cst_sgn (vr->max) < 0) { - double_int dmin = tree_to_double_int (vr->min); - double_int dmax = tree_to_double_int (vr->max); - double_int xor_mask = dmin ^ dmax; - *may_be_nonzero = dmin | dmax; - *must_be_nonzero = dmin & dmax; - if (xor_mask.high != 0) - { - unsigned HOST_WIDE_INT mask - = ((unsigned HOST_WIDE_INT) 1 - << floor_log2 (xor_mask.high)) - 1; - may_be_nonzero->low = ALL_ONES; - may_be_nonzero->high |= mask; - must_be_nonzero->low = 0; - must_be_nonzero->high &= ~mask; - } - else if (xor_mask.low != 0) + wide_int wmin = vr->min; + wide_int wmax = vr->max; + wide_int xor_mask = wmin ^ wmax; + *may_be_nonzero = wmin | wmax; + *must_be_nonzero = wmin & wmax; + if (!xor_mask.zero_p ()) { - unsigned HOST_WIDE_INT mask - = ((unsigned HOST_WIDE_INT) 1 - << floor_log2 (xor_mask.low)) - 1; - may_be_nonzero->low |= mask; - must_be_nonzero->low &= ~mask; + wide_int mask = wide_int::mask (xor_mask.floor_log2 ().to_shwi (), + false, + (*may_be_nonzero).get_precision ()); + *may_be_nonzero = (*may_be_nonzero) | mask; + *must_be_nonzero = (*must_be_nonzero).and_not (mask); } } @@ -2044,15 +2043,15 @@ ranges_from_anti_range (value_range_t *ar, vr0->type = VR_RANGE; vr0->min = vrp_val_min (type); vr0->max - = double_int_to_tree (type, - tree_to_double_int 
(ar->min) - double_int_one); + = wide_int_to_tree (type, + wide_int (ar->min) - 1); } if (!vrp_val_is_max (ar->max)) { vr1->type = VR_RANGE; vr1->min - = double_int_to_tree (type, - tree_to_double_int (ar->max) + double_int_one); + = wide_int_to_tree (type, + wide_int (ar->max) + 1); vr1->max = vrp_val_max (type); } if (vr0->type == VR_UNDEFINED) @@ -2218,28 +2217,6 @@ extract_range_from_multiplicative_op_1 (value_range_t *vr, set_value_range (vr, type, min, max, NULL); } -/* Some quadruple precision helpers. */ -static int -quad_int_cmp (double_int l0, double_int h0, - double_int l1, double_int h1, bool uns) -{ - int c = h0.cmp (h1, uns); - if (c != 0) return c; - return l0.ucmp (l1); -} - -static void -quad_int_pair_sort (double_int *l0, double_int *h0, - double_int *l1, double_int *h1, bool uns) -{ - if (quad_int_cmp (*l0, *h0, *l1, *h1, uns) > 0) - { - double_int tmp; - tmp = *l0; *l0 = *l1; *l1 = tmp; - tmp = *h0; *h0 = *h1; *h1 = tmp; - } -} - /* Extract range information from a binary operation CODE based on the ranges of each of its operands, *VR0 and *VR1 with resulting type EXPR_TYPE. The resulting range is stored in *VR. */ @@ -2411,43 +2388,42 @@ extract_range_from_binary_expr_1 (value_range_t *vr, /* If we have a PLUS_EXPR with two VR_RANGE integer constant ranges compute the precise range for such case if possible. */ if (range_int_cst_p (&vr0) - && range_int_cst_p (&vr1) - /* We need as many bits as the possibly unsigned inputs. */ - && TYPE_PRECISION (expr_type) <= HOST_BITS_PER_DOUBLE_INT) - { - double_int min0 = tree_to_double_int (vr0.min); - double_int max0 = tree_to_double_int (vr0.max); - double_int min1 = tree_to_double_int (vr1.min); - double_int max1 = tree_to_double_int (vr1.max); - bool uns = TYPE_UNSIGNED (expr_type); - double_int type_min - = double_int::min_value (TYPE_PRECISION (expr_type), uns); - double_int type_max - = double_int::max_value (TYPE_PRECISION (expr_type), uns); - double_int dmin, dmax; + && range_int_cst_p (&vr1)) + { + signop sgn = TYPE_SIGN (expr_type); + unsigned int prec = TYPE_PRECISION (expr_type); + wide_int min0 = wide_int (vr0.min); + wide_int max0 = wide_int (vr0.max); + wide_int min1 = wide_int (vr1.min); + wide_int max1 = wide_int (vr1.max); + wide_int type_min + = wide_int::min_value (TYPE_PRECISION (expr_type), sgn); + wide_int type_max + = wide_int::max_value (TYPE_PRECISION (expr_type), sgn); + wide_int wmin, wmax; int min_ovf = 0; int max_ovf = 0; if (code == PLUS_EXPR) { - dmin = min0 + min1; - dmax = max0 + max1; - - /* Check for overflow in double_int. */ - if (min1.cmp (double_int_zero, uns) != dmin.cmp (min0, uns)) - min_ovf = min0.cmp (dmin, uns); - if (max1.cmp (double_int_zero, uns) != dmax.cmp (max0, uns)) - max_ovf = max0.cmp (dmax, uns); + wmin = min0 + min1; + wmax = max0 + max1; + + /* Check for overflow. 
*/ + if (min1.cmp (0, sgn) != wmin.cmp (min0, sgn)) + min_ovf = min0.cmp (wmin, sgn); + if (max1.cmp (0, sgn) != wmax.cmp (max0, sgn)) + max_ovf = max0.cmp (wmax, sgn); } else /* if (code == MINUS_EXPR) */ { - dmin = min0 - max1; - dmax = max0 - min1; + wmin = min0 - max1; + wmax = max0 - min1; - if (double_int_zero.cmp (max1, uns) != dmin.cmp (min0, uns)) - min_ovf = min0.cmp (max1, uns); - if (double_int_zero.cmp (min1, uns) != dmax.cmp (max0, uns)) - max_ovf = max0.cmp (min1, uns); + if (wide_int (0).cmp (max1, sgn) != wmin.cmp (min0, sgn)) + min_ovf = min0.cmp (max1, sgn); + if (wide_int (0).cmp (min1, sgn) != wmax.cmp (max0, sgn)) + max_ovf = max0.cmp (min1, sgn); } /* For non-wrapping arithmetic look at possibly smaller @@ -2455,24 +2431,24 @@ extract_range_from_binary_expr_1 (value_range_t *vr, if (!TYPE_OVERFLOW_WRAPS (expr_type)) { if (vrp_val_min (expr_type)) - type_min = tree_to_double_int (vrp_val_min (expr_type)); + type_min = wide_int (vrp_val_min (expr_type)); if (vrp_val_max (expr_type)) - type_max = tree_to_double_int (vrp_val_max (expr_type)); + type_max = wide_int (vrp_val_max (expr_type)); } /* Check for type overflow. */ if (min_ovf == 0) { - if (dmin.cmp (type_min, uns) == -1) + if (wmin.cmp (type_min, sgn) == -1) min_ovf = -1; - else if (dmin.cmp (type_max, uns) == 1) + else if (wmin.cmp (type_max, sgn) == 1) min_ovf = 1; } if (max_ovf == 0) { - if (dmax.cmp (type_min, uns) == -1) + if (wmax.cmp (type_min, sgn) == -1) max_ovf = -1; - else if (dmax.cmp (type_max, uns) == 1) + else if (wmax.cmp (type_max, sgn) == 1) max_ovf = 1; } @@ -2480,16 +2456,14 @@ extract_range_from_binary_expr_1 (value_range_t *vr, { /* If overflow wraps, truncate the values and adjust the range kind and bounds appropriately. */ - double_int tmin - = dmin.ext (TYPE_PRECISION (expr_type), uns); - double_int tmax - = dmax.ext (TYPE_PRECISION (expr_type), uns); + wide_int tmin = wmin.force_to_size (prec, sgn); + wide_int tmax = wmax.force_to_size (prec, sgn); if (min_ovf == max_ovf) { /* No overflow or both overflow or underflow. The range kind stays VR_RANGE. */ - min = double_int_to_tree (expr_type, tmin); - max = double_int_to_tree (expr_type, tmax); + min = wide_int_to_tree (expr_type, tmin); + max = wide_int_to_tree (expr_type, tmax); } else if (min_ovf == -1 && max_ovf == 1) @@ -2503,26 +2477,26 @@ extract_range_from_binary_expr_1 (value_range_t *vr, /* Min underflow or max overflow. The range kind changes to VR_ANTI_RANGE. */ bool covers = false; - double_int tem = tmin; + wide_int tem = tmin; gcc_assert ((min_ovf == -1 && max_ovf == 0) || (max_ovf == 1 && min_ovf == 0)); type = VR_ANTI_RANGE; - tmin = tmax + double_int_one; - if (tmin.cmp (tmax, uns) < 0) + tmin = tmax + 1; + if (tmin.cmp (tmax, sgn) < 0) covers = true; - tmax = tem + double_int_minus_one; - if (tmax.cmp (tem, uns) > 0) + tmax = tem - 1; + if (tmax.cmp (tem, sgn) > 0) covers = true; /* If the anti-range would cover nothing, drop to varying. Likewise if the anti-range bounds are outside of the types values. 
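The overflow test at the top of this hunk is the classic wrapping-addition identity: a sum overflows exactly when comparing the addend with zero disagrees with comparing the sum with the other operand. On host integers, as a standalone sketch of the same test the wide_int cmp calls perform:

static bool
wrapping_add_overflow_p (HOST_WIDE_INT a, HOST_WIDE_INT b)
{
  /* Do the addition in unsigned arithmetic so the wraparound itself
     is well defined, then apply the sign test.  */
  HOST_WIDE_INT sum = (HOST_WIDE_INT) ((unsigned HOST_WIDE_INT) a
                                       + (unsigned HOST_WIDE_INT) b);
  return (b < 0) != (sum < a);
}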
*/ - if (covers || tmin.cmp (tmax, uns) > 0) + if (covers || tmin.cmp (tmax, sgn) > 0) { set_value_range_to_varying (vr); return; } - min = double_int_to_tree (expr_type, tmin); - max = double_int_to_tree (expr_type, tmax); + min = wide_int_to_tree (expr_type, tmin); + max = wide_int_to_tree (expr_type, tmax); } } else @@ -2535,7 +2509,7 @@ extract_range_from_binary_expr_1 (value_range_t *vr, && supports_overflow_infinity (expr_type)) min = negative_overflow_infinity (expr_type); else - min = double_int_to_tree (expr_type, type_min); + min = wide_int_to_tree (expr_type, type_min); } else if (min_ovf == 1) { @@ -2543,10 +2517,10 @@ extract_range_from_binary_expr_1 (value_range_t *vr, && supports_overflow_infinity (expr_type)) min = positive_overflow_infinity (expr_type); else - min = double_int_to_tree (expr_type, type_max); + min = wide_int_to_tree (expr_type, type_max); } else - min = double_int_to_tree (expr_type, dmin); + min = wide_int_to_tree (expr_type, wmin); if (max_ovf == -1) { @@ -2554,7 +2528,7 @@ extract_range_from_binary_expr_1 (value_range_t *vr, && supports_overflow_infinity (expr_type)) max = negative_overflow_infinity (expr_type); else - max = double_int_to_tree (expr_type, type_min); + max = wide_int_to_tree (expr_type, type_min); } else if (max_ovf == 1) { @@ -2562,10 +2536,10 @@ extract_range_from_binary_expr_1 (value_range_t *vr, && supports_overflow_infinity (expr_type)) max = positive_overflow_infinity (expr_type); else - max = double_int_to_tree (expr_type, type_max); + max = wide_int_to_tree (expr_type, type_max); } else - max = double_int_to_tree (expr_type, dmax); + max = wide_int_to_tree (expr_type, wmax); } if (needs_overflow_infinity (expr_type) && supports_overflow_infinity (expr_type)) @@ -2651,97 +2625,86 @@ extract_range_from_binary_expr_1 (value_range_t *vr, else if (code == MULT_EXPR) { /* Fancy code so that with unsigned, [-3,-1]*[-3,-1] does not - drop to varying. */ + drop to varying. This test requires 2*prec bits if both + operands are signed and 2*prec + 2 bits if either is not. */ + + signop sign = TYPE_SIGN (expr_type); + unsigned int prec = TYPE_PRECISION (expr_type); + unsigned int prec2 = (prec * 2) + (sign == UNSIGNED ? 2 : 0); + if (range_int_cst_p (&vr0) && range_int_cst_p (&vr1) && TYPE_OVERFLOW_WRAPS (expr_type)) { - double_int min0, max0, min1, max1, sizem1, size; - double_int prod0l, prod0h, prod1l, prod1h, - prod2l, prod2h, prod3l, prod3h; - bool uns0, uns1, uns; - - sizem1 = double_int::max_value (TYPE_PRECISION (expr_type), true); - size = sizem1 + double_int_one; - - min0 = tree_to_double_int (vr0.min); - max0 = tree_to_double_int (vr0.max); - min1 = tree_to_double_int (vr1.min); - max1 = tree_to_double_int (vr1.max); + wide_int min0, max0, min1, max1; + wide_int prod0, prod1, prod2, prod3; + wide_int sizem1 = wide_int::max_value (prec, UNSIGNED, prec2); + wide_int size = sizem1 + 1; - uns0 = TYPE_UNSIGNED (expr_type); - uns1 = uns0; + /* Extend the values using the sign of the result to PREC2. + From here on out, everything is just signed math no matter + what the input types were. */ + min0 = wide_int (vr0.min).force_to_size (prec2, sign); + max0 = wide_int (vr0.max).force_to_size (prec2, sign); + min1 = wide_int (vr1.min).force_to_size (prec2, sign); + max1 = wide_int (vr1.max).force_to_size (prec2, sign); /* Canonicalize the intervals. 
*/ - if (TYPE_UNSIGNED (expr_type)) + if (sign == UNSIGNED) { - double_int min2 = size - min0; - if (!min2.is_zero () && min2.cmp (max0, true) < 0) + if (size.ltu_p (min0 + max0)) { - min0 = -min2; + min0 -= size; max0 -= size; - uns0 = false; } - min2 = size - min1; - if (!min2.is_zero () && min2.cmp (max1, true) < 0) + if (size.ltu_p (min1 + max1)) { - min1 = -min2; + min1 -= size; max1 -= size; - uns1 = false; } } - uns = uns0 & uns1; - bool overflow; - prod0l = min0.wide_mul_with_sign (min1, true, &prod0h, &overflow); - if (!uns0 && min0.is_negative ()) - prod0h -= min1; - if (!uns1 && min1.is_negative ()) - prod0h -= min0; - - prod1l = min0.wide_mul_with_sign (max1, true, &prod1h, &overflow); - if (!uns0 && min0.is_negative ()) - prod1h -= max1; - if (!uns1 && max1.is_negative ()) - prod1h -= min0; - - prod2l = max0.wide_mul_with_sign (min1, true, &prod2h, &overflow); - if (!uns0 && max0.is_negative ()) - prod2h -= min1; - if (!uns1 && min1.is_negative ()) - prod2h -= max0; - - prod3l = max0.wide_mul_with_sign (max1, true, &prod3h, &overflow); - if (!uns0 && max0.is_negative ()) - prod3h -= max1; - if (!uns1 && max1.is_negative ()) - prod3h -= max0; - - /* Sort the 4 products. */ - quad_int_pair_sort (&prod0l, &prod0h, &prod3l, &prod3h, uns); - quad_int_pair_sort (&prod1l, &prod1h, &prod2l, &prod2h, uns); - quad_int_pair_sort (&prod0l, &prod0h, &prod1l, &prod1h, uns); - quad_int_pair_sort (&prod2l, &prod2h, &prod3l, &prod3h, uns); - - /* Max - min. */ - if (prod0l.is_zero ()) + prod0 = min0 * min1; + prod1 = min0 * max1; + prod2 = max0 * min1; + prod3 = max0 * max1; + + /* Sort the 4 products so that min is in prod0 and max is in + prod3. */ + /* min0min1 > max0max1 */ + if (prod0.gts_p (prod3)) { - prod1l = double_int_zero; - prod1h = -prod0h; + wide_int tmp = prod3; + prod3 = prod0; + prod0 = tmp; } - else + + /* min0max1 > max0min1 */ + if (prod1.gts_p (prod2)) + { + wide_int tmp = prod2; + prod2 = prod1; + prod1 = tmp; + } + + if (prod0.gts_p (prod1)) { - prod1l = -prod0l; - prod1h = ~prod0h; + wide_int tmp = prod1; + prod1 = prod0; + prod0 = tmp; } - prod2l = prod3l + prod1l; - prod2h = prod3h + prod1h; - if (prod2l.ult (prod3l)) - prod2h += double_int_one; /* carry */ - if (!prod2h.is_zero () - || prod2l.cmp (sizem1, true) >= 0) + if (prod2.gts_p (prod3)) + { + wide_int tmp = prod3; + prod3 = prod2; + prod2 = tmp; + } + + /* diff = max - min. */ + prod2 = prod3 - prod0; + if (prod2.geu_p (sizem1)) { /* the range covers all values. */ set_value_range_to_varying (vr); @@ -2750,8 +2713,8 @@ extract_range_from_binary_expr_1 (value_range_t *vr, /* The following should handle the wrapping and selecting VR_ANTI_RANGE for us. */ - min = double_int_to_tree (expr_type, prod0l); - max = double_int_to_tree (expr_type, prod3l); + min = wide_int_to_tree (expr_type, prod0); + max = wide_int_to_tree (expr_type, prod3); set_and_canonicalize_value_range (vr, VR_RANGE, min, max, NULL); return; } @@ -2799,10 +2762,9 @@ extract_range_from_binary_expr_1 (value_range_t *vr, value_range_t vr1p = VR_INITIALIZER; vr1p.type = VR_RANGE; vr1p.min - = double_int_to_tree (expr_type, - double_int_one - .llshift (TREE_INT_CST_LOW (vr1.min), - TYPE_PRECISION (expr_type))); + = wide_int_to_tree (expr_type, + wide_int::set_bit_in_zero (tree_to_shwi (vr1.min), + TYPE_PRECISION (expr_type))); vr1p.max = vr1p.min; /* We have to use a wrapping multiply though as signed overflow on lshifts is implementation defined in C89. 
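The constant for that multiply is 2^C built directly at the expression's precision; set_bit_in_zero replaces the old llshift-of-one spelling. A sketch, with c standing for the singleton shift count taken from the range:

tree two_to_c
  = wide_int_to_tree (expr_type,
                      wide_int::set_bit_in_zero (c, TYPE_PRECISION (expr_type)));
/* [x << c, x << c] is then handled as x * two_to_c by the MULT_EXPR
   range code, which already copes with wrapping.  */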
*/ @@ -2819,34 +2781,34 @@ extract_range_from_binary_expr_1 (value_range_t *vr, int prec = TYPE_PRECISION (expr_type); int overflow_pos = prec; int bound_shift; - double_int bound, complement, low_bound, high_bound; + wide_int bound, complement, low_bound, high_bound; bool uns = TYPE_UNSIGNED (expr_type); bool in_bounds = false; if (!uns) overflow_pos -= 1; - bound_shift = overflow_pos - TREE_INT_CST_LOW (vr1.max); - /* If bound_shift == HOST_BITS_PER_DOUBLE_INT, the llshift can + bound_shift = overflow_pos - tree_to_shwi (vr1.max); + /* If bound_shift == HOST_BITS_PER_WIDE_INT, the llshift can overflow. However, for that to happen, vr1.max needs to be zero, which means vr1 is a singleton range of zero, which means it should be handled by the previous LSHIFT_EXPR if-clause. */ - bound = double_int_one.llshift (bound_shift, prec); - complement = ~(bound - double_int_one); + bound = wide_int::set_bit_in_zero (bound_shift, prec); + complement = ~(bound - 1); if (uns) { - low_bound = bound.zext (prec); - high_bound = complement.zext (prec); - if (tree_to_double_int (vr0.max).ult (low_bound)) + low_bound = bound; + high_bound = complement; + if (wide_int::ltu_p (vr0.max, low_bound)) { /* [5, 6] << [1, 2] == [10, 24]. */ /* We're shifting out only zeroes, the value increases monotonically. */ in_bounds = true; } - else if (high_bound.ult (tree_to_double_int (vr0.min))) + else if (high_bound.ltu_p (vr0.min)) { /* [0xffffff00, 0xffffffff] << [1, 2] == [0xfffffc00, 0xfffffffe]. */ @@ -2858,10 +2820,10 @@ extract_range_from_binary_expr_1 (value_range_t *vr, else { /* [-1, 1] << [1, 2] == [-4, 4]. */ - low_bound = complement.sext (prec); + low_bound = complement; high_bound = bound; - if (tree_to_double_int (vr0.max).slt (high_bound) - && low_bound.slt (tree_to_double_int (vr0.min))) + if (wide_int::lts_p (vr0.max, high_bound) + && low_bound.lts_p (wide_int (vr0.min))) { /* For non-negative numbers, we're shifting out only zeroes, the value increases monotonically. @@ -2985,7 +2947,7 @@ extract_range_from_binary_expr_1 (value_range_t *vr, max = fold_unary_to_constant (ABS_EXPR, expr_type, vr1.min); if (tree_int_cst_lt (max, vr1.max)) max = vr1.max; - max = int_const_binop (MINUS_EXPR, max, integer_one_node); + max = int_const_binop (MINUS_EXPR, max, build_int_cst (TREE_TYPE (max), 1)); /* If the dividend is non-negative the modulus will be non-negative as well. 
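Concretely, for truncating division the remainder satisfies |a % b| <= |b| - 1 and takes the dividend's sign, which is exactly the bound being assembled here. A sketch (helper name is ours):

static void
trunc_mod_bounds (HOST_WIDE_INT divisor_abs_max, bool dividend_nonneg_p,
                  HOST_WIDE_INT *lo, HOST_WIDE_INT *hi)
{
  *hi = divisor_abs_max - 1;           /* |a % b| <= |b| - 1 */
  *lo = dividend_nonneg_p ? 0 : -*hi;  /* result sign follows the dividend */
}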
*/ if (TYPE_UNSIGNED (expr_type) @@ -2997,21 +2959,21 @@ extract_range_from_binary_expr_1 (value_range_t *vr, else if (code == BIT_AND_EXPR || code == BIT_IOR_EXPR || code == BIT_XOR_EXPR) { bool int_cst_range0, int_cst_range1; - double_int may_be_nonzero0, may_be_nonzero1; - double_int must_be_nonzero0, must_be_nonzero1; + wide_int may_be_nonzero0, may_be_nonzero1; + wide_int must_be_nonzero0, must_be_nonzero1; - int_cst_range0 = zero_nonzero_bits_from_vr (&vr0, &may_be_nonzero0, + int_cst_range0 = zero_nonzero_bits_from_vr (expr_type, &vr0, &may_be_nonzero0, &must_be_nonzero0); - int_cst_range1 = zero_nonzero_bits_from_vr (&vr1, &may_be_nonzero1, + int_cst_range1 = zero_nonzero_bits_from_vr (expr_type, &vr1, &may_be_nonzero1, &must_be_nonzero1); type = VR_RANGE; if (code == BIT_AND_EXPR) { - double_int dmax; - min = double_int_to_tree (expr_type, - must_be_nonzero0 & must_be_nonzero1); - dmax = may_be_nonzero0 & may_be_nonzero1; + wide_int wmax; + min = wide_int_to_tree (expr_type, + must_be_nonzero0 & must_be_nonzero1); + wmax = may_be_nonzero0 & may_be_nonzero1; /* If both input ranges contain only negative values we can truncate the result range maximum to the minimum of the input range maxima. */ @@ -3019,28 +2981,24 @@ extract_range_from_binary_expr_1 (value_range_t *vr, && tree_int_cst_sgn (vr0.max) < 0 && tree_int_cst_sgn (vr1.max) < 0) { - dmax = dmax.min (tree_to_double_int (vr0.max), - TYPE_UNSIGNED (expr_type)); - dmax = dmax.min (tree_to_double_int (vr1.max), - TYPE_UNSIGNED (expr_type)); + wmax = wmax.min (vr0.max, TYPE_SIGN (expr_type)); + wmax = wmax.min (vr1.max, TYPE_SIGN (expr_type)); } /* If either input range contains only non-negative values we can truncate the result range maximum to the respective maximum of the input range. */ if (int_cst_range0 && tree_int_cst_sgn (vr0.min) >= 0) - dmax = dmax.min (tree_to_double_int (vr0.max), - TYPE_UNSIGNED (expr_type)); + wmax = wmax.min (vr0.max, TYPE_SIGN (expr_type)); if (int_cst_range1 && tree_int_cst_sgn (vr1.min) >= 0) - dmax = dmax.min (tree_to_double_int (vr1.max), - TYPE_UNSIGNED (expr_type)); - max = double_int_to_tree (expr_type, dmax); + wmax = wmax.min (vr1.max, TYPE_SIGN (expr_type)); + max = wide_int_to_tree (expr_type, wmax); } else if (code == BIT_IOR_EXPR) { - double_int dmin; - max = double_int_to_tree (expr_type, - may_be_nonzero0 | may_be_nonzero1); - dmin = must_be_nonzero0 | must_be_nonzero1; + wide_int wmin; + max = wide_int_to_tree (expr_type, + may_be_nonzero0 | may_be_nonzero1); + wmin = must_be_nonzero0 | must_be_nonzero1; /* If the input ranges contain only positive values we can truncate the minimum of the result range to the maximum of the input range minima. */ @@ -3048,31 +3006,27 @@ extract_range_from_binary_expr_1 (value_range_t *vr, && tree_int_cst_sgn (vr0.min) >= 0 && tree_int_cst_sgn (vr1.min) >= 0) { - dmin = dmin.max (tree_to_double_int (vr0.min), - TYPE_UNSIGNED (expr_type)); - dmin = dmin.max (tree_to_double_int (vr1.min), - TYPE_UNSIGNED (expr_type)); + wmin = wmin.max (vr0.min, TYPE_SIGN (expr_type)); + wmin = wmin.max (vr1.min, TYPE_SIGN (expr_type)); } /* If either input range contains only negative values we can truncate the minimum of the result range to the respective minimum range. 
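These BIT_AND/BIT_IOR truncations are driven by the may/must masks from zero_nonzero_bits_from_vr above. A standalone host-integer model of that computation, with a worked case:

static void
range_bits_model (unsigned HOST_WIDE_INT min, unsigned HOST_WIDE_INT max,
                  unsigned HOST_WIDE_INT *may, unsigned HOST_WIDE_INT *must)
{
  /* Assumes min <= max and both bounds on the same side of zero,
     as the caller checks.  */
  unsigned HOST_WIDE_INT varying = min ^ max;
  *may = min | max;
  *must = min & max;
  if (varying)
    {
      /* Bits below the highest varying bit are unconstrained.  */
      unsigned HOST_WIDE_INT low = ((unsigned HOST_WIDE_INT) 1
                                    << floor_log2 (varying)) - 1;
      *may |= low;
      *must &= ~low;
    }
}

/* For the range [8, 11]: may == 0xb (bit 2 is never set), must == 0x8
   (bit 3 is set in every value of the range).  */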
*/ if (int_cst_range0 && tree_int_cst_sgn (vr0.max) < 0) - dmin = dmin.max (tree_to_double_int (vr0.min), - TYPE_UNSIGNED (expr_type)); + wmin = wmin.max (vr0.min, TYPE_SIGN (expr_type)); if (int_cst_range1 && tree_int_cst_sgn (vr1.max) < 0) - dmin = dmin.max (tree_to_double_int (vr1.min), - TYPE_UNSIGNED (expr_type)); - min = double_int_to_tree (expr_type, dmin); + wmin = wmin.max (vr1.min, TYPE_SIGN (expr_type)); + min = wide_int_to_tree (expr_type, wmin); } else if (code == BIT_XOR_EXPR) { - double_int result_zero_bits, result_one_bits; + wide_int result_zero_bits, result_one_bits; result_zero_bits = (must_be_nonzero0 & must_be_nonzero1) | ~(may_be_nonzero0 | may_be_nonzero1); result_one_bits = must_be_nonzero0.and_not (may_be_nonzero1) | must_be_nonzero1.and_not (may_be_nonzero0); - max = double_int_to_tree (expr_type, ~result_zero_bits); - min = double_int_to_tree (expr_type, result_one_bits); + max = wide_int_to_tree (expr_type, ~result_zero_bits); + min = wide_int_to_tree (expr_type, result_one_bits); /* If the range has all positive or all negative values the result is better than VARYING. */ if (tree_int_cst_sgn (min) < 0 @@ -3287,15 +3241,17 @@ extract_range_from_unary_expr_1 (value_range_t *vr, if (is_overflow_infinity (vr0.min)) new_min = negative_overflow_infinity (outer_type); else - new_min = force_fit_type_double (outer_type, - tree_to_double_int (vr0.min), - 0, false); + new_min = force_fit_type (outer_type, + wide_int (vr0.min).force_to_size (TYPE_PRECISION (outer_type), + TYPE_SIGN (TREE_TYPE (vr0.min))), + 0, false); if (is_overflow_infinity (vr0.max)) new_max = positive_overflow_infinity (outer_type); else - new_max = force_fit_type_double (outer_type, - tree_to_double_int (vr0.max), - 0, false); + new_max = force_fit_type (outer_type, + wide_int (vr0.max).force_to_size (TYPE_PRECISION (outer_type), + TYPE_SIGN (TREE_TYPE (vr0.max))), + 0, false); set_and_canonicalize_value_range (vr, vr0.type, new_min, new_max, NULL); return; @@ -3393,7 +3349,7 @@ extract_range_from_unary_expr_1 (value_range_t *vr, min = (vr0.min != type_min_value ? int_const_binop (PLUS_EXPR, type_min_value, - integer_one_node) + build_int_cst (TREE_TYPE (type_min_value), 1)) : type_min_value); } else @@ -3868,30 +3824,29 @@ adjust_range_with_scev (value_range_t *vr, struct loop *loop, && (TREE_CODE (init) != SSA_NAME || get_value_range (init)->type == VR_RANGE)) { - double_int nit; + max_wide_int nit; /* We are only entering here for loop header PHI nodes, so using the number of latch executions is the correct thing to use. */ if (max_loop_iterations (loop, &nit)) { value_range_t maxvr = VR_INITIALIZER; - double_int dtmp; - bool unsigned_p = TYPE_UNSIGNED (TREE_TYPE (step)); - bool overflow = false; - - dtmp = tree_to_double_int (step) - .mul_with_sign (nit, unsigned_p, &overflow); + max_wide_int wtmp; + signop sgn = TYPE_SIGN (TREE_TYPE (step)); + bool overflow; + + wtmp = max_wide_int (step).mul (nit, sgn, &overflow); /* If the multiplication overflowed we can't do a meaningful adjustment. Likewise if the result doesn't fit in the type of the induction variable. For a signed type we have to check whether the result has the expected signedness which is that of the step as number of iterations is unsigned. 
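The guard that follows implements exactly this; restated compactly with the surrounding names (the patch additionally checks that a signed product keeps the step's sign):

bool overflow = false;
max_wide_int adv
  = max_wide_int (step).mul (nit, TYPE_SIGN (TREE_TYPE (step)), &overflow);
/* The total IV advance is step * latch-count.  Overflow at
   max_wide_int width, or a product that no longer fits the IV's own
   type, makes the adjustment unusable.  */
if (!overflow && adv.fits_to_tree_p (TREE_TYPE (init)))
  tem = wide_int_to_tree (TREE_TYPE (init), adv);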
*/ if (!overflow - && double_int_fits_to_tree_p (TREE_TYPE (init), dtmp) - && (unsigned_p - || ((dtmp.high ^ TREE_INT_CST_HIGH (step)) >= 0))) + && wtmp.fits_to_tree_p (TREE_TYPE (init)) + && (sgn == UNSIGNED + || (wtmp.gts_p (0) == wide_int::gts_p (step, 0)))) { - tem = double_int_to_tree (TREE_TYPE (init), dtmp); + tem = wide_int_to_tree (TREE_TYPE (init), wtmp); extract_range_from_binary_expr (&maxvr, PLUS_EXPR, TREE_TYPE (init), init, tem); /* Likewise if the addition did. */ @@ -4612,8 +4567,7 @@ register_new_assert_for (tree name, tree expr, machinery. */ if (TREE_CODE (val) == INTEGER_CST && TREE_OVERFLOW (val)) - val = build_int_cst_wide (TREE_TYPE (val), - TREE_INT_CST_LOW (val), TREE_INT_CST_HIGH (val)); + val = wide_int_to_tree (TREE_TYPE (val), val); /* The new assertion A will be inserted at BB or E. We need to determine if the new location is dominated by a previously @@ -4766,23 +4720,23 @@ extract_code_and_val_from_cond_with_ops (tree name, enum tree_code cond_code, (to transform signed values into unsigned) and at the end xor SGNBIT back. */ -static double_int -masked_increment (double_int val, double_int mask, double_int sgnbit, +static wide_int +masked_increment (wide_int val, wide_int mask, wide_int sgnbit, unsigned int prec) { - double_int bit = double_int_one, res; + wide_int bit = wide_int::one (prec), res; unsigned int i; val ^= sgnbit; for (i = 0; i < prec; i++, bit += bit) { res = mask; - if ((res & bit).is_zero ()) + if ((res & bit).zero_p ()) continue; - res = bit - double_int_one; + res = bit - 1; res = (val + bit).and_not (res); res &= mask; - if (res.ugt (val)) + if (res.gtu_p (val)) return res ^ sgnbit; } return val ^ sgnbit; @@ -4957,7 +4911,7 @@ register_edge_assert_for_2 (tree name, edge e, gimple_stmt_iterator bsi, gimple def_stmt = SSA_NAME_DEF_STMT (name); tree name2 = NULL_TREE, names[2], cst2 = NULL_TREE; tree val2 = NULL_TREE; - double_int mask = double_int_zero; + wide_int mask = 0; unsigned int prec = TYPE_PRECISION (TREE_TYPE (val)); unsigned int nprec = prec; enum tree_code rhs_code = ERROR_MARK; @@ -5024,15 +4978,14 @@ register_edge_assert_for_2 (tree name, edge e, gimple_stmt_iterator bsi, name2 = gimple_assign_rhs1 (def_stmt); cst2 = gimple_assign_rhs2 (def_stmt); if (TREE_CODE (name2) == SSA_NAME - && host_integerp (cst2, 1) + && tree_fits_uhwi_p (cst2) && INTEGRAL_TYPE_P (TREE_TYPE (name2)) - && IN_RANGE (tree_low_cst (cst2, 1), 1, prec - 1) - && prec <= HOST_BITS_PER_DOUBLE_INT + && IN_RANGE (tree_to_uhwi (cst2), 1, prec - 1) && prec == GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (val))) && live_on_edge (e, name2) && !has_single_use (name2)) { - mask = double_int::mask (tree_low_cst (cst2, 1)); + mask = wide_int::mask (tree_to_uhwi (cst2), false, prec); val2 = fold_binary (LSHIFT_EXPR, TREE_TYPE (val), val, cst2); } } @@ -5055,26 +5008,26 @@ register_edge_assert_for_2 (tree name, edge e, gimple_stmt_iterator bsi, val2 = fold_convert (type, val2); } tmp = fold_build2 (MINUS_EXPR, TREE_TYPE (tmp), tmp, val2); - new_val = double_int_to_tree (TREE_TYPE (tmp), mask); + new_val = wide_int_to_tree (TREE_TYPE (tmp), mask); new_comp_code = comp_code == EQ_EXPR ? 
LE_EXPR : GT_EXPR; } else if (comp_code == LT_EXPR || comp_code == GE_EXPR) { - double_int minval - = double_int::min_value (prec, TYPE_UNSIGNED (TREE_TYPE (val))); + wide_int minval + = wide_int::min_value (prec, TYPE_SIGN (TREE_TYPE (val))); new_val = val2; - if (minval == tree_to_double_int (new_val)) + if (minval == wide_int (new_val)) new_val = NULL_TREE; } else { - double_int maxval - = double_int::max_value (prec, TYPE_UNSIGNED (TREE_TYPE (val))); - mask |= tree_to_double_int (val2); + wide_int maxval + = wide_int::max_value (prec, TYPE_SIGN (TREE_TYPE (val))); + mask |= wide_int (val2); if (mask == maxval) new_val = NULL_TREE; else - new_val = double_int_to_tree (TREE_TYPE (val2), mask); + new_val = wide_int_to_tree (TREE_TYPE (val2), mask); } if (new_val) @@ -5126,7 +5079,6 @@ register_edge_assert_for_2 (tree name, edge e, gimple_stmt_iterator bsi, && INTEGRAL_TYPE_P (TREE_TYPE (name2)) && TREE_CODE (cst2) == INTEGER_CST && !integer_zerop (cst2) - && nprec <= HOST_BITS_PER_DOUBLE_INT && (nprec > 1 || TYPE_UNSIGNED (TREE_TYPE (val)))) { @@ -5149,17 +5101,17 @@ register_edge_assert_for_2 (tree name, edge e, gimple_stmt_iterator bsi, } if (names[0] || names[1]) { - double_int minv, maxv = double_int_zero, valv, cst2v; - double_int tem, sgnbit; + wide_int minv, maxv = 0, valv, cst2v; + wide_int tem, sgnbit; bool valid_p = false, valn = false, cst2n = false; enum tree_code ccode = comp_code; - valv = tree_to_double_int (val).zext (nprec); - cst2v = tree_to_double_int (cst2).zext (nprec); - if (!TYPE_UNSIGNED (TREE_TYPE (val))) + valv = wide_int (val).zforce_to_size (nprec); + cst2v = wide_int (cst2).zforce_to_size (nprec); + if (TYPE_SIGN (TREE_TYPE (val)) == SIGNED) { - valn = valv.sext (nprec).is_negative (); - cst2n = cst2v.sext (nprec).is_negative (); + valn = valv.sext (nprec).neg_p (SIGNED); + cst2n = cst2v.sext (nprec).neg_p (SIGNED); } /* If CST2 doesn't have most significant bit set, but VAL is negative, we have comparison like @@ -5167,9 +5119,9 @@ register_edge_assert_for_2 (tree name, edge e, gimple_stmt_iterator bsi, if (!cst2n && valn) ccode = ERROR_MARK; if (cst2n) - sgnbit = double_int_one.llshift (nprec - 1, nprec).zext (nprec); + sgnbit = wide_int::set_bit_in_zero (nprec - 1, nprec); else - sgnbit = double_int_zero; + sgnbit = wide_int::zero (nprec); minv = valv & cst2v; switch (ccode) { @@ -5182,30 +5134,29 @@ register_edge_assert_for_2 (tree name, edge e, gimple_stmt_iterator bsi, maxv = maxv.zext (nprec); valid_p = true; break; + case NE_EXPR: tem = valv | ~cst2v; tem = tem.zext (nprec); /* If VAL is 0, handle (X & CST2) != 0 as (X & CST2) > 0U. */ - if (valv.is_zero ()) + if (valv.zero_p ()) { cst2n = false; - sgnbit = double_int_zero; + sgnbit = wide_int::zero (nprec); goto gt_expr; } /* If (VAL | ~CST2) is all ones, handle it as (X & CST2) < VAL. 
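masked_increment, rewritten above for wide_int, is the workhorse of these (X & CST2) asserts. A standalone model of the unsigned case (sgnbit == 0) on host integers:

static unsigned HOST_WIDE_INT
masked_increment_model (unsigned HOST_WIDE_INT val,
                        unsigned HOST_WIDE_INT mask)
{
  unsigned HOST_WIDE_INT bit, res;

  /* Try each mask bit as the carry position: bump VAL at that bit,
     clear everything below it, and keep only mask bits.  The first
     result that exceeds VAL is the smallest successor of VAL among
     values of the form X & MASK.  */
  for (bit = 1; bit != 0; bit += bit)
    {
      if ((mask & bit) == 0)
        continue;
      res = ((val + bit) & ~(bit - 1)) & mask;
      if (res > val)
        return res;
    }
  return val;  /* VAL is already the maximum of that form.  */
}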
*/ - if (tem == double_int::mask (nprec)) + if (tem == -1) { cst2n = false; valn = false; - sgnbit = double_int_zero; + sgnbit = wide_int::zero (nprec); goto lt_expr; } - if (!cst2n - && cst2v.sext (nprec).is_negative ()) - sgnbit - = double_int_one.llshift (nprec - 1, nprec).zext (nprec); - if (!sgnbit.is_zero ()) + if (!cst2n && cst2v.sext (nprec).neg_p (SIGNED)) + sgnbit = wide_int::set_bit_in_zero (nprec - 1, nprec); + if (!sgnbit.zero_p ()) { if (valv == sgnbit) { @@ -5213,15 +5164,16 @@ register_edge_assert_for_2 (tree name, edge e, gimple_stmt_iterator bsi, valn = true; goto gt_expr; } - if (tem == double_int::mask (nprec - 1)) + if (tem == wide_int::mask (nprec - 1, false, nprec)) { cst2n = true; goto lt_expr; } if (!cst2n) - sgnbit = double_int_zero; + sgnbit = 0; } break; + case GE_EXPR: /* Minimum unsigned value for >= if (VAL & CST2) == VAL is VAL and maximum unsigned value is ~0. For signed @@ -5236,9 +5188,10 @@ register_edge_assert_for_2 (tree name, edge e, gimple_stmt_iterator bsi, if (minv == valv) break; } - maxv = double_int::mask (nprec - (cst2n ? 1 : 0)); + maxv = wide_int::mask (nprec - (cst2n ? 1 : 0), false, nprec); valid_p = true; break; + case GT_EXPR: gt_expr: /* Find out smallest MINV where MINV > VAL @@ -5247,9 +5200,10 @@ register_edge_assert_for_2 (tree name, edge e, gimple_stmt_iterator bsi, minv = masked_increment (valv, cst2v, sgnbit, nprec); if (minv == valv) break; - maxv = double_int::mask (nprec - (cst2n ? 1 : 0)); + maxv = wide_int::mask (nprec - (cst2n ? 1 : 0), false, nprec); valid_p = true; break; + case LE_EXPR: /* Minimum unsigned value for <= is 0 and maximum unsigned value is VAL | ~CST2 if (VAL & CST2) == VAL. @@ -5266,13 +5220,14 @@ register_edge_assert_for_2 (tree name, edge e, gimple_stmt_iterator bsi, maxv = masked_increment (valv, cst2v, sgnbit, nprec); if (maxv == valv) break; - maxv -= double_int_one; + maxv -= 1; } maxv |= ~cst2v; maxv = maxv.zext (nprec); minv = sgnbit; valid_p = true; break; + case LT_EXPR: lt_expr: /* Minimum unsigned value for < is 0 and maximum @@ -5295,17 +5250,18 @@ register_edge_assert_for_2 (tree name, edge e, gimple_stmt_iterator bsi, if (maxv == valv) break; } - maxv -= double_int_one; + maxv -= 1; maxv |= ~cst2v; maxv = maxv.zext (nprec); minv = sgnbit; valid_p = true; break; + default: break; } if (valid_p - && (maxv - minv).zext (nprec) != double_int::mask (nprec)) + && (maxv - minv).zext (nprec) != wide_int::minus_one (nprec)) { tree tmp, new_val, type; int i; @@ -5313,7 +5269,7 @@ register_edge_assert_for_2 (tree name, edge e, gimple_stmt_iterator bsi, for (i = 0; i < 2; i++) if (names[i]) { - double_int maxv2 = maxv; + wide_int maxv2 = maxv; tmp = names[i]; type = TREE_TYPE (names[i]); if (!TYPE_UNSIGNED (type)) @@ -5321,13 +5277,13 @@ register_edge_assert_for_2 (tree name, edge e, gimple_stmt_iterator bsi, type = build_nonstandard_integer_type (nprec, 1); tmp = build1 (NOP_EXPR, type, names[i]); } - if (!minv.is_zero ()) + if (!minv.zero_p ()) { tmp = build2 (PLUS_EXPR, type, tmp, - double_int_to_tree (type, -minv)); + wide_int_to_tree (type, -minv)); maxv2 = maxv - minv; } - new_val = double_int_to_tree (type, maxv2); + new_val = wide_int_to_tree (type, maxv2); if (dump_file) { @@ -6162,7 +6118,8 @@ check_array_ref (location_t location, tree ref, bool ignore_off_by_one) } low_bound = array_ref_low_bound (ref); - up_bound_p1 = int_const_binop (PLUS_EXPR, up_bound, integer_one_node); + up_bound_p1 = int_const_binop (PLUS_EXPR, up_bound, + build_int_cst (TREE_TYPE (up_bound), 1)); if (TREE_CODE 
(low_sub) == SSA_NAME) { @@ -6259,7 +6216,7 @@ search_for_addr_array (tree t, location_t location) { tree tem = TREE_OPERAND (TREE_OPERAND (t, 0), 0); tree low_bound, up_bound, el_sz; - double_int idx; + addr_wide_int idx; if (TREE_CODE (TREE_TYPE (tem)) != ARRAY_TYPE || TREE_CODE (TREE_TYPE (TREE_TYPE (tem))) == ARRAY_TYPE || !TYPE_DOMAIN (TREE_TYPE (tem))) @@ -6277,8 +6234,8 @@ search_for_addr_array (tree t, location_t location) return; idx = mem_ref_offset (t); - idx = idx.sdiv (tree_to_double_int (el_sz), TRUNC_DIV_EXPR); - if (idx.slt (double_int_zero)) + idx = idx.sdiv_trunc (addr_wide_int (el_sz)); + if (idx.lts_p (0)) { if (dump_file && (dump_flags & TDF_DETAILS)) { @@ -6290,9 +6247,9 @@ search_for_addr_array (tree t, location_t location) "array subscript is below array bounds"); TREE_NO_WARNING (t) = 1; } - else if (idx.sgt (tree_to_double_int (up_bound) - - tree_to_double_int (low_bound) - + double_int_one)) + else if (idx.gts_p (addr_wide_int (up_bound) + - low_bound + + 1)) { if (dump_file && (dump_flags & TDF_DETAILS)) { @@ -7498,9 +7455,11 @@ union_ranges (enum value_range_type *vr0type, && vrp_val_is_max (vr1max)) { tree min = int_const_binop (PLUS_EXPR, - *vr0max, integer_one_node); + *vr0max, + build_int_cst (TREE_TYPE (*vr0max), 1)); tree max = int_const_binop (MINUS_EXPR, - vr1min, integer_one_node); + vr1min, + build_int_cst (TREE_TYPE (vr1min), 1)); if (!operand_less_p (max, min)) { *vr0type = VR_ANTI_RANGE; @@ -7522,9 +7481,11 @@ union_ranges (enum value_range_type *vr0type, && vrp_val_is_max (*vr0max)) { tree min = int_const_binop (PLUS_EXPR, - vr1max, integer_one_node); + vr1max, + build_int_cst (TREE_TYPE (vr1max), 1)); tree max = int_const_binop (MINUS_EXPR, - *vr0min, integer_one_node); + *vr0min, + build_int_cst (TREE_TYPE (*vr0min), 1)); if (!operand_less_p (max, min)) { *vr0type = VR_ANTI_RANGE; @@ -7560,9 +7521,11 @@ union_ranges (enum value_range_type *vr0type, { /* Arbitrarily choose the right or left gap. 
*/ if (!mineq && TREE_CODE (vr1min) == INTEGER_CST) - *vr0max = int_const_binop (MINUS_EXPR, vr1min, integer_one_node); + *vr0max = int_const_binop (MINUS_EXPR, vr1min, + build_int_cst (TREE_TYPE (vr1min), 1)); else if (!maxeq && TREE_CODE (vr1max) == INTEGER_CST) - *vr0min = int_const_binop (PLUS_EXPR, vr1max, integer_one_node); + *vr0min = int_const_binop (PLUS_EXPR, vr1max, + build_int_cst (TREE_TYPE (vr1max), 1)); else goto give_up; } @@ -7593,12 +7556,14 @@ union_ranges (enum value_range_type *vr0type, *vr0type = VR_ANTI_RANGE; if (!mineq && TREE_CODE (*vr0min) == INTEGER_CST) { - *vr0max = int_const_binop (MINUS_EXPR, *vr0min, integer_one_node); + *vr0max = int_const_binop (MINUS_EXPR, *vr0min, + build_int_cst (TREE_TYPE (*vr0min), 1)); *vr0min = vr1min; } else if (!maxeq && TREE_CODE (*vr0max) == INTEGER_CST) { - *vr0min = int_const_binop (PLUS_EXPR, *vr0max, integer_one_node); + *vr0min = int_const_binop (PLUS_EXPR, *vr0max, + build_int_cst (TREE_TYPE (*vr0max), 1)); *vr0max = vr1max; } else @@ -7626,7 +7591,8 @@ union_ranges (enum value_range_type *vr0type, && vr1type == VR_RANGE) { if (TREE_CODE (vr1min) == INTEGER_CST) - *vr0max = int_const_binop (MINUS_EXPR, vr1min, integer_one_node); + *vr0max = int_const_binop (MINUS_EXPR, vr1min, + build_int_cst (TREE_TYPE (vr1min), 1)); else goto give_up; } @@ -7636,7 +7602,8 @@ union_ranges (enum value_range_type *vr0type, if (TREE_CODE (*vr0max) == INTEGER_CST) { *vr0type = vr1type; - *vr0min = int_const_binop (PLUS_EXPR, *vr0max, integer_one_node); + *vr0min = int_const_binop (PLUS_EXPR, *vr0max, + build_int_cst (TREE_TYPE (*vr0max), 1)); *vr0max = vr1max; } else @@ -7660,7 +7627,8 @@ union_ranges (enum value_range_type *vr0type, && vr1type == VR_RANGE) { if (TREE_CODE (vr1max) == INTEGER_CST) - *vr0min = int_const_binop (PLUS_EXPR, vr1max, integer_one_node); + *vr0min = int_const_binop (PLUS_EXPR, vr1max, + build_int_cst (TREE_TYPE (vr1max), 1)); else goto give_up; } @@ -7671,7 +7639,8 @@ union_ranges (enum value_range_type *vr0type, { *vr0type = vr1type; *vr0min = vr1min; - *vr0max = int_const_binop (MINUS_EXPR, *vr0min, integer_one_node); + *vr0max = int_const_binop (MINUS_EXPR, *vr0min, + build_int_cst (TREE_TYPE (*vr0min), 1)); } else goto give_up; @@ -7786,7 +7755,8 @@ intersect_ranges (enum value_range_type *vr0type, if (mineq) { if (TREE_CODE (vr1max) == INTEGER_CST) - *vr0min = int_const_binop (PLUS_EXPR, vr1max, integer_one_node); + *vr0min = int_const_binop (PLUS_EXPR, vr1max, + build_int_cst (TREE_TYPE (vr1max), 1)); else *vr0min = vr1max; } @@ -7795,7 +7765,7 @@ intersect_ranges (enum value_range_type *vr0type, { if (TREE_CODE (vr1min) == INTEGER_CST) *vr0max = int_const_binop (MINUS_EXPR, vr1min, - integer_one_node); + build_int_cst (TREE_TYPE (vr1min), 1)); else *vr0max = vr1min; } @@ -7841,7 +7811,7 @@ intersect_ranges (enum value_range_type *vr0type, *vr0type = VR_RANGE; if (TREE_CODE (*vr0max) == INTEGER_CST) *vr0min = int_const_binop (PLUS_EXPR, *vr0max, - integer_one_node); + build_int_cst (TREE_TYPE (*vr0max), 1)); else *vr0min = *vr0max; *vr0max = vr1max; @@ -7852,7 +7822,7 @@ intersect_ranges (enum value_range_type *vr0type, *vr0type = VR_RANGE; if (TREE_CODE (*vr0min) == INTEGER_CST) *vr0max = int_const_binop (MINUS_EXPR, *vr0min, - integer_one_node); + build_int_cst (TREE_TYPE (*vr0min), 1)); else *vr0max = *vr0min; *vr0min = vr1min; @@ -7904,7 +7874,7 @@ intersect_ranges (enum value_range_type *vr0type, { if (TREE_CODE (vr1min) == INTEGER_CST) *vr0max = int_const_binop (MINUS_EXPR, vr1min, - integer_one_node); + 
build_int_cst (TREE_TYPE (vr1min), 1)); else *vr0max = vr1min; } @@ -7914,7 +7884,7 @@ intersect_ranges (enum value_range_type *vr0type, *vr0type = VR_RANGE; if (TREE_CODE (*vr0max) == INTEGER_CST) *vr0min = int_const_binop (PLUS_EXPR, *vr0max, - integer_one_node); + build_int_cst (TREE_TYPE (*vr0max), 1)); else *vr0min = *vr0max; *vr0max = vr1max; @@ -7938,7 +7908,7 @@ intersect_ranges (enum value_range_type *vr0type, { if (TREE_CODE (vr1max) == INTEGER_CST) *vr0min = int_const_binop (PLUS_EXPR, vr1max, - integer_one_node); + build_int_cst (TREE_TYPE (vr1max), 1)); else *vr0min = vr1max; } @@ -7948,7 +7918,7 @@ intersect_ranges (enum value_range_type *vr0type, *vr0type = VR_RANGE; if (TREE_CODE (*vr0min) == INTEGER_CST) *vr0max = int_const_binop (MINUS_EXPR, *vr0min, - integer_one_node); + build_int_cst (TREE_TYPE (*vr0min), 1)); else *vr0max = *vr0min; *vr0min = vr1min; @@ -8358,7 +8328,8 @@ simplify_truth_ops_using_ranges (gimple_stmt_iterator *gsi, gimple stmt) if (rhs_code == EQ_EXPR) { if (TREE_CODE (op1) == INTEGER_CST) - op1 = int_const_binop (BIT_XOR_EXPR, op1, integer_one_node); + op1 = int_const_binop (BIT_XOR_EXPR, op1, + build_int_cst (TREE_TYPE (op1), 1)); else return false; } @@ -8544,9 +8515,9 @@ simplify_bit_ops_using_ranges (gimple_stmt_iterator *gsi, gimple stmt) tree op = NULL_TREE; value_range_t vr0 = VR_INITIALIZER; value_range_t vr1 = VR_INITIALIZER; - double_int may_be_nonzero0, may_be_nonzero1; - double_int must_be_nonzero0, must_be_nonzero1; - double_int mask; + wide_int may_be_nonzero0, may_be_nonzero1; + wide_int must_be_nonzero0, must_be_nonzero1; + wide_int mask; if (TREE_CODE (op0) == SSA_NAME) vr0 = *(get_value_range (op0)); @@ -8562,22 +8533,22 @@ simplify_bit_ops_using_ranges (gimple_stmt_iterator *gsi, gimple stmt) else return false; - if (!zero_nonzero_bits_from_vr (&vr0, &may_be_nonzero0, &must_be_nonzero0)) + if (!zero_nonzero_bits_from_vr (TREE_TYPE (op0), &vr0, &may_be_nonzero0, &must_be_nonzero0)) return false; - if (!zero_nonzero_bits_from_vr (&vr1, &may_be_nonzero1, &must_be_nonzero1)) + if (!zero_nonzero_bits_from_vr (TREE_TYPE (op1), &vr1, &may_be_nonzero1, &must_be_nonzero1)) return false; switch (gimple_assign_rhs_code (stmt)) { case BIT_AND_EXPR: mask = may_be_nonzero0.and_not (must_be_nonzero1); - if (mask.is_zero ()) + if (mask.zero_p ()) { op = op0; break; } mask = may_be_nonzero1.and_not (must_be_nonzero0); - if (mask.is_zero ()) + if (mask.zero_p ()) { op = op1; break; @@ -8585,13 +8556,13 @@ simplify_bit_ops_using_ranges (gimple_stmt_iterator *gsi, gimple stmt) break; case BIT_IOR_EXPR: mask = may_be_nonzero0.and_not (must_be_nonzero1); - if (mask.is_zero ()) + if (mask.zero_p ()) { op = op1; break; } mask = may_be_nonzero1.and_not (must_be_nonzero0); - if (mask.is_zero ()) + if (mask.zero_p ()) { op = op0; break; @@ -8677,11 +8648,12 @@ test_for_singularity (enum tree_code cond_code, tree op0, by PRECISION and UNSIGNED_P. */ static bool -range_fits_type_p (value_range_t *vr, unsigned precision, bool unsigned_p) +range_fits_type_p (value_range_t *vr, unsigned dest_precision, signop dest_sgn) { tree src_type; unsigned src_precision; - double_int tem; + max_wide_int tem; + signop src_sgn; /* We can only handle integral and pointer types. 
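The BIT_AND_EXPR/BIT_IOR_EXPR cases in simplify_bit_ops_using_ranges above rest on a simple mask identity: if every bit that may be nonzero in one operand is guaranteed nonzero in the other, the AND collapses to the first operand and the IOR to the second. A minimal standalone sketch of that test in plain C (illustrative names, not the GCC API):

#include <stdint.h>
#include <stdbool.h>

/* If no bit of op0 that may be set lacks the guarantee that op1 has
   it set, then (x & y) == x for every x, y drawn from the ranges.
   This is the mask = may_be_nonzero0 &~ must_be_nonzero1 test.  */
static bool
and_simplifies_to_op0 (uint64_t may_be_nonzero0, uint64_t must_be_nonzero1)
{
  return (may_be_nonzero0 & ~must_be_nonzero1) == 0;
}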
*/ src_type = TREE_TYPE (vr->min); @@ -8689,13 +8661,13 @@ range_fits_type_p (value_range_t *vr, unsigned precision, bool unsigned_p) && !POINTER_TYPE_P (src_type)) return false; - /* An extension is fine unless VR is signed and unsigned_p, + /* An extension is fine unless VR is SIGNED and dest_sgn is UNSIGNED, and so is an identity transform. */ src_precision = TYPE_PRECISION (TREE_TYPE (vr->min)); - if ((src_precision < precision - && !(unsigned_p && !TYPE_UNSIGNED (src_type))) - || (src_precision == precision - && TYPE_UNSIGNED (src_type) == unsigned_p)) + src_sgn = TYPE_SIGN (src_type); + if ((src_precision < dest_precision + && !(dest_sgn == UNSIGNED && src_sgn == SIGNED)) + || (src_precision == dest_precision && src_sgn == dest_sgn)) return true; /* Now we can only handle ranges with constant bounds. */ @@ -8704,21 +8676,21 @@ range_fits_type_p (value_range_t *vr, unsigned precision, bool unsigned_p) || TREE_CODE (vr->max) != INTEGER_CST) return false; - /* For sign changes, the MSB of the double_int has to be clear. + /* For sign changes, the MSB of the wide_int has to be clear. An unsigned value with its MSB set cannot be represented by - a signed double_int, while a negative value cannot be represented - by an unsigned double_int. */ - if (TYPE_UNSIGNED (src_type) != unsigned_p - && (TREE_INT_CST_HIGH (vr->min) | TREE_INT_CST_HIGH (vr->max)) < 0) + a signed wide_int, while a negative value cannot be represented + by an unsigned wide_int. */ + if (src_sgn != dest_sgn + && (max_wide_int (vr->min).lts_p (0) || max_wide_int (vr->max).lts_p (0))) return false; /* Then we can perform the conversion on both ends and compare the result for equality. */ - tem = tree_to_double_int (vr->min).ext (precision, unsigned_p); - if (tree_to_double_int (vr->min) != tem) + tem = max_wide_int (vr->min).ext (dest_precision, dest_sgn); + if (max_wide_int (vr->min) != tem) return false; - tem = tree_to_double_int (vr->max).ext (precision, unsigned_p); - if (tree_to_double_int (vr->max) != tem) + tem = max_wide_int (vr->max).ext (dest_precision, dest_sgn); + if (max_wide_int (vr->max) != tem) return false; return true; @@ -8833,7 +8805,7 @@ simplify_cond_using_ranges (gimple stmt) if (range_int_cst_p (vr) && range_fits_type_p (vr, TYPE_PRECISION (TREE_TYPE (op0)), - TYPE_UNSIGNED (TREE_TYPE (op0))) + TYPE_SIGN (TREE_TYPE (op0))) && int_fits_type_p (op1, TREE_TYPE (innerop)) /* The range must not have overflowed, or if it did overflow we must not be wrapping/trapping overflow and optimizing @@ -8978,9 +8950,9 @@ simplify_conversion_using_ranges (gimple stmt) tree innerop, middleop, finaltype; gimple def_stmt; value_range_t *innervr; - bool inner_unsigned_p, middle_unsigned_p, final_unsigned_p; + signop inner_sgn, middle_sgn, final_sgn; unsigned inner_prec, middle_prec, final_prec; - double_int innermin, innermed, innermax, middlemin, middlemed, middlemax; + max_wide_int innermin, innermed, innermax, middlemin, middlemed, middlemax; finaltype = TREE_TYPE (gimple_assign_lhs (stmt)); if (!INTEGRAL_TYPE_P (finaltype)) @@ -9004,8 +8976,8 @@ simplify_conversion_using_ranges (gimple stmt) /* Simulate the conversion chain to check if the result is equal if the middle conversion is removed. 
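The decisive check in range_fits_type_p above is: extend each constant bound to the destination precision under the destination sign, and require the round trip to be the identity. For a single 64-bit block that boils down to the following standalone sketch (illustrative names only):

#include <stdint.h>
#include <stdbool.h>

/* Sign- or zero-extend the low PREC bits of X to 64 bits.  */
static uint64_t
ext64 (uint64_t x, unsigned prec, bool is_signed)
{
  if (prec >= 64)
    return x;
  uint64_t mask = (UINT64_C (1) << prec) - 1;
  x &= mask;
  if (is_signed && (x >> (prec - 1)) != 0)
    x |= ~mask;                 /* replicate the sign bit upward */
  return x;
}

/* A bound survives the conversion iff extending it changes nothing.  */
static bool
bound_fits_p (uint64_t bound, unsigned dest_prec, bool dest_signed)
{
  return ext64 (bound, dest_prec, dest_signed) == bound;
}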
*/ - innermin = tree_to_double_int (innervr->min); - innermax = tree_to_double_int (innervr->max); + innermin = innervr->min; + innermax = innervr->max; inner_prec = TYPE_PRECISION (TREE_TYPE (innerop)); middle_prec = TYPE_PRECISION (TREE_TYPE (middleop)); @@ -9013,34 +8985,34 @@ simplify_conversion_using_ranges (gimple stmt) /* If the first conversion is not injective, the second must not be widening. */ - if ((innermax - innermin).ugt (double_int::mask (middle_prec)) + if ((innermax - innermin).gtu_p (max_wide_int::mask (middle_prec, false)) && middle_prec < final_prec) return false; /* We also want a medium value so that we can track the effect that narrowing conversions with sign change have. */ - inner_unsigned_p = TYPE_UNSIGNED (TREE_TYPE (innerop)); - if (inner_unsigned_p) - innermed = double_int::mask (inner_prec).lrshift (1, inner_prec); + inner_sgn = TYPE_SIGN (TREE_TYPE (innerop)); + if (inner_sgn == UNSIGNED) + innermed = max_wide_int::shifted_mask (1, inner_prec - 1, false); else - innermed = double_int_zero; - if (innermin.cmp (innermed, inner_unsigned_p) >= 0 - || innermed.cmp (innermax, inner_unsigned_p) >= 0) + innermed = 0; + if (innermin.cmp (innermed, inner_sgn) >= 0 + || innermed.cmp (innermax, inner_sgn) >= 0) innermed = innermin; - middle_unsigned_p = TYPE_UNSIGNED (TREE_TYPE (middleop)); - middlemin = innermin.ext (middle_prec, middle_unsigned_p); - middlemed = innermed.ext (middle_prec, middle_unsigned_p); - middlemax = innermax.ext (middle_prec, middle_unsigned_p); + middle_sgn = TYPE_SIGN (TREE_TYPE (middleop)); + middlemin = innermin.ext (middle_prec, middle_sgn); + middlemed = innermed.ext (middle_prec, middle_sgn); + middlemax = innermax.ext (middle_prec, middle_sgn); /* Require that the final conversion applied to both the original and the intermediate range produces the same result. */ - final_unsigned_p = TYPE_UNSIGNED (finaltype); - if (middlemin.ext (final_prec, final_unsigned_p) - != innermin.ext (final_prec, final_unsigned_p) - || middlemed.ext (final_prec, final_unsigned_p) - != innermed.ext (final_prec, final_unsigned_p) - || middlemax.ext (final_prec, final_unsigned_p) - != innermax.ext (final_prec, final_unsigned_p)) + final_sgn = TYPE_SIGN (finaltype); + if (middlemin.ext (final_prec, final_sgn) + != innermin.ext (final_prec, final_sgn) + || middlemed.ext (final_prec, final_sgn) + != innermed.ext (final_prec, final_sgn) + || middlemax.ext (final_prec, final_sgn) + != innermax.ext (final_prec, final_sgn)) return false; gimple_assign_set_rhs1 (stmt, innerop); @@ -9070,8 +9042,7 @@ simplify_float_conversion_using_ranges (gimple_stmt_iterator *gsi, gimple stmt) if (TYPE_UNSIGNED (TREE_TYPE (rhs1)) && (can_float_p (fltmode, TYPE_MODE (TREE_TYPE (rhs1)), 0) != CODE_FOR_nothing) - && range_fits_type_p (vr, GET_MODE_PRECISION - (TYPE_MODE (TREE_TYPE (rhs1))), 0)) + && range_fits_type_p (vr, TYPE_PRECISION (TREE_TYPE (rhs1)), SIGNED)) mode = TYPE_MODE (TREE_TYPE (rhs1)); /* If we can do the conversion in the current input mode do nothing. */ else if (can_float_p (fltmode, TYPE_MODE (TREE_TYPE (rhs1)), @@ -9088,7 +9059,7 @@ simplify_float_conversion_using_ranges (gimple_stmt_iterator *gsi, gimple stmt) or if the value-range does not fit in the signed type try with a wider mode. 
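simplify_conversion_using_ranges above simulates the two-step cast on three witnesses: the range minimum, the maximum, and a medium value chosen to expose the effect of a narrowing sign change. Reusing the ext64 helper from the previous sketch, the shape of the check is roughly:

/* The middle cast in inner -> middle -> final is removable when, for
   every witness, converting through the middle precision and then to
   the final precision agrees with converting directly.  */
static bool
middle_cast_redundant_p (const uint64_t *witness, int n,
                         unsigned mid_prec, bool mid_signed,
                         unsigned fin_prec, bool fin_signed)
{
  for (int i = 0; i < n; i++)
    if (ext64 (ext64 (witness[i], mid_prec, mid_signed),
               fin_prec, fin_signed)
        != ext64 (witness[i], fin_prec, fin_signed))
      return false;
  return true;
}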
*/ if (can_float_p (fltmode, mode, 0) != CODE_FOR_nothing - && range_fits_type_p (vr, GET_MODE_PRECISION (mode), 0)) + && range_fits_type_p (vr, GET_MODE_PRECISION (mode), SIGNED)) break; mode = GET_MODE_WIDER_MODE (mode); @@ -9120,6 +9091,7 @@ static bool simplify_stmt_using_ranges (gimple_stmt_iterator *gsi) { gimple stmt = gsi_stmt (*gsi); + if (is_gimple_assign (stmt)) { enum tree_code rhs_code = gimple_assign_rhs_code (stmt); diff --git a/gcc/tree.c b/gcc/tree.c index a1c0deab21e..028710a8c98 100644 --- a/gcc/tree.c +++ b/gcc/tree.c @@ -59,6 +59,7 @@ along with GCC; see the file COPYING3. If not see #include "except.h" #include "debug.h" #include "intl.h" +#include "wide-int.h" /* Tree code classes. */ @@ -514,7 +515,7 @@ init_ttree (void) int_cst_hash_table = htab_create_ggc (1024, int_cst_hash_hash, int_cst_hash_eq, NULL); - int_cst_node = make_node (INTEGER_CST); + int_cst_node = make_int_cst (1); cl_option_hash_table = htab_create_ggc (64, cl_option_hash_hash, cl_option_hash_eq, NULL); @@ -617,7 +618,7 @@ decl_assembler_name_hash (const_tree asmname) /* Compute the number of bytes occupied by a tree with code CODE. This function cannot be used for nodes that have variable sizes, - including TREE_VEC, STRING_CST, and CALL_EXPR. */ + including TREE_VEC, INTEGER_CST, STRING_CST, and CALL_EXPR. */ size_t tree_code_size (enum tree_code code) { @@ -665,7 +666,7 @@ tree_code_size (enum tree_code code) case tcc_constant: /* a constant */ switch (code) { - case INTEGER_CST: return sizeof (struct tree_int_cst); + case INTEGER_CST: gcc_unreachable (); case REAL_CST: return sizeof (struct tree_real_cst); case FIXED_CST: return sizeof (struct tree_fixed_cst); case COMPLEX_CST: return sizeof (struct tree_complex); @@ -712,6 +713,10 @@ tree_size (const_tree node) const enum tree_code code = TREE_CODE (node); switch (code) { + case INTEGER_CST: + return (sizeof (struct tree_int_cst) + + (TREE_INT_CST_NUNITS (node) - 1) * sizeof (HOST_WIDE_INT)); + case TREE_BINFO: return (offsetof (struct tree_binfo, base_binfos) + vec<tree, va_gc> @@ -844,8 +849,9 @@ allocate_decl_uid (void) /* Return a newly allocated node of code CODE. For decl and type nodes, some other fields are initialized. The rest of the node is - initialized to zero. This function cannot be used for TREE_VEC or - OMP_CLAUSE nodes, which is enforced by asserts in tree_code_size. + initialized to zero. This function cannot be used for TREE_VEC, + INTEGER_CST or OMP_CLAUSE nodes, which is enforced by asserts in + tree_code_size. Achoo! I got a code in the node. */ @@ -1042,7 +1048,13 @@ build_int_cst (tree type, HOST_WIDE_INT low) if (!type) type = integer_type_node; - return double_int_to_tree (type, double_int::from_shwi (low)); + return wide_int_to_tree (type, low); +} + +/* static inline */ tree +build_int_cstu (tree type, unsigned HOST_WIDE_INT cst) +{ + return wide_int_to_tree (type, cst); } /* Create an INT_CST node with a LOW value sign extended to TYPE. */ @@ -1052,7 +1064,7 @@ build_int_cst_type (tree type, HOST_WIDE_INT low) { gcc_assert (type); - return double_int_to_tree (type, double_int::from_shwi (low)); + return wide_int_to_tree (type, low); } /* Constructs tree in type TYPE from with value given by CST. Signedness @@ -1065,24 +1077,11 @@ double_int_to_tree (tree type, double_int cst) cst = cst.ext (TYPE_PRECISION (type), !sign_extended_type); - return build_int_cst_wide (type, cst.low, cst.high); -} - -/* Returns true if CST fits into range of TYPE. Signedness of CST is assumed - to be the same as the signedness of TYPE. 
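The new INTEGER_CST layout that tree_size handles above is a classic trailing-array node: the struct is charged for its header plus one element, and NUNITS - 1 further HOST_WIDE_INTs follow in the same allocation. A standalone sketch of the same size arithmetic (hypothetical stand-in types):

#include <stddef.h>
#include <stdint.h>

struct int_cst_sketch
{
  int nunits;        /* number of significant 64-bit blocks */
  int64_t val[1];    /* trailing array; really NUNITS entries long */
};

static size_t
int_cst_size (int nunits)
{
  /* The struct already holds val[0], so add room for the rest.  */
  return sizeof (struct int_cst_sketch) + (nunits - 1) * sizeof (int64_t);
}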
*/ - -bool -double_int_fits_to_tree_p (const_tree type, double_int cst) -{ - bool sign_extended_type = !TYPE_UNSIGNED (type); - - double_int ext - = cst.ext (TYPE_PRECISION (type), !sign_extended_type); - - return cst == ext; + return wide_int_to_tree (type, wide_int::from_array ((HOST_WIDE_INT*)&cst.low, + 2, TYPE_PRECISION (type))); } -/* We force the double_int CST to the range of the type TYPE by sign or +/* We force the wide_int CST to the range of the type TYPE by sign or zero extending it. OVERFLOWABLE indicates if we are interested in overflow of the value, when >0 we are only interested in signed overflow, for <0 we are interested in any overflow. OVERFLOWED @@ -1093,26 +1092,38 @@ double_int_fits_to_tree_p (const_tree type, double_int cst) OVERFLOWED is nonzero, or OVERFLOWABLE is >0 and signed overflow occurs or OVERFLOWABLE is <0 and any overflow occurs - We return a new tree node for the extended double_int. The node + We return a new tree node for the extended wide_int. The node is shared if no overflow flags are set. */ tree -force_fit_type_double (tree type, double_int cst, int overflowable, - bool overflowed) +force_fit_type (tree type, const wide_int &cst, + int overflowable, bool overflowed) { - bool sign_extended_type = !TYPE_UNSIGNED (type); + signop sign = TYPE_SIGN (type); /* If we need to set overflow flags, return a new unshared node. */ - if (overflowed || !double_int_fits_to_tree_p(type, cst)) + if (overflowed || !cst.fits_to_tree_p (type)) { if (overflowed || overflowable < 0 - || (overflowable > 0 && sign_extended_type)) + || (overflowable > 0 && sign == SIGNED)) { - tree t = make_node (INTEGER_CST); - TREE_INT_CST (t) - = cst.ext (TYPE_PRECISION (type), !sign_extended_type); + wide_int tmp = cst.force_to_size (TYPE_PRECISION (type), + sign); + int l = tmp.get_len (); + tree t = make_int_cst (l); + if (l > 1) + { + if (tmp.elt (l - 1) == 0) + gcc_assert (tmp.elt (l - 2) < 0); + if (tmp.elt (l - 1) == (HOST_WIDE_INT) -1) + gcc_assert (tmp.elt (l - 2) >= 0); + } + + for (int i = 0; i < l; i++) + TREE_INT_CST_ELT (t, i) = tmp.elt (i); + TREE_TYPE (t) = type; TREE_OVERFLOW (t) = 1; return t; @@ -1120,7 +1131,7 @@ force_fit_type_double (tree type, double_int cst, int overflowable, } /* Else build a shared node. */ - return double_int_to_tree (type, cst); + return wide_int_to_tree (type, cst); } /* These are the hash table functions for the hash table of INTEGER_CST @@ -1132,9 +1143,13 @@ static hashval_t int_cst_hash_hash (const void *x) { const_tree const t = (const_tree) x; + hashval_t code = htab_hash_pointer (TREE_TYPE (t)); + int i; + + for (i = 0; i < TREE_INT_CST_NUNITS (t); i++) + code ^= TREE_INT_CST_ELT (t, i); - return (TREE_INT_CST_HIGH (t) ^ TREE_INT_CST_LOW (t) - ^ htab_hash_pointer (TREE_TYPE (t))); + return code; } /* Return nonzero if the value represented by *X (an INTEGER_CST tree node) @@ -1146,34 +1161,64 @@ int_cst_hash_eq (const void *x, const void *y) const_tree const xt = (const_tree) x; const_tree const yt = (const_tree) y; - return (TREE_TYPE (xt) == TREE_TYPE (yt) - && TREE_INT_CST_HIGH (xt) == TREE_INT_CST_HIGH (yt) - && TREE_INT_CST_LOW (xt) == TREE_INT_CST_LOW (yt)); + if (TREE_TYPE (xt) != TREE_TYPE (yt) + || TREE_INT_CST_NUNITS (xt) != TREE_INT_CST_NUNITS (yt)) + return false; + + for (int i = 0; i < TREE_INT_CST_NUNITS (xt); i++) + if (TREE_INT_CST_ELT (xt, i) != TREE_INT_CST_ELT (yt, i)) + return false; + + return true; } -/* Create an INT_CST node of TYPE and value HI:LOW. +/* Create an INT_CST node of TYPE and value CST. 
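force_fit_type above asserts the canonical encoding the whole patch relies on: the element array is the shortest sign-extended form, so an all-zero top block may appear only above a block whose sign bit is set, and an all-ones top block only above a non-negative block. Restated as a standalone predicate (illustrative, not the GCC API):

#include <stdint.h>
#include <stdbool.h>

static bool
canonical_top_p (const int64_t *val, int len)
{
  if (len <= 1)
    return true;
  if (val[len - 1] == 0)
    return val[len - 2] < 0;    /* otherwise the zero block is redundant */
  if (val[len - 1] == -1)
    return val[len - 2] >= 0;   /* otherwise the ones block is redundant */
  return true;
}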
The returned node is always shared. For small integers we use a - per-type vector cache, for larger ones we use a single hash table. */ + per-type vector cache, for larger ones we use a single hash table. + The value is extended from its precision according to the sign of + the type to be a multiple of HOST_BITS_PER_WIDE_INT. This defines + the upper bits and ensures that hashing and value equality based + upon the underlying HOST_WIDE_INTs works without masking. */ tree -build_int_cst_wide (tree type, unsigned HOST_WIDE_INT low, HOST_WIDE_INT hi) +wide_int_to_tree (tree type, const wide_int_ro &pcst) { tree t; int ix = -1; int limit = 0; + int i; gcc_assert (type); + int prec = TYPE_PRECISION (type); + signop sgn = TYPE_SIGN (type); + + /* Verify that everything is canonical. */ + int l = pcst.get_len (); + if (l > 1) + { + if (pcst.elt (l - 1) == 0) + gcc_assert (pcst.elt (l - 2) < 0); + if (pcst.elt (l - 1) == (HOST_WIDE_INT) -1) + gcc_assert (pcst.elt (l - 2) >= 0); + } + + wide_int cst = pcst.force_to_size (prec, sgn); + /* The following call makes sure that all tree-cst's are canonical, + i.e. it really does sign or zero extend the top block of the + value if the precision of the type is not an even multiple of the + size of an HWI. */ + cst.clear_undef (sgn); switch (TREE_CODE (type)) { case NULLPTR_TYPE: - gcc_assert (hi == 0 && low == 0); + gcc_assert (cst.zero_p ()); /* Fallthru. */ case POINTER_TYPE: case REFERENCE_TYPE: /* Cache NULL pointer. */ - if (!hi && !low) + if (cst.zero_p ()) { limit = 1; ix = 0; @@ -1183,8 +1228,8 @@ build_int_cst_wide (tree type, unsigned HOST_WIDE_INT low, HOST_WIDE_INT hi) case BOOLEAN_TYPE: /* Cache false or true. */ limit = 2; - if (!hi && low < 2) - ix = low; + if (cst.leu_p (1)) + ix = cst.to_uhwi (); break; case INTEGER_TYPE: @@ -1193,17 +1238,35 @@ { /* Cache 0..N */ limit = INTEGER_SHARE_LIMIT; - if (!hi && low < (unsigned HOST_WIDE_INT)INTEGER_SHARE_LIMIT) - ix = low; + + /* This is a little hokey, but if the prec is smaller than + what is necessary to hold INTEGER_SHARE_LIMIT, then the + obvious test will not get the correct answer. */ + if (prec < HOST_BITS_PER_WIDE_INT) + { + if (cst.to_uhwi () < (unsigned HOST_WIDE_INT) INTEGER_SHARE_LIMIT) + ix = cst.to_uhwi (); + } + else if (cst.ltu_p (INTEGER_SHARE_LIMIT)) + ix = cst.to_uhwi (); } else { /* Cache -1..N */ limit = INTEGER_SHARE_LIMIT + 1; - if (!hi && low < (unsigned HOST_WIDE_INT)INTEGER_SHARE_LIMIT) - ix = low + 1; - else if (hi == -1 && low == -(unsigned HOST_WIDE_INT)1) + + if (cst.minus_one_p ()) ix = 0; + else if (!cst.neg_p (SIGNED)) + { + if (prec < HOST_BITS_PER_WIDE_INT) + { + if (cst.to_shwi () < INTEGER_SHARE_LIMIT) + ix = cst.to_shwi () + 1; + } + else if (cst.lts_p (INTEGER_SHARE_LIMIT)) + ix = cst.to_shwi () + 1; + } } break; @@ -1228,28 +1291,30 @@ { /* Make sure no one is clobbering the shared constant. */ gcc_assert (TREE_TYPE (t) == type); - gcc_assert (TREE_INT_CST_LOW (t) == low); - gcc_assert (TREE_INT_CST_HIGH (t) == hi); + gcc_assert (TREE_INT_CST_NUNITS (t) == cst.get_len ()); + for (i = 0; i < TREE_INT_CST_NUNITS (t); i++) + gcc_assert (TREE_INT_CST_ELT (t, i) == cst.elt (i)); } else { /* Create a new shared int. 
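The "hokey" test above guards a real pitfall: comparing against INTEGER_SHARE_LIMIT inside the constant's own precision truncates the limit when that precision is too narrow to hold it. A tiny standalone demonstration of the failure mode, assuming the comparison is performed modulo 2^prec as the comment indicates (share_limit here is a stand-in for INTEGER_SHARE_LIMIT):

#include <stdint.h>
#include <stdio.h>

int
main (void)
{
  const unsigned prec = 8;              /* e.g. an 8-bit type */
  const uint64_t share_limit = 256;     /* does not fit in 8 bits */
  const uint64_t cst = 200;
  uint64_t mask = (UINT64_C (1) << prec) - 1;

  /* In-precision compare: 256 truncates to 0, so "200 < 0" is false
     even though 200 < 256 at full width.  */
  printf ("in-precision: %d, full-width: %d\n",
          (int) ((cst & mask) < (share_limit & mask)),
          (int) (cst < share_limit));
  return 0;
}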
*/ - t = make_node (INTEGER_CST); - - TREE_INT_CST_LOW (t) = low; - TREE_INT_CST_HIGH (t) = hi; + t = make_int_cst (cst.get_len ()); + TREE_INT_CST_NUNITS (t) = cst.get_len (); + for (i = 0; i < TREE_INT_CST_NUNITS (t); i++) + TREE_INT_CST_ELT (t, i) = cst.elt (i); TREE_TYPE (t) = type; - + TREE_VEC_ELT (TYPE_CACHED_VALUES (type), ix) = t; } } - else + else if (cst.get_len () == 1) { - /* Use the cache of larger shared ints. */ + /* 99.99% of all int csts will fit in a single HWI. Do that one + efficiently. */ + /* Use the cache of larger shared ints. */ void **slot; - TREE_INT_CST_LOW (int_cst_node) = low; - TREE_INT_CST_HIGH (int_cst_node) = hi; + TREE_INT_CST_ELT (int_cst_node, 0) = cst.elt (0); TREE_TYPE (int_cst_node) = type; slot = htab_find_slot (int_cst_hash_table, int_cst_node, INSERT); @@ -1260,7 +1325,27 @@ build_int_cst_wide (tree type, unsigned HOST_WIDE_INT low, HOST_WIDE_INT hi) t = int_cst_node; *slot = t; /* Make a new node for next time round. */ - int_cst_node = make_node (INTEGER_CST); + int_cst_node = make_int_cst (1); + } + } + else + { + /* The value either hashes properly or we drop it on the floor + for the gc to take care of. There will not be enough of them + to worry about. */ + void **slot; + tree nt = make_int_cst (cst.get_len ()); + for (i = 0; i < cst.get_len (); i++) + TREE_INT_CST_ELT (nt, i) = cst.elt (i); + TREE_TYPE (nt) = type; + + slot = htab_find_slot (int_cst_hash_table, nt, INSERT); + t = (tree) *slot; + if (!t) + { + /* Insert this one into the hash table. */ + t = nt; + *slot = t; } } @@ -1271,23 +1356,22 @@ void cache_integer_cst (tree t) { tree type = TREE_TYPE (t); - HOST_WIDE_INT hi = TREE_INT_CST_HIGH (t); - unsigned HOST_WIDE_INT low = TREE_INT_CST_LOW (t); int ix = -1; int limit = 0; + int prec = TYPE_PRECISION (type); gcc_assert (!TREE_OVERFLOW (t)); switch (TREE_CODE (type)) { case NULLPTR_TYPE: - gcc_assert (hi == 0 && low == 0); + gcc_assert (integer_zerop (t)); /* Fallthru. */ case POINTER_TYPE: case REFERENCE_TYPE: /* Cache NULL pointer. */ - if (!hi && !low) + if (integer_zerop (t)) { limit = 1; ix = 0; @@ -1297,8 +1381,8 @@ cache_integer_cst (tree t) case BOOLEAN_TYPE: /* Cache false or true. */ limit = 2; - if (!hi && low < 2) - ix = low; + if (wide_int::ltu_p (t, 2)) + ix = TREE_INT_CST_ELT (t, 0); break; case INTEGER_TYPE: @@ -1307,17 +1391,35 @@ cache_integer_cst (tree t) { /* Cache 0..N */ limit = INTEGER_SHARE_LIMIT; - if (!hi && low < (unsigned HOST_WIDE_INT)INTEGER_SHARE_LIMIT) - ix = low; + + /* This is a little hokey, but if the prec is smaller than + what is necessary to hold INTEGER_SHARE_LIMIT, then the + obvious test will not get the correct answer. */ + if (prec < HOST_BITS_PER_WIDE_INT) + { + if (tree_to_uhwi (t) < (unsigned HOST_WIDE_INT) INTEGER_SHARE_LIMIT) + ix = tree_to_uhwi (t); + } + else if (wide_int::ltu_p (t, INTEGER_SHARE_LIMIT)) + ix = tree_to_uhwi (t); } else { /* Cache -1..N */ limit = INTEGER_SHARE_LIMIT + 1; - if (!hi && low < (unsigned HOST_WIDE_INT)INTEGER_SHARE_LIMIT) - ix = low + 1; - else if (hi == -1 && low == -(unsigned HOST_WIDE_INT)1) + + if (integer_minus_onep (t)) ix = 0; + else if (!wide_int (t).neg_p (SIGNED)) + { + if (prec < HOST_BITS_PER_WIDE_INT) + { + if (tree_to_shwi (t) < INTEGER_SHARE_LIMIT) + ix = tree_to_shwi (t) + 1; + } + else if (wide_int::ltu_p (t, INTEGER_SHARE_LIMIT)) + ix = tree_to_shwi (t) + 1; + } } break; @@ -1349,13 +1451,10 @@ cache_integer_cst (tree t) /* If there is already an entry for the number verify it's the same. 
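The rewritten hash functions earlier in this file (int_cst_hash_hash, int_cst_hash_eq) and the sharing paths above all work directly on the element array; the canonical sign-extension is what makes that safe without masking. A standalone sketch of element-wise hashing and equality (the patch's hash simply XORs each element into the type hash; this version is the same up to truncation):

#include <stdint.h>
#include <stdbool.h>

static uint32_t
cst_hash (uint32_t type_hash, const int64_t *val, int len)
{
  uint32_t h = type_hash;
  for (int i = 0; i < len; i++)
    h ^= (uint32_t) val[i];     /* truncating XOR, as in the patch */
  return h;
}

static bool
cst_eq_p (const int64_t *a, int alen, const int64_t *b, int blen)
{
  if (alen != blen)
    return false;
  for (int i = 0; i < alen; i++)
    if (a[i] != b[i])
      return false;
  return true;
}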
*/ if (*slot) - { - gcc_assert (TREE_INT_CST_LOW ((tree)*slot) == low - && TREE_INT_CST_HIGH ((tree)*slot) == hi); - return; - } - /* Otherwise insert this one into the hash table. */ - *slot = t; + gcc_assert (wide_int::eq_p (((tree)*slot), t)); + else + /* Otherwise insert this one into the hash table. */ + *slot = t; } } @@ -1366,34 +1465,10 @@ cache_integer_cst (tree t) tree build_low_bits_mask (tree type, unsigned bits) { - double_int mask; - gcc_assert (bits <= TYPE_PRECISION (type)); - if (bits == TYPE_PRECISION (type) - && !TYPE_UNSIGNED (type)) - /* Sign extended all-ones mask. */ - mask = double_int_minus_one; - else - mask = double_int::mask (bits); - - return build_int_cst_wide (type, mask.low, mask.high); -} - -/* Checks that X is integer constant that can be expressed in (unsigned) - HOST_WIDE_INT without loss of precision. */ - -bool -cst_and_fits_in_hwi (const_tree x) -{ - if (TREE_CODE (x) != INTEGER_CST) - return false; - - if (TYPE_PRECISION (TREE_TYPE (x)) > HOST_BITS_PER_WIDE_INT) - return false; - - return (TREE_INT_CST_HIGH (x) == 0 - || TREE_INT_CST_HIGH (x) == -1); + return wide_int_to_tree (type, wide_int::mask (bits, false, + TYPE_PRECISION (type))); } /* Build a newly constructed TREE_VEC node of length LEN. */ @@ -1634,8 +1709,8 @@ real_value_from_int_cst (const_tree type, const_tree i) memset (&d, 0, sizeof d); real_from_integer (&d, type ? TYPE_MODE (type) : VOIDmode, - TREE_INT_CST_LOW (i), TREE_INT_CST_HIGH (i), - TYPE_UNSIGNED (TREE_TYPE (i))); + wide_int (i), + TYPE_SIGN (TREE_TYPE (i))); return d; } @@ -1873,6 +1948,27 @@ build_case_label (tree low_value, tree high_value, tree label_decl) return t; } +/* Build a newly constructed INTEGER_CST node of length LEN. */ + +tree +make_int_cst_stat (int len MEM_STAT_DECL) +{ + tree t; + int length = (len - 1) * sizeof (HOST_WIDE_INT) + sizeof (struct tree_int_cst); + + gcc_assert (len); + record_node_allocation_statistics (INTEGER_CST, length); + + t = ggc_alloc_cleared_tree_node_stat (length PASS_MEM_STAT); + + TREE_SET_CODE (t, INTEGER_CST); + TREE_INT_CST_NUNITS (t) = len; + + TREE_CONSTANT (t) = 1; + + return t; +} + /* Build a newly constructed TREE_VEC node of length LEN. */ tree @@ -1902,8 +1998,7 @@ integer_zerop (const_tree expr) switch (TREE_CODE (expr)) { case INTEGER_CST: - return (TREE_INT_CST_LOW (expr) == 0 - && TREE_INT_CST_HIGH (expr) == 0); + return wide_int (expr).zero_p (); case COMPLEX_CST: return (integer_zerop (TREE_REALPART (expr)) && integer_zerop (TREE_IMAGPART (expr))); @@ -1931,8 +2026,7 @@ integer_onep (const_tree expr) switch (TREE_CODE (expr)) { case INTEGER_CST: - return (TREE_INT_CST_LOW (expr) == 1 - && TREE_INT_CST_HIGH (expr) == 0); + return wide_int (expr).one_p (); case COMPLEX_CST: return (integer_onep (TREE_REALPART (expr)) && integer_zerop (TREE_IMAGPART (expr))); @@ -1955,9 +2049,6 @@ integer_onep (const_tree expr) int integer_all_onesp (const_tree expr) { - int prec; - int uns; - STRIP_NOPS (expr); if (TREE_CODE (expr) == COMPLEX_CST && integer_all_onesp (TREE_REALPART (expr)) && integer_zerop (TREE_IMAGPART (expr))) return 1; else if (TREE_CODE (expr) != INTEGER_CST) return 0; - uns = TYPE_UNSIGNED (TREE_TYPE (expr)); - if (TREE_INT_CST_LOW (expr) == ~(unsigned HOST_WIDE_INT) 0 - && TREE_INT_CST_HIGH (expr) == -1) - return 1; - if (!uns) - return 0; - - prec = TYPE_PRECISION (TREE_TYPE (expr)); - if (prec >= HOST_BITS_PER_WIDE_INT) - { - HOST_WIDE_INT high_value; - int shift_amount; - - shift_amount = prec - HOST_BITS_PER_WIDE_INT; - - /* Can not handle precisions greater than twice the host int size. 
*/ - gcc_assert (shift_amount <= HOST_BITS_PER_WIDE_INT); - if (shift_amount == HOST_BITS_PER_WIDE_INT) - /* Shifting by the host word size is undefined according to the ANSI - standard, so we must handle this as a special case. */ - high_value = -1; - else - high_value = ((HOST_WIDE_INT) 1 << shift_amount) - 1; - - return (TREE_INT_CST_LOW (expr) == ~(unsigned HOST_WIDE_INT) 0 - && TREE_INT_CST_HIGH (expr) == high_value); - } - else - return TREE_INT_CST_LOW (expr) == ((unsigned HOST_WIDE_INT) 1 << prec) - 1; + return wide_int::max_value (TYPE_PRECISION (TREE_TYPE (expr)), UNSIGNED) == expr; } /* Return 1 if EXPR is the integer constant minus one. */ @@ -2028,9 +2091,6 @@ integer_minus_onep (const_tree expr) int integer_pow2p (const_tree expr) { - int prec; - unsigned HOST_WIDE_INT high, low; - STRIP_NOPS (expr); if (TREE_CODE (expr) == COMPLEX_CST @@ -2041,29 +2101,7 @@ integer_pow2p (const_tree expr) if (TREE_CODE (expr) != INTEGER_CST) return 0; - prec = TYPE_PRECISION (TREE_TYPE (expr)); - high = TREE_INT_CST_HIGH (expr); - low = TREE_INT_CST_LOW (expr); - - /* First clear all bits that are beyond the type's precision in case - we've been sign extended. */ - - if (prec == HOST_BITS_PER_DOUBLE_INT) - ; - else if (prec > HOST_BITS_PER_WIDE_INT) - high &= ~((HOST_WIDE_INT) (-1) << (prec - HOST_BITS_PER_WIDE_INT)); - else - { - high = 0; - if (prec < HOST_BITS_PER_WIDE_INT) - low &= ~((HOST_WIDE_INT) (-1) << prec); - } - - if (high == 0 && low == 0) - return 0; - - return ((high == 0 && (low & (low - 1)) == 0) - || (low == 0 && (high & (high - 1)) == 0)); + return wide_int (expr).popcount () == 1; } /* Return 1 if EXPR is an integer constant other than zero or a @@ -2075,8 +2113,7 @@ integer_nonzerop (const_tree expr) STRIP_NOPS (expr); return ((TREE_CODE (expr) == INTEGER_CST - && (TREE_INT_CST_LOW (expr) != 0 - || TREE_INT_CST_HIGH (expr) != 0)) + && (!wide_int (expr).zero_p ())) || (TREE_CODE (expr) == COMPLEX_CST && (integer_nonzerop (TREE_REALPART (expr)) || integer_nonzerop (TREE_IMAGPART (expr))))); @@ -2097,34 +2134,12 @@ fixed_zerop (const_tree expr) int tree_log2 (const_tree expr) { - int prec; - HOST_WIDE_INT high, low; - STRIP_NOPS (expr); if (TREE_CODE (expr) == COMPLEX_CST) return tree_log2 (TREE_REALPART (expr)); - prec = TYPE_PRECISION (TREE_TYPE (expr)); - high = TREE_INT_CST_HIGH (expr); - low = TREE_INT_CST_LOW (expr); - - /* First clear all bits that are beyond the type's precision in case - we've been sign extended. */ - - if (prec == HOST_BITS_PER_DOUBLE_INT) - ; - else if (prec > HOST_BITS_PER_WIDE_INT) - high &= ~((HOST_WIDE_INT) (-1) << (prec - HOST_BITS_PER_WIDE_INT)); - else - { - high = 0; - if (prec < HOST_BITS_PER_WIDE_INT) - low &= ~((HOST_WIDE_INT) (-1) << prec); - } - - return (high != 0 ? HOST_BITS_PER_WIDE_INT + exact_log2 (high) - : exact_log2 (low)); + return wide_int (expr).exact_log2 ().to_shwi (); } /* Similar, but return the largest integer Y such that 2 ** Y is less @@ -2133,35 +2148,12 @@ tree_log2 (const_tree expr) int tree_floor_log2 (const_tree expr) { - int prec; - HOST_WIDE_INT high, low; - STRIP_NOPS (expr); if (TREE_CODE (expr) == COMPLEX_CST) return tree_log2 (TREE_REALPART (expr)); - prec = TYPE_PRECISION (TREE_TYPE (expr)); - high = TREE_INT_CST_HIGH (expr); - low = TREE_INT_CST_LOW (expr); - - /* First clear all bits that are beyond the type's precision in case - we've been sign extended. Ignore if type's precision hasn't been set - since what we are doing is setting it. 
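integer_pow2p, tree_log2 and tree_floor_log2 above collapse the old two-word bit fiddling into single wide_int popcount/exact_log2/floor_log2 calls. For one 64-bit block the same operations reduce to the following standalone sketch (GCC builtins, illustrative names):

#include <stdint.h>
#include <stdbool.h>

static bool
pow2_p (uint64_t x)
{
  /* Exactly one set bit means a power of two; zero has none.  */
  return __builtin_popcountll (x) == 1;
}

static int
floor_log2_64 (uint64_t x)
{
  return x ? 63 - __builtin_clzll (x) : -1;
}

static int
exact_log2_64 (uint64_t x)
{
  return pow2_p (x) ? floor_log2_64 (x) : -1;
}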
*/ - - if (prec == HOST_BITS_PER_DOUBLE_INT || prec == 0) - ; - else if (prec > HOST_BITS_PER_WIDE_INT) - high &= ~((HOST_WIDE_INT) (-1) << (prec - HOST_BITS_PER_WIDE_INT)); - else - { - high = 0; - if (prec < HOST_BITS_PER_WIDE_INT) - low &= ~((HOST_WIDE_INT) (-1) << prec); - } - - return (high != 0 ? HOST_BITS_PER_WIDE_INT + floor_log2 (high) - : floor_log2 (low)); + return wide_int (expr).floor_log2 ().to_shwi (); } /* Return 1 if EXPR is the real constant zero. Trailing zeroes matter for @@ -2582,14 +2574,11 @@ int_size_in_bytes (const_tree type) type = TYPE_MAIN_VARIANT (type); t = TYPE_SIZE_UNIT (type); - if (t == 0 - || TREE_CODE (t) != INTEGER_CST - || TREE_INT_CST_HIGH (t) != 0 - /* If the result would appear negative, it's too big to represent. */ - || (HOST_WIDE_INT) TREE_INT_CST_LOW (t) < 0) - return -1; - return TREE_INT_CST_LOW (t); + if (t && cst_fits_uhwi_p (t)) + return tree_to_hwi (t); + else + return -1; } /* Return the maximum size of TYPE (in bytes) as a wide integer @@ -2607,8 +2596,8 @@ max_int_size_in_bytes (const_tree type) { size_tree = TYPE_ARRAY_MAX_SIZE (type); - if (size_tree && host_integerp (size_tree, 1)) - size = tree_low_cst (size_tree, 1); + if (size_tree && tree_fits_uhwi_p (size_tree)) + size = tree_to_uhwi (size_tree); } /* If we still haven't been able to get a size, see if the language @@ -2618,8 +2607,8 @@ max_int_size_in_bytes (const_tree type) { size_tree = lang_hooks.types.max_size (type); - if (size_tree && host_integerp (size_tree, 1)) - size = tree_low_cst (size_tree, 1); + if (size_tree && tree_fits_uhwi_p (size_tree)) + size = tree_to_uhwi (size_tree); } return size; @@ -2654,7 +2643,7 @@ bit_position (const_tree field) HOST_WIDE_INT int_bit_position (const_tree field) { - return tree_low_cst (bit_position (field), 0); + return tree_to_shwi (bit_position (field)); } /* Return the byte position of FIELD, in bytes from the start of the record. @@ -2674,7 +2663,7 @@ byte_position (const_tree field) HOST_WIDE_INT int_byte_position (const_tree field) { - return tree_low_cst (byte_position (field), 0); + return tree_to_shwi (byte_position (field)); } /* Return the strictest alignment, in bits, that T is known to have. */ @@ -4248,11 +4237,11 @@ build_simple_mem_ref_loc (location_t loc, tree ptr) /* Return the constant offset of a MEM_REF or TARGET_MEM_REF tree T. 
*/ -double_int +addr_wide_int mem_ref_offset (const_tree t) { tree toff = TREE_OPERAND (t, 1); - return tree_to_double_int (toff).sext (TYPE_PRECISION (TREE_TYPE (toff))); + return addr_wide_int (toff).sext (TYPE_PRECISION (TREE_TYPE (toff))); } /* Return the pointer-type relevant for TBAA purposes from the @@ -4494,6 +4483,8 @@ build_type_attribute_qual_variant (tree ttype, tree attribute, int quals) { hashval_t hashcode = 0; tree ntype; + int i; + tree t; enum tree_code code = TREE_CODE (ttype); /* Building a distinct copy of a tagged type is inappropriate; it @@ -4535,10 +4526,9 @@ build_type_attribute_qual_variant (tree ttype, tree attribute, int quals) hashcode); break; case INTEGER_TYPE: - hashcode = iterative_hash_object - (TREE_INT_CST_LOW (TYPE_MAX_VALUE (ntype)), hashcode); - hashcode = iterative_hash_object - (TREE_INT_CST_HIGH (TYPE_MAX_VALUE (ntype)), hashcode); + t = TYPE_MAX_VALUE (ntype); + for (i = 0; i < TREE_INT_CST_NUNITS (t); i++) + hashcode = iterative_hash_object (TREE_INT_CST_ELT (t, i), hashcode); break; case REAL_TYPE: case FIXED_POINT_TYPE: @@ -4934,7 +4924,7 @@ free_lang_data_in_decl (tree decl) DECL_VINDEX referring to itself into a vtable slot number as it should. Happens with functions that are copied and then forgotten about. Just clear it, it won't matter anymore. */ - if (DECL_VINDEX (decl) && !host_integerp (DECL_VINDEX (decl), 0)) + if (DECL_VINDEX (decl) && !tree_fits_shwi_p (DECL_VINDEX (decl))) DECL_VINDEX (decl) = NULL_TREE; } else if (TREE_CODE (decl) == VAR_DECL) @@ -6462,6 +6452,8 @@ type_hash_eq (const void *va, const void *vb) case INTEGER_TYPE: case REAL_TYPE: case BOOLEAN_TYPE: + if (TYPE_PRECISION (a->type) != TYPE_PRECISION (b->type)) + return false; return ((TYPE_MAX_VALUE (a->type) == TYPE_MAX_VALUE (b->type) || tree_int_cst_equal (TYPE_MAX_VALUE (a->type), TYPE_MAX_VALUE (b->type))) @@ -6750,18 +6742,26 @@ type_num_arguments (const_tree type) int tree_int_cst_equal (const_tree t1, const_tree t2) { + unsigned int prec1, prec2; if (t1 == t2) return 1; if (t1 == 0 || t2 == 0) return 0; - if (TREE_CODE (t1) == INTEGER_CST - && TREE_CODE (t2) == INTEGER_CST - && TREE_INT_CST_LOW (t1) == TREE_INT_CST_LOW (t2) - && TREE_INT_CST_HIGH (t1) == TREE_INT_CST_HIGH (t2)) - return 1; + if (TREE_CODE (t1) != INTEGER_CST + || TREE_CODE (t2) != INTEGER_CST) + return 0; + + prec1 = TYPE_PRECISION (TREE_TYPE (t1)); + prec2 = TYPE_PRECISION (TREE_TYPE (t2)); + if (prec1 == prec2) + return wide_int::eq_p (t1, t2); + else if (prec1 < prec2) + return (wide_int (t1)).force_to_size (prec2, TYPE_SIGN (TREE_TYPE (t1))) == t2; + else + return (wide_int (t2)).force_to_size (prec1, TYPE_SIGN (TREE_TYPE (t2))) == t1; return 0; } @@ -6806,37 +6806,6 @@ tree_int_cst_compare (const_tree t1, const_tree t2) return 0; } -/* Return 1 if T is an INTEGER_CST that can be manipulated efficiently on - the host. If POS is zero, the value can be represented in a single - HOST_WIDE_INT. If POS is nonzero, the value must be non-negative and can - be represented in a single unsigned HOST_WIDE_INT. */ - -int -host_integerp (const_tree t, int pos) -{ - if (t == NULL_TREE) - return 0; - - return (TREE_CODE (t) == INTEGER_CST - && ((TREE_INT_CST_HIGH (t) == 0 - && (HOST_WIDE_INT) TREE_INT_CST_LOW (t) >= 0) - || (! pos && TREE_INT_CST_HIGH (t) == -1 - && (HOST_WIDE_INT) TREE_INT_CST_LOW (t) < 0 - && !TYPE_UNSIGNED (TREE_TYPE (t))) - || (pos && TREE_INT_CST_HIGH (t) == 0))); -} - -/* Return the HOST_WIDE_INT least significant bits of T if it is an - INTEGER_CST and there is no overflow. 
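tree_int_cst_equal above now compares constants of different precisions by widening the narrower one to the other's precision under its own sign and comparing the resulting bit patterns. A single-block sketch, reusing the ext64 helper from the earlier range_fits_type_p sketch:

static bool
cst_equal_p (uint64_t a, unsigned aprec, bool asigned,
             uint64_t b, unsigned bprec, bool bsigned)
{
  unsigned prec = aprec > bprec ? aprec : bprec;
  uint64_t mask = prec < 64 ? (UINT64_C (1) << prec) - 1 : ~UINT64_C (0);
  /* Widen each value to the common precision under its own sign,
     then compare the PREC-bit patterns.  */
  return ((ext64 (a, aprec, asigned) & mask)
          == (ext64 (b, bprec, bsigned) & mask));
}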
POS is nonzero if the result must - be non-negative. We must be able to satisfy the above conditions. */ - -HOST_WIDE_INT -tree_low_cst (const_tree t, int pos) -{ - gcc_assert (host_integerp (t, pos)); - return TREE_INT_CST_LOW (t); -} - /* Return the HOST_WIDE_INT least significant bits of T, a sizetype kind INTEGER_CST. This makes sure to properly sign-extend the constant. */ @@ -6844,8 +6813,11 @@ tree_low_cst (const_tree t, int pos) HOST_WIDE_INT size_low_cst (const_tree t) { - double_int d = tree_to_double_int (t); - return d.sext (TYPE_PRECISION (TREE_TYPE (t))).low; + HOST_WIDE_INT w = TREE_INT_CST_ELT (t, 0); + int prec = TYPE_PRECISION (TREE_TYPE (t)); + if (prec < HOST_BITS_PER_WIDE_INT) + return sext_hwi (w, prec); + return w; } /* Return the most significant (sign) bit of T. */ @@ -6854,17 +6826,8 @@ int tree_int_cst_sign_bit (const_tree t) { unsigned bitno = TYPE_PRECISION (TREE_TYPE (t)) - 1; - unsigned HOST_WIDE_INT w; - - if (bitno < HOST_BITS_PER_WIDE_INT) - w = TREE_INT_CST_LOW (t); - else - { - w = TREE_INT_CST_HIGH (t); - bitno -= HOST_BITS_PER_WIDE_INT; - } - return (w >> bitno) & 1; + return wide_int (t).extract_to_hwi (bitno, 1); } /* Return an indication of the sign of the integer constant T. @@ -6874,11 +6837,12 @@ tree_int_cst_sign_bit (const_tree t) int tree_int_cst_sgn (const_tree t) { - if (TREE_INT_CST_LOW (t) == 0 && TREE_INT_CST_HIGH (t) == 0) + wide_int w = t; + if (w.zero_p ()) return 0; else if (TYPE_UNSIGNED (TREE_TYPE (t))) return 1; - else if (TREE_INT_CST_HIGH (t) < 0) + else if (w.neg_p (SIGNED)) return -1; else return 1; @@ -6888,7 +6852,7 @@ tree_int_cst_sgn (const_tree t) signed or unsigned type, UNSIGNEDP says which. */ unsigned int -tree_int_cst_min_precision (tree value, bool unsignedp) +tree_int_cst_min_precision (tree value, signop sgn) { /* If the value is negative, compute its negative minus 1. The latter adjustment is because the absolute value of the largest negative value @@ -6906,7 +6870,7 @@ tree_int_cst_min_precision (tree value, bool unsignedp) if (integer_zerop (value)) return 1; else - return tree_floor_log2 (value) + 1 + !unsignedp; + return tree_floor_log2 (value) + 1 + (sgn == SIGNED ? 1 : 0) ; } /* Compare two constructor-element-type constants. Return 1 if the lists @@ -6967,8 +6931,7 @@ simple_cst_equal (const_tree t1, const_tree t2) switch (code1) { case INTEGER_CST: - return (TREE_INT_CST_LOW (t1) == TREE_INT_CST_LOW (t2) - && TREE_INT_CST_HIGH (t1) == TREE_INT_CST_HIGH (t2)); + return max_wide_int (t1) == max_wide_int (t2); case REAL_CST: return REAL_VALUES_IDENTICAL (TREE_REAL_CST (t1), TREE_REAL_CST (t2)); @@ -7104,11 +7067,11 @@ compare_tree_int (const_tree t, unsigned HOST_WIDE_INT u) { if (tree_int_cst_sgn (t) < 0) return -1; - else if (TREE_INT_CST_HIGH (t) != 0) + else if (!cst_fits_uhwi_p (t)) return 1; - else if (TREE_INT_CST_LOW (t) == u) + else if ((unsigned HOST_WIDE_INT) tree_to_hwi (t) == u) return 0; - else if (TREE_INT_CST_LOW (t) < u) + else if ((unsigned HOST_WIDE_INT) tree_to_hwi (t) < u) return -1; else return 1; @@ -7121,7 +7084,7 @@ compare_tree_int (const_tree t, unsigned HOST_WIDE_INT u) bool valid_constant_size_p (const_tree size) { - if (! host_integerp (size, 1) + if (! tree_fits_uhwi_p (size) || TREE_OVERFLOW (size) || tree_int_cst_sign_bit (size) != 0) return false; @@ -7240,8 +7203,9 @@ iterative_hash_expr (const_tree t, hashval_t val) /* Alas, constants aren't shared, so we can't rely on pointer identity. 
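size_low_cst above relies on sext_hwi to sign-extend the low PREC bits of the bottom element. A portable standalone equivalent using the usual xor-and-subtract trick, which avoids left-shifting negative values:

#include <stdint.h>

static int64_t
sext64 (int64_t w, unsigned prec)
{
  if (prec >= 64)
    return w;
  uint64_t sign = UINT64_C (1) << (prec - 1);   /* sign-bit position */
  uint64_t low = (uint64_t) w & ((sign << 1) - 1);
  /* Flipping the sign bit and subtracting it back propagates it.  */
  return (int64_t) ((low ^ sign) - sign);
}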
*/ case INTEGER_CST: - val = iterative_hash_host_wide_int (TREE_INT_CST_LOW (t), val); - return iterative_hash_host_wide_int (TREE_INT_CST_HIGH (t), val); + for (i = 0; i < TREE_INT_CST_NUNITS (t); i++) + val = iterative_hash_host_wide_int (TREE_INT_CST_ELT (t, i), val); + return val; case REAL_CST: { unsigned int val2 = real_hash (TREE_REAL_CST_PTR (t)); @@ -7587,8 +7551,8 @@ build_nonstandard_integer_type (unsigned HOST_WIDE_INT precision, fixup_signed_type (itype); ret = itype; - if (host_integerp (TYPE_MAX_VALUE (itype), 1)) - ret = type_hash_canon (tree_low_cst (TYPE_MAX_VALUE (itype), 1), itype); + if (tree_fits_uhwi_p (TYPE_MAX_VALUE (itype))) + ret = type_hash_canon (tree_to_uhwi (TYPE_MAX_VALUE (itype)), itype); if (precision <= MAX_INT_CACHED_PREC) nonstandard_integer_type_cache[precision + unsignedp] = ret; @@ -8529,10 +8493,10 @@ get_narrower (tree op, int *unsignedp_ptr) && TREE_CODE (TREE_TYPE (op)) != FIXED_POINT_TYPE /* Ensure field is laid out already. */ && DECL_SIZE (TREE_OPERAND (op, 1)) != 0 - && host_integerp (DECL_SIZE (TREE_OPERAND (op, 1)), 1)) + && tree_fits_uhwi_p (DECL_SIZE (TREE_OPERAND (op, 1)))) { unsigned HOST_WIDE_INT innerprec - = tree_low_cst (DECL_SIZE (TREE_OPERAND (op, 1)), 1); + = tree_to_uhwi (DECL_SIZE (TREE_OPERAND (op, 1))); int unsignedp = (DECL_UNSIGNED (TREE_OPERAND (op, 1)) || TYPE_UNSIGNED (TREE_TYPE (TREE_OPERAND (op, 1)))); tree type = lang_hooks.types.type_for_size (innerprec, unsignedp); @@ -8568,9 +8532,9 @@ int_fits_type_p (const_tree c, const_tree type) { tree type_low_bound, type_high_bound; bool ok_for_low_bound, ok_for_high_bound, unsc; - double_int dc, dd; + wide_int wc, wd; - dc = tree_to_double_int (c); + wc = c; unsc = TYPE_UNSIGNED (TREE_TYPE (c)); retry: @@ -8580,7 +8544,7 @@ retry: /* If at least one bound of the type is a constant integer, we can check ourselves and maybe make a decision. If no such decision is possible, but this type is a subtype, try checking against that. Otherwise, use - double_int_fits_to_tree_p, which checks against the precision. + fits_to_tree_p, which checks against the precision. Compute the status for each possibly constant bound, and return if we see one does not match. Use ok_for_xxx_bound for this purpose, assigning -1 @@ -8590,18 +8554,18 @@ retry: /* Check if c >= type_low_bound. */ if (type_low_bound && TREE_CODE (type_low_bound) == INTEGER_CST) { - dd = tree_to_double_int (type_low_bound); + wd = type_low_bound; if (unsc != TYPE_UNSIGNED (TREE_TYPE (type_low_bound))) { - int c_neg = (!unsc && dc.is_negative ()); - int t_neg = (unsc && dd.is_negative ()); + int c_neg = (!unsc && wc.neg_p (SIGNED)); + int t_neg = (unsc && wd.neg_p (SIGNED)); if (c_neg && !t_neg) return false; - if ((c_neg || !t_neg) && dc.ult (dd)) + if ((c_neg || !t_neg) && wc.ltu_p (wd)) return false; } - else if (dc.cmp (dd, unsc) < 0) + else if (wc.cmp (wd, TYPE_SIGN (TREE_TYPE (type_low_bound))) < 0) return false; ok_for_low_bound = true; } @@ -8611,18 +8575,18 @@ retry: /* Check if c <= type_high_bound. 
*/ if (type_high_bound && TREE_CODE (type_high_bound) == INTEGER_CST) { - dd = tree_to_double_int (type_high_bound); + wd = type_high_bound; if (unsc != TYPE_UNSIGNED (TREE_TYPE (type_high_bound))) { - int c_neg = (!unsc && dc.is_negative ()); - int t_neg = (unsc && dd.is_negative ()); + int c_neg = (!unsc && wc.neg_p (SIGNED)); + int t_neg = (unsc && wd.neg_p (SIGNED)); if (t_neg && !c_neg) return false; - if ((t_neg || !c_neg) && dc.ugt (dd)) + if ((t_neg || !c_neg) && wc.gtu_p (wd)) return false; } - else if (dc.cmp (dd, unsc) > 0) + else if (wc.cmp (wd, TYPE_SIGN (TREE_TYPE (type_high_bound))) > 0) return false; ok_for_high_bound = true; } @@ -8636,7 +8600,7 @@ retry: /* Perform some generic filtering which may allow making a decision even if the bounds are not constant. First, negative integers never fit in unsigned types, */ - if (TYPE_UNSIGNED (type) && !unsc && dc.is_negative ()) + if (TYPE_UNSIGNED (type) && !unsc && wc.neg_p (SIGNED)) return false; /* Second, narrower types always fit in wider ones. */ @@ -8644,18 +8608,8 @@ retry: return true; /* Third, unsigned integers with top bit set never fit signed types. */ - if (! TYPE_UNSIGNED (type) && unsc) - { - int prec = GET_MODE_BITSIZE (TYPE_MODE (TREE_TYPE (c))) - 1; - if (prec < HOST_BITS_PER_WIDE_INT) - { - if (((((unsigned HOST_WIDE_INT) 1) << prec) & dc.low) != 0) - return false; - } - else if (((((unsigned HOST_WIDE_INT) 1) - << (prec - HOST_BITS_PER_WIDE_INT)) & dc.high) != 0) - return false; - } + if (! TYPE_UNSIGNED (type) && unsc && wc.neg_p (SIGNED)) + return false; /* If we haven't been able to decide at this point, there nothing more we can check ourselves here. Look at the base type if we have one and it @@ -8668,8 +8622,8 @@ retry: goto retry; } - /* Or to double_int_fits_to_tree_p, if nothing else. */ - return double_int_fits_to_tree_p (type, dc); + /* Or to fits_to_tree_p, if nothing else. */ + return wc.fits_to_tree_p (type); } /* Stores bounds of an integer TYPE in MIN and MAX. If TYPE has non-constant @@ -8682,33 +8636,25 @@ get_type_static_bounds (const_tree type, mpz_t min, mpz_t max) { if (!POINTER_TYPE_P (type) && TYPE_MIN_VALUE (type) && TREE_CODE (TYPE_MIN_VALUE (type)) == INTEGER_CST) - mpz_set_double_int (min, tree_to_double_int (TYPE_MIN_VALUE (type)), - TYPE_UNSIGNED (type)); + wide_int (TYPE_MIN_VALUE (type)).to_mpz (min, TYPE_SIGN (type)); else { if (TYPE_UNSIGNED (type)) mpz_set_ui (min, 0); else { - double_int mn; - mn = double_int::mask (TYPE_PRECISION (type) - 1); - mn = (mn + double_int_one).sext (TYPE_PRECISION (type)); - mpz_set_double_int (min, mn, false); + wide_int mn = wide_int::min_value (TYPE_PRECISION (type), SIGNED); + mn.to_mpz (min, SIGNED); } } if (!POINTER_TYPE_P (type) && TYPE_MAX_VALUE (type) && TREE_CODE (TYPE_MAX_VALUE (type)) == INTEGER_CST) - mpz_set_double_int (max, tree_to_double_int (TYPE_MAX_VALUE (type)), - TYPE_UNSIGNED (type)); + wide_int (TYPE_MAX_VALUE (type)).to_mpz (max, TYPE_SIGN (type)); else { - if (TYPE_UNSIGNED (type)) - mpz_set_double_int (max, double_int::mask (TYPE_PRECISION (type)), - true); - else - mpz_set_double_int (max, double_int::mask (TYPE_PRECISION (type) - 1), - true); + wide_int mn = wide_int::max_value (TYPE_PRECISION (type), TYPE_SIGN (type)); + mn.to_mpz (max, TYPE_SIGN (type)); } } @@ -9376,6 +9322,18 @@ tree_contains_struct_check_failed (const_tree node, (dynamically sized) vector. 
*/ void +tree_int_cst_elt_check_failed (int idx, int len, const char *file, int line, + const char *function) +{ + internal_error + ("tree check: accessed elt %d of tree_int_cst with %d elts in %s, at %s:%d", + idx + 1, len, function, trim_filename (file), line); +} + +/* Similar to above, except that the check is for the bounds of a TREE_VEC's + (dynamically sized) vector. */ + +void tree_vec_elt_check_failed (int idx, int len, const char *file, int line, const char *function) { @@ -10158,10 +10116,10 @@ build_vector_type_for_mode (tree innertype, enum machine_mode mode) case MODE_INT: /* Check that there are no leftover bits. */ gcc_assert (GET_MODE_BITSIZE (mode) - % TREE_INT_CST_LOW (TYPE_SIZE (innertype)) == 0); + % tree_to_hwi (TYPE_SIZE (innertype)) == 0); nunits = GET_MODE_BITSIZE (mode) - / TREE_INT_CST_LOW (TYPE_SIZE (innertype)); + / tree_to_hwi (TYPE_SIZE (innertype)); break; default: @@ -10546,11 +10504,10 @@ HOST_WIDE_INT int_cst_value (const_tree x) { unsigned bits = TYPE_PRECISION (TREE_TYPE (x)); - unsigned HOST_WIDE_INT val = TREE_INT_CST_LOW (x); + unsigned HOST_WIDE_INT val = tree_to_hwi (x); /* Make sure the sign-extended value will fit in a HOST_WIDE_INT. */ - gcc_assert (TREE_INT_CST_HIGH (x) == 0 - || TREE_INT_CST_HIGH (x) == -1); + gcc_assert (cst_fits_shwi_p (x)); if (bits < HOST_BITS_PER_WIDE_INT) { @@ -10570,16 +10527,20 @@ HOST_WIDEST_INT widest_int_cst_value (const_tree x) { unsigned bits = TYPE_PRECISION (TREE_TYPE (x)); - unsigned HOST_WIDEST_INT val = TREE_INT_CST_LOW (x); + unsigned HOST_WIDEST_INT val = tree_to_hwi (x); #if HOST_BITS_PER_WIDEST_INT > HOST_BITS_PER_WIDE_INT gcc_assert (HOST_BITS_PER_WIDEST_INT >= HOST_BITS_PER_DOUBLE_INT); - val |= (((unsigned HOST_WIDEST_INT) TREE_INT_CST_HIGH (x)) - << HOST_BITS_PER_WIDE_INT); + gcc_assert (TREE_INT_CST_NUNITS (x) <= 2); + + if (TREE_INT_CST_NUNITS (x) == 1) + val = ((HOST_WIDEST_INT)val << HOST_BITS_PER_WIDE_INT) >> HOST_BITS_PER_WIDE_INT; + else + val |= (((unsigned HOST_WIDEST_INT) TREE_INT_CST_ELT (x, 1)) + << HOST_BITS_PER_WIDE_INT); #else /* Make sure the sign-extended value will fit in a HOST_WIDE_INT. */ - gcc_assert (TREE_INT_CST_HIGH (x) == 0 - || TREE_INT_CST_HIGH (x) == -1); + gcc_assert (TREE_INT_CST_NUNITS (x) == 1); #endif if (bits < HOST_BITS_PER_WIDEST_INT) @@ -10664,7 +10625,6 @@ truth_type_for (tree type) tree upper_bound_in_type (tree outer, tree inner) { - double_int high; unsigned int det = 0; unsigned oprec = TYPE_PRECISION (outer); unsigned iprec = TYPE_PRECISION (inner); @@ -10708,21 +10668,8 @@ upper_bound_in_type (tree outer, tree inner) gcc_unreachable (); } - /* Compute 2^^prec - 1. */ - if (prec <= HOST_BITS_PER_WIDE_INT) - { - high.high = 0; - high.low = ((~(unsigned HOST_WIDE_INT) 0) - >> (HOST_BITS_PER_WIDE_INT - prec)); - } - else - { - high.high = ((~(unsigned HOST_WIDE_INT) 0) - >> (HOST_BITS_PER_DOUBLE_INT - prec)); - high.low = ~(unsigned HOST_WIDE_INT) 0; - } - - return double_int_to_tree (outer, high); + return wide_int_to_tree (outer, + wide_int::mask (prec, false, TYPE_PRECISION (outer))); } /* Returns the smallest value obtainable by casting something in INNER type to @@ -10731,7 +10678,6 @@ upper_bound_in_type (tree outer, tree inner) tree lower_bound_in_type (tree outer, tree inner) { - double_int low; unsigned oprec = TYPE_PRECISION (outer); unsigned iprec = TYPE_PRECISION (inner); @@ -10742,7 +10688,7 @@ lower_bound_in_type (tree outer, tree inner) contains all values of INNER type. In particular, both INNER and OUTER types have zero in common. 
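upper_bound_in_type above replaces the old open-coded two-word computation of 2^prec - 1 with a single wide_int::mask call. For a lone 64-bit block the classic pitfall is shifting by the full word width, which C leaves undefined; a standalone helper:

#include <stdint.h>

static uint64_t
low_bits_mask (unsigned prec)
{
  /* 1 << 64 is undefined behaviour, so treat full width specially.  */
  return prec >= 64 ? ~UINT64_C (0) : (UINT64_C (1) << prec) - 1;
}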
*/ || (oprec > iprec && TYPE_UNSIGNED (inner))) - low.low = low.high = 0; + return wide_int_to_tree (outer, 0); else { /* If we are widening a signed type to another signed type, we @@ -10750,21 +10696,10 @@ lower_bound_in_type (tree outer, tree inner) precision or narrowing to a signed type, we want to obtain -2^(oprec-1). */ unsigned prec = oprec > iprec ? iprec : oprec; - - if (prec <= HOST_BITS_PER_WIDE_INT) - { - low.high = ~(unsigned HOST_WIDE_INT) 0; - low.low = (~(unsigned HOST_WIDE_INT) 0) << (prec - 1); - } - else - { - low.high = ((~(unsigned HOST_WIDE_INT) 0) - << (prec - HOST_BITS_PER_WIDE_INT - 1)); - low.low = 0; - } + return wide_int_to_tree (outer, + wide_int::mask (prec - 1, + true, TYPE_PRECISION (outer))); } - - return double_int_to_tree (outer, low); } /* Return nonzero if two operands that are suitable for PHI nodes are @@ -10783,42 +10718,13 @@ operand_equal_for_phi_arg_p (const_tree arg0, const_tree arg1) return operand_equal_p (arg0, arg1, 0); } -/* Returns number of zeros at the end of binary representation of X. - - ??? Use ffs if available? */ +/* Returns number of zeros at the end of binary representation of X. */ tree num_ending_zeros (const_tree x) { - unsigned HOST_WIDE_INT fr, nfr; - unsigned num, abits; tree type = TREE_TYPE (x); - - if (TREE_INT_CST_LOW (x) == 0) - { - num = HOST_BITS_PER_WIDE_INT; - fr = TREE_INT_CST_HIGH (x); - } - else - { - num = 0; - fr = TREE_INT_CST_LOW (x); - } - - for (abits = HOST_BITS_PER_WIDE_INT / 2; abits; abits /= 2) - { - nfr = fr >> abits; - if (nfr << abits == fr) - { - num += abits; - fr = nfr; - } - } - - if (num > TYPE_PRECISION (type)) - num = TYPE_PRECISION (type); - - return build_int_cst_type (type, num); + return wide_int_to_tree (type, wide_int (x).ctz ()); } @@ -11890,7 +11796,7 @@ get_binfo_at_offset (tree binfo, HOST_WIDE_INT offset, tree expected_type) continue; pos = int_bit_position (fld); - size = tree_low_cst (DECL_SIZE (fld), 1); + size = tree_to_uhwi (DECL_SIZE (fld)); if (pos <= offset && (pos + size) > offset) break; } diff --git a/gcc/tree.def b/gcc/tree.def index da30074b109..577e6fc4af9 100644 --- a/gcc/tree.def +++ b/gcc/tree.def @@ -257,13 +257,16 @@ DEFTREECODE (LANG_TYPE, "lang_type", tcc_type, 0) /* First, the constants. */ -/* Contents are in TREE_INT_CST_LOW and TREE_INT_CST_HIGH fields, - 32 bits each, giving us a 64 bit constant capability. INTEGER_CST - nodes can be shared, and therefore should be considered read only. - They should be copied, before setting a flag such as TREE_OVERFLOW. - If an INTEGER_CST has TREE_OVERFLOW already set, it is known to be unique. - INTEGER_CST nodes are created for the integral types, for pointer - types and for vector and float types in some circumstances. */ +/* Contents are in an array of HOST_WIDE_INTS. The array may be as + wide as the precision requires but may be shorter when all of the + upper bits are sign bits. The length of the array is given in + TREE_INT_CST_NUNITS and each element can be obtained using + TREE_INT_CST_ELT. INTEGER_CST nodes can be shared, and therefore + should be considered read only. They should be copied, before + setting a flag such as TREE_OVERFLOW. If an INTEGER_CST has + TREE_OVERFLOW already set, it is known to be unique. INTEGER_CST + nodes are created for the integral types, for pointer types and for + vector and float types in some circumstances. */ DEFTREECODE (INTEGER_CST, "integer_cst", tcc_constant, 0) /* Contents are in TREE_REAL_CST field. 
*/ diff --git a/gcc/tree.h b/gcc/tree.h index 94f112f43d7..8a665346c17 100644 --- a/gcc/tree.h +++ b/gcc/tree.h @@ -469,7 +469,7 @@ struct GTY(()) tree_base { /* The following fields are present in tree_base to save space. The nodes using them do not require any of the flags above and so can make better use of the 4-byte sized word. */ - /* VEC length. This field is only used with TREE_VEC. */ + /* VEC length. This field is only used with TREE_VEC and TREE_INT_CST. */ int length; /* SSA version number. This field is only used with SSA_NAME. */ unsigned int version; @@ -735,6 +735,8 @@ enum tree_node_structure_enum { }; #undef DEFTREESTRUCT +#define NULL_TREE (tree) NULL + /* Define accessors for the fields that all tree nodes have (though some fields are not used for all kinds of nodes). */ @@ -806,6 +808,9 @@ enum tree_node_structure_enum { #define NON_TYPE_CHECK(T) \ (non_type_check ((T), __FILE__, __LINE__, __FUNCTION__)) +#define TREE_INT_CST_ELT_CHECK(T, I) \ +(*tree_int_cst_elt_check ((T), (I), __FILE__, __LINE__, __FUNCTION__)) + #define TREE_VEC_ELT_CHECK(T, I) \ (*(CONST_CAST2 (tree *, typeof (T)*, \ tree_vec_elt_check ((T), (I), __FILE__, __LINE__, __FUNCTION__)))) @@ -861,6 +866,9 @@ extern void tree_not_class_check_failed (const_tree, const enum tree_code_class, const char *, int, const char *) ATTRIBUTE_NORETURN; +extern void tree_int_cst_elt_check_failed (int, int, const char *, + int, const char *) + ATTRIBUTE_NORETURN; extern void tree_vec_elt_check_failed (int, int, const char *, int, const char *) ATTRIBUTE_NORETURN; @@ -898,6 +906,7 @@ extern void omp_clause_range_check_failed (const_tree, const char *, int, #define TREE_RANGE_CHECK(T, CODE1, CODE2) (T) #define EXPR_CHECK(T) (T) #define NON_TYPE_CHECK(T) (T) +#define TREE_INT_CST_ELT_CHECK(T, I) ((T)->int_cst.val[I]) #define TREE_VEC_ELT_CHECK(T, I) ((T)->vec.a[I]) #define TREE_OPERAND_CHECK(T, I) ((T)->exp.operands[I]) #define TREE_OPERAND_CHECK_CODE(T, CODE, I) ((T)->exp.operands[I]) @@ -1123,7 +1132,7 @@ extern void omp_clause_range_check_failed (const_tree, const char *, int, #define SET_PREDICT_EXPR_OUTCOME(NODE, OUTCOME) \ (PREDICT_EXPR_CHECK(NODE)->base.addressable_flag = (int) OUTCOME) #define PREDICT_EXPR_PREDICTOR(NODE) \ - ((enum br_predictor)tree_low_cst (TREE_OPERAND (PREDICT_EXPR_CHECK (NODE), 0), 0)) + ((enum br_predictor)tree_to_shwi (TREE_OPERAND (PREDICT_EXPR_CHECK (NODE), 0))) /* In a VAR_DECL, nonzero means allocate static storage. In a FUNCTION_DECL, nonzero if function has been defined. @@ -1267,6 +1276,9 @@ extern void omp_clause_range_check_failed (const_tree, const char *, int, /* In integral and pointer types, means an unsigned type. */ #define TYPE_UNSIGNED(NODE) (TYPE_CHECK (NODE)->base.u.bits.unsigned_flag) +/* Same as TYPE_UNSIGNED but converted to SIGNOP. */ +#define TYPE_SIGN(NODE) ((signop)TYPE_UNSIGNED(NODE)) + /* True if overflow wraps around for the given integral type. That is, TYPE_MAX + 1 == TYPE_MIN. */ #define TYPE_OVERFLOW_WRAPS(TYPE) \ @@ -1398,29 +1410,18 @@ extern void omp_clause_range_check_failed (const_tree, const char *, int, /* Define additional fields and accessors for nodes representing constants. */ -/* In an INTEGER_CST node. These two together make a 2-word integer. - If the data type is signed, the value is sign-extended to 2 words - even though not all of them may really be in use. - In an unsigned constant shorter than 2 words, the extra bits are 0. 
*/ -#define TREE_INT_CST(NODE) (INTEGER_CST_CHECK (NODE)->int_cst.int_cst) -#define TREE_INT_CST_LOW(NODE) (TREE_INT_CST (NODE).low) -#define TREE_INT_CST_HIGH(NODE) (TREE_INT_CST (NODE).high) - #define INT_CST_LT(A, B) \ - (TREE_INT_CST_HIGH (A) < TREE_INT_CST_HIGH (B) \ - || (TREE_INT_CST_HIGH (A) == TREE_INT_CST_HIGH (B) \ - && TREE_INT_CST_LOW (A) < TREE_INT_CST_LOW (B))) + (wide_int::lts_p (A, B)) + +#define INT_CST_LT_UNSIGNED(A, B) \ + (wide_int::ltu_p (A, B)) -#define INT_CST_LT_UNSIGNED(A, B) \ - (((unsigned HOST_WIDE_INT) TREE_INT_CST_HIGH (A) \ - < (unsigned HOST_WIDE_INT) TREE_INT_CST_HIGH (B)) \ - || (((unsigned HOST_WIDE_INT) TREE_INT_CST_HIGH (A) \ - == (unsigned HOST_WIDE_INT) TREE_INT_CST_HIGH (B)) \ - && TREE_INT_CST_LOW (A) < TREE_INT_CST_LOW (B))) +#define TREE_INT_CST_NUNITS(NODE) (INTEGER_CST_CHECK (NODE)->base.u.length) +#define TREE_INT_CST_ELT(NODE, I) TREE_INT_CST_ELT_CHECK (NODE, I) struct GTY(()) tree_int_cst { struct tree_typed typed; - double_int int_cst; + HOST_WIDE_INT val[1]; }; /* In a REAL_CST node. struct real_value is an opaque entity, with @@ -1605,7 +1606,7 @@ struct GTY(()) tree_constructor { Note that we have to bypass the use of TREE_OPERAND to access that field to avoid infinite recursion in expanding the macros. */ #define VL_EXP_OPERAND_LENGTH(NODE) \ - ((int)TREE_INT_CST_LOW (VL_EXP_CHECK (NODE)->exp.operands[0])) + ((int)tree_to_hwi (VL_EXP_CHECK (NODE)->exp.operands[0])) /* Nonzero if is_gimple_debug() may possibly hold. */ #define MAY_HAVE_DEBUG_STMTS (flag_var_tracking_assignments) @@ -1707,7 +1708,7 @@ extern void protected_set_expr_location (tree, location_t); #define CHREC_VAR(NODE) TREE_OPERAND (POLYNOMIAL_CHREC_CHECK (NODE), 0) #define CHREC_LEFT(NODE) TREE_OPERAND (POLYNOMIAL_CHREC_CHECK (NODE), 1) #define CHREC_RIGHT(NODE) TREE_OPERAND (POLYNOMIAL_CHREC_CHECK (NODE), 2) -#define CHREC_VARIABLE(NODE) TREE_INT_CST_LOW (CHREC_VAR (NODE)) +#define CHREC_VARIABLE(NODE) tree_to_hwi (CHREC_VAR (NODE)) /* LABEL_EXPR accessor. This gives access to the label associated with the given label expression. */ @@ -3870,6 +3871,28 @@ non_type_check (tree __t, const char *__f, int __l, const char *__g) return __t; } +inline const HOST_WIDE_INT * +tree_int_cst_elt_check (const_tree __t, int __i, + const char *__f, int __l, const char *__g) +{ + if (TREE_CODE (__t) != INTEGER_CST) + tree_check_failed (__t, __f, __l, __g, INTEGER_CST, 0); + if (__i < 0 || __i >= __t->base.u.length) + tree_int_cst_elt_check_failed (__i, __t->base.u.length, __f, __l, __g); + return &CONST_CAST_TREE (__t)->int_cst.val[__i]; +} + +inline HOST_WIDE_INT * +tree_int_cst_elt_check (tree __t, int __i, + const char *__f, int __l, const char *__g) +{ + if (TREE_CODE (__t) != INTEGER_CST) + tree_check_failed (__t, __f, __l, __g, INTEGER_CST, 0); + if (__i < 0 || __i >= __t->base.u.length) + tree_int_cst_elt_check_failed (__i, __t->base.u.length, __f, __l, __g); + return &CONST_CAST_TREE (__t)->int_cst.val[__i]; +} + inline tree * tree_vec_elt_check (tree __t, int __i, const char *__f, int __l, const char *__g) @@ -4101,6 +4124,174 @@ omp_clause_elt_check (const_tree __t, int __i, #endif +/* Checks that X is an integer constant that can be expressed in a + signed HOST_WIDE_INT without loss of precision. This function + differs from the tree_fits_* versions in that the signedness of the + type of X is not considered.
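   As an illustrative sketch (not part of the patch): the
   representation described in tree.def stores the least significant
   HOST_WIDE_INT first, so the elements of a constant can be walked
   with the checked accessor above:

     // Hypothetical helper for illustration: dump the elements of an
     // INTEGER_CST, least significant element first.
     static void
     dump_int_cst_elts (const_tree cst)
     {
       for (int i = 0; i < TREE_INT_CST_NUNITS (cst); i++)
         fprintf (stderr, HOST_WIDE_INT_PRINT_HEX "\n",
                  TREE_INT_CST_ELT (cst, i));
     }

   A constant for which TREE_INT_CST_NUNITS is 1 is exactly what the
   function below accepts.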
*/ + +static inline bool +cst_fits_shwi_p (const_tree x) +{ + if (TREE_CODE (x) != INTEGER_CST) + return false; + + return TREE_INT_CST_NUNITS (x) == 1; +} + +/* Checks that X is an integer constant that can be expressed in an + unsigned HOST_WIDE_INT without loss of precision. This function + differs from the tree_fits_* versions in that the signedness of the + type of X is not considered. */ + +static inline bool +cst_fits_uhwi_p (const_tree x) +{ + if (TREE_CODE (x) != INTEGER_CST) + return false; + + return TREE_INT_CST_NUNITS (x) == 1 && TREE_INT_CST_ELT (x, 0) >= 0; +} + +/* Return true if CST is an INTEGER_CST whose value is non-negative + and can be represented in a single unsigned HOST_WIDE_INT. This + function differs from the cst_fits versions in that the signedness + of the type of CST is considered. */ + +static inline bool +tree_fits_uhwi_p (const_tree cst) +{ + tree type; + if (cst == NULL_TREE) + return false; + + type = TREE_TYPE (cst); + + if (TREE_CODE (cst) != INTEGER_CST) + return false; + + if (TREE_INT_CST_NUNITS (cst) == 1) + { + if ((TYPE_SIGN (type) == UNSIGNED) + && (TYPE_PRECISION (type) <= HOST_BITS_PER_WIDE_INT)) + return true; + + /* For numbers of unsigned type that are longer than a HWI, if + the top bit of the bottom word is set, and there is not + another element, then this is too large to fit in a single + hwi. */ + if (TREE_INT_CST_ELT (cst, 0) >= 0) + return true; + } + else if (TREE_INT_CST_NUNITS (cst) == 2) + { + if (TREE_INT_CST_ELT (cst, 1) == 0) + return true; + } + return false; +} + +/* Return true if CST is an INTEGER_CST whose value can be represented + in a single HOST_WIDE_INT. This function differs from the cst_fits + versions in that the signedness of the type of CST is + considered. */ + +static inline bool +tree_fits_shwi_p (const_tree cst) +{ + if (cst == NULL_TREE) + return false; + + if (TREE_CODE (cst) != INTEGER_CST) + return false; + + if (TREE_INT_CST_NUNITS (cst) != 1) + return false; + + if (TYPE_SIGN (TREE_TYPE (cst)) == SIGNED) + return true; + + if (TREE_INT_CST_ELT (cst, 0) >= 0) + return true; + + return false; +} + +/* Return true if CST is an INTEGER_CST that can be manipulated + efficiently on the host. If SIGN is SIGNED, the value can be + represented in a single HOST_WIDE_INT. If SIGN is UNSIGNED, the + value must be non-negative and can be represented in a single + unsigned HOST_WIDE_INT. */ + +static inline bool +tree_fits_hwi_p (const_tree cst, signop sign) +{ + return sign ? tree_fits_uhwi_p (cst) : tree_fits_shwi_p (cst); +} + +/* Return true if CST is an INTEGER_CST that can be manipulated + efficiently on the host. If the sign of CST is SIGNED, the value + can be represented in a single HOST_WIDE_INT. If the sign of CST + is UNSIGNED, the value must be non-negative and can be represented + in a single unsigned HOST_WIDE_INT. */ + +static inline bool +tree_fits_hwi_p (const_tree cst) +{ + if (cst == NULL_TREE) + return false; + + if (TREE_CODE (cst) != INTEGER_CST) + return false; + + return TYPE_UNSIGNED (TREE_TYPE (cst)) + ? tree_fits_uhwi_p (cst) : tree_fits_shwi_p (cst); +} + +/* Return the unsigned HOST_WIDE_INT least significant bits of CST. + If checking is enabled, this ICEs if the value does not fit. */ + +static inline unsigned HOST_WIDE_INT +tree_to_uhwi (const_tree cst) +{ + gcc_checking_assert (tree_fits_uhwi_p (cst)); + + return (unsigned HOST_WIDE_INT)TREE_INT_CST_ELT (cst, 0); +} + +/* Return the HOST_WIDE_INT least significant bits of CST.
If + checking is enabled, this ICEs if the value does not fit. */ + +static inline HOST_WIDE_INT +tree_to_shwi (const_tree cst) +{ + gcc_checking_assert (tree_fits_shwi_p (cst)); + + return (HOST_WIDE_INT)TREE_INT_CST_ELT (cst, 0); +} + +/* Return the HOST_WIDE_INT least significant bits of CST. No + checking is done to ensure that it fits. It is assumed that one of + tree_fits_uhwi_p or tree_fits_shwi_p was done before this call. */ + +static inline HOST_WIDE_INT +tree_to_hwi (const_tree cst) +{ + return TREE_INT_CST_ELT (cst, 0); +} + +/* Return the HOST_WIDE_INT least significant bits of CST. The sign + of the checking is based on SGN. */ + +static inline HOST_WIDE_INT +tree_to_hwi (const_tree cst, signop sgn) +{ + if (sgn == SIGNED) + return tree_to_shwi (cst); + else + return tree_to_uhwi (cst); +} + + /* Compute the number of operands in an expression node NODE. For tcc_vl_exp nodes like CALL_EXPRs, this is stored in the node itself, otherwise it is looked up from the node's code. */ @@ -4568,8 +4759,6 @@ enum ptrmemfunc_vbit_where_t ptrmemfunc_vbit_in_delta }; -#define NULL_TREE (tree) NULL - /* True if NODE is an erroneous expression. */ #define error_operand_p(NODE) \ @@ -4585,9 +4774,9 @@ extern hashval_t decl_assembler_name_hash (const_tree asmname); extern size_t tree_size (const_tree); -/* Compute the number of bytes occupied by a tree with code CODE. This - function cannot be used for TREE_VEC codes, which are of variable - length. */ +/* Compute the number of bytes occupied by a tree with code CODE. + This function cannot be used for TREE_VEC or INTEGER_CST nodes, + which are of variable length. */ extern size_t tree_code_size (enum tree_code); /* Allocate and return a new UID from the DECL_UID namespace. */ @@ -4617,6 +4806,11 @@ extern tree build_case_label (tree, tree, tree); extern tree make_tree_binfo_stat (unsigned MEM_STAT_DECL); #define make_tree_binfo(t) make_tree_binfo_stat (t MEM_STAT_INFO) +/* Make an INTEGER_CST. */ + +extern tree make_int_cst_stat (int MEM_STAT_DECL); +#define make_int_cst(t) make_int_cst_stat (t MEM_STAT_INFO) + /* Make a TREE_VEC. */ extern tree make_tree_vec_stat (int MEM_STAT_DECL); @@ -4733,27 +4927,16 @@ extern tree build_var_debug_value_stat (tree, tree MEM_STAT_DECL); /* Constructs double_int from tree CST. */ -static inline double_int -tree_to_double_int (const_tree cst) -{ - return TREE_INT_CST (cst); -} - extern tree double_int_to_tree (tree, double_int); -extern bool double_int_fits_to_tree_p (const_tree, double_int); -extern tree force_fit_type_double (tree, double_int, int, bool); +class wide_int; +extern tree force_fit_type (tree, const wide_int&, int, bool); /* Create an INT_CST node with a CST value zero extended.
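   As a usage sketch (illustrative only; it mirrors conversions made
   elsewhere in this patch), the fits/to accessors above pair with the
   builders declared below:

     // Round-trip a small host integer through an INTEGER_CST.
     tree t = build_int_cstu (size_type_node, 42);
     if (tree_fits_uhwi_p (t))
       {
         unsigned HOST_WIDE_INT v = tree_to_uhwi (t);
         gcc_assert (v == 42);
       }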
*/ -static inline tree -build_int_cstu (tree type, unsigned HOST_WIDE_INT cst) -{ - return double_int_to_tree (type, double_int::from_uhwi (cst)); -} - +/* static inline */ extern tree build_int_cst (tree, HOST_WIDE_INT); +extern tree build_int_cstu (tree type, unsigned HOST_WIDE_INT cst); extern tree build_int_cst_type (tree, HOST_WIDE_INT); -extern tree build_int_cst_wide (tree, unsigned HOST_WIDE_INT, HOST_WIDE_INT); extern tree make_vector_stat (unsigned MEM_STAT_DECL); #define make_vector(n) make_vector_stat (n MEM_STAT_INFO) extern tree build_vector_stat (tree, tree * MEM_STAT_DECL); @@ -4845,24 +5028,10 @@ extern int attribute_list_contained (const_tree, const_tree); extern int tree_int_cst_equal (const_tree, const_tree); extern int tree_int_cst_lt (const_tree, const_tree); extern int tree_int_cst_compare (const_tree, const_tree); -extern int host_integerp (const_tree, int) -#ifndef ENABLE_TREE_CHECKING - ATTRIBUTE_PURE /* host_integerp is pure only when checking is disabled. */ -#endif - ; -extern HOST_WIDE_INT tree_low_cst (const_tree, int); -#if !defined ENABLE_TREE_CHECKING && (GCC_VERSION >= 4003) -extern inline __attribute__ ((__gnu_inline__)) HOST_WIDE_INT -tree_low_cst (const_tree t, int pos) -{ - gcc_assert (host_integerp (t, pos)); - return TREE_INT_CST_LOW (t); -} -#endif extern HOST_WIDE_INT size_low_cst (const_tree); extern int tree_int_cst_sgn (const_tree); extern int tree_int_cst_sign_bit (const_tree); -extern unsigned int tree_int_cst_min_precision (tree, bool); +extern unsigned int tree_int_cst_min_precision (tree, signop); extern bool tree_expr_nonnegative_p (tree); extern bool tree_expr_nonnegative_warnv_p (tree, bool *); extern bool may_negate_without_overflow_p (const_tree); @@ -5334,7 +5503,6 @@ extern int integer_pow2p (const_tree); extern int integer_nonzerop (const_tree); -extern bool cst_and_fits_in_hwi (const_tree); extern tree num_ending_zeros (const_tree); /* fixed_zerop (tree x) is nonzero if X is a fixed-point constant of @@ -5793,11 +5961,10 @@ extern tree fold_indirect_ref_loc (location_t, tree); extern tree build_simple_mem_ref_loc (location_t, tree); #define build_simple_mem_ref(T)\ build_simple_mem_ref_loc (UNKNOWN_LOCATION, T) -extern double_int mem_ref_offset (const_tree); extern tree reference_alias_ptr_type (const_tree); extern tree build_invariant_address (tree, tree, HOST_WIDE_INT); extern tree constant_boolean_node (bool, tree); -extern tree div_if_zero_remainder (enum tree_code, const_tree, const_tree); +extern tree div_if_zero_remainder (const_tree, const_tree); extern bool tree_swap_operands_p (const_tree, const_tree, bool); extern enum tree_code swap_tree_comparison (enum tree_code); @@ -6139,7 +6306,7 @@ extern tree get_attribute_namespace (const_tree); extern void apply_tm_attr (tree, tree); /* In stor-layout.c */ -extern void set_min_and_max_values_for_integral_type (tree, int, bool); +extern void set_min_and_max_values_for_integral_type (tree, int, signop); extern void fixup_signed_type (tree); extern void internal_reference_types (void); extern unsigned int update_alignment_for_field (record_layout_info, tree, diff --git a/gcc/tsan.c b/gcc/tsan.c index b9171c803aa..dae5d05d4fc 100644 --- a/gcc/tsan.c +++ b/gcc/tsan.c @@ -443,8 +443,8 @@ instrument_builtin_call (gimple_stmt_iterator *gsi) case check_last: case fetch_op: last_arg = gimple_call_arg (stmt, num - 1); - if (!host_integerp (last_arg, 1) - || (unsigned HOST_WIDE_INT) tree_low_cst (last_arg, 1) + if (!tree_fits_uhwi_p (last_arg) + || (unsigned HOST_WIDE_INT) tree_to_uhwi 
(last_arg) > MEMMODEL_SEQ_CST) return; gimple_call_set_fndecl (stmt, decl); @@ -515,12 +515,12 @@ instrument_builtin_call (gimple_stmt_iterator *gsi) gcc_assert (num == 6); for (j = 0; j < 6; j++) args[j] = gimple_call_arg (stmt, j); - if (!host_integerp (args[4], 1) - || (unsigned HOST_WIDE_INT) tree_low_cst (args[4], 1) + if (!tree_fits_uhwi_p (args[4]) + || (unsigned HOST_WIDE_INT) tree_to_uhwi (args[4]) > MEMMODEL_SEQ_CST) return; - if (!host_integerp (args[5], 1) - || (unsigned HOST_WIDE_INT) tree_low_cst (args[5], 1) + if (!tree_fits_uhwi_p (args[5]) + || (unsigned HOST_WIDE_INT) tree_to_uhwi (args[5]) > MEMMODEL_SEQ_CST) return; update_gimple_call (gsi, decl, 5, args[0], args[1], args[2], diff --git a/gcc/value-prof.c b/gcc/value-prof.c index 8aa9fcda905..d36d0eeefe5 100644 --- a/gcc/value-prof.c +++ b/gcc/value-prof.c @@ -805,9 +805,17 @@ gimple_divmod_fixed_value_transform (gimple_stmt_iterator *si) else prob = 0; - tree_val = build_int_cst_wide (get_gcov_type (), - (unsigned HOST_WIDE_INT) val, - val >> (HOST_BITS_PER_WIDE_INT - 1) >> 1); + if (sizeof (gcov_type) == sizeof (HOST_WIDE_INT)) + tree_val = build_int_cst (get_gcov_type (), val); + else + { + HOST_WIDE_INT a[2]; + a[0] = (unsigned HOST_WIDE_INT) val; + a[1] = val >> (HOST_BITS_PER_WIDE_INT - 1) >> 1; + + tree_val = wide_int_to_tree (get_gcov_type (), wide_int::from_array (a, 2, + TYPE_PRECISION (get_gcov_type ()), false)); + } result = gimple_divmod_fixed_value (stmt, tree_val, prob, count, all); if (dump_file) @@ -1715,9 +1723,18 @@ gimple_stringops_transform (gimple_stmt_iterator *gsi) default: gcc_unreachable (); } - tree_val = build_int_cst_wide (get_gcov_type (), - (unsigned HOST_WIDE_INT) val, - val >> (HOST_BITS_PER_WIDE_INT - 1) >> 1); + if (sizeof (gcov_type) == sizeof (HOST_WIDE_INT)) + tree_val = build_int_cst (get_gcov_type (), val); + else + { + HOST_WIDE_INT a[2]; + a[0] = (unsigned HOST_WIDE_INT) val; + a[1] = val >> (HOST_BITS_PER_WIDE_INT - 1) >> 1; + + tree_val = wide_int_to_tree (get_gcov_type (), wide_int::from_array (a, 2, + TYPE_PRECISION (get_gcov_type ()), false)); + } + if (dump_file) { fprintf (dump_file, "Single value %i stringop transformation on ", diff --git a/gcc/var-tracking.c b/gcc/var-tracking.c index d82d2621fc6..122d3155d8a 100644 --- a/gcc/var-tracking.c +++ b/gcc/var-tracking.c @@ -3523,6 +3523,23 @@ loc_cmp (rtx x, rtx y) default: gcc_unreachable (); } + if (CONST_WIDE_INT_P (x)) + { + /* Compare the vector length first. */ + if (CONST_WIDE_INT_NUNITS (x) > CONST_WIDE_INT_NUNITS (y)) + return 1; + else if (CONST_WIDE_INT_NUNITS (x) < CONST_WIDE_INT_NUNITS (y)) + return -1; + + /* Compare the vector elements, most significant first.
*/ + for (j = CONST_WIDE_INT_NUNITS (x) - 1; j >= 0; j--) + { + if (CONST_WIDE_INT_ELT (x, j) < CONST_WIDE_INT_ELT (y, j)) + return -1; + if (CONST_WIDE_INT_ELT (x, j) > CONST_WIDE_INT_ELT (y, j)) + return 1; + } + } return 0; } @@ -6269,9 +6286,9 @@ prepare_call_arguments (basic_block bb, rtx insn) && DECL_INITIAL (SYMBOL_REF_DECL (l->loc))) { initial = DECL_INITIAL (SYMBOL_REF_DECL (l->loc)); - if (host_integerp (initial, 0)) + if (tree_fits_shwi_p (initial)) { - item = GEN_INT (tree_low_cst (initial, 0)); + item = GEN_INT (tree_to_shwi (initial)); item = gen_rtx_CONCAT (indmode, mem, item); call_arguments = gen_rtx_EXPR_LIST (VOIDmode, item, @@ -6350,7 +6367,7 @@ prepare_call_arguments (basic_block bb, rtx insn) = TYPE_MODE (TREE_TYPE (OBJ_TYPE_REF_EXPR (obj_type_ref))); rtx clobbered = gen_rtx_MEM (mode, this_arg); HOST_WIDE_INT token - = tree_low_cst (OBJ_TYPE_REF_TOKEN (obj_type_ref), 0); + = tree_to_shwi (OBJ_TYPE_REF_TOKEN (obj_type_ref)); if (token) clobbered = plus_constant (mode, clobbered, token * GET_MODE_SIZE (mode)); @@ -8648,7 +8665,7 @@ emit_note_insn_var_location (variable_def **varp, emit_note_data *data) ++n_var_parts; } type_size_unit = TYPE_SIZE_UNIT (TREE_TYPE (decl)); - if ((unsigned HOST_WIDE_INT) last_limit < TREE_INT_CST_LOW (type_size_unit)) + if ((unsigned HOST_WIDE_INT) last_limit < tree_to_uhwi (type_size_unit)) complete = false; if (! flag_var_tracking_uninit) diff --git a/gcc/varasm.c b/gcc/varasm.c index 69ec26a5e6b..450c84e02eb 100644 --- a/gcc/varasm.c +++ b/gcc/varasm.c @@ -1135,7 +1135,7 @@ get_block_for_decl (tree decl) constant size. */ if (DECL_SIZE_UNIT (decl) == NULL) return NULL; - if (!host_integerp (DECL_SIZE_UNIT (decl), 1)) + if (!tree_fits_uhwi_p (DECL_SIZE_UNIT (decl))) return NULL; /* Find out which section should contain DECL. We cannot put it into @@ -1901,7 +1901,7 @@ assemble_noswitch_variable (tree decl, const char *name, section *sect, { unsigned HOST_WIDE_INT size, rounded; - size = tree_low_cst (DECL_SIZE_UNIT (decl), 1); + size = tree_to_uhwi (DECL_SIZE_UNIT (decl)); rounded = size; if (flag_asan && asan_protect_global (decl)) @@ -1948,11 +1948,11 @@ assemble_variable_contents (tree decl, const char *name, && !initializer_zerop (DECL_INITIAL (decl))) /* Output the actual data. */ output_constant (DECL_INITIAL (decl), - tree_low_cst (DECL_SIZE_UNIT (decl), 1), + tree_to_uhwi (DECL_SIZE_UNIT (decl)), get_variable_align (decl)); else /* Leave space for it.
*/ - assemble_zeros (tree_low_cst (DECL_SIZE_UNIT (decl), 1)); + assemble_zeros (tree_to_uhwi (DECL_SIZE_UNIT (decl))); } } @@ -2138,7 +2138,7 @@ assemble_variable (tree decl, int top_level ATTRIBUTE_UNUSED, if (asan_protected) { unsigned HOST_WIDE_INT int size - = tree_low_cst (DECL_SIZE_UNIT (decl), 1); + = tree_to_uhwi (DECL_SIZE_UNIT (decl)); assemble_zeros (asan_red_zone_size (size)); } } @@ -2721,7 +2721,7 @@ decode_addr_const (tree exp, struct addr_const *value) while (1) { if (TREE_CODE (target) == COMPONENT_REF - && host_integerp (byte_position (TREE_OPERAND (target, 1)), 0)) + && tree_fits_shwi_p (byte_position (TREE_OPERAND (target, 1)))) { offset += int_byte_position (TREE_OPERAND (target, 1)); target = TREE_OPERAND (target, 0); @@ -2729,14 +2729,14 @@ decode_addr_const (tree exp, struct addr_const *value) else if (TREE_CODE (target) == ARRAY_REF || TREE_CODE (target) == ARRAY_RANGE_REF) { - offset += (tree_low_cst (TYPE_SIZE_UNIT (TREE_TYPE (target)), 1) - * tree_low_cst (TREE_OPERAND (target, 1), 0)); + offset += (tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (target))) + * tree_to_shwi (TREE_OPERAND (target, 1))); target = TREE_OPERAND (target, 0); } else if (TREE_CODE (target) == MEM_REF && TREE_CODE (TREE_OPERAND (target, 0)) == ADDR_EXPR) { - offset += mem_ref_offset (target).low; + offset += mem_ref_offset (target).to_short_addr (); target = TREE_OPERAND (TREE_OPERAND (target, 0), 0); } else if (TREE_CODE (target) == INDIRECT_REF @@ -2816,8 +2816,8 @@ const_hash_1 (const tree exp) switch (code) { case INTEGER_CST: - p = (char *) &TREE_INT_CST (exp); - len = sizeof TREE_INT_CST (exp); + p = (char *) &TREE_INT_CST_ELT (exp, 0); + len = TREE_INT_CST_NUNITS (exp) * sizeof (HOST_WIDE_INT); break; case REAL_CST: @@ -3523,6 +3523,7 @@ const_rtx_hash_1 (rtx *xp, void *data) enum rtx_code code; hashval_t h, *hp; rtx x; + int i; x = *xp; code = GET_CODE (x); @@ -3533,12 +3534,12 @@ const_rtx_hash_1 (rtx *xp, void *data) { case CONST_INT: hwi = INTVAL (x); + fold_hwi: { int shift = sizeof (hashval_t) * CHAR_BIT; const int n = sizeof (HOST_WIDE_INT) / sizeof (hashval_t); - int i; - + h ^= (hashval_t) hwi; for (i = 1; i < n; ++i) { @@ -3548,8 +3549,16 @@ const_rtx_hash_1 (rtx *xp, void *data) } break; + case CONST_WIDE_INT: + hwi = GET_MODE_PRECISION (mode); + { + for (i = 0; i < CONST_WIDE_INT_NUNITS (x); i++) + hwi ^= CONST_WIDE_INT_ELT (x, i); + goto fold_hwi; + } + case CONST_DOUBLE: - if (mode == VOIDmode) + if (TARGET_SUPPORTS_WIDE_INT == 0 && mode == VOIDmode) { hwi = CONST_DOUBLE_LOW (x) ^ CONST_DOUBLE_HIGH (x); goto fold_hwi; @@ -4640,8 +4649,7 @@ output_constant (tree exp, unsigned HOST_WIDE_INT size, unsigned int align) exp = build1 (ADDR_EXPR, saved_type, TREE_OPERAND (exp, 0)); /* Likewise for constant ints.
*/ else if (TREE_CODE (exp) == INTEGER_CST) - exp = build_int_cst_wide (saved_type, TREE_INT_CST_LOW (exp), - TREE_INT_CST_HIGH (exp)); + exp = wide_int_to_tree (saved_type, exp); } @@ -4681,7 +4689,7 @@ output_constant (tree exp, unsigned HOST_WIDE_INT size, unsigned int align) if (TREE_CODE (exp) == FDESC_EXPR) { #ifdef ASM_OUTPUT_FDESC - HOST_WIDE_INT part = tree_low_cst (TREE_OPERAND (exp, 1), 0); + HOST_WIDE_INT part = tree_to_shwi (TREE_OPERAND (exp, 1)); tree decl = TREE_OPERAND (exp, 0); ASM_OUTPUT_FDESC (asm_out_file, decl, part); #else @@ -4779,7 +4787,7 @@ array_size_for_constructor (tree val) tree max_index; unsigned HOST_WIDE_INT cnt; tree index, value, tmp; - double_int i; + addr_wide_int i; /* This code used to attempt to handle string constants that are not arrays of single-bytes, but nothing else does, so there's no point in @@ -4801,14 +4809,13 @@ array_size_for_constructor (tree val) /* Compute the total number of array elements. */ tmp = TYPE_MIN_VALUE (TYPE_DOMAIN (TREE_TYPE (val))); - i = tree_to_double_int (max_index) - tree_to_double_int (tmp); - i += double_int_one; + i = addr_wide_int (max_index) - tmp + 1; /* Multiply by the array element unit size to find number of bytes. */ - i *= tree_to_double_int (TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (val)))); + i *= addr_wide_int (TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (val)))); - gcc_assert (i.fits_uhwi ()); - return i.low; + gcc_assert (i.fits_uhwi_p ()); + return i.to_uhwi (); } /* Other datastructures + helpers for output_constructor. */ @@ -4848,9 +4855,9 @@ output_constructor_array_range (oc_local_state *local) = int_size_in_bytes (TREE_TYPE (local->type)); HOST_WIDE_INT lo_index - = tree_low_cst (TREE_OPERAND (local->index, 0), 0); + = tree_to_shwi (TREE_OPERAND (local->index, 0)); HOST_WIDE_INT hi_index - = tree_low_cst (TREE_OPERAND (local->index, 1), 0); + = tree_to_shwi (TREE_OPERAND (local->index, 1)); HOST_WIDE_INT index; unsigned int align2 @@ -4888,11 +4895,9 @@ output_constructor_regular_field (oc_local_state *local) sign-extend the result because Ada has negative DECL_FIELD_OFFSETs but we are using an unsigned sizetype. */ unsigned prec = TYPE_PRECISION (sizetype); - double_int idx = tree_to_double_int (local->index) - - tree_to_double_int (local->min_index); - idx = idx.sext (prec); - fieldpos = (tree_low_cst (TYPE_SIZE_UNIT (TREE_TYPE (local->val)), 1) - * idx.low); + addr_wide_int idx + = (addr_wide_int (local->index) - local->min_index).sext (prec); + fieldpos = (idx * TYPE_SIZE_UNIT (TREE_TYPE (local->val))).to_shwi (); } else if (local->field != NULL_TREE) fieldpos = int_byte_position (local->field); @@ -4941,7 +4946,7 @@ output_constructor_regular_field (oc_local_state *local) gcc_assert (!fieldsize || !DECL_CHAIN (local->field)); } else - fieldsize = tree_low_cst (DECL_SIZE_UNIT (local->field), 1); + fieldsize = tree_to_uhwi (DECL_SIZE_UNIT (local->field)); } else fieldsize = int_size_in_bytes (TREE_TYPE (local->type)); @@ -4966,15 +4971,15 @@ output_constructor_bitfield (oc_local_state *local, unsigned int bit_offset) /* Bit size of this element. */ HOST_WIDE_INT ebitsize = (local->field - ? tree_low_cst (DECL_SIZE (local->field), 1) - : tree_low_cst (TYPE_SIZE (TREE_TYPE (local->type)), 1)); + ? tree_to_uhwi (DECL_SIZE (local->field)) + : tree_to_uhwi (TYPE_SIZE (TREE_TYPE (local->type)))); /* Relative index of this element if this is an array component. */ HOST_WIDE_INT relative_index = (!local->field ? (local->index - ? (tree_low_cst (local->index, 0) - - tree_low_cst (local->min_index, 0)) + ? 
(tree_to_shwi (local->index) + - tree_to_shwi (local->min_index)) : local->last_relative_index + 1) : 0); @@ -5085,22 +5090,13 @@ output_constructor_bitfield (oc_local_state *local, unsigned int bit_offset) the word boundary in the INTEGER_CST. We can only select bits from the LOW or HIGH part not from both. */ - if (shift < HOST_BITS_PER_WIDE_INT - && shift + this_time > HOST_BITS_PER_WIDE_INT) - { - this_time = shift + this_time - HOST_BITS_PER_WIDE_INT; - shift = HOST_BITS_PER_WIDE_INT; - } + if ((shift / HOST_BITS_PER_WIDE_INT) + != ((shift + this_time) / HOST_BITS_PER_WIDE_INT)) + this_time = (shift + this_time) & (HOST_BITS_PER_WIDE_INT - 1); /* Now get the bits from the appropriate constant word. */ - if (shift < HOST_BITS_PER_WIDE_INT) - value = TREE_INT_CST_LOW (local->val); - else - { - gcc_assert (shift < HOST_BITS_PER_DOUBLE_INT); - value = TREE_INT_CST_HIGH (local->val); - shift -= HOST_BITS_PER_WIDE_INT; - } + value = TREE_INT_CST_ELT (local->val, shift / HOST_BITS_PER_WIDE_INT); + shift = shift & (HOST_BITS_PER_WIDE_INT - 1); /* Get the result. This works only when: 1 <= this_time <= HOST_BITS_PER_WIDE_INT. */ @@ -5120,19 +5116,13 @@ output_constructor_bitfield (oc_local_state *local, unsigned int bit_offset) the word boundary in the INTEGER_CST. We can only select bits from the LOW or HIGH part not from both. */ - if (shift < HOST_BITS_PER_WIDE_INT - && shift + this_time > HOST_BITS_PER_WIDE_INT) + if ((shift / HOST_BITS_PER_WIDE_INT) + != ((shift + this_time) / HOST_BITS_PER_WIDE_INT)) this_time = (HOST_BITS_PER_WIDE_INT - shift); /* Now get the bits from the appropriate constant word. */ - if (shift < HOST_BITS_PER_WIDE_INT) - value = TREE_INT_CST_LOW (local->val); - else - { - gcc_assert (shift < HOST_BITS_PER_DOUBLE_INT); - value = TREE_INT_CST_HIGH (local->val); - shift -= HOST_BITS_PER_WIDE_INT; - } + value = TREE_INT_CST_ELT (local->val, shift / HOST_BITS_PER_WIDE_INT); + shift = shift & (HOST_BITS_PER_WIDE_INT - 1); /* Get the result. This works only when: 1 <= this_time <= HOST_BITS_PER_WIDE_INT. */ @@ -7084,7 +7074,7 @@ place_block_symbol (rtx symbol) { decl = SYMBOL_REF_DECL (symbol); alignment = get_variable_align (decl); - size = tree_low_cst (DECL_SIZE_UNIT (decl), 1); + size = tree_to_uhwi (DECL_SIZE_UNIT (decl)); if (flag_asan && asan_protect_global (decl)) { size += asan_red_zone_size (size); @@ -7249,7 +7239,7 @@ output_object_block (struct object_block *block) HOST_WIDE_INT size; decl = SYMBOL_REF_DECL (symbol); assemble_variable_contents (decl, XSTR (symbol, 0), false); - size = tree_low_cst (DECL_SIZE_UNIT (decl), 1); + size = tree_to_uhwi (DECL_SIZE_UNIT (decl)); offset += size; if (flag_asan && asan_protect_global (decl)) { diff --git a/gcc/wide-int-print.cc b/gcc/wide-int-print.cc new file mode 100644 index 00000000000..e1968107b49 --- /dev/null +++ b/gcc/wide-int-print.cc @@ -0,0 +1,145 @@ +/* Printing operations with very long integers. + Copyright (C) 2012-2013 Free Software Foundation, Inc. + Contributed by Kenneth Zadeck <zadeck@naturalbridge.com> + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify it +under the terms of the GNU General Public License as published by the +Free Software Foundation; either version 3, or (at your option) any +later version. + +GCC is distributed in the hope that it will be useful, but WITHOUT +ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +for more details. 
*/ + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING3. If not see +<http://www.gnu.org/licenses/>. */ + +#include "config.h" +#include "system.h" +#include "coretypes.h" +#include "tm.h" +#include "hwint.h" +#include "wide-int.h" +#include "wide-int-print.h" + +/* + * Public printing routines. + */ + +#define BLOCKS_NEEDED(PREC) \ + (((PREC) + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT) + +void +print_dec (const wide_int &wi, char *buf, signop sgn) +{ + if (sgn == SIGNED) + print_decs (wi, buf); + else + print_decu (wi, buf); +} + +void +print_dec (const wide_int &wi, FILE *file, signop sgn) +{ + if (sgn == SIGNED) + print_decs (wi, file); + else + print_decu (wi, file); +} + + +/* Try to print the signed WI in decimal to BUF if the number fits + in a HWI. Otherwise print in hex. */ + +void +print_decs (const wide_int &wi, char *buf) +{ + if ((wi.get_precision () <= HOST_BITS_PER_WIDE_INT) + || (wi.get_len () == 1)) + { + if (wi.neg_p (SIGNED)) + sprintf (buf, "-" HOST_WIDE_INT_PRINT_UNSIGNED, -wi.to_shwi ()); + else + sprintf (buf, HOST_WIDE_INT_PRINT_DEC, wi.to_shwi ()); + } + else + print_hex (wi, buf); +} + +/* Try to print the signed WI in decimal to FILE if the number fits + in a HWI. Otherwise print in hex. */ + +void +print_decs (const wide_int &wi, FILE *file) +{ + char buf[WIDE_INT_PRINT_BUFFER_SIZE]; + print_decs (wi, buf); + fputs (buf, file); +} + +/* Try to print the unsigned WI in decimal to BUF if the number fits + in a HWI. Otherwise print in hex. */ + +void +print_decu (const wide_int &wi, char *buf) +{ + if ((wi.get_precision () <= HOST_BITS_PER_WIDE_INT) + || (wi.get_len () == 1 && !wi.neg_p (SIGNED))) + sprintf (buf, HOST_WIDE_INT_PRINT_UNSIGNED, wi.to_uhwi ()); + else + print_hex (wi, buf); +} + +/* Try to print the unsigned WI in decimal to FILE if the number fits + in a HWI. Otherwise print in hex. */ + +void +print_decu (const wide_int &wi, FILE *file) +{ + char buf[WIDE_INT_PRINT_BUFFER_SIZE]; + print_decu (wi, buf); + fputs (buf, file); +} + +void +print_hex (const wide_int &wi, char *buf) +{ + int i = wi.get_len (); + + if (wi.zero_p ()) + buf += sprintf (buf, "0x0"); + else + { + if (wi.neg_p (SIGNED)) + { + int j; + /* If the number is negative, we may need to pad value with + 0xFFF... because the leading elements may be missing and + we do not print a '-' with hex. */ + buf += sprintf (buf, "0x"); + for (j = BLOCKS_NEEDED (wi.get_precision ()); j > i; j--) + buf += sprintf (buf, HOST_WIDE_INT_PRINT_PADDED_HEX, (HOST_WIDE_INT) -1); + + } + else + buf += sprintf (buf, "0x"HOST_WIDE_INT_PRINT_HEX_PURE, wi.elt (--i)); + + while (--i >= 0) + buf += sprintf (buf, HOST_WIDE_INT_PRINT_PADDED_HEX, wi.elt (i)); + } +} + +/* Print one big hex number to FILE. Note that some assemblers may not + accept this for large modes. */ +void +print_hex (const wide_int &wi, FILE *file) +{ + char buf[WIDE_INT_PRINT_BUFFER_SIZE]; + print_hex (wi, buf); + fputs (buf, file); +} + diff --git a/gcc/wide-int-print.h b/gcc/wide-int-print.h new file mode 100644 index 00000000000..d1c42b154fd --- /dev/null +++ b/gcc/wide-int-print.h @@ -0,0 +1,38 @@ +/* Print wide integers. + Copyright (C) 2013 Free Software Foundation, Inc. + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify it +under the terms of the GNU General Public License as published by the +Free Software Foundation; either version 3, or (at your option) any +later version.
+ +GCC is distributed in the hope that it will be useful, but WITHOUT +ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING3. If not see +<http://www.gnu.org/licenses/>. */ + +#ifndef WIDE_INT_PRINT_H +#define WIDE_INT_PRINT_H + +#include <stdio.h> +#include "wide-int.h" + +#define WIDE_INT_PRINT_BUFFER_SIZE ((2 * MAX_BITSIZE_MODE_ANY_INT / BITS_PER_UNIT) + 4) +/* Printing functions. */ + +extern void print_dec (const wide_int &wi, char *buf, signop sgn); +extern void print_dec (const wide_int &wi, FILE *file, signop sgn); +extern void print_decs (const wide_int &wi, char *buf); +extern void print_decs (const wide_int &wi, FILE *file); +extern void print_decu (const wide_int &wi, char *buf); +extern void print_decu (const wide_int &wi, FILE *file); +extern void print_hex (const wide_int &wi, char *buf); +extern void print_hex (const wide_int &wi, FILE *file); + +#endif /* WIDE_INT_PRINT_H */ diff --git a/gcc/wide-int.cc b/gcc/wide-int.cc new file mode 100644 index 00000000000..d3514fcdce2 --- /dev/null +++ b/gcc/wide-int.cc @@ -0,0 +1,3147 @@ +/* Operations with very long integers. + Copyright (C) 2012-2013 Free Software Foundation, Inc. + Contributed by Kenneth Zadeck <zadeck@naturalbridge.com> + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify it +under the terms of the GNU General Public License as published by the +Free Software Foundation; either version 3, or (at your option) any +later version. + +GCC is distributed in the hope that it will be useful, but WITHOUT +ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING3. If not see +<http://www.gnu.org/licenses/>. */ + +#include "config.h" +#include "system.h" +#include "coretypes.h" +#include "tm.h" +#include "hwint.h" +#include "wide-int.h" +#include "rtl.h" +#include "tree.h" +#include "dumpfile.h" + +/* This is the maximal size of the buffer needed for dump. */ +const int MAX_SIZE = 4 * (MAX_BITSIZE_MODE_ANY_INT / 4 + + MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_WIDE_INT + 32); + +/* + * Internal utilities. + */ + +/* Quantities to deal with values that hold half of a wide int. Used + in multiply and divide. */ +#define HALF_INT_MASK (((HOST_WIDE_INT)1 << HOST_BITS_PER_HALF_WIDE_INT) - 1) + +#define BLOCK_OF(TARGET) ((TARGET) / HOST_BITS_PER_WIDE_INT) +#define BLOCKS_NEEDED(PREC) \ + (PREC ? (((PREC) + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT) : 1) +#define SIGN_MASK(X) (((HOST_WIDE_INT)X) >> (HOST_BITS_PER_WIDE_INT - 1)) + +/* + * Conversion routines in and out of wide-int. + */ + +/* Convert OP0 into a wide int of PRECISION. */ +wide_int_ro +wide_int_ro::from_shwi (HOST_WIDE_INT op0, + unsigned int precision) +{ + wide_int result; + + result.precision = precision; + result.val[0] = op0; + result.len = 1; + +#ifdef DEBUG_WIDE_INT + debug_wh ("wide_int::from_shwi %s " HOST_WIDE_INT_PRINT_HEX ")\n", + result, op0); +#endif + + return result; +} + +/* Convert OP0 into a wide int of PRECISION. 
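   For example (an illustrative sketch): with 64-bit HOST_WIDE_INTs
   and a precision of 128, converting an unsigned value whose top host
   bit is set requires a second block, because the compressed
   representation sign extends the top block:

     wide_int w
       = wide_int::from_uhwi ((unsigned HOST_WIDE_INT) -1, 128);
     // w.get_len () == 2 with elements { -1, 0 }, so the value reads
     // back as 2^64 - 1 rather than as -1.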
*/ +wide_int_ro +wide_int_ro::from_uhwi (unsigned HOST_WIDE_INT op0, + unsigned int precision) +{ + wide_int result; + + result.precision = precision; + result.val[0] = op0; + + /* If the top bit is a 1, we need to add another word of 0s on + top; otherwise the value would read back sign extended, and the + infinite expansion of any unsigned number must have 0s at the + top. */ + if ((HOST_WIDE_INT)op0 < 0 && precision > HOST_BITS_PER_WIDE_INT) + { + result.val[1] = 0; + result.len = 2; + } + else + result.len = 1; + +#ifdef DEBUG_WIDE_INT + debug_wh ("wide_int::from_uhwi %s " HOST_WIDE_INT_PRINT_HEX ")\n", + result, op0); +#endif + + return result; +} + +/* Create a wide_int from an array of host_wide_ints in OP1 of LEN. + The result has PRECISION. */ +wide_int_ro +wide_int_ro::from_array (const HOST_WIDE_INT *op1, unsigned int len, + unsigned int precision, bool need_canon) +{ + unsigned int i; + wide_int result; + + result.len = len; + result.precision = precision; + + for (i=0; i < len; i++) + result.val[i] = op1[i]; + +#ifdef DEBUG_WIDE_INT + debug_wa ("wide_int::from_array %s = %s\n", result, op1, len, precision); +#endif + + if (need_canon) + result.canonize (); + + return result; +} + +/* Convert a double int into a wide int with precision PREC. */ +wide_int_ro +wide_int_ro::from_double_int (double_int di, unsigned int prec) +{ + HOST_WIDE_INT op = di.low; + wide_int result; + + result.precision = prec; + result.len = (prec <= HOST_BITS_PER_WIDE_INT) ? 1 : 2; + + if (prec < HOST_BITS_PER_WIDE_INT) + result.val[0] = sext_hwi (op, prec); + else + { + result.val[0] = op; + if (prec > HOST_BITS_PER_WIDE_INT) + { + if (prec < HOST_BITS_PER_DOUBLE_INT) + result.val[1] = sext_hwi (di.high, prec); + else + result.val[1] = di.high; + } + } + + if (result.len == 2) + result.canonize (); + + return result; +} + +/* Extract a constant integer from R. The bits of the integer are + returned. */ +wide_int_ro +wide_int_ro::from_rtx (const rtx_mode_t r) +{ + const_rtx x = get_rtx (r); + enum machine_mode mode = get_mode (r); + wide_int result; + unsigned int prec = GET_MODE_PRECISION (mode); + + gcc_assert (mode != VOIDmode); + + result.precision = prec; + + switch (GET_CODE (x)) + { + case CONST_INT: + result.val[0] = INTVAL (x); + result.len = 1; +#ifdef DEBUG_WIDE_INT + debug_wh ("wide_int:: %s = from_rtx ("HOST_WIDE_INT_PRINT_HEX")\n", + result, INTVAL (x)); +#endif + break; + +#if TARGET_SUPPORTS_WIDE_INT + case CONST_WIDE_INT: + { + int i; + result.len = CONST_WIDE_INT_NUNITS (x); + + for (i = 0; i < result.len; i++) + result.val[i] = CONST_WIDE_INT_ELT (x, i); + } + break; +#else + case CONST_DOUBLE: + result.len = 2; + result.val[0] = CONST_DOUBLE_LOW (x); + result.val[1] = CONST_DOUBLE_HIGH (x); + +#ifdef DEBUG_WIDE_INT + debug_whh ("wide_int:: %s = from_rtx ("HOST_WIDE_INT_PRINT_HEX" "HOST_WIDE_INT_PRINT_HEX")\n", + result, CONST_DOUBLE_HIGH (x), CONST_DOUBLE_LOW (x)); +#endif + + break; +#endif + + default: + gcc_unreachable (); + } + + return result; +} + +/* Construct a wide int from a buffer of length LEN. BUFFER will be + read according to the byte endianness and word endianness of the + target. Only the lower LEN bytes of the result are set; the + remaining high bytes are cleared. */ +wide_int_ro +wide_int_ro::from_buffer (const unsigned char *buffer, int len) +{ + wide_int result = wide_int::zero (len * BITS_PER_UNIT); + int words = len / UNITS_PER_WORD; + + /* We have to clear all the bits ourselves, as we merely or in values + below.
*/ + result.len = BLOCKS_NEEDED (len*BITS_PER_UNIT); + for (int i = 0; i < result.len; ++i) + result.val[i] = 0; + + for (int byte = 0; byte < len; byte++) + { + int offset; + int index; + int bitpos = byte * BITS_PER_UNIT; + unsigned HOST_WIDE_INT value; + + if (len > UNITS_PER_WORD) + { + int word = byte / UNITS_PER_WORD; + + if (WORDS_BIG_ENDIAN) + word = (words - 1) - word; + + offset = word * UNITS_PER_WORD; + + if (BYTES_BIG_ENDIAN) + offset += (UNITS_PER_WORD - 1) - (byte % UNITS_PER_WORD); + else + offset += byte % UNITS_PER_WORD; + } + else + offset = BYTES_BIG_ENDIAN ? (len - 1) - byte : byte; + + value = (unsigned HOST_WIDE_INT) buffer[offset]; + + index = bitpos / HOST_BITS_PER_WIDE_INT; + result.val[index] |= value << (bitpos % HOST_BITS_PER_WIDE_INT); + } + + result.canonize (); + + return result; +} + +/* Set RESULT from THIS; the sign is taken according to SGN. */ +void +wide_int_ro::to_mpz (mpz_t result, signop sgn) const +{ + bool negative = false; + wide_int tmp; + + if ((*this).neg_p (sgn)) + { + negative = true; + /* We use ones' complement to avoid the 0x80..0 edge case that + unary minus won't work on. */ + tmp = ~(*this); + } + else + tmp = *this; + + mpz_import (result, tmp.len, -1, sizeof (HOST_WIDE_INT), 0, 0, tmp.val); + + if (negative) + mpz_com (result, result); +} + +/* Returns VAL converted to TYPE. If WRAP is true, then out-of-range + values of VAL will be wrapped; otherwise, they will be set to the + appropriate minimum or maximum TYPE bound. */ +wide_int_ro +wide_int_ro::from_mpz (const_tree type, mpz_t val, bool wrap) +{ + size_t count, numb; + wide_int res; + unsigned int i; + + if (!wrap) + { + mpz_t min, max; + + mpz_init (min); + mpz_init (max); + get_type_static_bounds (type, min, max); + + if (mpz_cmp (val, min) < 0) + mpz_set (val, min); + else if (mpz_cmp (val, max) > 0) + mpz_set (val, max); + + mpz_clear (min); + mpz_clear (max); + } + + /* Determine the number of unsigned HOST_WIDE_INTs that are required + for representing the value. The code to calculate count is + extracted from the GMP manual, section "Integer Import and Export": + http://gmplib.org/manual/Integer-Import-and-Export.html */ + numb = 8*sizeof(HOST_WIDE_INT); + count = (mpz_sizeinbase (val, 2) + numb-1) / numb; + if (count < 1) + count = 1; + + /* Need to initialize the number because it writes nothing for + zero. */ + for (i = 0; i < count; i++) + res.val[i] = 0; + + res.len = count; + + mpz_export (res.val, &count, -1, sizeof (HOST_WIDE_INT), 0, 0, val); + + res.precision = TYPE_PRECISION (type); + if (mpz_sgn (val) < 0) + res = -res; + + return res; +} + +/* + * Largest and smallest values in a mode. + */ + +/* Produce the largest SGNed number that is represented in TYPE_PREC. + The resulting number is placed in a wide int of size RESULT_PREC. + If RESULT_PREC is 0, the answer will have TYPE_PREC precision. */ +wide_int_ro +wide_int_ro::max_value (unsigned int type_prec, signop sgn, + unsigned int result_prec) +{ + unsigned int prec = result_prec ? result_prec : type_prec; + + if (type_prec == 0) + return wide_int::zero (result_prec + ? result_prec + : TYPE_PRECISION (integer_type_node)); + + if (sgn == UNSIGNED) + { + if (prec <= type_prec) + /* The unsigned max is just all ones, for which the + compressed rep is just a single HWI. */ + return wide_int::minus_one (prec); + else + return wide_int::mask (type_prec, false, prec); + } + else + /* The signed max is all ones except the top bit. This must be + explicitly represented.
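   For instance (illustrative, spelling out the result precision
   explicitly):

     wide_int smax = wide_int::max_value (8, SIGNED, 8);    // 0x7f
     wide_int umax = wide_int::max_value (8, UNSIGNED, 8);  // 0xff
     wide_int smin = wide_int::min_value (8, SIGNED, 8);    // -0x80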
*/ + return wide_int::mask (type_prec-1, false, prec); +} + +/* Produce the smallest SGNed number that is represented in TYPE_PREC. + The resulting number is placed in a wide int of size RESULT_PREC. + IF RESULT_PREC is 0, answer will have TYPE_PREC precision. */ +wide_int_ro +wide_int_ro::min_value (unsigned int type_prec, signop sgn, + unsigned int result_prec) +{ + if (result_prec == 0) + result_prec = type_prec; + + if (type_prec == 0) + return wide_int_ro::zero (result_prec + ? result_prec + : TYPE_PRECISION (integer_type_node)); + + if (sgn == UNSIGNED) + { + /* The unsigned min is just all zeros, for which the compressed + rep is just a single HWI. */ + wide_int result; + result.len = 1; + result.precision = result_prec; + result.val[0] = 0; + return result; + } + else + { + /* The signed min is all zeros except the top bit. This must be + explicitly represented. */ + return set_bit_in_zero (type_prec - 1, result_prec); + } +} + +/* + * Public utilities. + */ + +/* Check the upper HOST_WIDE_INTs of src to see if the length can be + shortened. An upper HOST_WIDE_INT is unnecessary if it is all ones + or zeros and the top bit of the next lower word matches. + + This function may change the representation of THIS, but does not + change the value that THIS represents. It does not sign extend in + the case that the size of the mode is less than + HOST_BITS_PER_WIDE_INT. */ +void +wide_int_ro::canonize () +{ + int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1); + int blocks_needed = BLOCKS_NEEDED (precision); + HOST_WIDE_INT top; + int i; + + if (len > blocks_needed) + len = blocks_needed; + + /* Clean up the top bits for any mode that is not a multiple of a + HWI and is not compressed. */ + if (len == blocks_needed && small_prec) + val[len - 1] = sext_hwi (val[len - 1], small_prec); + + if (len == 1) + return; + + top = val[len - 1]; + if (top != 0 && top != (HOST_WIDE_INT)-1) + return; + + /* At this point we know that the top is either 0 or -1. Find the + first block that is not a copy of this. */ + for (i = len - 2; i >= 0; i--) + { + HOST_WIDE_INT x = val[i]; + if (x != top) + { + if (SIGN_MASK (x) == top) + { + len = i + 1; + return; + } + + /* We need an extra block because the top bit block i does + not match the extension. */ + len = i + 2; + return; + } + } + + /* The number is 0 or -1. */ + len = 1; +} + +/* Copy THIS replacing the precision with PREC. It can do any of + truncation, extension or copying. This function is only available + with the default wide-int form as the other forms have fixed + precisions. */ +wide_int_ro +wide_int_ro::force_to_size (unsigned int prec, signop sgn) const +{ + wide_int result; + int blocks_needed = BLOCKS_NEEDED (prec); + int i; + + result.precision = prec; + /* If this is a value that has come in from a hwi, then it does not + have a proper precision. However, it is in canonical form, so + just copy and zap in the precision and return. */ + if (precision == 0) + { + /* Some zero prec numbers take 2 hwi's. If the target prec is + small, we may need to shorten it. */ + result.len = len; + if (prec <= HOST_BITS_PER_WIDE_INT) + result.len = 1; + for (int i = 0; i < result.len; ++i) + result.val[i] = val[i]; + return result; + } + + result.len = blocks_needed < len ? blocks_needed : len; + for (i = 0; i < result.len; i++) + result.val[i] = val[i]; + + if (prec == precision) + /* Nothing much to do. 
*/ + ; + else if (prec > precision) + { + /* Expanding */ + int small_precision = precision & (HOST_BITS_PER_WIDE_INT - 1); + + if (sgn == UNSIGNED) + { + /* The top block in the existing rep must be zero extended. */ + if (small_precision + /* We need to ensure we only extend the last block of + the original number, if the number has not been + compressed. If the number has been compressed, then + all the bits are significant. */ + && len == BLOCKS_NEEDED (precision)) + result.val[len-1] = zext_hwi (result.val[len-1], small_precision); + else if (len < blocks_needed + && small_precision == 0 + && result.val[result.len - 1] < 0) + { + /* We need to uncompress the original value first. */ + while (result.len < BLOCKS_NEEDED (precision)) + result.val[result.len++] = (HOST_WIDE_INT)-1; + /* We need to put the 0 block on top to keep the value + from being sign extended. */ + if (result.len < blocks_needed) + result.val[result.len++] = 0; + } + } + /* We have to do this because we cannot guarantee that there is + not trash in the top block of an uncompressed value. For a + compressed value, all the bits are significant. */ + else if (small_precision + && len == BLOCKS_NEEDED (precision)) + result.val[len-1] = sext_hwi (result.val[len-1], small_precision); + } + else + result.canonize (); + +#ifdef DEBUG_WIDE_INT + debug_wwvs ("wide_int:: %s = force_to_size (%s, prec = %d %s)\n", + result, *this, prec, sgn==UNSIGNED ? "U" : "S"); +#endif + + return result; +} + +/* This function hides the fact that we cannot rely on the bits beyond + the precision. This issue comes up in the relational comparisons + where we do allow comparisons of values of different precisions. */ +static inline HOST_WIDE_INT +selt (const HOST_WIDE_INT *a, unsigned int len, + unsigned int blocks_needed, + unsigned int small_prec, + unsigned int index, signop sgn) +{ + if (index >= len) + { + if (index < blocks_needed || sgn == SIGNED) + /* Signed or within the precision. */ + return SIGN_MASK (a[len - 1]); + else + /* Unsigned extension beyond the precision. */ + return 0; + } + + if (small_prec && index == blocks_needed - 1) + { + /* The top block is partially outside of the precision. */ + if (sgn == SIGNED) + return sext_hwi (a[index], small_prec); + else + return zext_hwi (a[index], small_prec); + } + return a[index]; +} + +/* Find the highest bit represented in a wide int. This will in + general have the same value as the sign bit. */ +static inline HOST_WIDE_INT +top_bit_of (const HOST_WIDE_INT *a, unsigned int len, unsigned int prec) +{ + if (len == BLOCKS_NEEDED (prec) + && (prec & (HOST_BITS_PER_WIDE_INT - 1))) + return (a[len - 1] >> (prec & (HOST_BITS_PER_WIDE_INT - 1))) & 1; + else + return (a[len - 1] >> (HOST_BITS_PER_WIDE_INT - 1)) & 1; +} + +/* + * Comparisons; note that only equality is an operator. The other + * comparisons cannot be operators, since they are inherently signed or + * unsigned and C++ has no such operators. + */ + +/* Return true if OP0 == OP1. */ +bool +wide_int_ro::eq_p_large (const HOST_WIDE_INT *op0, unsigned int op0len, + unsigned int prec, + const HOST_WIDE_INT *op1, unsigned int op1len) +{ + int l0 = op0len - 1; + unsigned int small_prec = prec & (HOST_BITS_PER_WIDE_INT - 1); + + if (op0len != op1len) + return false; + + if (op0len == BLOCKS_NEEDED (prec) && small_prec) + { + /* It does not matter if we zext or sext here; we just have to + do both the same way.
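   For example, at precision 8 the top elements 0x1ff and 0xff denote
   the same 8-bit value; only the low 8 bits participate, so both
   sides are masked the same way before being compared.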
*/ + if (zext_hwi (op0 [l0], small_prec) != zext_hwi (op1 [l0], small_prec)) + return false; + l0--; + } + + while (l0 >= 0) + if (op0[l0] != op1[l0]) + return false; + else + l0--; + + return true; +} + +/* Return true if OP0 < OP1 using signed comparisons. */ +bool +wide_int_ro::lts_p_large (const HOST_WIDE_INT *op0, unsigned int op0len, + unsigned int p0, + const HOST_WIDE_INT *op1, unsigned int op1len, + unsigned int p1) +{ + HOST_WIDE_INT s0, s1; + unsigned HOST_WIDE_INT u0, u1; + unsigned int blocks_needed0 = BLOCKS_NEEDED (p0); + unsigned int blocks_needed1 = BLOCKS_NEEDED (p1); + unsigned int small_prec0 = p0 & (HOST_BITS_PER_WIDE_INT - 1); + unsigned int small_prec1 = p1 & (HOST_BITS_PER_WIDE_INT - 1); + int l = MAX (op0len - 1, op1len - 1); + + /* Only the top block is compared as signed. The rest are unsigned + comparisons. */ + s0 = selt (op0, op0len, blocks_needed0, small_prec0, l, SIGNED); + s1 = selt (op1, op1len, blocks_needed1, small_prec1, l, SIGNED); + if (s0 < s1) + return true; + if (s0 > s1) + return false; + + l--; + while (l >= 0) + { + u0 = selt (op0, op0len, blocks_needed0, small_prec0, l, SIGNED); + u1 = selt (op1, op1len, blocks_needed1, small_prec1, l, SIGNED); + + if (u0 < u1) + return true; + if (u0 > u1) + return false; + l--; + } + + return false; +} + +/* Returns -1 if OP0 < OP1, 0 if OP0 == OP1 and 1 if OP0 > OP1 using + signed compares. */ +int +wide_int_ro::cmps_large (const HOST_WIDE_INT *op0, unsigned int op0len, + unsigned int p0, + const HOST_WIDE_INT *op1, unsigned int op1len, + unsigned int p1) +{ + HOST_WIDE_INT s0, s1; + unsigned HOST_WIDE_INT u0, u1; + unsigned int blocks_needed0 = BLOCKS_NEEDED (p0); + unsigned int blocks_needed1 = BLOCKS_NEEDED (p1); + unsigned int small_prec0 = p0 & (HOST_BITS_PER_WIDE_INT - 1); + unsigned int small_prec1 = p1 & (HOST_BITS_PER_WIDE_INT - 1); + int l = MAX (op0len - 1, op1len - 1); + + /* Only the top block is compared as signed. The rest are unsigned + comparisons. */ + s0 = selt (op0, op0len, blocks_needed0, small_prec0, l, SIGNED); + s1 = selt (op1, op1len, blocks_needed1, small_prec1, l, SIGNED); + if (s0 < s1) + return -1; + if (s0 > s1) + return 1; + + l--; + while (l >= 0) + { + u0 = selt (op0, op0len, blocks_needed0, small_prec0, l, SIGNED); + u1 = selt (op1, op1len, blocks_needed1, small_prec1, l, SIGNED); + + if (u0 < u1) + return -1; + if (u0 > u1) + return 1; + l--; + } + + return 0; +} + +/* Return true if OP0 < OP1 using unsigned comparisons. */ +bool +wide_int_ro::ltu_p_large (const HOST_WIDE_INT *op0, unsigned int op0len, unsigned int p0, + const HOST_WIDE_INT *op1, unsigned int op1len, unsigned int p1) +{ + unsigned HOST_WIDE_INT x0; + unsigned HOST_WIDE_INT x1; + unsigned int blocks_needed0 = BLOCKS_NEEDED (p0); + unsigned int blocks_needed1 = BLOCKS_NEEDED (p1); + unsigned int small_prec0 = p0 & (HOST_BITS_PER_WIDE_INT - 1); + unsigned int small_prec1 = p1 & (HOST_BITS_PER_WIDE_INT - 1); + int l = MAX (op0len - 1, op1len - 1); + + while (l >= 0) + { + x0 = selt (op0, op0len, blocks_needed0, small_prec0, l, UNSIGNED); + x1 = selt (op1, op1len, blocks_needed1, small_prec1, l, UNSIGNED); + if (x0 < x1) + return true; + if (x0 > x1) + return false; + l--; + } + + return false; +} + +/* Returns -1 if OP0 < OP1, 0 if OP0 == OP1 and 1 if OP0 > OP1 using + unsigned compares. 
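   As an illustration of the signed/unsigned split (a sketch using the
   static forms that the INT_CST_LT macros in tree.h also use):

     wide_int a = wide_int::from_uhwi (0xff, 8);  // -1 signed, 255 unsigned
     wide_int b = wide_int::from_uhwi (1, 8);
     // wide_int::lts_p (a, b) is true:  -1 < 1.
     // wide_int::ltu_p (a, b) is false: 255 > 1.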
*/ +int +wide_int_ro::cmpu_large (const HOST_WIDE_INT *op0, unsigned int op0len, unsigned int p0, + const HOST_WIDE_INT *op1, unsigned int op1len, unsigned int p1) +{ + unsigned HOST_WIDE_INT x0; + unsigned HOST_WIDE_INT x1; + unsigned int blocks_needed0 = BLOCKS_NEEDED (p0); + unsigned int blocks_needed1 = BLOCKS_NEEDED (p1); + unsigned int small_prec0 = p0 & (HOST_BITS_PER_WIDE_INT - 1); + unsigned int small_prec1 = p1 & (HOST_BITS_PER_WIDE_INT - 1); + int l = MAX (op0len - 1, op1len - 1); + + while (l >= 0) + { + x0 = selt (op0, op0len, blocks_needed0, small_prec0, l, UNSIGNED); + x1 = selt (op1, op1len, blocks_needed1, small_prec1, l, UNSIGNED); + if (x0 < x1) + return -1; + if (x0 > x1) + return 1; + l--; + } + + return 0; +} + +/* Return true if THIS has the sign bit set to 1 and all other bits are + zero. */ +bool +wide_int_ro::only_sign_bit_p (unsigned int prec) const +{ + int i; + HOST_WIDE_INT x; + int small_prec; + bool result; + + if (BLOCKS_NEEDED (prec) != len) + { + result = false; + goto ex; + } + + for (i=0; i < len - 1; i++) + if (val[i] != 0) + { + result = false; + goto ex; + } + + x = val[len - 1]; + small_prec = prec & (HOST_BITS_PER_WIDE_INT - 1); + if (small_prec) + x = x << (HOST_BITS_PER_WIDE_INT - small_prec); + + result = x == ((HOST_WIDE_INT)1) << (HOST_BITS_PER_WIDE_INT - 1); + + ex: + +#ifdef DEBUG_WIDE_INT + debug_vw ("wide_int:: %d = only_sign_bit_p (%s)\n", result, *this); +#endif + return result; +} + +/* Returns true if THIS fits into the range of TYPE. The signedness of + THIS is assumed to be the same as the signedness of TYPE. */ +bool +wide_int_ro::fits_to_tree_p (const_tree type) const +{ + unsigned int type_prec = TYPE_PRECISION (type); + + if (precision <= type_prec) + return true; + + if (TYPE_SIGN (type) == UNSIGNED) + return *this == zext (type_prec); + else + { + /* For signed, we can do a couple of quick tests since the + compressed rep looks like it was just sign extended. */ + if (len < BLOCKS_NEEDED (type_prec)) + return true; + + if (len > BLOCKS_NEEDED (type_prec)) + return false; + + return *this == sext (type_prec); + } +} + +/* + * Extension. + */ + +/* Sign extend THIS starting at OFFSET. The precision of the result + is the same as that of THIS. */ +wide_int_ro +wide_int_ro::sext (unsigned int offset) const +{ + wide_int result; + int off; + + gcc_assert (precision >= offset); + + if (precision <= HOST_BITS_PER_WIDE_INT) + { + result.precision = precision; + if (offset < precision) + result.val[0] = sext_hwi (val[0], offset); + else + /* If offset is greater or equal to precision there is nothing + to do since the internal rep is already sign extended. */ + result.val[0] = val[0]; + + result.len = 1; + } + else if (precision == offset) + result = *this; + else + { + result = decompress (offset, precision); + + /* Now we can do the real sign extension. */ + off = offset & (HOST_BITS_PER_WIDE_INT - 1); + if (off) + { + int block = BLOCK_OF (offset); + result.val[block] = sext_hwi (val[block], off); + result.len = block + 1; + } + /* We never need an extra element for sign extended values but + we may need to compress. */ + result.canonize (); + } + +#ifdef DEBUG_WIDE_INT + debug_wwv ("wide_int:: %s = (%s sext %d)\n", result, *this, offset); +#endif + + return result; +} + +/* Zero extend THIS starting at OFFSET. The precision of the result + is the same as that of THIS.
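   For example (illustrative), at precision 16:

     wide_int x = wide_int::from_uhwi (0xff, 16);
     wide_int s = x.sext (4);  // low 4 bits sign extended: -1 (0xffff)
     wide_int z = x.zext (4);  // low 4 bits zero extended: 0x000f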
*/ +wide_int_ro +wide_int_ro::zext (unsigned int offset) const +{ + wide_int result; + int off; + int block; + + gcc_assert (precision >= offset); + + if (precision <= HOST_BITS_PER_WIDE_INT) + { + result.precision = precision; + if (offset < precision) + result.val[0] = zext_hwi (val[0], offset); + else if (offset == precision) + result.val[0] = val[0]; + /* If offset was greater than the precision we need to zero + extend from the old precision since the internal rep was + equivalent to sign extended. */ + else + result.val[0] = zext_hwi (val[0], precision); + + result.len = 1; + } + else if (precision == offset) + result = *this; + else + { + result = decompress (offset, precision); + + /* Now we can do the real zero extension. */ + off = offset & (HOST_BITS_PER_WIDE_INT - 1); + block = BLOCK_OF (offset); + if (off) + { + result.val[block] = zext_hwi (val[block], off); + result.len = block + 1; + } + else + /* See if we need an extra zero element to satisfy the + compression rule. */ + if (result.val[block - 1] < 0 && offset < precision) + { + result.val[block] = 0; + result.len += 1; + } + result.canonize (); + } +#ifdef DEBUG_WIDE_INT + debug_wwv ("wide_int:: %s = (%s zext %d)\n", result, *this, offset); +#endif + return result; +} + +/* + * Masking, inserting, shifting, rotating. + */ + +/* Return a value with a one bit inserted in THIS at BITPOS. */ +wide_int_ro +wide_int_ro::set_bit (unsigned int bitpos) const +{ + wide_int result; + int i, j; + + if (bitpos >= precision) + result = *this; + else + { + result = decompress (bitpos, precision); + j = bitpos / HOST_BITS_PER_WIDE_INT; + i = bitpos & (HOST_BITS_PER_WIDE_INT - 1); + result.val[j] |= ((HOST_WIDE_INT)1) << i; + } + +#ifdef DEBUG_WIDE_INT + debug_wwv ("wide_int_ro:: %s = (%s set_bit %d)\n", result, *this, bitpos); +#endif + return result; +} + +/* Insert a 1 bit into 0 at BITPOS producing a number with precision + PREC. */ +wide_int_ro +wide_int_ro::set_bit_in_zero (unsigned int bitpos, unsigned int prec) +{ + wide_int result; + int extra_bit = 0; + /* We need one extra bit of 0 above the set bit for the compression + of the bits above the set bit when the bit that is set is the top + bit of a compressed number. When setting the actual top bit + (non-compressed) we can just set it as there are no bits above + it. */ + if (bitpos % HOST_BITS_PER_WIDE_INT == HOST_BITS_PER_WIDE_INT-1 + && bitpos+1 != prec) + extra_bit = 1; + int blocks_needed = BLOCKS_NEEDED (bitpos + 1 + extra_bit); + int i, j; + + result.precision = prec; + if (bitpos >= prec) + { + result.len = 1; + result.val[0] = 0; + } + else + { + result.len = blocks_needed; + for (i = 0; i < blocks_needed; i++) + result.val[i] = 0; + + j = bitpos / HOST_BITS_PER_WIDE_INT; + i = bitpos & (HOST_BITS_PER_WIDE_INT - 1); + result.val[j] |= ((HOST_WIDE_INT)1) << i; + } + +#ifdef DEBUG_WIDE_INT + debug_wv ("wide_int_ro:: %s = set_bit_in_zero (%d)\n", result, bitpos); +#endif + + return result; +} + +/* Insert WIDTH bits from OP0 into THIS starting at START.
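   Bits [START, START + WIDTH) of the result come from OP0 shifted
   into place; all other bits come from THIS. For example
   (illustrative):

     wide_int t = wide_int::from_uhwi (0xff00, 16);
     wide_int v = wide_int::from_uhwi (0x5, 16);
     wide_int r = t.insert (v, 4, 4);  // r == 0xff50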
*/ +wide_int_ro +wide_int_ro::insert (const wide_int_ro &op0, unsigned int start, + unsigned int width) const +{ + wide_int result; + wide_int mask; + wide_int tmp; + + if (start >= precision) + return *this; + + gcc_checking_assert (op0.precision >= width); + + if (start + width >= precision) + width = precision - start; + + mask = shifted_mask (start, width, false, precision); + tmp = op0.lshift_widen (start, precision); + result = tmp & mask; + + tmp = and_not (mask); + result = result | tmp; + +#ifdef DEBUG_WIDE_INT + debug_wwwvv ("wide_int_ro:: %s = (%s insert %s start = %d width = %d)\n", + result, *this, op0, start, width); +#endif + + return result; +} + +/* bswap THIS. */ +wide_int_ro +wide_int_ro::bswap () const +{ + wide_int result; + int i, s; + int end; + int len = BLOCKS_NEEDED (precision); + + /* This is not a well defined operation if the precision is not a + multiple of 8. */ + gcc_assert ((precision & 0x7) == 0); + + result.precision = precision; + result.len = len; + + for (i = 0; i < len; i++) + result.val[i] = 0; + + /* Only swap the bytes that are not the padding. */ + if ((precision & (HOST_BITS_PER_WIDE_INT - 1)) + && (this->len == len)) + end = precision; + else + end = this->len * HOST_BITS_PER_WIDE_INT; + + for (s = 0; s < end; s += 8) + { + unsigned int d = precision - s - 8; + unsigned HOST_WIDE_INT byte; + + int block = s / HOST_BITS_PER_WIDE_INT; + int offset = s & (HOST_BITS_PER_WIDE_INT - 1); + + byte = (val[block] >> offset) & 0xff; + + block = d / HOST_BITS_PER_WIDE_INT; + offset = d & (HOST_BITS_PER_WIDE_INT - 1); + + result.val[block] |= byte << offset; + } + + result.canonize (); + +#ifdef DEBUG_WIDE_INT + debug_ww ("wide_int_ro:: %s = bswap (%s)\n", result, *this); +#endif + return result; +} + +/* Return a result mask where the lower WIDTH bits are ones and the + bits above that up to the precision are zeros. The result is + inverted if NEGATE is true. The result is made with PREC. */ +wide_int_ro +wide_int_ro::mask (unsigned int width, bool negate, unsigned int prec) +{ + wide_int result; + unsigned int i = 0; + int shift; + + gcc_assert (width < 4 * MAX_BITSIZE_MODE_ANY_INT); + gcc_assert (prec <= 4 * MAX_BITSIZE_MODE_ANY_INT); + + if (width == prec) + { + if (negate) + result = wide_int::zero (prec); + else + result = wide_int::minus_one (prec); + } + else if (width == 0) + { + if (negate) + result = wide_int::minus_one (prec); + else + result = wide_int::zero (prec); + } + else + { + result.precision = prec; + + while (i < width / HOST_BITS_PER_WIDE_INT) + result.val[i++] = negate ? 0 : (HOST_WIDE_INT)-1; + + shift = width & (HOST_BITS_PER_WIDE_INT - 1); + if (shift != 0) + { + HOST_WIDE_INT last = (((HOST_WIDE_INT)1) << shift) - 1; + result.val[i++] = negate ? ~last : last; + } + else + result.val[i++] = negate ? (HOST_WIDE_INT)-1 : 0; + result.len = i; + } + +#ifdef DEBUG_WIDE_INT + debug_wvv ("wide_int_ro:: %s = mask (%d, negate = %d)\n", result, width, negate); +#endif + return result; +} + +/* Return a result mask of WIDTH ones starting at START and the + bits above that up to the precision are zeros. The result is + inverted if NEGATE is true. 
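+ For example, shifted_mask (4, 8, false, 32) yields 0xff0 (ones in + bits 4..11), and shifted_mask (4, 8, true, 32) yields 0xfffff00f.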
*/ +wide_int_ro +wide_int_ro::shifted_mask (unsigned int start, unsigned int width, + bool negate, unsigned int prec) +{ + wide_int result; + unsigned int i = 0; + unsigned int shift; + unsigned int end = start + width; + HOST_WIDE_INT block; + + gcc_assert (start < 4 * MAX_BITSIZE_MODE_ANY_INT); + + if (start + width > prec) + width = prec - start; + + if (width == 0) + { + if (negate) + result = wide_int::minus_one (prec); + else + result = wide_int::zero (prec); +#ifdef DEBUG_WIDE_INT + debug_wvvv + ("wide_int:: %s = shifted_mask (start = %d width = %d negate = %d)\n", + result, start, width, negate); +#endif + return result; + } + + result.precision = prec; + + while (i < start / HOST_BITS_PER_WIDE_INT) + result.val[i++] = negate ? (HOST_WIDE_INT)-1 : 0; + + shift = start & (HOST_BITS_PER_WIDE_INT - 1); + if (shift) + { + block = (((HOST_WIDE_INT)1) << shift) - 1; + shift = (end) & (HOST_BITS_PER_WIDE_INT - 1); + if (shift) + { + /* case 000111000 */ + block = (((HOST_WIDE_INT)1) << shift) - block - 1; + result.val[i++] = negate ? ~block : block; + result.len = i; + +#ifdef DEBUG_WIDE_INT + debug_wvvv + ("wide_int_ro:: %s = shifted_mask (start = %d width = %d negate = %d)\n", + result, start, width, negate); +#endif + return result; + } + else + /* ...111000 */ + result.val[i++] = negate ? block : ~block; + } + + while (i < end / HOST_BITS_PER_WIDE_INT) + /* 1111111 */ + result.val[i++] = negate ? 0 : (HOST_WIDE_INT)-1; + + shift = end & (HOST_BITS_PER_WIDE_INT - 1); + if (shift != 0) + { + /* 000011111 */ + block = (((HOST_WIDE_INT)1) << shift) - 1; + result.val[i++] = negate ? ~block : block; + } + else if (end < prec) + result.val[i++] = negate ? (HOST_WIDE_INT)-1 : 0; + + result.len = i; + +#ifdef DEBUG_WIDE_INT + debug_wvvv + ("wide_int_ro:: %s = shifted_mask (start = %d width = %d negate = %d)\n", + result, start, width, negate); +#endif + + return result; +} + +/* Ensure there are no undefined bits returned by elt (). This is + useful for when we might hash the value returned by elt and want to + ensure the top undefined bit are in fact, defined. If sgn is + UNSIGNED, the bits are zeroed, if sgn is SIGNED, then the bits are + copies of the top bit (aka sign bit) as determined by + precision. */ +void +wide_int_ro::clear_undef (signop sgn) +{ + int small_prec = precision % HOST_BITS_PER_WIDE_INT; + if (small_prec) + { + if (len == (precision + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT) + { + if (sgn == UNSIGNED) + val[len-1] &= ((unsigned HOST_WIDE_INT)1 << small_prec) - 1; + else + { + int cnt = HOST_BITS_PER_WIDE_INT - small_prec; + val[len-1] = (val[len-1] << cnt) >> cnt; + } + } + } + /* Do we have a int:0 inside a struct? */ + else if (precision == 0) + val[0] = 0; +} + + +/* + * logical operations. + */ + +/* Return THIS & OP1. 
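+ The operands may have different LENs: any block beyond an operand's + LEN behaves as a copy of that operand's sign mask, so, for example, + ANDing with a short non-negative OP1 forces the high blocks of the + result to zero.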
*/ +wide_int_ro +wide_int_ro::and_large (const HOST_WIDE_INT *op0, unsigned int op0len, + unsigned int prec, + const HOST_WIDE_INT *op1, unsigned int op1len) +{ + wide_int result; + int l0 = op0len - 1; + int l1 = op1len - 1; + bool need_canon = true; + + result.len = MAX (op0len, op1len); + result.precision = prec; + + if (l0 > l1) + { + HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec); + if (op1mask == 0) + { + l0 = l1; + result.len = l1 + 1; + } + else + { + need_canon = false; + while (l0 > l1) + { + result.val[l0] = op0[l0]; + l0--; + } + } + } + else if (l1 > l0) + { + HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec); + if (op0mask == 0) + result.len = l0 + 1; + else + { + need_canon = false; + while (l1 > l0) + { + result.val[l1] = op1[l1]; + l1--; + } + } + } + + while (l0 >= 0) + { + result.val[l0] = op0[l0] & op1[l0]; + l0--; + } + + if (need_canon) + result.canonize (); + + return result; +} + +/* Return THIS & ~OP1. */ +wide_int_ro +wide_int_ro::and_not_large (const HOST_WIDE_INT *op0, unsigned int op0len, + unsigned int prec, + const HOST_WIDE_INT *op1, unsigned int op1len) +{ + wide_int result; + int l0 = op0len - 1; + int l1 = op1len - 1; + bool need_canon = true; + + result.len = MAX (op0len, op1len); + result.precision = prec; + + if (l0 > l1) + { + HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec); + if (op1mask != 0) + { + l0 = l1; + result.len = l1 + 1; + } + else + { + need_canon = false; + while (l0 > l1) + { + result.val[l0] = op0[l0]; + l0--; + } + } + } + else if (l1 > l0) + { + HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec); + if (op0mask == 0) + result.len = l0 + 1; + else + { + need_canon = false; + while (l1 > l0) + { + result.val[l1] = ~op1[l1]; + l1--; + } + } + } + + while (l0 >= 0) + { + result.val[l0] = op0[l0] & ~op1[l0]; + l0--; + } + + if (need_canon) + result.canonize (); + + return result; +} + +/* Return THIS | OP1. */ +wide_int_ro +wide_int_ro::or_large (const HOST_WIDE_INT *op0, unsigned int op0len, + unsigned int prec, + const HOST_WIDE_INT *op1, unsigned int op1len) +{ + wide_int result; + int l0 = op0len - 1; + int l1 = op1len - 1; + bool need_canon = true; + + result.len = MAX (op0len, op1len); + result.precision = prec; + + if (l0 > l1) + { + HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec); + if (op1mask != 0) + { + l0 = l1; + result.len = l1 + 1; + } + else + { + need_canon = false; + while (l0 > l1) + { + result.val[l0] = op0[l0]; + l0--; + } + } + } + else if (l1 > l0) + { + HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec); + if (op0mask != 0) + result.len = l0 + 1; + else + { + need_canon = false; + while (l1 > l0) + { + result.val[l1] = op1[l1]; + l1--; + } + } + } + + while (l0 >= 0) + { + result.val[l0] = op0[l0] | op1[l0]; + l0--; + } + + if (need_canon) + result.canonize (); + + return result; +} + +/* Return THIS | ~OP1. 
*/ +wide_int_ro +wide_int_ro::or_not_large (const HOST_WIDE_INT *op0, unsigned int op0len, + unsigned int prec, + const HOST_WIDE_INT *op1, unsigned int op1len) +{ + wide_int result; + int l0 = op0len - 1; + int l1 = op1len - 1; + bool need_canon = true; + + result.len = MAX (op0len, op1len); + result.precision = prec; + + if (l0 > l1) + { + HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec); + if (op1mask == 0) + { + l0 = l1; + result.len = l1 + 1; + } + else + { + need_canon = false; + while (l0 > l1) + { + result.val[l0] = op0[l0]; + l0--; + } + } + } + else if (l1 > l0) + { + HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec); + if (op0mask != 0) + result.len = l0 + 1; + else + { + need_canon = false; + while (l1 > l0) + { + result.val[l1] = ~op1[l1]; + l1--; + } + } + } + + while (l0 >= 0) + { + result.val[l0] = op0[l0] | ~op1[l0]; + l0--; + } + + if (need_canon) + result.canonize (); + + return result; +} + +/* Return the exclusive ior (xor) of THIS and OP1. */ +wide_int_ro +wide_int_ro::xor_large (const HOST_WIDE_INT *op0, unsigned int op0len, + unsigned int prec, + const HOST_WIDE_INT *op1, unsigned int op1len) +{ + wide_int result; + int l0 = op0len - 1; + int l1 = op1len - 1; + + result.len = MAX (op0len, op1len); + result.precision = prec; + + if (l0 > l1) + { + HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec); + while (l0 > l1) + { + result.val[l0] = op0[l0] ^ op1mask; + l0--; + } + } + + if (l1 > l0) + { + HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec); + while (l1 > l0) + { + result.val[l1] = op0mask ^ op1[l1]; + l1--; + } + } + + while (l0 >= 0) + { + result.val[l0] = op0[l0] ^ op1[l0]; + l0--; + } + + result.canonize (); + +#ifdef DEBUG_WIDE_INT + debug_waa ("wide_int_ro:: %s = (%s ^ %s)\n", + result, op0, op0len, prec, op1, op1len, prec); +#endif + return result; +} + +/* + * math + */ + +/* Absolute value of THIS. */ +wide_int_ro +wide_int_ro::abs () const +{ + wide_int result; + gcc_checking_assert (precision); + + if (sign_mask ()) + result = neg (); + else + result = *this; + +#ifdef DEBUG_WIDE_INT + debug_ww ("wide_int_ro:: %s = abs (%s)\n", result, *this); +#endif + return result; +} + +/* Add THIS and OP1. If the pointer to OVERFLOW is not 0, set + OVERFLOW if the value overflows. */ +wide_int_ro +wide_int_ro::add_large (const HOST_WIDE_INT *op0, unsigned int op0len, + unsigned int prec, + const HOST_WIDE_INT *op1, unsigned int op1len, + signop sgn, bool *overflow) +{ + wide_int result; + unsigned HOST_WIDE_INT o0 = 0; + unsigned HOST_WIDE_INT o1 = 0; + unsigned HOST_WIDE_INT x = 0; + unsigned HOST_WIDE_INT carry = 0; + unsigned HOST_WIDE_INT old_carry = 0; + unsigned HOST_WIDE_INT mask0, mask1; + unsigned int i, small_prec; + + result.precision = prec; + result.len = MAX (op0len, op1len); + mask0 = -top_bit_of (op0, op0len, prec); + mask1 = -top_bit_of (op1, op1len, prec); + /* Add all of the explicitly defined elements. */ + + for (i = 0; i < result.len; i++) + { + o0 = i < op0len ? (unsigned HOST_WIDE_INT)op0[i] : mask0; + o1 = i < op1len ? (unsigned HOST_WIDE_INT)op1[i] : mask1; + x = o0 + o1 + carry; + result.val[i] = x; + old_carry = carry; + carry = carry == 0 ? x < o0 : x <= o0; + } + + if (result.len * HOST_BITS_PER_WIDE_INT < prec) + { + result.val[result.len] = mask0 + mask1 + carry; + result.len++; + if (overflow) + *overflow = false; + } + else if (overflow) + { + if (sgn == SIGNED) + { + int p = (result.len == BLOCKS_NEEDED (prec) + ?
HOST_BITS_PER_WIDE_INT + : prec & (HOST_BITS_PER_WIDE_INT - 1) ) - 1; + HOST_WIDE_INT x = (result.val[result.len - 1] ^ o0) + & (result.val[result.len - 1] ^ o1); + x = (x >> p) & 1; + *overflow = (x != 0); + } + else + { + if (old_carry) + *overflow = ((unsigned HOST_WIDE_INT)result.val[result.len - 1] <= o0); + else + *overflow = ((unsigned HOST_WIDE_INT)result.val[result.len - 1] < o0); + } + } + + small_prec = prec & (HOST_BITS_PER_WIDE_INT - 1); + if (small_prec != 0 && BLOCKS_NEEDED (prec) == result.len) + { + /* Modes with weird precisions. */ + i = result.len - 1; + result.val[i] = sext_hwi (result.val[i], small_prec); + } + + result.canonize (); + + return result; +} + + +/* Count the leading zeros of THIS within its precision. */ +wide_int_ro +wide_int_ro::clz () const +{ + int i; + int start; + int count; + HOST_WIDE_INT v; + int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1); + + gcc_checking_assert (precision); + + if (zero_p ()) + { + enum machine_mode mode = mode_for_size (precision, MODE_INT, 0); + if (mode == BLKmode) + mode = mode_for_size (precision, MODE_PARTIAL_INT, 0); + + /* Even if the value at zero is undefined, we have to come up + with some replacement. This seems good enough. */ + if (mode == BLKmode) + count = precision; + else if (!CLZ_DEFINED_VALUE_AT_ZERO (mode, count)) + count = precision; + } + else if (neg_p (SIGNED)) + count = 0; + else + { + /* The high order block is special if it is the last block and the + precision is not an even multiple of HOST_BITS_PER_WIDE_INT. We + have to clear out any ones above the precision before doing clz + on this block. */ + if (BLOCKS_NEEDED (precision) == len && small_prec) + { + v = zext_hwi (val[len - 1], small_prec); + count = clz_hwi (v) - (HOST_BITS_PER_WIDE_INT - small_prec); + start = len - 2; + if (v != 0) + { +#ifdef DEBUG_WIDE_INT + debug_vw ("wide_int:: %d = clz (%s)\n", count, *this); +#endif + return from_shwi (count, precision); + } + } + else + { + count = HOST_BITS_PER_WIDE_INT * (BLOCKS_NEEDED (precision) - len); + start = len - 1; + } + + for (i = start; i >= 0; i--) + { + v = elt (i); + count += clz_hwi (v); + if (v != 0) + break; + } + + } + +#ifdef DEBUG_WIDE_INT + debug_vw ("wide_int_ro:: %d = clz (%s)\n", count, *this); +#endif + return from_shwi (count, precision); +} + +/* Count the number of redundant leading bits of THIS. */ +wide_int_ro +wide_int_ro::clrsb () const +{ + gcc_checking_assert (precision); + + if (neg_p (SIGNED)) + return operator ~ ().clz () - 1; + + return clz () - 1; +} + +/* Count the trailing zeros of THIS. */ +wide_int_ro +wide_int_ro::ctz () const +{ + int i; + unsigned int count = 0; + HOST_WIDE_INT v; + int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1); + int end; + bool more_to_do; + + gcc_checking_assert (precision); + + if (zero_p ()) + { + enum machine_mode mode = mode_for_size (precision, MODE_INT, 0); + if (mode == BLKmode) + mode = mode_for_size (precision, MODE_PARTIAL_INT, 0); + + /* Even if the value at zero is undefined, we have to come up + with some replacement. This seems good enough. */ + if (mode == BLKmode) + count = precision; + else if (!CTZ_DEFINED_VALUE_AT_ZERO (mode, count)) + count = precision; + } + else + { + /* The high order block is special if it is the last block and the + precision is not an even multiple of HOST_BITS_PER_WIDE_INT. We + have to clear out any ones above the precision before doing ctz + on this block; otherwise stray bits above the precision could + stop the count too early.
*/ + if (BLOCKS_NEEDED (precision) == len && small_prec) + { + end = len - 1; + more_to_do = true; + } + else + { + end = len; + more_to_do = false; + } + + for (i = 0; i < end; i++) + { + v = val[i]; + count += ctz_hwi (v); + if (v != 0) + { +#ifdef DEBUG_WIDE_INT + debug_vw ("wide_int_ro:: %d = ctz (%s)\n", count, *this); +#endif + return wide_int_ro::from_shwi (count, precision); + } + } + + if (more_to_do) + { + v = zext_hwi (val[len - 1], small_prec); + count = ctz_hwi (v); + /* The top word was all zeros so we have to cut it back to prec, + because we are counting some of the zeros above the + interesting part. */ + if (count > precision) + count = precision; + } + else + /* Skip over the blocks that are not represented. They must be + all zeros at this point. */ + count = precision; + } + +#ifdef DEBUG_WIDE_INT + debug_vw ("wide_int_ro:: %d = ctz (%s)\n", count, *this); +#endif + return wide_int_ro::from_shwi (count, precision); +} + +/* ffs of THIS. */ +wide_int_ro +wide_int_ro::ffs () const +{ + HOST_WIDE_INT count = ctz ().to_shwi (); + + if (count == precision) + count = 0; + else + count += 1; + +#ifdef DEBUG_WIDE_INT + debug_vw ("wide_int_ro:: %d = ffs (%s)\n", count, *this); +#endif + return wide_int_ro::from_shwi (count, precision); +} + +/* Subroutines of the multiplication and division operations. Unpack + the first IN_LEN HOST_WIDE_INTs in INPUT into 2 * IN_LEN + HOST_HALF_WIDE_INTs of RESULT. The rest of RESULT is filled by + uncompressing the top bit of INPUT[IN_LEN - 1]. */ +static void +wi_unpack (unsigned HOST_HALF_WIDE_INT *result, + const unsigned HOST_WIDE_INT *input, + int in_len, int out_len, unsigned int prec, signop sgn) +{ + int i; + int j = 0; + int small_prec = prec & (HOST_BITS_PER_WIDE_INT - 1); + int blocks_needed = BLOCKS_NEEDED (prec); + HOST_WIDE_INT mask; + + if (sgn == SIGNED) + { + mask = -top_bit_of ((const HOST_WIDE_INT *) input, in_len, prec); + mask &= HALF_INT_MASK; + } + else + mask = 0; + + for (i = 0; i < in_len; i++) + { + HOST_WIDE_INT x = input[i]; + if (i == blocks_needed - 1 && small_prec) + { + if (sgn == SIGNED) + x = sext_hwi (x, small_prec); + else + x = zext_hwi (x, small_prec); + } + result[j++] = x; + result[j++] = x >> HOST_BITS_PER_HALF_WIDE_INT; + } + + /* Smear the sign bit. */ + while (j < out_len) + result[j++] = mask; +} + +/* The inverse of wi_unpack. IN_LEN is the the number of input + blocks. The number of output blocks will be half this amount. */ +static void +wi_pack (unsigned HOST_WIDE_INT *result, + const unsigned HOST_HALF_WIDE_INT *input, + int in_len) +{ + int i = 0; + int j = 0; + + while (i < in_len - 2) + { + result[j++] = (unsigned HOST_WIDE_INT)input[i] + | ((unsigned HOST_WIDE_INT)input[i + 1] + << HOST_BITS_PER_HALF_WIDE_INT); + i += 2; + } + + /* Handle the case where in_len is odd. For this we zero extend. */ + if (in_len & 1) + result[j++] = (unsigned HOST_WIDE_INT)input[i]; + else + result[j++] = (unsigned HOST_WIDE_INT)input[i] + | ((unsigned HOST_WIDE_INT)input[i + 1] << HOST_BITS_PER_HALF_WIDE_INT); +} + +/* Return an integer that is the exact log2 of THIS. 
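+ The result is -1 if THIS is not an exact power of two; for + example, the exact log2 of 8 is 3, while the exact log2 of 6 is + -1.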
*/ +wide_int_ro +wide_int_ro::exact_log2 () const +{ + int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1); + wide_int count; + wide_int result; + + gcc_checking_assert (precision); + if (precision <= HOST_BITS_PER_WIDE_INT) + { + HOST_WIDE_INT v; + if (small_prec) + v = zext_hwi (val[0], small_prec); + else + v = val[0]; + result = wide_int_ro::from_shwi (::exact_log2 (v), precision); + } + else + { + count = ctz (); + if (clz () + count + 1 == precision) + result = count; + else + result = wide_int_ro::from_shwi (-1, precision); + } + +#ifdef DEBUG_WIDE_INT + debug_ww ("wide_int_ro:: %s = exact_log2 (%s)\n", result, *this); +#endif + return result; +} + +/* Return an integer that is the floor log2 of THIS. */ +wide_int_ro +wide_int_ro::floor_log2 () const +{ + int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1); + wide_int result; + + gcc_checking_assert (precision); + if (precision <= HOST_BITS_PER_WIDE_INT) + { + HOST_WIDE_INT v; + if (small_prec) + v = zext_hwi (val[0], small_prec); + else + v = val[0]; + result = wide_int_ro::from_shwi (::floor_log2 (v), precision); + } + else + result = wide_int_ro::from_shwi (precision, precision) - 1 - clz (); + +#ifdef DEBUG_WIDE_INT + debug_ww ("wide_int_ro:: %s = floor_log2 (%s)\n", result, *this); +#endif + return result; +} + +/* Multiply Op1 by Op2. If HIGH is set, only the upper half of the + result is returned. If FULL is set, the entire result is returned + in a mode that is twice the width of the inputs. However, that + mode needs to exist if the value is to be usable. Clients that use + FULL need to check for this. + + If HIGH or FULL are not set, throw away the upper half after the check + is made to see if it overflows. Unfortunately there is no better + way to check for overflow than to do this. OVERFLOW is assumed to + be sticky so it should be initialized. SGN controls the signedness + and is used to check overflow or if HIGH or FULL is set. */ +wide_int_ro +wide_int_ro::mul_internal (bool high, bool full, + const HOST_WIDE_INT *op1, unsigned int op1len, + unsigned int prec, + const HOST_WIDE_INT *op2, unsigned int op2len, + signop sgn, bool *overflow, + bool needs_overflow) +{ + wide_int result; + unsigned HOST_WIDE_INT o0, o1, k, t; + unsigned int i; + unsigned int j; + unsigned int blocks_needed = BLOCKS_NEEDED (prec); + unsigned int half_blocks_needed = blocks_needed * 2; + /* The sizes here are scaled to support a 2x largest mode by 2x + largest mode yielding a 4x largest mode result. This is what is + needed by vpn. */ + + unsigned HOST_HALF_WIDE_INT + u[2 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT]; + unsigned HOST_HALF_WIDE_INT + v[2 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT]; + /* The '2' in 'R' is because we are internally doing a full + multiply. */ + unsigned HOST_HALF_WIDE_INT + r[2 * 2 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT]; + HOST_WIDE_INT mask = ((HOST_WIDE_INT)1 << HOST_BITS_PER_HALF_WIDE_INT) - 1; + + /* If the top level routine did not really pass in an overflow, then + just make sure that we never attempt to set it. */ + if (overflow == 0) + needs_overflow = false; + result.precision = prec; + + /* If we need to check for overflow, we can only do half wide + multiplies quickly because we need to look at the top bits to + check for the overflow. 
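+ The general case below is classic long multiplication in base + 2^HOST_BITS_PER_HALF_WIDE_INT: wi_unpack splits the operands into + half-width digits, the nested loops accumulate digit products with + a carry, and wi_pack reassembles the requested part of the + double-width product.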
*/ + if ((high || full || needs_overflow) + && (prec <= HOST_BITS_PER_HALF_WIDE_INT)) + { + HOST_WIDE_INT r; + result.len = 1; + + if (sgn == SIGNED) + { + o0 = sext_hwi (op1[0], prec); + o1 = sext_hwi (op2[0], prec); + } + else + { + o0 = zext_hwi (op1[0], prec); + o1 = zext_hwi (op2[0], prec); + } + + r = o0 * o1; + if (needs_overflow) + { + HOST_WIDE_INT upper; + HOST_WIDE_INT sm + = (r << (HOST_BITS_PER_WIDE_INT - prec)) + >> (HOST_BITS_PER_WIDE_INT - 1); + mask = ((HOST_WIDE_INT)1 << prec) - 1; + sm &= mask; + upper = (r >> prec) & mask; + + if (sgn == SIGNED) + { + if (sm != upper) + *overflow = true; + } + else + if (upper != 0) + *overflow = true; + } + if (full) + { + result.val[0] = sext_hwi (r, prec * 2); + result.precision = prec * 2; + } + else if (high) + result.val[0] = r >> prec; + else + result.val[0] = sext_hwi (r, prec); +#ifdef DEBUG_WIDE_INT + debug_wvasa ("wide_int_ro:: %s O=%d = (%s *%s %s)\n", + result, overflow ? *overflow : 0, op1, op1len, prec, + sgn==UNSIGNED ? "U" : "S", op2, op2len, prec); +#endif + return result; + } + + /* We do unsigned mul and then correct it. */ + wi_unpack (u, (const unsigned HOST_WIDE_INT*)op1, op1len, + half_blocks_needed, prec, SIGNED); + wi_unpack (v, (const unsigned HOST_WIDE_INT*)op2, op2len, + half_blocks_needed, prec, SIGNED); + + /* The 2 is for a full mult. */ + memset (r, 0, half_blocks_needed * 2 + * HOST_BITS_PER_HALF_WIDE_INT / BITS_PER_UNIT); + + for (j = 0; j < half_blocks_needed; j++) + { + k = 0; + for (i = 0; i < half_blocks_needed; i++) + { + t = ((unsigned HOST_WIDE_INT)u[i] * (unsigned HOST_WIDE_INT)v[j] + + r[i + j] + k); + r[i + j] = t & HALF_INT_MASK; + k = t >> HOST_BITS_PER_HALF_WIDE_INT; + } + r[j + half_blocks_needed] = k; + } + + /* We did unsigned math above. For signed we must adjust the + product (assuming we need to see that). */ + if (sgn == SIGNED && (full || high || needs_overflow)) + { + unsigned HOST_WIDE_INT b; + if (op1[op1len-1] < 0) + { + b = 0; + for (i = 0; i < half_blocks_needed; i++) + { + t = (unsigned HOST_WIDE_INT)r[i + half_blocks_needed] + - (unsigned HOST_WIDE_INT)v[i] - b; + r[i + half_blocks_needed] = t & HALF_INT_MASK; + b = t >> (HOST_BITS_PER_WIDE_INT - 1); + } + } + if (op2[op2len-1] < 0) + { + b = 0; + for (i = 0; i < half_blocks_needed; i++) + { + t = (unsigned HOST_WIDE_INT)r[i + half_blocks_needed] + - (unsigned HOST_WIDE_INT)u[i] - b; + r[i + half_blocks_needed] = t & HALF_INT_MASK; + b = t >> (HOST_BITS_PER_WIDE_INT - 1); + } + } + } + + if (needs_overflow) + { + HOST_WIDE_INT top; + + /* For unsigned, overflow is true if any of the top bits are set. + For signed, overflow is true if any of the top bits are not equal + to the sign bit. 
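+ For example, in a signed 8-bit multiply, 16 * 16 = 0x100: the high + half (0x01) differs from the sign extension of the low half + (0x00), so overflow is signaled.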
*/ + if (sgn == UNSIGNED) + top = 0; + else + { + top = r[(half_blocks_needed) - 1]; + top = SIGN_MASK (top << (HOST_BITS_PER_WIDE_INT / 2)); + top &= mask; + } + + for (i = half_blocks_needed; i < half_blocks_needed * 2; i++) + if (((HOST_WIDE_INT)(r[i] & mask)) != top) + *overflow = true; + } + + if (full) + { + /* compute [2prec] <- [prec] * [prec] */ + wi_pack ((unsigned HOST_WIDE_INT*)result.val, r, 2 * half_blocks_needed); + result.len = blocks_needed * 2; + result.precision = prec * 2; + } + else if (high) + { + /* compute [prec] <- ([prec] * [prec]) >> [prec] */ + wi_pack ((unsigned HOST_WIDE_INT*)&result.val [blocks_needed >> 1], + r, half_blocks_needed); + result.len = blocks_needed; + } + else + { + /* compute [prec] <- ([prec] * [prec]) && ((1 << [prec]) - 1) */ + wi_pack ((unsigned HOST_WIDE_INT*)result.val, r, half_blocks_needed); + result.len = blocks_needed; + } + + result.canonize (); + +#ifdef DEBUG_WIDE_INT + debug_wvasa ("wide_int_ro:: %s O=%d = (%s *%s %s)\n", + result, overflow ? *overflow : 0, op1, op1len, prec, + sgn==UNSIGNED ? "U" : "S", op2, op2len, prec); +#endif + return result; +} + +/* Compute the parity of THIS. */ +wide_int_ro +wide_int_ro::parity () const +{ + wide_int count = popcount (); + return count & 1; +} + +/* Compute the population count of THIS. */ +wide_int_ro +wide_int_ro::popcount () const +{ + int i; + int start; + int count; + HOST_WIDE_INT v; + int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1); + int blocks_needed = BLOCKS_NEEDED (precision); + + gcc_checking_assert (precision); + + /* The high order block is special if it is the last block and the + precision is not an even multiple of HOST_BITS_PER_WIDE_INT. We + have to clear out any ones above the precision before doing + popcount on this block. */ + if (small_prec) + { + v = zext_hwi (elt (blocks_needed - 1), small_prec); + count = popcount_hwi (v); + + if (len == blocks_needed) + start = len - 2; + else + { + start = len - 1; + blocks_needed--; + } + } + else + { + start = len - 1; + count = 0; + } + + if (sign_mask ()) + count += HOST_BITS_PER_WIDE_INT * (blocks_needed - len); + + for (i = start; i >= 0; i--) + { + v = val[i]; + count += popcount_hwi (v); + } + +#ifdef DEBUG_WIDE_INT + debug_vw ("wide_int_ro:: %d = popcount (%s)\n", count, *this); +#endif + return wide_int_ro::from_shwi (count, precision); +} + +/* Subtract of THIS and OP1. If the pointer to OVERFLOW is not 0, set + OVERFLOW if the value overflows. */ +wide_int_ro +wide_int_ro::sub_large (const HOST_WIDE_INT *op0, unsigned int op0len, + unsigned int prec, + const HOST_WIDE_INT *op1, unsigned int op1len, + signop sgn, bool *overflow) +{ + wide_int result; + unsigned HOST_WIDE_INT o0 = 0; + unsigned HOST_WIDE_INT o1 = 0; + unsigned HOST_WIDE_INT x = 0; + /* We implement subtraction as an in place negate and add. Negation + is just inversion and add 1, so we can do the add of 1 by just + starting the borrow in of the first element at 1. */ + unsigned HOST_WIDE_INT borrow = 0; + unsigned HOST_WIDE_INT old_borrow = 0; + + unsigned HOST_WIDE_INT mask0, mask1; + unsigned int i, small_prec; + + result.precision = prec; + result.len = MAX (op0len, op1len); + mask0 = -top_bit_of (op0, op0len, prec); + mask1 = -top_bit_of (op1, op1len, prec); + + /* Subtract all of the explicitly defined elements. */ + for (i = 0; i < result.len; i++) + { + o0 = i < op0len ? (unsigned HOST_WIDE_INT)op0[i] : mask0; + o1 = i < op1len ? 
(unsigned HOST_WIDE_INT)op1[i] : mask1; + x = o0 - o1 - borrow; + result.val[i] = x; + old_borrow = borrow; + borrow = borrow == 0 ? o0 < o1 : o0 <= o1; + } + + if (result.len * HOST_BITS_PER_WIDE_INT < prec) + { + result.val[result.len] = mask0 - mask1 - borrow; + result.len++; + if (overflow) + *overflow = false; + } + else if (overflow) + { + if (sgn == SIGNED) + { + int p = (result.len == BLOCKS_NEEDED (prec) + ? HOST_BITS_PER_WIDE_INT + : prec & (HOST_BITS_PER_WIDE_INT - 1) ) - 1; + HOST_WIDE_INT x + = (((o0 ^ o1) & (result.val[result.len - 1] ^ o0)) >> p) & 1; + *overflow = (x != 0); + } + else + { + if (old_borrow) + *overflow = ((unsigned HOST_WIDE_INT)result.val[result.len - 1] >= o0); + else + *overflow = ((unsigned HOST_WIDE_INT)result.val[result.len - 1] > o0); + } + } + + + small_prec = prec & (HOST_BITS_PER_WIDE_INT - 1); + if (small_prec != 0 && BLOCKS_NEEDED (prec) == result.len) + { + /* Modes with weird precisions. */ + i = result.len - 1; + result.val[i] = sext_hwi (result.val[i], small_prec); + } + + result.canonize (); + + return result; +} + + +/* + * Division and Mod + */ + +/* Compute B_QUOTIENT and B_REMAINDER from B_DIVIDEND/B_DIVISOR. The + algorithm is a small modification of the algorithm in Hacker's + Delight by Warren, which itself is a small modification of Knuth's + algorithm. M is the number of significant elements of B_DIVIDEND + (there must be at least one extra element of B_DIVIDEND + allocated); N is the number of significant elements of + B_DIVISOR. */ +void +wide_int_ro::divmod_internal_2 (unsigned HOST_HALF_WIDE_INT *b_quotient, + unsigned HOST_HALF_WIDE_INT *b_remainder, + unsigned HOST_HALF_WIDE_INT *b_dividend, + unsigned HOST_HALF_WIDE_INT *b_divisor, + int m, int n) +{ + /* The "digits" are HOST_HALF_WIDE_INTs, each half the size of a + HOST_WIDE_INT and stored in the lower bits of each word. This + algorithm should work properly on both 32 and 64 bit + machines. */ + unsigned HOST_WIDE_INT b + = (unsigned HOST_WIDE_INT)1 << HOST_BITS_PER_HALF_WIDE_INT; + unsigned HOST_WIDE_INT qhat; /* Estimate of quotient digit. */ + unsigned HOST_WIDE_INT rhat; /* A remainder. */ + unsigned HOST_WIDE_INT p; /* Product of two digits. */ + HOST_WIDE_INT s, i, j, t, k; + + /* Single digit divisor. */ + if (n == 1) + { + k = 0; + for (j = m - 1; j >= 0; j--) + { + b_quotient[j] = (k * b + b_dividend[j])/b_divisor[0]; + k = ((k * b + b_dividend[j]) + - ((unsigned HOST_WIDE_INT)b_quotient[j] + * (unsigned HOST_WIDE_INT)b_divisor[0])); + } + b_remainder[0] = k; + return; + } + + s = clz_hwi (b_divisor[n-1]) - HOST_BITS_PER_HALF_WIDE_INT; /* Normalization shift. */ + + if (s) + { + /* Normalize B_DIVIDEND and B_DIVISOR. Unlike the published + algorithm, we can overwrite b_dividend and b_divisor, so we do + that. */ + for (i = n - 1; i > 0; i--) + b_divisor[i] = (b_divisor[i] << s) + | (b_divisor[i-1] >> (HOST_BITS_PER_HALF_WIDE_INT - s)); + b_divisor[0] = b_divisor[0] << s; + + b_dividend[m] = b_dividend[m-1] >> (HOST_BITS_PER_HALF_WIDE_INT - s); + for (i = m - 1; i > 0; i--) + b_dividend[i] = (b_dividend[i] << s) + | (b_dividend[i-1] >> (HOST_BITS_PER_HALF_WIDE_INT - s)); + b_dividend[0] = b_dividend[0] << s; + } + + /* Main loop.
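+ This is the quotient-estimation step of Knuth's Algorithm D: QHAT + can be at most two too large, and the correction loop at AGAIN + brings it into range before the multiply and subtract.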
*/ + for (j = m - n; j >= 0; j--) + { + qhat = (b_dividend[j+n] * b + b_dividend[j+n-1]) / b_divisor[n-1]; + rhat = (b_dividend[j+n] * b + b_dividend[j+n-1]) - qhat * b_divisor[n-1]; + again: + if (qhat >= b || qhat * b_divisor[n-2] > b * rhat + b_dividend[j+n-2]) + { + qhat -= 1; + rhat += b_divisor[n-1]; + if (rhat < b) + goto again; + } + + /* Multiply and subtract. */ + k = 0; + for (i = 0; i < n; i++) + { + p = qhat * b_divisor[i]; + t = b_dividend[i+j] - k - (p & HALF_INT_MASK); + b_dividend[i + j] = t; + k = ((p >> HOST_BITS_PER_HALF_WIDE_INT) + - (t >> HOST_BITS_PER_HALF_WIDE_INT)); + } + t = b_dividend[j+n] - k; + b_dividend[j+n] = t; + + b_quotient[j] = qhat; + if (t < 0) + { + b_quotient[j] -= 1; + k = 0; + for (i = 0; i < n; i++) + { + t = (HOST_WIDE_INT)b_dividend[i+j] + b_divisor[i] + k; + b_dividend[i+j] = t; + k = t >> HOST_BITS_PER_HALF_WIDE_INT; + } + b_dividend[j+n] += k; + } + } + if (s) + for (i = 0; i < n; i++) + b_remainder[i] = (b_dividend[i] >> s) + | (b_dividend[i+1] << (HOST_BITS_PER_HALF_WIDE_INT - s)); + else + for (i = 0; i < n; i++) + b_remainder[i] = b_dividend[i]; +} + + +/* Do a truncating divide DIVISOR into DIVIDEND. The result is the + same size as the operands. SIGN is either SIGNED or UNSIGNED. */ +wide_int_ro +wide_int_ro::divmod_internal (bool compute_quotient, + const HOST_WIDE_INT *dividend, + unsigned int dividend_len, + unsigned int dividend_prec, + const HOST_WIDE_INT *divisor, + unsigned int divisor_len, + unsigned int divisor_prec, + signop sgn, wide_int_ro *remainder, + bool compute_remainder, + bool *oflow) +{ + wide_int quotient, u0, u1; + int dividend_blocks_needed = 2 * BLOCKS_NEEDED (dividend_prec); + int divisor_blocks_needed = 2 * BLOCKS_NEEDED (divisor_prec); + unsigned HOST_HALF_WIDE_INT + b_quotient[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT]; + unsigned HOST_HALF_WIDE_INT + b_remainder[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT]; + unsigned HOST_HALF_WIDE_INT + b_dividend[(4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT) + 1]; + unsigned HOST_HALF_WIDE_INT + b_divisor[4 * MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_HALF_WIDE_INT]; + int m, n; + bool dividend_neg = false; + bool divisor_neg = false; + bool overflow = false; + + if (divisor[0] == 0 && divisor_len == 1) + overflow = true; + + /* The smallest signed number / -1 causes overflow. */ + if (sgn == SIGNED) + { + HOST_WIDE_INT small_prec = dividend_prec & (HOST_BITS_PER_WIDE_INT - 1); + if (dividend_len == BLOCKS_NEEDED (dividend_prec) + && divisor_len == 1 + && divisor[0] == HOST_WIDE_INT(-1)) + + if ((small_prec + && ((HOST_WIDE_INT)zext_hwi (dividend[dividend_len - 1], + small_prec) + == (HOST_WIDE_INT(1) << (small_prec - 1)))) + || dividend[dividend_len - 1] + == HOST_WIDE_INT(1) << (HOST_BITS_PER_WIDE_INT - 1)) + { + /* The smallest neg number is 100...00. The high word was + checked above, now check the rest of the words are + zero. */ + unsigned int i; + bool all_zero = true; + for (i = 0; i < dividend_len - 1; i++) + if (dividend[i] != 0) + { + all_zero = false; + break; + } + if (all_zero) + overflow = true; + } + } + + quotient.precision = dividend_prec; + remainder->precision = dividend_prec; + + /* If overflow is set, just get out. There will only be grief by + continuing. */ + if (overflow) + { + if (compute_remainder) + { + remainder->len = 1; + remainder->val[0] = 0; + } + if (oflow != 0) + *oflow = true; + return wide_int::zero (dividend_prec); + } + + /* Do it on the host if you can. 
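+ That is, when both operands fit in a single HWI, convert to host + integers and let native division do the work; this is by far the + most common case.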
*/ + if (dividend_prec <= HOST_BITS_PER_WIDE_INT + && divisor_prec <= HOST_BITS_PER_WIDE_INT) + { + quotient.len = 1; + remainder->len = 1; + if (sgn == SIGNED) + { + HOST_WIDE_INT o0 = sext_hwi (dividend[0], dividend_prec); + HOST_WIDE_INT o1 = sext_hwi (divisor[0], divisor_prec); + + quotient.val[0] = sext_hwi (o0 / o1, dividend_prec); + remainder->val[0] = sext_hwi (o0 % o1, dividend_prec); + } + else + { + unsigned HOST_WIDE_INT o0 = zext_hwi (dividend[0], dividend_prec); + unsigned HOST_WIDE_INT o1 = zext_hwi (divisor[0], divisor_prec); + + quotient.val[0] = zext_hwi (o0 / o1, dividend_prec); + remainder->val[0] = zext_hwi (o0 % o1, dividend_prec); + } + +#ifdef DEBUG_WIDE_INT + debug_wwasa ("wide_int_ro:: (q = %s) (r = %s) = (%s /%s %s)\n", + quotient, *remainder, + dividend, dividend_len, dividend_prec, + sgn == SIGNED ? "S" : "U", + divisor, divisor_len, divisor_prec); +#endif + return quotient; + } + + /* Make the divisor and dividend positive and remember what we + did. */ + if (sgn == SIGNED) + { + if (top_bit_of (dividend, dividend_len, dividend_prec)) + { + u0 = sub_large (wide_int (0).val, 1, + dividend_prec, dividend, dividend_len, UNSIGNED); + dividend = u0.val; + dividend_len = u0.len; + dividend_neg = true; + } + if (top_bit_of (divisor, divisor_len, divisor_prec)) + { + u1 = sub_large (wide_int (0).val, 1, + divisor_prec, divisor, divisor_len, UNSIGNED); + divisor = u1.val; + divisor_len = u1.len; + divisor_neg = true; + } + } + + wi_unpack (b_dividend, (const unsigned HOST_WIDE_INT*)dividend, + dividend_len, dividend_blocks_needed, dividend_prec, sgn); + wi_unpack (b_divisor, (const unsigned HOST_WIDE_INT*)divisor, + divisor_len, divisor_blocks_needed, divisor_prec, sgn); + + if (top_bit_of (dividend, dividend_len, dividend_prec) && sgn == SIGNED) + m = dividend_blocks_needed; + else + m = 2 * dividend_len; + + if (top_bit_of (divisor, divisor_len, divisor_prec) && sgn == SIGNED) + n = divisor_blocks_needed; + else + n = 2 * divisor_len; + + /* We need to find the top non zero block of b_divisor. At most the + top two blocks are zero. */ + if (b_divisor[n - 1] == 0) + n--; + if (b_divisor[n - 1] == 0) + n--; + + memset (b_quotient, 0, sizeof (b_quotient)); + + divmod_internal_2 (b_quotient, b_remainder, b_dividend, b_divisor, m, n); + + if (compute_quotient) + { + wi_pack ((unsigned HOST_WIDE_INT*)quotient.val, b_quotient, m); + quotient.len = m / 2; + quotient.canonize (); + /* The quotient is neg if exactly one of the divisor or dividend is + neg. */ + if (dividend_neg != divisor_neg) + quotient = -quotient; + } + else + quotient = wide_int::zero (dividend_prec); + + if (compute_remainder) + { + wi_pack ((unsigned HOST_WIDE_INT*)remainder->val, b_remainder, n); + if (n & 1) + n++; + remainder->len = n / 2; + (*remainder).canonize (); + /* The remainder is always the same sign as the dividend. */ + if (dividend_neg) + *remainder = -(*remainder); + } + else + *remainder = wide_int::zero (dividend_prec); + +#ifdef DEBUG_WIDE_INT + debug_wwasa ("wide_int_ro:: (q = %s) (r = %s) = (%s /%s %s)\n", + quotient, *remainder, + dividend, dividend_len, dividend_prec, + sgn == SIGNED ? "S" : "U", + divisor, divisor_len, divisor_prec); +#endif + return quotient; +} + + +/* Return TRUE iff PRODUCT is an integral multiple of FACTOR, and return + the multiple in *MULTIPLE. Otherwise return FALSE and leave *MULTIPLE + unchanged. 
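+ For example, 12 is a multiple of 4, with *MULTIPLE set to 3, while + 13 is not a multiple of 4 and *MULTIPLE is left untouched.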
*/ +bool +wide_int_ro::multiple_of_p (const wide_int_ro &factor, + signop sgn, wide_int_ro *multiple) const +{ + wide_int remainder; + wide_int quotient = divmod_trunc (factor, &remainder, sgn); + if (remainder.zero_p ()) + { + *multiple = quotient; + return true; + } + + return false; +} + +/* + * Shifting, rotating and extraction. + */ + +/* Extract WIDTH bits from THIS starting at OFFSET. The result is + assumed to fit in a HOST_WIDE_INT. This function is safe in that + it can properly access elements that may not be explicitly + represented. */ +HOST_WIDE_INT +wide_int_ro::extract_to_hwi (int offset, int width) const +{ + int start_elt, end_elt, shift; + HOST_WIDE_INT x; + + /* Get rid of the easy cases first. */ + if (offset >= len * HOST_BITS_PER_WIDE_INT) + return sign_mask (); + if (offset + width <= 0) + return 0; + + shift = offset & (HOST_BITS_PER_WIDE_INT - 1); + if (offset < 0) + { + start_elt = -1; + end_elt = 0; + x = 0; + } + else + { + start_elt = offset / HOST_BITS_PER_WIDE_INT; + end_elt = (offset + width - 1) / HOST_BITS_PER_WIDE_INT; + x = start_elt >= len + ? sign_mask () + : (unsigned HOST_WIDE_INT)val[start_elt] >> shift; + } + + if (start_elt != end_elt) + { + HOST_WIDE_INT y = end_elt == len + ? sign_mask () : val[end_elt]; + + x |= y << (HOST_BITS_PER_WIDE_INT - shift); + } + + if (width != HOST_BITS_PER_WIDE_INT) + x &= ((HOST_WIDE_INT)1 << width) - 1; + + return x; +} + + +/* Left shift THIS by CNT, producing a result with precision RES_PREC. + Since this is used internally, the result precision can be + specified independently of the precision of THIS, which is useful + when inserting a small value into a larger one. */ +wide_int_ro +wide_int_ro::lshift_large (unsigned int cnt, unsigned int res_prec) const +{ + wide_int result; + unsigned int i; + + result.precision = res_prec; + + if (cnt >= res_prec) + { + result.val[0] = 0; + result.len = 1; + return result; + } + + for (i = 0; i < res_prec; i += HOST_BITS_PER_WIDE_INT) + result.val[i / HOST_BITS_PER_WIDE_INT] + = extract_to_hwi (i - cnt, HOST_BITS_PER_WIDE_INT); + + result.len = BLOCKS_NEEDED (res_prec); + result.canonize (); + + return result; +} + +/* Unsigned right shift THIS by CNT. */ +wide_int_ro +wide_int_ro::rshiftu_large (unsigned int cnt) const +{ + wide_int result; + int i; + int small_prec = (precision - cnt) & (HOST_BITS_PER_WIDE_INT - 1); + + if (cnt == 0) + return *this; + + result.precision = precision; + + if (cnt >= precision) + { + result.val[0] = 0; + result.len = 1; + return result; + } + + result.len = BLOCKS_NEEDED (precision - cnt); + + for (i = 0; i < result.len; i++) + result.val[i] + = extract_to_hwi ((i * HOST_BITS_PER_WIDE_INT) + cnt, + HOST_BITS_PER_WIDE_INT); + + /* extract_to_hwi sign extends, so we need to fix that up. */ + if (small_prec) + result.val [result.len - 1] + = zext_hwi (result.val [result.len - 1], small_prec); + else if (result.val[result.len - 1] < 0) + { + /* Add a new block with a zero. */ + result.val[result.len++] = 0; + return result; + } + + result.canonize (); + + return result; +} + +/* Signed right shift THIS by CNT.
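+ The vacated high bits are filled with copies of the sign bit, so, + for example, shifting -8 right by 1 yields -4, where rshiftu_large + would fill with zeros instead.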
*/ +wide_int_ro +wide_int_ro::rshifts_large (unsigned int cnt) const +{ + wide_int result; + int i; + + if (cnt == 0) + return *this; + + result.precision = precision; + + if (cnt >= precision) + { + HOST_WIDE_INT m = sign_mask (); + result.val[0] = m; + result.len = 1; + return result; + } + + result.len = BLOCKS_NEEDED (precision - cnt); + + for (i = 0; i < result.len; i++) + result.val[i] + = extract_to_hwi ((i * HOST_BITS_PER_WIDE_INT) + cnt, + HOST_BITS_PER_WIDE_INT); + + result.canonize (); + + return result; +} + +/* + * Private utilities. + */ + +/* Decompress THIS for at least TARGET bits into a result with + precision PREC. */ +wide_int_ro +wide_int_ro::decompress (unsigned int target, unsigned int prec) const +{ + wide_int result; + int blocks_needed = BLOCKS_NEEDED (target); + HOST_WIDE_INT mask; + int len, i; + + result.precision = prec; + result.len = blocks_needed; + + for (i = 0; i < this->len; i++) + result.val[i] = val[i]; + + len = this->len; + + if (target > result.precision) + return result; + + /* The extension that we are doing here is not sign extension, it is + decompression. */ + mask = sign_mask (); + while (len < blocks_needed) + result.val[len++] = mask; + + return result; +} + +void gt_ggc_mx(max_wide_int*) { } +void gt_pch_nx(max_wide_int*,void (*)(void*, void*), void*) { } +void gt_pch_nx(max_wide_int*) { } + +/* + * Private debug printing routines. + */ +#ifdef DEBUG_WIDE_INT +/* The debugging routines print results of wide operations into the + dump files of the respective passes in which they were called. */ +static char * +dumpa (const HOST_WIDE_INT *val, unsigned int len, unsigned int prec, char *buf) +{ + int i; + int l; + const char * sep = ""; + + l = sprintf (buf, "[%d (", prec); + for (i = len - 1; i >= 0; i--) + { + l += sprintf (&buf[l], "%s" HOST_WIDE_INT_PRINT_HEX, sep, val[i]); + sep = " "; + } + + gcc_assert (len != 0); + + l += sprintf (&buf[l], ")]"); + + gcc_assert (l < MAX_SIZE); + return buf; + + +} +#endif + +/* The debugging routines print results of wide operations into the + dump files of the respective passes in which they were called. */ +char * +wide_int_ro::dump (char* buf) const +{ + int i; + int l; + const char * sep = ""; + + l = sprintf (buf, "[%d (", precision); + for (i = len - 1; i >= 0; i--) + { + l += sprintf (&buf[l], "%s" HOST_WIDE_INT_PRINT_HEX, sep, val[i]); + sep = " "; + } + + gcc_assert (len != 0); + + l += sprintf (&buf[l], ")]"); + + gcc_assert (l < MAX_SIZE); + return buf; +} + +#ifdef DEBUG_WIDE_INT + +#if 0 +#define wide_int_dump_file (dump_file ? 
dump_file : stdout) +#else +#define wide_int_dump_file (dump_file) +#endif + +void +wide_int_ro::debug_vaa (const char* fmt, int r, + const HOST_WIDE_INT *o0, unsigned int l0, unsigned int p0, + const HOST_WIDE_INT *o1, unsigned int l1, unsigned int p1) +{ + char buf0[MAX_SIZE]; + char buf1[MAX_SIZE]; + if (wide_int_dump_file) + fprintf (wide_int_dump_file, fmt, r, + dumpa (o0, l0, p0, buf0), + dumpa (o1, l1, p1, buf1)); +} + +void +wide_int_ro::debug_vw (const char* fmt, int r, const wide_int_ro& o0) +{ + char buf0[MAX_SIZE]; + if (wide_int_dump_file) + fprintf (wide_int_dump_file, fmt, r, o0.dump (buf0)); +} + +void +wide_int_ro::debug_vwa (const char* fmt, int r, const wide_int_ro &o0, + const HOST_WIDE_INT *o1, unsigned int l1, unsigned int p1) +{ + char buf0[MAX_SIZE]; + char buf1[MAX_SIZE]; + if (wide_int_dump_file) + fprintf (wide_int_dump_file, fmt, r, o0.dump (buf0), dumpa (o1, l1, p1, buf1)); +} + +void +wide_int_ro::debug_vwh (const char* fmt, int r, const wide_int_ro &o0, + HOST_WIDE_INT o1) +{ + char buf0[MAX_SIZE]; + if (wide_int_dump_file) + fprintf (wide_int_dump_file, fmt, r, o0.dump (buf0), o1); +} + +void +wide_int_ro::debug_vww (const char* fmt, int r, const wide_int_ro &o0, + const wide_int_ro &o1) +{ + char buf0[MAX_SIZE]; + char buf1[MAX_SIZE]; + if (wide_int_dump_file) + fprintf (wide_int_dump_file, fmt, r, o0.dump (buf0), o1.dump (buf1)); +} + +void +wide_int_ro::debug_wa (const char* fmt, const wide_int_ro &r, + const HOST_WIDE_INT *o0, unsigned int l0, unsigned int p0) +{ + char buf0[MAX_SIZE]; + char buf1[MAX_SIZE]; + if (wide_int_dump_file) + fprintf (wide_int_dump_file, fmt, r.dump (buf0), dumpa (o0, l0, p0, buf1)); +} + +void +wide_int_ro::debug_waa (const char* fmt, const wide_int_ro &r, + const HOST_WIDE_INT *o0, unsigned int l0, unsigned int p0, + const HOST_WIDE_INT *o1, unsigned int l1, unsigned int p1) +{ + char buf0[MAX_SIZE]; + char buf1[MAX_SIZE]; + char buf2[MAX_SIZE]; + if (wide_int_dump_file) + fprintf (wide_int_dump_file, fmt, r.dump (buf0), dumpa (o0, l0, p0, buf1), + dumpa (o1, l1, p1, buf2)); +} + +void +wide_int_ro::debug_waav (const char* fmt, const wide_int_ro &r, + const HOST_WIDE_INT *o0, unsigned int l0, unsigned int p0, + const HOST_WIDE_INT *o1, unsigned int l1, unsigned int p1, + int s) +{ + char buf0[MAX_SIZE]; + char buf1[MAX_SIZE]; + char buf2[MAX_SIZE]; + if (wide_int_dump_file) + fprintf (wide_int_dump_file, fmt, r.dump (buf0), dumpa (o0, l0, p0, buf1), + dumpa (o1, l1, p1, buf2), s); +} + +void +wide_int_ro::debug_wh (const char* fmt, const wide_int_ro &r, + HOST_WIDE_INT o1) +{ + char buf0[MAX_SIZE]; + if (wide_int_dump_file) + fprintf (wide_int_dump_file, fmt, r.dump (buf0), o1); +} + +void +wide_int_ro::debug_whh (const char* fmt, const wide_int_ro &r, + HOST_WIDE_INT o1, HOST_WIDE_INT o2) +{ + char buf0[MAX_SIZE]; + if (wide_int_dump_file) + fprintf (wide_int_dump_file, fmt, r.dump (buf0), o1, o2); +} + +void +wide_int_ro::debug_wv (const char* fmt, const wide_int_ro &r, int v0) +{ + char buf0[MAX_SIZE]; + if (wide_int_dump_file) + fprintf (wide_int_dump_file, fmt, r.dump (buf0), v0); +} + +void +wide_int_ro::debug_wvv (const char* fmt, const wide_int_ro &r, + int v0, int v1) +{ + char buf0[MAX_SIZE]; + if (wide_int_dump_file) + fprintf (wide_int_dump_file, fmt, r.dump (buf0), v0, v1); +} + +void +wide_int_ro::debug_wvvv (const char* fmt, const wide_int_ro &r, + int v0, int v1, int v2) +{ + char buf0[MAX_SIZE]; + if (wide_int_dump_file) + fprintf (wide_int_dump_file, fmt, r.dump (buf0), v0, v1, v2); +} + +void 
+wide_int_ro::debug_wvwa (const char* fmt, const wide_int_ro &r, int v0, + const wide_int_ro &o0, + const HOST_WIDE_INT *o1, unsigned int l1, unsigned int p1) +{ + char buf0[MAX_SIZE]; + char buf1[MAX_SIZE]; + char buf2[MAX_SIZE]; + if (wide_int_dump_file) + fprintf (wide_int_dump_file, fmt, r.dump (buf0), v0, + o0.dump (buf1), dumpa (o1, l1, p1, buf2)); +} + +void +wide_int_ro::debug_wvasa (const char* fmt, const wide_int_ro &r, int v0, + const HOST_WIDE_INT *o0, unsigned int l0, unsigned int p0, + const char* s, + const HOST_WIDE_INT *o1, unsigned int l1, unsigned int p1) +{ + char buf0[MAX_SIZE]; + char buf1[MAX_SIZE]; + char buf2[MAX_SIZE]; + if (wide_int_dump_file) + fprintf (wide_int_dump_file, fmt, r.dump (buf0), v0, + dumpa (o0, l0, p0, buf1), s, dumpa (o1, l1, p1, buf2)); +} + +void +wide_int_ro::debug_wvww (const char* fmt, const wide_int_ro &r, int v0, + const wide_int_ro &o0, const wide_int_ro &o1) +{ + char buf0[MAX_SIZE]; + char buf1[MAX_SIZE]; + char buf2[MAX_SIZE]; + if (wide_int_dump_file) + fprintf (wide_int_dump_file, fmt, r.dump (buf0), v0, + o0.dump (buf1), o1.dump (buf2)); +} + +void +wide_int_ro::debug_ww (const char* fmt, const wide_int_ro &r, + const wide_int_ro &o0) +{ + char buf0[MAX_SIZE]; + char buf1[MAX_SIZE]; + if (wide_int_dump_file) + fprintf (wide_int_dump_file, fmt, r.dump (buf0), o0.dump (buf1)); +} + +void +wide_int_ro::debug_wwa (const char* fmt, const wide_int_ro &r, + const wide_int_ro &o0, + const HOST_WIDE_INT *o1, unsigned int l1, unsigned int p1) +{ + char buf0[MAX_SIZE]; + char buf1[MAX_SIZE]; + char buf2[MAX_SIZE]; + if (wide_int_dump_file) + fprintf (wide_int_dump_file, fmt, r.dump (buf0), o0.dump (buf1), + dumpa (o1, l1, p1, buf2)); +} + +void +wide_int_ro::debug_wwv (const char* fmt, const wide_int_ro &r, + const wide_int_ro &o0, int v0) +{ + char buf0[MAX_SIZE]; + char buf1[MAX_SIZE]; + if (wide_int_dump_file) + fprintf (wide_int_dump_file, fmt, r.dump (buf0), o0.dump (buf1), v0); +} + +void +wide_int_ro::debug_wwvs (const char* fmt, const wide_int_ro &r, + const wide_int_ro &o0, int v0, + const char *s) +{ + char buf0[MAX_SIZE]; + char buf1[MAX_SIZE]; + if (wide_int_dump_file) + fprintf (wide_int_dump_file, fmt, r.dump (buf0), o0.dump (buf1), v0, s); +} + +void +wide_int_ro::debug_wwvvs (const char* fmt, const wide_int_ro &r, + const wide_int_ro &o0, int v0, int v1, + const char *s) +{ + char buf0[MAX_SIZE]; + char buf1[MAX_SIZE]; + if (wide_int_dump_file) + fprintf (wide_int_dump_file, fmt, r.dump (buf0), o0.dump (buf1), v0, v1, s); +} + +void +wide_int_ro::debug_wwwvv (const char* fmt, const wide_int_ro &r, + const wide_int_ro &o0, const wide_int_ro &o1, + int v0, int v1) +{ + char buf0[MAX_SIZE]; + char buf1[MAX_SIZE]; + char buf2[MAX_SIZE]; + if (wide_int_dump_file) + fprintf (wide_int_dump_file, fmt, r.dump (buf0), + o0.dump (buf1), o1.dump (buf2), v0, v1); +} + +void +wide_int_ro::debug_www (const char* fmt, const wide_int_ro &r, + const wide_int_ro &o0, const wide_int_ro &o1) +{ + char buf0[MAX_SIZE]; + char buf1[MAX_SIZE]; + char buf2[MAX_SIZE]; + if (wide_int_dump_file) + fprintf (wide_int_dump_file, fmt, r.dump (buf0), + o0.dump (buf1), o1.dump (buf2)); +} + +void +wide_int_ro::debug_wwasa (const char* fmt, const wide_int_ro &r, const wide_int_ro &o0, + const HOST_WIDE_INT *o1, unsigned int l1, unsigned int p1, + const char* s, + const HOST_WIDE_INT *o2, unsigned int l2, unsigned int p2) +{ + char buf0[MAX_SIZE]; + char buf1[MAX_SIZE]; + char buf2[MAX_SIZE]; + char buf3[MAX_SIZE]; + if (wide_int_dump_file) + fprintf 
(wide_int_dump_file, fmt, r.dump (buf0), + o0.dump (buf1), dumpa (o1, l1, p1, buf2), s, dumpa (o2, l2, p2, buf3)); +} + +void +wide_int_ro::debug_wwww (const char* fmt, const wide_int_ro &r, + const wide_int_ro &o0, const wide_int_ro &o1, + const wide_int_ro &o2) +{ + char buf0[MAX_SIZE]; + char buf1[MAX_SIZE]; + char buf2[MAX_SIZE]; + char buf3[MAX_SIZE]; + if (wide_int_dump_file) + fprintf (wide_int_dump_file, fmt, r.dump (buf0), + o0.dump (buf1), o1.dump (buf2), o2.dump (buf3)); +} + +#endif + diff --git a/gcc/wide-int.h b/gcc/wide-int.h new file mode 100644 index 00000000000..5da3e003033 --- /dev/null +++ b/gcc/wide-int.h @@ -0,0 +1,3629 @@ +/* Operations with very long integers. -*- C++ -*- + Copyright (C) 2012-2013 Free Software Foundation, Inc. + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify it +under the terms of the GNU General Public License as published by the +Free Software Foundation; either version 3, or (at your option) any +later version. + +GCC is distributed in the hope that it will be useful, but WITHOUT +ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING3. If not see +<http://www.gnu.org/licenses/>. */ + +#ifndef WIDE_INT_H +#define WIDE_INT_H + +/* Wide-int.[cc|h] implements a class that efficiently performs + mathematical operations on finite precision integers. Wide-ints + are designed to be transient - they are not for long term storage + of values. There is tight integration between wide-ints and the + other longer storage GCC representations (rtl and tree). + + The actual precision of a wide-int depends on the flavor. There + are three predefined flavors: + + 1) wide_int (the default). This flavor does the math in the + precision of its input arguments. It is assumed (and checked) + that the precisions of the operands and results are consistent. + This is the most efficient flavor. It is not possible to examine + bits above the precision that has been specified. Because of + this, the default flavor has semantics that are simple to + understand and in general model the underlying hardware that the + compiler is targeted for. + + This flavor must be used at the RTL level of gcc because there + is, in general, not enough information in the RTL representation + to extend a value beyond the precision specified in the mode. + + This flavor should also be used at the TREE and GIMPLE levels of + the compiler except for the circumstances described in the + descriptions of the other two flavors. + + The default wide_int representation does not contain any inherent + information about the signedness of the represented value, so it + can be used to represent both signed and unsigned numbers. For + operations where the results depend on signedness (full width + multiply, division, shifts, comparisons, and operations that need + overflow detected), the signedness must be specified separately. + + 2) addr_wide_int. This is a fixed size representation that is + guaranteed to be large enough to compute any bit or byte sized + address calculation on the target. Currently the value is 64 + 4 + bits rounded up to the next even multiple of + HOST_BITS_PER_WIDE_INT (but this can be changed when the first + port needs more than 64 bits for the size of a pointer). + + This flavor can be used for all address math on the target.
In + this representation, the values are sign or zero extended based + on their input types to the internal precision. All math is done + in this precision and then the values are truncated to fit in the + result type. Unlike most gimple or rtl intermediate code, it is + not useful to perform the address arithmetic at the same + precision in which the operands are represented because there has + been no effort by the front ends to convert most addressing + arithmetic to canonical types. + + In the addr_wide_int, all numbers are represented as signed + numbers. There are enough bits in the internal representation so + that no information is lost by representing them this way. + + 3) max_wide_int. This representation is an approximation of + infinite precision math. However, it is not really infinite + precision math as in the GMP library. It is really finite + precision math where the precision is 4 times the size of the + largest integer that the target port can represent. + + Like the addr_wide_int, all numbers are inherently signed. + + There are several places in GCC where this should/must be used: + + * Code that does widening conversions. The canonical way that + this is performed is to sign or zero extend the input value to + the max width based on the sign of the type of the source and + then to truncate that value to the target type. This is in + preference to using the sign of the target type to extend the + value directly (which gets the wrong value for the conversion + of large unsigned numbers to larger signed types). + + * Code that does induction variable optimizations. This code + works with induction variables of many different types at the + same time. Because of this, it ends up doing many different + calculations where the operands are not compatible types. The + max_wide_int makes this easy, because it provides a field where + nothing is lost when converting from any variable. + + * There are a small number of passes that currently use the + max_wide_int that should use the default. These should be + changed. + + There are surprising features of addr_wide_int and max_wide_int + that the users should be careful about: + + 1) Shifts and rotations are just weird. You have to specify the + precision in which the shift or rotate is to happen. The bits + above this precision remain unchanged. While this is what you + want, it is clearly non-obvious. + + 2) Larger precision math sometimes does not produce the same + answer as would be expected for doing the math at the proper + precision. In particular, a multiply followed by a divide will + produce a different answer if the first product is larger than + what can be represented in the input precision. + + The addr_wide_int and the max_wide_int flavors are more expensive + than the default wide_int, so in addition to the caveats with these + two, the default is the preferred representation. + + All three flavors of wide_int are represented as a vector of + HOST_WIDE_INTs. The vector contains enough elements to hold a + value of MAX_BITSIZE_MODE_ANY_INT bits, i.e. + MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_WIDE_INT HWIs, a number + that is derived for each host/target combination. The values are + stored in the vector with the least significant + HOST_BITS_PER_WIDE_INT bits of the value stored in element 0. + + A wide_int contains three fields: the vector (VAL), the precision + and a length (LEN). The length is the number of HWIs needed to + represent the value. For the max_wide_int and the addr_wide_int, + the precision is a constant that cannot be changed.
+   For the default wide_int, the precision is set from the constructor.
+
+   Since most integers used in a compiler are small values, it is
+   generally profitable to use a representation of the value that is
+   as small as possible.  LEN is used to indicate the number of
+   elements of the vector that are in use.  The numbers are stored as
+   sign extended numbers as a means of compression.  Leading
+   HOST_WIDE_INTs that contain strings of either -1 or 0 are removed
+   as long as they can be reconstructed from the top bit that is being
+   represented.
+
+   There are constructors to create the various forms of wide-int from
+   trees, rtl and constants.  For trees and constants, you can simply say:
+
+     tree t = ...;
+     wide_int x = t;
+     wide_int y = 6;
+
+   However, a little more syntax is required for rtl constants since
+   they do not have an explicit precision.  To make an rtl into a
+   wide_int, you have to pair it with a mode.  The canonical way to do
+   this is with std::make_pair as in:
+
+     rtx r = ...
+     wide_int x = std::make_pair (r, mode);
+
+   Wide ints sometimes have a value with a precision of 0.  These
+   come from two separate sources:
+
+   * The front ends do sometimes produce values that really have a
+     precision of 0.  The only place where these seem to come in is
+     the MIN and MAX values for types with a precision of 0.  Aside
+     from the computation of these MIN and MAX values, there appears
+     to be no other use of true precision 0 numbers, so the
+     overloading of precision 0 does not appear to be an issue.
+     These appear to be associated with 0 width bit fields.  They are
+     harmless, but there are several paths through the wide int code
+     to support this without having to special case the front ends.
+
+   * When a constant that has an integer type is converted to a
+     wide-int it comes in with precision 0.  For these constants the
+     top bit does accurately reflect the sign of that constant; this
+     is an exception to the normal rule that the signedness is not
+     represented.  When used in a binary operation, the wide-int
+     implementation properly extends these constants so that they
+     properly match the other operand of the computation.  This allows
+     you to write:
+
+       tree t = ...
+       wide_int x = t + 6;
+
+     assuming t is an int_cst.
+
+   Note that the bits above the precision are not defined and the
+   algorithms used here are careful not to depend on their value.  In
+   particular, values that come in from rtx constants may have random
+   bits.  When the precision is 0, all the bits in the LEN elements of
+   VAL are significant with no undefined bits.  Precisionless
+   constants are limited to being one or two HOST_WIDE_INTs.  When two
+   are used the upper value is 0, and the high order bit of the first
+   value is set.  (Note that this may need to be generalized if it is
+   ever necessary to support 32-bit HWIs again.)
+
+   Many binary operations require that the precisions of the two
+   operands be the same.  However, the API tries to keep this relaxed
+   as much as possible.  In particular:
+
+   * shifts do not care about the precision of the second operand.
+
+   * values that come in from gcc source constants or variables are
+     not checked as long as one of the two operands has a precision.
+     This is allowed because it is always known whether to sign or
+     zero extend these values.
+
+   * The comparisons do not require that the operands be the same
+     length.  This allows wide ints to be used in hash tables where
+     all of the values may not be the same precision.
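+
+   For example (an illustrative sketch, not code from this patch):
+
+     wide_int x = ...;             // some value, say 32 bit precision
+     wide_int y = x.lshift (4);    // 4 is a precisionless constant
+
+   Only X's precision matters here; the shift amount needs no
+   precision of its own.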
+*/
+
+
+#ifndef GENERATOR_FILE
+#include <utility>
+#include "tree.h"
+#include "system.h"
+#include "hwint.h"
+#include "options.h"
+#include "tm.h"
+#include "insn-modes.h"
+#include "machmode.h"
+#include "double-int.h"
+#include <gmp.h>
+#include "dumpfile.h"
+#include "real.h"
+#include "signop.h"
+
+#if 0
+#define DEBUG_WIDE_INT
+#endif
+
+/* The MAX_BITSIZE_MODE_ANY_INT is automatically generated by a very
+   early examination of the target's mode file.  Thus it is safe to
+   assume that some small multiple of this number is easily larger
+   than any number that the target could compute.  The place in the
+   compiler that currently needs the widest ints is the code that
+   determines the range of a multiply.  This code needs 2n + 2 bits.  */
+
+#define WIDE_INT_MAX_ELTS \
+  ((4 * MAX_BITSIZE_MODE_ANY_INT + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT)
+
+/* This is the max size of any pointer on any machine.  It does not
+   seem to be as easy to sniff this out of the machine description as
+   it is for MAX_BITSIZE_MODE_ANY_INT since targets may support
+   multiple address sizes and may have different address sizes for
+   different address spaces.  However, currently the largest pointer
+   on any platform is 64 bits.  When that changes, then it is likely
+   that a target hook should be defined so that targets can make this
+   value larger for those targets.  */
+#define addr_max_bitsize (64)
+
+/* This is the internal precision used when doing any address
+   arithmetic.  The '4' is really 3 + 1.  Three of the bits are for
+   the number of extra bits needed to do bit addresses and a single
+   bit allows everything to be signed without losing any precision.
+   Then everything is rounded up to the next HWI for efficiency.  */
+#define addr_max_precision \
+  ((addr_max_bitsize + 4 + HOST_BITS_PER_WIDE_INT - 1) & ~(HOST_BITS_PER_WIDE_INT - 1))
+
+enum ShiftOp {
+  NONE,
+  /* There are two uses for the wide-int shifting functions.  The
+     first use is as an emulation of the target hardware.  The
+     second use is as service routines for other optimizations.  The
+     first case needs to be identified by passing TRUNC as the value
+     of ShiftOp so that the shift amount is properly handled according
+     to the SHIFT_COUNT_TRUNCATED flag.  For the second case, the
+     shift amount is always truncated by the bytesize of the mode of
+     THIS.  */
+  TRUNC
+};
+
+/* This is used to bundle an rtx and a mode together so that the pair
+   can be used as the second operand of a wide int expression.  If we
+   ever put modes into rtx integer constants, this should go away and
+   then just pass an rtx in.  */
+typedef std::pair<rtx, enum machine_mode> rtx_mode_t;
+
+template <typename T>
+inline bool signedp (T)
+{
+  return ~(T)0 < (T)0;
+}
+
+template <>
+inline bool signedp<unsigned int> (unsigned int)
+{
+  return false;
+}
+
+template <>
+inline bool signedp<unsigned long> (unsigned long)
+{
+  return false;
+}
+
+class wide_int;
+
+class GTY(()) wide_int_ro {
+  template <int bitsize>
+  friend class fixed_wide_int;
+  friend class wide_int;
+  /* Internal representation.  */
+
+ protected:
+  /* VAL is set to a size that is capable of computing a full
+     multiplication on the largest mode that is represented on the
+     target.  Currently there is a part of tree-vrp that requires 2x +
+     2 bits of precision where x is the precision of the variables
+     being optimized.
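+
+     (An illustrative figure, not a guarantee: with 64 bit HWIs on a
+     target whose widest integer mode is 128 bits, WIDE_INT_MAX_ELTS
+     is (4 * 128 + 63) / 64 == 8, so VAL holds up to 512 bits.)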
*/ + HOST_WIDE_INT val[WIDE_INT_MAX_ELTS]; + unsigned short len; + unsigned int precision; + + inline const HOST_WIDE_INT* get_val () const { return val; } + wide_int_ro& operator = (const wide_int_ro &r) { + for (unsigned int i = 0; i < r.get_len (); ++i) + val[i] = r.get_val () [i]; + len = r.get_len (); + precision = r.get_precision (); + return *this; + } + + public: + wide_int_ro () : len (0) { } + + /* Convert an integer cst into a wide int. */ + wide_int_ro (const_tree tcst) { + *this = from_array (&TREE_INT_CST_ELT (tcst, 0), + TREE_INT_CST_NUNITS (tcst), + TYPE_PRECISION (TREE_TYPE (tcst)), false); + } + + wide_int_ro (HOST_WIDE_INT op0) { + precision = 0; + val[0] = op0; + len = 1; + } + wide_int_ro (int op0) { + precision = 0; + val[0] = op0; + len = 1; + } + wide_int_ro (unsigned HOST_WIDE_INT op0) { + *this = wide_int_ro::from_uhwi (op0); + } + wide_int_ro (unsigned int op0) { + *this = wide_int_ro::from_uhwi (op0); + } + wide_int_ro (const rtx_mode_t& op0) { + *this = wide_int_ro::from_rtx (op0); + } + /* + * Conversions. + */ + + static wide_int_ro from_shwi (HOST_WIDE_INT op0, + unsigned int precision = 0); + static wide_int_ro from_uhwi (unsigned HOST_WIDE_INT op0, + unsigned int precision = 0); + + /* Convert OP0 into a wide_int with parameters taken from TYPE. */ + inline static wide_int_ro + from_hwi (HOST_WIDE_INT op0, const_tree type) + { + unsigned int prec = TYPE_PRECISION (type); + + if (TYPE_UNSIGNED (type)) + return wide_int_ro::from_uhwi (op0, prec); + else + return wide_int_ro::from_shwi (op0, prec); + } + + /* Convert signed OP0 into a wide_int_ro with parameters taken from + MODE. */ + inline static wide_int_ro + from_shwi (HOST_WIDE_INT op0, enum machine_mode mode) + { + unsigned int prec = GET_MODE_PRECISION (mode); + return wide_int_ro::from_shwi (op0, prec); + } + + /* Convert unsigned OP0 into a wide_int_ro with parameters taken + from MODE. */ + inline static wide_int_ro + from_uhwi (unsigned HOST_WIDE_INT op0, enum machine_mode mode) + { + unsigned int prec = GET_MODE_PRECISION (mode); + return wide_int_ro::from_uhwi (op0, prec); + } + + static wide_int_ro from_array (const HOST_WIDE_INT* op0, + unsigned int len, + unsigned int precision, + bool need_canon = true); + + static wide_int_ro from_double_int (double_int, unsigned int precision); + static wide_int_ro from_buffer (const unsigned char*, int); + + /* Conversion to and from GMP integer representations. */ + void to_mpz (mpz_t, signop) const; + static wide_int_ro from_mpz (const_tree, mpz_t, bool); + + /* Return THIS as a signed HOST_WIDE_INT. If THIS does not fit in + PREC, the information is lost. */ + inline HOST_WIDE_INT + to_shwi (unsigned int prec = 0) const + { + HOST_WIDE_INT result; + + if (prec == 0) + prec = precision; + + if (prec < HOST_BITS_PER_WIDE_INT) + result = sext_hwi (val[0], prec); + else + result = val[0]; + + return result; + } + + + /* Return THIS as an unsigned HOST_WIDE_INT. If THIS does not fit + in PREC, the information is lost. */ + inline unsigned HOST_WIDE_INT to_uhwi (unsigned int prec = 0) const + { + HOST_WIDE_INT result; + + if (prec == 0) + prec = precision; + + if (prec < HOST_BITS_PER_WIDE_INT) + result = zext_hwi (val[0], prec); + else + result = val[0]; + + return result; + } + + + /* TODO: The compiler is half converted from using HOST_WIDE_INT to + represent addresses to using wide_int_ro to represent addresses. + We use to_short_addr at the interface from new code to old, + unconverted code. 
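+
+     A hypothetical call site at such an interface (sketch only,
+     WI_POS is assumed to fit in a HWI):
+
+       HOST_WIDE_INT pos = wi_pos.to_short_addr ();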
*/ + inline HOST_WIDE_INT to_short_addr () const { + return val[0]; + } + + /* + * Largest and smallest values that are represented in a TYPE_PREC. + * RESULT_PREC is the precision of the value that the answer is + * returned within. The default value of 0 says return the answer + * with TYPE_PREC precision. + + * TODO: There is still code from the double_int era that trys to + * make up for the fact that double int's could not represent the + * min and max values of all types. This code should be removed + * because the min and max values can always be represented in + * wide-ints and int-csts. + */ + static wide_int_ro max_value (unsigned int type_prec, + signop sgn, + unsigned int result_prec = 0); + + /* Produce the largest number that is represented in TYPE. The + precision and sign are taken from TYPE. */ + inline static wide_int_ro max_value (const_tree type) + { + unsigned int prec = TYPE_PRECISION (type); + return max_value (prec, TYPE_SIGN (type), prec); + } + + /* Produce the largest number that is represented in MODE. The + precision are taken from mode. SGN must be SIGNED or + UNSIGNED. */ + inline static wide_int_ro + max_value (enum machine_mode mode, signop sgn) + { + unsigned int prec = GET_MODE_PRECISION (mode); + return max_value (prec, sgn, prec); + } + + static wide_int_ro min_value (unsigned int type_prec, + signop sgn, + unsigned int result_prec = 0); + + /* Produce the smallest number that is represented in TYPE. The + precision and sign are taken from TYPE. */ + inline static + wide_int_ro min_value (const_tree type) + { + unsigned int prec = TYPE_PRECISION (type); + return min_value (prec, TYPE_SIGN (type), prec); + } + + /* Produce the smallest number that is represented in MODE. The + precision are taken from mode. SGN must be SIGNED or + UNSIGNED. */ + inline static + wide_int_ro min_value (enum machine_mode mode, signop sgn) + { + unsigned int prec = GET_MODE_PRECISION (mode); + return min_value (prec, sgn, prec); + } + + /* + * Small constants. These are generally only needed in the places + * where the precision must be provided. For instance in binary + * operations where the other operand has a precision, or for use + * with max_wide_int or addr_wide_int, these are never needed. + */ + + /* Return a wide int of -1 with precision PREC. */ + inline static wide_int_ro + minus_one (unsigned int prec) + { + return wide_int_ro::from_shwi (-1, prec); + } + + /* Return a wide int of 0 with precision PREC. */ + inline static wide_int_ro + zero (unsigned int prec) + { + return wide_int_ro::from_shwi (0, prec); + } + + /* Return a wide int of 1 with precision PREC. */ + inline static wide_int_ro + one (unsigned int prec) + { + return wide_int_ro::from_shwi (1, prec); + } + + /* Return a wide int of 2 with precision PREC. */ + inline static wide_int_ro + two (unsigned int prec) + { + return wide_int_ro::from_shwi (2, prec); + } + + /* + * Public accessors for the interior of a wide int. + */ + + /* Get the number of HOST_WIDE_INTs actually represented within the + wide int. */ + inline unsigned short + get_len () const + { + return len; + } + + /* Get precision of the value represented within the wide int. */ + inline unsigned int + get_precision () const + { + return precision; + } + + /* Get a particular element of the wide int. */ + inline HOST_WIDE_INT + elt (unsigned int i) const + { + return i >= len ? sign_mask () : val[i]; + } + + /* + * Comparative functions. + */ + + /* Return true if THIS is -1. This is correct even if precision is 0. 
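+
+     For example (a sketch):
+
+       wide_int_ro::minus_one (16).minus_one_p ()   is true
+       wide_int_ro (-1).minus_one_p ()              is true (precision 0)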
*/ + inline bool + minus_one_p () const + { + HOST_WIDE_INT x; + + if (precision && precision < HOST_BITS_PER_WIDE_INT) + x = sext_hwi (val[0], precision); + else + x = val[0]; + + return len == 1 && x == (HOST_WIDE_INT)-1; + } + + /* Return true if THIS is 0. This is correct even if precision is 0. */ + inline bool + zero_p () const + { + HOST_WIDE_INT x; + + if (precision && precision < HOST_BITS_PER_WIDE_INT) + x = sext_hwi (val[0], precision); + else if (len == 0) + { + gcc_assert (precision == 0); + return true; + } + else + x = val[0]; + + return len == 1 && x == 0; + } + + /* Return true if THIS is 1. This is correct even if precision is 0. */ + inline bool + one_p () const + { + HOST_WIDE_INT x; + + if (precision && precision < HOST_BITS_PER_WIDE_INT) + x = zext_hwi (val[0], precision); + else + x = val[0]; + + return len == 1 && x == 1; + } + + /* Return true if THIS is negative based on the interpretation of SGN. + For UNSIGNED, this is always false. This is correct even if + precision is 0. */ + inline bool + neg_p (signop sgn) const + { + if (sgn == UNSIGNED) + return false; + + if (precision == 0) + return (len == 1 && val[0] < 0); + + return sign_mask () != 0; + } + + bool multiple_of_p (const wide_int_ro &, signop, wide_int_ro *) const; + + /* + * Comparisons, note that only equality is an operator. The other + * comparisons cannot be operators since they are inherently signed or + * unsigned and C++ has no such operators. + */ + + /* Return true if THIS == C. If both operands have non zero + precisions, the precisions must be the same. */ + template <typename T> + inline bool + operator == (const T &c) const + { + bool result; + HOST_WIDE_INT ws[WIDE_INT_MAX_ELTS]; + const HOST_WIDE_INT *s; + unsigned int cl; + unsigned int p1, p2; + + p1 = precision; + + s = to_shwi1 (ws, &cl, &p2, c); + check_precision (&p1, &p2, true, false); + + if (p1 == 0) + /* There are prec 0 types and we need to do this to check their + min and max values. */ + result = (len == cl) && (val[0] == s[0]); + else if (p1 < HOST_BITS_PER_WIDE_INT) + { + unsigned HOST_WIDE_INT mask = ((HOST_WIDE_INT)1 << p1) - 1; + result = (val[0] & mask) == (s[0] & mask); + } + else if (p1 == HOST_BITS_PER_WIDE_INT) + result = val[0] == s[0]; + else + result = eq_p_large (val, len, p1, s, cl); + + if (result) + gcc_assert (len == cl); + +#ifdef DEBUG_WIDE_INT + debug_vwa ("wide_int_ro:: %d = (%s == %s)\n", result, *this, s, cl, p2); +#endif + return result; + } + + /* Return true of C1 == C2. If both parameters have non zero + precisions, then those precisions must be equal. */ + template <typename T1, typename T2> + static inline bool + eq_p (const T1 &c1, const T2 &c2) + { + bool result; + HOST_WIDE_INT ws1[WIDE_INT_MAX_ELTS]; + HOST_WIDE_INT ws2[WIDE_INT_MAX_ELTS]; + const HOST_WIDE_INT *s1, *s2; /* Returned data */ + unsigned int cl1, cl2; /* array lengths */ + unsigned int p1, p2; /* precisions */ + + s1 = to_shwi1 (ws1, &cl1, &p1, c1); + s2 = to_shwi1 (ws2, &cl2, &p2, c2); + check_precision (&p1, &p2, true, false); + + if (p1 == 0) + /* There are prec 0 types and we need to do this to check their + min and max values. */ + result = (cl1 == cl2) && (s1[0] == s2[0]); + else if (p1 < HOST_BITS_PER_WIDE_INT) + { + unsigned HOST_WIDE_INT mask = ((HOST_WIDE_INT)1 << p1) - 1; + result = (s1[0] & mask) == (s2[0] & mask); + } + else if (p1 == HOST_BITS_PER_WIDE_INT) + result = s1[0] == s2[0]; + else + result = eq_p_large (s1, cl1, p1, s2, cl2); + + return result; + } + + /* Return true if THIS != C. 
+     If both parameters have nonzero
+     precisions, then those precisions must be equal.  */
+  template <typename T>
+  inline bool
+  operator != (const T &c) const
+  {
+    return !(*this == c);
+  }
+
+  /* Return true if THIS < C.  Signedness is indicated by SGN.  */
+  template <typename T>
+  inline bool
+  lt_p (const T &c, signop sgn) const
+  {
+    if (sgn == SIGNED)
+      return lts_p (c);
+    else
+      return ltu_p (c);
+  }
+
+  /* Return true if C1 < C2.  Signedness is indicated by SGN.  */
+  template <typename T1, typename T2>
+  static inline bool
+  lt_p (const T1 &c1, const T2 &c2, signop sgn)
+  {
+    if (sgn == SIGNED)
+      return lts_p (c1, c2);
+    else
+      return ltu_p (c1, c2);
+  }
+
+  /* Return true if THIS < C using signed comparisons.  */
+  template <typename T>
+  inline bool
+  lts_p (const T &c) const
+  {
+    bool result;
+    HOST_WIDE_INT ws[WIDE_INT_MAX_ELTS];
+    const HOST_WIDE_INT *s;
+    unsigned int cl;
+    unsigned int p1, p2;
+
+    p1 = precision;
+    s = to_shwi1 (ws, &cl, &p2, c);
+    check_precision (&p1, &p2, false, true);
+
+    if (p1 <= HOST_BITS_PER_WIDE_INT
+        && p2 <= HOST_BITS_PER_WIDE_INT)
+      {
+        gcc_assert (cl != 0);
+        HOST_WIDE_INT x0 = sext_hwi (val[0], p1);
+        HOST_WIDE_INT x1 = sext_hwi (s[0], p2);
+        result = x0 < x1;
+      }
+    else
+      result = lts_p_large (val, len, p1, s, cl, p2);
+
+#ifdef DEBUG_WIDE_INT
+    debug_vwa ("wide_int_ro:: %d = (%s lts_p %s)\n", result, *this, s, cl, p2);
+#endif
+    return result;
+  }
+
+  /* Return true if C1 < C2 using signed comparisons.  */
+  template <typename T1, typename T2>
+  static inline bool
+  lts_p (const T1 &c1, const T2 &c2)
+  {
+    bool result;
+    HOST_WIDE_INT ws1[WIDE_INT_MAX_ELTS];
+    HOST_WIDE_INT ws2[WIDE_INT_MAX_ELTS];
+    const HOST_WIDE_INT *s1, *s2;  /* Returned data */
+    unsigned int cl1, cl2;         /* array lengths */
+    unsigned int p1, p2;           /* precisions */
+
+    s1 = to_shwi1 (ws1, &cl1, &p1, c1);
+    s2 = to_shwi1 (ws2, &cl2, &p2, c2);
+    check_precision (&p1, &p2, false, true);
+
+    if (p1 <= HOST_BITS_PER_WIDE_INT
+        && p2 <= HOST_BITS_PER_WIDE_INT)
+      {
+        HOST_WIDE_INT x0 = sext_hwi (s1[0], p1);
+        HOST_WIDE_INT x1 = sext_hwi (s2[0], p2);
+        result = x0 < x1;
+      }
+    else
+      result = lts_p_large (s1, cl1, p1, s2, cl2, p2);
+
+#ifdef DEBUG_WIDE_INT
+    debug_vaa ("wide_int_ro:: %d = (%s lts_p %s)\n", result, s1, cl1, p1, s2, cl2, p2);
+#endif
+    return result;
+  }
+
+  /* Return true if THIS < C using unsigned comparisons.  */
+  template <typename T>
+  inline bool
+  ltu_p (const T &c) const
+  {
+    bool result;
+    HOST_WIDE_INT ws[WIDE_INT_MAX_ELTS];
+    const HOST_WIDE_INT *s;
+    unsigned int cl;
+    unsigned int p1, p2;
+
+    p1 = precision;
+    s = to_shwi1 (ws, &cl, &p2, c);
+    check_precision (&p1, &p2, false, true);
+
+    if (p1 <= HOST_BITS_PER_WIDE_INT
+        && p2 <= HOST_BITS_PER_WIDE_INT)
+      {
+        unsigned HOST_WIDE_INT x0 = zext_hwi (val[0], p1);
+        unsigned HOST_WIDE_INT x1 = zext_hwi (s[0], p2);
+        result = x0 < x1;
+      }
+    else
+      result = ltu_p_large (val, len, p1, s, cl, p2);
+
+#ifdef DEBUG_WIDE_INT
+    debug_vwa ("wide_int_ro:: %d = (%s ltu_p %s)\n", result, *this, s, cl, p2);
+#endif
+    return result;
+  }
+
+  /* Return true if C1 < C2 using unsigned comparisons.
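+
+     For example (an illustrative sketch): at 8 bits of precision the
+     bit pattern 0xff is 255 to ltu_p but -1 to lts_p, so with
+     hypothetical 8 bit wide-ints ONE (1) and FF (0xff):
+
+       wide_int_ro::ltu_p (one, ff)   is true
+       wide_int_ro::lts_p (one, ff)   is false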
*/ + template <typename T1, typename T2> + static inline bool + ltu_p (const T1 &c1, const T2 &c2) + { + bool result; + HOST_WIDE_INT ws1[WIDE_INT_MAX_ELTS]; + HOST_WIDE_INT ws2[WIDE_INT_MAX_ELTS]; + const HOST_WIDE_INT *s1, *s2; /* Returned data */ + unsigned int cl1, cl2; /* array lengths */ + unsigned int p1, p2; /* precisions */ + + s1 = to_shwi1 (ws1, &cl1, &p1, c1); + s2 = to_shwi1 (ws2, &cl2, &p2, c2); + check_precision (&p1, &p2, false, true); + + if (p1 <= HOST_BITS_PER_WIDE_INT + && p2 <= HOST_BITS_PER_WIDE_INT) + { + unsigned HOST_WIDE_INT x0 = zext_hwi (s1[0], p1); + unsigned HOST_WIDE_INT x1 = zext_hwi (s2[0], p2); + result = x0 < x1; + } + else + result = ltu_p_large (s1, cl1, p1, s2, cl2, p2); +#ifdef DEBUG_WIDE_INT + debug_vaa ("wide_int_ro:: %d = (%s ltu_p %s)\n", result, s1, cl1, p1, s2, cl2, p2); +#endif + return result; + } + + /* Return true if THIS <= C. Signness is indicated by SGN. */ + template <typename T> + inline bool + le_p (const T &c, signop sgn) const + { + if (sgn == SIGNED) + return les_p (c); + else + return leu_p (c); + } + + /* Return true if C1 <= C2. Signness is indicated by SGN. */ + template <typename T1, typename T2> + static inline bool + le_p (const T1 &c1, const T2 &c2, signop sgn) + { + if (sgn == SIGNED) + return les_p (c1, c2); + else + return leu_p (c1, c2); + } + + /* Return true if THIS <= C using signed comparisons. */ + template <typename T> + inline bool + les_p (const T &c) const + { + return !gts_p (c); + } + + /* Return true if C1 <= C2 using signed comparisons. */ + template <typename T1, typename T2> + static inline bool + les_p (const T1 &c1, const T2 &c2) + { + return !gts_p (c1, c2); + } + + /* Return true if THIS <= C using unsigned comparisons. */ + template <typename T> + inline bool + leu_p (const T &c) const + { + return !gtu_p (c); + } + + /* Return true if C1 <= C2 using unsigned comparisons. */ + template <typename T1, typename T2> + static inline bool + leu_p (const T1 &c1, const T2 &c2) + { + return !gtu_p (c1, c2); + } + + /* Return true if THIS > C. Signness is indicated by SGN. */ + template <typename T> + inline bool + gt_p (const T &c, signop sgn) const + { + if (sgn == SIGNED) + return gts_p (c); + else + return gtu_p (c); + } + + /* Return true if C1 > C2. Signness is indicated by SGN. */ + template <typename T1, typename T2> + static inline bool + gt_p (const T1 &c1, const T2 &c2, signop sgn) + { + if (sgn == SIGNED) + return gts_p (c1, c2); + else + return gtu_p (c1, c2); + } + + /* Return true if THIS > C using signed comparisons. */ + template <typename T> + inline bool + gts_p (const T &c) const + { + return lts_p (c, *this); + } + + /* Return true if C1 > C2 using signed comparisons. */ + template <typename T1, typename T2> + static inline bool + gts_p (const T1 &c1, const T2 &c2) + { + return lts_p (c2, c1); + } + + /* Return true if THIS > C using unsigned comparisons. */ + template <typename T> + inline bool + gtu_p (const T &c) const + { + return ltu_p (c, *this); + } + + /* Return true if C1 > C2 using unsigned comparisons. */ + template <typename T1, typename T2> + static inline bool + gtu_p (const T1 &c1, const T2 &c2) + { + return ltu_p (c2, c1); + } + + /* Return true if THIS >= C. Signness is indicated by SGN. */ + template <typename T> + inline bool + ge_p (const T &c, signop sgn) const + { + if (sgn == SIGNED) + return ges_p (c); + else + return geu_p (c); + } + + /* Return true if C1 >= C2. Signness is indicated by SGN. 
*/ + template <typename T1, typename T2> + static inline bool + ge_p (const T1 &c1, const T2 &c2, signop sgn) + { + if (sgn == SIGNED) + return ges_p (c1, c2); + else + return geu_p (c1, c2); + } + + /* Return true if THIS >= C using signed comparisons. */ + template <typename T> + inline bool + ges_p (const T &c) const + { + return !lts_p (c); + } + + /* Return true if C1 >= C2 using signed comparisons. */ + template <typename T1, typename T2> + static inline bool + ges_p (const T1 &c1, const T2 &c2) + { + return !lts_p (c1, c2); + } + + /* Return true if THIS >= C using unsigned comparisons. */ + template <typename T> + inline bool + geu_p (const T &c) const + { + return !ltu_p (c); + } + + /* Return true if C1 >= C2 using unsigned comparisons. */ + template <typename T1, typename T2> + static inline bool + geu_p (const T1 &c1, const T2 &c2) + { + return !ltu_p (c1, c2); + } + + /* Return -1 0 or 1 depending on how THIS compares with C. + Signness is indicated by SGN. */ + template <typename T> + int + cmp (const T &c, signop sgn) const + { + if (sgn == SIGNED) + return cmps (c); + else + return cmpu (c); + } + + /* Returns -1 if THIS < C, 0 if THIS == C and 1 if A > C using + signed compares. */ + template <typename T> + int + cmps (const T &c) const + { + int result; + HOST_WIDE_INT ws[WIDE_INT_MAX_ELTS]; + const HOST_WIDE_INT *s; + unsigned int cl; + unsigned int prec; + + s = to_shwi1 (ws, &cl, &prec, c); + if (prec == 0) + prec = precision; + + if (precision <= HOST_BITS_PER_WIDE_INT + && prec <= HOST_BITS_PER_WIDE_INT) + { + HOST_WIDE_INT x0 = sext_hwi (val[0], precision); + HOST_WIDE_INT x1 = sext_hwi (s[0], prec); + + if (x0 < x1) + result = -1; + else if (x0 > x1) + result = 1; + else + result = 0; + } + else + result = cmps_large (val, len, precision, s, cl, prec); + +#ifdef DEBUG_WIDE_INT + debug_vwa ("wide_int_ro:: %d = (%s cmps %s)\n", result, *this, s, cl, prec); +#endif + return result; + } + + /* Returns -1 if THIS < C, 0 if THIS == C and 1 if A > C using + unsigned compares. */ + template <typename T> + int + cmpu (const T &c) const + { + int result; + HOST_WIDE_INT ws[WIDE_INT_MAX_ELTS]; + const HOST_WIDE_INT *s; + unsigned int cl; + unsigned int prec; + + s = to_shwi1 (ws, &cl, &prec, c); + if (prec == 0) + prec = precision; + + if (precision <= HOST_BITS_PER_WIDE_INT + && prec <= HOST_BITS_PER_WIDE_INT) + { + unsigned HOST_WIDE_INT x0 = zext_hwi (val[0], precision); + unsigned HOST_WIDE_INT x1 = zext_hwi (s[0], prec); + + if (x0 < x1) + result = -1; + else if (x0 == x1) + result = 0; + else + result = 1; + } + else + result = cmpu_large (val, len, precision, s, cl, prec); + +#ifdef DEBUG_WIDE_INT + debug_vwa ("wide_int_ro:: %d = (%s cmpu %s)\n", result, *this, s, cl, prec); +#endif + + return result; + } + + bool only_sign_bit_p (unsigned int prec) const; + + /* Return true if THIS has the sign bit set to 1 and all other bits + are zero. */ + inline bool + only_sign_bit_p () const + { + return only_sign_bit_p (precision); + } + + /* Return true if THIS fits in a HOST_WIDE_INT with no loss of + precision. */ + inline bool + fits_shwi_p () const + { + return len == 1; + } + + /* Return true if THIS fits in an unsigned HOST_WIDE_INT with no + loss of precision. */ + inline bool + fits_uhwi_p () const + { + return len == 1 + || (len == 2 && val[1] == 0); + } + + bool fits_to_tree_p (const_tree type) const; + + /* + * Min and max + */ + + /* Return the signed or unsigned min of THIS and C. 
*/ + template <typename T> + inline wide_int_ro + min (const T &c, signop sgn) const + { + HOST_WIDE_INT ws[WIDE_INT_MAX_ELTS]; + const HOST_WIDE_INT *s; + unsigned int cl; + unsigned int p1, p2; + + p1 = precision; + + s = to_shwi1 (ws, &cl, &p2, c); + check_precision (&p1, &p2, true, true); + + if (sgn == SIGNED) + return lts_p (c) ? (*this) : wide_int_ro::from_array (s, cl, p1, false); + else + return ltu_p (c) ? (*this) : wide_int_ro::from_array (s, cl, p1, false); + } + + /* Return the signed or unsigned min of THIS and OP1. */ + inline wide_int_ro + min (const wide_int_ro &op1, signop sgn) const + { + if (sgn == SIGNED) + return lts_p (op1) ? (*this) : op1; + else + return ltu_p (op1) ? (*this) : op1; + } + + /* Return the signed or unsigned max of THIS and C. */ + template <typename T> + inline wide_int_ro + max (const T &c, signop sgn) const + { + HOST_WIDE_INT ws[WIDE_INT_MAX_ELTS]; + const HOST_WIDE_INT *s; + unsigned int cl; + unsigned int p1, p2; + + p1 = precision; + + s = to_shwi1 (ws, &cl, &p2, c); + check_precision (&p1, &p2, true, true); + if (sgn == SIGNED) + return gts_p (c) ? (*this) : wide_int_ro::from_array (s, cl, p1, false); + else + return gtu_p (c) ? (*this) : wide_int_ro::from_array (s, cl, p1, false); + } + + /* Return the signed or unsigned max of THIS and OP1. */ + inline wide_int_ro + max (const wide_int_ro &op1, signop sgn) const + { + if (sgn == SIGNED) + return gts_p (op1) ? (*this) : op1; + else + return gtu_p (op1) ? (*this) : op1; + } + + /* Return the signed min of THIS and C. */ + template <typename T> + inline wide_int_ro + smin (const T &c) const + { + HOST_WIDE_INT ws[WIDE_INT_MAX_ELTS]; + const HOST_WIDE_INT *s; + unsigned int cl; + unsigned int p1, p2; + + p1 = precision; + + s = to_shwi1 (ws, &cl, &p2, c); + check_precision (&p1, &p2, true, true); + + return lts_p (c) ? (*this) : wide_int_ro::from_array (s, cl, p1, false); + } + + /* Return the signed min of THIS and OP1. */ + inline wide_int_ro + smin (const wide_int_ro &op1) const + { + return lts_p (op1) ? (*this) : op1; + } + + /* Return the signed max of THIS and C. */ + template <typename T> + inline wide_int_ro + smax (const T &c) const + { + HOST_WIDE_INT ws[WIDE_INT_MAX_ELTS]; + const HOST_WIDE_INT *s; + unsigned int cl; + unsigned int p1, p2; + + p1 = precision; + + s = to_shwi1 (ws, &cl, &p2, c); + check_precision (&p1, &p2, true, true); + + return gts_p (c) ? (*this) : wide_int_ro::from_array (s, cl, p1, false); + } + + /* Return the signed max of THIS and OP1. */ + inline wide_int_ro + smax (const wide_int_ro &op1) const + { + return gts_p (op1) ? (*this) : op1; + } + + /* Return the unsigned min of THIS and C. */ + template <typename T> + inline wide_int_ro + umin (const T &c) const + { + HOST_WIDE_INT ws[WIDE_INT_MAX_ELTS]; + const HOST_WIDE_INT *s; + unsigned int cl; + unsigned int p1, p2; + + p1 = precision; + + s = to_shwi1 (ws, &cl, &p2, c); + return ltu_p (c) ? (*this) : wide_int_ro::from_array (s, cl, p1, false); + } + + /* Return the unsigned min of THIS and OP1. */ + inline wide_int_ro + umin (const wide_int_ro &op1) const + { + return ltu_p (op1) ? (*this) : op1; + } + + /* Return the unsigned max of THIS and C. */ + template <typename T> + inline wide_int_ro + umax (const T &c) const + { + HOST_WIDE_INT ws[WIDE_INT_MAX_ELTS]; + const HOST_WIDE_INT *s; + unsigned int cl; + unsigned int p1, p2; + + p1 = precision; + + s = to_shwi1 (ws, &cl, &p2, c); + check_precision (&p1, &p2, true, true); + + return gtu_p (c) ? 
(*this) : wide_int_ro::from_array (s, cl, p1, false); + } + + /* Return the unsigned max of THIS and OP1. */ + inline wide_int_ro + umax (const wide_int_ro &op1) const + { + return gtu_p (op1) ? (*this) : op1; + } + + /* + * Extension, these do not change the precision. + */ + + /* Return THIS extended to PREC. The signness of the extension is + specified by OP. */ + inline wide_int_ro + ext (unsigned int prec, signop z) const + { + if (z == UNSIGNED) + return zext (prec); + else + return sext (prec); + } + wide_int_ro sext (unsigned int offset) const; + wide_int_ro zext (unsigned int offset) const; + + /* + * Size changing. These change the underlying precision and are not + * available for max_wide_int or addr_wide_int. + */ + + wide_int_ro force_to_size (unsigned int precision, signop sgn) const; + + /* Return THIS forced to the size PREC. This is sign extended if + needed. */ + inline wide_int_ro + sforce_to_size (unsigned int prec) const + { + return force_to_size (prec, SIGNED); + } + + /* Return THIS forced to the size PREC. This is zero extended if + needed. */ + inline wide_int_ro + zforce_to_size (unsigned int prec) const + { + return force_to_size (prec, UNSIGNED); + } + + /* + * Masking, and Insertion + */ + + wide_int_ro set_bit (unsigned int bitpos) const; + static wide_int_ro set_bit_in_zero (unsigned int bitpos, unsigned int prec); + wide_int_ro insert (const wide_int_ro &op0, unsigned int offset, + unsigned int width) const; + + wide_int_ro bswap () const; + + static wide_int_ro mask (unsigned int width, bool negate, + unsigned int prec); + static wide_int_ro shifted_mask (unsigned int start, unsigned int width, + bool negate, unsigned int prec); + + /* Produce 0 or -1 that is the smear of the sign bit. */ + HOST_WIDE_INT + sign_mask () const + { + int i = len - 1; + if (precision < HOST_BITS_PER_WIDE_INT) + { + /* We don't allow a int:0 inside a struct to get this far, + nor a value of indefinite precision. */ + gcc_assert (precision != 0); + return ((val[0] << (HOST_BITS_PER_WIDE_INT - precision)) + >> (HOST_BITS_PER_WIDE_INT - 1)); + } + + /* VRP appears to be badly broken and this is a very ugly fix. */ + if (i >= 0) + return val[i] >> (HOST_BITS_PER_WIDE_INT - 1); + + gcc_unreachable (); +#if 0 + return val[len - 1] >> (HOST_BITS_PER_WIDE_INT - 1); +#endif + } + + void clear_undef (signop sgn); + + /* + * Logicals + */ + + /* Return THIS & C. */ + template <typename T> + inline wide_int_ro + operator & (const T &c) const + { + wide_int_ro result; + HOST_WIDE_INT ws[WIDE_INT_MAX_ELTS]; + const HOST_WIDE_INT *s; + unsigned int cl; + unsigned int p1, p2; + + p1 = precision; + s = to_shwi1 (ws, &cl, &p2, c); + check_precision (&p1, &p2, true, true); + + if (p1 <= HOST_BITS_PER_WIDE_INT) + { + result.len = 1; + result.precision = p1; + result.val[0] = val[0] & s[0]; + } + else + result = and_large (val, len, p1, s, cl); + +#ifdef DEBUG_WIDE_INT + debug_wwa ("wide_int_ro:: %s = (%s & %s)\n", result, *this, s, cl, p2); +#endif + return result; + } + + /* Return THIS & ~C. 
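+
+     For example (a sketch): if M is a hypothetical low-bit alignment
+     mask, x.and_not (m) clears in X exactly the bits that are set
+     in M.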
*/ + template <typename T> + inline wide_int_ro + and_not (const T &c) const + { + wide_int_ro result; + HOST_WIDE_INT ws[WIDE_INT_MAX_ELTS]; + const HOST_WIDE_INT *s; + unsigned int cl; + unsigned int p1, p2; + + p1 = precision; + s = to_shwi1 (ws, &cl, &p2, c); + check_precision (&p1, &p2, true, true); + + if (p1 <= HOST_BITS_PER_WIDE_INT) + { + result.len = 1; + result.precision = p1; + result.val[0] = val[0] & ~s[0]; + } + else + result = and_not_large (val, len, p1, s, cl); + +#ifdef DEBUG_WIDE_INT + debug_wwa ("wide_int_ro:: %s = (%s &~ %s)\n", result, *this, s, cl, p2); +#endif + return result; + } + + /* Return the logical negation (bitwise complement) of THIS. */ + inline wide_int_ro + operator ~ () const + { + wide_int_ro result; + int l0 = len - 1; + + result.len = len; + result.precision = precision; + + while (l0 >= 0) + { + result.val[l0] = ~val[l0]; + l0--; + } + +#ifdef DEBUG_WIDE_INT + debug_ww ("wide_int_ro:: %s = (~ %s)\n", result, *this); +#endif + return result; + } + + /* Return THIS | C. */ + template <typename T> + inline wide_int_ro + operator | (const T &c) const + { + wide_int_ro result; + HOST_WIDE_INT ws[WIDE_INT_MAX_ELTS]; + const HOST_WIDE_INT *s; + unsigned int cl; + unsigned int p1, p2; + + p1 = precision; + s = to_shwi1 (ws, &cl, &p2, c); + check_precision (&p1, &p2, true, true); + + if (p1 <= HOST_BITS_PER_WIDE_INT) + { + result.len = 1; + result.precision = p1; + result.val[0] = val[0] | s[0]; + } + else + result = or_large (val, len, p1, s, cl); + +#ifdef DEBUG_WIDE_INT + debug_wwa ("wide_int_ro:: %s = (%s | %s)\n", result, *this, s, cl, p2); +#endif + return result; + } + + /* Return THIS | ~C. */ + template <typename T> + inline wide_int_ro + or_not (const T &c) const + { + wide_int_ro result; + HOST_WIDE_INT ws[WIDE_INT_MAX_ELTS]; + const HOST_WIDE_INT *s; + unsigned int cl; + unsigned int p1, p2; + + p1 = precision; + s = to_shwi1 (ws, &cl, &p2, c); + check_precision (&p1, &p2, true, true); + + if (p1 <= HOST_BITS_PER_WIDE_INT) + { + result.len = 1; + result.precision = p1; + result.val[0] = val[0] | ~s[0]; + } + else + result = or_not_large (val, len, p1, s, cl); + +#ifdef DEBUG_WIDE_INT + debug_wwa ("wide_int_ro:: %s = (%s |~ %s)\n", result, *this, s, cl, p2); +#endif + return result; + } + + /* Return THIS ^ C. */ + template <typename T> + inline wide_int_ro + operator ^ (const T &c) const + { + wide_int_ro result; + HOST_WIDE_INT ws[WIDE_INT_MAX_ELTS]; + const HOST_WIDE_INT *s; + unsigned int cl; + unsigned int p1, p2; + + p1 = precision; + s = to_shwi1 (ws, &cl, &p2, c); + check_precision (&p1, &p2, true, true); + + if (p1 <= HOST_BITS_PER_WIDE_INT) + { + result.len = 1; + result.precision = p1; + result.val[0] = val[0] ^ s[0]; + } + else + result = xor_large (val, len, p1, s, cl); + +#ifdef DEBUG_WIDE_INT + debug_wwa ("wide_int_ro:: %s = (%s ^ %s)\n", result, *this, s, cl, p2); +#endif + return result; + } + + /* + * Arithmetic operation functions, alpha sorted (except divmod). + */ + wide_int_ro abs () const; + + /* Return THIS + C. 
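+
+     The sum wraps modulo 2^precision; for example (a sketch), at 8
+     bits of precision 255 + 1 yields 0.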
*/ + template <typename T> + inline wide_int_ro + operator + (const T &c) const + { + wide_int_ro result; + HOST_WIDE_INT ws[WIDE_INT_MAX_ELTS]; + const HOST_WIDE_INT *s; + unsigned int cl; + unsigned int p1, p2; + + p1 = precision; + s = to_shwi1 (ws, &cl, &p2, c); + check_precision (&p1, &p2, true, true); + + if (p1 <= HOST_BITS_PER_WIDE_INT) + { + result.len = 1; + result.precision = p1; + result.val[0] = val[0] + s[0]; + if (precision < HOST_BITS_PER_WIDE_INT) + result.val[0] = sext_hwi (result.val[0], p1); + } + else + result = add_large (val, len, p1, s, cl, UNSIGNED, 0); + +#ifdef DEBUG_WIDE_INT + debug_wwa ("wide_int_ro:: %s = (%s + %s)\n", result, *this, s, cl, p2); +#endif + return result; + } + + /* Return THIS + C. OVERFLOW is set based on the sign of the + operation that is specified in SGN. */ + + template <typename T> + inline wide_int_ro + add (const T &c, signop sgn, bool *overflow) const + { + wide_int_ro result; + HOST_WIDE_INT ws[WIDE_INT_MAX_ELTS]; + const HOST_WIDE_INT *s; + unsigned int cl; + unsigned int p1, p2; + + p1 = precision; + s = to_shwi1 (ws, &cl, &p2, c); + check_precision (&p1, &p2, true, true); + + if (p1 <= HOST_BITS_PER_WIDE_INT) + { + result.len = 1; + result.precision = p1; + result.val[0] = val[0] + s[0]; + if (p1 < HOST_BITS_PER_WIDE_INT) + result.val[0] = sext_hwi (result.val[0], p1); + if (sgn == SIGNED) + { + HOST_WIDE_INT x + = (((result.val[0] ^ val[0]) & (result.val[0] ^ s[0])) + >> (p1 - 1)) & 1; + *overflow = (x != 0); + } + else + *overflow = ((unsigned HOST_WIDE_INT)result.val[0] + < (unsigned HOST_WIDE_INT)val[0]); + } + else + result = add_large (val, len, p1, s, cl, sgn, overflow); + +#ifdef DEBUG_WIDE_INT + debug_waav ("wide_int_ro:: %s = (%s + %s) O=%d\n", + result, val, len, p1, s, cl, p1, *overflow); +#endif + return result; + } + + wide_int_ro clz () const; + wide_int_ro clrsb () const; + wide_int_ro ctz () const; + wide_int_ro exact_log2 () const; + wide_int_ro floor_log2 () const; + wide_int_ro ffs () const; + + /* Multiply THIS and C. The result is the same precision as the + operands, so there is no reason for signed or unsigned + versions. */ + template <typename T> + inline wide_int_ro + operator * (const T &c) const + { + wide_int_ro result; + HOST_WIDE_INT ws[WIDE_INT_MAX_ELTS]; + const HOST_WIDE_INT *s; + unsigned int cl; + bool overflow = false; + unsigned int p1, p2; + + p1 = precision; + s = to_shwi1 (ws, &cl, &p2, c); + check_precision (&p1, &p2, true, true); + + if (p1 <= HOST_BITS_PER_WIDE_INT) + { + result.len = 1; + result.precision = p1; + result.val[0] = val[0] * s[0]; + if (precision < HOST_BITS_PER_WIDE_INT) + result.val[0] = sext_hwi (result.val[0], precision); + } + else + result = mul_internal (false, false, + val, len, p1, + s, cl, UNSIGNED, &overflow, false); +#ifdef DEBUG_WIDE_INT + debug_wwa ("wide_int_ro:: %s = (%s * %s)\n", result, *this, s, cl, p2); +#endif + return result; + } + + /* Multiply THIS and C. The signedness is specified with SGN. + OVERFLOW is set true if the result overflows, false otherwise. */ + template <typename T> + inline wide_int_ro + mul (const T &c, signop sgn, bool *overflow) const + { + HOST_WIDE_INT ws[WIDE_INT_MAX_ELTS]; + const HOST_WIDE_INT *s; + unsigned int cl; + unsigned int p1, p2; + + if (overflow) + *overflow = false; + p1 = precision; + s = to_shwi1 (ws, &cl, &p2, c); + check_precision (&p1, &p2, true, true); + + return mul_internal (false, false, + val, len, p1, + s, cl, sgn, overflow, true); + } + + /* Signed multiply THIS and C. 
The result is the same precision + as the operands. OVERFLOW is set true if the result overflows, + false otherwise. */ + template <typename T> + inline wide_int_ro + smul (const T &c, bool *overflow) const + { + return mul (c, SIGNED, overflow); + } + + /* Unsigned multiply THIS and C. The result is the same precision + as the operands. OVERFLOW is set true if the result overflows, + false otherwise. */ + template <typename T> + inline wide_int_ro + umul (const T &c, bool *overflow) const + { + return mul (c, UNSIGNED, overflow); + } + + /* Multiply THIS and C. The signedness is specified with SGN. The + result is twice the precision as the operands. The signedness is + specified with SGN. */ + template <typename T> + inline wide_int_ro + mul_full (const T &c, signop sgn) const + { + HOST_WIDE_INT ws[WIDE_INT_MAX_ELTS]; + const HOST_WIDE_INT *s; + unsigned int cl; + unsigned int p1, p2; + + p1 = precision; + s = to_shwi1 (ws, &cl, &p2, c); + check_precision (&p1, &p2, true, true); + + return mul_internal (false, true, + val, len, p1, + s, cl, sgn, 0, false); + } + + /* Signed multiply THIS and C. The result is twice the precision + as the operands. */ + template <typename T> + inline wide_int_ro + smul_full (const T &c) const + { + return mul_full (c, SIGNED); + } + + /* Unsigned multiply THIS and C. The result is twice the + precision as the operands. */ + template <typename T> + inline wide_int_ro + umul_full (const T &c) const + { + return mul_full (c, UNSIGNED); + } + + /* Multiply THIS and C and return the high part of that result. + The signedness is specified with SGN. The result is the same + precision as the operands. The mode is the same mode as the + operands. The signedness is specified with y. */ + template <typename T> + inline wide_int_ro + mul_high (const T &c, signop sgn) const + { + HOST_WIDE_INT ws[WIDE_INT_MAX_ELTS]; + const HOST_WIDE_INT *s; + unsigned int cl; + unsigned int p1, p2; + + p1 = precision; + s = to_shwi1 (ws, &cl, &p2, c); + check_precision (&p1, &p2, true, true); + + return mul_internal (true, false, + val, len, p1, + s, cl, sgn, 0, false); + } + + /* Negate this. */ + inline wide_int_ro operator - () const { + wide_int_ro r; + r = wide_int_ro (0) - *this; + return r; + } + + /* Negate THIS. */ + inline wide_int_ro + neg () const + { + wide_int_ro z = wide_int_ro::from_shwi (0, precision); + + gcc_checking_assert (precision); + return z - *this; + } + + /* Negate THIS. OVERFLOW is set true if the value cannot be + negated, false otherwise. */ + inline wide_int_ro + neg (bool *overflow) const + { + wide_int_ro z = wide_int_ro::from_shwi (0, precision); + + gcc_checking_assert (precision); + *overflow = only_sign_bit_p (); + + return z - *this; + } + + wide_int_ro parity () const; + wide_int_ro popcount () const; + + /* Return THIS - C. */ + template <typename T> + inline wide_int_ro + operator - (const T& c) const + { + wide_int_ro result; + HOST_WIDE_INT ws[WIDE_INT_MAX_ELTS]; + const HOST_WIDE_INT *s; + unsigned int cl; + unsigned int p1, p2; + + p1 = precision; + s = to_shwi1 (ws, &cl, &p2, c); + check_precision (&p1, &p2, true, true); + + if (p1 <= HOST_BITS_PER_WIDE_INT) + { + result.len = 1; + result.precision = p1; + result.val[0] = val[0] - s[0]; + if (p1 < HOST_BITS_PER_WIDE_INT) + result.val[0] = sext_hwi (result.val[0], p1); + } + else + result = sub_large (val, len, p1, s, cl, UNSIGNED, 0); + +#ifdef DEBUG_WIDE_INT + debug_wwa ("wide_int_ro:: %s = (%s - %s)\n", result, *this, s, cl, p2); +#endif + return result; + } + + /* Return THIS - C. 
OVERFLOW is set based on the sign of the
+     operation that is specified in SGN.  */
+  template <typename T>
+  inline wide_int_ro
+  sub (const T& c, signop sgn, bool *overflow) const
+  {
+    wide_int_ro result;
+    HOST_WIDE_INT ws[WIDE_INT_MAX_ELTS];
+    const HOST_WIDE_INT *s;
+    unsigned int cl;
+    unsigned int p1, p2;
+
+    p1 = precision;
+    s = to_shwi1 (ws, &cl, &p2, c);
+    check_precision (&p1, &p2, true, true);
+
+    if (p1 <= HOST_BITS_PER_WIDE_INT)
+      {
+        result.len = 1;
+        result.precision = p1;
+        result.val[0] = val[0] - s[0];
+        if (p1 < HOST_BITS_PER_WIDE_INT)
+          result.val[0] = sext_hwi (result.val[0], p1);
+        if (sgn == SIGNED)
+          {
+            HOST_WIDE_INT x
+              = (((val[0] ^ s[0]) & (result.val[0] ^ val[0]))
+                 >> (p1 - 1)) & 1;
+            *overflow = (x != 0);
+          }
+        else
+          *overflow = ((unsigned HOST_WIDE_INT)result.val[0]
+                       > (unsigned HOST_WIDE_INT)val[0]);
+      }
+    else
+      result = sub_large (val, len, p1, s, cl, sgn, overflow);
+
+#ifdef DEBUG_WIDE_INT
+    debug_waav ("wide_int_ro:: %s = (%s - %s) O=%d\n",
+                result, val, len, p1, s, cl, p1, *overflow);
+#endif
+    return result;
+  }
+
+  /*
+   * Division and mod.  These are the ones that are actually used in
+   * the compiler.  More can be added where they are needed.
+   */
+
+  /* Divide DIVISOR into THIS.  The result is the same size as the
+     operands.  The sign is specified in SGN.  The output is
+     truncated.  If the pointer to OVERFLOW is not 0, OVERFLOW is set
+     to true if the result overflows, false otherwise.  */
+  template <typename T>
+  inline wide_int_ro
+  div_trunc (const T &c, signop sgn, bool *overflow = 0) const
+  {
+    wide_int_ro remainder;
+    HOST_WIDE_INT ws[WIDE_INT_MAX_ELTS];
+    const HOST_WIDE_INT *s;
+    unsigned int cl;
+    unsigned int p1, p2;
+
+    if (overflow)
+      *overflow = false;
+    p1 = precision;
+    s = to_shwi1 (ws, &cl, &p2, c);
+    check_precision (&p1, &p2, false, true);
+
+    return divmod_internal (true, val, len, p1, s, cl, p2, sgn,
+                            &remainder, false, overflow);
+  }
+
+  /* Signed divide with truncation of result.  */
+  template <typename T>
+  inline wide_int_ro
+  sdiv_trunc (const T &c) const
+  {
+    return div_trunc (c, SIGNED);
+  }
+
+  /* Unsigned divide with truncation of result.  */
+  template <typename T>
+  inline wide_int_ro
+  udiv_trunc (const T &c) const
+  {
+    return div_trunc (c, UNSIGNED);
+  }
+
+  /* Divide DIVISOR into THIS.  The result is the same size as the
+     operands.  The sign is specified in SGN.  The output is floor
+     truncated.  If the pointer to OVERFLOW is not 0, OVERFLOW is set
+     to true if the result overflows, false otherwise.  */
+  template <typename T>
+  inline wide_int_ro
+  div_floor (const T &c, signop sgn, bool *overflow = 0) const
+  {
+    wide_int_ro remainder;
+    wide_int_ro quotient;
+    HOST_WIDE_INT ws[WIDE_INT_MAX_ELTS];
+    const HOST_WIDE_INT *s;
+    unsigned int cl;
+    unsigned int p1, p2;
+
+    if (overflow)
+      *overflow = false;
+    p1 = precision;
+    s = to_shwi1 (ws, &cl, &p2, c);
+    check_precision (&p1, &p2, false, true);
+
+    quotient = divmod_internal (true, val, len, p1, s, cl, p2, sgn,
+                                &remainder, true, overflow);
+
+    if (quotient.neg_p (sgn) && !remainder.zero_p ())
+      return quotient - 1;
+    return quotient;
+  }
+
+  /* Unsigned divide with floor truncation of result.  */
+  template <typename T>
+  inline wide_int_ro
+  udiv_floor (const T &c) const
+  {
+    return div_floor (c, UNSIGNED);
+  }
+
+  /* Signed divide with floor truncation of result.  */
+  template <typename T>
+  inline wide_int_ro
+  sdiv_floor (const T &c) const
+  {
+    return div_floor (c, SIGNED);
+  }
+
+  /* Divide DIVISOR into THIS.  The result is the same size as the
+     operands.
The sign is specified in SGN. The output is ceil + truncated. If the pointer to OVERFLOW is not 0, OVERFLOW is set + to true if the result overflows, false otherwise. */ + template <typename T> + inline wide_int_ro + div_ceil (const T &c, signop sgn, bool *overflow = 0) const + { + wide_int_ro remainder; + wide_int_ro quotient; + HOST_WIDE_INT ws[WIDE_INT_MAX_ELTS]; + const HOST_WIDE_INT *s; + unsigned int cl; + unsigned int p1, p2; + + if (overflow) + *overflow = false; + p1 = precision; + s = to_shwi1 (ws, &cl, &p2, c); + check_precision (&p1, &p2, false, true); + + quotient = divmod_internal (true, val, len, p1, s, cl, p2, sgn, + &remainder, true, overflow); + + if (!quotient.neg_p (sgn) && !remainder.zero_p ()) + return quotient + 1; + return quotient; + } + + /* Divide DIVISOR into THIS. The result is the same size as the + operands. The sign is specified in SGN. The output is round + truncated. If the pointer to OVERFLOW is not 0, OVERFLOW is set + to true if the result overflows, false otherwise. */ + template <typename T> + inline wide_int_ro + div_round (const T &c, signop sgn, bool *overflow = 0) const + { + wide_int_ro remainder; + wide_int_ro quotient; + HOST_WIDE_INT ws[WIDE_INT_MAX_ELTS]; + const HOST_WIDE_INT *s; + unsigned int cl; + unsigned int p1, p2; + + if (overflow) + *overflow = false; + p1 = precision; + s = to_shwi1 (ws, &cl, &p2, c); + check_precision (&p1, &p2, false, true); + + quotient = divmod_internal (true, val, len, p1, s, cl, p2, sgn, + &remainder, true, overflow); + if (!remainder.zero_p ()) + { + wide_int_ro divisor = wide_int_ro::from_array (s, cl, precision); + if (sgn == SIGNED) + { + wide_int_ro p_remainder + = remainder.neg_p (SIGNED) ? -remainder : remainder; + wide_int_ro p_divisor = divisor.neg_p (SIGNED) ? -divisor : divisor; + p_divisor = p_divisor.rshiftu_large (1); + + if (p_divisor.gts_p (p_remainder)) + { + if (quotient.neg_p (SIGNED)) + return quotient - 1; + else + return quotient + 1; + } + } + else + { + wide_int_ro p_divisor = divisor.rshiftu_large (1); + if (p_divisor.gtu_p (remainder)) + return quotient + 1; + } + } + return quotient; + } + + /* Divide DIVISOR into THIS producing both the quotient and + remainder. The result is the same size as the operands. The + sign is specified in SGN. The output is truncated. */ + template <typename T> + inline wide_int_ro + divmod_trunc (const T &c, wide_int_ro *remainder, signop sgn) const + { + HOST_WIDE_INT ws[WIDE_INT_MAX_ELTS]; + const HOST_WIDE_INT *s; + unsigned int cl; + unsigned int p1, p2; + + p1 = precision; + s = to_shwi1 (ws, &cl, &p2, c); + check_precision (&p1, &p2, false, true); + + return divmod_internal (true, val, len, p1, s, cl, p2, sgn, + remainder, true, 0); + } + + /* Signed divide/mod with truncation of result. */ + template <typename T> + inline wide_int_ro + sdivmod_trunc (const T &c, wide_int_ro *mod) const + { + return divmod_trunc (c, mod, SIGNED); + } + + /* Unsigned divide/mod with truncation of result. */ + template <typename T> + inline wide_int_ro + udivmod_trunc (const T &c, wide_int_ro *mod) const + { + return divmod_trunc (c, mod, UNSIGNED); + } + + /* Divide DIVISOR into THIS. The remainder is also produced in + REMAINDER. The result is the same size as the operands. The + sign is specified in SGN. The output is floor truncated. 
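+
+     For example (a sketch, signed): dividing -7 by 2 here gives
+     quotient -4 and remainder 1, where truncating division would
+     give -3 and remainder -1.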
*/ + template <typename T> + inline wide_int_ro + divmod_floor (const T &c, wide_int_ro *remainder, signop sgn) const + { + wide_int_ro quotient; + HOST_WIDE_INT ws[WIDE_INT_MAX_ELTS]; + const HOST_WIDE_INT *s; + unsigned int cl; + unsigned int p1, p2; + + p1 = precision; + s = to_shwi1 (ws, &cl, &p2, c); + check_precision (&p1, &p2, false, true); + + quotient = divmod_internal (true, val, len, p1, s, cl, p2, sgn, + remainder, true, 0); + if (quotient.neg_p (sgn) && !(*remainder).zero_p ()) + { + *remainder = *remainder + wide_int_ro::from_array (s, cl, precision); + return quotient - 1; + } + return quotient; + } + + /* Signed divide/mod with floor truncation of result. */ + template <typename T> + inline wide_int_ro + sdivmod_floor (const T &c, wide_int_ro *mod) const + { + return divmod_floor (c, mod, SIGNED); + } + + /* Divide DIVISOR into THIS producing the remainder. The result is + the same size as the operands. The sign is specified in SGN. + The output is truncated. If the pointer to OVERFLOW is not 0, + OVERFLOW is set to true if the result overflows, false + otherwise. */ + template <typename T> + inline wide_int_ro + mod_trunc (const T &c, signop sgn, bool *overflow = 0) const + { + wide_int_ro remainder; + HOST_WIDE_INT ws[WIDE_INT_MAX_ELTS]; + const HOST_WIDE_INT *s; + unsigned int cl; + unsigned int p1, p2; + + if (overflow) + *overflow = false; + p1 = precision; + s = to_shwi1 (ws, &cl, &p2, c); + check_precision (&p1, &p2, false, true); + + divmod_internal (false, val, len, p1, s, cl, p2, sgn, + &remainder, true, overflow); + return remainder; + } + + /* Signed mod with truncation of result. */ + template <typename T> + inline wide_int_ro + smod_trunc (const T &c) const + { + return mod_trunc (c, SIGNED); + } + + /* Unsigned mod with truncation of result. */ + template <typename T> + inline wide_int_ro + umod_trunc (const T &c) const + { + return mod_trunc (c, UNSIGNED); + } + + /* Divide DIVISOR into THIS producing the remainder. The result is + the same size as the operands. The sign is specified in SGN. + The output is floor truncated. OVERFLOW is set to true if the + result overflows, false otherwise. */ + template <typename T> + inline wide_int_ro + mod_floor (const T &c, signop sgn, bool *overflow = 0) const + { + wide_int_ro remainder; + wide_int_ro quotient; + HOST_WIDE_INT ws[WIDE_INT_MAX_ELTS]; + const HOST_WIDE_INT *s; + unsigned int cl; + unsigned int p1, p2; + + if (overflow) + *overflow = false; + p1 = precision; + s = to_shwi1 (ws, &cl, &p2, c); + check_precision (&p1, &p2, false, true); + + quotient = divmod_internal (true, val, len, p1, s, cl, p2, sgn, + &remainder, true, overflow); + + if (quotient.neg_p (sgn) && !remainder.zero_p ()) + return remainder + wide_int_ro::from_array (s, cl, precision); + return remainder; + } + + /* Unsigned mod with floor truncation of result. */ + template <typename T> + inline wide_int_ro + umod_floor (const T &c) const + { + return mod_floor (c, UNSIGNED); + } + + /* Divide DIVISOR into THIS producing the remainder. The result is + the same size as the operands. The sign is specified in SGN. + The output is ceil truncated. If the pointer to OVERFLOW is not + 0, OVERFLOW is set to true if the result overflows, false + otherwise. 
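+
+     For example (a sketch, signed): 7 mod_ceil 2 yields -1, pairing
+     with the ceiling quotient 4 (4 * 2 + -1 == 7).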
*/ + template <typename T> + inline wide_int_ro + mod_ceil (const T &c, signop sgn, bool *overflow = 0) const + { + wide_int_ro remainder; + wide_int_ro quotient; + HOST_WIDE_INT ws[WIDE_INT_MAX_ELTS]; + const HOST_WIDE_INT *s; + unsigned int cl; + unsigned int p1, p2; + + if (overflow) + *overflow = false; + p1 = precision; + s = to_shwi1 (ws, &cl, &p2, c); + check_precision (&p1, &p2, false, true); + + quotient = divmod_internal (true, val, len, p1, s, cl, p2, sgn, + &remainder, true, overflow); + + if (!quotient.neg_p (sgn) && !remainder.zero_p ()) + return remainder - wide_int_ro::from_array (s, cl, precision); + return remainder; + } + + /* Divide DIVISOR into THIS producing the remainder. The result is + the same size as the operands. The sign is specified in SGN. + The output is round truncated. OVERFLOW is set to true if the + result overflows, false otherwise. */ + template <typename T> + inline wide_int_ro + mod_round (const T &c, signop sgn, bool *overflow = 0) const + { + wide_int_ro remainder; + wide_int_ro quotient; + HOST_WIDE_INT ws[WIDE_INT_MAX_ELTS]; + const HOST_WIDE_INT *s; + unsigned int cl; + unsigned int p1, p2; + + if (overflow) + *overflow = false; + p1 = precision; + s = to_shwi1 (ws, &cl, &p2, c); + check_precision (&p1, &p2, false, true); + + quotient = divmod_internal (true, val, len, p1, s, cl, p2, sgn, + &remainder, true, overflow); + + if (!remainder.zero_p ()) + { + wide_int_ro divisor = wide_int_ro::from_array (s, cl, precision); + if (sgn == SIGNED) + { + wide_int_ro p_remainder = remainder.neg_p (SIGNED) ? -remainder : remainder; + wide_int_ro p_divisor = divisor.neg_p (SIGNED) ? -divisor : divisor; + p_divisor = p_divisor.rshiftu_large (1); + + if (p_divisor.gts_p (p_remainder)) + { + if (quotient.neg_p (SIGNED)) + return remainder + divisor; + else + return remainder - divisor; + } + } + else + { + wide_int_ro p_divisor = divisor.rshiftu_large (1); + if (p_divisor.gtu_p (remainder)) + return remainder - divisor; + } + } + return remainder; + } + + /* + * Shifting rotating and extracting. For the default wide_int, the + * bitsize is optional and defaults to the precision of the value + * being shifted, but for addr_wide_int and max_wide_int the + * precision is required because shifting within the precision of + * these two types is not really meaningful. + */ + + HOST_WIDE_INT extract_to_hwi (int offset, int width) const; + + /* Left shift THIS by C. C must be non-negative. BITSIZE is the + width of *THIS used for truncating the shift amount. See the + definition of Op.TRUNC for how to set TRUNC_OP. */ + template <typename T> + inline wide_int_ro + lshift (const T &c, unsigned int bitsize = 0, ShiftOp trunc_op = NONE) const + { + wide_int_ro result; + HOST_WIDE_INT ws[WIDE_INT_MAX_ELTS]; + const HOST_WIDE_INT *s; + unsigned int cl; + HOST_WIDE_INT shift; + + s = to_shwi2 (ws, &cl, c); + + gcc_checking_assert (precision); + + shift = trunc_shift (s, cl, bitsize, trunc_op); + if (shift == -1) + result = wide_int_ro::zero (precision); + else if (shift == 0) + result = *this; + /* Handle the simple case quickly. */ + else if (precision <= HOST_BITS_PER_WIDE_INT) + { + result.precision = precision; + result.len = 1; + result.val[0] = val[0] << shift; + } + else + result = lshift_large (shift, precision); + +#ifdef DEBUG_WIDE_INT + debug_wwa ("wide_int_ro:: %s = (%s << %s)\n", result, *this, s, cl, 0); +#endif + return result; + } + + /* Left shift THIS by C into an expanded value with RES_PREC + precision. C must be non-negative. 
This function is only + available for the default wide-int form. */ + template <typename T> + inline wide_int_ro + lshift_widen (const T &c, unsigned int res_prec) const + { + wide_int_ro result; + HOST_WIDE_INT ws[WIDE_INT_MAX_ELTS]; + const HOST_WIDE_INT *s; + unsigned int cl; + HOST_WIDE_INT shift; + + s = to_shwi2 (ws, &cl, c); + + gcc_checking_assert (precision); + gcc_checking_assert (res_prec); + + shift = s[0]; + + gcc_checking_assert (shift >= 0); + + if (shift == 0 && res_prec == precision) + result = *this; + /* Handle the simple case quickly. */ + else if (res_prec <= HOST_BITS_PER_WIDE_INT) + { + result.precision = res_prec; + result.len = 1; + result.val[0] = val[0] << shift; + } + else + result = lshift_large (shift, res_prec); + +#ifdef DEBUG_WIDE_INT + debug_wwa ("wide_int_ro:: %s = (%s <<W %s)\n", result, *this, s, cl, 0); +#endif + return result; + } + + /* Rotate THIS left by C within PREC. If PREC is 0, the precision of + THIS is used for PREC. The result has the precision of THIS. */ + template <typename T> + inline wide_int_ro + lrotate (const T &c, unsigned int prec = 0) const + { + HOST_WIDE_INT ws[WIDE_INT_MAX_ELTS]; + const HOST_WIDE_INT *s; + unsigned int cl; + + s = to_shwi2 (ws, &cl, c); + + return lrotate ((unsigned HOST_WIDE_INT)s[0], prec); + } + + /* Rotate THIS left by CNT within PREC. If PREC is 0, the precision + of THIS is used for PREC. CNT must be non-negative. The result + has the precision of THIS. */ + inline wide_int_ro lrotate (unsigned HOST_WIDE_INT cnt, unsigned int prec = 0) const + { + wide_int_ro left, right, result; + + gcc_checking_assert (precision); + + if (prec == 0) + prec = precision; + + left = lshift (cnt); + right = rshiftu (prec - cnt); + + if (prec != precision) + { + left = left.zforce_to_size (precision); + right = right.zforce_to_size (precision); + } + result = left | right; + + return result; + } + + /* Right shift THIS by C. BITSIZE is the width of *THIS used for + truncating the shift amount. SGN indicates the sign. TRUNC_OP + indicates the truncation option. C must be non-negative. */ + template <typename T> + inline wide_int_ro + rshift (const T &c, signop sgn, unsigned int bitsize = 0, + ShiftOp trunc_op = NONE) const + { + if (sgn == UNSIGNED) + return rshiftu (c, bitsize, trunc_op); + else + return rshifts (c, bitsize, trunc_op); + } + + /* Unsigned right shift THIS by C. C must be non-negative. BITSIZE + is the width of *THIS used for truncating the shift amount. See the + definition of Op.TRUNC for how to set TRUNC_OP. */ + template <typename T> + inline wide_int_ro + rshiftu (const T &c, unsigned int bitsize = 0, ShiftOp trunc_op = NONE) const + { + wide_int_ro result; + HOST_WIDE_INT ws[WIDE_INT_MAX_ELTS]; + const HOST_WIDE_INT *s; + unsigned int cl; + HOST_WIDE_INT shift; + + s = to_shwi2 (ws, &cl, c); + gcc_checking_assert (precision); + shift = trunc_shift (s, cl, bitsize, trunc_op); + + if (shift == 0) + result = *this; + else if (shift == -1) + result = wide_int_ro::zero (precision); + else if (precision <= HOST_BITS_PER_WIDE_INT) + { + /* Handle the simple case quickly. */ + unsigned HOST_WIDE_INT x = val[0]; + + result.precision = precision; + result.len = 1; + + if (precision < HOST_BITS_PER_WIDE_INT) + x = zext_hwi (x, precision); + + result.val[0] = x >> shift; + } + else + result = rshiftu_large (shift); + +#ifdef DEBUG_WIDE_INT + debug_wwa ("wide_int_ro:: %s = (%s >>U %s)\n", result, *this, s, cl, 0); +#endif + return result; + } + + /* Signed right shift THIS by C.
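+ The shift is arithmetic: the vacated high-order bits are filled with + copies of the sign bit.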
C must be non-negative, BITSIZE is + the width of *THIS used for truncating the shift amount. See the + definition of Op.TRUNC for how to set TRUNC_OP. */ + template <typename T> + inline wide_int_ro + rshifts (const T &c, unsigned int bitsize = 0, ShiftOp trunc_op = NONE) const + { + wide_int_ro result; + HOST_WIDE_INT ws[WIDE_INT_MAX_ELTS]; + const HOST_WIDE_INT *s; + unsigned int cl; + HOST_WIDE_INT shift; + + s = to_shwi2 (ws, &cl, c); + gcc_checking_assert (precision); + shift = trunc_shift (s, cl, bitsize, trunc_op); + + if (shift == 0) + result = *this; + else if (shift == -1) + result = wide_int_ro::zero (precision); + else if (precision < HOST_BITS_PER_WIDE_INT) + { + /* Handle the simple case quickly. */ + HOST_WIDE_INT x = val[0]; + + result.precision = precision; + result.len = 1; + x = x << (HOST_BITS_PER_WIDE_INT - precision); + result.val[0] = x >> (shift + HOST_BITS_PER_WIDE_INT - precision); + } + else if (precision == HOST_BITS_PER_WIDE_INT) + { + HOST_WIDE_INT x = val[0]; + + result.precision = precision; + result.len = 1; + result.val[0] = x >> shift; + } + else + result = rshifts_large (shift); + +#ifdef DEBUG_WIDE_INT + debug_wwa ("wide_int_ro:: %s = (%s >>S %s)\n", result, *this, s, cl, 0); +#endif + return result; + } + + /* Rotate THIS right by C within PREC. If PREC is 0, the precision + of THIS is used for PREC. The result has the precision of + THIS. */ + template <typename T> + inline wide_int_ro + rrotate (const T &c, unsigned int prec = 0) const + { + HOST_WIDE_INT ws[WIDE_INT_MAX_ELTS]; + const HOST_WIDE_INT *s; + unsigned int cl; + + s = to_shwi2 (ws, &cl, c); + return rrotate ((unsigned HOST_WIDE_INT) s[0], prec); + } + + /* Rotate THIS right by CNT within PREC. If PREC is 0, the precision + of THIS is used for PREC. The result has the precision of THIS. + CNT must be non-negative. */ + inline wide_int_ro + rrotate (unsigned HOST_WIDE_INT cnt, unsigned int prec = 0) const + { + wide_int_ro left, right, result; + + gcc_checking_assert (precision); + + if (prec == 0) + prec = precision; + + left = lshift (prec - cnt); + right = rshiftu (cnt); + + if (prec != precision) + { + left = left.zforce_to_size (precision); + right = right.zforce_to_size (precision); + } + result = left | right; + + return result; + } + + char *dump (char* buf) const; + private: + + /* + * Internal versions that do the work if the values do not fit in a + * HWI. + */ + + /* Comparisons */ + static bool eq_p_large (const HOST_WIDE_INT *, unsigned int, unsigned int, + const HOST_WIDE_INT *, unsigned int); + static bool lts_p_large (const HOST_WIDE_INT *, unsigned int, unsigned int, + const HOST_WIDE_INT *, unsigned int, unsigned int); + static int cmps_large (const HOST_WIDE_INT *, unsigned int, unsigned int, + const HOST_WIDE_INT *, unsigned int, unsigned int); + static bool ltu_p_large (const HOST_WIDE_INT *, unsigned int, unsigned int, + const HOST_WIDE_INT *, unsigned int, unsigned int); + static int cmpu_large (const HOST_WIDE_INT *, unsigned int, unsigned int, + const HOST_WIDE_INT *, unsigned int, unsigned int); + static inline void check_precision (unsigned int *p1, unsigned int *p2, + bool check_eq, bool check_zero); + + + /* Logicals.
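+ Like the comparison routines above, these are out-of-line workers + used when a value does not fit in a single HWI.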
*/ + static wide_int_ro and_large (const HOST_WIDE_INT *, unsigned int, unsigned int, + const HOST_WIDE_INT *, unsigned int); + static wide_int_ro and_not_large (const HOST_WIDE_INT *, unsigned int, unsigned int, + const HOST_WIDE_INT *, unsigned int); + static wide_int_ro or_large (const HOST_WIDE_INT *, unsigned int, unsigned int, + const HOST_WIDE_INT *, unsigned int); + static wide_int_ro or_not_large (const HOST_WIDE_INT *, unsigned int, unsigned int, + const HOST_WIDE_INT *, unsigned int); + static wide_int_ro xor_large (const HOST_WIDE_INT *, unsigned int, unsigned int, + const HOST_WIDE_INT *, unsigned int); + + /* Arithmetic */ + static wide_int_ro add_large (const HOST_WIDE_INT *, unsigned int, unsigned int, + const HOST_WIDE_INT *, unsigned int, signop, bool * = 0); + static wide_int_ro sub_large (const HOST_WIDE_INT *, unsigned int, unsigned int, + const HOST_WIDE_INT *, unsigned int, signop, bool * = 0); + + wide_int_ro lshift_large (unsigned int cnt, unsigned int res_prec) const; + wide_int_ro rshiftu_large (unsigned int cnt) const; + wide_int_ro rshifts_large (unsigned int cnt) const; + + static wide_int_ro + mul_internal (bool high, bool full, + const HOST_WIDE_INT *op1, unsigned int op1len, unsigned int op1prec, + const HOST_WIDE_INT *op2, unsigned int op2len, + signop sgn, bool *overflow, bool needs_overflow); + static void + divmod_internal_2 (unsigned HOST_HALF_WIDE_INT *b_quotient, + unsigned HOST_HALF_WIDE_INT *b_remainder, + unsigned HOST_HALF_WIDE_INT *b_dividend, + unsigned HOST_HALF_WIDE_INT *b_divisor, + int m, int n); + static wide_int_ro + divmod_internal (bool compute_quotient, + const HOST_WIDE_INT *, unsigned int, unsigned int, + const HOST_WIDE_INT *, unsigned int, unsigned int, + signop sgn, wide_int_ro *remainder, + bool compute_remainder, + bool *overflow); + + + /* Private utility routines. */ + wide_int_ro decompress (unsigned int target, unsigned int precision) const; + void canonize (); + static wide_int_ro from_rtx (const rtx_mode_t); + + /* If SHIFT_COUNT_TRUNCATED is defined, truncate CNT. + + At first look, the shift truncation code does not look right. + Shifts (and rotates) are done according to the precision of the + mode but the shift count is truncated according to the bitsize of + the mode. This is how real hardware works (Knuth's MIX machine + is the only known exception to this rule, but it was never real). + + On an ideal machine, like Knuth's MIX machine, a shift count is a + word long and all of the bits of that word are examined to + compute the shift amount. But on real hardware, especially on + machines with fast (single-cycle) shifts, that takes too long. On + these machines, the amount of time to perform a shift dictates + the cycle time of the machine, so corners are cut to keep this + fast. A comparison of an entire 64 bit word would take something + like 6 gate delays before the shifting can even start. + + So real hardware only looks at a small part of the shift amount. + On IBM machines, this tends to be 1 more than what is necessary + to encode the shift amount. The rest of the world looks at only + the minimum number of bits. This means that only 3 gate delays + are necessary to set up the shifter. + + On the other hand, right shifts and rotates must be according to + the precision or the operation does not make any sense. + + This function is called in two contexts. If TRUNC_OP == TRUNC, this + function provides a count that matches the semantics of the + target machine depending on the value of SHIFT_COUNT_TRUNCATED.
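+ For example, with SHIFT_COUNT_TRUNCATED defined and a bitsize of 32, + a shift count of 37 is reduced to 37 & 31 == 5.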
+ Note that if SHIFT_COUNT_TRUNCATED is not defined, this function + may produce -1 as a value if the shift amount is greater than the + bitsize of the mode. -1 is a surrogate for a very large amount. + + If TRUNC_OP == NONE, then this function always truncates the shift + value to the bitsize because this shifting operation is a + function that is internal to GCC. */ + + inline int + trunc_shift (const HOST_WIDE_INT *cnt, + unsigned int len ATTRIBUTE_UNUSED, + unsigned int bitsize, ShiftOp trunc_op) const + { + gcc_checking_assert (cnt[0] >= 0); + + if (trunc_op == TRUNC) + { + gcc_checking_assert (bitsize != 0); +#ifdef SHIFT_COUNT_TRUNCATED + return cnt[0] & (bitsize - 1); +#else + if (cnt[0] < bitsize && cnt[0] >= 0 && len == 1) + return cnt[0]; + else + return -1; +#endif + } + else if (bitsize == 0) + return cnt[0]; + else + return cnt[0] & (bitsize - 1); + } + + template <typename T> + static inline bool + top_bit_set (T x) { + return (x >> (sizeof (x)*8 - 1)) != 0; + } + + /* The following template and its overloads are used for the first + and second operand of static binary comparison functions. These + have been implemented so that pointer copying is done from the + rep of the operands rather than actual data copying. This + is safe even for garbage collected objects since the value is + immediately thrown away. + + This template matches all integers. */ + + template <typename T> + static inline const HOST_WIDE_INT* + to_shwi1 (HOST_WIDE_INT *s, unsigned int *l, unsigned int *p, const T& x) + { + s[0] = x; + if (signedp(x) + || sizeof (T) < sizeof (HOST_WIDE_INT) + || ! top_bit_set (x)) + { + *l = 1; + } + else + { + s[1] = 0; + *l = 2; + } + *p = 0; + return s; + } + + /* The following template and its overloads are used for the second + operand of binary functions. These have been implemented so that + pointer copying is done from the rep of the second operand rather + than actual data copying. This is safe even for garbage + collected objects since the value is immediately thrown away. + + The next template matches all integers. */ + + template <typename T> + static inline const HOST_WIDE_INT* + to_shwi2 (HOST_WIDE_INT *s, unsigned int *l, const T& x) + { + s[0] = x; + if (signedp(x) + || sizeof (T) < sizeof (HOST_WIDE_INT) + || ! top_bit_set (x)) + { + *l = 1; + } + else + { + s[1] = 0; + *l = 2; + } + return s; + } + + +#ifdef DEBUG_WIDE_INT + /* Debugging routines.
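+ The suffix letters encode the trace arguments in order: w is a + wide_int_ro, v an int, h a HOST_WIDE_INT, a a HWI array with its + length and precision, and s a string.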
*/ + static void debug_wa (const char* fmt, const wide_int_ro &r, + const HOST_WIDE_INT *o0, unsigned int l0, unsigned int p0); + static void debug_waa (const char* fmt, const wide_int_ro &r, + const HOST_WIDE_INT *o0, unsigned int l0, unsigned int p0, + const HOST_WIDE_INT *o1, unsigned int l1, unsigned int p1); + static void debug_waav (const char* fmt, const wide_int_ro &r, + const HOST_WIDE_INT *o0, unsigned int l0, unsigned int p0, + const HOST_WIDE_INT *o1, unsigned int l1, unsigned int p1, + int s); + static void debug_vw (const char* fmt, int r, const wide_int_ro& o0); + static void debug_vwh (const char* fmt, int r, const wide_int_ro &o0, + HOST_WIDE_INT o1); + static void debug_vaa (const char* fmt, int r, + const HOST_WIDE_INT *, unsigned int l0, unsigned int p0, + const HOST_WIDE_INT *, unsigned int l1, unsigned int p1); + static void debug_vwa (const char* fmt, int r, const wide_int_ro &o0, + const HOST_WIDE_INT *, unsigned int l1, unsigned int p1); + static void debug_vww (const char* fmt, int r, const wide_int_ro &o0, + const wide_int_ro &o1); + static void debug_wh (const char* fmt, const wide_int_ro &r, + HOST_WIDE_INT o1); + static void debug_whh (const char* fmt, const wide_int_ro &r, + HOST_WIDE_INT o1, HOST_WIDE_INT o2); + static void debug_wv (const char* fmt, const wide_int_ro &r, int v0); + static void debug_wvv (const char* fmt, const wide_int_ro &r, int v0, + int v1); + static void debug_wvvv (const char* fmt, const wide_int_ro &r, int v0, + int v1, int v2); + static void debug_wvwa (const char* fmt, const wide_int_ro &r, int v0, + const wide_int_ro &o0, + const HOST_WIDE_INT *o1, unsigned int l1, unsigned int p1); + static void debug_wvasa (const char* fmt, const wide_int_ro &r, int v0, + const HOST_WIDE_INT *o0, unsigned int l0, unsigned int p0, + const char * s, + const HOST_WIDE_INT *o1, unsigned int l1, unsigned int p1); + static void debug_wvww (const char* fmt, const wide_int_ro &r, int v0, + const wide_int_ro &o0, const wide_int_ro &o1); + static void debug_wwa (const char* fmt, const wide_int_ro &r, + const wide_int_ro &o0, + const HOST_WIDE_INT *, unsigned int l1, unsigned int p1); + static void debug_wwv (const char* fmt, const wide_int_ro &r, + const wide_int_ro &o0, int v0); + static void debug_wwvs (const char* fmt, const wide_int_ro &r, + const wide_int_ro &o0, + int v0, const char *s); + static void debug_wwvvs (const char* fmt, const wide_int_ro &r, + const wide_int_ro &o0, + int v0, int v1, const char *s); + static void debug_wwwvv (const char* fmt, const wide_int_ro &r, + const wide_int_ro &o0, const wide_int_ro &o1, + int v0, int v1); + static void debug_ww (const char* fmt, const wide_int_ro &r, + const wide_int_ro &o0); + static void debug_www (const char* fmt, const wide_int_ro &r, + const wide_int_ro &o0, const wide_int_ro &o1); + static void debug_wwasa (const char* fmt, const wide_int_ro &r, const wide_int_ro &o0, + const HOST_WIDE_INT *o1, unsigned int l1, unsigned int p1, + const char* s3, + const HOST_WIDE_INT *o2, unsigned int l2, unsigned int p2); + static void debug_wwwa (const char* fmt, const wide_int_ro &r, + const wide_int_ro &o0, const wide_int_ro &o1, + const HOST_WIDE_INT *o2, unsigned int l2, unsigned int p2); + static void debug_wwww (const char* fmt, const wide_int_ro &r, + const wide_int_ro &o0, const wide_int_ro &o1, + const wide_int_ro &o2); +#endif +}; + +class GTY(()) wide_int : public wide_int_ro { + public: + wide_int () { } + wide_int (const wide_int_ro &r) { + static_cast<wide_int_ro &> (*this) = r; + } + + /* 
Convert an integer cst into a wide int. */ + wide_int (const_tree tcst) { + *this = from_array (&TREE_INT_CST_ELT (tcst, 0), + TREE_INT_CST_NUNITS (tcst), + TYPE_PRECISION (TREE_TYPE (tcst)), false); + } + + wide_int (HOST_WIDE_INT op0) { + precision = 0; + val[0] = op0; + len = 1; + } + wide_int (int op0) { + precision = 0; + val[0] = op0; + len = 1; + } + wide_int (unsigned HOST_WIDE_INT op0) { + *this = wide_int_ro::from_uhwi (op0); + } + wide_int (unsigned int op0) { + *this = wide_int_ro::from_uhwi (op0); + } + wide_int (const rtx_mode_t& op0) { + *this = wide_int_ro::from_rtx (op0); + } + + wide_int& operator = (const wide_int_ro &r) { + static_cast<wide_int_ro &> (*this) = r; + return *this; + } + wide_int& operator = (const_tree tcst) { + *this = from_array (&TREE_INT_CST_ELT (tcst, 0), + TREE_INT_CST_NUNITS (tcst), + TYPE_PRECISION (TREE_TYPE (tcst)), false); + return *this; + } + wide_int& operator = (HOST_WIDE_INT op0) { + static_cast<wide_int_ro &> (*this) = op0; + return *this; + } + wide_int& operator = (int op0) { + static_cast<wide_int_ro &> (*this) = op0; + return *this; + } + wide_int& operator = (unsigned HOST_WIDE_INT op0) { + static_cast<wide_int_ro &> (*this) = wide_int_ro (op0); + return *this; + } + wide_int& operator = (unsigned int op0) { + static_cast<wide_int_ro &> (*this) = wide_int_ro (op0); + return *this; + } + wide_int& operator = (const rtx_mode_t& op0) { + *this = wide_int_ro::from_rtx (op0); + return *this; + } + + /* Arithmetic operation functions, alpha sorted. */ + + inline wide_int& operator ++ () { + *this += 1; + return *this; + } + inline wide_int& operator -- () { + *this -= 1; + return *this; + } + /* + * Logicals. + */ + + /* &= with C */ + template <typename T> + wide_int &operator &= (const T &c) + { + *this = *this & c; + return *this; + } + + /* |= C */ + template <typename T> + wide_int &operator |= (const T &c) + { + *this = *this | c; + return *this; + } + + /* ^= C */ + template <typename T> + wide_int &operator ^= (const T &c) + { + *this = *this ^ c; + return *this; + } + + /* + * Integer arithmetic + */ + + /* += C */ + template <typename T> + wide_int &operator += (const T &c) + { + *this = *this + c; + return *this; + } + + /* -= C */ + template <typename T> + wide_int &operator -= (const T &c) + { + *this = *this - c; + return *this; + } + + /* *= C */ + template <typename T> + wide_int &operator *= (const T &c) + { + *this = *this * c; + return *this; + } +}; + + + +template <int bitsize> +class GTY(()) fixed_wide_int : public wide_int_ro { + friend class wide_int_ro; + protected: + fixed_wide_int &operator = (const wide_int &w) { + static_cast<wide_int_ro &> (*this) = w; + + /* We only allow the same size in, as otherwise + we would not know how to extend it. */ + gcc_assert (precision == bitsize); + + return *this; + } + fixed_wide_int (const wide_int_ro w) : wide_int_ro (w) { + /* We only allow the same size in, as otherwise + we would not know how to extend it.
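+ Values of other precisions must come in through from_wide_int, + which extends to BITSIZE explicitly.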
*/ + gcc_assert (precision == bitsize); + } + inline const HOST_WIDE_INT* get_val () const { return val; } + using wide_int_ro::val; +public: + using wide_int_ro::get_precision; + using wide_int_ro::get_len; + using wide_int_ro::to_short_addr; + using wide_int_ro::fits_uhwi_p; + using wide_int_ro::fits_shwi_p; + using wide_int_ro::gtu_p; + using wide_int_ro::gts_p; + using wide_int_ro::geu_p; + using wide_int_ro::ges_p; + using wide_int_ro::to_shwi; + using wide_int_ro::operator ==; + using wide_int_ro::ltu_p; + using wide_int_ro::lts_p; + using wide_int_ro::leu_p; + using wide_int_ro::les_p; + using wide_int_ro::to_uhwi; + using wide_int_ro::cmps; + using wide_int_ro::neg_p; + using wide_int_ro::cmpu; + using wide_int_ro::umod_floor; + using wide_int_ro::one_p; + using wide_int_ro::zero_p; + using wide_int_ro::multiple_of_p; + using wide_int_ro::minus_one_p; + using wide_int_ro::operator !=; + using wide_int_ro::elt; + using wide_int_ro::fits_to_tree_p; + using wide_int_ro::from_uhwi; + using wide_int_ro::ctz; + using wide_int_ro::cmp; + using wide_int_ro::minus_one; + + static inline fixed_wide_int from_wide_int (const wide_int& w) { + if (w.neg_p (SIGNED)) + return w.sforce_to_size (bitsize); + return w.zforce_to_size (bitsize); + } + + static inline fixed_wide_int from_array (const HOST_WIDE_INT* op0, + unsigned int len, + bool need_canon = true) { + return wide_int_ro::from_array (op0, len, bitsize, need_canon); + } + + fixed_wide_int () { } + fixed_wide_int (const_tree t) { + *this = t; + } + fixed_wide_int (HOST_WIDE_INT op0) : wide_int_ro (op0) { + precision = bitsize; + } + fixed_wide_int (int op0) : wide_int_ro (op0) { + precision = bitsize; + } + fixed_wide_int (unsigned HOST_WIDE_INT op0) : wide_int_ro (op0) { + precision = bitsize; + if (neg_p (SIGNED)) + static_cast<wide_int_ro &> (*this) = zext (HOST_BITS_PER_WIDE_INT); + } + fixed_wide_int (unsigned int op0) : wide_int_ro (op0) { + precision = bitsize; + if (sizeof (int) == sizeof (HOST_WIDE_INT) + && neg_p (SIGNED)) + *this = zext (HOST_BITS_PER_WIDE_INT); + } + + inline fixed_wide_int& operator ++ () { + *this += 1; + return *this; + } + + inline fixed_wide_int& operator -- () { + *this -= 1; + return *this; + } + + bool multiple_of_p (const wide_int_ro &factor, + signop sgn, + fixed_wide_int *multiple) const { + return wide_int_ro::multiple_of_p (factor, + sgn, + reinterpret_cast <wide_int *> (multiple)); + } + + /* Conversion to and from GMP integer representations. */ + + void to_mpz (mpz_t m, signop sgn) const { + wide_int_ro::to_mpz (m, sgn); + } + + static fixed_wide_int from_mpz (const_tree t, mpz_t m, bool e) { + return wide_int_ro::from_mpz (t, m, e).force_to_size (bitsize, TYPE_SIGN (t)); + } + + fixed_wide_int &operator = (const_tree t) { + tree type = TREE_TYPE (t); + + static_cast <wide_int_ro &> (*this) + = wide_int_ro::from_array (&TREE_INT_CST_ELT (t, 0), + TREE_INT_CST_NUNITS (t), + TYPE_PRECISION (TREE_TYPE (t)), false); + + precision = bitsize; + + /* This is logically top_bit_set_p. 
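+ neg_p (SIGNED) simply tests the most significant bit, so for an + unsigned type a set top bit means the constant must be zero extended + beyond its precision rather than sign extended.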
*/ + if (TYPE_SIGN (type) == UNSIGNED && neg_p (SIGNED)) + static_cast<wide_int_ro &> (*this) = zext (TYPE_PRECISION (type)); + + return *this; + } + fixed_wide_int &operator = (HOST_WIDE_INT op0) { + static_cast<wide_int_ro &> (*this) = op0; + precision = bitsize; + + return *this; + } + fixed_wide_int &operator = (int op0) { + static_cast<wide_int_ro &> (*this) = op0; + precision = bitsize; + + return *this; + } + fixed_wide_int &operator = (unsigned HOST_WIDE_INT op0) { + static_cast<wide_int_ro &> (*this) = op0; + precision = bitsize; + + /* This is logically top_bit_set_p. */ + if (neg_p (SIGNED)) + static_cast<wide_int_ro &> (*this) = zext (HOST_BITS_PER_WIDE_INT); + + return *this; + } + fixed_wide_int &operator = (unsigned int op0) { + static_cast<wide_int_ro &> (*this) = op0; + precision = bitsize; + + if (sizeof (int) == sizeof (HOST_WIDE_INT) + && neg_p (SIGNED)) + *this = zext (HOST_BITS_PER_WIDE_INT); + + return *this; + } + + /* Extension, these do not change the precision. */ + + fixed_wide_int ext (unsigned int offset, signop sgn) const { + return wide_int_ro::ext (offset, sgn); + } + fixed_wide_int sext (unsigned int offset) const { + return wide_int_ro::sext (offset); + } + fixed_wide_int zext (unsigned int offset) const { + return wide_int_ro::zext (offset); + } + + /* Masking and Insertion */ + + fixed_wide_int set_bit (unsigned int bitpos) const { + return wide_int_ro::set_bit (bitpos); + } + static fixed_wide_int set_bit_in_zero (unsigned int bitpos) { + return wide_int_ro::set_bit_in_zero (bitpos, bitsize); + } + fixed_wide_int insert (const wide_int_ro &op0, unsigned int offset, + unsigned int width) const { + return wide_int_ro::insert (op0, offset, width); + } + + static fixed_wide_int mask (unsigned int width, bool negate) { + return wide_int_ro::mask (width, negate, bitsize); + } + static fixed_wide_int shifted_mask (unsigned int start, unsigned int width, + bool negate) { + return wide_int_ro::shifted_mask (start, width, negate, bitsize); + } + + /* Logicals */ + + template <typename T> + inline fixed_wide_int<bitsize> operator & (const T &c) const { + return *this & fixed_wide_int (c); + } + inline fixed_wide_int<bitsize> operator & (const fixed_wide_int<bitsize> &c) const; + + template <typename T> + inline fixed_wide_int<bitsize> &operator &= (const T &c) { + *this &= fixed_wide_int (c); + return *this; + } + inline fixed_wide_int<bitsize> &operator &= (const fixed_wide_int<bitsize> &c); + + template <typename T> + inline fixed_wide_int and_not (const T &c) const { + return wide_int_ro::and_not (fixed_wide_int (c)); + } + inline fixed_wide_int operator ~ () const { + return ~static_cast <const wide_int_ro &> (*this); + } + + template <typename T> + inline fixed_wide_int<bitsize> operator | (const T &c) const { + return *this | fixed_wide_int (c); + } + inline fixed_wide_int<bitsize> operator | (const fixed_wide_int<bitsize> &c) const; + + template <typename T> + inline fixed_wide_int<bitsize> &operator |= (const T &c) { + *this |= fixed_wide_int (c); + return *this; + } + inline fixed_wide_int<bitsize> &operator |= (const fixed_wide_int<bitsize> &c); + + template <typename T> + inline fixed_wide_int or_not (const T &c) const { + return wide_int_ro::or_not (fixed_wide_int (c)); + } + + template <typename T> + inline fixed_wide_int<bitsize> operator ^ (const T &c) const { + return *this ^ fixed_wide_int (c); + } + inline fixed_wide_int<bitsize> operator ^ (const fixed_wide_int<bitsize> &c) const; + + template <typename T> + inline fixed_wide_int<bitsize> 
&operator ^= (const T &c) { + *this ^= fixed_wide_int (c); + return *this; + } + inline fixed_wide_int<bitsize> &operator ^= (const fixed_wide_int<bitsize> &c); + + /* Arithmetic operation functions, alpha sorted. */ + + template <typename T> + inline fixed_wide_int<bitsize> operator + (const T &c) const { + return *this + fixed_wide_int (c); + } + inline fixed_wide_int<bitsize> operator + (const fixed_wide_int<bitsize> &c) const; + + template <typename T> + inline fixed_wide_int<bitsize> &operator += (const T &c) { + *this += fixed_wide_int (c); + return *this; + } + inline fixed_wide_int<bitsize> &operator += (const fixed_wide_int<bitsize> &c); + + template <typename T> + inline fixed_wide_int add (const T &c, signop sgn, bool *overflow) const { + return wide_int_ro::add (c, sgn, overflow); + } + + template <typename T> + inline fixed_wide_int<bitsize> operator * (const T &c) const { + return static_cast <const wide_int_ro &> (*this) * fixed_wide_int (c); + } + template <typename T> + inline fixed_wide_int &operator *= (const T &c) { + reinterpret_cast <wide_int &> (*this) *= c; + return *this; + } + template <typename T> + inline fixed_wide_int mul (const T &c, signop sgn, bool *overflow) const { + return wide_int_ro::mul (c, sgn, overflow); + } + template <typename T> + inline fixed_wide_int smul (const T &c, bool *overflow) const { + return wide_int_ro::smul (c, overflow); + } + template <typename T> + inline fixed_wide_int umul (const T &c, bool *overflow) const { + return wide_int_ro::umul (c, overflow); + } + + template <typename T> + inline fixed_wide_int<bitsize> operator - (const T &c) const { + return *this - fixed_wide_int (c); + } + inline fixed_wide_int<bitsize> operator - () const { + return - static_cast<const wide_int_ro &> (*this); + } + inline fixed_wide_int<bitsize> operator - (const fixed_wide_int<bitsize> &c) const; + template <typename T> + inline fixed_wide_int<bitsize> &operator -= (const T &c) { + return *this -= fixed_wide_int (c); + } + inline fixed_wide_int<bitsize> &operator -= (const fixed_wide_int<bitsize> &c); + + template <typename T> + inline fixed_wide_int sub (const T &c, signop sgn, bool *overflow) const { + return wide_int_ro::sub (c, sgn, overflow); + } + + /* Division and mod. These are the ones that are actually used, but + there are a lot of them. 
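+ The names combine a rounding direction (floor, ceil, round or trunc) + with an optional sign prefix, so, e.g., sdiv_trunc (c) is in effect + div_trunc (c, SIGNED); the divmod forms also return the remainder + through MOD.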
*/ + + template <typename T> + inline fixed_wide_int div_floor (const T &c, signop sgn, bool *overflow = 0) const { + return wide_int_ro::div_floor (c, sgn, overflow); + } + template <typename T> + inline fixed_wide_int udiv_floor (const T &c) const { + return wide_int_ro::udiv_floor (c); + } + template <typename T> + inline fixed_wide_int sdiv_floor (const T &c) const { + return wide_int_ro::sdiv_floor (c); + } + template <typename T> + inline fixed_wide_int div_ceil (const T &c, signop sgn, bool *overflow = 0) const { + return wide_int_ro::div_ceil (c, sgn, overflow); + } + template <typename T> + inline fixed_wide_int div_round (const T &c, signop sgn, bool *overflow = 0) const { + return wide_int_ro::div_round (c, sgn, overflow); + } + + template <typename T> + inline fixed_wide_int div_trunc (const T &c, signop sgn, bool *overflow = 0) const { + return wide_int_ro::div_trunc (c, sgn, overflow); + } + template <typename T> + inline fixed_wide_int sdiv_trunc (const T &c) const { + return wide_int_ro::sdiv_trunc (c); + } + template <typename T> + inline fixed_wide_int udiv_trunc (const T &c) const { + return wide_int_ro::udiv_trunc (c); + } + + template <typename T> + inline fixed_wide_int divmod_floor (const T &c, fixed_wide_int *mod, signop sgn) const { + return wide_int_ro::divmod_floor (c, mod, sgn); + } + template <typename T> + inline fixed_wide_int sdivmod_floor (const T &c, fixed_wide_int *mod) const { + return wide_int_ro::sdivmod_floor (c, reinterpret_cast <wide_int *> (mod)); + } + + /* Shifting, rotating and extracting. */ + + template <typename T> + inline fixed_wide_int lrotate (const T &c, unsigned int prec) const { + return wide_int_ro::lrotate (c, prec); + } + inline fixed_wide_int lrotate (unsigned HOST_WIDE_INT y, unsigned int prec) const { + return wide_int_ro::lrotate (y, prec); + } + + template <typename T> + inline fixed_wide_int lshift (const T &c, unsigned int bit_size = 0, + ShiftOp z = NONE) const { + return wide_int_ro::lshift (c, bit_size, z); + } + + template <typename T> + inline fixed_wide_int lshift_widen (const T &c, unsigned int new_prec) const { + return wide_int_ro::lshift_widen (c, new_prec); + } + + template <typename T> + inline fixed_wide_int rshift (const T &c, signop sgn, + unsigned int bit_size = 0, + ShiftOp z = NONE) const { + return wide_int_ro::rshift (c, sgn, bit_size, z); + } + template <typename T> + inline fixed_wide_int rshiftu (const T &c, unsigned int bit_size = 0, + ShiftOp z = NONE) const { + return wide_int_ro::rshiftu (c, bit_size, z); + } + template <typename T> + inline fixed_wide_int rshifts (const T &c, unsigned int bit_size = 0, + ShiftOp z = NONE) const { + return wide_int_ro::rshifts (c, bit_size, z); + } + + template <typename T> + inline fixed_wide_int rrotate (const T &c, unsigned int prec) const { + return wide_int_ro::rrotate (c, prec); + } + inline fixed_wide_int rrotate (unsigned HOST_WIDE_INT y, unsigned int prec) const { + return wide_int_ro::rrotate (y, prec); + } +}; + +/* Logicals */ +template <> +template <int bitsize> +inline fixed_wide_int<bitsize> fixed_wide_int<bitsize>::operator & (const fixed_wide_int<bitsize> &c) const { + return static_cast<const wide_int_ro &> (*this) & c; +} + +template <> +template <int bitsize> +inline fixed_wide_int<bitsize> &fixed_wide_int<bitsize>::operator &= (const fixed_wide_int<bitsize> &c) { + (reinterpret_cast<wide_int &> (*this)) &= (const wide_int_ro&)c; + return *this; +} + +template <> +template <int bitsize> +inline fixed_wide_int<bitsize>
fixed_wide_int<bitsize>::operator | (const fixed_wide_int<bitsize> &c) const { + return static_cast<const wide_int_ro &> (*this) | c; +} + +template <> +template <int bitsize> +inline fixed_wide_int<bitsize> &fixed_wide_int<bitsize>::operator |= (const fixed_wide_int<bitsize> &c) { + (reinterpret_cast<wide_int &> (*this)) |= (const wide_int_ro&)c; + return *this; +} + +template <> +template <int bitsize> +inline fixed_wide_int<bitsize> fixed_wide_int<bitsize>::operator ^ (const fixed_wide_int<bitsize> &c) const { + return static_cast<const wide_int_ro &> (*this) ^ c; +} + +template <> +template <int bitsize> +inline fixed_wide_int<bitsize> &fixed_wide_int<bitsize>::operator ^= (const fixed_wide_int<bitsize> &c) { + (reinterpret_cast<wide_int &> (*this)) ^= (const wide_int_ro&)c; + return *this; +} + +/* Math operators */ +template <> +template <int bitsize> +inline fixed_wide_int<bitsize> fixed_wide_int<bitsize>::operator + (const fixed_wide_int<bitsize> &c) const { + return static_cast<const wide_int_ro &> (*this) + c; +} + +template <> +template <int bitsize> +inline fixed_wide_int<bitsize> &fixed_wide_int<bitsize>::operator += (const fixed_wide_int<bitsize> &c) { + (reinterpret_cast<wide_int &> (*this)) += (const wide_int_ro&)c; + return *this; +} + +template <> +template <int bitsize> +inline fixed_wide_int<bitsize> fixed_wide_int<bitsize>::operator - (const fixed_wide_int<bitsize> &c) const { + return static_cast<const wide_int_ro &> (*this) - c; +} + +template <> +template <int bitsize> +inline fixed_wide_int<bitsize> &fixed_wide_int<bitsize>::operator -= (const fixed_wide_int<bitsize> &c) { + (reinterpret_cast<wide_int &> (*this)) -= (const wide_int_ro&)c; + return *this; +} + +/* A wide_int_ro that has a large enough precision to do any address math + on the target. */ +typedef fixed_wide_int<addr_max_precision> addr_wide_int; +/* A wide_int_ro that has a large enough precision to do any math on the + target. */ +typedef fixed_wide_int<MAX_BITSIZE_MODE_ANY_INT> max_wide_int; + +extern void gt_ggc_mx(max_wide_int*); +extern void gt_pch_nx(max_wide_int*,void (*)(void*, void*), void*); +extern void gt_pch_nx(max_wide_int*); + +extern addr_wide_int mem_ref_offset (const_tree); + +/* The wide-int overload templates. */ + +template <> +inline const HOST_WIDE_INT* +wide_int_ro::to_shwi1 (HOST_WIDE_INT *s ATTRIBUTE_UNUSED, + unsigned int *l, unsigned int *p, + const wide_int_ro &y) +{ + *p = y.precision; + *l = y.len; + return y.val; +} + +template <> +inline const HOST_WIDE_INT* +wide_int_ro::to_shwi1 (HOST_WIDE_INT *s ATTRIBUTE_UNUSED, + unsigned int *l, unsigned int *p, + const wide_int &y) +{ + *p = y.precision; + *l = y.len; + return y.val; +} + + +template <> +inline const HOST_WIDE_INT* +wide_int_ro::to_shwi1 (HOST_WIDE_INT *s ATTRIBUTE_UNUSED, + unsigned int *l, unsigned int *p, + const fixed_wide_int<addr_max_precision> &y) +{ + *p = y.get_precision (); + *l = y.get_len (); + return y.get_val (); +} + +#if addr_max_precision != MAX_BITSIZE_MODE_ANY_INT +template <> +inline const HOST_WIDE_INT* +wide_int_ro::to_shwi1 (HOST_WIDE_INT *s ATTRIBUTE_UNUSED, + unsigned int *l, unsigned int *p, + const fixed_wide_int<MAX_BITSIZE_MODE_ANY_INT> &y) +{ + *p = y.get_precision (); + *l = y.get_len (); + return y.get_val (); +} +#endif + +template <> +inline const HOST_WIDE_INT* +wide_int_ro::to_shwi2 (HOST_WIDE_INT *s ATTRIBUTE_UNUSED, + unsigned int *l, const wide_int &y) +{ + *l = y.len; + return y.val; +} + + +/* The tree and const_tree overload templates. 
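+ As with the wide_int overloads above, these hand back a pointer into + the INTEGER_CST's own HWI array, so no data is copied.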
*/ +template<> +inline const HOST_WIDE_INT* +wide_int_ro::to_shwi1 (HOST_WIDE_INT *s ATTRIBUTE_UNUSED, + unsigned int *l, unsigned int *p, + const tree& tcst) +{ + tree type = TREE_TYPE (tcst); + + *p = TYPE_PRECISION (type); + *l = TREE_INT_CST_NUNITS (tcst); + return (const HOST_WIDE_INT*)&TREE_INT_CST_ELT (tcst, 0); +} + +template<> +inline const HOST_WIDE_INT* +wide_int_ro::to_shwi1 (HOST_WIDE_INT *s ATTRIBUTE_UNUSED, + unsigned int *l, unsigned int *p, + const const_tree& tcst) +{ + tree type = TREE_TYPE (tcst); + + *p = TYPE_PRECISION (type); + *l = TREE_INT_CST_NUNITS (tcst); + return (const HOST_WIDE_INT*)&TREE_INT_CST_ELT (tcst, 0); +} + +template<> +inline const HOST_WIDE_INT* +wide_int_ro::to_shwi2 (HOST_WIDE_INT *s ATTRIBUTE_UNUSED, + unsigned int *l, const tree& tcst) +{ + *l = TREE_INT_CST_NUNITS (tcst); + return (const HOST_WIDE_INT*)&TREE_INT_CST_ELT (tcst, 0); +} + +template<> +inline const HOST_WIDE_INT* +wide_int_ro::to_shwi2 (HOST_WIDE_INT *s ATTRIBUTE_UNUSED, + unsigned int *l, const const_tree& tcst) +{ + *l = TREE_INT_CST_NUNITS (tcst); + return (const HOST_WIDE_INT*)&TREE_INT_CST_ELT (tcst, 0); +} + +/* Checking for the functions that require that at least one of the + operands have a nonzero precision. If both of them have a + precision, then if CHECK_EQUAL is true, require that the + precisions be the same. */ + +void +wide_int_ro::check_precision (unsigned int *p1, unsigned int *p2, + bool check_equal ATTRIBUTE_UNUSED, + bool check_zero ATTRIBUTE_UNUSED) +{ + gcc_checking_assert ((!check_zero) || *p1 != 0 || *p2 != 0); + + if (*p1 == 0) + *p1 = *p2; + + if (*p2 == 0) + *p2 = *p1; + + gcc_checking_assert ((!check_equal) || *p1 == *p2); +} + +/* This is used to bundle an rtx and a mode together so that the pair + can be used as the second operand of a wide int expression. If we + ever put modes into rtx integer constants, this should go away and + then just pass an rtx in. */ +typedef std::pair<rtx, enum machine_mode> rtx_mode_t; + +/* There should logically be an overload for rtl here, but it cannot + be here because of circular include issues. It is in rtl.h. */ +template<> +inline const HOST_WIDE_INT* +wide_int_ro::to_shwi2 (HOST_WIDE_INT *s ATTRIBUTE_UNUSED, + unsigned int *l, const rtx_mode_t& rp); + +/* tree related routines. */ + +extern tree wide_int_to_tree (tree type, const wide_int_ro &cst); +extern tree wide_int_to_infinite_tree (tree type, const wide_int_ro &cst, + unsigned int prec); +extern tree force_fit_type_wide (tree, const wide_int_ro &, int, bool); + +/* real related routines. */ +extern wide_int real_to_integer (const REAL_VALUE_TYPE *, bool *, int); +extern void real_from_integer (REAL_VALUE_TYPE *, enum machine_mode, + wide_int, signop); +extern wide_int decimal_real_to_integer (const REAL_VALUE_TYPE *, bool *, int); + + +#endif /* GENERATOR FILE */ + +#endif /* WIDE_INT_H */