252 files changed, 5239 insertions, 2875 deletions
diff --git a/ChangeLog b/ChangeLog index 77ce407ab75..ec1b42bd20b 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,10 @@ +2011-07-07 Rainer Orth <ro@CeBiTec.Uni-Bielefeld.DE> + + PR target/39150 + * configure.ac (i[3456789]86-*-solaris2*): Also accept + x86_64-*-solaris2.1[0-9]*. + * configure: Regenerate. + 2011-06-29 Richard Sandiford <rdsandiford@googlemail.com> * MAINTAINERS (RTL optimizers): Add self. diff --git a/ChangeLog.MELT b/ChangeLog.MELT index 4634ff696d5..ed42a5d809f 100644 --- a/ChangeLog.MELT +++ b/ChangeLog.MELT @@ -1,6 +1,15 @@ 2011-07-07 Basile Starynkevitch <basile@starynkevitch.net> + MELT branch merged with trunk rev 175963 using svnmerge. + +2011-07-07 Basile Starynkevitch <basile@starynkevitch.net> + + * INSTALL/README-MELT-PLUGIN: Mention that debug_msg & assert_msg + work thanks to MELT_HAVE_DEBUG, and the melt_gcc_version numbers. + +2011-07-07 Basile Starynkevitch <basile@starynkevitch.net> + * INSTALL/README-MELT-PLUGIN: Preparing 0.8rc2 plugin. Comment more on using the scripts. diff --git a/INSTALL/README-MELT-PLUGIN b/INSTALL/README-MELT-PLUGIN index 6d9808f983f..f494a32ad43 100644 --- a/INSTALL/README-MELT-PLUGIN +++ b/INSTALL/README-MELT-PLUGIN @@ -143,6 +143,13 @@ New features: * the MELT plugin build-melt-plugin.sh has changed incompatibly (w.r.t. the previous 0.7 MELT plugin release). + * debug_msg, assert_msg ... should work, thanks to MELT_HAVE_DEBUG + preprocessor flag, even when melt.so is a plugin for a GCC without + checks enabled. + + * melt-runtime.h has a melt_gcc_version integer variable and + melt-runtime.c should be given MELT_GCC_VERSION preprocessor + constant. Many bugfixes (but some bugs remain) diff --git a/boehm-gc/ChangeLog b/boehm-gc/ChangeLog index d08acc014e0..34e3882e506 100644 --- a/boehm-gc/ChangeLog +++ b/boehm-gc/ChangeLog @@ -1,3 +1,10 @@ +2011-07-07 Rainer Orth <ro@CeBiTec.Uni-Bielefeld.DE> + + PR target/39150 + * configure.ac (i?86-*-solaris2.[89]): Also accept + x86_64-*-solaris2.1?. + * configure: Regenerate. + 2011-06-29 Rainer Orth <ro@CeBiTec.Uni-Bielefeld.DE> PR libgcj/49451 diff --git a/boehm-gc/configure b/boehm-gc/configure index 7defe3842c7..66bb3e7f06a 100755 --- a/boehm-gc/configure +++ b/boehm-gc/configure @@ -15163,7 +15163,7 @@ $as_echo "$as_me: WARNING: OpenBSD/Alpha without dlopen(). Shared library suppor alpha*-*-linux*) machdep="alpha_mach_dep.lo" ;; - i?86-*-solaris2.[89] | i?86-*-solaris2.1?) + i?86-*-solaris2.[89] | i?86-*-solaris2.1? | x86_64-*-solaris2.1?) $as_echo "#define SOLARIS25_PROC_VDB_BUG_FIXED 1" >>confdefs.h diff --git a/boehm-gc/configure.ac b/boehm-gc/configure.ac index 1f7fb25c0e5..799959a0e0f 100644 --- a/boehm-gc/configure.ac +++ b/boehm-gc/configure.ac @@ -400,7 +400,7 @@ case "$host" in alpha*-*-linux*) machdep="alpha_mach_dep.lo" ;; - i?86-*-solaris2.[[89]] | i?86-*-solaris2.1?) + i?86-*-solaris2.[[89]] | i?86-*-solaris2.1? | x86_64-*-solaris2.1?) AC_DEFINE(SOLARIS25_PROC_VDB_BUG_FIXED,1,[PROC_VDB in Solaris 2.5 gives wrong values for dirty bits]) ;; mipstx39-*-elf*) diff --git a/config/ChangeLog b/config/ChangeLog index c3a1b8b4d19..f25c43a17b6 100644 --- a/config/ChangeLog +++ b/config/ChangeLog @@ -1,3 +1,7 @@ +2011-07-06 Uros Bizjak <ubizjak@gmail.com> + + * mt-alphaieee (GOCFLAGS_FOR_TARGET): Add -mieee. 
+ 2011-06-15 Mike Stump <mikestump@comcast.net> PR target/49461 diff --git a/config/mt-alphaieee b/config/mt-alphaieee index 9c205314ad5..80c17cdc6ab 100644 --- a/config/mt-alphaieee +++ b/config/mt-alphaieee @@ -1,2 +1,3 @@ CFLAGS_FOR_TARGET += -mieee CXXFLAGS_FOR_TARGET += -mieee +GOCFLAGS_FOR_TARGET += -mieee diff --git a/configure b/configure index 38082d489d9..30418bc7097 100755 --- a/configure +++ b/configure @@ -3477,7 +3477,7 @@ case "${target}" in i[3456789]86-*-sco*) noconfigdirs="$noconfigdirs gprof target-libgloss" ;; - i[3456789]86-*-solaris2*) + i[3456789]86-*-solaris2* | x86_64-*-solaris2.1[0-9]*) noconfigdirs="$noconfigdirs target-libgloss" ;; i[3456789]86-*-sysv4*) diff --git a/configure.ac b/configure.ac index a1725a84fe8..e0f31a51456 100644 --- a/configure.ac +++ b/configure.ac @@ -923,7 +923,7 @@ case "${target}" in i[[3456789]]86-*-sco*) noconfigdirs="$noconfigdirs gprof target-libgloss" ;; - i[[3456789]]86-*-solaris2*) + i[[3456789]]86-*-solaris2* | x86_64-*-solaris2.1[[0-9]]*) noconfigdirs="$noconfigdirs target-libgloss" ;; i[[3456789]]86-*-sysv4*) diff --git a/gcc/ChangeLog b/gcc/ChangeLog index 28ff47174c1..22e3269a3c5 100644 --- a/gcc/ChangeLog +++ b/gcc/ChangeLog @@ -1,3 +1,490 @@ +2011-07-07 Rainer Orth <ro@CeBiTec.Uni-Bielefeld.DE> + + PR libmudflap/49550 + * gcc.c (MFWRAP_SPEC): Also wrap mmap64. + +2011-07-07 Rainer Orth <ro@CeBiTec.Uni-Bielefeld.DE> + + PR target/39150 + * configure.ac (gcc_cv_as_hidden): Also accept + x86_64-*-solaris2.1[0-9]*. + (gcc_cv_as_cfi_directive): Likewise. + (gcc_cv_as_comdat_group_group): Likewise. + (set_have_as_tls): Likewise. + * configure: Regenerate. + * config.gcc (i[34567]86-*-solaris2*): Also handle + x86_64-*-solaris2.1[0-9]*. + * config.host (i[34567]86-*-solaris2*): Likewise. + * config/sparc/sol2.h (ASM_CPU_DEFAULT_SPEC): Remove. + * config/sol2-bi.h (ASM_CPU_DEFAULT_SPEC): Redefine. + [USE_GLD] (ARCH_DEFAULT_EMULATION): Define. + (TARGET_LD_EMULATION): Use it. + * config/i386/sol2.h (ASM_CPU_DEFAULT_SPEC): Define. + (SUBTARGET_CPU_EXTRA_SPECS): Add asm_cpu_default. + * config/i386/sol2-bi.h (ASM_CPU32_DEFAULT_SPEC): Define. + (ASM_CPU64_DEFAULT_SPEC): Define. + (ASM_CPU_SPEC): Use %(asm_cpu_default). + (ASM_SPEC): Redefine. + (DEFAULT_ARCH32_P): Define using TARGET_64BIT_DEFAULT. + * config/host-solaris.c [__x86_64__] (TRY_EMPTY_VM_SPACE): Reduce. + * doc/install.texi (Specific, amd64-*-solaris2.1[0-9]*): + Document. + (Specific, i?86-*-solaris2.10): Mention x86_64-*-solaris2.1[0-9]* + configuration. + (Specific, x86_64-*-solaris2.1[0-9]*): Document. + +2011-07-07 Rainer Orth <ro@CeBiTec.Uni-Bielefeld.DE> + + * config/sol2.h (ASM_SPEC): Split into ... + (ASM_SPEC_BASE, ASM_PIC_SPEC): ... this. + * config/i386/sol2.h (ASM_SPEC): Define using ASM_SPEC_BASE. + * config/i386/sol2-bi.h (ASM_CPU_SPEC): Redefine. + (ASM_SPEC): Use ASM_SPEC_BASE. + * config/sparc/sol2.h (ASM_SPEC): Redefine. + +2011-07-07 Georg-Johann Lay <avr@gjlay.de> + + * config/avr/avr.md (*reload_insi): Change predicate #1 to + const_int_operand. Ditto for peep2 producing this insn. + Add argument to output_reload_insisf call. + (*movsi,*movsf): Add argument to output_movsisf call. + (*reload_insf): New insn and new peep2 to produce it. + * config/avr/avr-protos.h (output_movsisf): Change prototype. + (output_reload_insisf): Change prototype. + * config/avr/avr.c (avr_asm_len): New function. + (output_reload_insisf): Rewrite. + (output_movsisf): Change prototype. output_reload_insisf for + all CONST_INT and CONST_DOUBLE. ALlow moving 0.0f to memory. 
+ (adjust_insn_length): Add argument to output_movsisf and + output_reload_insisf call. + +2011-07-07 Bernd Schmidt <bernds@codesourcery.com> + + * emit-rtl.c (paradoxical_subreg_p): New function. + * rtl.h (paradoxical_subreg_p): Declare. + * combine.c (set_nonzero_bits_and_sign_copies, get_last_value, + apply_distributive_law, simplify_comparison, simplify_set): Use it. + * cse.c (record_jump_cond, cse_insn): Likewise. + * expr.c (force_operand): Likewise. + * rtlanal.c (num_sign_bit_copies1): Likewise. + * reload1.c (eliminate_regs_1, strip_paradoxical_subreg): Likewise. + * reload.c (push_secondary_reload, find_reloads_toplev): Likewise. + (push_reload): Use precision to check for paradoxical subregs. + * expmed.c (extract_bit_field_1): Likewise. + + * machmode.h (HWI_COMPUTABLE_MODE_P): New macro. + * combine.c (set_nonzero_bits_and_sign_copies): Use it. + (find_split-point, combine_simplify_rtx, simplify_if_then_else, + simplify_set, simplify_logical, expand_compound_operation, + make_extraction, force_to_mode, if_then_else_cond, extended_count, + try_widen_shift_mode, simplify_shift_const_1, simplify_comparison, + record_value_for_reg): Likewise. + * expmed.c (expand_widening_mult, expand_mult_highpart): Likewise. + * simplify-rtx. c (simplify_unary_operation_1, + simplify_binary_operation_1, simplify_const_relational_operation): + Likewise. + + * explow.c (trunc_int_for_mode): Use GET_MODE_PRECISION + instead of GET_MODE_BITSIZE where appropriate. + * rtlanal.c (subreg_lsb_1, subreg_get_info, nonzero_bits1, + num_sign_bit_copies1, canonicalize_condition, low_bitmask_len, + init_num_sign_bit_copies_in_rep): Likewise. + * cse.c (fold_rtx, cse_insn): Likewise. + * loop-doloop.c (doloop_modify, doloop_optimize): Likewise. + * simplify-rtx.c (simplify_unary_operation_1, + simplify_const_unary_operation, simplify_binary_operation_1, + simplify_const_binary_operation, simplify_ternary_operation, + simplify_const_relational_operation, simplify_subreg): Likewise. + * combine.c (try_combine, find_split_point, combine_simplify_rtx, + simplify_if_then_else, simplify_set, expand_compound_operation, + expand_field_assignment, make_extraction, if_then_else_cond, + make_compound_operation, force_to_mode, make_field_assignment, + reg_nonzero_bits_for_combine, reg_num_sign_bit_copies_for_combine, + extended_count, try_widen_shift_mode, simplify_shift_const_1, + simplify_comparison, record_promoted_value, simplify_compare_const, + record_dead_and_set_regs_1): Likewise. + + Revert: + * simplify-rtx.c (simplify_const_binary_operation): Use the + shift_truncation_mask hook instead of performing modulo by + width. Compare against mode precision, not bitsize. + * combine.c (combine_simplify_rtx, simplify_shift_const_1): + Use shift_truncation_mask instead of constructing the value + manually. + +2011-07-06 Michael Meissner <meissner@linux.vnet.ibm.com> + + * config/rs6000/rs6000-protos.h (rs6000_call_indirect_aix): New + declaration. + (rs6000_save_toc_in_prologue_p): Ditto. + + * config/rs6000/rs6000.opt (-mr11): New switch to disable loading + up the static chain (r11) during indirect function calls. + (-msave-toc-indirect): New undocumented debug switch. + + * config/rs6000/rs6000.c (struct machine_function): Add + save_toc_in_prologue field to note whether the prologue needs to + save the TOC value in the reserved stack location. + (rs6000_emit_prologue): Use TOC_REGNUM instead of 2. If we need + to save the TOC in the prologue, do so. 
+ (rs6000_trampoline_init): Don't allow creating AIX style + trampolines if -mno-r11 is in effect. + (rs6000_call_indirect_aix): New function to create AIX style + indirect calls, adding support for -mno-r11 to suppress loading + the static chain, and saving the TOC in the prologue instead of + the call body. + (rs6000_save_toc_in_prologue_p): Return true if we are saving the + TOC in the prologue. + + * config/rs6000/rs6000.md (STACK_POINTER_REGNUM): Add more fixed + register numbers. + (TOC_REGNUM): Ditto. + (STATIC_CHAIN_REGNUM): Ditto. + (ARG_POINTER_REGNUM): Ditto. + (SFP_REGNO): Delete, unused. + (TOC_SAVE_OFFSET_32BIT): Add constants for AIX TOC save and + function descriptor offsets. + (TOC_SAVE_OFFSET_64BIT): Ditto. + (AIX_FUNC_DESC_TOC_32BIT): Ditto. + (AIX_FUNC_DESC_TOC_64BIT): Ditto. + (AIX_FUNC_DESC_SC_32BIT): Ditto. + (AIX_FUNC_DESC_SC_64BIT): Ditto. + (ptrload): New mode attribute for the appropriate load of a + pointer. + (call_indirect_aix32): Delete, rewrite AIX indirect function + calls. + (call_indirect_aix64): Ditto. + (call_value_indirect_aix32): Ditto. + (call_value_indirect_aix64): Ditto. + (call_indirect_nonlocal_aix32_internal): Ditto. + (call_indirect_nonlocal_aix32): Ditto. + (call_indirect_nonlocal_aix64_internal): Ditto. + (call_indirect_nonlocal_aix64): Ditto. + (call): Rewrite AIX indirect function calls. Add support for + eliminating the static chain, and for moving the save of the TOC + to the function prologue. + (call_value): Ditto. + (call_indirect_aix<ptrsize>): Ditto. + (call_indirect_aix<ptrsize>_internal): Ditto. + (call_indirect_aix<ptrsize>_internal2): Ditto. + (call_indirect_aix<ptrsize>_nor11): Ditto. + (call_value_indirect_aix<ptrsize>): Ditto. + (call_value_indirect_aix<ptrsize>_internal): Ditto. + (call_value_indirect_aix<ptrsize>_internal2): Ditto. + (call_value_indirect_aix<ptrsize>_nor11): Ditto. + (call_nonlocal_aix32): Relocate in the rs6000.md file. + (call_nonlocal_aix64): Ditto. + + * doc/invoke.texi (RS/6000 and PowerPC Options): Add -mr11 and + -mno-r11 documentation. + +2011-07-06 Jonathan Wakely <jwakely.gcc@gmail.com> + + PR other/49658 + * doc/extend.texi (Compound Literals): Fix typo. + +2011-07-06 James Greenhalgh <james.greenhalgh@arm.com> + + * config/arm/arm.h (TARGET_CPU_CPP_BUILTINS): Add + __ARM_FEATURE_DSP. + +2011-07-06 Basile Starynkevitch <basile@starynkevitch.net> + + * configure.ac (plugin-version.h): Generate + GCCPLUGIN_VERSION_MAJOR, GCCPLUGIN_VERSION_MINOR, + GCCPLUGIN_VERSION_PATCHLEVEL, GCCPLUGIN_VERSION constant integer + macros. + + * configure: Regenerate. + + * doc/plugins.texi (Building GCC plugins): Mention + GCCPLUGIN_VERSION ... constant macros in plugin-version.h. + +2011-07-06 Bernd Schmidt <bernds@codesourcery.com> + + * machmode.h (TRULY_NOOP_TRUNCATION_MODES_P): New macro. + * combine.c (make_extraction, gen_lowpart_or_truncate, + apply_distributive_law, simplify_comparison, + reg_truncated_to_mode, record_truncated_value): Use it. + * cse.c (notreg_cost): Likewise. + * expmed.c (store_bit_field_1, extract_bit_field_1): Likewise. + * expr.c (convert_move, convert_modes): Likewise. + * optabs.c (expand_binop, expand_unop): Likewise. + * postreload.c (move2add_last_label): Likewise. + * regmove.c (optimize_reg_copy_3): Likewise. + * rtlhooks.c (gen_lowpart_general): Likewise. + * simplify-rtx.c (simplify_unary_operation_1): Likewise. + +2011-07-06 Joseph Myers <joseph@codesourcery.com> + + * config/rs6000/vxworks.h (WORDS_BIG_ENDIAN): Define. 
+ +2011-07-06 Bernd Schmidt <bernds@codesourcery.com> + + * cse.c (find_comparison_args): Use val_mode_signbit_set_p. + * simplify-rtx.c (mode_signbit_p): Use GET_MODE_PRECISION. + (val_mode_signbit_p, val_mode_signbit_set_p): New functions. + (simplify_const_unary_operation, simplify_binary_operation_1, + simplify_const_binary_operation, + simplify_const_relational_operation): Use them. Use + GET_MODE_MASK for masking and sign-extensions. + * combine.c (set_nonzero_bits_and_sign_copies, simplify_set, + combine_simplify_rtx, force_to_mode, reg_nonzero_bits_for_combine, + simplify_shift_const_1, simplify_comparison): Likewise. + * expr.c (convert_modes): Likewise. + * rtlanal.c (nonzero_bits1, canonicalize_condition): Likewise. + * expmed.c (emit_cstore, emit_store_flag_1, emit_store_flag): + Likewise. + * rtl.h (val_mode_signbit_p, val_mode_signbit_set_p): Declare. + + * simplify-rtx.c (simplify_ternary_operation): Remove dead code. + +2011-07-06 Richard Guenther <rguenther@suse.de> + + PR tree-optimization/49645 + * c-decl.c (finish_decl): Also set DECL_HARD_REGISTER for global + register variables. + * tree-ssa-sccvn.c (vn_reference_op_eq): Disregard differences + in type qualification here ... + (copy_reference_ops_from_ref): ... not here. + (vn_reference_lookup_3): ... or here. + (copy_reference_ops_from_ref): Record decl bases as MEM[&decl]. + (vn_reference_lookup): Do the lookup with a valueized ao-ref. + +2011-07-06 Ian Lance Taylor <iant@google.com> + + * doc/install.texi (Configuration): It's + --enable-gnu-indirect-function, not --enable-indirect-function. + +2011-07-06 Bernd Schmidt <bernds@codesourcery.com> + + * simplify-rtx.c (simplify_const_binary_operation): Use the + shift_truncation_mask hook instead of performing modulo by + width. Compare against mode precision, not bitsize. + * combine.c (combine_simplify_rtx, simplify_shift_const_1): + Use shift_truncation_mask instead of constructing the value + manually. + +2011-07-06 H.J. Lu <hongjiu.lu@intel.com> + + PR middle-end/47383 + * tree-ssa-address.c (addr_for_mem_ref): Use pointer_mode for + address computation and convert to address_mode if needed. + +2011-07-06 Richard Guenther <rguenther@suse.de> + + * tree.c (build_common_tree_nodes_2): Merge with + build_common_tree_nodes. + * tree.h (build_common_tree_nodes): Adjust prototype. + (build_common_tree_nodes_2): Remove. + * doc/tm.texi.in (lang_hooks.builtin_function): Adjust. + * doc/tm.texi (lang_hooks.builtin_function): Regenerate. + +2011-07-05 Jakub Jelinek <jakub@redhat.com> + + PR tree-optimization/49618 + * tree-eh.c (tree_could_trap_p) <case CALL_EXPR>: For DECL_WEAK + t recurse on the decl. + <case FUNCTION_DECL, case VAR_DECL>: For DECL_WEAK decls + return true if expr isn't known to be defined in current + TU or some other LTO partition. + +2011-07-05 Michael Meissner <meissner@linux.vnet.ibm.com> + + * params.def (PARAM_CASE_VALUES_THRESHOLD): New parameter to + override CASE_VALUES_THRESHOLD. + + * stmt.c (toplevel): Include params.h. + (case_values_threshold): Use the --param case-values-threshold + value if non-zero, otherwise use machine dependent value. + (expand_case): Use case_values_threshold. + + * Makefile.in (stmt.o): Add $(PARAMS_H) dependency. + + * doc/invoke.texi (--param case-values-threshold): Document. + +2011-07-05 Richard Henderson <rth@redhat.com> + + * dwarf2out.c (dwarf2out_cfi_label): Make static. + (dwarf2out_flush_queued_reg_saves): Make static. + (dwarf2out_reg_save): Remove. + (dwarf2out_return_save): Remove. 
+ (dwarf2out_return_reg): Remove. + (dwarf2out_reg_save_reg): Remove. + (dwarf2out_def_cfa): Merge into ... + (dwarf2out_frame_init): ... here. + * dwarf2out.h, tree.h: Remove declarations as necessary. + +2011-07-05 Richard Henderson <rth@redhat.com> + + * config/ia64/ia64.c (ia64_dwarf_handle_frame_unspec): Remove. + (TARGET_DWARF_HANDLE_FRAME_UNSPEC): Remove. + (ia64_expand_epilogue): Emit an empty FRAME_RELATED_EXPR for + the alloc insn. + + * config/ia64/ia64.c (ia64_emit_deleted_label_after_insn): Remove. + (IA64_CHANGE_CFA_IN_EPILOGUE): Remove. + (process_epilogue): Don't call dwarf2out_def_cfa. + + * config/ia64/ia64.c (ia64_expand_prologue): Use pc_rtx to + indicate the return address save. + (process_cfa_register): Likewise. + + * config/ia64/ia64.c (ia64_dwarf2out_def_steady_cfa): Remove. + (process_cfa_adjust_cfa, ia64_asm_unwind_emit): Don't call it. + + * config/ia64/ia64.c (ia64_expand_prologue): Emit REG_CFA_REGISTER + for ar.pfs save at alloc insn. + +2011-07-05 Richard Henderson <rth@redhat.com> + + * config/arm/arm.c (arm_dwarf_handle_frame_unspec): Remove. + (TARGET_DWARF_HANDLE_FRAME_UNSPEC): Remove. + (arm_expand_prologue): Use REG_CFA_REGISTER to mark the + stack pointer save. + (arm_unwind_emit_set): Don't recognize UNSPEC_STACK_ALIGN. + (arm_unwind_emit): Walk REG_NOTES for unwinding notes. Emit + proper unwind info for a REG_CFA_REGISTER save of stack pointer. + * config/arm/arm.md (UNSPEC_STACK_ALIGN): Remove. + +2011-07-05 Richard Henderson <rth@redhat.com> + + * config/vax/vax.md (define_c_enum unspecv): New. Define the + VUNSPEC_* constants here instead of via define_constants. + (VUNSPEC_PEM): New constant. + (procedure_entry_mask): New insn. + (prologue): New expander. + * config/vax/vax.c (vax_add_reg_cfa_offset): New. + (vax_expand_prologue): Rename from vax_output_function_prologue; + emit rtl instead of text. + (TARGET_ASM_FUNCTION_PROLOGUE): Remove. + (print_operand): Add 'x' prefix. + +2011-07-05 H.J. Lu <hongjiu.lu@intel.com> + + PR middle-end/47715 + * calls.c (precompute_register_parameters): Promote the function + argument before checking non-legitimate constant. + +2011-07-05 Sebastian Pop <sebastian.pop@amd.com> + + PR tree-optimization/47654 + * graphite-blocking.c (pbb_strip_mine_time_depth): Do not return bool. + (lst_do_strip_mine_loop): Return an int. + (lst_do_strip_mine): Same. + (scop_do_strip_mine): Same. + (scop_do_block): Loop blocking should strip-mine at least two loops. + * graphite-interchange.c (lst_interchange_select_outer): Return an int. + (scop_do_interchange): Same. + * graphite-poly.h (scop_do_interchange): Update declaration. + (scop_do_strip_mine): Same. + +2011-07-05 Sebastian Pop <sebastian.pop@amd.com> + + * graphite-clast-to-gimple.c (precision_for_value): Removed. + (precision_for_interval): Removed. + (gcc_type_for_interval): Use mpz_sizeinbase. + +2011-07-05 Sebastian Pop <sebastian.pop@amd.com> + + * graphite-ppl.h (value_max): Correct computation of max. + +2011-07-05 Sebastian Pop <sebastian.pop@amd.com> + + * graphite-clast-to-gimple.c (clast_name_to_index): Add missing space. + +2011-07-05 Richard Guenther <rguenther@suse.de> + + * c-decl.c (c_init_decl_processing): Defer building common + tree nodes to c_common_nodes_and_builtins. + +2011-07-05 Razya Ladelsky <razya@il.ibm.com> + + PR tree-optimization/49580 + * tree-cfg.c (gimple_duplicate_sese_tail): Remove handling of + the loop's number of iterations. 
+ * tree-parloops.c (transform_to_exit_first_loop): Add the + handling of the loop's number of iterations before the call + to gimple_duplicate_sese_tail. + Insert the stmt caclculating the new rhs of the loop's + condition stmt to the preheader instead of iters_bb. + +2011-07-05 H.J. Lu <hongjiu.lu@intel.com> + + PR rtl-optimization/47449 + * fwprop.c (forward_propagate_subreg): Don't propagate hard + register nor zero/sign extended hard register. + +2011-07-05 Richard Guenther <rguenther@suse.de> + + PR tree-optimization/49518 + PR tree-optimization/49628 + * tree-vect-data-refs.c (vect_enhance_data_refs_alignment): Skip + irrelevant and invariant data-references. + (vect_analyze_data_ref_access): For invariant loads clear the + group association. + +2011-07-04 Jakub Jelinek <jakub@redhat.com> + + PR rtl-optimization/49619 + * combine.c (combine_simplify_rtx): In PLUS -> IOR simplification + pass VOIDmode as op0_mode to recursive call, and return temp even + when different from tor, just if it is not IOR of the original + PLUS arguments. + + PR rtl-optimization/49472 + * simplify-rtx.c (simplify_unary_operation_1) <case NEG>: When + negating MULT, negate the second operand instead of first. + (simplify_binary_operation_1) <case MULT>: If one operand is + a NEG and the other is MULT, don't attempt to optimize by + negation of the MULT operand if it only moves the NEG operation + around. + + PR debug/49602 + * tree-into-ssa.c (rewrite_debug_stmt_uses): Disregard + get_current_def return value if it can't be trusted to be + the current value of the variable in the current bb. + +2011-07-04 Uros Bizjak <ubizjak@gmail.com> + + PR target/49600 + * config/i386/i386.md (SSE2 int->float split): Push operand 1 in + general register to memory for !TARGET_INTER_UNIT_MOVES. + +2011-07-04 Georg-Johann Lay <avr@gjlay.de> + + PR target/44643 + * config/avr/avr.c (avr_insert_attributes): Use TYPE_READONLY + instead of TREE_READONLY. + +2011-07-04 Georg-Johann Lay <avr@gjlay.de> + + * doc/extend.texi (AVR Built-in Functions): Update documentation + of __builtin_avr_fmul*. + * config/avr/avr.c (avr_init_builtins): Don't depend on AVR_HAVE_MUL. + * config/avr/avr-c.c (avr_cpu_cpp_builtins): Ditto. + * config/avr/avr.md (fmul): Rename to fmul_insn. + (fmuls): Rename to fmuls_insn. + (fmulsu): Rename to fmulsu_insn. + (fmul,fmuls,fmulsu): New expander. + (*fmul.call,*fmuls.call,*fmulsu.call): New Insn. + * config/avr/t-avr (LIB1ASMFUNCS): Add _fmul, _fmuls, _fmulsu. + * config/avr/libgcc.S (__fmul): New function. + (__fmuls): New function. + (__fmulsu,__fmulsu_exit): New function. + +2011-07-04 Richard Guenther <rguenther@suse.de> + + PR tree-optimization/49615 + * tree-cfgcleanup.c (split_bbs_on_noreturn_calls): Fix + basic-block index check. + +2011-07-04 Georg-Johann Lay <avr@gjlay.de> + + * longlong.h (count_leading_zeros, count_trailing_zeros, + COUNT_LEADING_ZEROS_0): Define for target avr if W_TYPE_SIZE is 16 + resp. 64. + 2011-07-03 Ira Rosen <ira.rosen@linaro.org> PR tree-optimization/49610 @@ -72,8 +559,7 @@ 2011-07-01 Richard Guenther <rguenther@suse.de> PR tree-optimization/49603 - * tree-vect-stmts.c (vectorizable_load): Remove unnecessary - assert. + * tree-vect-stmts.c (vectorizable_load): Remove unnecessary assert. 2011-06-30 Martin Jambor <mjambor@suse.cz> @@ -153,8 +639,7 @@ PR tree-optimization/46787 * tree-data-ref.c (dr_address_invariant_p): Remove. (find_data_references_in_stmt): Invariant accesses are ok now. - * tree-vect-stmts.c (vectorizable_load): Handle invariant - loads. 
+ * tree-vect-stmts.c (vectorizable_load): Handle invariant loads. * tree-vect-data-refs.c (vect_analyze_data_ref_access): Allow invariant loads. @@ -240,9 +725,8 @@ * config/arm/unwind-arm.c (enum __cxa_type_match_result): New. (cxa_type_match): Correct declaration. - (__gnu_unwind_pr_common): Reconstruct - additional indirection when __cxa_type_match returns - succeeded_with_ptr_to_base. + (__gnu_unwind_pr_common): Reconstruct additional indirection + when __cxa_type_match returns succeeded_with_ptr_to_base. 2011-06-29 Ulrich Weigand <Ulrich.Weigand@de.ibm.com> @@ -258,7 +742,7 @@ Detect subregs via recursive descent instead of via SUBREG_LOC. 2011-06-29 Georg-Johann Lay <avr@gjlay.de> - + * config/avr/avr.c (avr_encode_section_info): Dispatch to default_encode_section_info. @@ -275,14 +759,13 @@ 2011-06-29 Jakub Jelinek <jakub@redhat.com> PR debug/49567 - * dwarf2out.c (mem_loc_descriptor) <case ZERO_EXTEND>: Give up - for non-MODE_INT modes instead of asserting the mode has MODE_INT - class. + * dwarf2out.c (mem_loc_descriptor) <case ZERO_EXTEND>: Give up for + non-MODE_INT modes instead of asserting the mode has MODE_INT class. 2011-06-29 Georg-Johann Lay <avr@gjlay.de> - + PR target/34734 - * config/avr/avr.c (avr_handle_progmem_attribute): Move warning + * config/avr/avr.c (avr_handle_progmem_attribute): Move warning about uninitialized data attributed 'progmem' from here... (avr_encode_section_info): ...to this new function. (TARGET_ENCODE_SECTION_INFO): New define. @@ -290,7 +773,7 @@ section flag SECTION_WRITE. 2011-06-29 Georg-Johann Lay <avr@gjlay.de> - + * config/avr/t-avr (LIB1ASMFUNCS): Add _mulhisi3, _umulhisi3, _xmulhisi3_exit. * config/avr/libgcc.S (_xmulhisi3_exit): New Function. @@ -352,13 +835,12 @@ * gcc.c: Include params.h. (set_option_handlers): Also use common_handle_option and target_handle_option. - (main): Call global_init_params, finish_params and - init_options_struct. + (main): Call global_init_params, finish_params and init_options_struct. * opts.c (debug_type_names): Move from toplev.c. (print_filtered_help): Access quiet_flag through opts pointer. - (common_handle_option): Return early in the driver for some - options. Access in_lto_p, dwarf_version and - warn_maybe_uninitialized through opts pointer. + (common_handle_option): Return early in the driver for some options. + Access in_lto_p, dwarf_version and warn_maybe_uninitialized through + opts pointer. * toplev.c (in_lto_p): Move to common.opt. (debug_type_names): Move to opts.c. * Makefile.in (OBJS): Remove opts.o. @@ -367,8 +849,7 @@ 2011-06-28 Kai Tietz <ktietz@redhat.com> - * tree-ssa-forwprop.c (simplify_bitwise_binary): Improve - type sinking. + * tree-ssa-forwprop.c (simplify_bitwise_binary): Improve type sinking. 2011-06-28 Ramana Radhakrishnan <ramana.radhakrishnan@linaro.org> @@ -402,8 +883,7 @@ (genprogrtl): Add attr-common. * genattr.c (main): Include insn-attr-common.h. Don't generate definitions of DELAY_SLOTS or INSN_SCHEDULING. - * opts.c: Include insn-attr-common.h instead of rtl.h and - insn-attr.h. + * opts.c: Include insn-attr-common.h instead of rtl.h and insn-attr.h. 2011-06-28 Georg-Johann Lay <avr@gjlay.de> @@ -500,9 +980,8 @@ 2011-06-27 Nick Clifton <nickc@redhat.com> - * config/mn10300/mn10300.md (clzsi2): Use XOR after BSCH to - convert bit position of highest bit set into a count of the high - zero bits. + * config/mn10300/mn10300.md (clzsi2): Use XOR after BSCH to convert + bit position of highest bit set into a count of the high zero bits. 
2011-06-27 Eric Botcazou <ebotcazou@adacore.com> @@ -524,8 +1003,8 @@ * tree-ssa-math-opts.c (do_shift_rotate): Zero bits out of type precision after operation. - (find_bswap): Take for limit value the integer auto- - promotion into account. + (find_bswap): Take for limit value the integer auto-promotion + into account. 2011-06-27 Eric Botcazou <ebotcazou@adacore.com> @@ -555,8 +1034,7 @@ clear some builtins on VMS. Calls vms_patch_builtins. (ia64_asm_output_external): Remove DO_CRTL_NAME. * config/ia64/vms64.h: Do not include vms-crtl-64.h - * config.gcc (*-*-*vms*): Define extra_objs, target_gtfiles, - tm_p_file. + * config.gcc (*-*-*vms*): Define extra_objs, target_gtfiles, tm_p_file. 2011-06-27 Tristan Gingold <gingold@adacore.com> @@ -659,8 +1137,7 @@ PR target/49335 * config/arm/predicates.md (add_operator): New. - * config/arm/arm.md ("*arith_shiftsi"): Fix for SP reg usage - in Thumb2. + * config/arm/arm.md ("*arith_shiftsi"): Fix for SP reg usage in Thumb2. 2011-06-24 Andi Kleen <ak@linux.intel.com> @@ -756,9 +1233,8 @@ PR libgomp/49490 * omp-low.c (expand_omp_for_static_nochunk): Only - use n ceil/ nthreads size for the first - n % nthreads threads in the team instead of - all threads except for the last few ones which + use n ceil/ nthreads size for the first n % nthreads threads in the + team instead of all threads except for the last few ones which get less work or none at all. PR debug/49496 @@ -779,8 +1255,7 @@ 2011-06-22 Nathan Sidwell <nathan@codesourcery.com> - * config/arm/arm.h (OPTION_DEFAULT_SPECS): Fix -mtls-dialect - typo. + * config/arm/arm.h (OPTION_DEFAULT_SPECS): Fix -mtls-dialect typo. * config.gcc (arm*-*-linux*): Default to gnu tls. (arm*-*-*): Add --with-tls option. (all_defaults): Add 'tls'. @@ -816,7 +1291,7 @@ * cgraphunit.c (assemble_thunk): Use correct return type. 2011-06-22 Dmitry Plotnikov <dplotnikov@ispras.ru> - Dmitry Melnik <dm@ispras.ru> + Dmitry Melnik <dm@ispras.ru> * config/arm/arm.c (neon_immediate_valid_for_shift): New function. (neon_output_shift_immediate): Ditto. @@ -862,9 +1337,8 @@ of base type reference as argument. (resolve_addr_in_expr): Likewise. Fix keep computation. (convert_descriptor_to_signed): Renamed to... - (convert_descriptor_to_mode): ... this. For wider types convert - to unsigned instead of signed, for <= DWARF2_ADDR_SIZE convert to - untyped. + (convert_descriptor_to_mode): ... this. For wider types convert to + unsigned instead of signed, for <= DWARF2_ADDR_SIZE convert to untyped. (typed_binop): New function. (scompare_loc_descriptor, ucompare_loc_descriptor, minmax_loc_descriptor, mem_loc_descriptor): For wider integer modes @@ -917,8 +1391,7 @@ debug args vector from old_decl to new_decl. * ipa-prop.c (ipa_modify_call_arguments): For optimized away or modified parameters, add debug bind stmts before call - setting DEBUG_EXPR_DECL which is remembered in debug args - vector. + setting DEBUG_EXPR_DECL which is remembered in debug args vector. * cfgexpand.c (expand_call_stmt): Call expand_debug_expr on DECL_DEBUG_EXPRs from debug args vector. (expand_debug_source_expr): New function. @@ -927,9 +1400,8 @@ * var-tracking.c (prepare_call_arguments): Add debug args to call_arguments if any. * dwarf2out.c (dwarf_stack_op_name, size_of_loc_descr, - output_loc_operands, output_loc_operands_raw, - resolve_addr_in_expr, compare_loc_operands): Handle - DW_OP_GNU_parameter_ref. + output_loc_operands, output_loc_operands_raw, resolve_addr_in_expr, + compare_loc_operands): Handle DW_OP_GNU_parameter_ref. 
(get_ref_die_offset, parameter_ref_descriptor): New functions. (mem_loc_descriptor): Handle DEBUG_PARAMETER_REF. (gen_subprogram_die): Handle parameters identified by @@ -962,7 +1434,7 @@ if the function has no attributes. * tree.c (is_attribute_with_length_p): Removed. (is_attribute_p): Removed. - (private_is_attribute_p): New. + (private_is_attribute_p): New. (private_lookup_attribute): New. (lookup_attribute): Removed. (lookup_ident_attribute): New. @@ -981,7 +1453,7 @@ in the form 'text', not '__text__'. (private_is_attribute_p, private_lookup_attribute): New. Updated comments. - + 2011-06-21 Andrew MacLeod <amacleod@redhat.com> * builtins.c: Add sync_ or SYNC__ to builtin names. diff --git a/gcc/DATESTAMP b/gcc/DATESTAMP index fc7a4fff175..7ccd9fc4a4d 100644 --- a/gcc/DATESTAMP +++ b/gcc/DATESTAMP @@ -1 +1 @@ -20110704 +20110707 diff --git a/gcc/Makefile.in b/gcc/Makefile.in index acd8f3030b8..cb14644b49c 100644 --- a/gcc/Makefile.in +++ b/gcc/Makefile.in @@ -3007,7 +3007,7 @@ stmt.o : stmt.c $(CONFIG_H) $(SYSTEM_H) coretypes.h $(TM_H) $(RTL_H) \ $(LIBFUNCS_H) $(EXCEPT_H) $(RECOG_H) $(DIAGNOSTIC_CORE_H) \ output.h $(GGC_H) $(TM_P_H) langhooks.h $(PREDICT_H) $(OPTABS_H) \ $(TARGET_H) $(GIMPLE_H) $(MACHMODE_H) $(REGS_H) alloc-pool.h \ - $(PRETTY_PRINT_H) $(BITMAP_H) + $(PRETTY_PRINT_H) $(BITMAP_H) $(PARAMS_H) except.o : except.c $(CONFIG_H) $(SYSTEM_H) coretypes.h $(TM_H) $(RTL_H) \ $(TREE_H) $(FLAGS_H) $(EXCEPT_H) $(FUNCTION_H) $(EXPR_H) $(LIBFUNCS_H) \ langhooks.h insn-config.h hard-reg-set.h $(BASIC_BLOCK_H) output.h \ diff --git a/gcc/ada/ChangeLog b/gcc/ada/ChangeLog index 9e778c102c5..a0490596d93 100644 --- a/gcc/ada/ChangeLog +++ b/gcc/ada/ChangeLog @@ -1,3 +1,18 @@ +2011-07-07 Eric Botcazou <ebotcazou@adacore.com> + + * gcc-interface/misc.c (gnat_init): Tweak previous change. + +2011-07-07 Rainer Orth <ro@CeBiTec.Uni-Bielefeld.DE> + + PR target/39150 + * gcc-interface/Makefile.in: Handle x86_64-solaris2. + +2011-07-06 Richard Guenther <rguenther@suse.de> + + * gcc-interface/misc.c (gnat_init): Merge calls to + build_common_tree_nodes and build_common_tree_nodes_2. + Re-initialize boolean_false_node. 
+ 2011-07-02 Eric Botcazou <ebotcazou@adacore.com> Olivier Hainque <hainque@adacore.com> Nicolas Setton <setton@adacore.com> diff --git a/gcc/ada/gcc-interface/Makefile.in b/gcc/ada/gcc-interface/Makefile.in index 7feb6d27b81..0b5c8795a7a 100644 --- a/gcc/ada/gcc-interface/Makefile.in +++ b/gcc/ada/gcc-interface/Makefile.in @@ -1011,8 +1011,8 @@ ifeq ($(strip $(filter-out sparc% sun solaris%,$(targ))),) endif endif -ifeq ($(strip $(filter-out %86 solaris2%,$(arch) $(osys))),) - LIBGNAT_TARGET_PAIRS = \ +ifeq ($(strip $(filter-out %86 %x86_64 solaris2%,$(arch) $(osys))),) + LIBGNAT_TARGET_PAIRS_COMMON = \ a-numaux.adb<a-numaux-x86.adb \ a-numaux.ads<a-numaux-x86.ads \ a-intnam.ads<a-intnam-solaris.ads \ @@ -1028,13 +1028,29 @@ ifeq ($(strip $(filter-out %86 solaris2%,$(arch) $(osys))),) s-tpopsp.adb<s-tpopsp-solaris.adb \ g-soliop.ads<g-soliop-solaris.ads - ifeq ($(strip $(MULTISUBDIR)),/amd64) - LIBGNAT_TARGET_PAIRS += \ - system.ads<system-solaris-x86_64.ads + LIBGNAT_TARGET_PAIRS_32 = \ + g-bytswa.adb<g-bytswa-x86.adb \ + system.ads<system-solaris-x86.ads + + LIBGNAT_TARGET_PAIRS_64 = \ + system.ads<system-solaris-x86_64.ads + + ifeq ($(strip $(filter-out %86 solaris2%,$(arch) $(osys))),) + ifeq ($(strip $(MULTISUBDIR)),/amd64) + LIBGNAT_TARGET_PAIRS = \ + $(LIBGNAT_TARGET_PAIRS_COMMON) $(LIBGNAT_TARGET_PAIRS_64) + else + LIBGNAT_TARGET_PAIRS = \ + $(LIBGNAT_TARGET_PAIRS_COMMON) $(LIBGNAT_TARGET_PAIRS_32) + endif else - LIBGNAT_TARGET_PAIRS += \ - g-bytswa.adb<g-bytswa-x86.adb \ - system.ads<system-solaris-x86.ads + ifeq ($(strip $(MULTISUBDIR)),/32) + LIBGNAT_TARGET_PAIRS = \ + $(LIBGNAT_TARGET_PAIRS_COMMON) $(LIBGNAT_TARGET_PAIRS_32) + else + LIBGNAT_TARGET_PAIRS = \ + $(LIBGNAT_TARGET_PAIRS_COMMON) $(LIBGNAT_TARGET_PAIRS_64) + endif endif TOOLS_TARGET_PAIRS=mlib-tgt-specific.adb<mlib-tgt-specific-solaris.adb diff --git a/gcc/ada/gcc-interface/misc.c b/gcc/ada/gcc-interface/misc.c index e69668ae852..70218578046 100644 --- a/gcc/ada/gcc-interface/misc.c +++ b/gcc/ada/gcc-interface/misc.c @@ -307,7 +307,7 @@ gnat_init (void) /* Do little here, most of the standard declarations are set up after the front-end has been run. Use the same `char' as C, this doesn't really matter since we'll use the explicit `unsigned char' for Character. */ - build_common_tree_nodes (flag_signed_char); + build_common_tree_nodes (flag_signed_char, false); /* In Ada, we use an unsigned 8-bit type for the default boolean type. */ boolean_type_node = make_unsigned_type (8); @@ -315,11 +315,11 @@ gnat_init (void) SET_TYPE_RM_MAX_VALUE (boolean_type_node, build_int_cst (boolean_type_node, 1)); SET_TYPE_RM_SIZE (boolean_type_node, bitsize_int (1)); + boolean_true_node = TYPE_MAX_VALUE (boolean_type_node); + boolean_false_node = TYPE_MIN_VALUE (boolean_type_node); - build_common_tree_nodes_2 (0); sbitsize_one_node = sbitsize_int (1); sbitsize_unit_node = sbitsize_int (BITS_PER_UNIT); - boolean_true_node = TYPE_MAX_VALUE (boolean_type_node); ptr_void_type_node = build_pointer_type (void_type_node); diff --git a/gcc/c-decl.c b/gcc/c-decl.c index 7198cb2d5d3..3ed3c46e64c 100644 --- a/gcc/c-decl.c +++ b/gcc/c-decl.c @@ -3478,8 +3478,6 @@ c_init_decl_processing (void) using preprocessed headers. */ input_location = BUILTINS_LOCATION; - build_common_tree_nodes (flag_signed_char); - c_common_nodes_and_builtins (); /* In C, comparisons and TRUTH_* expressions have type int. */ @@ -4359,6 +4357,8 @@ finish_decl (tree decl, location_t init_loc, tree init, when a tentative file-scope definition is seen. 
But at end of compilation, do output code for them. */ DECL_DEFER_OUTPUT (decl) = 1; + if (asmspec && C_DECL_REGISTER (decl)) + DECL_HARD_REGISTER (decl) = 1; rest_of_decl_compilation (decl, true, 0); } else diff --git a/gcc/c-family/ChangeLog b/gcc/c-family/ChangeLog index 8cdeb3372b2..1e3ca7d1d6a 100644 --- a/gcc/c-family/ChangeLog +++ b/gcc/c-family/ChangeLog @@ -1,3 +1,13 @@ +2011-07-06 Richard Guenther <rguenther@suse.de> + + * c-common.c (c_common_nodes_and_builtins): + Merge calls to build_common_tree_nodes and build_common_tree_nodes_2. + +2011-07-05 Richard Guenther <rguenther@suse.de> + + * c-common.c (c_common_nodes_and_builtins): Build all common + tree nodes first. + 2011-06-27 Jakub Jelinek <jakub@redhat.com> * c-common.h (c_tree_chain_next): New static inline function. diff --git a/gcc/c-family/c-common.c b/gcc/c-family/c-common.c index a50b405262a..67291de7d4a 100644 --- a/gcc/c-family/c-common.c +++ b/gcc/c-family/c-common.c @@ -4576,6 +4576,8 @@ c_common_nodes_and_builtins (void) tree va_list_ref_type_node; tree va_list_arg_type_node; + build_common_tree_nodes (flag_signed_char, flag_short_double); + /* Define `int' and `char' first so that dbx will output them first. */ record_builtin_type (RID_INT, NULL, integer_type_node); record_builtin_type (RID_CHAR, "char", char_type_node); @@ -4675,8 +4677,6 @@ c_common_nodes_and_builtins (void) pid_type_node = TREE_TYPE (identifier_global_value (get_identifier (PID_TYPE))); - build_common_tree_nodes_2 (flag_short_double); - record_builtin_type (RID_FLOAT, NULL, float_type_node); record_builtin_type (RID_DOUBLE, NULL, double_type_node); record_builtin_type (RID_MAX, "long double", long_double_type_node); diff --git a/gcc/calls.c b/gcc/calls.c index bba477ce274..7538e4e5040 100644 --- a/gcc/calls.c +++ b/gcc/calls.c @@ -702,12 +702,6 @@ precompute_register_parameters (int num_actuals, struct arg_data *args, pop_temp_slots (); } - /* If the value is a non-legitimate constant, force it into a - pseudo now. TLS symbols sometimes need a call to resolve. */ - if (CONSTANT_P (args[i].value) - && !targetm.legitimate_constant_p (args[i].mode, args[i].value)) - args[i].value = force_reg (args[i].mode, args[i].value); - /* If we are to promote the function arg to a wider mode, do it now. */ @@ -717,6 +711,12 @@ precompute_register_parameters (int num_actuals, struct arg_data *args, TYPE_MODE (TREE_TYPE (args[i].tree_value)), args[i].value, args[i].unsignedp); + /* If the value is a non-legitimate constant, force it into a + pseudo now. TLS symbols sometimes need a call to resolve. */ + if (CONSTANT_P (args[i].value) + && !targetm.legitimate_constant_p (args[i].mode, args[i].value)) + args[i].value = force_reg (args[i].mode, args[i].value); + /* If we're going to have to load the value by parts, pull the parts into pseudos. The part extraction process can involve non-trivial computation. */ diff --git a/gcc/combine.c b/gcc/combine.c index 56fb44eaf42..4dbf022fd1b 100644 --- a/gcc/combine.c +++ b/gcc/combine.c @@ -1560,7 +1560,7 @@ set_nonzero_bits_and_sign_copies (rtx x, const_rtx set, void *data) say what its contents were. */ && ! REGNO_REG_SET_P (DF_LR_IN (ENTRY_BLOCK_PTR->next_bb), REGNO (x)) - && GET_MODE_BITSIZE (GET_MODE (x)) <= HOST_BITS_PER_WIDE_INT) + && HWI_COMPUTABLE_MODE_P (GET_MODE (x))) { reg_stat_type *rsp = VEC_index (reg_stat_type, reg_stat, REGNO (x)); @@ -1610,9 +1610,7 @@ set_nonzero_bits_and_sign_copies (rtx x, const_rtx set, void *data) set what we know about X. 
*/ if (SET_DEST (set) == x - || (GET_CODE (SET_DEST (set)) == SUBREG - && (GET_MODE_SIZE (GET_MODE (SET_DEST (set))) - > GET_MODE_SIZE (GET_MODE (SUBREG_REG (SET_DEST (set))))) + || (paradoxical_subreg_p (SET_DEST (set)) && SUBREG_REG (SET_DEST (set)) == x)) { rtx src = SET_SRC (set); @@ -1627,15 +1625,11 @@ set_nonzero_bits_and_sign_copies (rtx x, const_rtx set, void *data) ??? For 2.5, try to tighten up the MD files in this regard instead of this kludge. */ - if (GET_MODE_BITSIZE (GET_MODE (x)) < BITS_PER_WORD + if (GET_MODE_PRECISION (GET_MODE (x)) < BITS_PER_WORD && CONST_INT_P (src) && INTVAL (src) > 0 - && 0 != (UINTVAL (src) - & ((unsigned HOST_WIDE_INT) 1 - << (GET_MODE_BITSIZE (GET_MODE (x)) - 1)))) - src = GEN_INT (UINTVAL (src) - | ((unsigned HOST_WIDE_INT) (-1) - << GET_MODE_BITSIZE (GET_MODE (x)))); + && val_signbit_known_set_p (GET_MODE (x), INTVAL (src))) + src = GEN_INT (INTVAL (src) | ~GET_MODE_MASK (GET_MODE (x))); #endif /* Don't call nonzero_bits if it cannot change anything. */ @@ -2764,14 +2758,14 @@ try_combine (rtx i3, rtx i2, rtx i1, rtx i0, int *new_direct_jump_p, offset = INTVAL (XEXP (dest, 2)); dest = XEXP (dest, 0); if (BITS_BIG_ENDIAN) - offset = GET_MODE_BITSIZE (GET_MODE (dest)) - width - offset; + offset = GET_MODE_PRECISION (GET_MODE (dest)) - width - offset; } } else { if (GET_CODE (dest) == STRICT_LOW_PART) dest = XEXP (dest, 0); - width = GET_MODE_BITSIZE (GET_MODE (dest)); + width = GET_MODE_PRECISION (GET_MODE (dest)); offset = 0; } @@ -2781,16 +2775,16 @@ try_combine (rtx i3, rtx i2, rtx i1, rtx i0, int *new_direct_jump_p, if (subreg_lowpart_p (dest)) ; /* Handle the case where inner is twice the size of outer. */ - else if (GET_MODE_BITSIZE (GET_MODE (SET_DEST (temp))) - == 2 * GET_MODE_BITSIZE (GET_MODE (dest))) - offset += GET_MODE_BITSIZE (GET_MODE (dest)); + else if (GET_MODE_PRECISION (GET_MODE (SET_DEST (temp))) + == 2 * GET_MODE_PRECISION (GET_MODE (dest))) + offset += GET_MODE_PRECISION (GET_MODE (dest)); /* Otherwise give up for now. */ else offset = -1; } if (offset >= 0 - && (GET_MODE_BITSIZE (GET_MODE (SET_DEST (temp))) + && (GET_MODE_PRECISION (GET_MODE (SET_DEST (temp))) <= HOST_BITS_PER_DOUBLE_INT)) { double_int m, o, i; @@ -3751,8 +3745,8 @@ try_combine (rtx i3, rtx i2, rtx i1, rtx i0, int *new_direct_jump_p, (REG_P (temp) && VEC_index (reg_stat_type, reg_stat, REGNO (temp))->nonzero_bits != 0 - && GET_MODE_BITSIZE (GET_MODE (temp)) < BITS_PER_WORD - && GET_MODE_BITSIZE (GET_MODE (temp)) < HOST_BITS_PER_INT + && GET_MODE_PRECISION (GET_MODE (temp)) < BITS_PER_WORD + && GET_MODE_PRECISION (GET_MODE (temp)) < HOST_BITS_PER_INT && (VEC_index (reg_stat_type, reg_stat, REGNO (temp))->nonzero_bits != GET_MODE_MASK (word_mode)))) @@ -3761,8 +3755,8 @@ try_combine (rtx i3, rtx i2, rtx i1, rtx i0, int *new_direct_jump_p, (REG_P (temp) && VEC_index (reg_stat_type, reg_stat, REGNO (temp))->nonzero_bits != 0 - && GET_MODE_BITSIZE (GET_MODE (temp)) < BITS_PER_WORD - && GET_MODE_BITSIZE (GET_MODE (temp)) < HOST_BITS_PER_INT + && GET_MODE_PRECISION (GET_MODE (temp)) < BITS_PER_WORD + && GET_MODE_PRECISION (GET_MODE (temp)) < HOST_BITS_PER_INT && (VEC_index (reg_stat_type, reg_stat, REGNO (temp))->nonzero_bits != GET_MODE_MASK (word_mode))))) @@ -4685,14 +4679,13 @@ find_split_point (rtx *loc, rtx insn, bool set_src) /* See if this is a bitfield assignment with everything constant. If so, this is an IOR of an AND, so split it into that. 
*/ if (GET_CODE (SET_DEST (x)) == ZERO_EXTRACT - && (GET_MODE_BITSIZE (GET_MODE (XEXP (SET_DEST (x), 0))) - <= HOST_BITS_PER_WIDE_INT) + && HWI_COMPUTABLE_MODE_P (GET_MODE (XEXP (SET_DEST (x), 0))) && CONST_INT_P (XEXP (SET_DEST (x), 1)) && CONST_INT_P (XEXP (SET_DEST (x), 2)) && CONST_INT_P (SET_SRC (x)) && ((INTVAL (XEXP (SET_DEST (x), 1)) + INTVAL (XEXP (SET_DEST (x), 2))) - <= GET_MODE_BITSIZE (GET_MODE (XEXP (SET_DEST (x), 0)))) + <= GET_MODE_PRECISION (GET_MODE (XEXP (SET_DEST (x), 0)))) && ! side_effects_p (XEXP (SET_DEST (x), 0))) { HOST_WIDE_INT pos = INTVAL (XEXP (SET_DEST (x), 2)); @@ -4705,7 +4698,7 @@ find_split_point (rtx *loc, rtx insn, bool set_src) rtx or_mask; if (BITS_BIG_ENDIAN) - pos = GET_MODE_BITSIZE (mode) - len - pos; + pos = GET_MODE_PRECISION (mode) - len - pos; or_mask = gen_int_mode (src << pos, mode); if (src == mask) @@ -4798,7 +4791,7 @@ find_split_point (rtx *loc, rtx insn, bool set_src) break; pos = 0; - len = GET_MODE_BITSIZE (GET_MODE (inner)); + len = GET_MODE_PRECISION (GET_MODE (inner)); unsignedp = 0; break; @@ -4812,7 +4805,7 @@ find_split_point (rtx *loc, rtx insn, bool set_src) pos = INTVAL (XEXP (SET_SRC (x), 2)); if (BITS_BIG_ENDIAN) - pos = GET_MODE_BITSIZE (GET_MODE (inner)) - len - pos; + pos = GET_MODE_PRECISION (GET_MODE (inner)) - len - pos; unsignedp = (code == ZERO_EXTRACT); } break; @@ -4821,7 +4814,8 @@ find_split_point (rtx *loc, rtx insn, bool set_src) break; } - if (len && pos >= 0 && pos + len <= GET_MODE_BITSIZE (GET_MODE (inner))) + if (len && pos >= 0 + && pos + len <= GET_MODE_PRECISION (GET_MODE (inner))) { enum machine_mode mode = GET_MODE (SET_SRC (x)); @@ -4852,9 +4846,9 @@ find_split_point (rtx *loc, rtx insn, bool set_src) (unsignedp ? LSHIFTRT : ASHIFTRT, mode, gen_rtx_ASHIFT (mode, gen_lowpart (mode, inner), - GEN_INT (GET_MODE_BITSIZE (mode) + GEN_INT (GET_MODE_PRECISION (mode) - len - pos)), - GEN_INT (GET_MODE_BITSIZE (mode) - len))); + GEN_INT (GET_MODE_PRECISION (mode) - len))); split = find_split_point (&SET_SRC (x), insn, true); if (split && split != &SET_SRC (x)) @@ -5551,7 +5545,7 @@ combine_simplify_rtx (rtx x, enum machine_mode op0_mode, int in_dest, if (GET_CODE (temp) == ASHIFTRT && CONST_INT_P (XEXP (temp, 1)) - && INTVAL (XEXP (temp, 1)) == GET_MODE_BITSIZE (mode) - 1) + && INTVAL (XEXP (temp, 1)) == GET_MODE_PRECISION (mode) - 1) return simplify_shift_const (NULL_RTX, LSHIFTRT, mode, XEXP (temp, 0), INTVAL (XEXP (temp, 1))); @@ -5570,8 +5564,8 @@ combine_simplify_rtx (rtx x, enum machine_mode op0_mode, int in_dest, rtx temp1 = simplify_shift_const (NULL_RTX, ASHIFTRT, mode, simplify_shift_const (NULL_RTX, ASHIFT, mode, temp, - GET_MODE_BITSIZE (mode) - 1 - i), - GET_MODE_BITSIZE (mode) - 1 - i); + GET_MODE_PRECISION (mode) - 1 - i), + GET_MODE_PRECISION (mode) - 1 - i); /* If all we did was surround TEMP with the two shifts, we haven't improved anything, so don't use it. Otherwise, @@ -5590,7 +5584,7 @@ combine_simplify_rtx (rtx x, enum machine_mode op0_mode, int in_dest, if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT) break; - if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT) + if (HWI_COMPUTABLE_MODE_P (mode)) SUBST (XEXP (x, 0), force_to_mode (XEXP (x, 0), GET_MODE (XEXP (x, 0)), GET_MODE_MASK (mode), 0)); @@ -5602,7 +5596,7 @@ combine_simplify_rtx (rtx x, enum machine_mode op0_mode, int in_dest, /* Similarly to what we do in simplify-rtx.c, a truncate of a register whose value is a comparison can be replaced with a subreg if STORE_FLAG_VALUE permits. 
*/ - if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT + if (HWI_COMPUTABLE_MODE_P (mode) && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0 && (temp = get_last_value (XEXP (x, 0))) && COMPARISON_P (temp)) @@ -5640,20 +5634,20 @@ combine_simplify_rtx (rtx x, enum machine_mode op0_mode, int in_dest, && INTVAL (XEXP (x, 1)) == -INTVAL (XEXP (XEXP (x, 0), 1)) && ((i = exact_log2 (UINTVAL (XEXP (XEXP (x, 0), 1)))) >= 0 || (i = exact_log2 (UINTVAL (XEXP (x, 1)))) >= 0) - && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT + && HWI_COMPUTABLE_MODE_P (mode) && ((GET_CODE (XEXP (XEXP (x, 0), 0)) == AND && CONST_INT_P (XEXP (XEXP (XEXP (x, 0), 0), 1)) && (UINTVAL (XEXP (XEXP (XEXP (x, 0), 0), 1)) == ((unsigned HOST_WIDE_INT) 1 << (i + 1)) - 1)) || (GET_CODE (XEXP (XEXP (x, 0), 0)) == ZERO_EXTEND - && (GET_MODE_BITSIZE (GET_MODE (XEXP (XEXP (XEXP (x, 0), 0), 0))) + && (GET_MODE_PRECISION (GET_MODE (XEXP (XEXP (XEXP (x, 0), 0), 0))) == (unsigned int) i + 1)))) return simplify_shift_const (NULL_RTX, ASHIFTRT, mode, simplify_shift_const (NULL_RTX, ASHIFT, mode, XEXP (XEXP (XEXP (x, 0), 0), 0), - GET_MODE_BITSIZE (mode) - (i + 1)), - GET_MODE_BITSIZE (mode) - (i + 1)); + GET_MODE_PRECISION (mode) - (i + 1)), + GET_MODE_PRECISION (mode) - (i + 1)); /* If only the low-order bit of X is possibly nonzero, (plus x -1) can become (ashiftrt (ashift (xor x 1) C) C) where C is @@ -5667,26 +5661,31 @@ combine_simplify_rtx (rtx x, enum machine_mode op0_mode, int in_dest, return simplify_shift_const (NULL_RTX, ASHIFTRT, mode, simplify_shift_const (NULL_RTX, ASHIFT, mode, gen_rtx_XOR (mode, XEXP (x, 0), const1_rtx), - GET_MODE_BITSIZE (mode) - 1), - GET_MODE_BITSIZE (mode) - 1); + GET_MODE_PRECISION (mode) - 1), + GET_MODE_PRECISION (mode) - 1); /* If we are adding two things that have no bits in common, convert the addition into an IOR. This will often be further simplified, for example in cases like ((a & 1) + (a & 2)), which can become a & 3. */ - if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT + if (HWI_COMPUTABLE_MODE_P (mode) && (nonzero_bits (XEXP (x, 0), mode) & nonzero_bits (XEXP (x, 1), mode)) == 0) { /* Try to simplify the expression further. */ rtx tor = simplify_gen_binary (IOR, mode, XEXP (x, 0), XEXP (x, 1)); - temp = combine_simplify_rtx (tor, mode, in_dest, 0); + temp = combine_simplify_rtx (tor, VOIDmode, in_dest, 0); /* If we could, great. If not, do not go ahead with the IOR replacement, since PLUS appears in many special purpose address arithmetic instructions. 
*/ - if (GET_CODE (temp) != CLOBBER && temp != tor) + if (GET_CODE (temp) != CLOBBER + && (GET_CODE (temp) != IOR + || ((XEXP (temp, 0) != XEXP (x, 0) + || XEXP (temp, 1) != XEXP (x, 1)) + && (XEXP (temp, 0) != XEXP (x, 1) + || XEXP (temp, 1) != XEXP (x, 0))))) return temp; } break; @@ -5795,7 +5794,7 @@ combine_simplify_rtx (rtx x, enum machine_mode op0_mode, int in_dest, && op1 == const0_rtx && mode == GET_MODE (op0) && (num_sign_bit_copies (op0, mode) - == GET_MODE_BITSIZE (mode))) + == GET_MODE_PRECISION (mode))) { op0 = expand_compound_operation (op0); return simplify_gen_unary (NEG, mode, @@ -5820,7 +5819,7 @@ combine_simplify_rtx (rtx x, enum machine_mode op0_mode, int in_dest, && op1 == const0_rtx && mode == GET_MODE (op0) && (num_sign_bit_copies (op0, mode) - == GET_MODE_BITSIZE (mode))) + == GET_MODE_PRECISION (mode))) { op0 = expand_compound_operation (op0); return plus_constant (gen_lowpart (mode, op0), 1); @@ -5835,7 +5834,7 @@ combine_simplify_rtx (rtx x, enum machine_mode op0_mode, int in_dest, && new_code == NE && GET_MODE_CLASS (mode) == MODE_INT && op1 == const0_rtx && (num_sign_bit_copies (op0, mode) - == GET_MODE_BITSIZE (mode))) + == GET_MODE_PRECISION (mode))) return gen_lowpart (mode, expand_compound_operation (op0)); @@ -5856,7 +5855,7 @@ combine_simplify_rtx (rtx x, enum machine_mode op0_mode, int in_dest, && op1 == const0_rtx && mode == GET_MODE (op0) && (num_sign_bit_copies (op0, mode) - == GET_MODE_BITSIZE (mode))) + == GET_MODE_PRECISION (mode))) { op0 = expand_compound_operation (op0); return simplify_gen_unary (NOT, mode, @@ -5881,16 +5880,15 @@ combine_simplify_rtx (rtx x, enum machine_mode op0_mode, int in_dest, AND with STORE_FLAG_VALUE when we are done, since we are only going to test the sign bit. */ if (new_code == NE && GET_MODE_CLASS (mode) == MODE_INT - && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT - && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode)) - == (unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1)) + && HWI_COMPUTABLE_MODE_P (mode) + && val_signbit_p (mode, STORE_FLAG_VALUE) && op1 == const0_rtx && mode == GET_MODE (op0) && (i = exact_log2 (nonzero_bits (op0, mode))) >= 0) { x = simplify_shift_const (NULL_RTX, ASHIFT, mode, expand_compound_operation (op0), - GET_MODE_BITSIZE (mode) - 1 - i); + GET_MODE_PRECISION (mode) - 1 - i); if (GET_CODE (x) == AND && XEXP (x, 1) == const_true_rtx) return XEXP (x, 0); else @@ -6016,7 +6014,7 @@ simplify_if_then_else (rtx x) } else if (true_code == EQ && true_val == const0_rtx && (num_sign_bit_copies (from, GET_MODE (from)) - == GET_MODE_BITSIZE (GET_MODE (from)))) + == GET_MODE_PRECISION (GET_MODE (from)))) { false_code = EQ; false_val = constm1_rtx; @@ -6186,8 +6184,8 @@ simplify_if_then_else (rtx x) && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 0)), f) && (num_sign_bit_copies (f, GET_MODE (f)) > (unsigned int) - (GET_MODE_BITSIZE (mode) - - GET_MODE_BITSIZE (GET_MODE (XEXP (XEXP (t, 0), 0)))))) + (GET_MODE_PRECISION (mode) + - GET_MODE_PRECISION (GET_MODE (XEXP (XEXP (t, 0), 0)))))) { c1 = XEXP (XEXP (t, 0), 1); z = f; op = GET_CODE (XEXP (t, 0)); extend_op = SIGN_EXTEND; @@ -6202,8 +6200,8 @@ simplify_if_then_else (rtx x) && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 1)), f) && (num_sign_bit_copies (f, GET_MODE (f)) > (unsigned int) - (GET_MODE_BITSIZE (mode) - - GET_MODE_BITSIZE (GET_MODE (XEXP (XEXP (t, 0), 1)))))) + (GET_MODE_PRECISION (mode) + - GET_MODE_PRECISION (GET_MODE (XEXP (XEXP (t, 0), 1)))))) { c1 = XEXP (XEXP (t, 0), 0); z = f; op = GET_CODE (XEXP (t, 0)); extend_op = 
SIGN_EXTEND; @@ -6218,7 +6216,7 @@ simplify_if_then_else (rtx x) || GET_CODE (XEXP (t, 0)) == LSHIFTRT || GET_CODE (XEXP (t, 0)) == ASHIFTRT) && GET_CODE (XEXP (XEXP (t, 0), 0)) == SUBREG - && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT + && HWI_COMPUTABLE_MODE_P (mode) && subreg_lowpart_p (XEXP (XEXP (t, 0), 0)) && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 0)), f) && ((nonzero_bits (f, GET_MODE (f)) @@ -6234,7 +6232,7 @@ simplify_if_then_else (rtx x) || GET_CODE (XEXP (t, 0)) == IOR || GET_CODE (XEXP (t, 0)) == XOR) && GET_CODE (XEXP (XEXP (t, 0), 1)) == SUBREG - && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT + && HWI_COMPUTABLE_MODE_P (mode) && subreg_lowpart_p (XEXP (XEXP (t, 0), 1)) && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 1)), f) && ((nonzero_bits (f, GET_MODE (f)) @@ -6274,7 +6272,7 @@ simplify_if_then_else (rtx x) && ((1 == nonzero_bits (XEXP (cond, 0), mode) && (i = exact_log2 (UINTVAL (true_rtx))) >= 0) || ((num_sign_bit_copies (XEXP (cond, 0), mode) - == GET_MODE_BITSIZE (mode)) + == GET_MODE_PRECISION (mode)) && (i = exact_log2 (-UINTVAL (true_rtx))) >= 0))) return simplify_shift_const (NULL_RTX, ASHIFT, mode, @@ -6312,8 +6310,7 @@ simplify_set (rtx x) simplify the expression for the object knowing that we only need the low-order bits. */ - if (GET_MODE_CLASS (mode) == MODE_INT - && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT) + if (GET_MODE_CLASS (mode) == MODE_INT && HWI_COMPUTABLE_MODE_P (mode)) { src = force_to_mode (src, mode, ~(unsigned HOST_WIDE_INT) 0, 0); SUBST (SET_SRC (x), src); @@ -6448,7 +6445,7 @@ simplify_set (rtx x) if (((old_code == NE && new_code == EQ) || (old_code == EQ && new_code == NE)) && ! other_changed_previously && op1 == const0_rtx - && GET_MODE_BITSIZE (GET_MODE (op0)) <= HOST_BITS_PER_WIDE_INT + && HWI_COMPUTABLE_MODE_P (GET_MODE (op0)) && exact_log2 (mask = nonzero_bits (op0, GET_MODE (op0))) >= 0) { rtx pat = PATTERN (other_insn), note = 0; @@ -6541,17 +6538,15 @@ simplify_set (rtx x) if (dest == cc0_rtx && GET_CODE (src) == SUBREG && subreg_lowpart_p (src) - && (GET_MODE_BITSIZE (GET_MODE (src)) - < GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (src))))) + && (GET_MODE_PRECISION (GET_MODE (src)) + < GET_MODE_PRECISION (GET_MODE (SUBREG_REG (src))))) { rtx inner = SUBREG_REG (src); enum machine_mode inner_mode = GET_MODE (inner); /* Here we make sure that we don't have a sign bit on. */ - if (GET_MODE_BITSIZE (inner_mode) <= HOST_BITS_PER_WIDE_INT - && (nonzero_bits (inner, inner_mode) - < ((unsigned HOST_WIDE_INT) 1 - << (GET_MODE_BITSIZE (GET_MODE (src)) - 1)))) + if (val_signbit_known_clear_p (GET_MODE (src), + nonzero_bits (inner, inner_mode))) { SUBST (SET_SRC (x), inner); src = SET_SRC (x); @@ -6568,8 +6563,7 @@ simplify_set (rtx x) && INTEGRAL_MODE_P (GET_MODE (SUBREG_REG (src))) && LOAD_EXTEND_OP (GET_MODE (SUBREG_REG (src))) != UNKNOWN && SUBREG_BYTE (src) == 0 - && (GET_MODE_SIZE (GET_MODE (src)) - > GET_MODE_SIZE (GET_MODE (SUBREG_REG (src)))) + && paradoxical_subreg_p (src) && MEM_P (SUBREG_REG (src))) { SUBST (SET_SRC (x), @@ -6597,7 +6591,7 @@ simplify_set (rtx x) #endif && (num_sign_bit_copies (XEXP (XEXP (src, 0), 0), GET_MODE (XEXP (XEXP (src, 0), 0))) - == GET_MODE_BITSIZE (GET_MODE (XEXP (XEXP (src, 0), 0)))) + == GET_MODE_PRECISION (GET_MODE (XEXP (XEXP (src, 0), 0)))) && ! side_effects_p (src)) { rtx true_rtx = (GET_CODE (XEXP (src, 0)) == NE @@ -6664,7 +6658,7 @@ simplify_logical (rtx x) any (sign) bits when converting INTVAL (op1) to "unsigned HOST_WIDE_INT". 
*/ if (CONST_INT_P (op1) - && (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT + && (HWI_COMPUTABLE_MODE_P (mode) || INTVAL (op1) > 0)) { x = simplify_and_const_int (x, mode, op0, INTVAL (op1)); @@ -6773,7 +6767,7 @@ expand_compound_operation (rtx x) if (! SCALAR_INT_MODE_P (GET_MODE (XEXP (x, 0)))) return x; - len = GET_MODE_BITSIZE (GET_MODE (XEXP (x, 0))); + len = GET_MODE_PRECISION (GET_MODE (XEXP (x, 0))); /* If the inner object has VOIDmode (the only way this can happen is if it is an ASM_OPERANDS), we can't do anything since we don't know how much masking to do. */ @@ -6807,11 +6801,11 @@ expand_compound_operation (rtx x) pos = INTVAL (XEXP (x, 2)); /* This should stay within the object being extracted, fail otherwise. */ - if (len + pos > GET_MODE_BITSIZE (GET_MODE (XEXP (x, 0)))) + if (len + pos > GET_MODE_PRECISION (GET_MODE (XEXP (x, 0)))) return x; if (BITS_BIG_ENDIAN) - pos = GET_MODE_BITSIZE (GET_MODE (XEXP (x, 0))) - len - pos; + pos = GET_MODE_PRECISION (GET_MODE (XEXP (x, 0))) - len - pos; break; @@ -6822,7 +6816,7 @@ expand_compound_operation (rtx x) bit is not set, as this is easier to optimize. It will be converted back to cheaper alternative in make_extraction. */ if (GET_CODE (x) == SIGN_EXTEND - && (GET_MODE_BITSIZE (GET_MODE (x)) <= HOST_BITS_PER_WIDE_INT + && (HWI_COMPUTABLE_MODE_P (GET_MODE (x)) && ((nonzero_bits (XEXP (x, 0), GET_MODE (XEXP (x, 0))) & ~(((unsigned HOST_WIDE_INT) GET_MODE_MASK (GET_MODE (XEXP (x, 0)))) @@ -6851,7 +6845,7 @@ expand_compound_operation (rtx x) set. */ if (GET_CODE (XEXP (x, 0)) == TRUNCATE && GET_MODE (XEXP (XEXP (x, 0), 0)) == GET_MODE (x) - && GET_MODE_BITSIZE (GET_MODE (x)) <= HOST_BITS_PER_WIDE_INT + && HWI_COMPUTABLE_MODE_P (GET_MODE (x)) && (nonzero_bits (XEXP (XEXP (x, 0), 0), GET_MODE (x)) & ~GET_MODE_MASK (GET_MODE (XEXP (x, 0)))) == 0) return XEXP (XEXP (x, 0), 0); @@ -6860,7 +6854,7 @@ expand_compound_operation (rtx x) if (GET_CODE (XEXP (x, 0)) == SUBREG && GET_MODE (SUBREG_REG (XEXP (x, 0))) == GET_MODE (x) && subreg_lowpart_p (XEXP (x, 0)) - && GET_MODE_BITSIZE (GET_MODE (x)) <= HOST_BITS_PER_WIDE_INT + && HWI_COMPUTABLE_MODE_P (GET_MODE (x)) && (nonzero_bits (SUBREG_REG (XEXP (x, 0)), GET_MODE (x)) & ~GET_MODE_MASK (GET_MODE (XEXP (x, 0)))) == 0) return SUBREG_REG (XEXP (x, 0)); @@ -6872,7 +6866,7 @@ expand_compound_operation (rtx x) if (GET_CODE (XEXP (x, 0)) == TRUNCATE && GET_MODE (XEXP (XEXP (x, 0), 0)) == GET_MODE (x) && COMPARISON_P (XEXP (XEXP (x, 0), 0)) - && (GET_MODE_BITSIZE (GET_MODE (XEXP (x, 0))) + && (GET_MODE_PRECISION (GET_MODE (XEXP (x, 0))) <= HOST_BITS_PER_WIDE_INT) && (STORE_FLAG_VALUE & ~GET_MODE_MASK (GET_MODE (XEXP (x, 0)))) == 0) return XEXP (XEXP (x, 0), 0); @@ -6882,7 +6876,7 @@ expand_compound_operation (rtx x) && GET_MODE (SUBREG_REG (XEXP (x, 0))) == GET_MODE (x) && subreg_lowpart_p (XEXP (x, 0)) && COMPARISON_P (SUBREG_REG (XEXP (x, 0))) - && (GET_MODE_BITSIZE (GET_MODE (XEXP (x, 0))) + && (GET_MODE_PRECISION (GET_MODE (XEXP (x, 0))) <= HOST_BITS_PER_WIDE_INT) && (STORE_FLAG_VALUE & ~GET_MODE_MASK (GET_MODE (XEXP (x, 0)))) == 0) return SUBREG_REG (XEXP (x, 0)); @@ -6904,7 +6898,7 @@ expand_compound_operation (rtx x) extraction. Then the constant of 31 would be substituted in to produce such a position. 
*/ - modewidth = GET_MODE_BITSIZE (GET_MODE (x)); + modewidth = GET_MODE_PRECISION (GET_MODE (x)); if (modewidth >= pos + len) { enum machine_mode mode = GET_MODE (x); @@ -6958,7 +6952,7 @@ expand_field_assignment (const_rtx x) && GET_CODE (XEXP (SET_DEST (x), 0)) == SUBREG) { inner = SUBREG_REG (XEXP (SET_DEST (x), 0)); - len = GET_MODE_BITSIZE (GET_MODE (XEXP (SET_DEST (x), 0))); + len = GET_MODE_PRECISION (GET_MODE (XEXP (SET_DEST (x), 0))); pos = GEN_INT (subreg_lsb (XEXP (SET_DEST (x), 0))); } else if (GET_CODE (SET_DEST (x)) == ZERO_EXTRACT @@ -6970,23 +6964,23 @@ expand_field_assignment (const_rtx x) /* A constant position should stay within the width of INNER. */ if (CONST_INT_P (pos) - && INTVAL (pos) + len > GET_MODE_BITSIZE (GET_MODE (inner))) + && INTVAL (pos) + len > GET_MODE_PRECISION (GET_MODE (inner))) break; if (BITS_BIG_ENDIAN) { if (CONST_INT_P (pos)) - pos = GEN_INT (GET_MODE_BITSIZE (GET_MODE (inner)) - len + pos = GEN_INT (GET_MODE_PRECISION (GET_MODE (inner)) - len - INTVAL (pos)); else if (GET_CODE (pos) == MINUS && CONST_INT_P (XEXP (pos, 1)) && (INTVAL (XEXP (pos, 1)) - == GET_MODE_BITSIZE (GET_MODE (inner)) - len)) + == GET_MODE_PRECISION (GET_MODE (inner)) - len)) /* If position is ADJUST - X, new position is X. */ pos = XEXP (pos, 0); else pos = simplify_gen_binary (MINUS, GET_MODE (pos), - GEN_INT (GET_MODE_BITSIZE ( + GEN_INT (GET_MODE_PRECISION ( GET_MODE (inner)) - len), pos); @@ -7150,8 +7144,7 @@ make_extraction (enum machine_mode mode, rtx inner, HOST_WIDE_INT pos, && !MEM_P (inner) && (inner_mode == tmode || !REG_P (inner) - || TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (tmode), - GET_MODE_BITSIZE (inner_mode)) + || TRULY_NOOP_TRUNCATION_MODES_P (tmode, inner_mode) || reg_truncated_to_mode (tmode, inner)) && (! in_dest || (REG_P (inner) @@ -7162,7 +7155,7 @@ make_extraction (enum machine_mode mode, rtx inner, HOST_WIDE_INT pos, : BITS_PER_UNIT)) == 0 /* We can't do this if we are widening INNER_MODE (it may not be aligned, for one thing). */ - && GET_MODE_BITSIZE (inner_mode) >= GET_MODE_BITSIZE (tmode) + && GET_MODE_PRECISION (inner_mode) >= GET_MODE_PRECISION (tmode) && (inner_mode == tmode || (! mode_dependent_address_p (XEXP (inner, 0)) && ! MEM_VOLATILE_P (inner)))))) @@ -7180,7 +7173,7 @@ make_extraction (enum machine_mode mode, rtx inner, HOST_WIDE_INT pos, /* POS counts from lsb, but make OFFSET count in memory order. */ if (BYTES_BIG_ENDIAN) - offset = (GET_MODE_BITSIZE (is_mode) - len - pos) / BITS_PER_UNIT; + offset = (GET_MODE_PRECISION (is_mode) - len - pos) / BITS_PER_UNIT; else offset = pos / BITS_PER_UNIT; @@ -7250,11 +7243,9 @@ make_extraction (enum machine_mode mode, rtx inner, HOST_WIDE_INT pos, bit is not set, convert the extraction to the cheaper of sign and zero extension, that are equivalent in these cases. */ if (flag_expensive_optimizations - && (GET_MODE_BITSIZE (tmode) <= HOST_BITS_PER_WIDE_INT + && (HWI_COMPUTABLE_MODE_P (tmode) && ((nonzero_bits (new_rtx, tmode) - & ~(((unsigned HOST_WIDE_INT) - GET_MODE_MASK (tmode)) - >> 1)) + & ~(((unsigned HOST_WIDE_INT)GET_MODE_MASK (tmode)) >> 1)) == 0))) { rtx temp = gen_rtx_ZERO_EXTEND (mode, new_rtx); @@ -7287,7 +7278,7 @@ make_extraction (enum machine_mode mode, rtx inner, HOST_WIDE_INT pos, other cases, we would only be going outside our object in cases when an original shift would have been undefined. 
*/ if (MEM_P (inner) - && ((pos_rtx == 0 && pos + len > GET_MODE_BITSIZE (is_mode)) + && ((pos_rtx == 0 && pos + len > GET_MODE_PRECISION (is_mode)) || (pos_rtx != 0 && len != 1))) return 0; @@ -7420,8 +7411,8 @@ make_extraction (enum machine_mode mode, rtx inner, HOST_WIDE_INT pos, /* On the LHS, don't create paradoxical subregs implicitely truncating the register unless TRULY_NOOP_TRUNCATION. */ if (in_dest - && !TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (GET_MODE (inner)), - GET_MODE_BITSIZE (wanted_inner_mode))) + && !TRULY_NOOP_TRUNCATION_MODES_P (GET_MODE (inner), + wanted_inner_mode)) return NULL_RTX; if (GET_MODE (inner) != wanted_inner_mode @@ -7453,7 +7444,7 @@ make_extraction (enum machine_mode mode, rtx inner, HOST_WIDE_INT pos, SIGN_EXTENSION or ZERO_EXTENSION, that are equivalent in these cases. */ if (flag_expensive_optimizations - && (GET_MODE_BITSIZE (GET_MODE (pos_rtx)) <= HOST_BITS_PER_WIDE_INT + && (HWI_COMPUTABLE_MODE_P (GET_MODE (pos_rtx)) && ((nonzero_bits (pos_rtx, GET_MODE (pos_rtx)) & ~(((unsigned HOST_WIDE_INT) GET_MODE_MASK (GET_MODE (pos_rtx))) @@ -7562,7 +7553,7 @@ make_compound_operation (rtx x, enum rtx_code in_code) { enum rtx_code code = GET_CODE (x); enum machine_mode mode = GET_MODE (x); - int mode_width = GET_MODE_BITSIZE (mode); + int mode_width = GET_MODE_PRECISION (mode); rtx rhs, lhs; enum rtx_code next_code; int i, j; @@ -7721,7 +7712,7 @@ make_compound_operation (rtx x, enum rtx_code in_code) { new_rtx = make_compound_operation (XEXP (XEXP (x, 0), 0), next_code); new_rtx = make_extraction (mode, new_rtx, - (GET_MODE_BITSIZE (mode) + (GET_MODE_PRECISION (mode) - INTVAL (XEXP (XEXP (x, 0), 1))), NULL_RTX, i, 1, 0, in_code == COMPARE); } @@ -8057,8 +8048,7 @@ gen_lowpart_or_truncate (enum machine_mode mode, rtx x) { if (!CONST_INT_P (x) && GET_MODE_SIZE (mode) < GET_MODE_SIZE (GET_MODE (x)) - && !TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode), - GET_MODE_BITSIZE (GET_MODE (x))) + && !TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (x)) && !(REG_P (x) && reg_truncated_to_mode (mode, x))) { /* Bit-cast X into an integer mode. */ @@ -8113,7 +8103,7 @@ force_to_mode (rtx x, enum machine_mode mode, unsigned HOST_WIDE_INT mask, /* It is not valid to do a right-shift in a narrower mode than the one it came in with. */ if ((code == LSHIFTRT || code == ASHIFTRT) - && GET_MODE_BITSIZE (mode) < GET_MODE_BITSIZE (GET_MODE (x))) + && GET_MODE_PRECISION (mode) < GET_MODE_PRECISION (GET_MODE (x))) op_mode = GET_MODE (x); /* Truncate MASK to fit OP_MODE. */ @@ -8216,12 +8206,12 @@ force_to_mode (rtx x, enum machine_mode mode, unsigned HOST_WIDE_INT mask, if (GET_CODE (x) == AND && CONST_INT_P (XEXP (x, 1)) && GET_MODE_MASK (GET_MODE (x)) != mask - && GET_MODE_BITSIZE (GET_MODE (x)) <= HOST_BITS_PER_WIDE_INT) + && HWI_COMPUTABLE_MODE_P (GET_MODE (x))) { unsigned HOST_WIDE_INT cval = UINTVAL (XEXP (x, 1)) | (GET_MODE_MASK (GET_MODE (x)) & ~mask); - int width = GET_MODE_BITSIZE (GET_MODE (x)); + int width = GET_MODE_PRECISION (GET_MODE (x)); rtx y; /* If MODE is narrower than HOST_WIDE_INT and CVAL is a negative @@ -8249,7 +8239,7 @@ force_to_mode (rtx x, enum machine_mode mode, unsigned HOST_WIDE_INT mask, This may eliminate that PLUS and, later, the AND. 
*/ { - unsigned int width = GET_MODE_BITSIZE (mode); + unsigned int width = GET_MODE_PRECISION (mode); unsigned HOST_WIDE_INT smask = mask; /* If MODE is narrower than HOST_WIDE_INT and mask is a negative @@ -8317,7 +8307,7 @@ force_to_mode (rtx x, enum machine_mode mode, unsigned HOST_WIDE_INT mask, && CONST_INT_P (XEXP (x, 1)) && ((INTVAL (XEXP (XEXP (x, 0), 1)) + floor_log2 (INTVAL (XEXP (x, 1)))) - < GET_MODE_BITSIZE (GET_MODE (x))) + < GET_MODE_PRECISION (GET_MODE (x))) && (UINTVAL (XEXP (x, 1)) & ~nonzero_bits (XEXP (x, 0), GET_MODE (x))) == 0) { @@ -8362,10 +8352,10 @@ force_to_mode (rtx x, enum machine_mode mode, unsigned HOST_WIDE_INT mask, if (! (CONST_INT_P (XEXP (x, 1)) && INTVAL (XEXP (x, 1)) >= 0 - && INTVAL (XEXP (x, 1)) < GET_MODE_BITSIZE (mode)) + && INTVAL (XEXP (x, 1)) < GET_MODE_PRECISION (mode)) && ! (GET_MODE (XEXP (x, 1)) != VOIDmode && (nonzero_bits (XEXP (x, 1), GET_MODE (XEXP (x, 1))) - < (unsigned HOST_WIDE_INT) GET_MODE_BITSIZE (mode)))) + < (unsigned HOST_WIDE_INT) GET_MODE_PRECISION (mode)))) break; /* If the shift count is a constant and we can do arithmetic in @@ -8373,8 +8363,8 @@ force_to_mode (rtx x, enum machine_mode mode, unsigned HOST_WIDE_INT mask, conservative form of the mask. */ if (CONST_INT_P (XEXP (x, 1)) && INTVAL (XEXP (x, 1)) >= 0 - && INTVAL (XEXP (x, 1)) < GET_MODE_BITSIZE (op_mode) - && GET_MODE_BITSIZE (op_mode) <= HOST_BITS_PER_WIDE_INT) + && INTVAL (XEXP (x, 1)) < GET_MODE_PRECISION (op_mode) + && HWI_COMPUTABLE_MODE_P (op_mode)) mask >>= INTVAL (XEXP (x, 1)); else mask = fuller_mask; @@ -8394,7 +8384,7 @@ force_to_mode (rtx x, enum machine_mode mode, unsigned HOST_WIDE_INT mask, if (CONST_INT_P (XEXP (x, 1)) && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT - && GET_MODE_BITSIZE (op_mode) <= HOST_BITS_PER_WIDE_INT) + && HWI_COMPUTABLE_MODE_P (op_mode)) { rtx inner = XEXP (x, 0); unsigned HOST_WIDE_INT inner_mask; @@ -8424,17 +8414,17 @@ force_to_mode (rtx x, enum machine_mode mode, unsigned HOST_WIDE_INT mask, bit. */ && ((INTVAL (XEXP (x, 1)) + num_sign_bit_copies (XEXP (x, 0), GET_MODE (XEXP (x, 0)))) - >= GET_MODE_BITSIZE (GET_MODE (x))) + >= GET_MODE_PRECISION (GET_MODE (x))) && exact_log2 (mask + 1) >= 0 /* Number of bits left after the shift must be more than the mask needs. */ && ((INTVAL (XEXP (x, 1)) + exact_log2 (mask + 1)) - <= GET_MODE_BITSIZE (GET_MODE (x))) + <= GET_MODE_PRECISION (GET_MODE (x))) /* Must be more sign bit copies than the mask needs. */ && ((int) num_sign_bit_copies (XEXP (x, 0), GET_MODE (XEXP (x, 0))) >= exact_log2 (mask + 1))) x = simplify_gen_binary (LSHIFTRT, GET_MODE (x), XEXP (x, 0), - GEN_INT (GET_MODE_BITSIZE (GET_MODE (x)) + GEN_INT (GET_MODE_PRECISION (GET_MODE (x)) - exact_log2 (mask + 1))); goto shiftrt; @@ -8442,9 +8432,7 @@ force_to_mode (rtx x, enum machine_mode mode, unsigned HOST_WIDE_INT mask, case ASHIFTRT: /* If we are just looking for the sign bit, we don't need this shift at all, even if it has a variable count. */ - if (GET_MODE_BITSIZE (GET_MODE (x)) <= HOST_BITS_PER_WIDE_INT - && (mask == ((unsigned HOST_WIDE_INT) 1 - << (GET_MODE_BITSIZE (GET_MODE (x)) - 1)))) + if (val_signbit_p (GET_MODE (x), mask)) return force_to_mode (XEXP (x, 0), mode, mask, next_select); /* If this is a shift by a constant, get a mask that contains those bits @@ -8463,20 +8451,20 @@ force_to_mode (rtx x, enum machine_mode mode, unsigned HOST_WIDE_INT mask, represent a mask for all its bits in a single scalar. But we only care about the lower bits, so calculate these. 
*/ - if (GET_MODE_BITSIZE (GET_MODE (x)) > HOST_BITS_PER_WIDE_INT) + if (GET_MODE_PRECISION (GET_MODE (x)) > HOST_BITS_PER_WIDE_INT) { nonzero = ~(unsigned HOST_WIDE_INT) 0; - /* GET_MODE_BITSIZE (GET_MODE (x)) - INTVAL (XEXP (x, 1)) + /* GET_MODE_PRECISION (GET_MODE (x)) - INTVAL (XEXP (x, 1)) is the number of bits a full-width mask would have set. We need only shift if these are fewer than nonzero can hold. If not, we must keep all bits set in nonzero. */ - if (GET_MODE_BITSIZE (GET_MODE (x)) - INTVAL (XEXP (x, 1)) + if (GET_MODE_PRECISION (GET_MODE (x)) - INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT) nonzero >>= INTVAL (XEXP (x, 1)) + HOST_BITS_PER_WIDE_INT - - GET_MODE_BITSIZE (GET_MODE (x)) ; + - GET_MODE_PRECISION (GET_MODE (x)) ; } else { @@ -8496,7 +8484,7 @@ force_to_mode (rtx x, enum machine_mode mode, unsigned HOST_WIDE_INT mask, { x = simplify_shift_const (NULL_RTX, LSHIFTRT, GET_MODE (x), XEXP (x, 0), - GET_MODE_BITSIZE (GET_MODE (x)) - 1 - i); + GET_MODE_PRECISION (GET_MODE (x)) - 1 - i); if (GET_CODE (x) != ASHIFTRT) return force_to_mode (x, mode, mask, next_select); @@ -8519,7 +8507,7 @@ force_to_mode (rtx x, enum machine_mode mode, unsigned HOST_WIDE_INT mask, && CONST_INT_P (XEXP (x, 1)) && INTVAL (XEXP (x, 1)) >= 0 && (INTVAL (XEXP (x, 1)) - <= GET_MODE_BITSIZE (GET_MODE (x)) - (floor_log2 (mask) + 1)) + <= GET_MODE_PRECISION (GET_MODE (x)) - (floor_log2 (mask) + 1)) && GET_CODE (XEXP (x, 0)) == ASHIFT && XEXP (XEXP (x, 0), 1) == XEXP (x, 1)) return force_to_mode (XEXP (XEXP (x, 0), 0), mode, mask, @@ -8567,7 +8555,7 @@ force_to_mode (rtx x, enum machine_mode mode, unsigned HOST_WIDE_INT mask, && CONST_INT_P (XEXP (XEXP (x, 0), 1)) && INTVAL (XEXP (XEXP (x, 0), 1)) >= 0 && (INTVAL (XEXP (XEXP (x, 0), 1)) + floor_log2 (mask) - < GET_MODE_BITSIZE (GET_MODE (x))) + < GET_MODE_PRECISION (GET_MODE (x))) && INTVAL (XEXP (XEXP (x, 0), 1)) < HOST_BITS_PER_WIDE_INT) { temp = gen_int_mode (mask << INTVAL (XEXP (XEXP (x, 0), 1)), @@ -8819,15 +8807,14 @@ if_then_else_cond (rtx x, rtx *ptrue, rtx *pfalse) false values when testing X. */ else if (x == constm1_rtx || x == const0_rtx || (mode != VOIDmode - && num_sign_bit_copies (x, mode) == GET_MODE_BITSIZE (mode))) + && num_sign_bit_copies (x, mode) == GET_MODE_PRECISION (mode))) { *ptrue = constm1_rtx, *pfalse = const0_rtx; return x; } /* Likewise for 0 or a single bit. */ - else if (SCALAR_INT_MODE_P (mode) - && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT + else if (HWI_COMPUTABLE_MODE_P (mode) && exact_log2 (nz = nonzero_bits (x, mode)) >= 0) { *ptrue = gen_int_mode (nz, mode), *pfalse = const0_rtx; @@ -9152,8 +9139,8 @@ make_field_assignment (rtx x) return x; pos = get_pos_from_mask ((~c1) & GET_MODE_MASK (GET_MODE (dest)), &len); - if (pos < 0 || pos + len > GET_MODE_BITSIZE (GET_MODE (dest)) - || GET_MODE_BITSIZE (GET_MODE (dest)) > HOST_BITS_PER_WIDE_INT + if (pos < 0 || pos + len > GET_MODE_PRECISION (GET_MODE (dest)) + || GET_MODE_PRECISION (GET_MODE (dest)) > HOST_BITS_PER_WIDE_INT || (c1 & nonzero_bits (other, GET_MODE (dest))) != 0) return x; @@ -9174,7 +9161,7 @@ make_field_assignment (rtx x) other, pos), dest); src = force_to_mode (src, mode, - GET_MODE_BITSIZE (mode) >= HOST_BITS_PER_WIDE_INT + GET_MODE_PRECISION (mode) >= HOST_BITS_PER_WIDE_INT ? ~(unsigned HOST_WIDE_INT) 0 : ((unsigned HOST_WIDE_INT) 1 << len) - 1, 0); @@ -9268,15 +9255,13 @@ apply_distributive_law (rtx x) || ! 
subreg_lowpart_p (lhs) || (GET_MODE_CLASS (GET_MODE (lhs)) != GET_MODE_CLASS (GET_MODE (SUBREG_REG (lhs)))) - || (GET_MODE_SIZE (GET_MODE (lhs)) - > GET_MODE_SIZE (GET_MODE (SUBREG_REG (lhs)))) + || paradoxical_subreg_p (lhs) || VECTOR_MODE_P (GET_MODE (lhs)) || GET_MODE_SIZE (GET_MODE (SUBREG_REG (lhs))) > UNITS_PER_WORD /* Result might need to be truncated. Don't change mode if explicit truncation is needed. */ - || !TRULY_NOOP_TRUNCATION - (GET_MODE_BITSIZE (GET_MODE (x)), - GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (lhs))))) + || !TRULY_NOOP_TRUNCATION_MODES_P (GET_MODE (x), + GET_MODE (SUBREG_REG (lhs)))) return x; tem = simplify_gen_binary (code, GET_MODE (SUBREG_REG (lhs)), @@ -9586,15 +9571,11 @@ reg_nonzero_bits_for_combine (const_rtx x, enum machine_mode mode, ??? For 2.5, try to tighten up the MD files in this regard instead of this kludge. */ - if (GET_MODE_BITSIZE (GET_MODE (x)) < GET_MODE_BITSIZE (mode) + if (GET_MODE_PRECISION (GET_MODE (x)) < GET_MODE_PRECISION (mode) && CONST_INT_P (tem) && INTVAL (tem) > 0 - && 0 != (UINTVAL (tem) - & ((unsigned HOST_WIDE_INT) 1 - << (GET_MODE_BITSIZE (GET_MODE (x)) - 1)))) - tem = GEN_INT (UINTVAL (tem) - | ((unsigned HOST_WIDE_INT) (-1) - << GET_MODE_BITSIZE (GET_MODE (x)))); + && val_signbit_known_set_p (GET_MODE (x), INTVAL (tem))) + tem = GEN_INT (INTVAL (tem) | ~GET_MODE_MASK (GET_MODE (x))); #endif return tem; } @@ -9602,7 +9583,7 @@ reg_nonzero_bits_for_combine (const_rtx x, enum machine_mode mode, { unsigned HOST_WIDE_INT mask = rsp->nonzero_bits; - if (GET_MODE_BITSIZE (GET_MODE (x)) < GET_MODE_BITSIZE (mode)) + if (GET_MODE_PRECISION (GET_MODE (x)) < GET_MODE_PRECISION (mode)) /* We don't know anything about the upper bits. */ mask |= GET_MODE_MASK (mode) ^ GET_MODE_MASK (GET_MODE (x)); *nonzero &= mask; @@ -9648,7 +9629,7 @@ reg_num_sign_bit_copies_for_combine (const_rtx x, enum machine_mode mode, return tem; if (nonzero_sign_valid && rsp->sign_bit_copies != 0 - && GET_MODE_BITSIZE (GET_MODE (x)) == GET_MODE_BITSIZE (mode)) + && GET_MODE_PRECISION (GET_MODE (x)) == GET_MODE_PRECISION (mode)) *result = rsp->sign_bit_copies; return NULL; @@ -9672,8 +9653,8 @@ extended_count (const_rtx x, enum machine_mode mode, int unsignedp) return 0; return (unsignedp - ? (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT - ? (unsigned int) (GET_MODE_BITSIZE (mode) - 1 + ? (HWI_COMPUTABLE_MODE_P (mode) + ? (unsigned int) (GET_MODE_PRECISION (mode) - 1 - floor_log2 (nonzero_bits (x, mode))) : 0) : num_sign_bit_copies (x, mode) - 1); @@ -9824,7 +9805,7 @@ try_widen_shift_mode (enum rtx_code code, rtx op, int count, { if (orig_mode == mode) return mode; - gcc_assert (GET_MODE_BITSIZE (mode) > GET_MODE_BITSIZE (orig_mode)); + gcc_assert (GET_MODE_PRECISION (mode) > GET_MODE_PRECISION (orig_mode)); /* In general we can't perform in wider mode for right shift and rotate. */ switch (code) @@ -9833,14 +9814,14 @@ try_widen_shift_mode (enum rtx_code code, rtx op, int count, /* We can still widen if the bits brought in from the left are identical to the sign bit of ORIG_MODE. */ if (num_sign_bit_copies (op, mode) - > (unsigned) (GET_MODE_BITSIZE (mode) - - GET_MODE_BITSIZE (orig_mode))) + > (unsigned) (GET_MODE_PRECISION (mode) + - GET_MODE_PRECISION (orig_mode))) return mode; return orig_mode; case LSHIFTRT: /* Similarly here but with zero bits. 
*/ - if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT + if (HWI_COMPUTABLE_MODE_P (mode) && (nonzero_bits (op, mode) & ~GET_MODE_MASK (orig_mode)) == 0) return mode; @@ -9851,7 +9832,7 @@ try_widen_shift_mode (enum rtx_code code, rtx op, int count, int care_bits = low_bitmask_len (orig_mode, outer_const); if (care_bits >= 0 - && GET_MODE_BITSIZE (orig_mode) - care_bits >= count) + && GET_MODE_PRECISION (orig_mode) - care_bits >= count) return mode; } /* fall through */ @@ -9867,9 +9848,9 @@ try_widen_shift_mode (enum rtx_code code, rtx op, int count, } } -/* Simplify a shift of VAROP by COUNT bits. CODE says what kind of shift. - The result of the shift is RESULT_MODE. Return NULL_RTX if we cannot - simplify it. Otherwise, return a simplified value. +/* Simplify a shift of VAROP by ORIG_COUNT bits. CODE says what kind + of shift. The result of the shift is RESULT_MODE. Return NULL_RTX + if we cannot simplify it. Otherwise, return a simplified value. The shift is normally computed in the widest mode we find in VAROP, as long as it isn't a different number of words than RESULT_MODE. Exceptions @@ -9901,7 +9882,7 @@ simplify_shift_const_1 (enum rtx_code code, enum machine_mode result_mode, /* If we were given an invalid count, don't do anything except exactly what was requested. */ - if (orig_count < 0 || orig_count >= (int) GET_MODE_BITSIZE (mode)) + if (orig_count < 0 || orig_count >= (int) GET_MODE_PRECISION (mode)) return NULL_RTX; count = orig_count; @@ -9918,7 +9899,7 @@ simplify_shift_const_1 (enum rtx_code code, enum machine_mode result_mode, /* Convert ROTATERT to ROTATE. */ if (code == ROTATERT) { - unsigned int bitsize = GET_MODE_BITSIZE (result_mode);; + unsigned int bitsize = GET_MODE_PRECISION (result_mode); code = ROTATE; if (VECTOR_MODE_P (result_mode)) count = bitsize / GET_MODE_NUNITS (result_mode) - count; @@ -9939,12 +9920,12 @@ simplify_shift_const_1 (enum rtx_code code, enum machine_mode result_mode, multiple operations, each of which are defined, we know what the result is supposed to be. */ - if (count > (GET_MODE_BITSIZE (shift_mode) - 1)) + if (count > (GET_MODE_PRECISION (shift_mode) - 1)) { if (code == ASHIFTRT) - count = GET_MODE_BITSIZE (shift_mode) - 1; + count = GET_MODE_PRECISION (shift_mode) - 1; else if (code == ROTATE || code == ROTATERT) - count %= GET_MODE_BITSIZE (shift_mode); + count %= GET_MODE_PRECISION (shift_mode); else { /* We can't simply return zero because there may be an @@ -9964,7 +9945,7 @@ simplify_shift_const_1 (enum rtx_code code, enum machine_mode result_mode, is a no-op. */ if (code == ASHIFTRT && (num_sign_bit_copies (varop, shift_mode) - == GET_MODE_BITSIZE (shift_mode))) + == GET_MODE_PRECISION (shift_mode))) { count = 0; break; @@ -9977,25 +9958,23 @@ simplify_shift_const_1 (enum rtx_code code, enum machine_mode result_mode, if (code == ASHIFTRT && (count + num_sign_bit_copies (varop, shift_mode) - >= GET_MODE_BITSIZE (shift_mode))) - count = GET_MODE_BITSIZE (shift_mode) - 1; + >= GET_MODE_PRECISION (shift_mode))) + count = GET_MODE_PRECISION (shift_mode) - 1; /* We simplify the tests below and elsewhere by converting ASHIFTRT to LSHIFTRT if we know the sign bit is clear. `make_compound_operation' will convert it to an ASHIFTRT for those machines (such as VAX) that don't have an LSHIFTRT. 
*/ - if (GET_MODE_BITSIZE (shift_mode) <= HOST_BITS_PER_WIDE_INT - && code == ASHIFTRT - && ((nonzero_bits (varop, shift_mode) - & ((unsigned HOST_WIDE_INT) 1 - << (GET_MODE_BITSIZE (shift_mode) - 1))) == 0)) + if (code == ASHIFTRT + && val_signbit_known_clear_p (shift_mode, + nonzero_bits (varop, shift_mode))) code = LSHIFTRT; if (((code == LSHIFTRT - && GET_MODE_BITSIZE (shift_mode) <= HOST_BITS_PER_WIDE_INT + && HWI_COMPUTABLE_MODE_P (shift_mode) && !(nonzero_bits (varop, shift_mode) >> count)) || (code == ASHIFT - && GET_MODE_BITSIZE (shift_mode) <= HOST_BITS_PER_WIDE_INT + && HWI_COMPUTABLE_MODE_P (shift_mode) && !((nonzero_bits (varop, shift_mode) << count) & GET_MODE_MASK (shift_mode)))) && !side_effects_p (varop)) @@ -10110,9 +10089,9 @@ simplify_shift_const_1 (enum rtx_code code, enum machine_mode result_mode, AND of a new shift with a mask. We compute the result below. */ if (CONST_INT_P (XEXP (varop, 1)) && INTVAL (XEXP (varop, 1)) >= 0 - && INTVAL (XEXP (varop, 1)) < GET_MODE_BITSIZE (GET_MODE (varop)) - && GET_MODE_BITSIZE (result_mode) <= HOST_BITS_PER_WIDE_INT - && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT + && INTVAL (XEXP (varop, 1)) < GET_MODE_PRECISION (GET_MODE (varop)) + && HWI_COMPUTABLE_MODE_P (result_mode) + && HWI_COMPUTABLE_MODE_P (mode) && !VECTOR_MODE_P (result_mode)) { enum rtx_code first_code = GET_CODE (varop); @@ -10125,11 +10104,11 @@ simplify_shift_const_1 (enum rtx_code code, enum machine_mode result_mode, we have (ashift:M1 (subreg:M1 (ashiftrt:M2 FOO C1) 0) C2) with C2 == GET_MODE_BITSIZE (M1) - GET_MODE_BITSIZE (M2), we can convert it to - (ashiftrt:M1 (ashift:M1 (and:M1 (subreg:M1 FOO 0 C2) C3) C1). + (ashiftrt:M1 (ashift:M1 (and:M1 (subreg:M1 FOO 0) C3) C2) C1). This simplifies certain SIGN_EXTEND operations. */ if (code == ASHIFT && first_code == ASHIFTRT - && count == (GET_MODE_BITSIZE (result_mode) - - GET_MODE_BITSIZE (GET_MODE (varop)))) + && count == (GET_MODE_PRECISION (result_mode) + - GET_MODE_PRECISION (GET_MODE (varop)))) { /* C3 has the low-order C1 bits zero. */ @@ -10197,7 +10176,7 @@ simplify_shift_const_1 (enum rtx_code code, enum machine_mode result_mode, if (code == ASHIFTRT || (code == ROTATE && first_code == ASHIFTRT) - || GET_MODE_BITSIZE (mode) > HOST_BITS_PER_WIDE_INT + || GET_MODE_PRECISION (mode) > HOST_BITS_PER_WIDE_INT || (GET_MODE (varop) != result_mode && (first_code == ASHIFTRT || first_code == LSHIFTRT || first_code == ROTATE @@ -10285,7 +10264,7 @@ simplify_shift_const_1 (enum rtx_code code, enum machine_mode result_mode, && XEXP (XEXP (varop, 0), 1) == constm1_rtx && (STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1) && (code == LSHIFTRT || code == ASHIFTRT) - && count == (GET_MODE_BITSIZE (GET_MODE (varop)) - 1) + && count == (GET_MODE_PRECISION (GET_MODE (varop)) - 1) && rtx_equal_p (XEXP (XEXP (varop, 0), 0), XEXP (varop, 1))) { count = 0; @@ -10347,13 +10326,13 @@ simplify_shift_const_1 (enum rtx_code code, enum machine_mode result_mode, case EQ: /* Convert (lshiftrt (eq FOO 0) C) to (xor FOO 1) if STORE_FLAG_VALUE says that the sign bit can be tested, FOO has mode MODE, C is - GET_MODE_BITSIZE (MODE) - 1, and FOO has only its low-order bit + GET_MODE_PRECISION (MODE) - 1, and FOO has only its low-order bit that may be nonzero. 
*/ if (code == LSHIFTRT && XEXP (varop, 1) == const0_rtx && GET_MODE (XEXP (varop, 0)) == result_mode - && count == (GET_MODE_BITSIZE (result_mode) - 1) - && GET_MODE_BITSIZE (result_mode) <= HOST_BITS_PER_WIDE_INT + && count == (GET_MODE_PRECISION (result_mode) - 1) + && HWI_COMPUTABLE_MODE_P (result_mode) && STORE_FLAG_VALUE == -1 && nonzero_bits (XEXP (varop, 0), result_mode) == 1 && merge_outer_ops (&outer_op, &outer_const, XOR, 1, result_mode, @@ -10369,7 +10348,7 @@ simplify_shift_const_1 (enum rtx_code code, enum machine_mode result_mode, /* (lshiftrt (neg A) C) where A is either 0 or 1 and C is one less than the number of bits in the mode is equivalent to A. */ if (code == LSHIFTRT - && count == (GET_MODE_BITSIZE (result_mode) - 1) + && count == (GET_MODE_PRECISION (result_mode) - 1) && nonzero_bits (XEXP (varop, 0), result_mode) == 1) { varop = XEXP (varop, 0); @@ -10393,7 +10372,7 @@ simplify_shift_const_1 (enum rtx_code code, enum machine_mode result_mode, is one less than the number of bits in the mode is equivalent to (xor A 1). */ if (code == LSHIFTRT - && count == (GET_MODE_BITSIZE (result_mode) - 1) + && count == (GET_MODE_PRECISION (result_mode) - 1) && XEXP (varop, 1) == constm1_rtx && nonzero_bits (XEXP (varop, 0), result_mode) == 1 && merge_outer_ops (&outer_op, &outer_const, XOR, 1, result_mode, @@ -10421,7 +10400,7 @@ simplify_shift_const_1 (enum rtx_code code, enum machine_mode result_mode, } else if ((code == ASHIFTRT || code == LSHIFTRT) && count < HOST_BITS_PER_WIDE_INT - && GET_MODE_BITSIZE (result_mode) <= HOST_BITS_PER_WIDE_INT + && HWI_COMPUTABLE_MODE_P (result_mode) && 0 == (nonzero_bits (XEXP (varop, 0), result_mode) >> count) && 0 == (nonzero_bits (XEXP (varop, 0), result_mode) @@ -10477,7 +10456,7 @@ simplify_shift_const_1 (enum rtx_code code, enum machine_mode result_mode, if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1) && GET_CODE (XEXP (varop, 0)) == ASHIFTRT - && count == (GET_MODE_BITSIZE (GET_MODE (varop)) - 1) + && count == (GET_MODE_PRECISION (GET_MODE (varop)) - 1) && (code == LSHIFTRT || code == ASHIFTRT) && CONST_INT_P (XEXP (XEXP (varop, 0), 1)) && INTVAL (XEXP (XEXP (varop, 0), 1)) == count @@ -10501,8 +10480,8 @@ simplify_shift_const_1 (enum rtx_code code, enum machine_mode result_mode, && GET_CODE (XEXP (varop, 0)) == LSHIFTRT && CONST_INT_P (XEXP (XEXP (varop, 0), 1)) && (INTVAL (XEXP (XEXP (varop, 0), 1)) - >= (GET_MODE_BITSIZE (GET_MODE (XEXP (varop, 0))) - - GET_MODE_BITSIZE (GET_MODE (varop))))) + >= (GET_MODE_PRECISION (GET_MODE (XEXP (varop, 0))) + - GET_MODE_PRECISION (GET_MODE (varop))))) { rtx varop_inner = XEXP (varop, 0); @@ -10574,7 +10553,7 @@ simplify_shift_const_1 (enum rtx_code code, enum machine_mode result_mode, if (outer_op != UNKNOWN) { if (GET_RTX_CLASS (outer_op) != RTX_UNARY - && GET_MODE_BITSIZE (result_mode) < HOST_BITS_PER_WIDE_INT) + && GET_MODE_PRECISION (result_mode) < HOST_BITS_PER_WIDE_INT) outer_const = trunc_int_for_mode (outer_const, result_mode); if (outer_op == AND) @@ -10876,7 +10855,7 @@ static enum rtx_code simplify_compare_const (enum rtx_code code, rtx op0, rtx *pop1) { enum machine_mode mode = GET_MODE (op0); - unsigned int mode_width = GET_MODE_BITSIZE (mode); + unsigned int mode_width = GET_MODE_PRECISION (mode); HOST_WIDE_INT const_op = INTVAL (*pop1); /* Get the constant we are comparing against and turn off all bits @@ -11089,8 +11068,8 @@ simplify_comparison (enum rtx_code code, rtx *pop0, rtx *pop1) && XEXP (op0, 1) == XEXP (XEXP (op0, 0), 1) && XEXP (op0, 1) == XEXP (XEXP (op1, 0), 1) 
&& (INTVAL (XEXP (op0, 1)) - == (GET_MODE_BITSIZE (GET_MODE (op0)) - - (GET_MODE_BITSIZE + == (GET_MODE_PRECISION (GET_MODE (op0)) + - (GET_MODE_PRECISION (GET_MODE (SUBREG_REG (XEXP (XEXP (op0, 0), 0)))))))) { op0 = SUBREG_REG (XEXP (XEXP (op0, 0), 0)); @@ -11103,7 +11082,7 @@ simplify_comparison (enum rtx_code code, rtx *pop0, rtx *pop1) this shift are known to be zero for both inputs and if the type of comparison is compatible with the shift. */ if (GET_CODE (op0) == GET_CODE (op1) - && GET_MODE_BITSIZE (GET_MODE (op0)) <= HOST_BITS_PER_WIDE_INT + && HWI_COMPUTABLE_MODE_P (GET_MODE(op0)) && ((GET_CODE (op0) == ROTATE && (code == NE || code == EQ)) || ((GET_CODE (op0) == LSHIFTRT || GET_CODE (op0) == ASHIFT) && (code != GT && code != LT && code != GE && code != LE)) @@ -11154,12 +11133,11 @@ simplify_comparison (enum rtx_code code, rtx *pop0, rtx *pop1) HOST_WIDE_INT c1 = INTVAL (XEXP (op1, 1)); int changed = 0; - if (GET_CODE (inner_op0) == SUBREG && GET_CODE (inner_op1) == SUBREG - && (GET_MODE_SIZE (GET_MODE (inner_op0)) - > GET_MODE_SIZE (GET_MODE (SUBREG_REG (inner_op0)))) + if (paradoxical_subreg_p (inner_op0) + && GET_CODE (inner_op1) == SUBREG && (GET_MODE (SUBREG_REG (inner_op0)) == GET_MODE (SUBREG_REG (inner_op1))) - && (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (inner_op0))) + && (GET_MODE_PRECISION (GET_MODE (SUBREG_REG (inner_op0))) <= HOST_BITS_PER_WIDE_INT) && (0 == ((~c0) & nonzero_bits (SUBREG_REG (inner_op0), GET_MODE (SUBREG_REG (inner_op0))))) @@ -11222,7 +11200,7 @@ simplify_comparison (enum rtx_code code, rtx *pop0, rtx *pop1) while (CONST_INT_P (op1)) { enum machine_mode mode = GET_MODE (op0); - unsigned int mode_width = GET_MODE_BITSIZE (mode); + unsigned int mode_width = GET_MODE_PRECISION (mode); unsigned HOST_WIDE_INT mask = GET_MODE_MASK (mode); int equality_comparison_p; int sign_bit_comparison_p; @@ -11253,11 +11231,10 @@ simplify_comparison (enum rtx_code code, rtx *pop0, rtx *pop1) /* If this is a sign bit comparison and we can do arithmetic in MODE, say that we will only be needing the sign bit of OP0. */ - if (sign_bit_comparison_p - && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT) + if (sign_bit_comparison_p && HWI_COMPUTABLE_MODE_P (mode)) op0 = force_to_mode (op0, mode, (unsigned HOST_WIDE_INT) 1 - << (GET_MODE_BITSIZE (mode) - 1), + << (GET_MODE_PRECISION (mode) - 1), 0); /* Now try cases based on the opcode of OP0. If none of the cases @@ -11288,7 +11265,7 @@ simplify_comparison (enum rtx_code code, rtx *pop0, rtx *pop1) else { mode = new_mode; - i = (GET_MODE_BITSIZE (mode) - 1 - i); + i = (GET_MODE_PRECISION (mode) - 1 - i); } } @@ -11421,10 +11398,7 @@ simplify_comparison (enum rtx_code code, rtx *pop0, rtx *pop1) mode = GET_MODE (XEXP (op0, 0)); if (mode != VOIDmode && GET_MODE_CLASS (mode) == MODE_INT && ! 
unsigned_comparison_p - && (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT) - && ((unsigned HOST_WIDE_INT) const_op - < (((unsigned HOST_WIDE_INT) 1 - << (GET_MODE_BITSIZE (mode) - 1)))) + && val_signbit_known_clear_p (mode, const_op) && have_insn_for (COMPARE, mode)) { op0 = XEXP (op0, 0); @@ -11455,7 +11429,7 @@ simplify_comparison (enum rtx_code code, rtx *pop0, rtx *pop1) if (mode_width <= HOST_BITS_PER_WIDE_INT && subreg_lowpart_p (op0) - && GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (op0))) > mode_width + && GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op0))) > mode_width && GET_CODE (SUBREG_REG (op0)) == PLUS && CONST_INT_P (XEXP (SUBREG_REG (op0), 1))) { @@ -11475,14 +11449,14 @@ simplify_comparison (enum rtx_code code, rtx *pop0, rtx *pop1) /* (A - C1) sign-extends if it is positive and 1-extends if it is negative, C2 both sign- and 1-extends. */ || (num_sign_bit_copies (a, inner_mode) - > (unsigned int) (GET_MODE_BITSIZE (inner_mode) + > (unsigned int) (GET_MODE_PRECISION (inner_mode) - mode_width) && const_op < 0))) || ((unsigned HOST_WIDE_INT) c1 < (unsigned HOST_WIDE_INT) 1 << (mode_width - 2) /* (A - C1) always sign-extends, like C2. */ && num_sign_bit_copies (a, inner_mode) - > (unsigned int) (GET_MODE_BITSIZE (inner_mode) + > (unsigned int) (GET_MODE_PRECISION (inner_mode) - (mode_width - 1)))) { op0 = SUBREG_REG (op0); @@ -11493,7 +11467,7 @@ simplify_comparison (enum rtx_code code, rtx *pop0, rtx *pop1) /* If the inner mode is narrower and we are extracting the low part, we can treat the SUBREG as if it were a ZERO_EXTEND. */ if (subreg_lowpart_p (op0) - && GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (op0))) < mode_width) + && GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op0))) < mode_width) /* Fall through */ ; else break; @@ -11504,7 +11478,7 @@ simplify_comparison (enum rtx_code code, rtx *pop0, rtx *pop1) mode = GET_MODE (XEXP (op0, 0)); if (mode != VOIDmode && GET_MODE_CLASS (mode) == MODE_INT && (unsigned_comparison_p || equality_comparison_p) - && (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT) + && HWI_COMPUTABLE_MODE_P (mode) && ((unsigned HOST_WIDE_INT) const_op < GET_MODE_MASK (mode)) && have_insn_for (COMPARE, mode)) { @@ -11611,11 +11585,7 @@ simplify_comparison (enum rtx_code code, rtx *pop0, rtx *pop1) /* Check for the cases where we simply want the result of the earlier test or the opposite of that result. */ if (code == NE || code == EQ - || (GET_MODE_BITSIZE (GET_MODE (op0)) <= HOST_BITS_PER_WIDE_INT - && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT - && (STORE_FLAG_VALUE - & (((unsigned HOST_WIDE_INT) 1 - << (GET_MODE_BITSIZE (GET_MODE (op0)) - 1)))) + || (val_signbit_known_set_p (GET_MODE (op0), STORE_FLAG_VALUE) && (code == LT || code == GE))) { enum rtx_code new_code; @@ -11718,8 +11688,7 @@ simplify_comparison (enum rtx_code code, rtx *pop0, rtx *pop1) + 1)) >= 0 && const_op >> i == 0 && (tmode = mode_for_size (i, MODE_INT, 1)) != BLKmode - && (TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (tmode), - GET_MODE_BITSIZE (GET_MODE (op0))) + && (TRULY_NOOP_TRUNCATION_MODES_P (tmode, GET_MODE (op0)) || (REG_P (XEXP (op0, 0)) && reg_truncated_to_mode (tmode, XEXP (op0, 0))))) { @@ -11747,14 +11716,14 @@ simplify_comparison (enum rtx_code code, rtx *pop0, rtx *pop1) the code has been changed. 
*/ && (0 #ifdef WORD_REGISTER_OPERATIONS - || (mode_width > GET_MODE_BITSIZE (tmode) + || (mode_width > GET_MODE_PRECISION (tmode) && mode_width <= BITS_PER_WORD) #endif - || (mode_width <= GET_MODE_BITSIZE (tmode) + || (mode_width <= GET_MODE_PRECISION (tmode) && subreg_lowpart_p (XEXP (op0, 0)))) && CONST_INT_P (XEXP (op0, 1)) && mode_width <= HOST_BITS_PER_WIDE_INT - && GET_MODE_BITSIZE (tmode) <= HOST_BITS_PER_WIDE_INT + && HWI_COMPUTABLE_MODE_P (tmode) && ((c1 = INTVAL (XEXP (op0, 1))) & ~mask) == 0 && (c1 & ~GET_MODE_MASK (tmode)) == 0 && c1 != mask @@ -11793,7 +11762,7 @@ simplify_comparison (enum rtx_code code, rtx *pop0, rtx *pop1) || (GET_CODE (shift_op) == XOR && CONST_INT_P (XEXP (shift_op, 1)) && CONST_INT_P (shift_count) - && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT + && HWI_COMPUTABLE_MODE_P (mode) && (UINTVAL (XEXP (shift_op, 1)) == (unsigned HOST_WIDE_INT) 1 << INTVAL (shift_count)))) @@ -12007,8 +11976,7 @@ simplify_comparison (enum rtx_code code, rtx *pop0, rtx *pop1) && GET_MODE_CLASS (GET_MODE (SUBREG_REG (op0))) == MODE_INT && (code == NE || code == EQ)) { - if (GET_MODE_SIZE (GET_MODE (op0)) - > GET_MODE_SIZE (GET_MODE (SUBREG_REG (op0)))) + if (paradoxical_subreg_p (op0)) { /* For paradoxical subregs, allow case 1 as above. Case 3 isn't implemented. */ @@ -12018,7 +11986,7 @@ simplify_comparison (enum rtx_code code, rtx *pop0, rtx *pop1) op1 = gen_lowpart (GET_MODE (op0), op1); } } - else if ((GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (op0))) + else if ((GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op0))) <= HOST_BITS_PER_WIDE_INT) && (nonzero_bits (SUBREG_REG (op0), GET_MODE (SUBREG_REG (op0))) @@ -12043,8 +12011,7 @@ simplify_comparison (enum rtx_code code, rtx *pop0, rtx *pop1) && GET_MODE_SIZE (mode) < UNITS_PER_WORD && ! have_insn_for (COMPARE, mode)) for (tmode = GET_MODE_WIDER_MODE (mode); - (tmode != VOIDmode - && GET_MODE_BITSIZE (tmode) <= HOST_BITS_PER_WIDE_INT); + (tmode != VOIDmode && HWI_COMPUTABLE_MODE_P (tmode)); tmode = GET_MODE_WIDER_MODE (tmode)) if (have_insn_for (COMPARE, tmode)) { @@ -12055,7 +12022,7 @@ simplify_comparison (enum rtx_code code, rtx *pop0, rtx *pop1) a paradoxical subreg to extend OP0. */ if (op1 == const0_rtx && (code == LT || code == GE) - && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT) + && HWI_COMPUTABLE_MODE_P (mode)) { op0 = simplify_gen_binary (AND, tmode, gen_lowpart (tmode, op0), @@ -12081,11 +12048,11 @@ simplify_comparison (enum rtx_code code, rtx *pop0, rtx *pop1) if (zero_extended || ((num_sign_bit_copies (op0, tmode) - > (unsigned int) (GET_MODE_BITSIZE (tmode) - - GET_MODE_BITSIZE (mode))) + > (unsigned int) (GET_MODE_PRECISION (tmode) + - GET_MODE_PRECISION (mode))) && (num_sign_bit_copies (op1, tmode) - > (unsigned int) (GET_MODE_BITSIZE (tmode) - - GET_MODE_BITSIZE (mode))))) + > (unsigned int) (GET_MODE_PRECISION (tmode) + - GET_MODE_PRECISION (mode))))) { /* If OP0 is an AND and we don't have an AND in MODE either, make a new AND in the proper mode. 
*/ @@ -12347,7 +12314,7 @@ record_value_for_reg (rtx reg, rtx insn, rtx value) subst_low_luid = DF_INSN_LUID (insn); rsp->last_set_mode = mode; if (GET_MODE_CLASS (mode) == MODE_INT - && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT) + && HWI_COMPUTABLE_MODE_P (mode)) mode = nonzero_bits_mode; rsp->last_set_nonzero_bits = nonzero_bits (value, mode); rsp->last_set_sign_bit_copies @@ -12384,7 +12351,7 @@ record_dead_and_set_regs_1 (rtx dest, const_rtx setter, void *data) else if (GET_CODE (setter) == SET && GET_CODE (SET_DEST (setter)) == SUBREG && SUBREG_REG (SET_DEST (setter)) == dest - && GET_MODE_BITSIZE (GET_MODE (dest)) <= BITS_PER_WORD + && GET_MODE_PRECISION (GET_MODE (dest)) <= BITS_PER_WORD && subreg_lowpart_p (SET_DEST (setter))) record_value_for_reg (dest, record_dead_insn, gen_lowpart (GET_MODE (dest), @@ -12481,7 +12448,7 @@ record_promoted_value (rtx insn, rtx subreg) unsigned int regno = REGNO (SUBREG_REG (subreg)); enum machine_mode mode = GET_MODE (subreg); - if (GET_MODE_BITSIZE (mode) > HOST_BITS_PER_WIDE_INT) + if (GET_MODE_PRECISION (mode) > HOST_BITS_PER_WIDE_INT) return; for (links = LOG_LINKS (insn); links;) @@ -12532,8 +12499,7 @@ reg_truncated_to_mode (enum machine_mode mode, const_rtx x) return false; if (GET_MODE_SIZE (truncated) <= GET_MODE_SIZE (mode)) return true; - if (TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode), - GET_MODE_BITSIZE (truncated))) + if (TRULY_NOOP_TRUNCATION_MODES_P (mode, truncated)) return true; return false; } @@ -12558,8 +12524,7 @@ record_truncated_value (rtx *p, void *data ATTRIBUTE_UNUSED) if (GET_MODE_SIZE (original_mode) <= GET_MODE_SIZE (truncated_mode)) return -1; - if (TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (truncated_mode), - GET_MODE_BITSIZE (original_mode))) + if (TRULY_NOOP_TRUNCATION_MODES_P (truncated_mode, original_mode)) return -1; x = SUBREG_REG (x); @@ -12746,8 +12711,7 @@ get_last_value (const_rtx x) we cannot predict what values the "extra" bits might have. */ if (GET_CODE (x) == SUBREG && subreg_lowpart_p (x) - && (GET_MODE_SIZE (GET_MODE (x)) - <= GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))) + && !paradoxical_subreg_p (x) && (value = get_last_value (SUBREG_REG (x))) != 0) return gen_lowpart (GET_MODE (x), value); diff --git a/gcc/config.gcc b/gcc/config.gcc index c77f40b0199..d73cb459bb0 100644 --- a/gcc/config.gcc +++ b/gcc/config.gcc @@ -1369,7 +1369,7 @@ i[34567]86-*-rtems*) tm_file="${tm_file} i386/unix.h i386/att.h dbxelf.h elfos.h i386/i386elf.h i386/rtemself.h rtems.h newlib-stdint.h" tmake_file="${tmake_file} i386/t-rtems t-rtems" ;; -i[34567]86-*-solaris2*) +i[34567]86-*-solaris2* | x86_64-*-solaris2.1[0-9]*) tm_file="${tm_file} i386/unix.h i386/att.h ${sol2_tm_file}" # Set default arch_32 to pentium4, tune_32 to generic like the other # i386 targets, although config.guess defaults to i386-pc-solaris2*. 
@@ -3529,7 +3529,7 @@ case ${target} in i[34567]86-*-gnu*) tmake_file="${tmake_file} i386/t-fprules-softfp soft-fp/t-softfp i386/t-linux" ;; - i[34567]86-*-solaris2*) + i[34567]86-*-solaris2* | x86_64-*-solaris2.1[0-9]*) tmake_file="${tmake_file} i386/t-fprules-softfp soft-fp/t-softfp" ;; i[34567]86-*-cygwin* | i[34567]86-*-mingw* | x86_64-*-mingw*) diff --git a/gcc/config.host b/gcc/config.host index a2bde4434c5..d55447cb80c 100644 --- a/gcc/config.host +++ b/gcc/config.host @@ -197,7 +197,7 @@ case ${host} in i370-*-opened* | i370-*-mvs* ) # IBM 360/370/390 Architecture host_xm_defines='FATAL_EXIT_CODE=12' ;; - i[34567]86-*-solaris2*) + i[34567]86-*-solaris2* | x86_64-*-solaris2.1[0-9]*) out_host_hook_obj=host-solaris.o host_xmake_file="${host_xmake_file} x-solaris" ;; diff --git a/gcc/config/arm/arm.c b/gcc/config/arm/arm.c index 4c6041ab553..0e371f30167 100644 --- a/gcc/config/arm/arm.c +++ b/gcc/config/arm/arm.c @@ -203,7 +203,6 @@ static bool arm_output_ttype (rtx); static void arm_asm_emit_except_personality (rtx); static void arm_asm_init_sections (void); #endif -static void arm_dwarf_handle_frame_unspec (const char *, rtx, int); static rtx arm_dwarf_register_span (rtx); static tree arm_cxx_guard_type (void); @@ -501,9 +500,6 @@ static const struct attribute_spec arm_attribute_table[] = #define TARGET_ASM_INIT_SECTIONS arm_asm_init_sections #endif /* ARM_UNWIND_INFO */ -#undef TARGET_DWARF_HANDLE_FRAME_UNSPEC -#define TARGET_DWARF_HANDLE_FRAME_UNSPEC arm_dwarf_handle_frame_unspec - #undef TARGET_DWARF_REGISTER_SPAN #define TARGET_DWARF_REGISTER_SPAN arm_dwarf_register_span @@ -15830,9 +15826,8 @@ arm_expand_prologue (void) if (IS_STACKALIGN (func_type)) { - rtx dwarf; - rtx r0; - rtx r1; + rtx r0, r1; + /* Handle a word-aligned stack pointer. We generate the following: mov r0, sp @@ -15848,15 +15843,18 @@ arm_expand_prologue (void) r0 = gen_rtx_REG (SImode, 0); r1 = gen_rtx_REG (SImode, 1); - /* Use a real rtvec rather than NULL_RTVEC so the rest of the - compiler won't choke. */ - dwarf = gen_rtx_UNSPEC (SImode, rtvec_alloc (0), UNSPEC_STACK_ALIGN); - dwarf = gen_rtx_SET (VOIDmode, r0, dwarf); - insn = gen_movsi (r0, stack_pointer_rtx); + + insn = emit_insn (gen_movsi (r0, stack_pointer_rtx)); RTX_FRAME_RELATED_P (insn) = 1; - add_reg_note (insn, REG_FRAME_RELATED_EXPR, dwarf); - emit_insn (insn); + add_reg_note (insn, REG_CFA_REGISTER, NULL); + emit_insn (gen_andsi3 (r1, r0, GEN_INT (~(HOST_WIDE_INT)7))); + + /* ??? The CFA changes here, which may cause GDB to conclude that it + has entered a different function. That said, the unwind info is + correct, individually, before and after this instruction because + we've described the save of SP, which will override the default + handling of SP as restoring from the CFA. */ emit_insn (gen_movsi (stack_pointer_rtx, r1)); } @@ -22880,13 +22878,6 @@ arm_unwind_emit_set (FILE * asm_out_file, rtx p) asm_fprintf (asm_out_file, "\t.movsp %r, #%d\n", REGNO (e0), (int)INTVAL(XEXP (e1, 1))); } - else if (GET_CODE (e1) == UNSPEC && XINT (e1, 1) == UNSPEC_STACK_ALIGN) - { - /* Stack pointer save before alignment. 
*/ - reg = REGNO (e0); - asm_fprintf (asm_out_file, "\t.unwind_raw 0, 0x%x @ vsp = r%d\n", - reg + 0x90, reg); - } else abort (); break; @@ -22902,7 +22893,8 @@ arm_unwind_emit_set (FILE * asm_out_file, rtx p) static void arm_unwind_emit (FILE * asm_out_file, rtx insn) { - rtx pat; + rtx note, pat; + bool handled_one = false; if (arm_except_unwind_info (&global_options) != UI_TARGET) return; @@ -22912,14 +22904,56 @@ arm_unwind_emit (FILE * asm_out_file, rtx insn) || crtl->all_throwers_are_sibcalls)) return; - if (GET_CODE (insn) == NOTE || !RTX_FRAME_RELATED_P (insn)) + if (NOTE_P (insn) || !RTX_FRAME_RELATED_P (insn)) return; - pat = find_reg_note (insn, REG_FRAME_RELATED_EXPR, NULL_RTX); - if (pat) - pat = XEXP (pat, 0); - else - pat = PATTERN (insn); + for (note = REG_NOTES (insn); note ; note = XEXP (note, 1)) + { + pat = XEXP (note, 0); + switch (REG_NOTE_KIND (note)) + { + case REG_FRAME_RELATED_EXPR: + goto found; + + case REG_CFA_REGISTER: + if (pat == NULL) + { + pat = PATTERN (insn); + if (GET_CODE (pat) == PARALLEL) + pat = XVECEXP (pat, 0, 0); + } + + /* Only emitted for IS_STACKALIGN re-alignment. */ + { + rtx dest, src; + unsigned reg; + + src = SET_SRC (pat); + dest = SET_DEST (pat); + + gcc_assert (src == stack_pointer_rtx); + reg = REGNO (dest); + asm_fprintf (asm_out_file, "\t.unwind_raw 0, 0x%x @ vsp = r%d\n", + reg + 0x90, reg); + } + handled_one = true; + break; + + case REG_CFA_DEF_CFA: + case REG_CFA_EXPRESSION: + case REG_CFA_ADJUST_CFA: + case REG_CFA_OFFSET: + /* ??? Only handling here what we actually emit. */ + gcc_unreachable (); + + default: + break; + } + } + if (handled_one) + return; + pat = PATTERN (insn); + found: switch (GET_CODE (pat)) { @@ -22975,30 +23009,6 @@ arm_asm_init_sections (void) } #endif /* ARM_UNWIND_INFO */ -/* Handle UNSPEC DWARF call frame instructions. These are needed for dynamic - stack alignment. */ - -static void -arm_dwarf_handle_frame_unspec (const char *label, rtx pattern, int index) -{ - rtx unspec = SET_SRC (pattern); - gcc_assert (GET_CODE (unspec) == UNSPEC); - - switch (index) - { - case UNSPEC_STACK_ALIGN: - /* ??? We should set the CFA = (SP & ~7). At this point we haven't - put anything on the stack, so hopefully it won't matter. - CFA = SP will be correct after alignment. */ - dwarf2out_reg_save_reg (label, stack_pointer_rtx, - SET_DEST (pattern)); - break; - default: - gcc_unreachable (); - } -} - - /* Output unwind directives for the start/end of a function. */ void diff --git a/gcc/config/arm/arm.h b/gcc/config/arm/arm.h index b0d26259a08..3810f9e8f61 100644 --- a/gcc/config/arm/arm.h +++ b/gcc/config/arm/arm.h @@ -45,6 +45,8 @@ extern char arm_arch_name[]; #define TARGET_CPU_CPP_BUILTINS() \ do \ { \ + if (TARGET_DSP_MULTIPLY) \ + builtin_define ("__ARM_FEATURE_DSP"); \ /* Define __arm__ even when in thumb mode, for \ consistency with armcc. */ \ builtin_define ("__arm__"); \ diff --git a/gcc/config/arm/arm.md b/gcc/config/arm/arm.md index 2bf3551a414..03ae72defa3 100644 --- a/gcc/config/arm/arm.md +++ b/gcc/config/arm/arm.md @@ -94,8 +94,6 @@ UNSPEC_TLS ; A symbol that has been treated properly for TLS usage. UNSPEC_PIC_LABEL ; A label used for PIC access that does not appear in the ; instruction stream. - UNSPEC_STACK_ALIGN ; Doubleword aligned stack pointer. Used to - ; generate correct unwind information. UNSPEC_PIC_OFFSET ; A symbolic 12-bit OFFSET that has been treated ; correctly for PIC usage. 
UNSPEC_GOTSYM_OFF ; The offset of the start of the GOT from a diff --git a/gcc/config/avr/avr-c.c b/gcc/config/avr/avr-c.c index ec314d2a139..aa1a51e538b 100644 --- a/gcc/config/avr/avr-c.c +++ b/gcc/config/avr/avr-c.c @@ -94,10 +94,7 @@ avr_cpu_cpp_builtins (struct cpp_reader *pfile) cpp_define (pfile, "__BUILTIN_AVR_SWAP"); cpp_define (pfile, "__BUILTIN_AVR_DELAY_CYCLES"); - if (AVR_HAVE_MUL) - { - cpp_define (pfile, "__BUILTIN_AVR_FMUL"); - cpp_define (pfile, "__BUILTIN_AVR_FMULS"); - cpp_define (pfile, "__BUILTIN_AVR_FMULSU"); - } + cpp_define (pfile, "__BUILTIN_AVR_FMUL"); + cpp_define (pfile, "__BUILTIN_AVR_FMULS"); + cpp_define (pfile, "__BUILTIN_AVR_FMULSU"); } diff --git a/gcc/config/avr/avr-protos.h b/gcc/config/avr/avr-protos.h index 6d06af8153a..718aa420c74 100644 --- a/gcc/config/avr/avr-protos.h +++ b/gcc/config/avr/avr-protos.h @@ -56,7 +56,7 @@ extern const char *out_movhi_r_mr (rtx insn, rtx op[], int *l); extern const char *out_movhi_mr_r (rtx insn, rtx op[], int *l); extern const char *out_movsi_r_mr (rtx insn, rtx op[], int *l); extern const char *out_movsi_mr_r (rtx insn, rtx op[], int *l); -extern const char *output_movsisf (rtx insn, rtx operands[], int *l); +extern const char *output_movsisf (rtx insn, rtx operands[], rtx clobber, int *l); extern const char *out_tstsi (rtx insn, rtx src, int *l); extern const char *out_tsthi (rtx insn, rtx src, int *l); extern const char *ret_cond_branch (rtx x, int len, int reverse); @@ -85,7 +85,7 @@ extern const char *avr_out_sbxx_branch (rtx insn, rtx operands[]); extern int extra_constraint_Q (rtx x); extern int adjust_insn_length (rtx insn, int len); extern const char *output_reload_inhi (rtx insn, rtx *operands, int *len); -extern const char *output_reload_insisf (rtx insn, rtx *operands, int *len); +extern const char *output_reload_insisf (rtx insn, rtx *operands, rtx clobber, int *len); extern enum reg_class secondary_input_reload_class (enum reg_class, enum machine_mode, rtx); diff --git a/gcc/config/avr/avr.c b/gcc/config/avr/avr.c index 407a40f8c5e..17020ce5ced 100644 --- a/gcc/config/avr/avr.c +++ b/gcc/config/avr/avr.c @@ -1184,6 +1184,32 @@ avr_legitimize_address (rtx x, rtx oldx, enum machine_mode mode) } +/* Helper function to print assembler resp. track instruction + sequence lengths. + + If PLEN == NULL: + Output assembler code from template TPL with operands supplied + by OPERANDS. This is just forwarding to output_asm_insn. + + If PLEN != NULL: + Add N_WORDS to *PLEN. + Don't output anything. +*/ + +static void +avr_asm_len (const char* tpl, rtx* operands, int* plen, int n_words) +{ + if (NULL == plen) + { + output_asm_insn (tpl, operands); + } + else + { + *plen += n_words; + } +} + + /* Return a pointer register name as a string. */ static const char * @@ -2600,7 +2626,7 @@ out_movsi_mr_r (rtx insn, rtx op[], int *l) } const char * -output_movsisf(rtx insn, rtx operands[], int *l) +output_movsisf (rtx insn, rtx operands[], rtx clobber_reg, int *l) { int dummy; rtx dest = operands[0]; @@ -2643,6 +2669,11 @@ output_movsisf(rtx insn, rtx operands[], int *l) AS2 (mov,%D0,%D1)); } } + else if (CONST_INT_P (src) + || CONST_DOUBLE_P (src)) + { + return output_reload_insisf (insn, operands, clobber_reg, real_l); + } else if (CONSTANT_P (src)) { if (test_hard_reg_class (LD_REGS, dest)) /* ldi d,i */ @@ -2653,68 +2684,6 @@ output_movsisf(rtx insn, rtx operands[], int *l) AS2 (ldi,%C0,hlo8(%1)) CR_TAB AS2 (ldi,%D0,hhi8(%1))); } - - if (GET_CODE (src) == CONST_INT) - { - const char *const clr_op0 = - AVR_HAVE_MOVW ? 
(AS1 (clr,%A0) CR_TAB - AS1 (clr,%B0) CR_TAB - AS2 (movw,%C0,%A0)) - : (AS1 (clr,%A0) CR_TAB - AS1 (clr,%B0) CR_TAB - AS1 (clr,%C0) CR_TAB - AS1 (clr,%D0)); - - if (src == const0_rtx) /* mov r,L */ - { - *l = AVR_HAVE_MOVW ? 3 : 4; - return clr_op0; - } - else if (src == const1_rtx) - { - if (!real_l) - output_asm_insn (clr_op0, operands); - *l = AVR_HAVE_MOVW ? 4 : 5; - return AS1 (inc,%A0); - } - else if (src == constm1_rtx) - { - /* Immediate constants -1 to any register */ - if (AVR_HAVE_MOVW) - { - *l = 4; - return (AS1 (clr,%A0) CR_TAB - AS1 (dec,%A0) CR_TAB - AS2 (mov,%B0,%A0) CR_TAB - AS2 (movw,%C0,%A0)); - } - *l = 5; - return (AS1 (clr,%A0) CR_TAB - AS1 (dec,%A0) CR_TAB - AS2 (mov,%B0,%A0) CR_TAB - AS2 (mov,%C0,%A0) CR_TAB - AS2 (mov,%D0,%A0)); - } - else - { - int bit_nr = exact_log2 (INTVAL (src)); - - if (bit_nr >= 0) - { - *l = AVR_HAVE_MOVW ? 5 : 6; - if (!real_l) - { - output_asm_insn (clr_op0, operands); - output_asm_insn ("set", operands); - } - if (!real_l) - avr_output_bld (operands, bit_nr); - - return ""; - } - } - } - /* Last resort, better than loading from memory. */ *l = 10; return (AS2 (mov,__tmp_reg__,r31) CR_TAB @@ -2735,7 +2704,7 @@ output_movsisf(rtx insn, rtx operands[], int *l) { const char *templ; - if (src == const0_rtx) + if (src == CONST0_RTX (GET_MODE (dest))) operands[1] = zero_reg_rtx; templ = out_movsi_mr_r (insn, operands, real_l); @@ -4612,7 +4581,7 @@ adjust_insn_length (rtx insn, int len) break; case SImode: case SFmode: - output_movsisf (insn, op, &len); + output_movsisf (insn, op, NULL_RTX, &len); break; default: break; @@ -4683,7 +4652,7 @@ adjust_insn_length (rtx insn, int len) break; case SImode: case SFmode: - output_reload_insisf (insn, op, &len); + output_reload_insisf (insn, op, XEXP (op[2], 0), &len); break; default: break; @@ -5030,7 +4999,19 @@ avr_insert_attributes (tree node, tree *attributes) && (TREE_STATIC (node) || DECL_EXTERNAL (node)) && avr_progmem_p (node, *attributes)) { - if (TREE_READONLY (node)) + tree node0 = node; + + /* For C++, we have to peel arrays in order to get correct + determination of readonlyness. */ + + do + node0 = TREE_TYPE (node0); + while (TREE_CODE (node0) == ARRAY_TYPE); + + if (error_mark_node == node0) + return; + + if (TYPE_READONLY (node0)) { static const char dsec[] = ".progmem.data"; @@ -6200,53 +6181,199 @@ output_reload_inhi (rtx insn ATTRIBUTE_UNUSED, rtx *operands, int *len) } +/* Reload a SI or SF compile time constant (OP[1]) into a GPR (OP[0]). + CLOBBER_REG is a QI clobber reg needed to move vast majority of consts + into a NO_LD_REGS. If CLOBBER_REG is NULL_RTX we either don't need a + clobber reg or have to cook one up. + + LEN == NULL: Output instructions. + + LEN != NULL: Output nothing. Increment *LEN by number of words occupied + by the insns printed. + + Return "". */ + const char * -output_reload_insisf (rtx insn ATTRIBUTE_UNUSED, rtx *operands, int *len) +output_reload_insisf (rtx insn ATTRIBUTE_UNUSED, + rtx *op, rtx clobber_reg, int *len) { - rtx src = operands[1]; - int cnst = (GET_CODE (src) == CONST_INT); + rtx src = op[1]; + rtx dest = op[0]; + rtx xval, xdest[4]; + int ival[4]; + int clobber_val = 1234; + bool cooked_clobber_p = false; + bool set_p = false; + unsigned int n; + enum machine_mode mode = GET_MODE (dest); + + gcc_assert (REG_P (dest)); if (len) + *len = 0; + + /* (REG:SI 14) is special: It's neither in LD_REGS nor in NO_LD_REGS + but has some subregs that are in LD_REGS. Use the MSB (REG:QI 17). 
*/ + + if (14 == REGNO (dest)) { - if (cnst) - *len = 4 + ((INTVAL (src) & 0xff) != 0) - + ((INTVAL (src) & 0xff00) != 0) - + ((INTVAL (src) & 0xff0000) != 0) - + ((INTVAL (src) & 0xff000000) != 0); - else - *len = 8; - - return ""; + clobber_reg = gen_rtx_REG (QImode, 17); } - if (cnst && ((INTVAL (src) & 0xff) == 0)) - output_asm_insn (AS2 (mov, %A0, __zero_reg__), operands); - else - { - output_asm_insn (AS2 (ldi, %2, lo8(%1)), operands); - output_asm_insn (AS2 (mov, %A0, %2), operands); - } - if (cnst && ((INTVAL (src) & 0xff00) == 0)) - output_asm_insn (AS2 (mov, %B0, __zero_reg__), operands); - else + /* We might need a clobber reg but don't have one. Look at the value + to be loaded more closely. A clobber is only needed if it contains + a byte that is neither 0, -1 or a power of 2. */ + + if (NULL_RTX == clobber_reg + && !test_hard_reg_class (LD_REGS, dest)) { - output_asm_insn (AS2 (ldi, %2, hi8(%1)), operands); - output_asm_insn (AS2 (mov, %B0, %2), operands); + for (n = 0; n < GET_MODE_SIZE (mode); n++) + { + xval = simplify_gen_subreg (QImode, src, mode, n); + + if (!(const0_rtx == xval + || constm1_rtx == xval + || single_one_operand (xval, QImode))) + { + /* We have no clobber reg but need one. Cook one up. + That's cheaper than loading from constant pool. */ + + cooked_clobber_p = true; + clobber_reg = gen_rtx_REG (QImode, 31); + avr_asm_len ("mov __tmp_reg__,%0", &clobber_reg, len, 1); + break; + } + } } - if (cnst && ((INTVAL (src) & 0xff0000) == 0)) - output_asm_insn (AS2 (mov, %C0, __zero_reg__), operands); - else + + /* Now start filling DEST from LSB to MSB. */ + + for (n = 0; n < GET_MODE_SIZE (mode); n++) { - output_asm_insn (AS2 (ldi, %2, hlo8(%1)), operands); - output_asm_insn (AS2 (mov, %C0, %2), operands); + bool done_byte = false; + unsigned int j; + rtx xop[3]; + + /* Crop the n-th sub-byte. */ + + xval = simplify_gen_subreg (QImode, src, mode, n); + xdest[n] = simplify_gen_subreg (QImode, dest, mode, n); + ival[n] = INTVAL (xval); + + /* Look if we can reuse the low word by means of MOVW. */ + + if (n == 2 + && AVR_HAVE_MOVW) + { + rtx lo16 = simplify_gen_subreg (HImode, src, mode, 0); + rtx hi16 = simplify_gen_subreg (HImode, src, mode, 2); + + if (INTVAL (lo16) == INTVAL (hi16)) + { + avr_asm_len ("movw %C0,%A0", &op[0], len, 1); + break; + } + } + + /* Use CLR to zero a value so that cc0 is set as expected + for zero. */ + + if (ival[n] == 0) + { + avr_asm_len ("clr %0", &xdest[n], len, 1); + continue; + } + + if (clobber_val == ival[n] + && REGNO (clobber_reg) == REGNO (xdest[n])) + { + continue; + } + + /* LD_REGS can use LDI to move a constant value */ + + if (test_hard_reg_class (LD_REGS, xdest[n])) + { + xop[0] = xdest[n]; + xop[1] = xval; + avr_asm_len ("ldi %0,lo8(%1)", xop, len, 1); + continue; + } + + /* Try to reuse value already loaded in some lower byte. */ + + for (j = 0; j < n; j++) + if (ival[j] == ival[n]) + { + xop[0] = xdest[n]; + xop[1] = xdest[j]; + + avr_asm_len ("mov %0,%1", xop, len, 1); + done_byte = true; + break; + } + + if (done_byte) + continue; + + /* Need no clobber reg for -1: Use CLR/DEC */ + + if (-1 == ival[n]) + { + avr_asm_len ("clr %0" CR_TAB + "dec %0", &xdest[n], len, 2); + continue; + } + + /* Use T flag or INC to manage powers of 2 if we have + no clobber reg. 
*/ + + if (NULL_RTX == clobber_reg + && single_one_operand (xval, QImode)) + { + if (1 == ival[n]) + { + avr_asm_len ("clr %0" CR_TAB + "inc %0", &xdest[n], len, 2); + continue; + } + + xop[0] = xdest[n]; + xop[1] = GEN_INT (exact_log2 (ival[n] & GET_MODE_MASK (QImode))); + + gcc_assert (constm1_rtx != xop[1]); + + if (!set_p) + { + set_p = true; + avr_asm_len ("set", xop, len, 1); + } + + avr_asm_len ("clr %0" CR_TAB + "bld %0,%1", xop, len, 2); + continue; + } + + /* We actually need the LD_REGS clobber reg. */ + + gcc_assert (NULL_RTX != clobber_reg); + + xop[0] = xdest[n]; + xop[1] = xval; + xop[2] = clobber_reg; + clobber_val = ival[n]; + + avr_asm_len ("ldi %2,lo8(%1)" CR_TAB + "mov %0,%2", xop, len, 2); } - if (cnst && ((INTVAL (src) & 0xff000000) == 0)) - output_asm_insn (AS2 (mov, %D0, __zero_reg__), operands); - else + + /* If we cooked up a clobber reg above, restore it. */ + + if (cooked_clobber_p) { - output_asm_insn (AS2 (ldi, %2, hhi8(%1)), operands); - output_asm_insn (AS2 (mov, %D0, %2), operands); + avr_asm_len ("mov %0,__tmp_reg__", &clobber_reg, len, 1); } + return ""; } @@ -6536,19 +6663,12 @@ avr_init_builtins (void) DEF_BUILTIN ("__builtin_avr_delay_cycles", void_ftype_ulong, AVR_BUILTIN_DELAY_CYCLES); - if (AVR_HAVE_MUL) - { - /* FIXME: If !AVR_HAVE_MUL, make respective functions available - in libgcc. For fmul and fmuls this is straight forward with - upcoming fixed point support. */ - - DEF_BUILTIN ("__builtin_avr_fmul", uint_ftype_uchar_uchar, - AVR_BUILTIN_FMUL); - DEF_BUILTIN ("__builtin_avr_fmuls", int_ftype_char_char, - AVR_BUILTIN_FMULS); - DEF_BUILTIN ("__builtin_avr_fmulsu", int_ftype_char_uchar, - AVR_BUILTIN_FMULSU); - } + DEF_BUILTIN ("__builtin_avr_fmul", uint_ftype_uchar_uchar, + AVR_BUILTIN_FMUL); + DEF_BUILTIN ("__builtin_avr_fmuls", int_ftype_char_char, + AVR_BUILTIN_FMULS); + DEF_BUILTIN ("__builtin_avr_fmulsu", int_ftype_char_uchar, + AVR_BUILTIN_FMULSU); } #undef DEF_BUILTIN diff --git a/gcc/config/avr/avr.md b/gcc/config/avr/avr.md index a07992389cb..4637abf7d08 100644 --- a/gcc/config/avr/avr.md +++ b/gcc/config/avr/avr.md @@ -402,10 +402,10 @@ -(define_peephole2 ; movsi_lreg_const +(define_peephole2 ; *reload_insi [(match_scratch:QI 2 "d") (set (match_operand:SI 0 "l_register_operand" "") - (match_operand:SI 1 "immediate_operand" "")) + (match_operand:SI 1 "const_int_operand" "")) (match_dup 2)] "(operands[1] != const0_rtx && operands[1] != constm1_rtx)" @@ -416,12 +416,14 @@ ;; '*' because it is not used in rtl generation. 
(define_insn "*reload_insi" [(set (match_operand:SI 0 "register_operand" "=r") - (match_operand:SI 1 "immediate_operand" "i")) + (match_operand:SI 1 "const_int_operand" "n")) (clobber (match_operand:QI 2 "register_operand" "=&d"))] "reload_completed" - "* return output_reload_insisf (insn, operands, NULL);" + { + return output_reload_insisf (insn, operands, operands[2], NULL); + } [(set_attr "length" "8") - (set_attr "cc" "none")]) + (set_attr "cc" "clobber")]) (define_insn "*movsi" @@ -429,9 +431,11 @@ (match_operand:SI 1 "general_operand" "r,L,Qm,rL,i,i"))] "(register_operand (operands[0],SImode) || register_operand (operands[1],SImode) || const0_rtx == operands[1])" - "* return output_movsisf (insn, operands, NULL);" + { + return output_movsisf (insn, operands, NULL_RTX, NULL); + } [(set_attr "length" "4,4,8,9,4,10") - (set_attr "cc" "none,set_zn,clobber,clobber,none,clobber")]) + (set_attr "cc" "none,set_zn,clobber,clobber,clobber,clobber")]) ;; fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff ;; move floating point numbers (32 bit) @@ -452,12 +456,38 @@ (define_insn "*movsf" [(set (match_operand:SF 0 "nonimmediate_operand" "=r,r,r,Qm,!d,r") - (match_operand:SF 1 "general_operand" "r,G,Qm,r,F,F"))] + (match_operand:SF 1 "general_operand" "r,G,Qm,rG,F,F"))] "register_operand (operands[0], SFmode) - || register_operand (operands[1], SFmode)" - "* return output_movsisf (insn, operands, NULL);" + || register_operand (operands[1], SFmode) + || operands[1] == CONST0_RTX (SFmode)" + { + return output_movsisf (insn, operands, NULL_RTX, NULL); + } [(set_attr "length" "4,4,8,9,4,10") - (set_attr "cc" "none,set_zn,clobber,clobber,none,clobber")]) + (set_attr "cc" "none,set_zn,clobber,clobber,clobber,clobber")]) + +(define_peephole2 ; *reload_insf + [(match_scratch:QI 2 "d") + (set (match_operand:SF 0 "l_register_operand" "") + (match_operand:SF 1 "const_double_operand" "")) + (match_dup 2)] + "operands[1] != CONST0_RTX (SFmode)" + [(parallel [(set (match_dup 0) + (match_dup 1)) + (clobber (match_dup 2))])] + "") + +;; '*' because it is not used in rtl generation. 
+(define_insn "*reload_insf" + [(set (match_operand:SF 0 "register_operand" "=r") + (match_operand:SF 1 "const_double_operand" "F")) + (clobber (match_operand:QI 2 "register_operand" "=&d"))] + "reload_completed" + { + return output_reload_insisf (insn, operands, operands[2], NULL); + } + [(set_attr "length" "8") + (set_attr "cc" "clobber")]) ;;========================================================================= ;; move string (like memcpy) @@ -3394,7 +3424,27 @@ (set_attr "cc" "none")]) ;; FMUL -(define_insn "fmul" +(define_expand "fmul" + [(set (reg:QI 24) + (match_operand:QI 1 "register_operand" "")) + (set (reg:QI 25) + (match_operand:QI 2 "register_operand" "")) + (parallel [(set (reg:HI 22) + (unspec:HI [(reg:QI 24) + (reg:QI 25)] UNSPEC_FMUL)) + (clobber (reg:HI 24))]) + (set (match_operand:HI 0 "register_operand" "") + (reg:HI 22))] + "" + { + if (AVR_HAVE_MUL) + { + emit_insn (gen_fmul_insn (operand0, operand1, operand2)); + DONE; + } + }) + +(define_insn "fmul_insn" [(set (match_operand:HI 0 "register_operand" "=r") (unspec:HI [(match_operand:QI 1 "register_operand" "a") (match_operand:QI 2 "register_operand" "a")] @@ -3406,8 +3456,38 @@ [(set_attr "length" "3") (set_attr "cc" "clobber")]) +(define_insn "*fmul.call" + [(set (reg:HI 22) + (unspec:HI [(reg:QI 24) + (reg:QI 25)] UNSPEC_FMUL)) + (clobber (reg:HI 24))] + "!AVR_HAVE_MUL" + "%~call __fmul" + [(set_attr "type" "xcall") + (set_attr "cc" "clobber")]) + ;; FMULS -(define_insn "fmuls" +(define_expand "fmuls" + [(set (reg:QI 24) + (match_operand:QI 1 "register_operand" "")) + (set (reg:QI 25) + (match_operand:QI 2 "register_operand" "")) + (parallel [(set (reg:HI 22) + (unspec:HI [(reg:QI 24) + (reg:QI 25)] UNSPEC_FMULS)) + (clobber (reg:HI 24))]) + (set (match_operand:HI 0 "register_operand" "") + (reg:HI 22))] + "" + { + if (AVR_HAVE_MUL) + { + emit_insn (gen_fmuls_insn (operand0, operand1, operand2)); + DONE; + } + }) + +(define_insn "fmuls_insn" [(set (match_operand:HI 0 "register_operand" "=r") (unspec:HI [(match_operand:QI 1 "register_operand" "a") (match_operand:QI 2 "register_operand" "a")] @@ -3419,8 +3499,38 @@ [(set_attr "length" "3") (set_attr "cc" "clobber")]) +(define_insn "*fmuls.call" + [(set (reg:HI 22) + (unspec:HI [(reg:QI 24) + (reg:QI 25)] UNSPEC_FMULS)) + (clobber (reg:HI 24))] + "!AVR_HAVE_MUL" + "%~call __fmuls" + [(set_attr "type" "xcall") + (set_attr "cc" "clobber")]) + ;; FMULSU -(define_insn "fmulsu" +(define_expand "fmulsu" + [(set (reg:QI 24) + (match_operand:QI 1 "register_operand" "")) + (set (reg:QI 25) + (match_operand:QI 2 "register_operand" "")) + (parallel [(set (reg:HI 22) + (unspec:HI [(reg:QI 24) + (reg:QI 25)] UNSPEC_FMULSU)) + (clobber (reg:HI 24))]) + (set (match_operand:HI 0 "register_operand" "") + (reg:HI 22))] + "" + { + if (AVR_HAVE_MUL) + { + emit_insn (gen_fmulsu_insn (operand0, operand1, operand2)); + DONE; + } + }) + +(define_insn "fmulsu_insn" [(set (match_operand:HI 0 "register_operand" "=r") (unspec:HI [(match_operand:QI 1 "register_operand" "a") (match_operand:QI 2 "register_operand" "a")] @@ -3432,6 +3542,16 @@ [(set_attr "length" "3") (set_attr "cc" "clobber")]) +(define_insn "*fmulsu.call" + [(set (reg:HI 22) + (unspec:HI [(reg:QI 24) + (reg:QI 25)] UNSPEC_FMULSU)) + (clobber (reg:HI 24))] + "!AVR_HAVE_MUL" + "%~call __fmulsu" + [(set_attr "type" "xcall") + (set_attr "cc" "clobber")]) + ;; Some combiner patterns dealing with bits. 
;; See PR42210 diff --git a/gcc/config/avr/libgcc.S b/gcc/config/avr/libgcc.S index 9d13d9611b4..c2459d00e6b 100644 --- a/gcc/config/avr/libgcc.S +++ b/gcc/config/avr/libgcc.S @@ -1417,3 +1417,91 @@ DEFUN __ashldi3 ret ENDF __ashldi3 #endif /* defined (L_ashldi3) */ + + +/***********************************************************/ +;;; Softmul versions of FMUL, FMULS and FMULSU to implement +;;; __builtin_avr_fmul* if !AVR_HAVE_MUL +/***********************************************************/ + +#define A1 24 +#define B1 25 +#define C0 22 +#define C1 23 +#define A0 __tmp_reg__ + +#ifdef L_fmuls +;;; r23:r22 = fmuls (r24, r25) like in FMULS instruction +;;; Clobbers: r24, r25, __tmp_reg__ +DEFUN __fmuls + ;; A0.7 = negate result? + mov A0, A1 + eor A0, B1 + ;; B1 = |B1| + sbrc B1, 7 + neg B1 + XJMP __fmulsu_exit +ENDF __fmuls +#endif /* L_fmuls */ + +#ifdef L_fmulsu +;;; r23:r22 = fmulsu (r24, r25) like in FMULSU instruction +;;; Clobbers: r24, r25, __tmp_reg__ +DEFUN __fmulsu + ;; A0.7 = negate result? + mov A0, A1 +;; FALLTHRU +ENDF __fmulsu + +;; Helper for __fmuls and __fmulsu +DEFUN __fmulsu_exit + ;; A1 = |A1| + sbrc A1, 7 + neg A1 +#ifdef __AVR_HAVE_JMP_CALL__ + ;; Some cores have problem skipping 2-word instruction + tst A0 + brmi 1f +#else + sbrs A0, 7 +#endif /* __AVR_HAVE_JMP_CALL__ */ + XJMP __fmul +1: XCALL __fmul + ;; C = -C iff A0.7 = 1 + com C1 + neg C0 + sbci C1, -1 + ret +ENDF __fmulsu_exit +#endif /* L_fmulsu */ + + +#ifdef L_fmul +;;; r22:r23 = fmul (r24, r25) like in FMUL instruction +;;; Clobbers: r24, r25, __tmp_reg__ +DEFUN __fmul + ; clear result + clr C0 + clr C1 + clr A0 +1: tst B1 + ;; 1.0 = 0x80, so test for bit 7 of B to see if A must to be added to C. +2: brpl 3f + ;; C += A + add C0, A0 + adc C1, A1 +3: ;; A >>= 1 + lsr A1 + ror A0 + ;; B <<= 1 + lsl B1 + brne 2b + ret +ENDF __fmul +#endif /* L_fmul */ + +#undef A0 +#undef A1 +#undef B1 +#undef C0 +#undef C1 diff --git a/gcc/config/avr/t-avr b/gcc/config/avr/t-avr index 026ee10b10e..a5357f0ecf6 100644 --- a/gcc/config/avr/t-avr +++ b/gcc/config/avr/t-avr @@ -78,7 +78,8 @@ LIB1ASMFUNCS = \ _bswapdi2 \ _ashldi3 \ _ashrdi3 \ - _lshrdi3 + _lshrdi3 \ + _fmul _fmuls _fmulsu LIB2FUNCS_EXCLUDE = \ _clz diff --git a/gcc/config/host-solaris.c b/gcc/config/host-solaris.c index 12eab3c61b8..15f1d782e95 100644 --- a/gcc/config/host-solaris.c +++ b/gcc/config/host-solaris.c @@ -73,7 +73,7 @@ mmap_fixed (void *addr, size_t len, int prot, int flags, int fd, off_t off) #elif defined(__sparc__) # define TRY_EMPTY_VM_SPACE 0x80000000 #elif defined(__x86_64__) -# define TRY_EMPTY_VM_SPACE 0x8000000000000000 +# define TRY_EMPTY_VM_SPACE 0x80000000000 #elif defined(__i386__) # define TRY_EMPTY_VM_SPACE 0xB0000000 #else diff --git a/gcc/config/i386/i386.md b/gcc/config/i386/i386.md index 2b1da4c79d2..a52941bd856 100644 --- a/gcc/config/i386/i386.md +++ b/gcc/config/i386/i386.md @@ -5022,11 +5022,20 @@ if (GET_CODE (op1) == SUBREG) op1 = SUBREG_REG (op1); - if (GENERAL_REG_P (op1) && TARGET_INTER_UNIT_MOVES) + if (GENERAL_REG_P (op1)) { operands[4] = simplify_gen_subreg (V4SImode, operands[0], <MODE>mode, 0); - emit_insn (gen_sse2_loadld (operands[4], - CONST0_RTX (V4SImode), operands[1])); + if (TARGET_INTER_UNIT_MOVES) + emit_insn (gen_sse2_loadld (operands[4], + CONST0_RTX (V4SImode), operands[1])); + else + { + operands[5] = ix86_force_to_memory (GET_MODE (operands[1]), + operands[1]); + emit_insn (gen_sse2_loadld (operands[4], + CONST0_RTX (V4SImode), operands[5])); + ix86_free_from_memory (GET_MODE (operands[1])); + } } /* 
We can ignore possible trapping value in the high part of SSE register for non-trapping math. */ diff --git a/gcc/config/i386/sol2-bi.h b/gcc/config/i386/sol2-bi.h index 394ca580884..04feeb907ae 100644 --- a/gcc/config/i386/sol2-bi.h +++ b/gcc/config/i386/sol2-bi.h @@ -31,20 +31,27 @@ along with GCC; see the file COPYING3. If not see /* GNU as understands --32 and --64, but the native Solaris assembler requires -xarch=generic or -xarch=generic64 instead. */ -#undef ASM_SPEC #ifdef USE_GAS -#define ASM_SPEC "%{m32:--32} %{m64:--64} -s %(asm_cpu)" +#define ASM_CPU32_DEFAULT_SPEC "--32" +#define ASM_CPU64_DEFAULT_SPEC "--64" #else -#define ASM_SPEC "%{v:-V} %{Qy:} %{!Qn:-Qy} %{Ym,*} " \ - "%{m32:-xarch=generic} %{m64:-xarch=generic64} " \ - "-s %(asm_cpu)" +#define ASM_CPU32_DEFAULT_SPEC "-xarch=generic" +#define ASM_CPU64_DEFAULT_SPEC "-xarch=generic64" #endif +#undef ASM_CPU_SPEC +#define ASM_CPU_SPEC "%(asm_cpu_default)" + +/* Don't let i386/x86-64.h override i386/sol2.h version. Still cannot use + -K PIC with the Solaris 10+ assembler, it gives many warnings: + Absolute relocation is used for symbol "<symbol>" */ +#undef ASM_SPEC +#define ASM_SPEC ASM_SPEC_BASE + /* We do not need to search a special directory for startup files. */ #undef MD_STARTFILE_PREFIX -/* No 64-bit default configurations. */ -#define DEFAULT_ARCH32_P 1 +#define DEFAULT_ARCH32_P !TARGET_64BIT_DEFAULT #define ARCH64_SUBDIR "amd64" diff --git a/gcc/config/i386/sol2.h b/gcc/config/i386/sol2.h index f5e5c762612..5b4e3d78f58 100644 --- a/gcc/config/i386/sol2.h +++ b/gcc/config/i386/sol2.h @@ -59,18 +59,21 @@ along with GCC; see the file COPYING3. If not see #undef CPP_SPEC #define CPP_SPEC "%{,assembler-with-cpp:-P} %(cpp_subtarget)" +#define ASM_CPU_DEFAULT_SPEC "" + #define ASM_CPU_SPEC "" -/* Removed -K PIC from generic sol2.h ASM_SPEC: the Solaris 8 and 9 assembler - gives many warnings: R_386_32 relocation is used for symbol ".text", and +/* Don't include ASM_PIC_SPEC. While the Solaris 8 and 9 assembler accepts + -K PIC, it gives many warnings: + R_386_32 relocation is used for symbol "<symbol>" GNU as doesn't recognize -K at all. */ -/* FIXME: Perhaps split between common and CPU-specific parts? 
*/ #undef ASM_SPEC -#define ASM_SPEC "%{v:-V} %{Qy:} %{!Qn:-Qy} %{Ym,*} -s %(asm_cpu)" +#define ASM_SPEC ASM_SPEC_BASE #define SUBTARGET_CPU_EXTRA_SPECS \ { "cpp_subtarget", CPP_SUBTARGET_SPEC }, \ - { "asm_cpu", ASM_CPU_SPEC } + { "asm_cpu", ASM_CPU_SPEC }, \ + { "asm_cpu_default", ASM_CPU_DEFAULT_SPEC }, \ #undef SUBTARGET_EXTRA_SPECS #define SUBTARGET_EXTRA_SPECS \ diff --git a/gcc/config/ia64/ia64.c b/gcc/config/ia64/ia64.c index c06903d869f..2ebb225ec8b 100644 --- a/gcc/config/ia64/ia64.c +++ b/gcc/config/ia64/ia64.c @@ -319,7 +319,6 @@ static enum machine_mode ia64_promote_function_mode (const_tree, static void ia64_trampoline_init (rtx, tree, rtx); static void ia64_override_options_after_change (void); -static void ia64_dwarf_handle_frame_unspec (const char *, rtx, int); static tree ia64_builtin_decl (unsigned, bool); static reg_class_t ia64_preferred_reload_class (rtx, reg_class_t); @@ -551,8 +550,6 @@ static const struct attribute_spec ia64_attribute_table[] = #undef TARGET_GIMPLIFY_VA_ARG_EXPR #define TARGET_GIMPLIFY_VA_ARG_EXPR ia64_gimplify_va_arg -#undef TARGET_DWARF_HANDLE_FRAME_UNSPEC -#define TARGET_DWARF_HANDLE_FRAME_UNSPEC ia64_dwarf_handle_frame_unspec #undef TARGET_ASM_UNWIND_EMIT #define TARGET_ASM_UNWIND_EMIT ia64_asm_unwind_emit #undef TARGET_ASM_EMIT_EXCEPT_PERSONALITY @@ -3249,7 +3246,14 @@ ia64_expand_prologue (void) GEN_INT (current_frame_info.n_local_regs), GEN_INT (current_frame_info.n_output_regs), GEN_INT (current_frame_info.n_rotate_regs))); - RTX_FRAME_RELATED_P (insn) = (current_frame_info.r[reg_save_ar_pfs] != 0); + if (current_frame_info.r[reg_save_ar_pfs]) + { + RTX_FRAME_RELATED_P (insn) = 1; + add_reg_note (insn, REG_CFA_REGISTER, + gen_rtx_SET (VOIDmode, + ar_pfs_save_reg, + gen_rtx_REG (DImode, AR_PFS_REGNUM))); + } } /* Set up frame pointer, stack pointer, and spill iterators. */ @@ -3437,7 +3441,8 @@ ia64_expand_prologue (void) reg_emitted (reg_save_b0); insn = emit_move_insn (alt_reg, reg); RTX_FRAME_RELATED_P (insn) = 1; - add_reg_note (insn, REG_CFA_REGISTER, NULL_RTX); + add_reg_note (insn, REG_CFA_REGISTER, + gen_rtx_SET (VOIDmode, alt_reg, pc_rtx)); /* Even if we're not going to generate an epilogue, we still need to save the register so that EH works. */ @@ -3785,10 +3790,19 @@ ia64_expand_epilogue (int sibcall_p) if (current_frame_info.n_input_regs != 0) { rtx n_inputs = GEN_INT (current_frame_info.n_input_regs); + insn = emit_insn (gen_alloc (gen_rtx_REG (DImode, fp), const0_rtx, const0_rtx, n_inputs, const0_rtx)); RTX_FRAME_RELATED_P (insn) = 1; + + /* ??? We need to mark the alloc as frame-related so that it gets + passed into ia64_asm_unwind_emit for ia64-specific unwinding. + But there's nothing dwarf2 related to be done wrt the register + windows. If we do nothing, dwarf2out will abort on the UNSPEC; + the empty parallel means dwarf2out will not see anything. */ + add_reg_note (insn, REG_FRAME_RELATED_EXPR, + gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (0))); } } } @@ -9612,70 +9626,11 @@ static bool need_copy_state; # define MAX_ARTIFICIAL_LABEL_BYTES 30 #endif -/* Emit a debugging label after a call-frame-related insn. We'd - rather output the label right away, but we'd have to output it - after, not before, the instruction, and the instruction has not - been output yet. So we emit the label after the insn, delete it to - avoid introducing basic blocks, and mark it as preserved, such that - it is still output, given that it is referenced in debug info. 
*/ - -static const char * -ia64_emit_deleted_label_after_insn (rtx insn) -{ - char label[MAX_ARTIFICIAL_LABEL_BYTES]; - rtx lb = gen_label_rtx (); - rtx label_insn = emit_label_after (lb, insn); - - LABEL_PRESERVE_P (lb) = 1; - - delete_insn (label_insn); - - ASM_GENERATE_INTERNAL_LABEL (label, "L", CODE_LABEL_NUMBER (label_insn)); - - return xstrdup (label); -} - -/* Define the CFA after INSN with the steady-state definition. */ - -static void -ia64_dwarf2out_def_steady_cfa (rtx insn, bool frame) -{ - rtx fp = frame_pointer_needed - ? hard_frame_pointer_rtx - : stack_pointer_rtx; - const char *label = ia64_emit_deleted_label_after_insn (insn); - - if (!frame) - return; - - dwarf2out_def_cfa - (label, REGNO (fp), - ia64_initial_elimination_offset - (REGNO (arg_pointer_rtx), REGNO (fp)) - + ARG_POINTER_CFA_OFFSET (current_function_decl)); -} - -/* All we need to do here is avoid a crash in the generic dwarf2 - processing. The real CFA definition is set up above. */ - -static void -ia64_dwarf_handle_frame_unspec (const char * ARG_UNUSED (label), - rtx ARG_UNUSED (pattern), - int index) -{ - gcc_assert (index == UNSPECV_ALLOC); -} - -/* The generic dwarf2 frame debug info generator does not define a - separate region for the very end of the epilogue, so refrain from - doing so in the IA64-specific code as well. */ - -#define IA64_CHANGE_CFA_IN_EPILOGUE 0 - /* The function emits unwind directives for the start of an epilogue. */ static void -process_epilogue (FILE *asm_out_file, rtx insn, bool unwind, bool frame) +process_epilogue (FILE *asm_out_file, rtx insn ATTRIBUTE_UNUSED, + bool unwind, bool frame ATTRIBUTE_UNUSED) { /* If this isn't the last block of the function, then we need to label the current state, and copy it back in at the start of the next block. */ @@ -9690,9 +9645,6 @@ process_epilogue (FILE *asm_out_file, rtx insn, bool unwind, bool frame) if (unwind) fprintf (asm_out_file, "\t.restore sp\n"); - if (IA64_CHANGE_CFA_IN_EPILOGUE && frame) - dwarf2out_def_cfa (ia64_emit_deleted_label_after_insn (insn), - STACK_POINTER_REGNUM, INCOMING_FRAME_SP_OFFSET); } /* This function processes a SET pattern for REG_CFA_ADJUST_CFA. */ @@ -9720,7 +9672,6 @@ process_cfa_adjust_cfa (FILE *asm_out_file, rtx pat, rtx insn, fprintf (asm_out_file, "\t.fframe "HOST_WIDE_INT_PRINT_DEC"\n", -INTVAL (op1)); - ia64_dwarf2out_def_steady_cfa (insn, frame); } else process_epilogue (asm_out_file, insn, unwind, frame); @@ -9739,7 +9690,6 @@ process_cfa_adjust_cfa (FILE *asm_out_file, rtx pat, rtx insn, if (unwind) fprintf (asm_out_file, "\t.vframe r%d\n", ia64_dbx_register_number (REGNO (dest))); - ia64_dwarf2out_def_steady_cfa (insn, frame); } else gcc_unreachable (); @@ -9752,20 +9702,22 @@ process_cfa_register (FILE *asm_out_file, rtx pat, bool unwind) { rtx dest = SET_DEST (pat); rtx src = SET_SRC (pat); - int dest_regno = REGNO (dest); - int src_regno = REGNO (src); + int src_regno; - switch (src_regno) + if (src == pc_rtx) { - case BR_REG (0): /* Saving return address pointer. 
*/ - gcc_assert (dest_regno == current_frame_info.r[reg_save_b0]); if (unwind) fprintf (asm_out_file, "\t.save rp, r%d\n", ia64_dbx_register_number (dest_regno)); - break; + return; + } + + src_regno = REGNO (src); + switch (src_regno) + { case PR_REG (0): gcc_assert (dest_regno == current_frame_info.r[reg_save_pr]); if (unwind) @@ -9939,8 +9891,6 @@ ia64_asm_unwind_emit (FILE *asm_out_file, rtx insn) fprintf (asm_out_file, "\t.copy_state %d\n", cfun->machine->state_num); } - if (IA64_CHANGE_CFA_IN_EPILOGUE) - ia64_dwarf2out_def_steady_cfa (insn, frame); need_copy_state = false; } } diff --git a/gcc/config/rs6000/rs6000-protos.h b/gcc/config/rs6000/rs6000-protos.h index 36f2a4c124a..357f7e72005 100644 --- a/gcc/config/rs6000/rs6000-protos.h +++ b/gcc/config/rs6000/rs6000-protos.h @@ -171,6 +171,8 @@ extern unsigned int rs6000_dbx_register_number (unsigned int); extern void rs6000_emit_epilogue (int); extern void rs6000_emit_eh_reg_restore (rtx, rtx); extern const char * output_isel (rtx *); +extern void rs6000_call_indirect_aix (rtx, rtx, rtx); +extern bool rs6000_save_toc_in_prologue_p (void); extern void rs6000_aix_asm_output_dwarf_table_ref (char *); diff --git a/gcc/config/rs6000/rs6000.c b/gcc/config/rs6000/rs6000.c index a87280c8ec4..65de2e3f2fd 100644 --- a/gcc/config/rs6000/rs6000.c +++ b/gcc/config/rs6000/rs6000.c @@ -130,6 +130,9 @@ typedef struct GTY(()) machine_function int ra_need_lr; /* Cache lr_save_p after expansion of builtin_eh_return. */ int lr_save_state; + /* Whether we need to save the TOC to the reserved stack location in the + function prologue. */ + bool save_toc_in_prologue; /* Offset from virtual_stack_vars_rtx to the start of the ABI_V4 varargs save area. */ HOST_WIDE_INT varargs_save_offset; @@ -20325,7 +20328,7 @@ rs6000_emit_prologue (void) JUMP_LABEL (jump) = toc_save_done; LABEL_NUSES (toc_save_done) += 1; - emit_frame_save (frame_reg_rtx, frame_ptr_rtx, reg_mode, 2, + emit_frame_save (frame_reg_rtx, frame_ptr_rtx, reg_mode, TOC_REGNUM, sp_offset + 5 * reg_size, info->total_size); emit_label (toc_save_done); if (using_static_chain_p) @@ -20516,6 +20519,11 @@ rs6000_emit_prologue (void) emit_move_insn (lr, gen_rtx_REG (Pmode, 0)); } #endif + + /* If we need to, save the TOC register after doing the stack setup. */ + if (rs6000_save_toc_in_prologue_p ()) + emit_frame_save (sp_reg_rtx, sp_reg_rtx, reg_mode, TOC_REGNUM, + 5 * reg_size, info->total_size); } /* Write function prologue. */ @@ -24469,9 +24477,14 @@ rs6000_trampoline_init (rtx m_tramp, tree fndecl, rtx cxt) /* Under AIX, just build the 3 word function descriptor */ case ABI_AIX: { - rtx fnmem = gen_const_mem (Pmode, force_reg (Pmode, fnaddr)); - rtx fn_reg = gen_reg_rtx (Pmode); - rtx toc_reg = gen_reg_rtx (Pmode); + rtx fnmem, fn_reg, toc_reg; + + if (!TARGET_R11) + error ("-mno-r11 must not be used if you have trampolines"); + + fnmem = gen_const_mem (Pmode, force_reg (Pmode, fnaddr)); + fn_reg = gen_reg_rtx (Pmode); + toc_reg = gen_reg_rtx (Pmode); /* Macro to shorten the code expansions below. */ # define MEM_PLUS(MEM, OFFSET) adjust_address (MEM, Pmode, OFFSET) @@ -27760,4 +27773,132 @@ rs6000_legitimate_constant_p (enum machine_mode mode, rtx x) || easy_vector_constant (x, mode)); } + +/* A function pointer under AIX is a pointer to a data area whose first word + contains the actual address of the function, whose second word contains a + pointer to its TOC, and whose third word contains a value to place in the + static chain register (r11). 
Note that if we load the static chain, our + "trampoline" need not have any executable code. */ + +void +rs6000_call_indirect_aix (rtx value, rtx func_desc, rtx flag) +{ + rtx func_addr; + rtx toc_reg; + rtx sc_reg; + rtx stack_ptr; + rtx stack_toc_offset; + rtx stack_toc_mem; + rtx func_toc_offset; + rtx func_toc_mem; + rtx func_sc_offset; + rtx func_sc_mem; + rtx insn; + rtx (*call_func) (rtx, rtx, rtx, rtx); + rtx (*call_value_func) (rtx, rtx, rtx, rtx, rtx); + + stack_ptr = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM); + toc_reg = gen_rtx_REG (Pmode, TOC_REGNUM); + + /* Load up address of the actual function. */ + func_desc = force_reg (Pmode, func_desc); + func_addr = gen_reg_rtx (Pmode); + emit_move_insn (func_addr, gen_rtx_MEM (Pmode, func_desc)); + + if (TARGET_32BIT) + { + + stack_toc_offset = GEN_INT (TOC_SAVE_OFFSET_32BIT); + func_toc_offset = GEN_INT (AIX_FUNC_DESC_TOC_32BIT); + func_sc_offset = GEN_INT (AIX_FUNC_DESC_SC_32BIT); + if (TARGET_R11) + { + call_func = gen_call_indirect_aix32bit; + call_value_func = gen_call_value_indirect_aix32bit; + } + else + { + call_func = gen_call_indirect_aix32bit_nor11; + call_value_func = gen_call_value_indirect_aix32bit_nor11; + } + } + else + { + stack_toc_offset = GEN_INT (TOC_SAVE_OFFSET_64BIT); + func_toc_offset = GEN_INT (AIX_FUNC_DESC_TOC_64BIT); + func_sc_offset = GEN_INT (AIX_FUNC_DESC_SC_64BIT); + if (TARGET_R11) + { + call_func = gen_call_indirect_aix64bit; + call_value_func = gen_call_value_indirect_aix64bit; + } + else + { + call_func = gen_call_indirect_aix64bit_nor11; + call_value_func = gen_call_value_indirect_aix64bit_nor11; + } + } + + /* Reserved spot to store the TOC. */ + stack_toc_mem = gen_frame_mem (Pmode, + gen_rtx_PLUS (Pmode, + stack_ptr, + stack_toc_offset)); + + gcc_assert (cfun); + gcc_assert (cfun->machine); + + /* Can we optimize saving the TOC in the prologue or do we need to do it at + every call? */ + if (TARGET_SAVE_TOC_INDIRECT && !cfun->calls_alloca + && !cfun->calls_setjmp && !cfun->has_nonlocal_label + && !cfun->can_throw_non_call_exceptions + && ((flags_from_decl_or_type (cfun->decl) & ECF_NOTHROW) == ECF_NOTHROW)) + cfun->machine->save_toc_in_prologue = true; + + else + { + MEM_VOLATILE_P (stack_toc_mem) = 1; + emit_move_insn (stack_toc_mem, toc_reg); + } + + /* Calculate the address to load the TOC of the called function. We don't + actually load this until the split after reload. */ + func_toc_mem = gen_rtx_MEM (Pmode, + gen_rtx_PLUS (Pmode, + func_desc, + func_toc_offset)); + + /* If we have a static chain, load it up. */ + if (TARGET_R11) + { + func_sc_mem = gen_rtx_MEM (Pmode, + gen_rtx_PLUS (Pmode, + func_desc, + func_sc_offset)); + + sc_reg = gen_rtx_REG (Pmode, STATIC_CHAIN_REGNUM); + emit_move_insn (sc_reg, func_sc_mem); + } + + /* Create the call. */ + if (value) + insn = call_value_func (value, func_addr, flag, func_toc_mem, + stack_toc_mem); + else + insn = call_func (func_addr, flag, func_toc_mem, stack_toc_mem); + + emit_call_insn (insn); + return; +} + +/* Return whether we need to always update the saved TOC pointer when we update + the stack pointer. 
*/ + +bool +rs6000_save_toc_in_prologue_p (void) +{ + return (cfun && cfun->machine && cfun->machine->save_toc_in_prologue); +} + #include "gt-rs6000.h" diff --git a/gcc/config/rs6000/rs6000.md b/gcc/config/rs6000/rs6000.md index e70598d9f3e..8c0e299a2e7 100644 --- a/gcc/config/rs6000/rs6000.md +++ b/gcc/config/rs6000/rs6000.md @@ -27,9 +27,14 @@ ;; (define_constants - [(MQ_REGNO 64) + [(STACK_POINTER_REGNUM 1) + (TOC_REGNUM 2) + (STATIC_CHAIN_REGNUM 11) + (HARD_FRAME_POINTER_REGNUM 31) + (MQ_REGNO 64) (LR_REGNO 65) (CTR_REGNO 66) + (ARG_POINTER_REGNUM 67) (CR0_REGNO 68) (CR1_REGNO 69) (CR2_REGNO 70) @@ -46,7 +51,19 @@ (VSCR_REGNO 110) (SPE_ACC_REGNO 111) (SPEFSCR_REGNO 112) - (SFP_REGNO 113) + (FRAME_POINTER_REGNUM 113) + + ; ABI defined stack offsets for storing the TOC pointer with AIX calls. + (TOC_SAVE_OFFSET_32BIT 20) + (TOC_SAVE_OFFSET_64BIT 40) + + ; Function TOC offset in the AIX function descriptor. + (AIX_FUNC_DESC_TOC_32BIT 4) + (AIX_FUNC_DESC_TOC_64BIT 8) + + ; Static chain offset in the AIX function descriptor. + (AIX_FUNC_DESC_SC_32BIT 8) + (AIX_FUNC_DESC_SC_64BIT 16) ]) ;; @@ -267,6 +284,9 @@ (define_mode_attr mptrsize [(SI "si") (DI "di")]) +(define_mode_attr ptrload [(SI "{l|lwz}") + (DI "ld")]) + (define_mode_attr rreg [(SF "f") (DF "ws") (V4SF "wf") @@ -12178,87 +12198,7 @@ "TARGET_ELF && TARGET_CMODEL != CMODEL_SMALL" "{cal %0,%2@l(%1)|addi %0,%1,%2@l}") -;; A function pointer under AIX is a pointer to a data area whose first word -;; contains the actual address of the function, whose second word contains a -;; pointer to its TOC, and whose third word contains a value to place in the -;; static chain register (r11). Note that if we load the static chain, our -;; "trampoline" need not have any executable code. - -(define_expand "call_indirect_aix32" - [(set (match_dup 2) - (mem:SI (match_operand:SI 0 "gpc_reg_operand" ""))) - (set (mem:SI (plus:SI (reg:SI 1) (const_int 20))) - (reg:SI 2)) - (set (reg:SI 11) - (mem:SI (plus:SI (match_dup 0) - (const_int 8)))) - (parallel [(call (mem:SI (match_dup 2)) - (match_operand 1 "" "")) - (use (mem:SI (plus:SI (match_dup 0) (const_int 4)))) - (use (reg:SI 11)) - (use (mem:SI (plus:SI (reg:SI 1) (const_int 20)))) - (clobber (reg:SI LR_REGNO))])] - "TARGET_32BIT" - " -{ operands[2] = gen_reg_rtx (SImode); }") - -(define_expand "call_indirect_aix64" - [(set (match_dup 2) - (mem:DI (match_operand:DI 0 "gpc_reg_operand" ""))) - (set (mem:DI (plus:DI (reg:DI 1) (const_int 40))) - (reg:DI 2)) - (set (reg:DI 11) - (mem:DI (plus:DI (match_dup 0) - (const_int 16)))) - (parallel [(call (mem:SI (match_dup 2)) - (match_operand 1 "" "")) - (use (mem:DI (plus:DI (match_dup 0) (const_int 8)))) - (use (reg:DI 11)) - (use (mem:DI (plus:DI (reg:DI 1) (const_int 40)))) - (clobber (reg:SI LR_REGNO))])] - "TARGET_64BIT" - " -{ operands[2] = gen_reg_rtx (DImode); }") - -(define_expand "call_value_indirect_aix32" - [(set (match_dup 3) - (mem:SI (match_operand:SI 1 "gpc_reg_operand" ""))) - (set (mem:SI (plus:SI (reg:SI 1) (const_int 20))) - (reg:SI 2)) - (set (reg:SI 11) - (mem:SI (plus:SI (match_dup 1) - (const_int 8)))) - (parallel [(set (match_operand 0 "" "") - (call (mem:SI (match_dup 3)) - (match_operand 2 "" ""))) - (use (mem:SI (plus:SI (match_dup 1) (const_int 4)))) - (use (reg:SI 11)) - (use (mem:SI (plus:SI (reg:SI 1) (const_int 20)))) - (clobber (reg:SI LR_REGNO))])] - "TARGET_32BIT" - " -{ operands[3] = gen_reg_rtx (SImode); }") - -(define_expand "call_value_indirect_aix64" - [(set (match_dup 3) - (mem:DI (match_operand:DI 1 
"gpc_reg_operand" ""))) - (set (mem:DI (plus:DI (reg:DI 1) (const_int 40))) - (reg:DI 2)) - (set (reg:DI 11) - (mem:DI (plus:DI (match_dup 1) - (const_int 16)))) - (parallel [(set (match_operand 0 "" "") - (call (mem:SI (match_dup 3)) - (match_operand 2 "" ""))) - (use (mem:DI (plus:DI (match_dup 1) (const_int 8)))) - (use (reg:DI 11)) - (use (mem:DI (plus:DI (reg:DI 1) (const_int 40)))) - (clobber (reg:SI LR_REGNO))])] - "TARGET_64BIT" - " -{ operands[3] = gen_reg_rtx (DImode); }") - -;; Now the definitions for the call and call_value insns +;; Call and call_value insns (define_expand "call" [(parallel [(call (mem:SI (match_operand 0 "address_operand" "")) (match_operand 1 "" "")) @@ -12294,13 +12234,7 @@ case ABI_AIX: /* AIX function pointers are really pointers to a three word area. */ - emit_call_insn (TARGET_32BIT - ? gen_call_indirect_aix32 (force_reg (SImode, - operands[0]), - operands[1]) - : gen_call_indirect_aix64 (force_reg (DImode, - operands[0]), - operands[1])); + rs6000_call_indirect_aix (NULL_RTX, operands[0], operands[1]); DONE; default: @@ -12345,15 +12279,7 @@ case ABI_AIX: /* AIX function pointers are really pointers to a three word area. */ - emit_call_insn (TARGET_32BIT - ? gen_call_value_indirect_aix32 (operands[0], - force_reg (SImode, - operands[1]), - operands[2]) - : gen_call_value_indirect_aix64 (operands[0], - force_reg (DImode, - operands[1]), - operands[2])); + rs6000_call_indirect_aix (operands[0], operands[1], operands[2]); DONE; default: @@ -12447,149 +12373,202 @@ [(set_attr "type" "branch") (set_attr "length" "4,8")]) -;; Call to function which may be in another module. Restore the TOC -;; pointer (r2) after the call unless this is System V. -;; Operand2 is nonzero if we are using the V.4 calling sequence and -;; either the function was not prototyped, or it was prototyped as a -;; variable argument function. It is > 0 if FP registers were passed -;; and < 0 if they were not. +;; Call to indirect functions with the AIX abi using a 3 word descriptor. 
+;; Operand0 is the addresss of the function to call +;; Operand1 is the flag for System V.4 for unprototyped or FP registers +;; Operand2 is the location in the function descriptor to load r2 from +;; Operand3 is the stack location to hold the current TOC pointer -(define_insn_and_split "*call_indirect_nonlocal_aix32_internal" - [(call (mem:SI (match_operand:SI 0 "register_operand" "c,*l")) - (match_operand 1 "" "g,g")) - (use (mem:SI (plus:SI (match_operand:SI 2 "register_operand" "b,b") (const_int 4)))) - (use (reg:SI 11)) - (use (mem:SI (plus:SI (reg:SI 1) (const_int 20)))) - (clobber (reg:SI LR_REGNO))] - "TARGET_32BIT && DEFAULT_ABI == ABI_AIX" +(define_insn_and_split "call_indirect_aix<ptrsize>" + [(call (mem:SI (match_operand:P 0 "register_operand" "c,*l")) + (match_operand 1 "" "g,g")) + (use (match_operand:P 2 "memory_operand" "m,m")) + (use (match_operand:P 3 "memory_operand" "m,m")) + (use (reg:P STATIC_CHAIN_REGNUM)) + (clobber (reg:P LR_REGNO))] + "DEFAULT_ABI == ABI_AIX && TARGET_R11" "#" "&& reload_completed" - [(set (reg:SI 2) - (mem:SI (plus:SI (match_dup 2) (const_int 4)))) + [(set (reg:P TOC_REGNUM) (match_dup 2)) (parallel [(call (mem:SI (match_dup 0)) (match_dup 1)) - (use (reg:SI 2)) - (use (reg:SI 11)) - (set (reg:SI 2) - (mem:SI (plus:SI (reg:SI 1) (const_int 20)))) - (clobber (reg:SI LR_REGNO))])] + (use (reg:P TOC_REGNUM)) + (use (reg:P STATIC_CHAIN_REGNUM)) + (use (match_dup 3)) + (set (reg:P TOC_REGNUM) (match_dup 3)) + (clobber (reg:P LR_REGNO))])] "" [(set_attr "type" "jmpreg") (set_attr "length" "12")]) -(define_insn "*call_indirect_nonlocal_aix32" - [(call (mem:SI (match_operand:SI 0 "register_operand" "c,*l")) +(define_insn "*call_indirect_aix<ptrsize>_internal" + [(call (mem:SI (match_operand:P 0 "register_operand" "c,*l")) (match_operand 1 "" "g,g")) - (use (reg:SI 2)) - (use (reg:SI 11)) - (set (reg:SI 2) - (mem:SI (plus:SI (reg:SI 1) (const_int 20)))) - (clobber (reg:SI LR_REGNO))] - "TARGET_32BIT && DEFAULT_ABI == ABI_AIX && reload_completed" - "b%T0l\;{l|lwz} 2,20(1)" + (use (reg:P TOC_REGNUM)) + (use (reg:P STATIC_CHAIN_REGNUM)) + (use (match_operand:P 2 "memory_operand" "m,m")) + (set (reg:P TOC_REGNUM) (match_dup 2)) + (clobber (reg:P LR_REGNO))] + "DEFAULT_ABI == ABI_AIX && reload_completed && TARGET_R11" + "b%T0l\;<ptrload> 2,%2" [(set_attr "type" "jmpreg") (set_attr "length" "8")]) -(define_insn "*call_nonlocal_aix32" - [(call (mem:SI (match_operand:SI 0 "symbol_ref_operand" "s")) - (match_operand 1 "" "g")) - (use (match_operand:SI 2 "immediate_operand" "O")) - (clobber (reg:SI LR_REGNO))] - "TARGET_32BIT - && DEFAULT_ABI == ABI_AIX - && (INTVAL (operands[2]) & CALL_LONG) == 0" - "bl %z0\;%." 
- [(set_attr "type" "branch") - (set_attr "length" "8")]) - -(define_insn_and_split "*call_indirect_nonlocal_aix64_internal" - [(call (mem:SI (match_operand:DI 0 "register_operand" "c,*l")) - (match_operand 1 "" "g,g")) - (use (mem:DI (plus:DI (match_operand:DI 2 "register_operand" "b,b") - (const_int 8)))) - (use (reg:DI 11)) - (use (mem:DI (plus:DI (reg:DI 1) (const_int 40)))) - (clobber (reg:SI LR_REGNO))] - "TARGET_64BIT && DEFAULT_ABI == ABI_AIX" +;; Like call_indirect_aix<ptrsize>, except don't load the static chain +;; Operand0 is the addresss of the function to call +;; Operand1 is the flag for System V.4 for unprototyped or FP registers +;; Operand2 is the location in the function descriptor to load r2 from +;; Operand3 is the stack location to hold the current TOC pointer + +(define_insn_and_split "call_indirect_aix<ptrsize>_nor11" + [(call (mem:SI (match_operand:P 0 "register_operand" "c,*l")) + (match_operand 1 "" "g,g")) + (use (match_operand:P 2 "memory_operand" "m,m")) + (use (match_operand:P 3 "memory_operand" "m,m")) + (clobber (reg:P LR_REGNO))] + "DEFAULT_ABI == ABI_AIX && !TARGET_R11" "#" "&& reload_completed" - [(set (reg:DI 2) - (mem:DI (plus:DI (match_dup 2) (const_int 8)))) + [(set (reg:P TOC_REGNUM) (match_dup 2)) (parallel [(call (mem:SI (match_dup 0)) (match_dup 1)) - (use (reg:DI 2)) - (use (reg:DI 11)) - (set (reg:DI 2) - (mem:DI (plus:DI (reg:DI 1) (const_int 40)))) - (clobber (reg:SI LR_REGNO))])] + (use (reg:P TOC_REGNUM)) + (use (match_dup 3)) + (set (reg:P TOC_REGNUM) (match_dup 3)) + (clobber (reg:P LR_REGNO))])] "" [(set_attr "type" "jmpreg") (set_attr "length" "12")]) -(define_insn "*call_indirect_nonlocal_aix64" - [(call (mem:SI (match_operand:DI 0 "register_operand" "c,*l")) +(define_insn "*call_indirect_aix<ptrsize>_internal2" + [(call (mem:SI (match_operand:P 0 "register_operand" "c,*l")) (match_operand 1 "" "g,g")) - (use (reg:DI 2)) - (use (reg:DI 11)) - (set (reg:DI 2) - (mem:DI (plus:DI (reg:DI 1) (const_int 40)))) - (clobber (reg:SI LR_REGNO))] - "TARGET_64BIT && DEFAULT_ABI == ABI_AIX && reload_completed" - "b%T0l\;ld 2,40(1)" + (use (reg:P TOC_REGNUM)) + (use (match_operand:P 2 "memory_operand" "m,m")) + (set (reg:P TOC_REGNUM) (match_dup 2)) + (clobber (reg:P LR_REGNO))] + "DEFAULT_ABI == ABI_AIX && reload_completed && !TARGET_R11" + "b%T0l\;<ptrload> 2,%2" [(set_attr "type" "jmpreg") (set_attr "length" "8")]) -(define_insn "*call_nonlocal_aix64" - [(call (mem:SI (match_operand:DI 0 "symbol_ref_operand" "s")) - (match_operand 1 "" "g")) - (use (match_operand:SI 2 "immediate_operand" "O")) - (clobber (reg:SI LR_REGNO))] - "TARGET_64BIT - && DEFAULT_ABI == ABI_AIX - && (INTVAL (operands[2]) & CALL_LONG) == 0" - "bl %z0\;%." 
- [(set_attr "type" "branch") +;; Operand0 is the return result of the function +;; Operand1 is the addresss of the function to call +;; Operand2 is the flag for System V.4 for unprototyped or FP registers +;; Operand3 is the location in the function descriptor to load r2 from +;; Operand4 is the stack location to hold the current TOC pointer + +(define_insn_and_split "call_value_indirect_aix<ptrsize>" + [(set (match_operand 0 "" "") + (call (mem:SI (match_operand:P 1 "register_operand" "c,*l")) + (match_operand 2 "" "g,g"))) + (use (match_operand:P 3 "memory_operand" "m,m")) + (use (match_operand:P 4 "memory_operand" "m,m")) + (use (reg:P STATIC_CHAIN_REGNUM)) + (clobber (reg:P LR_REGNO))] + "DEFAULT_ABI == ABI_AIX && TARGET_R11" + "#" + "&& reload_completed" + [(set (reg:P TOC_REGNUM) (match_dup 3)) + (parallel [(set (match_dup 0) + (call (mem:SI (match_dup 1)) + (match_dup 2))) + (use (reg:P TOC_REGNUM)) + (use (reg:P STATIC_CHAIN_REGNUM)) + (use (match_dup 4)) + (set (reg:P TOC_REGNUM) (match_dup 4)) + (clobber (reg:P LR_REGNO))])] + "" + [(set_attr "type" "jmpreg") + (set_attr "length" "12")]) + +(define_insn "*call_value_indirect_aix<ptrsize>_internal" + [(set (match_operand 0 "" "") + (call (mem:SI (match_operand:P 1 "register_operand" "c,*l")) + (match_operand 2 "" "g,g"))) + (use (reg:P TOC_REGNUM)) + (use (reg:P STATIC_CHAIN_REGNUM)) + (use (match_operand:P 3 "memory_operand" "m,m")) + (set (reg:P TOC_REGNUM) (match_dup 3)) + (clobber (reg:P LR_REGNO))] + "DEFAULT_ABI == ABI_AIX && reload_completed && TARGET_R11" + "b%T1l\;<ptrload> 2,%3" + [(set_attr "type" "jmpreg") (set_attr "length" "8")]) -(define_insn_and_split "*call_value_indirect_nonlocal_aix32_internal" +;; Like call_value_indirect_aix<ptrsize>, but don't load the static chain +;; Operand0 is the return result of the function +;; Operand1 is the addresss of the function to call +;; Operand2 is the flag for System V.4 for unprototyped or FP registers +;; Operand3 is the location in the function descriptor to load r2 from +;; Operand4 is the stack location to hold the current TOC pointer + +(define_insn_and_split "call_value_indirect_aix<ptrsize>_nor11" [(set (match_operand 0 "" "") - (call (mem:SI (match_operand:SI 1 "register_operand" "c,*l")) - (match_operand 2 "" "g,g"))) - (use (mem:SI (plus:SI (match_operand:SI 3 "register_operand" "b,b") - (const_int 4)))) - (use (reg:SI 11)) - (use (mem:SI (plus:SI (reg:SI 1) (const_int 20)))) - (clobber (reg:SI LR_REGNO))] - "TARGET_32BIT && DEFAULT_ABI == ABI_AIX" + (call (mem:SI (match_operand:P 1 "register_operand" "c,*l")) + (match_operand 2 "" "g,g"))) + (use (match_operand:P 3 "memory_operand" "m,m")) + (use (match_operand:P 4 "memory_operand" "m,m")) + (clobber (reg:P LR_REGNO))] + "DEFAULT_ABI == ABI_AIX && !TARGET_R11" "#" "&& reload_completed" - [(set (reg:SI 2) - (mem:SI (plus:SI (match_dup 3) (const_int 4)))) - (parallel [(set (match_dup 0) (call (mem:SI (match_dup 1)) - (match_dup 2))) - (use (reg:SI 2)) - (use (reg:SI 11)) - (set (reg:SI 2) - (mem:SI (plus:SI (reg:SI 1) (const_int 20)))) - (clobber (reg:SI LR_REGNO))])] + [(set (reg:P TOC_REGNUM) (match_dup 3)) + (parallel [(set (match_dup 0) + (call (mem:SI (match_dup 1)) + (match_dup 2))) + (use (reg:P TOC_REGNUM)) + (use (match_dup 4)) + (set (reg:P TOC_REGNUM) (match_dup 4)) + (clobber (reg:P LR_REGNO))])] "" [(set_attr "type" "jmpreg") (set_attr "length" "12")]) -(define_insn "*call_value_indirect_nonlocal_aix32" +(define_insn "*call_value_indirect_aix<ptrsize>_internal2" [(set (match_operand 0 "" "") - (call 
(mem:SI (match_operand:SI 1 "register_operand" "c,*l")) + (call (mem:SI (match_operand:P 1 "register_operand" "c,*l")) (match_operand 2 "" "g,g"))) - (use (reg:SI 2)) - (use (reg:SI 11)) - (set (reg:SI 2) - (mem:SI (plus:SI (reg:SI 1) (const_int 20)))) - (clobber (reg:SI LR_REGNO))] - "TARGET_32BIT && DEFAULT_ABI == ABI_AIX && reload_completed" - "b%T1l\;{l|lwz} 2,20(1)" + (use (reg:P TOC_REGNUM)) + (use (match_operand:P 3 "memory_operand" "m,m")) + (set (reg:P TOC_REGNUM) (match_dup 3)) + (clobber (reg:P LR_REGNO))] + "DEFAULT_ABI == ABI_AIX && reload_completed && !TARGET_R11" + "b%T1l\;<ptrload> 2,%3" [(set_attr "type" "jmpreg") (set_attr "length" "8")]) +;; Call to function which may be in another module. Restore the TOC +;; pointer (r2) after the call unless this is System V. +;; Operand2 is nonzero if we are using the V.4 calling sequence and +;; either the function was not prototyped, or it was prototyped as a +;; variable argument function. It is > 0 if FP registers were passed +;; and < 0 if they were not. + +(define_insn "*call_nonlocal_aix32" + [(call (mem:SI (match_operand:SI 0 "symbol_ref_operand" "s")) + (match_operand 1 "" "g")) + (use (match_operand:SI 2 "immediate_operand" "O")) + (clobber (reg:SI LR_REGNO))] + "TARGET_32BIT + && DEFAULT_ABI == ABI_AIX + && (INTVAL (operands[2]) & CALL_LONG) == 0" + "bl %z0\;%." + [(set_attr "type" "branch") + (set_attr "length" "8")]) + +(define_insn "*call_nonlocal_aix64" + [(call (mem:SI (match_operand:DI 0 "symbol_ref_operand" "s")) + (match_operand 1 "" "g")) + (use (match_operand:SI 2 "immediate_operand" "O")) + (clobber (reg:SI LR_REGNO))] + "TARGET_64BIT + && DEFAULT_ABI == ABI_AIX + && (INTVAL (operands[2]) & CALL_LONG) == 0" + "bl %z0\;%." + [(set_attr "type" "branch") + (set_attr "length" "8")]) + (define_insn "*call_value_nonlocal_aix32" [(set (match_operand 0 "" "") (call (mem:SI (match_operand:SI 1 "symbol_ref_operand" "s")) @@ -12603,45 +12582,6 @@ [(set_attr "type" "branch") (set_attr "length" "8")]) -(define_insn_and_split "*call_value_indirect_nonlocal_aix64_internal" - [(set (match_operand 0 "" "") - (call (mem:SI (match_operand:DI 1 "register_operand" "c,*l")) - (match_operand 2 "" "g,g"))) - (use (mem:DI (plus:DI (match_operand:DI 3 "register_operand" "b,b") - (const_int 8)))) - (use (reg:DI 11)) - (use (mem:DI (plus:DI (reg:DI 1) (const_int 40)))) - (clobber (reg:SI LR_REGNO))] - "TARGET_64BIT && DEFAULT_ABI == ABI_AIX" - "#" - "&& reload_completed" - [(set (reg:DI 2) - (mem:DI (plus:DI (match_dup 3) (const_int 8)))) - (parallel [(set (match_dup 0) (call (mem:SI (match_dup 1)) - (match_dup 2))) - (use (reg:DI 2)) - (use (reg:DI 11)) - (set (reg:DI 2) - (mem:DI (plus:DI (reg:DI 1) (const_int 40)))) - (clobber (reg:SI LR_REGNO))])] - "" - [(set_attr "type" "jmpreg") - (set_attr "length" "12")]) - -(define_insn "*call_value_indirect_nonlocal_aix64" - [(set (match_operand 0 "" "") - (call (mem:SI (match_operand:DI 1 "register_operand" "c,*l")) - (match_operand 2 "" "g,g"))) - (use (reg:DI 2)) - (use (reg:DI 11)) - (set (reg:DI 2) - (mem:DI (plus:DI (reg:DI 1) (const_int 40)))) - (clobber (reg:SI LR_REGNO))] - "TARGET_64BIT && DEFAULT_ABI == ABI_AIX && reload_completed" - "b%T1l\;ld 2,40(1)" - [(set_attr "type" "jmpreg") - (set_attr "length" "8")]) - (define_insn "*call_value_nonlocal_aix64" [(set (match_operand 0 "" "") (call (mem:SI (match_operand:DI 1 "symbol_ref_operand" "s")) diff --git a/gcc/config/rs6000/rs6000.opt b/gcc/config/rs6000/rs6000.opt index 35c936fe53e..329104c441c 100644 --- a/gcc/config/rs6000/rs6000.opt 
+++ b/gcc/config/rs6000/rs6000.opt @@ -521,4 +521,10 @@ mxilinx-fpu Target Var(rs6000_xilinx_fpu) Save Specify Xilinx FPU. +mr11 +Target Report Var(TARGET_R11) Init(1) Save +Use/do not use r11 to hold the static link in calls. +msave-toc-indirect +Target Undocumented Var(TARGET_SAVE_TOC_INDIRECT) Save Init(1) +; Control whether we save the TOC in the prologue for indirect calls or generate the save inline diff --git a/gcc/config/rs6000/vxworks.h b/gcc/config/rs6000/vxworks.h index de14bbfbb0c..bef041cf39e 100644 --- a/gcc/config/rs6000/vxworks.h +++ b/gcc/config/rs6000/vxworks.h @@ -47,6 +47,8 @@ along with GCC; see the file COPYING3. If not see /* Only big endian PPC is supported by VxWorks. */ #undef BYTES_BIG_ENDIAN #define BYTES_BIG_ENDIAN 1 +#undef WORDS_BIG_ENDIAN +#define WORDS_BIG_ENDIAN 1 /* We have to kill off the entire specs set created by rs6000/sysv4.h and substitute our own set. The top level vxworks.h has done some diff --git a/gcc/config/sol2-bi.h b/gcc/config/sol2-bi.h index 371cdd6df41..5e41efd81d4 100644 --- a/gcc/config/sol2-bi.h +++ b/gcc/config/sol2-bi.h @@ -56,6 +56,16 @@ #define DEF_ARCH64_SPEC(__str) "%{!m32:" __str "}" #endif +#undef ASM_CPU_DEFAULT_SPEC +#define ASM_CPU_DEFAULT_SPEC \ +(DEFAULT_ARCH32_P ? "\ +%{m64:" ASM_CPU64_DEFAULT_SPEC "} \ +%{!m64:" ASM_CPU32_DEFAULT_SPEC "} \ +" : "\ +%{m32:" ASM_CPU32_DEFAULT_SPEC "} \ +%{!m32:" ASM_CPU64_DEFAULT_SPEC "} \ +") + /* This should be the same as LINK_ARCH32_SPEC_BASE, except with ARCH64_SUBDIR appended to the paths and /usr/ccs/lib is no longer necessary. */ @@ -78,8 +88,14 @@ #endif #ifdef USE_GLD +#if DEFAULT_ARCH32_P +#define ARCH_DEFAULT_EMULATION ARCH32_EMULATION +#else +#define ARCH_DEFAULT_EMULATION ARCH64_EMULATION +#endif #define TARGET_LD_EMULATION "%{m32:-m " ARCH32_EMULATION "}" \ - "%{m64:-m " ARCH64_EMULATION "} " + "%{m64:-m " ARCH64_EMULATION "}" \ + "%{!m32:%{!m64:-m " ARCH_DEFAULT_EMULATION "}} " #else #define TARGET_LD_EMULATION "" #endif diff --git a/gcc/config/sol2.h b/gcc/config/sol2.h index d9c1fc99a40..3867c7dd245 100644 --- a/gcc/config/sol2.h +++ b/gcc/config/sol2.h @@ -99,13 +99,12 @@ along with GCC; see the file COPYING3. If not see TARGET_SUB_OS_CPP_BUILTINS(); \ } while (0) -/* It's safe to pass -s always, even if -g is not used. */ -#undef ASM_SPEC -#define ASM_SPEC "\ -%{v:-V} %{Qy:} %{!Qn:-Qy} %{Ym,*} -s \ -%{fpic|fpie|fPIC|fPIE:-K PIC} \ -%(asm_cpu) \ -" +/* It's safe to pass -s always, even if -g is not used. Those options are + handled by both Sun as and GNU as. */ +#define ASM_SPEC_BASE \ +"%{v:-V} %{Qy:} %{!Qn:-Qy} %{Ym,*} -s %(asm_cpu)" + +#define ASM_PIC_SPEC " %{fpic|fpie|fPIC|fPIE:-K PIC}" #undef LIB_SPEC #define LIB_SPEC \ diff --git a/gcc/config/sparc/sol2.h b/gcc/config/sparc/sol2.h index 24c7ade0177..6661c21945c 100644 --- a/gcc/config/sparc/sol2.h +++ b/gcc/config/sparc/sol2.h @@ -120,6 +120,10 @@ along with GCC; see the file COPYING3. If not see #define ASM_CPU_DEFAULT_SPEC ASM_CPU32_DEFAULT_SPEC #endif +/* Both Sun as and GNU as understand -K PIC. */ +#undef ASM_SPEC +#define ASM_SPEC ASM_SPEC_BASE ASM_PIC_SPEC + #undef CPP_CPU_SPEC #define CPP_CPU_SPEC "\ %{mcpu=sparclet|mcpu=tsc701:-D__sparclet__} \ @@ -201,16 +205,6 @@ along with GCC; see the file COPYING3. If not see %{!mcpu*:%(asm_cpu_default)} \ " -#undef ASM_CPU_DEFAULT_SPEC -#define ASM_CPU_DEFAULT_SPEC \ -(DEFAULT_ARCH32_P ? 
"\ -%{m64:" ASM_CPU64_DEFAULT_SPEC "} \ -%{!m64:" ASM_CPU32_DEFAULT_SPEC "} \ -" : "\ -%{m32:" ASM_CPU32_DEFAULT_SPEC "} \ -%{!m32:" ASM_CPU64_DEFAULT_SPEC "} \ -") - #undef ASM_ARCH32_SPEC #define ASM_ARCH32_SPEC "" diff --git a/gcc/config/vax/vax-protos.h b/gcc/config/vax/vax-protos.h index a8f88bfa126..3f247943314 100644 --- a/gcc/config/vax/vax-protos.h +++ b/gcc/config/vax/vax-protos.h @@ -20,6 +20,7 @@ along with GCC; see the file COPYING3. If not see extern bool legitimate_constant_address_p (rtx); extern bool vax_mode_dependent_address_p (rtx); +extern void vax_expand_prologue (void); #ifdef RTX_CODE extern const char *cond_name (rtx); diff --git a/gcc/config/vax/vax.c b/gcc/config/vax/vax.c index 7c7070c9016..13a45158f94 100644 --- a/gcc/config/vax/vax.c +++ b/gcc/config/vax/vax.c @@ -48,7 +48,6 @@ along with GCC; see the file COPYING3. If not see static void vax_option_override (void); static bool vax_legitimate_address_p (enum machine_mode, rtx, bool); -static void vax_output_function_prologue (FILE *, HOST_WIDE_INT); static void vax_file_start (void); static void vax_init_libfuncs (void); static void vax_output_mi_thunk (FILE *, tree, HOST_WIDE_INT, @@ -70,9 +69,6 @@ static int vax_return_pops_args (tree, tree, int); #undef TARGET_ASM_ALIGNED_HI_OP #define TARGET_ASM_ALIGNED_HI_OP "\t.word\t" -#undef TARGET_ASM_FUNCTION_PROLOGUE -#define TARGET_ASM_FUNCTION_PROLOGUE vax_output_function_prologue - #undef TARGET_ASM_FILE_START #define TARGET_ASM_FILE_START vax_file_start #undef TARGET_ASM_FILE_START_APP_OFF @@ -137,6 +133,17 @@ vax_option_override (void) #endif } +static void +vax_add_reg_cfa_offset (rtx insn, int offset, rtx src) +{ + rtx x; + + x = plus_constant (frame_pointer_rtx, offset); + x = gen_rtx_MEM (SImode, x); + x = gen_rtx_SET (VOIDmode, x, src); + add_reg_note (insn, REG_CFA_OFFSET, x); +} + /* Generate the assembly code for function entry. FILE is a stdio stream to output the code to. SIZE is an int: how many units of temporary storage to allocate. @@ -146,38 +153,67 @@ vax_option_override (void) used in the function. This function is responsible for knowing which registers should not be saved even if used. */ -static void -vax_output_function_prologue (FILE * file, HOST_WIDE_INT size) +void +vax_expand_prologue (void) { - int regno; + int regno, offset; int mask = 0; + HOST_WIDE_INT size; + rtx insn; for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++) if (df_regs_ever_live_p (regno) && !call_used_regs[regno]) mask |= 1 << regno; - fprintf (file, "\t.word 0x%x\n", mask); + insn = emit_insn (gen_procedure_entry_mask (GEN_INT (mask))); + RTX_FRAME_RELATED_P (insn) = 1; - if (dwarf2out_do_frame ()) - { - const char *label = dwarf2out_cfi_label (false); - int offset = 0; + /* The layout of the CALLG/S stack frame is follows: - for (regno = FIRST_PSEUDO_REGISTER-1; regno >= 0; --regno) - if (df_regs_ever_live_p (regno) && !call_used_regs[regno]) - dwarf2out_reg_save (label, regno, offset -= 4); + <- CFA, AP + r11 + r10 + ... Registers saved as specified by MASK + r3 + r2 + return-addr + old fp + old ap + old psw + zero + <- FP, SP - dwarf2out_reg_save (label, PC_REGNUM, offset -= 4); - dwarf2out_reg_save (label, FRAME_POINTER_REGNUM, offset -= 4); - dwarf2out_reg_save (label, ARG_POINTER_REGNUM, offset -= 4); - dwarf2out_def_cfa (label, FRAME_POINTER_REGNUM, -(offset - 4)); - } + The rest of the prologue will adjust the SP for the local frame. 
*/ + + vax_add_reg_cfa_offset (insn, 4, arg_pointer_rtx); + vax_add_reg_cfa_offset (insn, 8, frame_pointer_rtx); + vax_add_reg_cfa_offset (insn, 12, pc_rtx); + + offset = 16; + for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++) + if (mask & (1 << regno)) + { + vax_add_reg_cfa_offset (insn, offset, gen_rtx_REG (SImode, regno)); + offset += 4; + } + + /* Because add_reg_note pushes the notes, adding this last means that + it will be processed first. This is required to allow the other + notes be interpreted properly. */ + add_reg_note (insn, REG_CFA_DEF_CFA, + plus_constant (frame_pointer_rtx, offset)); + /* Allocate the local stack frame. */ + size = get_frame_size (); size -= STARTING_FRAME_OFFSET; - if (size >= 64) - asm_fprintf (file, "\tmovab %wd(%Rsp),%Rsp\n", -size); - else if (size) - asm_fprintf (file, "\tsubl2 $%wd,%Rsp\n", size); + emit_insn (gen_addsi3 (stack_pointer_rtx, + stack_pointer_rtx, GEN_INT (-size))); + + /* Do not allow instructions referencing local stack memory to be + scheduled before the frame is allocated. This is more pedantic + than anything else, given that VAX does not currently have a + scheduling description. */ + emit_insn (gen_blockage ()); } /* When debugging with stabs, we want to output an extra dummy label @@ -485,6 +521,8 @@ print_operand (FILE *file, rtx x, int code) fprintf (file, "$%d", (int) (0xff & - INTVAL (x))); else if (code == 'M' && CONST_INT_P (x)) fprintf (file, "$%d", ~((1 << INTVAL (x)) - 1)); + else if (code == 'x' && CONST_INT_P (x)) + fprintf (file, HOST_WIDE_INT_PRINT_HEX, INTVAL (x)); else if (REG_P (x)) fprintf (file, "%s", reg_names[REGNO (x)]); else if (MEM_P (x)) diff --git a/gcc/config/vax/vax.md b/gcc/config/vax/vax.md index 8c3ef0042ff..32f50fd3367 100644 --- a/gcc/config/vax/vax.md +++ b/gcc/config/vax/vax.md @@ -29,11 +29,15 @@ ;; UNSPEC_VOLATILE usage: -(define_constants - [(VUNSPEC_BLOCKAGE 0) ; `blockage' insn to prevent scheduling across an +(define_c_enum "unspecv" [ + VUNSPEC_BLOCKAGE ; 'blockage' insn to prevent scheduling across an ; insn in the code. - (VUNSPEC_SYNC_ISTREAM 1) ; sequence of insns to sync the I-stream - (VAX_AP_REGNUM 12) ; Register 12 contains the argument pointer + VUNSPEC_SYNC_ISTREAM ; sequence of insns to sync the I-stream + VUNSPEC_PEM ; 'procedure_entry_mask' insn. 
+]) + +(define_constants + [(VAX_AP_REGNUM 12) ; Register 12 contains the argument pointer (VAX_FP_REGNUM 13) ; Register 13 contains the frame pointer (VAX_SP_REGNUM 14) ; Register 14 contains the stack pointer (VAX_PC_REGNUM 15) ; Register 15 contains the program counter @@ -1409,11 +1413,24 @@ "" "") +(define_insn "procedure_entry_mask" + [(unspec_volatile [(match_operand 0 "const_int_operand")] VUNSPEC_PEM)] + "" + ".word %x0") + (define_insn "return" [(return)] "" "ret") +(define_expand "prologue" + [(const_int 0)] + "" +{ + vax_expand_prologue (); + DONE; +}) + (define_expand "epilogue" [(return)] "" diff --git a/gcc/configure b/gcc/configure index 10c24cc94c9..3c717c086b7 100755 --- a/gcc/configure +++ b/gcc/configure @@ -11072,6 +11072,11 @@ fi cat > plugin-version.h <<EOF #include "configargs.h" +#define GCCPLUGIN_VERSION_MAJOR `echo $gcc_BASEVER | sed -e 's/^\([0-9]*\).*$/\1/'` +#define GCCPLUGIN_VERSION_MINOR `echo $gcc_BASEVER | sed -e 's/^[0-9]*\.\([0-9]*\).*$/\1/'` +#define GCCPLUGIN_VERSION_PATCHLEVEL `echo $gcc_BASEVER | sed -e 's/^[0-9]*\.[0-9]*\.\([0-9]*\)$/\1/'` +#define GCCPLUGIN_VERSION (GCCPLUGIN_VERSION_MAJOR*1000 + GCCPLUGIN_VERSION_MINOR) + static char basever[] = "$gcc_BASEVER"; static char datestamp[] = "$gcc_DATESTAMP"; static char devphase[] = "$gcc_DEVPHASE"; @@ -17623,7 +17628,7 @@ else lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2 lt_status=$lt_dlunknown cat > conftest.$ac_ext <<_LT_EOF -#line 17626 "configure" +#line 17631 "configure" #include "confdefs.h" #if HAVE_DLFCN_H @@ -17729,7 +17734,7 @@ else lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2 lt_status=$lt_dlunknown cat > conftest.$ac_ext <<_LT_EOF -#line 17732 "configure" +#line 17737 "configure" #include "confdefs.h" #if HAVE_DLFCN_H @@ -21779,7 +21784,7 @@ foobar:' > conftest.s # Solaris 9/x86 as incorrectly emits an alias for a hidden symbol with # STV_HIDDEN, so disable .hidden support if so. case "${target}" in - i?86-*-solaris2*) + i?86-*-solaris2* | x86_64-*-solaris2.1[0-9]*) if test x$gcc_cv_as != x && test x$gcc_cv_objdump != x; then cat > conftest.s <<EOF .globl hidden @@ -22094,7 +22099,7 @@ else gcc_cv_as_cfi_directive=no else case "$target" in - i?86-*-solaris2.1[0-9]*) + i?86-*-solaris2.1[0-9]* | x86_64-*-solaris2.1[0-9]*) # On Solaris/x86, make sure that GCC and gas agree on using # read-only .eh_frame sections for 64-bit. 
if $gcc_cv_as --64 -o conftest.o conftest.s > /dev/null 2>&1 && \ @@ -22525,7 +22530,7 @@ $as_echo "$gcc_cv_as_comdat_group_percent" >&6; } foo: ' ;; - i?86-*-solaris2*) + i?86-*-solaris2* | x86_64-*-solaris2.1[0-9]*) conftest_s=' .group foo,.text%foo,#comdat .section .text%foo, "ax", @progbits @@ -22755,12 +22760,16 @@ foo: .long 25 tls_first_major=2 tls_first_minor=17 ;; - i[34567]86-*-*) + i[34567]86-*-* | x86_64-*-solaris2.1[0-9]*) case "$target" in i[34567]86-*-solaris2.*) on_solaris=yes tga_func=___tls_get_addr ;; + x86_64-*-solaris2.1[0-9]*) + on_solaris=yes + tga_func=__tls_get_addr + ;; *) on_solaris=no ;; diff --git a/gcc/configure.ac b/gcc/configure.ac index 5f3641b3198..f899991083d 100644 --- a/gcc/configure.ac +++ b/gcc/configure.ac @@ -1511,6 +1511,11 @@ fi cat > plugin-version.h <<EOF #include "configargs.h" +#define GCCPLUGIN_VERSION_MAJOR `echo $gcc_BASEVER | sed -e 's/^\([0-9]*\).*$/\1/'` +#define GCCPLUGIN_VERSION_MINOR `echo $gcc_BASEVER | sed -e 's/^[0-9]*\.\([0-9]*\).*$/\1/'` +#define GCCPLUGIN_VERSION_PATCHLEVEL `echo $gcc_BASEVER | sed -e 's/^[0-9]*\.[0-9]*\.\([0-9]*\)$/\1/'` +#define GCCPLUGIN_VERSION (GCCPLUGIN_VERSION_MAJOR*1000 + GCCPLUGIN_VERSION_MINOR) + static char basever[] = "$gcc_BASEVER"; static char datestamp[] = "$gcc_DATESTAMP"; static char devphase[] = "$gcc_DEVPHASE"; @@ -2168,7 +2173,7 @@ foobar:],[ # Solaris 9/x86 as incorrectly emits an alias for a hidden symbol with # STV_HIDDEN, so disable .hidden support if so. case "${target}" in - i?86-*-solaris2*) + i?86-*-solaris2* | x86_64-*-solaris2.1[[0-9]]*) if test x$gcc_cv_as != x && test x$gcc_cv_objdump != x; then cat > conftest.s <<EOF .globl hidden @@ -2423,7 +2428,7 @@ gcc_GAS_CHECK_FEATURE([cfi directives], gcc_cv_as_cfi_directive, gcc_cv_as_cfi_directive=no else case "$target" in - i?86-*-solaris2.1[[0-9]]*) + i?86-*-solaris2.1[[0-9]]* | x86_64-*-solaris2.1[[0-9]]*) # On Solaris/x86, make sure that GCC and gas agree on using # read-only .eh_frame sections for 64-bit. if $gcc_cv_as --64 -o conftest.o conftest.s > /dev/null 2>&1 && \ @@ -2627,7 +2632,7 @@ else foo: ' ;; - i?86-*-solaris2*) + i?86-*-solaris2* | x86_64-*-solaris2.1[[0-9]]*) conftest_s=' .group foo,.text%foo,#comdat .section .text%foo, "ax", @progbits @@ -2805,12 +2810,16 @@ foo: .long 25 tls_first_major=2 tls_first_minor=17 ;; - i[34567]86-*-*) + i[34567]86-*-* | x86_64-*-solaris2.1[0-9]*) case "$target" in i[34567]86-*-solaris2.*) on_solaris=yes tga_func=___tls_get_addr ;; + x86_64-*-solaris2.1[0-9]*) + on_solaris=yes + tga_func=__tls_get_addr + ;; *) on_solaris=no ;; diff --git a/gcc/cp/ChangeLog b/gcc/cp/ChangeLog index 6ffebf1b0fe..3957d5ade63 100644 --- a/gcc/cp/ChangeLog +++ b/gcc/cp/ChangeLog @@ -1,3 +1,46 @@ +2011-07-06 Jason Merrill <jason@redhat.com> + + PR c++/49353 + * semantics.c (expand_or_defer_fn_1): Clear DECL_EXTERNAL + on kept inlines. + + PR c++/49568 + * method.c (make_thunk, use_thunk): Copy DECL_COMDAT. + +2011-07-05 Jason Merrill <jason@redhat.com> + + PR c++/48157 + * pt.c (tsubst_qualified_id): Preserve TEMPLATE_ID_EXPR in + partial instantiation. + + PR c++/49598 + * semantics.c (finish_id_expression): convert_from_reference. + +2011-07-05 Richard Guenther <rguenther@suse.de> + + * decl.c (cxx_init_decl_processing): Defer building common + tree nodes to c_common_nodes_and_builtins. + +2011-07-04 Jason Merrill <jason@redhat.com> + + DR 1207 + PR c++/49589 + * mangle.c (write_expression): Handle 'this'. + * parser.c (cp_parser_postfix_dot_deref_expression): Allow + incomplete *this. 
+ * semantics.c (potential_constant_expression_1): Check that + DECL_CONTEXT is set on 'this'. + + * error.c (dump_template_bindings): Don't print typenames + for a partial instantiation. + (dump_function_decl): If we aren't printing function arguments, + print template arguments as <args> rather than [with ...]. + (dump_expr): Don't print return type or template header. + [BASELINK]: Use BASELINK_FUNCTIONS rather than get_first_fn. + * pt.c (dependent_template_arg_p): Handle null arg. + + * error.c (type_to_string): Avoid redundant akas. + 2011-07-01 Jonathan Wakely <jwakely.gcc@gmail.com> PR c++/49605 diff --git a/gcc/cp/decl.c b/gcc/cp/decl.c index 94d686d3aef..e36739b38d6 100644 --- a/gcc/cp/decl.c +++ b/gcc/cp/decl.c @@ -3518,8 +3518,6 @@ cxx_init_decl_processing (void) tree void_ftype; tree void_ftype_ptr; - build_common_tree_nodes (flag_signed_char); - /* Create all the identifiers we need. */ initialize_predefined_identifiers (); @@ -3536,8 +3534,6 @@ cxx_init_decl_processing (void) TREE_PUBLIC (global_namespace) = 1; begin_scope (sk_namespace, global_namespace); - current_lang_name = NULL_TREE; - if (flag_visibility_ms_compat) default_visibility = VISIBILITY_HIDDEN; diff --git a/gcc/cp/error.c b/gcc/cp/error.c index 7c90ec45c17..b16fce68750 100644 --- a/gcc/cp/error.c +++ b/gcc/cp/error.c @@ -307,9 +307,12 @@ dump_template_bindings (tree parms, tree args, VEC(tree,gc)* typenames) parms = TREE_CHAIN (parms); } + /* Don't bother with typenames for a partial instantiation. */ + if (VEC_empty (tree, typenames) || uses_template_parms (args)) + return; + FOR_EACH_VEC_ELT (tree, typenames, i, t) { - bool dependent = uses_template_parms (args); if (need_comma) pp_separate_with_comma (cxx_pp); dump_type (t, TFF_PLAIN_IDENTIFIER); @@ -317,11 +320,7 @@ dump_template_bindings (tree parms, tree args, VEC(tree,gc)* typenames) pp_equal (cxx_pp); pp_cxx_whitespace (cxx_pp); push_deferring_access_checks (dk_no_check); - if (dependent) - ++processing_template_decl; t = tsubst (t, args, tf_none, NULL_TREE); - if (dependent) - --processing_template_decl; pop_deferring_access_checks (); /* Strip typedefs. We can't just use TFF_CHASE_TYPEDEF because pp_simple_type_specifier doesn't know about it. */ @@ -1379,17 +1378,37 @@ dump_function_decl (tree t, int flags) if (show_return) dump_type_suffix (TREE_TYPE (fntype), flags); - } - /* If T is a template instantiation, dump the parameter binding. */ - if (template_parms != NULL_TREE && template_args != NULL_TREE) + /* If T is a template instantiation, dump the parameter binding. 
*/ + if (template_parms != NULL_TREE && template_args != NULL_TREE) + { + pp_cxx_whitespace (cxx_pp); + pp_cxx_left_bracket (cxx_pp); + pp_cxx_ws_string (cxx_pp, M_("with")); + pp_cxx_whitespace (cxx_pp); + dump_template_bindings (template_parms, template_args, typenames); + pp_cxx_right_bracket (cxx_pp); + } + } + else if (template_args) { - pp_cxx_whitespace (cxx_pp); - pp_cxx_left_bracket (cxx_pp); - pp_cxx_ws_string (cxx_pp, M_("with")); - pp_cxx_whitespace (cxx_pp); - dump_template_bindings (template_parms, template_args, typenames); - pp_cxx_right_bracket (cxx_pp); + bool need_comma = false; + int i; + pp_cxx_begin_template_argument_list (cxx_pp); + template_args = INNERMOST_TEMPLATE_ARGS (template_args); + for (i = 0; i < TREE_VEC_LENGTH (template_args); ++i) + { + tree arg = TREE_VEC_ELT (template_args, i); + if (need_comma) + pp_separate_with_comma (cxx_pp); + if (ARGUMENT_PACK_P (arg)) + pp_cxx_left_brace (cxx_pp); + dump_template_argument (arg, TFF_PLAIN_IDENTIFIER); + if (ARGUMENT_PACK_P (arg)) + pp_cxx_right_brace (cxx_pp); + need_comma = true; + } + pp_cxx_end_template_argument_list (cxx_pp); } } @@ -1724,7 +1743,9 @@ dump_expr (tree t, int flags) case OVERLOAD: case TYPE_DECL: case IDENTIFIER_NODE: - dump_decl (t, (flags & ~TFF_DECL_SPECIFIERS) | TFF_NO_FUNCTION_ARGUMENTS); + dump_decl (t, ((flags & ~(TFF_DECL_SPECIFIERS|TFF_RETURN_TYPE + |TFF_TEMPLATE_HEADER)) + | TFF_NO_FUNCTION_ARGUMENTS)); break; case INTEGER_CST: @@ -2289,7 +2310,7 @@ dump_expr (tree t, int flags) break; case BASELINK: - dump_expr (get_first_fn (t), flags & ~TFF_EXPR_IN_PARENS); + dump_expr (BASELINK_FUNCTIONS (t), flags & ~TFF_EXPR_IN_PARENS); break; case EMPTY_CLASS_EXPR: @@ -2634,14 +2655,28 @@ type_to_string (tree typ, int verbose) reinit_cxx_pp (); dump_type (typ, flags); + /* If we're printing a type that involves typedefs, also print the + stripped version. But sometimes the stripped version looks + exactly the same, so we don't want it after all. To avoid printing + it in that case, we play ugly obstack games. */ if (typ && TYPE_P (typ) && typ != TYPE_CANONICAL (typ) && !uses_template_parms (typ)) { + int aka_start; char *p; + struct obstack *ob = pp_base (cxx_pp)->buffer->obstack; + /* Remember the end of the initial dump. */ + int len = obstack_object_size (ob); tree aka = strip_typedefs (typ); pp_string (cxx_pp, " {aka"); pp_cxx_whitespace (cxx_pp); + /* And remember the start of the aka dump. */ + aka_start = obstack_object_size (ob); dump_type (aka, flags); pp_character (cxx_pp, '}'); + p = (char*)obstack_base (ob); + /* If they are identical, cut off the aka with a NUL. */ + if (memcmp (p, p+aka_start, len) == 0) + p[len] = '\0'; } return pp_formatted_text (cxx_pp); } diff --git a/gcc/cp/mangle.c b/gcc/cp/mangle.c index 134c9ea7f19..81b772f6316 100644 --- a/gcc/cp/mangle.c +++ b/gcc/cp/mangle.c @@ -2495,6 +2495,11 @@ write_expression (tree expr) else if (TREE_CODE_CLASS (code) == tcc_constant || (abi_version_at_least (2) && code == CONST_DECL)) write_template_arg_literal (expr); + else if (code == PARM_DECL && DECL_ARTIFICIAL (expr)) + { + gcc_assert (!strcmp ("this", IDENTIFIER_POINTER (DECL_NAME (expr)))); + write_string ("fpT"); + } else if (code == PARM_DECL) { /* A function parameter used in a late-specified return type. 
*/ diff --git a/gcc/cp/method.c b/gcc/cp/method.c index 9b9eb9a9df9..d41a4dd4238 100644 --- a/gcc/cp/method.c +++ b/gcc/cp/method.c @@ -140,11 +140,12 @@ make_thunk (tree function, bool this_adjusting, THUNK_VIRTUAL_OFFSET (thunk) = virtual_offset; THUNK_ALIAS (thunk) = NULL_TREE; - /* The thunk itself is not a constructor or destructor, even if - the thing it is thunking to is. */ DECL_INTERFACE_KNOWN (thunk) = 1; DECL_NOT_REALLY_EXTERN (thunk) = 1; + DECL_COMDAT (thunk) = DECL_COMDAT (function); DECL_SAVED_FUNCTION_DATA (thunk) = NULL; + /* The thunk itself is not a constructor or destructor, even if + the thing it is thunking to is. */ DECL_DESTRUCTOR_P (thunk) = 0; DECL_CONSTRUCTOR_P (thunk) = 0; DECL_EXTERNAL (thunk) = 1; @@ -342,6 +343,7 @@ use_thunk (tree thunk_fndecl, bool emit_p) DECL_VISIBILITY (thunk_fndecl) = DECL_VISIBILITY (function); DECL_VISIBILITY_SPECIFIED (thunk_fndecl) = DECL_VISIBILITY_SPECIFIED (function); + DECL_COMDAT (thunk_fndecl) = DECL_COMDAT (function); if (DECL_ONE_ONLY (function) || DECL_WEAK (function)) make_decl_one_only (thunk_fndecl, cxx_comdat_group (thunk_fndecl)); diff --git a/gcc/cp/parser.c b/gcc/cp/parser.c index d79326d2808..6bb15ed9508 100644 --- a/gcc/cp/parser.c +++ b/gcc/cp/parser.c @@ -5281,7 +5281,11 @@ cp_parser_postfix_dot_deref_expression (cp_parser *parser, postfix_expression); scope = NULL_TREE; } - else + /* Unlike the object expression in other contexts, *this is not + required to be of complete type for purposes of class member + access (5.2.5) outside the member function body. */ + else if (scope != current_class_ref + && !(processing_template_decl && scope == current_class_type)) scope = complete_type_or_else (scope, NULL_TREE); /* Let the name lookup machinery know that we are processing a class member access expression. */ diff --git a/gcc/cp/pt.c b/gcc/cp/pt.c index 7236e7e5168..17ca44cde0d 100644 --- a/gcc/cp/pt.c +++ b/gcc/cp/pt.c @@ -11287,8 +11287,12 @@ tsubst_qualified_id (tree qualified_id, tree args, expr = name; if (dependent_scope_p (scope)) - return build_qualified_name (NULL_TREE, scope, expr, - QUALIFIED_NAME_IS_TEMPLATE (qualified_id)); + { + if (is_template) + expr = build_min_nt (TEMPLATE_ID_EXPR, expr, template_args); + return build_qualified_name (NULL_TREE, scope, expr, + QUALIFIED_NAME_IS_TEMPLATE (qualified_id)); + } if (!BASELINK_P (name) && !DECL_P (expr)) { @@ -18848,7 +18852,7 @@ dependent_template_arg_p (tree arg) is dependent. This is consistent with what any_dependent_template_arguments_p [that calls this function] does. 
*/ - if (arg == error_mark_node) + if (!arg || arg == error_mark_node) return true; if (TREE_CODE (arg) == ARGUMENT_PACK_SELECT) diff --git a/gcc/cp/semantics.c b/gcc/cp/semantics.c index e29705c5782..5c53a18511c 100644 --- a/gcc/cp/semantics.c +++ b/gcc/cp/semantics.c @@ -2942,7 +2942,7 @@ finish_id_expression (tree id_expression, tree containing_function = current_function_decl; tree lambda_stack = NULL_TREE; tree lambda_expr = NULL_TREE; - tree initializer = decl; + tree initializer = convert_from_reference (decl); /* Core issue 696: "[At the July 2009 meeting] the CWG expressed support for an approach in which a reference to a local @@ -3633,7 +3633,10 @@ expand_or_defer_fn_1 (tree fn) && !DECL_REALLY_EXTERN (fn)) || (flag_keep_inline_dllexport && lookup_attribute ("dllexport", DECL_ATTRIBUTES (fn)))) - mark_needed (fn); + { + mark_needed (fn); + DECL_EXTERNAL (fn) = 0; + } } /* There's no reason to do any of the work here if we're only doing @@ -7791,7 +7794,8 @@ potential_constant_expression_1 (tree t, bool want_rval, tsubst_flags_t flags) STRIP_NOPS (x); if (is_this_parameter (x)) { - if (DECL_CONSTRUCTOR_P (DECL_CONTEXT (x)) && want_rval) + if (want_rval && DECL_CONTEXT (x) + && DECL_CONSTRUCTOR_P (DECL_CONTEXT (x))) { if (flags & tf_error) sorry ("use of the value of the object being constructed " diff --git a/gcc/cse.c b/gcc/cse.c index 8a31cd1a3da..a078329ac55 100644 --- a/gcc/cse.c +++ b/gcc/cse.c @@ -761,8 +761,8 @@ notreg_cost (rtx x, enum rtx_code outer) && (GET_MODE_SIZE (GET_MODE (x)) < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))) && subreg_lowpart_p (x) - && TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (GET_MODE (x)), - GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (x))))) + && TRULY_NOOP_TRUNCATION_MODES_P (GET_MODE (x), + GET_MODE (SUBREG_REG (x)))) ? 0 : rtx_cost (x, outer, optimize_this_for_speed_p) * 2); } @@ -3063,12 +3063,8 @@ find_comparison_args (enum rtx_code code, rtx *parg1, rtx *parg2, for STORE_FLAG_VALUE, also look at LT and GE operations. 
*/ || ((code == NE || (code == LT - && GET_MODE_CLASS (inner_mode) == MODE_INT - && (GET_MODE_BITSIZE (inner_mode) - <= HOST_BITS_PER_WIDE_INT) - && (STORE_FLAG_VALUE - & ((HOST_WIDE_INT) 1 - << (GET_MODE_BITSIZE (inner_mode) - 1)))) + && val_signbit_known_set_p (inner_mode, + STORE_FLAG_VALUE)) #ifdef FLOAT_STORE_FLAG_VALUE || (code == LT && SCALAR_FLOAT_MODE_P (inner_mode) @@ -3083,12 +3079,8 @@ find_comparison_args (enum rtx_code code, rtx *parg1, rtx *parg2, } else if ((code == EQ || (code == GE - && GET_MODE_CLASS (inner_mode) == MODE_INT - && (GET_MODE_BITSIZE (inner_mode) - <= HOST_BITS_PER_WIDE_INT) - && (STORE_FLAG_VALUE - & ((HOST_WIDE_INT) 1 - << (GET_MODE_BITSIZE (inner_mode) - 1)))) + && val_signbit_known_set_p (inner_mode, + STORE_FLAG_VALUE)) #ifdef FLOAT_STORE_FLAG_VALUE || (code == GE && SCALAR_FLOAT_MODE_P (inner_mode) @@ -3658,7 +3650,7 @@ fold_rtx (rtx x, rtx insn) enum rtx_code associate_code; if (is_shift - && (INTVAL (const_arg1) >= GET_MODE_BITSIZE (mode) + && (INTVAL (const_arg1) >= GET_MODE_PRECISION (mode) || INTVAL (const_arg1) < 0)) { if (SHIFT_COUNT_TRUNCATED) @@ -3707,7 +3699,7 @@ fold_rtx (rtx x, rtx insn) break; if (is_shift - && (INTVAL (inner_const) >= GET_MODE_BITSIZE (mode) + && (INTVAL (inner_const) >= GET_MODE_PRECISION (mode) || INTVAL (inner_const) < 0)) { if (SHIFT_COUNT_TRUNCATED) @@ -3737,7 +3729,7 @@ fold_rtx (rtx x, rtx insn) if (is_shift && CONST_INT_P (new_const) - && INTVAL (new_const) >= GET_MODE_BITSIZE (mode)) + && INTVAL (new_const) >= GET_MODE_PRECISION (mode)) { /* As an exception, we can turn an ASHIFTRT of this form into a shift of the number of bits - 1. */ @@ -3967,9 +3959,7 @@ record_jump_cond (enum rtx_code code, enum machine_mode mode, rtx op0, is not worth testing for with no SUBREG). */ /* Note that GET_MODE (op0) may not equal MODE. */ - if (code == EQ && GET_CODE (op0) == SUBREG - && (GET_MODE_SIZE (GET_MODE (op0)) - > GET_MODE_SIZE (GET_MODE (SUBREG_REG (op0))))) + if (code == EQ && paradoxical_subreg_p (op0)) { enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op0)); rtx tem = record_jump_cond_subreg (inner_mode, op1); @@ -3978,9 +3968,7 @@ record_jump_cond (enum rtx_code code, enum machine_mode mode, rtx op0, reversed_nonequality); } - if (code == EQ && GET_CODE (op1) == SUBREG - && (GET_MODE_SIZE (GET_MODE (op1)) - > GET_MODE_SIZE (GET_MODE (SUBREG_REG (op1))))) + if (code == EQ && paradoxical_subreg_p (op1)) { enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op1)); rtx tem = record_jump_cond_subreg (inner_mode, op0); @@ -4564,9 +4552,7 @@ cse_insn (rtx insn) treat it as volatile. It may do the work of an SI in one context where the extra bits are not being used, but cannot replace an SI in general. */ - if (GET_CODE (src) == SUBREG - && (GET_MODE_SIZE (GET_MODE (src)) - > GET_MODE_SIZE (GET_MODE (SUBREG_REG (src))))) + if (paradoxical_subreg_p (src)) sets[i].src_volatile = 1; #endif @@ -4686,13 +4672,13 @@ cse_insn (rtx insn) if (src_const && src_related == 0 && CONST_INT_P (src_const) && GET_MODE_CLASS (mode) == MODE_INT - && GET_MODE_BITSIZE (mode) < BITS_PER_WORD) + && GET_MODE_PRECISION (mode) < BITS_PER_WORD) { enum machine_mode wider_mode; for (wider_mode = GET_MODE_WIDER_MODE (mode); wider_mode != VOIDmode - && GET_MODE_BITSIZE (wider_mode) <= BITS_PER_WORD + && GET_MODE_PRECISION (wider_mode) <= BITS_PER_WORD && src_related == 0; wider_mode = GET_MODE_WIDER_MODE (wider_mode)) { @@ -4844,9 +4830,7 @@ cse_insn (rtx insn) /* Also skip paradoxical subregs, unless that's what we're looking for. 
*/ - if (code == SUBREG - && (GET_MODE_SIZE (GET_MODE (p->exp)) - > GET_MODE_SIZE (GET_MODE (SUBREG_REG (p->exp)))) + if (paradoxical_subreg_p (p->exp) && ! (src != 0 && GET_CODE (src) == SUBREG && GET_MODE (src) == GET_MODE (p->exp) @@ -4955,9 +4939,7 @@ cse_insn (rtx insn) size, but later may be adjusted so that the upper bits aren't what we want. So reject it. */ if (elt != 0 - && GET_CODE (elt->exp) == SUBREG - && (GET_MODE_SIZE (GET_MODE (elt->exp)) - > GET_MODE_SIZE (GET_MODE (SUBREG_REG (elt->exp)))) + && paradoxical_subreg_p (elt->exp) /* It is okay, though, if the rtx we're trying to match will ignore any of the bits we can't predict. */ && ! (src != 0 @@ -5049,7 +5031,7 @@ cse_insn (rtx insn) && CONST_INT_P (XEXP (SET_DEST (sets[i].rtl), 1)) && CONST_INT_P (XEXP (SET_DEST (sets[i].rtl), 2)) && REG_P (XEXP (SET_DEST (sets[i].rtl), 0)) - && (GET_MODE_BITSIZE (GET_MODE (SET_DEST (sets[i].rtl))) + && (GET_MODE_PRECISION (GET_MODE (SET_DEST (sets[i].rtl))) >= INTVAL (XEXP (SET_DEST (sets[i].rtl), 1))) && ((unsigned) INTVAL (XEXP (SET_DEST (sets[i].rtl), 1)) + (unsigned) INTVAL (XEXP (SET_DEST (sets[i].rtl), 2)) @@ -5076,7 +5058,7 @@ cse_insn (rtx insn) HOST_WIDE_INT mask; unsigned int shift; if (BITS_BIG_ENDIAN) - shift = GET_MODE_BITSIZE (GET_MODE (dest_reg)) + shift = GET_MODE_PRECISION (GET_MODE (dest_reg)) - INTVAL (pos) - INTVAL (width); else shift = INTVAL (pos); @@ -5718,9 +5700,7 @@ cse_insn (rtx insn) some tracking to be wrong. ??? Think about this more later. */ - || (GET_CODE (dest) == SUBREG - && (GET_MODE_SIZE (GET_MODE (dest)) - > GET_MODE_SIZE (GET_MODE (SUBREG_REG (dest)))) + || (paradoxical_subreg_p (dest) && (GET_CODE (sets[i].src) == SIGN_EXTEND || GET_CODE (sets[i].src) == ZERO_EXTEND))) continue; diff --git a/gcc/doc/extend.texi b/gcc/doc/extend.texi index 0e80590534c..1817f5123d1 100644 --- a/gcc/doc/extend.texi +++ b/gcc/doc/extend.texi @@ -1629,7 +1629,7 @@ such an initializer, as shown here: char **foo = (char *[]) @{ "x", "y", "z" @}; @end smallexample -Compound literals for scalar types and union types are is +Compound literals for scalar types and union types are also allowed, but then the compound literal is equivalent to a cast. @@ -8226,8 +8226,8 @@ or if not a specific built-in is implemented or not. For example, if The following built-in functions map to the respective machine instruction, i.e. @code{nop}, @code{sei}, @code{cli}, @code{sleep}, @code{wdr}, @code{swap}, @code{fmul}, @code{fmuls} -resp. @code{fmulsu}. The latter three are only available if the AVR -device actually supports multiplication. +resp. @code{fmulsu}. The three @code{fmul*} built-ins are implemented +as library call if no hardware multiplier is available. @smallexample void __builtin_avr_nop (void) diff --git a/gcc/doc/install.texi b/gcc/doc/install.texi index 49aac95e617..f93413fb7b8 100644 --- a/gcc/doc/install.texi +++ b/gcc/doc/install.texi @@ -1245,7 +1245,7 @@ destructors, but requires __cxa_atexit in libc. This option is currently only available on systems with GNU libc. When enabled, this will cause @option{-fuse-cxa-atexit} to be passed by default. -@item --enable-indirect-function +@item --enable-gnu-indirect-function Define if you want to enable the @code{ifunc} attribute. This option is currently only available on systems with GNU libc on certain targets. @@ -2957,6 +2957,8 @@ information are. 
@item @uref{#alpha-dec-osf51,,alpha*-dec-osf5.1} @item +@uref{#amd64-x-solaris210,,amd64-*-solaris2.10} +@item @uref{#arm-x-elf,,arm-*-elf} @item @uref{#avr,,avr} @@ -3057,6 +3059,8 @@ information are. @item @uref{#x86-64-x-x,,x86_64-*-*, amd64-*-*} @item +@uref{#x86-64-x-solaris210,,x86_64-*-solaris2.1[0-9]*} +@item @uref{#xtensa-x-elf,,xtensa*-*-elf} @item @uref{#xtensa-x-linux,,xtensa*-*-linux*} @@ -3157,6 +3161,13 @@ provide a fix shortly. @html <hr /> @end html +@heading @anchor{amd64-x-solaris210}amd64-*-solaris2.1[0-9]* + +This is a synonym for @samp{x86_64-*-solaris2.1[0-9]*}. + +@html +<hr /> +@end html @heading @anchor{arm-x-elf}arm-*-elf ARM-family processors. Subtargets that use the ELF object format require GNU binutils 2.13 or newer. Such subtargets include: @@ -3537,10 +3548,10 @@ you have the patch installed, you can configure GCC with an appropriate @end html @heading @anchor{ix86-x-solaris210}i?86-*-solaris2.10 Use this for Solaris 10 or later on x86 and x86-64 systems. This -configuration is supported by GCC 4.0 and later versions only. Unlike -@samp{sparcv9-sun-solaris2*}, there is no corresponding 64-bit -configuration like @samp{amd64-*-solaris2*} or @samp{x86_64-*-solaris2*}. -@c FIXME: will there ever be? +configuration is supported by GCC 4.0 and later versions only. Starting +with GCC 4.7, there is also a 64-bit @samp{amd64-*-solaris2.1[0-9]*} or +@samp{x86_64-*-solaris2.1[0-9]*} configuration that corresponds to +@samp{sparcv9-sun-solaris2*}. It is recommended that you configure GCC to use the GNU assembler, in @file{/usr/sfw/bin/gas}. The versions included in Solaris 10, from GNU @@ -4400,6 +4411,22 @@ both 64-bit x86-64 and 32-bit x86 code (via the @option{-m32} switch). @html <hr /> @end html +@heading @anchor{x86-64-x-solaris2.10}x86_64-*-solaris2.1[0-9]* + +GCC also supports the x86-64 architecture implemented by the AMD64 +processor (@samp{amd64-*-*} is an alias for @samp{x86_64-*-*}) on +Solaris 10 or later. Unlike other systems, without special options a +bi-arch compiler is built which generates 32-bit code by default, but +can generate 64-bit x86-64 code with the @option{-m64} switch. Since +GCC 4.7, there is also configuration that defaults to 64-bit code, but +can generate 32-bit code with @option{-m32}. To configure and build +this way, you have to provide all support libraries like @file{libgmp} +as 64-bit code, configure with @option{--target=x86_64-pc-solaris2.1x} +and @samp{CC=gcc -m64}. + +@html +<hr /> +@end html @heading @anchor{xtensa-x-elf}xtensa*-*-elf This target is intended for embedded Xtensa systems using the diff --git a/gcc/doc/invoke.texi b/gcc/doc/invoke.texi index 7127902e7ec..a00861d1372 100644 --- a/gcc/doc/invoke.texi +++ b/gcc/doc/invoke.texi @@ -808,7 +808,7 @@ See RS/6000 and PowerPC Options. -msdata=@var{opt} -mvxworks -G @var{num} -pthread @gol -mrecip -mrecip=@var{opt} -mno-recip -mrecip-precision @gol -mno-recip-precision @gol --mveclibabi=@var{type} -mfriz -mno-friz} +-mveclibabi=@var{type} -mfriz -mno-friz -mr11 -mno-r11} @emph{RX Options} @gccoptlist{-m64bit-doubles -m32bit-doubles -fpu -nofpu@gol @@ -9032,6 +9032,11 @@ The maximum number of conditional stores paires that can be sunk. Set to 0 if either vectorization (@option{-ftree-vectorize}) or if-conversion (@option{-ftree-loop-if-convert}) is disabled. The default is 2. +@item case-values-threshold +The smallest number of different values for which it is best to use a +jump-table instead of a tree of conditional branches. 
If the value is +0, use the default for the machine. The default is 0. + @end table @end table @@ -16326,6 +16331,19 @@ Generate (do not generate) the @code{friz} instruction when the rounding a floating point value to 64-bit integer and back to floating point. The @code{friz} instruction does not return the same value if the floating point number is too large to fit in an integer. + +@item -mr11 +@itemx -mno-r11 +@opindex mr11 +Generate (do not generate) code to load up the static chain register +(@var{r11}) when calling through a pointer on AIX and 64-bit Linux +systems where a function pointer points to a 3 word descriptor giving +the function address, TOC value to be loaded in register @var{r2}, and +static chain value to be loaded in register @var{r11}. The +@option{-mr11} is on by default. You will not be able to call through +pointers to nested functions or pointers to functions compiled in +other languages that use the static chain if you use the +@option{-mno-r11}. @end table @node RX Options diff --git a/gcc/doc/plugins.texi b/gcc/doc/plugins.texi index 2ab2b62021d..1ff14474ccc 100644 --- a/gcc/doc/plugins.texi +++ b/gcc/doc/plugins.texi @@ -417,6 +417,17 @@ invoking @command{gcc -print-file-name=plugin} (replace if needed Inside plugins, this @code{plugin} directory name can be queried by calling @code{default_plugin_dir_name ()}. +Plugins may know, when they are compiled, the GCC version for which +@file{plugin-version.h} is provided. The constant macros +@code{GCCPLUGIN_VERSION_MAJOR}, @code{GCCPLUGIN_VERSION_MINOR}, +@code{GCCPLUGIN_VERSION_PATCHLEVEL}, @code{GCCPLUGIN_VERSION} are +integer numbers, so a plugin could ensure it is built for GCC 4.7 with +@smallexample +#if GCCPLUGIN_VERSION != 4007 +#error this GCC plugin is for GCC 4.7 +#endif +@end smallexample + The following GNU Makefile excerpt shows how to build a simple plugin: @smallexample diff --git a/gcc/doc/tm.texi b/gcc/doc/tm.texi index 341628bb3ea..ae90184b4f1 100644 --- a/gcc/doc/tm.texi +++ b/gcc/doc/tm.texi @@ -10876,7 +10876,7 @@ instructions or prefetch instructions). To create a built-in function, call the function @code{lang_hooks.builtin_function} which is defined by the language front end. You can use any type nodes set -up by @code{build_common_tree_nodes} and @code{build_common_tree_nodes_2}; +up by @code{build_common_tree_nodes}; only language front ends that use those two functions will call @samp{TARGET_INIT_BUILTINS}. @end deftypefn diff --git a/gcc/doc/tm.texi.in b/gcc/doc/tm.texi.in index f7c16e9859f..254ddc2c90d 100644 --- a/gcc/doc/tm.texi.in +++ b/gcc/doc/tm.texi.in @@ -10783,7 +10783,7 @@ instructions or prefetch instructions). To create a built-in function, call the function @code{lang_hooks.builtin_function} which is defined by the language front end. You can use any type nodes set -up by @code{build_common_tree_nodes} and @code{build_common_tree_nodes_2}; +up by @code{build_common_tree_nodes}; only language front ends that use those two functions will call @samp{TARGET_INIT_BUILTINS}. @end deftypefn diff --git a/gcc/dse.c b/gcc/dse.c index 3646b0420e3..18926b28273 100644 --- a/gcc/dse.c +++ b/gcc/dse.c @@ -1722,8 +1722,7 @@ find_shift_sequence (int access_size, /* Try a wider mode if truncating the store mode to NEW_MODE requires a real instruction. 
*/ if (GET_MODE_BITSIZE (new_mode) < GET_MODE_BITSIZE (store_mode) - && !TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (new_mode), - GET_MODE_BITSIZE (store_mode))) + && !TRULY_NOOP_TRUNCATION_MODES_P (new_mode, store_mode)) continue; /* Also try a wider mode if the necessary punning is either not diff --git a/gcc/dwarf2out.c b/gcc/dwarf2out.c index a0714c45d26..436110e03a9 100644 --- a/gcc/dwarf2out.c +++ b/gcc/dwarf2out.c @@ -848,7 +848,7 @@ add_cfi (cfi_vec *vec, dw_cfi_ref cfi) /* Generate a new label for the CFI info to refer to. FORCE is true if a label needs to be output even when using .cfi_* directives. */ -char * +static char * dwarf2out_cfi_label (bool force) { static char label[20]; @@ -1080,21 +1080,6 @@ static HOST_WIDE_INT args_size; /* The last args_size we actually output. */ static HOST_WIDE_INT old_args_size; -/* Entry point to update the canonical frame address (CFA). - LABEL is passed to add_fde_cfi. The value of CFA is now to be - calculated from REG+OFFSET. */ - -void -dwarf2out_def_cfa (const char *label, unsigned int reg, HOST_WIDE_INT offset) -{ - dw_cfa_location loc; - loc.indirect = 0; - loc.base_offset = 0; - loc.reg = reg; - loc.offset = offset; - def_cfa_1 (label, &loc); -} - /* Determine if two dw_cfa_location structures define the same data. */ static bool @@ -1232,33 +1217,6 @@ reg_save (const char *label, unsigned int reg, unsigned int sreg, HOST_WIDE_INT add_fde_cfi (label, cfi); } -/* Entry point for saving a register to the stack. REG is the GCC register - number. LABEL and OFFSET are passed to reg_save. */ - -void -dwarf2out_reg_save (const char *label, unsigned int reg, HOST_WIDE_INT offset) -{ - reg_save (label, DWARF_FRAME_REGNUM (reg), INVALID_REGNUM, offset); -} - -/* Entry point for saving the return address in the stack. - LABEL and OFFSET are passed to reg_save. */ - -void -dwarf2out_return_save (const char *label, HOST_WIDE_INT offset) -{ - reg_save (label, DWARF_FRAME_RETURN_COLUMN, INVALID_REGNUM, offset); -} - -/* Entry point for saving the return address in a register. - LABEL and SREG are passed to reg_save. */ - -void -dwarf2out_return_reg (const char *label, unsigned int sreg) -{ - reg_save (label, DWARF_FRAME_RETURN_COLUMN, DWARF_FRAME_REGNUM (sreg), 0); -} - /* Record the initial position of the return address. RTL is INCOMING_RETURN_ADDR_RTX. */ @@ -1816,7 +1774,7 @@ queue_reg_save (const char *label, rtx reg, rtx sreg, HOST_WIDE_INT offset) /* Output all the entries in QUEUED_REG_SAVES. */ -void +static void dwarf2out_flush_queued_reg_saves (void) { struct queued_reg_save *q; @@ -1866,20 +1824,6 @@ clobbers_queued_reg_save (const_rtx insn) return false; } -/* Entry point for saving the first register into the second. */ - -void -dwarf2out_reg_save_reg (const char *label, rtx reg, rtx sreg) -{ - unsigned int regno, sregno; - - record_reg_saved_in_reg (sreg, reg); - - regno = DWARF_FRAME_REGNUM (REGNO (reg)); - sregno = DWARF_FRAME_REGNUM (REGNO (sreg)); - reg_save (label, regno, sregno, 0); -} - /* What register, if any, is currently saved in REG? */ static rtx @@ -4273,6 +4217,8 @@ dwarf2out_end_epilogue (unsigned int line ATTRIBUTE_UNUSED, void dwarf2out_frame_init (void) { + dw_cfa_location loc; + /* Allocate the initial hunk of the fde_table. */ fde_table = ggc_alloc_cleared_vec_dw_fde_node (FDE_TABLE_INCREMENT); fde_table_allocated = FDE_TABLE_INCREMENT; @@ -4282,7 +4228,10 @@ dwarf2out_frame_init (void) sake of lookup_cfa. */ /* On entry, the Canonical Frame Address is at SP. 
*/ - dwarf2out_def_cfa (NULL, STACK_POINTER_REGNUM, INCOMING_FRAME_SP_OFFSET); + memset(&loc, 0, sizeof (loc)); + loc.reg = STACK_POINTER_REGNUM; + loc.offset = INCOMING_FRAME_SP_OFFSET; + def_cfa_1 (NULL, &loc); if (targetm.debug_unwind_info () == UI_DWARF2 || targetm_common.except_unwind_info (&global_options) == UI_DWARF2) diff --git a/gcc/dwarf2out.h b/gcc/dwarf2out.h index 7f5ac1351ae..ca2c26aea04 100644 --- a/gcc/dwarf2out.h +++ b/gcc/dwarf2out.h @@ -23,7 +23,6 @@ extern void dwarf2out_frame_debug (rtx, bool); extern void dwarf2out_frame_debug_init (void); extern void dwarf2out_cfi_begin_epilogue (rtx); extern void dwarf2out_frame_debug_restore_state (void); -extern void dwarf2out_flush_queued_reg_saves (void); extern void debug_dwarf (void); struct die_struct; diff --git a/gcc/emit-rtl.c b/gcc/emit-rtl.c index f010ac6f5ab..c641b7e6ca2 100644 --- a/gcc/emit-rtl.c +++ b/gcc/emit-rtl.c @@ -1334,6 +1334,16 @@ subreg_lowpart_p (const_rtx x) return (subreg_lowpart_offset (GET_MODE (x), GET_MODE (SUBREG_REG (x))) == SUBREG_BYTE (x)); } + +/* Return true if X is a paradoxical subreg, false otherwise. */ +bool +paradoxical_subreg_p (const_rtx x) +{ + if (GET_CODE (x) != SUBREG) + return false; + return (GET_MODE_PRECISION (GET_MODE (x)) + > GET_MODE_PRECISION (GET_MODE (SUBREG_REG (x)))); +} /* Return subword OFFSET of operand OP. The word number, OFFSET, is interpreted as the word number starting diff --git a/gcc/explow.c b/gcc/explow.c index a042273ec94..3c692f4074e 100644 --- a/gcc/explow.c +++ b/gcc/explow.c @@ -51,7 +51,7 @@ static rtx break_out_memory_refs (rtx); HOST_WIDE_INT trunc_int_for_mode (HOST_WIDE_INT c, enum machine_mode mode) { - int width = GET_MODE_BITSIZE (mode); + int width = GET_MODE_PRECISION (mode); /* You want to truncate to a _what_? */ gcc_assert (SCALAR_INT_MODE_P (mode)); diff --git a/gcc/expmed.c b/gcc/expmed.c index 314fac79c28..662add5b2ed 100644 --- a/gcc/expmed.c +++ b/gcc/expmed.c @@ -635,9 +635,8 @@ store_bit_field_1 (rtx str_rtx, unsigned HOST_WIDE_INT bitsize, X) 0)) is (reg:N X). */ if (GET_CODE (xop0) == SUBREG && REG_P (SUBREG_REG (xop0)) - && (!TRULY_NOOP_TRUNCATION - (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (xop0))), - GET_MODE_BITSIZE (op_mode)))) + && (!TRULY_NOOP_TRUNCATION_MODES_P (GET_MODE (SUBREG_REG (xop0)), + op_mode))) { rtx tem = gen_reg_rtx (op_mode); emit_move_insn (tem, xop0); @@ -1304,8 +1303,7 @@ extract_bit_field_1 (rtx str_rtx, unsigned HOST_WIDE_INT bitsize, ? bitpos + bitsize == BITS_PER_WORD : bitpos == 0))) && ((!MEM_P (op0) - && TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode1), - GET_MODE_BITSIZE (GET_MODE (op0))) + && TRULY_NOOP_TRUNCATION_MODES_P (mode1, GET_MODE (op0)) && GET_MODE_SIZE (mode1) != 0 && byte_offset % GET_MODE_SIZE (mode1) == 0) || (MEM_P (op0) @@ -1475,12 +1473,11 @@ extract_bit_field_1 (rtx str_rtx, unsigned HOST_WIDE_INT bitsize, mode. Instead, create a temporary and use convert_move to set the target. 
*/ if (REG_P (xtarget) - && TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (GET_MODE (xtarget)), - GET_MODE_BITSIZE (ext_mode))) + && TRULY_NOOP_TRUNCATION_MODES_P (GET_MODE (xtarget), ext_mode)) { xtarget = gen_lowpart (ext_mode, xtarget); - if (GET_MODE_SIZE (ext_mode) - > GET_MODE_SIZE (GET_MODE (xspec_target))) + if (GET_MODE_PRECISION (ext_mode) + > GET_MODE_PRECISION (GET_MODE (xspec_target))) xspec_target_subreg = xtarget; } else @@ -3115,7 +3112,7 @@ expand_widening_mult (enum machine_mode mode, rtx op0, rtx op1, rtx target, this_optab == umul_widen_optab)) && CONST_INT_P (cop1) && (INTVAL (cop1) >= 0 - || GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)) + || HWI_COMPUTABLE_MODE_P (mode))) { HOST_WIDE_INT coeff = INTVAL (cop1); int max_cost; @@ -3462,7 +3459,7 @@ expand_mult_highpart (enum machine_mode mode, rtx op0, rtx op1, gcc_assert (!SCALAR_FLOAT_MODE_P (mode)); /* We can't support modes wider than HOST_BITS_PER_INT. */ - gcc_assert (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT); + gcc_assert (HWI_COMPUTABLE_MODE_P (mode)); cnst1 = INTVAL (op1) & GET_MODE_MASK (mode); @@ -5039,10 +5036,8 @@ emit_cstore (rtx target, enum insn_code icode, enum rtx_code code, if (GET_MODE_SIZE (target_mode) > GET_MODE_SIZE (result_mode)) { convert_move (target, subtarget, - (GET_MODE_BITSIZE (result_mode) <= HOST_BITS_PER_WIDE_INT) - && 0 == (STORE_FLAG_VALUE - & ((HOST_WIDE_INT) 1 - << (GET_MODE_BITSIZE (result_mode) -1)))); + val_signbit_known_clear_p (result_mode, + STORE_FLAG_VALUE)); op0 = target; result_mode = target_mode; } @@ -5066,9 +5061,7 @@ emit_cstore (rtx target, enum insn_code icode, enum rtx_code code, /* We don't want to use STORE_FLAG_VALUE < 0 below since this makes it hard to use a value of just the sign bit due to ANSI integer constant typing rules. */ - else if (GET_MODE_BITSIZE (result_mode) <= HOST_BITS_PER_WIDE_INT - && (STORE_FLAG_VALUE - & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (result_mode) - 1)))) + else if (val_signbit_known_set_p (result_mode, STORE_FLAG_VALUE)) op0 = expand_shift (RSHIFT_EXPR, result_mode, op0, GET_MODE_BITSIZE (result_mode) - 1, subtarget, normalizep == 1); @@ -5206,9 +5199,9 @@ emit_store_flag_1 (rtx target, enum rtx_code code, rtx op0, rtx op1, target = gen_reg_rtx (target_mode); convert_move (target, tem, - 0 == ((normalizep ? normalizep : STORE_FLAG_VALUE) - & ((HOST_WIDE_INT) 1 - << (GET_MODE_BITSIZE (word_mode) -1)))); + !val_signbit_known_set_p (word_mode, + (normalizep ? 
normalizep + : STORE_FLAG_VALUE))); return target; } } @@ -5218,10 +5211,7 @@ emit_store_flag_1 (rtx target, enum rtx_code code, rtx op0, rtx op1, if (op1 == const0_rtx && (code == LT || code == GE) && GET_MODE_CLASS (mode) == MODE_INT && (normalizep || STORE_FLAG_VALUE == 1 - || (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT - && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode)) - == ((unsigned HOST_WIDE_INT) 1 - << (GET_MODE_BITSIZE (mode) - 1)))))) + || val_signbit_p (mode, STORE_FLAG_VALUE))) { subtarget = target; @@ -5330,9 +5320,7 @@ emit_store_flag (rtx target, enum rtx_code code, rtx op0, rtx op1, if (STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1) normalizep = STORE_FLAG_VALUE; - else if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT - && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode)) - == (unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1))) + else if (val_signbit_p (mode, STORE_FLAG_VALUE)) ; else return 0; diff --git a/gcc/expr.c b/gcc/expr.c index 176978a2257..af4c2fb8826 100644 --- a/gcc/expr.c +++ b/gcc/expr.c @@ -586,8 +586,7 @@ convert_move (rtx to, rtx from, int unsignedp) /* For truncation, usually we can just refer to FROM in a narrower mode. */ if (GET_MODE_BITSIZE (to_mode) < GET_MODE_BITSIZE (from_mode) - && TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (to_mode), - GET_MODE_BITSIZE (from_mode))) + && TRULY_NOOP_TRUNCATION_MODES_P (to_mode, from_mode)) { if (!((MEM_P (from) && ! MEM_VOLATILE_P (from) @@ -625,8 +624,7 @@ convert_move (rtx to, rtx from, int unsignedp) if (((can_extend_p (to_mode, intermediate, unsignedp) != CODE_FOR_nothing) || (GET_MODE_SIZE (to_mode) < GET_MODE_SIZE (intermediate) - && TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (to_mode), - GET_MODE_BITSIZE (intermediate)))) + && TRULY_NOOP_TRUNCATION_MODES_P (to_mode, intermediate))) && (can_extend_p (intermediate, from_mode, unsignedp) != CODE_FOR_nothing)) { @@ -754,8 +752,8 @@ convert_modes (enum machine_mode mode, enum machine_mode oldmode, rtx x, int uns || (REG_P (x) && (! HARD_REGISTER_P (x) || HARD_REGNO_MODE_OK (REGNO (x), mode)) - && TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode), - GET_MODE_BITSIZE (GET_MODE (x))))))))) + && TRULY_NOOP_TRUNCATION_MODES_P (mode, + GET_MODE (x)))))))) { /* ?? If we don't know OLDMODE, we have to assume here that X does not need sign- or zero-extension. This may not be @@ -764,14 +762,13 @@ convert_modes (enum machine_mode mode, enum machine_mode oldmode, rtx x, int uns && GET_MODE_SIZE (mode) > GET_MODE_SIZE (oldmode)) { HOST_WIDE_INT val = INTVAL (x); - int width = GET_MODE_BITSIZE (oldmode); /* We must sign or zero-extend in this case. Start by zero-extending, then sign extend if we need to. */ - val &= ((HOST_WIDE_INT) 1 << width) - 1; + val &= GET_MODE_MASK (oldmode); if (! unsignedp - && (val & ((HOST_WIDE_INT) 1 << (width - 1)))) - val |= (HOST_WIDE_INT) (-1) << width; + && val_signbit_known_set_p (oldmode, val)) + val |= ~GET_MODE_MASK (oldmode); return gen_int_mode (val, mode); } @@ -6500,9 +6497,7 @@ force_operand (rtx value, rtx target) #ifdef INSN_SCHEDULING /* On machines that have insn scheduling, we want all memory reference to be explicit, so we need to deal with such paradoxical SUBREGs. 
*/ - if (GET_CODE (value) == SUBREG && MEM_P (SUBREG_REG (value)) - && (GET_MODE_SIZE (GET_MODE (value)) - > GET_MODE_SIZE (GET_MODE (SUBREG_REG (value))))) + if (paradoxical_subreg_p (value) && MEM_P (SUBREG_REG (value))) value = simplify_gen_subreg (GET_MODE (value), force_reg (GET_MODE (SUBREG_REG (value)), diff --git a/gcc/fortran/ChangeLog b/gcc/fortran/ChangeLog index e2d5d124dac..75693cc4a60 100644 --- a/gcc/fortran/ChangeLog +++ b/gcc/fortran/ChangeLog @@ -1,3 +1,38 @@ +2011-07-06 Daniel Carrera <dcarrera@gmail.com> + + * trans-array.c (gfc_array_allocate): Rename allocatable_array to + allocatable. Rename function gfc_allocate_array_with_status to + gfc_allocate_allocatable_with_status. Update function call for + gfc_allocate_with_status. + * trans-opemp.c (gfc_omp_clause_default_ctor): Rename function + gfc_allocate_array_with_status to gfc_allocate_allocatable_with_status. + * trans-stmt.c (gfc_trans_allocate): Update function call for + gfc_allocate_with_status. Rename function gfc_allocate_array_with_status + to gfc_allocate_allocatable_with_status. + * trans.c (gfc_call_malloc): Add new parameter gfc_allocate_with_status + so it uses the library for memory allocation when -fcoarray=lib. + (gfc_allocate_allocatable_with_status): Renamed from + gfc_allocate_array_with_status. + (gfc_allocate_allocatable_with_status): Update function call for + gfc_allocate_with_status. + * trans.h (gfc_coarray_type): New enum. + (gfc_allocate_with_status): Update prototype. + (gfc_allocate_allocatable_with_status): Renamed from + gfc_allocate_array_with_status. + * trans-decl.c (generate_coarray_sym_init): Use the new constant + GFC_CAF_COARRAY_ALLOC in the call to gfor_fndecl_caf_register. + +2011-07-06 Richard Guenther <rguenther@suse.de> + + * f95-lang.c (gfc_init_decl_processing): + Merge calls to build_common_tree_nodes and build_common_tree_nodes_2. + +2011-07-04 Jakub Jelinek <jakub@redhat.com> + + PR fortran/49623 + * gfortranspec.c (lang_specific_driver): Ignore options with + CL_ERR_MISSING_ARG errors. + 2011-07-02 Janus Weil <janus@gcc.gnu.org> PR fortran/49562 diff --git a/gcc/fortran/f95-lang.c b/gcc/fortran/f95-lang.c index fea76102c35..648831f2607 100644 --- a/gcc/fortran/f95-lang.c +++ b/gcc/fortran/f95-lang.c @@ -588,9 +588,8 @@ gfc_init_decl_processing (void) /* Build common tree nodes. char_type_node is unsigned because we only use it for actual characters, not for INTEGER(1). Also, we want double_type_node to actually have double precision. */ - build_common_tree_nodes (false); + build_common_tree_nodes (false, false); - build_common_tree_nodes_2 (0); void_list_node = build_tree_list (NULL_TREE, void_type_node); /* Set up F95 type nodes. 
*/ diff --git a/gcc/fortran/gfortranspec.c b/gcc/fortran/gfortranspec.c index 4d939a05bf8..75ce05fca1c 100644 --- a/gcc/fortran/gfortranspec.c +++ b/gcc/fortran/gfortranspec.c @@ -255,6 +255,9 @@ lang_specific_driver (struct cl_decoded_option **in_decoded_options, for (i = 1; i < argc; ++i) { + if (decoded_options[i].errors & CL_ERR_MISSING_ARG) + continue; + switch (decoded_options[i].opt_index) { case OPT_SPECIAL_input_file: diff --git a/gcc/fortran/trans-array.c b/gcc/fortran/trans-array.c index 4c21389dcb3..f4f79f94161 100644 --- a/gcc/fortran/trans-array.c +++ b/gcc/fortran/trans-array.c @@ -4381,7 +4381,7 @@ gfc_array_allocate (gfc_se * se, gfc_expr * expr, tree pstat) gfc_expr **lower; gfc_expr **upper; gfc_ref *ref, *prev_ref = NULL; - bool allocatable_array, coarray; + bool allocatable, coarray; ref = expr->ref; @@ -4399,12 +4399,12 @@ gfc_array_allocate (gfc_se * se, gfc_expr * expr, tree pstat) if (!prev_ref) { - allocatable_array = expr->symtree->n.sym->attr.allocatable; + allocatable = expr->symtree->n.sym->attr.allocatable; coarray = expr->symtree->n.sym->attr.codimension; } else { - allocatable_array = prev_ref->u.c.component->attr.allocatable; + allocatable = prev_ref->u.c.component->attr.allocatable; coarray = prev_ref->u.c.component->attr.codimension; } @@ -4485,10 +4485,11 @@ gfc_array_allocate (gfc_se * se, gfc_expr * expr, tree pstat) STRIP_NOPS (pointer); /* The allocate_array variants take the old pointer as first argument. */ - if (allocatable_array) - tmp = gfc_allocate_array_with_status (&elseblock, pointer, size, pstat, expr); + if (allocatable) + tmp = gfc_allocate_allocatable_with_status (&elseblock, + pointer, size, pstat, expr); else - tmp = gfc_allocate_with_status (&elseblock, size, pstat); + tmp = gfc_allocate_with_status (&elseblock, size, pstat, false); tmp = fold_build2_loc (input_location, MODIFY_EXPR, void_type_node, pointer, tmp); diff --git a/gcc/fortran/trans-decl.c b/gcc/fortran/trans-decl.c index cca1beb2ce8..ddc7c36d98f 100644 --- a/gcc/fortran/trans-decl.c +++ b/gcc/fortran/trans-decl.c @@ -4167,7 +4167,8 @@ generate_coarray_sym_init (gfc_symbol *sym) GFC_TYPE_ARRAY_CAF_TOKEN (TREE_TYPE(decl))); tmp = build_call_expr_loc (input_location, gfor_fndecl_caf_register, 6, size, - build_int_cst (integer_type_node, 0), /* type. */ + build_int_cst (integer_type_node, + GFC_CAF_COARRAY_ALLOC), /* type. */ token, null_pointer_node, /* token, stat. */ null_pointer_node, /* errgmsg, errmsg_len. 
*/ build_int_cst (integer_type_node, 0)); diff --git a/gcc/fortran/trans-openmp.c b/gcc/fortran/trans-openmp.c index 9677659fd47..aff8554009c 100644 --- a/gcc/fortran/trans-openmp.c +++ b/gcc/fortran/trans-openmp.c @@ -188,9 +188,9 @@ gfc_omp_clause_default_ctor (tree clause, tree decl, tree outer) size = fold_build2_loc (input_location, MULT_EXPR, gfc_array_index_type, size, esize); size = gfc_evaluate_now (fold_convert (size_type_node, size), &cond_block); - ptr = gfc_allocate_array_with_status (&cond_block, - build_int_cst (pvoid_type_node, 0), - size, NULL, NULL); + ptr = gfc_allocate_allocatable_with_status (&cond_block, + build_int_cst (pvoid_type_node, 0), + size, NULL, NULL); gfc_conv_descriptor_data_set (&cond_block, decl, ptr); then_b = gfc_finish_block (&cond_block); @@ -241,9 +241,9 @@ gfc_omp_clause_copy_ctor (tree clause, tree dest, tree src) size = fold_build2_loc (input_location, MULT_EXPR, gfc_array_index_type, size, esize); size = gfc_evaluate_now (fold_convert (size_type_node, size), &block); - ptr = gfc_allocate_array_with_status (&block, - build_int_cst (pvoid_type_node, 0), - size, NULL, NULL); + ptr = gfc_allocate_allocatable_with_status (&block, + build_int_cst (pvoid_type_node, 0), + size, NULL, NULL); gfc_conv_descriptor_data_set (&block, dest, ptr); call = build_call_expr_loc (input_location, built_in_decls[BUILT_IN_MEMCPY], 3, ptr, @@ -663,9 +663,9 @@ gfc_trans_omp_array_reduction (tree c, gfc_symbol *sym, locus where) size = fold_build2_loc (input_location, MULT_EXPR, gfc_array_index_type, size, esize); size = gfc_evaluate_now (fold_convert (size_type_node, size), &block); - ptr = gfc_allocate_array_with_status (&block, - build_int_cst (pvoid_type_node, 0), - size, NULL, NULL); + ptr = gfc_allocate_allocatable_with_status (&block, + build_int_cst (pvoid_type_node, 0), + size, NULL, NULL); gfc_conv_descriptor_data_set (&block, decl, ptr); gfc_add_expr_to_block (&block, gfc_trans_assignment (e1, e2, false, false)); diff --git a/gcc/fortran/trans-stmt.c b/gcc/fortran/trans-stmt.c index f3347a6a238..88fdcd182d0 100644 --- a/gcc/fortran/trans-stmt.c +++ b/gcc/fortran/trans-stmt.c @@ -4847,10 +4847,10 @@ gfc_trans_allocate (gfc_code * code) /* Allocate - for non-pointers with re-alloc checking. */ if (gfc_expr_attr (expr).allocatable) - tmp = gfc_allocate_array_with_status (&se.pre, se.expr, memsz, - pstat, expr); + tmp = gfc_allocate_allocatable_with_status (&se.pre, se.expr, memsz, + pstat, expr); else - tmp = gfc_allocate_with_status (&se.pre, memsz, pstat); + tmp = gfc_allocate_with_status (&se.pre, memsz, pstat, false); tmp = fold_build2_loc (input_location, MODIFY_EXPR, void_type_node, se.expr, diff --git a/gcc/fortran/trans.c b/gcc/fortran/trans.c index 33593c5626a..683e3f1e48b 100644 --- a/gcc/fortran/trans.c +++ b/gcc/fortran/trans.c @@ -585,7 +585,8 @@ gfc_call_malloc (stmtblock_t * block, tree type, tree size) return newmem; } */ tree -gfc_allocate_with_status (stmtblock_t * block, tree size, tree status) +gfc_allocate_with_status (stmtblock_t * block, tree size, tree status, + bool coarray_lib) { stmtblock_t alloc_block; tree res, tmp, msg, cond; @@ -616,14 +617,29 @@ gfc_allocate_with_status (stmtblock_t * block, tree size, tree status) /* The allocation itself. 
*/ gfc_start_block (&alloc_block); - gfc_add_modify (&alloc_block, res, - fold_convert (prvoid_type_node, - build_call_expr_loc (input_location, - built_in_decls[BUILT_IN_MALLOC], 1, - fold_build2_loc (input_location, - MAX_EXPR, size_type_node, size, - build_int_cst (size_type_node, - 1))))); + if (coarray_lib) + { + gfc_add_modify (&alloc_block, res, + fold_convert (prvoid_type_node, + build_call_expr_loc (input_location, + gfor_fndecl_caf_register, 3, + fold_build2_loc (input_location, + MAX_EXPR, size_type_node, size, + build_int_cst (size_type_node, 1)), + build_int_cst (integer_type_node, + GFC_CAF_COARRAY_ALLOC), + null_pointer_node))); /* Token */ + } + else + { + gfc_add_modify (&alloc_block, res, + fold_convert (prvoid_type_node, + build_call_expr_loc (input_location, + built_in_decls[BUILT_IN_MALLOC], 1, + fold_build2_loc (input_location, + MAX_EXPR, size_type_node, size, + build_int_cst (size_type_node, 1))))); + } msg = gfc_build_addr_expr (pchar_type_node, gfc_build_localized_cstring_const ("Allocation would exceed memory limit")); @@ -658,13 +674,13 @@ gfc_allocate_with_status (stmtblock_t * block, tree size, tree status) /* Generate code for an ALLOCATE statement when the argument is an - allocatable array. If the array is currently allocated, it is an + allocatable variable. If the variable is currently allocated, it is an error to allocate it again. This function follows the following pseudo-code: void * - allocate_array (void *mem, size_t size, integer_type *stat) + allocate_allocatable (void *mem, size_t size, integer_type *stat) { if (mem == NULL) return allocate (size, stat); @@ -685,8 +701,8 @@ gfc_allocate_with_status (stmtblock_t * block, tree size, tree status) expr must be set to the original expression being allocated for its locus and variable name in case a runtime error has to be printed. */ tree -gfc_allocate_array_with_status (stmtblock_t * block, tree mem, tree size, - tree status, gfc_expr* expr) +gfc_allocate_allocatable_with_status (stmtblock_t * block, tree mem, tree size, + tree status, gfc_expr* expr) { stmtblock_t alloc_block; tree res, tmp, null_mem, alloc, error; @@ -703,11 +719,15 @@ gfc_allocate_array_with_status (stmtblock_t * block, tree mem, tree size, /* If mem is NULL, we call gfc_allocate_with_status. */ gfc_start_block (&alloc_block); - tmp = gfc_allocate_with_status (&alloc_block, size, status); + tmp = gfc_allocate_with_status (&alloc_block, size, status, + gfc_option.coarray == GFC_FCOARRAY_LIB + && gfc_expr_attr (expr).codimension); + gfc_add_modify (&alloc_block, res, fold_convert (type, tmp)); alloc = gfc_finish_block (&alloc_block); - /* Otherwise, we issue a runtime error or set the status variable. */ + /* If mem is not NULL, we issue a runtime error or set the + status variable. */ if (expr) { tree varname; @@ -737,7 +757,7 @@ gfc_allocate_array_with_status (stmtblock_t * block, tree mem, tree size, fold_convert (pvoid_type_node, mem)); gfc_add_expr_to_block (&set_status_block, tmp); - tmp = gfc_allocate_with_status (&set_status_block, size, status); + tmp = gfc_allocate_with_status (&set_status_block, size, status, false); gfc_add_modify (&set_status_block, res, fold_convert (type, tmp)); gfc_add_modify (&set_status_block, diff --git a/gcc/fortran/trans.h b/gcc/fortran/trans.h index e14e41f8a25..c56aff8ddd0 100644 --- a/gcc/fortran/trans.h +++ b/gcc/fortran/trans.h @@ -94,6 +94,18 @@ typedef struct gfc_se gfc_se; +/* Denotes different types of coarray. + Please keep in sync with libgfortran/caf/libcaf.h. 
*/ +typedef enum +{ + GFC_CAF_COARRAY_STATIC, + GFC_CAF_COARRAY_ALLOC, + GFC_CAF_LOCK, + GFC_CAF_LOCK_COMP +} +gfc_coarray_type; + + /* Scalarization State chain. Created by walking an expression tree before creating the scalarization loops. Then passed as part of a gfc_se structure to translate the expression inside the loop. Note that these chains are @@ -528,11 +540,12 @@ tree gfc_call_malloc (stmtblock_t *, tree, tree); /* Build a memcpy call. */ tree gfc_build_memcpy_call (tree, tree, tree); -/* Allocate memory for arrays, with optional status variable. */ -tree gfc_allocate_array_with_status (stmtblock_t*, tree, tree, tree, gfc_expr*); +/* Allocate memory for allocatable variables, with optional status variable. */ +tree gfc_allocate_allocatable_with_status (stmtblock_t*, + tree, tree, tree, gfc_expr*); /* Allocate memory, with optional status variable. */ -tree gfc_allocate_with_status (stmtblock_t *, tree, tree); +tree gfc_allocate_with_status (stmtblock_t *, tree, tree, bool); /* Generate code to deallocate an array. */ tree gfc_deallocate_with_status (tree, tree, bool, gfc_expr*); diff --git a/gcc/fwprop.c b/gcc/fwprop.c index 444a539cf79..5db9ed88887 100644 --- a/gcc/fwprop.c +++ b/gcc/fwprop.c @@ -1101,6 +1101,7 @@ forward_propagate_subreg (df_ref use, rtx def_insn, rtx def_set) src = SET_SRC (def_set); if (GET_CODE (src) == SUBREG && REG_P (SUBREG_REG (src)) + && REGNO (SUBREG_REG (src)) >= FIRST_PSEUDO_REGISTER && GET_MODE (SUBREG_REG (src)) == use_mode && subreg_lowpart_p (src) && all_uses_available_at (def_insn, use_insn)) @@ -1119,6 +1120,7 @@ forward_propagate_subreg (df_ref use, rtx def_insn, rtx def_set) if ((GET_CODE (src) == ZERO_EXTEND || GET_CODE (src) == SIGN_EXTEND) && REG_P (XEXP (src, 0)) + && REGNO (XEXP (src, 0)) >= FIRST_PSEUDO_REGISTER && GET_MODE (XEXP (src, 0)) == use_mode && !free_load_extend (src, def_insn) && all_uses_available_at (def_insn, use_insn)) diff --git a/gcc/gcc.c b/gcc/gcc.c index 2996de40d23..235c54c1347 100644 --- a/gcc/gcc.c +++ b/gcc/gcc.c @@ -518,7 +518,7 @@ proper position among the other output files. */ /* XXX: should exactly match hooks provided by libmudflap.a */ #define MFWRAP_SPEC " %{static: %{fmudflap|fmudflapth: \ --wrap=malloc --wrap=free --wrap=calloc --wrap=realloc\ - --wrap=mmap --wrap=munmap --wrap=alloca\ + --wrap=mmap --wrap=mmap64 --wrap=munmap --wrap=alloca\ } %{fmudflapth: --wrap=pthread_create\ }} %{fmudflap|fmudflapth: --wrap=main}" #endif diff --git a/gcc/go/ChangeLog b/gcc/go/ChangeLog index 20385d1ea8e..fb26cabfc31 100644 --- a/gcc/go/ChangeLog +++ b/gcc/go/ChangeLog @@ -1,3 +1,8 @@ +2011-07-06 Richard Guenther <rguenther@suse.de> + + * go-lang.c (go_langhook_init): + Merge calls to build_common_tree_nodes and build_common_tree_nodes_2. + 2011-06-14 Joseph Myers <joseph@codesourcery.com> * Make-lang.in (go/go-lang.o, go/go-backend.o): Update diff --git a/gcc/go/go-lang.c b/gcc/go/go-lang.c index 576e35f7551..6abe3b84789 100644 --- a/gcc/go/go-lang.c +++ b/gcc/go/go-lang.c @@ -86,9 +86,7 @@ struct GTY(()) language_function static bool go_langhook_init (void) { - build_common_tree_nodes (false); - - build_common_tree_nodes_2 (0); + build_common_tree_nodes (false, false); /* We must create the gogo IR after calling build_common_tree_nodes (because Gogo::define_builtin_function_trees refers indirectly diff --git a/gcc/graphite-blocking.c b/gcc/graphite-blocking.c index bcd077a8310..967de9d9462 100644 --- a/gcc/graphite-blocking.c +++ b/gcc/graphite-blocking.c @@ -89,7 +89,7 @@ along with GCC; see the file COPYING3. 
If not see # } */ -static bool +static void pbb_strip_mine_time_depth (poly_bb_p pbb, int time_depth, int stride) { ppl_dimension_type iter, dim, strip; @@ -151,8 +151,6 @@ pbb_strip_mine_time_depth (poly_bb_p pbb, int time_depth, int stride) ppl_Polyhedron_add_constraint (res, new_cstr); ppl_delete_Constraint (new_cstr); } - - return true; } /* Returns true when strip mining with STRIDE of the loop LST is @@ -177,10 +175,10 @@ lst_strip_mine_profitable_p (lst_p lst, int stride) return res; } -/* Strip-mines all the loops of LST with STRIDE. Return true if it - did strip-mined some loops. */ +/* Strip-mines all the loops of LST with STRIDE. Return the number of + loops strip-mined. */ -static bool +static int lst_do_strip_mine_loop (lst_p lst, int depth, int stride) { int i; @@ -188,26 +186,26 @@ lst_do_strip_mine_loop (lst_p lst, int depth, int stride) poly_bb_p pbb; if (!lst) - return false; + return 0; if (LST_LOOP_P (lst)) { - bool res = false; + int res = 0; FOR_EACH_VEC_ELT (lst_p, LST_SEQ (lst), i, l) - res |= lst_do_strip_mine_loop (l, depth, stride); + res += lst_do_strip_mine_loop (l, depth, stride); return res; } pbb = LST_PBB (lst); - return pbb_strip_mine_time_depth (pbb, psct_dynamic_dim (pbb, depth), - stride); + pbb_strip_mine_time_depth (pbb, psct_dynamic_dim (pbb, depth), stride); + return 1; } /* Strip-mines all the loops of LST with STRIDE. When STRIDE is zero, - read the stride from the PARAM_LOOP_BLOCK_TILE_SIZE. Return true - if it did strip-mined some loops. + read the stride from the PARAM_LOOP_BLOCK_TILE_SIZE. Return the + number of strip-mined loops. Strip mining transforms a loop @@ -221,12 +219,12 @@ lst_do_strip_mine_loop (lst_p lst, int depth, int stride) | S (i = k + j); */ -static bool +static int lst_do_strip_mine (lst_p lst, int stride) { int i; lst_p l; - bool res = false; + int res = 0; int depth; if (!stride) @@ -237,23 +235,23 @@ lst_do_strip_mine (lst_p lst, int stride) return false; FOR_EACH_VEC_ELT (lst_p, LST_SEQ (lst), i, l) - res |= lst_do_strip_mine (l, stride); + res += lst_do_strip_mine (l, stride); depth = lst_depth (lst); if (depth >= 0 && lst_strip_mine_profitable_p (lst, stride)) { - res |= lst_do_strip_mine_loop (lst, lst_depth (lst), stride); + res += lst_do_strip_mine_loop (lst, lst_depth (lst), stride); lst_add_loop_under_loop (lst); } return res; } -/* Strip mines all the loops in SCOP. Returns true when some loops - have been strip-mined. */ +/* Strip mines all the loops in SCOP. Returns the number of + strip-mined loops. */ -bool +int scop_do_strip_mine (scop_p scop, int stride) { return lst_do_strip_mine (SCOP_TRANSFORMED_SCHEDULE (scop), stride); @@ -265,27 +263,22 @@ scop_do_strip_mine (scop_p scop, int stride) bool scop_do_block (scop_p scop) { - bool strip_mined = false; - bool interchanged = false; - store_scattering (scop); - strip_mined = lst_do_strip_mine (SCOP_TRANSFORMED_SCHEDULE (scop), 0); - interchanged = scop_do_interchange (scop); - - /* If we don't interchange loops, the strip mine alone will not be - profitable, and the transform is not a loop blocking: so revert - the transform. */ - if (!interchanged) + /* If we don't strip mine at least two loops, or not interchange + loops, the strip mine alone will not be profitable, and the + transform is not a loop blocking: so revert the transform. 
*/ + if (lst_do_strip_mine (SCOP_TRANSFORMED_SCHEDULE (scop), 0) < 2 + || scop_do_interchange (scop) == 0) { restore_scattering (scop); return false; } - else if (strip_mined && interchanged - && dump_file && (dump_flags & TDF_DETAILS)) + + if (dump_file && (dump_flags & TDF_DETAILS)) fprintf (dump_file, "SCoP will be loop blocked.\n"); - return strip_mined || interchanged; + return true; } #endif diff --git a/gcc/graphite-clast-to-gimple.c b/gcc/graphite-clast-to-gimple.c index c8356d34a63..6b17631e408 100644 --- a/gcc/graphite-clast-to-gimple.c +++ b/gcc/graphite-clast-to-gimple.c @@ -88,7 +88,7 @@ clast_name_to_index (clast_name_p name, htab_t index_table) #ifdef CLOOG_ORG gcc_assert (name->type == clast_expr_name); - tmp.name = ((const struct clast_name*) name)->name; + tmp.name = ((const struct clast_name *) name)->name; #else tmp.name = name; #endif @@ -379,72 +379,16 @@ clast_to_gcc_expression (tree type, struct clast_expr *e, return NULL_TREE; } -/* Return the precision needed to represent the value VAL. */ - -static int -precision_for_value (mpz_t val) -{ - mpz_t x, y, two; - int precision; - - mpz_init (x); - mpz_init (y); - mpz_init (two); - mpz_set_si (x, 2); - mpz_set (y, val); - mpz_set_si (two, 2); - precision = 1; - - if (mpz_sgn (y) < 0) - mpz_neg (y, y); - - while (mpz_cmp (y, x) >= 0) - { - mpz_mul (x, x, two); - precision++; - } - - mpz_clear (x); - mpz_clear (y); - mpz_clear (two); - - return precision; -} - -/* Return the precision needed to represent the values between LOW and - UP. */ - -static int -precision_for_interval (mpz_t low, mpz_t up) -{ - mpz_t diff; - int precision; - - gcc_assert (mpz_cmp (low, up) <= 0); - - mpz_init (diff); - mpz_sub (diff, up, low); - precision = precision_for_value (diff); - mpz_clear (diff); - - return precision; -} - -/* Return a type that could represent the integer value VAL. */ +/* Return a type that could represent the values between V1 and V2. */ static tree -gcc_type_for_interval (mpz_t low, mpz_t up) +gcc_type_for_interval (mpz_t v1, mpz_t v2) { - bool unsigned_p = true; - int precision, prec_up, prec_int; + bool unsigned_p; tree type; enum machine_mode mode; - - gcc_assert (mpz_cmp (low, up) <= 0); - - prec_up = precision_for_value (up); - prec_int = precision_for_interval (low, up); - precision = MAX (prec_up, prec_int); + int precision = MAX (mpz_sizeinbase (v1, 2), + mpz_sizeinbase (v2, 2)); if (precision > BITS_PER_WORD) { @@ -452,14 +396,10 @@ gcc_type_for_interval (mpz_t low, mpz_t up) return integer_type_node; } - if (mpz_sgn (low) <= 0) - unsigned_p = false; - - else if (precision < BITS_PER_WORD) - { - unsigned_p = false; - precision++; - } + if (mpz_cmp (v1, v2) <= 0) + unsigned_p = (mpz_sgn (v1) >= 0); + else + unsigned_p = (mpz_sgn (v2) >= 0); mode = smallest_mode_for_size (precision, MODE_INT); precision = GET_MODE_PRECISION (mode); diff --git a/gcc/graphite-interchange.c b/gcc/graphite-interchange.c index 934839aace0..cb4d32cc0d2 100644 --- a/gcc/graphite-interchange.c +++ b/gcc/graphite-interchange.c @@ -664,27 +664,27 @@ lst_interchange_select_inner (scop_p scop, lst_p outer_father, int outer, } /* Interchanges all the loops of LOOP and the loops of its body that - are considered profitable to interchange. Return true if it did - interchanged some loops. OUTER is the index in LST_SEQ (LOOP) that + are considered profitable to interchange. Return the number of + interchanged loops. OUTER is the index in LST_SEQ (LOOP) that points to the next outer loop to be considered for interchange. 
*/ -static bool +static int lst_interchange_select_outer (scop_p scop, lst_p loop, int outer) { lst_p l; - bool res = false; + int res = 0; int i = 0; lst_p father; if (!loop || !LST_LOOP_P (loop)) - return false; + return 0; father = LST_LOOP_FATHER (loop); if (father) { while (lst_interchange_select_inner (scop, father, outer, loop)) { - res = true; + res++; loop = VEC_index (lst_p, LST_SEQ (father), outer); } } @@ -692,17 +692,18 @@ lst_interchange_select_outer (scop_p scop, lst_p loop, int outer) if (LST_LOOP_P (loop)) FOR_EACH_VEC_ELT (lst_p, LST_SEQ (loop), i, l) if (LST_LOOP_P (l)) - res |= lst_interchange_select_outer (scop, l, i); + res += lst_interchange_select_outer (scop, l, i); return res; } -/* Interchanges all the loop depths that are considered profitable for SCOP. */ +/* Interchanges all the loop depths that are considered profitable for + SCOP. Return the number of interchanged loops. */ -bool +int scop_do_interchange (scop_p scop) { - bool res = lst_interchange_select_outer + int res = lst_interchange_select_outer (scop, SCOP_TRANSFORMED_SCHEDULE (scop), 0); lst_update_scattering (SCOP_TRANSFORMED_SCHEDULE (scop)); diff --git a/gcc/graphite-poly.h b/gcc/graphite-poly.h index 3bf87b088c9..417e99eef25 100644 --- a/gcc/graphite-poly.h +++ b/gcc/graphite-poly.h @@ -410,8 +410,8 @@ extern void print_iteration_domain (FILE *, poly_bb_p, int); extern void print_iteration_domains (FILE *, scop_p, int); extern void debug_iteration_domain (poly_bb_p, int); extern void debug_iteration_domains (scop_p, int); -extern bool scop_do_interchange (scop_p); -extern bool scop_do_strip_mine (scop_p, int); +extern int scop_do_interchange (scop_p); +extern int scop_do_strip_mine (scop_p, int); extern bool scop_do_block (scop_p); extern bool flatten_all_loops (scop_p); extern void pbb_number_of_iterations_at_time (poly_bb_p, graphite_dim_t, mpz_t); diff --git a/gcc/graphite-ppl.h b/gcc/graphite-ppl.h index 695d01f3425..49bde618f47 100644 --- a/gcc/graphite-ppl.h +++ b/gcc/graphite-ppl.h @@ -131,7 +131,8 @@ value_max (mpz_t res, mpz_t v1, mpz_t v2) { if (mpz_cmp (v1, v2) < 0) mpz_set (res, v2); - mpz_set (res, v1); + else + mpz_set (res, v1); } /* Builds a new identity map for dimension DIM. */ diff --git a/gcc/java/ChangeLog b/gcc/java/ChangeLog index 98dd104b2bb..b3e3ec66c83 100644 --- a/gcc/java/ChangeLog +++ b/gcc/java/ChangeLog @@ -1,3 +1,8 @@ +2011-07-06 Richard Guenther <rguenther@suse.de> + + * decl.c (java_init_decl_processing): + Merge calls to build_common_tree_nodes and build_common_tree_nodes_2. + 2011-06-21 Andrew MacLeod <amacleod@redhat.com> * builtins.c: Add sync_ or SYNC__ to builtin names. diff --git a/gcc/java/decl.c b/gcc/java/decl.c index e4a3db240d4..179a2c3e9aa 100644 --- a/gcc/java/decl.c +++ b/gcc/java/decl.c @@ -567,10 +567,7 @@ java_init_decl_processing (void) global_binding_level = current_binding_level; /* Build common tree nodes, Java has an unsigned char. */ - build_common_tree_nodes (false); - - /* Build the rest of the common tree nodes. */ - build_common_tree_nodes_2 (0); + build_common_tree_nodes (false, false); /* ??? Now we continue and override some of the built types again with Java specific types. 
As the above generated types are diff --git a/gcc/longlong.h b/gcc/longlong.h index 7d11e10646e..cd4c40ff32a 100644 --- a/gcc/longlong.h +++ b/gcc/longlong.h @@ -250,11 +250,27 @@ UDItype __umulsidi3 (USItype, USItype); #define COUNT_LEADING_ZEROS_0 32 #endif -#if defined (__AVR__) && W_TYPE_SIZE == 32 +#if defined (__AVR__) + +#if W_TYPE_SIZE == 16 +#define count_leading_zeros(COUNT,X) ((COUNT) = __builtin_clz (X)) +#define count_trailing_zeros(COUNT,X) ((COUNT) = __builtin_ctz (X)) +#define COUNT_LEADING_ZEROS_0 16 +#endif /* W_TYPE_SIZE == 16 */ + +#if W_TYPE_SIZE == 32 #define count_leading_zeros(COUNT,X) ((COUNT) = __builtin_clzl (X)) #define count_trailing_zeros(COUNT,X) ((COUNT) = __builtin_ctzl (X)) #define COUNT_LEADING_ZEROS_0 32 -#endif /* defined (__AVR__) && W_TYPE_SIZE == 32 */ +#endif /* W_TYPE_SIZE == 32 */ + +#if W_TYPE_SIZE == 64 +#define count_leading_zeros(COUNT,X) ((COUNT) = __builtin_clzll (X)) +#define count_trailing_zeros(COUNT,X) ((COUNT) = __builtin_ctzll (X)) +#define COUNT_LEADING_ZEROS_0 64 +#endif /* W_TYPE_SIZE == 64 */ + +#endif /* defined (__AVR__) */ #if defined (__CRIS__) && __CRIS_arch_version >= 3 #define count_leading_zeros(COUNT, X) ((COUNT) = __builtin_clz (X)) diff --git a/gcc/loop-doloop.c b/gcc/loop-doloop.c index 560d49a32f9..f8429c4fd28 100644 --- a/gcc/loop-doloop.c +++ b/gcc/loop-doloop.c @@ -465,7 +465,7 @@ doloop_modify (struct loop *loop, struct niter_desc *desc, Note that the maximum value loaded is iterations_max - 1. */ if (desc->niter_max <= ((unsigned HOST_WIDEST_INT) 1 - << (GET_MODE_BITSIZE (mode) - 1))) + << (GET_MODE_PRECISION (mode) - 1))) nonneg = 1; break; @@ -677,7 +677,7 @@ doloop_optimize (struct loop *loop) doloop_seq = gen_doloop_end (doloop_reg, iterations, iterations_max, GEN_INT (level), start_label); - word_mode_size = GET_MODE_BITSIZE (word_mode); + word_mode_size = GET_MODE_PRECISION (word_mode); word_mode_max = ((unsigned HOST_WIDE_INT) 1 << (word_mode_size - 1) << 1) - 1; if (! doloop_seq @@ -685,10 +685,10 @@ doloop_optimize (struct loop *loop) /* Before trying mode different from the one in that # of iterations is computed, we must be sure that the number of iterations fits into the new mode. */ - && (word_mode_size >= GET_MODE_BITSIZE (mode) + && (word_mode_size >= GET_MODE_PRECISION (mode) || desc->niter_max <= word_mode_max)) { - if (word_mode_size > GET_MODE_BITSIZE (mode)) + if (word_mode_size > GET_MODE_PRECISION (mode)) { zero_extend_p = true; iterations = simplify_gen_unary (ZERO_EXTEND, word_mode, diff --git a/gcc/lto/ChangeLog b/gcc/lto/ChangeLog index 27bc01563e3..c80b33a6b1a 100644 --- a/gcc/lto/ChangeLog +++ b/gcc/lto/ChangeLog @@ -1,3 +1,8 @@ +2011-07-06 Richard Guenther <rguenther@suse.de> + + * lto-lang.c (lto_init): + Merge calls to build_common_tree_nodes and build_common_tree_nodes_2. + 2011-06-11 Jan Hubicka <jh@suse.cz> PR lto/48246 diff --git a/gcc/lto/lto-lang.c b/gcc/lto/lto-lang.c index 8a4cee029d3..3574da02d11 100644 --- a/gcc/lto/lto-lang.c +++ b/gcc/lto/lto-lang.c @@ -1085,7 +1085,7 @@ lto_init (void) linemap_add (line_table, LC_RENAME, 0, NULL, 0); /* Create the basic integer types. */ - build_common_tree_nodes (flag_signed_char); + build_common_tree_nodes (flag_signed_char, /*short_double=*/false); /* The global tree for the main identifier is filled in by language-specific front-end initialization that is not run in the @@ -1102,8 +1102,6 @@ lto_init (void) ptrdiff_type_node = integer_type_node; - /* Create other basic types. 
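As a quick stand-alone illustration of the builtins that the longlong.h hunk above maps count_leading_zeros to for the various W_TYPE_SIZE values (on a typical 64-bit host this prints 31, 63 and 63):

/* __builtin_clz operates on int, __builtin_clzl on long and
   __builtin_clzll on long long; each returns the number of leading
   zero bits of its argument.  */
#include <stdio.h>

int
main (void)
{
  unsigned int x = 1;
  unsigned long y = 1;
  unsigned long long z = 1;

  printf ("%d %d %d\n",
          __builtin_clz (x),     /* leading zeros in a 32-bit int */
          __builtin_clzl (y),    /* leading zeros in a 64-bit long */
          __builtin_clzll (z));  /* leading zeros in a 64-bit long long */
  return 0;
}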
*/ - build_common_tree_nodes_2 (/*short_double=*/false); lto_build_c_type_nodes (); gcc_assert (va_list_type_node); diff --git a/gcc/machmode.h b/gcc/machmode.h index bdcd02d435d..f979b95ea0b 100644 --- a/gcc/machmode.h +++ b/gcc/machmode.h @@ -275,4 +275,12 @@ extern enum machine_mode ptr_mode; /* Target-dependent machine mode initialization - in insn-modes.c. */ extern void init_adjust_machine_modes (void); +#define TRULY_NOOP_TRUNCATION_MODES_P(MODE1, MODE2) \ + TRULY_NOOP_TRUNCATION (GET_MODE_PRECISION (MODE1), \ + GET_MODE_PRECISION (MODE2)) + +#define HWI_COMPUTABLE_MODE_P(MODE) \ + (SCALAR_INT_MODE_P (MODE) \ + && GET_MODE_PRECISION (MODE) <= HOST_BITS_PER_WIDE_INT) + #endif /* not HAVE_MACHINE_MODES */ diff --git a/gcc/melt-module.mk b/gcc/melt-module.mk index 3f457366649..ef0c30e9fca 100644 --- a/gcc/melt-module.mk +++ b/gcc/melt-module.mk @@ -94,21 +94,21 @@ $(MELTMODULE_OPTIMIZED): $(MELTMODULE_OBJPICFILES) $(MELTSTAMP) $(RM) $(MELTSTAMP) $(MELTMODULE_DYNAMIC): $(MELTMODULE_OBJDYNPICFILES) $(MELTSTAMP) - $(GCCMELT_CC) $(GCCMELT_CFLAGS) -DMELTGCC_DYNAMIC_OBJSTRUCT \ + $(GCCMELT_CC) $(GCCMELT_CFLAGS) -DMELT_HAVE_DEBUG=1 -DMELTGCC_DYNAMIC_OBJSTRUCT \ -fPIC -shared $(MELTMODULE_OBJDYNPICFILES) $(MELTSTAMP) -o $@ $(RM) $(MELTSTAMP) $(MELTMODULE_NOLINE): $(MELTMODULE_OBJNOLPICFILES) $(MELTSTAMP) - $(GCCMELT_CC) $(GCCMELT_CFLAGS) -g -DMELTGCC_NOLINENUMBERING \ + $(GCCMELT_CC) $(GCCMELT_CFLAGS) -g -DMELT_HAVE_DEBUG=1 -DMELTGCC_NOLINENUMBERING \ -fPIC -shared $(MELTMODULE_OBJNOLPICFILES) $(MELTSTAMP) -o $@ $(RM) $(MELTSTAMP) $(GCCMELT_MODULE_WORKSPACE)/%.pic.o: $(MELTMODULE_SRCDIR)/%.c $(GCCMELT_CC) $(GCCMELT_CFLAGS) -fPIC -c -o $@ $< $(GCCMELT_MODULE_WORKSPACE)/%.dynpic.o: $(MELTMODULE_SRCDIR)/%.c - $(GCCMELT_CC) $(GCCMELT_CFLAGS) -DMELTGCC_DYNAMIC_OBJSTRUCT -fPIC -c -o $@ $< + $(GCCMELT_CC) $(GCCMELT_CFLAGS) -DMELT_HAVE_DEBUG=1 -DMELTGCC_DYNAMIC_OBJSTRUCT -fPIC -c -o $@ $< $(GCCMELT_MODULE_WORKSPACE)/%.nolpic.o: $(MELTMODULE_SRCDIR)/%.c - $(GCCMELT_CC) $(GCCMELT_CFLAGS) -g -DMELTGCC_NOLINENUMBERING -fPIC -c -o $@ $< + $(GCCMELT_CC) $(GCCMELT_CFLAGS) -g -DMELT_HAVE_DEBUG=1 -DMELTGCC_NOLINENUMBERING -fPIC -c -o $@ $< ## There is only one make recipe. Trailing backslashes are essential. ## Even a parallel make should run it in sequence! diff --git a/gcc/objc/ChangeLog b/gcc/objc/ChangeLog index 208decc7c86..e2741977546 100644 --- a/gcc/objc/ChangeLog +++ b/gcc/objc/ChangeLog @@ -1,3 +1,47 @@ +2011-07-04 Nicola Pero <nicola.pero@meta-innovation.com> + + Refactored encoding code into objc-encoding.h and objc-encoding.c. + * objc-act.c (util_obstack, util_firstobj, encode_type_qualifiers, + encode_type, generating_instance_variables, objc_method_parm_type, + objc_encoded_type_size, encode_method_prototype, + objc_build_encode_expr, pointee_is_readonly, encode_pointer, + encode_array, encode_vector, encode_aggregate_fields, + encode_aggregate_within, encode_next_bitfield, + encode_gnu_bitfield, encode_field_decl, + objc_v2_encode_property_attr): Moved to objc-encoding.h and + objc-encoding.c. No change in the actual code. + Include objc-encoding.h. + (objc_init): Added TODO. + (objc_build_property_setter_name): Made non-static so it can be + called from objc-encoding.c. + * objc-act.h (OBJC_ENCODE_INLINE_DEFS, + OBJC_ENCODE_DONT_INLINE_DEFS): Moved to objc-encoding.h. + * objc-runtime-shared-support.h (objc_v2_encode_property_attr, + encode_method_prototype, encode_field_decl, + generating_instance_variables): Moved to objc-encoding.h. + (objc_build_property_setter_name): Declare. 
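A stand-alone sketch of the intent behind the new HWI_COMPUTABLE_MODE_P macro added to machmode.h above, with HOST_BITS_PER_WIDE_INT assumed to be 64 for illustration:

/* A mode is "HWI computable" when it is a scalar integer mode whose
   precision fits in a HOST_WIDE_INT, so its values can be manipulated
   with host arithmetic.  */
#include <stdio.h>

#define HOST_BITS_PER_WIDE_INT 64

static int
hwi_computable_p (int scalar_int_mode_p, int precision)
{
  return scalar_int_mode_p && precision <= HOST_BITS_PER_WIDE_INT;
}

int
main (void)
{
  printf ("SImode  (32 bits): %d\n", hwi_computable_p (1, 32));   /* 1 */
  printf ("TImode (128 bits): %d\n", hwi_computable_p (1, 128));  /* 0 */
  return 0;
}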
+ * objc-encoding.c: New. + * objc-encoding.h: New. + * objc-gnu-runtime-abi-01.c: Include objc-encoding.h. + * objc-next-runtime-abi-01.c: Likewise. + * objc-next-runtime-abi-02.c: Likewise. + * objc-runtime-shared-support.c: Likewise. + * Make-lang.in (OBJC_OBJS): Added objc-encoding.o. + (objc/objc-lang.o): Reordered dependencies. + (objc/objc-runtime-shared-support.o): Reordered dependencies. + Added dependencies on objc-encoding.h and on $(GGC_H), + $(DIAGNOSTIC_CORE_H), $(FLAGS_H) and input.h. + (objc/objc-gnu-runtime-abi-01.o): Likewise. + (objc/objc-next-runtime-abi-01.o): Likewise. + (objc/objc-next-runtime-abi-02.o): Likewise. + (objc/objc-act.o): Reordered dependencies. Added dependency on + objc-encoding.h. + (objc/objc-encoding.o): New rule. + + * objc-encoding.c (encode_type): Use "%<%T%>" format when printing + warning "unknown type %<%T%> found during Objective-C encoding" + instead of using gen_type_name. + 2011-06-05 Nicola Pero <nicola.pero@meta-innovation.com> * objc-act.c (receiver_is_class_object): Expanded comment. diff --git a/gcc/objc/Make-lang.in b/gcc/objc/Make-lang.in index 357028f26ea..1ac74ba4549 100644 --- a/gcc/objc/Make-lang.in +++ b/gcc/objc/Make-lang.in @@ -55,6 +55,7 @@ OBJC_OBJS = objc/objc-lang.o objc/objc-act.o \ objc/objc-gnu-runtime-abi-01.o \ objc/objc-next-runtime-abi-01.o \ objc/objc-next-runtime-abi-02.o \ + objc/objc-encoding.o objc_OBJS = $(OBJC_OBJS) cc1obj-checksum.o @@ -73,33 +74,75 @@ cc1obj$(exeext): $(OBJC_OBJS) $(C_AND_OBJC_OBJS) cc1obj-checksum.o $(BACKEND) $( # Objective C language specific files. -objc/objc-lang.o : objc/objc-lang.c $(START_HDRS) \ - $(GGC_H) $(LANGHOOKS_DEF_H) $(C_COMMON_H) gtype-objc.h \ +# When editing, please keep the objc/ header dependencies in +# alphabetical order, and try to use a similar logical order for the +# other files between the different targets. 
+ +objc/objc-lang.o : objc/objc-lang.c \ + $(START_HDRS) \ + $(GGC_H) \ + $(LANGHOOKS_DEF_H) $(C_COMMON_H) gtype-objc.h \ c-objc-common.h objc/objc-runtime-shared-support.o : objc/objc-runtime-shared-support.c \ - $(START_HDRS) objc/objc-runtime-shared-support.h $(OBSTACK_H) \ - objc/objc-next-metadata-tags.h gt-objc-objc-runtime-shared-support.h - -objc/objc-gnu-runtime-abi-01.o: objc/objc-gnu-runtime-abi-01.c $(START_HDRS) \ - objc/objc-runtime-hooks.h $(GGC_H) \ - objc/objc-runtime-shared-support.h gt-objc-objc-gnu-runtime-abi-01.h toplev.h - -objc/objc-next-runtime-abi-01.o: objc/objc-next-runtime-abi-01.c $(START_HDRS) \ - $(GGC_H) objc/objc-runtime-hooks.h \ - objc/objc-next-metadata-tags.h gt-objc-objc-next-runtime-abi-01.h output.h \ - objc/objc-runtime-shared-support.h $(TARGET_H) - -objc/objc-next-runtime-abi-02.o: objc/objc-next-runtime-abi-02.c $(START_HDRS) \ - $(GGC_H) objc/objc-runtime-hooks.h \ - objc/objc-next-metadata-tags.h gt-objc-objc-next-runtime-abi-02.h $(TARGET_H) \ - objc/objc-runtime-shared-support.h $(OBSTACK_H) - -objc/objc-act.o : objc/objc-act.c $(START_HDRS) $(GGC_H) \ - $(DIAGNOSTIC_CORE_H) toplev.h $(FLAGS_H) input.h $(FUNCTION_H) output.h debug.h \ - $(LANGHOOKS_DEF_H) $(HASHTAB_H) $(C_PRAGMA_H) gt-objc-objc-act.h $(OBSTACK_H) \ - $(GIMPLE_H) objc/objc-runtime-shared-support.h objc/objc-runtime-hooks.h \ - $(C_TARGET_H) + gt-objc-objc-runtime-shared-support.h \ + $(START_HDRS) \ + $(GGC_H) $(DIAGNOSTIC_CORE_H) $(FLAGS_H) input.h \ + $(OBSTACK_H) \ + objc/objc-encoding.h \ + objc/objc-next-metadata-tags.h \ + objc/objc-runtime-shared-support.h + +objc/objc-gnu-runtime-abi-01.o: objc/objc-gnu-runtime-abi-01.c \ + gt-objc-objc-gnu-runtime-abi-01.h \ + $(START_HDRS) \ + $(GGC_H) $(DIAGNOSTIC_CORE_H) $(FLAGS_H) input.h \ + $(OBSTACK_H) \ + toplev.h \ + objc/objc-encoding.h \ + objc/objc-runtime-hooks.h \ + objc/objc-runtime-shared-support.h + +objc/objc-next-runtime-abi-01.o: objc/objc-next-runtime-abi-01.c \ + gt-objc-objc-next-runtime-abi-01.h \ + $(START_HDRS) \ + $(GGC_H) $(DIAGNOSTIC_CORE_H) $(FLAGS_H) input.h \ + $(OBSTACK_H) \ + $(TARGET_H) output.h \ + objc/objc-encoding.h \ + objc/objc-next-metadata-tags.h \ + objc/objc-runtime-hooks.h \ + objc/objc-runtime-shared-support.h + +objc/objc-next-runtime-abi-02.o: objc/objc-next-runtime-abi-02.c \ + gt-objc-objc-next-runtime-abi-02.h \ + $(START_HDRS) \ + $(GGC_H) $(DIAGNOSTIC_CORE_H) $(FLAGS_H) input.h \ + $(OBSTACK_H) \ + $(TARGET_H) \ + objc/objc-encoding.h \ + objc/objc-next-metadata-tags.h \ + objc/objc-runtime-hooks.h \ + objc/objc-runtime-shared-support.h + +objc/objc-act.o : objc/objc-act.c \ + gt-objc-objc-act.h \ + $(START_HDRS) \ + $(GGC_H) $(DIAGNOSTIC_CORE_H) $(FLAGS_H) input.h \ + $(OBSTACK_H) \ + toplev.h $(FUNCTION_H) output.h debug.h $(LANGHOOKS_DEF_H) \ + $(HASHTAB_H) $(GIMPLE_H) \ + $(C_PRAGMA_H) $(C_TARGET_H) \ + objc/objc-encoding.h \ + objc/objc-runtime-hooks.h \ + objc/objc-runtime-shared-support.h + +objc/objc-encoding.o : objc/objc-encoding.c \ + $(START_HDRS) \ + $(GGC_H) $(DIAGNOSTIC_CORE_H) $(FLAGS_H) input.h \ + $(OBSTACK_H) \ + objc/objc-encoding.h \ + objc/objc-runtime-shared-support.h objc.srcextra: diff --git a/gcc/objc/objc-act.c b/gcc/objc/objc-act.c index 321d52aaf28..14db224df75 100644 --- a/gcc/objc/objc-act.c +++ b/gcc/objc/objc-act.c @@ -64,6 +64,9 @@ along with GCC; see the file COPYING3. If not see /* For enum gimplify_status */ #include "gimple.h" +/* For encode_method_prototype(). 
*/ +#include "objc-encoding.h" + static unsigned int should_call_super_dealloc = 0; /* When building Objective-C++, we are not linking against the C front-end @@ -105,17 +108,6 @@ static unsigned int should_call_super_dealloc = 0; #define OBJC_FORWARDING_MIN_OFFSET 0 #endif -/* Set up for use of obstacks. */ - -#include "obstack.h" - -/* This obstack is used to accumulate the encoding of a data type. */ -struct obstack util_obstack; - -/* This points to the beginning of obstack contents, so we can free - the whole contents. */ -char *util_firstobj; - /*** Private Interface (procedures) ***/ /* Init stuff. */ @@ -146,7 +138,6 @@ static bool objc_derived_from_p (tree, tree); static void objc_gen_property_data (tree, tree); static void objc_synthesize_getter (tree, tree, tree); static void objc_synthesize_setter (tree, tree, tree); -static char *objc_build_property_setter_name (tree); static tree lookup_property (tree, tree); static tree lookup_property_in_list (tree, tree); static tree lookup_property_in_protocol_list (tree, tree); @@ -195,11 +186,6 @@ static inline tree lookup_category (tree, tree); static tree lookup_protocol (tree, bool, bool); static tree lookup_and_install_protocols (tree, bool); -/* Type encoding. */ - -static void encode_type_qualifiers (tree); -static void encode_type (tree, int, int); - #ifdef OBJCPLUS static void really_start_method (tree, tree); #else @@ -279,13 +265,6 @@ static GTY((param_is (struct string_descriptor))) htab_t string_htab; FILE *gen_declaration_file; -/* Tells "encode_pointer/encode_aggregate" whether we are generating - type descriptors for instance variables (as opposed to methods). - Type descriptors for instance variables contain more information - than methods (for static typing and embedded structures). */ - -int generating_instance_variables = 0; - /* Hooks for stuff that differs between runtimes. */ objc_runtime_hooks runtime; @@ -402,9 +381,9 @@ objc_init (void) /* Set up stuff used by FE parser and all runtimes. */ errbuf = XNEWVEC (char, 1024 * 10); hash_init (); + /* TODO: Use objc_encoding_init(). */ gcc_obstack_init (&util_obstack); util_firstobj = (char *) obstack_finish (&util_obstack); - /* ... and then check flags and set-up for the selected runtime ... */ if (flag_next_runtime && flag_objc_abi >= 2) ok = objc_next_runtime_abi_02_init (&runtime); @@ -4451,110 +4430,6 @@ build_private_template (tree klass) } } -/* Begin code generation for protocols... */ - -static tree -objc_method_parm_type (tree type) -{ - type = TREE_VALUE (TREE_TYPE (type)); - if (TREE_CODE (type) == TYPE_DECL) - type = TREE_TYPE (type); - return type; -} - -static int -objc_encoded_type_size (tree type) -{ - int sz = int_size_in_bytes (type); - - /* Make all integer and enum types at least as large - as an int. */ - if (sz > 0 && INTEGRAL_TYPE_P (type)) - sz = MAX (sz, int_size_in_bytes (integer_type_node)); - /* Treat arrays as pointers, since that's how they're - passed in. */ - else if (TREE_CODE (type) == ARRAY_TYPE) - sz = int_size_in_bytes (ptr_type_node); - return sz; -} - -/* Encode a method prototype. - - The format is described in gcc/doc/objc.texi, section 'Method - signatures'. - */ - -tree -encode_method_prototype (tree method_decl) -{ - tree parms; - int parm_offset, i; - char buf[40]; - tree result; - - /* ONEWAY and BYCOPY, for remote object are the only method qualifiers. */ - encode_type_qualifiers (TREE_PURPOSE (TREE_TYPE (method_decl))); - - /* Encode return type. 
*/ - encode_type (objc_method_parm_type (method_decl), - obstack_object_size (&util_obstack), - OBJC_ENCODE_INLINE_DEFS); - - /* Stack size. */ - /* The first two arguments (self and _cmd) are pointers; account for - their size. */ - i = int_size_in_bytes (ptr_type_node); - parm_offset = 2 * i; - for (parms = METHOD_SEL_ARGS (method_decl); parms; - parms = DECL_CHAIN (parms)) - { - tree type = objc_method_parm_type (parms); - int sz = objc_encoded_type_size (type); - - /* If a type size is not known, bail out. */ - if (sz < 0) - { - error_at (DECL_SOURCE_LOCATION (method_decl), - "type %qT does not have a known size", - type); - /* Pretend that the encoding succeeded; the compilation will - fail nevertheless. */ - goto finish_encoding; - } - parm_offset += sz; - } - - sprintf (buf, "%d@0:%d", parm_offset, i); - obstack_grow (&util_obstack, buf, strlen (buf)); - - /* Argument types. */ - parm_offset = 2 * i; - for (parms = METHOD_SEL_ARGS (method_decl); parms; - parms = DECL_CHAIN (parms)) - { - tree type = objc_method_parm_type (parms); - - /* Process argument qualifiers for user supplied arguments. */ - encode_type_qualifiers (TREE_PURPOSE (TREE_TYPE (parms))); - - /* Type. */ - encode_type (type, obstack_object_size (&util_obstack), - OBJC_ENCODE_INLINE_DEFS); - - /* Compute offset. */ - sprintf (buf, "%d", parm_offset); - parm_offset += objc_encoded_type_size (type); - - obstack_grow (&util_obstack, buf, strlen (buf)); - } - - finish_encoding: - obstack_1grow (&util_obstack, '\0'); - result = get_identifier (XOBFINISH (&util_obstack, char *)); - obstack_free (&util_obstack, util_firstobj); - return result; -} - /* Generate either '- .cxx_construct' or '- .cxx_destruct' for the current class. */ #ifdef OBJCPLUS @@ -5864,25 +5739,6 @@ objc_build_selector_expr (location_t loc, tree selnamelist) return (*runtime.build_selector_reference) (loc, selname, NULL_TREE); } -/* This is used to implement @encode(). See gcc/doc/objc.texi, - section '@encode'. */ -tree -objc_build_encode_expr (tree type) -{ - tree result; - const char *string; - - encode_type (type, obstack_object_size (&util_obstack), - OBJC_ENCODE_INLINE_DEFS); - obstack_1grow (&util_obstack, 0); /* null terminate string */ - string = XOBFINISH (&util_obstack, const char *); - - /* Synthesize a string that represents the encoded struct/union. */ - result = my_build_string (strlen (string) + 1, string); - obstack_free (&util_obstack, util_firstobj); - return result; -} - static tree build_ivar_reference (tree id) { @@ -7308,7 +7164,7 @@ continue_class (tree klass) } /* This routine builds name of the setter synthesized function. */ -static char * +char * objc_build_property_setter_name (tree ident) { /* TODO: Use alloca to allocate buffer of appropriate size. */ @@ -10306,758 +10162,6 @@ objc_check_format_arg (tree ARG_UNUSED (format_arg), { } -/* --- Encode --- */ -/* "Encode" a data type into a string, which grows in util_obstack. - - The format is described in gcc/doc/objc.texi, section 'Type - encoding'. - - Most of the encode_xxx functions have a 'type' argument, which is - the type to encode, and an integer 'curtype' argument, which is the - index in the encoding string of the beginning of the encoding of - the current type, and allows you to find what characters have - already been written for the current type (they are the ones in the - current encoding string starting from 'curtype'). 
- - For example, if we are encoding a method which returns 'int' and - takes a 'char **' argument, then when we get to the point of - encoding the 'char **' argument, the encoded string already - contains 'i12@0:4' (assuming a pointer size of 4 bytes). So, - 'curtype' will be set to 7 when starting to encode 'char **'. - During the whole of the encoding of 'char **', 'curtype' will be - fixed at 7, so the routine encoding the second pointer can find out - that it's actually encoding a pointer to a pointer by looking - backwards at what has already been encoded for the current type, - and seeing there is a "^" (meaning a pointer) in there. -*/ - - -/* Encode type qualifiers encodes one of the "PQ" Objective-C - keywords, ie 'in', 'out', 'inout', 'bycopy', 'byref', 'oneway'. - 'const', instead, is encoded directly as part of the type. - */ - -static void -encode_type_qualifiers (tree declspecs) -{ - tree spec; - - for (spec = declspecs; spec; spec = TREE_CHAIN (spec)) - { - /* FIXME: Shouldn't we use token->keyword here ? */ - if (ridpointers[(int) RID_IN] == TREE_VALUE (spec)) - obstack_1grow (&util_obstack, 'n'); - else if (ridpointers[(int) RID_INOUT] == TREE_VALUE (spec)) - obstack_1grow (&util_obstack, 'N'); - else if (ridpointers[(int) RID_OUT] == TREE_VALUE (spec)) - obstack_1grow (&util_obstack, 'o'); - else if (ridpointers[(int) RID_BYCOPY] == TREE_VALUE (spec)) - obstack_1grow (&util_obstack, 'O'); - else if (ridpointers[(int) RID_BYREF] == TREE_VALUE (spec)) - obstack_1grow (&util_obstack, 'R'); - else if (ridpointers[(int) RID_ONEWAY] == TREE_VALUE (spec)) - obstack_1grow (&util_obstack, 'V'); - else - gcc_unreachable (); - } -} - -/* Determine if a pointee is marked read-only. Only used by the NeXT - runtime to be compatible with gcc-3.3. */ - -static bool -pointee_is_readonly (tree pointee) -{ - while (POINTER_TYPE_P (pointee)) - pointee = TREE_TYPE (pointee); - - return TYPE_READONLY (pointee); -} - -/* Encode a pointer type. */ - -static void -encode_pointer (tree type, int curtype, int format) -{ - tree pointer_to = TREE_TYPE (type); - - if (flag_next_runtime) - { - /* This code is used to be compatible with gcc-3.3. */ - /* For historical/compatibility reasons, the read-only qualifier - of the pointee gets emitted _before_ the '^'. The read-only - qualifier of the pointer itself gets ignored, _unless_ we are - looking at a typedef! Also, do not emit the 'r' for anything - but the outermost type! */ - if (!generating_instance_variables - && (obstack_object_size (&util_obstack) - curtype <= 1) - && (TYPE_NAME (type) && TREE_CODE (TYPE_NAME (type)) == TYPE_DECL - ? 
TYPE_READONLY (type) - : pointee_is_readonly (pointer_to))) - obstack_1grow (&util_obstack, 'r'); - } - - if (TREE_CODE (pointer_to) == RECORD_TYPE) - { - if (OBJC_TYPE_NAME (pointer_to) - && TREE_CODE (OBJC_TYPE_NAME (pointer_to)) == IDENTIFIER_NODE) - { - const char *name = IDENTIFIER_POINTER (OBJC_TYPE_NAME (pointer_to)); - - if (strcmp (name, TAG_OBJECT) == 0) /* '@' */ - { - obstack_1grow (&util_obstack, '@'); - return; - } - else if (TYPE_HAS_OBJC_INFO (pointer_to) - && TYPE_OBJC_INTERFACE (pointer_to)) - { - if (generating_instance_variables) - { - obstack_1grow (&util_obstack, '@'); - obstack_1grow (&util_obstack, '"'); - obstack_grow (&util_obstack, name, strlen (name)); - obstack_1grow (&util_obstack, '"'); - return; - } - else - { - obstack_1grow (&util_obstack, '@'); - return; - } - } - else if (strcmp (name, TAG_CLASS) == 0) /* '#' */ - { - obstack_1grow (&util_obstack, '#'); - return; - } - else if (strcmp (name, TAG_SELECTOR) == 0) /* ':' */ - { - obstack_1grow (&util_obstack, ':'); - return; - } - } - } - else if (TREE_CODE (pointer_to) == INTEGER_TYPE - && TYPE_MODE (pointer_to) == QImode) - { - tree pname = TREE_CODE (OBJC_TYPE_NAME (pointer_to)) == IDENTIFIER_NODE - ? OBJC_TYPE_NAME (pointer_to) - : DECL_NAME (OBJC_TYPE_NAME (pointer_to)); - - /* (BOOL *) are an exception and are encoded as ^c, while all - other pointers to char are encoded as *. */ - if (strcmp (IDENTIFIER_POINTER (pname), "BOOL")) - { - if (!flag_next_runtime) - { - /* The NeXT runtime adds the 'r' before getting here. */ - - /* It appears that "r*" means "const char *" rather than - "char *const". "char *const" is encoded as "*", - which is identical to "char *", so the "const" is - unfortunately lost. */ - if (TYPE_READONLY (pointer_to)) - obstack_1grow (&util_obstack, 'r'); - } - - obstack_1grow (&util_obstack, '*'); - return; - } - } - - /* We have a normal pointer type that does not get special treatment. */ - obstack_1grow (&util_obstack, '^'); - encode_type (pointer_to, curtype, format); -} - -static void -encode_array (tree type, int curtype, int format) -{ - tree an_int_cst = TYPE_SIZE (type); - tree array_of = TREE_TYPE (type); - char buffer[40]; - - if (an_int_cst == NULL) - { - /* We are trying to encode an incomplete array. An incomplete - array is forbidden as part of an instance variable; but it - may occur if the instance variable is a pointer to such an - array. */ - - /* So the only case in which an incomplete array could occur - (without being pointed to) is if we are encoding the - arguments or return value of a method. In that case, an - incomplete array argument or return value (eg, - -(void)display: (char[])string) is treated like a pointer - because that is how the compiler does the function call. A - special, more complicated case, is when the incomplete array - is the last member of a struct (eg, if we are encoding - "struct { unsigned long int a;double b[];}"), which is again - part of a method argument/return value. In that case, we - really need to communicate to the runtime that there is an - incomplete array (not a pointer!) there. So, we detect that - special case and encode it as a zero-length array. - - Try to detect that we are part of a struct. We do this by - searching for '=' in the type encoding for the current type. - NB: This hack assumes that you can't use '=' as part of a C - identifier. 
- */ - { - char *enc = obstack_base (&util_obstack) + curtype; - if (memchr (enc, '=', - obstack_object_size (&util_obstack) - curtype) == NULL) - { - /* We are not inside a struct. Encode the array as a - pointer. */ - encode_pointer (type, curtype, format); - return; - } - } - - /* Else, we are in a struct, and we encode it as a zero-length - array. */ - sprintf (buffer, "[" HOST_WIDE_INT_PRINT_DEC, (HOST_WIDE_INT)0); - } - else if (TREE_INT_CST_LOW (TYPE_SIZE (array_of)) == 0) - sprintf (buffer, "[" HOST_WIDE_INT_PRINT_DEC, (HOST_WIDE_INT)0); - else - sprintf (buffer, "[" HOST_WIDE_INT_PRINT_DEC, - TREE_INT_CST_LOW (an_int_cst) - / TREE_INT_CST_LOW (TYPE_SIZE (array_of))); - - obstack_grow (&util_obstack, buffer, strlen (buffer)); - encode_type (array_of, curtype, format); - obstack_1grow (&util_obstack, ']'); - return; -} - -/* Encode a vector. The vector type is a GCC extension to C. */ -static void -encode_vector (tree type, int curtype, int format) -{ - tree vector_of = TREE_TYPE (type); - char buffer[40]; - - /* Vectors are like simple fixed-size arrays. */ - - /* Output ![xx,yy,<code>] where xx is the vector_size, yy is the - alignment of the vector, and <code> is the base type. Eg, int - __attribute__ ((vector_size (16))) gets encoded as ![16,32,i] - assuming that the alignment is 32 bytes. We include size and - alignment in bytes so that the runtime does not have to have any - knowledge of the actual types. - */ - sprintf (buffer, "![" HOST_WIDE_INT_PRINT_DEC ",%d", - /* We want to compute the equivalent of sizeof (<vector>). - Code inspired by c_sizeof_or_alignof_type. */ - ((TREE_INT_CST_LOW (TYPE_SIZE_UNIT (type)) - / (TYPE_PRECISION (char_type_node) / BITS_PER_UNIT))), - /* We want to compute the equivalent of __alignof__ - (<vector>). Code inspired by - c_sizeof_or_alignof_type. */ - TYPE_ALIGN_UNIT (type)); - obstack_grow (&util_obstack, buffer, strlen (buffer)); - encode_type (vector_of, curtype, format); - obstack_1grow (&util_obstack, ']'); - return; -} - -static void -encode_aggregate_fields (tree type, bool pointed_to, int curtype, int format) -{ - tree field = TYPE_FIELDS (type); - - for (; field; field = DECL_CHAIN (field)) - { -#ifdef OBJCPLUS - /* C++ static members, and things that are not field at all, - should not appear in the encoding. */ - if (TREE_CODE (field) != FIELD_DECL || TREE_STATIC (field)) - continue; -#endif - - /* Recursively encode fields of embedded base classes. */ - if (DECL_ARTIFICIAL (field) && !DECL_NAME (field) - && TREE_CODE (TREE_TYPE (field)) == RECORD_TYPE) - { - encode_aggregate_fields (TREE_TYPE (field), - pointed_to, curtype, format); - continue; - } - - if (generating_instance_variables && !pointed_to) - { - tree fname = DECL_NAME (field); - - obstack_1grow (&util_obstack, '"'); - - if (fname && TREE_CODE (fname) == IDENTIFIER_NODE) - obstack_grow (&util_obstack, - IDENTIFIER_POINTER (fname), - strlen (IDENTIFIER_POINTER (fname))); - - obstack_1grow (&util_obstack, '"'); - } - - encode_field_decl (field, curtype, format); - } -} - -static void -encode_aggregate_within (tree type, int curtype, int format, int left, - int right) -{ - tree name; - /* NB: aggregates that are pointed to have slightly different encoding - rules in that you never encode the names of instance variables. 
*/ - int ob_size = obstack_object_size (&util_obstack); - bool inline_contents = false; - bool pointed_to = false; - - if (flag_next_runtime) - { - if (ob_size > 0 && *(obstack_next_free (&util_obstack) - 1) == '^') - pointed_to = true; - - if ((format == OBJC_ENCODE_INLINE_DEFS || generating_instance_variables) - && (!pointed_to || ob_size - curtype == 1 - || (ob_size - curtype == 2 - && *(obstack_next_free (&util_obstack) - 2) == 'r'))) - inline_contents = true; - } - else - { - /* c0 and c1 are the last two characters in the encoding of the - current type; if the last two characters were '^' or '^r', - then we are encoding an aggregate that is "pointed to". The - comment above applies: in that case we should avoid encoding - the names of instance variables. - */ - char c1 = ob_size > 1 ? *(obstack_next_free (&util_obstack) - 2) : 0; - char c0 = ob_size > 0 ? *(obstack_next_free (&util_obstack) - 1) : 0; - - if (c0 == '^' || (c1 == '^' && c0 == 'r')) - pointed_to = true; - - if (format == OBJC_ENCODE_INLINE_DEFS || generating_instance_variables) - { - if (!pointed_to) - inline_contents = true; - else - { - /* Note that the check (ob_size - curtype < 2) prevents - infinite recursion when encoding a structure which is - a linked list (eg, struct node { struct node *next; - }). Each time we follow a pointer, we add one - character to ob_size, and curtype is fixed, so after - at most two pointers we stop inlining contents and - break the loop. - - The other case where we don't inline is "^r", which - is a pointer to a constant struct. - */ - if ((ob_size - curtype <= 2) && !(c0 == 'r')) - inline_contents = true; - } - } - } - - /* Traverse struct aliases; it is important to get the - original struct and its tag name (if any). */ - type = TYPE_MAIN_VARIANT (type); - name = OBJC_TYPE_NAME (type); - /* Open parenth/bracket. */ - obstack_1grow (&util_obstack, left); - - /* Encode the struct/union tag name, or '?' if a tag was - not provided. Typedef aliases do not qualify. */ -#ifdef OBJCPLUS - /* For compatibility with the NeXT runtime, ObjC++ encodes template - args as a composite struct tag name. */ - if (name && TREE_CODE (name) == IDENTIFIER_NODE - /* Did this struct have a tag? */ - && !TYPE_WAS_ANONYMOUS (type)) - obstack_grow (&util_obstack, - decl_as_string (type, TFF_DECL_SPECIFIERS | TFF_UNQUALIFIED_NAME), - strlen (decl_as_string (type, TFF_DECL_SPECIFIERS | TFF_UNQUALIFIED_NAME))); -#else - if (name && TREE_CODE (name) == IDENTIFIER_NODE) - obstack_grow (&util_obstack, - IDENTIFIER_POINTER (name), - strlen (IDENTIFIER_POINTER (name))); -#endif - else - obstack_1grow (&util_obstack, '?'); - - /* Encode the types (and possibly names) of the inner fields, - if required. */ - if (inline_contents) - { - obstack_1grow (&util_obstack, '='); - encode_aggregate_fields (type, pointed_to, curtype, format); - } - /* Close parenth/bracket. */ - obstack_1grow (&util_obstack, right); -} - -/* Encode a bitfield NeXT-style (i.e., without a bit offset or the underlying - field type. */ - -static void -encode_next_bitfield (int width) -{ - char buffer[40]; - sprintf (buffer, "b%d", width); - obstack_grow (&util_obstack, buffer, strlen (buffer)); -} - -/* Encodes 'type', ignoring type qualifiers (which you should encode - beforehand if needed) with the exception of 'const', which is - encoded by encode_type. See above for the explanation of - 'curtype'. 'format' can be OBJC_ENCODE_INLINE_DEFS or - OBJC_ENCODE_DONT_INLINE_DEFS. 
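For orientation, a few concrete strings this scheme produces; they are the same strings user code obtains from @encode (), given here as illustrative examples only (see gcc/doc/objc.texi for the authoritative description):

     int                           "i"
     unsigned char                 "C"
     char *                        "*"
     id                            "@"
     Class                         "#"
     SEL                           ":"
     int [4]                       "[4i]"
     struct point { int x, y; }    "{point=ii}"
     struct point *                "^{point=ii}"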
-*/ -static void -encode_type (tree type, int curtype, int format) -{ - enum tree_code code = TREE_CODE (type); - - /* Ignore type qualifiers other than 'const' when encoding a - type. */ - - if (type == error_mark_node) - return; - - if (!flag_next_runtime) - { - if (TYPE_READONLY (type)) - obstack_1grow (&util_obstack, 'r'); - } - - switch (code) - { - case ENUMERAL_TYPE: - if (flag_next_runtime) - { - /* Kludge for backwards-compatibility with gcc-3.3: enums - are always encoded as 'i' no matter what type they - actually are (!). */ - obstack_1grow (&util_obstack, 'i'); - break; - } - /* Else, they are encoded exactly like the integer type that is - used by the compiler to store them. */ - case INTEGER_TYPE: - { - char c; - switch (GET_MODE_BITSIZE (TYPE_MODE (type))) - { - case 8: c = TYPE_UNSIGNED (type) ? 'C' : 'c'; break; - case 16: c = TYPE_UNSIGNED (type) ? 'S' : 's'; break; - case 32: - { - tree int_type = type; - if (flag_next_runtime) - { - /* Another legacy kludge for compatiblity with - gcc-3.3: 32-bit longs are encoded as 'l' or 'L', - but not always. For typedefs, we need to use 'i' - or 'I' instead if encoding a struct field, or a - pointer! */ - int_type = ((!generating_instance_variables - && (obstack_object_size (&util_obstack) - == (unsigned) curtype)) - ? TYPE_MAIN_VARIANT (type) - : type); - } - if (int_type == long_unsigned_type_node - || int_type == long_integer_type_node) - c = TYPE_UNSIGNED (type) ? 'L' : 'l'; - else - c = TYPE_UNSIGNED (type) ? 'I' : 'i'; - } - break; - case 64: c = TYPE_UNSIGNED (type) ? 'Q' : 'q'; break; - case 128: c = TYPE_UNSIGNED (type) ? 'T' : 't'; break; - default: gcc_unreachable (); - } - obstack_1grow (&util_obstack, c); - break; - } - case REAL_TYPE: - { - char c; - /* Floating point types. */ - switch (GET_MODE_BITSIZE (TYPE_MODE (type))) - { - case 32: c = 'f'; break; - case 64: c = 'd'; break; - case 96: - case 128: c = 'D'; break; - default: gcc_unreachable (); - } - obstack_1grow (&util_obstack, c); - break; - } - case VOID_TYPE: - obstack_1grow (&util_obstack, 'v'); - break; - - case BOOLEAN_TYPE: - obstack_1grow (&util_obstack, 'B'); - break; - - case ARRAY_TYPE: - encode_array (type, curtype, format); - break; - - case POINTER_TYPE: -#ifdef OBJCPLUS - case REFERENCE_TYPE: -#endif - encode_pointer (type, curtype, format); - break; - - case RECORD_TYPE: - encode_aggregate_within (type, curtype, format, '{', '}'); - break; - - case UNION_TYPE: - encode_aggregate_within (type, curtype, format, '(', ')'); - break; - - case FUNCTION_TYPE: /* '?' means an unknown type. */ - obstack_1grow (&util_obstack, '?'); - break; - - case COMPLEX_TYPE: - /* A complex is encoded as 'j' followed by the inner type (eg, - "_Complex int" is encoded as 'ji'). */ - obstack_1grow (&util_obstack, 'j'); - encode_type (TREE_TYPE (type), curtype, format); - break; - - case VECTOR_TYPE: - encode_vector (type, curtype, format); - break; - - default: - warning (0, "unknown type %s found during Objective-C encoding", - gen_type_name (type)); - obstack_1grow (&util_obstack, '?'); - break; - } - - if (flag_next_runtime) - { - /* Super-kludge. Some ObjC qualifier and type combinations need - to be rearranged for compatibility with gcc-3.3. */ - if (code == POINTER_TYPE && obstack_object_size (&util_obstack) >= 3) - { - char *enc = obstack_base (&util_obstack) + curtype; - - /* Rewrite "in const" from "nr" to "rn". 
*/ - if (curtype >= 1 && !strncmp (enc - 1, "nr", 2)) - strncpy (enc - 1, "rn", 2); - } - } -} - -static void -encode_gnu_bitfield (int position, tree type, int size) -{ - enum tree_code code = TREE_CODE (type); - char buffer[40]; - char charType = '?'; - - /* This code is only executed for the GNU runtime, so we can ignore - the NeXT runtime kludge of always encoding enums as 'i' no matter - what integers they actually are. */ - if (code == INTEGER_TYPE || code == ENUMERAL_TYPE) - { - if (integer_zerop (TYPE_MIN_VALUE (type))) - /* Unsigned integer types. */ - { - switch (TYPE_MODE (type)) - { - case QImode: - charType = 'C'; break; - case HImode: - charType = 'S'; break; - case SImode: - { - if (type == long_unsigned_type_node) - charType = 'L'; - else - charType = 'I'; - break; - } - case DImode: - charType = 'Q'; break; - default: - gcc_unreachable (); - } - } - else - /* Signed integer types. */ - { - switch (TYPE_MODE (type)) - { - case QImode: - charType = 'c'; break; - case HImode: - charType = 's'; break; - case SImode: - { - if (type == long_integer_type_node) - charType = 'l'; - else - charType = 'i'; - break; - } - case DImode: - charType = 'q'; break; - default: - gcc_unreachable (); - } - } - } - else - { - /* Do not do any encoding, produce an error and keep going. */ - error ("trying to encode non-integer type as a bitfield"); - return; - } - - sprintf (buffer, "b%d%c%d", position, charType, size); - obstack_grow (&util_obstack, buffer, strlen (buffer)); -} - -void -encode_field_decl (tree field_decl, int curtype, int format) -{ -#ifdef OBJCPLUS - /* C++ static members, and things that are not fields at all, - should not appear in the encoding. */ - if (TREE_CODE (field_decl) != FIELD_DECL || TREE_STATIC (field_decl)) - return; -#endif - - /* Generate the bitfield typing information, if needed. Note the difference - between GNU and NeXT runtimes. */ - if (DECL_BIT_FIELD_TYPE (field_decl)) - { - int size = tree_low_cst (DECL_SIZE (field_decl), 1); - - if (flag_next_runtime) - encode_next_bitfield (size); - else - encode_gnu_bitfield (int_bit_position (field_decl), - DECL_BIT_FIELD_TYPE (field_decl), size); - } - else - encode_type (TREE_TYPE (field_decl), curtype, format); -} - -/* This routine encodes the attribute of the input PROPERTY according - to following formula: - - Property attributes are stored as a comma-delimited C string. - Simple attributes such as readonly are encoded as single - character. The parametrized attributes, getter=name and - setter=name, are encoded as a single character followed by an - identifier. Property types are also encoded as a parametrized - attribute. 
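Two illustrative attribute strings built by this routine, using the characters enumerated just below and assuming hypothetical properties with default synthesized instance variables:

     @property (readonly) id delegate;       "T@,R,Vdelegate"
     @property (nonatomic, copy) id name;    "T@,C,N,Vname"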
The characters used to encode these attributes are - defined by the following enumeration: - - enum PropertyAttributes { - kPropertyReadOnly = 'R', - kPropertyBycopy = 'C', - kPropertyByref = '&', - kPropertyDynamic = 'D', - kPropertyGetter = 'G', - kPropertySetter = 'S', - kPropertyInstanceVariable = 'V', - kPropertyType = 'T', - kPropertyWeak = 'W', - kPropertyStrong = 'P', - kPropertyNonAtomic = 'N' - }; */ -tree -objc_v2_encode_prop_attr (tree property) -{ - const char *string; - tree type = TREE_TYPE (property); - - obstack_1grow (&util_obstack, 'T'); - encode_type (type, obstack_object_size (&util_obstack), - OBJC_ENCODE_INLINE_DEFS); - - if (PROPERTY_READONLY (property)) - obstack_grow (&util_obstack, ",R", 2); - - switch (PROPERTY_ASSIGN_SEMANTICS (property)) - { - case OBJC_PROPERTY_COPY: - obstack_grow (&util_obstack, ",C", 2); - break; - case OBJC_PROPERTY_RETAIN: - obstack_grow (&util_obstack, ",&", 2); - break; - case OBJC_PROPERTY_ASSIGN: - default: - break; - } - - if (PROPERTY_DYNAMIC (property)) - obstack_grow (&util_obstack, ",D", 2); - - if (PROPERTY_NONATOMIC (property)) - obstack_grow (&util_obstack, ",N", 2); - - /* Here we want to encode the getter name, but only if it's not the - standard one. */ - if (PROPERTY_GETTER_NAME (property) != PROPERTY_NAME (property)) - { - obstack_grow (&util_obstack, ",G", 2); - string = IDENTIFIER_POINTER (PROPERTY_GETTER_NAME (property)); - obstack_grow (&util_obstack, string, strlen (string)); - } - - if (!PROPERTY_READONLY (property)) - { - /* Here we want to encode the setter name, but only if it's not - the standard one. */ - tree standard_setter = get_identifier (objc_build_property_setter_name (PROPERTY_NAME (property))); - if (PROPERTY_SETTER_NAME (property) != standard_setter) - { - obstack_grow (&util_obstack, ",S", 2); - string = IDENTIFIER_POINTER (PROPERTY_SETTER_NAME (property)); - obstack_grow (&util_obstack, string, strlen (string)); - } - } - - /* TODO: Encode strong ('P'), weak ('W') for garbage collection. */ - - if (!PROPERTY_DYNAMIC (property)) - { - obstack_grow (&util_obstack, ",V", 2); - if (PROPERTY_IVAR_NAME (property)) - string = IDENTIFIER_POINTER (PROPERTY_IVAR_NAME (property)); - else - string = IDENTIFIER_POINTER (PROPERTY_NAME (property)); - obstack_grow (&util_obstack, string, strlen (string)); - } - - /* NULL-terminate string. */ - obstack_1grow (&util_obstack, 0); - string = XOBFINISH (&util_obstack, char *); - obstack_free (&util_obstack, util_firstobj); - return get_identifier (string); -} - void objc_common_init_ts (void) { diff --git a/gcc/objc/objc-act.h b/gcc/objc/objc-act.h index 99551a80256..33f90fdb785 100644 --- a/gcc/objc/objc-act.h +++ b/gcc/objc/objc-act.h @@ -642,10 +642,6 @@ typedef enum string_section #define METHOD_DEF 0 #define METHOD_REF 1 -/* (Decide if these can ever be validly changed.) */ -#define OBJC_ENCODE_INLINE_DEFS 0 -#define OBJC_ENCODE_DONT_INLINE_DEFS 1 - #define BUFSIZE 1024 #define CLS_FACTORY 0x0001L diff --git a/gcc/objc/objc-encoding.c b/gcc/objc/objc-encoding.c new file mode 100644 index 00000000000..cb1d0c4e992 --- /dev/null +++ b/gcc/objc/objc-encoding.c @@ -0,0 +1,926 @@ +/* Routines dealing with ObjC encoding of types + Copyright (C) 1992, 1993, 1994, 1995, 1997, 1998, 1999, 2000, 2001, + 2002, 2003, 2004, 2005, 2007, 2008, 2009, 2010, 2011 + Free Software Foundation, Inc. + +This file is part of GCC. 
+ +GCC is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 3, or (at your option) +any later version. + +GCC is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING3. If not see +<http://www.gnu.org/licenses/>. */ + +#include "config.h" +#include "system.h" +#include "coretypes.h" +#include "tree.h" + +#ifdef OBJCPLUS +#include "cp-tree.h" +#else +#include "c-tree.h" +#include "c-lang.h" +#endif + +#include "c-family/c-common.h" +#include "c-family/c-objc.h" + +#include "objc-encoding.h" +#include "objc-act.h" + +/* For my_build_string(). */ +#include "objc-runtime-shared-support.h" + +/* For BITS_PER_UNIT. */ +#include "tm.h" + +/* When building Objective-C++, we are not linking against the C front-end + and so need to replicate the C tree-construction functions in some way. */ +#ifdef OBJCPLUS +#define OBJCP_REMAP_FUNCTIONS +#include "objcp-decl.h" +#endif /* OBJCPLUS */ + +/* Set up for use of obstacks. */ +#include "obstack.h" + +/* This obstack is used to accumulate the encoding of a data type. + TODO: Make this static. */ +struct obstack util_obstack; + +/* This points to the beginning of obstack contents, so we can free + the whole contents. TODO: Make this static. */ +char *util_firstobj; + +int generating_instance_variables = 0; + +static void encode_type_qualifiers (tree); +static void encode_type (tree, int, int); + +static tree +objc_method_parm_type (tree type) +{ + type = TREE_VALUE (TREE_TYPE (type)); + if (TREE_CODE (type) == TYPE_DECL) + type = TREE_TYPE (type); + return type; +} + +static int +objc_encoded_type_size (tree type) +{ + int sz = int_size_in_bytes (type); + + /* Make all integer and enum types at least as large + as an int. */ + if (sz > 0 && INTEGRAL_TYPE_P (type)) + sz = MAX (sz, int_size_in_bytes (integer_type_node)); + /* Treat arrays as pointers, since that's how they're + passed in. */ + else if (TREE_CODE (type) == ARRAY_TYPE) + sz = int_size_in_bytes (ptr_type_node); + return sz; +} + +/* Encode a method prototype. */ +tree +encode_method_prototype (tree method_decl) +{ + tree parms; + int parm_offset, i; + char buf[40]; + tree result; + + /* ONEWAY and BYCOPY, for remote object are the only method qualifiers. */ + encode_type_qualifiers (TREE_PURPOSE (TREE_TYPE (method_decl))); + + /* Encode return type. */ + encode_type (objc_method_parm_type (method_decl), + obstack_object_size (&util_obstack), + OBJC_ENCODE_INLINE_DEFS); + + /* Stack size. */ + /* The first two arguments (self and _cmd) are pointers; account for + their size. */ + i = int_size_in_bytes (ptr_type_node); + parm_offset = 2 * i; + for (parms = METHOD_SEL_ARGS (method_decl); parms; + parms = DECL_CHAIN (parms)) + { + tree type = objc_method_parm_type (parms); + int sz = objc_encoded_type_size (type); + + /* If a type size is not known, bail out. */ + if (sz < 0) + { + error_at (DECL_SOURCE_LOCATION (method_decl), + "type %qT does not have a known size", + type); + /* Pretend that the encoding succeeded; the compilation will + fail nevertheless. 
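To make the resulting format concrete: for a hypothetical method - (void) setCount: (int) value, assuming 4-byte pointers and a 4-byte int, this routine builds the string "v12@0:4i8":

     'v'   return type (void)
     12    total bytes of arguments: self (4) + _cmd (4) + value (4)
     @0    self, at offset 0
     :4    _cmd, at offset 4
     i8    value, an int at offset 8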
*/ + goto finish_encoding; + } + parm_offset += sz; + } + + sprintf (buf, "%d@0:%d", parm_offset, i); + obstack_grow (&util_obstack, buf, strlen (buf)); + + /* Argument types. */ + parm_offset = 2 * i; + for (parms = METHOD_SEL_ARGS (method_decl); parms; + parms = DECL_CHAIN (parms)) + { + tree type = objc_method_parm_type (parms); + + /* Process argument qualifiers for user supplied arguments. */ + encode_type_qualifiers (TREE_PURPOSE (TREE_TYPE (parms))); + + /* Type. */ + encode_type (type, obstack_object_size (&util_obstack), + OBJC_ENCODE_INLINE_DEFS); + + /* Compute offset. */ + sprintf (buf, "%d", parm_offset); + parm_offset += objc_encoded_type_size (type); + + obstack_grow (&util_obstack, buf, strlen (buf)); + } + + finish_encoding: + obstack_1grow (&util_obstack, '\0'); + result = get_identifier (XOBFINISH (&util_obstack, char *)); + obstack_free (&util_obstack, util_firstobj); + return result; +} + +/* This is used to implement @encode(). */ +tree +objc_build_encode_expr (tree type) +{ + tree result; + const char *string; + + encode_type (type, obstack_object_size (&util_obstack), + OBJC_ENCODE_INLINE_DEFS); + obstack_1grow (&util_obstack, 0); /* null terminate string */ + string = XOBFINISH (&util_obstack, const char *); + + /* Synthesize a string that represents the encoded struct/union. */ + result = my_build_string (strlen (string) + 1, string); + obstack_free (&util_obstack, util_firstobj); + return result; +} + +/* "Encode" a data type into a string, which grows in util_obstack. + + The format is described in gcc/doc/objc.texi, section 'Type + encoding'. + + Most of the encode_xxx functions have a 'type' argument, which is + the type to encode, and an integer 'curtype' argument, which is the + index in the encoding string of the beginning of the encoding of + the current type, and allows you to find what characters have + already been written for the current type (they are the ones in the + current encoding string starting from 'curtype'). + + For example, if we are encoding a method which returns 'int' and + takes a 'char **' argument, then when we get to the point of + encoding the 'char **' argument, the encoded string already + contains 'i12@0:4' (assuming a pointer size of 4 bytes). So, + 'curtype' will be set to 7 when starting to encode 'char **'. + During the whole of the encoding of 'char **', 'curtype' will be + fixed at 7, so the routine encoding the second pointer can find out + that it's actually encoding a pointer to a pointer by looking + backwards at what has already been encoded for the current type, + and seeing there is a "^" (meaning a pointer) in there. */ + + +/* Encode type qualifiers encodes one of the "PQ" Objective-C + keywords, ie 'in', 'out', 'inout', 'bycopy', 'byref', 'oneway'. + 'const', instead, is encoded directly as part of the type. */ +static void +encode_type_qualifiers (tree declspecs) +{ + tree spec; + + for (spec = declspecs; spec; spec = TREE_CHAIN (spec)) + { + /* FIXME: Shouldn't we use token->keyword here ? 
*/ + if (ridpointers[(int) RID_IN] == TREE_VALUE (spec)) + obstack_1grow (&util_obstack, 'n'); + else if (ridpointers[(int) RID_INOUT] == TREE_VALUE (spec)) + obstack_1grow (&util_obstack, 'N'); + else if (ridpointers[(int) RID_OUT] == TREE_VALUE (spec)) + obstack_1grow (&util_obstack, 'o'); + else if (ridpointers[(int) RID_BYCOPY] == TREE_VALUE (spec)) + obstack_1grow (&util_obstack, 'O'); + else if (ridpointers[(int) RID_BYREF] == TREE_VALUE (spec)) + obstack_1grow (&util_obstack, 'R'); + else if (ridpointers[(int) RID_ONEWAY] == TREE_VALUE (spec)) + obstack_1grow (&util_obstack, 'V'); + else + gcc_unreachable (); + } +} + +/* Determine if a pointee is marked read-only. Only used by the NeXT + runtime to be compatible with gcc-3.3. */ +static bool +pointee_is_readonly (tree pointee) +{ + while (POINTER_TYPE_P (pointee)) + pointee = TREE_TYPE (pointee); + + return TYPE_READONLY (pointee); +} + +/* Encode a pointer type. */ +static void +encode_pointer (tree type, int curtype, int format) +{ + tree pointer_to = TREE_TYPE (type); + + if (flag_next_runtime) + { + /* This code is used to be compatible with gcc-3.3. */ + /* For historical/compatibility reasons, the read-only qualifier + of the pointee gets emitted _before_ the '^'. The read-only + qualifier of the pointer itself gets ignored, _unless_ we are + looking at a typedef! Also, do not emit the 'r' for anything + but the outermost type! */ + if (!generating_instance_variables + && (obstack_object_size (&util_obstack) - curtype <= 1) + && (TYPE_NAME (type) && TREE_CODE (TYPE_NAME (type)) == TYPE_DECL + ? TYPE_READONLY (type) + : pointee_is_readonly (pointer_to))) + obstack_1grow (&util_obstack, 'r'); + } + + if (TREE_CODE (pointer_to) == RECORD_TYPE) + { + if (OBJC_TYPE_NAME (pointer_to) + && TREE_CODE (OBJC_TYPE_NAME (pointer_to)) == IDENTIFIER_NODE) + { + const char *name = IDENTIFIER_POINTER (OBJC_TYPE_NAME (pointer_to)); + + if (strcmp (name, TAG_OBJECT) == 0) /* '@' */ + { + obstack_1grow (&util_obstack, '@'); + return; + } + else if (TYPE_HAS_OBJC_INFO (pointer_to) + && TYPE_OBJC_INTERFACE (pointer_to)) + { + if (generating_instance_variables) + { + obstack_1grow (&util_obstack, '@'); + obstack_1grow (&util_obstack, '"'); + obstack_grow (&util_obstack, name, strlen (name)); + obstack_1grow (&util_obstack, '"'); + return; + } + else + { + obstack_1grow (&util_obstack, '@'); + return; + } + } + else if (strcmp (name, TAG_CLASS) == 0) /* '#' */ + { + obstack_1grow (&util_obstack, '#'); + return; + } + else if (strcmp (name, TAG_SELECTOR) == 0) /* ':' */ + { + obstack_1grow (&util_obstack, ':'); + return; + } + } + } + else if (TREE_CODE (pointer_to) == INTEGER_TYPE + && TYPE_MODE (pointer_to) == QImode) + { + tree pname = TREE_CODE (OBJC_TYPE_NAME (pointer_to)) == IDENTIFIER_NODE + ? OBJC_TYPE_NAME (pointer_to) + : DECL_NAME (OBJC_TYPE_NAME (pointer_to)); + + /* (BOOL *) are an exception and are encoded as ^c, while all + other pointers to char are encoded as *. */ + if (strcmp (IDENTIFIER_POINTER (pname), "BOOL")) + { + if (!flag_next_runtime) + { + /* The NeXT runtime adds the 'r' before getting here. */ + + /* It appears that "r*" means "const char *" rather than + "char *const". "char *const" is encoded as "*", + which is identical to "char *", so the "const" is + unfortunately lost. */ + if (TYPE_READONLY (pointer_to)) + obstack_1grow (&util_obstack, 'r'); + } + + obstack_1grow (&util_obstack, '*'); + return; + } + } + + /* We have a normal pointer type that does not get special treatment. 
*/ + obstack_1grow (&util_obstack, '^'); + encode_type (pointer_to, curtype, format); +} + +static void +encode_array (tree type, int curtype, int format) +{ + tree an_int_cst = TYPE_SIZE (type); + tree array_of = TREE_TYPE (type); + char buffer[40]; + + if (an_int_cst == NULL) + { + /* We are trying to encode an incomplete array. An incomplete + array is forbidden as part of an instance variable; but it + may occur if the instance variable is a pointer to such an + array. */ + + /* So the only case in which an incomplete array could occur + (without being pointed to) is if we are encoding the + arguments or return value of a method. In that case, an + incomplete array argument or return value (eg, + -(void)display: (char[])string) is treated like a pointer + because that is how the compiler does the function call. A + special, more complicated case, is when the incomplete array + is the last member of a struct (eg, if we are encoding + "struct { unsigned long int a;double b[];}"), which is again + part of a method argument/return value. In that case, we + really need to communicate to the runtime that there is an + incomplete array (not a pointer!) there. So, we detect that + special case and encode it as a zero-length array. + + Try to detect that we are part of a struct. We do this by + searching for '=' in the type encoding for the current type. + NB: This hack assumes that you can't use '=' as part of a C + identifier. + */ + { + char *enc = obstack_base (&util_obstack) + curtype; + if (memchr (enc, '=', + obstack_object_size (&util_obstack) - curtype) == NULL) + { + /* We are not inside a struct. Encode the array as a + pointer. */ + encode_pointer (type, curtype, format); + return; + } + } + + /* Else, we are in a struct, and we encode it as a zero-length + array. */ + sprintf (buffer, "[" HOST_WIDE_INT_PRINT_DEC, (HOST_WIDE_INT)0); + } + else if (TREE_INT_CST_LOW (TYPE_SIZE (array_of)) == 0) + sprintf (buffer, "[" HOST_WIDE_INT_PRINT_DEC, (HOST_WIDE_INT)0); + else + sprintf (buffer, "[" HOST_WIDE_INT_PRINT_DEC, + TREE_INT_CST_LOW (an_int_cst) + / TREE_INT_CST_LOW (TYPE_SIZE (array_of))); + + obstack_grow (&util_obstack, buffer, strlen (buffer)); + encode_type (array_of, curtype, format); + obstack_1grow (&util_obstack, ']'); + return; +} + +/* Encode a vector. The vector type is a GCC extension to C. */ +static void +encode_vector (tree type, int curtype, int format) +{ + tree vector_of = TREE_TYPE (type); + char buffer[40]; + + /* Vectors are like simple fixed-size arrays. */ + + /* Output ![xx,yy,<code>] where xx is the vector_size, yy is the + alignment of the vector, and <code> is the base type. Eg, int + __attribute__ ((vector_size (16))) gets encoded as ![16,32,i] + assuming that the alignment is 32 bytes. We include size and + alignment in bytes so that the runtime does not have to have any + knowledge of the actual types. + */ + sprintf (buffer, "![" HOST_WIDE_INT_PRINT_DEC ",%d", + /* We want to compute the equivalent of sizeof (<vector>). + Code inspired by c_sizeof_or_alignof_type. */ + ((TREE_INT_CST_LOW (TYPE_SIZE_UNIT (type)) + / (TYPE_PRECISION (char_type_node) / BITS_PER_UNIT))), + /* We want to compute the equivalent of __alignof__ + (<vector>). Code inspired by + c_sizeof_or_alignof_type. 
*/ + TYPE_ALIGN_UNIT (type)); + obstack_grow (&util_obstack, buffer, strlen (buffer)); + encode_type (vector_of, curtype, format); + obstack_1grow (&util_obstack, ']'); + return; +} + +static void +encode_aggregate_fields (tree type, bool pointed_to, int curtype, int format) +{ + tree field = TYPE_FIELDS (type); + + for (; field; field = DECL_CHAIN (field)) + { +#ifdef OBJCPLUS + /* C++ static members, and things that are not field at all, + should not appear in the encoding. */ + if (TREE_CODE (field) != FIELD_DECL || TREE_STATIC (field)) + continue; +#endif + + /* Recursively encode fields of embedded base classes. */ + if (DECL_ARTIFICIAL (field) && !DECL_NAME (field) + && TREE_CODE (TREE_TYPE (field)) == RECORD_TYPE) + { + encode_aggregate_fields (TREE_TYPE (field), + pointed_to, curtype, format); + continue; + } + + if (generating_instance_variables && !pointed_to) + { + tree fname = DECL_NAME (field); + + obstack_1grow (&util_obstack, '"'); + + if (fname && TREE_CODE (fname) == IDENTIFIER_NODE) + obstack_grow (&util_obstack, + IDENTIFIER_POINTER (fname), + strlen (IDENTIFIER_POINTER (fname))); + + obstack_1grow (&util_obstack, '"'); + } + + encode_field_decl (field, curtype, format); + } +} + +static void +encode_aggregate_within (tree type, int curtype, int format, int left, + int right) +{ + tree name; + /* NB: aggregates that are pointed to have slightly different encoding + rules in that you never encode the names of instance variables. */ + int ob_size = obstack_object_size (&util_obstack); + bool inline_contents = false; + bool pointed_to = false; + + if (flag_next_runtime) + { + if (ob_size > 0 && *(obstack_next_free (&util_obstack) - 1) == '^') + pointed_to = true; + + if ((format == OBJC_ENCODE_INLINE_DEFS || generating_instance_variables) + && (!pointed_to || ob_size - curtype == 1 + || (ob_size - curtype == 2 + && *(obstack_next_free (&util_obstack) - 2) == 'r'))) + inline_contents = true; + } + else + { + /* c0 and c1 are the last two characters in the encoding of the + current type; if the last two characters were '^' or '^r', + then we are encoding an aggregate that is "pointed to". The + comment above applies: in that case we should avoid encoding + the names of instance variables. + */ + char c1 = ob_size > 1 ? *(obstack_next_free (&util_obstack) - 2) : 0; + char c0 = ob_size > 0 ? *(obstack_next_free (&util_obstack) - 1) : 0; + + if (c0 == '^' || (c1 == '^' && c0 == 'r')) + pointed_to = true; + + if (format == OBJC_ENCODE_INLINE_DEFS || generating_instance_variables) + { + if (!pointed_to) + inline_contents = true; + else + { + /* Note that the check (ob_size - curtype < 2) prevents + infinite recursion when encoding a structure which is + a linked list (eg, struct node { struct node *next; + }). Each time we follow a pointer, we add one + character to ob_size, and curtype is fixed, so after + at most two pointers we stop inlining contents and + break the loop. + + The other case where we don't inline is "^r", which + is a pointer to a constant struct. + */ + if ((ob_size - curtype <= 2) && !(c0 == 'r')) + inline_contents = true; + } + } + } + + /* Traverse struct aliases; it is important to get the + original struct and its tag name (if any). */ + type = TYPE_MAIN_VARIANT (type); + name = OBJC_TYPE_NAME (type); + /* Open parenth/bracket. */ + obstack_1grow (&util_obstack, left); + + /* Encode the struct/union tag name, or '?' if a tag was + not provided. Typedef aliases do not qualify. 
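To make the shape of the aggregate output concrete: a struct is bracketed by '{' and '}', a union by '(' and ')', the tag (or '?') comes first, and '=' plus the field encodings are appended only when the contents are inlined. The standalone sketch below assembles that string for a flat struct whose fields are all 'int'; the helper name and its restriction to int fields are invented for the example, whereas real GCC recurses through encode_type for each field.

    #include <stdio.h>
    #include <string.h>

    /* Illustration only: build "{tag=ii...}" (contents inlined) or
       "{tag}" (struct merely pointed to) for NFIELDS int members.  */
    static void
    encode_flat_int_struct (char *buf, size_t len, const char *tag,
                            int nfields, int inline_contents)
    {
      snprintf (buf, len, "{%s", tag ? tag : "?");
      if (inline_contents)
        {
          strncat (buf, "=", len - strlen (buf) - 1);
          while (nfields-- > 0)
            strncat (buf, "i", len - strlen (buf) - 1);
        }
      strncat (buf, "}", len - strlen (buf) - 1);
    }

    int
    main (void)
    {
      char buf[64];
      encode_flat_int_struct (buf, sizeof buf, "point", 2, 1);
      puts (buf);   /* prints "{point=ii}" */
      encode_flat_int_struct (buf, sizeof buf, "point", 2, 0);
      puts (buf);   /* prints "{point}" */
      return 0;
    }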
*/ +#ifdef OBJCPLUS + /* For compatibility with the NeXT runtime, ObjC++ encodes template + args as a composite struct tag name. */ + if (name && TREE_CODE (name) == IDENTIFIER_NODE + /* Did this struct have a tag? */ + && !TYPE_WAS_ANONYMOUS (type)) + obstack_grow (&util_obstack, + decl_as_string (type, TFF_DECL_SPECIFIERS | TFF_UNQUALIFIED_NAME), + strlen (decl_as_string (type, TFF_DECL_SPECIFIERS | TFF_UNQUALIFIED_NAME))); +#else + if (name && TREE_CODE (name) == IDENTIFIER_NODE) + obstack_grow (&util_obstack, + IDENTIFIER_POINTER (name), + strlen (IDENTIFIER_POINTER (name))); +#endif + else + obstack_1grow (&util_obstack, '?'); + + /* Encode the types (and possibly names) of the inner fields, + if required. */ + if (inline_contents) + { + obstack_1grow (&util_obstack, '='); + encode_aggregate_fields (type, pointed_to, curtype, format); + } + /* Close parenth/bracket. */ + obstack_1grow (&util_obstack, right); +} + +/* Encode a bitfield NeXT-style (i.e., without a bit offset or the underlying + field type. */ +static void +encode_next_bitfield (int width) +{ + char buffer[40]; + sprintf (buffer, "b%d", width); + obstack_grow (&util_obstack, buffer, strlen (buffer)); +} + +/* Encodes 'type', ignoring type qualifiers (which you should encode + beforehand if needed) with the exception of 'const', which is + encoded by encode_type. See above for the explanation of + 'curtype'. 'format' can be OBJC_ENCODE_INLINE_DEFS or + OBJC_ENCODE_DONT_INLINE_DEFS. */ +static void +encode_type (tree type, int curtype, int format) +{ + enum tree_code code = TREE_CODE (type); + + /* Ignore type qualifiers other than 'const' when encoding a + type. */ + + if (type == error_mark_node) + return; + + if (!flag_next_runtime) + { + if (TYPE_READONLY (type)) + obstack_1grow (&util_obstack, 'r'); + } + + switch (code) + { + case ENUMERAL_TYPE: + if (flag_next_runtime) + { + /* Kludge for backwards-compatibility with gcc-3.3: enums + are always encoded as 'i' no matter what type they + actually are (!). */ + obstack_1grow (&util_obstack, 'i'); + break; + } + /* Else, they are encoded exactly like the integer type that is + used by the compiler to store them. */ + case INTEGER_TYPE: + { + char c; + switch (GET_MODE_BITSIZE (TYPE_MODE (type))) + { + case 8: c = TYPE_UNSIGNED (type) ? 'C' : 'c'; break; + case 16: c = TYPE_UNSIGNED (type) ? 'S' : 's'; break; + case 32: + { + tree int_type = type; + if (flag_next_runtime) + { + /* Another legacy kludge for compatiblity with + gcc-3.3: 32-bit longs are encoded as 'l' or 'L', + but not always. For typedefs, we need to use 'i' + or 'I' instead if encoding a struct field, or a + pointer! */ + int_type = ((!generating_instance_variables + && (obstack_object_size (&util_obstack) + == (unsigned) curtype)) + ? TYPE_MAIN_VARIANT (type) + : type); + } + if (int_type == long_unsigned_type_node + || int_type == long_integer_type_node) + c = TYPE_UNSIGNED (type) ? 'L' : 'l'; + else + c = TYPE_UNSIGNED (type) ? 'I' : 'i'; + } + break; + case 64: c = TYPE_UNSIGNED (type) ? 'Q' : 'q'; break; + case 128: c = TYPE_UNSIGNED (type) ? 'T' : 't'; break; + default: gcc_unreachable (); + } + obstack_1grow (&util_obstack, c); + break; + } + case REAL_TYPE: + { + char c; + /* Floating point types. 
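For quick reference, the integer case below is a width/signedness table, with 'l'/'L' reserved for 32-bit types that really are 'long'. A standalone sketch of that mapping, ignoring the NeXT typedef kludge, follows; the function name is invented for illustration.

    #include <stdio.h>

    /* Illustration of the integer encoding table: width in bits plus
       signedness picks the code; a 32-bit 'long' gets 'l'/'L' rather
       than 'i'/'I'.  Returns '?' for widths not in the table.  */
    static char
    int_type_code (int bits, int is_unsigned, int is_long)
    {
      switch (bits)
        {
        case 8:   return is_unsigned ? 'C' : 'c';
        case 16:  return is_unsigned ? 'S' : 's';
        case 32:  return is_long ? (is_unsigned ? 'L' : 'l')
                                 : (is_unsigned ? 'I' : 'i');
        case 64:  return is_unsigned ? 'Q' : 'q';
        case 128: return is_unsigned ? 'T' : 't';
        default:  return '?';
        }
    }

    int
    main (void)
    {
      printf ("%c %c %c\n",
              int_type_code (32, 0, 0),    /* 'i' for int                     */
              int_type_code (32, 1, 1),    /* 'L' for a 32-bit unsigned long  */
              int_type_code (64, 0, 0));   /* 'q' for long long               */
      return 0;
    }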
*/ + switch (GET_MODE_BITSIZE (TYPE_MODE (type))) + { + case 32: c = 'f'; break; + case 64: c = 'd'; break; + case 96: + case 128: c = 'D'; break; + default: gcc_unreachable (); + } + obstack_1grow (&util_obstack, c); + break; + } + case VOID_TYPE: + obstack_1grow (&util_obstack, 'v'); + break; + + case BOOLEAN_TYPE: + obstack_1grow (&util_obstack, 'B'); + break; + + case ARRAY_TYPE: + encode_array (type, curtype, format); + break; + + case POINTER_TYPE: +#ifdef OBJCPLUS + case REFERENCE_TYPE: +#endif + encode_pointer (type, curtype, format); + break; + + case RECORD_TYPE: + encode_aggregate_within (type, curtype, format, '{', '}'); + break; + + case UNION_TYPE: + encode_aggregate_within (type, curtype, format, '(', ')'); + break; + + case FUNCTION_TYPE: /* '?' means an unknown type. */ + obstack_1grow (&util_obstack, '?'); + break; + + case COMPLEX_TYPE: + /* A complex is encoded as 'j' followed by the inner type (eg, + "_Complex int" is encoded as 'ji'). */ + obstack_1grow (&util_obstack, 'j'); + encode_type (TREE_TYPE (type), curtype, format); + break; + + case VECTOR_TYPE: + encode_vector (type, curtype, format); + break; + + default: + warning (0, "unknown type %<%T%> found during Objective-C encoding", + TREE_TYPE (type)); + obstack_1grow (&util_obstack, '?'); + break; + } + + if (flag_next_runtime) + { + /* Super-kludge. Some ObjC qualifier and type combinations need + to be rearranged for compatibility with gcc-3.3. */ + if (code == POINTER_TYPE && obstack_object_size (&util_obstack) >= 3) + { + char *enc = obstack_base (&util_obstack) + curtype; + + /* Rewrite "in const" from "nr" to "rn". */ + if (curtype >= 1 && !strncmp (enc - 1, "nr", 2)) + strncpy (enc - 1, "rn", 2); + } + } +} + +static void +encode_gnu_bitfield (int position, tree type, int size) +{ + enum tree_code code = TREE_CODE (type); + char buffer[40]; + char charType = '?'; + + /* This code is only executed for the GNU runtime, so we can ignore + the NeXT runtime kludge of always encoding enums as 'i' no matter + what integers they actually are. */ + if (code == INTEGER_TYPE || code == ENUMERAL_TYPE) + { + if (integer_zerop (TYPE_MIN_VALUE (type))) + /* Unsigned integer types. */ + { + switch (TYPE_MODE (type)) + { + case QImode: + charType = 'C'; break; + case HImode: + charType = 'S'; break; + case SImode: + { + if (type == long_unsigned_type_node) + charType = 'L'; + else + charType = 'I'; + break; + } + case DImode: + charType = 'Q'; break; + default: + gcc_unreachable (); + } + } + else + /* Signed integer types. */ + { + switch (TYPE_MODE (type)) + { + case QImode: + charType = 'c'; break; + case HImode: + charType = 's'; break; + case SImode: + { + if (type == long_integer_type_node) + charType = 'l'; + else + charType = 'i'; + break; + } + case DImode: + charType = 'q'; break; + default: + gcc_unreachable (); + } + } + } + else + { + /* Do not do any encoding, produce an error and keep going. */ + error ("trying to encode non-integer type as a bitfield"); + return; + } + + sprintf (buffer, "b%d%c%d", position, charType, size); + obstack_grow (&util_obstack, buffer, strlen (buffer)); +} + +void +encode_field_decl (tree field_decl, int curtype, int format) +{ +#ifdef OBJCPLUS + /* C++ static members, and things that are not fields at all, + should not appear in the encoding. */ + if (TREE_CODE (field_decl) != FIELD_DECL || TREE_STATIC (field_decl)) + return; +#endif + + /* Generate the bitfield typing information, if needed. Note the difference + between GNU and NeXT runtimes. 
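Concretely, a 3-bit unsigned int bitfield sitting at bit offset 5 comes out as "b5I3" for the GNU runtime but simply "b3" for NeXT, following the two sprintf formats defined above. A minimal standalone reproduction of the two formats (helper names invented, the bit offset is an arbitrary example):

    #include <stdio.h>

    /* Illustration of the two bitfield encodings above.  */
    static void
    next_bitfield (char *buf, int width)
    {
      sprintf (buf, "b%d", width);                 /* NeXT: width only.          */
    }

    static void
    gnu_bitfield (char *buf, int position, char type_code, int width)
    {
      sprintf (buf, "b%d%c%d", position, type_code, width);  /* GNU: offset, type, width.  */
    }

    int
    main (void)
    {
      char buf[32];
      gnu_bitfield (buf, 5, 'I', 3);   /* unsigned int x : 3 at bit 5 */
      puts (buf);                      /* "b5I3" */
      next_bitfield (buf, 3);
      puts (buf);                      /* "b3" */
      return 0;
    }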
*/ + if (DECL_BIT_FIELD_TYPE (field_decl)) + { + int size = tree_low_cst (DECL_SIZE (field_decl), 1); + + if (flag_next_runtime) + encode_next_bitfield (size); + else + encode_gnu_bitfield (int_bit_position (field_decl), + DECL_BIT_FIELD_TYPE (field_decl), size); + } + else + encode_type (TREE_TYPE (field_decl), curtype, format); +} + +/* This routine encodes the attribute of the input PROPERTY according + to following formula: + + Property attributes are stored as a comma-delimited C string. + Simple attributes such as readonly are encoded as single + character. The parametrized attributes, getter=name and + setter=name, are encoded as a single character followed by an + identifier. Property types are also encoded as a parametrized + attribute. The characters used to encode these attributes are + defined by the following enumeration: + + enum PropertyAttributes { + kPropertyReadOnly = 'R', + kPropertyBycopy = 'C', + kPropertyByref = '&', + kPropertyDynamic = 'D', + kPropertyGetter = 'G', + kPropertySetter = 'S', + kPropertyInstanceVariable = 'V', + kPropertyType = 'T', + kPropertyWeak = 'W', + kPropertyStrong = 'P', + kPropertyNonAtomic = 'N' + }; */ +tree +objc_v2_encode_prop_attr (tree property) +{ + const char *string; + tree type = TREE_TYPE (property); + + obstack_1grow (&util_obstack, 'T'); + encode_type (type, obstack_object_size (&util_obstack), + OBJC_ENCODE_INLINE_DEFS); + + if (PROPERTY_READONLY (property)) + obstack_grow (&util_obstack, ",R", 2); + + switch (PROPERTY_ASSIGN_SEMANTICS (property)) + { + case OBJC_PROPERTY_COPY: + obstack_grow (&util_obstack, ",C", 2); + break; + case OBJC_PROPERTY_RETAIN: + obstack_grow (&util_obstack, ",&", 2); + break; + case OBJC_PROPERTY_ASSIGN: + default: + break; + } + + if (PROPERTY_DYNAMIC (property)) + obstack_grow (&util_obstack, ",D", 2); + + if (PROPERTY_NONATOMIC (property)) + obstack_grow (&util_obstack, ",N", 2); + + /* Here we want to encode the getter name, but only if it's not the + standard one. */ + if (PROPERTY_GETTER_NAME (property) != PROPERTY_NAME (property)) + { + obstack_grow (&util_obstack, ",G", 2); + string = IDENTIFIER_POINTER (PROPERTY_GETTER_NAME (property)); + obstack_grow (&util_obstack, string, strlen (string)); + } + + if (!PROPERTY_READONLY (property)) + { + /* Here we want to encode the setter name, but only if it's not + the standard one. */ + tree standard_setter = get_identifier (objc_build_property_setter_name (PROPERTY_NAME (property))); + if (PROPERTY_SETTER_NAME (property) != standard_setter) + { + obstack_grow (&util_obstack, ",S", 2); + string = IDENTIFIER_POINTER (PROPERTY_SETTER_NAME (property)); + obstack_grow (&util_obstack, string, strlen (string)); + } + } + + /* TODO: Encode strong ('P'), weak ('W') for garbage collection. */ + + if (!PROPERTY_DYNAMIC (property)) + { + obstack_grow (&util_obstack, ",V", 2); + if (PROPERTY_IVAR_NAME (property)) + string = IDENTIFIER_POINTER (PROPERTY_IVAR_NAME (property)); + else + string = IDENTIFIER_POINTER (PROPERTY_NAME (property)); + obstack_grow (&util_obstack, string, strlen (string)); + } + + /* NULL-terminate string. 
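So, following the format documented above, a readonly, nonatomic 'int' property backed by an instance variable '_count' (a hypothetical property with a default getter) would be encoded as "Ti,R,N,V_count". A small standalone sketch of that concatenation:

    #include <stdio.h>
    #include <string.h>

    /* Illustration of the comma-delimited property-attribute string;
       the property described here is invented for the example.  */
    int
    main (void)
    {
      char enc[64] = "T";

      strcat (enc, "i");          /* type encoding of 'int'        */
      strcat (enc, ",R");         /* readonly                      */
      strcat (enc, ",N");         /* nonatomic                     */
      strcat (enc, ",V_count");   /* backing instance variable     */

      puts (enc);                 /* prints "Ti,R,N,V_count"       */
      return 0;
    }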
*/ + obstack_1grow (&util_obstack, 0); + string = XOBFINISH (&util_obstack, char *); + obstack_free (&util_obstack, util_firstobj); + return get_identifier (string); +} diff --git a/gcc/objc/objc-encoding.h b/gcc/objc/objc-encoding.h new file mode 100644 index 00000000000..2fad5937bc9 --- /dev/null +++ b/gcc/objc/objc-encoding.h @@ -0,0 +1,74 @@ +/* Routines dealing with ObjC encoding of types + Copyright (C) 1992, 1993, 1994, 1995, 1997, 1998, 1999, 2000, 2001, + 2002, 2003, 2004, 2005, 2007, 2008, 2009, 2010, 2011 + Free Software Foundation, Inc. + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 3, or (at your option) +any later version. + +GCC is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING3. If not see +<http://www.gnu.org/licenses/>. */ + +#ifndef GCC_OBJC_ENCODING_H +#define GCC_OBJC_ENCODING_H + +/* TODO: Hide the following obstack code in objc-encoding.c, and have + a objc_encoding_init() that is called by objc_init() to set them + up. */ + +/* Set up for use of obstacks. */ +#include "obstack.h" + +/* This obstack is used to accumulate the encoding of a data type. */ +extern struct obstack util_obstack; + +/* This points to the beginning of obstack contents, so we can free + the whole contents. */ +extern char *util_firstobj; + +/* This will be used to initialize the obstacks used by encoding. It + should be called before any encoding function is used. It is + usually done in objc_init(). */ +/* extern void objc_encoding_init (void); */ + + +/* Encode a method prototype. The format is described in + gcc/doc/objc.texi, section 'Method signatures'. */ +extern tree encode_method_prototype (tree method_decl); + +/* This is used to implement @encode(). See gcc/doc/objc.texi, + section '@encode'. */ +extern tree objc_build_encode_expr (tree type); + +/* (Decide if these can ever be validly changed.) */ +#define OBJC_ENCODE_INLINE_DEFS 0 +#define OBJC_ENCODE_DONT_INLINE_DEFS 1 + +/* Encode the attributes of a property. */ +extern tree objc_v2_encode_prop_attr (tree property); + +/* Encode the type of a field. */ +extern void encode_field_decl (tree field_decl, int curtype, int format); + +/* Tells "encode_pointer/encode_aggregate" whether we are generating + type descriptors for instance variables (as opposed to methods). + Type descriptors for instance variables contain more information + than methods (for static typing and embedded structures). + + TODO: Replace this global variable with an argument that is passed + to the various encode() functions. + + TODO: Change it to a 'bool'. */ +extern int generating_instance_variables; + +#endif /* GCC_OBJC_ENCODING_H */ diff --git a/gcc/objc/objc-gnu-runtime-abi-01.c b/gcc/objc/objc-gnu-runtime-abi-01.c index d21f2e9a6b5..fef5355c6a6 100644 --- a/gcc/objc/objc-gnu-runtime-abi-01.c +++ b/gcc/objc/objc-gnu-runtime-abi-01.c @@ -47,6 +47,7 @@ along with GCC; see the file COPYING3. If not see #include "objc-runtime-hooks.h" #include "objc-runtime-shared-support.h" +#include "objc-encoding.h" /* GNU runtime private definitions. 
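All of the encoding routines share one idiom, which the new objc-encoding.h also documents: pieces of the string are appended to util_obstack one at a time, the finished string is cut off (XOBFINISH), and the obstack is then released back to util_firstobj. A minimal standalone illustration of that grow/finish/free pattern, using the plain obstack interface as shipped with glibc rather than GCC's wrappers:

    #include <stdio.h>
    #include <stdlib.h>
    #include <obstack.h>

    /* obstack must be told how to obtain and release chunk memory.  */
    #define obstack_chunk_alloc malloc
    #define obstack_chunk_free free

    int
    main (void)
    {
      struct obstack ob;
      char *encoding;

      obstack_init (&ob);

      obstack_1grow (&ob, 'T');        /* append one character          */
      obstack_grow (&ob, "i,R", 3);    /* append a counted byte string  */
      obstack_1grow (&ob, '\0');       /* NUL-terminate                 */

      encoding = (char *) obstack_finish (&ob);   /* close off the object */
      puts (encoding);                             /* prints "Ti,R"        */

      obstack_free (&ob, NULL);        /* release everything            */
      return 0;
    }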
*/ #define DEF_CONSTANT_STRING_CLASS_NAME "NXConstantString" diff --git a/gcc/objc/objc-next-runtime-abi-01.c b/gcc/objc/objc-next-runtime-abi-01.c index d5b795fbb0e..6183a21e5e8 100644 --- a/gcc/objc/objc-next-runtime-abi-01.c +++ b/gcc/objc/objc-next-runtime-abi-01.c @@ -54,6 +54,7 @@ along with GCC; see the file COPYING3. If not see #include "objc-runtime-hooks.h" #include "objc-runtime-shared-support.h" +#include "objc-encoding.h" /* NeXT ABI 0 and 1 private definitions. */ #define DEF_CONSTANT_STRING_CLASS_NAME "NSConstantString" diff --git a/gcc/objc/objc-next-runtime-abi-02.c b/gcc/objc/objc-next-runtime-abi-02.c index fd9bb9923de..e7570c7e4b2 100644 --- a/gcc/objc/objc-next-runtime-abi-02.c +++ b/gcc/objc/objc-next-runtime-abi-02.c @@ -58,8 +58,8 @@ extern struct obstack util_obstack; extern char *util_firstobj; #include "objc-runtime-hooks.h" - #include "objc-runtime-shared-support.h" +#include "objc-encoding.h" /* ABI 2 Private definitions. */ #define DEF_CONSTANT_STRING_CLASS_NAME "NSConstantString" diff --git a/gcc/objc/objc-runtime-shared-support.c b/gcc/objc/objc-runtime-shared-support.c index 78505f4d5e8..913b0fd7e2b 100644 --- a/gcc/objc/objc-runtime-shared-support.c +++ b/gcc/objc/objc-runtime-shared-support.c @@ -51,6 +51,7 @@ extern char *util_firstobj; #include "objc-runtime-hooks.h" #include "objc-runtime-shared-support.h" +#include "objc-encoding.h" /* rt_trees identifiers - shared between NeXT implementations. These allow the FE to tag meta-data in a manner that survives LTO and can be used when diff --git a/gcc/objc/objc-runtime-shared-support.h b/gcc/objc/objc-runtime-shared-support.h index bb0e5711df0..0db8bed1240 100644 --- a/gcc/objc/objc-runtime-shared-support.h +++ b/gcc/objc/objc-runtime-shared-support.h @@ -52,20 +52,17 @@ extern void objc_push_parm (tree); extern tree build_function_type_for_method (tree, tree, int, bool); +extern char *objc_build_property_setter_name (tree); + /* Stuff that should be migrated to shared support (or some v1-only file). */ extern void build_super_template (void); extern tree objc_build_component_ref (tree, tree); -extern tree objc_v2_encode_prop_attr (tree); extern tree build_descriptor_table_initializer (tree, tree); extern tree build_method_prototype_list_template (tree, int); extern tree build_protocol_initializer (tree, tree, tree, tree, tree); -/* Stuff that should be migrated to shared encode. */ -extern tree encode_method_prototype (tree); -extern void encode_field_decl (tree, int, int); - /* Moved or new routines in objc-runtime-shared-support.c */ extern tree build_selector (tree); @@ -85,7 +82,6 @@ extern tree build_ivar_template (void); extern void generate_strings (void); extern void dump_interface (FILE *, tree); -extern int generating_instance_variables; extern FILE *gen_declaration_file; #endif /* _OBJC_RUNTIME_SHARED_SUPPORT_H_ */ diff --git a/gcc/objcp/ChangeLog b/gcc/objcp/ChangeLog index dba3907fa76..e137c9936e3 100644 --- a/gcc/objcp/ChangeLog +++ b/gcc/objcp/ChangeLog @@ -1,3 +1,19 @@ +2011-07-04 Nicola Pero <nicola.pero@meta-innovation.com> + + * Make-lang.in (OBJCXX_OBJS): Added objc-encoding.o. + (objcp/objcp-lang.o): Reordered dependencies. Depend on GGC_H. + (objcp/objcp-decl.o): Reordered dependencies. + (objcp/objc-runtime-shared-support.o): Reordered dependencies. + Updated them to be identical to the corresponding new objc/ ones, + with the addition of objcp-decl.h. + (objcp/objc-runtime-shared-support.o): Likewise. + (objcp/objc-gnu-runtime-abi-01.o): Likewise. 
+ (objcp/objc-next-runtime-abi-01.o): Likewise. + (objcp/objc-next-runtime-abi-02.o): Likewise. + (objcp/objcp-act.o): Reordered dependencies. Added dependency on + objc-encoding.h. + (objcp/objc-encoding.o): New rule. + 2011-04-15 Nicola Pero <nicola.pero@meta-innovation.com> * objcp-decl.c (objcp_finish_struct): Use diff --git a/gcc/objcp/Make-lang.in b/gcc/objcp/Make-lang.in index 7e0ef5c4257..f454e5fcd22 100644 --- a/gcc/objcp/Make-lang.in +++ b/gcc/objcp/Make-lang.in @@ -57,6 +57,7 @@ OBJCXX_OBJS = objcp/objcp-act.o objcp/objcp-lang.o objcp/objcp-decl.o \ objcp/objc-gnu-runtime-abi-01.o \ objcp/objc-next-runtime-abi-01.o \ objcp/objc-next-runtime-abi-02.o \ + objcp/objc-encoding.o \ $(CXX_AND_OBJCXX_OBJS) obj-c++_OBJS = $(OBJCXX_OBJS) cc1objplus-checksum.o @@ -76,48 +77,96 @@ cc1objplus$(exeext): $(OBJCXX_OBJS) cc1objplus-checksum.o $(BACKEND) $(LIBDEPS) # Objective C++ language specific files. -objcp/objcp-lang.o : objcp/objcp-lang.c $(START_HDRS) \ +objcp/objcp-lang.o : objcp/objcp-lang.c \ + $(START_HDRS) \ + $(GGC_H) \ $(C_COMMON_H) $(LANGHOOKS_DEF_H) cp/cp-objcp-common.h \ $(TARGET_H) gtype-objcp.h objcp/objcp-decl.o : objcp/objcp-decl.c \ $(CONFIG_H) $(SYSTEM_H) coretypes.h $(TM_H) $(TREE_H) $(CXX_TREE_H) \ - objc/objc-act.h objcp/objcp-decl.h c-family/c-objc.h + objc/objc-act.h c-family/c-objc.h \ + objcp/objcp-decl.h objcp/objc-runtime-shared-support.o : objc/objc-runtime-shared-support.c \ - $(START_HDRS) objc/objc-runtime-shared-support.h $(OBSTACK_H) \ - objc/objc-next-metadata-tags.h gt-objc-objc-runtime-shared-support.h + gt-objc-objc-runtime-shared-support.h \ + $(START_HDRS) \ + $(GGC_H) $(DIAGNOSTIC_CORE_H) $(FLAGS_H) input.h \ + $(OBSTACK_H) \ + objc/objc-encoding.h \ + objc/objc-next-metadata-tags.h \ + objc/objc-runtime-shared-support.h \ + objcp/objcp-decl.h $(COMPILER) -c $(ALL_COMPILERFLAGS) $(ALL_CPPFLAGS) $(INCLUDES) $< \ $(OUTPUT_OPTION) -objcp/objc-gnu-runtime-abi-01.o: objc/objc-gnu-runtime-abi-01.c $(START_HDRS) \ - objc/objc-runtime-hooks.h $(GGC_H) \ - gt-objc-objc-gnu-runtime-abi-01.h toplev.h +objcp/objc-gnu-runtime-abi-01.o: objc/objc-gnu-runtime-abi-01.c \ + gt-objc-objc-gnu-runtime-abi-01.h \ + $(START_HDRS) \ + $(GGC_H) $(DIAGNOSTIC_CORE_H) $(FLAGS_H) input.h \ + $(OBSTACK_H) \ + toplev.h \ + objc/objc-encoding.h \ + objc/objc-runtime-hooks.h \ + objc/objc-runtime-shared-support.h \ + objcp/objcp-decl.h $(COMPILER) -c $(ALL_COMPILERFLAGS) $(ALL_CPPFLAGS) $(INCLUDES) $< \ $(OUTPUT_OPTION) -objcp/objc-next-runtime-abi-01.o: objc/objc-next-runtime-abi-01.c $(START_HDRS) \ - $(GGC_H) objc/objc-runtime-hooks.h \ - objc/objc-next-metadata-tags.h gt-objc-objc-next-runtime-abi-01.h output.h \ - objc/objc-runtime-shared-support.h $(TARGET_H) +objcp/objc-next-runtime-abi-01.o: objc/objc-next-runtime-abi-01.c \ + gt-objc-objc-next-runtime-abi-01.h \ + $(START_HDRS) \ + $(GGC_H) $(DIAGNOSTIC_CORE_H) $(FLAGS_H) input.h \ + $(OBSTACK_H) \ + $(TARGET_H) output.h \ + objc/objc-encoding.h \ + objc/objc-next-metadata-tags.h \ + objc/objc-runtime-hooks.h \ + objc/objc-runtime-shared-support.h \ + objcp/objcp-decl.h $(COMPILER) -c $(ALL_COMPILERFLAGS) $(ALL_CPPFLAGS) $(INCLUDES) $< \ $(OUTPUT_OPTION) -objcp/objc-next-runtime-abi-02.o: objc/objc-next-runtime-abi-02.c $(START_HDRS) \ - $(GGC_H) objc/objc-runtime-hooks.h \ - objc/objc-next-metadata-tags.h gt-objc-objc-next-runtime-abi-02.h $(TARGET_H) \ - objc/objc-runtime-shared-support.h $(OBSTACK_H) +objcp/objc-next-runtime-abi-02.o: objc/objc-next-runtime-abi-02.c \ + gt-objc-objc-next-runtime-abi-02.h \ + 
$(START_HDRS) \ + $(GGC_H) $(DIAGNOSTIC_CORE_H) $(FLAGS_H) input.h \ + $(OBSTACK_H) \ + $(TARGET_H) \ + objc/objc-encoding.h \ + objc/objc-next-metadata-tags.h \ + objc/objc-runtime-hooks.h \ + objc/objc-runtime-shared-support.h \ + objcp/objcp-decl.h $(COMPILER) -c $(ALL_COMPILERFLAGS) $(ALL_CPPFLAGS) $(INCLUDES) $< \ $(OUTPUT_OPTION) # The following must be an explicit rule; please keep in sync with the implicit # one in Makefile.in. -objcp/objcp-act.o : objc/objc-act.c $(START_HDRS) $(GGC_H) \ - $(RTL_H) $(EXPR_H) $(TARGET_H) $(DIAGNOSTIC_H) toplev.h $(FLAGS_H) \ - input.h $(FUNCTION_H) output.h debug.h $(OBSTACK_H) \ - objcp/objcp-decl.h $(LANGHOOKS_DEF_H) $(HASHTAB_H) gt-objc-objc-act.h \ - $(GIMPLE_H) objc/objc-runtime-hooks.h objc/objc-runtime-shared-support.h +objcp/objcp-act.o : objc/objc-act.c \ + gt-objc-objc-act.h \ + $(START_HDRS) \ + $(GGC_H) $(DIAGNOSTIC_H) $(FLAGS_H) input.h \ + $(OBSTACK_H) \ + toplev.h $(FUNCTION_H) output.h debug.h $(LANGHOOKS_DEF_H) \ + $(HASHTAB_H) $(GIMPLE_H) \ + $(RTL_H) $(EXPR_H) $(TARGET_H) \ + objcp/objcp-decl.h \ + objc/objc-encoding.h \ + objc/objc-runtime-hooks.h \ + objc/objc-runtime-shared-support.h \ + objcp/objcp-decl.h + $(COMPILER) -c $(ALL_COMPILERFLAGS) $(ALL_CPPFLAGS) $(INCLUDES) $< \ + $(OUTPUT_OPTION) + +objcp/objc-encoding.o : objc/objc-encoding.c \ + $(START_HDRS) \ + $(GGC_H) $(DIAGNOSTIC_CORE_H) $(FLAGS_H) input.h \ + $(OBSTACK_H) \ + objc/objc-encoding.h \ + objc/objc-runtime-shared-support.h \ + objcp/objcp-decl.h $(COMPILER) -c $(ALL_COMPILERFLAGS) $(ALL_CPPFLAGS) $(INCLUDES) $< \ $(OUTPUT_OPTION) diff --git a/gcc/optabs.c b/gcc/optabs.c index bf15ab4fbc6..1a8aebd9438 100644 --- a/gcc/optabs.c +++ b/gcc/optabs.c @@ -1440,8 +1440,7 @@ expand_binop (enum machine_mode mode, optab binoptab, rtx op0, rtx op1, if (temp != 0) { if (GET_MODE_CLASS (mode) == MODE_INT - && TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode), - GET_MODE_BITSIZE (GET_MODE (temp)))) + && TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (temp))) return gen_lowpart (mode, temp); else return convert_to_mode (mode, temp, unsignedp); @@ -1498,8 +1497,7 @@ expand_binop (enum machine_mode mode, optab binoptab, rtx op0, rtx op1, if (temp) { if (mclass != MODE_INT - || !TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode), - GET_MODE_BITSIZE (wider_mode))) + || !TRULY_NOOP_TRUNCATION_MODES_P (mode, wider_mode)) { if (target == 0) target = gen_reg_rtx (mode); @@ -2027,8 +2025,7 @@ expand_binop (enum machine_mode mode, optab binoptab, rtx op0, rtx op1, if (temp) { if (mclass != MODE_INT - || !TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode), - GET_MODE_BITSIZE (wider_mode))) + || !TRULY_NOOP_TRUNCATION_MODES_P (mode, wider_mode)) { if (target == 0) target = gen_reg_rtx (mode); @@ -2915,8 +2912,7 @@ expand_unop (enum machine_mode mode, optab unoptab, rtx op0, rtx target, if (temp) { if (mclass != MODE_INT - || !TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode), - GET_MODE_BITSIZE (wider_mode))) + || !TRULY_NOOP_TRUNCATION_MODES_P (mode, wider_mode)) { if (target == 0) target = gen_reg_rtx (mode); diff --git a/gcc/params.def b/gcc/params.def index 1dccbf73163..7c20b92ef1b 100644 --- a/gcc/params.def +++ b/gcc/params.def @@ -916,6 +916,16 @@ DEFPARAM (PARAM_MAX_STORES_TO_SINK, "Maximum number of conditional store pairs that can be sunk", 2, 0, 0) +/* Override CASE_VALUES_THRESHOLD of when to switch from doing switch + statements via if statements to using a table jump operation. If the value + is 0, the default CASE_VALUES_THRESHOLD will be used. 
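The new parameter lets users and targets override the point at which switch lowering stops emitting a chain of compares and switches to a jump table; judging by the name registered here, the user-level spelling should be "--param case-values-threshold=N". The sketch below is purely conceptual, not GCC code: it only contrasts the two shapes that the threshold chooses between, using an invented dispatcher and a fixed four-entry table.

    #include <stdio.h>

    /* Conceptual sketch: with few cases a switch is lowered to a
       compare-and-branch chain; past the threshold a dense jump table
       (here: an array of function pointers) needs only one indexed
       branch.  */
    static void op0 (void) { puts ("op0"); }
    static void op1 (void) { puts ("op1"); }
    static void op2 (void) { puts ("op2"); }
    static void op3 (void) { puts ("op3"); }

    static void (*const table[4]) (void) = { op0, op1, op2, op3 };

    static void
    dispatch (unsigned i, unsigned n_cases, unsigned threshold)
    {
      if (n_cases >= threshold)
        {
          if (i < n_cases)
            table[i] ();              /* jump-table style               */
        }
      else
        {
          if (i == 0) op0 ();         /* if-chain style: up to n_cases  */
          else if (i == 1) op1 ();    /* comparisons                    */
          else if (i == 2) op2 ();
          else if (i == 3) op3 ();
        }
    }

    int
    main (void)
    {
      dispatch (2, 4, 3);   /* table path: prints "op2" */
      dispatch (1, 4, 8);   /* chain path: prints "op1" */
      return 0;
    }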
*/ +DEFPARAM (PARAM_CASE_VALUES_THRESHOLD, + "case-values-threshold", + "The smallest number of different values for which it is best to " + "use a jump-table instead of a tree of conditional branches, " + "if 0, use the default for the machine", + 0, 0, 0) + /* Local variables: diff --git a/gcc/postreload.c b/gcc/postreload.c index e5c6ce7cdbc..72e487e057f 100644 --- a/gcc/postreload.c +++ b/gcc/postreload.c @@ -1643,8 +1643,7 @@ static int move2add_last_label_luid; #define MODES_OK_FOR_MOVE2ADD(OUTMODE, INMODE) \ (GET_MODE_SIZE (OUTMODE) == GET_MODE_SIZE (INMODE) \ || (GET_MODE_SIZE (OUTMODE) <= GET_MODE_SIZE (INMODE) \ - && TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (OUTMODE), \ - GET_MODE_BITSIZE (INMODE)))) + && TRULY_NOOP_TRUNCATION_MODES_P (OUTMODE, INMODE))) /* This function is called with INSN that sets REG to (SYM + OFF), while REG is known to already have value (SYM + offset). diff --git a/gcc/regmove.c b/gcc/regmove.c index 89dbd592103..a2baca1c3bb 100644 --- a/gcc/regmove.c +++ b/gcc/regmove.c @@ -548,8 +548,7 @@ optimize_reg_copy_3 (rtx insn, rtx dest, rtx src) /* Do not use a SUBREG to truncate from one mode to another if truncation is not a nop. */ if (GET_MODE_BITSIZE (GET_MODE (src_reg)) <= GET_MODE_BITSIZE (GET_MODE (src)) - && !TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (GET_MODE (src)), - GET_MODE_BITSIZE (GET_MODE (src_reg)))) + && !TRULY_NOOP_TRUNCATION_MODES_P (GET_MODE (src), GET_MODE (src_reg))) return; set_insn = p; diff --git a/gcc/reload.c b/gcc/reload.c index 2e9a8910e8b..605f23d5962 100644 --- a/gcc/reload.c +++ b/gcc/reload.c @@ -347,9 +347,7 @@ push_secondary_reload (int in_p, rtx x, int opnum, int optional, /* If X is a paradoxical SUBREG, use the inner value to determine both the mode and object being reloaded. */ - if (GET_CODE (x) == SUBREG - && (GET_MODE_SIZE (GET_MODE (x)) - > GET_MODE_SIZE (GET_MODE (SUBREG_REG (x))))) + if (paradoxical_subreg_p (x)) { x = SUBREG_REG (x); reload_mode = GET_MODE (x); @@ -1026,20 +1024,20 @@ push_reload (rtx in, rtx out, rtx *inloc, rtx *outloc, || (((REG_P (SUBREG_REG (in)) && REGNO (SUBREG_REG (in)) >= FIRST_PSEUDO_REGISTER) || MEM_P (SUBREG_REG (in))) - && ((GET_MODE_SIZE (inmode) - > GET_MODE_SIZE (GET_MODE (SUBREG_REG (in)))) + && ((GET_MODE_PRECISION (inmode) + > GET_MODE_PRECISION (GET_MODE (SUBREG_REG (in)))) #ifdef LOAD_EXTEND_OP || (GET_MODE_SIZE (inmode) <= UNITS_PER_WORD && (GET_MODE_SIZE (GET_MODE (SUBREG_REG (in))) <= UNITS_PER_WORD) - && (GET_MODE_SIZE (inmode) - > GET_MODE_SIZE (GET_MODE (SUBREG_REG (in)))) + && (GET_MODE_PRECISION (inmode) + > GET_MODE_PRECISION (GET_MODE (SUBREG_REG (in)))) && INTEGRAL_MODE_P (GET_MODE (SUBREG_REG (in))) && LOAD_EXTEND_OP (GET_MODE (SUBREG_REG (in))) != UNKNOWN) #endif #ifdef WORD_REGISTER_OPERATIONS - || ((GET_MODE_SIZE (inmode) - < GET_MODE_SIZE (GET_MODE (SUBREG_REG (in)))) + || ((GET_MODE_PRECISION (inmode) + < GET_MODE_PRECISION (GET_MODE (SUBREG_REG (in)))) && ((GET_MODE_SIZE (inmode) - 1) / UNITS_PER_WORD == ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (in))) - 1) / UNITS_PER_WORD))) @@ -1134,11 +1132,11 @@ push_reload (rtx in, rtx out, rtx *inloc, rtx *outloc, || (((REG_P (SUBREG_REG (out)) && REGNO (SUBREG_REG (out)) >= FIRST_PSEUDO_REGISTER) || MEM_P (SUBREG_REG (out))) - && ((GET_MODE_SIZE (outmode) - > GET_MODE_SIZE (GET_MODE (SUBREG_REG (out)))) + && ((GET_MODE_PRECISION (outmode) + > GET_MODE_PRECISION (GET_MODE (SUBREG_REG (out)))) #ifdef WORD_REGISTER_OPERATIONS - || ((GET_MODE_SIZE (outmode) - < GET_MODE_SIZE (GET_MODE (SUBREG_REG (out)))) + || 
((GET_MODE_PRECISION (outmode) + < GET_MODE_PRECISION (GET_MODE (SUBREG_REG (out)))) && ((GET_MODE_SIZE (outmode) - 1) / UNITS_PER_WORD == ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (out))) - 1) / UNITS_PER_WORD))) @@ -4752,16 +4750,15 @@ find_reloads_toplev (rtx x, int opnum, enum reload_type type, if (regno >= FIRST_PSEUDO_REGISTER #ifdef LOAD_EXTEND_OP - && (GET_MODE_SIZE (GET_MODE (x)) - <= GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))) + && !paradoxical_subreg_p (x) #endif - && (reg_equiv_address (regno) != 0 - || (reg_equiv_mem (regno) != 0 - && (! strict_memory_address_addr_space_p - (GET_MODE (x), XEXP (reg_equiv_mem (regno), 0), - MEM_ADDR_SPACE (reg_equiv_mem (regno))) - || ! offsettable_memref_p (reg_equiv_mem (regno)) - || num_not_at_initial_offset)))) + && (reg_equiv_address (regno) != 0 + || (reg_equiv_mem (regno) != 0 + && (! strict_memory_address_addr_space_p + (GET_MODE (x), XEXP (reg_equiv_mem (regno), 0), + MEM_ADDR_SPACE (reg_equiv_mem (regno))) + || ! offsettable_memref_p (reg_equiv_mem (regno)) + || num_not_at_initial_offset)))) x = find_reloads_subreg_address (x, 1, opnum, type, ind_levels, insn, address_reloaded); } diff --git a/gcc/reload1.c b/gcc/reload1.c index a9aa01723ee..7f84fc81416 100644 --- a/gcc/reload1.c +++ b/gcc/reload1.c @@ -2840,8 +2840,7 @@ eliminate_regs_1 (rtx x, enum machine_mode mem_mode, rtx insn, eliminated version of the memory location because push_reload may do the replacement in certain circumstances. */ if (REG_P (SUBREG_REG (x)) - && (GET_MODE_SIZE (GET_MODE (x)) - <= GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))) + && !paradoxical_subreg_p (x) && reg_equivs && reg_equiv_memory_loc (REGNO (SUBREG_REG (x))) != 0) { @@ -4495,12 +4494,9 @@ strip_paradoxical_subreg (rtx *op_ptr, rtx *other_ptr) rtx op, inner, other, tem; op = *op_ptr; - if (GET_CODE (op) != SUBREG) + if (!paradoxical_subreg_p (op)) return false; - inner = SUBREG_REG (op); - if (GET_MODE_SIZE (GET_MODE (op)) <= GET_MODE_SIZE (GET_MODE (inner))) - return false; other = *other_ptr; tem = gen_lowpart_common (GET_MODE (inner), other); diff --git a/gcc/rtl.h b/gcc/rtl.h index da18788a292..ac3c87112c7 100644 --- a/gcc/rtl.h +++ b/gcc/rtl.h @@ -1633,6 +1633,7 @@ extern rtx operand_subword (rtx, unsigned int, int, enum machine_mode); /* In emit-rtl.c */ extern rtx operand_subword_force (rtx, unsigned int, enum machine_mode); +extern bool paradoxical_subreg_p (const_rtx); extern int subreg_lowpart_p (const_rtx); extern unsigned int subreg_lowpart_offset (enum machine_mode, enum machine_mode); @@ -1816,6 +1817,11 @@ extern rtx simplify_rtx (const_rtx); extern rtx avoid_constant_pool_reference (rtx); extern rtx delegitimize_mem_from_attrs (rtx); extern bool mode_signbit_p (enum machine_mode, const_rtx); +extern bool val_signbit_p (enum machine_mode, unsigned HOST_WIDE_INT); +extern bool val_signbit_known_set_p (enum machine_mode, + unsigned HOST_WIDE_INT); +extern bool val_signbit_known_clear_p (enum machine_mode, + unsigned HOST_WIDE_INT); /* In reginfo.c */ extern enum machine_mode choose_hard_reg_mode (unsigned int, unsigned int, diff --git a/gcc/rtlanal.c b/gcc/rtlanal.c index 0be6504a943..ac9da152c3c 100644 --- a/gcc/rtlanal.c +++ b/gcc/rtlanal.c @@ -3177,7 +3177,7 @@ subreg_lsb_1 (enum machine_mode outer_mode, unsigned int word; /* A paradoxical subreg begins at bit position 0. 
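The new rtl.h predicate centralizes the "outer mode wider than inner mode" test that these call sites used to open-code. Its body is not part of this hunk; inferred from the call sites, it is presumably along the lines of the sketch below, which assumes the usual GCC-internal headers (config.h, system.h, coretypes.h, tm.h, rtl.h) are already included and is not a verbatim copy of emit-rtl.c.

    /* Sketch of the new predicate: a SUBREG is "paradoxical" when the
       outer mode is wider than the mode of the inner object.  */
    bool
    paradoxical_subreg_p (const_rtx x)
    {
      if (GET_CODE (x) != SUBREG)
        return false;
      return (GET_MODE_PRECISION (GET_MODE (x))
              > GET_MODE_PRECISION (GET_MODE (SUBREG_REG (x))));
    }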
*/ - if (GET_MODE_BITSIZE (outer_mode) > GET_MODE_BITSIZE (inner_mode)) + if (GET_MODE_PRECISION (outer_mode) > GET_MODE_PRECISION (inner_mode)) return 0; if (WORDS_BIG_ENDIAN != BYTES_BIG_ENDIAN) @@ -3281,7 +3281,7 @@ subreg_get_info (unsigned int xregno, enum machine_mode xmode, /* Paradoxical subregs are otherwise valid. */ if (!rknown && offset == 0 - && GET_MODE_SIZE (ymode) > GET_MODE_SIZE (xmode)) + && GET_MODE_PRECISION (ymode) > GET_MODE_PRECISION (xmode)) { info->representable_p = true; /* If this is a big endian paradoxical subreg, which uses more @@ -3849,7 +3849,8 @@ nonzero_bits1 (const_rtx x, enum machine_mode mode, const_rtx known_x, unsigned HOST_WIDE_INT nonzero = GET_MODE_MASK (mode); unsigned HOST_WIDE_INT inner_nz; enum rtx_code code; - unsigned int mode_width = GET_MODE_BITSIZE (mode); + enum machine_mode inner_mode; + unsigned int mode_width = GET_MODE_PRECISION (mode); /* For floating-point and vector values, assume all bits are needed. */ if (FLOAT_MODE_P (GET_MODE (x)) || FLOAT_MODE_P (mode) @@ -3857,11 +3858,11 @@ nonzero_bits1 (const_rtx x, enum machine_mode mode, const_rtx known_x, return nonzero; /* If X is wider than MODE, use its mode instead. */ - if (GET_MODE_BITSIZE (GET_MODE (x)) > mode_width) + if (GET_MODE_PRECISION (GET_MODE (x)) > mode_width) { mode = GET_MODE (x); nonzero = GET_MODE_MASK (mode); - mode_width = GET_MODE_BITSIZE (mode); + mode_width = GET_MODE_PRECISION (mode); } if (mode_width > HOST_BITS_PER_WIDE_INT) @@ -3878,9 +3879,9 @@ nonzero_bits1 (const_rtx x, enum machine_mode mode, const_rtx known_x, not known to be zero. */ if (GET_MODE (x) != VOIDmode && GET_MODE (x) != mode - && GET_MODE_BITSIZE (GET_MODE (x)) <= BITS_PER_WORD - && GET_MODE_BITSIZE (GET_MODE (x)) <= HOST_BITS_PER_WIDE_INT - && GET_MODE_BITSIZE (mode) > GET_MODE_BITSIZE (GET_MODE (x))) + && GET_MODE_PRECISION (GET_MODE (x)) <= BITS_PER_WORD + && GET_MODE_PRECISION (GET_MODE (x)) <= HOST_BITS_PER_WIDE_INT + && GET_MODE_PRECISION (mode) > GET_MODE_PRECISION (GET_MODE (x))) { nonzero &= cached_nonzero_bits (x, GET_MODE (x), known_x, known_mode, known_ret); @@ -3988,7 +3989,7 @@ nonzero_bits1 (const_rtx x, enum machine_mode mode, const_rtx known_x, /* Disabled to avoid exponential mutual recursion between nonzero_bits and num_sign_bit_copies. */ if (num_sign_bit_copies (XEXP (x, 0), GET_MODE (x)) - == GET_MODE_BITSIZE (GET_MODE (x))) + == GET_MODE_PRECISION (GET_MODE (x))) nonzero = 1; #endif @@ -4001,7 +4002,7 @@ nonzero_bits1 (const_rtx x, enum machine_mode mode, const_rtx known_x, /* Disabled to avoid exponential mutual recursion between nonzero_bits and num_sign_bit_copies. 
*/ if (num_sign_bit_copies (XEXP (x, 0), GET_MODE (x)) - == GET_MODE_BITSIZE (GET_MODE (x))) + == GET_MODE_PRECISION (GET_MODE (x))) nonzero = 1; #endif break; @@ -4028,9 +4029,7 @@ nonzero_bits1 (const_rtx x, enum machine_mode mode, const_rtx known_x, if (GET_MODE (XEXP (x, 0)) != VOIDmode) { inner_nz &= GET_MODE_MASK (GET_MODE (XEXP (x, 0))); - if (inner_nz - & (((unsigned HOST_WIDE_INT) 1 - << (GET_MODE_BITSIZE (GET_MODE (XEXP (x, 0))) - 1)))) + if (val_signbit_known_set_p (GET_MODE (XEXP (x, 0)), inner_nz)) inner_nz |= (GET_MODE_MASK (mode) & ~GET_MODE_MASK (GET_MODE (XEXP (x, 0)))); } @@ -4076,7 +4075,7 @@ nonzero_bits1 (const_rtx x, enum machine_mode mode, const_rtx known_x, unsigned HOST_WIDE_INT nz1 = cached_nonzero_bits (XEXP (x, 1), mode, known_x, known_mode, known_ret); - int sign_index = GET_MODE_BITSIZE (GET_MODE (x)) - 1; + int sign_index = GET_MODE_PRECISION (GET_MODE (x)) - 1; int width0 = floor_log2 (nz0) + 1; int width1 = floor_log2 (nz1) + 1; int low0 = floor_log2 (nz0 & -nz0); @@ -4153,12 +4152,12 @@ nonzero_bits1 (const_rtx x, enum machine_mode mode, const_rtx known_x, & cached_nonzero_bits (SUBREG_REG (x), GET_MODE (x), known_x, known_mode, known_ret); + inner_mode = GET_MODE (SUBREG_REG (x)); /* If the inner mode is a single word for both the host and target machines, we can compute this from which bits of the inner object might be nonzero. */ - if (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (x))) <= BITS_PER_WORD - && (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (x))) - <= HOST_BITS_PER_WIDE_INT)) + if (GET_MODE_PRECISION (inner_mode) <= BITS_PER_WORD + && (GET_MODE_PRECISION (inner_mode) <= HOST_BITS_PER_WIDE_INT)) { nonzero &= cached_nonzero_bits (SUBREG_REG (x), mode, known_x, known_mode, known_ret); @@ -4166,22 +4165,19 @@ nonzero_bits1 (const_rtx x, enum machine_mode mode, const_rtx known_x, #if defined (WORD_REGISTER_OPERATIONS) && defined (LOAD_EXTEND_OP) /* If this is a typical RISC machine, we only have to worry about the way loads are extended. */ - if ((LOAD_EXTEND_OP (GET_MODE (SUBREG_REG (x))) == SIGN_EXTEND - ? (((nonzero - & (((unsigned HOST_WIDE_INT) 1 - << (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (x))) - 1)))) - != 0)) - : LOAD_EXTEND_OP (GET_MODE (SUBREG_REG (x))) != ZERO_EXTEND) + if ((LOAD_EXTEND_OP (inner_mode) == SIGN_EXTEND + ? val_signbit_known_set_p (inner_mode, nonzero) + : LOAD_EXTEND_OP (inner_mode) != ZERO_EXTEND) || !MEM_P (SUBREG_REG (x))) #endif { /* On many CISC machines, accessing an object in a wider mode causes the high-order bits to become undefined. So they are not known to be zero. 
*/ - if (GET_MODE_SIZE (GET_MODE (x)) - > GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))) + if (GET_MODE_PRECISION (GET_MODE (x)) + > GET_MODE_PRECISION (inner_mode)) nonzero |= (GET_MODE_MASK (GET_MODE (x)) - & ~GET_MODE_MASK (GET_MODE (SUBREG_REG (x)))); + & ~GET_MODE_MASK (inner_mode)); } } break; @@ -4199,10 +4195,10 @@ nonzero_bits1 (const_rtx x, enum machine_mode mode, const_rtx known_x, if (CONST_INT_P (XEXP (x, 1)) && INTVAL (XEXP (x, 1)) >= 0 && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT - && INTVAL (XEXP (x, 1)) < GET_MODE_BITSIZE (GET_MODE (x))) + && INTVAL (XEXP (x, 1)) < GET_MODE_PRECISION (GET_MODE (x))) { enum machine_mode inner_mode = GET_MODE (x); - unsigned int width = GET_MODE_BITSIZE (inner_mode); + unsigned int width = GET_MODE_PRECISION (inner_mode); int count = INTVAL (XEXP (x, 1)); unsigned HOST_WIDE_INT mode_mask = GET_MODE_MASK (inner_mode); unsigned HOST_WIDE_INT op_nonzero @@ -4355,7 +4351,7 @@ num_sign_bit_copies1 (const_rtx x, enum machine_mode mode, const_rtx known_x, unsigned int known_ret) { enum rtx_code code = GET_CODE (x); - unsigned int bitwidth = GET_MODE_BITSIZE (mode); + unsigned int bitwidth = GET_MODE_PRECISION (mode); int num0, num1, result; unsigned HOST_WIDE_INT nonzero; @@ -4371,26 +4367,26 @@ num_sign_bit_copies1 (const_rtx x, enum machine_mode mode, const_rtx known_x, return 1; /* For a smaller object, just ignore the high bits. */ - if (bitwidth < GET_MODE_BITSIZE (GET_MODE (x))) + if (bitwidth < GET_MODE_PRECISION (GET_MODE (x))) { num0 = cached_num_sign_bit_copies (x, GET_MODE (x), known_x, known_mode, known_ret); return MAX (1, - num0 - (int) (GET_MODE_BITSIZE (GET_MODE (x)) - bitwidth)); + num0 - (int) (GET_MODE_PRECISION (GET_MODE (x)) - bitwidth)); } - if (GET_MODE (x) != VOIDmode && bitwidth > GET_MODE_BITSIZE (GET_MODE (x))) + if (GET_MODE (x) != VOIDmode && bitwidth > GET_MODE_PRECISION (GET_MODE (x))) { #ifndef WORD_REGISTER_OPERATIONS - /* If this machine does not do all register operations on the entire - register and MODE is wider than the mode of X, we can say nothing - at all about the high-order bits. */ + /* If this machine does not do all register operations on the entire + register and MODE is wider than the mode of X, we can say nothing + at all about the high-order bits. */ return 1; #else /* Likewise on machines that do, if the mode of the object is smaller than a word and loads of that size don't sign extend, we can say nothing about the high order bits. */ - if (GET_MODE_BITSIZE (GET_MODE (x)) < BITS_PER_WORD + if (GET_MODE_PRECISION (GET_MODE (x)) < BITS_PER_WORD #ifdef LOAD_EXTEND_OP && LOAD_EXTEND_OP (GET_MODE (x)) != SIGN_EXTEND #endif @@ -4412,7 +4408,7 @@ num_sign_bit_copies1 (const_rtx x, enum machine_mode mode, const_rtx known_x, if (target_default_pointer_address_modes_p () && ! POINTERS_EXTEND_UNSIGNED && GET_MODE (x) == Pmode && mode == Pmode && REG_POINTER (x)) - return GET_MODE_BITSIZE (Pmode) - GET_MODE_BITSIZE (ptr_mode) + 1; + return GET_MODE_PRECISION (Pmode) - GET_MODE_PRECISION (ptr_mode) + 1; #endif { @@ -4437,7 +4433,7 @@ num_sign_bit_copies1 (const_rtx x, enum machine_mode mode, const_rtx known_x, /* Some RISC machines sign-extend all loads of smaller than a word. 
*/ if (LOAD_EXTEND_OP (GET_MODE (x)) == SIGN_EXTEND) return MAX (1, ((int) bitwidth - - (int) GET_MODE_BITSIZE (GET_MODE (x)) + 1)); + - (int) GET_MODE_PRECISION (GET_MODE (x)) + 1)); #endif break; @@ -4461,17 +4457,17 @@ num_sign_bit_copies1 (const_rtx x, enum machine_mode mode, const_rtx known_x, num0 = cached_num_sign_bit_copies (SUBREG_REG (x), mode, known_x, known_mode, known_ret); return MAX ((int) bitwidth - - (int) GET_MODE_BITSIZE (GET_MODE (x)) + 1, + - (int) GET_MODE_PRECISION (GET_MODE (x)) + 1, num0); } /* For a smaller object, just ignore the high bits. */ - if (bitwidth <= GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (x)))) + if (bitwidth <= GET_MODE_PRECISION (GET_MODE (SUBREG_REG (x)))) { num0 = cached_num_sign_bit_copies (SUBREG_REG (x), VOIDmode, known_x, known_mode, known_ret); return MAX (1, (num0 - - (int) (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (x))) + - (int) (GET_MODE_PRECISION (GET_MODE (SUBREG_REG (x))) - bitwidth))); } @@ -4487,8 +4483,7 @@ num_sign_bit_copies1 (const_rtx x, enum machine_mode mode, const_rtx known_x, then we lose all sign bit copies that existed before the store to the stack. */ - if ((GET_MODE_SIZE (GET_MODE (x)) - > GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))) + if (paradoxical_subreg_p (x) && LOAD_EXTEND_OP (GET_MODE (SUBREG_REG (x))) == SIGN_EXTEND && MEM_P (SUBREG_REG (x))) return cached_num_sign_bit_copies (SUBREG_REG (x), mode, @@ -4503,7 +4498,7 @@ num_sign_bit_copies1 (const_rtx x, enum machine_mode mode, const_rtx known_x, break; case SIGN_EXTEND: - return (bitwidth - GET_MODE_BITSIZE (GET_MODE (XEXP (x, 0))) + return (bitwidth - GET_MODE_PRECISION (GET_MODE (XEXP (x, 0))) + cached_num_sign_bit_copies (XEXP (x, 0), VOIDmode, known_x, known_mode, known_ret)); @@ -4511,7 +4506,7 @@ num_sign_bit_copies1 (const_rtx x, enum machine_mode mode, const_rtx known_x, /* For a smaller object, just ignore the high bits. */ num0 = cached_num_sign_bit_copies (XEXP (x, 0), VOIDmode, known_x, known_mode, known_ret); - return MAX (1, (num0 - (int) (GET_MODE_BITSIZE (GET_MODE (XEXP (x, 0))) + return MAX (1, (num0 - (int) (GET_MODE_PRECISION (GET_MODE (XEXP (x, 0))) - bitwidth))); case NOT: @@ -4688,7 +4683,7 @@ num_sign_bit_copies1 (const_rtx x, enum machine_mode mode, const_rtx known_x, known_x, known_mode, known_ret); if (CONST_INT_P (XEXP (x, 1)) && INTVAL (XEXP (x, 1)) > 0 - && INTVAL (XEXP (x, 1)) < GET_MODE_BITSIZE (GET_MODE (x))) + && INTVAL (XEXP (x, 1)) < GET_MODE_PRECISION (GET_MODE (x))) num0 = MIN ((int) bitwidth, num0 + INTVAL (XEXP (x, 1))); return num0; @@ -4698,7 +4693,7 @@ num_sign_bit_copies1 (const_rtx x, enum machine_mode mode, const_rtx known_x, if (!CONST_INT_P (XEXP (x, 1)) || INTVAL (XEXP (x, 1)) < 0 || INTVAL (XEXP (x, 1)) >= (int) bitwidth - || INTVAL (XEXP (x, 1)) >= GET_MODE_BITSIZE (GET_MODE (x))) + || INTVAL (XEXP (x, 1)) >= GET_MODE_PRECISION (GET_MODE (x))) return 1; num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode, @@ -4734,7 +4729,7 @@ num_sign_bit_copies1 (const_rtx x, enum machine_mode mode, const_rtx known_x, count those bits and return one less than that amount. If we can't safely compute the mask for this mode, always return BITWIDTH. 
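As a reminder of what this function estimates: num_sign_bit_copies counts how many leading bits of a value are guaranteed to equal the sign bit, so a QImode value sign-extended to SImode has at least 25 copies (32 - 8 + 1). The standalone helper below is a runtime analogue for a concrete 32-bit value, purely for intuition; its name is invented and it assumes the usual arithmetic behaviour of ">>" on negative ints.

    #include <stdio.h>
    #include <stdint.h>

    /* Count how many leading bits of X (including the sign bit itself)
       equal the sign bit -- the runtime analogue of what
       num_sign_bit_copies proves about an rtx at compile time.  */
    static int
    sign_bit_copies32 (int32_t x)
    {
      int sign = (x >> 31) & 1;
      int n = 1;
      while (n < 32 && (((x >> (31 - n)) & 1) == sign))
        n++;
      return n;
    }

    int
    main (void)
    {
      printf ("%d %d %d\n",
              sign_bit_copies32 (-1),            /* 32: all bits are the sign  */
              sign_bit_copies32 (1),             /* 31: only bit 0 differs     */
              sign_bit_copies32 ((int8_t) -5));  /* >= 25: sign-extended byte  */
      return 0;
    }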
*/ - bitwidth = GET_MODE_BITSIZE (mode); + bitwidth = GET_MODE_PRECISION (mode); if (bitwidth > HOST_BITS_PER_WIDE_INT) return 1; @@ -4921,12 +4916,8 @@ canonicalize_condition (rtx insn, rtx cond, int reverse, rtx *earliest, if ((GET_CODE (SET_SRC (set)) == COMPARE || (((code == NE || (code == LT - && GET_MODE_CLASS (inner_mode) == MODE_INT - && (GET_MODE_BITSIZE (inner_mode) - <= HOST_BITS_PER_WIDE_INT) - && (STORE_FLAG_VALUE - & ((unsigned HOST_WIDE_INT) 1 - << (GET_MODE_BITSIZE (inner_mode) - 1)))) + && val_signbit_known_set_p (inner_mode, + STORE_FLAG_VALUE)) #ifdef FLOAT_STORE_FLAG_VALUE || (code == LT && SCALAR_FLOAT_MODE_P (inner_mode) @@ -4941,12 +4932,8 @@ canonicalize_condition (rtx insn, rtx cond, int reverse, rtx *earliest, x = SET_SRC (set); else if (((code == EQ || (code == GE - && (GET_MODE_BITSIZE (inner_mode) - <= HOST_BITS_PER_WIDE_INT) - && GET_MODE_CLASS (inner_mode) == MODE_INT - && (STORE_FLAG_VALUE - & ((unsigned HOST_WIDE_INT) 1 - << (GET_MODE_BITSIZE (inner_mode) - 1)))) + && val_signbit_known_set_p (inner_mode, + STORE_FLAG_VALUE)) #ifdef FLOAT_STORE_FLAG_VALUE || (code == GE && SCALAR_FLOAT_MODE_P (inner_mode) @@ -5011,7 +4998,7 @@ canonicalize_condition (rtx insn, rtx cond, int reverse, rtx *earliest, if (GET_MODE_CLASS (GET_MODE (op0)) != MODE_CC && CONST_INT_P (op1) && GET_MODE (op0) != VOIDmode - && GET_MODE_BITSIZE (GET_MODE (op0)) <= HOST_BITS_PER_WIDE_INT) + && GET_MODE_PRECISION (GET_MODE (op0)) <= HOST_BITS_PER_WIDE_INT) { HOST_WIDE_INT const_val = INTVAL (op1); unsigned HOST_WIDE_INT uconst_val = const_val; @@ -5030,7 +5017,7 @@ canonicalize_condition (rtx insn, rtx cond, int reverse, rtx *earliest, case GE: if ((const_val & max_val) != ((unsigned HOST_WIDE_INT) 1 - << (GET_MODE_BITSIZE (GET_MODE (op0)) - 1))) + << (GET_MODE_PRECISION (GET_MODE (op0)) - 1))) code = GT, op1 = gen_int_mode (const_val - 1, GET_MODE (op0)); break; @@ -5136,7 +5123,7 @@ init_num_sign_bit_copies_in_rep (void) have to be sign-bit copies too. */ || num_sign_bit_copies_in_rep [in_mode][mode]) num_sign_bit_copies_in_rep [in_mode][mode] - += GET_MODE_BITSIZE (wider) - GET_MODE_BITSIZE (i); + += GET_MODE_PRECISION (wider) - GET_MODE_PRECISION (i); } } } @@ -5196,7 +5183,7 @@ low_bitmask_len (enum machine_mode mode, unsigned HOST_WIDE_INT m) { if (mode != VOIDmode) { - if (GET_MODE_BITSIZE (mode) > HOST_BITS_PER_WIDE_INT) + if (GET_MODE_PRECISION (mode) > HOST_BITS_PER_WIDE_INT) return -1; m &= GET_MODE_MASK (mode); } diff --git a/gcc/rtlhooks.c b/gcc/rtlhooks.c index a64c0674dc8..60e4d52890e 100644 --- a/gcc/rtlhooks.c +++ b/gcc/rtlhooks.c @@ -61,8 +61,7 @@ gen_lowpart_general (enum machine_mode mode, rtx x) /* The following exposes the use of "x" to CSE. */ if (GET_MODE_SIZE (GET_MODE (x)) <= UNITS_PER_WORD && SCALAR_INT_MODE_P (GET_MODE (x)) - && TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode), - GET_MODE_BITSIZE (GET_MODE (x))) + && TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (x)) && !reload_completed) return gen_lowpart_general (mode, force_reg (GET_MODE (x), x)); diff --git a/gcc/simplify-rtx.c b/gcc/simplify-rtx.c index 3c4df973ff5..82b818b02f1 100644 --- a/gcc/simplify-rtx.c +++ b/gcc/simplify-rtx.c @@ -1,7 +1,7 @@ /* RTL simplification functions for GNU compiler. Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998, - 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 - Free Software Foundation, Inc. + 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, + 2011 Free Software Foundation, Inc. 
This file is part of GCC. @@ -82,7 +82,7 @@ mode_signbit_p (enum machine_mode mode, const_rtx x) if (GET_MODE_CLASS (mode) != MODE_INT) return false; - width = GET_MODE_BITSIZE (mode); + width = GET_MODE_PRECISION (mode); if (width == 0) return false; @@ -103,6 +103,62 @@ mode_signbit_p (enum machine_mode mode, const_rtx x) val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1; return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1)); } + +/* Test whether VAL is equal to the most significant bit of mode MODE + (after masking with the mode mask of MODE). Returns false if the + precision of MODE is too large to handle. */ + +bool +val_signbit_p (enum machine_mode mode, unsigned HOST_WIDE_INT val) +{ + unsigned int width; + + if (GET_MODE_CLASS (mode) != MODE_INT) + return false; + + width = GET_MODE_PRECISION (mode); + if (width == 0 || width > HOST_BITS_PER_WIDE_INT) + return false; + + val &= GET_MODE_MASK (mode); + return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1)); +} + +/* Test whether the most significant bit of mode MODE is set in VAL. + Returns false if the precision of MODE is too large to handle. */ +bool +val_signbit_known_set_p (enum machine_mode mode, unsigned HOST_WIDE_INT val) +{ + unsigned int width; + + if (GET_MODE_CLASS (mode) != MODE_INT) + return false; + + width = GET_MODE_PRECISION (mode); + if (width == 0 || width > HOST_BITS_PER_WIDE_INT) + return false; + + val &= (unsigned HOST_WIDE_INT) 1 << (width - 1); + return val != 0; +} + +/* Test whether the most significant bit of mode MODE is clear in VAL. + Returns false if the precision of MODE is too large to handle. */ +bool +val_signbit_known_clear_p (enum machine_mode mode, unsigned HOST_WIDE_INT val) +{ + unsigned int width; + + if (GET_MODE_CLASS (mode) != MODE_INT) + return false; + + width = GET_MODE_PRECISION (mode); + if (width == 0 || width > HOST_BITS_PER_WIDE_INT) + return false; + + val &= (unsigned HOST_WIDE_INT) 1 << (width - 1); + return val == 0; +} /* Make a binary operation by properly ordering the operands and seeing if the expression folds. */ @@ -593,7 +649,7 @@ simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op) if (STORE_FLAG_VALUE == -1 && GET_CODE (op) == ASHIFTRT && GET_CODE (XEXP (op, 1)) - && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1) + && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1) return simplify_gen_relational (GE, mode, VOIDmode, XEXP (op, 0), const0_rtx); @@ -686,13 +742,13 @@ simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op) return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1)); } - /* (neg (mult A B)) becomes (mult (neg A) B). + /* (neg (mult A B)) becomes (mult A (neg B)). This works even for floating-point values. */ if (GET_CODE (op) == MULT && !HONOR_SIGN_DEPENDENT_ROUNDING (mode)) { - temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode); - return simplify_gen_binary (MULT, mode, temp, XEXP (op, 1)); + temp = simplify_gen_unary (NEG, mode, XEXP (op, 1), mode); + return simplify_gen_binary (MULT, mode, XEXP (op, 0), temp); } /* NEG commutes with ASHIFT since it is multiplication. Only do @@ -709,7 +765,7 @@ simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op) C is equal to the width of MODE minus 1. 
*/ if (GET_CODE (op) == ASHIFTRT && CONST_INT_P (XEXP (op, 1)) - && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1) + && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1) return simplify_gen_binary (LSHIFTRT, mode, XEXP (op, 0), XEXP (op, 1)); @@ -717,7 +773,7 @@ simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op) C is equal to the width of MODE minus 1. */ if (GET_CODE (op) == LSHIFTRT && CONST_INT_P (XEXP (op, 1)) - && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1) + && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1) return simplify_gen_binary (ASHIFTRT, mode, XEXP (op, 0), XEXP (op, 1)); @@ -734,14 +790,14 @@ simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op) && SCALAR_INT_MODE_P (GET_MODE (XEXP (op, 0)))) { enum machine_mode inner = GET_MODE (XEXP (op, 0)); - int isize = GET_MODE_BITSIZE (inner); + int isize = GET_MODE_PRECISION (inner); if (STORE_FLAG_VALUE == 1) { temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0), GEN_INT (isize - 1)); if (mode == inner) return temp; - if (GET_MODE_BITSIZE (mode) > isize) + if (GET_MODE_PRECISION (mode) > isize) return simplify_gen_unary (SIGN_EXTEND, mode, temp, inner); return simplify_gen_unary (TRUNCATE, mode, temp, inner); } @@ -751,7 +807,7 @@ simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op) GEN_INT (isize - 1)); if (mode == inner) return temp; - if (GET_MODE_BITSIZE (mode) > isize) + if (GET_MODE_PRECISION (mode) > isize) return simplify_gen_unary (ZERO_EXTEND, mode, temp, inner); return simplify_gen_unary (TRUNCATE, mode, temp, inner); } @@ -796,11 +852,10 @@ simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op) truncation. But don't do this for an (LSHIFTRT (MULT ...)) since this will cause problems with the umulXi3_highpart patterns. */ - if ((TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode), - GET_MODE_BITSIZE (GET_MODE (op))) + if ((TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op)) ? (num_sign_bit_copies (op, GET_MODE (op)) - > (unsigned int) (GET_MODE_BITSIZE (GET_MODE (op)) - - GET_MODE_BITSIZE (mode))) + > (unsigned int) (GET_MODE_PRECISION (GET_MODE (op)) + - GET_MODE_PRECISION (mode))) : truncated_to_mode (mode, op)) && ! (GET_CODE (op) == LSHIFTRT && GET_CODE (XEXP (op, 0)) == MULT)) @@ -810,7 +865,7 @@ simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op) STORE_FLAG_VALUE permits. This is like the previous test, but it works even if the comparison is done in a mode larger than HOST_BITS_PER_WIDE_INT. 
*/ - if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT + if (HWI_COMPUTABLE_MODE_P (mode) && COMPARISON_P (op) && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0) return rtl_hooks.gen_lowpart_no_emit (mode, op); @@ -849,7 +904,7 @@ simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op) && (flag_unsafe_math_optimizations || (SCALAR_FLOAT_MODE_P (GET_MODE (op)) && ((unsigned)significand_size (GET_MODE (op)) - >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0))) + >= (GET_MODE_PRECISION (GET_MODE (XEXP (op, 0))) - num_sign_bit_copies (XEXP (op, 0), GET_MODE (XEXP (op, 0)))))))) return simplify_gen_unary (FLOAT, mode, @@ -886,7 +941,7 @@ simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op) || (GET_CODE (op) == FLOAT && SCALAR_FLOAT_MODE_P (GET_MODE (op)) && ((unsigned)significand_size (GET_MODE (op)) - >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0))) + >= (GET_MODE_PRECISION (GET_MODE (XEXP (op, 0))) - num_sign_bit_copies (XEXP (op, 0), GET_MODE (XEXP (op, 0))))))) return simplify_gen_unary (GET_CODE (op), mode, @@ -908,16 +963,12 @@ simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op) /* If operand is something known to be positive, ignore the ABS. */ if (GET_CODE (op) == FFS || GET_CODE (op) == ABS - || ((GET_MODE_BITSIZE (GET_MODE (op)) - <= HOST_BITS_PER_WIDE_INT) - && ((nonzero_bits (op, GET_MODE (op)) - & ((unsigned HOST_WIDE_INT) 1 - << (GET_MODE_BITSIZE (GET_MODE (op)) - 1))) - == 0))) + || val_signbit_known_clear_p (GET_MODE (op), + nonzero_bits (op, GET_MODE (op)))) return op; /* If operand is known to be only -1 or 0, convert ABS to NEG. */ - if (num_sign_bit_copies (op, mode) == GET_MODE_BITSIZE (mode)) + if (num_sign_bit_copies (op, mode) == GET_MODE_PRECISION (mode)) return gen_rtx_NEG (mode, op); break; @@ -1210,8 +1261,8 @@ rtx simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode, rtx op, enum machine_mode op_mode) { - unsigned int width = GET_MODE_BITSIZE (mode); - unsigned int op_width = GET_MODE_BITSIZE (op_mode); + unsigned int width = GET_MODE_PRECISION (mode); + unsigned int op_width = GET_MODE_PRECISION (op_mode); if (code == VEC_DUPLICATE) { @@ -1311,7 +1362,7 @@ simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode, if (hv < 0) return 0; } - else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2) + else if (GET_MODE_PRECISION (op_mode) >= HOST_BITS_PER_WIDE_INT * 2) ; else hv = 0, lv &= GET_MODE_MASK (op_mode); @@ -1352,17 +1403,17 @@ simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode, if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (op_mode, val)) ; else - val = GET_MODE_BITSIZE (op_mode) - floor_log2 (arg0) - 1; + val = GET_MODE_PRECISION (op_mode) - floor_log2 (arg0) - 1; break; case CLRSB: arg0 &= GET_MODE_MASK (op_mode); if (arg0 == 0) - val = GET_MODE_BITSIZE (op_mode) - 1; + val = GET_MODE_PRECISION (op_mode) - 1; else if (arg0 >= 0) - val = GET_MODE_BITSIZE (op_mode) - floor_log2 (arg0) - 2; + val = GET_MODE_PRECISION (op_mode) - floor_log2 (arg0) - 2; else if (arg0 < 0) - val = GET_MODE_BITSIZE (op_mode) - floor_log2 (~arg0) - 2; + val = GET_MODE_PRECISION (op_mode) - floor_log2 (~arg0) - 2; break; case CTZ: @@ -1372,7 +1423,7 @@ simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode, /* Even if the value at zero is undefined, we have to come up with some replacement. Seems good enough. */ if (! 
CTZ_DEFINED_VALUE_AT_ZERO (op_mode, val)) - val = GET_MODE_BITSIZE (op_mode); + val = GET_MODE_PRECISION (op_mode); } else val = ctz_hwi (arg0); @@ -1416,17 +1467,16 @@ simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode, /* When zero-extending a CONST_INT, we need to know its original mode. */ gcc_assert (op_mode != VOIDmode); - if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT) + if (op_width == HOST_BITS_PER_WIDE_INT) { /* If we were really extending the mode, we would have to distinguish between zero-extension and sign-extension. */ - gcc_assert (width == GET_MODE_BITSIZE (op_mode)); + gcc_assert (width == op_width); val = arg0; } else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT) - val = arg0 & ~((unsigned HOST_WIDE_INT) (-1) - << GET_MODE_BITSIZE (op_mode)); + val = arg0 & GET_MODE_MASK (op_mode); else return 0; break; @@ -1434,23 +1484,20 @@ simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode, case SIGN_EXTEND: if (op_mode == VOIDmode) op_mode = mode; - if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT) + op_width = GET_MODE_PRECISION (op_mode); + if (op_width == HOST_BITS_PER_WIDE_INT) { /* If we were really extending the mode, we would have to distinguish between zero-extension and sign-extension. */ - gcc_assert (width == GET_MODE_BITSIZE (op_mode)); + gcc_assert (width == op_width); val = arg0; } - else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT) + else if (op_width < HOST_BITS_PER_WIDE_INT) { - val - = arg0 & ~((unsigned HOST_WIDE_INT) (-1) - << GET_MODE_BITSIZE (op_mode)); - if (val & ((unsigned HOST_WIDE_INT) 1 - << (GET_MODE_BITSIZE (op_mode) - 1))) - val - -= (unsigned HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode); + val = arg0 & GET_MODE_MASK (op_mode); + if (val_signbit_known_set_p (op_mode, val)) + val |= ~GET_MODE_MASK (op_mode); } else return 0; @@ -1519,12 +1566,12 @@ simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode, case CLZ: hv = 0; if (h1 != 0) - lv = GET_MODE_BITSIZE (mode) - floor_log2 (h1) - 1 + lv = GET_MODE_PRECISION (mode) - floor_log2 (h1) - 1 - HOST_BITS_PER_WIDE_INT; else if (l1 != 0) - lv = GET_MODE_BITSIZE (mode) - floor_log2 (l1) - 1; + lv = GET_MODE_PRECISION (mode) - floor_log2 (l1) - 1; else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, lv)) - lv = GET_MODE_BITSIZE (mode); + lv = GET_MODE_PRECISION (mode); break; case CTZ: @@ -1534,7 +1581,7 @@ simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode, else if (h1 != 0) lv = HOST_BITS_PER_WIDE_INT + ctz_hwi (h1); else if (! 
CTZ_DEFINED_VALUE_AT_ZERO (mode, lv)) - lv = GET_MODE_BITSIZE (mode); + lv = GET_MODE_PRECISION (mode); break; case POPCOUNT: @@ -1588,7 +1635,7 @@ simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode, case ZERO_EXTEND: gcc_assert (op_mode != VOIDmode); - if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT) + if (op_width > HOST_BITS_PER_WIDE_INT) return 0; hv = 0; @@ -1597,15 +1644,13 @@ simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode, case SIGN_EXTEND: if (op_mode == VOIDmode - || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT) + || op_width > HOST_BITS_PER_WIDE_INT) return 0; else { lv = l1 & GET_MODE_MASK (op_mode); - if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT - && (lv & ((unsigned HOST_WIDE_INT) 1 - << (GET_MODE_BITSIZE (op_mode) - 1))) != 0) - lv -= (unsigned HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode); + if (val_signbit_known_set_p (op_mode, lv)) + lv |= ~GET_MODE_MASK (op_mode); hv = HWI_SIGN_EXTEND (lv); } @@ -1876,7 +1921,7 @@ simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode, { rtx tem, reversed, opleft, opright; HOST_WIDE_INT val; - unsigned int width = GET_MODE_BITSIZE (mode); + unsigned int width = GET_MODE_PRECISION (mode); /* Even if we can't compute a constant result, there are some cases worth simplifying. */ @@ -2271,12 +2316,34 @@ simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode, if (GET_CODE (op0) == NEG) { rtx temp = simplify_unary_operation (NEG, mode, op1, mode); + /* If op1 is a MULT as well and simplify_unary_operation + just moved the NEG to the second operand, simplify_gen_binary + below could through simplify_associative_operation move + the NEG around again and recurse endlessly. */ + if (temp + && GET_CODE (op1) == MULT + && GET_CODE (temp) == MULT + && XEXP (op1, 0) == XEXP (temp, 0) + && GET_CODE (XEXP (temp, 1)) == NEG + && XEXP (op1, 1) == XEXP (XEXP (temp, 1), 0)) + temp = NULL_RTX; if (temp) return simplify_gen_binary (MULT, mode, XEXP (op0, 0), temp); } if (GET_CODE (op1) == NEG) { rtx temp = simplify_unary_operation (NEG, mode, op0, mode); + /* If op0 is a MULT as well and simplify_unary_operation + just moved the NEG to the second operand, simplify_gen_binary + below could through simplify_associative_operation move + the NEG around again and recurse endlessly. */ + if (temp + && GET_CODE (op0) == MULT + && GET_CODE (temp) == MULT + && XEXP (op0, 0) == XEXP (temp, 0) + && GET_CODE (XEXP (temp, 1)) == NEG + && XEXP (op0, 1) == XEXP (XEXP (temp, 1), 0)) + temp = NULL_RTX; if (temp) return simplify_gen_binary (MULT, mode, temp, XEXP (op1, 0)); } @@ -2380,7 +2447,7 @@ simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode, /* (ior A C) is C if all bits of A that might be nonzero are on in C. 
*/ if (CONST_INT_P (op1) - && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT + && HWI_COMPUTABLE_MODE_P (mode) && (nonzero_bits (op0, mode) & ~UINTVAL (op1)) == 0) return op1; @@ -2439,7 +2506,7 @@ simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode, && CONST_INT_P (XEXP (opleft, 1)) && CONST_INT_P (XEXP (opright, 1)) && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1)) - == GET_MODE_BITSIZE (mode))) + == GET_MODE_PRECISION (mode))) return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1)); /* Same, but for ashift that has been "simplified" to a wider mode @@ -2458,14 +2525,14 @@ simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode, && CONST_INT_P (XEXP (SUBREG_REG (opleft), 1)) && CONST_INT_P (XEXP (opright, 1)) && (INTVAL (XEXP (SUBREG_REG (opleft), 1)) + INTVAL (XEXP (opright, 1)) - == GET_MODE_BITSIZE (mode))) + == GET_MODE_PRECISION (mode))) return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (SUBREG_REG (opleft), 1)); /* If we have (ior (and (X C1) C2)), simplify this by making C1 as small as possible if C1 actually changes. */ if (CONST_INT_P (op1) - && (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT + && (HWI_COMPUTABLE_MODE_P (mode) || INTVAL (op1) > 0) && GET_CODE (op0) == AND && CONST_INT_P (XEXP (op0, 1)) @@ -2536,7 +2603,7 @@ simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode, convert them into an IOR. This helps to detect rotation encoded using those methods and possibly other simplifications. */ - if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT + if (HWI_COMPUTABLE_MODE_P (mode) && (nonzero_bits (op0, mode) & nonzero_bits (op1, mode)) == 0) return (simplify_gen_binary (IOR, mode, op0, op1)); @@ -2636,14 +2703,12 @@ simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode, && trueop1 == const1_rtx && GET_CODE (op0) == LSHIFTRT && CONST_INT_P (XEXP (op0, 1)) - && INTVAL (XEXP (op0, 1)) == GET_MODE_BITSIZE (mode) - 1) + && INTVAL (XEXP (op0, 1)) == GET_MODE_PRECISION (mode) - 1) return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx); /* (xor (comparison foo bar) (const_int sign-bit)) when STORE_FLAG_VALUE is the sign bit. */ - if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT - && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode)) - == (unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1)) + if (val_signbit_p (mode, STORE_FLAG_VALUE) && trueop1 == const_true_rtx && COMPARISON_P (op0) && (reversed = reversed_comparison (op0, mode))) @@ -2657,7 +2722,7 @@ simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode, case AND: if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0)) return trueop1; - if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT) + if (HWI_COMPUTABLE_MODE_P (mode)) { HOST_WIDE_INT nzop0 = nonzero_bits (trueop0, mode); HOST_WIDE_INT nzop1; @@ -2690,7 +2755,7 @@ simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode, if ((GET_CODE (op0) == SIGN_EXTEND || GET_CODE (op0) == ZERO_EXTEND) && CONST_INT_P (trueop1) - && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT + && HWI_COMPUTABLE_MODE_P (mode) && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0))) & UINTVAL (trueop1)) == 0) { @@ -2772,7 +2837,7 @@ simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode, Also, if (N & M) == 0, then (A +- N) & M -> A & M. 
*/ if (CONST_INT_P (trueop1) - && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT + && HWI_COMPUTABLE_MODE_P (mode) && ~UINTVAL (trueop1) && (UINTVAL (trueop1) & (UINTVAL (trueop1) + 1)) == 0 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS)) @@ -2997,7 +3062,7 @@ simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode, unsigned HOST_WIDE_INT zero_val = 0; if (CLZ_DEFINED_VALUE_AT_ZERO (imode, zero_val) - && zero_val == GET_MODE_BITSIZE (imode) + && zero_val == GET_MODE_PRECISION (imode) && INTVAL (trueop1) == exact_log2 (zero_val)) return simplify_gen_relational (EQ, mode, imode, XEXP (op0, 0), const0_rtx); @@ -3006,8 +3071,7 @@ simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode, case SMIN: if (width <= HOST_BITS_PER_WIDE_INT - && CONST_INT_P (trueop1) - && UINTVAL (trueop1) == (unsigned HOST_WIDE_INT) 1 << (width -1) + && mode_signbit_p (mode, trueop1) && ! side_effects_p (op0)) return op1; if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)) @@ -3288,7 +3352,7 @@ simplify_const_binary_operation (enum rtx_code code, enum machine_mode mode, { HOST_WIDE_INT arg0, arg1, arg0s, arg1s; HOST_WIDE_INT val; - unsigned int width = GET_MODE_BITSIZE (mode); + unsigned int width = GET_MODE_PRECISION (mode); if (VECTOR_MODE_P (mode) && code != VEC_CONCAT @@ -3573,24 +3637,24 @@ simplify_const_binary_operation (enum rtx_code code, enum machine_mode mode, unsigned HOST_WIDE_INT cnt; if (SHIFT_COUNT_TRUNCATED) - o1 = double_int_zext (o1, GET_MODE_BITSIZE (mode)); + o1 = double_int_zext (o1, GET_MODE_PRECISION (mode)); if (!double_int_fits_in_uhwi_p (o1) - || double_int_to_uhwi (o1) >= GET_MODE_BITSIZE (mode)) + || double_int_to_uhwi (o1) >= GET_MODE_PRECISION (mode)) return 0; cnt = double_int_to_uhwi (o1); if (code == LSHIFTRT || code == ASHIFTRT) - res = double_int_rshift (o0, cnt, GET_MODE_BITSIZE (mode), + res = double_int_rshift (o0, cnt, GET_MODE_PRECISION (mode), code == ASHIFTRT); else if (code == ASHIFT) - res = double_int_lshift (o0, cnt, GET_MODE_BITSIZE (mode), + res = double_int_lshift (o0, cnt, GET_MODE_PRECISION (mode), true); else if (code == ROTATE) - res = double_int_lrotate (o0, cnt, GET_MODE_BITSIZE (mode)); + res = double_int_lrotate (o0, cnt, GET_MODE_PRECISION (mode)); else /* code == ROTATERT */ - res = double_int_rrotate (o0, cnt, GET_MODE_BITSIZE (mode)); + res = double_int_rrotate (o0, cnt, GET_MODE_PRECISION (mode)); } break; @@ -3612,16 +3676,16 @@ simplify_const_binary_operation (enum rtx_code code, enum machine_mode mode, if (width < HOST_BITS_PER_WIDE_INT) { - arg0 &= ((unsigned HOST_WIDE_INT) 1 << width) - 1; - arg1 &= ((unsigned HOST_WIDE_INT) 1 << width) - 1; + arg0 &= GET_MODE_MASK (mode); + arg1 &= GET_MODE_MASK (mode); arg0s = arg0; - if (arg0s & ((unsigned HOST_WIDE_INT) 1 << (width - 1))) - arg0s |= ((unsigned HOST_WIDE_INT) (-1) << width); + if (val_signbit_known_set_p (mode, arg0s)) + arg0s |= ~GET_MODE_MASK (mode); - arg1s = arg1; - if (arg1s & ((unsigned HOST_WIDE_INT) 1 << (width - 1))) - arg1s |= ((unsigned HOST_WIDE_INT) (-1) << width); + arg1s = arg1; + if (val_signbit_known_set_p (mode, arg1s)) + arg1s |= ~GET_MODE_MASK (mode); } else { @@ -4563,7 +4627,7 @@ simplify_const_relational_operation (enum rtx_code code, && (GET_CODE (trueop1) == CONST_DOUBLE || CONST_INT_P (trueop1))) { - int width = GET_MODE_BITSIZE (mode); + int width = GET_MODE_PRECISION (mode); HOST_WIDE_INT l0s, h0s, l1s, h1s; unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u; @@ -4594,14 +4658,14 @@ simplify_const_relational_operation (enum 
rtx_code code, we have to sign or zero-extend the values. */ if (width != 0 && width < HOST_BITS_PER_WIDE_INT) { - l0u &= ((unsigned HOST_WIDE_INT) 1 << width) - 1; - l1u &= ((unsigned HOST_WIDE_INT) 1 << width) - 1; + l0u &= GET_MODE_MASK (mode); + l1u &= GET_MODE_MASK (mode); - if (l0s & ((unsigned HOST_WIDE_INT) 1 << (width - 1))) - l0s |= ((unsigned HOST_WIDE_INT) (-1) << width); + if (val_signbit_known_set_p (mode, l0s)) + l0s |= ~GET_MODE_MASK (mode); - if (l1s & ((unsigned HOST_WIDE_INT) 1 << (width - 1))) - l1s |= ((unsigned HOST_WIDE_INT) (-1) << width); + if (val_signbit_known_set_p (mode, l1s)) + l1s |= ~GET_MODE_MASK (mode); } if (width != 0 && width <= HOST_BITS_PER_WIDE_INT) h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s); @@ -4618,8 +4682,7 @@ simplify_const_relational_operation (enum rtx_code code, } /* Optimize comparisons with upper and lower bounds. */ - if (SCALAR_INT_MODE_P (mode) - && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT + if (HWI_COMPUTABLE_MODE_P (mode) && CONST_INT_P (trueop1)) { int sign; @@ -4752,7 +4815,7 @@ simplify_const_relational_operation (enum rtx_code code, rtx inner_const = avoid_constant_pool_reference (XEXP (op0, 1)); if (CONST_INT_P (inner_const) && inner_const != const0_rtx) { - int sign_bitnum = GET_MODE_BITSIZE (mode) - 1; + int sign_bitnum = GET_MODE_PRECISION (mode) - 1; int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum && (UINTVAL (inner_const) & ((unsigned HOST_WIDE_INT) 1 @@ -4844,7 +4907,7 @@ simplify_ternary_operation (enum rtx_code code, enum machine_mode mode, enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2) { - unsigned int width = GET_MODE_BITSIZE (mode); + unsigned int width = GET_MODE_PRECISION (mode); bool any_change = false; rtx tem; @@ -4889,32 +4952,24 @@ simplify_ternary_operation (enum rtx_code code, enum machine_mode mode, { /* Extracting a bit-field from a constant */ unsigned HOST_WIDE_INT val = UINTVAL (op0); - + HOST_WIDE_INT op1val = INTVAL (op1); + HOST_WIDE_INT op2val = INTVAL (op2); if (BITS_BIG_ENDIAN) - val >>= GET_MODE_BITSIZE (op0_mode) - INTVAL (op2) - INTVAL (op1); + val >>= GET_MODE_PRECISION (op0_mode) - op2val - op1val; else - val >>= INTVAL (op2); + val >>= op2val; - if (HOST_BITS_PER_WIDE_INT != INTVAL (op1)) + if (HOST_BITS_PER_WIDE_INT != op1val) { /* First zero-extend. */ - val &= ((unsigned HOST_WIDE_INT) 1 << INTVAL (op1)) - 1; + val &= ((unsigned HOST_WIDE_INT) 1 << op1val) - 1; /* If desired, propagate sign bit. */ if (code == SIGN_EXTRACT - && (val & ((unsigned HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))) + && (val & ((unsigned HOST_WIDE_INT) 1 << (op1val - 1))) != 0) - val |= ~ (((unsigned HOST_WIDE_INT) 1 << INTVAL (op1)) - 1); + val |= ~ (((unsigned HOST_WIDE_INT) 1 << op1val) - 1); } - /* Clear the bits that don't belong in our mode, - unless they and our sign bit are all one. - So we get either a reasonable negative value or a reasonable - unsigned value for this mode. */ - if (width < HOST_BITS_PER_WIDE_INT - && ((val & ((unsigned HOST_WIDE_INT) (-1) << (width - 1))) - != ((unsigned HOST_WIDE_INT) (-1) << (width - 1)))) - val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1; - return gen_int_mode (val, mode); } break; @@ -5557,7 +5612,7 @@ simplify_subreg (enum machine_mode outermode, rtx op, /* Optimize SUBREG truncations of zero and sign extended values. 
*/ if ((GET_CODE (op) == ZERO_EXTEND || GET_CODE (op) == SIGN_EXTEND) - && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)) + && GET_MODE_PRECISION (outermode) < GET_MODE_PRECISION (innermode)) { unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte); @@ -5573,7 +5628,7 @@ simplify_subreg (enum machine_mode outermode, rtx op, enum machine_mode origmode = GET_MODE (XEXP (op, 0)); if (outermode == origmode) return XEXP (op, 0); - if (GET_MODE_BITSIZE (outermode) <= GET_MODE_BITSIZE (origmode)) + if (GET_MODE_PRECISION (outermode) <= GET_MODE_PRECISION (origmode)) return simplify_gen_subreg (outermode, XEXP (op, 0), origmode, subreg_lowpart_offset (outermode, origmode)); @@ -5585,7 +5640,7 @@ simplify_subreg (enum machine_mode outermode, rtx op, /* A SUBREG resulting from a zero extension may fold to zero if it extracts higher bits that the ZERO_EXTEND's source bits. */ if (GET_CODE (op) == ZERO_EXTEND - && bitpos >= GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0)))) + && bitpos >= GET_MODE_PRECISION (GET_MODE (XEXP (op, 0)))) return CONST0_RTX (outermode); } @@ -5599,11 +5654,11 @@ simplify_subreg (enum machine_mode outermode, rtx op, to avoid the possibility that an outer LSHIFTRT shifts by more than the sign extension's sign_bit_copies and introduces zeros into the high bits of the result. */ - && (2 * GET_MODE_BITSIZE (outermode)) <= GET_MODE_BITSIZE (innermode) + && (2 * GET_MODE_PRECISION (outermode)) <= GET_MODE_PRECISION (innermode) && CONST_INT_P (XEXP (op, 1)) && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode - && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode) + && INTVAL (XEXP (op, 1)) < GET_MODE_PRECISION (outermode) && subreg_lsb_1 (outermode, innermode, byte) == 0) return simplify_gen_binary (ASHIFTRT, outermode, XEXP (XEXP (op, 0), 0), XEXP (op, 1)); @@ -5614,11 +5669,11 @@ simplify_subreg (enum machine_mode outermode, rtx op, if ((GET_CODE (op) == LSHIFTRT || GET_CODE (op) == ASHIFTRT) && SCALAR_INT_MODE_P (outermode) - && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode) + && GET_MODE_PRECISION (outermode) < GET_MODE_PRECISION (innermode) && CONST_INT_P (XEXP (op, 1)) && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode - && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode) + && INTVAL (XEXP (op, 1)) < GET_MODE_PRECISION (outermode) && subreg_lsb_1 (outermode, innermode, byte) == 0) return simplify_gen_binary (LSHIFTRT, outermode, XEXP (XEXP (op, 0), 0), XEXP (op, 1)); @@ -5628,12 +5683,12 @@ simplify_subreg (enum machine_mode outermode, rtx op, the outer subreg is effectively a truncation to the original mode. 
*/ if (GET_CODE (op) == ASHIFT && SCALAR_INT_MODE_P (outermode) - && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode) + && GET_MODE_PRECISION (outermode) < GET_MODE_PRECISION (innermode) && CONST_INT_P (XEXP (op, 1)) && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND) && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode - && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode) + && INTVAL (XEXP (op, 1)) < GET_MODE_PRECISION (outermode) && subreg_lsb_1 (outermode, innermode, byte) == 0) return simplify_gen_binary (ASHIFT, outermode, XEXP (XEXP (op, 0), 0), XEXP (op, 1)); @@ -5642,12 +5697,12 @@ simplify_subreg (enum machine_mode outermode, rtx op, if ((GET_CODE (op) == LSHIFTRT || GET_CODE (op) == ASHIFTRT) && SCALAR_INT_MODE_P (outermode) - && GET_MODE_BITSIZE (outermode) >= BITS_PER_WORD - && GET_MODE_BITSIZE (innermode) >= (2 * GET_MODE_BITSIZE (outermode)) + && GET_MODE_PRECISION (outermode) >= BITS_PER_WORD + && GET_MODE_PRECISION (innermode) >= (2 * GET_MODE_PRECISION (outermode)) && CONST_INT_P (XEXP (op, 1)) - && (INTVAL (XEXP (op, 1)) & (GET_MODE_BITSIZE (outermode) - 1)) == 0 + && (INTVAL (XEXP (op, 1)) & (GET_MODE_PRECISION (outermode) - 1)) == 0 && INTVAL (XEXP (op, 1)) >= 0 - && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (innermode) + && INTVAL (XEXP (op, 1)) < GET_MODE_PRECISION (innermode) && byte == subreg_lowpart_offset (outermode, innermode)) { int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT; diff --git a/gcc/stmt.c b/gcc/stmt.c index 1a9f9e505da..38e1e285234 100644 --- a/gcc/stmt.c +++ b/gcc/stmt.c @@ -53,6 +53,7 @@ along with GCC; see the file COPYING3. If not see #include "alloc-pool.h" #include "pretty-print.h" #include "bitmap.h" +#include "params.h" /* Functions and data structures for expanding case statements. */ @@ -2270,6 +2271,20 @@ expand_switch_using_bit_tests_p (tree index_expr, tree range, || (uniq == 3 && count >= 6))); } +/* Return the smallest number of different values for which it is best to use a + jump-table instead of a tree of conditional branches. */ + +static unsigned int +case_values_threshold (void) +{ + unsigned int threshold = PARAM_VALUE (PARAM_CASE_VALUES_THRESHOLD); + + if (threshold == 0) + threshold = targetm.case_values_threshold (); + + return threshold; +} + /* Terminate a case (Pascal/Ada) or switch (C) statement in which ORIG_INDEX is the expression to be tested. If ORIG_TYPE is not NULL, it is the original ORIG_INDEX @@ -2424,7 +2439,7 @@ expand_case (gimple stmt) If the switch-index is a constant, do it this way because we can optimize it. */ - else if (count < targetm.case_values_threshold () + else if (count < case_values_threshold () || compare_tree_int (range, (optimize_insn_for_size_p () ? 3 : 10) * count) > 0 /* RANGE may be signed, and really large ranges will show up diff --git a/gcc/testsuite/ChangeLog b/gcc/testsuite/ChangeLog index 326ba5f8770..2c390b4c3d6 100644 --- a/gcc/testsuite/ChangeLog +++ b/gcc/testsuite/ChangeLog @@ -1,3 +1,141 @@ +2011-07-07 Richard Guenther <rguenther@suse.de> + + * gcc.dg/graphite/pr37485.c: Add -floop-block. + +2011-07-07 Rainer Orth <ro@CeBiTec.Uni-Bielefeld.DE> + + PR target/39150 + * gcc.misc-tests/linkage.exp: Handle x86_64-*-solaris2.1[0-9]*. + +2011-07-06 Michael Meissner <meissner@linux.vnet.ibm.com> + + * gcc.target/powerpc/no-r11-1.c: New test for -mr11, -mno-r11. + * gcc.target/powerpc/no-r11-2.c: Ditto. + * gcc.target/powerpc/no-r11-3.c: Ditto. 
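
The simplify-rtx.c hunks earlier in this patch replace the open-coded sign-extension sequences (mask with a shifted constant, then flip in the high bits by hand) with GET_MODE_MASK and val_signbit_known_set_p. A minimal standalone sketch of the same arithmetic in plain C, with no GCC internals assumed (the function name and the 64-bit host width are illustrative only):

#include <stdint.h>
#include <stdio.h>

/* Sign-extend the low WIDTH bits of VAL, mirroring the
   "val &= GET_MODE_MASK (mode); if the sign bit is set,
   val |= ~GET_MODE_MASK (mode)" idiom the patch introduces in
   simplify_const_unary_operation and the constant folders.  */
static int64_t
sign_extend_width (uint64_t val, unsigned int width)
{
  uint64_t mask = (width >= 64) ? ~(uint64_t) 0
                                : (((uint64_t) 1 << width) - 1);
  val &= mask;                                  /* zero-extend first */
  if (width < 64 && (val & ((uint64_t) 1 << (width - 1))))
    val |= ~mask;                               /* propagate the sign bit */
  return (int64_t) val;
}

int
main (void)
{
  printf ("%lld\n", (long long) sign_extend_width (0xff, 8));   /* -1 */
  printf ("%lld\n", (long long) sign_extend_width (0x7f, 8));   /* 127 */
  return 0;
}

The same mask-based form replaces the shift-based sequences in simplify_const_binary_operation and simplify_const_relational_operation above.
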
+ +2011-07-06 Uros Bizjak <ubizjak@gmail.com> + + * gcc.dg/stack-layout-2.c: Cleanup expand rtl dump. + * gfortran.dg/move_alloc_4.f90: Cleanup original tree dump. + +2011-07-06 Georg-Johann Lay <avr@gjlay.de> + + * lib/target-supports.exp (check_profiling_available): Disable + profiling with -fprofile-generate for target avr. + +2011-07-06 Richard Guenther <rguenther@suse.de> + + PR tree-optimization/49645 + * g++.dg/tree-ssa/pr8781.C: Disable SRA. + +2011-07-06 H.J. Lu <hongjiu.lu@intel.com> + + PR middle-end/47383 + * gcc.dg/pr47383.c: New. + +2011-07-06 Rainer Orth <ro@CeBiTec.Uni-Bielefeld.DE> + + PR tree-optimization/49647 + * gcc.dg/tree-ssa/20030807-7.c: Remove xfail *-*-*. + +2011-07-06 Eric Botcazou <ebotcazou@adacore.com> + + PR testsuite/49542 + * gcc.dg/vect/pr33804.c: XFAIL if vect_no_align unconditionally. + * gcc.dg/vect/slp-multitypes-3.c: XFAIL on SPARC unconditionally. + +2011-07-05 Jason Merrill <jason@redhat.com> + + PR c++/48157 + * g++.dg/template/template-id-4.C: New. + +2011-07-05 Georg-Johann Lay <avr@gjlay.de> + + * gcc.dg/pr44023.c: Add dg-require-effective-target int32plus + * gcc.dg/pr43402.c: Ditto. + +2011-07-05 Georg-Johann Lay <avr@gjlay.de> + + * gcc.dg/cpp/pragma-3.c: Add dg-require-effective-target fopenmp. + +2011-07-05 Michael Meissner <meissner@linux.vnet.ibm.com> + + * gcc.target/powerpc/ppc-switch-1.c: New test for + --param case-values-threshold. + * gcc.target/powerpc/ppc-switch-2.c: Ditto. + +2011-07-05 Janis Johnson <janisjo@codesourcery.com> + + * gcc.target/arm/pr42093.c: Use "-fno-reorder-blocks". + +2011-07-05 Jason Merrill <jason@redhat.com> + + * g++.dg/rtti/anon-ns1.C: Allow for null-termination. + +2011-07-05 Sebastian Pop <sebastian.pop@amd.com> + + PR tree-optimization/47654 + * gcc.dg/graphite/block-pr47654.c: New. + +2011-07-05 Jason Merrill <jason@redhat.com> + + * g++.dg/cpp0x/lambda/lambda-ref2.C: New. + +2011-07-05 Razya Ladelsky <razya@il.ibm.com> + + PR tree-optimization/49580 + * gcc.dg/autopar/pr49580.c: New test. + +2011-07-05 Richard Guenther <rguenther@suse.de> + + PR tree-optimization/49518 + PR tree-optimization/49628 + * g++.dg/torture/pr49628.C: New testcase. + * gcc.dg/torture/pr49518.c: Likewise. + +2011-07-05 Rainer Orth <ro@CeBiTec.Uni-Bielefeld.DE> + + * ada/acats/run_acats (which): Extract last field from type -p, + type output only if command succeeded. + +2011-07-04 Jason Merrill <jason@redhat.com> + + * g++.dg/abi/mangle48.C: New. + + * g++.dg/cpp0x/diag1.C: New. + + * g++.dg/diagnostic/aka1.C: New. + +2011-07-04 Jakub Jelinek <jakub@redhat.com> + + PR rtl-optimization/49619 + * gcc.dg/pr49619.c: New test. + + PR rtl-optimization/49472 + * gfortran.dg/pr49472.f90: New test. + + PR debug/49602 + * gcc.dg/pr49602.c: New test. + +2011-07-04 Georg-Johann Lay <avr@gjlay.de> + + PR target/34734 + PR target/44643 + * gcc.target/avr/avr.exp: Run over cpp files, too. + * gcc.target/avr/torture/avr-torture.exp: Ditto. + * gcc.target/avr/progmem.h: New file. + * gcc.target/avr/exit-abort.h: New file. + * gcc.target/avr/progmem-error-1.c: New file. + * gcc.target/avr/progmem-error-1.cpp: New file. + * gcc.target/avr/progmem-warning-1.c: New file. + * gcc.target/avr/torture/progmem-1.c: New file. + * gcc.target/avr/torture/progmem-1.cpp: New file. + +2011-07-04 Richard Guenther <rguenther@suse.de> + + PR tree-optimization/49615 + * g++.dg/torture/pr49615.C: New testcase. 
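
The stmt.c change earlier in this patch routes expand_case through a new case_values_threshold wrapper: a nonzero --param case-values-threshold value takes precedence, otherwise the targetm.case_values_threshold hook is used, and a switch with at least that many case values (and a dense enough range) is expanded as a dispatch table. The ppc-switch tests listed above exercise both directions; a self-contained sketch of what they compile, with illustrative identifiers only:

/* Compiled with "-O2 --param case-values-threshold=2", this dense switch
   is expanded through a dispatch table (ppc-switch-1.c checks for
   mtctr/bctr on PowerPC); with "--param case-values-threshold=20" it is
   expanded as a chain of compares and branches instead (ppc-switch-2.c).  */
long
select_op (long a, long b)
{
  switch (a)
    {
    case 0: return -b;
    case 1: return ~b;
    case 2: return b + 1;
    case 3: return b - 2;
    default: return b & 9;
    }
}

int
main (void)
{
  return (int) select_op (2, 1) - 2;   /* exits with status 0 */
}
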
+ 2011-07-03 Ira Rosen <ira.rosen@linaro.org> PR tree-optimization/49610 diff --git a/gcc/testsuite/ada/acats/run_acats b/gcc/testsuite/ada/acats/run_acats index 5d68e6c223a..c1ed39f8555 100755 --- a/gcc/testsuite/ada/acats/run_acats +++ b/gcc/testsuite/ada/acats/run_acats @@ -14,8 +14,8 @@ fi # Fall back to whence which ksh88 and ksh93 provide, but bash does not. which () { - path=`type -p $* 2>/dev/null | awk '{print $NF}'` && { echo $path; return 0; } - path=`type $* 2>/dev/null | awk '{print $NF}'` && { echo $path; return 0; } + path=`type -p $* 2>/dev/null` && { echo $path | awk '{print $NF}'; return 0; } + path=`type $* 2>/dev/null` && { echo $path | awk '{print $NF}'; return 0; } path=`whence $* 2>/dev/null` && { echo $path; return 0; } return 1 } diff --git a/gcc/testsuite/g++.dg/abi/mangle48.C b/gcc/testsuite/g++.dg/abi/mangle48.C new file mode 100644 index 00000000000..dc9c492cf00 --- /dev/null +++ b/gcc/testsuite/g++.dg/abi/mangle48.C @@ -0,0 +1,23 @@ +// Testcase for 'this' mangling +// { dg-options -std=c++0x } + +struct B +{ + template <class U> U f(); +}; + +struct A +{ + B b; + // { dg-final { scan-assembler "_ZN1A1fIiEEDTcldtdtdefpT1b1fIT_EEEv" } } + template <class U> auto f() -> decltype (b.f<U>()); + // { dg-final { scan-assembler "_ZN1A1gIiEEDTcldtptfpT1b1fIT_EEEv" } } + template <class U> auto g() -> decltype (this->b.f<U>()); +}; + +int main() +{ + A a; + a.f<int>(); + a.g<int>(); +} diff --git a/gcc/testsuite/g++.dg/cpp0x/diag1.C b/gcc/testsuite/g++.dg/cpp0x/diag1.C new file mode 100644 index 00000000000..b3f30bcd0b4 --- /dev/null +++ b/gcc/testsuite/g++.dg/cpp0x/diag1.C @@ -0,0 +1,32 @@ +// { dg-options -std=c++0x } + +template <int U> +struct TypeA +{ + typedef int type; +}; + +template <int N> +struct TypeB +{ + template <int U> typename TypeA<U>::type fn(); +}; + +struct TypeC +{ + TypeB<10> b; + // This was being printed as: + // template<int N> + // decltype (((TypeC*)this)-> + // TypeC::b. + // template<int U> typename TypeA<U>::type TypeB::fn [with int U = U, int N = 10, typename TypeA<U>::type = TypeA<U>::type]()) + // TypeC::fn() + // we don't want to see the template header, return type, or parameter bindings + // for TypeB::fn. + template <int N> auto fn() -> decltype(b.fn<N>()); // { dg-bogus "typename|with" } +}; + +int main() +{ + TypeC().fn<4>(1); // { dg-error "no match" } +} diff --git a/gcc/testsuite/g++.dg/cpp0x/lambda/lambda-ref2.C b/gcc/testsuite/g++.dg/cpp0x/lambda/lambda-ref2.C new file mode 100644 index 00000000000..15f1d9034a3 --- /dev/null +++ b/gcc/testsuite/g++.dg/cpp0x/lambda/lambda-ref2.C @@ -0,0 +1,13 @@ +// PR c++/49598 +// { dg-options -std=c++0x } +// { dg-do run } + +int +main() +{ + int i = 10; + int& ir = i; + + if ([=]{ return ir; }() != 10) + return 1; +} diff --git a/gcc/testsuite/g++.dg/diagnostic/aka1.C b/gcc/testsuite/g++.dg/diagnostic/aka1.C new file mode 100644 index 00000000000..37f8df9399d --- /dev/null +++ b/gcc/testsuite/g++.dg/diagnostic/aka1.C @@ -0,0 +1,15 @@ +// Basic test for typedef stripping in diagnostics. + +struct A { + void f(); +}; + +void A::f() { + // We don't want an aka for the injected-class-name. + A a = 0; // { dg-error "type .A. requested" } +} + +typedef A B; + +// We do want an aka for a real typedef. +B b = 0; // { dg-error "B .aka A." 
} diff --git a/gcc/testsuite/g++.dg/rtti/anon-ns1.C b/gcc/testsuite/g++.dg/rtti/anon-ns1.C index fd6f8af61b0..e18be4300a3 100644 --- a/gcc/testsuite/g++.dg/rtti/anon-ns1.C +++ b/gcc/testsuite/g++.dg/rtti/anon-ns1.C @@ -2,7 +2,7 @@ // The typeinfo name for A should start with * so we compare // it by address rather than contents. -// { dg-final { scan-assembler "\"\*N\[^\"\]+1AE\"" } } +// { dg-final { scan-assembler "\"\*N\[^\"\]+1AE" } } namespace { diff --git a/gcc/testsuite/g++.dg/template/template-id-4.C b/gcc/testsuite/g++.dg/template/template-id-4.C new file mode 100644 index 00000000000..26f4809bc96 --- /dev/null +++ b/gcc/testsuite/g++.dg/template/template-id-4.C @@ -0,0 +1,22 @@ +// PR c++/48157 + +struct AType +{ + template<class AA> + void SomeFuncTemplate() + { } +}; + +template < class T > +struct TTest2 +{ + template<T> struct helper; + + template<class U> + static void check(helper<&U::template SomeFuncTemplate<int> > *); +}; + +int main() +{ + TTest2< void (AType::*)() >::check<AType>(0); +} diff --git a/gcc/testsuite/g++.dg/torture/pr49615.C b/gcc/testsuite/g++.dg/torture/pr49615.C new file mode 100644 index 00000000000..98a2f95b8b3 --- /dev/null +++ b/gcc/testsuite/g++.dg/torture/pr49615.C @@ -0,0 +1,29 @@ +/* { dg-do compile } */ +/* { dg-options "-g" } */ + +template <class T> +static inline bool Dispatch (T* obj, void (T::*func) ()) +{ + (obj->*func) (); +} +class C +{ + bool f (int); + void g (); +}; +bool C::f (int n) +{ + bool b; + switch (n) + { + case 0: + b = Dispatch (this, &C::g); + case 1: + b = Dispatch (this, &C::g); + } +} +void C::g () +{ + for (;;) { } +} + diff --git a/gcc/testsuite/g++.dg/torture/pr49628.C b/gcc/testsuite/g++.dg/torture/pr49628.C new file mode 100644 index 00000000000..4bc6543a35e --- /dev/null +++ b/gcc/testsuite/g++.dg/torture/pr49628.C @@ -0,0 +1,37 @@ +/* { dg-do compile } */ + +#include <vector> + +template <int rank, int dim> class Tensor; +template <int dim> class Tensor<1,dim> { +public: + explicit Tensor (const bool initialize = true); + Tensor (const Tensor<1,dim> &); + Tensor<1,dim> & operator = (const Tensor<1,dim> &); + double values[(dim!=0) ? 
(dim) : 1]; +}; +template <int dim> +inline Tensor<1,dim> & Tensor<1,dim>::operator = (const Tensor<1,dim> &p) +{ + for (unsigned int i=0; i<dim; ++i) + values[i] = p.values[i]; +}; +template <int dim> class Quadrature { +public: + const unsigned int n_quadrature_points; +}; +class MappingQ1 +{ + class InternalData { + public: + std::vector<Tensor<1,3> > shape_derivatives; + unsigned int n_shape_functions; + }; + void compute_data (const Quadrature<3> &quadrature, InternalData &data) + const; +}; +void MappingQ1::compute_data (const Quadrature<3> &q, InternalData &data) const +{ + const unsigned int n_q_points = q.n_quadrature_points; + data.shape_derivatives.resize(data.n_shape_functions * n_q_points); +} diff --git a/gcc/testsuite/g++.dg/tree-ssa/pr8781.C b/gcc/testsuite/g++.dg/tree-ssa/pr8781.C index fc5f44394d9..cc518a0d25a 100644 --- a/gcc/testsuite/g++.dg/tree-ssa/pr8781.C +++ b/gcc/testsuite/g++.dg/tree-ssa/pr8781.C @@ -1,5 +1,5 @@ /* { dg-do compile } */ -/* { dg-options "-O -fdump-tree-fre1-details" } */ +/* { dg-options "-O -fno-tree-sra -fdump-tree-fre1-details" } */ int f(); diff --git a/gcc/testsuite/gcc.dg/autopar/pr49580.c b/gcc/testsuite/gcc.dg/autopar/pr49580.c new file mode 100644 index 00000000000..e08fd0e0c6c --- /dev/null +++ b/gcc/testsuite/gcc.dg/autopar/pr49580.c @@ -0,0 +1,38 @@ +/* PR debug/49580 */ +/* { dg-do compile } */ +/* { dg-options "-O2 -ftree-parallelize-loops=4 -fdump-tree-parloops-details" } */ + +#include <stdarg.h> +#include <stdlib.h> + +#define N 1600 + +unsigned int ub[N]; +unsigned char reg_has_output_reload[N]; +unsigned int uc[N]; + + __attribute__ ((noinline)) + void main2 (unsigned int regno, unsigned int n_reloads) + { + unsigned int nr=0; + + if (regno> ub[regno]) + nr=regno; + else + nr=ub[nr]; + + while (nr-- > 0) + if (n_reloads == 0 || reg_has_output_reload[regno + nr] == 0) + ub[regno + nr] = 0; +} + +int main (void) +{ + main2 (799, 0); + return 0; +} + + +/* { dg-final { scan-tree-dump-times "SUCCESS: may be parallelized" 1 "parloops" } } */ +/* { dg-final { cleanup-tree-dump "parloops" } } */ + diff --git a/gcc/testsuite/gcc.dg/cpp/pragma-3.c b/gcc/testsuite/gcc.dg/cpp/pragma-3.c index 9afc3919a8d..53daee76640 100644 --- a/gcc/testsuite/gcc.dg/cpp/pragma-3.c +++ b/gcc/testsuite/gcc.dg/cpp/pragma-3.c @@ -1,6 +1,7 @@ /* { dg-options "-fopenmp" } { dg-do preprocess } + { dg-require-effective-target fopenmp } */ void foo (void) diff --git a/gcc/testsuite/gcc.dg/graphite/block-pr47654.c b/gcc/testsuite/gcc.dg/graphite/block-pr47654.c new file mode 100644 index 00000000000..9cdeb0c3579 --- /dev/null +++ b/gcc/testsuite/gcc.dg/graphite/block-pr47654.c @@ -0,0 +1,25 @@ +int a[128][40]; + +void __attribute__ ((noinline, noclone)) +foo (void) +{ + int i, j; + for (i = 0; i < 40; i++) + for (j = 0; j < 128; j++) + a[j][i] = 4; +} + +int +main () +{ + int i, j; + foo (); + for (i = 0; i < 40; i++) + for (j = 0; j < 128; j++) + if (a[j][i] != 4) + __builtin_abort (); + return 0; +} + +/* { dg-final { scan-tree-dump-not "will be loop blocked" "graphite" } } */ +/* { dg-final { cleanup-tree-dump "graphite" } } */ diff --git a/gcc/testsuite/gcc.dg/graphite/pr37485.c b/gcc/testsuite/gcc.dg/graphite/pr37485.c index ce2507b3d6c..cf0969bac1d 100644 --- a/gcc/testsuite/gcc.dg/graphite/pr37485.c +++ b/gcc/testsuite/gcc.dg/graphite/pr37485.c @@ -1,4 +1,4 @@ -/* { dg-options "-O2 -fdump-tree-graphite-all" } */ +/* { dg-options "-O2 -floop-block -fdump-tree-graphite-all" } */ typedef unsigned char UChar; typedef int Int32; diff --git 
a/gcc/testsuite/gcc.dg/pr43402.c b/gcc/testsuite/gcc.dg/pr43402.c index 82234c74a85..e6ef2d7d54d 100644 --- a/gcc/testsuite/gcc.dg/pr43402.c +++ b/gcc/testsuite/gcc.dg/pr43402.c @@ -1,5 +1,7 @@ /* { dg-do run } */ /* { dg-options "-O1 -fno-inline" } */ +/* { dg-require-effective-target int32plus } */ + extern void abort (void); static int something; diff --git a/gcc/testsuite/gcc.dg/pr44023.c b/gcc/testsuite/gcc.dg/pr44023.c index ee99bf601bb..df22bba83f6 100644 --- a/gcc/testsuite/gcc.dg/pr44023.c +++ b/gcc/testsuite/gcc.dg/pr44023.c @@ -2,6 +2,7 @@ /* { dg-do compile } */ /* { dg-options "-fcompare-debug -O2" } */ /* { dg-options "-fcompare-debug -O2 -mcpu=ev67" { target alpha*-*-* } } */ +/* { dg-require-effective-target int32plus } */ void foo (unsigned f, long v, unsigned *w, unsigned a, unsigned b, unsigned e, unsigned c, unsigned d) diff --git a/gcc/testsuite/gcc.dg/pr47383.c b/gcc/testsuite/gcc.dg/pr47383.c new file mode 100644 index 00000000000..3e2b9ba18a2 --- /dev/null +++ b/gcc/testsuite/gcc.dg/pr47383.c @@ -0,0 +1,31 @@ +/* { dg-do run { target fpic } } */ +/* { dg-options "-O2 -fPIC" } */ + +static int heap[2*(256 +1+29)+1]; +static int heap_len; +static int heap_max; +void +__attribute__ ((noinline)) +foo (int elems) +{ + int n, m; + int max_code = -1; + int node = elems; + heap_len = 0, heap_max = (2*(256 +1+29)+1); + for (n = 0; n < elems; n++) + heap[++heap_len] = max_code = n; + do { + n = heap[1]; + heap[1] = heap[heap_len--]; + m = heap[1]; + heap[--heap_max] = n; + heap[--heap_max] = m; + } while (heap_len >= 2); +} + +int +main () +{ + foo (286); + return 0; +} diff --git a/gcc/testsuite/gcc.dg/pr49602.c b/gcc/testsuite/gcc.dg/pr49602.c new file mode 100644 index 00000000000..bb8eeb0c332 --- /dev/null +++ b/gcc/testsuite/gcc.dg/pr49602.c @@ -0,0 +1,17 @@ +/* PR debug/49602 */ +/* { dg-do compile } */ +/* { dg-options "-g -O2" } */ + +static void +foo (int *x) +{ +} + +void +bar (int *x) +{ + int i; + for (i = 0; i == 1; ++i) + x = 0; + foo (x); +} diff --git a/gcc/testsuite/gcc.dg/pr49619.c b/gcc/testsuite/gcc.dg/pr49619.c new file mode 100644 index 00000000000..d0a72a6edd1 --- /dev/null +++ b/gcc/testsuite/gcc.dg/pr49619.c @@ -0,0 +1,13 @@ +/* PR rtl-optimization/49619 */ +/* { dg-do compile } */ +/* { dg-options "-O -fno-tree-fre" } */ + +extern int a, b; + +void +foo (int x) +{ + a = 2; + b = 0; + b = (a && ((a = 1, 0 >= b) || (short) (x + (b & x)))); +} diff --git a/gcc/testsuite/gcc.dg/stack-layout-2.c b/gcc/testsuite/gcc.dg/stack-layout-2.c index 5d5b385f675..9ed545d727a 100644 --- a/gcc/testsuite/gcc.dg/stack-layout-2.c +++ b/gcc/testsuite/gcc.dg/stack-layout-2.c @@ -21,3 +21,4 @@ int foo() } /* { dg-final { scan-rtl-dump "size 8192" "expand" } } */ /* { dg-final { scan-rtl-dump "size 32" "expand" } } */ +/* { dg-final { cleanup-rtl-dump "expand" } } */ diff --git a/gcc/testsuite/gcc.dg/torture/pr49518.c b/gcc/testsuite/gcc.dg/torture/pr49518.c new file mode 100644 index 00000000000..84a10fbbeeb --- /dev/null +++ b/gcc/testsuite/gcc.dg/torture/pr49518.c @@ -0,0 +1,19 @@ +/* { dg-do compile } */ + +int a, b; +struct S { unsigned int s, t, u; } c, d = { 0, 1, 0 }; + +void +test (unsigned char z) +{ + char e[] = {0, 0, 0, 0, 1}; + for (c.s = 1; c.s; c.s++) + { + b = e[c.s]; + if (a) + break; + b = z >= c.u; + if (d.t) + break; + } +} diff --git a/gcc/testsuite/gcc.dg/tree-ssa/20030807-7.c b/gcc/testsuite/gcc.dg/tree-ssa/20030807-7.c index ed6f7c0d5d5..f9f5fb798ef 100644 --- a/gcc/testsuite/gcc.dg/tree-ssa/20030807-7.c +++ 
b/gcc/testsuite/gcc.dg/tree-ssa/20030807-7.c @@ -33,5 +33,5 @@ simplify_condition (cond_p) } /* There should be exactly one IF conditional. */ -/* { dg-final { scan-tree-dump-times "if " 1 "vrp1" { xfail *-*-* } } } */ +/* { dg-final { scan-tree-dump-times "if " 1 "vrp1" } } */ /* { dg-final { cleanup-tree-dump "vrp1" } } */ diff --git a/gcc/testsuite/gcc.dg/vect/pr33804.c b/gcc/testsuite/gcc.dg/vect/pr33804.c index 168213ee35f..a4fb3868397 100644 --- a/gcc/testsuite/gcc.dg/vect/pr33804.c +++ b/gcc/testsuite/gcc.dg/vect/pr33804.c @@ -11,6 +11,6 @@ void f(unsigned char *s, unsigned char *d, int n) { } } -/* { dg-final { scan-tree-dump-times "vectorized 1 loops" 1 "vect" { xfail { vect_no_align && ilp32 } } } } */ -/* { dg-final { scan-tree-dump-times "vectorizing stmts using SLP" 1 "vect" { xfail { vect_no_align && ilp32 } } } } */ +/* { dg-final { scan-tree-dump-times "vectorized 1 loops" 1 "vect" { xfail vect_no_align } } } */ +/* { dg-final { scan-tree-dump-times "vectorizing stmts using SLP" 1 "vect" { xfail vect_no_align } } } */ /* { dg-final { cleanup-tree-dump "vect" } } */ diff --git a/gcc/testsuite/gcc.dg/vect/slp-multitypes-3.c b/gcc/testsuite/gcc.dg/vect/slp-multitypes-3.c index 63534489336..c5fd52ead58 100644 --- a/gcc/testsuite/gcc.dg/vect/slp-multitypes-3.c +++ b/gcc/testsuite/gcc.dg/vect/slp-multitypes-3.c @@ -87,7 +87,6 @@ int main (void) return 0; } -/* { dg-final { scan-tree-dump-times "vectorized 1 loops" 1 "vect" { xfail { sparc*-*-* && ilp32 } } } } */ -/* { dg-final { scan-tree-dump-times "vectorizing stmts using SLP" 2 "vect" { xfail { sparc*-*-* && ilp32 } }} } */ +/* { dg-final { scan-tree-dump-times "vectorized 1 loops" 1 "vect" { xfail sparc*-*-* } } } */ +/* { dg-final { scan-tree-dump-times "vectorizing stmts using SLP" 2 "vect" { xfail sparc*-*-* } } } */ /* { dg-final { cleanup-tree-dump "vect" } } */ - diff --git a/gcc/testsuite/gcc.misc-tests/linkage.exp b/gcc/testsuite/gcc.misc-tests/linkage.exp index 954dc29a50e..f6702e4d024 100644 --- a/gcc/testsuite/gcc.misc-tests/linkage.exp +++ b/gcc/testsuite/gcc.misc-tests/linkage.exp @@ -46,7 +46,8 @@ if { [isnative] && ![is_remote host] } then { set native_cflags "-xarch=v9" } } - if [istarget "i?86*-*-solaris2*"] { + if {[istarget "i?86*-*-solaris2*"] + || [istarget "x86_64-*-solaris2.1\[0-9\]*"]} { set file_string [exec file "linkage-x.o"] if [ string match "*64*" $file_string ] { set native_cflags "-xarch=amd64" diff --git a/gcc/testsuite/gcc.target/arm/pr42093.c b/gcc/testsuite/gcc.target/arm/pr42093.c index 5d43982bf1c..7ba2f933eef 100644 --- a/gcc/testsuite/gcc.target/arm/pr42093.c +++ b/gcc/testsuite/gcc.target/arm/pr42093.c @@ -1,4 +1,4 @@ -/* { dg-options "-mthumb -O2" } */ +/* { dg-options "-mthumb -O2 -fno-reorder-blocks" } */ /* { dg-require-effective-target arm_thumb2_ok } */ /* { dg-final { scan-assembler-not "tbb" } } */ /* { dg-final { scan-assembler-not "tbh" } } */ diff --git a/gcc/testsuite/gcc.target/avr/avr.exp b/gcc/testsuite/gcc.target/avr/avr.exp index 90aeed41e1f..a552a968560 100644 --- a/gcc/testsuite/gcc.target/avr/avr.exp +++ b/gcc/testsuite/gcc.target/avr/avr.exp @@ -34,7 +34,7 @@ if ![info exists DEFAULT_CFLAGS] then { dg-init
# Main loop.
-dg-runtest [lsort [glob -nocomplain $srcdir/$subdir/*.\[cCS\]]] \
+dg-runtest [lsort [glob -nocomplain $srcdir/$subdir/*.{\[cCS\],cpp}]] \
"" $DEFAULT_CFLAGS
# All done.
diff --git a/gcc/testsuite/gcc.target/avr/exit-abort.h b/gcc/testsuite/gcc.target/avr/exit-abort.h new file mode 100644 index 00000000000..cf7df203a95 --- /dev/null +++ b/gcc/testsuite/gcc.target/avr/exit-abort.h @@ -0,0 +1,8 @@ +#ifdef __cplusplus +extern "C" { +#endif + extern void exit (int); + extern void abort (void); +#ifdef __cplusplus +} +#endif diff --git a/gcc/testsuite/gcc.target/avr/progmem-error-1.c b/gcc/testsuite/gcc.target/avr/progmem-error-1.c new file mode 100644 index 00000000000..cf53cc8e92b --- /dev/null +++ b/gcc/testsuite/gcc.target/avr/progmem-error-1.c @@ -0,0 +1,5 @@ +/* { dg-do compile } */ + +#include "progmem.h" + +char str[] PROGMEM = "Hallo"; /* { dg-error "must be const" } */ diff --git a/gcc/testsuite/gcc.target/avr/progmem-error-1.cpp b/gcc/testsuite/gcc.target/avr/progmem-error-1.cpp new file mode 100644 index 00000000000..cf53cc8e92b --- /dev/null +++ b/gcc/testsuite/gcc.target/avr/progmem-error-1.cpp @@ -0,0 +1,5 @@ +/* { dg-do compile } */ + +#include "progmem.h" + +char str[] PROGMEM = "Hallo"; /* { dg-error "must be const" } */ diff --git a/gcc/testsuite/gcc.target/avr/progmem-warning-1.c b/gcc/testsuite/gcc.target/avr/progmem-warning-1.c new file mode 100644 index 00000000000..67af05fe5b3 --- /dev/null +++ b/gcc/testsuite/gcc.target/avr/progmem-warning-1.c @@ -0,0 +1,7 @@ +/* PR target/34734 */ +/* { dg-do compile } */ +/* { dg-options "-Wuninitialized" } */ + +#include "progmem.h" + +const char c PROGMEM; /* { dg-warning "uninitialized variable 'c' put into program memory area" } */ diff --git a/gcc/testsuite/gcc.target/avr/progmem.h b/gcc/testsuite/gcc.target/avr/progmem.h new file mode 100644 index 00000000000..d5e86c9fe3c --- /dev/null +++ b/gcc/testsuite/gcc.target/avr/progmem.h @@ -0,0 +1,14 @@ +#define PROGMEM __attribute__((progmem)) + +#define PSTR(s) \ + (__extension__({ \ + static const char __c[] PROGMEM = (s); \ + &__c[0];})) + +#define pgm_read_char(addr) \ + (__extension__({ \ + unsigned int __addr16 = (unsigned int)(addr); \ + char __result; \ + __asm__ ("lpm %0, %a1" \ + : "=r" (__result) : "z" (__addr16)); \ + __result; })) diff --git a/gcc/testsuite/gcc.target/avr/torture/avr-torture.exp b/gcc/testsuite/gcc.target/avr/torture/avr-torture.exp index 355b3ad88bd..e2f83d6689b 100644 --- a/gcc/testsuite/gcc.target/avr/torture/avr-torture.exp +++ b/gcc/testsuite/gcc.target/avr/torture/avr-torture.exp @@ -52,7 +52,7 @@ set-torture-options $AVR_TORTURE_OPTIONS # Main loop.
-gcc-dg-runtest [lsort [glob -nocomplain $srcdir/$subdir/*.\[cS\]]] $DEFAULT_CFLAGS
+gcc-dg-runtest [lsort [glob -nocomplain $srcdir/$subdir/*.{\[cS\],cpp}]] $DEFAULT_CFLAGS
# Finalize use of torture lists.
torture-finish
diff --git a/gcc/testsuite/gcc.target/avr/torture/progmem-1.c b/gcc/testsuite/gcc.target/avr/torture/progmem-1.c new file mode 100644 index 00000000000..790c676c911 --- /dev/null +++ b/gcc/testsuite/gcc.target/avr/torture/progmem-1.c @@ -0,0 +1,30 @@ +/* { dg-do run } */ + +#include "../exit-abort.h" +#include "../progmem.h" + +const char strA[] PROGMEM = "@A"; +const char strc PROGMEM = 'c'; + +unsigned int volatile s = 2; + +int main() +{ + char c; + + c = pgm_read_char (&strA[s-1]); + if (c != 'A') + abort(); + + c = pgm_read_char (&PSTR ("@@B")[s]); + if (c != 'B') + abort(); + + c = pgm_read_char (&strc); + if (c != 'c') + abort(); + + exit (0); + + return 0; +} diff --git a/gcc/testsuite/gcc.target/avr/torture/progmem-1.cpp b/gcc/testsuite/gcc.target/avr/torture/progmem-1.cpp new file mode 100644 index 00000000000..a1df9e78e72 --- /dev/null +++ b/gcc/testsuite/gcc.target/avr/torture/progmem-1.cpp @@ -0,0 +1,2 @@ +/* { dg-do run } */ +#include "progmem-1.c" diff --git a/gcc/testsuite/gcc.target/powerpc/no-r11-1.c b/gcc/testsuite/gcc.target/powerpc/no-r11-1.c new file mode 100644 index 00000000000..7e880e7d861 --- /dev/null +++ b/gcc/testsuite/gcc.target/powerpc/no-r11-1.c @@ -0,0 +1,11 @@ +/* { dg-do compile { target { powerpc*-*-* && lp64 } } } */ +/* { dg-skip-if "" { *-*-darwin* } { "*" } { "" } } */ +/* { dg-options "-O2 -mno-r11" } */ + +int +call_ptr (int (func) (void)) +{ + return func () + 1; +} + +/* { dg-final { scan-assembler-not "ld 11,16(3)" } } */ diff --git a/gcc/testsuite/gcc.target/powerpc/no-r11-2.c b/gcc/testsuite/gcc.target/powerpc/no-r11-2.c new file mode 100644 index 00000000000..981bc0c954c --- /dev/null +++ b/gcc/testsuite/gcc.target/powerpc/no-r11-2.c @@ -0,0 +1,11 @@ +/* { dg-do compile { target { powerpc*-*-* && lp64 } } } */ +/* { dg-skip-if "" { *-*-darwin* } { "*" } { "" } } */ +/* { dg-options "-O2 -mr11" } */ + +int +call_ptr (int (func) (void)) +{ + return func () + 1; +} + +/* { dg-final { scan-assembler "ld 11,16" } } */ diff --git a/gcc/testsuite/gcc.target/powerpc/no-r11-3.c b/gcc/testsuite/gcc.target/powerpc/no-r11-3.c new file mode 100644 index 00000000000..bc57385ca4e --- /dev/null +++ b/gcc/testsuite/gcc.target/powerpc/no-r11-3.c @@ -0,0 +1,20 @@ +/* { dg-do compile { target { powerpc*-*-* && lp64 } } } */ +/* { dg-skip-if "" { *-*-darwin* } { "*" } { "" } } */ +/* { dg-options "-O2 -mno-r11" } */ + +extern void ext_call (int (func) (void)); + +int +outer_func (int init) /* { dg-error "-mno-r11 must not be used if you have trampolines" "" } */ +{ + int value = init; + + int inner (void) + { + return ++value; + } + + ext_call (inner); + return value; +} + diff --git a/gcc/testsuite/gcc.target/powerpc/ppc-switch-1.c b/gcc/testsuite/gcc.target/powerpc/ppc-switch-1.c new file mode 100644 index 00000000000..ac1dac9faa9 --- /dev/null +++ b/gcc/testsuite/gcc.target/powerpc/ppc-switch-1.c @@ -0,0 +1,26 @@ +/* { dg-do compile { target { powerpc*-*-* } } } */ +/* { dg-skip-if "" { powerpc*-*-darwin* } { "*" } { "" } } */ +/* { dg-options "-O2 --param case-values-threshold=2" } */ +/* { dg-final { scan-assembler "mtctr" } } */ +/* { dg-final { scan-assembler "bctr" } } */ + +/* Force using a dispatch table even though by default we would generate + ifs. 
*/ + +extern long call (long); + +long +test_switch (long a, long b) +{ + long c; + + switch (a) + { + case 0: c = -b; break; + case 1: c = ~b; break; + case 2: c = b+1; break; + default: c = b & 9; break; + } + + return call (c) + 1; +} diff --git a/gcc/testsuite/gcc.target/powerpc/ppc-switch-2.c b/gcc/testsuite/gcc.target/powerpc/ppc-switch-2.c new file mode 100644 index 00000000000..4f2efccfbd7 --- /dev/null +++ b/gcc/testsuite/gcc.target/powerpc/ppc-switch-2.c @@ -0,0 +1,32 @@ +/* { dg-do compile { target { powerpc*-*-* } } } */ +/* { dg-skip-if "" { powerpc*-*-darwin* } { "*" } { "" } } */ +/* { dg-options "-O2 --param case-values-threshold=20" } */ +/* { dg-final { scan-assembler-not "mtctr" } } */ +/* { dg-final { scan-assembler-not "bctr" } } */ + +/* Force using if tests, instead of a dispatch table. */ + +extern long call (long); + +long +test_switch (long a, long b) +{ + long c; + + switch (a) + { + case 0: c = -b; break; + case 1: c = ~b; break; + case 2: c = b+1; break; + case 3: c = b-2; break; + case 4: c = b*3; break; + case 5: c = b/4; break; + case 6: c = b<<5; break; + case 7: c = b>>6; break; + case 8: c = b|7; break; + case 9: c = b^8; break; + default: c = b&9; break; + } + + return call (c) + 1; +} diff --git a/gcc/testsuite/gfortran.dg/move_alloc_4.f90 b/gcc/testsuite/gfortran.dg/move_alloc_4.f90 index 1f5deed60fe..4dc493f097f 100644 --- a/gcc/testsuite/gfortran.dg/move_alloc_4.f90 +++ b/gcc/testsuite/gfortran.dg/move_alloc_4.f90 @@ -20,3 +20,4 @@ program testmv3 end program testmv3 ! { dg-final { scan-tree-dump-times "__builtin_free" 9 "original" } } +! { dg-final { cleanup-tree-dump "original" } } diff --git a/gcc/testsuite/gfortran.dg/pr49472.f90 b/gcc/testsuite/gfortran.dg/pr49472.f90 new file mode 100644 index 00000000000..1baf82e8b11 --- /dev/null +++ b/gcc/testsuite/gfortran.dg/pr49472.f90 @@ -0,0 +1,15 @@ +! PR rtl-optimization/49472 +! { dg-do compile } +! { dg-options "-O -fcompare-debug -ffast-math" } +subroutine pr49472 + integer, parameter :: n = 3 + real(8) :: a, b, c, d, e (n+1) + integer :: i + do i=2, (n+1) + b = 1. / ((i - 1.5d0) * 1.) + c = b * a + d = -b * c / (1. + b * b) ** 1.5d0 + e(i) = d + end do + call dummy (e) +end subroutine diff --git a/gcc/testsuite/lib/target-supports.exp b/gcc/testsuite/lib/target-supports.exp index 4e56e48412f..1b06771e4e1 100644 --- a/gcc/testsuite/lib/target-supports.exp +++ b/gcc/testsuite/lib/target-supports.exp @@ -497,6 +497,11 @@ proc check_profiling_available { test_what } { # Tree profiling requires TLS runtime support. if { $test_what == "-fprofile-generate" } { + # AVR does not support profile generation because + # it does not implement needed support functions. 
+ if { [istarget avr-*-*] } { + return 0 + } return [check_effective_target_tls_runtime] } diff --git a/gcc/tree-cfg.c b/gcc/tree-cfg.c index 349f56ecf74..12d8fb4e5f0 100644 --- a/gcc/tree-cfg.c +++ b/gcc/tree-cfg.c @@ -5411,12 +5411,10 @@ gimple_duplicate_sese_tail (edge entry ATTRIBUTE_UNUSED, edge exit ATTRIBUTE_UNU int total_freq = 0, exit_freq = 0; gcov_type total_count = 0, exit_count = 0; edge exits[2], nexits[2], e; - gimple_stmt_iterator gsi,gsi1; + gimple_stmt_iterator gsi; gimple cond_stmt; edge sorig, snew; basic_block exit_bb; - basic_block iters_bb; - tree new_rhs; gimple_stmt_iterator psi; gimple phi; tree def; @@ -5497,35 +5495,6 @@ gimple_duplicate_sese_tail (edge entry ATTRIBUTE_UNUSED, edge exit ATTRIBUTE_UNU gcc_assert (gimple_code (cond_stmt) == GIMPLE_COND); cond_stmt = gimple_copy (cond_stmt); - /* If the block consisting of the exit condition has the latch as - successor, then the body of the loop is executed before - the exit condition is tested. In such case, moving the - condition to the entry, causes that the loop will iterate - one less iteration (which is the wanted outcome, since we - peel out the last iteration). If the body is executed after - the condition, moving the condition to the entry requires - decrementing one iteration. */ - if (exits[1]->dest == orig_loop->latch) - new_rhs = gimple_cond_rhs (cond_stmt); - else - { - new_rhs = fold_build2 (MINUS_EXPR, TREE_TYPE (gimple_cond_rhs (cond_stmt)), - gimple_cond_rhs (cond_stmt), - build_int_cst (TREE_TYPE (gimple_cond_rhs (cond_stmt)), 1)); - - if (TREE_CODE (gimple_cond_rhs (cond_stmt)) == SSA_NAME) - { - iters_bb = gimple_bb (SSA_NAME_DEF_STMT (gimple_cond_rhs (cond_stmt))); - for (gsi1 = gsi_start_bb (iters_bb); !gsi_end_p (gsi1); gsi_next (&gsi1)) - if (gsi_stmt (gsi1) == SSA_NAME_DEF_STMT (gimple_cond_rhs (cond_stmt))) - break; - - new_rhs = force_gimple_operand_gsi (&gsi1, new_rhs, true, - NULL_TREE,false,GSI_CONTINUE_LINKING); - } - } - gimple_cond_set_rhs (cond_stmt, unshare_expr (new_rhs)); - gimple_cond_set_lhs (cond_stmt, unshare_expr (gimple_cond_lhs (cond_stmt))); gsi_insert_after (&gsi, cond_stmt, GSI_NEW_STMT); sorig = single_succ_edge (switch_bb); diff --git a/gcc/tree-cfgcleanup.c b/gcc/tree-cfgcleanup.c index 1036e1e98f3..0c8c0852024 100644 --- a/gcc/tree-cfgcleanup.c +++ b/gcc/tree-cfgcleanup.c @@ -599,7 +599,7 @@ split_bbs_on_noreturn_calls (void) BB is present in the cfg. */ if (bb == NULL || bb->index < NUM_FIXED_BLOCKS - || bb->index >= n_basic_blocks + || bb->index >= last_basic_block || BASIC_BLOCK (bb->index) != bb || !gimple_call_noreturn_p (stmt)) continue; diff --git a/gcc/tree-eh.c b/gcc/tree-eh.c index 5831d34826c..f10d72d1d68 100644 --- a/gcc/tree-eh.c +++ b/gcc/tree-eh.c @@ -2449,8 +2449,42 @@ tree_could_trap_p (tree expr) case CALL_EXPR: t = get_callee_fndecl (expr); /* Assume that calls to weak functions may trap. */ - if (!t || !DECL_P (t) || DECL_WEAK (t)) + if (!t || !DECL_P (t)) return true; + if (DECL_WEAK (t)) + return tree_could_trap_p (t); + return false; + + case FUNCTION_DECL: + /* Assume that accesses to weak functions may trap, unless we know + they are certainly defined in current TU or in some other + LTO partition. 
*/ + if (DECL_WEAK (expr)) + { + struct cgraph_node *node; + if (!DECL_EXTERNAL (expr)) + return false; + node = cgraph_function_node (cgraph_get_node (expr), NULL); + if (node && node->in_other_partition) + return false; + return true; + } + return false; + + case VAR_DECL: + /* Assume that accesses to weak vars may trap, unless we know + they are certainly defined in current TU or in some other + LTO partition. */ + if (DECL_WEAK (expr)) + { + struct varpool_node *node; + if (!DECL_EXTERNAL (expr)) + return false; + node = varpool_variable_node (varpool_get_node (expr), NULL); + if (node && node->in_other_partition) + return false; + return true; + } return false; default: diff --git a/gcc/tree-into-ssa.c b/gcc/tree-into-ssa.c index 0db9085f053..5fd34455df4 100644 --- a/gcc/tree-into-ssa.c +++ b/gcc/tree-into-ssa.c @@ -1343,7 +1343,41 @@ rewrite_debug_stmt_uses (gimple stmt) } } else - def = get_current_def (var); + { + def = get_current_def (var); + /* Check if get_current_def can be trusted. */ + if (def) + { + basic_block bb = gimple_bb (stmt); + basic_block def_bb + = SSA_NAME_IS_DEFAULT_DEF (def) + ? NULL : gimple_bb (SSA_NAME_DEF_STMT (def)); + + /* If definition is in current bb, it is fine. */ + if (bb == def_bb) + ; + /* If definition bb doesn't dominate the current bb, + it can't be used. */ + else if (def_bb && !dominated_by_p (CDI_DOMINATORS, bb, def_bb)) + def = NULL; + /* If there is just one definition and dominates the current + bb, it is fine. */ + else if (get_phi_state (var) == NEED_PHI_STATE_NO) + ; + else + { + struct def_blocks_d *db_p = get_def_blocks_for (var); + + /* If there are some non-debug uses in the current bb, + it is fine. */ + if (bitmap_bit_p (db_p->livein_blocks, bb->index)) + ; + /* Otherwise give up for now. */ + else + def = NULL; + } + } + } if (def == NULL) { gimple_debug_bind_reset_value (stmt); diff --git a/gcc/tree-parloops.c b/gcc/tree-parloops.c index 921821d5fbd..339ddcc18a5 100644 --- a/gcc/tree-parloops.c +++ b/gcc/tree-parloops.c @@ -1474,6 +1474,8 @@ transform_to_exit_first_loop (struct loop *loop, htab_t reduction_list, tree nit gimple phi, nphi, cond_stmt, stmt, cond_nit; gimple_stmt_iterator gsi; tree nit_1; + edge exit_1; + tree new_rhs; split_block_after_labels (loop->header); orig_header = single_succ (loop->header); @@ -1502,6 +1504,38 @@ transform_to_exit_first_loop (struct loop *loop, htab_t reduction_list, tree nit control = t; } } + + /* Setting the condition towards peeling the last iteration: + If the block consisting of the exit condition has the latch as + successor, then the body of the loop is executed before + the exit condition is tested. In such case, moving the + condition to the entry, causes that the loop will iterate + one less iteration (which is the wanted outcome, since we + peel out the last iteration). If the body is executed after + the condition, moving the condition to the entry requires + decrementing one iteration. 
*/ + exit_1 = EDGE_SUCC (exit->src, EDGE_SUCC (exit->src, 0) == exit); + if (exit_1->dest == loop->latch) + new_rhs = gimple_cond_rhs (cond_stmt); + else + { + new_rhs = fold_build2 (MINUS_EXPR, TREE_TYPE (gimple_cond_rhs (cond_stmt)), + gimple_cond_rhs (cond_stmt), + build_int_cst (TREE_TYPE (gimple_cond_rhs (cond_stmt)), 1)); + if (TREE_CODE (gimple_cond_rhs (cond_stmt)) == SSA_NAME) + { + basic_block preheader; + gimple_stmt_iterator gsi1; + + preheader = loop_preheader_edge(loop)->src; + gsi1 = gsi_after_labels (preheader); + new_rhs = force_gimple_operand_gsi (&gsi1, new_rhs, true, + NULL_TREE,false,GSI_CONTINUE_LINKING); + } + } + gimple_cond_set_rhs (cond_stmt, unshare_expr (new_rhs)); + gimple_cond_set_lhs (cond_stmt, unshare_expr (gimple_cond_lhs (cond_stmt))); + bbs = get_loop_body_in_dom_order (loop); for (n = 0; bbs[n] != loop->latch; n++) diff --git a/gcc/tree-ssa-address.c b/gcc/tree-ssa-address.c index e3934e15506..c6dced114b7 100644 --- a/gcc/tree-ssa-address.c +++ b/gcc/tree-ssa-address.c @@ -189,11 +189,12 @@ addr_for_mem_ref (struct mem_address *addr, addr_space_t as, bool really_expand) { enum machine_mode address_mode = targetm.addr_space.address_mode (as); + enum machine_mode pointer_mode = targetm.addr_space.pointer_mode (as); rtx address, sym, bse, idx, st, off; struct mem_addr_template *templ; if (addr->step && !integer_onep (addr->step)) - st = immed_double_int_const (tree_to_double_int (addr->step), address_mode); + st = immed_double_int_const (tree_to_double_int (addr->step), pointer_mode); else st = NULL_RTX; @@ -201,7 +202,7 @@ addr_for_mem_ref (struct mem_address *addr, addr_space_t as, off = immed_double_int_const (double_int_sext (tree_to_double_int (addr->offset), TYPE_PRECISION (TREE_TYPE (addr->offset))), - address_mode); + pointer_mode); else off = NULL_RTX; @@ -220,16 +221,16 @@ addr_for_mem_ref (struct mem_address *addr, addr_space_t as, if (!templ->ref) { sym = (addr->symbol ? - gen_rtx_SYMBOL_REF (address_mode, ggc_strdup ("test_symbol")) + gen_rtx_SYMBOL_REF (pointer_mode, ggc_strdup ("test_symbol")) : NULL_RTX); bse = (addr->base ? - gen_raw_REG (address_mode, LAST_VIRTUAL_REGISTER + 1) + gen_raw_REG (pointer_mode, LAST_VIRTUAL_REGISTER + 1) : NULL_RTX); idx = (addr->index ? - gen_raw_REG (address_mode, LAST_VIRTUAL_REGISTER + 2) + gen_raw_REG (pointer_mode, LAST_VIRTUAL_REGISTER + 2) : NULL_RTX); - gen_addr_rtx (address_mode, sym, bse, idx, + gen_addr_rtx (pointer_mode, sym, bse, idx, st? const0_rtx : NULL_RTX, off? const0_rtx : NULL_RTX, &templ->ref, @@ -247,16 +248,18 @@ addr_for_mem_ref (struct mem_address *addr, addr_space_t as, /* Otherwise really expand the expressions. */ sym = (addr->symbol - ? expand_expr (addr->symbol, NULL_RTX, address_mode, EXPAND_NORMAL) + ? expand_expr (addr->symbol, NULL_RTX, pointer_mode, EXPAND_NORMAL) : NULL_RTX); bse = (addr->base - ? expand_expr (addr->base, NULL_RTX, address_mode, EXPAND_NORMAL) + ? expand_expr (addr->base, NULL_RTX, pointer_mode, EXPAND_NORMAL) : NULL_RTX); idx = (addr->index - ? expand_expr (addr->index, NULL_RTX, address_mode, EXPAND_NORMAL) + ? 
expand_expr (addr->index, NULL_RTX, pointer_mode, EXPAND_NORMAL) : NULL_RTX); - gen_addr_rtx (address_mode, sym, bse, idx, st, off, &address, NULL, NULL); + gen_addr_rtx (pointer_mode, sym, bse, idx, st, off, &address, NULL, NULL); + if (pointer_mode != address_mode) + address = convert_memory_address (address_mode, address); return address; } diff --git a/gcc/tree-ssa-sccvn.c b/gcc/tree-ssa-sccvn.c index 8f96d6c0356..125d0444f4a 100644 --- a/gcc/tree-ssa-sccvn.c +++ b/gcc/tree-ssa-sccvn.c @@ -391,11 +391,15 @@ vn_reference_op_eq (const void *p1, const void *p2) const_vn_reference_op_t const vro1 = (const_vn_reference_op_t) p1; const_vn_reference_op_t const vro2 = (const_vn_reference_op_t) p2; - return vro1->opcode == vro2->opcode - && types_compatible_p (vro1->type, vro2->type) - && expressions_equal_p (vro1->op0, vro2->op0) - && expressions_equal_p (vro1->op1, vro2->op1) - && expressions_equal_p (vro1->op2, vro2->op2); + return (vro1->opcode == vro2->opcode + /* We do not care for differences in type qualification. */ + && (vro1->type == vro2->type + || (vro1->type && vro2->type + && types_compatible_p (TYPE_MAIN_VARIANT (vro1->type), + TYPE_MAIN_VARIANT (vro2->type)))) + && expressions_equal_p (vro1->op0, vro2->op0) + && expressions_equal_p (vro1->op1, vro2->op1) + && expressions_equal_p (vro1->op2, vro2->op2)); } /* Compute the hash for a reference operand VRO1. */ @@ -578,8 +582,7 @@ copy_reference_ops_from_ref (tree ref, VEC(vn_reference_op_s, heap) **result) vn_reference_op_s temp; memset (&temp, 0, sizeof (temp)); - /* We do not care for spurious type qualifications. */ - temp.type = TYPE_MAIN_VARIANT (TREE_TYPE (ref)); + temp.type = TREE_TYPE (ref); temp.opcode = TREE_CODE (ref); temp.op0 = TMR_INDEX (ref); temp.op1 = TMR_STEP (ref); @@ -610,8 +613,7 @@ copy_reference_ops_from_ref (tree ref, VEC(vn_reference_op_s, heap) **result) vn_reference_op_s temp; memset (&temp, 0, sizeof (temp)); - /* We do not care for spurious type qualifications. */ - temp.type = TYPE_MAIN_VARIANT (TREE_TYPE (ref)); + temp.type = TREE_TYPE (ref); temp.opcode = TREE_CODE (ref); temp.off = -1; @@ -676,16 +678,33 @@ copy_reference_ops_from_ref (tree ref, VEC(vn_reference_op_s, heap) **result) temp.off = off.low; } break; + case VAR_DECL: + if (DECL_HARD_REGISTER (ref)) + { + temp.op0 = ref; + break; + } + /* Fallthru. */ + case PARM_DECL: + case CONST_DECL: + case RESULT_DECL: + /* Canonicalize decls to MEM[&decl] which is what we end up with + when valueizing MEM[ptr] with ptr = &decl. 
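For illustration, a hedged source-level view (hypothetical example, not taken from the patch) of the redundancy this canonicalization lets value numbering recognize:

/* The direct load of 'x' and the load through 'p' denote the same memory;
   once the plain VAR_DECL reference is canonicalized to the MEM[&x] form
   that valueizing *p also produces, both loads share a value number and
   the subtraction is expected to fold to zero under -O2.  */
int x;

int
same_memory (void)
{
  int *p = &x;
  int a = x;    /* direct reference to the declaration */
  int b = *p;   /* becomes MEM[(int *) &x] after valueization */
  return a - b;
}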
*/ + temp.opcode = MEM_REF; + temp.op0 = build_int_cst (build_pointer_type (TREE_TYPE (ref)), 0); + temp.off = 0; + VEC_safe_push (vn_reference_op_s, heap, *result, &temp); + temp.opcode = ADDR_EXPR; + temp.op0 = build_fold_addr_expr (ref); + temp.type = TREE_TYPE (temp.op0); + temp.off = -1; + break; case STRING_CST: case INTEGER_CST: case COMPLEX_CST: case VECTOR_CST: case REAL_CST: case CONSTRUCTOR: - case VAR_DECL: - case PARM_DECL: - case CONST_DECL: - case RESULT_DECL: case SSA_NAME: temp.op0 = ref; break; @@ -1580,7 +1599,7 @@ vn_reference_lookup_3 (ao_ref *ref, tree vuse, void *vr_) op.op0 = build_int_cst (ptr_type_node, at - rhs_offset); op.off = at - lhs_offset + rhs_offset; VEC_replace (vn_reference_op_s, vr->operands, 0, &op); - op.type = TYPE_MAIN_VARIANT (TREE_TYPE (rhs)); + op.type = TREE_TYPE (rhs); op.opcode = TREE_CODE (rhs); op.op0 = rhs; op.off = -1; @@ -1692,7 +1711,12 @@ vn_reference_lookup (tree op, tree vuse, vn_lookup_kind kind, { vn_reference_t wvnresult; ao_ref r; - ao_ref_init (&r, op); + /* Make sure to use a valueized reference ... */ + if (!ao_ref_init_from_vn_reference (&r, vr1.set, vr1.type, vr1.operands)) + ao_ref_init (&r, op); + else + /* ... but also preserve a full reference tree for advanced TBAA. */ + r.ref = op; vn_walk_kind = kind; wvnresult = (vn_reference_t)walk_non_aliased_vuses (&r, vr1.vuse, diff --git a/gcc/tree-vect-data-refs.c b/gcc/tree-vect-data-refs.c index 1a494233088..28147382aac 100644 --- a/gcc/tree-vect-data-refs.c +++ b/gcc/tree-vect-data-refs.c @@ -1495,12 +1495,19 @@ vect_enhance_data_refs_alignment (loop_vec_info loop_vinfo) stmt = DR_STMT (dr); stmt_info = vinfo_for_stmt (stmt); + if (!STMT_VINFO_RELEVANT (stmt_info)) + continue; + /* For interleaving, only the alignment of the first access matters. */ if (STMT_VINFO_STRIDED_ACCESS (stmt_info) && GROUP_FIRST_ELEMENT (stmt_info) != stmt) continue; + /* For invariant accesses there is nothing to enhance. */ + if (integer_zerop (DR_STEP (dr))) + continue; + supportable_dr_alignment = vect_supportable_dr_alignment (dr, true); do_peeling = vector_alignment_reachable_p (dr); if (do_peeling) @@ -2304,7 +2311,10 @@ vect_analyze_data_ref_access (struct data_reference *dr) /* Allow invariant loads in loops. */ if (loop_vinfo && dr_step == 0) - return DR_IS_READ (dr); + { + GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) = NULL; + return DR_IS_READ (dr); + } if (loop && nested_in_vect_loop_p (loop, stmt)) { diff --git a/gcc/tree.c b/gcc/tree.c index 28720340bab..e9876dd81a1 100644 --- a/gcc/tree.c +++ b/gcc/tree.c @@ -9167,10 +9167,12 @@ make_or_reuse_accum_type (unsigned size, int unsignedp, int satp) } /* Create nodes for all integer types (and error_mark_node) using the sizes - of C datatypes. */ + of C datatypes. SIGNED_CHAR specifies whether char is signed, + SHORT_DOUBLE specifies whether double should be of the same precision + as float. */ void -build_common_tree_nodes (bool signed_char) +build_common_tree_nodes (bool signed_char, bool short_double) { error_mark_node = make_node (ERROR_MARK); TREE_TYPE (error_mark_node) = error_mark_node; @@ -9247,14 +9249,7 @@ build_common_tree_nodes (bool signed_char) access_public_node = get_identifier ("public"); access_protected_node = get_identifier ("protected"); access_private_node = get_identifier ("private"); -} - -/* Call this function after calling build_common_tree_nodes. - It will create several other common tree nodes. */ -void -build_common_tree_nodes_2 (int short_double) -{ /* Define these next since types below may used them. 
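Related to the tree-vect-data-refs.c hunk above, which now tolerates invariant loads in loops and skips them (and irrelevant statements) when enhancing data reference alignment, a hypothetical source-level example of such a load:

/* The load from *scale has a zero DR_STEP: it reads the same address on
   every iteration, so there is no misalignment that peeling could improve
   and no interleaving group to consider; the added checks simply skip it.  */
void
scale_add (float *out, const float *in, const float *scale, int n)
{
  int i;

  for (i = 0; i < n; i++)
    out[i] = in[i] + *scale;   /* read-only, loop-invariant load */
}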
*/ integer_zero_node = build_int_cst (integer_type_node, 0); integer_one_node = build_int_cst (integer_type_node, 1); diff --git a/gcc/tree.h b/gcc/tree.h index ab0b6cc66c6..253d489a949 100644 --- a/gcc/tree.h +++ b/gcc/tree.h @@ -5396,8 +5396,7 @@ extern int real_onep (const_tree); extern int real_twop (const_tree); extern int real_minus_onep (const_tree); extern void init_ttree (void); -extern void build_common_tree_nodes (bool); -extern void build_common_tree_nodes_2 (int); +extern void build_common_tree_nodes (bool, bool); extern void build_common_builtin_nodes (void); extern tree build_nonstandard_integer_type (unsigned HOST_WIDE_INT, int); extern tree build_range_type (tree, tree, tree); @@ -5602,33 +5601,6 @@ extern tree tree_overlaps_hard_reg_set (tree, HARD_REG_SET *); #endif -/* In dwarf2out.c */ -/* Interface of the DWARF2 unwind info support. */ - -/* Generate a new label for the CFI info to refer to. */ - -extern char *dwarf2out_cfi_label (bool); - -/* Entry point to update the canonical frame address (CFA). */ - -extern void dwarf2out_def_cfa (const char *, unsigned, HOST_WIDE_INT); - -/* Entry point for saving a register to the stack. */ - -extern void dwarf2out_reg_save (const char *, unsigned, HOST_WIDE_INT); - -/* Entry point for saving the return address in the stack. */ - -extern void dwarf2out_return_save (const char *, HOST_WIDE_INT); - -/* Entry point for saving the return address in a register. */ - -extern void dwarf2out_return_reg (const char *, unsigned); - -/* Entry point for saving the first register into the second. */ - -extern void dwarf2out_reg_save_reg (const char *, rtx, rtx); - /* In tree-inline.c */ /* The type of a set of already-visited pointers. Functions for creating diff --git a/gnattools/ChangeLog b/gnattools/ChangeLog index 83f30164c06..9bcd6213230 100644 --- a/gnattools/ChangeLog +++ b/gnattools/ChangeLog @@ -1,3 +1,10 @@ +2011-07-07 Rainer Orth <ro@CeBiTec.Uni-Bielefeld.DE> + + PR target/39150 + * configure.ac (*86-*-solaris2*): Also accept + x86_64-*-solaris2.1[0-9]*. + * configure: Regenerate. + 2010-11-20 Ralf Wildenhues <Ralf.Wildenhues@gmx.de> PR other/46202 diff --git a/gnattools/configure b/gnattools/configure index a85c044056a..8667058788f 100755 --- a/gnattools/configure +++ b/gnattools/configure @@ -2041,7 +2041,7 @@ case "${target}" in sparc-sun-solaris*) TOOLS_TARGET_PAIRS="mlib-tgt-specific.adb<mlib-tgt-specific-solaris.adb" ;; - *86-*-solaris2*) + *86-*-solaris2* | x86_64-*-solaris2.1[0-9]*) TOOLS_TARGET_PAIRS="mlib-tgt-specific.adb<mlib-tgt-specific-solaris.adb" ;; *86-*-linux* \ diff --git a/gnattools/configure.ac b/gnattools/configure.ac index 712ed44063e..450cb727243 100644 --- a/gnattools/configure.ac +++ b/gnattools/configure.ac @@ -81,7 +81,7 @@ case "${target}" in sparc-sun-solaris*) TOOLS_TARGET_PAIRS="mlib-tgt-specific.adb<mlib-tgt-specific-solaris.adb" ;; - *86-*-solaris2*) + *86-*-solaris2* | x86_64-*-solaris2.1[[0-9]]*) TOOLS_TARGET_PAIRS="mlib-tgt-specific.adb<mlib-tgt-specific-solaris.adb" ;; *86-*-linux* \ diff --git a/libcpp/ChangeLog b/libcpp/ChangeLog index e36edfa981d..6abcb25081c 100644 --- a/libcpp/ChangeLog +++ b/libcpp/ChangeLog @@ -1,3 +1,10 @@ +2011-07-07 Rainer Orth <ro@CeBiTec.Uni-Bielefeld.DE> + + PR target/39150 + * configure.ac (host_wide_int): Handle x86_64-*-solaris2.1[0-9] + like i[34567]86-*-solaris2.1[0-9]*. + * configure: Regenerate. 
+ 2011-06-16 Jason Merrill <jason@redhat.com> PR c++/45399 diff --git a/libcpp/configure b/libcpp/configure index 41677ce1f3c..d261f2a09f6 100755 --- a/libcpp/configure +++ b/libcpp/configure @@ -7095,7 +7095,7 @@ case $target in ia64-*-* | \ hppa*64*-*-* | \ i[34567]86-*-darwin* | \ - i[34567]86-*-solaris2.1[0-9]* | \ + i[34567]86-*-solaris2.1[0-9]* | x86_64-*-solaris2.1[0-9]* | \ i[34567]86-w64-mingw* | \ mips*-*-* | \ mmix-*-* | \ diff --git a/libcpp/configure.ac b/libcpp/configure.ac index 1c67eaf641c..ce8cae61bf0 100644 --- a/libcpp/configure.ac +++ b/libcpp/configure.ac @@ -145,7 +145,7 @@ case $target in ia64-*-* | \ hppa*64*-*-* | \ i[34567]86-*-darwin* | \ - i[34567]86-*-solaris2.1[0-9]* | \ + i[34567]86-*-solaris2.1[0-9]* | x86_64-*-solaris2.1[0-9]* | \ i[34567]86-w64-mingw* | \ mips*-*-* | \ mmix-*-* | \ diff --git a/libgcc/ChangeLog b/libgcc/ChangeLog index 358cc1eb242..54b1f3d7393 100644 --- a/libgcc/ChangeLog +++ b/libgcc/ChangeLog @@ -1,3 +1,26 @@ +2011-07-07 Rainer Orth <ro@CeBiTec.Uni-Bielefeld.DE> + + PR target/39150 + * config.host (*-*-solaris2*): Handle x86_64-*-solaris2.1[0-9]* + like i?86-*-solaris2.1[0-9]*. + (i[34567]86-*-solaris2*): Also handle x86_64-*-solaris2.1[0-9]*. + * configure.ac (i?86-*-solaris2*): Likewise. + * configure: Regenerate. + +2011-07-06 Thomas Schwinge <thomas@schwinge.name> + + * config.host (i[34567]86-*-kfreebsd*-gnu, i[34567]86-*-knetbsd*-gnu) + (i[34567]86-*-gnu*, i[34567]86-*-kopensolaris*-gnu): Remove + md_unwind_header by splitting out of... + (i[34567]86-*-linux*): ... this. + * config.host (x86_64-*-kfreebsd*-gnu, x86_64-*-knetbsd*-gnu): + Remove md_unwind_header by splitting out of... + (x86_64-*-linux*): ... this. + +2011-07-04 Georg-Johann Lay <avr@gjlay.de> + + * config/avr/t-avr (intfuncs16): Add _clrsbXX2. + 2011-06-28 Nick Clifton <nickc@redhat.com> * config.host: Recognize all V850 variants. diff --git a/libgcc/config.host b/libgcc/config.host index 326ce91f165..8faeee3ee01 100644 --- a/libgcc/config.host +++ b/libgcc/config.host @@ -188,7 +188,7 @@ case ${host} in tmake_file="$tmake_file $cpu_type/t-sol2" extra_parts="gmon.o crtbegin.o crtend.o" case ${host} in - i?86-*-solaris2.1[0-9]*) + i?86-*-solaris2.1[0-9]* | x86_64-*-solaris2.1[0-9]*) # Solaris 10+/x86 provides crt1.o, crti.o, crtn.o, and gcrt1.o as # part of the base system. 
;; @@ -351,16 +351,24 @@ i[34567]86-*-openbsd2.*|i[34567]86-*openbsd3.[0123]) ;; i[34567]86-*-openbsd*) ;; -i[34567]86-*-linux* | i[34567]86-*-kfreebsd*-gnu | i[34567]86-*-knetbsd*-gnu | i[34567]86-*-gnu* | i[34567]86-*-kopensolaris*-gnu) +i[34567]86-*-linux*) extra_parts="$extra_parts crtprec32.o crtprec64.o crtprec80.o crtfastmath.o" tmake_file="${tmake_file} i386/t-crtpc i386/t-crtfm" md_unwind_header=i386/linux-unwind.h ;; -x86_64-*-linux* | x86_64-*-kfreebsd*-gnu | x86_64-*-knetbsd*-gnu) +i[34567]86-*-kfreebsd*-gnu | i[34567]86-*-knetbsd*-gnu | i[34567]86-*-gnu* | i[34567]86-*-kopensolaris*-gnu) + extra_parts="$extra_parts crtprec32.o crtprec64.o crtprec80.o crtfastmath.o" + tmake_file="${tmake_file} i386/t-crtpc i386/t-crtfm" + ;; +x86_64-*-linux*) extra_parts="$extra_parts crtprec32.o crtprec64.o crtprec80.o crtfastmath.o" tmake_file="${tmake_file} i386/t-crtpc i386/t-crtfm" md_unwind_header=i386/linux-unwind.h ;; +x86_64-*-kfreebsd*-gnu | x86_64-*-knetbsd*-gnu) + extra_parts="$extra_parts crtprec32.o crtprec64.o crtprec80.o crtfastmath.o" + tmake_file="${tmake_file} i386/t-crtpc i386/t-crtfm" + ;; i[34567]86-pc-msdosdjgpp*) ;; i[34567]86-*-lynxos*) @@ -378,7 +386,7 @@ i[34567]86-*-rtems*) extra_parts="crtbegin.o crtend.o crti.o crtn.o" tmake_file="${tmake_file} t-crtin i386/t-softfp i386/t-crtstuff t-rtems" ;; -i[34567]86-*-solaris2*) +i[34567]86-*-solaris2* | x86_64-*-solaris2.1[0-9]*) tmake_file="$tmake_file i386/t-crtfm" extra_parts="$extra_parts crtfastmath.o" md_unwind_header=i386/sol2-unwind.h @@ -692,7 +700,7 @@ i[34567]86-*-darwin* | x86_64-*-darwin* | \ i[34567]86-*-kfreebsd*-gnu | x86_64-*-kfreebsd*-gnu | \ i[34567]86-*-linux* | x86_64-*-linux* | \ i[34567]86-*-gnu* | \ - i[34567]86-*-solaris2* | \ + i[34567]86-*-solaris2* | x86_64-*-solaris2.1[0-9]* | \ i[34567]86-*-cygwin* | i[34567]86-*-mingw* | x86_64-*-mingw* | \ i[34567]86-*-freebsd* | x86_64-*-freebsd*) if test "${host_address}" = 32; then diff --git a/libgcc/config/avr/t-avr b/libgcc/config/avr/t-avr index 7c8783ee212..78829c76af4 100644 --- a/libgcc/config/avr/t-avr +++ b/libgcc/config/avr/t-avr @@ -1,5 +1,5 @@ # Extra 16-bit integer functions. -intfuncs16 = _absvXX2 _addvXX3 _subvXX3 _mulvXX3 _negvXX2 +intfuncs16 = _absvXX2 _addvXX3 _subvXX3 _mulvXX3 _negvXX2 _clrsbXX2 hiintfuncs16 = $(subst XX,hi,$(intfuncs16)) siintfuncs16 = $(subst XX,si,$(intfuncs16)) diff --git a/libgcc/configure b/libgcc/configure index 13510224944..45fe914ddf9 100644 --- a/libgcc/configure +++ b/libgcc/configure @@ -3813,7 +3813,7 @@ esac # Link with -nostartfiles -nodefaultlibs since neither are present while # building libgcc. case ${host} in -i?86-*-solaris2*) +i?86-*-solaris2* | x86_64-*-solaris2.1[0-9]*) cat > conftest.s <<EOF .section .eh_frame,"a",@unwind .zero 4 diff --git a/libgcc/configure.ac b/libgcc/configure.ac index 4f56c3b596f..20536c7152d 100644 --- a/libgcc/configure.ac +++ b/libgcc/configure.ac @@ -207,7 +207,7 @@ esac # Link with -nostartfiles -nodefaultlibs since neither are present while # building libgcc. case ${host} in -i?86-*-solaris2*) +i?86-*-solaris2* | x86_64-*-solaris2.1[[0-9]]*) cat > conftest.s <<EOF .section .eh_frame,"a",@unwind .zero 4 diff --git a/libgfortran/ChangeLog b/libgfortran/ChangeLog index 88ee3638d17..442c032f477 100644 --- a/libgfortran/ChangeLog +++ b/libgfortran/ChangeLog @@ -1,3 +1,53 @@ +2011-07-05 Thomas Koenig <tkoenig@gcc.gnu.org> + + * runtime/memory.c (internal_malloc_size): If size is zero, + allocate a single byte. 
+ * m4/pack.m4 (pack_'rtype_code`): Don't check for zero size + for the argument of internal_malloc_size. + * m4/spread.m4 (spread_'rtype_code`): Likewise. + * m4/eoshift1.m4 (eoshift1): Don't allocate twice. Don't check + for zero size for the argument of internal_malloc_size. + * m4/eoshift3.m4: Don't check for zero size for the argument of + internal_malloc_size. + * intrinsics/pack_generic.c (pack_internal): Likewise. + (pack_s_internal): Likewise. + * intrinsics/cshift0.c (cshift0): Likewise. + * intrinsics/spread_generic.c (spread_internal): Likewise. + * intrinsics/eoshift0.c (eoshift0): Likewise. + * intrinsics/eoshift2.c (eoshift2): Likewise. + * generated/eoshift1_16.c: Regenerated. + * generated/eoshift1_4.c: Regenerated. + * generated/eoshift1_8.c: Regenerated. + * generated/eoshift3_16.c: Regenerated. + * generated/eoshift3_4.c: Regenerated. + * generated/eoshift3_8.c: Regenerated. + * generated/pack_c10.c: Regenerated. + * generated/pack_c16.c: Regenerated. + * generated/pack_c4.c: Regenerated. + * generated/pack_c8.c: Regenerated. + * generated/pack_i16.c: Regenerated. + * generated/pack_i1.c: Regenerated. + * generated/pack_i2.c: Regenerated. + * generated/pack_i4.c: Regenerated. + * generated/pack_i8.c: Regenerated. + * generated/pack_r10.c: Regenerated. + * generated/pack_r16.c: Regenerated. + * generated/pack_r4.c: Regenerated. + * generated/pack_r8.c: Regenerated. + * generated/spread_c10.c: Regenerated. + * generated/spread_c16.c: Regenerated. + * generated/spread_c4.c: Regenerated. + * generated/spread_c8.c: Regenerated. + * generated/spread_i16.c: Regenerated. + * generated/spread_i1.c: Regenerated. + * generated/spread_i2.c: Regenerated. + * generated/spread_i4.c: Regenerated. + * generated/spread_i8.c: Regenerated. + * generated/spread_r10.c: Regenerated. + * generated/spread_r16.c: Regenerated. + * generated/spread_r4.c: Regenerated. + * generated/spread_r8.c: Regenerated. + 2011-06-28 Thomas Koenig <tkoenig@gcc.gnu.org> PR fortran/49479 diff --git a/libgfortran/generated/eoshift1_16.c b/libgfortran/generated/eoshift1_16.c index a14bd292715..eb3a27a1c64 100644 --- a/libgfortran/generated/eoshift1_16.c +++ b/libgfortran/generated/eoshift1_16.c @@ -88,7 +88,6 @@ eoshift1 (gfc_array_char * const restrict ret, { int i; - ret->data = internal_malloc_size (size * arraysize); ret->offset = 0; ret->dtype = array->dtype; for (i = 0; i < GFC_DESCRIPTOR_RANK (array); i++) @@ -106,10 +105,8 @@ eoshift1 (gfc_array_char * const restrict ret, GFC_DIMENSION_SET(ret->dim[i], 0, ub, str); } - if (arraysize > 0) - ret->data = internal_malloc_size (size * arraysize); - else - ret->data = internal_malloc_size (1); + /* internal_malloc_size allocates a single byte for zero size. 
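The repeated hunks above and below all follow one pattern: the zero-size special case moves out of every call site and into internal_malloc_size itself. A minimal hedged sketch of that pattern, using a hypothetical wrapper name rather than the libgfortran runtime:

#include <stdlib.h>
#include <string.h>

/* Stand-in for internal_malloc_size after this change: a zero-byte request
   is rounded up to one byte, so callers always get a distinct, freeable
   pointer and no longer need an "arraysize > 0" branch.  */
static void *
malloc_min1 (size_t size)
{
  void *p;

  if (size == 0)
    size = 1;
  p = malloc (size);
  if (p == NULL)
    abort ();   /* the real runtime reports a fatal error instead */
  return p;
}

/* A call site written in the simplified style used throughout the hunks.  */
static void *
alloc_result (size_t element_size, size_t count)
{
  void *data = malloc_min1 (element_size * count);

  memset (data, 0, element_size * count);   /* safe even when count == 0 */
  return data;
}

int
main (void)
{
  free (alloc_result (sizeof (double), 0));   /* zero-sized result case */
  return 0;
}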
*/ + ret->data = internal_malloc_size (size * arraysize); } else if (unlikely (compile_options.bounds_check)) diff --git a/libgfortran/generated/eoshift1_4.c b/libgfortran/generated/eoshift1_4.c index 06bc309c4a8..d3776b911b0 100644 --- a/libgfortran/generated/eoshift1_4.c +++ b/libgfortran/generated/eoshift1_4.c @@ -88,7 +88,6 @@ eoshift1 (gfc_array_char * const restrict ret, { int i; - ret->data = internal_malloc_size (size * arraysize); ret->offset = 0; ret->dtype = array->dtype; for (i = 0; i < GFC_DESCRIPTOR_RANK (array); i++) @@ -106,10 +105,8 @@ eoshift1 (gfc_array_char * const restrict ret, GFC_DIMENSION_SET(ret->dim[i], 0, ub, str); } - if (arraysize > 0) - ret->data = internal_malloc_size (size * arraysize); - else - ret->data = internal_malloc_size (1); + /* internal_malloc_size allocates a single byte for zero size. */ + ret->data = internal_malloc_size (size * arraysize); } else if (unlikely (compile_options.bounds_check)) diff --git a/libgfortran/generated/eoshift1_8.c b/libgfortran/generated/eoshift1_8.c index 3e9162d0f08..5f026acdfcb 100644 --- a/libgfortran/generated/eoshift1_8.c +++ b/libgfortran/generated/eoshift1_8.c @@ -88,7 +88,6 @@ eoshift1 (gfc_array_char * const restrict ret, { int i; - ret->data = internal_malloc_size (size * arraysize); ret->offset = 0; ret->dtype = array->dtype; for (i = 0; i < GFC_DESCRIPTOR_RANK (array); i++) @@ -106,10 +105,8 @@ eoshift1 (gfc_array_char * const restrict ret, GFC_DIMENSION_SET(ret->dim[i], 0, ub, str); } - if (arraysize > 0) - ret->data = internal_malloc_size (size * arraysize); - else - ret->data = internal_malloc_size (1); + /* internal_malloc_size allocates a single byte for zero size. */ + ret->data = internal_malloc_size (size * arraysize); } else if (unlikely (compile_options.bounds_check)) diff --git a/libgfortran/generated/eoshift3_16.c b/libgfortran/generated/eoshift3_16.c index ec21d1ec14d..1cf216767ac 100644 --- a/libgfortran/generated/eoshift3_16.c +++ b/libgfortran/generated/eoshift3_16.c @@ -107,10 +107,8 @@ eoshift3 (gfc_array_char * const restrict ret, GFC_DIMENSION_SET(ret->dim[i], 0, ub, str); } - if (arraysize > 0) - ret->data = internal_malloc_size (size * arraysize); - else - ret->data = internal_malloc_size (1); + /* internal_malloc_size allocates a single byte for zero size. */ + ret->data = internal_malloc_size (size * arraysize); } else if (unlikely (compile_options.bounds_check)) diff --git a/libgfortran/generated/eoshift3_4.c b/libgfortran/generated/eoshift3_4.c index ce4cede1f1d..c6033d0a58a 100644 --- a/libgfortran/generated/eoshift3_4.c +++ b/libgfortran/generated/eoshift3_4.c @@ -107,10 +107,8 @@ eoshift3 (gfc_array_char * const restrict ret, GFC_DIMENSION_SET(ret->dim[i], 0, ub, str); } - if (arraysize > 0) - ret->data = internal_malloc_size (size * arraysize); - else - ret->data = internal_malloc_size (1); + /* internal_malloc_size allocates a single byte for zero size. */ + ret->data = internal_malloc_size (size * arraysize); } else if (unlikely (compile_options.bounds_check)) diff --git a/libgfortran/generated/eoshift3_8.c b/libgfortran/generated/eoshift3_8.c index 4af36f72bb4..7a7b196eec4 100644 --- a/libgfortran/generated/eoshift3_8.c +++ b/libgfortran/generated/eoshift3_8.c @@ -107,10 +107,8 @@ eoshift3 (gfc_array_char * const restrict ret, GFC_DIMENSION_SET(ret->dim[i], 0, ub, str); } - if (arraysize > 0) - ret->data = internal_malloc_size (size * arraysize); - else - ret->data = internal_malloc_size (1); + /* internal_malloc_size allocates a single byte for zero size. 
*/ + ret->data = internal_malloc_size (size * arraysize); } else if (unlikely (compile_options.bounds_check)) diff --git a/libgfortran/generated/pack_c10.c b/libgfortran/generated/pack_c10.c index cc66c538e1c..621ba8d8b79 100644 --- a/libgfortran/generated/pack_c10.c +++ b/libgfortran/generated/pack_c10.c @@ -166,14 +166,12 @@ pack_c10 (gfc_array_c10 *ret, const gfc_array_c10 *array, GFC_DIMENSION_SET(ret->dim[0], 0, total-1, 1); ret->offset = 0; + + /* internal_malloc_size allocates a single byte for zero size. */ + ret->data = internal_malloc_size (sizeof (GFC_COMPLEX_10) * total); + if (total == 0) - { - /* In this case, nothing remains to be done. */ - ret->data = internal_malloc_size (1); - return; - } - else - ret->data = internal_malloc_size (sizeof (GFC_COMPLEX_10) * total); + return; } else { diff --git a/libgfortran/generated/pack_c16.c b/libgfortran/generated/pack_c16.c index 9397262dd06..e98de98c91c 100644 --- a/libgfortran/generated/pack_c16.c +++ b/libgfortran/generated/pack_c16.c @@ -166,14 +166,12 @@ pack_c16 (gfc_array_c16 *ret, const gfc_array_c16 *array, GFC_DIMENSION_SET(ret->dim[0], 0, total-1, 1); ret->offset = 0; + + /* internal_malloc_size allocates a single byte for zero size. */ + ret->data = internal_malloc_size (sizeof (GFC_COMPLEX_16) * total); + if (total == 0) - { - /* In this case, nothing remains to be done. */ - ret->data = internal_malloc_size (1); - return; - } - else - ret->data = internal_malloc_size (sizeof (GFC_COMPLEX_16) * total); + return; } else { diff --git a/libgfortran/generated/pack_c4.c b/libgfortran/generated/pack_c4.c index 093bdcc9a1d..85e15417851 100644 --- a/libgfortran/generated/pack_c4.c +++ b/libgfortran/generated/pack_c4.c @@ -166,14 +166,12 @@ pack_c4 (gfc_array_c4 *ret, const gfc_array_c4 *array, GFC_DIMENSION_SET(ret->dim[0], 0, total-1, 1); ret->offset = 0; + + /* internal_malloc_size allocates a single byte for zero size. */ + ret->data = internal_malloc_size (sizeof (GFC_COMPLEX_4) * total); + if (total == 0) - { - /* In this case, nothing remains to be done. */ - ret->data = internal_malloc_size (1); - return; - } - else - ret->data = internal_malloc_size (sizeof (GFC_COMPLEX_4) * total); + return; } else { diff --git a/libgfortran/generated/pack_c8.c b/libgfortran/generated/pack_c8.c index 7971e2ba135..bfaec2fc910 100644 --- a/libgfortran/generated/pack_c8.c +++ b/libgfortran/generated/pack_c8.c @@ -166,14 +166,12 @@ pack_c8 (gfc_array_c8 *ret, const gfc_array_c8 *array, GFC_DIMENSION_SET(ret->dim[0], 0, total-1, 1); ret->offset = 0; + + /* internal_malloc_size allocates a single byte for zero size. */ + ret->data = internal_malloc_size (sizeof (GFC_COMPLEX_8) * total); + if (total == 0) - { - /* In this case, nothing remains to be done. */ - ret->data = internal_malloc_size (1); - return; - } - else - ret->data = internal_malloc_size (sizeof (GFC_COMPLEX_8) * total); + return; } else { diff --git a/libgfortran/generated/pack_i1.c b/libgfortran/generated/pack_i1.c index 3e4647dbd55..c168affaea0 100644 --- a/libgfortran/generated/pack_i1.c +++ b/libgfortran/generated/pack_i1.c @@ -166,14 +166,12 @@ pack_i1 (gfc_array_i1 *ret, const gfc_array_i1 *array, GFC_DIMENSION_SET(ret->dim[0], 0, total-1, 1); ret->offset = 0; + + /* internal_malloc_size allocates a single byte for zero size. */ + ret->data = internal_malloc_size (sizeof (GFC_INTEGER_1) * total); + if (total == 0) - { - /* In this case, nothing remains to be done. 
*/ - ret->data = internal_malloc_size (1); - return; - } - else - ret->data = internal_malloc_size (sizeof (GFC_INTEGER_1) * total); + return; } else { diff --git a/libgfortran/generated/pack_i16.c b/libgfortran/generated/pack_i16.c index 99d3491c38f..81ed49a8208 100644 --- a/libgfortran/generated/pack_i16.c +++ b/libgfortran/generated/pack_i16.c @@ -166,14 +166,12 @@ pack_i16 (gfc_array_i16 *ret, const gfc_array_i16 *array, GFC_DIMENSION_SET(ret->dim[0], 0, total-1, 1); ret->offset = 0; + + /* internal_malloc_size allocates a single byte for zero size. */ + ret->data = internal_malloc_size (sizeof (GFC_INTEGER_16) * total); + if (total == 0) - { - /* In this case, nothing remains to be done. */ - ret->data = internal_malloc_size (1); - return; - } - else - ret->data = internal_malloc_size (sizeof (GFC_INTEGER_16) * total); + return; } else { diff --git a/libgfortran/generated/pack_i2.c b/libgfortran/generated/pack_i2.c index e796d169f76..a04b7af6969 100644 --- a/libgfortran/generated/pack_i2.c +++ b/libgfortran/generated/pack_i2.c @@ -166,14 +166,12 @@ pack_i2 (gfc_array_i2 *ret, const gfc_array_i2 *array, GFC_DIMENSION_SET(ret->dim[0], 0, total-1, 1); ret->offset = 0; + + /* internal_malloc_size allocates a single byte for zero size. */ + ret->data = internal_malloc_size (sizeof (GFC_INTEGER_2) * total); + if (total == 0) - { - /* In this case, nothing remains to be done. */ - ret->data = internal_malloc_size (1); - return; - } - else - ret->data = internal_malloc_size (sizeof (GFC_INTEGER_2) * total); + return; } else { diff --git a/libgfortran/generated/pack_i4.c b/libgfortran/generated/pack_i4.c index 91ce99fe4fd..c5d112bd394 100644 --- a/libgfortran/generated/pack_i4.c +++ b/libgfortran/generated/pack_i4.c @@ -166,14 +166,12 @@ pack_i4 (gfc_array_i4 *ret, const gfc_array_i4 *array, GFC_DIMENSION_SET(ret->dim[0], 0, total-1, 1); ret->offset = 0; + + /* internal_malloc_size allocates a single byte for zero size. */ + ret->data = internal_malloc_size (sizeof (GFC_INTEGER_4) * total); + if (total == 0) - { - /* In this case, nothing remains to be done. */ - ret->data = internal_malloc_size (1); - return; - } - else - ret->data = internal_malloc_size (sizeof (GFC_INTEGER_4) * total); + return; } else { diff --git a/libgfortran/generated/pack_i8.c b/libgfortran/generated/pack_i8.c index e49d8c29e95..7307192749b 100644 --- a/libgfortran/generated/pack_i8.c +++ b/libgfortran/generated/pack_i8.c @@ -166,14 +166,12 @@ pack_i8 (gfc_array_i8 *ret, const gfc_array_i8 *array, GFC_DIMENSION_SET(ret->dim[0], 0, total-1, 1); ret->offset = 0; + + /* internal_malloc_size allocates a single byte for zero size. */ + ret->data = internal_malloc_size (sizeof (GFC_INTEGER_8) * total); + if (total == 0) - { - /* In this case, nothing remains to be done. */ - ret->data = internal_malloc_size (1); - return; - } - else - ret->data = internal_malloc_size (sizeof (GFC_INTEGER_8) * total); + return; } else { diff --git a/libgfortran/generated/pack_r10.c b/libgfortran/generated/pack_r10.c index f70c932640e..c3b55cc1a9f 100644 --- a/libgfortran/generated/pack_r10.c +++ b/libgfortran/generated/pack_r10.c @@ -166,14 +166,12 @@ pack_r10 (gfc_array_r10 *ret, const gfc_array_r10 *array, GFC_DIMENSION_SET(ret->dim[0], 0, total-1, 1); ret->offset = 0; + + /* internal_malloc_size allocates a single byte for zero size. */ + ret->data = internal_malloc_size (sizeof (GFC_REAL_10) * total); + if (total == 0) - { - /* In this case, nothing remains to be done. 
*/ - ret->data = internal_malloc_size (1); - return; - } - else - ret->data = internal_malloc_size (sizeof (GFC_REAL_10) * total); + return; } else { diff --git a/libgfortran/generated/pack_r16.c b/libgfortran/generated/pack_r16.c index ff2ad6e7eed..489ff957aa1 100644 --- a/libgfortran/generated/pack_r16.c +++ b/libgfortran/generated/pack_r16.c @@ -166,14 +166,12 @@ pack_r16 (gfc_array_r16 *ret, const gfc_array_r16 *array, GFC_DIMENSION_SET(ret->dim[0], 0, total-1, 1); ret->offset = 0; + + /* internal_malloc_size allocates a single byte for zero size. */ + ret->data = internal_malloc_size (sizeof (GFC_REAL_16) * total); + if (total == 0) - { - /* In this case, nothing remains to be done. */ - ret->data = internal_malloc_size (1); - return; - } - else - ret->data = internal_malloc_size (sizeof (GFC_REAL_16) * total); + return; } else { diff --git a/libgfortran/generated/pack_r4.c b/libgfortran/generated/pack_r4.c index 0c08b8c8c94..05172dbbb06 100644 --- a/libgfortran/generated/pack_r4.c +++ b/libgfortran/generated/pack_r4.c @@ -166,14 +166,12 @@ pack_r4 (gfc_array_r4 *ret, const gfc_array_r4 *array, GFC_DIMENSION_SET(ret->dim[0], 0, total-1, 1); ret->offset = 0; + + /* internal_malloc_size allocates a single byte for zero size. */ + ret->data = internal_malloc_size (sizeof (GFC_REAL_4) * total); + if (total == 0) - { - /* In this case, nothing remains to be done. */ - ret->data = internal_malloc_size (1); - return; - } - else - ret->data = internal_malloc_size (sizeof (GFC_REAL_4) * total); + return; } else { diff --git a/libgfortran/generated/pack_r8.c b/libgfortran/generated/pack_r8.c index 2b307e29a2b..df5cc3f3a6e 100644 --- a/libgfortran/generated/pack_r8.c +++ b/libgfortran/generated/pack_r8.c @@ -166,14 +166,12 @@ pack_r8 (gfc_array_r8 *ret, const gfc_array_r8 *array, GFC_DIMENSION_SET(ret->dim[0], 0, total-1, 1); ret->offset = 0; + + /* internal_malloc_size allocates a single byte for zero size. */ + ret->data = internal_malloc_size (sizeof (GFC_REAL_8) * total); + if (total == 0) - { - /* In this case, nothing remains to be done. */ - ret->data = internal_malloc_size (1); - return; - } - else - ret->data = internal_malloc_size (sizeof (GFC_REAL_8) * total); + return; } else { diff --git a/libgfortran/generated/spread_c10.c b/libgfortran/generated/spread_c10.c index 77a838f01a4..d8a5bafe235 100644 --- a/libgfortran/generated/spread_c10.c +++ b/libgfortran/generated/spread_c10.c @@ -100,13 +100,11 @@ spread_c10 (gfc_array_c10 *ret, const gfc_array_c10 *source, GFC_DIMENSION_SET(ret->dim[n], 0, ub, stride); } ret->offset = 0; - if (rs > 0) - ret->data = internal_malloc_size (rs * sizeof(GFC_COMPLEX_10)); - else - { - ret->data = internal_malloc_size (1); - return; - } + + /* internal_malloc_size allocates a single byte for zero size. */ + ret->data = internal_malloc_size (rs * sizeof(GFC_COMPLEX_10)); + if (rs <= 0) + return; } else { diff --git a/libgfortran/generated/spread_c16.c b/libgfortran/generated/spread_c16.c index 1276e4dfb44..0bfba7f4145 100644 --- a/libgfortran/generated/spread_c16.c +++ b/libgfortran/generated/spread_c16.c @@ -100,13 +100,11 @@ spread_c16 (gfc_array_c16 *ret, const gfc_array_c16 *source, GFC_DIMENSION_SET(ret->dim[n], 0, ub, stride); } ret->offset = 0; - if (rs > 0) - ret->data = internal_malloc_size (rs * sizeof(GFC_COMPLEX_16)); - else - { - ret->data = internal_malloc_size (1); - return; - } + + /* internal_malloc_size allocates a single byte for zero size. 
*/ + ret->data = internal_malloc_size (rs * sizeof(GFC_COMPLEX_16)); + if (rs <= 0) + return; } else { diff --git a/libgfortran/generated/spread_c4.c b/libgfortran/generated/spread_c4.c index 5224e8477a9..e1a475f6f12 100644 --- a/libgfortran/generated/spread_c4.c +++ b/libgfortran/generated/spread_c4.c @@ -100,13 +100,11 @@ spread_c4 (gfc_array_c4 *ret, const gfc_array_c4 *source, GFC_DIMENSION_SET(ret->dim[n], 0, ub, stride); } ret->offset = 0; - if (rs > 0) - ret->data = internal_malloc_size (rs * sizeof(GFC_COMPLEX_4)); - else - { - ret->data = internal_malloc_size (1); - return; - } + + /* internal_malloc_size allocates a single byte for zero size. */ + ret->data = internal_malloc_size (rs * sizeof(GFC_COMPLEX_4)); + if (rs <= 0) + return; } else { diff --git a/libgfortran/generated/spread_c8.c b/libgfortran/generated/spread_c8.c index 96ecb3afb87..a2902908e76 100644 --- a/libgfortran/generated/spread_c8.c +++ b/libgfortran/generated/spread_c8.c @@ -100,13 +100,11 @@ spread_c8 (gfc_array_c8 *ret, const gfc_array_c8 *source, GFC_DIMENSION_SET(ret->dim[n], 0, ub, stride); } ret->offset = 0; - if (rs > 0) - ret->data = internal_malloc_size (rs * sizeof(GFC_COMPLEX_8)); - else - { - ret->data = internal_malloc_size (1); - return; - } + + /* internal_malloc_size allocates a single byte for zero size. */ + ret->data = internal_malloc_size (rs * sizeof(GFC_COMPLEX_8)); + if (rs <= 0) + return; } else { diff --git a/libgfortran/generated/spread_i1.c b/libgfortran/generated/spread_i1.c index e002c146262..111c4ccea5f 100644 --- a/libgfortran/generated/spread_i1.c +++ b/libgfortran/generated/spread_i1.c @@ -100,13 +100,11 @@ spread_i1 (gfc_array_i1 *ret, const gfc_array_i1 *source, GFC_DIMENSION_SET(ret->dim[n], 0, ub, stride); } ret->offset = 0; - if (rs > 0) - ret->data = internal_malloc_size (rs * sizeof(GFC_INTEGER_1)); - else - { - ret->data = internal_malloc_size (1); - return; - } + + /* internal_malloc_size allocates a single byte for zero size. */ + ret->data = internal_malloc_size (rs * sizeof(GFC_INTEGER_1)); + if (rs <= 0) + return; } else { diff --git a/libgfortran/generated/spread_i16.c b/libgfortran/generated/spread_i16.c index bdefdac3271..2a4864ba41c 100644 --- a/libgfortran/generated/spread_i16.c +++ b/libgfortran/generated/spread_i16.c @@ -100,13 +100,11 @@ spread_i16 (gfc_array_i16 *ret, const gfc_array_i16 *source, GFC_DIMENSION_SET(ret->dim[n], 0, ub, stride); } ret->offset = 0; - if (rs > 0) - ret->data = internal_malloc_size (rs * sizeof(GFC_INTEGER_16)); - else - { - ret->data = internal_malloc_size (1); - return; - } + + /* internal_malloc_size allocates a single byte for zero size. */ + ret->data = internal_malloc_size (rs * sizeof(GFC_INTEGER_16)); + if (rs <= 0) + return; } else { diff --git a/libgfortran/generated/spread_i2.c b/libgfortran/generated/spread_i2.c index 8482cfde857..5a5d87064e6 100644 --- a/libgfortran/generated/spread_i2.c +++ b/libgfortran/generated/spread_i2.c @@ -100,13 +100,11 @@ spread_i2 (gfc_array_i2 *ret, const gfc_array_i2 *source, GFC_DIMENSION_SET(ret->dim[n], 0, ub, stride); } ret->offset = 0; - if (rs > 0) - ret->data = internal_malloc_size (rs * sizeof(GFC_INTEGER_2)); - else - { - ret->data = internal_malloc_size (1); - return; - } + + /* internal_malloc_size allocates a single byte for zero size. 
*/ + ret->data = internal_malloc_size (rs * sizeof(GFC_INTEGER_2)); + if (rs <= 0) + return; } else { diff --git a/libgfortran/generated/spread_i4.c b/libgfortran/generated/spread_i4.c index 6eff6326b26..e990408b0a4 100644 --- a/libgfortran/generated/spread_i4.c +++ b/libgfortran/generated/spread_i4.c @@ -100,13 +100,11 @@ spread_i4 (gfc_array_i4 *ret, const gfc_array_i4 *source, GFC_DIMENSION_SET(ret->dim[n], 0, ub, stride); } ret->offset = 0; - if (rs > 0) - ret->data = internal_malloc_size (rs * sizeof(GFC_INTEGER_4)); - else - { - ret->data = internal_malloc_size (1); - return; - } + + /* internal_malloc_size allocates a single byte for zero size. */ + ret->data = internal_malloc_size (rs * sizeof(GFC_INTEGER_4)); + if (rs <= 0) + return; } else { diff --git a/libgfortran/generated/spread_i8.c b/libgfortran/generated/spread_i8.c index 29312636306..1e3b3055b8f 100644 --- a/libgfortran/generated/spread_i8.c +++ b/libgfortran/generated/spread_i8.c @@ -100,13 +100,11 @@ spread_i8 (gfc_array_i8 *ret, const gfc_array_i8 *source, GFC_DIMENSION_SET(ret->dim[n], 0, ub, stride); } ret->offset = 0; - if (rs > 0) - ret->data = internal_malloc_size (rs * sizeof(GFC_INTEGER_8)); - else - { - ret->data = internal_malloc_size (1); - return; - } + + /* internal_malloc_size allocates a single byte for zero size. */ + ret->data = internal_malloc_size (rs * sizeof(GFC_INTEGER_8)); + if (rs <= 0) + return; } else { diff --git a/libgfortran/generated/spread_r10.c b/libgfortran/generated/spread_r10.c index 3c3f197b4c2..ef86bc59007 100644 --- a/libgfortran/generated/spread_r10.c +++ b/libgfortran/generated/spread_r10.c @@ -100,13 +100,11 @@ spread_r10 (gfc_array_r10 *ret, const gfc_array_r10 *source, GFC_DIMENSION_SET(ret->dim[n], 0, ub, stride); } ret->offset = 0; - if (rs > 0) - ret->data = internal_malloc_size (rs * sizeof(GFC_REAL_10)); - else - { - ret->data = internal_malloc_size (1); - return; - } + + /* internal_malloc_size allocates a single byte for zero size. */ + ret->data = internal_malloc_size (rs * sizeof(GFC_REAL_10)); + if (rs <= 0) + return; } else { diff --git a/libgfortran/generated/spread_r16.c b/libgfortran/generated/spread_r16.c index 13162609696..9678e8768a0 100644 --- a/libgfortran/generated/spread_r16.c +++ b/libgfortran/generated/spread_r16.c @@ -100,13 +100,11 @@ spread_r16 (gfc_array_r16 *ret, const gfc_array_r16 *source, GFC_DIMENSION_SET(ret->dim[n], 0, ub, stride); } ret->offset = 0; - if (rs > 0) - ret->data = internal_malloc_size (rs * sizeof(GFC_REAL_16)); - else - { - ret->data = internal_malloc_size (1); - return; - } + + /* internal_malloc_size allocates a single byte for zero size. */ + ret->data = internal_malloc_size (rs * sizeof(GFC_REAL_16)); + if (rs <= 0) + return; } else { diff --git a/libgfortran/generated/spread_r4.c b/libgfortran/generated/spread_r4.c index cc0f1197b69..fac0900cbb4 100644 --- a/libgfortran/generated/spread_r4.c +++ b/libgfortran/generated/spread_r4.c @@ -100,13 +100,11 @@ spread_r4 (gfc_array_r4 *ret, const gfc_array_r4 *source, GFC_DIMENSION_SET(ret->dim[n], 0, ub, stride); } ret->offset = 0; - if (rs > 0) - ret->data = internal_malloc_size (rs * sizeof(GFC_REAL_4)); - else - { - ret->data = internal_malloc_size (1); - return; - } + + /* internal_malloc_size allocates a single byte for zero size. 
*/ + ret->data = internal_malloc_size (rs * sizeof(GFC_REAL_4)); + if (rs <= 0) + return; } else { diff --git a/libgfortran/generated/spread_r8.c b/libgfortran/generated/spread_r8.c index f38ef3885fc..0dd9e7bf5ad 100644 --- a/libgfortran/generated/spread_r8.c +++ b/libgfortran/generated/spread_r8.c @@ -100,13 +100,11 @@ spread_r8 (gfc_array_r8 *ret, const gfc_array_r8 *source, GFC_DIMENSION_SET(ret->dim[n], 0, ub, stride); } ret->offset = 0; - if (rs > 0) - ret->data = internal_malloc_size (rs * sizeof(GFC_REAL_8)); - else - { - ret->data = internal_malloc_size (1); - return; - } + + /* internal_malloc_size allocates a single byte for zero size. */ + ret->data = internal_malloc_size (rs * sizeof(GFC_REAL_8)); + if (rs <= 0) + return; } else { diff --git a/libgfortran/intrinsics/cshift0.c b/libgfortran/intrinsics/cshift0.c index 00a50d5db7c..026dd1adc0d 100644 --- a/libgfortran/intrinsics/cshift0.c +++ b/libgfortran/intrinsics/cshift0.c @@ -79,10 +79,8 @@ cshift0 (gfc_array_char * ret, const gfc_array_char * array, GFC_DIMENSION_SET(ret->dim[i], 0, ub, str); } - if (arraysize > 0) - ret->data = internal_malloc_size (size * arraysize); - else - ret->data = internal_malloc_size (1); + /* internal_malloc_size allocates a single byte for zero size. */ + ret->data = internal_malloc_size (size * arraysize); } else if (unlikely (compile_options.bounds_check)) { diff --git a/libgfortran/intrinsics/eoshift0.c b/libgfortran/intrinsics/eoshift0.c index 74ba5ab7a97..6ceed2000e0 100644 --- a/libgfortran/intrinsics/eoshift0.c +++ b/libgfortran/intrinsics/eoshift0.c @@ -86,11 +86,8 @@ eoshift0 (gfc_array_char * ret, const gfc_array_char * array, } - if (arraysize > 0) - ret->data = internal_malloc_size (size * arraysize); - else - ret->data = internal_malloc_size (1); - + /* internal_malloc_size allocates a single byte for zero size. */ + ret->data = internal_malloc_size (size * arraysize); } else if (unlikely (compile_options.bounds_check)) { diff --git a/libgfortran/intrinsics/eoshift2.c b/libgfortran/intrinsics/eoshift2.c index 2fbf62e118c..b4f82786964 100644 --- a/libgfortran/intrinsics/eoshift2.c +++ b/libgfortran/intrinsics/eoshift2.c @@ -91,10 +91,8 @@ eoshift2 (gfc_array_char *ret, const gfc_array_char *array, GFC_DIMENSION_SET(ret->dim[i], 0, ub, str); - if (arraysize > 0) - ret->data = internal_malloc_size (size * arraysize); - else - ret->data = internal_malloc_size (1); + /* internal_malloc_size allocates a single byte for zero size. */ + ret->data = internal_malloc_size (size * arraysize); } } diff --git a/libgfortran/intrinsics/pack_generic.c b/libgfortran/intrinsics/pack_generic.c index c15bdd08f41..de1e07facab 100644 --- a/libgfortran/intrinsics/pack_generic.c +++ b/libgfortran/intrinsics/pack_generic.c @@ -153,14 +153,11 @@ pack_internal (gfc_array_char *ret, const gfc_array_char *array, GFC_DIMENSION_SET(ret->dim[0], 0, total-1, 1); ret->offset = 0; + /* internal_malloc_size allocates a single byte for zero size. */ + ret->data = internal_malloc_size (size * total); + if (total == 0) - { - /* In this case, nothing remains to be done. */ - ret->data = internal_malloc_size (1); - return; - } - else - ret->data = internal_malloc_size (size * total); + return; /* In this case, nothing remains to be done. 
*/ } else { @@ -523,13 +520,10 @@ pack_s_internal (gfc_array_char *ret, const gfc_array_char *array, ret->offset = 0; + ret->data = internal_malloc_size (size * total); + if (total == 0) - { - ret->data = internal_malloc_size (1); - return; - } - else - ret->data = internal_malloc_size (size * total); + return; } rstride0 = GFC_DESCRIPTOR_STRIDE_BYTES(ret,0); diff --git a/libgfortran/intrinsics/spread_generic.c b/libgfortran/intrinsics/spread_generic.c index 29671ce4c86..2eeb24b8444 100644 --- a/libgfortran/intrinsics/spread_generic.c +++ b/libgfortran/intrinsics/spread_generic.c @@ -100,13 +100,10 @@ spread_internal (gfc_array_char *ret, const gfc_array_char *source, GFC_DIMENSION_SET(ret->dim[n], 0, ub, stride); } ret->offset = 0; - if (rs > 0) - ret->data = internal_malloc_size (rs * size); - else - { - ret->data = internal_malloc_size (1); - return; - } + ret->data = internal_malloc_size (rs * size); + + if (rs <= 0) + return; } else { diff --git a/libgfortran/m4/eoshift1.m4 b/libgfortran/m4/eoshift1.m4 index be9b1008a60..339e1d89965 100644 --- a/libgfortran/m4/eoshift1.m4 +++ b/libgfortran/m4/eoshift1.m4 @@ -89,7 +89,6 @@ eoshift1 (gfc_array_char * const restrict ret, { int i; - ret->data = internal_malloc_size (size * arraysize); ret->offset = 0; ret->dtype = array->dtype; for (i = 0; i < GFC_DESCRIPTOR_RANK (array); i++) @@ -107,10 +106,8 @@ eoshift1 (gfc_array_char * const restrict ret, GFC_DIMENSION_SET(ret->dim[i], 0, ub, str); } - if (arraysize > 0) - ret->data = internal_malloc_size (size * arraysize); - else - ret->data = internal_malloc_size (1); + /* internal_malloc_size allocates a single byte for zero size. */ + ret->data = internal_malloc_size (size * arraysize); } else if (unlikely (compile_options.bounds_check)) diff --git a/libgfortran/m4/eoshift3.m4 b/libgfortran/m4/eoshift3.m4 index 6fa3bd2f7dc..1c19575ffb8 100644 --- a/libgfortran/m4/eoshift3.m4 +++ b/libgfortran/m4/eoshift3.m4 @@ -108,10 +108,8 @@ eoshift3 (gfc_array_char * const restrict ret, GFC_DIMENSION_SET(ret->dim[i], 0, ub, str); } - if (arraysize > 0) - ret->data = internal_malloc_size (size * arraysize); - else - ret->data = internal_malloc_size (1); + /* internal_malloc_size allocates a single byte for zero size. */ + ret->data = internal_malloc_size (size * arraysize); } else if (unlikely (compile_options.bounds_check)) diff --git a/libgfortran/m4/pack.m4 b/libgfortran/m4/pack.m4 index c5fd2fd817d..e1882d077c2 100644 --- a/libgfortran/m4/pack.m4 +++ b/libgfortran/m4/pack.m4 @@ -167,14 +167,12 @@ pack_'rtype_code` ('rtype` *ret, const 'rtype` *array, GFC_DIMENSION_SET(ret->dim[0], 0, total-1, 1); ret->offset = 0; + + /* internal_malloc_size allocates a single byte for zero size. */ + ret->data = internal_malloc_size (sizeof ('rtype_name`) * total); + if (total == 0) - { - /* In this case, nothing remains to be done. */ - ret->data = internal_malloc_size (1); - return; - } - else - ret->data = internal_malloc_size (sizeof ('rtype_name`) * total); + return; } else { diff --git a/libgfortran/m4/spread.m4 b/libgfortran/m4/spread.m4 index 5e73d97423a..89a2e65297c 100644 --- a/libgfortran/m4/spread.m4 +++ b/libgfortran/m4/spread.m4 @@ -101,13 +101,11 @@ spread_'rtype_code` ('rtype` *ret, const 'rtype` *source, GFC_DIMENSION_SET(ret->dim[n], 0, ub, stride); } ret->offset = 0; - if (rs > 0) - ret->data = internal_malloc_size (rs * sizeof('rtype_name`)); - else - { - ret->data = internal_malloc_size (1); - return; - } + + /* internal_malloc_size allocates a single byte for zero size. 
*/ + ret->data = internal_malloc_size (rs * sizeof('rtype_name`)); + if (rs <= 0) + return; } else { diff --git a/libgfortran/runtime/memory.c b/libgfortran/runtime/memory.c index 5f710849e52..a26d9e59efa 100644 --- a/libgfortran/runtime/memory.c +++ b/libgfortran/runtime/memory.c @@ -54,8 +54,8 @@ get_mem (size_t n) void * internal_malloc_size (size_t size) { - if (size == 0) - return NULL; + if (unlikely (size == 0)) + size = 1; return get_mem (size); } diff --git a/libgo/Makefile.am b/libgo/Makefile.am index 761a11ad265..26a6e78d64f 100644 --- a/libgo/Makefile.am +++ b/libgo/Makefile.am @@ -1619,7 +1619,7 @@ GOTESTFLAGS = # Check a package. CHECK = \ - GC="$(GOC) -L `${PWD_COMMAND}` -L `${PWD_COMMAND}`/.libs"; \ + GC="$(GOC) $(GOCFLAGS) -L `${PWD_COMMAND}` -L `${PWD_COMMAND}`/.libs"; \ export GC; \ RUNTESTFLAGS="$(RUNTESTFLAGS)"; \ export RUNTESTFLAGS; \ diff --git a/libgo/Makefile.in b/libgo/Makefile.in index 8637ec41b7d..5b33859becf 100644 --- a/libgo/Makefile.in +++ b/libgo/Makefile.in @@ -1937,7 +1937,7 @@ GOTESTFLAGS = # Check a package. CHECK = \ - GC="$(GOC) -L `${PWD_COMMAND}` -L `${PWD_COMMAND}`/.libs"; \ + GC="$(GOC) $(GOCFLAGS) -L `${PWD_COMMAND}` -L `${PWD_COMMAND}`/.libs"; \ export GC; \ RUNTESTFLAGS="$(RUNTESTFLAGS)"; \ export RUNTESTFLAGS; \ diff --git a/libgo/config/libtool.m4 b/libgo/config/libtool.m4 index a546739eb4e..1a667d31a5a 100644 --- a/libgo/config/libtool.m4 +++ b/libgo/config/libtool.m4 @@ -1296,7 +1296,7 @@ s390*-*linux*|s390*-*tpf*|sparc*-*linux*) case $lt_cv_prog_gnu_ld in yes*) case $host in - i?86-*-solaris*) + i?86-*-solaris* | x86_64-*-solaris2.1[[0-9]]*) LD="${LD-ld} -m elf_x86_64" ;; sparc*-*-solaris*) diff --git a/libgo/configure b/libgo/configure index 4bf5a2e6096..607533d0e38 100755 --- a/libgo/configure +++ b/libgo/configure @@ -6422,7 +6422,7 @@ $as_echo "$lt_cv_cc_needs_belf" >&6; } case $lt_cv_prog_gnu_ld in yes*) case $host in - i?86-*-solaris*) + i?86-*-solaris* | x86_64-*-solaris2.1[0-9]*) LD="${LD-ld} -m elf_x86_64" ;; sparc*-*-solaris*) diff --git a/libgo/go/json/scanner_test.go b/libgo/go/json/scanner_test.go index 0d4de3246dd..df87c716aff 100644 --- a/libgo/go/json/scanner_test.go +++ b/libgo/go/json/scanner_test.go @@ -252,6 +252,9 @@ func genArray(n int) []interface{} { if f > n { f = n } + if n > 0 && f == 0 { + f = 1 + } x := make([]interface{}, int(f)) for i := range x { x[i] = genValue(((i+1)*n)/f - (i*n)/f) diff --git a/libiberty/ChangeLog b/libiberty/ChangeLog index 2f8e73497d9..cd71565e582 100644 --- a/libiberty/ChangeLog +++ b/libiberty/ChangeLog @@ -1,3 +1,8 @@ +2011-07-04 Jason Merrill <jason@redhat.com> + + * cp-demangle.c (d_expression): Handle 'this'. + (d_print_comp) [DEMANGLE_COMPONENT_FUNCTION_PARAM]: Likewise. + 2011-07-01 Joel Brobecker <brobecker@adacore.com> * filename_cmp.c (filename_cmp, filename_ncmp): Add handling of diff --git a/libiberty/cp-demangle.c b/libiberty/cp-demangle.c index f1363226c56..29badbba187 100644 --- a/libiberty/cp-demangle.c +++ b/libiberty/cp-demangle.c @@ -2738,10 +2738,18 @@ d_expression (struct d_info *di) /* Function parameter used in a late-specified return type. */ int index; d_advance (di, 2); - index = d_compact_number (di); - if (index < 0) - return NULL; - + if (d_peek_char (di) == 'T') + { + /* 'this' parameter. 
*/ + d_advance (di, 1); + index = 0; + } + else + { + index = d_compact_number (di) + 1; + if (index == 0) + return NULL; + } return d_make_function_param (di, index); } else if (IS_DIGIT (peek) @@ -4400,9 +4408,17 @@ d_print_comp (struct d_print_info *dpi, int options, return; case DEMANGLE_COMPONENT_FUNCTION_PARAM: - d_append_string (dpi, "{parm#"); - d_append_num (dpi, dc->u.s_number.number + 1); - d_append_char (dpi, '}'); + { + long num = dc->u.s_number.number; + if (num == 0) + d_append_string (dpi, "this"); + else + { + d_append_string (dpi, "{parm#"); + d_append_num (dpi, num); + d_append_char (dpi, '}'); + } + } return; case DEMANGLE_COMPONENT_GLOBAL_CONSTRUCTORS: diff --git a/libiberty/testsuite/demangle-expected b/libiberty/testsuite/demangle-expected index 4980cf1728d..2dc74beb9e2 100644 --- a/libiberty/testsuite/demangle-expected +++ b/libiberty/testsuite/demangle-expected @@ -3905,6 +3905,10 @@ decltype ({parm#1}+{parm#2}) add<int, double>(int, double) --format=gnu-v3 _Z4add3IidEDTclL_Z1gEfp_fp0_EET_T0_ decltype (g({parm#1}, {parm#2})) add3<int, double>(int, double) +# 'this' test +--format=gnu-v3 +_ZN1A1fIiEEDTcldtdtdefpT1b1fIT_EEEv +decltype ((((*this).b).(f<int>))()) A::f<int>() # new (2008) built in types test --format=gnu-v3 _Z1fDfDdDeDhDsDi diff --git a/libjava/ChangeLog b/libjava/ChangeLog index dfaa803c0a1..cac38751080 100644 --- a/libjava/ChangeLog +++ b/libjava/ChangeLog @@ -1,3 +1,10 @@ +2011-07-07 Rainer Orth <ro@CeBiTec.Uni-Bielefeld.DE> + + PR target/39150 + * configure.host (x86_64-*): Add -Usun to libgcj_flags. + (x86_64-*-solaris2.1[0-9]*): New case. + (i?86-*-solaris2*): Also accept x86_64-*-solaris2.1[0-9]*. + 2011-06-21 Rainer Orth <ro@CeBiTec.Uni-Bielefeld.DE> PR libgcj/49314 diff --git a/libjava/configure.host b/libjava/configure.host index fab8c603a81..105785e5434 100644 --- a/libjava/configure.host +++ b/libjava/configure.host @@ -141,6 +141,9 @@ case "${host}" in libgcj_flags="${libgcj_flags} -ffloat-store" ;; esac libgcj_flags="${libgcj_flags} -fomit-frame-pointer" + # On Solaris we have defined 'sun' which later conflicts with + # namespace usage. So to work this away we use the below undefine. + libgcj_flags="${libgcj_flags} -Usun" libgcj_cxxflags= libgcj_cflags= DIVIDESPEC=-fno-use-divide-subroutine @@ -285,6 +288,10 @@ EOF sysdeps_dir=i386 DIVIDESPEC=-f%{m32:no-}%{!m32:%{!m64:no-}}%{m64:}use-divide-subroutine ;; + x86_64-*-solaris2.1[0-9]* ) + sysdeps_dir=i386 + DIVIDESPEC=-f%{m32:no-}use-divide-subroutine + ;; mips-sgi-irix6* ) can_unwind_signal=yes sysdeps_dir=mips @@ -322,7 +329,7 @@ EOF DIVIDESPEC=-fuse-divide-subroutine CHECKREFSPEC=-fcheck-references ;; - i?86-*-solaris2*) + i?86-*-solaris2* | x86_64-*-solaris2.1[0-9]*) can_unwind_signal=yes ;; *-*-freebsd*) diff --git a/libmudflap/ChangeLog b/libmudflap/ChangeLog index 177c01865d3..d755ddb349a 100644 --- a/libmudflap/ChangeLog +++ b/libmudflap/ChangeLog @@ -1,3 +1,33 @@ +2011-07-07 Rainer Orth <ro@CeBiTec.Uni-Bielefeld.DE> + + PR libmudflap/49550 + * mf-runtime.c (__wrap_main) [__sun__ && __svr4__]: Don't register + stdin, stdout, stderr. + Register __ctype, __ctype_mask. + + * configure.ac: Check for mmap64. + Check for rawmemchr, stpcpy, mempcpy. + * configure: Regenerate. + * config.h.in: Regenerate. + * mf-hooks1.c [HAVE_MMAP64] (__mf_0fn_mmap64): New function. + (mmap64): New wrapper function. + * mf-impl.h (__mf_dynamic_index) [HAVE_MMAP64]: Add dyn_mmap64. + * mf-runtime.c (__mf_dynamic) [HAVE_MMAP64]: Handle mmap64. 
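The cp-demangle.c change above makes the fpT token demangle as 'this' rather than a {parm#N} placeholder. A hedged usage sketch, assuming libiberty's demangle.h is available and the program is linked with -liberty; the symbol and expected output come from the new demangle-expected entry:

#include <stdio.h>
#include <stdlib.h>
#include <demangle.h>

int
main (void)
{
  const char *sym = "_ZN1A1fIiEEDTcldtdtdefpT1b1fIT_EEEv";
  char *out = cplus_demangle (sym, DMGL_PARAMS | DMGL_TYPES | DMGL_AUTO);

  /* With this patch applied, the expected result is
     decltype ((((*this).b).(f<int>))()) A::f<int>()
     where fpT in the mangled name is now printed as 'this'.  */
  printf ("%s\n", out != NULL ? out : "(demangling failed)");
  free (out);
  return 0;
}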
+ + * mf-hooks2.c [HAVE_GETMNTENT && HAVE_SYS_MNTTAB_H]: Implement + getmntent wrapper. + + * mf-hooks3.c (_REENTRANT): Define. + + * testsuite/libmudflap.c/heap-scalestress.c (SCALE): Reduce to 10000. + + * testsuite/libmudflap.c/pass-stratcliff.c: Include ../config.h. + (MIN): Define. + Use HAVE_RAWMEMCHR, HAVE_STPCPY, HAVE_MEMPCPY as guards. + + * testsuite/libmudflap.c/pass47-frag.c: Expect __ctype warning on + *-*-solaris2.*. + 2011-07-01 Rainer Orth <ro@CeBiTec.Uni-Bielefeld.DE> PR libmudflap/49549 diff --git a/libmudflap/config.h.in b/libmudflap/config.h.in index fd0ccb0c27e..9e4e1910dfe 100644 --- a/libmudflap/config.h.in +++ b/libmudflap/config.h.in @@ -135,12 +135,18 @@ /* Define to 1 if you have the <memory.h> header file. */ #undef HAVE_MEMORY_H +/* Define to 1 if you have the `mempcpy' function. */ +#undef HAVE_MEMPCPY + /* Define to 1 if you have the `memrchr' function. */ #undef HAVE_MEMRCHR /* Define to 1 if you have the `mmap' function. */ #undef HAVE_MMAP +/* Define to 1 if you have the `mmap64' function. */ +#undef HAVE_MMAP64 + /* Define to 1 if you have the <mntent.h> header file. */ #undef HAVE_MNTENT_H @@ -159,6 +165,9 @@ /* Define to 1 if you have the <pwd.h> header file. */ #undef HAVE_PWD_H +/* Define to 1 if you have the `rawmemchr' function. */ +#undef HAVE_RAWMEMCHR + /* Define to 1 if you have the `setbuf' function. */ #undef HAVE_SETBUF @@ -195,6 +204,9 @@ /* Define to 1 if you have the <stdlib.h> header file. */ #undef HAVE_STDLIB_H +/* Define to 1 if you have the `stpcpy' function. */ +#undef HAVE_STPCPY + /* Define to 1 if you have the <strings.h> header file. */ #undef HAVE_STRINGS_H diff --git a/libmudflap/configure b/libmudflap/configure index ebf8f2d9ce4..d2b94187932 100755 --- a/libmudflap/configure +++ b/libmudflap/configure @@ -4455,6 +4455,17 @@ _ACEOF fi done +for ac_func in mmap64 +do : + ac_fn_c_check_func "$LINENO" "mmap64" "ac_cv_func_mmap64" +if test "x$ac_cv_func_mmap64" = x""yes; then : + cat >>confdefs.h <<_ACEOF +#define HAVE_MMAP64 1 +_ACEOF + +fi +done + for ac_func in __libc_freeres do : ac_fn_c_check_func "$LINENO" "__libc_freeres" "ac_cv_func___libc_freeres" @@ -4466,6 +4477,19 @@ _ACEOF fi done +for ac_func in rawmemchr stpcpy mempcpy +do : + as_ac_var=`$as_echo "ac_cv_func_$ac_func" | $as_tr_sh` +ac_fn_c_check_func "$LINENO" "$ac_func" "$as_ac_var" +eval as_val=\$$as_ac_var + if test "x$as_val" = x""yes; then : + cat >>confdefs.h <<_ACEOF +#define `$as_echo "HAVE_$ac_func" | $as_tr_cpp` 1 +_ACEOF + +fi +done + cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
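The new configure check above exists because large-file programs on 32-bit hosts call mmap64 directly, bypassing the existing mmap wrapper. A hedged sketch (hypothetical file name) of the kind of call the mmap64 wrapper added in mf-hooks1.c is meant to intercept; it only applies on hosts that provide the 64-bit-offset entry point, such as 32-bit Solaris or GNU/Linux with the transitional large-file API:

#define _LARGEFILE64_SOURCE
#include <sys/types.h>
#include <sys/mman.h>
#include <fcntl.h>
#include <unistd.h>

int
main (void)
{
  int fd = open ("bigfile", O_RDONLY);
  void *p;
  off64_t offset;

  if (fd < 0)
    return 1;

  /* A page-aligned offset beyond 2 GiB cannot be expressed in a 32-bit
     off_t, so such mappings go through mmap64 and, without the new wrapper,
     would never be registered with the mudflap runtime.  */
  offset = (off64_t) 1 << 31;
  p = mmap64 (NULL, 4096, PROT_READ, MAP_PRIVATE, fd, offset);

  if (p != MAP_FAILED)
    munmap (p, 4096);
  close (fd);
  return 0;
}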
*/ @@ -10583,7 +10607,7 @@ else lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2 lt_status=$lt_dlunknown cat > conftest.$ac_ext <<_LT_EOF -#line 10586 "configure" +#line 10610 "configure" #include "confdefs.h" #if HAVE_DLFCN_H @@ -10689,7 +10713,7 @@ else lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2 lt_status=$lt_dlunknown cat > conftest.$ac_ext <<_LT_EOF -#line 10692 "configure" +#line 10716 "configure" #include "confdefs.h" #if HAVE_DLFCN_H diff --git a/libmudflap/configure.ac b/libmudflap/configure.ac index b61b56f2708..f8b99955743 100644 --- a/libmudflap/configure.ac +++ b/libmudflap/configure.ac @@ -75,7 +75,9 @@ AC_CHECK_FUNCS(getservent getservbyname getservbyport getaddrinfo gai_strerror) AC_CHECK_FUNCS(getprotoent getprotobyname getprotobynumber) AC_CHECK_FUNCS(getmntent setmntent addmntent) AC_CHECK_FUNCS(inet_ntoa mmap munmap) +AC_CHECK_FUNCS(mmap64) AC_CHECK_FUNCS(__libc_freeres) +AC_CHECK_FUNCS(rawmemchr stpcpy mempcpy) AC_TRY_COMPILE([#include <sys/types.h> #include <sys/ipc.h> diff --git a/libmudflap/mf-hooks1.c b/libmudflap/mf-hooks1.c index 1e46a650947..3dd332e12c6 100644 --- a/libmudflap/mf-hooks1.c +++ b/libmudflap/mf-hooks1.c @@ -1,5 +1,5 @@ /* Mudflap: narrow-pointer bounds-checking by tree rewriting. - Copyright (C) 2002, 2003, 2004, 2009 Free Software Foundation, Inc. + Copyright (C) 2002, 2003, 2004, 2009, 2011 Free Software Foundation, Inc. Contributed by Frank Ch. Eigler <fche@redhat.com> and Graydon Hoare <graydon@redhat.com> @@ -414,6 +414,61 @@ WRAPPER(int , munmap, void *start, size_t length) #endif /* HAVE_MMAP */ +#ifdef HAVE_MMAP64 +#if PIC +/* A special bootstrap variant. */ +void * +__mf_0fn_mmap64 (void *start, size_t l, int prot, int f, int fd, off64_t off) +{ + return (void *) -1; +} +#endif + + +#undef mmap +WRAPPER(void *, mmap64, + void *start, size_t length, int prot, + int flags, int fd, off64_t offset) +{ + DECLARE(void *, mmap64, void *, size_t, int, + int, int, off64_t); + void *result; + BEGIN_PROTECT (mmap64, start, length, prot, flags, fd, offset); + + result = CALL_REAL (mmap64, start, length, prot, + flags, fd, offset); + + /* + VERBOSE_TRACE ("mmap64 (%08lx, %08lx, ...) => %08lx\n", + (uintptr_t) start, (uintptr_t) length, + (uintptr_t) result); + */ + + if (result != (void *)-1) + { + /* Register each page as a heap object. Why not register it all + as a single segment? That's so that a later munmap() call + can unmap individual pages. XXX: would __MF_TYPE_GUESS make + this more automatic? */ + size_t ps = getpagesize (); + uintptr_t base = (uintptr_t) result; + uintptr_t offset; + + for (offset=0; offset<length; offset+=ps) + { + /* XXX: We could map PROT_NONE to __MF_TYPE_NOACCESS. */ + /* XXX: Unaccessed HEAP pages are reported as leaks. Is this + appropriate for unaccessed mmap pages? */ + __mf_register ((void *) CLAMPADD (base, offset), ps, + __MF_TYPE_HEAP_I, "mmap64 page"); + } + } + + return result; +} +#endif /* HAVE_MMAP64 */ + + /* This wrapper is a little different, as it's called indirectly from __mf_fini also to clean up pending allocations. */ void * diff --git a/libmudflap/mf-hooks2.c b/libmudflap/mf-hooks2.c index 77d58131bfa..c030e694035 100644 --- a/libmudflap/mf-hooks2.c +++ b/libmudflap/mf-hooks2.c @@ -2102,7 +2102,42 @@ WRAPPER2(struct mntent *, getmntent, FILE *filep) return m; } #elif defined HAVE_SYS_MNTTAB_H -/* FIXME: Implement. 
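The hunk that follows replaces this FIXME with a wrapper for the Solaris-style getmntent, which, unlike the glibc variant handled earlier in mf-hooks2.c, takes the result structure from the caller. A hedged caller-side sketch, meaningful only where sys/mnttab.h exists:

#include <stdio.h>
#include <sys/mnttab.h>

int
main (void)
{
  FILE *fp = fopen (MNTTAB, "r");   /* MNTTAB is "/etc/mnttab" on Solaris */
  struct mnttab mt;

  if (fp == NULL)
    return 1;

  /* getmntent returns 0 for each entry and -1 at end of file; the string
     fields point into storage that the next call reuses, which is why the
     wrapper unregisters the previous entry before registering the new one.  */
  while (getmntent (fp, &mt) == 0)
    printf ("%s on %s type %s\n",
            mt.mnt_special, mt.mnt_mountp, mt.mnt_fstype);

  fclose (fp);
  return 0;
}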
*/ +WRAPPER2(int, getmntent, FILE *filep, struct mnttab *mp) +{ + static struct mnttab *last = NULL; + int res; + + MF_VALIDATE_EXTENT (filep, sizeof (*filep), __MF_CHECK_WRITE, + "getmntent stream"); +#define UR(field) __mf_unregister(last->field, strlen (last->field)+1, __MF_TYPE_STATIC) + if (last) + { + UR (mnt_special); + UR (mnt_mountp); + UR (mnt_fstype); + UR (mnt_mntopts); + UR (mnt_time); + __mf_unregister (last, sizeof (*last), __MF_TYPE_STATIC); + } +#undef UR + + res = getmntent (filep, mp); + last = mp; + +#define R(field) __mf_register(last->field, strlen (last->field)+1, __MF_TYPE_STATIC, "mntent " #field) + if (mp) + { + R (mnt_special); + R (mnt_mountp); + R (mnt_fstype); + R (mnt_mntopts); + R (mnt_time); + __mf_register (last, sizeof (*last), __MF_TYPE_STATIC, "getmntent result"); + } +#undef R + + return res; +} #endif #endif diff --git a/libmudflap/mf-hooks3.c b/libmudflap/mf-hooks3.c index 79a5d5e8d14..3a26224d7fb 100644 --- a/libmudflap/mf-hooks3.c +++ b/libmudflap/mf-hooks3.c @@ -1,5 +1,5 @@ /* Mudflap: narrow-pointer bounds-checking by tree rewriting. - Copyright (C) 2002, 2003, 2004, 2005, 2009 + Copyright (C) 2002, 2003, 2004, 2005, 2009, 2011 Free Software Foundation, Inc. Contributed by Frank Ch. Eigler <fche@redhat.com> and Graydon Hoare <graydon@redhat.com> @@ -44,6 +44,7 @@ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see #define _ALL_SOURCE #define _LARGE_FILE_API #define _XOPEN_SOURCE_EXTENDED 1 +#define _REENTRANT #include <string.h> #include <stdio.h> diff --git a/libmudflap/mf-impl.h b/libmudflap/mf-impl.h index 6f0268b86dd..865079c239f 100644 --- a/libmudflap/mf-impl.h +++ b/libmudflap/mf-impl.h @@ -1,6 +1,6 @@ /* Implementation header for mudflap runtime library. Mudflap: narrow-pointer bounds-checking by tree rewriting. - Copyright (C) 2002, 2003, 2004, 2009 Free Software Foundation, Inc. + Copyright (C) 2002, 2003, 2004, 2009, 2011 Free Software Foundation, Inc. Contributed by Frank Ch. Eigler <fche@redhat.com> and Graydon Hoare <graydon@redhat.com> @@ -212,6 +212,9 @@ extern struct __mf_dynamic_entry __mf_dynamic[]; enum __mf_dynamic_index { dyn_calloc, dyn_free, dyn_malloc, dyn_mmap, +#ifdef HAVE_MMAP64 + dyn_mmap64, +#endif dyn_munmap, dyn_realloc, dyn_INITRESOLVE, /* Marker for last init-time resolution. */ #ifdef LIBMUDFLAPTH diff --git a/libmudflap/mf-runtime.c b/libmudflap/mf-runtime.c index 6892804ca72..1036ccc93db 100644 --- a/libmudflap/mf-runtime.c +++ b/libmudflap/mf-runtime.c @@ -666,6 +666,9 @@ struct __mf_dynamic_entry __mf_dynamic [] = {NULL, "free", NULL}, {NULL, "malloc", NULL}, {NULL, "mmap", NULL}, +#ifdef HAVE_MMAP64 + {NULL, "mmap64", NULL}, +#endif {NULL, "munmap", NULL}, {NULL, "realloc", NULL}, {NULL, "DUMMY", NULL}, /* dyn_INITRESOLVE */ @@ -781,12 +784,22 @@ __wrap_main (int argc, char* argv[]) __mf_register (& errno, sizeof (errno), __MF_TYPE_STATIC, "errno area"); +#if !(defined(__sun__) && defined(__svr4__)) + /* Conflicts with the automatic registration of __iob[]. */ __mf_register (stdin, sizeof (*stdin), __MF_TYPE_STATIC, "stdin"); __mf_register (stdout, sizeof (*stdout), __MF_TYPE_STATIC, "stdout"); __mf_register (stderr, sizeof (*stderr), __MF_TYPE_STATIC, "stderr"); +#endif /* Make some effort to register ctype.h static arrays. */ - /* XXX: e.g., on Solaris, may need to register __ctype, _ctype, __ctype_mask, __toupper, etc. */ +#if defined(__sun__) && defined(__svr4__) + /* __ctype[] is declared without size, but MB_CUR_MAX is the last + member. 
There seems to be no proper way to determine the size. */
+ __mf_register (__ctype, &MB_CUR_MAX - &__ctype[0] + 1, __MF_TYPE_STATIC, "__ctype");
+ /* __ctype_mask points at _C_masks[1]. The size can only determined
+ using nm on libc.so.1. */
+ __mf_register (__ctype_mask - 1, 1028, __MF_TYPE_STATIC, "_C_masks");
+#endif
 /* On modern Linux GLIBC, these are thread-specific and changeable, and are dealt with in mf-hooks2.c. */
 }
diff --git a/libmudflap/testsuite/libmudflap.c/heap-scalestress.c b/libmudflap/testsuite/libmudflap.c/heap-scalestress.c
index 2d51731d611..6e7813ba2ac 100644
--- a/libmudflap/testsuite/libmudflap.c/heap-scalestress.c
+++ b/libmudflap/testsuite/libmudflap.c/heap-scalestress.c
@@ -8,7 +8,7 @@
 #include <unistd.h>
 #ifndef SCALE
-#define SCALE 100000
+#define SCALE 10000
 #endif
diff --git a/libmudflap/testsuite/libmudflap.c/pass-stratcliff.c b/libmudflap/testsuite/libmudflap.c/pass-stratcliff.c
index 985c796e13c..cf5430657ff 100644
--- a/libmudflap/testsuite/libmudflap.c/pass-stratcliff.c
+++ b/libmudflap/testsuite/libmudflap.c/pass-stratcliff.c
@@ -1,5 +1,6 @@
 /* Test for string function add boundaries of usable memory.
- Copyright (C) 1996,1997,1999,2000,2001,2002 Free Software Foundation, Inc.
+ Copyright (C) 1996, 1997, 1999, 2000, 2001, 2002, 2011
+ Free Software Foundation, Inc.
 This file is part of the GNU C Library.
 Contributed by Ulrich Drepper <drepper@cygnus.com>, 1996.
@@ -25,6 +26,8 @@ test the real implementation. */
 #undef __USE_STRING_INLINES
+#include "../config.h"
+
 #include <errno.h>
 #include <stdio.h>
 #include <string.h>
@@ -36,6 +39,10 @@
 #define MAX(a, b) ((a) > (b) ? (a) : (b))
 #endif
+#ifndef MIN
+#define MIN(a, b) ((a) < (b) ? (a) : (b))
+#endif
+
 int
 main (int argc, char *argv[])
 {
@@ -153,7 +160,7 @@ main (int argc, char *argv[])
 }
 }
-#if !defined __FreeBSD__ && !(defined __sun__ && defined __svr4__)
+#ifdef HAVE_RAWMEMCHR
 /* rawmemchr test */
 for (outer = size - 1; outer >= MAX (0, size - 128); --outer)
 {
@@ -250,7 +257,7 @@ main (int argc, char *argv[])
 }
 }
-#ifndef __FreeBSD__ && !(defined __sun__ && defined __svr4__)
+#ifdef HAVE_STPCPY
 /* stpcpy test */
 for (outer = size - 1; outer >= MAX (0, size - 128); --outer)
 {
@@ -302,7 +309,7 @@ main (int argc, char *argv[])
 result = 1;
 }
-#if !defined __FreeBSD__ && !(defined __sun__ && defined __svr4__)
+#ifdef HAVE_MEMPCPY
 /* mempcpy test */
 for (outer = size - 1; outer >= MAX (0, size - 128); --outer)
 for (inner = 0; inner < size - outer; ++inner)
diff --git a/libmudflap/testsuite/libmudflap.c/pass47-frag.c b/libmudflap/testsuite/libmudflap.c/pass47-frag.c
index 9e4ac50ef3f..71175827209 100644
--- a/libmudflap/testsuite/libmudflap.c/pass47-frag.c
+++ b/libmudflap/testsuite/libmudflap.c/pass47-frag.c
@@ -8,3 +8,5 @@ int main ()
 tolower (buf[4]) == 'o' && tolower ('X') == 'x' && isdigit (buf[3])) == 0 && isalnum ('4'));
 }
+
+/* { dg-warning "cannot track unknown size extern .__ctype." "Solaris __ctype declared without size" { target *-*-solaris2.* } 0 } */
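
A few illustrative sketches for the less obvious pieces of this patch follow; they are not part of the patch itself.

The new mmap64 wrapper earlier in the patch registers the mapping one page at a time rather than as a single segment, so that a later munmap of a sub-range can drop exactly the pages it covers. The standalone sketch below mirrors that loop; it assumes a platform providing MAP_ANONYMOUS, uses plain mmap instead of mmap64 to stay portable, and register_page is a hypothetical stand-in for libmudflap's internal __mf_register, which also uses CLAMPADD to guard the base+offset addition against overflow.

/* Minimal sketch, not libmudflap itself: register an mmap'ed region
   page by page, as the new mmap64 wrapper does.  register_page is a
   made-up stand-in for __mf_register.  */
#include <stdio.h>
#include <stdint.h>
#include <sys/mman.h>
#include <unistd.h>

static void
register_page (void *p, size_t len, const char *name)
{
  printf ("register %p + %zu (%s)\n", p, len, name);
}

int
main (void)
{
  size_t length = 3 * 4096 + 123;   /* deliberately not page-aligned */
  void *result = mmap (NULL, length, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (result == MAP_FAILED)
    return 1;

  /* One registration per page: a later munmap () of part of the range
     can then unregister exactly the pages it covers.  */
  size_t ps = (size_t) sysconf (_SC_PAGESIZE);
  uintptr_t base = (uintptr_t) result;
  size_t offset;
  for (offset = 0; offset < length; offset += ps)
    register_page ((void *) (base + offset), ps, "mmap page");

  munmap (result, length);
  return 0;
}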
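The new getmntent wrapper handles the Solaris <sys/mnttab.h> interface, where getmntent takes a caller-supplied struct mnttab and returns int, unlike the glibc struct mntent variant wrapped just before it. Its bookkeeping idea, unregister the string fields exposed by the previous call and then register the fields the current call just filled in, is sketched roughly below; struct entry, track, untrack and next_entry are invented stand-ins for struct mnttab, __mf_register, __mf_unregister and the real getmntent, so nothing here reads /etc/mnttab.

/* Rough sketch of the register/unregister pattern in the new Solaris
   getmntent wrapper; all names here are invented for illustration.  */
#include <stdio.h>
#include <string.h>

struct entry { char *special; char *mountp; };  /* stand-in for struct mnttab */

static void track (void *p, size_t n, const char *what)
{ printf ("register   %p + %zu (%s)\n", p, n, what); }
static void untrack (void *p, size_t n)
{ printf ("unregister %p + %zu\n", p, n); }

/* Fake data source standing in for getmntent (filep, mp).  */
static int
next_entry (struct entry *mp, int i)
{
  static char *specials[] = { "/dev/dsk/c0t0d0s0", "swap" };
  static char *mountps[]  = { "/", "/tmp" };
  if (i >= 2)
    return -1;
  mp->special = specials[i];
  mp->mountp = mountps[i];
  return 0;
}

int
main (void)
{
  static struct entry *last;   /* what the previous call exposed, like the wrapper's static */
  struct entry m;
  int i;

  for (i = 0; ; i++)
    {
      if (last)   /* drop the strings the previous call handed out */
        {
          untrack (last->special, strlen (last->special) + 1);
          untrack (last->mountp, strlen (last->mountp) + 1);
        }
      if (next_entry (&m, i) != 0)
        break;
      last = &m;  /* track what this call just filled in */
      track (m.special, strlen (m.special) + 1, "special");
      track (m.mountp, strlen (m.mountp) + 1, "mountp");
    }
  return 0;
}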
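The Solaris-specific registration in mf-runtime.c leans on pointer arithmetic: __ctype[] is declared without a size, but since (per the comment in the patch) MB_CUR_MAX sits at a known slot near its end, the registrable extent is the distance from the first element to that last known element plus one. A tiny worked example of the same arithmetic follows; the array name and the index 520 are illustrative only, not the actual Solaris layout.

/* Worked example of the extent computation used for __ctype; table and
   the index 520 are invented, not the real Solaris values.  */
#include <stdio.h>

unsigned char table[521];       /* plays the role of the unsized extern __ctype[] */
#define LAST_SLOT table[520]    /* plays the role of MB_CUR_MAX */

int
main (void)
{
  /* &last - &first + 1 elements; for an unsigned char array this is
     also the size in bytes, which is what a registration call needs.  */
  size_t extent = (size_t) (&LAST_SLOT - &table[0] + 1);
  printf ("registrable extent: %zu bytes\n", extent);   /* prints 521 */
  return 0;
}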
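Finally, the pass-stratcliff.c change replaces operating-system checks with the HAVE_RAWMEMCHR, HAVE_STPCPY and HAVE_MEMPCPY macros that the new AC_CHECK_FUNCS lines put into config.h, so each test block compiles exactly when configure detected the function. A minimal sketch of that pattern is below; it assumes the macro is supplied on the command line (for example cc -DHAVE_STPCPY=1 demo.c, where demo.c is a made-up file name) instead of by an actual generated config.h, and that the C library declares stpcpy whenever the macro is defined.

/* Sketch of a configure-driven feature guard; HAVE_STPCPY comes from
   config.h in the real tree, here from -DHAVE_STPCPY=1.  */
#define _GNU_SOURCE 1   /* make sure glibc declares stpcpy */
#include <stdio.h>
#include <string.h>

int
main (void)
{
#ifdef HAVE_STPCPY
  char buf[16];
  /* stpcpy returns a pointer to the terminating NUL it wrote, which is
     what the boundary checks in the test rely on.  */
  char *end = stpcpy (buf, "mudflap");
  printf ("copied %d characters\n", (int) (end - buf));
#else
  puts ("stpcpy not detected; test body skipped");
#endif
  return 0;
}

Keying the tests off function availability rather than off __FreeBSD__ or __sun__/__svr4__ keeps the testsuite honest on any target where configure finds (or misses) these GNU extensions.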