Diffstat (limited to 'gcc/config/ia64/ia64.c')
-rw-r--r-- | gcc/config/ia64/ia64.c | 547
1 file changed, 311 insertions, 236 deletions
diff --git a/gcc/config/ia64/ia64.c b/gcc/config/ia64/ia64.c
index 41adc4adc96..6ef22d99705 100644
--- a/gcc/config/ia64/ia64.c
+++ b/gcc/config/ia64/ia64.c
@@ -1,5 +1,5 @@
 /* Definitions of target machine for GNU compiler.
-   Copyright (C) 1999-2014 Free Software Foundation, Inc.
+   Copyright (C) 1999-2015 Free Software Foundation, Inc.
    Contributed by James E. Wilson <wilson@cygnus.com> and
                   David Mosberger <davidm@hpl.hp.com>.
@@ -24,7 +24,17 @@ along with GCC; see the file COPYING3.  If not see
 #include "coretypes.h"
 #include "tm.h"
 #include "rtl.h"
+#include "hash-set.h"
+#include "machmode.h"
+#include "vec.h"
+#include "double-int.h"
+#include "input.h"
+#include "alias.h"
+#include "symtab.h"
+#include "wide-int.h"
+#include "inchash.h"
 #include "tree.h"
+#include "fold-const.h"
 #include "stringpool.h"
 #include "stor-layout.h"
 #include "calls.h"
@@ -37,11 +47,29 @@ along with GCC; see the file COPYING3.  If not see
 #include "insn-attr.h"
 #include "flags.h"
 #include "recog.h"
+#include "hashtab.h"
+#include "function.h"
+#include "statistics.h"
+#include "real.h"
+#include "fixed-value.h"
+#include "expmed.h"
+#include "dojump.h"
+#include "explow.h"
+#include "emit-rtl.h"
+#include "stmt.h"
 #include "expr.h"
+#include "insn-codes.h"
 #include "optabs.h"
 #include "except.h"
-#include "function.h"
 #include "ggc.h"
+#include "predict.h"
+#include "dominance.h"
+#include "cfg.h"
+#include "cfgrtl.h"
+#include "cfganal.h"
+#include "lcm.h"
+#include "cfgbuild.h"
+#include "cfgcleanup.h"
 #include "basic-block.h"
 #include "libfuncs.h"
 #include "diagnostic-core.h"
@@ -53,9 +81,6 @@ along with GCC; see the file COPYING3.  If not see
 #include "tm_p.h"
 #include "hash-table.h"
 #include "langhooks.h"
-#include "pointer-set.h"
-#include "vec.h"
-#include "basic-block.h"
 #include "tree-ssa-alias.h"
 #include "internal-fn.h"
 #include "gimple-fold.h"
@@ -74,6 +99,7 @@ along with GCC; see the file COPYING3.  If not see
 #include "reload.h"
 #include "opts.h"
 #include "dumpfile.h"
+#include "builtins.h"
 
 /* This is used for communication between ASM_OUTPUT_LABEL and
    ASM_OUTPUT_LABELREF.  */
@@ -166,26 +192,25 @@ static struct ia64_frame_info current_frame_info;
 static int emitted_frame_related_regs[number_of_ia64_frame_regs];
 
 static int ia64_first_cycle_multipass_dfa_lookahead (void);
-static void ia64_dependencies_evaluation_hook (rtx, rtx);
+static void ia64_dependencies_evaluation_hook (rtx_insn *, rtx_insn *);
 static void ia64_init_dfa_pre_cycle_insn (void);
 static rtx ia64_dfa_pre_cycle_insn (void);
-static int ia64_first_cycle_multipass_dfa_lookahead_guard (rtx);
-static bool ia64_first_cycle_multipass_dfa_lookahead_guard_spec (const_rtx);
-static int ia64_dfa_new_cycle (FILE *, int, rtx, int, int, int *);
+static int ia64_first_cycle_multipass_dfa_lookahead_guard (rtx_insn *, int);
+static int ia64_dfa_new_cycle (FILE *, int, rtx_insn *, int, int, int *);
 static void ia64_h_i_d_extended (void);
 static void * ia64_alloc_sched_context (void);
 static void ia64_init_sched_context (void *, bool);
 static void ia64_set_sched_context (void *);
 static void ia64_clear_sched_context (void *);
 static void ia64_free_sched_context (void *);
-static int ia64_mode_to_int (enum machine_mode);
+static int ia64_mode_to_int (machine_mode);
 static void ia64_set_sched_flags (spec_info_t);
-static ds_t ia64_get_insn_spec_ds (rtx);
-static ds_t ia64_get_insn_checked_ds (rtx);
+static ds_t ia64_get_insn_spec_ds (rtx_insn *);
+static ds_t ia64_get_insn_checked_ds (rtx_insn *);
 static bool ia64_skip_rtx_p (const_rtx);
-static int ia64_speculate_insn (rtx, ds_t, rtx *);
+static int ia64_speculate_insn (rtx_insn *, ds_t, rtx *);
 static bool ia64_needs_block_p (ds_t);
-static rtx ia64_gen_spec_check (rtx, rtx, ds_t);
+static rtx ia64_gen_spec_check (rtx_insn *, rtx_insn *, ds_t);
 static int ia64_spec_check_p (rtx);
 static int ia64_spec_check_src_p (rtx);
 static rtx gen_tls_get_addr (void);
@@ -205,29 +230,29 @@ static rtx gen_fr_restore_x (rtx, rtx, rtx);
 
 static void ia64_option_override (void);
 static bool ia64_can_eliminate (const int, const int);
-static enum machine_mode hfa_element_mode (const_tree, bool);
-static void ia64_setup_incoming_varargs (cumulative_args_t, enum machine_mode,
+static machine_mode hfa_element_mode (const_tree, bool);
+static void ia64_setup_incoming_varargs (cumulative_args_t, machine_mode,
                                          tree, int *, int);
-static int ia64_arg_partial_bytes (cumulative_args_t, enum machine_mode,
+static int ia64_arg_partial_bytes (cumulative_args_t, machine_mode,
                                    tree, bool);
-static rtx ia64_function_arg_1 (cumulative_args_t, enum machine_mode,
+static rtx ia64_function_arg_1 (cumulative_args_t, machine_mode,
                                 const_tree, bool, bool);
-static rtx ia64_function_arg (cumulative_args_t, enum machine_mode,
+static rtx ia64_function_arg (cumulative_args_t, machine_mode,
                               const_tree, bool);
 static rtx ia64_function_incoming_arg (cumulative_args_t,
-                                       enum machine_mode, const_tree, bool);
-static void ia64_function_arg_advance (cumulative_args_t, enum machine_mode,
+                                       machine_mode, const_tree, bool);
+static void ia64_function_arg_advance (cumulative_args_t, machine_mode,
                                        const_tree, bool);
-static unsigned int ia64_function_arg_boundary (enum machine_mode,
+static unsigned int ia64_function_arg_boundary (machine_mode,
                                                 const_tree);
 static bool ia64_function_ok_for_sibcall (tree, tree);
 static bool ia64_return_in_memory (const_tree, const_tree);
 static rtx ia64_function_value (const_tree, const_tree, bool);
-static rtx ia64_libcall_value (enum machine_mode, const_rtx);
+static rtx ia64_libcall_value (machine_mode, const_rtx);
 static bool ia64_function_value_regno_p (const unsigned int);
-static int ia64_register_move_cost (enum machine_mode, reg_class_t,
+static int ia64_register_move_cost (machine_mode, reg_class_t,
                                     reg_class_t);
-static int ia64_memory_move_cost (enum machine_mode mode, reg_class_t,
+static int ia64_memory_move_cost (machine_mode mode, reg_class_t,
                                   bool);
 static bool ia64_rtx_costs (rtx, int, int, int, int *, bool);
 static int ia64_unspec_may_trap_p (const_rtx, unsigned);
@@ -251,17 +276,17 @@ static void ia64_print_operand_address (FILE *, rtx);
 static bool ia64_print_operand_punct_valid_p (unsigned char code);
 
 static int ia64_issue_rate (void);
-static int ia64_adjust_cost_2 (rtx, int, rtx, int, dw_t);
+static int ia64_adjust_cost_2 (rtx_insn *, int, rtx_insn *, int, dw_t);
 static void ia64_sched_init (FILE *, int, int);
 static void ia64_sched_init_global (FILE *, int, int);
 static void ia64_sched_finish_global (FILE *, int);
 static void ia64_sched_finish (FILE *, int);
-static int ia64_dfa_sched_reorder (FILE *, int, rtx *, int *, int, int);
-static int ia64_sched_reorder (FILE *, int, rtx *, int *, int);
-static int ia64_sched_reorder2 (FILE *, int, rtx *, int *, int);
-static int ia64_variable_issue (FILE *, int, rtx, int);
+static int ia64_dfa_sched_reorder (FILE *, int, rtx_insn **, int *, int, int);
+static int ia64_sched_reorder (FILE *, int, rtx_insn **, int *, int);
+static int ia64_sched_reorder2 (FILE *, int, rtx_insn **, int *, int);
+static int ia64_variable_issue (FILE *, int, rtx_insn *, int);
 
-static void ia64_asm_unwind_emit (FILE *, rtx);
+static void ia64_asm_unwind_emit (FILE *, rtx_insn *);
 static void ia64_asm_emit_except_personality (rtx);
 static void ia64_asm_init_sections (void);
 
@@ -276,14 +301,15 @@ static void initiate_bundle_state_table (void);
 static void finish_bundle_state_table (void);
 static int try_issue_nops (struct bundle_state *, int);
 static int try_issue_insn (struct bundle_state *, rtx);
-static void issue_nops_and_insn (struct bundle_state *, int, rtx, int, int);
+static void issue_nops_and_insn (struct bundle_state *, int, rtx_insn *,
+                                 int, int);
 static int get_max_pos (state_t);
 static int get_template (state_t, int);
-static rtx get_next_important_insn (rtx, rtx);
-static bool important_for_bundling_p (rtx);
-static bool unknown_for_bundling_p (rtx);
-static void bundling (FILE *, int, rtx, rtx);
+static rtx_insn *get_next_important_insn (rtx_insn *, rtx_insn *);
+static bool important_for_bundling_p (rtx_insn *);
+static bool unknown_for_bundling_p (rtx_insn *);
+static void bundling (FILE *, int, rtx_insn *, rtx_insn *);
 
 static void ia64_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
                                   HOST_WIDE_INT, tree);
@@ -292,7 +318,7 @@ static void ia64_globalize_decl_name (FILE *, tree);
 
 static int ia64_hpux_reloc_rw_mask (void) ATTRIBUTE_UNUSED;
 static int ia64_reloc_rw_mask (void) ATTRIBUTE_UNUSED;
-static section *ia64_select_rtx_section (enum machine_mode, rtx,
+static section *ia64_select_rtx_section (machine_mode, rtx,
                                          unsigned HOST_WIDE_INT);
 static void ia64_output_dwarf_dtprel (FILE *, int, rtx)
      ATTRIBUTE_UNUSED;
@@ -307,38 +333,40 @@ static void ia64_vms_init_libfuncs (void)
      ATTRIBUTE_UNUSED;
 static void ia64_soft_fp_init_libfuncs (void)
      ATTRIBUTE_UNUSED;
-static bool ia64_vms_valid_pointer_mode (enum machine_mode mode)
+static bool ia64_vms_valid_pointer_mode (machine_mode mode)
      ATTRIBUTE_UNUSED;
 static tree ia64_vms_common_object_attribute (tree *, tree, tree, int, bool *)
      ATTRIBUTE_UNUSED;
 
+static bool ia64_attribute_takes_identifier_p (const_tree);
 static tree ia64_handle_model_attribute (tree *, tree,
                                          tree, int, bool *);
 static tree ia64_handle_version_id_attribute (tree *, tree, tree, int, bool *);
 static void ia64_encode_section_info (tree, rtx, int);
 static rtx ia64_struct_value_rtx (tree, int);
 static tree ia64_gimplify_va_arg (tree, tree, gimple_seq *, gimple_seq *);
-static bool ia64_scalar_mode_supported_p (enum machine_mode mode);
-static bool ia64_vector_mode_supported_p (enum machine_mode mode);
-static bool ia64_legitimate_constant_p (enum machine_mode, rtx);
-static bool ia64_legitimate_address_p (enum machine_mode, rtx, bool);
-static bool ia64_cannot_force_const_mem (enum machine_mode, rtx);
+static bool ia64_scalar_mode_supported_p (machine_mode mode);
+static bool ia64_vector_mode_supported_p (machine_mode mode);
+static bool ia64_libgcc_floating_mode_supported_p (machine_mode mode);
+static bool ia64_legitimate_constant_p (machine_mode, rtx);
+static bool ia64_legitimate_address_p (machine_mode, rtx, bool);
+static bool ia64_cannot_force_const_mem (machine_mode, rtx);
 static const char *ia64_mangle_type (const_tree);
 static const char *ia64_invalid_conversion (const_tree, const_tree);
 static const char *ia64_invalid_unary_op (int, const_tree);
 static const char *ia64_invalid_binary_op (int, const_tree, const_tree);
-static enum machine_mode ia64_c_mode_for_suffix (char);
+static machine_mode ia64_c_mode_for_suffix (char);
 static void ia64_trampoline_init (rtx, tree, rtx);
 static void ia64_override_options_after_change (void);
-static bool ia64_member_type_forces_blk (const_tree, enum machine_mode);
+static bool ia64_member_type_forces_blk (const_tree, machine_mode);
 static tree ia64_builtin_decl (unsigned, bool);
 
 static reg_class_t ia64_preferred_reload_class (rtx, reg_class_t);
-static enum machine_mode ia64_get_reg_raw_mode (int regno);
+static machine_mode ia64_get_reg_raw_mode (int regno);
 static section * ia64_hpux_function_section (tree, enum node_frequency,
                                              bool, bool);
 
-static bool ia64_vectorize_vec_perm_const_ok (enum machine_mode vmode,
+static bool ia64_vectorize_vec_perm_const_ok (machine_mode vmode,
                                               const unsigned char *sel);
 
 #define MAX_VECT_LEN 8
 
@@ -347,7 +375,7 @@ struct expand_vec_perm_d
 {
   rtx target, op0, op1;
   unsigned char perm[MAX_VECT_LEN];
-  enum machine_mode vmode;
+  machine_mode vmode;
   unsigned char nelt;
   bool one_operand_p;
   bool testing_p;
@@ -496,10 +524,6 @@ static const struct attribute_spec ia64_attribute_table[] =
 #undef TARGET_SCHED_GEN_SPEC_CHECK
 #define TARGET_SCHED_GEN_SPEC_CHECK ia64_gen_spec_check
 
-#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD_SPEC
-#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD_SPEC\
-  ia64_first_cycle_multipass_dfa_lookahead_guard_spec
-
 #undef TARGET_SCHED_SKIP_RTX_P
 #define TARGET_SCHED_SKIP_RTX_P ia64_skip_rtx_p
 
@@ -602,6 +626,10 @@ static const struct attribute_spec ia64_attribute_table[] =
 #undef TARGET_VECTOR_MODE_SUPPORTED_P
 #define TARGET_VECTOR_MODE_SUPPORTED_P ia64_vector_mode_supported_p
 
+#undef TARGET_LIBGCC_FLOATING_MODE_SUPPORTED_P
+#define TARGET_LIBGCC_FLOATING_MODE_SUPPORTED_P \
+  ia64_libgcc_floating_mode_supported_p
+
 /* ia64 architecture manual 4.4.7:  ... reads, writes, and flushes may occur
    in an order different from the specified program order.  */
 #undef TARGET_RELAXED_ORDERING
@@ -637,7 +665,7 @@ static const struct attribute_spec ia64_attribute_table[] =
 #undef TARGET_CAN_USE_DOLOOP_P
 #define TARGET_CAN_USE_DOLOOP_P can_use_doloop_if_innermost
 #undef TARGET_INVALID_WITHIN_DOLOOP
-#define TARGET_INVALID_WITHIN_DOLOOP hook_constcharptr_const_rtx_null
+#define TARGET_INVALID_WITHIN_DOLOOP hook_constcharptr_const_rtx_insn_null
 
 #undef TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE
 #define TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE ia64_override_options_after_change
@@ -656,8 +684,26 @@ static const struct attribute_spec ia64_attribute_table[] =
 #undef TARGET_VECTORIZE_VEC_PERM_CONST_OK
 #define TARGET_VECTORIZE_VEC_PERM_CONST_OK ia64_vectorize_vec_perm_const_ok
 
+#undef TARGET_ATTRIBUTE_TAKES_IDENTIFIER_P
+#define TARGET_ATTRIBUTE_TAKES_IDENTIFIER_P ia64_attribute_takes_identifier_p
+
 struct gcc_target targetm = TARGET_INITIALIZER;
 
+/* Returns TRUE iff the target attribute indicated by ATTR_ID takes a plain
+   identifier as an argument, so the front end shouldn't look it up.  */
+
+static bool
+ia64_attribute_takes_identifier_p (const_tree attr_id)
+{
+  if (is_attribute_p ("model", attr_id))
+    return true;
+#if TARGET_ABI_OPEN_VMS
+  if (is_attribute_p ("common_object", attr_id))
+    return true;
+#endif
+  return false;
+}
+
 typedef enum
   {
     ADDR_AREA_NORMAL,   /* normal address area */
@@ -990,7 +1036,7 @@ ia64_legitimate_address_disp (const_rtx reg, const_rtx disp, bool strict)
 /* Implement TARGET_LEGITIMATE_ADDRESS_P.  */
 
 static bool
-ia64_legitimate_address_p (enum machine_mode mode ATTRIBUTE_UNUSED,
+ia64_legitimate_address_p (machine_mode mode ATTRIBUTE_UNUSED,
                            rtx x, bool strict)
 {
   if (ia64_legitimate_address_reg (x, strict))
@@ -1012,7 +1058,7 @@ ia64_legitimate_address_p (enum machine_mode mode ATTRIBUTE_UNUSED,
    field in an instruction.  */
 
 static bool
-ia64_legitimate_constant_p (enum machine_mode mode, rtx x)
+ia64_legitimate_constant_p (machine_mode mode, rtx x)
 {
   switch (GET_CODE (x))
     {
@@ -1067,7 +1113,7 @@ ia64_legitimate_constant_p (enum machine_mode mode, rtx x)
 /* Don't allow TLS addresses to get spilled to memory.  */
 
 static bool
-ia64_cannot_force_const_mem (enum machine_mode mode, rtx x)
+ia64_cannot_force_const_mem (machine_mode mode, rtx x)
 {
   if (mode == RFmode)
     return true;
@@ -1164,7 +1210,8 @@ static rtx
 ia64_expand_tls_address (enum tls_model tls_kind, rtx op0, rtx op1,
                          rtx orig_op1, HOST_WIDE_INT addend)
 {
-  rtx tga_op1, tga_op2, tga_ret, tga_eqv, tmp, insns;
+  rtx tga_op1, tga_op2, tga_ret, tga_eqv, tmp;
+  rtx_insn *insns;
   rtx orig_op0 = op0;
   HOST_WIDE_INT addend_lo, addend_hi;
@@ -1273,7 +1320,7 @@ ia64_expand_tls_address (enum tls_model tls_kind, rtx op0, rtx op1,
 rtx
 ia64_expand_move (rtx op0, rtx op1)
 {
-  enum machine_mode mode = GET_MODE (op0);
+  machine_mode mode = GET_MODE (op0);
 
   if (!reload_in_progress && !reload_completed && !ia64_move_ok (op0, op1))
     op1 = force_reg (mode, op1);
@@ -1345,7 +1392,7 @@ ia64_expand_move (rtx op0, rtx op1)
 void
 ia64_emit_cond_move (rtx op0, rtx op1, rtx cond)
 {
-  rtx insn, first = get_last_insn ();
+  rtx_insn *insn, *first = get_last_insn ();
 
   emit_move_insn (op0, op1);
 
@@ -1600,7 +1647,7 @@ ia64_split_tmode_move (rtx operands[])
    we see something like the above, we spill the inner register to memory.  */
 
 static rtx
-spill_xfmode_rfmode_operand (rtx in, int force, enum machine_mode mode)
+spill_xfmode_rfmode_operand (rtx in, int force, machine_mode mode)
 {
   if (GET_CODE (in) == SUBREG
       && GET_MODE (SUBREG_REG (in)) == TImode
@@ -1625,7 +1672,7 @@ spill_xfmode_rfmode_operand (rtx in, int force, enum machine_mode mode)
    DONE.  */
 
 bool
-ia64_expand_movxf_movrf (enum machine_mode mode, rtx operands[])
+ia64_expand_movxf_movrf (machine_mode mode, rtx operands[])
 {
   rtx op0 = operands[0];
 
@@ -1848,7 +1895,7 @@ ia64_expand_compare (rtx *expr, rtx *op0, rtx *op1)
    been reversed, and so the sense of the comparison should be inverted.  */
 
 static bool
-ia64_expand_vecint_compare (enum rtx_code code, enum machine_mode mode,
+ia64_expand_vecint_compare (enum rtx_code code, machine_mode mode,
                             rtx dest, rtx op0, rtx op1)
 {
   bool negate = false;
@@ -1939,7 +1986,7 @@ ia64_expand_vecint_compare (enum rtx_code code, enum machine_mode mode,
 void
 ia64_expand_vecint_cmov (rtx operands[])
 {
-  enum machine_mode mode = GET_MODE (operands[0]);
+  machine_mode mode = GET_MODE (operands[0]);
   enum rtx_code code = GET_CODE (operands[3]);
   bool negate;
   rtx cmp, x, ot, of;
@@ -1989,7 +2036,7 @@ ia64_expand_vecint_cmov (rtx operands[])
 /* Emit an integral vector min or max operation.  Return true if all done.  */
 
 bool
-ia64_expand_vecint_minmax (enum rtx_code code, enum machine_mode mode,
+ia64_expand_vecint_minmax (enum rtx_code code, machine_mode mode,
                            rtx operands[])
 {
   rtx xops[6];
@@ -2046,7 +2093,7 @@ ia64_expand_vecint_minmax (enum rtx_code code, enum machine_mode mode,
 void
 ia64_unpack_assemble (rtx out, rtx lo, rtx hi, bool highp)
 {
-  enum machine_mode vmode = GET_MODE (lo);
+  machine_mode vmode = GET_MODE (lo);
   unsigned int i, high, nelt = GET_MODE_NUNITS (vmode);
   struct expand_vec_perm_d d;
   bool ok;
@@ -2075,7 +2122,7 @@ ia64_unpack_assemble (rtx out, rtx lo, rtx hi, bool highp)
 static rtx
 ia64_unpack_sign (rtx vec, bool unsignedp)
 {
-  enum machine_mode mode = GET_MODE (vec);
+  machine_mode mode = GET_MODE (vec);
   rtx zero = CONST0_RTX (mode);
 
   if (unsignedp)
@@ -2106,7 +2153,7 @@ ia64_expand_unpack (rtx operands[3], bool unsignedp, bool highp)
 void
 ia64_expand_widen_sum (rtx operands[3], bool unsignedp)
 {
-  enum machine_mode wmode;
+  machine_mode wmode;
   rtx l, h, t, sign;
 
   sign = ia64_unpack_sign (operands[1], unsignedp);
@@ -2323,7 +2370,7 @@ void
 ia64_expand_atomic_op (enum rtx_code code, rtx mem, rtx val,
                        rtx old_dst, rtx new_dst, enum memmodel model)
 {
-  enum machine_mode mode = GET_MODE (mem);
+  machine_mode mode = GET_MODE (mem);
   rtx old_reg, new_reg, cmp_reg, ar_ccv, label;
   enum insn_code icode;
 
@@ -3000,11 +3047,11 @@ ia64_initial_elimination_offset (int from, int to)
 
 struct spill_fill_data
 {
-  rtx init_after;       /* point at which to emit initializations */
+  rtx_insn *init_after; /* point at which to emit initializations */
   rtx init_reg[2];      /* initial base register */
   rtx iter_reg[2];      /* the iterator registers */
   rtx *prev_addr[2];    /* address of last memory use */
-  rtx prev_insn[2];     /* the insn corresponding to prev_addr */
+  rtx_insn *prev_insn[2]; /* the insn corresponding to prev_addr */
   HOST_WIDE_INT prev_off[2];    /* last offset */
   int n_iter;           /* number of iterators in use */
   int next_iter;        /* next iterator to use */
@@ -3092,7 +3139,8 @@ spill_restore_mem (rtx reg, HOST_WIDE_INT cfa_off)
     }
   else
     {
-      rtx seq, insn;
+      rtx seq;
+      rtx_insn *insn;
 
       if (disp == 0)
        seq = gen_movdi (spill_fill_data.iter_reg[iter],
@@ -3121,7 +3169,7 @@ spill_restore_mem (rtx reg, HOST_WIDE_INT cfa_off)
        insn = emit_insn_after (seq, spill_fill_data.init_after);
       else
        {
-         rtx first = get_insns ();
+         rtx_insn *first = get_insns ();
          if (first)
            insn = emit_insn_before (seq, first);
          else
            insn = emit_insn (seq);
        }
@@ -3152,7 +3200,8 @@ do_spill (rtx (*move_fn) (rtx, rtx, rtx), rtx reg, HOST_WIDE_INT cfa_off,
          rtx frame_reg)
 {
   int iter = spill_fill_data.next_iter;
-  rtx mem, insn;
+  rtx mem;
+  rtx_insn *insn;
 
   mem = spill_restore_mem (reg, cfa_off);
   insn = emit_insn ((*move_fn) (mem, reg, GEN_INT (cfa_off)));
@@ -3193,7 +3242,7 @@ static void
 do_restore (rtx (*move_fn) (rtx, rtx, rtx), rtx reg, HOST_WIDE_INT cfa_off)
 {
   int iter = spill_fill_data.next_iter;
-  rtx insn;
+  rtx_insn *insn;
 
   insn = emit_insn ((*move_fn) (reg, spill_restore_mem (reg, cfa_off),
                                 GEN_INT (cfa_off)));
@@ -3448,7 +3497,8 @@ output_probe_stack_range (rtx reg1, rtx reg2)
 void
 ia64_expand_prologue (void)
 {
-  rtx insn, ar_pfs_save_reg, ar_unat_save_reg;
+  rtx_insn *insn;
+  rtx ar_pfs_save_reg, ar_unat_save_reg;
   int i, epilogue_p, regno, alt_regno, cfa_off, n_varargs;
   rtx reg, alt_reg;
 
@@ -3859,7 +3909,8 @@ ia64_start_function (FILE *file, const char *fnname,
 void
 ia64_expand_epilogue (int sibcall_p)
 {
-  rtx insn, reg, alt_reg, ar_unat_save_reg;
+  rtx_insn *insn;
+  rtx reg, alt_reg, ar_unat_save_reg;
   int regno, alt_regno, cfa_off;
 
   ia64_compute_frame_size (get_frame_size ());
@@ -4486,7 +4537,7 @@ ia64_trampoline_init (rtx m_tramp, tree fndecl, rtx static_chain)
    We generate the actual spill instructions during prologue generation.  */
 
 static void
-ia64_setup_incoming_varargs (cumulative_args_t cum, enum machine_mode mode,
+ia64_setup_incoming_varargs (cumulative_args_t cum, machine_mode mode,
                              tree type, int * pretend_size,
                              int second_time ATTRIBUTE_UNUSED)
 {
@@ -4515,11 +4566,11 @@ ia64_setup_incoming_varargs (cumulative_args_t cum, enum machine_mode mode,
    have already decided to pass them by reference.  Top-level zero-sized
    aggregates are excluded because our parallels crash the middle-end.  */
 
-static enum machine_mode
+static machine_mode
 hfa_element_mode (const_tree type, bool nested)
 {
-  enum machine_mode element_mode = VOIDmode;
-  enum machine_mode mode;
+  machine_mode element_mode = VOIDmode;
+  machine_mode mode;
   enum tree_code code = TREE_CODE (type);
   int know_element_mode = 0;
   tree t;
@@ -4594,7 +4645,7 @@ hfa_element_mode (const_tree type, bool nested)
    when passed as an argument.  */
 
 static int
-ia64_function_arg_words (const_tree type, enum machine_mode mode)
+ia64_function_arg_words (const_tree type, machine_mode mode)
 {
   int words;
 
@@ -4641,7 +4692,7 @@ ia64_function_arg_offset (const CUMULATIVE_ARGS *cum,
    registers.  */
 
 static rtx
-ia64_function_arg_1 (cumulative_args_t cum_v, enum machine_mode mode,
+ia64_function_arg_1 (cumulative_args_t cum_v, machine_mode mode,
                      const_tree type, bool named, bool incoming)
 {
   const CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
@@ -4649,7 +4700,7 @@ ia64_function_arg_1 (cumulative_args_t cum_v, enum machine_mode mode,
   int basereg = (incoming ? GR_ARG_FIRST : AR_ARG_FIRST);
   int words = ia64_function_arg_words (type, mode);
   int offset = ia64_function_arg_offset (cum, type, words);
-  enum machine_mode hfa_mode = VOIDmode;
+  machine_mode hfa_mode = VOIDmode;
 
   /* For OPEN VMS, emit the instruction setting up the argument register here,
      when we know this will be together with the other arguments setup related
@@ -4736,7 +4787,7 @@ ia64_function_arg_1 (cumulative_args_t cum_v, enum machine_mode mode,
 
       for (; offset < byte_size && int_regs < MAX_ARGUMENT_SLOTS; i++)
        {
-         enum machine_mode gr_mode = DImode;
+         machine_mode gr_mode = DImode;
          unsigned int gr_size;
 
          /* If we have an odd 4 byte hunk because we ran out of FR regs,
@@ -4816,7 +4867,7 @@ ia64_function_arg_1 (cumulative_args_t cum_v, enum machine_mode mode,
       else
        {
          /* See comment above.  */
-         enum machine_mode inner_mode =
+         machine_mode inner_mode =
            (BYTES_BIG_ENDIAN && mode == SFmode) ? DImode : mode;
 
          rtx fp_reg = gen_rtx_EXPR_LIST (VOIDmode,
@@ -4836,7 +4887,7 @@ ia64_function_arg_1 (cumulative_args_t cum_v, enum machine_mode mode,
 /* Implement TARGET_FUNCION_ARG target hook.  */
 
 static rtx
-ia64_function_arg (cumulative_args_t cum, enum machine_mode mode,
+ia64_function_arg (cumulative_args_t cum, machine_mode mode,
                    const_tree type, bool named)
 {
   return ia64_function_arg_1 (cum, mode, type, named, false);
@@ -4846,7 +4897,7 @@ ia64_function_arg (cumulative_args_t cum, enum machine_mode mode,
 
 static rtx
 ia64_function_incoming_arg (cumulative_args_t cum,
-                            enum machine_mode mode,
+                            machine_mode mode,
                             const_tree type, bool named)
 {
   return ia64_function_arg_1 (cum, mode, type, named, true);
@@ -4857,7 +4908,7 @@ ia64_function_incoming_arg (cumulative_args_t cum,
    in memory.  */
 
 static int
-ia64_arg_partial_bytes (cumulative_args_t cum_v, enum machine_mode mode,
+ia64_arg_partial_bytes (cumulative_args_t cum_v, machine_mode mode,
                         tree type, bool named ATTRIBUTE_UNUSED)
 {
   CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
@@ -4883,7 +4934,7 @@ ia64_arg_partial_bytes (cumulative_args_t cum_v, enum machine_mode mode,
 /* Return ivms_arg_type based on machine_mode.  */
 
 static enum ivms_arg_type
-ia64_arg_type (enum machine_mode mode)
+ia64_arg_type (machine_mode mode)
 {
   switch (mode)
     {
@@ -4900,13 +4951,13 @@ ia64_arg_type (enum machine_mode mode)
    ia64_function_arg.  */
 
 static void
-ia64_function_arg_advance (cumulative_args_t cum_v, enum machine_mode mode,
+ia64_function_arg_advance (cumulative_args_t cum_v, machine_mode mode,
                            const_tree type, bool named)
 {
   CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
   int words = ia64_function_arg_words (type, mode);
   int offset = ia64_function_arg_offset (cum, type, words);
-  enum machine_mode hfa_mode = VOIDmode;
+  machine_mode hfa_mode = VOIDmode;
 
   /* If all arg slots are already full, then there is nothing to do.  */
   if (cum->words >= MAX_ARGUMENT_SLOTS)
@@ -4998,7 +5049,7 @@ ia64_function_arg_advance (cumulative_args_t cum_v, enum machine_mode mode,
    even though their normal alignment is 8 bytes.  See ia64_function_arg.  */
 
 static unsigned int
-ia64_function_arg_boundary (enum machine_mode mode, const_tree type)
+ia64_function_arg_boundary (machine_mode mode, const_tree type)
 {
   if (mode == TFmode && TARGET_HPUX && TARGET_ILP32)
     return PARM_BOUNDARY * 2;
@@ -5072,8 +5123,8 @@ ia64_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
 static bool
 ia64_return_in_memory (const_tree valtype, const_tree fntype ATTRIBUTE_UNUSED)
 {
-  enum machine_mode mode;
-  enum machine_mode hfa_mode;
+  machine_mode mode;
+  machine_mode hfa_mode;
   HOST_WIDE_INT byte_size;
 
   mode = TYPE_MODE (valtype);
@@ -5110,8 +5161,8 @@ ia64_function_value (const_tree valtype,
                      const_tree fn_decl_or_type,
                      bool outgoing ATTRIBUTE_UNUSED)
 {
-  enum machine_mode mode;
-  enum machine_mode hfa_mode;
+  machine_mode mode;
+  machine_mode hfa_mode;
   int unsignedp;
   const_tree func = fn_decl_or_type;
 
@@ -5199,7 +5250,7 @@ ia64_function_value (const_tree valtype,
 /* Worker function for TARGET_LIBCALL_VALUE.  */
 
 static rtx
-ia64_libcall_value (enum machine_mode mode,
+ia64_libcall_value (machine_mode mode,
                     const_rtx fun ATTRIBUTE_UNUSED)
 {
   return gen_rtx_REG (mode,
@@ -5661,7 +5712,7 @@ ia64_rtx_costs (rtx x, int code, int outer_code, int opno ATTRIBUTE_UNUSED,
    one in class TO, using MODE.  */
 
 static int
-ia64_register_move_cost (enum machine_mode mode, reg_class_t from,
+ia64_register_move_cost (machine_mode mode, reg_class_t from,
                          reg_class_t to)
 {
   /* ADDL_REGS is the same as GR_REGS for movement purposes.  */
@@ -5733,7 +5784,7 @@ ia64_register_move_cost (enum machine_mode mode, reg_class_t from,
    memory.  */
 
 static int
-ia64_memory_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
+ia64_memory_move_cost (machine_mode mode ATTRIBUTE_UNUSED,
                        reg_class_t rclass,
                        bool in ATTRIBUTE_UNUSED)
 {
@@ -5787,7 +5838,7 @@ ia64_preferred_reload_class (rtx x, reg_class_t rclass)
 
 enum reg_class
 ia64_secondary_reload_class (enum reg_class rclass,
-                             enum machine_mode mode ATTRIBUTE_UNUSED, rtx x)
+                             machine_mode mode ATTRIBUTE_UNUSED, rtx x)
 {
   int regno = -1;
 
@@ -6046,14 +6097,14 @@ void ia64_init_expanders (void)
 static struct machine_function *
 ia64_init_machine_status (void)
 {
-  return ggc_alloc_cleared_machine_function ();
+  return ggc_cleared_alloc<machine_function> ();
 }
 
-static enum attr_itanium_class ia64_safe_itanium_class (rtx);
-static enum attr_type ia64_safe_type (rtx);
+static enum attr_itanium_class ia64_safe_itanium_class (rtx_insn *);
+static enum attr_type ia64_safe_type (rtx_insn *);
 
 static enum attr_itanium_class
-ia64_safe_itanium_class (rtx insn)
+ia64_safe_itanium_class (rtx_insn *insn)
 {
   if (recog_memoized (insn) >= 0)
     return get_attr_itanium_class (insn);
@@ -6064,7 +6115,7 @@ ia64_safe_itanium_class (rtx insn)
 }
 
 static enum attr_type
-ia64_safe_type (rtx insn)
+ia64_safe_type (rtx_insn *insn)
 {
   if (recog_memoized (insn) >= 0)
     return get_attr_type (insn);
@@ -6190,8 +6241,8 @@ static void update_set_flags (rtx, struct reg_flags *);
 static int set_src_needs_barrier (rtx, struct reg_flags, int);
 static int rtx_needs_barrier (rtx, struct reg_flags, int);
 static void init_insn_group_barriers (void);
-static int group_barrier_needed (rtx);
-static int safe_group_barrier_needed (rtx);
+static int group_barrier_needed (rtx_insn *);
+static int safe_group_barrier_needed (rtx_insn *);
 static int in_safe_group_barrier;
 
 /* Update *RWS for REGNO, which is being written by the current instruction,
@@ -6819,7 +6870,7 @@ init_insn_group_barriers (void)
    include the effects of INSN as a side-effect.  */
 
 static int
-group_barrier_needed (rtx insn)
+group_barrier_needed (rtx_insn *insn)
 {
   rtx pat;
   int need_barrier = 0;
 
@@ -6928,7 +6979,7 @@ group_barrier_needed (rtx insn)
 /* Like group_barrier_needed, but do not clobber the current state.  */
 
 static int
-safe_group_barrier_needed (rtx insn)
+safe_group_barrier_needed (rtx_insn *insn)
 {
   int saved_first_instruction;
   int t;
@@ -6954,8 +7005,8 @@ safe_group_barrier_needed (rtx insn)
 static void
 emit_insn_group_barriers (FILE *dump)
 {
-  rtx insn;
-  rtx last_label = 0;
+  rtx_insn *insn;
+  rtx_insn *last_label = 0;
   int insns_since_last_label = 0;
 
   init_insn_group_barriers ();
@@ -7010,7 +7061,7 @@ emit_insn_group_barriers (FILE *dump)
 static void
 emit_all_insn_group_barriers (FILE *dump ATTRIBUTE_UNUSED)
 {
-  rtx insn;
+  rtx_insn *insn;
 
   init_insn_group_barriers ();
 
@@ -7018,7 +7069,7 @@ emit_all_insn_group_barriers (FILE *dump ATTRIBUTE_UNUSED)
     {
       if (BARRIER_P (insn))
        {
-         rtx last = prev_active_insn (insn);
+         rtx_insn *last = prev_active_insn (insn);
 
          if (! last)
            continue;
@@ -7083,11 +7134,11 @@ static int pos_1, pos_2, pos_3, pos_4, pos_5, pos_6;
 
 /* The following variable value is an insn group barrier.  */
 
-static rtx dfa_stop_insn;
+static rtx_insn *dfa_stop_insn;
 
 /* The following variable value is the last issued insn.  */
 
-static rtx last_scheduled_insn;
+static rtx_insn *last_scheduled_insn;
 
 /* The following variable value is pointer to a DFA state used as
    temporary variable.  */
@@ -7122,7 +7173,7 @@ static char mem_ops_in_group[4];
 /* Number of current processor cycle (from scheduler's point of view).  */
 static int current_cycle;
 
-static rtx ia64_single_set (rtx);
+static rtx ia64_single_set (rtx_insn *);
 static void ia64_emit_insn_before (rtx, rtx);
 
 /* Map a bundle number to its pseudo-op.  */
@@ -7145,7 +7196,7 @@ ia64_issue_rate (void)
 /* Helper function - like single_set, but look inside COND_EXEC.  */
 
 static rtx
-ia64_single_set (rtx insn)
+ia64_single_set (rtx_insn *insn)
 {
   rtx x = PATTERN (insn), ret;
   if (GET_CODE (x) == COND_EXEC)
@@ -7177,7 +7228,8 @@ ia64_single_set (rtx insn)
    Return the new cost of a dependency of type DEP_TYPE or INSN on DEP_INSN.
    COST is the current cost, DW is dependency weakness.  */
 static int
-ia64_adjust_cost_2 (rtx insn, int dep_type1, rtx dep_insn, int cost, dw_t dw)
+ia64_adjust_cost_2 (rtx_insn *insn, int dep_type1, rtx_insn *dep_insn,
                     int cost, dw_t dw)
 {
   enum reg_note dep_type = (enum reg_note) dep_type1;
   enum attr_itanium_class dep_class;
@@ -7233,9 +7285,9 @@ ia64_emit_insn_before (rtx insn, rtx before)
    `ia64_produce_address_p' and the DFA descriptions).  */
 
 static void
-ia64_dependencies_evaluation_hook (rtx head, rtx tail)
+ia64_dependencies_evaluation_hook (rtx_insn *head, rtx_insn *tail)
 {
-  rtx insn, next, next_tail;
+  rtx_insn *insn, *next, *next_tail;
 
   /* Before reload, which_alternative is not set, which means that
      ia64_safe_itanium_class will produce wrong results for (at least)
@@ -7293,7 +7345,7 @@ ia64_sched_init (FILE *dump ATTRIBUTE_UNUSED,
                 int max_ready ATTRIBUTE_UNUSED)
 {
 #ifdef ENABLE_CHECKING
-  rtx insn;
+  rtx_insn *insn;
 
   if (!sel_sched_p () && reload_completed)
     for (insn = NEXT_INSN (current_sched_info->prev_head);
@@ -7301,7 +7353,7 @@ ia64_sched_init (FILE *dump ATTRIBUTE_UNUSED,
         insn = NEXT_INSN (insn))
       gcc_assert (!SCHED_GROUP_P (insn));
 #endif
-  last_scheduled_insn = NULL_RTX;
+  last_scheduled_insn = NULL;
   init_insn_group_barriers ();
 
   current_cycle = 0;
@@ -7329,7 +7381,7 @@ ia64_sched_finish_global (FILE *dump ATTRIBUTE_UNUSED,
 /* Return TRUE if INSN is a load (either normal or speculative, but not a
    speculation check), FALSE otherwise.  */
 static bool
-is_load_p (rtx insn)
+is_load_p (rtx_insn *insn)
 {
   enum attr_itanium_class insn_class = ia64_safe_itanium_class (insn);
 
@@ -7343,7 +7395,7 @@ is_load_p (rtx insn)
    Itanium 2 Reference Manual for Software Development and Optimization,
    6.7.3.1).  */
 static void
-record_memory_reference (rtx insn)
+record_memory_reference (rtx_insn *insn)
 {
   enum attr_itanium_class insn_class = ia64_safe_itanium_class (insn);
 
@@ -7364,14 +7416,14 @@ record_memory_reference (rtx insn)
    Override the default sort algorithm to better slot instructions.  */
 static int
-ia64_dfa_sched_reorder (FILE *dump, int sched_verbose, rtx *ready,
+ia64_dfa_sched_reorder (FILE *dump, int sched_verbose, rtx_insn **ready,
                         int *pn_ready, int clock_var,
                         int reorder_type)
 {
   int n_asms;
   int n_ready = *pn_ready;
-  rtx *e_ready = ready + n_ready;
-  rtx *insnp;
+  rtx_insn **e_ready = ready + n_ready;
+  rtx_insn **insnp;
 
   if (sched_verbose)
     fprintf (dump, "// ia64_dfa_sched_reorder (type %d):\n", reorder_type);
@@ -7383,21 +7435,21 @@ ia64_dfa_sched_reorder (FILE *dump, int sched_verbose, rtx *ready,
       for (insnp = ready; insnp < e_ready; insnp++)
        if (insnp < e_ready)
          {
-           rtx insn = *insnp;
+           rtx_insn *insn = *insnp;
            enum attr_type t = ia64_safe_type (insn);
            if (t == TYPE_UNKNOWN)
              {
                if (GET_CODE (PATTERN (insn)) == ASM_INPUT
                    || asm_noperands (PATTERN (insn)) >= 0)
                  {
-                   rtx lowest = ready[n_asms];
+                   rtx_insn *lowest = ready[n_asms];
                    ready[n_asms] = insn;
                    *insnp = lowest;
                    n_asms++;
                  }
                else
                  {
-                   rtx highest = ready[n_ready - 1];
+                   rtx_insn *highest = ready[n_ready - 1];
                    ready[n_ready - 1] = insn;
                    *insnp = highest;
                    return 1;
@@ -7434,7 +7486,7 @@ ia64_dfa_sched_reorder (FILE *dump, int sched_verbose, rtx *ready,
       while (insnp-- > ready + deleted)
       while (insnp >= ready + deleted)
         {
-          rtx insn = *insnp;
+          rtx_insn *insn = *insnp;
           if (! safe_group_barrier_needed (insn))
             break;
           memmove (ready + 1, ready, (insnp - ready) * sizeof (rtx));
@@ -7455,7 +7507,7 @@ ia64_dfa_sched_reorder (FILE *dump, int sched_verbose, rtx *ready,
       while (insnp-- > ready + moved)
       while (insnp >= ready + moved)
         {
-          rtx insn = *insnp;
+          rtx_insn *insn = *insnp;
          if (! is_load_p (insn))
            break;
          memmove (ready + 1, ready, (insnp - ready) * sizeof (rtx));
@@ -7473,8 +7525,8 @@ ia64_dfa_sched_reorder (FILE *dump, int sched_verbose, rtx *ready,
    the default sort algorithm to better slot instructions.  */
 
 static int
-ia64_sched_reorder (FILE *dump, int sched_verbose, rtx *ready, int *pn_ready,
-                    int clock_var)
+ia64_sched_reorder (FILE *dump, int sched_verbose, rtx_insn **ready,
+                    int *pn_ready, int clock_var)
 {
   return ia64_dfa_sched_reorder (dump, sched_verbose, ready,
                                  pn_ready, clock_var, 0);
@@ -7485,7 +7537,7 @@ ia64_sched_reorder (FILE *dump, int sched_verbose, rtx *ready, int *pn_ready,
 
 static int
 ia64_sched_reorder2 (FILE *dump ATTRIBUTE_UNUSED,
-                     int sched_verbose ATTRIBUTE_UNUSED, rtx *ready,
+                     int sched_verbose ATTRIBUTE_UNUSED, rtx_insn **ready,
                      int *pn_ready, int clock_var)
 {
   return ia64_dfa_sched_reorder (dump, sched_verbose, ready, pn_ready,
@@ -7498,7 +7550,7 @@ ia64_sched_reorder2 (FILE *dump ATTRIBUTE_UNUSED,
 static int
 ia64_variable_issue (FILE *dump ATTRIBUTE_UNUSED,
                      int sched_verbose ATTRIBUTE_UNUSED,
-                     rtx insn ATTRIBUTE_UNUSED,
+                     rtx_insn *insn,
                      int can_issue_more ATTRIBUTE_UNUSED)
 {
   if (sched_deps_info->generate_spec_deps && !sel_sched_p ())
@@ -7531,44 +7583,42 @@ ia64_variable_issue (FILE *dump ATTRIBUTE_UNUSED,
   return 1;
 }
 
-/* We are choosing insn from the ready queue.  Return nonzero if INSN
+/* We are choosing insn from the ready queue.  Return zero if INSN
    can be chosen.  */
 
 static int
-ia64_first_cycle_multipass_dfa_lookahead_guard (rtx insn)
+ia64_first_cycle_multipass_dfa_lookahead_guard (rtx_insn *insn, int ready_index)
 {
   gcc_assert (insn && INSN_P (insn));
-  return ((!reload_completed
-          || !safe_group_barrier_needed (insn))
-         && ia64_first_cycle_multipass_dfa_lookahead_guard_spec (insn)
-         && (!mflag_sched_mem_insns_hard_limit
-             || !is_load_p (insn)
-             || mem_ops_in_group[current_cycle % 4] < ia64_max_memory_insns));
-}
 
-/* We are choosing insn from the ready queue.  Return nonzero if INSN
-   can be chosen.  */
+  /* Size of ALAT is 32.  As far as we perform conservative
+     data speculation, we keep ALAT half-empty.  */
+  if (pending_data_specs >= 16 && (TODO_SPEC (insn) & BEGIN_DATA))
+    return ready_index == 0 ? -1 : 1;
 
-static bool
-ia64_first_cycle_multipass_dfa_lookahead_guard_spec (const_rtx insn)
-{
-  gcc_assert (insn && INSN_P (insn));
-  /* Size of ALAT is 32.  As far as we perform conservative data speculation,
-     we keep ALAT half-empty.  */
-  return (pending_data_specs < 16
-         || !(TODO_SPEC (insn) & BEGIN_DATA));
+  if (ready_index == 0)
+    return 0;
+
+  if ((!reload_completed
+       || !safe_group_barrier_needed (insn))
+      && (!mflag_sched_mem_insns_hard_limit
+         || !is_load_p (insn)
+         || mem_ops_in_group[current_cycle % 4] < ia64_max_memory_insns))
+    return 0;
+
+  return 1;
 }
 
 /* The following variable value is pseudo-insn used by the DFA insn
    scheduler to change the DFA state when the simulated clock is
   increased.  */
 
-static rtx dfa_pre_cycle_insn;
+static rtx_insn *dfa_pre_cycle_insn;
 
 /* Returns 1 when a meaningful insn was scheduled between the last group
    barrier and LAST.  */
 static int
-scheduled_good_insn (rtx last)
+scheduled_good_insn (rtx_insn *last)
 {
   if (last && recog_memoized (last) >= 0)
     return 1;
@@ -7590,7 +7640,7 @@ scheduled_good_insn (rtx last)
    the ready queue on the next clock start.  */
 
 static int
-ia64_dfa_new_cycle (FILE *dump, int verbose, rtx insn, int last_clock,
+ia64_dfa_new_cycle (FILE *dump, int verbose, rtx_insn *insn, int last_clock,
                     int clock, int *sort_p)
 {
   gcc_assert (insn && INSN_P (insn));
@@ -7670,7 +7720,7 @@ ia64_h_i_d_extended (void)
 struct _ia64_sched_context
 {
   state_t prev_cycle_state;
-  rtx last_scheduled_insn;
+  rtx_insn *last_scheduled_insn;
   struct reg_write_state rws_sum[NUM_REGS];
   struct reg_write_state rws_insn[NUM_REGS];
   int first_instruction;
@@ -7698,7 +7748,7 @@ ia64_init_sched_context (void *_sc, bool clean_p)
   if (clean_p)
     {
       state_reset (sc->prev_cycle_state);
-      sc->last_scheduled_insn = NULL_RTX;
+      sc->last_scheduled_insn = NULL;
      memset (sc->rws_sum, 0, sizeof (rws_sum));
      memset (sc->rws_insn, 0, sizeof (rws_insn));
      sc->first_instruction = 1;
@@ -7860,7 +7910,7 @@ get_spec_load_gen_function (ds_t ts, int mode_no)
   return gen_ld[mode_no];
 }
 
-/* Constants that help mapping 'enum machine_mode' to int.  */
+/* Constants that help mapping 'machine_mode' to int.  */
 enum SPEC_MODES
   {
     SPEC_MODE_INVALID = -1,
@@ -7878,7 +7928,7 @@ enum
 
 /* Return index of the MODE.  */
 static int
-ia64_mode_to_int (enum machine_mode mode)
+ia64_mode_to_int (machine_mode mode)
 {
   switch (mode)
     {
@@ -7943,17 +7993,9 @@ ia64_set_sched_flags (spec_info_t spec_info)
 
       spec_info->flags = 0;
 
-      if ((mask & DATA_SPEC) && mflag_sched_prefer_non_data_spec_insns)
-       spec_info->flags |= PREFER_NON_DATA_SPEC;
-
-      if (mask & CONTROL_SPEC)
-       {
-         if (mflag_sched_prefer_non_control_spec_insns)
-           spec_info->flags |= PREFER_NON_CONTROL_SPEC;
-
-         if (sel_sched_p () && mflag_sel_sched_dont_check_control_spec)
-           spec_info->flags |= SEL_SCHED_SPEC_DONT_CHECK_CONTROL;
-       }
+      if ((mask & CONTROL_SPEC)
+         && sel_sched_p () && mflag_sel_sched_dont_check_control_spec)
+       spec_info->flags |= SEL_SCHED_SPEC_DONT_CHECK_CONTROL;
 
       if (sched_verbose >= 1)
        spec_info->dump = sched_dump;
@@ -7971,7 +8013,7 @@ ia64_set_sched_flags (spec_info_t spec_info)
 /* If INSN is an appropriate load return its mode.
    Return -1 otherwise.  */
 static int
-get_mode_no_for_insn (rtx insn)
+get_mode_no_for_insn (rtx_insn *insn)
 {
   rtx reg, mem, mode_rtx;
   int mode_no;
@@ -8122,7 +8164,7 @@ get_insn_spec_code (const_rtx insn)
 /* If INSN is a speculative load, return a ds with the speculation types.
    Otherwise [if INSN is a normal instruction] return 0.  */
 static ds_t
-ia64_get_insn_spec_ds (rtx insn)
+ia64_get_insn_spec_ds (rtx_insn *insn)
 {
   int code = get_insn_spec_code (insn);
 
@@ -8147,7 +8189,7 @@ ia64_get_insn_spec_ds (rtx insn)
    will be checked.
    Otherwise [if INSN is a normal instruction] return 0.  */
 static ds_t
-ia64_get_insn_checked_ds (rtx insn)
+ia64_get_insn_checked_ds (rtx_insn *insn)
 {
   int code = get_insn_spec_code (insn);
 
@@ -8205,7 +8247,7 @@ insn_can_be_in_speculative_p (rtx insn ATTRIBUTE_UNUSED,
    If current pattern of the INSN already provides TS speculation,
    return 0.  */
 static int
-ia64_speculate_insn (rtx insn, ds_t ts, rtx *new_pat)
+ia64_speculate_insn (rtx_insn *insn, ds_t ts, rtx *new_pat)
 {
   int mode_no;
   int res;
@@ -8380,7 +8422,7 @@ ia64_needs_block_p (ds_t ts)
 
 /* Generate (or regenerate) a recovery check for INSN.  */
 
 static rtx
-ia64_gen_spec_check (rtx insn, rtx label, ds_t ds)
+ia64_gen_spec_check (rtx_insn *insn, rtx_insn *label, ds_t ds)
 {
   rtx op1, pat, check_pat;
   gen_func_t gen_check;
@@ -8467,7 +8509,7 @@ struct bundle_state
   /* Unique bundle state number to identify them in the debugging
      output  */
   int unique_num;
-  rtx insn;     /* corresponding insn, NULL for the 1st and the last state  */
+  rtx_insn *insn; /* corresponding insn, NULL for the 1st and the last state  */
   /* number nops before and after the insn  */
   short before_nops_num, after_nops_num;
   int insn_num; /* insn number (0 - for initial state, 1 - for the 1st
@@ -8600,7 +8642,7 @@ bundle_state_hasher::equal (const value_type *state1,
 /* Hash table of the bundle states.  The key is dfa_state and insn_num
    of the bundle states.  */
 
-static hash_table <bundle_state_hasher> bundle_state_table;
+static hash_table<bundle_state_hasher> *bundle_state_table;
 
 /* The function inserts the BUNDLE_STATE into the hash table.  The
    function returns nonzero if the bundle has been inserted into the
@@ -8611,7 +8653,7 @@ insert_bundle_state (struct bundle_state *bundle_state)
 {
   struct bundle_state **entry_ptr;
 
-  entry_ptr = bundle_state_table.find_slot (bundle_state, INSERT);
+  entry_ptr = bundle_state_table->find_slot (bundle_state, INSERT);
   if (*entry_ptr == NULL)
    {
      bundle_state->next = index_to_bundle_states [bundle_state->insn_num];
@@ -8648,7 +8690,7 @@ insert_bundle_state (struct bundle_state *bundle_state)
 static void
 initiate_bundle_state_table (void)
 {
-  bundle_state_table.create (50);
+  bundle_state_table = new hash_table<bundle_state_hasher> (50);
 }
 
 /* Finish work with the hash table.  */
@@ -8656,7 +8698,8 @@ initiate_bundle_state_table (void)
 static void
 finish_bundle_state_table (void)
 {
-  bundle_state_table.dispose ();
+  delete bundle_state_table;
+  bundle_state_table = NULL;
 }
 
 
@@ -8664,7 +8707,7 @@ finish_bundle_state_table (void)
 /* The following variable is a insn `nop' used to check bundle states
    with different number of inserted nops.  */
 
-static rtx ia64_nop;
+static rtx_insn *ia64_nop;
 
 /* The following function tries to issue NOPS_NUM nops for the current
    state without advancing processor cycle.  If it failed, the
@@ -8708,7 +8751,8 @@ try_issue_insn (struct bundle_state *curr_state, rtx insn)
 
 static void
 issue_nops_and_insn (struct bundle_state *originator, int before_nops_num,
-                    rtx insn, int try_bundle_end_p, int only_bundle_end_p)
+                    rtx_insn *insn, int try_bundle_end_p,
+                    int only_bundle_end_p)
 {
   struct bundle_state *curr_state;
 
@@ -8911,7 +8955,7 @@ get_template (state_t state, int pos)
 /* True when INSN is important for bundling.  */
 
 static bool
-important_for_bundling_p (rtx insn)
+important_for_bundling_p (rtx_insn *insn)
 {
   return (INSN_P (insn)
         && ia64_safe_itanium_class (insn) != ITANIUM_CLASS_IGNORE
@@ -8922,19 +8966,19 @@ important_for_bundling_p (rtx insn)
 /* The following function returns an insn important for insn bundling
    followed by INSN and before TAIL.  */
 
-static rtx
-get_next_important_insn (rtx insn, rtx tail)
+static rtx_insn *
+get_next_important_insn (rtx_insn *insn, rtx_insn *tail)
 {
   for (; insn && insn != tail; insn = NEXT_INSN (insn))
     if (important_for_bundling_p (insn))
      return insn;
-  return NULL_RTX;
+  return NULL;
 }
 
 /* True when INSN is unknown, but important, for bundling.  */
 
 static bool
-unknown_for_bundling_p (rtx insn)
+unknown_for_bundling_p (rtx_insn *insn)
 {
   return (INSN_P (insn)
        && ia64_safe_itanium_class (insn) == ITANIUM_CLASS_UNKNOWN
@@ -8945,7 +8989,7 @@ unknown_for_bundling_p (rtx insn)
 /* Add a bundle selector TEMPLATE0 before INSN.  */
 
 static void
-ia64_add_bundle_selector_before (int template0, rtx insn)
+ia64_add_bundle_selector_before (int template0, rtx_insn *insn)
 {
   rtx b = gen_bundle_selector (GEN_INT (template0));
 
@@ -9025,15 +9069,14 @@ ia64_add_bundle_selector_before (int template0, rtx insn)
    EBB.  */
 
 static void
-bundling (FILE *dump, int verbose, rtx prev_head_insn, rtx tail)
+bundling (FILE *dump, int verbose, rtx_insn *prev_head_insn, rtx_insn *tail)
 {
   struct bundle_state *curr_state, *next_state, *best_state;
-  rtx insn, next_insn;
+  rtx_insn *insn, *next_insn;
   int insn_num;
   int i, bundle_end_p, only_bundle_end_p, asm_p;
   int pos = 0, max_pos, template0, template1;
-  rtx b;
-  rtx nop;
+  rtx_insn *b;
   enum attr_type type;
 
   insn_num = 0;
@@ -9245,8 +9288,8 @@ bundling (FILE *dump, int verbose, rtx prev_head_insn, rtx tail)
         /* Emit nops after the current insn.  */
         for (i = 0; i < curr_state->after_nops_num; i++)
           {
-             nop = gen_nop ();
-             emit_insn_after (nop, insn);
+             rtx nop_pat = gen_nop ();
+             rtx_insn *nop = emit_insn_after (nop_pat, insn);
             pos--;
             gcc_assert (pos >= 0);
             if (pos % 3 == 0)
@@ -9289,9 +9332,9 @@ bundling (FILE *dump, int verbose, rtx prev_head_insn, rtx tail)
         /* Emit nops after the current insn.  */
         for (i = 0; i < curr_state->before_nops_num; i++)
           {
-             nop = gen_nop ();
-             ia64_emit_insn_before (nop, insn);
-             nop = PREV_INSN (insn);
+             rtx nop_pat = gen_nop ();
+             ia64_emit_insn_before (nop_pat, insn);
+             rtx_insn *nop = PREV_INSN (insn);
             insn = nop;
             pos--;
             gcc_assert (pos >= 0);
@@ -9325,7 +9368,7 @@ bundling (FILE *dump, int verbose, rtx prev_head_insn, rtx tail)
         start_bundle = true;
       else
         {
-           rtx next_insn;
+           rtx_insn *next_insn;
 
           for (next_insn = NEXT_INSN (insn);
               next_insn && next_insn != tail;
@@ -9393,7 +9436,7 @@ ia64_sched_finish (FILE *dump, int sched_verbose)
 static void
 final_emit_insn_group_barriers (FILE *dump ATTRIBUTE_UNUSED)
 {
-  rtx insn;
+  rtx_insn *insn;
   int need_barrier_p = 0;
   int seen_good_insn = 0;
 
@@ -9405,7 +9448,7 @@ final_emit_insn_group_barriers (FILE *dump ATTRIBUTE_UNUSED)
     {
       if (BARRIER_P (insn))
       {
-         rtx last = prev_active_insn (insn);
+         rtx_insn *last = prev_active_insn (insn);
 
         if (! last)
           continue;
@@ -9433,7 +9476,7 @@ final_emit_insn_group_barriers (FILE *dump ATTRIBUTE_UNUSED)
       {
        if (TARGET_EARLY_STOP_BITS)
         {
-           rtx last;
+           rtx_insn *last;
 
          for (last = insn;
              last != current_sched_info->prev_head;
@@ -9503,10 +9546,10 @@ ia64_init_dfa_pre_cycle_insn (void)
       prev_cycle_state = xmalloc (dfa_state_size);
     }
   dfa_pre_cycle_insn = make_insn_raw (gen_pre_cycle ());
-  PREV_INSN (dfa_pre_cycle_insn) = NEXT_INSN (dfa_pre_cycle_insn) = NULL_RTX;
+  SET_PREV_INSN (dfa_pre_cycle_insn) = SET_NEXT_INSN (dfa_pre_cycle_insn) = NULL_RTX;
   recog_memoized (dfa_pre_cycle_insn);
   dfa_stop_insn = make_insn_raw (gen_insn_group_barrier (GEN_INT (3)));
-  PREV_INSN (dfa_stop_insn) = NEXT_INSN (dfa_stop_insn) = NULL_RTX;
+  SET_PREV_INSN (dfa_stop_insn) = SET_NEXT_INSN (dfa_stop_insn) = NULL_RTX;
   recog_memoized (dfa_stop_insn);
 }
 
@@ -9523,7 +9566,7 @@ ia64_dfa_pre_cycle_insn (void)
    ld) produces address for CONSUMER (of type st or stf).  */
 
 int
-ia64_st_address_bypass_p (rtx producer, rtx consumer)
+ia64_st_address_bypass_p (rtx_insn *producer, rtx_insn *consumer)
 {
   rtx dest, reg, mem;
 
@@ -9547,7 +9590,7 @@ ia64_st_address_bypass_p (rtx producer, rtx consumer)
    ld) produces address for CONSUMER (of type ld or fld).  */
 
 int
-ia64_ld_address_bypass_p (rtx producer, rtx consumer)
+ia64_ld_address_bypass_p (rtx_insn *producer, rtx_insn *consumer)
 {
   rtx dest, src, reg, mem;
 
@@ -9616,7 +9659,7 @@ emit_predicate_relation_info (void)
   FOR_EACH_BB_REVERSE_FN (bb, cfun)
     {
       int r;
-      rtx head = BB_HEAD (bb);
+      rtx_insn *head = BB_HEAD (bb);
 
       /* We only need such notes at code labels.  */
       if (! LABEL_P (head))
@@ -9630,7 +9673,7 @@ emit_predicate_relation_info (void)
       if (REGNO_REG_SET_P (df_get_live_in (bb), r))
         {
           rtx p = gen_rtx_REG (BImode, r);
-           rtx n = emit_insn_after (gen_pred_rel_mutex (p), head);
+           rtx_insn *n = emit_insn_after (gen_pred_rel_mutex (p), head);
           if (head == BB_END (bb))
             BB_END (bb) = n;
           head = n;
@@ -9643,7 +9686,7 @@ emit_predicate_relation_info (void)
      the call.  */
   FOR_EACH_BB_REVERSE_FN (bb, cfun)
     {
-      rtx insn = BB_HEAD (bb);
+      rtx_insn *insn = BB_HEAD (bb);
 
       while (1)
       {
@@ -9651,8 +9694,9 @@ emit_predicate_relation_info (void)
            && GET_CODE (PATTERN (insn)) == COND_EXEC
            && find_reg_note (insn, REG_NORETURN, NULL_RTX))
          {
-             rtx b = emit_insn_before (gen_safe_across_calls_all (), insn);
-             rtx a = emit_insn_after (gen_safe_across_calls_normal (), insn);
+             rtx_insn *b =
               emit_insn_before (gen_safe_across_calls_all (), insn);
+             rtx_insn *a = emit_insn_after (gen_safe_across_calls_normal (), insn);
           if (BB_HEAD (bb) == insn)
             BB_HEAD (bb) = b;
           if (BB_END (bb) == insn)
@@ -9693,7 +9737,7 @@ ia64_reorg (void)
 
       initiate_bundle_states ();
       ia64_nop = make_insn_raw (gen_nop ());
-      PREV_INSN (ia64_nop) = NEXT_INSN (ia64_nop) = NULL_RTX;
+      SET_PREV_INSN (ia64_nop) = SET_NEXT_INSN (ia64_nop) = NULL_RTX;
       recog_memoized (ia64_nop);
       clocks_length = get_max_uid () + 1;
       stops_p = XCNEWVEC (char, clocks_length);
@@ -9786,7 +9830,7 @@ ia64_reorg (void)
        properly.  Note that IA-64 differs from dwarf2 on this point.  */
      if (ia64_except_unwind_info (&global_options) == UI_TARGET)
       {
-         rtx insn;
+         rtx_insn *insn;
         int saw_stop = 0;
 
         insn = get_last_insn ();
@@ -9907,7 +9951,7 @@ ia64_in_small_data_p (const_tree exp)
 
   if (TREE_CODE (exp) == VAR_DECL && DECL_SECTION_NAME (exp))
     {
-      const char *section = TREE_STRING_POINTER (DECL_SECTION_NAME (exp));
+      const char *section = DECL_SECTION_NAME (exp);
 
      if (strcmp (section, ".sdata") == 0
         || strncmp (section, ".sdata.", 7) == 0
@@ -10186,7 +10230,7 @@ process_cfa_offset (FILE *asm_out_file, rtx pat, bool unwind)
    required to unwind this insn.  */
 
 static void
-ia64_asm_unwind_emit (FILE *asm_out_file, rtx insn)
+ia64_asm_unwind_emit (FILE *asm_out_file, rtx_insn *insn)
 {
   bool unwind = ia64_except_unwind_info (&global_options) == UI_TARGET;
   bool frame = dwarf2out_do_frame ();
@@ -10433,7 +10477,7 @@ ia64_init_builtins (void)
 
 rtx
 ia64_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
-                     enum machine_mode mode ATTRIBUTE_UNUSED,
+                     machine_mode mode ATTRIBUTE_UNUSED,
                      int ignore ATTRIBUTE_UNUSED)
 {
   tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
@@ -10457,7 +10501,7 @@ ia64_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
     case IA64_BUILTIN_INFQ:
     case IA64_BUILTIN_HUGE_VALQ:
       {
-       enum machine_mode target_mode = TYPE_MODE (TREE_TYPE (exp));
+       machine_mode target_mode = TYPE_MODE (TREE_TYPE (exp));
        REAL_VALUE_TYPE inf;
        rtx tmp;
 
@@ -10499,7 +10543,7 @@ ia64_builtin_decl (unsigned code, bool initialize_p ATTRIBUTE_UNUSED)
    most significant bits of the stack slot.  */
 
 enum direction
-ia64_hpux_function_arg_padding (enum machine_mode mode, const_tree type)
+ia64_hpux_function_arg_padding (machine_mode mode, const_tree type)
 {
   /* Exception to normal case for structures/unions/etc.  */
 
@@ -10666,7 +10710,7 @@ ia64_soft_fp_init_libfuncs (void)
 }
 
 static bool
-ia64_vms_valid_pointer_mode (enum machine_mode mode)
+ia64_vms_valid_pointer_mode (machine_mode mode)
 {
   return (mode == SImode || mode == DImode);
 }
@@ -10693,7 +10737,7 @@ ia64_reloc_rw_mask (void)
    is to honor small data.  */
 
 static section *
-ia64_select_rtx_section (enum machine_mode mode, rtx x,
+ia64_select_rtx_section (machine_mode mode, rtx x,
                          unsigned HOST_WIDE_INT align)
 {
   if (GET_MODE_SIZE (mode) > 0
@@ -10742,7 +10786,7 @@ ia64_struct_retval_addr_is_first_parm_p (tree fntype)
          && ret_type
         && TYPE_MODE (ret_type) == BLKmode
        && TREE_ADDRESSABLE (ret_type)
-       && strcmp (lang_hooks.name, "GNU C++") == 0);
+       && lang_GNU_CXX ());
 }
 
 /* Output the assembler code for a thunk function.  THUNK_DECL is the
@@ -10756,7 +10800,8 @@ ia64_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
                       HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
                       tree function)
 {
-  rtx this_rtx, insn, funexp;
+  rtx this_rtx, funexp;
+  rtx_insn *insn;
   unsigned int this_parmno;
   unsigned int this_regno;
   rtx delta_rtx;
@@ -10901,7 +10946,7 @@ ia64_struct_value_rtx (tree fntype,
 }
 
 static bool
-ia64_scalar_mode_supported_p (enum machine_mode mode)
+ia64_scalar_mode_supported_p (machine_mode mode)
 {
   switch (mode)
     {
@@ -10927,7 +10972,7 @@ ia64_scalar_mode_supported_p (enum machine_mode mode)
 }
 
 static bool
-ia64_vector_mode_supported_p (enum machine_mode mode)
+ia64_vector_mode_supported_p (machine_mode mode)
 {
   switch (mode)
     {
@@ -10944,6 +10989,36 @@ ia64_vector_mode_supported_p (enum machine_mode mode)
     }
 }
 
+/* Implement TARGET_LIBGCC_FLOATING_MODE_SUPPORTED_P.  */
+
+static bool
+ia64_libgcc_floating_mode_supported_p (machine_mode mode)
+{
+  switch (mode)
+    {
+    case SFmode:
+    case DFmode:
+      return true;
+
+    case XFmode:
+#ifdef IA64_NO_LIBGCC_XFMODE
+      return false;
+#else
+      return true;
+#endif
+
+    case TFmode:
+#ifdef IA64_NO_LIBGCC_TFMODE
+      return false;
+#else
+      return true;
+#endif
+
+    default:
+      return false;
+    }
+}
+
 /* Implement the FUNCTION_PROFILER macro.  */
 
 void
@@ -11134,7 +11209,7 @@ ia64_handle_version_id_attribute (tree *node ATTRIBUTE_UNUSED,
 
 /* Target hook for c_mode_for_suffix.  */
 
-static enum machine_mode
+static machine_mode
 ia64_c_mode_for_suffix (char suffix)
 {
   if (suffix == 'q')
@@ -11173,7 +11248,7 @@ ia64_dconst_0_375 (void)
   return ia64_dconst_0_375_rtx;
 }
 
-static enum machine_mode
+static machine_mode
 ia64_get_reg_raw_mode (int regno)
 {
   if (FR_REGNO_P (regno))
@@ -11185,7 +11260,7 @@ ia64_get_reg_raw_mode (int regno)
    anymore.  */
 
 bool
-ia64_member_type_forces_blk (const_tree, enum machine_mode mode)
+ia64_member_type_forces_blk (const_tree, machine_mode mode)
 {
   return TARGET_HPUX && mode == TFmode;
 }
@@ -11217,10 +11292,10 @@ expand_vselect (rtx target, rtx op0, const unsigned char *perm, unsigned nelt)
 
   x = gen_rtx_VEC_SELECT (GET_MODE (target), op0, x);
   x = gen_rtx_SET (VOIDmode, target, x);
 
-  x = emit_insn (x);
-  if (recog_memoized (x) < 0)
+  rtx_insn *insn = emit_insn (x);
+  if (recog_memoized (insn) < 0)
     {
-      remove_insn (x);
+      remove_insn (insn);
      return false;
    }
   return true;
@@ -11232,7 +11307,7 @@ static bool
 expand_vselect_vconcat (rtx target, rtx op0, rtx op1,
                         const unsigned char *perm, unsigned nelt)
 {
-  enum machine_mode v2mode;
+  machine_mode v2mode;
   rtx x;
 
   v2mode = GET_MODE_2XWIDER_MODE (GET_MODE (op0));
@@ -11415,7 +11490,7 @@ expand_vec_perm_interleave_2 (struct expand_vec_perm_d *d)
   unsigned char remap[2 * MAX_VECT_LEN];
   unsigned contents, i, nelt, nelt2;
   unsigned h0, h1, h2, h3;
-  rtx seq;
+  rtx_insn *seq;
   bool ok;
 
   if (d->one_operand_p)
@@ -11667,7 +11742,7 @@ ia64_expand_vec_perm_const (rtx operands[4])
 /* Implement targetm.vectorize.vec_perm_const_ok.  */
 
 static bool
-ia64_vectorize_vec_perm_const_ok (enum machine_mode vmode,
+ia64_vectorize_vec_perm_const_ok (machine_mode vmode,
                                   const unsigned char *sel)
 {
   struct expand_vec_perm_d d;
@@ -11740,7 +11815,7 @@ void
 ia64_expand_vec_perm_even_odd (rtx target, rtx op0, rtx op1, int odd)
 {
   struct expand_vec_perm_d d;
-  enum machine_mode vmode = GET_MODE (target);
+  machine_mode vmode = GET_MODE (target);
   unsigned int i, nelt = GET_MODE_NUNITS (vmode);
   bool ok;