author:    bstarynk <bstarynk@138bc75d-0d04-0410-961f-82ee72b054a4>  2009-04-27 12:45:13 +0000
committer: bstarynk <bstarynk@138bc75d-0d04-0410-961f-82ee72b054a4>  2009-04-27 12:45:13 +0000
commit:    268b9e9e95f56a59a8817b28ad59b53f40fc668d (patch)
tree:      5e9529982daf11d5b3ab800d4c58bc3fbee99d28 /gcc/cfgexpand.c
parent:    e1910362719612f58bd1ea5050fa7a5175036abc (diff)
download:  gcc-268b9e9e95f56a59a8817b28ad59b53f40fc668d.tar.gz
2009-04-27 Basile Starynkevitch <basile@starynkevitch.net>
MERGED WITH TRUNK r146824:
* gcc/basilys.h: all GTY markers now go before the identifiers.
* gcc/basilys.c: removed errors.h include.
* gcc/run-basilys.h: ditto.
git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/branches/melt-branch@146839 138bc75d-0d04-0410-961f-82ee72b054a4
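
For context, the basilys.h entry refers to gengtype's GTY(()) annotations, which mark roots and structures for GCC's garbage collector: gengtype expects the marker ahead of the declared identifier, not trailing it. A minimal sketch of the two placements, with hypothetical names that are not taken from basilys.h:

    /* Root declaration: GTY follows the storage class and precedes the
       declared identifier.  */
    static GTY (()) tree example_cache_node;

    /* Struct field: the marker (and any options) sits between the type
       and the identifier.  */
    struct example_st {
      tree GTY ((skip)) scratch;  /* before the identifier, as expected */
    };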
Diffstat (limited to 'gcc/cfgexpand.c')
-rw-r--r--  gcc/cfgexpand.c | 316
1 file changed, 245 insertions(+), 71 deletions(-)
diff --git a/gcc/cfgexpand.c b/gcc/cfgexpand.c
index 695e4ef0ef9..a5765f81c47 100644
--- a/gcc/cfgexpand.c
+++ b/gcc/cfgexpand.c
@@ -42,8 +42,13 @@ along with GCC; see the file COPYING3.  If not see
 #include "tree-inline.h"
 #include "value-prof.h"
 #include "target.h"
+#include "ssaexpand.h"
+/* This variable holds information helping the rewriting of SSA trees
+   into RTL.  */
+struct ssaexpand SA;
+
 /* Return an expression tree corresponding to the RHS of GIMPLE
    statement STMT.  */
@@ -78,8 +83,22 @@ gimple_assign_rhs_to_tree (gimple stmt)
 static tree
 gimple_cond_pred_to_tree (gimple stmt)
 {
+  /* We're sometimes presented with such code:
+       D.123_1 = x < y;
+       if (D.123_1 != 0)
+         ...
+     This would expand to two comparisons which then later might
+     be cleaned up by combine.  But some pattern matchers like if-conversion
+     work better when there's only one compare, so make up for this
+     here as special exception if TER would have made the same change.  */
+  tree lhs = gimple_cond_lhs (stmt);
+  if (SA.values
+      && TREE_CODE (lhs) == SSA_NAME
+      && SA.values[SSA_NAME_VERSION (lhs)])
+    lhs = gimple_assign_rhs_to_tree (SA.values[SSA_NAME_VERSION (lhs)]);
+
   return build2 (gimple_cond_code (stmt), boolean_type_node,
-                 gimple_cond_lhs (stmt), gimple_cond_rhs (stmt));
+                 lhs, gimple_cond_rhs (stmt));
 }
 
 /* Helper for gimple_to_tree.  Set EXPR_LOCATION for every expression
@@ -423,6 +442,23 @@ failed:
 #define STACK_ALIGNMENT_NEEDED 1
 #endif
 
+#define SSAVAR(x) (TREE_CODE (x) == SSA_NAME ? SSA_NAME_VAR (x) : x)
+
+/* Associate declaration T with storage space X.  If T is no
+   SSA name this is exactly SET_DECL_RTL, otherwise make the
+   partition of T associated with X.  */
+static inline void
+set_rtl (tree t, rtx x)
+{
+  if (TREE_CODE (t) == SSA_NAME)
+    {
+      SA.partition_to_pseudo[var_to_partition (SA.map, t)] = x;
+      if (x && !MEM_P (x))
+        set_reg_attrs_for_decl_rtl (SSA_NAME_VAR (t), x);
+    }
+  else
+    SET_DECL_RTL (t, x);
+}
 
 /* This structure holds data relevant to one variable that will be
    placed in a stack slot.  */
@@ -561,15 +597,15 @@ add_stack_var (tree decl)
     }
   stack_vars[stack_vars_num].decl = decl;
   stack_vars[stack_vars_num].offset = 0;
-  stack_vars[stack_vars_num].size = tree_low_cst (DECL_SIZE_UNIT (decl), 1);
-  stack_vars[stack_vars_num].alignb = get_decl_align_unit (decl);
+  stack_vars[stack_vars_num].size = tree_low_cst (DECL_SIZE_UNIT (SSAVAR (decl)), 1);
+  stack_vars[stack_vars_num].alignb = get_decl_align_unit (SSAVAR (decl));
 
   /* All variables are initially in their own partition.  */
   stack_vars[stack_vars_num].representative = stack_vars_num;
   stack_vars[stack_vars_num].next = EOC;
 
   /* Ensure that this decl doesn't get put onto the list twice.  */
-  SET_DECL_RTL (decl, pc_rtx);
+  set_rtl (decl, pc_rtx);
 
   stack_vars_num++;
 }
@@ -688,22 +724,37 @@ add_alias_set_conflicts (void)
 }
 
 /* A subroutine of partition_stack_vars.  A comparison function for qsort,
-   sorting an array of indices by the size of the object.  */
+   sorting an array of indices by the size and type of the object.  */
 
 static int
 stack_var_size_cmp (const void *a, const void *b)
 {
   HOST_WIDE_INT sa = stack_vars[*(const size_t *)a].size;
   HOST_WIDE_INT sb = stack_vars[*(const size_t *)b].size;
-  unsigned int uida = DECL_UID (stack_vars[*(const size_t *)a].decl);
-  unsigned int uidb = DECL_UID (stack_vars[*(const size_t *)b].decl);
+  tree decla, declb;
+  unsigned int uida, uidb;
 
   if (sa < sb)
     return -1;
   if (sa > sb)
     return 1;
-  /* For stack variables of the same size use the uid of the decl
-     to make the sort stable.  */
+  decla = stack_vars[*(const size_t *)a].decl;
+  declb = stack_vars[*(const size_t *)b].decl;
+  /* For stack variables of the same size use and id of the decls
+     to make the sort stable.  Two SSA names are compared by their
+     version, SSA names come before non-SSA names, and two normal
+     decls are compared by their DECL_UID.  */
+  if (TREE_CODE (decla) == SSA_NAME)
+    {
+      if (TREE_CODE (declb) == SSA_NAME)
+        uida = SSA_NAME_VERSION (decla), uidb = SSA_NAME_VERSION (declb);
+      else
+        return -1;
+    }
+  else if (TREE_CODE (declb) == SSA_NAME)
+    return 1;
+  else
+    uida = DECL_UID (decla), uidb = DECL_UID (declb);
   if (uida < uidb)
     return -1;
   if (uida > uidb)
@@ -874,21 +925,27 @@ expand_one_stack_var_at (tree decl, HOST_WIDE_INT offset)
   gcc_assert (offset == trunc_int_for_mode (offset, Pmode));
 
   x = plus_constant (virtual_stack_vars_rtx, offset);
-  x = gen_rtx_MEM (DECL_MODE (decl), x);
-
-  /* Set alignment we actually gave this decl.  */
-  offset -= frame_phase;
-  align = offset & -offset;
-  align *= BITS_PER_UNIT;
-  if (align == 0)
-    align = STACK_BOUNDARY;
-  else if (align > MAX_SUPPORTED_STACK_ALIGNMENT)
-    align = MAX_SUPPORTED_STACK_ALIGNMENT;
-  DECL_ALIGN (decl) = align;
-  DECL_USER_ALIGN (decl) = 0;
+  x = gen_rtx_MEM (DECL_MODE (SSAVAR (decl)), x);
 
-  set_mem_attributes (x, decl, true);
-  SET_DECL_RTL (decl, x);
+  if (TREE_CODE (decl) != SSA_NAME)
+    {
+      /* Set alignment we actually gave this decl if it isn't an SSA name.
+         If it is we generate stack slots only accidentally so it isn't as
+         important, we'll simply use the alignment that is already set.  */
+      offset -= frame_phase;
+      align = offset & -offset;
+      align *= BITS_PER_UNIT;
+      if (align == 0)
+        align = STACK_BOUNDARY;
+      else if (align > MAX_SUPPORTED_STACK_ALIGNMENT)
+        align = MAX_SUPPORTED_STACK_ALIGNMENT;
+
+      DECL_ALIGN (decl) = align;
+      DECL_USER_ALIGN (decl) = 0;
+    }
+
+  set_mem_attributes (x, SSAVAR (decl), true);
+  set_rtl (decl, x);
 }
 
 /* A subroutine of expand_used_vars.  Give each partition representative
@@ -912,7 +969,9 @@ expand_stack_vars (bool (*pred) (tree))
       /* Skip variables that have already had rtl assigned.  See also
         add_stack_var where we perpetrate this pc_rtx hack.  */
-      if (DECL_RTL (stack_vars[i].decl) != pc_rtx)
+      if ((TREE_CODE (stack_vars[i].decl) == SSA_NAME
+           ? SA.partition_to_pseudo[var_to_partition (SA.map, stack_vars[i].decl)]
+           : DECL_RTL (stack_vars[i].decl)) != pc_rtx)
        continue;
 
       /* Check the predicate to see whether this variable should be
@@ -951,7 +1010,7 @@ account_stack_vars (void)
          size += stack_vars[i].size;
          for (j = i; j != EOC; j = stack_vars[j].next)
-           SET_DECL_RTL (stack_vars[j].decl, NULL);
+           set_rtl (stack_vars[j].decl, NULL);
        }
 
   return size;
 }
@@ -964,8 +1023,8 @@
 expand_one_stack_var (tree var)
 {
   HOST_WIDE_INT size, offset, align;
 
-  size = tree_low_cst (DECL_SIZE_UNIT (var), 1);
-  align = get_decl_align_unit (var);
+  size = tree_low_cst (DECL_SIZE_UNIT (SSAVAR (var)), 1);
+  align = get_decl_align_unit (SSAVAR (var));
   offset = alloc_stack_frame_space (size, align);
 
   expand_one_stack_var_at (var, offset);
@@ -986,20 +1045,21 @@ expand_one_hard_reg_var (tree var)
 static void
 expand_one_register_var (tree var)
 {
-  tree type = TREE_TYPE (var);
+  tree decl = SSAVAR (var);
+  tree type = TREE_TYPE (decl);
   int unsignedp = TYPE_UNSIGNED (type);
   enum machine_mode reg_mode
-    = promote_mode (type, DECL_MODE (var), &unsignedp, 0);
+    = promote_mode (type, DECL_MODE (decl), &unsignedp, 0);
   rtx x = gen_reg_rtx (reg_mode);
 
-  SET_DECL_RTL (var, x);
+  set_rtl (var, x);
 
   /* Note if the object is a user variable.  */
-  if (!DECL_ARTIFICIAL (var))
-    mark_user_reg (x);
+  if (!DECL_ARTIFICIAL (decl))
+    mark_user_reg (x);
 
   if (POINTER_TYPE_P (type))
-    mark_reg_pointer (x, TYPE_ALIGN (TREE_TYPE (TREE_TYPE (var))));
+    mark_reg_pointer (x, TYPE_ALIGN (TREE_TYPE (type)));
 }
 
 /* A subroutine of expand_one_var.  Called to assign rtl to a VAR_DECL that
@@ -1067,6 +1127,9 @@ defer_stack_allocation (tree var, bool toplevel)
 static HOST_WIDE_INT
 expand_one_var (tree var, bool toplevel, bool really_expand)
 {
+  tree origvar = var;
+  var = SSAVAR (var);
+
   if (SUPPORTS_STACK_ALIGNMENT
       && TREE_TYPE (var) != error_mark_node
       && TREE_CODE (var) == VAR_DECL)
@@ -1092,7 +1155,18 @@ expand_one_var (tree var, bool toplevel, bool really_expand)
        }
     }
 
-  if (TREE_CODE (var) != VAR_DECL)
+  if (TREE_CODE (origvar) == SSA_NAME)
+    {
+      gcc_assert (TREE_CODE (var) != VAR_DECL
+                  || (!DECL_EXTERNAL (var)
+                      && !DECL_HAS_VALUE_EXPR_P (var)
+                      && !TREE_STATIC (var)
+                      && !DECL_RTL_SET_P (var)
+                      && TREE_TYPE (var) != error_mark_node
+                      && !DECL_HARD_REGISTER (var)
+                      && really_expand));
+    }
+  if (TREE_CODE (var) != VAR_DECL && TREE_CODE (origvar) != SSA_NAME)
     ;
   else if (DECL_EXTERNAL (var))
     ;
@@ -1107,7 +1181,7 @@ expand_one_var (tree var, bool toplevel, bool really_expand)
       if (really_expand)
        expand_one_error_var (var);
     }
-  else if (DECL_HARD_REGISTER (var))
+  else if (TREE_CODE (var) == VAR_DECL && DECL_HARD_REGISTER (var))
     {
       if (really_expand)
        expand_one_hard_reg_var (var);
@@ -1115,14 +1189,14 @@ expand_one_var (tree var, bool toplevel, bool really_expand)
   else if (use_register_for_decl (var))
     {
       if (really_expand)
-       expand_one_register_var (var);
+       expand_one_register_var (origvar);
     }
   else if (defer_stack_allocation (var, toplevel))
-    add_stack_var (var);
+    add_stack_var (origvar);
   else
     {
       if (really_expand)
-        expand_one_stack_var (var);
+        expand_one_stack_var (origvar);
       return tree_low_cst (DECL_SIZE_UNIT (var), 1);
     }
   return 0;
@@ -1441,6 +1515,7 @@
 static void
 expand_used_vars (void)
 {
   tree t, next, outer_block = DECL_INITIAL (current_function_decl);
+  unsigned i;
 
   /* Compute the phase of the stack frame for this function.  */
   {
@@ -1451,6 +1526,28 @@ expand_used_vars (void)
 
   init_vars_expansion ();
 
+  for (i = 0; i < SA.map->num_partitions; i++)
+    {
+      tree var = partition_to_var (SA.map, i);
+
+      gcc_assert (is_gimple_reg (var));
+      if (TREE_CODE (SSA_NAME_VAR (var)) == VAR_DECL)
+       expand_one_var (var, true, true);
+      else
+       {
+         /* This is a PARM_DECL or RESULT_DECL.  For those partitions that
+            contain the default def (representing the parm or result itself)
+            we don't do anything here.  But those which don't contain the
+            default def (representing a temporary based on the parm/result)
+            we need to allocate space just like for normal VAR_DECLs.  */
+         if (!bitmap_bit_p (SA.partition_has_default_def, i))
+           {
+             expand_one_var (var, true, true);
+             gcc_assert (SA.partition_to_pseudo[i]);
+           }
+       }
+    }
+
   /* At this point all variables on the local_decls with TREE_USED
      set are not associated with any block scope.  Lay them out.  */
   t = cfun->local_decls;
@@ -1462,19 +1559,15 @@ expand_used_vars (void)
       next = TREE_CHAIN (t);
 
+      /* Expanded above already.  */
+      if (is_gimple_reg (var))
+       ;
       /* We didn't set a block for static or extern because it's hard
         to tell the difference between a global variable (re)declared
        in a local scope, and one that's really declared there to
        begin with.  And it doesn't really matter much, since we're
        not giving them stack space.  Expand them now.  */
-      if (TREE_STATIC (var) || DECL_EXTERNAL (var))
-       expand_now = true;
-
-      /* Any variable that could have been hoisted into an SSA_NAME
-        will have been propagated anywhere the optimizers chose,
-        i.e. not confined to their original block.  Allocate them
-        as if they were defined in the outermost scope.  */
-      else if (is_gimple_reg (var))
+      else if (TREE_STATIC (var) || DECL_EXTERNAL (var))
       expand_now = true;
 
       /* If the variable is not associated with any block, then it
@@ -1674,6 +1767,19 @@ expand_gimple_cond (basic_block bb, gimple stmt)
         true_edge->goto_block = NULL;
         false_edge->flags |= EDGE_FALLTHRU;
         ggc_free (pred);
+         /* Special case: when jumpif decides that the condition is
+            trivial it emits an unconditional jump (and the necessary
+            barrier).  But we still have two edges, the fallthru one is
+            wrong.  purge_dead_edges would clean this up later.  Unfortunately
+            we have to insert insns (and split edges) before
+            find_many_sub_basic_blocks and hence before purge_dead_edges.
+            But splitting edges might create new blocks which depend on the
+            fact that if there are two edges there's no barrier.  So the
+            barrier would get lost and verify_flow_info would ICE.  Instead
+            of auditing all edge splitters to care for the barrier (which
+            normally isn't there in a cleaned CFG), fix it here.  */
+         if (BARRIER_P (get_last_insn ()))
+           remove_edge (false_edge);
         return NULL;
       }
       if (true_edge->dest == bb->next_bb)
@@ -1690,6 +1796,8 @@ expand_gimple_cond (basic_block bb, gimple stmt)
         false_edge->goto_block = NULL;
         true_edge->flags |= EDGE_FALLTHRU;
         ggc_free (pred);
+         if (BARRIER_P (get_last_insn ()))
+           remove_edge (true_edge);
         return NULL;
       }
@@ -1932,20 +2040,6 @@ expand_gimple_basic_block (basic_block bb)
 
   NOTE_BASIC_BLOCK (note) = bb;
 
-  for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); )
-    {
-      /* Clear EDGE_EXECUTABLE.  This flag is never used in the backend.  */
-      e->flags &= ~EDGE_EXECUTABLE;
-
-      /* At the moment not all abnormal edges match the RTL representation.
-        It is safe to remove them here as find_many_sub_basic_blocks will
-        rediscover them.  In the future we should get this fixed properly.  */
-      if (e->flags & EDGE_ABNORMAL)
-       remove_edge (e);
-      else
-       ei_next (&ei);
-    }
-
   for (; !gsi_end_p (gsi); gsi_next (&gsi))
     {
       gimple stmt = gsi_stmt (gsi);
@@ -1975,7 +2069,19 @@ expand_gimple_basic_block (basic_block bb)
       }
       else if (gimple_code (stmt) != GIMPLE_CHANGE_DYNAMIC_TYPE)
       {
-         tree stmt_tree = gimple_to_tree (stmt);
+         def_operand_p def_p;
+         tree stmt_tree;
+         def_p = SINGLE_SSA_DEF_OPERAND (stmt, SSA_OP_DEF);
+
+         if (def_p != NULL)
+           {
+             /* Ignore this stmt if it is in the list of
+                replaceable expressions.  */
+             if (SA.values
+                 && SA.values[SSA_NAME_VERSION (DEF_FROM_PTR (def_p))])
+               continue;
+           }
+         stmt_tree = gimple_to_tree (stmt);
         last = get_last_insn ();
         expand_expr_stmt (stmt_tree);
         maybe_dump_rtl_for_gimple_stmt (stmt, last);
@@ -2286,6 +2392,11 @@ gimple_expand_cfg (void)
   sbitmap blocks;
   edge_iterator ei;
   edge e;
+  unsigned i;
+
+  rewrite_out_of_ssa (&SA);
+  SA.partition_to_pseudo = (rtx *)xcalloc (SA.map->num_partitions,
+                                          sizeof (rtx));
 
   /* Some backends want to know that we are expanding to RTL.  */
   currently_expanding_to_rtl = 1;
@@ -2339,6 +2450,29 @@ gimple_expand_cfg (void)
   /* Set up parameters and prepare for return, for the function.  */
   expand_function_start (current_function_decl);
 
+  /* Now that we also have the parameter RTXs, copy them over to our
+     partitions.  */
+  for (i = 0; i < SA.map->num_partitions; i++)
+    {
+      tree var = SSA_NAME_VAR (partition_to_var (SA.map, i));
+
+      if (TREE_CODE (var) != VAR_DECL
+         && !SA.partition_to_pseudo[i])
+       SA.partition_to_pseudo[i] = DECL_RTL_IF_SET (var);
+      gcc_assert (SA.partition_to_pseudo[i]);
+      /* Some RTL parts really want to look at DECL_RTL(x) when x
+        was a decl marked in REG_ATTR or MEM_ATTR.  We could use
+        SET_DECL_RTL here making this available, but that would mean
+        to select one of the potentially many RTLs for one DECL.  Instead
+        of doing that we simply reset the MEM_EXPR of the RTL in question,
+        then nobody can get at it and hence nobody can call DECL_RTL on it.  */
+      if (!DECL_RTL_SET_P (var))
+       {
+         if (MEM_P (SA.partition_to_pseudo[i]))
+           set_mem_expr (SA.partition_to_pseudo[i], NULL);
+       }
+    }
+
   /* If this function is `main', emit a call to `__main'
      to run global initializers, etc.  */
   if (DECL_NAME (current_function_decl)
@@ -2368,13 +2502,15 @@ gimple_expand_cfg (void)
       gcc_assert (crtl->parm_stack_boundary <= INCOMING_STACK_BOUNDARY);
     }
 
+  expand_phi_nodes (&SA);
+
   /* Register rtl specific functions for cfg.  */
   rtl_register_cfg_hooks ();
 
   init_block = construct_init_block ();
 
   /* Clear EDGE_EXECUTABLE on the entry edge(s).  It is cleaned from the
-     remaining edges in expand_gimple_basic_block.  */
+     remaining edges later.  */
   FOR_EACH_EDGE (e, ei, ENTRY_BLOCK_PTR->succs)
     e->flags &= ~EDGE_EXECUTABLE;
 
@@ -2382,6 +2518,9 @@ gimple_expand_cfg (void)
   FOR_BB_BETWEEN (bb, init_block->next_bb, EXIT_BLOCK_PTR, next_bb)
     bb = expand_gimple_basic_block (bb);
 
+  execute_free_datastructures ();
+  finish_out_of_ssa (&SA);
+
   /* Expansion is used by optimization passes too, set maybe_hot_insn_p
      conservatively to true until they are all profile aware.  */
   pointer_map_destroy (lab_rtx_for_bb);
@@ -2391,9 +2530,6 @@ gimple_expand_cfg (void)
   set_curr_insn_block (DECL_INITIAL (current_function_decl));
   insn_locators_finalize ();
 
-  /* We're done expanding trees to RTL.  */
-  currently_expanding_to_rtl = 0;
-
   /* Convert tree EH labels to RTL EH labels and zap the tree EH table.  */
   convert_from_eh_region_ranges ();
   set_eh_throw_stmt_table (cfun, NULL);
@@ -2401,11 +2537,48 @@ gimple_expand_cfg (void)
   rebuild_jump_labels (get_insns ());
   find_exception_handler_labels ();
 
+  FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR, next_bb)
+    {
+      edge e;
+      edge_iterator ei;
+      for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); )
+       {
+         if (e->insns.r)
+           commit_one_edge_insertion (e);
+         else
+           ei_next (&ei);
+       }
+    }
+
+  /* We're done expanding trees to RTL.  */
+  currently_expanding_to_rtl = 0;
+
+  FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR->next_bb, EXIT_BLOCK_PTR, next_bb)
+    {
+      edge e;
+      edge_iterator ei;
+      for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); )
+       {
+         /* Clear EDGE_EXECUTABLE.  This flag is never used in the backend.  */
+         e->flags &= ~EDGE_EXECUTABLE;
+
+         /* At the moment not all abnormal edges match the RTL
+            representation.  It is safe to remove them here as
+            find_many_sub_basic_blocks will rediscover them.
+            In the future we should get this fixed properly.  */
+         if ((e->flags & EDGE_ABNORMAL)
+             && !(e->flags & EDGE_SIBCALL))
+           remove_edge (e);
+         else
+           ei_next (&ei);
+       }
+    }
+
   blocks = sbitmap_alloc (last_basic_block);
   sbitmap_ones (blocks);
   find_many_sub_basic_blocks (blocks);
-  purge_all_dead_edges ();
   sbitmap_free (blocks);
+  purge_all_dead_edges ();
 
   compact_blocks ();
@@ -2470,11 +2643,12 @@ struct rtl_opt_pass pass_expand =
   NULL,                                 /* next */
   0,                                    /* static_pass_number */
   TV_EXPAND,                            /* tv_id */
-  /* ??? If TER is enabled, we actually receive GENERIC.  */
-  PROP_gimple_leh | PROP_cfg,           /* properties_required */
+  PROP_ssa | PROP_gimple_leh | PROP_cfg,/* properties_required */
   PROP_rtl,                             /* properties_provided */
-  PROP_trees,                           /* properties_destroyed */
-  0,                                    /* todo_flags_start */
-  TODO_dump_func,                       /* todo_flags_finish */
+  PROP_ssa | PROP_trees,                /* properties_destroyed */
+  TODO_verify_ssa | TODO_verify_flow
+    | TODO_verify_stmts,                /* todo_flags_start */
+  TODO_dump_func
+    | TODO_ggc_collect                  /* todo_flags_finish */
 }
};
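
Two details of the patch above lend themselves to small standalone illustrations. First, the reworked stack_var_size_cmp: qsort makes no promise about the relative order of entries that compare equal, so the function breaks size ties with a stable id (SSA_NAME_VERSION for SSA names, which sort first, DECL_UID otherwise) to keep stack-slot partitioning deterministic from run to run. A self-contained analogue of that comparator pattern, using made-up types rather than GCC's:

    #include <stdio.h>
    #include <stdlib.h>

    /* Stand-in for a stack-slot record: is_ssa mirrors the SSA_NAME vs.
       plain-decl distinction, id mirrors SSA_NAME_VERSION / DECL_UID.  */
    struct slot { long size; int is_ssa; unsigned id; };

    static int
    slot_cmp (const void *pa, const void *pb)
    {
      const struct slot *a = pa, *b = pb;
      /* Primary key: object size.  */
      if (a->size < b->size) return -1;
      if (a->size > b->size) return 1;
      /* Tie-break 1: SSA entries sort before non-SSA entries.  */
      if (a->is_ssa != b->is_ssa) return a->is_ssa ? -1 : 1;
      /* Tie-break 2: a stable per-entry id, so the final order never
         depends on how qsort handles equal keys.  */
      if (a->id < b->id) return -1;
      if (a->id > b->id) return 1;
      return 0;
    }

    int main (void)
    {
      struct slot v[] = { {8, 0, 7}, {4, 1, 2}, {8, 1, 3}, {4, 0, 1} };
      qsort (v, 4, sizeof v[0], slot_cmp);
      for (int i = 0; i < 4; i++)
        printf ("size=%ld ssa=%d id=%u\n", v[i].size, v[i].is_ssa, v[i].id);
      return 0;
    }

Second, the alignment computation retained in expand_one_stack_var_at: offset & -offset isolates the lowest set bit of the frame offset, i.e. the largest power of two that divides it, which is the strongest alignment the slot's placement can guarantee (the patch then scales it by BITS_PER_UNIT and clamps it). A quick demonstration of the bit trick, plain C and illustrative only:

    #include <stdio.h>

    int main (void)
    {
      long offsets[] = { 4, 8, 12, 24, 96 };
      for (int i = 0; i < 5; i++)
        {
          long off = offsets[i];
          /* off & -off keeps only the lowest set bit: the largest
             power of two dividing off.  */
          printf ("offset %3ld -> guaranteed alignment %ld\n",
                  off, off & -off);
        }
      return 0;
    }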