Diffstat (limited to 'gcc')
-rw-r--r--  gcc/ChangeLog         137
-rw-r--r--  gcc/Makefile.in        24
-rw-r--r--  gcc/ddg.c              37
-rw-r--r--  gcc/df-core.c        1164
-rw-r--r--  gcc/df-problems.c    3093
-rw-r--r--  gcc/df-scan.c        1795
-rw-r--r--  gcc/df.c             3975
-rw-r--r--  gcc/df.h              670
-rw-r--r--  gcc/loop-invariant.c   68
-rw-r--r--  gcc/modulo-sched.c     15
-rw-r--r--  gcc/sched-deps.c        5
-rw-r--r--  gcc/web.c              71
12 files changed, 6748 insertions, 4306 deletions
diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index 61c5a682b69..2f622a133a9 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,3 +1,140 @@
+2006-01-11 Danny Berlin <dberlin@dberlin.org>
+ Kenneth Zadeck <zadeck@naturalbridge.com>
+
+ * df.h (DF_SCAN, DF_RU, DF_RD, DF_LR, DF_UR, DF_UREC, DF_CHAIN,
+ DF_RI, DF_LAST_PROBLEM_PLUS1, DF_DU_CHAIN, DF_UD_CHAIN,
+ DF_REF_TYPE_NAMES, DF_HARD_REGS, DF_EQUIV_NOTES, DF_SUBREGS,
+ DF_SCAN_BB_INFO, DF_RU_BB_INFO, DF_RD_BB_INFO, DF_LR_BB_INFO,
+ DF_UR_BB_INFO, DF_UREC_BB_INFO, DF_LIVE_IN, DF_LIVE_OUT,
+ DF_RA_LIVE_IN, DF_RA_LIVE_OUT, DF_UPWARD_LIVE_IN,
+ DF_UPWARD_LIVE_OUT, DF_REF_REAL_REG, DF_REF_REGNO,
+ DF_REF_REAL_LOC, DF_REF_REG, DF_REF_LOC, DF_REF_BB, DF_REF_BBNO,
+ DF_REF_INSN, DF_REF_INSN_UID, DF_REF_TYPE, DF_REF_CHAIN,
+ DF_REF_ID, DF_REF_FLAGS, DF_REF_NEXT_REG, DF_REF_PREV_REG,
+ DF_REF_NEXT_REF, DF_REF_DATA, DF_REF_REG_DEF_P, DF_REF_REG_USE_P,
+ DF_REF_REG_MEM_STORE_P, DF_REF_REG_MEM_LOAD_P, DF_REF_REG_MEM_P,
+ DF_DEFS_SIZE, DF_DEFS_GET, DF_DEFS_SET, DF_USES_SIZE, DF_USES_GET,
+ DF_USES_SET, DF_REG_SIZE, DF_REG_DEF_GET, DF_REG_DEF_SET,
+ DF_REG_USE_GET, DF_REG_USE_SET, DF_REGNO_FIRST_DEF,
+ DF_REGNO_LAST_USE, DF_INSN_SIZE, DF_INSN_GET, DF_INSN_SET,
+ DF_INSN_CONTAINS_ASM, DF_INSN_LUID, DF_INSN_DEFS, DF_INSN_USES,
+ DF_INSN_UID_GET, DF_INSN_UID_LUID, DF_INSN_UID_DEFS,
+ DF_INSN_UID_USES, DF_SCAN_INITIAL, DF_SCAN_GLOBAL,
+ DF_SCAN_POST_ALLOC): New macros.
+ (df_flow_dir, df_ref_type, df_ref_flags, df_alloc_function,
+ df_free_bb_function, df_local_compute_function, df_init_function,
+ df_dataflow_function, df_confluence_function_0,
+ df_confluence_function_n, df_transfer_function,
+ df_finalizer_function, df_free_function, df_dump_problem_function,
+ df_problem, dataflow, df_insn_info, df_reg_info, df_ref, df_link,
+ df_ref_info, df, df_map, df_scan_bb_info, df_ru_bb_info,
+ df_rd_bb_info, df_lr_bb_info, df_ur_bb_info,
+ df_urec_bb_info): New types.
+ (df_invalidated_by_call, df_all_hard_regs, df_state): New public
+ variables.
+ (df_init, df_add_problem, df_set_blocks, df_finish, df_analyze,
+ df_analyze_simple_change_some_blocks,
+ df_analyze_simple_change_one_block, df_compact_blocks,
+ df_bb_replace, df_bb_regno_last_use_find,
+ df_bb_regno_first_def_find, df_bb_regno_last_def_find,
+ df_insn_regno_def_p, df_find_def, df_find_use,
+ df_iterative_dataflow, df_dump, df_chain_dump, df_refs_chain_dump,
+ df_regs_chain_dump, df_insn_debug, df_insn_debug_regno,
+ df_regno_debug, df_ref_debug, debug_df_insn, debug_df_regno,
+ debug_df_reg, debug_df_defno, debug_df_useno, debug_df_ref,
+ debug_df_chain, df_get_dependent_problem, df_chain_create,
+ df_chain_unlink, df_chain_copy, df_get_live_in, df_get_live_out,
+ df_grow_bb_info, df_chain_dump, df_print_bb_index,
+ df_ru_add_problem, df_ru_get_bb_info, df_rd_add_problem,
+ df_rd_get_bb_info, df_lr_add_problem, df_lr_get_bb_info,
+ df_ur_add_problem, df_ur_get_bb_info, df_urec_add_problem,
+ df_urec_get_bb_info, df_chain_add_problem, df_ri_add_problem,
+ df_reg_lifetime, df_scan_get_bb_info, df_scan_add_problem,
+ df_rescan_blocks, df_ref_create, df_get_artificial_defs,
+ df_get_artificial_uses, df_reg_chain_create, df_reg_chain_unlink,
+ df_ref_remove, df_insn_refs_delete, df_refs_delete,
+ df_reorganize_refs, df_set_state, df_hard_reg_init,
+ df_read_modify_subreg_p): New public functions.
+ * df-core.c: The core dataflow solver and glue routines for rtl
+ dataflow.
+ (df_init, df_add_problem, df_set_blocks, df_finish,
+ df_hybrid_search_forward, df_hybrid_search_backward,
+ df_iterative_dataflow, df_prune_to_subcfg, df_analyze_problem,
+ df_analyze, df_get_bb_info, df_set_bb_info, df_bb_replace,
+ df_bb_regno_last_use_find, df_bb_regno_first_def_find,
+ df_bb_regno_last_def_find, df_insn_regno_def_p, df_find_def,
+ df_reg_defined, df_find_use, df_reg_used, df_dump,
+ df_refs_chain_dump, df_regs_chain_dump, df_insn_debug,
+ df_insn_debug_regno, df_regno_debug, df_ref_debug, debug_df_insn,
+ debug_df_reg, debug_df_regno, debug_df_ref, debug_df_defno,
+ debug_df_useno, reset_df_after_reload): New functions.
+ * df-scan.c: The scanning functions, once in df.c, completely
+ rewritten so that they now fully model the functionality of
+ register usage in the backend.
+ (df_scan_free_internal, df_scan_get_bb_info, df_scan_set_bb_info,
+ df_scan_free_bb_info, df_scan_alloc, df_scan_free, df_scan_dump,
+ df_scan_add_problem, df_grow_reg_info, df_grow_ref_info,
+ df_grow_insn_info, df_rescan_blocks, df_ref_create,
+ df_get_artificial_defs, df_get_artificial_uses,
+ df_reg_chain_create, df_ref_unlink, df_reg_chain_unlink,
+ df_ref_remove, df_insn_create_insn_record, df_insn_refs_delete,
+ df_refs_delete, df_reorganize_refs, df_set_state,
+ df_ref_create_structure, df_ref_record, df_read_modify_subreg_p,
+ df_def_record_1, df_defs_record, df_uses_record,
+ df_insn_contains_asm_1, df_insn_contains_asm, df_insn_refs_record,
+ df_has_eh_preds, df_bb_refs_record, df_refs_record, df_mark_reg,
+ df_record_exit_block_uses, df_hard_reg_init): New functions.
+
+ * df-problems.c: Seven concrete dataflow problems that use the
+ scanning in df-scan.c and are solved by the engine in df-core.c.
+ (df_get_dependent_problem, df_chain_create, df_chain_unlink,
+ df_chain_copy, df_get_live_in, df_get_live_out, df_grow_bb_info,
+ df_chain_dump, df_print_bb_index, df_ref_bitmap, df_set_seen,
+ df_unset_seen, df_ru_get_bb_info, df_ru_set_bb_info,
+ df_ru_free_bb_info, df_ru_alloc,
+ df_ru_bb_local_compute_process_def,
+ df_ru_bb_local_compute_process_use, df_ru_bb_local_compute,
+ df_ru_local_compute, df_ru_init_solution, df_ru_confluence_n,
+ df_ru_transfer_function, df_ru_free, df_ru_dump,
+ df_ru_add_problem, df_rd_get_bb_info, df_rd_set_bb_info,
+ df_rd_free_bb_info, df_rd_alloc,
+ df_rd_bb_local_compute_process_def, df_rd_bb_local_compute,
+ df_rd_local_compute, df_rd_init_solution, df_rd_confluence_n,
+ df_rd_transfer_function, df_rd_free, df_rd_dump,
+ df_rd_add_problem, df_lr_get_bb_info, df_lr_set_bb_info,
+ df_lr_free_bb_info, df_lr_alloc, df_lr_bb_local_compute,
+ df_lr_local_compute, df_lr_init, df_lr_confluence_0,
+ df_lr_confluence_n, df_lr_transfer_function, df_lr_free,
+ df_lr_dump, df_lr_add_problem, df_ur_get_bb_info,
+ df_ur_set_bb_info, df_ur_free_bb_info, df_ur_alloc,
+ df_ur_bb_local_compute, df_ur_local_compute, df_ur_init,
+ df_ur_local_finalize, df_ur_confluence_n, df_ur_transfer_function,
+ df_ur_free, df_ur_dump, df_ur_add_problem, df_urec_get_bb_info,
+ df_urec_set_bb_info, df_urec_free_bb_info, df_urec_alloc,
+ df_urec_mark_reg_change, df_urec_check_earlyclobber,
+ df_urec_mark_reg_use_for_earlyclobber,
+ df_urec_mark_reg_use_for_earlyclobber_1, df_urec_bb_local_compute,
+ df_urec_local_compute, df_urec_init, df_urec_local_finalize,
+ df_urec_confluence_n, df_urec_transfer_function, df_urec_free,
+ df_urec_dump, df_urec_add_problem, df_chain_alloc,
+ df_chain_create_bb_process_use, df_chain_create_bb,
+ df_chain_finalize, df_chain_free, df_chains_dump,
+ df_chain_add_problem, df_ri_alloc, df_ri_bb_compute,
+ df_ri_compute, df_ri_free, df_ri_dump, df_ri_add_problem,
+ df_reg_lifetime): New functions.
+ * df.c: Deleted file.
+ * ddg.c (create_ddg_dep_no_link, build_inter_loop_deps): Made code
+ consistent with new df api.
+ * modulo-sched.c (sms_schedule, rest_of_handle_sms): Ditto.
+ * web.c (unionfind_union, union_defs, entry_register, web_main):
+ Ditto.
+ * loop-invariant.c (invariant_for_use, hash_invariant_expr_1,
+ invariant_expr_equal_p, find_defs, check_dependencies,
+ find_invariant_insn, find_invariants_to_move, move_invariant_reg,
+ free_inv_motion_data, move_loop_invariants): Ditto.
+ * sched-deps.c (sched_analyze_1): Ditto.
+
2006-01-11 Zdenek Dvorak <dvorakz@suse.cz>
* tree-ssa-operands.c (get_expr_operands): Record addressable
diff --git a/gcc/Makefile.in b/gcc/Makefile.in
index 82e5de26e9b..8318090645a 100644
--- a/gcc/Makefile.in
+++ b/gcc/Makefile.in
@@ -761,7 +761,7 @@ IPA_UTILS_H = ipa-utils.h $(TREE_H) $(CGRAPH_H)
IPA_REFERENCE_H = ipa-reference.h bitmap.h $(TREE_H)
IPA_TYPE_ESCAPE_H = ipa-type-escape.h $(TREE_H)
CGRAPH_H = cgraph.h $(TREE_H)
-DF_H = df.h bitmap.h sbitmap.h $(BASIC_BLOCK_H)
+DF_H = df.h bitmap.h $(BASIC_BLOCK_H) alloc-pool.h
DDG_H = ddg.h sbitmap.h $(DF_H)
GCC_H = gcc.h version.h
GGC_H = ggc.h gtype-desc.h
@@ -973,7 +973,8 @@ OBJS-common = \
cfgloopanal.o cfgloopmanip.o loop-init.o loop-unswitch.o loop-unroll.o \
cfgrtl.o combine.o conflict.o convert.o coverage.o cse.o cselib.o \
dbxout.o ddg.o tree-ssa-loop-ch.o loop-invariant.o tree-ssa-loop-im.o \
- debug.o df.o dfp.o diagnostic.o dojump.o dominance.o loop-doloop.o \
+ debug.o df-core.o df-problems.o odf.o df-scan.o dfp.o diagnostic.o dojump.o \
+ dominance.o loop-doloop.o \
dwarf2asm.o dwarf2out.o emit-rtl.o except.o explow.o loop-iv.o \
expmed.o expr.o final.o flow.o fold-const.o function.o gcse.o \
genrtl.o ggc-common.o global.o graph.o gtype-desc.o \
@@ -2301,9 +2302,22 @@ tree-vect-generic.o : tree-vect-generic.c $(CONFIG_H) $(SYSTEM_H) $(TREE_H) \
$(FLAGS_H) $(OPTABS_H) $(RTL_H) $(MACHMODE_H) $(EXPR_H) \
langhooks.h $(FLAGS_H) $(DIAGNOSTIC_H) gt-tree-vect-generic.h $(GGC_H) \
coretypes.h insn-codes.h
-df.o : df.c $(CONFIG_H) $(SYSTEM_H) coretypes.h $(TM_H) $(RTL_H) \
- insn-config.h $(RECOG_H) function.h $(REGS_H) alloc-pool.h hard-reg-set.h \
- $(BASIC_BLOCK_H) $(DF_H) bitmap.h sbitmap.h $(TM_P_H)
+df-core.o : df-core.c $(CONFIG_H) $(SYSTEM_H) coretypes.h $(TM_H) $(RTL_H) \
+ insn-config.h $(RECOG_H) $(FUNCTION_H) $(REGS_H) alloc-pool.h \
+ hard-reg-set.h $(BASIC_BLOCK_H) $(DF_H) bitmap.h sbitmap.h \
+ $(TM_P_H) $(FLAGS_H) output.h tree-pass.h
+df-problems.o : df-problems.c $(CONFIG_H) $(SYSTEM_H) coretypes.h $(TM_H) \
+ $(RTL_H) insn-config.h $(RECOG_H) $(FUNCTION_H) $(REGS_H) alloc-pool.h \
+ hard-reg-set.h $(BASIC_BLOCK_H) $(DF_H) bitmap.h sbitmap.h $(TM_P_H) \
+ $(FLAGS_H) output.h
+odf.o : odf.c $(CONFIG_H) $(SYSTEM_H) coretypes.h $(TM_H) $(RTL_H) \
+ insn-config.h $(RECOG_H) $(FUNCTION_H) $(REGS_H) alloc-pool.h hard-reg-set.h \
+ $(BASIC_BLOCK_H) $(DF_H) bitmap.h sbitmap.h $(TM_P_H) $(FLAGS_H) \
+ output.h
+df-scan.o : df-scan.c $(CONFIG_H) $(SYSTEM_H) coretypes.h $(TM_H) $(RTL_H) \
+ insn-config.h $(RECOG_H) $(FUNCTION_H) $(REGS_H) alloc-pool.h hard-reg-set.h \
+ $(BASIC_BLOCK_H) $(DF_H) bitmap.h sbitmap.h $(TM_P_H) $(FLAGS_H) \
+ output.h
var-tracking.o : var-tracking.c $(CONFIG_H) $(SYSTEM_H) coretypes.h $(TM_H) \
$(RTL_H) $(TREE_H) hard-reg-set.h insn-config.h reload.h $(FLAGS_H) \
$(BASIC_BLOCK_H) output.h sbitmap.h alloc-pool.h $(FIBHEAP_H) $(HASHTAB_H) \
diff --git a/gcc/ddg.c b/gcc/ddg.c
index 6bb3d6c374d..86ffa1f3161 100644
--- a/gcc/ddg.c
+++ b/gcc/ddg.c
@@ -1,5 +1,5 @@
/* DDG - Data Dependence Graph implementation.
- Copyright (C) 2004, 2005
+ Copyright (C) 2004, 2005, 2006
Free Software Foundation, Inc.
Contributed by Ayal Zaks and Mustafa Hagog <zaks,mustafa@il.ibm.com>
@@ -222,10 +222,10 @@ create_ddg_dep_no_link (ddg_ptr g, ddg_node_ptr from, ddg_node_ptr to,
for all its uses in the next iteration, and an output dependence to the
first def of the next iteration. */
static void
-add_deps_for_def (ddg_ptr g, struct df *df, struct ref *rd)
+add_deps_for_def (ddg_ptr g, struct df *df, struct df_ref *rd)
{
int regno = DF_REF_REGNO (rd);
- struct bb_info *bb_info = DF_BB_INFO (df, g->bb);
+ struct df_rd_bb_info *bb_info = DF_RD_BB_INFO (df, g->bb);
struct df_link *r_use;
int use_before_def = false;
rtx def_insn = DF_REF_INSN (rd);
@@ -235,7 +235,7 @@ add_deps_for_def (ddg_ptr g, struct df *df, struct ref *rd)
that is upwards exposed in RD's block. */
for (r_use = DF_REF_CHAIN (rd); r_use != NULL; r_use = r_use->next)
{
- if (bitmap_bit_p (bb_info->ru_gen, r_use->ref->id))
+ if (bitmap_bit_p (bb_info->gen, r_use->ref->id))
{
rtx use_insn = DF_REF_INSN (r_use->ref);
ddg_node_ptr dest_node = get_node_of_insn (g, use_insn);
@@ -257,7 +257,7 @@ add_deps_for_def (ddg_ptr g, struct df *df, struct ref *rd)
there is a use between the two defs. */
if (! use_before_def)
{
- struct ref *def = df_bb_regno_first_def_find (df, g->bb, regno);
+ struct df_ref *def = df_bb_regno_first_def_find (df, g->bb, regno);
int i;
ddg_node_ptr dest_node;
@@ -266,7 +266,7 @@ add_deps_for_def (ddg_ptr g, struct df *df, struct ref *rd)
/* Check if there are uses after RD. */
for (i = src_node->cuid + 1; i < g->num_nodes; i++)
- if (df_reg_used (df, g->nodes[i].insn, rd->reg))
+ if (df_find_use (df, g->nodes[i].insn, rd->reg))
return;
dest_node = get_node_of_insn (g, def->insn);
@@ -278,16 +278,16 @@ add_deps_for_def (ddg_ptr g, struct df *df, struct ref *rd)
(nearest BLOCK_BEGIN) def of the next iteration, unless USE is followed
by a def in the block. */
static void
-add_deps_for_use (ddg_ptr g, struct df *df, struct ref *use)
+add_deps_for_use (ddg_ptr g, struct df *df, struct df_ref *use)
{
int i;
int regno = DF_REF_REGNO (use);
- struct ref *first_def = df_bb_regno_first_def_find (df, g->bb, regno);
+ struct df_ref *first_def = df_bb_regno_first_def_find (df, g->bb, regno);
ddg_node_ptr use_node;
ddg_node_ptr def_node;
- struct bb_info *bb_info;
+ struct df_rd_bb_info *bb_info;
- bb_info = DF_BB_INFO (df, g->bb);
+ bb_info = DF_RD_BB_INFO (df, g->bb);
if (!first_def)
return;
@@ -304,7 +304,7 @@ add_deps_for_use (ddg_ptr g, struct df *df, struct ref *use)
/* We must not add ANTI dep when there is an intra-loop TRUE dep in
the opposite direction. If the first_def reaches the USE then there is
such a dep. */
- if (! bitmap_bit_p (bb_info->rd_gen, first_def->id))
+ if (! bitmap_bit_p (bb_info->gen, first_def->id))
create_ddg_dep_no_link (g, use_node, def_node, ANTI_DEP, REG_DEP, 1);
}
@@ -313,25 +313,28 @@ static void
build_inter_loop_deps (ddg_ptr g, struct df *df)
{
unsigned rd_num, u_num;
- struct bb_info *bb_info;
+ struct df_rd_bb_info *rd_bb_info;
+ struct df_ru_bb_info *ru_bb_info;
bitmap_iterator bi;
- bb_info = DF_BB_INFO (df, g->bb);
+ rd_bb_info = DF_RD_BB_INFO (df, g->bb);
/* Find inter-loop output and true deps by connecting downward exposed defs
to the first def of the BB and to upwards exposed uses. */
- EXECUTE_IF_SET_IN_BITMAP (bb_info->rd_gen, 0, rd_num, bi)
+ EXECUTE_IF_SET_IN_BITMAP (rd_bb_info->gen, 0, rd_num, bi)
{
- struct ref *rd = df->defs[rd_num];
+ struct df_ref *rd = DF_DEFS_GET (df, rd_num);
add_deps_for_def (g, df, rd);
}
+ ru_bb_info = DF_RU_BB_INFO (df, g->bb);
+
/* Find inter-loop anti deps. We are interested in uses of the block that
appear below all defs; this implies that these uses are killed. */
- EXECUTE_IF_SET_IN_BITMAP (bb_info->ru_kill, 0, u_num, bi)
+ EXECUTE_IF_SET_IN_BITMAP (ru_bb_info->kill, 0, u_num, bi)
{
- struct ref *use = df->uses[u_num];
+ struct df_ref *use = DF_USES_GET (df, u_num);
/* We are interested in uses of this BB. */
if (BLOCK_FOR_INSN (use->insn) == g->bb)
diff --git a/gcc/df-core.c b/gcc/df-core.c
new file mode 100644
index 00000000000..aef38cb4354
--- /dev/null
+++ b/gcc/df-core.c
@@ -0,0 +1,1164 @@
+/* Allocation for dataflow support routines.
+ Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006
+ Free Software Foundation, Inc.
+ Originally contributed by Michael P. Hayes
+ (m.hayes@elec.canterbury.ac.nz, mhayes@redhat.com)
+ Major rewrite contributed by Danny Berlin (dberlin@dberlin.org)
+ and Kenneth Zadeck (zadeck@naturalbridge.com).
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 2, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING. If not, write to the Free
+Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
+02110-1301, USA.
+*/
+
+/*
+OVERVIEW:
+
+The files in this collection (df*.c, df.h) provide a general framework
+for solving dataflow problems. The global dataflow analysis is
+performed using a good implementation of iterative dataflow analysis.
+
+The file df-problems.c provides problem instances for the most common
+dataflow problems: reaching defs, upward exposed uses, live variables,
+uninitialized variables, def-use chains, and use-def chains. However,
+the interface allows other dataflow problems to be defined as well.
+
+
+USAGE:
+
+Here is an example of using the dataflow routines.
+
+ struct df *df;
+
+ df = df_init (init_flags);
+
+ df_add_problem (df, problem);
+
+ df_set_blocks (df, blocks);
+
+ df_rescan_blocks (df, blocks);
+
+ df_analyze (df);
+
+ df_dump (df, stderr);
+
+ df_finish (df);
+
+
+
+DF_INIT simply creates a poor man's object (df) that needs to be
+passed to all the dataflow routines. df_finish destroys this object
+and frees up any allocated memory.
+
+There are two flags that can be passed to df_init:
+
+DF_NO_SCAN means that no scanning of the rtl code is performed. This
+is used if the problem instance is to do its own scanning.
+
+DF_HARD_REGS means that the scanning is to build information about
+both pseudo registers and hardware registers. Without this
+information, the problems will be solved only on pseudo registers.
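+
+For example, a client that also needs hard register information might
+initialize with (a sketch; flags can be combined as needed):
+
+  df = df_init (DF_HARD_REGS);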
+
+
+
+DF_ADD_PROBLEM adds a problem, defined by an instance of struct
+df_problem, to the set of problems solved in this instance of df. All
+calls to add a problem for a given instance of df must occur before
+the first call to DF_RESCAN_BLOCKS or DF_ANALYZE.
+
+For all of the problems defined in df-problems.c, there are
+convenience functions named DF_*_ADD_PROBLEM.
+
+
+Problems can be dependent on other problems. For instance, solving
+def-use or use-def chains is dependent on solving reaching
+definitions. As long as these dependencies are listed in the problem
+definition, the order of adding the problems is not material.
+Otherwise, the problems will be solved in the order of calls to
+df_add_problem. Note that it is not necessary to have a problem. In
+that case, df will just be used to do the scanning.
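+
+For example, adding the chain problem implicitly pulls in its
+dependent reaching definitions problem (a sketch, assuming
+df_chain_add_problem accepts the DF_DU_CHAIN/DF_UD_CHAIN flags
+declared in df.h):
+
+  df_chain_add_problem (df, DF_DU_CHAIN);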
+
+
+
+DF_SET_BLOCKS is an optional call used to define a region of the
+function on which the analysis will be performed. The normal case is
+to analyze the entire function and no call to df_set_blocks is made.
+
+When a subset is given, the analysis behaves as if the function only
+contains those blocks and any edges that occur directly between the
+blocks in the set. Care should be taken to call df_set_blocks right
+before the call to analyze in order to eliminate the possibility that
+optimizations that reorder blocks invalidate the bitvector.
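+
+For example, restricting the analysis to a small region might look
+like the following sketch, where bitmap_set_bit is called once for
+each basic block of interest:
+
+  bitmap blocks = BITMAP_ALLOC (NULL);
+  bitmap_set_bit (blocks, bb->index);
+  df_set_blocks (df, blocks);
+  df_analyze (df);
+  BITMAP_FREE (blocks);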
+
+
+
+DF_RESCAN_BLOCKS is an optional call that causes the scanner to be
+(re)run over the set of blocks passed in. If blocks is NULL, the
+entire function (or all of the blocks defined in df_set_blocks) is
+rescanned.
+If blocks contains blocks that were not defined in the call to
+df_set_blocks, these blocks are added to the set of blocks.
+
+
+DF_ANALYZE causes all of the defined problems to be (re)solved. It
+does not cause blocks to be (re)scanned at the rtl level unless no
+prior call is made to df_rescan_blocks.
+
+
+DF_DUMP can then be called to dump the information produced to some
+file.
+
+
+
+DF_FINISH causes all of the data structures to be cleaned up and
+freed. The df_instance is also freed and its pointer should be
+NULLed.
+
+
+
+
+Scanning produces a `struct df_ref' data structure (ref) for every
+register reference (def or use); each ref records the insn and bb in
+which it is found. The refs are linked together in
+chains of uses and defs for each insn and for each register. Each ref
+also has a chain field that links all the use refs for a def or all
+the def refs for a use. This is used to create use-def or def-use
+chains.
+
+Different optimizations have different needs. Ultimately, only
+register allocation and schedulers should be using the bitmaps
+produced for the live register and uninitialized register problems.
+The rest of the backend should be upgraded to use and maintain
+the linked information such as def-use or use-def chains.
+
+
+
+PHILOSOPHY:
+
+While incremental bitmaps are not worthwhile to maintain, incremental
+chains may be perfectly reasonable. The fastest way to build chains
+from scratch or after significant modifications is to build reaching
+definitions (RD) and build the chains from this.
+
+However, general algorithms for maintaining use-def or def-use chains
+are not practical. The amount of work to recompute any chain after
+an arbitrary change is large. However, with a modest
+amount of work it is generally possible to have the application that
+uses the chains keep them up to date. The high level knowledge of
+what is really happening is essential to crafting efficient
+incremental algorithms.
+
+As for the bit vector problems, there is no interface to give a set
+of blocks over which to restrict the iteration. In general,
+restarting a dataflow iteration is difficult and expensive. Again,
+the best way to keep the dataflow information up to date (if this is
+really what is needed) is to formulate a problem specific solution.
+
+There are fine grained calls for creating and deleting references from
+instructions in df-scan.c. However, these are not currently connected
+to the engine that resolves the dataflow equations.
+
+
+DATA STRUCTURES:
+
+The basic object is a DF_REF (reference) and this may either be a
+DEF (definition) or a USE of a register.
+
+These are linked into a variety of lists; namely reg-def, reg-use,
+insn-def, insn-use, def-use, and use-def lists. For example, the
+reg-def lists contain all the locations that define a given register
+while the insn-use lists contain all the locations that use a
+register.
+
+Note that the reg-def and reg-use chains are generally short for
+pseudos and long for the hard registers.
+
+ACCESSING REFS:
+
+There are 4 ways to obtain access to refs:
+
+1) References are divided into two categories, REAL and ARTIFICIAL.
+
+ REAL refs are associated with instructions. They are linked into
+ either in the insn's defs list (accessed by the DF_INSN_DEFS or
+ DF_INSN_UID_DEFS macros) or the insn's uses list (accessed by the
+ DF_INSN_USES or DF_INSN_UID_USES macros). These macros produce a
+ ref (or NULL), the rest of the list can be obtained by traversal of
+ the NEXT_REF field (accessed by the DF_REF_NEXT_REF macro.) There
+ is no significance to the ordering of the uses or refs in an
+ instruction.
+
+ ARTIFICIAL refs are associated with basic blocks. The heads of
+ these lists can be accessed by calling df_get_artificial_defs or
+ df_get_artificial_uses for the particular basic block. Artificial
+ defs and uses are only there if DF_HARD_REGS was specified when the
+ df instance was created.
+
+ Artificial defs and uses occur at the beginning of blocks that are
+ the destinations of eh edges. The defs come from the registers
+ specified in EH_RETURN_DATA_REGNO and the uses come from the
+ registers specified in EH_USES. Logically these defs and uses
+ should really occur along the eh edge, but there is no convenient
+ way to do this. Artificial refs that occur at the beginning of
+ the block have the DF_REF_AT_TOP flag set.
+
+ Artificial uses also occur at the end of all blocks. These arise
+ from the hard registers that are always live, such as the stack
+ register, and are put there to keep the code from forgetting about
+ them.
+
+2) All of the uses and defs associated with each pseudo or hard
+ register are linked in a bidirectional chain. These are called
+ reg-use or reg-def chains.
+
+ The first use (or def) for a register can be obtained using the
+ DF_REG_USE_GET macro (or DF_REG_DEF_GET macro). Subsequent uses
+ for the same regno can be obtained by following the next_reg field
+ of the ref.
+
+ In previous versions of this code, these chains were ordered. It
+ has not been practical to continue this practice.
+
+3) If def-use or use-def chains are built, these can be traversed to
+ get to other refs.
+
+4) An array of all of the uses (and an array of all of the defs) can
+ be built. These arrays are indexed by the value in the id field
+ of each ref. These arrays are only lazily kept up to date, and that
+ process can be expensive. To have these arrays built, call
+ df_reorganize_refs. Note that the values in the id field of a ref
+ may change across calls to df_analyze or df_reorganize_refs.
+
+ If the only use of this array is to find all of the refs, it is
+ better to traverse all of the registers and then traverse their
+ reg-use or reg-def chains.
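+
+For example, a minimal sketch combining access styles 1 and 2 above
+(insn and regno are assumed to be in scope):
+
+  struct df_ref *def = DF_INSN_UID_DEFS (df, INSN_UID (insn));
+  for (; def; def = DF_REF_NEXT_REF (def))
+    ... process each def in the insn ...
+
+  struct df_ref *use = DF_REG_USE_GET (df, regno)->reg_chain;
+  for (; use; use = DF_REF_NEXT_REG (use))
+    ... process each use of regno ...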
+
+
+
+NOTES:
+
+Embedded addressing side-effects, such as POST_INC or PRE_INC, generate
+both a use and a def. These are both marked read/write to show that they
+are dependent. For example, (set (reg 40) (mem (post_inc (reg 42))))
+will generate a use of reg 42 followed by a def of reg 42 (both marked
+read/write). Similarly, (set (reg 40) (mem (pre_dec (reg 41))))
+generates a use of reg 41 then a def of reg 41 (both marked read/write),
+even though reg 41 is decremented before it is used for the memory
+address in this second example.
+
+A set to a REG inside a ZERO_EXTRACT, or a set to a non-paradoxical SUBREG
+for which the number of word_mode units covered by the outer mode is
+smaller than that covered by the inner mode, invokes a read-modify-write
+operation. We generate both a use and a def and again mark them
+read/write.
+
+Paradoxical subreg writes do not leave a trace of the old content, so they
+are write-only operations.
+*/
+
+
+#include "config.h"
+#include "system.h"
+#include "coretypes.h"
+#include "tm.h"
+#include "rtl.h"
+#include "tm_p.h"
+#include "insn-config.h"
+#include "recog.h"
+#include "function.h"
+#include "regs.h"
+#include "output.h"
+#include "alloc-pool.h"
+#include "flags.h"
+#include "hard-reg-set.h"
+#include "basic-block.h"
+#include "sbitmap.h"
+#include "bitmap.h"
+#include "timevar.h"
+#include "df.h"
+#include "tree-pass.h"
+
+static struct df *ddf = NULL;
+struct df *shared_df = NULL;
+
+/*----------------------------------------------------------------------------
+ Functions to create, destroy and manipulate an instance of df.
+----------------------------------------------------------------------------*/
+
+
+/* Initialize dataflow analysis and allocate and initialize dataflow
+ memory. */
+
+struct df *
+df_init (int flags)
+{
+ struct df *df = xcalloc (1, sizeof (struct df));
+ df->flags = flags;
+
+ /* This is executed once per compilation to initialize platform
+ specific data structures. */
+ df_hard_reg_init ();
+
+ /* All df instances must define the scanning problem. */
+ df_scan_add_problem (df);
+ ddf = df;
+ return df;
+}
+
+/* Add PROBLEM to the DF instance. */
+
+struct dataflow *
+df_add_problem (struct df *df, struct df_problem *problem)
+{
+ struct dataflow *dflow;
+
+ /* First try to add the dependent problem. */
+ if (problem->dependent_problem)
+ df_add_problem (df, problem->dependent_problem);
+
+ /* Check to see if this problem has already been defined. If it
+ has, just return that instance; if not, add it to the end of the
+ vector. */
+ dflow = df->problems_by_index[problem->id];
+ if (dflow)
+ return dflow;
+
+ /* Make a new one and add it to the end. */
+ dflow = xcalloc (1, sizeof (struct dataflow));
+ dflow->df = df;
+ dflow->problem = problem;
+ df->problems_in_order[df->num_problems_defined++] = dflow;
+ df->problems_by_index[dflow->problem->id] = dflow;
+
+ return dflow;
+}
+
+
+/* Set the blocks that are to be considered for analysis. If this is
+ not called or is called with null, the entire function is
+ analyzed. */
+
+void
+df_set_blocks (struct df *df, bitmap blocks)
+{
+ if (blocks)
+ {
+ if (!df->blocks_to_analyze)
+ df->blocks_to_analyze = BITMAP_ALLOC (NULL);
+ bitmap_copy (df->blocks_to_analyze, blocks);
+ }
+ else
+ {
+ if (df->blocks_to_analyze)
+ {
+ BITMAP_FREE (df->blocks_to_analyze);
+ df->blocks_to_analyze = NULL;
+ }
+ }
+}
+
+
+/* Free all the dataflow info and the DF structure. This should be
+ called from the df_finish macro which also NULLs the parm. */
+
+void
+df_finish1 (struct df *df)
+{
+ int i;
+
+ for (i = 0; i < df->num_problems_defined; i++)
+ (*df->problems_in_order[i]->problem->free_fun) (df->problems_in_order[i]);
+
+ free (df);
+}
+
+
+/*----------------------------------------------------------------------------
+ The general data flow analysis engine.
+----------------------------------------------------------------------------*/
+
+
+/* Hybrid search algorithm from "Implementation Techniques for
+ Efficient Data-Flow Analysis of Large Programs". */
+
+static void
+df_hybrid_search_forward (basic_block bb,
+ struct dataflow *dataflow,
+ bool single_pass)
+{
+ int result_changed;
+ int i = bb->index;
+ edge e;
+ edge_iterator ei;
+
+ SET_BIT (dataflow->visited, bb->index);
+ gcc_assert (TEST_BIT (dataflow->pending, bb->index));
+ RESET_BIT (dataflow->pending, i);
+
+ /* Calculate <conf_op> of predecessor_outs. */
+ if (EDGE_COUNT (bb->preds) > 0)
+ FOR_EACH_EDGE (e, ei, bb->preds)
+ {
+ if (!TEST_BIT (dataflow->considered, e->src->index))
+ continue;
+
+ (*dataflow->problem->con_fun_n) (dataflow, e);
+ }
+ else if (*dataflow->problem->con_fun_0)
+ (*dataflow->problem->con_fun_0) (dataflow, bb);
+
+ result_changed = (*dataflow->problem->trans_fun) (dataflow, i);
+
+ if (!result_changed || single_pass)
+ return;
+
+ FOR_EACH_EDGE (e, ei, bb->succs)
+ {
+ if (e->dest->index == i)
+ continue;
+ if (!TEST_BIT (dataflow->considered, e->dest->index))
+ continue;
+ SET_BIT (dataflow->pending, e->dest->index);
+ }
+
+ FOR_EACH_EDGE (e, ei, bb->succs)
+ {
+ if (e->dest->index == i)
+ continue;
+
+ if (!TEST_BIT (dataflow->considered, e->dest->index))
+ continue;
+ if (!TEST_BIT (dataflow->visited, e->dest->index))
+ df_hybrid_search_forward (e->dest, dataflow, single_pass);
+ }
+}
+
+static void
+df_hybrid_search_backward (basic_block bb,
+ struct dataflow *dataflow,
+ bool single_pass)
+{
+ int result_changed;
+ int i = bb->index;
+ edge e;
+ edge_iterator ei;
+
+ SET_BIT (dataflow->visited, bb->index);
+ gcc_assert (TEST_BIT (dataflow->pending, bb->index));
+ RESET_BIT (dataflow->pending, i);
+
+ /* Calculate <conf_op> of successor_ins. */
+ if (EDGE_COUNT (bb->succs) > 0)
+ FOR_EACH_EDGE (e, ei, bb->succs)
+ {
+ if (!TEST_BIT (dataflow->considered, e->dest->index))
+ continue;
+
+ (*dataflow->problem->con_fun_n) (dataflow, e);
+ }
+ else if (*dataflow->problem->con_fun_0)
+ (*dataflow->problem->con_fun_0) (dataflow, bb);
+
+ result_changed = (*dataflow->problem->trans_fun) (dataflow, i);
+
+ if (!result_changed || single_pass)
+ return;
+
+ FOR_EACH_EDGE (e, ei, bb->preds)
+ {
+ if (e->src->index == i)
+ continue;
+
+ if (!TEST_BIT (dataflow->considered, e->src->index))
+ continue;
+
+ SET_BIT (dataflow->pending, e->src->index);
+ }
+
+ FOR_EACH_EDGE (e, ei, bb->preds)
+ {
+ if (e->src->index == i)
+ continue;
+
+ if (!TEST_BIT (dataflow->considered, e->src->index))
+ continue;
+
+ if (!TEST_BIT (dataflow->visited, e->src->index))
+ df_hybrid_search_backward (e->src, dataflow, single_pass);
+ }
+}
+
+
+/* This function will perform iterative bitvector dataflow described
+ by DATAFLOW, producing the in and out sets. Only the part of the
+ cfg induced by the blocks in BLOCKS_TO_CONSIDER is taken into account.
+
+ SINGLE_PASS is true if you just want to make one pass over the
+ blocks. */
+
+void
+df_iterative_dataflow (struct dataflow *dataflow,
+ bitmap blocks_to_consider, bitmap blocks_to_init,
+ int *blocks_in_postorder, int n_blocks,
+ bool single_pass)
+{
+ unsigned int idx;
+ int i;
+ sbitmap visited = sbitmap_alloc (last_basic_block);
+ sbitmap pending = sbitmap_alloc (last_basic_block);
+ sbitmap considered = sbitmap_alloc (last_basic_block);
+ bitmap_iterator bi;
+
+ dataflow->visited = visited;
+ dataflow->pending = pending;
+ dataflow->considered = considered;
+
+ sbitmap_zero (visited);
+ sbitmap_zero (pending);
+ sbitmap_zero (considered);
+
+ EXECUTE_IF_SET_IN_BITMAP (blocks_to_consider, 0, idx, bi)
+ {
+ SET_BIT (considered, idx);
+ }
+
+ for (i = 0; i < n_blocks; i++)
+ {
+ idx = blocks_in_postorder[i];
+ SET_BIT (pending, idx);
+ }
+
+ (*dataflow->problem->init_fun) (dataflow, blocks_to_init);
+
+ while (1)
+ {
+
+ /* For forward problems, you want to pass in reverse postorder
+ and for backward problems you want postorder. Several people
+ have shown this to be as good as you can do, the first being
+ Matthew Hecht in his PhD dissertation.
+
+ The nodes are passed into this function in postorder. */
+
+ if (dataflow->problem->dir == DF_FORWARD)
+ {
+ for (i = n_blocks - 1 ; i >= 0 ; i--)
+ {
+ idx = blocks_in_postorder[i];
+
+ if (TEST_BIT (pending, idx) && !TEST_BIT (visited, idx))
+ df_hybrid_search_forward (BASIC_BLOCK (idx), dataflow, single_pass);
+ }
+ }
+ else
+ {
+ for (i = 0; i < n_blocks; i++)
+ {
+ idx = blocks_in_postorder[i];
+
+ if (TEST_BIT (pending, idx) && !TEST_BIT (visited, idx))
+ df_hybrid_search_backward (BASIC_BLOCK (idx), dataflow, single_pass);
+ }
+ }
+
+ if (sbitmap_first_set_bit (pending) == -1)
+ break;
+
+ sbitmap_zero (visited);
+ }
+
+ sbitmap_free (pending);
+ sbitmap_free (visited);
+ sbitmap_free (considered);
+}
+
+
+/* Remove the entries not in BLOCKS from the LIST of length LEN, preserving
+ the order of the remaining entries. Returns the length of the resulting
+ list. */
+
+static unsigned
+df_prune_to_subcfg (int list[], unsigned len, bitmap blocks)
+{
+ unsigned act, last;
+
+ for (act = 0, last = 0; act < len; act++)
+ if (bitmap_bit_p (blocks, list[act]))
+ list[last++] = list[act];
+
+ return last;
+}
+
+
+/* Execute dataflow analysis on a single dataflow problem.
+
+ There are three sets of blocks passed in:
+
+ BLOCKS_TO_CONSIDER are the blocks whose solution can either be
+ examined or will be computed. For calls from DF_ANALYZE, this is
+ the set of blocks that has been passed to DF_SET_BLOCKS. For calls
+ from DF_ANALYZE_SIMPLE_CHANGE_SOME_BLOCKS, this is the set of
+ blocks in the fringe (the set of blocks passed in plus the set of
+ immed preds and succs of those blocks).
+
+ BLOCKS_TO_INIT are the blocks whose solution will be changed by
+ this iteration. For calls from DF_ANALYZE, this is the set of
+ blocks that has been passed to DF_SET_BLOCKS. For calls from
+ DF_ANALYZE_SIMPLE_CHANGE_SOME_BLOCKS, this is the set of blocks
+ passed in.
+
+ BLOCKS_TO_SCAN are the set of blocks that need to be rescanned.
+ For calls from DF_ANALYZE, this is the accumulated set of blocks
+ that has been passed to DF_RESCAN_BLOCKS since the last call to
+ DF_ANALYZE. For calls from DF_ANALYZE_SIMPLE_CHANGE_SOME_BLOCKS,
+ this is the set of blocks passed in.
+
+ blocks_to_consider blocks_to_init blocks_to_scan
+ full redo all all all
+ partial redo all all sub
+ small fixup fringe sub sub
+*/
+
+static void
+df_analyze_problem (struct dataflow *dflow,
+ bitmap blocks_to_consider,
+ bitmap blocks_to_init,
+ bitmap blocks_to_scan,
+ int *postorder, int n_blocks, bool single_pass)
+{
+ /* (Re)Allocate the data structures necessary to solve the problem. */
+ if (*dflow->problem->alloc_fun)
+ (*dflow->problem->alloc_fun) (dflow, blocks_to_scan);
+
+ /* Set up the problem and compute the local information. This
+ function is passed both the blocks_to_consider and the
+ blocks_to_scan because the RD and RU problems require the entire
+ function to be rescanned if they are going to be updated. */
+ if (*dflow->problem->local_compute_fun)
+ (*dflow->problem->local_compute_fun) (dflow, blocks_to_consider, blocks_to_scan);
+
+ /* Solve the equations. */
+ if (*dflow->problem->dataflow_fun)
+ (*dflow->problem->dataflow_fun) (dflow, blocks_to_consider, blocks_to_init,
+ postorder, n_blocks, single_pass);
+
+ /* Massage the solution. */
+ if (*dflow->problem->finalize_fun)
+ (*dflow->problem->finalize_fun) (dflow, blocks_to_consider);
+}
+
+
+/* Analyze dataflow info for the basic blocks specified by the
+ earlier call to df_set_blocks, or for the whole CFG if no blocks
+ were specified. */
+
+void
+df_analyze (struct df *df)
+{
+ int *postorder = xmalloc (sizeof (int) *last_basic_block);
+ bitmap current_all_blocks = BITMAP_ALLOC (NULL);
+ int n_blocks;
+ int i;
+ bool everything;
+
+ n_blocks = post_order_compute (postorder, true);
+
+ if (n_blocks != n_basic_blocks)
+ delete_unreachable_blocks ();
+
+ for (i = 0; i < n_blocks; i++)
+ bitmap_set_bit (current_all_blocks, postorder[i]);
+
+ /* No one called df_rescan_blocks, so do it. */
+ if (!df->blocks_to_scan)
+ df_rescan_blocks (df, NULL);
+
+ /* Make sure that we have pruned any unreachable blocks from these
+ sets. */
+ bitmap_and_into (df->blocks_to_scan, current_all_blocks);
+
+ if (df->blocks_to_analyze)
+ {
+ everything = false;
+ bitmap_and_into (df->blocks_to_analyze, current_all_blocks);
+ n_blocks = df_prune_to_subcfg (postorder, n_blocks, df->blocks_to_analyze);
+ BITMAP_FREE (current_all_blocks);
+ }
+ else
+ {
+ everything = true;
+ df->blocks_to_analyze = current_all_blocks;
+ current_all_blocks = NULL;
+ }
+
+ /* Skip over the DF_SCAN problem. */
+ for (i = 1; i < df->num_problems_defined; i++)
+ df_analyze_problem (df->problems_in_order[i],
+ df->blocks_to_analyze, df->blocks_to_analyze,
+ df->blocks_to_scan,
+ postorder, n_blocks, false);
+
+ if (everything)
+ {
+ BITMAP_FREE (df->blocks_to_analyze);
+ df->blocks_to_analyze = NULL;
+ }
+
+ BITMAP_FREE (df->blocks_to_scan);
+ df->blocks_to_scan = NULL;
+}
+
+
+
+/*----------------------------------------------------------------------------
+ Functions to support limited incremental change.
+----------------------------------------------------------------------------*/
+
+
+/* Get basic block info. */
+
+static void *
+df_get_bb_info (struct dataflow *dflow, unsigned int index)
+{
+ return dflow->block_info[index];
+}
+
+
+/* Set basic block info. */
+
+static void
+df_set_bb_info (struct dataflow *dflow, unsigned int index,
+ void *bb_info)
+{
+ dflow->block_info[index] = bb_info;
+}
+
+
+/* Called from rtl_compact_blocks to reorganize each problem's basic
+ block info. */
+
+void
+df_compact_blocks (struct df *df)
+{
+ int i, p;
+ basic_block bb;
+ void **problem_temps;
+ int size = last_basic_block * sizeof (void *);
+ problem_temps = xmalloc (size);
+
+ for (p = 0; p < df->num_problems_defined; p++)
+ {
+ struct dataflow *dflow = df->problems_in_order[p];
+ if (*dflow->problem->free_bb_fun)
+ {
+ df_grow_bb_info (dflow);
+ memcpy (problem_temps, dflow->block_info, size);
+
+ /* Copy the bb info from the problem tmps to the proper
+ place in the block_info vector. Null out the copied
+ item. */
+ i = NUM_FIXED_BLOCKS;
+ FOR_EACH_BB (bb)
+ {
+ df_set_bb_info (dflow, i, problem_temps[bb->index]);
+ problem_temps[bb->index] = NULL;
+ i++;
+ }
+ memset (dflow->block_info + i, 0,
+ (last_basic_block - i) * sizeof (void *));
+
+ /* Free any block infos that were not copied (and NULLed).
+ These are from orphaned blocks. */
+ for (i = NUM_FIXED_BLOCKS; i < last_basic_block; i++)
+ {
+ if (problem_temps[i])
+ (*dflow->problem->free_bb_fun) (dflow, problem_temps[i]);
+ }
+ }
+ }
+
+ free (problem_temps);
+
+ i = NUM_FIXED_BLOCKS;
+ FOR_EACH_BB (bb)
+ {
+ BASIC_BLOCK (i) = bb;
+ bb->index = i;
+ i++;
+ }
+
+ gcc_assert (i == n_basic_blocks);
+
+ for (; i < last_basic_block; i++)
+ BASIC_BLOCK (i) = NULL;
+}
+
+
+/* Shove NEW_BLOCK in at OLD_INDEX. Called from if-cvt to hack a
+ block. There is no excuse for people to do this kind of thing. */
+
+void
+df_bb_replace (struct df *df, int old_index, basic_block new_block)
+{
+ int p;
+
+ for (p = 0; p < df->num_problems_defined; p++)
+ {
+ struct dataflow *dflow = df->problems_in_order[p];
+ if (dflow->block_info)
+ {
+ void *temp;
+
+ df_grow_bb_info (dflow);
+
+ /* The old switcheroo. */
+
+ temp = df_get_bb_info (dflow, old_index);
+ df_set_bb_info (dflow, old_index,
+ df_get_bb_info (dflow, new_block->index));
+ df_set_bb_info (dflow, new_block->index, temp);
+ }
+ }
+
+ BASIC_BLOCK (old_index) = new_block;
+ new_block->index = old_index;
+}
+
+/*----------------------------------------------------------------------------
+ PUBLIC INTERFACES TO QUERY INFORMATION.
+----------------------------------------------------------------------------*/
+
+
+/* Return last use of REGNO within BB. */
+
+struct df_ref *
+df_bb_regno_last_use_find (struct df *df, basic_block bb, unsigned int regno)
+{
+ rtx insn;
+ struct df_ref *use;
+
+ FOR_BB_INSNS_REVERSE (bb, insn)
+ {
+ unsigned int uid = INSN_UID (insn);
+ for (use = DF_INSN_UID_GET (df, uid)->uses; use; use = use->next_ref)
+ if (DF_REF_REGNO (use) == regno)
+ return use;
+ }
+ return NULL;
+}
+
+
+/* Return first def of REGNO within BB. */
+
+struct df_ref *
+df_bb_regno_first_def_find (struct df *df, basic_block bb, unsigned int regno)
+{
+ rtx insn;
+ struct df_ref *def;
+
+ FOR_BB_INSNS (bb, insn)
+ {
+ unsigned int uid = INSN_UID (insn);
+ for (def = DF_INSN_UID_GET (df, uid)->defs; def; def = def->next_ref)
+ if (DF_REF_REGNO (def) == regno)
+ return def;
+ }
+ return NULL;
+}
+
+
+/* Return last def of REGNO within BB. */
+
+struct df_ref *
+df_bb_regno_last_def_find (struct df *df, basic_block bb, unsigned int regno)
+{
+ rtx insn;
+ struct df_ref *def;
+
+ FOR_BB_INSNS_REVERSE (bb, insn)
+ {
+ unsigned int uid = INSN_UID (insn);
+
+ for (def = DF_INSN_UID_GET (df, uid)->defs; def; def = def->next_ref)
+ if (DF_REF_REGNO (def) == regno)
+ return def;
+ }
+
+ return NULL;
+}
+
+/* Return true if INSN defines REGNO. */
+
+bool
+df_insn_regno_def_p (struct df *df, rtx insn, unsigned int regno)
+{
+ unsigned int uid;
+ struct df_ref *def;
+
+ uid = INSN_UID (insn);
+ for (def = DF_INSN_UID_GET (df, uid)->defs; def; def = def->next_ref)
+ if (DF_REF_REGNO (def) == regno)
+ return true;
+
+ return false;
+}
+
+
+/* Finds the reference corresponding to the definition of REG in INSN.
+ DF is the dataflow object. */
+
+struct df_ref *
+df_find_def (struct df *df, rtx insn, rtx reg)
+{
+ unsigned int uid;
+ struct df_ref *def;
+
+ if (GET_CODE (reg) == SUBREG)
+ reg = SUBREG_REG (reg);
+ gcc_assert (REG_P (reg));
+
+ uid = INSN_UID (insn);
+ for (def = DF_INSN_UID_GET (df, uid)->defs; def; def = def->next_ref)
+ if (rtx_equal_p (DF_REF_REAL_REG (def), reg))
+ return def;
+
+ return NULL;
+}
+
+
+/* Return true if REG is defined in INSN, false otherwise. */
+
+bool
+df_reg_defined (struct df *df, rtx insn, rtx reg)
+{
+ return df_find_def (df, insn, reg) != NULL;
+}
+
+
+/* Finds the reference corresponding to the use of REG in INSN.
+ DF is the dataflow object. */
+
+struct df_ref *
+df_find_use (struct df *df, rtx insn, rtx reg)
+{
+ unsigned int uid;
+ struct df_ref *use;
+
+ if (GET_CODE (reg) == SUBREG)
+ reg = SUBREG_REG (reg);
+ gcc_assert (REG_P (reg));
+
+ uid = INSN_UID (insn);
+ for (use = DF_INSN_UID_GET (df, uid)->uses; use; use = use->next_ref)
+ if (rtx_equal_p (DF_REF_REAL_REG (use), reg))
+ return use;
+
+ return NULL;
+}
+
+
+/* Return true if REG is referenced in INSN, false otherwise. */
+
+bool
+df_reg_used (struct df *df, rtx insn, rtx reg)
+{
+ return df_find_use (df, insn, reg) != NULL;
+}
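+
+/* For example, a pass can answer simple questions with these
+ predicates instead of walking the ref chains by hand (a sketch;
+ insn and reg are assumed to be in scope):
+
+ if (df_insn_regno_def_p (df, insn, REGNO (reg))
+ && !df_reg_used (df, insn, reg))
+ ... insn sets reg without reading it ...
+*/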
+
+
+/*----------------------------------------------------------------------------
+ Debugging and printing functions.
+----------------------------------------------------------------------------*/
+
+/* Dump dataflow info. */
+void
+df_dump (struct df *df, FILE *file)
+{
+ int i;
+
+ if (! df || ! file)
+ return;
+
+ fprintf (file, "\n\n%s\n", current_function_name ());
+ fprintf (file, "\nDataflow summary:\n");
+ fprintf (file, "def_info->bitmap_size = %d, use_info->bitmap_size = %d\n",
+ df->def_info.bitmap_size, df->use_info.bitmap_size);
+
+ for (i = 0; i < df->num_problems_defined; i++)
+ (*df->problems_in_order[i]->problem->dump_fun) (df->problems_in_order[i], file);
+
+ fprintf (file, "\n");
+}
+
+
+void
+df_refs_chain_dump (struct df *df, struct df_ref *ref,
+ bool follow_chain, FILE *file)
+{
+ fprintf (file, "{ ");
+ while (ref)
+ {
+ fprintf (file, "%c%d(%d) ",
+ DF_REF_REG_DEF_P (ref) ? 'd' : 'u',
+ DF_REF_ID (ref),
+ DF_REF_REGNO (ref));
+ if (follow_chain)
+ df_chain_dump (df, DF_REF_CHAIN (ref), file);
+ ref = ref->next_ref;
+ }
+ fprintf (file, "}");
+}
+
+
+/* Dump either a reg-def or reg-use chain. */
+
+void
+df_regs_chain_dump (struct df *df ATTRIBUTE_UNUSED, struct df_ref *ref, FILE *file)
+{
+ fprintf (file, "{ ");
+ while (ref)
+ {
+ fprintf (file, "%c%d(%d) ",
+ DF_REF_REG_DEF_P (ref) ? 'd' : 'u',
+ DF_REF_ID (ref),
+ DF_REF_REGNO (ref));
+ ref = ref->next_reg;
+ }
+ fprintf (file, "}");
+}
+
+
+void
+df_insn_debug (struct df *df, rtx insn, bool follow_chain, FILE *file)
+{
+ unsigned int uid;
+ int bbi;
+
+ uid = INSN_UID (insn);
+
+ if (DF_INSN_UID_DEFS (df, uid))
+ bbi = DF_REF_BBNO (DF_INSN_UID_DEFS (df, uid));
+ else if (DF_INSN_UID_USES(df, uid))
+ bbi = DF_REF_BBNO (DF_INSN_UID_USES (df, uid));
+ else
+ bbi = -1;
+
+ fprintf (file, "insn %d bb %d luid %d defs ",
+ uid, bbi, DF_INSN_LUID (df, insn));
+
+ df_refs_chain_dump (df, DF_INSN_UID_DEFS (df, uid), follow_chain, file);
+ fprintf (file, " defs ");
+ df_refs_chain_dump (df, DF_INSN_UID_USES (df, uid), follow_chain, file);
+ fprintf (file, "\n");
+}
+
+void
+df_insn_debug_regno (struct df *df, rtx insn, FILE *file)
+{
+ unsigned int uid;
+ int bbi;
+
+ uid = INSN_UID (insn);
+ if (DF_INSN_UID_DEFS (df, uid))
+ bbi = DF_REF_BBNO (DF_INSN_UID_DEFS (df, uid));
+ else if (DF_INSN_UID_USES(df, uid))
+ bbi = DF_REF_BBNO (DF_INSN_UID_USES (df, uid));
+ else
+ bbi = -1;
+
+ fprintf (file, "insn %d bb %d luid %d defs ",
+ uid, bbi, DF_INSN_LUID (df, insn));
+ df_regs_chain_dump (df, DF_INSN_UID_DEFS (df, uid), file);
+
+ fprintf (file, " uses ");
+ df_regs_chain_dump (df, DF_INSN_UID_USES (df, uid), file);
+ fprintf (file, "\n");
+}
+
+void
+df_regno_debug (struct df *df, unsigned int regno, FILE *file)
+{
+ fprintf (file, "reg %d defs ", regno);
+ df_regs_chain_dump (df, DF_REG_DEF_GET (df, regno)->reg_chain, file);
+ fprintf (file, " uses ");
+ df_regs_chain_dump (df, DF_REG_USE_GET (df, regno)->reg_chain, file);
+ fprintf (file, "\n");
+}
+
+
+void
+df_ref_debug (struct df *df, struct df_ref *ref, FILE *file)
+{
+ fprintf (file, "%c%d ",
+ DF_REF_REG_DEF_P (ref) ? 'd' : 'u',
+ DF_REF_ID (ref));
+ fprintf (file, "reg %d bb %d luid %d insn %d chain ",
+ DF_REF_REGNO (ref),
+ DF_REF_BBNO (ref),
+ DF_REF_INSN (ref) ? DF_INSN_LUID (df, DF_REF_INSN (ref)) : -1,
+ DF_REF_INSN (ref) ? INSN_UID (DF_REF_INSN (ref)) : -1);
+ df_chain_dump (df, DF_REF_CHAIN (ref), file);
+ fprintf (file, "\n");
+}
+
+/* Functions for debugging from GDB. */
+
+void
+debug_df_insn (rtx insn)
+{
+ df_insn_debug (ddf, insn, true, stderr);
+ debug_rtx (insn);
+}
+
+
+void
+debug_df_reg (rtx reg)
+{
+ df_regno_debug (ddf, REGNO (reg), stderr);
+}
+
+
+void
+debug_df_regno (unsigned int regno)
+{
+ df_regno_debug (ddf, regno, stderr);
+}
+
+
+void
+debug_df_ref (struct df_ref *ref)
+{
+ df_ref_debug (ddf, ref, stderr);
+}
+
+
+void
+debug_df_defno (unsigned int defno)
+{
+ df_ref_debug (ddf, DF_DEFS_GET (ddf, defno), stderr);
+}
+
+
+void
+debug_df_useno (unsigned int useno)
+{
+ df_ref_debug (ddf, DF_USES_GET (ddf, useno), stderr);
+}
+
+
+void
+debug_df_chain (struct df_link *link)
+{
+ df_chain_dump (ddf, link, stderr);
+ fputc ('\n', stderr);
+}
diff --git a/gcc/df-problems.c b/gcc/df-problems.c
new file mode 100644
index 00000000000..c17e048edad
--- /dev/null
+++ b/gcc/df-problems.c
@@ -0,0 +1,3093 @@
+/* Standard problems for dataflow support routines.
+ Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006
+ Free Software Foundation, Inc.
+ Originally contributed by Michael P. Hayes
+ (m.hayes@elec.canterbury.ac.nz, mhayes@redhat.com)
+ Major rewrite contributed by Danny Berlin (dberlin@dberlin.org)
+ and Kenneth Zadeck (zadeck@naturalbridge.com).
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 2, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING. If not, write to the Free
+Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
+02110-1301, USA. */
+
+#include "config.h"
+#include "system.h"
+#include "coretypes.h"
+#include "tm.h"
+#include "rtl.h"
+#include "tm_p.h"
+#include "insn-config.h"
+#include "recog.h"
+#include "function.h"
+#include "regs.h"
+#include "output.h"
+#include "alloc-pool.h"
+#include "flags.h"
+#include "hard-reg-set.h"
+#include "basic-block.h"
+#include "sbitmap.h"
+#include "bitmap.h"
+#include "timevar.h"
+#include "df.h"
+
+#define DF_SPARSE_THRESHOLD 32
+
+static bitmap seen_in_block = NULL;
+static bitmap seen_in_insn = NULL;
+
+
+/*----------------------------------------------------------------------------
+ Public access functions for the dataflow problems.
+----------------------------------------------------------------------------*/
+
+/* Get the instance of the problem that DFLOW is dependent on. */
+
+struct dataflow *
+df_get_dependent_problem (struct dataflow *dflow)
+{
+ struct df *df = dflow->df;
+ struct df_problem *dependent_problem = dflow->problem->dependent_problem;
+
+ gcc_assert (dependent_problem);
+ return df->problems_by_index[dependent_problem->id];
+}
+
+
+/* Create a du or ud chain from SRC to DST and link it into SRC. */
+
+struct df_link *
+df_chain_create (struct dataflow *dflow, struct df_ref *src, struct df_ref *dst)
+{
+ struct df_link *head = DF_REF_CHAIN (src);
+ struct df_link *link = pool_alloc (dflow->block_pool);
+
+ DF_REF_CHAIN (src) = link;
+ link->next = head;
+ link->ref = dst;
+ return link;
+}
+
+
+/* Delete a du or ud chain for REF. If LINK is NULL, delete all
+ chains for ref and check to see if the reverse chains can also be
+ deleted. If LINK is not NULL it must be a link off of ref. In
+ this case, the other end is not deleted. */
+
+void
+df_chain_unlink (struct dataflow *dflow, struct df_ref *ref, struct df_link *link)
+{
+ struct df_link *chain = DF_REF_CHAIN (ref);
+ if (link)
+ {
+ /* Link was the first element in the chain. */
+ if (chain == link)
+ DF_REF_CHAIN (ref) = link->next;
+ else
+ {
+ /* Link is an internal element in the chain. */
+ struct df_link *prev = chain;
+ while (chain)
+ {
+ if (chain == link)
+ {
+ prev->next = chain->next;
+ break;
+ }
+ prev = chain;
+ chain = chain->next;
+ }
+ }
+ pool_free (dflow->block_pool, link);
+ }
+ else
+ {
+ /* If chain is NULL here, it was because of a recursive call
+ when the other flavor of chains was not built. Just run through
+ the entire chain calling the other side and then deleting the
+ link. */
+ while (chain)
+ {
+ struct df_link *next = chain->next;
+ /* Delete the other side if it exists. */
+ df_chain_unlink (dflow, chain->ref, chain);
+ chain = next;
+ }
+ }
+}
+
+
+/* Copy the du or ud chain starting at FROM_REF and attach it to
+ TO_REF. */
+
+void
+df_chain_copy (struct dataflow *dflow,
+ struct df_ref *to_ref,
+ struct df_link *from_ref)
+{
+ while (from_ref)
+ {
+ df_chain_create (dflow, to_ref, from_ref->ref);
+ from_ref = from_ref->next;
+ }
+}
+
+
+/* Get the live in set for BB no matter what problem happens to be
+ defined. */
+
+bitmap
+df_get_live_in (struct df *df, basic_block bb)
+{
+ gcc_assert (df->problems_by_index[DF_LR]);
+
+ if (df->problems_by_index[DF_UREC])
+ return DF_RA_LIVE_IN (df, bb);
+ else if (df->problems_by_index[DF_UR])
+ return DF_LIVE_IN (df, bb);
+ else
+ return DF_UPWARD_LIVE_IN (df, bb);
+}
+
+
+/* Get the live out set for BB no matter what problem happens to be
+ defined. */
+
+bitmap
+df_get_live_out (struct df *df, basic_block bb)
+{
+ gcc_assert (df->problems_by_index[DF_LR]);
+
+ if (df->problems_by_index[DF_UREC])
+ return DF_RA_LIVE_OUT (df, bb);
+ else if (df->problems_by_index[DF_UR])
+ return DF_LIVE_OUT (df, bb);
+ else
+ return DF_UPWARD_LIVE_OUT (df, bb);
+}
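+
+/* For example, a client that wants the set of registers live into BB,
+ independent of which problems happen to be defined (a sketch):
+
+ bitmap live = df_get_live_in (df, bb);
+ unsigned int regno;
+ bitmap_iterator bi;
+
+ EXECUTE_IF_SET_IN_BITMAP (live, 0, regno, bi)
+ ... process regno ...
+*/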
+
+
+/*----------------------------------------------------------------------------
+ Utility functions.
+----------------------------------------------------------------------------*/
+
+/* Generic versions to get the void* version of the block info. Only
+ used inside the problem instace vectors. */
+
+/* Grow the bb_info array. */
+
+void
+df_grow_bb_info (struct dataflow *dflow)
+{
+ unsigned int new_size = last_basic_block + 1;
+ if (dflow->block_info_size < new_size)
+ {
+ new_size += new_size / 4;
+ dflow->block_info = xrealloc (dflow->block_info,
+ new_size * sizeof (void *));
+ memset (dflow->block_info + dflow->block_info_size, 0,
+ (new_size - dflow->block_info_size) * sizeof (void *));
+ dflow->block_info_size = new_size;
+ }
+}
+
+/* Dump a def-use or use-def chain for REF to FILE. */
+
+void
+df_chain_dump (struct df *df ATTRIBUTE_UNUSED, struct df_link *link, FILE *file)
+{
+ fprintf (file, "{ ");
+ for (; link; link = link->next)
+ {
+ fprintf (file, "%c%d(bb %d insn %d) ",
+ DF_REF_REG_DEF_P (link->ref) ? 'd' : 'u',
+ DF_REF_ID (link->ref),
+ DF_REF_BBNO (link->ref),
+ DF_REF_INSN (link->ref) ? DF_REF_INSN_UID (link->ref) : -1);
+ }
+ fprintf (file, "}");
+}
+
+
+/* Print some basic block info as part of df_dump. */
+
+void
+df_print_bb_index (basic_block bb, FILE *file)
+{
+ edge e;
+ edge_iterator ei;
+
+ fprintf (file, "( ");
+ FOR_EACH_EDGE (e, ei, bb->preds)
+ {
+ basic_block pred = e->src;
+ fprintf (file, "%d ", pred->index);
+ }
+ fprintf (file, ")->[%d]->( ", bb->index);
+ FOR_EACH_EDGE (e, ei, bb->succs)
+ {
+ basic_block succ = e->dest;
+ fprintf (file, "%d ", succ->index);
+ }
+ fprintf (file, ")\n");
+}
+
+
+/* Return a bitmap of the reference ids for register REGNO, from START
+ for COUNT refs, caching the result in MAPS[REGNO]. */
+
+static inline bitmap
+df_ref_bitmap (bitmap *maps, unsigned int regno, int start, int count)
+{
+ bitmap ids = maps[regno];
+ if (!ids)
+ {
+ unsigned int i;
+ unsigned int end = start + count;
+ ids = BITMAP_ALLOC (NULL);
+ maps[regno] = ids;
+ for (i = start; i < end; i++)
+ bitmap_set_bit (ids, i);
+ }
+ return ids;
+}
+
+
+/* Make sure that the seen_in_insn and seen_in_block bitmaps are set
+ up correctly. */
+
+static void
+df_set_seen (void)
+{
+ seen_in_block = BITMAP_ALLOC (NULL);
+ seen_in_insn = BITMAP_ALLOC (NULL);
+}
+
+
+static void
+df_unset_seen (void)
+{
+ BITMAP_FREE (seen_in_block);
+ BITMAP_FREE (seen_in_insn);
+}
+
+
+
+/*----------------------------------------------------------------------------
+ REACHING USES
+
+ Find the locations in the function where each use site for a pseudo
+ can reach backwards.
+
+----------------------------------------------------------------------------*/
+
+struct df_ru_problem_data
+{
+ bitmap *use_sites; /* Bitmap of uses for each pseudo. */
+ unsigned int use_sites_size; /* Size of use_sites. */
+ /* The set of defs to regs invalidated by call. */
+ bitmap sparse_invalidated_by_call;
+ /* The set of defs to regs invalidated by call for ru. */
+ bitmap dense_invalidated_by_call;
+};
+
+/* Get basic block info. */
+
+struct df_ru_bb_info *
+df_ru_get_bb_info (struct dataflow *dflow, unsigned int index)
+{
+ return (struct df_ru_bb_info *) dflow->block_info[index];
+}
+
+
+/* Set basic block info. */
+
+static void
+df_ru_set_bb_info (struct dataflow *dflow, unsigned int index,
+ struct df_ru_bb_info *bb_info)
+{
+ dflow->block_info[index] = bb_info;
+}
+
+
+/* Free basic block info. */
+
+static void
+df_ru_free_bb_info (struct dataflow *dflow, void *vbb_info)
+{
+ struct df_ru_bb_info *bb_info = (struct df_ru_bb_info *) vbb_info;
+ if (bb_info)
+ {
+ BITMAP_FREE (bb_info->kill);
+ BITMAP_FREE (bb_info->sparse_kill);
+ BITMAP_FREE (bb_info->gen);
+ BITMAP_FREE (bb_info->in);
+ BITMAP_FREE (bb_info->out);
+ pool_free (dflow->block_pool, bb_info);
+ }
+}
+
+
+/* Allocate or reset bitmaps for DFLOW blocks. The solution bits are
+ not touched unless the block is new. */
+
+static void
+df_ru_alloc (struct dataflow *dflow, bitmap blocks_to_rescan)
+{
+ unsigned int bb_index;
+ bitmap_iterator bi;
+ unsigned int reg_size = max_reg_num ();
+
+ if (! dflow->block_pool)
+ dflow->block_pool = create_alloc_pool ("df_ru_block pool",
+ sizeof (struct df_ru_bb_info), 50);
+
+ if (dflow->problem_data)
+ {
+ unsigned int i;
+ struct df_ru_problem_data *problem_data =
+ (struct df_ru_problem_data *) dflow->problem_data;
+
+ for (i = 0; i < problem_data->use_sites_size; i++)
+ {
+ bitmap bm = problem_data->use_sites[i];
+ if (bm)
+ {
+ BITMAP_FREE (bm);
+ problem_data->use_sites[i] = NULL;
+ }
+ }
+
+ if (reg_size > problem_data->use_sites_size)
+ {
+ problem_data->use_sites
+ = xrealloc (problem_data->use_sites, reg_size * sizeof (bitmap));
+ memset (problem_data->use_sites + problem_data->use_sites_size, 0,
+ (reg_size - problem_data->use_sites_size) * sizeof (bitmap));
+ problem_data->use_sites_size = reg_size;
+ }
+
+ bitmap_clear (problem_data->sparse_invalidated_by_call);
+ bitmap_clear (problem_data->dense_invalidated_by_call);
+ }
+ else
+ {
+ struct df_ru_problem_data *problem_data =
+ xmalloc (sizeof (struct df_ru_problem_data));
+ dflow->problem_data = problem_data;
+
+ problem_data->use_sites = xcalloc (reg_size, sizeof (bitmap));
+ problem_data->use_sites_size = reg_size;
+ problem_data->sparse_invalidated_by_call = BITMAP_ALLOC (NULL);
+ problem_data->dense_invalidated_by_call = BITMAP_ALLOC (NULL);
+ }
+
+ df_grow_bb_info (dflow);
+
+ /* Because of the clustering of all use sites for the same pseudo,
+ we have to process all of the blocks before doing the
+ analysis. */
+
+ EXECUTE_IF_SET_IN_BITMAP (blocks_to_rescan, 0, bb_index, bi)
+ {
+ struct df_ru_bb_info *bb_info = df_ru_get_bb_info (dflow, bb_index);
+ if (bb_info)
+ {
+ bitmap_clear (bb_info->kill);
+ bitmap_clear (bb_info->sparse_kill);
+ bitmap_clear (bb_info->gen);
+ }
+ else
+ {
+ bb_info = (struct df_ru_bb_info *) pool_alloc (dflow->block_pool);
+ df_ru_set_bb_info (dflow, bb_index, bb_info);
+ bb_info->kill = BITMAP_ALLOC (NULL);
+ bb_info->sparse_kill = BITMAP_ALLOC (NULL);
+ bb_info->gen = BITMAP_ALLOC (NULL);
+ bb_info->in = BITMAP_ALLOC (NULL);
+ bb_info->out = BITMAP_ALLOC (NULL);
+ }
+ }
+}
+
+
+/* Process a list of DEFs for df_ru_bb_local_compute. */
+
+static void
+df_ru_bb_local_compute_process_def (struct dataflow *dflow,
+ struct df_ru_bb_info *bb_info,
+ struct df_ref *def)
+{
+ struct df *df = dflow->df;
+ while (def)
+ {
+ unsigned int regno = DF_REF_REGNO (def);
+ unsigned int begin = DF_REG_USE_GET (df, regno)->begin;
+ unsigned int n_uses = DF_REG_USE_GET (df, regno)->n_refs;
+ if (!bitmap_bit_p (seen_in_block, regno))
+ {
+ /* The first def for regno causes the kill info to be
+ generated and the gen information to be cleared. */
+ if (!bitmap_bit_p (seen_in_insn, regno))
+ {
+ if (n_uses > DF_SPARSE_THRESHOLD)
+ {
+ bitmap_set_bit (bb_info->sparse_kill, regno);
+ bitmap_clear_range (bb_info->gen, begin, n_uses);
+ }
+ else
+ {
+ struct df_ru_problem_data *problem_data =
+ (struct df_ru_problem_data *) dflow->problem_data;
+ bitmap uses =
+ df_ref_bitmap (problem_data->use_sites, regno,
+ begin, n_uses);
+ bitmap_ior_into (bb_info->kill, uses);
+ bitmap_and_compl_into (bb_info->gen, uses);
+ }
+ }
+ bitmap_set_bit (seen_in_insn, regno);
+ }
+ def = def->next_ref;
+ }
+}
+
+
+/* Process a list of USEs for df_ru_bb_local_compute. */
+
+static void
+df_ru_bb_local_compute_process_use (struct df_ru_bb_info *bb_info,
+ struct df_ref *use,
+ enum df_ref_flags top_flag)
+{
+ while (use)
+ {
+ if (top_flag == (DF_REF_FLAGS (use) & DF_REF_AT_TOP))
+ {
+ /* Add use to set of gens in this BB unless we have seen a
+ def in a previous instruction. */
+ unsigned int regno = DF_REF_REGNO (use);
+ if (!bitmap_bit_p (seen_in_block, regno))
+ bitmap_set_bit (bb_info->gen, DF_REF_ID (use));
+ }
+ use = use->next_ref;
+ }
+}
+
+/* Compute local reaching use (upward exposed use) info for basic
+ block BB. problem_data->use_sites[R] caches the set of use ids
+ for register R. */
+static void
+df_ru_bb_local_compute (struct dataflow *dflow, unsigned int bb_index)
+{
+ struct df *df = dflow->df;
+ basic_block bb = BASIC_BLOCK (bb_index);
+ struct df_ru_bb_info *bb_info = df_ru_get_bb_info (dflow, bb_index);
+ rtx insn;
+
+ /* Set when a def for regno is seen. */
+ bitmap_clear (seen_in_block);
+ bitmap_clear (seen_in_insn);
+
+#ifdef EH_USES
+ /* Variables defined in the prolog that are used by the exception
+ handler. */
+ df_ru_bb_local_compute_process_use (bb_info,
+ df_get_artificial_uses (df, bb_index),
+ DF_REF_AT_TOP);
+#endif
+
+ /* Process the artificial defs first since these are at the top of
+ the block. */
+ df_ru_bb_local_compute_process_def (dflow, bb_info,
+ df_get_artificial_defs (df, bb_index));
+
+ FOR_BB_INSNS (bb, insn)
+ {
+ unsigned int uid = INSN_UID (insn);
+ if (! INSN_P (insn))
+ continue;
+
+ df_ru_bb_local_compute_process_def (dflow, bb_info,
+ DF_INSN_UID_GET (df, uid)->defs);
+
+ /* The use processing must happen after the defs processing even
+ though the uses logically happen first since the defs clear
+ the gen set. Otherwise, a use for regno occurring in the same
+ instruction as a def for regno would be cleared. */
+ df_ru_bb_local_compute_process_use (bb_info,
+ DF_INSN_UID_GET (df, uid)->uses, 0);
+
+ bitmap_ior_into (seen_in_block, seen_in_insn);
+ bitmap_clear (seen_in_insn);
+ }
+
+ /* Process the hardware registers that are always live. */
+ df_ru_bb_local_compute_process_use (bb_info,
+ df_get_artificial_uses (df, bb_index), 0);
+}
+
+
+/* Compute local reaching use (upward exposed use) info for each basic
+ block within BLOCKS. */
+static void
+df_ru_local_compute (struct dataflow *dflow,
+ bitmap all_blocks,
+ bitmap rescan_blocks ATTRIBUTE_UNUSED)
+{
+ struct df *df = dflow->df;
+ unsigned int bb_index;
+ bitmap_iterator bi;
+ unsigned int regno;
+ struct df_ru_problem_data *problem_data =
+ (struct df_ru_problem_data *) dflow->problem_data;
+ bitmap sparse_invalidated = problem_data->sparse_invalidated_by_call;
+ bitmap dense_invalidated = problem_data->dense_invalidated_by_call;
+
+ df_set_seen ();
+
+ if (!df->use_info.refs_organized)
+ df_reorganize_refs (&df->use_info);
+
+ EXECUTE_IF_SET_IN_BITMAP (all_blocks, 0, bb_index, bi)
+ {
+ df_ru_bb_local_compute (dflow, bb_index);
+ }
+
+ /* Set up the knockout bit vectors to be applied across EH_EDGES. */
+ EXECUTE_IF_SET_IN_BITMAP (df_invalidated_by_call, 0, regno, bi)
+ {
+ struct df_reg_info *reg_info = DF_REG_USE_GET (df, regno);
+ if (reg_info->n_refs > DF_SPARSE_THRESHOLD)
+ bitmap_set_bit (sparse_invalidated, regno);
+ else
+ {
+ bitmap defs = df_ref_bitmap (problem_data->use_sites, regno,
+ reg_info->begin, reg_info->n_refs);
+ bitmap_ior_into (dense_invalidated, defs);
+ }
+ }
+
+ df_unset_seen ();
+}
+
+
+/* Initialize the solution bit vectors for problem. */
+
+static void
+df_ru_init_solution (struct dataflow *dflow, bitmap all_blocks)
+{
+ unsigned int bb_index;
+ bitmap_iterator bi;
+
+ EXECUTE_IF_SET_IN_BITMAP (all_blocks, 0, bb_index, bi)
+ {
+ struct df_ru_bb_info *bb_info = df_ru_get_bb_info (dflow, bb_index);
+ bitmap_copy (bb_info->in, bb_info->gen);
+ bitmap_clear (bb_info->out);
+ }
+}
+
+
+/* The out of the edge's source is ORed with the in of its destination. */
+
+static void
+df_ru_confluence_n (struct dataflow *dflow, edge e)
+{
+ bitmap op1 = df_ru_get_bb_info (dflow, e->src->index)->out;
+ bitmap op2 = df_ru_get_bb_info (dflow, e->dest->index)->in;
+
+ if (e->flags & EDGE_EH)
+ {
+ struct df_ru_problem_data *problem_data =
+ (struct df_ru_problem_data *) dflow->problem_data;
+ bitmap sparse_invalidated = problem_data->sparse_invalidated_by_call;
+ bitmap dense_invalidated = problem_data->dense_invalidated_by_call;
+ struct df *df = dflow->df;
+ bitmap_iterator bi;
+ unsigned int regno;
+ bitmap_ior_and_compl_into (op1, op2, dense_invalidated);
+ EXECUTE_IF_SET_IN_BITMAP (sparse_invalidated, 0, regno, bi)
+ {
+ bitmap_clear_range (op1,
+ DF_REG_USE_GET (df, regno)->begin,
+ DF_REG_USE_GET (df, regno)->n_refs);
+ }
+ }
+ else
+ bitmap_ior_into (op1, op2);
+}
+
+
+/* Transfer function. */
+
+static bool
+df_ru_transfer_function (struct dataflow *dflow, int bb_index)
+{
+ struct df_ru_bb_info *bb_info = df_ru_get_bb_info (dflow, bb_index);
+ unsigned int regno;
+ bitmap_iterator bi;
+ bitmap in = bb_info->in;
+ bitmap out = bb_info->out;
+ bitmap gen = bb_info->gen;
+ bitmap kill = bb_info->kill;
+ bitmap sparse_kill = bb_info->sparse_kill;
+
+ if (bitmap_empty_p (sparse_kill))
+ return bitmap_ior_and_compl (in, gen, out, kill);
+ else
+ {
+ struct df *df = dflow->df;
+ bool changed = false;
+ bitmap tmp = BITMAP_ALLOC (NULL);
+ bitmap_copy (tmp, out);
+ EXECUTE_IF_SET_IN_BITMAP (sparse_kill, 0, regno, bi)
+ {
+ bitmap_clear_range (tmp,
+ DF_REG_USE_GET (df, regno)->begin,
+ DF_REG_USE_GET (df, regno)->n_refs);
+ }
+ bitmap_and_compl_into (tmp, kill);
+ bitmap_ior_into (tmp, gen);
+ changed = !bitmap_equal_p (tmp, in);
+ if (changed)
+ {
+ BITMAP_FREE (in);
+ bb_info->in = tmp;
+ }
+ else
+ BITMAP_FREE (tmp);
+ return changed;
+ }
+}
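+
+/* For instance (hypothetical numbers): if pseudo 70 has 200 uses with
+ ids [500, 700) and 200 > DF_SPARSE_THRESHOLD, a block that defines
+ it sets bit 70 in sparse_kill instead of 200 bits in kill; the
+ transfer function above then clears the whole range [500, 700) from
+ its copy of OUT before adding GEN. */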
+
+
+/* Free all storage associated with the problem. */
+
+static void
+df_ru_free (struct dataflow *dflow)
+{
+ unsigned int i;
+ struct df_ru_problem_data *problem_data =
+ (struct df_ru_problem_data *) dflow->problem_data;
+
+ for (i = 0; i < dflow->block_info_size; i++)
+ {
+ struct df_ru_bb_info *bb_info = df_ru_get_bb_info (dflow, i);
+ if (bb_info)
+ {
+ BITMAP_FREE (bb_info->kill);
+ BITMAP_FREE (bb_info->sparse_kill);
+ BITMAP_FREE (bb_info->gen);
+ BITMAP_FREE (bb_info->in);
+ BITMAP_FREE (bb_info->out);
+ }
+ }
+
+ free_alloc_pool (dflow->block_pool);
+
+ for (i = 0; i < problem_data->use_sites_size; i++)
+ {
+ bitmap bm = problem_data->use_sites[i];
+ if (bm)
+ BITMAP_FREE (bm);
+ }
+
+ free (problem_data->use_sites);
+ BITMAP_FREE (problem_data->sparse_invalidated_by_call);
+ BITMAP_FREE (problem_data->dense_invalidated_by_call);
+
+ dflow->block_info_size = 0;
+ free (dflow->block_info);
+ free (dflow->problem_data);
+ free (dflow);
+}
+
+
+/* Debugging info. */
+
+static void
+df_ru_dump (struct dataflow *dflow, FILE *file)
+{
+ basic_block bb;
+ struct df *df = dflow->df;
+ struct df_ru_problem_data *problem_data =
+ (struct df_ru_problem_data *) dflow->problem_data;
+ unsigned int m = max_reg_num ();
+ unsigned int regno;
+
+ fprintf (file, "Reaching uses:\n");
+
+ fprintf (file, " sparse invalidated \t");
+ dump_bitmap (file, problem_data->sparse_invalidated_by_call);
+ fprintf (file, " dense invalidated \t");
+ dump_bitmap (file, problem_data->dense_invalidated_by_call);
+
+ for (regno = 0; regno < m; regno++)
+ if (DF_REG_USE_GET (df, regno)->n_refs)
+ fprintf (file, "%d[%d,%d] ", regno,
+ DF_REG_USE_GET (df, regno)->begin,
+ DF_REG_USE_GET (df, regno)->n_refs);
+ fprintf (file, "\n");
+
+ FOR_ALL_BB (bb)
+ {
+ struct df_ru_bb_info *bb_info = df_ru_get_bb_info (dflow, bb->index);
+ df_print_bb_index (bb, file);
+
+ if (! bb_info->in)
+ continue;
+
+ fprintf (file, " in \t");
+ dump_bitmap (file, bb_info->in);
+ fprintf (file, " gen \t");
+ dump_bitmap (file, bb_info->gen);
+ fprintf (file, " kill\t");
+ dump_bitmap (file, bb_info->kill);
+ fprintf (file, " out \t");
+ dump_bitmap (file, bb_info->out);
+ }
+}
+
+/* All of the information associated with every instance of the problem. */
+
+static struct df_problem problem_RU =
+{
+ DF_RU, /* Problem id. */
+ DF_BACKWARD, /* Direction. */
+ df_ru_alloc, /* Allocate the problem specific data. */
+ df_ru_free_bb_info, /* Free basic block info. */
+ df_ru_local_compute, /* Local compute function. */
+ df_ru_init_solution, /* Init the solution specific data. */
+ df_iterative_dataflow, /* Iterative solver. */
+ NULL, /* Confluence operator 0. */
+ df_ru_confluence_n, /* Confluence operator n. */
+ df_ru_transfer_function, /* Transfer function. */
+ NULL, /* Finalize function. */
+ df_ru_free, /* Free all of the problem information. */
+ df_ru_dump, /* Debugging. */
+ NULL /* Dependent problem. */
+};
+
+
+
+/* Create a new DATAFLOW instance and add it to an existing instance
+ of DF. The returned structure is what is used to get at the
+ solution. */
+
+struct dataflow *
+df_ru_add_problem (struct df *df)
+{
+ return df_add_problem (df, &problem_RU);
+}
+
+
+/*----------------------------------------------------------------------------
+ REACHING DEFINITIONS
+
+ Find the locations in the function where each definition site for a
+ pseudo reaches.
+----------------------------------------------------------------------------*/
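+
+/* A sketch of the equations being solved (forward direction), over
+ bitmaps indexed by def id:
+
+ IN(bb) = union over predecessors P of OUT(P)
+ OUT(bb) = GEN(bb) | (IN(bb) & ~KILL(bb))
+
+ with sparse_kill playing the same role for def ids as it does for
+ use ids in the RU problem above. */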
+
+struct df_rd_problem_data
+{
+ bitmap *def_sites; /* Bitmap of defs for each pseudo. */
+ unsigned int def_sites_size; /* Size of def_sites. */
+ /* The set of regs (by regno) invalidated by call whose defs are
+ too numerous to track individually. */
+ bitmap sparse_invalidated_by_call;
+ /* The set of defs (by id) of regs invalidated by call. */
+ bitmap dense_invalidated_by_call;
+};
+
+/* Get basic block info. */
+
+struct df_rd_bb_info *
+df_rd_get_bb_info (struct dataflow *dflow, unsigned int index)
+{
+ return (struct df_rd_bb_info *) dflow->block_info[index];
+}
+
+
+/* Set basic block info. */
+
+static void
+df_rd_set_bb_info (struct dataflow *dflow, unsigned int index,
+ struct df_rd_bb_info *bb_info)
+{
+ dflow->block_info[index] = bb_info;
+}
+
+
+/* Free basic block info. */
+
+static void
+df_rd_free_bb_info (struct dataflow *dflow, void *vbb_info)
+{
+ struct df_rd_bb_info *bb_info = (struct df_rd_bb_info *) vbb_info;
+ if (bb_info)
+ {
+ BITMAP_FREE (bb_info->kill);
+ BITMAP_FREE (bb_info->sparse_kill);
+ BITMAP_FREE (bb_info->gen);
+ BITMAP_FREE (bb_info->in);
+ BITMAP_FREE (bb_info->out);
+ pool_free (dflow->block_pool, bb_info);
+ }
+}
+
+
+/* Allocate or reset bitmaps for DFLOW blocks. The solution bits are
+ not touched unless the block is new. */
+
+static void
+df_rd_alloc (struct dataflow *dflow, bitmap blocks_to_rescan)
+{
+ unsigned int bb_index;
+ bitmap_iterator bi;
+ unsigned int reg_size = max_reg_num ();
+
+ if (! dflow->block_pool)
+ dflow->block_pool = create_alloc_pool ("df_rd_block pool",
+ sizeof (struct df_rd_bb_info), 50);
+
+ if (dflow->problem_data)
+ {
+ unsigned int i;
+ struct df_rd_problem_data *problem_data =
+ (struct df_rd_problem_data *) dflow->problem_data;
+
+ for (i = 0; i < problem_data->def_sites_size; i++)
+ {
+ bitmap bm = problem_data->def_sites[i];
+ if (bm)
+ {
+ BITMAP_FREE (bm);
+ problem_data->def_sites[i] = NULL;
+ }
+ }
+
+ if (reg_size > problem_data->def_sites_size)
+ {
+ problem_data->def_sites
+ = xrealloc (problem_data->def_sites, reg_size * sizeof (bitmap));
+ memset (problem_data->def_sites + problem_data->def_sites_size, 0,
+ (reg_size - problem_data->def_sites_size) * sizeof (bitmap));
+ problem_data->def_sites_size = reg_size;
+ }
+
+ bitmap_clear (problem_data->sparse_invalidated_by_call);
+ bitmap_clear (problem_data->dense_invalidated_by_call);
+ }
+ else
+ {
+ struct df_rd_problem_data *problem_data =
+ xmalloc (sizeof (struct df_rd_problem_data));
+ dflow->problem_data = problem_data;
+
+ problem_data->def_sites = xcalloc (reg_size, sizeof (bitmap));
+ problem_data->def_sites_size = reg_size;
+ problem_data->sparse_invalidated_by_call = BITMAP_ALLOC (NULL);
+ problem_data->dense_invalidated_by_call = BITMAP_ALLOC (NULL);
+ }
+
+ df_grow_bb_info (dflow);
+
+ /* Because of the clustering of all def sites for the same pseudo,
+ we have to process all of the blocks before doing the
+ analysis. */
+
+ EXECUTE_IF_SET_IN_BITMAP (blocks_to_rescan, 0, bb_index, bi)
+ {
+ struct df_rd_bb_info *bb_info = df_rd_get_bb_info (dflow, bb_index);
+ if (bb_info)
+ {
+ bitmap_clear (bb_info->kill);
+ bitmap_clear (bb_info->sparse_kill);
+ bitmap_clear (bb_info->gen);
+ }
+ else
+ {
+ bb_info = (struct df_rd_bb_info *) pool_alloc (dflow->block_pool);
+ df_rd_set_bb_info (dflow, bb_index, bb_info);
+ bb_info->kill = BITMAP_ALLOC (NULL);
+ bb_info->sparse_kill = BITMAP_ALLOC (NULL);
+ bb_info->gen = BITMAP_ALLOC (NULL);
+ bb_info->in = BITMAP_ALLOC (NULL);
+ bb_info->out = BITMAP_ALLOC (NULL);
+ }
+ }
+}
+
+
+/* Process a list of DEFs for df_rd_bb_local_compute. */
+
+static void
+df_rd_bb_local_compute_process_def (struct dataflow *dflow,
+ struct df_rd_bb_info *bb_info,
+ struct df_ref *def)
+{
+ struct df *df = dflow->df;
+ while (def)
+ {
+ unsigned int regno = DF_REF_REGNO (def);
+ unsigned int begin = DF_REG_DEF_GET (df, regno)->begin;
+ unsigned int n_defs = DF_REG_DEF_GET (df, regno)->n_refs;
+
+ /* Only the last def(s) for a regno in the block have any
+ effect. */
+ if (!bitmap_bit_p (seen_in_block, regno))
+ {
+ /* The first def for regno in insn gets to knock out the
+ defs from other instructions. */
+ if (!bitmap_bit_p (seen_in_insn, regno))
+ {
+ if (n_defs > DF_SPARSE_THRESHOLD)
+ {
+ bitmap_set_bit (bb_info->sparse_kill, regno);
+ bitmap_clear_range (bb_info->gen, begin, n_defs);
+ }
+ else
+ {
+ struct df_rd_problem_data *problem_data =
+ (struct df_rd_problem_data *) dflow->problem_data;
+ bitmap defs =
+ df_ref_bitmap (problem_data->def_sites, regno,
+ begin, n_defs);
+ bitmap_ior_into (bb_info->kill, defs);
+ bitmap_and_compl_into (bb_info->gen, defs);
+ }
+ }
+
+ bitmap_set_bit (seen_in_insn, regno);
+ /* All defs for regno in the instruction may be put into
+ the gen set. */
+ if (! (DF_REF_FLAGS (def) & DF_REF_CLOBBER))
+ bitmap_set_bit (bb_info->gen, DF_REF_ID (def));
+ }
+ def = def->next_ref;
+ }
+}
+
+/* Compute local reaching def info for basic block BB. */
+
+static void
+df_rd_bb_local_compute (struct dataflow *dflow, unsigned int bb_index)
+{
+ struct df *df = dflow->df;
+ basic_block bb = BASIC_BLOCK (bb_index);
+ struct df_rd_bb_info *bb_info = df_rd_get_bb_info (dflow, bb_index);
+ rtx insn;
+
+ bitmap_clear (seen_in_block);
+ bitmap_clear (seen_in_insn);
+
+ FOR_BB_INSNS_REVERSE (bb, insn)
+ {
+ unsigned int uid = INSN_UID (insn);
+
+ if (! INSN_P (insn))
+ continue;
+
+ df_rd_bb_local_compute_process_def (dflow, bb_info,
+ DF_INSN_UID_GET (df, uid)->defs);
+
+ /* This complex dance with the two bitmaps is required because
+ instructions can assign twice to the same pseudo. This
+ generally happens with calls that will have one def for the
+ result and another def for the clobber. If only one vector
+ is used and the clobber goes first, the result will be
+ lost. */
+ bitmap_ior_into (seen_in_block, seen_in_insn);
+ bitmap_clear (seen_in_insn);
+ }
+
+ /* Process the artificial defs last since we are going backwards
+ through the block and these are logically at the start. */
+ df_rd_bb_local_compute_process_def (dflow, bb_info,
+ df_get_artificial_defs (df, bb_index));
+}
+
+
+/* Compute local reaching def info for each basic block within BLOCKS. */
+
+static void
+df_rd_local_compute (struct dataflow *dflow,
+ bitmap all_blocks,
+ bitmap rescan_blocks ATTRIBUTE_UNUSED)
+{
+ struct df *df = dflow->df;
+ unsigned int bb_index;
+ bitmap_iterator bi;
+ unsigned int regno;
+ struct df_rd_problem_data *problem_data =
+ (struct df_rd_problem_data *) dflow->problem_data;
+ bitmap sparse_invalidated = problem_data->sparse_invalidated_by_call;
+ bitmap dense_invalidated = problem_data->dense_invalidated_by_call;
+
+ df_set_seen ();
+
+ if (!df->def_info.refs_organized)
+ df_reorganize_refs (&df->def_info);
+
+ EXECUTE_IF_SET_IN_BITMAP (all_blocks, 0, bb_index, bi)
+ {
+ df_rd_bb_local_compute (dflow, bb_index);
+ }
+
+ /* Set up the knockout bit vectors to be applied across EH_EDGES. */
+ EXECUTE_IF_SET_IN_BITMAP (df_invalidated_by_call, 0, regno, bi)
+ {
+ struct df_reg_info *reg_info = DF_REG_DEF_GET (df, regno);
+ if (reg_info->n_refs > DF_SPARSE_THRESHOLD)
+ {
+ bitmap_set_bit (sparse_invalidated, regno);
+ }
+ else
+ {
+ bitmap defs = df_ref_bitmap (problem_data->def_sites, regno,
+ reg_info->begin, reg_info->n_refs);
+ bitmap_ior_into (dense_invalidated, defs);
+ }
+ }
+ df_unset_seen ();
+}
+
+
+/* Initialize the solution bit vectors for problem. */
+
+static void
+df_rd_init_solution (struct dataflow *dflow, bitmap all_blocks)
+{
+ unsigned int bb_index;
+ bitmap_iterator bi;
+
+ EXECUTE_IF_SET_IN_BITMAP (all_blocks, 0, bb_index, bi)
+ {
+ struct df_rd_bb_info *bb_info = df_rd_get_bb_info (dflow, bb_index);
+
+ bitmap_copy (bb_info->out, bb_info->gen);
+ bitmap_clear (bb_info->in);
+ }
+}
+
+/* The in of the edge's destination is ORed with the out of its source. */
+
+static void
+df_rd_confluence_n (struct dataflow *dflow, edge e)
+{
+ bitmap op1 = df_rd_get_bb_info (dflow, e->dest->index)->in;
+ bitmap op2 = df_rd_get_bb_info (dflow, e->src->index)->out;
+
+ if (e->flags & EDGE_EH)
+ {
+ struct df_rd_problem_data *problem_data =
+ (struct df_rd_problem_data *) dflow->problem_data;
+ bitmap sparse_invalidated = problem_data->sparse_invalidated_by_call;
+ bitmap dense_invalidated = problem_data->dense_invalidated_by_call;
+ struct df *df = dflow->df;
+ bitmap_iterator bi;
+ unsigned int regno;
+ bitmap_ior_and_compl_into (op1, op2, dense_invalidated);
+ EXECUTE_IF_SET_IN_BITMAP (sparse_invalidated, 0, regno, bi)
+ {
+ bitmap_clear_range (op1,
+ DF_REG_DEF_GET (df, regno)->begin,
+ DF_REG_DEF_GET (df, regno)->n_refs);
+ }
+ }
+ else
+ bitmap_ior_into (op1, op2);
+}
+
+
+/* Transfer function. */
+
+static bool
+df_rd_transfer_function (struct dataflow *dflow, int bb_index)
+{
+ struct df_rd_bb_info *bb_info = df_rd_get_bb_info (dflow, bb_index);
+ unsigned int regno;
+ bitmap_iterator bi;
+ bitmap in = bb_info->in;
+ bitmap out = bb_info->out;
+ bitmap gen = bb_info->gen;
+ bitmap kill = bb_info->kill;
+ bitmap sparse_kill = bb_info->sparse_kill;
+
+ if (bitmap_empty_p (sparse_kill))
+ return bitmap_ior_and_compl (out, gen, in, kill);
+ else
+ {
+ struct df *df = dflow->df;
+ bool changed = false;
+ bitmap tmp = BITMAP_ALLOC (NULL);
+ bitmap_copy (tmp, in);
+ EXECUTE_IF_SET_IN_BITMAP (sparse_kill, 0, regno, bi)
+ {
+ bitmap_clear_range (tmp,
+ DF_REG_DEF_GET (df, regno)->begin,
+ DF_REG_DEF_GET (df, regno)->n_refs);
+ }
+ bitmap_and_compl_into (tmp, kill);
+ bitmap_ior_into (tmp, gen);
+ changed = !bitmap_equal_p (tmp, out);
+ if (changed)
+ {
+ BITMAP_FREE (out);
+ bb_info->out = tmp;
+ }
+ else
+ BITMAP_FREE (tmp);
+ return changed;
+ }
+}
+
+
+/* Free all storage associated with the problem. */
+
+static void
+df_rd_free (struct dataflow *dflow)
+{
+ unsigned int i;
+ struct df_rd_problem_data *problem_data =
+ (struct df_rd_problem_data *) dflow->problem_data;
+
+ for (i = 0; i < dflow->block_info_size; i++)
+ {
+ struct df_rd_bb_info *bb_info = df_rd_get_bb_info (dflow, i);
+ if (bb_info)
+ {
+ BITMAP_FREE (bb_info->kill);
+ BITMAP_FREE (bb_info->sparse_kill);
+ BITMAP_FREE (bb_info->gen);
+ BITMAP_FREE (bb_info->in);
+ BITMAP_FREE (bb_info->out);
+ }
+ }
+
+ free_alloc_pool (dflow->block_pool);
+
+ for (i = 0; i < problem_data->def_sites_size; i++)
+ {
+ bitmap bm = problem_data->def_sites[i];
+ if (bm)
+ BITMAP_FREE (bm);
+ }
+
+ free (problem_data->def_sites);
+ BITMAP_FREE (problem_data->sparse_invalidated_by_call);
+ BITMAP_FREE (problem_data->dense_invalidated_by_call);
+
+ dflow->block_info_size = 0;
+ free (dflow->block_info);
+ free (dflow->problem_data);
+ free (dflow);
+}
+
+
+/* Debugging info. */
+
+static void
+df_rd_dump (struct dataflow *dflow, FILE *file)
+{
+ struct df *df = dflow->df;
+ basic_block bb;
+ struct df_rd_problem_data *problem_data =
+ (struct df_rd_problem_data *) dflow->problem_data;
+ unsigned int m = max_reg_num ();
+ unsigned int regno;
+
+ fprintf (file, "Reaching defs:\n\n");
+
+ fprintf (file, " sparse invalidated \t");
+ dump_bitmap (file, problem_data->sparse_invalidated_by_call);
+ fprintf (file, " dense invalidated \t");
+ dump_bitmap (file, problem_data->dense_invalidated_by_call);
+
+ for (regno = 0; regno < m; regno++)
+ if (DF_REG_DEF_GET (df, regno)->n_refs)
+ fprintf (file, "%d[%d,%d] ", regno,
+ DF_REG_DEF_GET (df, regno)->begin,
+ DF_REG_DEF_GET (df, regno)->n_refs);
+ fprintf (file, "\n");
+
+ FOR_ALL_BB (bb)
+ {
+ struct df_rd_bb_info *bb_info = df_rd_get_bb_info (dflow, bb->index);
+ df_print_bb_index (bb, file);
+
+ if (! bb_info->in)
+ continue;
+
+ fprintf (file, " in\t(%d)\n", (int) bitmap_count_bits (bb_info->in));
+ dump_bitmap (file, bb_info->in);
+ fprintf (file, " gen \t(%d)\n", (int) bitmap_count_bits (bb_info->gen));
+ dump_bitmap (file, bb_info->gen);
+ fprintf (file, " kill\t(%d)\n", (int) bitmap_count_bits (bb_info->kill));
+ dump_bitmap (file, bb_info->kill);
+ fprintf (file, " out\t(%d)\n", (int) bitmap_count_bits (bb_info->out));
+ dump_bitmap (file, bb_info->out);
+ }
+}
+
+/* All of the information associated with every instance of the problem. */
+
+static struct df_problem problem_RD =
+{
+ DF_RD, /* Problem id. */
+ DF_FORWARD, /* Direction. */
+ df_rd_alloc, /* Allocate the problem specific data. */
+ df_rd_free_bb_info, /* Free basic block info. */
+ df_rd_local_compute, /* Local compute function. */
+ df_rd_init_solution, /* Init the solution specific data. */
+ df_iterative_dataflow, /* Iterative solver. */
+ NULL, /* Confluence operator 0. */
+ df_rd_confluence_n, /* Confluence operator n. */
+ df_rd_transfer_function, /* Transfer function. */
+ NULL, /* Finalize function. */
+ df_rd_free, /* Free all of the problem information. */
+ df_rd_dump, /* Debugging. */
+ NULL /* Dependent problem. */
+};
+
+
+
+/* Create a new DATAFLOW instance and add it to an existing instance
+ of DF. The returned structure is what is used to get at the
+ solution. */
+
+struct dataflow *
+df_rd_add_problem (struct df *df)
+{
+ return df_add_problem (df, &problem_RD);
+}
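+
+/* A hypothetical client of this interface (a sketch only; it assumes
+ df_init takes the scanning flags and that the DF_RD_BB_INFO
+ accessor declared with this problem takes a basic block):
+
+ struct df *df = df_init (DF_HARD_REGS);
+ df_rd_add_problem (df);
+ df_analyze (df);
+ ... for each bb of interest ...
+ bitmap rd_in = DF_RD_BB_INFO (df, bb)->in;
+ ... query rd_in with bitmap_bit_p on a def id ...
+ df_finish (df);
+*/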
+
+
+
+/*----------------------------------------------------------------------------
+ LIVE REGISTERS
+
+ Find the locations in the function where any use of a pseudo can reach
+ in the backwards direction.
+----------------------------------------------------------------------------*/
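+
+/* A sketch of the equations (backward, over register numbers rather
+ than ref ids):
+
+ OUT(bb) = (union over successors S of IN(S)) | hardware_regs_used
+ IN(bb) = USE(bb) | (OUT(bb) & ~DEF(bb))
+*/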
+
+/* Get basic block info. */
+
+struct df_lr_bb_info *
+df_lr_get_bb_info (struct dataflow *dflow, unsigned int index)
+{
+ return (struct df_lr_bb_info *) dflow->block_info[index];
+}
+
+
+/* Set basic block info. */
+
+static void
+df_lr_set_bb_info (struct dataflow *dflow, unsigned int index,
+ struct df_lr_bb_info *bb_info)
+{
+ dflow->block_info[index] = bb_info;
+}
+
+
+/* Free basic block info. */
+
+static void
+df_lr_free_bb_info (struct dataflow *dflow, void *vbb_info)
+{
+ struct df_lr_bb_info *bb_info = (struct df_lr_bb_info *) vbb_info;
+ if (bb_info)
+ {
+ BITMAP_FREE (bb_info->use);
+ BITMAP_FREE (bb_info->def);
+ BITMAP_FREE (bb_info->in);
+ BITMAP_FREE (bb_info->out);
+ pool_free (dflow->block_pool, bb_info);
+ }
+}
+
+
+/* Allocate or reset bitmaps for DFLOW blocks. The solution bits are
+ not touched unless the block is new. */
+
+static void
+df_lr_alloc (struct dataflow *dflow, bitmap blocks_to_rescan)
+{
+ unsigned int bb_index;
+ bitmap_iterator bi;
+
+ if (! dflow->block_pool)
+ dflow->block_pool = create_alloc_pool ("df_lr_block pool",
+ sizeof (struct df_lr_bb_info), 50);
+
+ df_grow_bb_info (dflow);
+
+ /* Allocate or clear the use/def sets for each block to be
+ rescanned. */
+
+ EXECUTE_IF_SET_IN_BITMAP (blocks_to_rescan, 0, bb_index, bi)
+ {
+ struct df_lr_bb_info *bb_info = df_lr_get_bb_info (dflow, bb_index);
+ if (bb_info)
+ {
+ bitmap_clear (bb_info->def);
+ bitmap_clear (bb_info->use);
+ }
+ else
+ {
+ bb_info = (struct df_lr_bb_info *) pool_alloc (dflow->block_pool);
+ df_lr_set_bb_info (dflow, bb_index, bb_info);
+ bb_info->use = BITMAP_ALLOC (NULL);
+ bb_info->def = BITMAP_ALLOC (NULL);
+ bb_info->in = BITMAP_ALLOC (NULL);
+ bb_info->out = BITMAP_ALLOC (NULL);
+ }
+ }
+}
+
+
+/* Compute local live register info for basic block BB. */
+
+static void
+df_lr_bb_local_compute (struct dataflow *dflow,
+ struct df *df, unsigned int bb_index)
+{
+ basic_block bb = BASIC_BLOCK (bb_index);
+ struct df_lr_bb_info *bb_info = df_lr_get_bb_info (dflow, bb_index);
+ rtx insn;
+ struct df_ref *def;
+ struct df_ref *use;
+
+ /* Process the hardware registers that are always live. */
+ for (use = df_get_artificial_uses (df, bb_index); use; use = use->next_ref)
+ /* Add use to set of uses in this BB. */
+ if ((DF_REF_FLAGS (use) & DF_REF_AT_TOP) == 0)
+ bitmap_set_bit (bb_info->use, DF_REF_REGNO (use));
+
+ FOR_BB_INSNS_REVERSE (bb, insn)
+ {
+ unsigned int uid = INSN_UID (insn);
+
+ if (! INSN_P (insn))
+ continue;
+
+ if (CALL_P (insn))
+ {
+ for (def = DF_INSN_UID_GET (df, uid)->defs; def; def = def->next_ref)
+ {
+ unsigned int dregno = DF_REF_REGNO (def);
+
+ if (dregno >= FIRST_PSEUDO_REGISTER
+ || !(SIBLING_CALL_P (insn)
+ && bitmap_bit_p (df->exit_block_uses, dregno)
+ && !refers_to_regno_p (dregno, dregno+1,
+ current_function_return_rtx,
+ (rtx *)0)))
+ {
+ /* Add def to set of defs in this BB. */
+ bitmap_set_bit (bb_info->def, dregno);
+ bitmap_clear_bit (bb_info->use, dregno);
+ }
+ }
+ }
+ else
+ {
+ for (def = DF_INSN_UID_GET (df, uid)->defs; def; def = def->next_ref)
+ {
+ unsigned int dregno = DF_REF_REGNO (def);
+
+ if (DF_INSN_CONTAINS_ASM (df, insn)
+ && dregno < FIRST_PSEUDO_REGISTER)
+ {
+ unsigned int i;
+ unsigned int end =
+ dregno + hard_regno_nregs[dregno][GET_MODE (DF_REF_REG (def))] - 1;
+ for (i = dregno; i <= end; ++i)
+ regs_asm_clobbered[i] = 1;
+ }
+ /* Add def to set of defs in this BB. */
+ bitmap_set_bit (bb_info->def, dregno);
+ bitmap_clear_bit (bb_info->use, dregno);
+ }
+ }
+
+ for (use = DF_INSN_UID_GET (df, uid)->uses; use; use = use->next_ref)
+ /* Add use to set of uses in this BB. */
+ bitmap_set_bit (bb_info->use, DF_REF_REGNO (use));
+ }
+
+ /* Process the registers set in an exception handler. */
+ for (def = df_get_artificial_defs (df, bb_index); def; def = def->next_ref)
+ {
+ unsigned int dregno = DF_REF_REGNO (def);
+ bitmap_set_bit (bb_info->def, dregno);
+ bitmap_clear_bit (bb_info->use, dregno);
+ }
+
+#ifdef EH_USES
+ /* Process the uses that are live into an exception handler. */
+ for (use = df_get_artificial_uses (df, bb_index); use; use = use->next_ref)
+ /* Add use to set of uses in this BB. */
+ if (DF_REF_FLAGS (use) & DF_REF_AT_TOP)
+ bitmap_set_bit (bb_info->use, DF_REF_REGNO (use));
+#endif
+}
+
+/* Compute local live register info for each basic block within BLOCKS. */
+
+static void
+df_lr_local_compute (struct dataflow *dflow,
+ bitmap all_blocks,
+ bitmap rescan_blocks)
+{
+ struct df *df = dflow->df;
+ unsigned int bb_index;
+ bitmap_iterator bi;
+
+ /* If everything is being rescanned, the asm-clobbered information
+ must be recomputed from scratch. */
+ if (bitmap_equal_p (all_blocks, rescan_blocks))
+ memset (regs_asm_clobbered, 0, sizeof (regs_asm_clobbered));
+
+ bitmap_clear (df->hardware_regs_used);
+
+ /* The all-important stack pointer must always be live. */
+ bitmap_set_bit (df->hardware_regs_used, STACK_POINTER_REGNUM);
+
+ /* Before reload, there are a few registers that must be forced
+ live everywhere -- which might not already be the case for
+ blocks within infinite loops. */
+ if (! reload_completed)
+ {
+ /* Any reference to any pseudo before reload is a potential
+ reference of the frame pointer. */
+ bitmap_set_bit (df->hardware_regs_used, FRAME_POINTER_REGNUM);
+
+#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
+ /* Pseudos with argument area equivalences may require
+ reloading via the argument pointer. */
+ if (fixed_regs[ARG_POINTER_REGNUM])
+ bitmap_set_bit (df->hardware_regs_used, ARG_POINTER_REGNUM);
+#endif
+
+ /* Any constant, or pseudo with constant equivalences, may
+ require reloading from memory using the pic register. */
+ if ((unsigned) PIC_OFFSET_TABLE_REGNUM != INVALID_REGNUM
+ && fixed_regs[PIC_OFFSET_TABLE_REGNUM])
+ bitmap_set_bit (df->hardware_regs_used, PIC_OFFSET_TABLE_REGNUM);
+ }
+
+ if (bitmap_bit_p (rescan_blocks, EXIT_BLOCK))
+ {
+ /* The exit block is special for this problem and its bits are
+ computed from thin air. */
+ struct df_lr_bb_info *bb_info = df_lr_get_bb_info (dflow, EXIT_BLOCK);
+ bitmap_copy (bb_info->use, df->exit_block_uses);
+ }
+
+ EXECUTE_IF_SET_IN_BITMAP (rescan_blocks, 0, bb_index, bi)
+ {
+ if (bb_index == EXIT_BLOCK)
+ continue;
+ df_lr_bb_local_compute (dflow, df, bb_index);
+ }
+}
+
+
+/* Initialize the solution vectors. */
+
+static void
+df_lr_init (struct dataflow *dflow, bitmap all_blocks)
+{
+ unsigned int bb_index;
+ bitmap_iterator bi;
+
+ EXECUTE_IF_SET_IN_BITMAP (all_blocks, 0, bb_index, bi)
+ {
+ struct df_lr_bb_info *bb_info = df_lr_get_bb_info (dflow, bb_index);
+ bitmap_copy (bb_info->in, bb_info->use);
+ bitmap_clear (bb_info->out);
+ }
+}
+
+
+/* Confluence function that processes infinite loops. This might be a
+ noreturn function that throws. And even if it isn't, getting the
+ unwind info right helps debugging. */
+static void
+df_lr_confluence_0 (struct dataflow *dflow, basic_block bb)
+{
+ struct df *df = dflow->df;
+
+ bitmap op1 = df_lr_get_bb_info (dflow, bb->index)->out;
+ if (bb != EXIT_BLOCK_PTR)
+ bitmap_copy (op1, df->hardware_regs_used);
+}
+
+
+/* Confluence function that ignores fake edges. */
+static void
+df_lr_confluence_n (struct dataflow *dflow, edge e)
+{
+ bitmap op1 = df_lr_get_bb_info (dflow, e->src->index)->out;
+ bitmap op2 = df_lr_get_bb_info (dflow, e->dest->index)->in;
+
+ /* Call-clobbered registers die across exception and call edges. */
+ /* ??? Abnormal call edges ignored for the moment, as this gets
+ confused by sibling call edges, which crashes reg-stack. */
+ if (e->flags & EDGE_EH)
+ bitmap_ior_and_compl_into (op1, op2, df_invalidated_by_call);
+ else
+ bitmap_ior_into (op1, op2);
+
+ bitmap_ior_into (op1, dflow->df->hardware_regs_used);
+}
+
+
+/* Transfer function. */
+static bool
+df_lr_transfer_function (struct dataflow *dflow, int bb_index)
+{
+ struct df_lr_bb_info *bb_info = df_lr_get_bb_info (dflow, bb_index);
+ bitmap in = bb_info->in;
+ bitmap out = bb_info->out;
+ bitmap use = bb_info->use;
+ bitmap def = bb_info->def;
+
+ return bitmap_ior_and_compl (in, use, out, def);
+}
+
+
+/* Free all storage associated with the problem. */
+
+static void
+df_lr_free (struct dataflow *dflow)
+{
+ unsigned int i;
+ for (i = 0; i < dflow->block_info_size; i++)
+ {
+ struct df_lr_bb_info *bb_info = df_lr_get_bb_info (dflow, i);
+ if (bb_info)
+ {
+ BITMAP_FREE (bb_info->use);
+ BITMAP_FREE (bb_info->def);
+ BITMAP_FREE (bb_info->in);
+ BITMAP_FREE (bb_info->out);
+ }
+ }
+ free_alloc_pool (dflow->block_pool);
+
+ dflow->block_info_size = 0;
+ free (dflow->block_info);
+ free (dflow);
+}
+
+
+/* Debugging info. */
+
+static void
+df_lr_dump (struct dataflow *dflow, FILE *file)
+{
+ basic_block bb;
+
+ fprintf (file, "Live Registers:\n");
+ FOR_ALL_BB (bb)
+ {
+ struct df_lr_bb_info *bb_info = df_lr_get_bb_info (dflow, bb->index);
+ df_print_bb_index (bb, file);
+
+ if (!bb_info->in)
+ continue;
+
+ fprintf (file, " in \t");
+ dump_bitmap (file, bb_info->in);
+ fprintf (file, " use \t");
+ dump_bitmap (file, bb_info->use);
+ fprintf (file, " def \t");
+ dump_bitmap (file, bb_info->def);
+ fprintf (file, " out \t");
+ dump_bitmap (file, bb_info->out);
+ }
+}
+
+/* All of the information associated with every instance of the problem. */
+
+static struct df_problem problem_LR =
+{
+ DF_LR, /* Problem id. */
+ DF_BACKWARD, /* Direction. */
+ df_lr_alloc, /* Allocate the problem specific data. */
+ df_lr_free_bb_info, /* Free basic block info. */
+ df_lr_local_compute, /* Local compute function. */
+ df_lr_init, /* Init the solution specific data. */
+ df_iterative_dataflow, /* Iterative solver. */
+ df_lr_confluence_0, /* Confluence operator 0. */
+ df_lr_confluence_n, /* Confluence operator n. */
+ df_lr_transfer_function, /* Transfer function. */
+ NULL, /* Finalize function. */
+ df_lr_free, /* Free all of the problem information. */
+ df_lr_dump, /* Debugging. */
+ NULL /* Dependent problem. */
+};
+
+
+/* Create a new DATAFLOW instance and add it to an existing instance
+ of DF. The returned structure is what is used to get at the
+ solution. */
+
+struct dataflow *
+df_lr_add_problem (struct df *df)
+{
+ return df_add_problem (df, &problem_LR);
+}
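+
+/* A hypothetical consumer (a sketch; DF_LIVE_IN and DF_LIVE_OUT are
+ the accessor macros declared alongside this problem):
+
+ df_lr_add_problem (df);
+ df_analyze (df);
+ if (bitmap_bit_p (DF_LIVE_IN (df, bb), regno))
+ ... regno is live on entry to bb ...
+*/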
+
+
+
+/*----------------------------------------------------------------------------
+ UNINITIALIZED REGISTERS
+
+ Find the set of uses for registers that are reachable from the entry
+ block without passing through a definition.
+----------------------------------------------------------------------------*/
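+
+/* A sketch of the equations (forward, over register numbers):
+
+ IN(bb) = union over predecessors P of OUT(P)
+ OUT(bb) = GEN(bb) | (IN(bb) & ~KILL(bb))
+
+ where GEN holds the registers defined (initialized) in the block.
+ The finalizer then trims the solution against the lr problem so
+ that no register is marked at a point where it is never used. */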
+
+/* Get basic block info. */
+
+struct df_ur_bb_info *
+df_ur_get_bb_info (struct dataflow *dflow, unsigned int index)
+{
+ return (struct df_ur_bb_info *) dflow->block_info[index];
+}
+
+
+/* Set basic block info. */
+
+static void
+df_ur_set_bb_info (struct dataflow *dflow, unsigned int index,
+ struct df_ur_bb_info *bb_info)
+{
+ dflow->block_info[index] = bb_info;
+}
+
+
+/* Free basic block info. */
+
+static void
+df_ur_free_bb_info (struct dataflow *dflow, void *vbb_info)
+{
+ struct df_ur_bb_info *bb_info = (struct df_ur_bb_info *) vbb_info;
+ if (bb_info)
+ {
+ BITMAP_FREE (bb_info->gen);
+ BITMAP_FREE (bb_info->kill);
+ BITMAP_FREE (bb_info->in);
+ BITMAP_FREE (bb_info->out);
+ pool_free (dflow->block_pool, bb_info);
+ }
+}
+
+
+/* Allocate or reset bitmaps for DFLOW blocks. The solution bits are
+ not touched unless the block is new. */
+
+static void
+df_ur_alloc (struct dataflow *dflow, bitmap blocks_to_rescan)
+{
+ unsigned int bb_index;
+ bitmap_iterator bi;
+
+ if (! dflow->block_pool)
+ dflow->block_pool = create_alloc_pool ("df_ur_block pool",
+ sizeof (struct df_ur_bb_info), 100);
+
+ df_grow_bb_info (dflow);
+
+ /* Allocate or clear the gen/kill sets for each block to be
+ rescanned. */
+
+ EXECUTE_IF_SET_IN_BITMAP (blocks_to_rescan, 0, bb_index, bi)
+ {
+ struct df_ur_bb_info *bb_info = df_ur_get_bb_info (dflow, bb_index);
+ if (bb_info)
+ {
+ bitmap_clear (bb_info->kill);
+ bitmap_clear (bb_info->gen);
+ }
+ else
+ {
+ bb_info = (struct df_ur_bb_info *) pool_alloc (dflow->block_pool);
+ df_ur_set_bb_info (dflow, bb_index, bb_info);
+ bb_info->kill = BITMAP_ALLOC (NULL);
+ bb_info->gen = BITMAP_ALLOC (NULL);
+ bb_info->in = BITMAP_ALLOC (NULL);
+ bb_info->out = BITMAP_ALLOC (NULL);
+ }
+ }
+}
+
+
+/* Compute local uninitialized register info for basic block BB. */
+
+static void
+df_ur_bb_local_compute (struct dataflow *dflow, unsigned int bb_index)
+{
+ struct df *df = dflow->df;
+ basic_block bb = BASIC_BLOCK (bb_index);
+ struct df_ur_bb_info *bb_info = df_ur_get_bb_info (dflow, bb_index);
+ rtx insn;
+ struct df_ref *def;
+
+ bitmap_clear (seen_in_block);
+ bitmap_clear (seen_in_insn);
+
+ FOR_BB_INSNS_REVERSE (bb, insn)
+ {
+ unsigned int uid = INSN_UID (insn);
+ if (!INSN_P (insn))
+ continue;
+
+ for (def = DF_INSN_UID_GET (df, uid)->defs; def; def = def->next_ref)
+ {
+ unsigned int regno = DF_REF_REGNO (def);
+ /* Only the last def counts. */
+ if (!bitmap_bit_p (seen_in_block, regno))
+ {
+ bitmap_set_bit (seen_in_insn, regno);
+
+ if (DF_REF_FLAGS (def) & DF_REF_CLOBBER)
+ bitmap_set_bit (bb_info->kill, regno);
+ else
+ bitmap_set_bit (bb_info->gen, regno);
+ }
+ }
+ bitmap_ior_into (seen_in_block, seen_in_insn);
+ bitmap_clear (seen_in_insn);
+ }
+
+ for (def = df_get_artificial_defs (df, bb_index); def; def = def->next_ref)
+ {
+ unsigned int regno = DF_REF_REGNO (def);
+ if (!bitmap_bit_p (seen_in_block, regno))
+ {
+ bitmap_set_bit (seen_in_block, regno);
+ bitmap_set_bit (bb_info->gen, regno);
+ }
+ }
+}
+
+
+/* Compute local uninitialized register info. */
+
+static void
+df_ur_local_compute (struct dataflow *dflow,
+ bitmap all_blocks ATTRIBUTE_UNUSED,
+ bitmap rescan_blocks)
+{
+ unsigned int bb_index;
+ bitmap_iterator bi;
+
+ df_set_seen ();
+
+ EXECUTE_IF_SET_IN_BITMAP (rescan_blocks, 0, bb_index, bi)
+ {
+ df_ur_bb_local_compute (dflow, bb_index);
+ }
+
+ df_unset_seen ();
+}
+
+
+/* Initialize the solution vectors. */
+
+static void
+df_ur_init (struct dataflow *dflow, bitmap all_blocks)
+{
+ unsigned int bb_index;
+ bitmap_iterator bi;
+
+ EXECUTE_IF_SET_IN_BITMAP (all_blocks, 0, bb_index, bi)
+ {
+ struct df_ur_bb_info *bb_info = df_ur_get_bb_info (dflow, bb_index);
+
+ bitmap_copy (bb_info->out, bb_info->gen);
+ bitmap_clear (bb_info->in);
+ }
+}
+
+
+/* Or the hard regs into the ur_in and ur_out sets of all of the
+ blocks, then trim the result to the lr solution. */
+
+static void
+df_ur_local_finalize (struct dataflow *dflow, bitmap all_blocks)
+{
+ struct df *df = dflow->df;
+ struct dataflow *lr_dflow = df->problems_by_index[DF_LR];
+ bitmap tmp = BITMAP_ALLOC (NULL);
+ bitmap_iterator bi;
+ unsigned int bb_index;
+
+ EXECUTE_IF_SET_IN_BITMAP (all_blocks, 0, bb_index, bi)
+ {
+ struct df_ur_bb_info *bb_info = df_ur_get_bb_info (dflow, bb_index);
+ struct df_lr_bb_info *bb_lr_info = df_lr_get_bb_info (lr_dflow, bb_index);
+
+ bitmap_ior_into (bb_info->in, df_all_hard_regs);
+ bitmap_ior_into (bb_info->out, df_all_hard_regs);
+
+ /* No register may reach a location where it is not used. Thus
+ we trim the rr result to the places where it is used. */
+ bitmap_and_into (bb_info->in, bb_lr_info->in);
+ bitmap_and_into (bb_info->out, bb_lr_info->out);
+
+#if 1
+ /* Hard registers may still stick in the ur_out set, but not
+ be in the ur_in set, if their only mention was in a call
+ in this block. This is because a call kills in the lr
+ problem but does not kill in the ur problem. To clean
+ this up, we execute the transfer function on the lr_in
+ set and then use that to knock bits out of ur_out. */
+ bitmap_ior_and_compl (tmp, bb_info->gen, bb_lr_info->in,
+ bb_info->kill);
+ bitmap_and_into (bb_info->out, tmp);
+#endif
+ }
+
+ BITMAP_FREE (tmp);
+}
+
+
+/* Confluence function that ignores fake edges. */
+
+static void
+df_ur_confluence_n (struct dataflow *dflow, edge e)
+{
+ bitmap op1 = df_ur_get_bb_info (dflow, e->dest->index)->in;
+ bitmap op2 = df_ur_get_bb_info (dflow, e->src->index)->out;
+
+ if (e->flags & EDGE_FAKE)
+ return;
+
+ bitmap_ior_into (op1, op2);
+}
+
+
+/* Transfer function. */
+
+static bool
+df_ur_transfer_function (struct dataflow *dflow, int bb_index)
+{
+ struct df_ur_bb_info *bb_info = df_ur_get_bb_info (dflow, bb_index);
+ bitmap in = bb_info->in;
+ bitmap out = bb_info->out;
+ bitmap gen = bb_info->gen;
+ bitmap kill = bb_info->kill;
+
+ return bitmap_ior_and_compl (out, gen, in, kill);
+}
+
+
+/* Free all storage associated with the problem. */
+
+static void
+df_ur_free (struct dataflow *dflow)
+{
+ unsigned int i;
+
+ for (i = 0; i < dflow->block_info_size; i++)
+ {
+ struct df_ur_bb_info *bb_info = df_ur_get_bb_info (dflow, i);
+ if (bb_info)
+ {
+ BITMAP_FREE (bb_info->gen);
+ BITMAP_FREE (bb_info->kill);
+ BITMAP_FREE (bb_info->in);
+ BITMAP_FREE (bb_info->out);
+ }
+ }
+
+ free_alloc_pool (dflow->block_pool);
+ dflow->block_info_size = 0;
+ free (dflow->block_info);
+ free (dflow);
+}
+
+
+/* Debugging info. */
+
+static void
+df_ur_dump (struct dataflow *dflow, FILE *file)
+{
+ basic_block bb;
+
+ fprintf (file, "Undefined regs:\n");
+
+ FOR_ALL_BB (bb)
+ {
+ struct df_ur_bb_info *bb_info = df_ur_get_bb_info (dflow, bb->index);
+ df_print_bb_index (bb, file);
+
+ if (! bb_info->in)
+ continue;
+
+ fprintf (file, " in \t");
+ dump_bitmap (file, bb_info->in);
+ fprintf (file, " gen \t");
+ dump_bitmap (file, bb_info->gen);
+ fprintf (file, " kill\t");
+ dump_bitmap (file, bb_info->kill);
+ fprintf (file, " out \t");
+ dump_bitmap (file, bb_info->out);
+ }
+}
+
+/* All of the information associated with every instance of the problem. */
+
+static struct df_problem problem_UR =
+{
+ DF_UR, /* Problem id. */
+ DF_FORWARD, /* Direction. */
+ df_ur_alloc, /* Allocate the problem specific data. */
+ df_ur_free_bb_info, /* Free basic block info. */
+ df_ur_local_compute, /* Local compute function. */
+ df_ur_init, /* Init the solution specific data. */
+ df_iterative_dataflow, /* Iterative solver. */
+ NULL, /* Confluence operator 0. */
+ df_ur_confluence_n, /* Confluence operator n. */
+ df_ur_transfer_function, /* Transfer function. */
+ df_ur_local_finalize, /* Finalize function. */
+ df_ur_free, /* Free all of the problem information. */
+ df_ur_dump, /* Debugging. */
+ &problem_LR /* Dependent problem. */
+};
+
+
+/* Create a new DATAFLOW instance and add it to an existing instance
+ of DF. The returned structure is what is used to get at the
+ solution. */
+
+struct dataflow *
+df_ur_add_problem (struct df *df)
+{
+ return df_add_problem (df, &problem_UR);
+}
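+
+/* problem_UR lists problem_LR as its dependent problem, so adding UR
+ is expected to pull in LR as well (df_ur_local_finalize reads the
+ lr block info directly). A hypothetical query of the solution:
+
+ df_ur_add_problem (df);
+ df_analyze (df);
+ bb_in = df_ur_get_bb_info (df->problems_by_index[DF_UR],
+ bb->index)->in;
+*/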
+
+
+
+/*----------------------------------------------------------------------------
+ UNINITIALIZED REGISTERS WITH EARLYCLOBBER
+
+ Find the set of uses for registers that are reachable from the entry
+ block without passing through a definition.
+
+ This is a variant of the UR problem above that has a lot of special
+ features just for the register allocation phase.
+----------------------------------------------------------------------------*/
+
+struct df_urec_problem_data
+{
+ bool earlyclobbers_found; /* True if any instruction contains an
+ earlyclobber. */
+#ifdef STACK_REGS
+ bitmap stack_regs; /* Registers that may be allocated to a stack register. */
+#endif
+};
+
+
+/* Get basic block info. */
+
+struct df_urec_bb_info *
+df_urec_get_bb_info (struct dataflow *dflow, unsigned int index)
+{
+ return (struct df_urec_bb_info *) dflow->block_info[index];
+}
+
+
+/* Set basic block info. */
+
+static void
+df_urec_set_bb_info (struct dataflow *dflow, unsigned int index,
+ struct df_urec_bb_info *bb_info)
+{
+ dflow->block_info[index] = bb_info;
+}
+
+
+/* Free basic block info. */
+
+static void
+df_urec_free_bb_info (struct dataflow *dflow, void *vbb_info)
+{
+ struct df_urec_bb_info *bb_info = (struct df_urec_bb_info *) vbb_info;
+ if (bb_info)
+ {
+ BITMAP_FREE (bb_info->gen);
+ BITMAP_FREE (bb_info->kill);
+ BITMAP_FREE (bb_info->in);
+ BITMAP_FREE (bb_info->out);
+ BITMAP_FREE (bb_info->earlyclobber);
+ pool_free (dflow->block_pool, bb_info);
+ }
+}
+
+
+/* Allocate or reset bitmaps for DFLOW blocks. The solution bits are
+ not touched unless the block is new. */
+
+static void
+df_urec_alloc (struct dataflow *dflow, bitmap blocks_to_rescan)
+{
+ unsigned int bb_index;
+ bitmap_iterator bi;
+ struct df_urec_problem_data *problem_data =
+ (struct df_urec_problem_data *) dflow->problem_data;
+
+ if (! dflow->block_pool)
+ dflow->block_pool = create_alloc_pool ("df_urec_block pool",
+ sizeof (struct df_urec_bb_info), 50);
+
+ if (!dflow->problem_data)
+ {
+ problem_data = xmalloc (sizeof (struct df_urec_problem_data));
+ dflow->problem_data = problem_data;
+ }
+ problem_data->earlyclobbers_found = false;
+
+ df_grow_bb_info (dflow);
+
+ /* Allocate or clear the gen/kill/earlyclobber sets for each block
+ to be rescanned. */
+
+ EXECUTE_IF_SET_IN_BITMAP (blocks_to_rescan, 0, bb_index, bi)
+ {
+ struct df_urec_bb_info *bb_info = df_urec_get_bb_info (dflow, bb_index);
+ if (bb_info)
+ {
+ bitmap_clear (bb_info->kill);
+ bitmap_clear (bb_info->gen);
+ bitmap_clear (bb_info->earlyclobber);
+ }
+ else
+ {
+ bb_info = (struct df_urec_bb_info *) pool_alloc (dflow->block_pool);
+ df_urec_set_bb_info (dflow, bb_index, bb_info);
+ bb_info->kill = BITMAP_ALLOC (NULL);
+ bb_info->gen = BITMAP_ALLOC (NULL);
+ bb_info->in = BITMAP_ALLOC (NULL);
+ bb_info->out = BITMAP_ALLOC (NULL);
+ bb_info->earlyclobber = BITMAP_ALLOC (NULL);
+ }
+ }
+}
+
+
+/* The function modifies local info for register REG being changed in
+ SETTER. DATA is used to pass the current basic block info. */
+
+static void
+df_urec_mark_reg_change (rtx reg, rtx setter, void *data)
+{
+ int regno;
+ int endregno;
+ int i;
+ struct df_urec_bb_info *bb_info = (struct df_urec_bb_info*) data;
+
+ if (GET_CODE (reg) == SUBREG)
+ reg = SUBREG_REG (reg);
+
+ if (!REG_P (reg))
+ return;
+
+
+ endregno = regno = REGNO (reg);
+ if (regno < FIRST_PSEUDO_REGISTER)
+ {
+ endregno += hard_regno_nregs[regno][GET_MODE (reg)];
+
+ for (i = regno; i < endregno; i++)
+ {
+ bitmap_set_bit (bb_info->kill, i);
+
+ if (GET_CODE (setter) != CLOBBER)
+ bitmap_set_bit (bb_info->gen, i);
+ else
+ bitmap_clear_bit (bb_info->gen, i);
+ }
+ }
+ else
+ {
+ bitmap_set_bit (bb_info->kill, regno);
+
+ if (GET_CODE (setter) != CLOBBER)
+ bitmap_set_bit (bb_info->gen, regno);
+ else
+ bitmap_clear_bit (bb_info->gen, regno);
+ }
+}
+/* Classes of registers which could be early clobbered in the current
+ insn. */
+
+DEF_VEC_I(int);
+DEF_VEC_ALLOC_I(int,heap);
+
+static VEC(int,heap) *earlyclobber_regclass;
+
+/* This function finds and stores register classes that could be early
+ clobbered in INSN. If any earlyclobber classes are found, the function
+ returns TRUE, in all other cases it returns FALSE. */
+
+static bool
+df_urec_check_earlyclobber (rtx insn)
+{
+ int opno;
+ bool found = false;
+
+ extract_insn (insn);
+
+ VEC_truncate (int, earlyclobber_regclass, 0);
+ for (opno = 0; opno < recog_data.n_operands; opno++)
+ {
+ char c;
+ bool amp_p;
+ int i;
+ enum reg_class class;
+ const char *p = recog_data.constraints[opno];
+
+ class = NO_REGS;
+ amp_p = false;
+ for (;;)
+ {
+ c = *p;
+ switch (c)
+ {
+ case '=': case '+': case '?':
+ case '#': case '!':
+ case '*': case '%':
+ case 'm': case '<': case '>': case 'V': case 'o':
+ case 'E': case 'F': case 'G': case 'H':
+ case 's': case 'i': case 'n':
+ case 'I': case 'J': case 'K': case 'L':
+ case 'M': case 'N': case 'O': case 'P':
+ case 'X':
+ case '0': case '1': case '2': case '3': case '4':
+ case '5': case '6': case '7': case '8': case '9':
+ /* These don't say anything we care about. */
+ break;
+
+ case '&':
+ amp_p = true;
+ break;
+ case '\0':
+ case ',':
+ if (amp_p && class != NO_REGS)
+ {
+ int rc;
+
+ found = true;
+ for (i = 0;
+ VEC_iterate (int, earlyclobber_regclass, i, rc);
+ i++)
+ {
+ if (rc == (int) class)
+ goto found_rc;
+ }
+
+ /* We use VEC_quick_push here because
+ earlyclobber_regclass holds no more than
+ N_REG_CLASSES elements. */
+ VEC_quick_push (int, earlyclobber_regclass, (int) class);
+ found_rc:
+ ;
+ }
+
+ amp_p = false;
+ class = NO_REGS;
+ break;
+
+ case 'r':
+ class = GENERAL_REGS;
+ break;
+
+ default:
+ class = REG_CLASS_FROM_CONSTRAINT (c, p);
+ break;
+ }
+ if (c == '\0')
+ break;
+ p += CONSTRAINT_LEN (c, p);
+ }
+ }
+
+ return found;
+}
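+
+/* For example (hypothetical constraint string): for an operand whose
+ constraint is "=&r", the loop above sees '=' (ignored), '&' (which
+ sets amp_p), 'r' (which sets class to GENERAL_REGS) and then the
+ terminating '\0', at which point GENERAL_REGS is pushed onto
+ earlyclobber_regclass and the function returns true. */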
+
+/* The function checks that pseudo-register *X has a class
+ intersecting with the class of a pseudo-register that could be
+ early clobbered in the same insn.
+
+ This function is a no-op if earlyclobber_regclass is empty.
+
+ Reload can assign the same hard register to an uninitialized
+ pseudo-register and an early clobbered pseudo-register in an insn
+ if the pseudo-register is used for the first time in a given BB
+ and is not live at the BB start. To prevent this we do not change
+ the life information for such pseudo-registers. */
+
+static int
+df_urec_mark_reg_use_for_earlyclobber (rtx *x, void *data)
+{
+ enum reg_class pref_class, alt_class;
+ int i, regno;
+ struct df_urec_bb_info *bb_info = (struct df_urec_bb_info*) data;
+
+ if (REG_P (*x) && REGNO (*x) >= FIRST_PSEUDO_REGISTER)
+ {
+ int rc;
+
+ regno = REGNO (*x);
+ if (bitmap_bit_p (bb_info->kill, regno)
+ || bitmap_bit_p (bb_info->gen, regno))
+ return 0;
+ pref_class = reg_preferred_class (regno);
+ alt_class = reg_alternate_class (regno);
+ for (i = 0; VEC_iterate (int, earlyclobber_regclass, i, rc); i++)
+ {
+ if (reg_classes_intersect_p (rc, pref_class)
+ || (rc != NO_REGS
+ && reg_classes_intersect_p (rc, alt_class)))
+ {
+ bitmap_set_bit (bb_info->earlyclobber, regno);
+ break;
+ }
+ }
+ }
+ return 0;
+}
+
+/* The function processes all pseudo-registers in *X with the aid of
+ the previous function. */
+
+static void
+df_urec_mark_reg_use_for_earlyclobber_1 (rtx *x, void *data)
+{
+ for_each_rtx (x, df_urec_mark_reg_use_for_earlyclobber, data);
+}
+
+
+/* Compute local uninitialized register info for basic block BB. */
+
+static void
+df_urec_bb_local_compute (struct dataflow *dflow, unsigned int bb_index)
+{
+ struct df *df = dflow->df;
+ basic_block bb = BASIC_BLOCK (bb_index);
+ struct df_urec_bb_info *bb_info = df_urec_get_bb_info (dflow, bb_index);
+ rtx insn;
+ struct df_ref *def;
+
+ for (def = df_get_artificial_defs (df, bb_index); def; def = def->next_ref)
+ {
+ unsigned int regno = DF_REF_REGNO (def);
+ bitmap_set_bit (bb_info->gen, regno);
+ }
+
+ FOR_BB_INSNS (bb, insn)
+ {
+ if (INSN_P (insn))
+ {
+ note_stores (PATTERN (insn), df_urec_mark_reg_change, bb_info);
+ if (df_state & (DF_SCAN_GLOBAL | DF_SCAN_POST_ALLOC)
+ && df_urec_check_earlyclobber (insn))
+ {
+ struct df_urec_problem_data *problem_data =
+ (struct df_urec_problem_data *) dflow->problem_data;
+ problem_data->earlyclobbers_found = true;
+ note_uses (&PATTERN (insn),
+ df_urec_mark_reg_use_for_earlyclobber_1, bb_info);
+ }
+ }
+ }
+}
+
+
+/* Compute local uninitialized register info. */
+
+static void
+df_urec_local_compute (struct dataflow *dflow,
+ bitmap all_blocks ATTRIBUTE_UNUSED,
+ bitmap rescan_blocks)
+{
+ unsigned int bb_index;
+ bitmap_iterator bi;
+#ifdef STACK_REGS
+ int i;
+ HARD_REG_SET zero, stack_hard_regs, used;
+ struct df_urec_problem_data *problem_data =
+ (struct df_urec_problem_data *) dflow->problem_data;
+
+ /* Any register that MAY be allocated to a register stack (like the
+ 387) is treated poorly. Each such register is marked as being
+ live everywhere. This keeps the register allocator and the
+ subsequent passes from doing anything useful with these values.
+
+ FIXME: This seems like an incredibly poor idea. */
+
+ CLEAR_HARD_REG_SET (zero);
+ CLEAR_HARD_REG_SET (stack_hard_regs);
+ for (i = FIRST_STACK_REG; i <= LAST_STACK_REG; i++)
+ SET_HARD_REG_BIT (stack_hard_regs, i);
+ problem_data->stack_regs = BITMAP_ALLOC (NULL);
+ for (i = FIRST_PSEUDO_REGISTER; i < max_regno; i++)
+ {
+ COPY_HARD_REG_SET (used, reg_class_contents[reg_preferred_class (i)]);
+ IOR_HARD_REG_SET (used, reg_class_contents[reg_alternate_class (i)]);
+ AND_HARD_REG_SET (used, stack_hard_regs);
+ GO_IF_HARD_REG_EQUAL (used, zero, skip);
+ bitmap_set_bit (problem_data->stack_regs, i);
+ skip:
+ ;
+ }
+#endif
+
+ /* We know that earlyclobber_regclass holds no more than
+ N_REG_CLASSES elements. See df_urec_check_earlyclobber. */
+ earlyclobber_regclass = VEC_alloc (int, heap, N_REG_CLASSES);
+
+ EXECUTE_IF_SET_IN_BITMAP (rescan_blocks, 0, bb_index, bi)
+ {
+ df_urec_bb_local_compute (dflow, bb_index);
+ }
+
+ VEC_free (int, heap, earlyclobber_regclass);
+}
+
+
+/* Initialize the solution vectors. */
+
+static void
+df_urec_init (struct dataflow *dflow, bitmap all_blocks)
+{
+ unsigned int bb_index;
+ bitmap_iterator bi;
+
+ EXECUTE_IF_SET_IN_BITMAP (all_blocks, 0, bb_index, bi)
+ {
+ struct df_urec_bb_info *bb_info = df_urec_get_bb_info (dflow, bb_index);
+
+ /* FIXME: This is a hack, it has been copied over from
+ make_accurate_live_analysis by Vlad. Most likely it is necessary
+ because the generation of gen and kill information for hardware
+ registers in ur is a subset of what is really necessary and what
+ is done for the lr problem. */
+
+ /* Inside the register allocator, partial availability is only
+ allowed for the pseudo registers. To implement this, the rr is
+ initially IORed with a mask of ones for the hard registers and
+ zeros for the pseudos before being iterated. This means that each
+ hardware register will be live unless explicitly killed by some
+ statement. Eventually most of these bits will die because the
+ results of rr are ANDed with the results of lr before being used.
+ Outside of register allocation, a more conservative strategy of
+ completely ignoring the uninitialized registers is employed in the
+ finalizer function. */
+ if (df_state & DF_SCAN_GLOBAL)
+ {
+ bitmap_ior (bb_info->out, bb_info->gen, df_all_hard_regs);
+ bitmap_copy (bb_info->in, df_all_hard_regs);
+ }
+ else
+ {
+ bitmap_copy (bb_info->out, bb_info->gen);
+ bitmap_clear (bb_info->in);
+ }
+ }
+}
+
+
+/* Or in the stack regs, hard regs and early clobber regs into the
+ ur_in sets of all of the blocks. */
+
+static void
+df_urec_local_finalize (struct dataflow *dflow, bitmap all_blocks)
+{
+ struct df *df = dflow->df;
+ struct dataflow *lr_dflow = df->problems_by_index[DF_LR];
+ bitmap tmp = BITMAP_ALLOC (NULL);
+ bitmap_iterator bi;
+ unsigned int bb_index;
+ struct df_urec_problem_data *problem_data =
+ (struct df_urec_problem_data *) dflow->problem_data;
+
+ EXECUTE_IF_SET_IN_BITMAP (all_blocks, 0, bb_index, bi)
+ {
+ struct df_urec_bb_info *bb_info = df_urec_get_bb_info (dflow, bb_index);
+ struct df_lr_bb_info *bb_lr_info = df_lr_get_bb_info (lr_dflow, bb_index);
+
+ if (bb_index != ENTRY_BLOCK && bb_index != EXIT_BLOCK)
+ {
+ if (problem_data->earlyclobbers_found)
+ bitmap_ior_into (bb_info->in, bb_info->earlyclobber);
+
+#ifdef STACK_REGS
+ /* We cannot use the same stack register for an uninitialized
+ pseudo-register and another live pseudo-register because if
+ the uninitialized pseudo-register dies, the subsequent
+ reg-stack pass will be confused (it will believe that the
+ other register dies). */
+ bitmap_ior_into (bb_info->in, problem_data->stack_regs);
+ bitmap_ior_into (bb_info->out, problem_data->stack_regs);
+#endif
+ }
+
+ if (!(df_state & DF_SCAN_GLOBAL))
+ {
+ bitmap_ior_into (bb_info->in, df_all_hard_regs);
+ bitmap_ior_into (bb_info->out, df_all_hard_regs);
+ }
+
+ /* No register may reach a location where it is not used. Thus
+ we trim the rr result to the places where it is used. */
+ bitmap_and_into (bb_info->in, bb_lr_info->in);
+ bitmap_and_into (bb_info->out, bb_lr_info->out);
+
+#if 1
+ /* Hard registers may still stick in the ur_out set, but not
+ be in the ur_in set, if their only mention was in a call
+ in this block. This is because a call kills in the lr
+ problem but does not kill in the rr problem. To clean
+ this up, we execute the transfer function on the lr_in
+ set and then use that to knock bits out of ur_out. */
+ bitmap_ior_and_compl (tmp, bb_info->gen, bb_lr_info->in,
+ bb_info->kill);
+ bitmap_and_into (bb_info->out, tmp);
+#endif
+ }
+
+#ifdef STACK_REGS
+ BITMAP_FREE (problem_data->stack_regs);
+#endif
+ BITMAP_FREE (tmp);
+}
+
+
+/* Confluence function that ignores fake edges. */
+
+static void
+df_urec_confluence_n (struct dataflow *dflow, edge e)
+{
+ bitmap op1 = df_urec_get_bb_info (dflow, e->dest->index)->in;
+ bitmap op2 = df_urec_get_bb_info (dflow, e->src->index)->out;
+
+ if (e->flags & EDGE_FAKE)
+ return;
+
+ bitmap_ior_into (op1, op2);
+}
+
+
+/* Transfer function: OUT = GEN | (IN & ~KILL). */
+
+static bool
+df_urec_transfer_function (struct dataflow *dflow, int bb_index)
+{
+ struct df_urec_bb_info *bb_info = df_urec_get_bb_info (dflow, bb_index);
+ bitmap in = bb_info->in;
+ bitmap out = bb_info->out;
+ bitmap gen = bb_info->gen;
+ bitmap kill = bb_info->kill;
+
+ return bitmap_ior_and_compl (out, gen, in, kill);
+}
+
+
+/* Free all storage associated with the problem. */
+
+static void
+df_urec_free (struct dataflow *dflow)
+{
+ unsigned int i;
+
+ for (i = 0; i < dflow->block_info_size; i++)
+ {
+ struct df_urec_bb_info *bb_info = df_urec_get_bb_info (dflow, i);
+ if (bb_info)
+ {
+ BITMAP_FREE (bb_info->gen);
+ BITMAP_FREE (bb_info->kill);
+ BITMAP_FREE (bb_info->in);
+ BITMAP_FREE (bb_info->out);
+ BITMAP_FREE (bb_info->earlyclobber);
+ }
+ }
+
+ free_alloc_pool (dflow->block_pool);
+
+ dflow->block_info_size = 0;
+ free (dflow->block_info);
+ free (dflow->problem_data);
+ free (dflow);
+}
+
+
+/* Debugging info. */
+
+static void
+df_urec_dump (struct dataflow *dflow, FILE *file)
+{
+ basic_block bb;
+
+ fprintf (file, "Undefined regs:\n");
+
+ FOR_ALL_BB (bb)
+ {
+ struct df_urec_bb_info *bb_info = df_urec_get_bb_info (dflow, bb->index);
+ df_print_bb_index (bb, file);
+
+ if (! bb_info->in)
+ continue;
+
+ fprintf (file, " in \t");
+ dump_bitmap (file, bb_info->in);
+ fprintf (file, " gen \t");
+ dump_bitmap (file, bb_info->gen);
+ fprintf (file, " kill\t");
+ dump_bitmap (file, bb_info->kill);
+ fprintf (file, " ec\t");
+ dump_bitmap (file, bb_info->earlyclobber);
+ fprintf (file, " out \t");
+ dump_bitmap (file, bb_info->out);
+ }
+}
+
+/* All of the information associated with every instance of the problem. */
+
+static struct df_problem problem_UREC =
+{
+ DF_UREC, /* Problem id. */
+ DF_FORWARD, /* Direction. */
+ df_urec_alloc, /* Allocate the problem specific data. */
+ df_urec_free_bb_info, /* Free basic block info. */
+ df_urec_local_compute, /* Local compute function. */
+ df_urec_init, /* Init the solution specific data. */
+ df_iterative_dataflow, /* Iterative solver. */
+ NULL, /* Confluence operator 0. */
+ df_urec_confluence_n, /* Confluence operator n. */
+ df_urec_transfer_function, /* Transfer function. */
+ df_urec_local_finalize, /* Finalize function. */
+ df_urec_free, /* Free all of the problem information. */
+ df_urec_dump, /* Debugging. */
+ &problem_LR /* Dependent problem. */
+};
+
+
+/* Create a new DATAFLOW instance and add it to an existing instance
+ of DF. The returned structure is what is used to get at the
+ solution. */
+
+struct dataflow *
+df_urec_add_problem (struct df *df)
+{
+ return df_add_problem (df, &problem_UREC);
+}
+
+
+
+/*----------------------------------------------------------------------------
+ CREATE DEF_USE (DU) and / or USE_DEF (UD) CHAINS
+
+ Link either the defs to the uses and / or the uses to the defs.
+
+ These problems are set up like the other dataflow problems so that
+ they nicely fit into the framework. They are much simpler and only
+ involve a single traversal of instructions and an examination of
+ the reaching defs information (the dependent problem).
+----------------------------------------------------------------------------*/
+
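+/* A minimal usage sketch (an illustration, not part of the
+ interface): once a chain problem has been added and df_analyze has
+ been run, a client can walk the def-use chain of a def DEF with:
+
+ struct df_link *link;
+ for (link = DF_REF_CHAIN (def); link; link = link->next)
+ process_use (link->ref);
+
+ where process_use is a hypothetical consumer of each use. */
+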
+struct df_chain_problem_data
+{
+ int flags;
+};
+
+
+/* Create def-use or use-def chains. */
+
+static void
+df_chain_alloc (struct dataflow *dflow,
+ bitmap blocks_to_rescan ATTRIBUTE_UNUSED)
+{
+ struct df *df = dflow->df;
+ unsigned int i;
+ struct df_chain_problem_data *problem_data =
+ (struct df_chain_problem_data *) dflow->problem_data;
+
+ /* Wholesale destruction of the old chains. */
+ if (dflow->block_pool)
+ free_alloc_pool (dflow->block_pool);
+
+ dflow->block_pool = create_alloc_pool ("df_chain_chain_block pool",
+ sizeof (struct df_link), 100);
+
+ if (problem_data->flags & DF_DU_CHAIN)
+ {
+ if (!df->def_info.refs_organized)
+ df_reorganize_refs (&df->def_info);
+
+ /* Clear out the pointers from the refs. */
+ for (i = 0; i < DF_DEFS_SIZE (df); i++)
+ {
+ struct df_ref *ref = df->def_info.refs[i];
+ DF_REF_CHAIN (ref) = NULL;
+ }
+ }
+
+ if (problem_data->flags & DF_UD_CHAIN)
+ {
+ if (!df->use_info.refs_organized)
+ df_reorganize_refs (&df->use_info);
+ for (i = 0; i < DF_USES_SIZE (df); i++)
+ {
+ struct df_ref *ref = df->use_info.refs[i];
+ DF_REF_CHAIN (ref) = NULL;
+ }
+ }
+}
+
+
+/* Create the chains for a list of USEs. */
+
+static void
+df_chain_create_bb_process_use (struct dataflow *dflow,
+ struct df_chain_problem_data *problem_data,
+ bitmap local_rd,
+ struct df_ref *use,
+ enum df_ref_flags top_flag)
+{
+ struct df *df = dflow->df;
+ bitmap_iterator bi;
+ unsigned int def_index;
+
+ while (use)
+ {
+ /* We do not want to go through this for an uninitialized var. */
+ unsigned int uregno = DF_REF_REGNO (use);
+ int count = DF_REG_DEF_GET (df, uregno)->n_refs;
+ if (count)
+ {
+ if (top_flag == (DF_REF_FLAGS (use) & DF_REF_AT_TOP))
+ {
+ unsigned int first_index = DF_REG_DEF_GET (df, uregno)->begin;
+ unsigned int last_index = first_index + count - 1;
+
+ EXECUTE_IF_SET_IN_BITMAP (local_rd, first_index, def_index, bi)
+ {
+ struct df_ref *def;
+ if (def_index > last_index)
+ break;
+
+ def = DF_DEFS_GET (df, def_index);
+ if (problem_data->flags & DF_DU_CHAIN)
+ df_chain_create (dflow, def, use);
+ if (problem_data->flags & DF_UD_CHAIN)
+ df_chain_create (dflow, use, def);
+ }
+ }
+ }
+ use = use->next_ref;
+ }
+}
+
+/* Reset the storage pool that the def-use or use-def chains have been
+ allocated in. We do not need to readjust the pointers in the refs;
+ these have already been cleaned out. */
+
+/* Create chains from reaching defs bitmaps for basic block BB. */
+static void
+df_chain_create_bb (struct dataflow *dflow,
+ struct dataflow *rd_dflow,
+ unsigned int bb_index)
+{
+ basic_block bb = BASIC_BLOCK (bb_index);
+ struct df_rd_bb_info *bb_info = df_rd_get_bb_info (rd_dflow, bb_index);
+ rtx insn;
+ bitmap cpy = BITMAP_ALLOC (NULL);
+ struct df *df = dflow->df;
+ struct df_chain_problem_data *problem_data =
+ (struct df_chain_problem_data *) dflow->problem_data;
+ struct df_ref *def;
+
+ bitmap_copy (cpy, bb_info->in);
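+
+ /* From this point on, cpy tracks the set of defs that reach the
+ current program point as the walk moves forward through the
+ block. */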
+
+ /* Since we are going forwards, process the artificial uses first
+ and then the artificial defs. */
+
+#ifdef EH_USES
+ /* Create the chains for the artificial uses from the EH_USES at the
+ beginning of the block. */
+ df_chain_create_bb_process_use (dflow, problem_data, cpy,
+ df_get_artificial_uses (df, bb->index),
+ DF_REF_AT_TOP);
+#endif
+
+ for (def = df_get_artificial_defs (df, bb_index); def; def = def->next_ref)
+ {
+ unsigned int dregno = DF_REF_REGNO (def);
+ bitmap_clear_range (cpy,
+ DF_REG_DEF_GET (df, dregno)->begin,
+ DF_REG_DEF_GET (df, dregno)->n_refs);
+ if (! (DF_REF_FLAGS (def) & DF_REF_CLOBBER))
+ bitmap_set_bit (cpy, DF_REF_ID (def));
+ }
+
+ /* Process the regular instructions next. */
+ FOR_BB_INSNS (bb, insn)
+ {
+ struct df_ref *def;
+ unsigned int uid = INSN_UID (insn);
+
+ if (! INSN_P (insn))
+ continue;
+
+ /* Now scan the uses and link them up with the defs that remain
+ in the cpy vector. */
+
+ df_chain_create_bb_process_use (dflow, problem_data, cpy,
+ DF_INSN_UID_GET (df, uid)->uses, 0);
+
+ /* Since we are going forwards, process the defs second. This
+ pass only changes the bits in cpy. */
+ for (def = DF_INSN_UID_GET (df, uid)->defs; def; def = def->next_ref)
+ {
+ unsigned int dregno = DF_REF_REGNO (def);
+ bitmap_clear_range (cpy,
+ DF_REG_DEF_GET (df, dregno)->begin,
+ DF_REG_DEF_GET (df, dregno)->n_refs);
+ if (! (DF_REF_FLAGS (def) & DF_REF_CLOBBER))
+ bitmap_set_bit (cpy, DF_REF_ID (def));
+ }
+ }
+
+ /* Create the chains for the artificial uses of the hard registers
+ at the end of the block. */
+ df_chain_create_bb_process_use (dflow, problem_data, cpy,
+ df_get_artificial_uses (df, bb->index), 0);
+}
+
+/* Create def-use or use-def chains from the reaching defs bitmaps
+ for the basic blocks in ALL_BLOCKS. */
+
+static void
+df_chain_finalize (struct dataflow *dflow, bitmap all_blocks)
+{
+ unsigned int bb_index;
+ bitmap_iterator bi;
+ struct df *df = dflow->df;
+ struct dataflow *rd_dflow = df->problems_by_index [DF_RD];
+
+ EXECUTE_IF_SET_IN_BITMAP (all_blocks, 0, bb_index, bi)
+ {
+ df_chain_create_bb (dflow, rd_dflow, bb_index);
+ }
+}
+
+
+/* Free all storage associated with the problem. */
+
+static void
+df_chain_free (struct dataflow *dflow)
+{
+ free_alloc_pool (dflow->block_pool);
+ free (dflow->problem_data);
+ free (dflow);
+}
+
+
+/* Debugging info. */
+
+static void
+df_chains_dump (struct dataflow *dflow, FILE *file)
+{
+ struct df *df = dflow->df;
+ unsigned int j;
+ struct df_chain_problem_data *problem_data =
+ (struct df_chain_problem_data *) dflow->problem_data;
+
+ if (problem_data->flags & DF_DU_CHAIN)
+ {
+ fprintf (file, "Def-use chains:\n");
+ for (j = 0; j < df->def_info.bitmap_size; j++)
+ {
+ struct df_ref *def = DF_DEFS_GET (df, j);
+ if (def)
+ {
+ fprintf (file, "d%d bb %d luid %d insn %d reg %d ",
+ j, DF_REF_BBNO (def),
+ DF_REF_INSN (def) ? DF_INSN_LUID (df, DF_REF_INSN (def)) : -1,
+ DF_REF_INSN (def) ? DF_REF_INSN_UID (def) : -1,
+ DF_REF_REGNO (def));
+ if (def->flags & DF_REF_READ_WRITE)
+ fprintf (file, "read/write ");
+ df_chain_dump (df, DF_REF_CHAIN (def), file);
+ fprintf (file, "\n");
+ }
+ }
+ }
+
+ if (problem_data->flags & DF_UD_CHAIN)
+ {
+ fprintf (file, "Use-def chains:\n");
+ for (j = 0; j < df->use_info.bitmap_size; j++)
+ {
+ struct df_ref *use = DF_USES_GET (df, j);
+ if (use)
+ {
+ fprintf (file, "u%d bb %d luid %d insn %d reg %d ",
+ j, DF_REF_BBNO (use),
+ DF_REF_INSN (use)
+ ? DF_INSN_LUID (df, DF_REF_INSN (use)) : -1,
+ DF_REF_INSN (use)
+ ? DF_REF_INSN_UID (use) : -1,
+ DF_REF_REGNO (use));
+ if (use->flags & DF_REF_READ_WRITE)
+ fprintf (file, "read/write ");
+ if (use->flags & DF_REF_STRIPPED)
+ fprintf (file, "stripped ");
+ if (use->flags & DF_REF_IN_NOTE)
+ fprintf (file, "note ");
+ df_chain_dump (df, DF_REF_CHAIN (use), file);
+ fprintf (file, "\n");
+ }
+ }
+ }
+}
+
+
+static struct df_problem problem_CHAIN =
+{
+ DF_CHAIN, /* Problem id. */
+ DF_NONE, /* Direction. */
+ df_chain_alloc, /* Allocate the problem specific data. */
+ NULL, /* Free basic block info. */
+ NULL, /* Local compute function. */
+ NULL, /* Init the solution specific data. */
+ NULL, /* Iterative solver. */
+ NULL, /* Confluence operator 0. */
+ NULL, /* Confluence operator n. */
+ NULL, /* Transfer function. */
+ df_chain_finalize, /* Finalize function. */
+ df_chain_free, /* Free all of the problem information. */
+ df_chains_dump, /* Debugging. */
+ &problem_RD /* Dependent problem. */
+};
+
+
+/* Create a new DATAFLOW instance and add it to an existing instance
+ of DF. The returned structure is what is used to get at the
+ solution. */
+
+struct dataflow *
+df_chain_add_problem (struct df *df, int flags)
+{
+ struct df_chain_problem_data *problem_data =
+ xmalloc (sizeof (struct df_chain_problem_data));
+ struct dataflow *dflow = df_add_problem (df, &problem_CHAIN);
+
+ dflow->problem_data = problem_data;
+ problem_data->flags = flags;
+
+ return dflow;
+}
+
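+/* For example (a sketch; it assumes the usual df setup sequence), a
+ pass that wants use-def chains might do:
+
+ struct df *df = df_init (DF_HARD_REGS);
+ df_chain_add_problem (df, DF_UD_CHAIN);
+ df_analyze (df);
+
+ after which DF_REF_CHAIN on a use yields its reaching defs. */
+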
+
+/*----------------------------------------------------------------------------
+ REGISTER INFORMATION
+
+ Currently this consists only of lifetime information. But the plan
+ is to enhance it so that it produces all of the register
+ information needed by the register allocators.
+----------------------------------------------------------------------------*/
+
+
+struct df_ri_problem_data
+{
+ int *lifetime;
+};
+
+
+/* Allocate the lifetime information. */
+
+static void
+df_ri_alloc (struct dataflow *dflow, bitmap blocks_to_rescan ATTRIBUTE_UNUSED)
+{
+ struct df_ri_problem_data *problem_data;
+
+ if (!dflow->problem_data)
+ {
+ /* First call: allocate the problem data and start with a NULL
+ lifetime array so that the xrealloc below acts as a malloc. */
+ problem_data = xmalloc (sizeof (struct df_ri_problem_data));
+ problem_data->lifetime = NULL;
+ dflow->problem_data = problem_data;
+ }
+ else
+ problem_data = (struct df_ri_problem_data *) dflow->problem_data;
+
+ problem_data->lifetime = xrealloc (problem_data->lifetime,
+ max_reg_num () * sizeof (int));
+ memset (problem_data->lifetime, 0, max_reg_num () * sizeof (int));
+}
+
+/* Compute register info: lifetime, bb, and number of defs and uses
+ for basic block BB. */
+
+static void
+df_ri_bb_compute (struct dataflow *dflow, unsigned int bb_index, bitmap live)
+{
+ struct df *df = dflow->df;
+ struct df_ur_bb_info *bb_info
+ = df_ur_get_bb_info (df->problems_by_index[DF_UR], bb_index);
+ struct df_ri_problem_data *problem_data =
+ (struct df_ri_problem_data *) dflow->problem_data;
+ basic_block bb = BASIC_BLOCK (bb_index);
+ rtx insn;
+
+ bitmap_copy (live, bb_info->out);
+
+ FOR_BB_INSNS_REVERSE (bb, insn)
+ {
+ unsigned int uid = INSN_UID (insn);
+ unsigned int regno;
+ bitmap_iterator bi;
+ struct df_ref *def;
+ struct df_ref *use;
+
+ if (! INSN_P (insn))
+ continue;
+
+ for (def = DF_INSN_UID_GET (df, uid)->defs; def; def = def->next_ref)
+ {
+ unsigned int dregno = DF_REF_REGNO (def);
+
+ /* Kill this register. */
+ bitmap_clear_bit (live, dregno);
+ }
+
+ for (use = DF_INSN_UID_GET (df, uid)->uses; use; use = use->next_ref)
+ {
+ unsigned int uregno = DF_REF_REGNO (use);
+
+ /* This register is now live. */
+ bitmap_set_bit (live, uregno);
+ }
+
+ /* Increment lifetimes of all live registers. */
+ EXECUTE_IF_SET_IN_BITMAP (live, 0, regno, bi)
+ {
+ problem_data->lifetime[regno]++;
+ }
+ }
+}
+
+
+/* Compute register info: lifetime, bb, and number of defs and uses. */
+static void
+df_ri_compute (struct dataflow *dflow, bitmap all_blocks ATTRIBUTE_UNUSED,
+ bitmap blocks_to_scan)
+{
+ unsigned int bb_index;
+ bitmap_iterator bi;
+ bitmap live;
+
+ live = BITMAP_ALLOC (NULL);
+
+ EXECUTE_IF_SET_IN_BITMAP (blocks_to_scan, 0, bb_index, bi)
+ {
+ df_ri_bb_compute (dflow, bb_index, live);
+ }
+
+ BITMAP_FREE (live);
+}
+
+
+/* Free all storage associated with the problem. */
+
+static void
+df_ri_free (struct dataflow *dflow)
+{
+ struct df_ri_problem_data *problem_data =
+ (struct df_ri_problem_data *) dflow->problem_data;
+
+ free (problem_data->lifetime);
+ free (dflow->problem_data);
+ free (dflow);
+}
+
+
+/* Debugging info. */
+
+static void
+df_ri_dump (struct dataflow *dflow, FILE *file)
+{
+ struct df_ri_problem_data *problem_data =
+ (struct df_ri_problem_data *) dflow->problem_data;
+ int j;
+
+ fprintf (file, "Register info:\n");
+ for (j = 0; j < max_reg_num (); j++)
+ {
+ fprintf (file, "reg %d life %d\n", j, problem_data->lifetime[j]);
+ }
+}
+
+/* All of the information associated with every instance of the problem. */
+
+static struct df_problem problem_RI =
+{
+ DF_RI, /* Problem id. */
+ DF_NONE, /* Direction. */
+ df_ri_alloc, /* Allocate the problem specific data. */
+ NULL, /* Free basic block info. */
+ df_ri_compute, /* Local compute function. */
+ NULL, /* Init the solution specific data. */
+ NULL, /* Iterative solver. */
+ NULL, /* Confluence operator 0. */
+ NULL, /* Confluence operator n. */
+ NULL, /* Transfer function. */
+ NULL, /* Finalize function. */
+ df_ri_free, /* Free all of the problem information. */
+ df_ri_dump, /* Debugging. */
+ &problem_UR /* Dependent problem. */
+};
+
+
+/* Create a new DATAFLOW instance and add it to an existing instance
+ of DF. The returned structure is what is used to get at the
+ solution. */
+
+struct dataflow *
+df_ri_add_problem (struct df *df)
+{
+ return df_add_problem (df, &problem_RI);
+}
+
+
+/* Return total lifetime (in insns) of REG. */
+int
+df_reg_lifetime (struct df *df, rtx reg)
+{
+ struct dataflow *dflow = df->problems_by_index[DF_RI];
+ struct df_ri_problem_data *problem_data =
+ (struct df_ri_problem_data *) dflow->problem_data;
+ return problem_data->lifetime[REGNO (reg)];
+}
+
+
diff --git a/gcc/df-scan.c b/gcc/df-scan.c
new file mode 100644
index 00000000000..0aa07bf33c6
--- /dev/null
+++ b/gcc/df-scan.c
@@ -0,0 +1,1795 @@
+/* FIXME: We need to go back and add the warning messages about code
+ moved across setjmp. */
+
+
+/* Scanning of rtl for dataflow analysis.
+ Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006
+ Free Software Foundation, Inc.
+ Originally contributed by Michael P. Hayes
+ (m.hayes@elec.canterbury.ac.nz, mhayes@redhat.com)
+ Major rewrite contributed by Danny Berlin (dberlin@dberlin.org)
+ and Kenneth Zadeck (zadeck@naturalbridge.com).
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 2, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING. If not, write to the Free
+Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
+02110-1301, USA.
+*/
+
+#include "config.h"
+#include "system.h"
+#include "coretypes.h"
+#include "tm.h"
+#include "rtl.h"
+#include "tm_p.h"
+#include "insn-config.h"
+#include "recog.h"
+#include "function.h"
+#include "regs.h"
+#include "output.h"
+#include "alloc-pool.h"
+#include "flags.h"
+#include "hard-reg-set.h"
+#include "basic-block.h"
+#include "sbitmap.h"
+#include "bitmap.h"
+#include "timevar.h"
+#include "df.h"
+
+#ifndef HAVE_epilogue
+#define HAVE_epilogue 0
+#endif
+#ifndef HAVE_prologue
+#define HAVE_prologue 0
+#endif
+#ifndef HAVE_sibcall_epilogue
+#define HAVE_sibcall_epilogue 0
+#endif
+
+#ifndef EPILOGUE_USES
+#define EPILOGUE_USES(REGNO) 0
+#endif
+
+/* Indicates where we are in the compilation. */
+int df_state;
+
+/* This bitmap obstack is used to hold some static variables that
+ should not be reset after each function is compiled. */
+
+static bitmap_obstack persistent_obstack;
+
+/* The set of hard registers in eliminables[i].from. */
+
+static HARD_REG_SET elim_reg_set;
+
+/* This is a bitmap copy of regs_invalidated_by_call so that we can
+ easily add it into bitmaps, etc. */
+
+bitmap df_invalidated_by_call = NULL;
+
+/* Initialize ur_in and ur_out as if all hard registers were partially
+ available. */
+
+bitmap df_all_hard_regs = NULL;
+
+static void df_ref_record (struct dataflow *, rtx, rtx *,
+ basic_block, rtx, enum df_ref_type,
+ enum df_ref_flags, bool record_live);
+static void df_def_record_1 (struct dataflow *, rtx, basic_block, rtx,
+ enum df_ref_flags, bool record_live);
+static void df_defs_record (struct dataflow *, rtx, basic_block, rtx);
+static void df_uses_record (struct dataflow *, rtx *, enum df_ref_type,
+ basic_block, rtx, enum df_ref_flags);
+
+static void df_insn_refs_record (struct dataflow *, basic_block, rtx);
+static void df_bb_refs_record (struct dataflow *, basic_block);
+static void df_refs_record (struct dataflow *, bitmap);
+static struct df_ref *df_ref_create_structure (struct dataflow *, rtx, rtx *,
+ basic_block, rtx, enum df_ref_type,
+ enum df_ref_flags);
+static void df_record_exit_block_uses (struct dataflow *);
+static void df_grow_reg_info (struct dataflow *, struct df_ref_info *);
+static void df_grow_ref_info (struct df_ref_info *, unsigned int);
+static void df_grow_insn_info (struct df *);
+
+
+/*----------------------------------------------------------------------------
+ SCANNING DATAFLOW PROBLEM
+
+ There are several ways in which scanning looks just like the other
+ dataflow problems. It shares all the mechanisms for local info
+ as well as basic block info. Where it differs is when and how often
+ it gets run. It also has no need for the iterative solver.
+----------------------------------------------------------------------------*/
+
+/* Problem data for the scanning dataflow function. */
+struct df_scan_problem_data
+{
+ alloc_pool ref_pool;
+ alloc_pool insn_pool;
+ alloc_pool reg_pool;
+};
+
+typedef struct df_scan_bb_info *df_scan_bb_info_t;
+
+static void
+df_scan_free_internal (struct dataflow *dflow)
+{
+ struct df *df = dflow->df;
+ struct df_scan_problem_data *problem_data =
+ (struct df_scan_problem_data *) dflow->problem_data;
+
+ free (df->def_info.regs);
+ free (df->def_info.refs);
+ memset (&df->def_info, 0, (sizeof (struct df_ref_info)));
+
+ free (df->use_info.regs);
+ free (df->use_info.refs);
+ memset (&df->use_info, 0, (sizeof (struct df_ref_info)));
+
+ free (df->insns);
+ df->insns = NULL;
+ df->insns_size = 0;
+
+ free (dflow->block_info);
+ dflow->block_info = NULL;
+ dflow->block_info_size = 0;
+
+ BITMAP_FREE (df->hardware_regs_used);
+ BITMAP_FREE (df->exit_block_uses);
+
+ free_alloc_pool (dflow->block_pool);
+ free_alloc_pool (problem_data->ref_pool);
+ free_alloc_pool (problem_data->insn_pool);
+ free_alloc_pool (problem_data->reg_pool);
+}
+
+
+/* Get basic block info. */
+
+struct df_scan_bb_info *
+df_scan_get_bb_info (struct dataflow *dflow, unsigned int index)
+{
+ gcc_assert (index < dflow->block_info_size);
+ return (struct df_scan_bb_info *) dflow->block_info[index];
+}
+
+
+/* Set basic block info. */
+
+static void
+df_scan_set_bb_info (struct dataflow *dflow, unsigned int index,
+ struct df_scan_bb_info *bb_info)
+{
+ gcc_assert (index < dflow->block_info_size);
+ dflow->block_info[index] = (void *) bb_info;
+}
+
+
+/* Free basic block info. */
+
+static void
+df_scan_free_bb_info (struct dataflow *dflow, void *vbb_info)
+{
+ struct df_scan_bb_info *bb_info = (struct df_scan_bb_info *) vbb_info;
+ if (bb_info)
+ pool_free (dflow->block_pool, bb_info);
+}
+
+
+/* Allocate the problem data for the scanning problem. This should be
+ called when the problem is created or when the entire function is to
+ be rescanned. */
+
+static void
+df_scan_alloc (struct dataflow *dflow, bitmap blocks_to_rescan)
+{
+ struct df *df = dflow->df;
+ struct df_scan_problem_data *problem_data;
+ unsigned int insn_num = get_max_uid () + 1;
+ unsigned int block_size = 50;
+ unsigned int bb_index;
+ bitmap_iterator bi;
+
+ /* Given the number of pools, this is really faster than tearing
+ everything apart. */
+ if (dflow->problem_data)
+ df_scan_free_internal (dflow);
+
+ dflow->block_pool
+ = create_alloc_pool ("df_scan_block pool",
+ sizeof (struct df_scan_bb_info),
+ block_size);
+
+ problem_data = xmalloc (sizeof (struct df_scan_problem_data));
+ dflow->problem_data = problem_data;
+
+ problem_data->ref_pool
+ = create_alloc_pool ("df_scan_ref pool",
+ sizeof (struct df_ref), block_size);
+ problem_data->insn_pool
+ = create_alloc_pool ("df_scan_insn pool",
+ sizeof (struct df_insn_info), block_size);
+
+ problem_data->reg_pool
+ = create_alloc_pool ("df_scan_reg pool",
+ sizeof (struct df_reg_info), block_size);
+
+ insn_num += insn_num / 4;
+ df_grow_reg_info (dflow, &df->def_info);
+ df_grow_ref_info (&df->def_info, insn_num);
+
+ df_grow_reg_info (dflow, &df->use_info);
+ df_grow_ref_info (&df->use_info, insn_num * 2);
+
+ df_grow_insn_info (df);
+ df_grow_bb_info (dflow);
+
+ EXECUTE_IF_SET_IN_BITMAP (blocks_to_rescan, 0, bb_index, bi)
+ {
+ struct df_scan_bb_info *bb_info = df_scan_get_bb_info (dflow, bb_index);
+ if (!bb_info)
+ {
+ bb_info = (struct df_scan_bb_info *) pool_alloc (dflow->block_pool);
+ df_scan_set_bb_info (dflow, bb_index, bb_info);
+ }
+ bb_info->artificial_defs = NULL;
+ bb_info->artificial_uses = NULL;
+ }
+
+ df->hardware_regs_used = BITMAP_ALLOC (NULL);
+ df->exit_block_uses = BITMAP_ALLOC (NULL);
+}
+
+
+/* Free all of the data associated with the scan problem. */
+
+static void
+df_scan_free (struct dataflow *dflow)
+{
+ struct df *df = dflow->df;
+
+ df_scan_free_internal (dflow);
+ if (df->blocks_to_scan)
+ BITMAP_FREE (df->blocks_to_scan);
+
+ if (df->blocks_to_analyze)
+ BITMAP_FREE (df->blocks_to_analyze);
+
+ free (dflow->problem_data);
+ free (dflow);
+}
+
+static void
+df_scan_dump (struct dataflow *dflow, FILE *file)
+{
+ struct df *df = dflow->df;
+ int i;
+
+ fprintf (file, " all hard regs \t");
+ dump_bitmap (file, df_all_hard_regs);
+ fprintf (file, " invalidated by call \t");
+ dump_bitmap (file, df_invalidated_by_call);
+ fprintf (file, " hardware regs used \t");
+ dump_bitmap (file, df->hardware_regs_used);
+ fprintf (file, " exit block uses \t");
+ dump_bitmap (file, df->exit_block_uses);
+ fprintf (file, " regs ever live \t");
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ if (regs_ever_live[i])
+ fprintf (file, "%d ", i);
+ fprintf (file, "\n");
+}
+
+static struct df_problem problem_SCAN =
+{
+ DF_SCAN, /* Problem id. */
+ DF_NONE, /* Direction. */
+ df_scan_alloc, /* Allocate the problem specific data. */
+ df_scan_free_bb_info, /* Free basic block info. */
+ NULL, /* Local compute function. */
+ NULL, /* Init the solution specific data. */
+ NULL, /* Iterative solver. */
+ NULL, /* Confluence operator 0. */
+ NULL, /* Confluence operator n. */
+ NULL, /* Transfer function. */
+ NULL, /* Finalize function. */
+ df_scan_free, /* Free all of the problem information. */
+ df_scan_dump, /* Debugging. */
+ NULL /* Dependent problem. */
+};
+
+
+/* Create a new DATAFLOW instance and add it to an existing instance
+ of DF. The returned structure is what is used to get at the
+ solution. */
+
+struct dataflow *
+df_scan_add_problem (struct df *df)
+{
+ return df_add_problem (df, &problem_SCAN);
+}
+
+/*----------------------------------------------------------------------------
+ Storage Allocation Utilities
+----------------------------------------------------------------------------*/
+
+
+/* First, grow the reg_info information. If the current size is less
+ than the number of pseudos, grow to 25% more than the number of
+ pseudos.
+
+ Second, ensure that all of the slots up to max_reg_num have been
+ filled with reg_info structures. */
+
+static void
+df_grow_reg_info (struct dataflow *dflow, struct df_ref_info *ref_info)
+{
+ unsigned int max_reg = max_reg_num ();
+ unsigned int new_size = max_reg;
+ struct df_scan_problem_data *problem_data =
+ (struct df_scan_problem_data *) dflow->problem_data;
+ unsigned int i;
+
+ if (ref_info->regs_size < new_size)
+ {
+ new_size += new_size / 4;
+ ref_info->regs = xrealloc (ref_info->regs,
+ new_size * sizeof (struct df_reg_info *));
+ ref_info->regs_size = new_size;
+ }
+
+ for (i = ref_info->regs_inited; i < max_reg; i++)
+ {
+ struct df_reg_info *reg_info = pool_alloc (problem_data->reg_pool);
+ memset (reg_info, 0, sizeof (struct df_reg_info));
+ ref_info->regs[i] = reg_info;
+ }
+
+ ref_info->regs_inited = max_reg;
+}
+
+
+/* Grow the ref information. */
+
+static void
+df_grow_ref_info (struct df_ref_info *ref_info, unsigned int new_size)
+{
+ if (ref_info->refs_size < new_size)
+ {
+ ref_info->refs = xrealloc (ref_info->refs,
+ new_size * sizeof (struct df_ref *));
+ memset (ref_info->refs + ref_info->refs_size, 0,
+ (new_size - ref_info->refs_size) * sizeof (struct df_ref *));
+ ref_info->refs_size = new_size;
+ }
+}
+
+
+/* Grow the insn information. If the current size is less than the
+ number of instructions, grow to 25% more than the number of
+ instructions. */
+
+static void
+df_grow_insn_info (struct df *df)
+{
+ unsigned int new_size = get_max_uid () + 1;
+ if (df->insns_size < new_size)
+ {
+ new_size += new_size / 4;
+ df->insns = xrealloc (df->insns,
+ new_size * sizeof (struct df_insn_info *));
+ memset (df->insns + df->insns_size, 0,
+ (new_size - df->insns_size) * sizeof (struct df_insn_info *));
+ df->insns_size = new_size;
+ }
+}
+
+
+
+
+/*----------------------------------------------------------------------------
+ PUBLIC INTERFACES FOR SMALL GRAIN CHANGES TO SCANNING.
+----------------------------------------------------------------------------*/
+
+/* Rescan the blocks in BLOCKS, or, if BLOCKS is NULL, all the blocks
+ defined by the last call to df_set_blocks. */
+
+void
+df_rescan_blocks (struct df *df, bitmap blocks)
+{
+ bitmap local_blocks_to_scan = BITMAP_ALLOC (NULL);
+
+ struct dataflow *dflow = df->problems_by_index [DF_SCAN];
+ basic_block bb;
+
+ df->def_info.refs_organized = false;
+ df->use_info.refs_organized = false;
+
+ if (blocks)
+ {
+ /* Need to ensure that there is space in all of the tables. */
+ unsigned int insn_num = get_max_uid () + 1;
+ insn_num += insn_num / 4;
+
+ df_grow_reg_info (dflow, &df->def_info);
+ df_grow_ref_info (&df->def_info, insn_num);
+
+ df_grow_reg_info (dflow, &df->use_info);
+ df_grow_ref_info (&df->use_info, insn_num * 2);
+
+ df_grow_insn_info (df);
+ df_grow_bb_info (dflow);
+
+ bitmap_copy (local_blocks_to_scan, blocks);
+ df->def_info.add_refs_inline = true;
+ df->use_info.add_refs_inline = true;
+
+ df_refs_delete (dflow, local_blocks_to_scan);
+
+ /* This may be a mistake, but if an explicit set of blocks is passed in
+ and the set of blocks to analyze has been explicitly set, add
+ the extra blocks to blocks_to_analyze. The alternative is to
+ put an assert here. We do not want this to just go by
+ silently or else we may get storage leaks. */
+ if (df->blocks_to_analyze)
+ bitmap_ior_into (df->blocks_to_analyze, blocks);
+ }
+ else
+ {
+ /* If we are going to do everything, just reallocate everything.
+ Most stuff is allocated in pools so this is faster than
+ walking it. */
+ if (df->blocks_to_analyze)
+ bitmap_copy (local_blocks_to_scan, df->blocks_to_analyze);
+ else
+ FOR_ALL_BB (bb)
+ {
+ bitmap_set_bit (local_blocks_to_scan, bb->index);
+ }
+ df_scan_alloc (dflow, local_blocks_to_scan);
+
+ df->def_info.add_refs_inline = false;
+ df->use_info.add_refs_inline = false;
+ }
+
+ df_refs_record (dflow, local_blocks_to_scan);
+#if 0
+ bitmap_print (stderr, local_blocks_to_scan, "scanning: ", "\n");
+#endif
+
+ if (!df->blocks_to_scan)
+ df->blocks_to_scan = BITMAP_ALLOC (NULL);
+
+ bitmap_ior_into (df->blocks_to_scan, local_blocks_to_scan);
+ BITMAP_FREE (local_blocks_to_scan);
+}
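+
+/* For example (a sketch): after a pass modifies the insns in a single
+ block BB, it can refresh just that block's scanning information
+ with:
+
+ bitmap blocks = BITMAP_ALLOC (NULL);
+ bitmap_set_bit (blocks, bb->index);
+ df_rescan_blocks (df, blocks);
+ BITMAP_FREE (blocks);
+*/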
+
+/* Create a new ref of type DF_REF_TYPE for register REG at address
+ LOC within INSN of BB. */
+
+struct df_ref *
+df_ref_create (struct df *df, rtx reg, rtx *loc, rtx insn,
+ basic_block bb,
+ enum df_ref_type ref_type,
+ enum df_ref_flags ref_flags)
+{
+ struct dataflow *dflow = df->problems_by_index[DF_SCAN];
+ struct df_scan_bb_info *bb_info;
+
+ df_grow_reg_info (dflow, &df->use_info);
+ df_grow_reg_info (dflow, &df->def_info);
+ df_grow_bb_info (dflow);
+
+ /* Make sure there is the bb_info for this block. */
+ bb_info = df_scan_get_bb_info (dflow, bb->index);
+ if (!bb_info)
+ {
+ bb_info = (struct df_scan_bb_info *) pool_alloc (dflow->block_pool);
+ df_scan_set_bb_info (dflow, bb->index, bb_info);
+ bb_info->artificial_defs = NULL;
+ bb_info->artificial_uses = NULL;
+ }
+
+ if (ref_type == DF_REF_REG_DEF)
+ df->def_info.add_refs_inline = true;
+ else
+ df->use_info.add_refs_inline = true;
+
+ return df_ref_create_structure (dflow, reg, loc, bb, insn, ref_type, ref_flags);
+}
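+
+/* For instance (a sketch), a pass that introduces a new use of REG at
+ location LOC inside INSN might call:
+
+ df_ref_create (df, reg, loc, insn, BLOCK_FOR_INSN (insn),
+ DF_REF_REG_USE, 0);
+
+ BLOCK_FOR_INSN is the usual rtl accessor for an insn's block. */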
+
+
+
+/*----------------------------------------------------------------------------
+ UTILITIES TO CREATE AND DESTROY REFS AND CHAINS.
+----------------------------------------------------------------------------*/
+
+
+/* Get the artificial defs for a basic block. */
+
+struct df_ref *
+df_get_artificial_defs (struct df *df, unsigned int bb_index)
+{
+ struct dataflow *dflow = df->problems_by_index[DF_SCAN];
+ return df_scan_get_bb_info (dflow, bb_index)->artificial_defs;
+}
+
+
+/* Get the artificial uses for a basic block. */
+
+struct df_ref *
+df_get_artificial_uses (struct df *df, unsigned int bb_index)
+{
+ struct dataflow *dflow = df->problems_by_index[DF_SCAN];
+ return df_scan_get_bb_info (dflow, bb_index)->artificial_uses;
+}
+
+
+/* Link REF at the front of the reg_use or reg_def chain for REGNO. */
+
+void
+df_reg_chain_create (struct df_reg_info *reg_info,
+ struct df_ref *ref)
+{
+ struct df_ref *head = reg_info->reg_chain;
+ reg_info->reg_chain = ref;
+
+ DF_REF_NEXT_REG (ref) = head;
+
+ /* We cannot actually link to the head of the chain. */
+ DF_REF_PREV_REG (ref) = NULL;
+
+ if (head)
+ DF_REF_PREV_REG (head) = ref;
+}
+
+
+/* Remove REF from the CHAIN. Return the head of the chain. This
+ will be CHAIN unless the REF was at the beginning of the chain. */
+
+static struct df_ref *
+df_ref_unlink (struct df_ref *chain, struct df_ref *ref)
+{
+ struct df_ref *orig_chain = chain;
+ struct df_ref *prev = NULL;
+ while (chain)
+ {
+ if (chain == ref)
+ {
+ if (prev)
+ {
+ prev->next_ref = ref->next_ref;
+ ref->next_ref = NULL;
+ return orig_chain;
+ }
+ else
+ {
+ chain = ref->next_ref;
+ ref->next_ref = NULL;
+ return chain;
+ }
+ }
+
+ prev = chain;
+ chain = chain->next_ref;
+ }
+
+ /* Someone passed in a ref that was not in the chain. */
+ gcc_unreachable ();
+ return NULL;
+}
+
+
+/* Unlink and delete REF from the reg_use or reg_def chain. Also
+ delete the def-use or use-def chain if it exists. Returns the next
+ ref in the uses or defs chain. */
+
+struct df_ref *
+df_reg_chain_unlink (struct dataflow *dflow, struct df_ref *ref)
+{
+ struct df *df = dflow->df;
+ struct df_ref *next = DF_REF_NEXT_REG (ref);
+ struct df_ref *prev = DF_REF_PREV_REG (ref);
+ struct df_scan_problem_data *problem_data =
+ (struct df_scan_problem_data *) dflow->problem_data;
+ struct df_reg_info *reg_info;
+ struct df_ref *next_ref = ref->next_ref;
+ unsigned int id = DF_REF_ID (ref);
+
+ if (DF_REF_TYPE (ref) == DF_REF_REG_DEF)
+ {
+ reg_info = DF_REG_DEF_GET (df, DF_REF_REGNO (ref));
+ df->def_info.bitmap_size--;
+ if (df->def_info.refs && (id < df->def_info.refs_size))
+ DF_DEFS_SET (df, id, NULL);
+ }
+ else
+ {
+ reg_info = DF_REG_USE_GET (df, DF_REF_REGNO (ref));
+ df->use_info.bitmap_size--;
+ if (df->use_info.refs && (id < df->use_info.refs_size))
+ DF_USES_SET (df, id, NULL);
+ }
+
+ /* Delete any def-use or use-def chains that start here. */
+ if (DF_REF_CHAIN (ref))
+ df_chain_unlink (df->problems_by_index[DF_CHAIN], ref, NULL);
+
+ reg_info->n_refs--;
+
+ /* Unlink from the reg chain. If there is no prev, this is the
+ first of the list. If not, just join the next and prev. */
+ if (prev)
+ {
+ DF_REF_NEXT_REG (prev) = next;
+ if (next)
+ DF_REF_PREV_REG (next) = prev;
+ }
+ else
+ {
+ reg_info->reg_chain = next;
+ if (next)
+ DF_REF_PREV_REG (next) = NULL;
+ }
+
+ pool_free (problem_data->ref_pool, ref);
+ return next_ref;
+}
+
+
+/* Unlink REF from all def-use/use-def chains, etc. */
+
+void
+df_ref_remove (struct df *df, struct df_ref *ref)
+{
+ struct dataflow *dflow = df->problems_by_index [DF_SCAN];
+ if (DF_REF_REG_DEF_P (ref))
+ {
+ if (DF_REF_FLAGS (ref) & DF_REF_ARTIFICIAL)
+ {
+ struct df_scan_bb_info *bb_info
+ = df_scan_get_bb_info (dflow, DF_REF_BB (ref)->index);
+ bb_info->artificial_defs
+ = df_ref_unlink (bb_info->artificial_defs, ref);
+ }
+ else
+ DF_INSN_UID_DEFS (df, DF_REF_INSN_UID (ref)) =
+ df_ref_unlink (DF_INSN_UID_DEFS (df, DF_REF_INSN_UID (ref)), ref);
+
+ if (df->def_info.add_refs_inline)
+ DF_DEFS_SET (df, DF_REF_ID (ref), NULL);
+ }
+ else
+ {
+ if (DF_REF_FLAGS (ref) & DF_REF_ARTIFICIAL)
+ {
+ struct df_scan_bb_info *bb_info
+ = df_scan_get_bb_info (dflow, DF_REF_BB (ref)->index);
+ bb_info->artificial_uses
+ = df_ref_unlink (bb_info->artificial_uses, ref);
+ }
+ else
+ DF_INSN_UID_USES (df, DF_REF_INSN_UID (ref)) =
+ df_ref_unlink (DF_INSN_UID_USES (df, DF_REF_INSN_UID (ref)), ref);
+
+ if (df->use_info.add_refs_inline)
+ DF_USES_SET (df, DF_REF_ID (ref), NULL);
+ }
+
+ df_reg_chain_unlink (dflow, ref);
+}
+
+
+/* Create the insn record for INSN. If there was one there, zero it out. */
+
+static struct df_insn_info *
+df_insn_create_insn_record (struct dataflow *dflow, rtx insn)
+{
+ struct df *df = dflow->df;
+ struct df_scan_problem_data *problem_data =
+ (struct df_scan_problem_data *) dflow->problem_data;
+
+ struct df_insn_info *insn_rec = DF_INSN_GET (df, insn);
+ if (!insn_rec)
+ {
+ insn_rec = pool_alloc (problem_data->insn_pool);
+ DF_INSN_SET (df, insn, insn_rec);
+ }
+ memset (insn_rec, 0, sizeof (struct df_insn_info));
+
+ return insn_rec;
+}
+
+/* Delete all of the refs information from INSN. */
+
+void
+df_insn_refs_delete (struct dataflow *dflow, rtx insn)
+{
+ struct df *df = dflow->df;
+ unsigned int uid = INSN_UID (insn);
+ struct df_insn_info *insn_info = DF_INSN_UID_GET (df, uid);
+ struct df_ref *ref;
+ struct df_scan_problem_data *problem_data =
+ (struct df_scan_problem_data *) dflow->problem_data;
+
+ if (insn_info)
+ {
+ ref = insn_info->defs;
+ while (ref)
+ ref = df_reg_chain_unlink (dflow, ref);
+
+ ref = insn_info->uses;
+ while (ref)
+ ref = df_reg_chain_unlink (dflow, ref);
+
+ pool_free (problem_data->insn_pool, insn_info);
+ DF_INSN_SET (df, insn, NULL);
+ }
+}
+
+
+/* Delete all of the refs information from BLOCKS. */
+
+void
+df_refs_delete (struct dataflow *dflow, bitmap blocks)
+{
+ bitmap_iterator bi;
+ unsigned int bb_index;
+ struct df_ref *def;
+ struct df_ref *use;
+
+ EXECUTE_IF_SET_IN_BITMAP (blocks, 0, bb_index, bi)
+ {
+ struct df_scan_bb_info *bb_info
+ = df_scan_get_bb_info (dflow, bb_index);
+ rtx insn;
+ basic_block bb = BASIC_BLOCK (bb_index);
+ FOR_BB_INSNS (bb, insn)
+ {
+ if (INSN_P (insn))
+ {
+ /* Record defs within INSN. */
+ df_insn_refs_delete (dflow, insn);
+ }
+ }
+
+ /* Get rid of any artificial uses and defs. */
+ if (bb_info)
+ {
+ def = bb_info->artificial_defs;
+ while (def)
+ def = df_reg_chain_unlink (dflow, def);
+ bb_info->artificial_defs = NULL;
+ use = bb_info->artificial_uses;
+ while (use)
+ use = df_reg_chain_unlink (dflow, use);
+ bb_info->artificial_uses = NULL;
+ }
+ }
+}
+
+
+/* Build the ref table for either the uses or defs from the reg-use
+ or reg-def chains. */
+
+void
+df_reorganize_refs (struct df_ref_info *ref_info)
+{
+ unsigned int m = ref_info->regs_inited;
+ unsigned int regno;
+ unsigned int offset = 0;
+ unsigned int size = 0;
+
+ if (ref_info->refs_organized)
+ return;
+
+ if (ref_info->refs_size < ref_info->bitmap_size)
+ {
+ int new_size = ref_info->bitmap_size + ref_info->bitmap_size / 4;
+ df_grow_ref_info (ref_info, new_size);
+ }
+
+ for (regno = 0; regno < m; regno++)
+ {
+ struct df_reg_info *reg_info = ref_info->regs[regno];
+ int count = 0;
+ if (reg_info)
+ {
+ struct df_ref *ref = reg_info->reg_chain;
+ reg_info->begin = offset;
+ while (ref)
+ {
+ ref_info->refs[offset] = ref;
+ DF_REF_ID (ref) = offset++;
+ ref = DF_REF_NEXT_REG (ref);
+ count++;
+ size++;
+ }
+ reg_info->n_refs = count;
+ }
+ }
+
+ /* The bitmap size is not decremented when refs are deleted. So
+ reset it now that we have squished out all of the empty
+ slots. */
+ ref_info->bitmap_size = size;
+ ref_info->refs_organized = true;
+ ref_info->add_refs_inline = true;
+}
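+
+/* After reorganization, the refs for register R occupy the contiguous
+ slice [begin, begin + n_refs) of the refs array; this is the layout
+ that df_chain_create_bb_process_use relies on when it maps
+ reaching-def bitmap bits back to defs. */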
+
+
+/* Local miscellaneous routines. */
+
+/* Local routines for recording refs. */
+
+/* Set where we are in the compilation. */
+
+void
+df_set_state (int state)
+{
+ df_state = state;
+}
+
+
+
+/*----------------------------------------------------------------------------
+ Hard core instruction scanning code. No external interfaces here,
+ just a lot of routines that look inside insns.
+----------------------------------------------------------------------------*/
+
+/* Create a ref and add it to the reg-def or reg-use chains. */
+
+static struct df_ref *
+df_ref_create_structure (struct dataflow *dflow, rtx reg, rtx *loc,
+ basic_block bb, rtx insn,
+ enum df_ref_type ref_type,
+ enum df_ref_flags ref_flags)
+{
+ struct df_ref *this_ref;
+ struct df *df = dflow->df;
+ int regno = REGNO (GET_CODE (reg) == SUBREG ? SUBREG_REG (reg) : reg);
+ struct df_scan_problem_data *problem_data =
+ (struct df_scan_problem_data *) dflow->problem_data;
+
+ this_ref = pool_alloc (problem_data->ref_pool);
+ DF_REF_REG (this_ref) = reg;
+ DF_REF_REGNO (this_ref) = regno;
+ DF_REF_LOC (this_ref) = loc;
+ DF_REF_INSN (this_ref) = insn;
+ DF_REF_CHAIN (this_ref) = NULL;
+ DF_REF_TYPE (this_ref) = ref_type;
+ DF_REF_FLAGS (this_ref) = ref_flags;
+ DF_REF_DATA (this_ref) = NULL;
+ DF_REF_BB (this_ref) = bb;
+
+ /* Link the ref into the reg_def and reg_use chains and keep a count
+ of the instances. */
+ if (ref_type == DF_REF_REG_DEF)
+ {
+ struct df_reg_info *reg_info = DF_REG_DEF_GET (df, regno);
+ reg_info->n_refs++;
+
+ /* Add the ref to the reg_def chain. */
+ df_reg_chain_create (reg_info, this_ref);
+ DF_REF_ID (this_ref) = df->def_info.bitmap_size;
+ if (df->def_info.add_refs_inline)
+ {
+ if (DF_DEFS_SIZE (df) >= df->def_info.refs_size)
+ {
+ int new_size = df->def_info.bitmap_size
+ + df->def_info.bitmap_size / 4;
+ df_grow_ref_info (&df->def_info, new_size);
+ }
+ /* Add the ref to the big array of defs. */
+ DF_DEFS_SET (df, df->def_info.bitmap_size, this_ref);
+ df->def_info.refs_organized = false;
+ }
+
+ df->def_info.bitmap_size++;
+
+ if (DF_REF_FLAGS (this_ref) & DF_REF_ARTIFICIAL)
+ {
+ struct df_scan_bb_info *bb_info
+ = df_scan_get_bb_info (dflow, bb->index);
+ this_ref->next_ref = bb_info->artificial_defs;
+ bb_info->artificial_defs = this_ref;
+ }
+ else
+ {
+ this_ref->next_ref = DF_INSN_GET (df, insn)->defs;
+ DF_INSN_GET (df, insn)->defs = this_ref;
+ }
+ }
+ else
+ {
+ struct df_reg_info *reg_info = DF_REG_USE_GET (df, regno);
+ reg_info->n_refs++;
+
+ /* Add the ref to the reg_use chain. */
+ df_reg_chain_create (reg_info, this_ref);
+ DF_REF_ID (this_ref) = df->use_info.bitmap_size;
+ if (df->use_info.add_refs_inline)
+ {
+ if (DF_USES_SIZE (df) >= df->use_info.refs_size)
+ {
+ int new_size = df->use_info.bitmap_size
+ + df->use_info.bitmap_size / 4;
+ df_grow_ref_info (&df->use_info, new_size);
+ }
+ /* Add the ref to the big array of uses. */
+ DF_USES_SET (df, df->use_info.bitmap_size, this_ref);
+ df->use_info.refs_organized = false;
+ }
+
+ df->use_info.bitmap_size++;
+ if (DF_REF_FLAGS (this_ref) & DF_REF_ARTIFICIAL)
+ {
+ struct df_scan_bb_info *bb_info
+ = df_scan_get_bb_info (dflow, bb->index);
+ this_ref->next_ref = bb_info->artificial_uses;
+ bb_info->artificial_uses = this_ref;
+ }
+ else
+ {
+ this_ref->next_ref = DF_INSN_GET (df, insn)->uses;
+ DF_INSN_GET (df, insn)->uses = this_ref;
+ }
+ }
+ return this_ref;
+}
+
+
+/* Create new references of type DF_REF_TYPE for each part of register REG
+ at address LOC within INSN of BB. */
+
+static void
+df_ref_record (struct dataflow *dflow, rtx reg, rtx *loc,
+ basic_block bb, rtx insn,
+ enum df_ref_type ref_type,
+ enum df_ref_flags ref_flags,
+ bool record_live)
+{
+ unsigned int regno;
+ struct df *df = dflow->df;
+
+ gcc_assert (REG_P (reg) || GET_CODE (reg) == SUBREG);
+
+ /* For the reg allocator we are interested in some SUBREG rtx's, but not
+ all. Notably only those representing a word extraction from a multi-word
+ reg. As written in the documentation those should have the form
+ (subreg:SI (reg:M A) N), with size(SImode) > size(Mmode).
+ XXX Is that true? We could also use the global word_mode variable. */
+ if ((df->flags & DF_SUBREGS) == 0
+ && GET_CODE (reg) == SUBREG
+ && (GET_MODE_SIZE (GET_MODE (reg)) < GET_MODE_SIZE (word_mode)
+ || GET_MODE_SIZE (GET_MODE (reg))
+ >= GET_MODE_SIZE (GET_MODE (SUBREG_REG (reg)))))
+ {
+ loc = &SUBREG_REG (reg);
+ reg = *loc;
+ ref_flags |= DF_REF_STRIPPED;
+ }
+
+ regno = REGNO (GET_CODE (reg) == SUBREG ? SUBREG_REG (reg) : reg);
+ if (regno < FIRST_PSEUDO_REGISTER)
+ {
+ int i;
+ int endregno;
+
+ if (! (df->flags & DF_HARD_REGS))
+ return;
+
+ /* GET_MODE (reg) is correct here. We do not want to go into a SUBREG
+ for the mode, because we only want to add references to regs, which
+ are really referenced. E.g., a (subreg:SI (reg:DI 0) 0) does _not_
+ reference the whole reg 0 in DI mode (which would also include
+ reg 1, at least, if 0 and 1 are SImode registers). */
+ endregno = hard_regno_nregs[regno][GET_MODE (reg)];
+ if (GET_CODE (reg) == SUBREG)
+ regno += subreg_regno_offset (regno, GET_MODE (SUBREG_REG (reg)),
+ SUBREG_BYTE (reg), GET_MODE (reg));
+ endregno += regno;
+
+ for (i = regno; i < endregno; i++)
+ {
+ /* Calls are handled at call site because regs_ever_live
+ doesn't include clobbered regs, only used ones. */
+ if (ref_type == DF_REF_REG_DEF && record_live)
+ regs_ever_live[i] = 1;
+ else if ((ref_type == DF_REF_REG_USE
+ || ref_type == DF_REF_REG_MEM_STORE
+ || ref_type == DF_REF_REG_MEM_LOAD)
+ && ((ref_flags & DF_REF_ARTIFICIAL) == 0))
+ {
+ /* Set regs_ever_live on uses of non-eliminable frame
+ pointers and arg pointers. */
+ if (! (TEST_HARD_REG_BIT (elim_reg_set, regno)
+ && (regno == FRAME_POINTER_REGNUM
+ || regno == ARG_POINTER_REGNUM)))
+ regs_ever_live[i] = 1;
+ }
+
+ df_ref_create_structure (dflow, regno_reg_rtx[i], loc,
+ bb, insn, ref_type, ref_flags);
+ }
+ }
+ else
+ {
+ df_ref_create_structure (dflow, reg, loc,
+ bb, insn, ref_type, ref_flags);
+ }
+}
+
+
+/* A set to a non-paradoxical SUBREG for which the number of word_mode
+ units covered by the outer mode is smaller than that covered by the
+ inner mode is a read-modify-write operation. This function returns
+ true iff the SUBREG X is such a SUBREG. */
+
+bool
+df_read_modify_subreg_p (rtx x)
+{
+ unsigned int isize, osize;
+ if (GET_CODE (x) != SUBREG)
+ return false;
+ isize = GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)));
+ osize = GET_MODE_SIZE (GET_MODE (x));
+ return (isize > osize && isize > UNITS_PER_WORD);
+}
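+
+/* For instance, on a target with 4-byte words, a set of
+ (subreg:SI (reg:DI 100) 0) writes only one word of reg 100 and
+ leaves the other untouched, so it is read-modify-write: isize (8) >
+ osize (4) and isize > UNITS_PER_WORD (4). */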
+
+
+/* Process all the registers defined in the rtx, X.
+ Autoincrement/decrement definitions will be picked up by
+ df_uses_record. */
+
+static void
+df_def_record_1 (struct dataflow *dflow, rtx x,
+ basic_block bb, rtx insn,
+ enum df_ref_flags flags, bool record_live)
+{
+ rtx *loc;
+ rtx dst;
+
+ /* We may recursively call ourselves on EXPR_LIST when dealing with a
+ PARALLEL construct. */
+ if (GET_CODE (x) == EXPR_LIST || GET_CODE (x) == CLOBBER)
+ loc = &XEXP (x, 0);
+ else
+ loc = &SET_DEST (x);
+ dst = *loc;
+
+ /* Some targets place small structures in registers for
+ return values of functions. */
+ if (GET_CODE (dst) == PARALLEL && GET_MODE (dst) == BLKmode)
+ {
+ int i;
+
+ for (i = XVECLEN (dst, 0) - 1; i >= 0; i--)
+ {
+ rtx temp = XVECEXP (dst, 0, i);
+ if (GET_CODE (temp) == EXPR_LIST || GET_CODE (temp) == CLOBBER
+ || GET_CODE (temp) == SET)
+ df_def_record_1 (dflow, temp, bb, insn,
+ GET_CODE (temp) == CLOBBER ? flags | DF_REF_CLOBBER : flags,
+ record_live);
+ }
+ return;
+ }
+
+ /* Maybe we should flag the use of STRICT_LOW_PART somehow. It might
+ be handy for the reg allocator. */
+ while (GET_CODE (dst) == STRICT_LOW_PART
+ || GET_CODE (dst) == ZERO_EXTRACT
+ || df_read_modify_subreg_p (dst))
+ {
+#if 0
+ /* Strict low part always contains SUBREG, but we do not want to make
+ it appear outside, as whole register is always considered. */
+ if (GET_CODE (dst) == STRICT_LOW_PART)
+ {
+ loc = &XEXP (dst, 0);
+ dst = *loc;
+ }
+#endif
+ loc = &XEXP (dst, 0);
+ dst = *loc;
+ flags |= DF_REF_READ_WRITE;
+ }
+
+ if (REG_P (dst)
+ || (GET_CODE (dst) == SUBREG && REG_P (SUBREG_REG (dst))))
+ df_ref_record (dflow, dst, loc, bb, insn,
+ DF_REF_REG_DEF, flags, record_live);
+}
+
+
+/* Process all the registers defined in the pattern rtx, X. */
+
+static void
+df_defs_record (struct dataflow *dflow, rtx x, basic_block bb, rtx insn)
+{
+ RTX_CODE code = GET_CODE (x);
+
+ if (code == SET || code == CLOBBER)
+ {
+ /* Mark the single def within the pattern. */
+ df_def_record_1 (dflow, x, bb, insn,
+ code == CLOBBER ? DF_REF_CLOBBER : 0, true);
+ }
+ else if (code == COND_EXEC)
+ {
+ df_defs_record (dflow, COND_EXEC_CODE (x), bb, insn);
+ }
+ else if (code == PARALLEL)
+ {
+ int i;
+
+ /* Mark the multiple defs within the pattern. */
+ for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
+ df_defs_record (dflow, XVECEXP (x, 0, i), bb, insn);
+ }
+}
+
+
+/* Process all the registers used in the rtx at address LOC. */
+
+static void
+df_uses_record (struct dataflow *dflow, rtx *loc, enum df_ref_type ref_type,
+ basic_block bb, rtx insn, enum df_ref_flags flags)
+{
+ RTX_CODE code;
+ rtx x;
+ retry:
+ x = *loc;
+ if (!x)
+ return;
+ code = GET_CODE (x);
+ switch (code)
+ {
+ case LABEL_REF:
+ case SYMBOL_REF:
+ case CONST_INT:
+ case CONST:
+ case CONST_DOUBLE:
+ case CONST_VECTOR:
+ case PC:
+ case CC0:
+ case ADDR_VEC:
+ case ADDR_DIFF_VEC:
+ return;
+
+ case CLOBBER:
+ /* If we are clobbering a MEM, mark any registers inside the address
+ as being used. */
+ if (MEM_P (XEXP (x, 0)))
+ df_uses_record (dflow, &XEXP (XEXP (x, 0), 0),
+ DF_REF_REG_MEM_STORE, bb, insn, flags);
+
+ /* If we're clobbering a REG then we have a def so ignore. */
+ return;
+
+ case MEM:
+ df_uses_record (dflow, &XEXP (x, 0), DF_REF_REG_MEM_LOAD, bb, insn,
+ flags & DF_REF_IN_NOTE);
+ return;
+
+ case SUBREG:
+ /* While we're here, optimize this case. */
+
+ /* In case the SUBREG is not of a REG, do not optimize. */
+ if (!REG_P (SUBREG_REG (x)))
+ {
+ loc = &SUBREG_REG (x);
+ df_uses_record (dflow, loc, ref_type, bb, insn, flags);
+ return;
+ }
+ /* ... Fall through ... */
+
+ case REG:
+ df_ref_record (dflow, x, loc, bb, insn, ref_type, flags, true);
+ return;
+
+ case SET:
+ {
+ rtx dst = SET_DEST (x);
+ gcc_assert (!(flags & DF_REF_IN_NOTE));
+ df_uses_record (dflow, &SET_SRC (x), DF_REF_REG_USE, bb, insn, 0);
+
+ switch (GET_CODE (dst))
+ {
+ case SUBREG:
+ if (df_read_modify_subreg_p (dst))
+ {
+ df_uses_record (dflow, &SUBREG_REG (dst),
+ DF_REF_REG_USE, bb,
+ insn, DF_REF_READ_WRITE);
+ break;
+ }
+ /* Fall through. */
+ case REG:
+ case PARALLEL:
+ case SCRATCH:
+ case PC:
+ case CC0:
+ break;
+ case MEM:
+ df_uses_record (dflow, &XEXP (dst, 0),
+ DF_REF_REG_MEM_STORE,
+ bb, insn, 0);
+ break;
+ case STRICT_LOW_PART:
+ {
+ rtx *temp = &XEXP (dst, 0);
+ /* A strict_low_part uses the whole REG and not just the
+ SUBREG. */
+ dst = XEXP (dst, 0);
+ df_uses_record (dflow,
+ (GET_CODE (dst) == SUBREG)
+ ? &SUBREG_REG (dst) : temp,
+ DF_REF_REG_USE, bb,
+ insn, DF_REF_READ_WRITE);
+ }
+ break;
+ case ZERO_EXTRACT:
+ case SIGN_EXTRACT:
+ df_uses_record (dflow, &XEXP (dst, 0),
+ DF_REF_REG_USE, bb, insn,
+ DF_REF_READ_WRITE);
+ df_uses_record (dflow, &XEXP (dst, 1),
+ DF_REF_REG_USE, bb, insn, 0);
+ df_uses_record (dflow, &XEXP (dst, 2),
+ DF_REF_REG_USE, bb, insn, 0);
+ dst = XEXP (dst, 0);
+ break;
+ default:
+ gcc_unreachable ();
+ }
+ return;
+ }
+
+ case RETURN:
+ break;
+
+ case ASM_OPERANDS:
+ case UNSPEC_VOLATILE:
+ case TRAP_IF:
+ case ASM_INPUT:
+ {
+ /* Traditional and volatile asm instructions must be
+ considered to use and clobber all hard registers, all
+ pseudo-registers and all of memory. So must TRAP_IF and
+ UNSPEC_VOLATILE operations.
+
+ Consider for instance a volatile asm that changes the fpu
+ rounding mode. An insn should not be moved across this
+ even if it only uses pseudo-regs because it might give an
+ incorrectly rounded result.
+
+ However, flow.c's liveness computation did *not* do this,
+ giving the reasoning as " ?!? Unfortunately, marking all
+ hard registers as live causes massive problems for the
+ register allocator and marking all pseudos as live creates
+ mountains of uninitialized variable warnings."
+
+ In order to maintain the status quo with regard to liveness
+ and uses, we do what flow.c did and just mark any regs we
+ can find in ASM_OPERANDS as used. Later on, when liveness
+ is computed, asm insns are scanned and regs_asm_clobbered
+ is filled out.
+
+ For all ASM_OPERANDS, we must traverse the vector of input
+ operands. We cannot just fall through here since then we
+ would be confused by the ASM_INPUT rtx inside ASM_OPERANDS,
+ which does not indicate a traditional asm, unlike its normal
+ usage. */
+ if (code == ASM_OPERANDS)
+ {
+ int j;
+
+ for (j = 0; j < ASM_OPERANDS_INPUT_LENGTH (x); j++)
+ df_uses_record (dflow, &ASM_OPERANDS_INPUT (x, j),
+ DF_REF_REG_USE, bb, insn, 0);
+ return;
+ }
+ break;
+ }
+
+ case PRE_DEC:
+ case POST_DEC:
+ case PRE_INC:
+ case POST_INC:
+ case PRE_MODIFY:
+ case POST_MODIFY:
+ /* Catch the def of the register being modified. */
+ df_ref_record (dflow, XEXP (x, 0), &XEXP (x, 0), bb, insn,
+ DF_REF_REG_DEF, DF_REF_READ_WRITE, true);
+
+ /* ... Fall through to handle uses ... */
+
+ default:
+ break;
+ }
+
+ /* Recursively scan the operands of this expression. */
+ {
+ const char *fmt = GET_RTX_FORMAT (code);
+ int i;
+
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ {
+ if (fmt[i] == 'e')
+ {
+ /* Tail recursive case: save a function call level. */
+ if (i == 0)
+ {
+ loc = &XEXP (x, 0);
+ goto retry;
+ }
+ df_uses_record (dflow, &XEXP (x, i), ref_type, bb, insn, flags);
+ }
+ else if (fmt[i] == 'E')
+ {
+ int j;
+ for (j = 0; j < XVECLEN (x, i); j++)
+ df_uses_record (dflow, &XVECEXP (x, i, j), ref_type,
+ bb, insn, flags);
+ }
+ }
+ }
+}
+
+/* Return true if *LOC contains an asm. */
+
+static int
+df_insn_contains_asm_1 (rtx *loc, void *data ATTRIBUTE_UNUSED)
+{
+ if (!*loc)
+ return 0;
+ if (GET_CODE (*loc) == ASM_OPERANDS)
+ return 1;
+ return 0;
+}
+
+
+/* Return true if INSN contains an ASM. */
+
+static int
+df_insn_contains_asm (rtx insn)
+{
+ return for_each_rtx (&insn, df_insn_contains_asm_1, NULL);
+}
+
+
+
+/* Record all the refs for DF within INSN of basic block BB. */
+
+static void
+df_insn_refs_record (struct dataflow *dflow, basic_block bb, rtx insn)
+{
+ int i;
+ struct df *df = dflow->df;
+
+ if (INSN_P (insn))
+ {
+ rtx note;
+
+ if (df_insn_contains_asm (insn))
+ DF_INSN_CONTAINS_ASM (df, insn) = true;
+
+ /* Record register defs. */
+ df_defs_record (dflow, PATTERN (insn), bb, insn);
+
+ if (df->flags & DF_EQUIV_NOTES)
+ for (note = REG_NOTES (insn); note;
+ note = XEXP (note, 1))
+ {
+ switch (REG_NOTE_KIND (note))
+ {
+ case REG_EQUIV:
+ case REG_EQUAL:
+ df_uses_record (dflow, &XEXP (note, 0), DF_REF_REG_USE,
+ bb, insn, DF_REF_IN_NOTE);
+ default:
+ break;
+ }
+ }
+
+ if (CALL_P (insn))
+ {
+ rtx note;
+
+ /* Record the registers used to pass arguments and the registers
+ explicitly noted as clobbered. */
+ for (note = CALL_INSN_FUNCTION_USAGE (insn); note;
+ note = XEXP (note, 1))
+ {
+ if (GET_CODE (XEXP (note, 0)) == USE)
+ df_uses_record (dflow, &XEXP (XEXP (note, 0), 0),
+ DF_REF_REG_USE,
+ bb, insn, 0);
+ else if (GET_CODE (XEXP (note, 0)) == CLOBBER)
+ {
+ df_defs_record (dflow, XEXP (note, 0), bb, insn);
+ if (REG_P (XEXP (XEXP (note, 0), 0)))
+ {
+ rtx reg = XEXP (XEXP (note, 0), 0);
+ int regno_last;
+ int regno_first;
+ int i;
+
+ regno_last = regno_first = REGNO (reg);
+ if (regno_first < FIRST_PSEUDO_REGISTER)
+ regno_last
+ += hard_regno_nregs[regno_first][GET_MODE (reg)] - 1;
+ for (i = regno_first; i <= regno_last; i++)
+ regs_ever_live[i] = 1;
+ }
+ }
+ }
+
+ /* The stack ptr is used (honorarily) by a CALL insn. */
+ df_uses_record (dflow, &regno_reg_rtx[STACK_POINTER_REGNUM],
+ DF_REF_REG_USE, bb, insn,
+ 0);
+
+ if (df->flags & DF_HARD_REGS)
+ {
+ bitmap_iterator bi;
+ unsigned int ui;
+ /* Calls may also reference any of the global registers,
+ so they are recorded as used. */
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ if (global_regs[i])
+ df_uses_record (dflow, &regno_reg_rtx[i],
+ DF_REF_REG_USE, bb, insn,
+ 0);
+ EXECUTE_IF_SET_IN_BITMAP (df_invalidated_by_call, 0, ui, bi)
+ df_ref_record (dflow, regno_reg_rtx[ui], &regno_reg_rtx[ui], bb, insn,
+ DF_REF_REG_DEF, DF_REF_CLOBBER, false);
+ }
+ }
+
+ /* Record the register uses. */
+ df_uses_record (dflow, &PATTERN (insn),
+ DF_REF_REG_USE, bb, insn, 0);
+
+ }
+}
+
+static bool
+df_has_eh_preds (basic_block bb)
+{
+ edge e;
+ edge_iterator ei;
+
+ FOR_EACH_EDGE (e, ei, bb->preds)
+ {
+ if (e->flags & EDGE_EH)
+ return true;
+ }
+ return false;
+}
+
+/* Record all the refs within the basic block BB. */
+
+static void
+df_bb_refs_record (struct dataflow *dflow, basic_block bb)
+{
+ struct df *df = dflow->df;
+ rtx insn;
+ int luid = 0;
+ struct df_scan_bb_info *bb_info = df_scan_get_bb_info (dflow, bb->index);
+
+ /* Need to make sure that there is a record in the basic block info. */
+ if (!bb_info)
+ {
+ bb_info = (struct df_scan_bb_info *) pool_alloc (dflow->block_pool);
+ df_scan_set_bb_info (dflow, bb->index, bb_info);
+ bb_info->artificial_defs = NULL;
+ bb_info->artificial_uses = NULL;
+ }
+
+ /* Scan the block an insn at a time from beginning to end. */
+ FOR_BB_INSNS (bb, insn)
+ {
+ df_insn_create_insn_record (dflow, insn);
+ if (INSN_P (insn))
+ {
+ DF_INSN_LUID (df, insn) = luid++;
+ /* Record the refs within INSN. */
+ df_insn_refs_record (dflow, bb, insn);
+ }
+ DF_INSN_LUID (df, insn) = luid;
+ }
+
+#ifdef EH_RETURN_DATA_REGNO
+ if ((df->flags & DF_HARD_REGS)
+ && df_has_eh_preds (bb))
+ {
+ unsigned int i;
+ /* Mark the registers that will contain data for the handler. */
+ if (current_function_calls_eh_return)
+ for (i = 0; ; ++i)
+ {
+ unsigned regno = EH_RETURN_DATA_REGNO (i);
+ if (regno == INVALID_REGNUM)
+ break;
+ df_ref_record (dflow, regno_reg_rtx[regno], &regno_reg_rtx[regno], bb, NULL,
+ DF_REF_REG_DEF, DF_REF_ARTIFICIAL | DF_REF_AT_TOP, false);
+ }
+ }
+#endif
+
+#ifdef EH_USES
+ /* This code is putting in an artificial ref for the use at the TOP
+ of the block that receives the exception. It is too cumbersome
+ to actually put the ref on the edge. We could either model this
+ at the top of the receiver block or the bottom of the sender
+ block.
+
+ The bottom of the sender block is problematic because not all
+ out-edges of a block are eh-edges. However, it is true that
+ all edges into a block are either eh-edges or none of them are
+ eh-edges. Thus, we can model this at the top of the eh-receiver
+ for all of the edges at once. */
+ if ((df->flags & DF_HARD_REGS)
+ && df_has_eh_preds (bb))
+ {
+ unsigned int i;
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ if (EH_USES (i))
+ df_uses_record (dflow, &regno_reg_rtx[i],
+ DF_REF_REG_USE, bb, NULL,
+ DF_REF_ARTIFICIAL | DF_REF_AT_TOP);
+ }
+#endif
+
+ if ((df->flags & DF_HARD_REGS)
+ && bb->index >= NUM_FIXED_BLOCKS)
+ {
+ /* Before reload, there are a few registers that must be forced
+ live everywhere -- which might not already be the case for
+ blocks within infinite loops. */
+ if (! reload_completed)
+ {
+ /* Any reference to any pseudo before reload is a potential
+ reference of the frame pointer. */
+ df_uses_record (dflow, &regno_reg_rtx[FRAME_POINTER_REGNUM],
+ DF_REF_REG_USE, bb, NULL, DF_REF_ARTIFICIAL);
+
+#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
+ /* Pseudos with argument area equivalences may require
+ reloading via the argument pointer. */
+ if (fixed_regs[ARG_POINTER_REGNUM])
+ df_uses_record (dflow, &regno_reg_rtx[ARG_POINTER_REGNUM],
+ DF_REF_REG_USE, bb, NULL,
+ DF_REF_ARTIFICIAL);
+#endif
+
+ /* Any constant, or pseudo with constant equivalences, may
+ require reloading from memory using the pic register. */
+ if ((unsigned) PIC_OFFSET_TABLE_REGNUM != INVALID_REGNUM
+ && fixed_regs[PIC_OFFSET_TABLE_REGNUM])
+ df_uses_record (dflow, &regno_reg_rtx[PIC_OFFSET_TABLE_REGNUM],
+ DF_REF_REG_USE, bb, NULL,
+ DF_REF_ARTIFICIAL);
+ }
+ /* The all-important stack pointer must always be live. */
+ df_uses_record (dflow, &regno_reg_rtx[STACK_POINTER_REGNUM],
+ DF_REF_REG_USE, bb, NULL, DF_REF_ARTIFICIAL);
+ }
+}
+
+
+/* Record all the refs in the basic blocks specified by BLOCKS. */
+
+static void
+df_refs_record (struct dataflow *dflow, bitmap blocks)
+{
+ unsigned int bb_index;
+ bitmap_iterator bi;
+
+ EXECUTE_IF_SET_IN_BITMAP (blocks, 0, bb_index, bi)
+ {
+ basic_block bb = BASIC_BLOCK (bb_index);
+ df_bb_refs_record (dflow, bb);
+ }
+
+ if (bitmap_bit_p (blocks, EXIT_BLOCK))
+ df_record_exit_block_uses (dflow);
+}
+
+
+/*----------------------------------------------------------------------------
+ Specialized hard register scanning functions.
+----------------------------------------------------------------------------*/
+
+/* Mark a register in SET. Hard registers in large modes get all
+ of their component registers set as well. */
+
+static void
+df_mark_reg (rtx reg, void *vset)
+{
+ bitmap set = (bitmap) vset;
+ int regno = REGNO (reg);
+
+ gcc_assert (GET_MODE (reg) != BLKmode);
+
+ bitmap_set_bit (set, regno);
+ if (regno < FIRST_PSEUDO_REGISTER)
+ {
+ int n = hard_regno_nregs[regno][GET_MODE (reg)];
+ while (--n > 0)
+ bitmap_set_bit (set, regno + n);
+ }
+}
+
+/* Record the set of hard registers that are used in the exit block. */
+
+static void
+df_record_exit_block_uses (struct dataflow *dflow)
+{
+ unsigned int i;
+ bitmap_iterator bi;
+ struct df *df = dflow->df;
+
+ bitmap_clear (df->exit_block_uses);
+
+ if (! (df->flags & DF_HARD_REGS))
+ return;
+
+ /* If exiting needs the right stack value, consider the stack
+ pointer live at the end of the function. */
+ if ((HAVE_epilogue && epilogue_completed)
+ || ! EXIT_IGNORE_STACK
+ || (! FRAME_POINTER_REQUIRED
+ && ! current_function_calls_alloca
+ && flag_omit_frame_pointer)
+ || current_function_sp_is_unchanging)
+ {
+ bitmap_set_bit (df->exit_block_uses, STACK_POINTER_REGNUM);
+ }
+
+ /* Mark the frame pointer if needed at the end of the function.
+ If we end up eliminating it, it will be removed from the live
+ list of each basic block by reload. */
+
+ if (! reload_completed || frame_pointer_needed)
+ {
+ bitmap_set_bit (df->exit_block_uses, FRAME_POINTER_REGNUM);
+#if FRAME_POINTER_REGNUM != HARD_FRAME_POINTER_REGNUM
+ /* If they are different, also mark the hard frame pointer as live. */
+ if (! LOCAL_REGNO (HARD_FRAME_POINTER_REGNUM))
+ bitmap_set_bit (df->exit_block_uses, HARD_FRAME_POINTER_REGNUM);
+#endif
+ }
+
+#ifndef PIC_OFFSET_TABLE_REG_CALL_CLOBBERED
+ /* Many architectures have a GP register even without flag_pic.
+ Assume the pic register is not in use, or will be handled by
+ other means, if it is not fixed. */
+ if ((unsigned) PIC_OFFSET_TABLE_REGNUM != INVALID_REGNUM
+ && fixed_regs[PIC_OFFSET_TABLE_REGNUM])
+ bitmap_set_bit (df->exit_block_uses, PIC_OFFSET_TABLE_REGNUM);
+#endif
+
+ /* Mark all global registers, and all registers used by the
+ epilogue as being live at the end of the function since they
+ may be referenced by our caller. */
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ if (global_regs[i] || EPILOGUE_USES (i))
+ bitmap_set_bit (df->exit_block_uses, i);
+
+ if (HAVE_epilogue && epilogue_completed)
+ {
+ /* Mark all call-saved registers that we actually used. */
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ if (regs_ever_live[i] && ! LOCAL_REGNO (i)
+ && ! TEST_HARD_REG_BIT (regs_invalidated_by_call, i))
+ bitmap_set_bit (df->exit_block_uses, i);
+ }
+
+#ifdef EH_RETURN_DATA_REGNO
+ /* Mark the registers that will contain data for the handler. */
+ if (reload_completed && current_function_calls_eh_return)
+ for (i = 0; ; ++i)
+ {
+ unsigned regno = EH_RETURN_DATA_REGNO (i);
+ if (regno == INVALID_REGNUM)
+ break;
+ bitmap_set_bit (df->exit_block_uses, regno);
+ }
+#endif
+
+#ifdef EH_RETURN_STACKADJ_RTX
+ if ((! HAVE_epilogue || ! epilogue_completed)
+ && current_function_calls_eh_return)
+ {
+ rtx tmp = EH_RETURN_STACKADJ_RTX;
+ if (tmp && REG_P (tmp))
+ df_mark_reg (tmp, df->exit_block_uses);
+ }
+#endif
+
+#ifdef EH_RETURN_HANDLER_RTX
+ if ((! HAVE_epilogue || ! epilogue_completed)
+ && current_function_calls_eh_return)
+ {
+ rtx tmp = EH_RETURN_HANDLER_RTX;
+ if (tmp && REG_P (tmp))
+ df_mark_reg (tmp, df->exit_block_uses);
+ }
+#endif
+
+ /* Mark function return value. */
+ diddle_return_value (df_mark_reg, (void *) df->exit_block_uses);
+
+ if (df->flags & DF_HARD_REGS)
+ EXECUTE_IF_SET_IN_BITMAP (df->exit_block_uses, 0, i, bi)
+ df_uses_record (dflow, &regno_reg_rtx[i],
+ DF_REF_REG_USE, EXIT_BLOCK_PTR, NULL,
+ DF_REF_ARTIFICIAL);
+}
+
+static bool initialized = false;
+
+/* Initialize some platform specific structures. */
+
+void
+df_hard_reg_init (void)
+{
+#ifdef ELIMINABLE_REGS
+ int i;
+ static const struct {const int from, to; } eliminables[] = ELIMINABLE_REGS;
+#endif
+ /* After reload, some ports add certain bits to regs_ever_live so
+ this cannot be reset. */
+
+ if (!reload_completed)
+ memset (regs_ever_live, 0, sizeof (regs_ever_live));
+
+ if (initialized)
+ return;
+
+ bitmap_obstack_initialize (&persistent_obstack);
+
+ /* Record which registers will be eliminated. We use this in
+ mark_used_regs. */
+ CLEAR_HARD_REG_SET (elim_reg_set);
+
+#ifdef ELIMINABLE_REGS
+ for (i = 0; i < (int) ARRAY_SIZE (eliminables); i++)
+ SET_HARD_REG_BIT (elim_reg_set, eliminables[i].from);
+#else
+ SET_HARD_REG_BIT (elim_reg_set, FRAME_POINTER_REGNUM);
+#endif
+
+ df_invalidated_by_call = BITMAP_ALLOC (&persistent_obstack);
+
+ /* Inconveniently, this is only readily available in hard reg set
+ form. */
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; ++i)
+ if (TEST_HARD_REG_BIT (regs_invalidated_by_call, i))
+ bitmap_set_bit (df_invalidated_by_call, i);
+
+ df_all_hard_regs = BITMAP_ALLOC (&persistent_obstack);
+
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ bitmap_set_bit (df_all_hard_regs, i);
+
+ initialized = true;
+}
diff --git a/gcc/df.c b/gcc/df.c
index fafd06dad45..e69de29bb2d 100644
--- a/gcc/df.c
+++ b/gcc/df.c
@@ -1,3975 +0,0 @@
-/* Dataflow support routines.
- Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005
- Free Software Foundation, Inc.
- Contributed by Michael P. Hayes (m.hayes@elec.canterbury.ac.nz,
- mhayes@redhat.com)
-
-This file is part of GCC.
-
-GCC is free software; you can redistribute it and/or modify it under
-the terms of the GNU General Public License as published by the Free
-Software Foundation; either version 2, or (at your option) any later
-version.
-
-GCC is distributed in the hope that it will be useful, but WITHOUT ANY
-WARRANTY; without even the implied warranty of MERCHANTABILITY or
-FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
-for more details.
-
-You should have received a copy of the GNU General Public License
-along with GCC; see the file COPYING. If not, write to the Free
-Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
-02110-1301, USA.
-
-
-OVERVIEW:
-
-This file provides some dataflow routines for computing reaching defs,
-upward exposed uses, live variables, def-use chains, and use-def
-chains. The global dataflow is performed using simple iterative
-methods with a worklist and could be sped up by ordering the blocks
-with a depth first search order.
-
-A `struct ref' data structure (ref) is allocated for every register
-reference (def or use) and this records the insn and bb the ref is
-found within. The refs are linked together in chains of uses and defs
-for each insn and for each register. Each ref also has a chain field
-that links all the use refs for a def or all the def refs for a use.
-This is used to create use-def or def-use chains.
-
-
-USAGE:
-
-Here's an example of using the dataflow routines.
-
- struct df *df;
-
- df = df_init ();
-
- df_analyze (df, 0, DF_ALL);
-
- df_dump (df, DF_ALL, stderr);
-
- df_finish (df);
-
-
-df_init simply creates a poor man's object (df) that needs to be
-passed to all the dataflow routines. df_finish destroys this
-object and frees up any allocated memory. DF_ALL says to analyze
-everything.
-
-df_analyze performs the following:
-
-1. Records defs and uses by scanning the insns in each basic block
- or by scanning the insns queued by df_insn_modify.
-2. Links defs and uses into insn-def and insn-use chains.
-3. Links defs and uses into reg-def and reg-use chains.
-4. Assigns LUIDs to each insn (for modified blocks).
-5. Calculates local reaching definitions.
-6. Calculates global reaching definitions.
-7. Creates use-def chains.
-8. Calculates local reaching uses (upwards exposed uses).
-9. Calculates global reaching uses.
-10. Creates def-use chains.
-11. Calculates local live registers.
-12. Calculates global live registers.
-13. Calculates register lifetimes and determines local registers.
-
-
-PHILOSOPHY:
-
-Note that the dataflow information is not updated for every newly
-deleted or created insn. If the dataflow information requires
-updating then all the changed, new, or deleted insns need to be
-marked with df_insn_modify (or df_insns_modify) either directly or
-indirectly (say through calling df_insn_delete). df_insn_modify
-marks all the modified insns to get processed the next time df_analyze
-is called.
-
-Beware that tinkering with insns may invalidate the dataflow information.
-The philosophy behind these routines is that once the dataflow
-information has been gathered, the user should store what they require
-before they tinker with any insn. Once a reg is replaced, for example,
-then the reg-def/reg-use chains will point to the wrong place. Once a
-whole lot of changes have been made, df_analyze can be called again
-to update the dataflow information. Currently, this is not very smart
-with regard to propagating changes to the dataflow so it should not
-be called very often.
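-
-As an illustration, a pass might proceed as follows (a sketch only,
-using the df_insn_modify and df_analyze calls described above):
-
-  ... transform INSN within BB ...
-  df_insn_modify (df, bb, insn);
-  ... make any further changes ...
-  df_analyze (df, 0, DF_ALL);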
-
-
-DATA STRUCTURES:
-
-The basic object is a REF (reference) and this may either be a DEF
-(definition) or a USE of a register.
-
-These are linked into a variety of lists, namely reg-def, reg-use,
-insn-def, insn-use, def-use, and use-def lists. For example,
-the reg-def lists contain all the refs that define a given register
-while the insn-use lists contain all the refs used by an insn.
-
-Note that the reg-def and reg-use chains are generally short (except for
-the hard registers) and thus it is much faster to search these chains
-rather than searching the def or use bitmaps.
-
-If the insns are in SSA form then the reg-def and use-def lists
-should only contain the single defining ref.
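-
-For example, given the insn (set (reg 65) (plus (reg 65) (reg 66))),
-the insn-def list contains the def of reg 65, the insn-use list
-contains the uses of regs 65 and 66, and that def also appears on the
-reg-def list for reg 65.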
-
-
-TODO:
-
-1) Incremental dataflow analysis.
-
-Note that if a loop invariant insn is hoisted (or sunk), we do not
-need to change the def-use or use-def chains. All we have to do is to
-change the bb field for all the associated defs and uses and to
-renumber the LUIDs for the original and new basic blocks of the insn.
-
-When shadowing loop mems we create new uses and defs for new pseudos
-so we do not affect the existing dataflow information.
-
-My current strategy is to queue up all modified, created, or deleted
-insns so when df_analyze is called we can easily determine all the new
-or deleted refs. Currently the global dataflow information is
-recomputed from scratch but this could be propagated more efficiently.
-
-2) Reduced memory requirements.
-
-We could operate a pool of ref structures. When a ref is deleted it
-gets returned to the pool (say by linking on to a chain of free refs).
-This will require a pair of bitmaps for defs and uses so that we can
-tell which ones have been changed. Alternatively, we could
-periodically squeeze the def and use tables and associated bitmaps and
-renumber the def and use ids.
-
-3) Ordering of reg-def and reg-use lists.
-
-Should the first entry in the def list be the first def (within a BB)?
-Similarly, should the first entry in the use list be the last use
-(within a BB)?
-
-4) Working with a sub-CFG.
-
-Often the whole CFG does not need to be analyzed, for example,
-when optimizing a loop, only certain registers are of interest.
-Perhaps there should be a bitmap argument to df_analyze to specify
-which registers should be analyzed?
-
-
-NOTES:
-
-Embedded addressing side-effects, such as POST_INC or PRE_INC, generate
-both a use and a def. These are both marked read/write to show that they
-are dependent. For example, (set (reg 40) (mem (post_inc (reg 42))))
-will generate a use of reg 42 followed by a def of reg 42 (both marked
-read/write). Similarly, (set (reg 40) (mem (pre_dec (reg 41))))
-generates a use of reg 41 then a def of reg 41 (both marked read/write),
-even though reg 41 is decremented before it is used for the memory
-address in this second example.
-
-A set to a REG inside a ZERO_EXTRACT, or a set to a non-paradoxical SUBREG
-for which the number of word_mode units covered by the outer mode is
-smaller than that covered by the inner mode, invokes a read-modify-write
-operation. We generate both a use and a def and again mark them
-read/write.
-Paradoxical subreg writes don't leave a trace of the old content, so they
-are write-only operations. */
-
-#include "config.h"
-#include "system.h"
-#include "coretypes.h"
-#include "tm.h"
-#include "rtl.h"
-#include "tm_p.h"
-#include "insn-config.h"
-#include "recog.h"
-#include "function.h"
-#include "regs.h"
-#include "alloc-pool.h"
-#include "hard-reg-set.h"
-#include "basic-block.h"
-#include "sbitmap.h"
-#include "bitmap.h"
-#include "df.h"
-
-#define FOR_EACH_BB_IN_BITMAP(BITMAP, MIN, BB, CODE) \
- do \
- { \
- unsigned int node_; \
- bitmap_iterator bi; \
- EXECUTE_IF_SET_IN_BITMAP (BITMAP, MIN, node_, bi) \
- { \
- (BB) = BASIC_BLOCK (node_); \
- CODE; \
- } \
- } \
- while (0)
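-
-/* For example, the refs in each block of BLOCKS can be visited with:
-
-     basic_block bb;
-     FOR_EACH_BB_IN_BITMAP (blocks, 0, bb,
-       {
-         df_bb_refs_record (df, bb);
-       }); */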
-
-static alloc_pool df_ref_pool;
-static alloc_pool df_link_pool;
-static struct df *ddf;
-
-static void df_reg_table_realloc (struct df *, int);
-static void df_insn_table_realloc (struct df *, unsigned int);
-static void df_bb_table_realloc (struct df *, unsigned int);
-static void df_bitmaps_alloc (struct df *, bitmap, int);
-static void df_bitmaps_free (struct df *, int);
-static void df_free (struct df *);
-static void df_alloc (struct df *, int);
-
-static rtx df_reg_use_gen (unsigned int);
-
-static inline struct df_link *df_link_create (struct ref *, struct df_link *);
-static struct df_link *df_ref_unlink (struct df_link **, struct ref *);
-static void df_def_unlink (struct df *, struct ref *);
-static void df_use_unlink (struct df *, struct ref *);
-static void df_insn_refs_unlink (struct df *, basic_block, rtx);
-#if 0
-static void df_bb_refs_unlink (struct df *, basic_block);
-static void df_refs_unlink (struct df *, bitmap);
-#endif
-
-static struct ref *df_ref_create (struct df *, rtx, rtx *, rtx,
- enum df_ref_type, enum df_ref_flags);
-static void df_ref_record_1 (struct df *, rtx, rtx *, rtx, enum df_ref_type,
- enum df_ref_flags);
-static void df_ref_record (struct df *, rtx, rtx *, rtx, enum df_ref_type,
- enum df_ref_flags);
-static void df_def_record_1 (struct df *, rtx, basic_block, rtx);
-static void df_defs_record (struct df *, rtx, basic_block, rtx);
-static void df_uses_record (struct df *, rtx *, enum df_ref_type,
- basic_block, rtx, enum df_ref_flags);
-static void df_insn_refs_record (struct df *, basic_block, rtx);
-static void df_bb_refs_record (struct df *, basic_block);
-static void df_refs_record (struct df *, bitmap);
-
-static void df_bb_reg_def_chain_create (struct df *, basic_block);
-static void df_reg_def_chain_create (struct df *, bitmap, bool);
-static void df_bb_reg_use_chain_create (struct df *, basic_block);
-static void df_reg_use_chain_create (struct df *, bitmap, bool);
-static void df_bb_du_chain_create (struct df *, basic_block, bitmap);
-static void df_du_chain_create (struct df *, bitmap);
-static void df_bb_ud_chain_create (struct df *, basic_block);
-static void df_ud_chain_create (struct df *, bitmap);
-static void df_bb_rd_local_compute (struct df *, basic_block, bitmap);
-static void df_rd_local_compute (struct df *, bitmap);
-static void df_bb_ru_local_compute (struct df *, basic_block);
-static void df_ru_local_compute (struct df *, bitmap);
-static void df_bb_lr_local_compute (struct df *, basic_block);
-static void df_lr_local_compute (struct df *, bitmap);
-static void df_bb_reg_info_compute (struct df *, basic_block, bitmap);
-static void df_reg_info_compute (struct df *, bitmap);
-
-static int df_bb_luids_set (struct df *df, basic_block);
-static int df_luids_set (struct df *df, bitmap);
-
-static int df_modified_p (struct df *, bitmap);
-static int df_refs_queue (struct df *);
-static int df_refs_process (struct df *);
-static int df_bb_refs_update (struct df *, basic_block);
-static int df_refs_update (struct df *, bitmap);
-static void df_analyze_1 (struct df *, bitmap, int, int);
-
-static void df_insns_modify (struct df *, basic_block, rtx, rtx);
-static int df_rtx_mem_replace (rtx *, void *);
-static int df_rtx_reg_replace (rtx *, void *);
-void df_refs_reg_replace (struct df *, bitmap, struct df_link *, rtx, rtx);
-
-static int df_def_dominates_all_uses_p (struct df *, struct ref *def);
-static int df_def_dominates_uses_p (struct df *, struct ref *def, bitmap);
-static struct ref *df_bb_insn_regno_last_use_find (struct df *, basic_block,
- rtx, unsigned int);
-static struct ref *df_bb_insn_regno_first_def_find (struct df *, basic_block,
- rtx, unsigned int);
-
-static void df_chain_dump (struct df_link *, FILE *file);
-static void df_chain_dump_regno (struct df_link *, FILE *file);
-static void df_regno_debug (struct df *, unsigned int, FILE *);
-static void df_ref_debug (struct df *, struct ref *, FILE *);
-static void df_rd_transfer_function (int, int *, void *, void *, void *,
- void *, void *);
-static void df_ru_transfer_function (int, int *, void *, void *, void *,
- void *, void *);
-static void df_lr_transfer_function (int, int *, void *, void *, void *,
- void *, void *);
-static void hybrid_search (basic_block, struct dataflow *,
- sbitmap, sbitmap, sbitmap);
-
-
-/* Local memory allocation/deallocation routines. */
-
-
-/* Increase the insn info table to have space for at least SIZE + 1
- elements. */
-static void
-df_insn_table_realloc (struct df *df, unsigned int size)
-{
- size++;
- if (size <= df->insn_size)
- return;
-
- /* Make the table a little larger than requested, so we do not need
- to enlarge it so often. */
- size += df->insn_size / 4;
-
- df->insns = xrealloc (df->insns, size * sizeof (struct insn_info));
-
- memset (df->insns + df->insn_size, 0,
- (size - df->insn_size) * sizeof (struct insn_info));
-
- df->insn_size = size;
-
- if (! df->insns_modified)
- {
- df->insns_modified = BITMAP_ALLOC (NULL);
- bitmap_zero (df->insns_modified);
- }
-}
-
-/* Increase the bb info table to have space for at least SIZE + 1
- elements. */
-
-static void
-df_bb_table_realloc (struct df *df, unsigned int size)
-{
- size++;
- if (size <= df->n_bbs)
- return;
-
- /* Make the table a little larger than requested, so we do not need
- to enlarge it so often. */
- size += df->n_bbs / 4;
-
- df->bbs = xrealloc (df->bbs, size * sizeof (struct bb_info));
-
- memset (df->bbs + df->n_bbs, 0, (size - df->n_bbs) * sizeof (struct bb_info));
-
- df->n_bbs = size;
-}
-
-/* Increase the reg info table by SIZE more elements. */
-static void
-df_reg_table_realloc (struct df *df, int size)
-{
- /* Make table 25 percent larger by default. */
- if (! size)
- size = df->reg_size / 4;
-
- size += df->reg_size;
- if (size < max_reg_num ())
- size = max_reg_num ();
-
- df->regs = xrealloc (df->regs, size * sizeof (struct reg_info));
- df->reg_def_last = xrealloc (df->reg_def_last,
- size * sizeof (struct ref *));
-
- /* Zero the new entries. */
- memset (df->regs + df->reg_size, 0,
- (size - df->reg_size) * sizeof (struct reg_info));
-
- df->reg_size = size;
-}
-
-
-/* Allocate bitmaps for each basic block. */
-
-static void
-df_bitmaps_alloc (struct df *df, bitmap blocks, int flags)
-{
- basic_block bb;
-
- df->n_defs = df->def_id;
- df->n_uses = df->use_id;
-
- if (!blocks)
- blocks = df->all_blocks;
-
- FOR_EACH_BB_IN_BITMAP (blocks, 0, bb,
- {
- struct bb_info *bb_info = DF_BB_INFO (df, bb);
-
- if (flags & DF_RD)
- {
- if (!bb_info->rd_in)
- {
- /* Allocate bitmaps for reaching definitions. */
- bb_info->rd_kill = BITMAP_ALLOC (NULL);
- bb_info->rd_gen = BITMAP_ALLOC (NULL);
- bb_info->rd_in = BITMAP_ALLOC (NULL);
- bb_info->rd_out = BITMAP_ALLOC (NULL);
- }
- else
- {
- bitmap_clear (bb_info->rd_kill);
- bitmap_clear (bb_info->rd_gen);
- bitmap_clear (bb_info->rd_in);
- bitmap_clear (bb_info->rd_out);
- }
- }
-
- if (flags & DF_RU)
- {
- if (!bb_info->ru_in)
- {
- /* Allocate bitmaps for upward exposed uses. */
- bb_info->ru_kill = BITMAP_ALLOC (NULL);
- bb_info->ru_gen = BITMAP_ALLOC (NULL);
- bb_info->ru_in = BITMAP_ALLOC (NULL);
- bb_info->ru_out = BITMAP_ALLOC (NULL);
- }
- else
- {
- bitmap_clear (bb_info->ru_kill);
- bitmap_clear (bb_info->ru_gen);
- bitmap_clear (bb_info->ru_in);
- bitmap_clear (bb_info->ru_out);
- }
- }
-
- if (flags & DF_LR)
- {
- if (!bb_info->lr_in)
- {
- /* Allocate bitmaps for live variables. */
- bb_info->lr_def = BITMAP_ALLOC (NULL);
- bb_info->lr_use = BITMAP_ALLOC (NULL);
- bb_info->lr_in = BITMAP_ALLOC (NULL);
- bb_info->lr_out = BITMAP_ALLOC (NULL);
- }
- else
- {
- bitmap_clear (bb_info->lr_def);
- bitmap_clear (bb_info->lr_use);
- bitmap_clear (bb_info->lr_in);
- bitmap_clear (bb_info->lr_out);
- }
- }
- });
-}
-
-
-/* Free bitmaps for each basic block. */
-static void
-df_bitmaps_free (struct df *df, int flags)
-{
- unsigned i;
-
- for (i = 0; i < df->n_bbs; i++)
- {
- struct bb_info *bb_info = &df->bbs[i];
-
- if ((flags & DF_RD) && bb_info->rd_in)
- {
- /* Free bitmaps for reaching definitions. */
- BITMAP_FREE (bb_info->rd_kill);
- bb_info->rd_kill = NULL;
- BITMAP_FREE (bb_info->rd_gen);
- bb_info->rd_gen = NULL;
- BITMAP_FREE (bb_info->rd_in);
- bb_info->rd_in = NULL;
- BITMAP_FREE (bb_info->rd_out);
- bb_info->rd_out = NULL;
- }
-
- if ((flags & DF_RU) && bb_info->ru_in)
- {
- /* Free bitmaps for upward exposed uses. */
- BITMAP_FREE (bb_info->ru_kill);
- bb_info->ru_kill = NULL;
- BITMAP_FREE (bb_info->ru_gen);
- bb_info->ru_gen = NULL;
- BITMAP_FREE (bb_info->ru_in);
- bb_info->ru_in = NULL;
- BITMAP_FREE (bb_info->ru_out);
- bb_info->ru_out = NULL;
- }
-
- if ((flags & DF_LR) && bb_info->lr_in)
- {
- /* Free bitmaps for live variables. */
- BITMAP_FREE (bb_info->lr_def);
- bb_info->lr_def = NULL;
- BITMAP_FREE (bb_info->lr_use);
- bb_info->lr_use = NULL;
- BITMAP_FREE (bb_info->lr_in);
- bb_info->lr_in = NULL;
- BITMAP_FREE (bb_info->lr_out);
- bb_info->lr_out = NULL;
- }
- }
- df->flags &= ~(flags & (DF_RD | DF_RU | DF_LR));
-}
-
-
-/* Allocate and initialize dataflow memory. */
-static void
-df_alloc (struct df *df, int n_regs)
-{
- int n_insns;
- basic_block bb;
-
- df_link_pool = create_alloc_pool ("df_link pool", sizeof (struct df_link),
- 100);
- df_ref_pool = create_alloc_pool ("df_ref pool", sizeof (struct ref), 100);
-
- /* Perhaps we should use LUIDs to save memory for the insn_refs
- table. This is only a small saving; a few pointers. */
- n_insns = get_max_uid () + 1;
-
- df->def_id = 0;
- df->n_defs = 0;
- /* Approximate number of defs by number of insns. */
- df->def_size = n_insns;
- df->defs = xmalloc (df->def_size * sizeof (*df->defs));
-
- df->use_id = 0;
- df->n_uses = 0;
- /* Approximate number of uses by twice number of insns. */
- df->use_size = n_insns * 2;
- df->uses = xmalloc (df->use_size * sizeof (*df->uses));
-
- df->n_regs = n_regs;
- df->n_bbs = last_basic_block;
-
- /* Allocate temporary working array used during local dataflow analysis. */
- df_insn_table_realloc (df, n_insns);
-
- df_reg_table_realloc (df, df->n_regs);
-
- df->bbs_modified = BITMAP_ALLOC (NULL);
- bitmap_zero (df->bbs_modified);
-
- df->flags = 0;
-
- df->bbs = xcalloc (last_basic_block, sizeof (struct bb_info));
-
- df->all_blocks = BITMAP_ALLOC (NULL);
- FOR_EACH_BB (bb)
- bitmap_set_bit (df->all_blocks, bb->index);
-}
-
-
-/* Free all the dataflow info. */
-static void
-df_free (struct df *df)
-{
- df_bitmaps_free (df, DF_ALL);
-
- if (df->bbs)
- free (df->bbs);
- df->bbs = 0;
-
- if (df->insns)
- free (df->insns);
- df->insns = 0;
- df->insn_size = 0;
-
- if (df->defs)
- free (df->defs);
- df->defs = 0;
- df->def_size = 0;
- df->def_id = 0;
-
- if (df->uses)
- free (df->uses);
- df->uses = 0;
- df->use_size = 0;
- df->use_id = 0;
-
- if (df->regs)
- free (df->regs);
- df->regs = 0;
- df->reg_size = 0;
-
- BITMAP_FREE (df->bbs_modified);
- df->bbs_modified = 0;
-
- BITMAP_FREE (df->insns_modified);
- df->insns_modified = 0;
-
- BITMAP_FREE (df->all_blocks);
- df->all_blocks = 0;
-
- free_alloc_pool (df_ref_pool);
- free_alloc_pool (df_link_pool);
-}
-
-/* Local miscellaneous routines. */
-
-/* Return a USE for register REGNO. */
-static rtx
-df_reg_use_gen (unsigned int regno)
-{
- rtx reg;
- rtx use;
-
- reg = regno_reg_rtx[regno];
-
- use = gen_rtx_USE (GET_MODE (reg), reg);
- return use;
-}
-
-/* Local chain manipulation routines. */
-
-/* Create a link in a def-use or use-def chain. */
-static inline struct df_link *
-df_link_create (struct ref *ref, struct df_link *next)
-{
- struct df_link *link;
-
- link = pool_alloc (df_link_pool);
- link->next = next;
- link->ref = ref;
- return link;
-}
-
-/* Releases members of the CHAIN. */
-
-static void
-free_reg_ref_chain (struct df_link **chain)
-{
- struct df_link *act, *next;
-
- for (act = *chain; act; act = next)
- {
- next = act->next;
- pool_free (df_link_pool, act);
- }
-
- *chain = NULL;
-}
-
-/* Unlink REF from the chain pointed to by PHEAD. */
-static struct df_link *
-df_ref_unlink (struct df_link **phead, struct ref *ref)
-{
- struct df_link *link = *phead;
-
- if (link)
- {
- if (! link->next)
- {
- /* Only a single ref. It must be the one we want.
- If not, the def-use and use-def chains are likely to
- be inconsistent. */
- gcc_assert (link->ref == ref);
-
- /* Now have an empty chain. */
- *phead = NULL;
- }
- else
- {
- /* Multiple refs. One of them must be us. */
- if (link->ref == ref)
- *phead = link->next;
- else
- {
- /* Follow chain. */
- for (; link->next; link = link->next)
- {
- if (link->next->ref == ref)
- {
- /* Unlink from list. */
- link->next = link->next->next;
- return link->next;
- }
- }
- }
- }
- }
- return link;
-}
-
-
-/* Unlink REF from all def-use/use-def chains, etc. */
-int
-df_ref_remove (struct df *df, struct ref *ref)
-{
- if (DF_REF_REG_DEF_P (ref))
- {
- df_def_unlink (df, ref);
- df_ref_unlink (&df->insns[DF_REF_INSN_UID (ref)].defs, ref);
- }
- else
- {
- df_use_unlink (df, ref);
- df_ref_unlink (&df->insns[DF_REF_INSN_UID (ref)].uses, ref);
- }
- return 1;
-}
-
-
-/* Unlink DEF from use-def and reg-def chains. */
-static void
-df_def_unlink (struct df *df ATTRIBUTE_UNUSED, struct ref *def)
-{
- struct df_link *du_link;
- unsigned int dregno = DF_REF_REGNO (def);
-
- /* Follow def-use chain to find all the uses of this def. */
- for (du_link = DF_REF_CHAIN (def); du_link; du_link = du_link->next)
- {
- struct ref *use = du_link->ref;
-
- /* Unlink this def from the use-def chain. */
- df_ref_unlink (&DF_REF_CHAIN (use), def);
- }
- DF_REF_CHAIN (def) = 0;
-
- /* Unlink def from reg-def chain. */
- df_ref_unlink (&df->regs[dregno].defs, def);
-
- df->defs[DF_REF_ID (def)] = 0;
-}
-
-
-/* Unlink use from def-use and reg-use chains. */
-static void
-df_use_unlink (struct df *df ATTRIBUTE_UNUSED, struct ref *use)
-{
- struct df_link *ud_link;
- unsigned int uregno = DF_REF_REGNO (use);
-
- /* Follow use-def chain to find all the defs of this use. */
- for (ud_link = DF_REF_CHAIN (use); ud_link; ud_link = ud_link->next)
- {
- struct ref *def = ud_link->ref;
-
- /* Unlink this use from the def-use chain. */
- df_ref_unlink (&DF_REF_CHAIN (def), use);
- }
- DF_REF_CHAIN (use) = 0;
-
- /* Unlink use from reg-use chain. */
- df_ref_unlink (&df->regs[uregno].uses, use);
-
- df->uses[DF_REF_ID (use)] = 0;
-}
-
-/* Local routines for recording refs. */
-
-
-/* Create a new ref of type DF_REF_TYPE for register REG at address
- LOC within INSN. */
-static struct ref *
-df_ref_create (struct df *df, rtx reg, rtx *loc, rtx insn,
- enum df_ref_type ref_type, enum df_ref_flags ref_flags)
-{
- struct ref *this_ref;
-
- this_ref = pool_alloc (df_ref_pool);
- DF_REF_REG (this_ref) = reg;
- DF_REF_LOC (this_ref) = loc;
- DF_REF_INSN (this_ref) = insn;
- DF_REF_CHAIN (this_ref) = 0;
- DF_REF_TYPE (this_ref) = ref_type;
- DF_REF_FLAGS (this_ref) = ref_flags;
- DF_REF_DATA (this_ref) = NULL;
-
- if (ref_type == DF_REF_REG_DEF)
- {
- if (df->def_id >= df->def_size)
- {
- /* Make table 25 percent larger. */
- df->def_size += (df->def_size / 4);
- df->defs = xrealloc (df->defs,
- df->def_size * sizeof (*df->defs));
- }
- DF_REF_ID (this_ref) = df->def_id;
- df->defs[df->def_id++] = this_ref;
- }
- else
- {
- if (df->use_id >= df->use_size)
- {
- /* Make table 25 percent larger. */
- df->use_size += (df->use_size / 4);
- df->uses = xrealloc (df->uses,
- df->use_size * sizeof (*df->uses));
- }
- DF_REF_ID (this_ref) = df->use_id;
- df->uses[df->use_id++] = this_ref;
- }
- return this_ref;
-}
-
-
-/* Create a new reference of type DF_REF_TYPE for a single register REG,
- used inside the LOC rtx of INSN. */
-static void
-df_ref_record_1 (struct df *df, rtx reg, rtx *loc, rtx insn,
- enum df_ref_type ref_type, enum df_ref_flags ref_flags)
-{
- df_ref_create (df, reg, loc, insn, ref_type, ref_flags);
-}
-
-
-/* Create new references of type DF_REF_TYPE for each part of register REG
- at address LOC within INSN. */
-static void
-df_ref_record (struct df *df, rtx reg, rtx *loc, rtx insn,
- enum df_ref_type ref_type, enum df_ref_flags ref_flags)
-{
- unsigned int regno;
-
- gcc_assert (REG_P (reg) || GET_CODE (reg) == SUBREG);
-
- /* For the reg allocator we are interested in some SUBREG rtx's, but not
- all. Notably only those representing a word extraction from a multi-word
- reg. As written in the documentation those should have the form
- (subreg:SI (reg:M A) N), with size(SImode) > size(Mmode).
- XXX Is that true? We could also use the global word_mode variable. */
- if ((df->flags & DF_SUBREGS) == 0
- && GET_CODE (reg) == SUBREG
- && (GET_MODE_SIZE (GET_MODE (reg)) < GET_MODE_SIZE (word_mode)
- || GET_MODE_SIZE (GET_MODE (reg))
- >= GET_MODE_SIZE (GET_MODE (SUBREG_REG (reg)))))
- {
- loc = &SUBREG_REG (reg);
- reg = *loc;
- ref_flags |= DF_REF_STRIPPED;
- }
-
- regno = REGNO (GET_CODE (reg) == SUBREG ? SUBREG_REG (reg) : reg);
- if (regno < FIRST_PSEUDO_REGISTER)
- {
- int i;
- int endregno;
-
- if (! (df->flags & DF_HARD_REGS))
- return;
-
- /* GET_MODE (reg) is correct here. We do not want to go into a SUBREG
- for the mode, because we only want to add references to regs, which
- are really referenced. E.g., a (subreg:SI (reg:DI 0) 0) does _not_
- reference the whole reg 0 in DI mode (which would also include
- reg 1, at least, if 0 and 1 are SImode registers). */
- endregno = hard_regno_nregs[regno][GET_MODE (reg)];
- if (GET_CODE (reg) == SUBREG)
- regno += subreg_regno_offset (regno, GET_MODE (SUBREG_REG (reg)),
- SUBREG_BYTE (reg), GET_MODE (reg));
- endregno += regno;
-
- for (i = regno; i < endregno; i++)
- df_ref_record_1 (df, regno_reg_rtx[i],
- loc, insn, ref_type, ref_flags);
- }
- else
- {
- df_ref_record_1 (df, reg, loc, insn, ref_type, ref_flags);
- }
-}
-
-
-/* A set to a non-paradoxical SUBREG for which the number of word_mode units
- covered by the outer mode is smaller than that covered by the inner mode,
- is a read-modify-write operation.
- This function returns true iff the SUBREG X is such a SUBREG. */
-bool
-read_modify_subreg_p (rtx x)
-{
- unsigned int isize, osize;
- if (GET_CODE (x) != SUBREG)
- return false;
- isize = GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)));
- osize = GET_MODE_SIZE (GET_MODE (x));
- return (isize > osize && isize > UNITS_PER_WORD);
-}
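-
-/* For instance, with UNITS_PER_WORD == 4, a write to
- (subreg:SI (reg:DI 100) 0) leaves the upper word of reg 100 intact,
- so isize (8) > osize (4) and isize > UNITS_PER_WORD: a
- read-modify-write. A paradoxical write such as
- (subreg:DI (reg:SI 100) 0) leaves no trace of the old contents and
- so is write-only, not read-modify-write. */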
-
-
-/* Process all the registers defined in the rtx, X. */
-static void
-df_def_record_1 (struct df *df, rtx x, basic_block bb, rtx insn)
-{
- rtx *loc;
- rtx dst;
- enum df_ref_flags flags = 0;
-
- /* We may recursively call ourselves on EXPR_LIST when dealing with a
- PARALLEL construct. */
- if (GET_CODE (x) == EXPR_LIST || GET_CODE (x) == CLOBBER)
- loc = &XEXP (x, 0);
- else
- loc = &SET_DEST (x);
- dst = *loc;
-
- /* Some targets place small structures in registers for
- return values of functions. */
- if (GET_CODE (dst) == PARALLEL && GET_MODE (dst) == BLKmode)
- {
- int i;
-
- for (i = XVECLEN (dst, 0) - 1; i >= 0; i--)
- {
- rtx temp = XVECEXP (dst, 0, i);
- if (GET_CODE (temp) == EXPR_LIST || GET_CODE (temp) == CLOBBER
- || GET_CODE (temp) == SET)
- df_def_record_1 (df, temp, bb, insn);
- }
- return;
- }
-
- /* Maybe we should flag the use of STRICT_LOW_PART somehow. It might
- be handy for the reg allocator. */
- while (GET_CODE (dst) == STRICT_LOW_PART
- || GET_CODE (dst) == ZERO_EXTRACT
- || read_modify_subreg_p (dst))
- {
- /* A strict low part always contains a SUBREG, but we do not want to
- make it appear outside, as the whole register is always considered. */
- if (GET_CODE (dst) == STRICT_LOW_PART)
- {
- loc = &XEXP (dst, 0);
- dst = *loc;
- }
- loc = &XEXP (dst, 0);
- dst = *loc;
- flags |= DF_REF_READ_WRITE;
- }
-
- if (REG_P (dst)
- || (GET_CODE (dst) == SUBREG && REG_P (SUBREG_REG (dst))))
- df_ref_record (df, dst, loc, insn, DF_REF_REG_DEF, flags);
-}
-
-
-/* Process all the registers defined in the pattern rtx, X. */
-static void
-df_defs_record (struct df *df, rtx x, basic_block bb, rtx insn)
-{
- RTX_CODE code = GET_CODE (x);
-
- if (code == SET || code == CLOBBER)
- {
- /* Mark the single def within the pattern. */
- df_def_record_1 (df, x, bb, insn);
- }
- else if (code == PARALLEL)
- {
- int i;
-
- /* Mark the multiple defs within the pattern. */
- for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
- {
- code = GET_CODE (XVECEXP (x, 0, i));
- if (code == SET || code == CLOBBER)
- df_def_record_1 (df, XVECEXP (x, 0, i), bb, insn);
- }
- }
-}
-
-
-/* Process all the registers used in the rtx at address LOC. */
-static void
-df_uses_record (struct df *df, rtx *loc, enum df_ref_type ref_type,
- basic_block bb, rtx insn, enum df_ref_flags flags)
-{
- RTX_CODE code;
- rtx x;
- retry:
- x = *loc;
- if (!x)
- return;
- code = GET_CODE (x);
- switch (code)
- {
- case LABEL_REF:
- case SYMBOL_REF:
- case CONST_INT:
- case CONST:
- case CONST_DOUBLE:
- case CONST_VECTOR:
- case PC:
- case CC0:
- case ADDR_VEC:
- case ADDR_DIFF_VEC:
- return;
-
- case CLOBBER:
- /* If we are clobbering a MEM, mark any registers inside the address
- as being used. */
- if (MEM_P (XEXP (x, 0)))
- df_uses_record (df, &XEXP (XEXP (x, 0), 0),
- DF_REF_REG_MEM_STORE, bb, insn, flags);
-
- /* If we're clobbering a REG then we have a def so ignore. */
- return;
-
- case MEM:
- df_uses_record (df, &XEXP (x, 0), DF_REF_REG_MEM_LOAD, bb, insn, 0);
- return;
-
- case SUBREG:
- /* While we're here, optimize this case. */
-
- /* In case the SUBREG is not of a REG, do not optimize. */
- if (!REG_P (SUBREG_REG (x)))
- {
- loc = &SUBREG_REG (x);
- df_uses_record (df, loc, ref_type, bb, insn, flags);
- return;
- }
- /* ... Fall through ... */
-
- case REG:
- df_ref_record (df, x, loc, insn, ref_type, flags);
- return;
-
- case SET:
- {
- rtx dst = SET_DEST (x);
-
- df_uses_record (df, &SET_SRC (x), DF_REF_REG_USE, bb, insn, 0);
-
- switch (GET_CODE (dst))
- {
- case SUBREG:
- if (read_modify_subreg_p (dst))
- {
- df_uses_record (df, &SUBREG_REG (dst), DF_REF_REG_USE, bb,
- insn, DF_REF_READ_WRITE);
- break;
- }
- /* Fall through. */
- case REG:
- case PARALLEL:
- case SCRATCH:
- case PC:
- case CC0:
- break;
- case MEM:
- df_uses_record (df, &XEXP (dst, 0),
- DF_REF_REG_MEM_STORE,
- bb, insn, 0);
- break;
- case STRICT_LOW_PART:
- /* A strict_low_part uses the whole REG and not just the
- SUBREG. */
- dst = XEXP (dst, 0);
- gcc_assert (GET_CODE (dst) == SUBREG);
- df_uses_record (df, &SUBREG_REG (dst), DF_REF_REG_USE, bb,
- insn, DF_REF_READ_WRITE);
- break;
- case ZERO_EXTRACT:
- case SIGN_EXTRACT:
- df_uses_record (df, &XEXP (dst, 0), DF_REF_REG_USE, bb, insn,
- DF_REF_READ_WRITE);
- df_uses_record (df, &XEXP (dst, 1), DF_REF_REG_USE, bb, insn, 0);
- df_uses_record (df, &XEXP (dst, 2), DF_REF_REG_USE, bb, insn, 0);
- dst = XEXP (dst, 0);
- break;
- default:
- gcc_unreachable ();
- }
- return;
- }
-
- case RETURN:
- break;
-
- case ASM_OPERANDS:
- case UNSPEC_VOLATILE:
- case TRAP_IF:
- case ASM_INPUT:
- {
- /* Traditional and volatile asm instructions must be considered to use
- and clobber all hard registers, all pseudo-registers and all of
- memory. So must TRAP_IF and UNSPEC_VOLATILE operations.
-
- Consider for instance a volatile asm that changes the fpu rounding
- mode. An insn should not be moved across this even if it only uses
- pseudo-regs because it might give an incorrectly rounded result.
-
- For now, just mark any regs we can find in ASM_OPERANDS as
- used. */
-
- /* For all ASM_OPERANDS, we must traverse the vector of input operands.
- We cannot just fall through here since then we would be confused
- by the ASM_INPUT rtx inside ASM_OPERANDS, which does not indicate
- traditional asms unlike their normal usage. */
- if (code == ASM_OPERANDS)
- {
- int j;
-
- for (j = 0; j < ASM_OPERANDS_INPUT_LENGTH (x); j++)
- df_uses_record (df, &ASM_OPERANDS_INPUT (x, j),
- DF_REF_REG_USE, bb, insn, 0);
- return;
- }
- break;
- }
-
- case PRE_DEC:
- case POST_DEC:
- case PRE_INC:
- case POST_INC:
- case PRE_MODIFY:
- case POST_MODIFY:
- /* Catch the def of the register being modified. */
- df_ref_record (df, XEXP (x, 0), &XEXP (x, 0), insn,
- DF_REF_REG_DEF, DF_REF_READ_WRITE);
-
- /* ... Fall through to handle uses ... */
-
- default:
- break;
- }
-
- /* Recursively scan the operands of this expression. */
- {
- const char *fmt = GET_RTX_FORMAT (code);
- int i;
-
- for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
- {
- if (fmt[i] == 'e')
- {
- /* Tail recursive case: save a function call level. */
- if (i == 0)
- {
- loc = &XEXP (x, 0);
- goto retry;
- }
- df_uses_record (df, &XEXP (x, i), ref_type, bb, insn, flags);
- }
- else if (fmt[i] == 'E')
- {
- int j;
- for (j = 0; j < XVECLEN (x, i); j++)
- df_uses_record (df, &XVECEXP (x, i, j), ref_type,
- bb, insn, flags);
- }
- }
- }
-}
-
-
-/* Record all the refs within INSN of basic block BB. */
-static void
-df_insn_refs_record (struct df *df, basic_block bb, rtx insn)
-{
- int i;
-
- if (INSN_P (insn))
- {
- rtx note;
-
- /* Record register defs. */
- df_defs_record (df, PATTERN (insn), bb, insn);
-
- if (df->flags & DF_EQUIV_NOTES)
- for (note = REG_NOTES (insn); note;
- note = XEXP (note, 1))
- {
- switch (REG_NOTE_KIND (note))
- {
- case REG_EQUIV:
- case REG_EQUAL:
- df_uses_record (df, &XEXP (note, 0), DF_REF_REG_USE,
- bb, insn, 0);
- default:
- break;
- }
- }
-
- if (CALL_P (insn))
- {
- rtx note;
- rtx x;
-
- /* Record the registers used to pass arguments. */
- for (note = CALL_INSN_FUNCTION_USAGE (insn); note;
- note = XEXP (note, 1))
- {
- if (GET_CODE (XEXP (note, 0)) == USE)
- df_uses_record (df, &XEXP (XEXP (note, 0), 0), DF_REF_REG_USE,
- bb, insn, 0);
- }
-
- /* The stack ptr is used (honorarily) by a CALL insn. */
- x = df_reg_use_gen (STACK_POINTER_REGNUM);
- df_uses_record (df, &XEXP (x, 0), DF_REF_REG_USE, bb, insn, 0);
-
- if (df->flags & DF_HARD_REGS)
- {
- /* Calls may also reference any of the global registers,
- so they are recorded as used. */
- for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
- if (global_regs[i])
- {
- x = df_reg_use_gen (i);
- df_uses_record (df, &XEXP (x, 0),
- DF_REF_REG_USE, bb, insn, 0);
- }
- }
- }
-
- /* Record the register uses. */
- df_uses_record (df, &PATTERN (insn),
- DF_REF_REG_USE, bb, insn, 0);
-
- if (CALL_P (insn))
- {
- rtx note;
-
- /* We do not record hard registers clobbered by the call,
- since there are awfully many of them and "defs" created
- through them are not interesting (since no use can be legally
- reached by them). So we must just make sure we include them when
- computing kill bitmaps. */
-
- /* There may be extra registers to be clobbered. */
- for (note = CALL_INSN_FUNCTION_USAGE (insn);
- note;
- note = XEXP (note, 1))
- if (GET_CODE (XEXP (note, 0)) == CLOBBER)
- df_defs_record (df, XEXP (note, 0), bb, insn);
- }
- }
-}
-
-
-/* Record all the refs within the basic block BB. */
-static void
-df_bb_refs_record (struct df *df, basic_block bb)
-{
- rtx insn;
-
- /* Scan the block an insn at a time from beginning to end. */
- FOR_BB_INSNS (bb, insn)
- {
- if (INSN_P (insn))
- {
- /* Record defs within INSN. */
- df_insn_refs_record (df, bb, insn);
- }
- }
-}
-
-
-/* Record all the refs in the basic blocks specified by BLOCKS. */
-static void
-df_refs_record (struct df *df, bitmap blocks)
-{
- basic_block bb;
-
- FOR_EACH_BB_IN_BITMAP (blocks, 0, bb,
- {
- df_bb_refs_record (df, bb);
- });
-}
-
-/* Dataflow analysis routines. */
-
-/* Create reg-def chains for basic block BB. These are a list of
- definitions for each register. */
-
-static void
-df_bb_reg_def_chain_create (struct df *df, basic_block bb)
-{
- rtx insn;
-
- /* Perhaps the defs should be sorted using a depth first search
- of the CFG (or possibly a breadth first search). */
-
- FOR_BB_INSNS_REVERSE (bb, insn)
- {
- struct df_link *link;
- unsigned int uid = INSN_UID (insn);
-
- if (! INSN_P (insn))
- continue;
-
- for (link = df->insns[uid].defs; link; link = link->next)
- {
- struct ref *def = link->ref;
- unsigned int dregno = DF_REF_REGNO (def);
-
- /* Do not add refs to the chain twice, i.e., only add new
- refs. XXX the same could be done by testing if the
- current insn is a modified (or a new) one. This would be
- faster. */
- if (DF_REF_ID (def) < df->def_id_save)
- continue;
-
- df->regs[dregno].defs = df_link_create (def, df->regs[dregno].defs);
- }
- }
-}
-
-
-/* Create reg-def chains for each basic block within BLOCKS. These
- are a list of definitions for each register. If REDO is true, add
- all defs, otherwise just add the new defs. */
-
-static void
-df_reg_def_chain_create (struct df *df, bitmap blocks, bool redo)
-{
- basic_block bb;
-#ifdef ENABLE_CHECKING
- unsigned regno;
-#endif
- unsigned old_def_id_save = df->def_id_save;
-
- if (redo)
- {
-#ifdef ENABLE_CHECKING
- for (regno = 0; regno < df->n_regs; regno++)
- gcc_assert (!df->regs[regno].defs);
-#endif
-
- /* Pretend that all defs are new. */
- df->def_id_save = 0;
- }
-
- FOR_EACH_BB_IN_BITMAP (blocks, 0, bb,
- {
- df_bb_reg_def_chain_create (df, bb);
- });
-
- df->def_id_save = old_def_id_save;
-}
-
-/* Remove all reg-def chains stored in the dataflow object DF. */
-
-static void
-df_reg_def_chain_clean (struct df *df)
-{
- unsigned regno;
-
- for (regno = 0; regno < df->n_regs; regno++)
- free_reg_ref_chain (&df->regs[regno].defs);
-}
-
-/* Create reg-use chains for basic block BB. These are a list of uses
- for each register. */
-
-static void
-df_bb_reg_use_chain_create (struct df *df, basic_block bb)
-{
- rtx insn;
-
- /* Scan in forward order so that the last uses appear at the start
- of the chain. */
-
- FOR_BB_INSNS (bb, insn)
- {
- struct df_link *link;
- unsigned int uid = INSN_UID (insn);
-
- if (! INSN_P (insn))
- continue;
-
- for (link = df->insns[uid].uses; link; link = link->next)
- {
- struct ref *use = link->ref;
- unsigned int uregno = DF_REF_REGNO (use);
-
- /* Do not add refs to the chain twice, i.e., only add new
- refs. XXX the same could be done by testing if the
- current insn is a modified (or a new) one. This would be
- faster. */
- if (DF_REF_ID (use) < df->use_id_save)
- continue;
-
- df->regs[uregno].uses
- = df_link_create (use, df->regs[uregno].uses);
- }
- }
-}
-
-
-/* Create reg-use chains for each basic block within BLOCKS. These
- are a list of uses for each register. If REDO is true, remove the
- old reg-use chains first, otherwise just add new uses to them. */
-
-static void
-df_reg_use_chain_create (struct df *df, bitmap blocks, bool redo)
-{
- basic_block bb;
-#ifdef ENABLE_CHECKING
- unsigned regno;
-#endif
- unsigned old_use_id_save = df->use_id_save;
-
- if (redo)
- {
-#ifdef ENABLE_CHECKING
- for (regno = 0; regno < df->n_regs; regno++)
- gcc_assert (!df->regs[regno].uses);
-#endif
-
- /* Pretend that all uses are new. */
- df->use_id_save = 0;
- }
-
- FOR_EACH_BB_IN_BITMAP (blocks, 0, bb,
- {
- df_bb_reg_use_chain_create (df, bb);
- });
-
- df->use_id_save = old_use_id_save;
-}
-
-/* Remove all reg-use chains stored in the dataflow object DF. */
-
-static void
-df_reg_use_chain_clean (struct df *df)
-{
- unsigned regno;
-
- for (regno = 0; regno < df->n_regs; regno++)
- free_reg_ref_chain (&df->regs[regno].uses);
-}
-
-/* Create def-use chains from reaching use bitmaps for basic block BB. */
-static void
-df_bb_du_chain_create (struct df *df, basic_block bb, bitmap ru)
-{
- struct bb_info *bb_info = DF_BB_INFO (df, bb);
- rtx insn;
-
- bitmap_copy (ru, bb_info->ru_out);
-
- /* For each def in BB create a linked list (chain) of uses
- reached from the def. */
- FOR_BB_INSNS_REVERSE (bb, insn)
- {
- struct df_link *def_link;
- struct df_link *use_link;
- unsigned int uid = INSN_UID (insn);
-
- if (! INSN_P (insn))
- continue;
-
- /* For each def in insn... */
- for (def_link = df->insns[uid].defs; def_link; def_link = def_link->next)
- {
- struct ref *def = def_link->ref;
- unsigned int dregno = DF_REF_REGNO (def);
-
- DF_REF_CHAIN (def) = 0;
-
- /* While the reg-use chains are not essential, it
- is _much_ faster to search these short lists rather
- than all the reaching uses, especially for large functions. */
- for (use_link = df->regs[dregno].uses; use_link;
- use_link = use_link->next)
- {
- struct ref *use = use_link->ref;
-
- if (bitmap_bit_p (ru, DF_REF_ID (use)))
- {
- DF_REF_CHAIN (def)
- = df_link_create (use, DF_REF_CHAIN (def));
-
- bitmap_clear_bit (ru, DF_REF_ID (use));
- }
- }
- }
-
- /* For each use in insn... */
- for (use_link = df->insns[uid].uses; use_link; use_link = use_link->next)
- {
- struct ref *use = use_link->ref;
- bitmap_set_bit (ru, DF_REF_ID (use));
- }
- }
-}
-
-
-/* Create def-use chains from reaching use bitmaps for basic blocks
- in BLOCKS. */
-static void
-df_du_chain_create (struct df *df, bitmap blocks)
-{
- bitmap ru;
- basic_block bb;
-
- ru = BITMAP_ALLOC (NULL);
-
- FOR_EACH_BB_IN_BITMAP (blocks, 0, bb,
- {
- df_bb_du_chain_create (df, bb, ru);
- });
-
- BITMAP_FREE (ru);
-}
-
-
-/* Create use-def chains from reaching def bitmaps for basic block BB. */
-static void
-df_bb_ud_chain_create (struct df *df, basic_block bb)
-{
- struct bb_info *bb_info = DF_BB_INFO (df, bb);
- struct ref **reg_def_last = df->reg_def_last;
- rtx insn;
-
- memset (reg_def_last, 0, df->n_regs * sizeof (struct ref *));
-
- /* For each use in BB create a linked list (chain) of defs
- that reach the use. */
- FOR_BB_INSNS (bb, insn)
- {
- unsigned int uid = INSN_UID (insn);
- struct df_link *use_link;
- struct df_link *def_link;
-
- if (! INSN_P (insn))
- continue;
-
- /* For each use in insn... */
- for (use_link = df->insns[uid].uses; use_link; use_link = use_link->next)
- {
- struct ref *use = use_link->ref;
- unsigned int regno = DF_REF_REGNO (use);
-
- DF_REF_CHAIN (use) = 0;
-
- /* Has regno been defined in this BB yet? If so, use
- the last def as the single entry for the use-def
- chain for this use. Otherwise, we need to add all
- the defs using this regno that reach the start of
- this BB. */
- if (reg_def_last[regno])
- {
- DF_REF_CHAIN (use)
- = df_link_create (reg_def_last[regno], 0);
- }
- else
- {
- /* While the reg-def chains are not essential, it is
- _much_ faster to search these short lists rather than
- all the reaching defs, especially for large
- functions. */
- for (def_link = df->regs[regno].defs; def_link;
- def_link = def_link->next)
- {
- struct ref *def = def_link->ref;
-
- if (bitmap_bit_p (bb_info->rd_in, DF_REF_ID (def)))
- {
- DF_REF_CHAIN (use)
- = df_link_create (def, DF_REF_CHAIN (use));
- }
- }
- }
- }
-
-
- /* For each def in insn... record the last def of each reg. */
- for (def_link = df->insns[uid].defs; def_link; def_link = def_link->next)
- {
- struct ref *def = def_link->ref;
- int dregno = DF_REF_REGNO (def);
-
- reg_def_last[dregno] = def;
- }
- }
-}
-
-
-/* Create use-def chains from reaching def bitmaps for basic blocks
- within BLOCKS. */
-static void
-df_ud_chain_create (struct df *df, bitmap blocks)
-{
- basic_block bb;
-
- FOR_EACH_BB_IN_BITMAP (blocks, 0, bb,
- {
- df_bb_ud_chain_create (df, bb);
- });
-}
-
-
-
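-/* Transfer function for reaching definitions, solved forwards:
- OUT = GEN | (IN & ~KILL). Sets *CHANGED nonzero if OUT was modified. */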
-static void
-df_rd_transfer_function (int bb ATTRIBUTE_UNUSED, int *changed, void *in,
- void *out, void *gen, void *kill,
- void *data ATTRIBUTE_UNUSED)
-{
- *changed = bitmap_ior_and_compl (out, gen, in, kill);
-}
-
-
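-/* Transfer function for upward exposed uses, solved backwards:
- IN = GEN | (OUT & ~KILL). Sets *CHANGED nonzero if IN was modified. */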
-static void
-df_ru_transfer_function (int bb ATTRIBUTE_UNUSED, int *changed, void *in,
- void *out, void *gen, void *kill,
- void *data ATTRIBUTE_UNUSED)
-{
- *changed = bitmap_ior_and_compl (in, gen, out, kill);
-}
-
-
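-/* Transfer function for live registers, solved backwards:
- IN = USE | (OUT & ~DEF). Sets *CHANGED nonzero if IN was modified. */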
-static void
-df_lr_transfer_function (int bb ATTRIBUTE_UNUSED, int *changed, void *in,
- void *out, void *use, void *def,
- void *data ATTRIBUTE_UNUSED)
-{
- *changed = bitmap_ior_and_compl (in, use, out, def);
-}
-
-
-/* Compute local reaching def info for basic block BB. */
-static void
-df_bb_rd_local_compute (struct df *df, basic_block bb, bitmap call_killed_defs)
-{
- struct bb_info *bb_info = DF_BB_INFO (df, bb);
- rtx insn;
- bitmap seen = BITMAP_ALLOC (NULL);
- bool call_seen = false;
-
- FOR_BB_INSNS_REVERSE (bb, insn)
- {
- unsigned int uid = INSN_UID (insn);
- struct df_link *def_link;
-
- if (! INSN_P (insn))
- continue;
-
- for (def_link = df->insns[uid].defs; def_link; def_link = def_link->next)
- {
- struct ref *def = def_link->ref;
- unsigned int regno = DF_REF_REGNO (def);
- struct df_link *def2_link;
-
- if (bitmap_bit_p (seen, regno)
- || (call_seen
- && regno < FIRST_PSEUDO_REGISTER
- && TEST_HARD_REG_BIT (regs_invalidated_by_call, regno)))
- continue;
-
- for (def2_link = df->regs[regno].defs; def2_link;
- def2_link = def2_link->next)
- {
- struct ref *def2 = def2_link->ref;
-
- /* Add all defs of this reg to the set of kills. This
- is greedy since many of these defs will not actually
- be killed by this BB but it keeps things a lot
- simpler. */
- bitmap_set_bit (bb_info->rd_kill, DF_REF_ID (def2));
- }
-
- bitmap_set_bit (bb_info->rd_gen, DF_REF_ID (def));
- bitmap_set_bit (seen, regno);
- }
-
- if (CALL_P (insn) && (df->flags & DF_HARD_REGS))
- {
- bitmap_ior_into (bb_info->rd_kill, call_killed_defs);
- call_seen = true;
- }
- }
-
- BITMAP_FREE (seen);
-}
-
-
-/* Compute local reaching def info for each basic block within BLOCKS. */
-static void
-df_rd_local_compute (struct df *df, bitmap blocks)
-{
- basic_block bb;
- bitmap killed_by_call = NULL;
- unsigned regno;
- struct df_link *def_link;
-
- if (df->flags & DF_HARD_REGS)
- {
- killed_by_call = BITMAP_ALLOC (NULL);
- for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
- {
- if (!TEST_HARD_REG_BIT (regs_invalidated_by_call, regno))
- continue;
-
- for (def_link = df->regs[regno].defs;
- def_link;
- def_link = def_link->next)
- bitmap_set_bit (killed_by_call, DF_REF_ID (def_link->ref));
- }
- }
-
- FOR_EACH_BB_IN_BITMAP (blocks, 0, bb,
- {
- df_bb_rd_local_compute (df, bb, killed_by_call);
- });
-
- if (df->flags & DF_HARD_REGS)
- BITMAP_FREE (killed_by_call);
-}
-
-
-/* Compute local reaching use (upward exposed use) info for basic
- block BB. */
-static void
-df_bb_ru_local_compute (struct df *df, basic_block bb)
-{
- /* This is much more tricky than computing reaching defs. With
- reaching defs, defs get killed by other defs. With upwards
- exposed uses, the uses get killed by defs with the same regno. */
-
- struct bb_info *bb_info = DF_BB_INFO (df, bb);
- rtx insn;
-
-
- FOR_BB_INSNS_REVERSE (bb, insn)
- {
- unsigned int uid = INSN_UID (insn);
- struct df_link *def_link;
- struct df_link *use_link;
-
- if (! INSN_P (insn))
- continue;
-
- for (def_link = df->insns[uid].defs; def_link; def_link = def_link->next)
- {
- struct ref *def = def_link->ref;
- unsigned int dregno = DF_REF_REGNO (def);
-
- for (use_link = df->regs[dregno].uses; use_link;
- use_link = use_link->next)
- {
- struct ref *use = use_link->ref;
-
- /* Add all uses of this reg to the set of kills. This
- is greedy since many of these uses will not actually
- be killed by this BB but it keeps things a lot
- simpler. */
- bitmap_set_bit (bb_info->ru_kill, DF_REF_ID (use));
-
- /* Zap from the set of gens for this BB. */
- bitmap_clear_bit (bb_info->ru_gen, DF_REF_ID (use));
- }
- }
-
- for (use_link = df->insns[uid].uses; use_link; use_link = use_link->next)
- {
- struct ref *use = use_link->ref;
- /* Add use to set of gens in this BB. */
- bitmap_set_bit (bb_info->ru_gen, DF_REF_ID (use));
- }
- }
-}
-
-
-/* Compute local reaching use (upward exposed use) info for each basic
- block within BLOCKS. */
-static void
-df_ru_local_compute (struct df *df, bitmap blocks)
-{
- basic_block bb;
-
- FOR_EACH_BB_IN_BITMAP (blocks, 0, bb,
- {
- df_bb_ru_local_compute (df, bb);
- });
-}
-
-
-/* Compute local live variable info for basic block BB. */
-static void
-df_bb_lr_local_compute (struct df *df, basic_block bb)
-{
- struct bb_info *bb_info = DF_BB_INFO (df, bb);
- rtx insn;
-
- FOR_BB_INSNS_REVERSE (bb, insn)
- {
- unsigned int uid = INSN_UID (insn);
- struct df_link *link;
-
- if (! INSN_P (insn))
- continue;
-
- for (link = df->insns[uid].defs; link; link = link->next)
- {
- struct ref *def = link->ref;
- unsigned int dregno = DF_REF_REGNO (def);
-
- /* Add def to set of defs in this BB. */
- bitmap_set_bit (bb_info->lr_def, dregno);
-
- bitmap_clear_bit (bb_info->lr_use, dregno);
- }
-
- for (link = df->insns[uid].uses; link; link = link->next)
- {
- struct ref *use = link->ref;
- /* Add use to set of uses in this BB. */
- bitmap_set_bit (bb_info->lr_use, DF_REF_REGNO (use));
- }
- }
-}
-
-
-/* Compute local live variable info for each basic block within BLOCKS. */
-static void
-df_lr_local_compute (struct df *df, bitmap blocks)
-{
- basic_block bb;
-
- FOR_EACH_BB_IN_BITMAP (blocks, 0, bb,
- {
- df_bb_lr_local_compute (df, bb);
- });
-}
-
-
-/* Compute register info: lifetime, bb, and number of defs and uses
- for basic block BB. */
-static void
-df_bb_reg_info_compute (struct df *df, basic_block bb, bitmap live)
-{
- struct reg_info *reg_info = df->regs;
- struct bb_info *bb_info = DF_BB_INFO (df, bb);
- rtx insn;
-
- bitmap_copy (live, bb_info->lr_out);
-
- FOR_BB_INSNS_REVERSE (bb, insn)
- {
- unsigned int uid = INSN_UID (insn);
- unsigned int regno;
- struct df_link *link;
- bitmap_iterator bi;
-
- if (! INSN_P (insn))
- continue;
-
- for (link = df->insns[uid].defs; link; link = link->next)
- {
- struct ref *def = link->ref;
- unsigned int dregno = DF_REF_REGNO (def);
-
- /* Kill this register. */
- bitmap_clear_bit (live, dregno);
- reg_info[dregno].n_defs++;
- }
-
- for (link = df->insns[uid].uses; link; link = link->next)
- {
- struct ref *use = link->ref;
- unsigned int uregno = DF_REF_REGNO (use);
-
- /* This register is now live. */
- bitmap_set_bit (live, uregno);
- reg_info[uregno].n_uses++;
- }
-
- /* Increment lifetimes of all live registers. */
- EXECUTE_IF_SET_IN_BITMAP (live, 0, regno, bi)
- {
- reg_info[regno].lifetime++;
- }
- }
-}
-
-
-/* Compute register info: lifetime, bb, and number of defs and uses. */
-static void
-df_reg_info_compute (struct df *df, bitmap blocks)
-{
- basic_block bb;
- bitmap live;
-
- live = BITMAP_ALLOC (NULL);
-
- FOR_EACH_BB_IN_BITMAP (blocks, 0, bb,
- {
- df_bb_reg_info_compute (df, bb, live);
- });
-
- BITMAP_FREE (live);
-}
-
-
-/* Assign LUIDs for BB. */
-static int
-df_bb_luids_set (struct df *df, basic_block bb)
-{
- rtx insn;
- int luid = 0;
-
- /* The LUIDs are monotonically increasing for each basic block. */
-
- FOR_BB_INSNS (bb, insn)
- {
-      if (INSN_P (insn))
-	DF_INSN_LUID (df, insn) = luid++;
-      else
-	DF_INSN_LUID (df, insn) = luid;
- }
- return luid;
-}
-
-
-/* Assign LUIDs for each basic block within BLOCKS. */
-static int
-df_luids_set (struct df *df, bitmap blocks)
-{
- basic_block bb;
- int total = 0;
-
- FOR_EACH_BB_IN_BITMAP (blocks, 0, bb,
- {
- total += df_bb_luids_set (df, bb);
- });
- return total;
-}
-
-
-/* Perform dataflow analysis using existing DF structure for blocks
- within BLOCKS. If BLOCKS is zero, use all basic blocks in the CFG. */
-static void
-df_analyze_1 (struct df *df, bitmap blocks, int flags, int update)
-{
- int aflags;
- int dflags;
- basic_block bb;
- struct dataflow dflow;
-
- dflags = 0;
- aflags = flags;
- if (flags & DF_UD_CHAIN)
- aflags |= DF_RD | DF_RD_CHAIN;
-
- if (flags & DF_DU_CHAIN)
- aflags |= DF_RU;
-
- if (flags & DF_RU)
- aflags |= DF_RU_CHAIN;
-
- if (flags & DF_REG_INFO)
- aflags |= DF_LR;
-
- if (! blocks)
- blocks = df->all_blocks;
-
- df->flags = flags;
- if (update)
- {
- df_refs_update (df, NULL);
- /* More fine grained incremental dataflow analysis would be
- nice. For now recompute the whole shebang for the
- modified blocks. */
-#if 0
- df_refs_unlink (df, blocks);
-#endif
- /* All the def-use, use-def chains can be potentially
- modified by changes in one block. The size of the
- bitmaps can also change. */
- }
- else
- {
- /* Scan the function for all register defs and uses. */
- df_refs_queue (df);
- df_refs_record (df, blocks);
-
- /* Link all the new defs and uses to the insns. */
- df_refs_process (df);
- }
-
-  /* Allocate the bitmaps now that the total number of defs and uses
-     are known.  If the number of defs or uses has changed, then
-     these bitmaps need to be reallocated.  */
- df_bitmaps_alloc (df, NULL, aflags);
-
- /* Set the LUIDs for each specified basic block. */
- df_luids_set (df, blocks);
-
- /* Recreate reg-def and reg-use chains from scratch so that first
- def is at the head of the reg-def chain and the last use is at
- the head of the reg-use chain. This is only important for
- regs local to a basic block as it speeds up searching. */
- if (aflags & DF_RD_CHAIN)
- {
- df_reg_def_chain_create (df, blocks, false);
- }
-
- if (aflags & DF_RU_CHAIN)
- {
- df_reg_use_chain_create (df, blocks, false);
- }
-
-  df->dfs_order = xmalloc (sizeof (int) * (n_basic_blocks - NUM_FIXED_BLOCKS));
-  df->rc_order = xmalloc (sizeof (int) * (n_basic_blocks - NUM_FIXED_BLOCKS));
-  df->rts_order = xmalloc (sizeof (int) * (n_basic_blocks - NUM_FIXED_BLOCKS));
-
- pre_and_rev_post_order_compute (df->dfs_order, df->rc_order, false);
- post_order_compute (df->rts_order, false);
- if (aflags & DF_RD)
- {
- /* Compute the sets of gens and kills for the defs of each bb. */
- dflow.in = xmalloc (sizeof (bitmap) * last_basic_block);
- dflow.out = xmalloc (sizeof (bitmap) * last_basic_block);
- dflow.gen = xmalloc (sizeof (bitmap) * last_basic_block);
- dflow.kill = xmalloc (sizeof (bitmap) * last_basic_block);
-
- df_rd_local_compute (df, df->flags & DF_RD ? blocks : df->all_blocks);
- FOR_EACH_BB (bb)
- {
- dflow.in[bb->index] = DF_BB_INFO (df, bb)->rd_in;
- dflow.out[bb->index] = DF_BB_INFO (df, bb)->rd_out;
- dflow.gen[bb->index] = DF_BB_INFO (df, bb)->rd_gen;
- dflow.kill[bb->index] = DF_BB_INFO (df, bb)->rd_kill;
- }
-
- dflow.repr = SR_BITMAP;
- dflow.dir = DF_FORWARD;
- dflow.conf_op = DF_UNION;
- dflow.transfun = df_rd_transfer_function;
- dflow.n_blocks = n_basic_blocks - NUM_FIXED_BLOCKS;
- dflow.order = df->rc_order;
- dflow.data = NULL;
-
- iterative_dataflow (&dflow);
- free (dflow.in);
- free (dflow.out);
- free (dflow.gen);
- free (dflow.kill);
- }
-
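For reference, the transfer function plugged in above computes the standard forward equation out = gen U (in - kill). A minimal sketch matching the transfun signature that iterative_dataflow (below) calls with; the helper name is illustrative, the real df_rd_transfer_function lives elsewhere in this file:

    /* Forward reaching-defs transfer: OUT = GEN U (IN - KILL).
       bitmap_ior_and_compl returns true if OUT changed.  */
    static void
    rd_transfer_sketch (int bb_index ATTRIBUTE_UNUSED, int *changed,
                        bitmap in, bitmap out, bitmap gen, bitmap kill,
                        void *data ATTRIBUTE_UNUSED)
    {
      *changed = bitmap_ior_and_compl (out, gen, in, kill);
    }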
- if (aflags & DF_UD_CHAIN)
- {
- /* Create use-def chains. */
- df_ud_chain_create (df, df->all_blocks);
-
- if (! (flags & DF_RD))
- dflags |= DF_RD;
- }
-
- if (aflags & DF_RU)
- {
- /* Compute the sets of gens and kills for the upwards exposed
- uses in each bb. */
- dflow.in = xmalloc (sizeof (bitmap) * last_basic_block);
- dflow.out = xmalloc (sizeof (bitmap) * last_basic_block);
- dflow.gen = xmalloc (sizeof (bitmap) * last_basic_block);
- dflow.kill = xmalloc (sizeof (bitmap) * last_basic_block);
-
- df_ru_local_compute (df, df->flags & DF_RU ? blocks : df->all_blocks);
-
- FOR_EACH_BB (bb)
- {
- dflow.in[bb->index] = DF_BB_INFO (df, bb)->ru_in;
- dflow.out[bb->index] = DF_BB_INFO (df, bb)->ru_out;
- dflow.gen[bb->index] = DF_BB_INFO (df, bb)->ru_gen;
- dflow.kill[bb->index] = DF_BB_INFO (df, bb)->ru_kill;
- }
-
- dflow.repr = SR_BITMAP;
- dflow.dir = DF_BACKWARD;
- dflow.conf_op = DF_UNION;
- dflow.transfun = df_ru_transfer_function;
- dflow.n_blocks = n_basic_blocks - NUM_FIXED_BLOCKS;
- dflow.order = df->rts_order;
- dflow.data = NULL;
-
- iterative_dataflow (&dflow);
- free (dflow.in);
- free (dflow.out);
- free (dflow.gen);
- free (dflow.kill);
- }
-
- if (aflags & DF_DU_CHAIN)
- {
- /* Create def-use chains. */
- df_du_chain_create (df, df->all_blocks);
-
- if (! (flags & DF_RU))
- dflags |= DF_RU;
- }
-
- /* Free up bitmaps that are no longer required. */
- if (dflags)
- df_bitmaps_free (df, dflags);
-
- if (aflags & DF_LR)
- {
- /* Compute the sets of defs and uses of live variables. */
- dflow.in = xmalloc (sizeof (bitmap) * last_basic_block);
- dflow.out = xmalloc (sizeof (bitmap) * last_basic_block);
- dflow.gen = xmalloc (sizeof (bitmap) * last_basic_block);
- dflow.kill = xmalloc (sizeof (bitmap) * last_basic_block);
-
- df_lr_local_compute (df, df->flags & DF_LR ? blocks : df->all_blocks);
-
- FOR_EACH_BB (bb)
- {
- dflow.in[bb->index] = DF_BB_INFO (df, bb)->lr_in;
- dflow.out[bb->index] = DF_BB_INFO (df, bb)->lr_out;
- dflow.gen[bb->index] = DF_BB_INFO (df, bb)->lr_use;
- dflow.kill[bb->index] = DF_BB_INFO (df, bb)->lr_def;
- }
-
- dflow.repr = SR_BITMAP;
- dflow.dir = DF_BACKWARD;
- dflow.conf_op = DF_UNION;
- dflow.transfun = df_lr_transfer_function;
- dflow.n_blocks = n_basic_blocks - NUM_FIXED_BLOCKS;
- dflow.order = df->rts_order;
- dflow.data = NULL;
-
- iterative_dataflow (&dflow);
- free (dflow.in);
- free (dflow.out);
- free (dflow.gen);
- free (dflow.kill);
- }
-
- if (aflags & DF_REG_INFO)
- {
- df_reg_info_compute (df, df->all_blocks);
- }
-
- free (df->dfs_order);
- free (df->rc_order);
- free (df->rts_order);
-}
-
-
-/* Initialize dataflow analysis. */
-struct df *
-df_init (void)
-{
- struct df *df;
-
- df = xcalloc (1, sizeof (struct df));
-
- /* Squirrel away a global for debugging. */
- ddf = df;
-
- return df;
-}
-
-
-/* Start queuing refs. */
-static int
-df_refs_queue (struct df *df)
-{
- df->def_id_save = df->def_id;
- df->use_id_save = df->use_id;
- /* ???? Perhaps we should save current obstack state so that we can
- unwind it. */
- return 0;
-}
-
-
-/* Process queued refs. */
-static int
-df_refs_process (struct df *df)
-{
- unsigned int i;
-
- /* Build new insn-def chains. */
- for (i = df->def_id_save; i != df->def_id; i++)
- {
- struct ref *def = df->defs[i];
- unsigned int uid = DF_REF_INSN_UID (def);
-
- /* Add def to head of def list for INSN. */
- df->insns[uid].defs
- = df_link_create (def, df->insns[uid].defs);
- }
-
- /* Build new insn-use chains. */
- for (i = df->use_id_save; i != df->use_id; i++)
- {
- struct ref *use = df->uses[i];
- unsigned int uid = DF_REF_INSN_UID (use);
-
- /* Add use to head of use list for INSN. */
- df->insns[uid].uses
- = df_link_create (use, df->insns[uid].uses);
- }
- return 0;
-}
-
-
-/* Update refs for basic block BB. */
-static int
-df_bb_refs_update (struct df *df, basic_block bb)
-{
- rtx insn;
- int count = 0;
-
- /* While we have to scan the chain of insns for this BB, we do not
- need to allocate and queue a long chain of BB/INSN pairs. Using
- a bitmap for insns_modified saves memory and avoids queuing
- duplicates. */
-
- FOR_BB_INSNS (bb, insn)
- {
- unsigned int uid;
-
- uid = INSN_UID (insn);
-
- if (bitmap_bit_p (df->insns_modified, uid))
- {
- /* Delete any allocated refs of this insn. MPH, FIXME. */
- df_insn_refs_unlink (df, bb, insn);
-
- /* Scan the insn for refs. */
- df_insn_refs_record (df, bb, insn);
-
- count++;
- }
- }
- return count;
-}
-
-
-/* Process all the modified/deleted insns that were queued. */
-static int
-df_refs_update (struct df *df, bitmap blocks)
-{
- basic_block bb;
- unsigned count = 0, bbno;
-
- df->n_regs = max_reg_num ();
- if (df->n_regs >= df->reg_size)
- df_reg_table_realloc (df, 0);
-
- df_refs_queue (df);
-
- if (!blocks)
- {
- FOR_EACH_BB_IN_BITMAP (df->bbs_modified, 0, bb,
- {
- count += df_bb_refs_update (df, bb);
- });
- }
- else
- {
- bitmap_iterator bi;
-
- EXECUTE_IF_AND_IN_BITMAP (df->bbs_modified, blocks, 0, bbno, bi)
- {
- count += df_bb_refs_update (df, BASIC_BLOCK (bbno));
- }
- }
-
- df_refs_process (df);
- return count;
-}
-
-
-/* Return nonzero if any of the requested blocks in the bitmap
- BLOCKS have been modified. */
-static int
-df_modified_p (struct df *df, bitmap blocks)
-{
- int update = 0;
- basic_block bb;
-
- if (!df->n_bbs)
- return 0;
-
- FOR_EACH_BB (bb)
- if (bitmap_bit_p (df->bbs_modified, bb->index)
- && (! blocks || (blocks == (bitmap) -1) || bitmap_bit_p (blocks, bb->index)))
- {
- update = 1;
- break;
- }
-
- return update;
-}
-
-/* Analyze dataflow info for the basic blocks specified by the bitmap
- BLOCKS, or for the whole CFG if BLOCKS is zero, or just for the
- modified blocks if BLOCKS is -1. */
-
-int
-df_analyze (struct df *df, bitmap blocks, int flags)
-{
- int update;
-
- /* We could deal with additional basic blocks being created by
- rescanning everything again. */
- gcc_assert (!df->n_bbs || df->n_bbs == (unsigned int) last_basic_block);
-
- update = df_modified_p (df, blocks);
- if (update || (flags != df->flags))
- {
- if (! blocks)
- {
- if (df->n_bbs)
- {
- /* Recompute everything from scratch. */
- df_free (df);
- }
- /* Allocate and initialize data structures. */
- df_alloc (df, max_reg_num ());
- df_analyze_1 (df, 0, flags, 0);
- update = 1;
- }
- else
- {
- if (blocks == (bitmap) -1)
- blocks = df->bbs_modified;
-
- gcc_assert (df->n_bbs);
-
- df_analyze_1 (df, blocks, flags, 1);
- bitmap_zero (df->bbs_modified);
- bitmap_zero (df->insns_modified);
- }
- }
- return update;
-}
-
-/* Remove the entries not in BLOCKS from the LIST of length LEN, preserving
- the order of the remaining entries. Returns the length of the resulting
- list. */
-
-static unsigned
-prune_to_subcfg (int list[], unsigned len, bitmap blocks)
-{
- unsigned act, last;
-
- for (act = 0, last = 0; act < len; act++)
- if (bitmap_bit_p (blocks, list[act]))
- list[last++] = list[act];
-
- return last;
-}
-
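A quick illustration of the pruning (hypothetical values):

    int order[] = { 2, 5, 7, 9 };
    bitmap blocks = BITMAP_ALLOC (NULL);
    unsigned n;

    bitmap_set_bit (blocks, 5);
    bitmap_set_bit (blocks, 9);
    n = prune_to_subcfg (order, 4, blocks);  /* order = {5, 9}, n == 2.  */
    BITMAP_FREE (blocks);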
-/* Alternative entry point to the analysis.  Analyze just the part of
-   the CFG induced by BLOCKS.
-
- TODO I am not quite sure how to avoid code duplication with df_analyze_1
- here, and simultaneously not make even greater chaos in it. We behave
- slightly differently in some details, especially in handling modified
- insns. */
-
-void
-df_analyze_subcfg (struct df *df, bitmap blocks, int flags)
-{
- rtx insn;
- basic_block bb;
- struct dataflow dflow;
- unsigned n_blocks;
-
- if (flags & DF_UD_CHAIN)
- flags |= DF_RD | DF_RD_CHAIN;
- if (flags & DF_DU_CHAIN)
- flags |= DF_RU;
- if (flags & DF_RU)
- flags |= DF_RU_CHAIN;
- if (flags & DF_REG_INFO)
- flags |= DF_LR;
-
- if (!df->n_bbs)
- {
- df_alloc (df, max_reg_num ());
-
- /* Mark all insns as modified. */
-
- FOR_EACH_BB (bb)
- {
- FOR_BB_INSNS (bb, insn)
- {
- df_insn_modify (df, bb, insn);
- }
- }
- }
-
- df->flags = flags;
-
- df_reg_def_chain_clean (df);
- df_reg_use_chain_clean (df);
-
- df_refs_update (df, blocks);
-
- /* Clear the updated stuff from ``modified'' bitmaps. */
- FOR_EACH_BB_IN_BITMAP (blocks, 0, bb,
- {
- if (bitmap_bit_p (df->bbs_modified, bb->index))
- {
- FOR_BB_INSNS (bb, insn)
- {
- bitmap_clear_bit (df->insns_modified, INSN_UID (insn));
- }
-
- bitmap_clear_bit (df->bbs_modified, bb->index);
- }
- });
-
-  /* Allocate the bitmaps now that the total number of defs and uses
-     are known.  If the number of defs or uses has changed, then
-     these bitmaps need to be reallocated.  */
- df_bitmaps_alloc (df, blocks, flags);
-
- /* Set the LUIDs for each specified basic block. */
- df_luids_set (df, blocks);
-
- /* Recreate reg-def and reg-use chains from scratch so that first
- def is at the head of the reg-def chain and the last use is at
- the head of the reg-use chain. This is only important for
- regs local to a basic block as it speeds up searching. */
- if (flags & DF_RD_CHAIN)
- {
- df_reg_def_chain_create (df, blocks, true);
- }
-
- if (flags & DF_RU_CHAIN)
- {
- df_reg_use_chain_create (df, blocks, true);
- }
-
-  df->dfs_order = xmalloc (sizeof (int) * (n_basic_blocks - NUM_FIXED_BLOCKS));
-  df->rc_order = xmalloc (sizeof (int) * (n_basic_blocks - NUM_FIXED_BLOCKS));
-  df->rts_order = xmalloc (sizeof (int) * (n_basic_blocks - NUM_FIXED_BLOCKS));
-
- pre_and_rev_post_order_compute (df->dfs_order, df->rc_order, false);
- post_order_compute (df->rts_order, false);
-
- n_blocks = prune_to_subcfg (df->dfs_order, n_basic_blocks - NUM_FIXED_BLOCKS, blocks);
- prune_to_subcfg (df->rc_order, n_basic_blocks - NUM_FIXED_BLOCKS, blocks);
- prune_to_subcfg (df->rts_order, n_basic_blocks - NUM_FIXED_BLOCKS, blocks);
-
- dflow.in = xmalloc (sizeof (bitmap) * last_basic_block);
- dflow.out = xmalloc (sizeof (bitmap) * last_basic_block);
- dflow.gen = xmalloc (sizeof (bitmap) * last_basic_block);
- dflow.kill = xmalloc (sizeof (bitmap) * last_basic_block);
-
- if (flags & DF_RD)
- {
- /* Compute the sets of gens and kills for the defs of each bb. */
- df_rd_local_compute (df, blocks);
-
- FOR_EACH_BB_IN_BITMAP (blocks, 0, bb,
- {
- dflow.in[bb->index] = DF_BB_INFO (df, bb)->rd_in;
- dflow.out[bb->index] = DF_BB_INFO (df, bb)->rd_out;
- dflow.gen[bb->index] = DF_BB_INFO (df, bb)->rd_gen;
- dflow.kill[bb->index] = DF_BB_INFO (df, bb)->rd_kill;
- });
-
- dflow.repr = SR_BITMAP;
- dflow.dir = DF_FORWARD;
- dflow.conf_op = DF_UNION;
- dflow.transfun = df_rd_transfer_function;
- dflow.n_blocks = n_blocks;
- dflow.order = df->rc_order;
- dflow.data = NULL;
-
- iterative_dataflow (&dflow);
- }
-
- if (flags & DF_UD_CHAIN)
- {
- /* Create use-def chains. */
- df_ud_chain_create (df, blocks);
- }
-
- if (flags & DF_RU)
- {
- /* Compute the sets of gens and kills for the upwards exposed
- uses in each bb. */
- df_ru_local_compute (df, blocks);
-
- FOR_EACH_BB_IN_BITMAP (blocks, 0, bb,
- {
- dflow.in[bb->index] = DF_BB_INFO (df, bb)->ru_in;
- dflow.out[bb->index] = DF_BB_INFO (df, bb)->ru_out;
- dflow.gen[bb->index] = DF_BB_INFO (df, bb)->ru_gen;
- dflow.kill[bb->index] = DF_BB_INFO (df, bb)->ru_kill;
- });
-
- dflow.repr = SR_BITMAP;
- dflow.dir = DF_BACKWARD;
- dflow.conf_op = DF_UNION;
- dflow.transfun = df_ru_transfer_function;
- dflow.n_blocks = n_blocks;
- dflow.order = df->rts_order;
- dflow.data = NULL;
-
- iterative_dataflow (&dflow);
- }
-
- if (flags & DF_DU_CHAIN)
- {
- /* Create def-use chains. */
- df_du_chain_create (df, blocks);
- }
-
- if (flags & DF_LR)
- {
- /* Compute the sets of defs and uses of live variables. */
- df_lr_local_compute (df, blocks);
-
- FOR_EACH_BB (bb)
- {
- dflow.in[bb->index] = DF_BB_INFO (df, bb)->lr_in;
- dflow.out[bb->index] = DF_BB_INFO (df, bb)->lr_out;
- dflow.gen[bb->index] = DF_BB_INFO (df, bb)->lr_use;
- dflow.kill[bb->index] = DF_BB_INFO (df, bb)->lr_def;
- }
-
- dflow.repr = SR_BITMAP;
- dflow.dir = DF_BACKWARD;
- dflow.conf_op = DF_UNION;
- dflow.transfun = df_lr_transfer_function;
- dflow.n_blocks = n_blocks;
- dflow.order = df->rts_order;
- dflow.data = NULL;
-
- iterative_dataflow (&dflow);
- }
-
- if (flags & DF_REG_INFO)
- {
- df_reg_info_compute (df, blocks);
- }
-
- free (dflow.in);
- free (dflow.out);
- free (dflow.gen);
- free (dflow.kill);
-
- free (df->dfs_order);
- free (df->rc_order);
- free (df->rts_order);
-}
-
-/* Free all the dataflow info and the DF structure. */
-void
-df_finish (struct df *df)
-{
- df_free (df);
- free (df);
-}
-
-/* Unlink INSN from its reference information. */
-static void
-df_insn_refs_unlink (struct df *df, basic_block bb ATTRIBUTE_UNUSED, rtx insn)
-{
- struct df_link *link;
- unsigned int uid;
-
- uid = INSN_UID (insn);
-
- /* Unlink all refs defined by this insn. */
- for (link = df->insns[uid].defs; link; link = link->next)
- df_def_unlink (df, link->ref);
-
- /* Unlink all refs used by this insn. */
- for (link = df->insns[uid].uses; link; link = link->next)
- df_use_unlink (df, link->ref);
-
- df->insns[uid].defs = 0;
- df->insns[uid].uses = 0;
-}
-
-
-#if 0
-/* Unlink all the insns within BB from their reference information. */
-static void
-df_bb_refs_unlink (struct df *df, basic_block bb)
-{
- rtx insn;
-
- /* Scan the block an insn at a time from beginning to end. */
- for (insn = BB_HEAD (bb); ; insn = NEXT_INSN (insn))
- {
- if (INSN_P (insn))
- {
- /* Unlink refs for INSN. */
- df_insn_refs_unlink (df, bb, insn);
- }
- if (insn == BB_END (bb))
- break;
- }
-}
-
-
-/* Unlink all the refs in the basic blocks specified by BLOCKS.
- Not currently used. */
-static void
-df_refs_unlink (struct df *df, bitmap blocks)
-{
- basic_block bb;
-
- if (blocks)
- {
- FOR_EACH_BB_IN_BITMAP (blocks, 0, bb,
- {
- df_bb_refs_unlink (df, bb);
- });
- }
- else
- {
- FOR_EACH_BB (bb)
- df_bb_refs_unlink (df, bb);
- }
-}
-#endif
-
-/* Functions to modify insns. */
-
-
-/* Delete INSN and all its reference information. */
-rtx
-df_insn_delete (struct df *df, basic_block bb ATTRIBUTE_UNUSED, rtx insn)
-{
- /* If the insn is a jump, we should perhaps call delete_insn to
- handle the JUMP_LABEL? */
-
- /* We should not be deleting the NOTE_INSN_BASIC_BLOCK or label. */
- gcc_assert (insn != BB_HEAD (bb));
-
- /* Delete the insn. */
- delete_insn (insn);
-
- df_insn_modify (df, bb, insn);
-
- return NEXT_INSN (insn);
-}
-
-/* Mark that basic block BB was modified. */
-
-static void
-df_bb_modify (struct df *df, basic_block bb)
-{
- if ((unsigned) bb->index >= df->n_bbs)
- df_bb_table_realloc (df, bb->index);
-
- bitmap_set_bit (df->bbs_modified, bb->index);
-}
-
-/* Mark that INSN within BB may have changed (created/modified/deleted).
- This may be called multiple times for the same insn. There is no
- harm calling this function if the insn wasn't changed; it will just
- slow down the rescanning of refs. */
-void
-df_insn_modify (struct df *df, basic_block bb, rtx insn)
-{
- unsigned int uid;
-
- uid = INSN_UID (insn);
- if (uid >= df->insn_size)
- df_insn_table_realloc (df, uid);
-
- df_bb_modify (df, bb);
- bitmap_set_bit (df->insns_modified, uid);
-
- /* For incremental updating on the fly, perhaps we could make a copy
- of all the refs of the original insn and turn them into
- anti-refs. When df_refs_update finds these anti-refs, it annihilates
- the original refs. If validate_change fails then these anti-refs
- will just get ignored. */
-}
-
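The intended incremental flow is: change an insn in place, report it, and let df_analyze rescan just the modified blocks. A sketch of a typical caller, assuming INSN is a single_set insn and NEW_SRC is a hypothetical replacement source:

    /* NEW_SRC is illustrative; any validated in-place change works.  */
    if (validate_change (insn, &SET_SRC (single_set (insn)), new_src, 0))
      {
        df_insn_modify (df, bb, insn);
        /* BLOCKS == -1 means "just the modified blocks".  */
        df_analyze (df, (bitmap) -1, df->flags);
      }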
-/* Check if INSN was marked as changed. Of course the correctness of
- the information depends on whether the instruction was really modified
- at the time df_insn_modify was called. */
-bool
-df_insn_modified_p (struct df *df, rtx insn)
-{
- unsigned int uid;
-
- uid = INSN_UID (insn);
- return (df->insns_modified
- && uid < df->insn_size
- && bitmap_bit_p (df->insns_modified, uid));
-}
-
-typedef struct replace_args
-{
- rtx match;
- rtx replacement;
- rtx insn;
- int modified;
-} replace_args;
-
-
-/* Replace mem pointed to by PX with its associated pseudo register.
- DATA is actually a pointer to a structure describing the
- instruction currently being scanned and the MEM we are currently
- replacing. */
-static int
-df_rtx_mem_replace (rtx *px, void *data)
-{
- replace_args *args = (replace_args *) data;
- rtx mem = *px;
-
- if (mem == NULL_RTX)
- return 0;
-
- switch (GET_CODE (mem))
- {
- case MEM:
- break;
-
- case CONST_DOUBLE:
- /* We're not interested in the MEM associated with a
- CONST_DOUBLE, so there's no need to traverse into one. */
- return -1;
-
- default:
- /* This is not a MEM. */
- return 0;
- }
-
- if (!rtx_equal_p (args->match, mem))
- /* This is not the MEM we are currently replacing. */
- return 0;
-
- /* Actually replace the MEM. */
- validate_change (args->insn, px, args->replacement, 1);
- args->modified++;
-
- return 0;
-}
-
-
-/* Replace all occurrences of MEM in INSN (which lies within BB) with
-   REG.  Return the number of replacements made.  */
-int
-df_insn_mem_replace (struct df *df, basic_block bb, rtx insn, rtx mem, rtx reg)
-{
- replace_args args;
-
- args.insn = insn;
- args.match = mem;
- args.replacement = reg;
- args.modified = 0;
-
- /* Search and replace all matching mems within insn. */
- for_each_rtx (&insn, df_rtx_mem_replace, &args);
-
- if (args.modified)
- df_insn_modify (df, bb, insn);
-
- /* ???? FIXME. We may have a new def or one or more new uses of REG
- in INSN. REG should be a new pseudo so it won't affect the
- dataflow information that we currently have. We should add
- the new uses and defs to INSN and then recreate the chains
- when df_analyze is called. */
- return args.modified;
-}
-
-
-/* Replace one register with another. Called through for_each_rtx; PX
- points to the rtx being scanned. DATA is actually a pointer to a
- structure of arguments. */
-static int
-df_rtx_reg_replace (rtx *px, void *data)
-{
- rtx x = *px;
- replace_args *args = (replace_args *) data;
-
- if (x == NULL_RTX)
- return 0;
-
- if (x == args->match)
- {
- validate_change (args->insn, px, args->replacement, 1);
- args->modified++;
- }
-
- return 0;
-}
-
-
-/* Replace the reg within every ref on CHAIN that is within the set
- BLOCKS of basic blocks with NEWREG. Also update the regs within
- REG_NOTES. */
-void
-df_refs_reg_replace (struct df *df, bitmap blocks, struct df_link *chain, rtx oldreg, rtx newreg)
-{
- struct df_link *link;
- replace_args args;
-
- if (! blocks)
- blocks = df->all_blocks;
-
- args.match = oldreg;
- args.replacement = newreg;
- args.modified = 0;
-
- for (link = chain; link; link = link->next)
- {
- struct ref *ref = link->ref;
- rtx insn = DF_REF_INSN (ref);
-
- if (! INSN_P (insn))
- continue;
-
- gcc_assert (bitmap_bit_p (blocks, DF_REF_BBNO (ref)));
-
- df_ref_reg_replace (df, ref, oldreg, newreg);
-
- /* Replace occurrences of the reg within the REG_NOTES. */
- if ((! link->next || DF_REF_INSN (ref)
- != DF_REF_INSN (link->next->ref))
- && REG_NOTES (insn))
- {
- args.insn = insn;
- for_each_rtx (&REG_NOTES (insn), df_rtx_reg_replace, &args);
- }
- }
-}
-
-
-/* Replace all occurrences of register OLDREG with register NEWREG in
- blocks defined by bitmap BLOCKS. This also replaces occurrences of
- OLDREG in the REG_NOTES but only for insns containing OLDREG. This
- routine expects the reg-use and reg-def chains to be valid. */
-int
-df_reg_replace (struct df *df, bitmap blocks, rtx oldreg, rtx newreg)
-{
- unsigned int oldregno = REGNO (oldreg);
-
- df_refs_reg_replace (df, blocks, df->regs[oldregno].defs, oldreg, newreg);
- df_refs_reg_replace (df, blocks, df->regs[oldregno].uses, oldreg, newreg);
- return 1;
-}
-
-
-/* Try replacing the reg within REF with NEWREG. Do not modify
- def-use/use-def chains. */
-int
-df_ref_reg_replace (struct df *df, struct ref *ref, rtx oldreg, rtx newreg)
-{
-  /* Check whether the insn was deleted by being converted into a
-     NOTE.  If so, ignore it.  */
- if (! INSN_P (DF_REF_INSN (ref)))
- return 0;
-
- gcc_assert (!oldreg || oldreg == DF_REF_REG (ref));
-
- if (! validate_change (DF_REF_INSN (ref), DF_REF_LOC (ref), newreg, 1))
- return 0;
-
- df_insn_modify (df, DF_REF_BB (ref), DF_REF_INSN (ref));
- return 1;
-}
-
-
-/* Move the def of REGNO in DEF_INSN to USE_INSN, removing the last
-   use of REGNO from USE_INSN.  Return the moved def, or NULL if
-   either ref cannot be found.  */
-struct ref *
-df_bb_def_use_swap (struct df *df, basic_block bb, rtx def_insn, rtx use_insn, unsigned int regno)
-{
- struct ref *def;
- struct ref *use;
- int def_uid;
- int use_uid;
- struct df_link *link;
-
- def = df_bb_insn_regno_first_def_find (df, bb, def_insn, regno);
- if (! def)
- return 0;
-
- use = df_bb_insn_regno_last_use_find (df, bb, use_insn, regno);
- if (! use)
- return 0;
-
- /* The USE no longer exists. */
- use_uid = INSN_UID (use_insn);
- df_use_unlink (df, use);
- df_ref_unlink (&df->insns[use_uid].uses, use);
-
- /* The DEF requires shifting so remove it from DEF_INSN
- and add it to USE_INSN by reusing LINK. */
- def_uid = INSN_UID (def_insn);
- link = df_ref_unlink (&df->insns[def_uid].defs, def);
- link->ref = def;
- link->next = df->insns[use_uid].defs;
- df->insns[use_uid].defs = link;
-
-#if 0
- link = df_ref_unlink (&df->regs[regno].defs, def);
- link->ref = def;
- link->next = df->regs[regno].defs;
-  df->regs[regno].defs = link;
-#endif
-
- DF_REF_INSN (def) = use_insn;
- return def;
-}
-
-
-/* Mark as modified all insns between FIRST_INSN and LAST_INSN
-   inclusive.  All new insns must be processed by this routine.  */
-static void
-df_insns_modify (struct df *df, basic_block bb, rtx first_insn, rtx last_insn)
-{
- rtx insn;
-
- for (insn = first_insn; ; insn = NEXT_INSN (insn))
- {
- unsigned int uid;
-
- /* A non-const call should not have slipped through the net. If
- it does, we need to create a new basic block. Ouch. The
- same applies for a label. */
- gcc_assert ((!CALL_P (insn) || CONST_OR_PURE_CALL_P (insn))
- && !LABEL_P (insn));
-
- uid = INSN_UID (insn);
-
- if (uid >= df->insn_size)
- df_insn_table_realloc (df, uid);
-
- df_insn_modify (df, bb, insn);
-
- if (insn == last_insn)
- break;
- }
-}
-
-
-/* Emit PATTERN before INSN within BB. */
-rtx
-df_pattern_emit_before (struct df *df, rtx pattern, basic_block bb, rtx insn)
-{
- rtx ret_insn;
- rtx prev_insn = PREV_INSN (insn);
-
- /* We should not be inserting before the start of the block. */
- gcc_assert (insn != BB_HEAD (bb));
- ret_insn = emit_insn_before (pattern, insn);
- if (ret_insn == insn)
- return ret_insn;
-
- df_insns_modify (df, bb, NEXT_INSN (prev_insn), ret_insn);
- return ret_insn;
-}
-
-
-/* Emit PATTERN after INSN within BB. */
-rtx
-df_pattern_emit_after (struct df *df, rtx pattern, basic_block bb, rtx insn)
-{
- rtx ret_insn;
-
- ret_insn = emit_insn_after (pattern, insn);
- if (ret_insn == insn)
- return ret_insn;
-
- df_insns_modify (df, bb, NEXT_INSN (insn), ret_insn);
- return ret_insn;
-}
-
-
-/* Emit jump PATTERN after INSN within BB. */
-rtx
-df_jump_pattern_emit_after (struct df *df, rtx pattern, basic_block bb, rtx insn)
-{
- rtx ret_insn;
-
- ret_insn = emit_jump_insn_after (pattern, insn);
- if (ret_insn == insn)
- return ret_insn;
-
- df_insns_modify (df, bb, NEXT_INSN (insn), ret_insn);
- return ret_insn;
-}
-
-
-/* Move INSN within BB before BEFORE_INSN within BEFORE_BB.
-
- This function should only be used to move loop invariant insns
- out of a loop where it has been proven that the def-use info
- will still be valid. */
-rtx
-df_insn_move_before (struct df *df, basic_block bb, rtx insn, basic_block before_bb, rtx before_insn)
-{
- struct df_link *link;
- unsigned int uid;
-
- if (! bb)
- return df_pattern_emit_before (df, insn, before_bb, before_insn);
-
- uid = INSN_UID (insn);
-
- /* Change bb for all df defined and used by this insn. */
- for (link = df->insns[uid].defs; link; link = link->next)
- DF_REF_BB (link->ref) = before_bb;
- for (link = df->insns[uid].uses; link; link = link->next)
- DF_REF_BB (link->ref) = before_bb;
-
- /* The lifetimes of the registers used in this insn will be reduced
- while the lifetimes of the registers defined in this insn
- are likely to be increased. */
-
- /* ???? Perhaps all the insns moved should be stored on a list
- which df_analyze removes when it recalculates data flow. */
-
- return emit_insn_before (insn, before_insn);
-}
-
-/* Functions to query dataflow information. */
-
-
-/* Return nonzero if INSN contains a def of REGNO.  */
-int
-df_insn_regno_def_p (struct df *df, basic_block bb ATTRIBUTE_UNUSED,
- rtx insn, unsigned int regno)
-{
- unsigned int uid;
- struct df_link *link;
-
- uid = INSN_UID (insn);
-
- for (link = df->insns[uid].defs; link; link = link->next)
- {
- struct ref *def = link->ref;
-
- if (DF_REF_REGNO (def) == regno)
- return 1;
- }
-
- return 0;
-}
-
-/* Finds the reference corresponding to the definition of REG in INSN.
- DF is the dataflow object. */
-
-struct ref *
-df_find_def (struct df *df, rtx insn, rtx reg)
-{
- struct df_link *defs;
-
- if (GET_CODE (reg) == SUBREG)
- reg = SUBREG_REG (reg);
- gcc_assert (REG_P (reg));
-
- for (defs = DF_INSN_DEFS (df, insn); defs; defs = defs->next)
- if (rtx_equal_p (DF_REF_REAL_REG (defs->ref), reg))
- return defs->ref;
-
- return NULL;
-}
-
-/* Finds the reference corresponding to the use of REG in INSN.
- DF is the dataflow object. */
-
-struct ref *
-df_find_use (struct df *df, rtx insn, rtx reg)
-{
- struct df_link *uses;
-
- if (GET_CODE (reg) == SUBREG)
- reg = SUBREG_REG (reg);
- gcc_assert (REG_P (reg));
-
- for (uses = DF_INSN_USES (df, insn); uses; uses = uses->next)
- if (rtx_equal_p (DF_REF_REAL_REG (uses->ref), reg))
- return uses->ref;
-
- return NULL;
-}
-
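These two queries combine naturally. A sketch of a predicate built on them (the helper name is illustrative):

    /* Return true if INSN both reads and writes REG, e.g. a
       read-modify-write reference.  */
    static bool
    insn_reads_and_writes_reg_p (struct df *df, rtx insn, rtx reg)
    {
      return (df_find_use (df, insn, reg) != NULL
              && df_find_def (df, insn, reg) != NULL);
    }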
-/* Return 1 if REG is referenced in INSN, zero otherwise. */
-
-int
-df_reg_used (struct df *df, rtx insn, rtx reg)
-{
- return df_find_use (df, insn, reg) != NULL;
-}
-
-static int
-df_def_dominates_all_uses_p (struct df *df ATTRIBUTE_UNUSED, struct ref *def)
-{
- struct df_link *du_link;
-
- /* Follow def-use chain to find all the uses of this def. */
- for (du_link = DF_REF_CHAIN (def); du_link; du_link = du_link->next)
- {
- struct ref *use = du_link->ref;
- struct df_link *ud_link;
-
- /* Follow use-def chain to check all the defs for this use. */
- for (ud_link = DF_REF_CHAIN (use); ud_link; ud_link = ud_link->next)
- if (ud_link->ref != def)
- return 0;
- }
- return 1;
-}
-
-
-/* Return nonzero if every def within INSN dominates all the
-   corresponding uses.  */
-int
-df_insn_dominates_all_uses_p (struct df *df, basic_block bb ATTRIBUTE_UNUSED,
- rtx insn)
-{
- unsigned int uid;
- struct df_link *link;
-
- uid = INSN_UID (insn);
-
- for (link = df->insns[uid].defs; link; link = link->next)
- {
- struct ref *def = link->ref;
-
- if (! df_def_dominates_all_uses_p (df, def))
- return 0;
- }
-
- return 1;
-}
-
-
-/* Return nonzero if DEF dominates all the uses of its register
-   within the bitmap BLOCKS.  */
-static int
-df_def_dominates_uses_p (struct df *df ATTRIBUTE_UNUSED, struct ref *def,
- bitmap blocks)
-{
- struct df_link *du_link;
-
- /* Follow def-use chain to find all the uses of this def. */
- for (du_link = DF_REF_CHAIN (def); du_link; du_link = du_link->next)
- {
- struct ref *use = du_link->ref;
- struct df_link *ud_link;
-
- /* Only worry about the uses within BLOCKS. For example,
- consider a register defined within a loop that is live at the
- loop exits. */
- if (bitmap_bit_p (blocks, DF_REF_BBNO (use)))
- {
- /* Follow use-def chain to check all the defs for this use. */
- for (ud_link = DF_REF_CHAIN (use); ud_link; ud_link = ud_link->next)
- if (ud_link->ref != def)
- return 0;
- }
- }
- return 1;
-}
-
-
-/* Return nonzero if all the defs of INSN within BB dominate all the
-   corresponding uses.  */
-int
-df_insn_dominates_uses_p (struct df *df, basic_block bb ATTRIBUTE_UNUSED,
- rtx insn, bitmap blocks)
-{
- unsigned int uid;
- struct df_link *link;
-
- uid = INSN_UID (insn);
-
- for (link = df->insns[uid].defs; link; link = link->next)
- {
- struct ref *def = link->ref;
-
- /* Only consider the defs within BLOCKS. */
- if (bitmap_bit_p (blocks, DF_REF_BBNO (def))
- && ! df_def_dominates_uses_p (df, def, blocks))
- return 0;
- }
- return 1;
-}
-
-
-/* Return the basic block that REG is referenced in, or NULL if it is
-   referenced in multiple basic blocks.  */
-basic_block
-df_regno_bb (struct df *df, unsigned int regno)
-{
- struct df_link *defs = df->regs[regno].defs;
- struct df_link *uses = df->regs[regno].uses;
- struct ref *def = defs ? defs->ref : 0;
- struct ref *use = uses ? uses->ref : 0;
- basic_block bb_def = def ? DF_REF_BB (def) : 0;
- basic_block bb_use = use ? DF_REF_BB (use) : 0;
-
-  /* Compare blocks of first def and last use.  ???? FIXME.  What if
-     the reg-def and reg-use lists are not correctly ordered?  */
- return bb_def == bb_use ? bb_def : 0;
-}
-
-
-/* Return nonzero if REG is used in multiple basic blocks.  */
-int
-df_reg_global_p (struct df *df, rtx reg)
-{
- return df_regno_bb (df, REGNO (reg)) != 0;
-}
-
-
-/* Return total lifetime (in insns) of REG. */
-int
-df_reg_lifetime (struct df *df, rtx reg)
-{
- return df->regs[REGNO (reg)].lifetime;
-}
-
-
-/* Return nonzero if REG is live at the start of BB.  */
-int
-df_bb_reg_live_start_p (struct df *df, basic_block bb, rtx reg)
-{
- struct bb_info *bb_info = DF_BB_INFO (df, bb);
-
- gcc_assert (bb_info->lr_in);
-
- return bitmap_bit_p (bb_info->lr_in, REGNO (reg));
-}
-
-
-/* Return nonzero if REG is live at the end of BB.  */
-int
-df_bb_reg_live_end_p (struct df *df, basic_block bb, rtx reg)
-{
- struct bb_info *bb_info = DF_BB_INFO (df, bb);
-
-  gcc_assert (bb_info->lr_out);
-
- return bitmap_bit_p (bb_info->lr_out, REGNO (reg));
-}
-
-
-/* Return -1 if the life of REG1 ends before the life of REG2 begins,
-   1 if the life of REG1 begins after the life of REG2 ends, or 0 if
-   the lives overlap.  */
-int
-df_bb_regs_lives_compare (struct df *df, basic_block bb, rtx reg1, rtx reg2)
-{
- unsigned int regno1 = REGNO (reg1);
- unsigned int regno2 = REGNO (reg2);
- struct ref *def1;
- struct ref *use1;
- struct ref *def2;
- struct ref *use2;
-
-
- /* The regs must be local to BB. */
- gcc_assert (df_regno_bb (df, regno1) == bb
- && df_regno_bb (df, regno2) == bb);
-
- def2 = df_bb_regno_first_def_find (df, bb, regno2);
- use1 = df_bb_regno_last_use_find (df, bb, regno1);
-
- if (DF_INSN_LUID (df, DF_REF_INSN (def2))
- > DF_INSN_LUID (df, DF_REF_INSN (use1)))
- return -1;
-
- def1 = df_bb_regno_first_def_find (df, bb, regno1);
- use2 = df_bb_regno_last_use_find (df, bb, regno2);
-
- if (DF_INSN_LUID (df, DF_REF_INSN (def1))
- > DF_INSN_LUID (df, DF_REF_INSN (use2)))
- return 1;
-
- return 0;
-}
-
-
-/* Return true if the definition DEF, which is in the same basic
-   block as USE, is available at USE.  Note that DEF may be dead, in
-   which case using it will extend its live range.  */
-bool
-df_local_def_available_p (struct df *df, struct ref *def, struct ref *use)
-{
- struct df_link *link;
- int def_luid = DF_INSN_LUID (df, DF_REF_INSN (def));
- int in_bb = 0;
- unsigned int regno = REGNO (def->reg);
- basic_block bb;
-
- /* The regs must be local to BB. */
- gcc_assert (DF_REF_BB (def) == DF_REF_BB (use));
- bb = DF_REF_BB (def);
-
- /* This assumes that the reg-def list is ordered such that for any
- BB, the first def is found first. However, since the BBs are not
- ordered, the first def in the chain is not necessarily the first
- def in the function. */
- for (link = df->regs[regno].defs; link; link = link->next)
- {
- struct ref *this_def = link->ref;
- if (DF_REF_BB (this_def) == bb)
- {
- int this_luid = DF_INSN_LUID (df, DF_REF_INSN (this_def));
- /* Do nothing with defs coming before DEF. */
- if (this_luid > def_luid)
- return this_luid > DF_INSN_LUID (df, DF_REF_INSN (use));
-
- in_bb = 1;
- }
- else if (in_bb)
- /* DEF was the last in its basic block. */
- return 1;
- }
-
- /* DEF was the last in the function. */
- return 1;
-}
-
-
-/* Return last use of REGNO within BB. */
-struct ref *
-df_bb_regno_last_use_find (struct df *df, basic_block bb, unsigned int regno)
-{
- struct df_link *link;
-
- /* This assumes that the reg-use list is ordered such that for any
- BB, the last use is found first. However, since the BBs are not
- ordered, the first use in the chain is not necessarily the last
- use in the function. */
- for (link = df->regs[regno].uses; link; link = link->next)
- {
- struct ref *use = link->ref;
-
- if (DF_REF_BB (use) == bb)
- return use;
- }
- return 0;
-}
-
-
-/* Return first def of REGNO within BB. */
-struct ref *
-df_bb_regno_first_def_find (struct df *df, basic_block bb, unsigned int regno)
-{
- struct df_link *link;
-
- /* This assumes that the reg-def list is ordered such that for any
- BB, the first def is found first. However, since the BBs are not
- ordered, the first def in the chain is not necessarily the first
- def in the function. */
- for (link = df->regs[regno].defs; link; link = link->next)
- {
- struct ref *def = link->ref;
-
- if (DF_REF_BB (def) == bb)
- return def;
- }
- return 0;
-}
-
-/* Return last def of REGNO within BB. */
-struct ref *
-df_bb_regno_last_def_find (struct df *df, basic_block bb, unsigned int regno)
-{
- struct df_link *link;
- struct ref *last_def = NULL;
- int in_bb = 0;
-
- /* This assumes that the reg-def list is ordered such that for any
- BB, the first def is found first. However, since the BBs are not
- ordered, the first def in the chain is not necessarily the first
- def in the function. */
- for (link = df->regs[regno].defs; link; link = link->next)
- {
-      struct ref *def = link->ref;
-      /* A def within the desired block.  */
-      if (DF_REF_BB (def) == bb)
-	{
-	  in_bb = 1;
-	  last_def = def;
-	}
-      /* Past the desired block; LAST_DEF holds its last def.  */
-      else if (in_bb)
-	return last_def;
- }
- return last_def;
-}
-
-/* Return last use of REGNO inside INSN within BB. */
-static struct ref *
-df_bb_insn_regno_last_use_find (struct df *df,
- basic_block bb ATTRIBUTE_UNUSED, rtx insn,
- unsigned int regno)
-{
- unsigned int uid;
- struct df_link *link;
-
- uid = INSN_UID (insn);
-
- for (link = df->insns[uid].uses; link; link = link->next)
- {
- struct ref *use = link->ref;
-
- if (DF_REF_REGNO (use) == regno)
- return use;
- }
-
- return 0;
-}
-
-
-/* Return first def of REGNO inside INSN within BB. */
-static struct ref *
-df_bb_insn_regno_first_def_find (struct df *df,
- basic_block bb ATTRIBUTE_UNUSED, rtx insn,
- unsigned int regno)
-{
- unsigned int uid;
- struct df_link *link;
-
- uid = INSN_UID (insn);
-
- for (link = df->insns[uid].defs; link; link = link->next)
- {
- struct ref *def = link->ref;
-
- if (DF_REF_REGNO (def) == regno)
- return def;
- }
-
- return 0;
-}
-
-
-/* Return the insn that uses REG if the BB contains only a single use
-   and a single def of REG.  */
-rtx
-df_bb_single_def_use_insn_find (struct df *df, basic_block bb, rtx insn, rtx reg)
-{
- struct ref *def;
- struct ref *use;
- struct df_link *du_link;
-
- def = df_bb_insn_regno_first_def_find (df, bb, insn, REGNO (reg));
-
- gcc_assert (def);
-
- du_link = DF_REF_CHAIN (def);
-
- if (! du_link)
- return NULL_RTX;
-
- use = du_link->ref;
-
- /* Check if def is dead. */
- if (! use)
- return NULL_RTX;
-
- /* Check for multiple uses. */
- if (du_link->next)
- return NULL_RTX;
-
- return DF_REF_INSN (use);
-}
-
-/* Functions for debugging/dumping dataflow information. */
-
-
-/* Dump a def-use or use-def chain for REF to FILE. */
-static void
-df_chain_dump (struct df_link *link, FILE *file)
-{
- fprintf (file, "{ ");
- for (; link; link = link->next)
- {
- fprintf (file, "%c%d ",
- DF_REF_REG_DEF_P (link->ref) ? 'd' : 'u',
- DF_REF_ID (link->ref));
- }
- fprintf (file, "}");
-}
-
-
-/* Dump a chain of refs with the associated regno. */
-static void
-df_chain_dump_regno (struct df_link *link, FILE *file)
-{
- fprintf (file, "{ ");
- for (; link; link = link->next)
- {
- fprintf (file, "%c%d(%d) ",
- DF_REF_REG_DEF_P (link->ref) ? 'd' : 'u',
- DF_REF_ID (link->ref),
- DF_REF_REGNO (link->ref));
- }
- fprintf (file, "}");
-}
-
-
-/* Dump dataflow info. */
-void
-df_dump (struct df *df, int flags, FILE *file)
-{
- unsigned int j;
- basic_block bb;
-
- if (! df || ! file)
- return;
-
- fprintf (file, "\nDataflow summary:\n");
- fprintf (file, "n_regs = %d, n_defs = %d, n_uses = %d, n_bbs = %d\n",
- df->n_regs, df->n_defs, df->n_uses, df->n_bbs);
-
- if (flags & DF_RD)
- {
- basic_block bb;
-
- fprintf (file, "Reaching defs:\n");
- FOR_EACH_BB (bb)
- {
- struct bb_info *bb_info = DF_BB_INFO (df, bb);
-
- if (! bb_info->rd_in)
- continue;
-
- fprintf (file, "bb %d in \t", bb->index);
- dump_bitmap (file, bb_info->rd_in);
- fprintf (file, "bb %d gen \t", bb->index);
- dump_bitmap (file, bb_info->rd_gen);
- fprintf (file, "bb %d kill\t", bb->index);
- dump_bitmap (file, bb_info->rd_kill);
- fprintf (file, "bb %d out \t", bb->index);
- dump_bitmap (file, bb_info->rd_out);
- }
- }
-
- if (flags & DF_UD_CHAIN)
- {
- fprintf (file, "Use-def chains:\n");
- for (j = 0; j < df->n_defs; j++)
- {
- if (df->defs[j])
- {
- fprintf (file, "d%d bb %d luid %d insn %d reg %d ",
- j, DF_REF_BBNO (df->defs[j]),
- DF_INSN_LUID (df, DF_REF_INSN (df->defs[j])),
- DF_REF_INSN_UID (df->defs[j]),
- DF_REF_REGNO (df->defs[j]));
- if (df->defs[j]->flags & DF_REF_READ_WRITE)
- fprintf (file, "read/write ");
- df_chain_dump (DF_REF_CHAIN (df->defs[j]), file);
- fprintf (file, "\n");
- }
- }
- }
-
- if (flags & DF_RU)
- {
- fprintf (file, "Reaching uses:\n");
- FOR_EACH_BB (bb)
- {
- struct bb_info *bb_info = DF_BB_INFO (df, bb);
-
- if (! bb_info->ru_in)
- continue;
-
- fprintf (file, "bb %d in \t", bb->index);
- dump_bitmap (file, bb_info->ru_in);
- fprintf (file, "bb %d gen \t", bb->index);
- dump_bitmap (file, bb_info->ru_gen);
- fprintf (file, "bb %d kill\t", bb->index);
- dump_bitmap (file, bb_info->ru_kill);
- fprintf (file, "bb %d out \t", bb->index);
- dump_bitmap (file, bb_info->ru_out);
- }
- }
-
- if (flags & DF_DU_CHAIN)
- {
- fprintf (file, "Def-use chains:\n");
- for (j = 0; j < df->n_uses; j++)
- {
- if (df->uses[j])
- {
- fprintf (file, "u%d bb %d luid %d insn %d reg %d ",
- j, DF_REF_BBNO (df->uses[j]),
- DF_INSN_LUID (df, DF_REF_INSN (df->uses[j])),
- DF_REF_INSN_UID (df->uses[j]),
- DF_REF_REGNO (df->uses[j]));
- if (df->uses[j]->flags & DF_REF_READ_WRITE)
- fprintf (file, "read/write ");
- df_chain_dump (DF_REF_CHAIN (df->uses[j]), file);
- fprintf (file, "\n");
- }
- }
- }
-
- if (flags & DF_LR)
- {
- fprintf (file, "Live regs:\n");
- FOR_EACH_BB (bb)
- {
- struct bb_info *bb_info = DF_BB_INFO (df, bb);
-
- if (! bb_info->lr_in)
- continue;
-
- fprintf (file, "bb %d in \t", bb->index);
- dump_bitmap (file, bb_info->lr_in);
- fprintf (file, "bb %d use \t", bb->index);
- dump_bitmap (file, bb_info->lr_use);
- fprintf (file, "bb %d def \t", bb->index);
- dump_bitmap (file, bb_info->lr_def);
- fprintf (file, "bb %d out \t", bb->index);
- dump_bitmap (file, bb_info->lr_out);
- }
- }
-
- if (flags & (DF_REG_INFO | DF_RD_CHAIN | DF_RU_CHAIN))
- {
- struct reg_info *reg_info = df->regs;
-
- fprintf (file, "Register info:\n");
- for (j = 0; j < df->n_regs; j++)
- {
- if (((flags & DF_REG_INFO)
- && (reg_info[j].n_uses || reg_info[j].n_defs))
- || ((flags & DF_RD_CHAIN) && reg_info[j].defs)
- || ((flags & DF_RU_CHAIN) && reg_info[j].uses))
- {
- fprintf (file, "reg %d", j);
- if ((flags & DF_RD_CHAIN) && (flags & DF_RU_CHAIN))
- {
- basic_block bb = df_regno_bb (df, j);
-
- if (bb)
- fprintf (file, " bb %d", bb->index);
- else
- fprintf (file, " bb ?");
- }
- if (flags & DF_REG_INFO)
- {
- fprintf (file, " life %d", reg_info[j].lifetime);
- }
-
- if ((flags & DF_REG_INFO) || (flags & DF_RD_CHAIN))
- {
- fprintf (file, " defs ");
- if (flags & DF_REG_INFO)
- fprintf (file, "%d ", reg_info[j].n_defs);
- if (flags & DF_RD_CHAIN)
- df_chain_dump (reg_info[j].defs, file);
- }
-
- if ((flags & DF_REG_INFO) || (flags & DF_RU_CHAIN))
- {
- fprintf (file, " uses ");
- if (flags & DF_REG_INFO)
- fprintf (file, "%d ", reg_info[j].n_uses);
- if (flags & DF_RU_CHAIN)
- df_chain_dump (reg_info[j].uses, file);
- }
-
- fprintf (file, "\n");
- }
- }
- }
- fprintf (file, "\n");
-}
-
-
-void
-df_insn_debug (struct df *df, rtx insn, FILE *file)
-{
- unsigned int uid;
- int bbi;
-
- uid = INSN_UID (insn);
- if (uid >= df->insn_size)
- return;
-
- if (df->insns[uid].defs)
- bbi = DF_REF_BBNO (df->insns[uid].defs->ref);
- else if (df->insns[uid].uses)
- bbi = DF_REF_BBNO (df->insns[uid].uses->ref);
- else
- bbi = -1;
-
- fprintf (file, "insn %d bb %d luid %d defs ",
- uid, bbi, DF_INSN_LUID (df, insn));
- df_chain_dump (df->insns[uid].defs, file);
- fprintf (file, " uses ");
- df_chain_dump (df->insns[uid].uses, file);
- fprintf (file, "\n");
-}
-
-
-void
-df_insn_debug_regno (struct df *df, rtx insn, FILE *file)
-{
- unsigned int uid;
- int bbi;
-
- uid = INSN_UID (insn);
- if (uid >= df->insn_size)
- return;
-
- if (df->insns[uid].defs)
- bbi = DF_REF_BBNO (df->insns[uid].defs->ref);
- else if (df->insns[uid].uses)
- bbi = DF_REF_BBNO (df->insns[uid].uses->ref);
- else
- bbi = -1;
-
- fprintf (file, "insn %d bb %d luid %d defs ",
- uid, bbi, DF_INSN_LUID (df, insn));
- df_chain_dump_regno (df->insns[uid].defs, file);
- fprintf (file, " uses ");
- df_chain_dump_regno (df->insns[uid].uses, file);
- fprintf (file, "\n");
-}
-
-
-static void
-df_regno_debug (struct df *df, unsigned int regno, FILE *file)
-{
- if (regno >= df->reg_size)
- return;
-
- fprintf (file, "reg %d life %d defs ",
- regno, df->regs[regno].lifetime);
- df_chain_dump (df->regs[regno].defs, file);
- fprintf (file, " uses ");
- df_chain_dump (df->regs[regno].uses, file);
- fprintf (file, "\n");
-}
-
-
-static void
-df_ref_debug (struct df *df, struct ref *ref, FILE *file)
-{
- fprintf (file, "%c%d ",
- DF_REF_REG_DEF_P (ref) ? 'd' : 'u',
- DF_REF_ID (ref));
- fprintf (file, "reg %d bb %d luid %d insn %d chain ",
- DF_REF_REGNO (ref),
- DF_REF_BBNO (ref),
- DF_INSN_LUID (df, DF_REF_INSN (ref)),
- INSN_UID (DF_REF_INSN (ref)));
- df_chain_dump (DF_REF_CHAIN (ref), file);
- fprintf (file, "\n");
-}
-
-/* Functions for debugging from GDB. */
-
-void
-debug_df_insn (rtx insn)
-{
- df_insn_debug (ddf, insn, stderr);
- debug_rtx (insn);
-}
-
-
-void
-debug_df_reg (rtx reg)
-{
- df_regno_debug (ddf, REGNO (reg), stderr);
-}
-
-
-void
-debug_df_regno (unsigned int regno)
-{
- df_regno_debug (ddf, regno, stderr);
-}
-
-
-void
-debug_df_ref (struct ref *ref)
-{
- df_ref_debug (ddf, ref, stderr);
-}
-
-
-void
-debug_df_defno (unsigned int defno)
-{
- df_ref_debug (ddf, ddf->defs[defno], stderr);
-}
-
-
-void
-debug_df_useno (unsigned int defno)
-{
- df_ref_debug (ddf, ddf->uses[defno], stderr);
-}
-
-
-void
-debug_df_chain (struct df_link *link)
-{
- df_chain_dump (link, stderr);
- fputc ('\n', stderr);
-}
-
-
-/* Perform the set operation OP1 OP OP2, using set representation
-   REPR, storing the result in OP1.  */
-
-static void
-dataflow_set_a_op_b (enum set_representation repr,
- enum df_confluence_op op,
- void *op1, void *op2)
-{
- switch (repr)
- {
- case SR_SBITMAP:
- switch (op)
- {
- case DF_UNION:
- sbitmap_a_or_b (op1, op1, op2);
- break;
-
- case DF_INTERSECTION:
- sbitmap_a_and_b (op1, op1, op2);
- break;
-
- default:
- gcc_unreachable ();
- }
- break;
-
- case SR_BITMAP:
- switch (op)
- {
- case DF_UNION:
- bitmap_ior_into (op1, op2);
- break;
-
- case DF_INTERSECTION:
- bitmap_and_into (op1, op2);
- break;
-
- default:
- gcc_unreachable ();
- }
- break;
-
- default:
- gcc_unreachable ();
- }
-}
-
-static void
-dataflow_set_copy (enum set_representation repr, void *dest, void *src)
-{
- switch (repr)
- {
- case SR_SBITMAP:
- sbitmap_copy (dest, src);
- break;
-
- case SR_BITMAP:
- bitmap_copy (dest, src);
- break;
-
- default:
- gcc_unreachable ();
- }
-}
-
-/* Hybrid search algorithm from "Implementation Techniques for
- Efficient Data-Flow Analysis of Large Programs". */
-
-static void
-hybrid_search (basic_block bb, struct dataflow *dataflow,
- sbitmap visited, sbitmap pending, sbitmap considered)
-{
- int changed;
- int i = bb->index;
- edge e;
- edge_iterator ei;
-
- SET_BIT (visited, bb->index);
- gcc_assert (TEST_BIT (pending, bb->index));
- RESET_BIT (pending, i);
-
-#define HS(E_ANTI, E_ANTI_BB, E_ANTI_START_BB, IN_SET, \
- E, E_BB, E_START_BB, OUT_SET) \
- do \
- { \
- /* Calculate <conf_op> of predecessor_outs. */ \
- bitmap_zero (IN_SET[i]); \
- FOR_EACH_EDGE (e, ei, bb->E_ANTI) \
- { \
- if (e->E_ANTI_BB == E_ANTI_START_BB) \
- continue; \
- if (!TEST_BIT (considered, e->E_ANTI_BB->index)) \
- continue; \
- \
- dataflow_set_a_op_b (dataflow->repr, dataflow->conf_op, \
- IN_SET[i], \
- OUT_SET[e->E_ANTI_BB->index]); \
- } \
- \
- (*dataflow->transfun)(i, &changed, \
- dataflow->in[i], dataflow->out[i], \
- dataflow->gen[i], dataflow->kill[i], \
- dataflow->data); \
- \
- if (!changed) \
- break; \
- \
- FOR_EACH_EDGE (e, ei, bb->E) \
- { \
- if (e->E_BB == E_START_BB || e->E_BB->index == i) \
- continue; \
- \
- if (!TEST_BIT (considered, e->E_BB->index)) \
- continue; \
- \
- SET_BIT (pending, e->E_BB->index); \
- } \
- \
- FOR_EACH_EDGE (e, ei, bb->E) \
- { \
- if (e->E_BB == E_START_BB || e->E_BB->index == i) \
- continue; \
- \
- if (!TEST_BIT (considered, e->E_BB->index)) \
- continue; \
- \
- if (!TEST_BIT (visited, e->E_BB->index)) \
- hybrid_search (e->E_BB, dataflow, visited, pending, considered); \
- } \
- } while (0)
-
- if (dataflow->dir == DF_FORWARD)
- HS (preds, src, ENTRY_BLOCK_PTR, dataflow->in,
- succs, dest, EXIT_BLOCK_PTR, dataflow->out);
- else
- HS (succs, dest, EXIT_BLOCK_PTR, dataflow->out,
- preds, src, ENTRY_BLOCK_PTR, dataflow->in);
-}
-
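Written out for the forward direction, and using the locals of hybrid_search, the HS macro above amounts to the following (a de-macroed sketch; the final recursion loop over unvisited successors is elided):

    /* Confluence over incoming edges ...  */
    bitmap_zero (dataflow->in[i]);
    FOR_EACH_EDGE (e, ei, bb->preds)
      if (e->src != ENTRY_BLOCK_PTR
          && TEST_BIT (considered, e->src->index))
        dataflow_set_a_op_b (dataflow->repr, dataflow->conf_op,
                             dataflow->in[i],
                             dataflow->out[e->src->index]);

    /* ... one application of the transfer function ...  */
    (*dataflow->transfun) (i, &changed,
                           dataflow->in[i], dataflow->out[i],
                           dataflow->gen[i], dataflow->kill[i],
                           dataflow->data);

    /* ... and, if the solution changed, push the successors.  */
    if (changed)
      FOR_EACH_EDGE (e, ei, bb->succs)
        if (e->dest != EXIT_BLOCK_PTR && e->dest->index != i
            && TEST_BIT (considered, e->dest->index))
          SET_BIT (pending, e->dest->index);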
-/* This function will perform iterative bitvector dataflow described by
- DATAFLOW, producing the in and out sets. Only the part of the cfg
- induced by blocks in DATAFLOW->order is taken into account.
-
- For forward problems, you probably want to pass in rc_order. */
-
-void
-iterative_dataflow (struct dataflow *dataflow)
-{
- unsigned i, idx;
- sbitmap visited, pending, considered;
-
- pending = sbitmap_alloc (last_basic_block);
- visited = sbitmap_alloc (last_basic_block);
- considered = sbitmap_alloc (last_basic_block);
- sbitmap_zero (pending);
- sbitmap_zero (visited);
- sbitmap_zero (considered);
-
- for (i = 0; i < dataflow->n_blocks; i++)
- {
- idx = dataflow->order[i];
- SET_BIT (pending, idx);
- SET_BIT (considered, idx);
- if (dataflow->dir == DF_FORWARD)
- dataflow_set_copy (dataflow->repr,
- dataflow->out[idx], dataflow->gen[idx]);
- else
- dataflow_set_copy (dataflow->repr,
- dataflow->in[idx], dataflow->gen[idx]);
-    }
-
- while (1)
- {
- for (i = 0; i < dataflow->n_blocks; i++)
- {
- idx = dataflow->order[i];
-
- if (TEST_BIT (pending, idx) && !TEST_BIT (visited, idx))
- hybrid_search (BASIC_BLOCK (idx), dataflow,
- visited, pending, considered);
- }
-
- if (sbitmap_first_set_bit (pending) == -1)
- break;
-
- sbitmap_zero (visited);
- }
-
- sbitmap_free (pending);
- sbitmap_free (visited);
- sbitmap_free (considered);
-}
diff --git a/gcc/df.h b/gcc/df.h
index e2ad9774dfb..09a112762bf 100644
--- a/gcc/df.h
+++ b/gcc/df.h
@@ -1,8 +1,11 @@
/* Form lists of pseudo register references for autoinc optimization
for GNU compiler. This is part of flow optimization.
- Copyright (C) 1999, 2000, 2001, 2003, 2004, 2005
+ Copyright (C) 1999, 2000, 2001, 2003, 2004, 2005, 2006
Free Software Foundation, Inc.
- Contributed by Michael P. Hayes (m.hayes@elec.canterbury.ac.nz)
+ Originally contributed by Michael P. Hayes
+ (m.hayes@elec.canterbury.ac.nz, mhayes@redhat.com)
+ Major rewrite contributed by Danny Berlin (dberlin@dberlin.org)
+ and Kenneth Zadeck (zadeck@naturalbridge.com).
This file is part of GCC.
@@ -26,32 +29,152 @@ Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
#include "bitmap.h"
#include "basic-block.h"
+#include "alloc-pool.h"
+
+struct dataflow;
+struct df;
+struct df_problem;
+
+/* Data flow problems.  All problems must have a unique id here.  */
+/* Scanning is not really a dataflow problem, but it is useful to have
+ the basic block functions in the vector so that things get done in
+ a uniform manner. */
+#define DF_SCAN 0
+#define DF_RU 1 /* Reaching Uses. */
+#define DF_RD 2 /* Reaching Defs. */
+#define DF_LR 3 /* Live Registers. */
+#define DF_UR 4 /* Uninitialized Registers. */
+#define DF_UREC 5 /* Uninitialized Registers with Early Clobber. */
+#define DF_CHAIN 6 /* Def-Use and/or Use-Def Chains. */
+#define DF_RI 7 /* Register Info. */
+#define DF_LAST_PROBLEM_PLUS1 (DF_RI + 1)
+
+/* Flags that control the building of chains. */
+#define DF_DU_CHAIN 1 /* Build DU chains. */
+#define DF_UD_CHAIN 2 /* Build UD chains. */
-#define DF_RD 1 /* Reaching definitions. */
-#define DF_RU 2 /* Reaching uses. */
-#define DF_LR 4 /* Live registers. */
-#define DF_DU_CHAIN 8 /* Def-use chain. */
-#define DF_UD_CHAIN 16 /* Use-def chain. */
-#define DF_REG_INFO 32 /* Register info. */
-#define DF_RD_CHAIN 64 /* Reg-def chain. */
-#define DF_RU_CHAIN 128 /* Reg-use chain. */
-#define DF_ALL 255
-#define DF_HARD_REGS 1024 /* Mark hard registers. */
-#define DF_EQUIV_NOTES 2048 /* Mark uses present in EQUIV/EQUAL notes. */
-#define DF_SUBREGS 4096 /* Return subregs rather than the inner reg. */
-enum df_ref_type {DF_REF_REG_DEF, DF_REF_REG_USE, DF_REF_REG_MEM_LOAD,
- DF_REF_REG_MEM_STORE};
+/* Dataflow direction. */
+enum df_flow_dir
+ {
+ DF_NONE,
+ DF_FORWARD,
+ DF_BACKWARD
+ };
-#define DF_REF_TYPE_NAMES {"def", "use", "mem load", "mem store"}
+/* Prototypes for the function fields of a df_problem instance.  */
-/* Link on a def-use or use-def chain. */
-struct df_link
+/* Allocate the problem specific data. */
+typedef void (*df_alloc_function) (struct dataflow *, bitmap);
+
+/* Free the basic block info. Called from the block reordering code
+ to get rid of the blocks that have been squished down. */
+typedef void (*df_free_bb_function) (struct dataflow *, void *);
+
+/* Local compute function. */
+typedef void (*df_local_compute_function) (struct dataflow *, bitmap, bitmap);
+
+/* Init the solution specific data. */
+typedef void (*df_init_function) (struct dataflow *, bitmap);
+
+/* Iterative dataflow function. */
+typedef void (*df_dataflow_function) (struct dataflow *, bitmap, bitmap,
+ int *, int, bool);
+
+/* Confluence operator for blocks with 0 out (or in) edges. */
+typedef void (*df_confluence_function_0) (struct dataflow *, basic_block);
+
+/* Confluence operator for blocks with 1 or more out (or in) edges. */
+typedef void (*df_confluence_function_n) (struct dataflow *, edge);
+
+/* Transfer function for blocks. */
+typedef bool (*df_transfer_function) (struct dataflow *, int);
+
+/* Function to massage the information after the problem solving. */
+typedef void (*df_finalizer_function) (struct dataflow*, bitmap);
+
+/* Function to free all of the problem specific data structures.  */
+typedef void (*df_free_function) (struct dataflow *);
+
+/* Function to dump results to FILE. */
+typedef void (*df_dump_problem_function) (struct dataflow *, FILE *);
+
+/* The static description of a dataflow problem to solve.  See the
+   typedefs above for documentation of the function fields.  */
+
+struct df_problem {
+  /* The unique id of the problem.  This is used to index into
+     df->defined_problems to make accessing the problem data easy.  */
+ unsigned int id;
+ enum df_flow_dir dir; /* Dataflow direction. */
+ df_alloc_function alloc_fun;
+ df_free_bb_function free_bb_fun;
+ df_local_compute_function local_compute_fun;
+ df_init_function init_fun;
+ df_dataflow_function dataflow_fun;
+ df_confluence_function_0 con_fun_0;
+ df_confluence_function_n con_fun_n;
+ df_transfer_function trans_fun;
+ df_finalizer_function finalize_fun;
+ df_free_function free_fun;
+ df_dump_problem_function dump_fun;
+
+ /* A dataflow problem that must be solved before this problem can be
+ solved. */
+ struct df_problem *dependent_problem;
+};
+
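Under the new scheme, each problem is a static table of these callbacks. A hedged sketch of what an instance might look like; the field values are illustrative, patterned on the live-registers problem, and the callback names are assumptions here (the real instances live in df-problems.c):

    static struct df_problem problem_LR_sketch =
    {
      DF_LR,                    /* id */
      DF_BACKWARD,              /* dir: liveness flows backward.  */
      df_lr_alloc,              /* alloc_fun (assumed name) */
      df_lr_free_bb_info,       /* free_bb_fun (assumed name) */
      df_lr_local_compute,      /* local_compute_fun (assumed name) */
      df_lr_init,               /* init_fun (assumed name) */
      df_iterative_dataflow,    /* dataflow_fun */
      df_lr_confluence_0,       /* con_fun_0 (assumed name) */
      df_lr_confluence_n,       /* con_fun_n (assumed name) */
      df_lr_transfer_function,  /* trans_fun (assumed name) */
      NULL,                     /* finalize_fun */
      df_lr_free,               /* free_fun (assumed name) */
      df_lr_dump,               /* dump_fun (assumed name) */
      NULL                      /* dependent_problem */
    };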
+
+/* The specific instance of the problem to solve. */
+struct dataflow
{
- struct df_link *next;
- struct ref *ref;
+ struct df *df; /* Instance of df we are working in. */
+ struct df_problem *problem; /* The problem to be solved. */
+
+ /* Communication between iterative_dataflow and hybrid_search. */
+ sbitmap visited, pending, considered;
+
+ /* Array indexed by bb->index, that contains basic block problem and
+ solution specific information. */
+ void **block_info;
+ unsigned int block_info_size;
+
+ /* The pool to allocate the block_info from. */
+ alloc_pool block_pool;
+
+  /* Other problem specific data that is not on a per basic block
+     basis.  The structure is generally defined privately for the
+     problem.  The exception is the scanning problem, where it is
+     fully public.  */
+ void *problem_data;
};
+/* One of these structures is allocated for every insn. */
+struct df_insn_info
+{
+ struct df_ref *defs; /* Head of insn-def chain. */
+ struct df_ref *uses; /* Head of insn-use chain. */
+ /* ???? The following luid field should be considered private so that
+ we can change it on the fly to accommodate new insns? */
+ int luid; /* Logical UID. */
+ bool contains_asm; /* Contains an asm instruction. */
+};
+
+/* Two of these structures are allocated for every pseudo reg, one for
+ the uses and one for the defs. */
+struct df_reg_info
+{
+ struct df_ref *reg_chain; /* Head of reg-use or def chain. */
+ unsigned int begin; /* First def_index for this pseudo. */
+ unsigned int n_refs; /* Number of refs or defs for this pseudo. */
+};
+
+
+enum df_ref_type {DF_REF_REG_DEF, DF_REF_REG_USE, DF_REF_REG_MEM_LOAD,
+ DF_REF_REG_MEM_STORE};
+
+#define DF_REF_TYPE_NAMES {"def", "use", "mem load", "mem store"}
+
enum df_ref_flags
{
/* Read-modify-write refs generate both a use and a def and
@@ -62,129 +185,177 @@ enum df_ref_flags
/* This flag is set, if we stripped the subreg from the reference.
In this case we must make conservative guesses, at what the
outer mode was. */
- DF_REF_STRIPPED = 2
+ DF_REF_STRIPPED = 2,
+
+ /* If this flag is set, this is not a real definition/use, but an
+ artificial one created to model always live registers, eh uses, etc. */
+ DF_REF_ARTIFICIAL = 4,
+
+
+ /* If this flag is set for an artificial use or def, that ref
+ logically happens at the top of the block. If it is not set
+ for an artificial use or def, that ref logically happens at the
+ bottom of the block. This is never set for regular refs. */
+ DF_REF_AT_TOP = 8,
+
+ /* This flag is set if the use is inside a REG_EQUAL note. */
+ DF_REF_IN_NOTE = 16,
+
+ /* This flag is set if this ref is really a clobber, and not a def. */
+ DF_REF_CLOBBER = 32
};
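Since these flag values are distinct powers of two, they are combined and tested with bitwise operators; a one-line hypothetical check, using the DF_REF_FLAGS accessor defined below:

    /* Sketch: skip refs that are not real defs when scanning a chain.  */
    if (DF_REF_FLAGS (ref) & (DF_REF_ARTIFICIAL | DF_REF_CLOBBER))
      continue;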
/* Define a register reference structure. One of these is allocated
for every register reference (use or def). Note some register
references (e.g., post_inc, subreg) generate both a def and a use. */
-struct ref
+struct df_ref
{
rtx reg; /* The register referenced. */
- rtx insn; /* Insn containing ref. */
+ unsigned int regno; /* The register number referenced. */
+ basic_block bb; /* Basic block containing the instruction. */
+ rtx insn; /* Insn containing ref. NB: THIS MAY BE NULL. */
rtx *loc; /* The location of the reg. */
- struct df_link *chain; /* Head of def-use or use-def chain. */
- unsigned int id; /* Ref index. */
+ struct df_link *chain; /* Head of def-use, use-def or bi chain. */
+ unsigned int id; /* Location in table. */
enum df_ref_type type; /* Type of ref. */
enum df_ref_flags flags; /* Various flags. */
- void *data; /* The data assigned to it by user. */
-};
+ /* For each regno, there are two chains of refs, one for the uses
+ and one for the defs. These chains go through the refs themselves
+ rather than using an external structure. */
+ struct df_ref *next_reg; /* Next ref with same regno and type. */
+ struct df_ref *prev_reg; /* Prev ref with same regno and type. */
-/* One of these structures is allocated for every insn. */
-struct insn_info
-{
- struct df_link *defs; /* Head of insn-def chain. */
- struct df_link *uses; /* Head of insn-use chain. */
- /* ???? The following luid field should be considered private so that
- we can change it on the fly to accommodate new insns? */
- int luid; /* Logical UID. */
+ /* Each insn has two lists, one for the uses and one for the
+ defs. This is the next field in either of these chains. */
+ struct df_ref *next_ref;
+ void *data; /* The data assigned to it by user. */
};
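So each ref sits on two independent chains: the per-insn list threaded through next_ref and the per-register list threaded through next_reg. A hedged sketch of both traversals, assuming df, insn and regno r are in scope and using only the accessor macros declared later in this header:

    struct df_ref *use, *def;

    /* All uses in INSN, via the embedded per-insn chain.  */
    for (use = DF_INSN_USES (df, insn); use; use = DF_REF_NEXT_REF (use))
      if (DF_REF_REGNO (use) == r)
        break;

    /* All defs of regno R, via the embedded per-register chain.  */
    for (def = DF_REG_DEF_GET (df, r)->reg_chain;
         def;
         def = DF_REF_NEXT_REG (def))
      ;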
+/* There are two kinds of links: the chains threaded through the refs
+ themselves (next_reg/next_ref above) and these external links: */
-/* One of these structures is allocated for every reg. */
-struct reg_info
+/* This is used for def-use or use-def chains. */
+struct df_link
{
- struct df_link *defs; /* Head of reg-def chain. */
- struct df_link *uses; /* Head of reg-use chain. */
- int lifetime;
- int n_defs;
- int n_uses;
+ struct df_ref *ref;
+ struct df_link *next;
};
-
-/* One of these structures is allocated for every basic block. */
-struct bb_info
+/* Two of these structures are allocated, one for the uses and one for
+ the defs. */
+struct df_ref_info
{
- /* Reaching def bitmaps have def_id elements. */
- bitmap rd_kill;
- bitmap rd_gen;
- bitmap rd_in;
- bitmap rd_out;
- /* Reaching use bitmaps have use_id elements. */
- bitmap ru_kill;
- bitmap ru_gen;
- bitmap ru_in;
- bitmap ru_out;
- /* Live variable bitmaps have n_regs elements. */
- bitmap lr_def;
- bitmap lr_use;
- bitmap lr_in;
- bitmap lr_out;
- int rd_valid;
- int ru_valid;
- int lr_valid;
+ struct df_reg_info **regs; /* Array indexed by pseudo regno. */
+ unsigned int regs_size; /* Size of currently allocated regs table. */
+ unsigned int regs_inited; /* Number of regs with reg_infos allocated. */
+ struct df_ref **refs; /* Ref table, indexed by id. */
+ unsigned int refs_size; /* Size of currently allocated refs table. */
+ unsigned int bitmap_size; /* Number of refs seen. */
+
+ /* True if the refs table is organized so that the references for
+ each pseudo are contiguous. */
+ bool refs_organized;
+ /* True if new refs should be added to the table immediately; false
+ if they should be deferred until the table is next reorganized. */
+ bool add_refs_inline;
};
+
+/*----------------------------------------------------------------------------
+ Problem data for the scanning dataflow problem. Unlike the other
+ dataflow problems, the problem data for scanning is fully exposed and
+ used by owners of the problem.
+----------------------------------------------------------------------------*/
struct df
{
+
+#define DF_HARD_REGS 1 /* Mark hard registers. */
+#define DF_EQUIV_NOTES 2 /* Mark uses present in EQUIV/EQUAL notes. */
+#define DF_SUBREGS 4 /* Return subregs rather than the inner reg. */
+
int flags; /* Indicates what's recorded. */
- struct bb_info *bbs; /* Basic block table. */
- struct ref **defs; /* Def table, indexed by def_id. */
- struct ref **uses; /* Use table, indexed by use_id. */
- struct ref **reg_def_last; /* Indexed by regno. */
- struct reg_info *regs; /* Regs table, index by regno. */
- unsigned int reg_size; /* Size of regs table. */
- struct insn_info *insns; /* Insn table, indexed by insn UID. */
- unsigned int insn_size; /* Size of insn table. */
- unsigned int def_id; /* Next def ID. */
- unsigned int def_size; /* Size of def table. */
- unsigned int n_defs; /* Size of def bitmaps. */
- unsigned int use_id; /* Next use ID. */
- unsigned int use_size; /* Size of use table. */
- unsigned int n_uses; /* Size of use bitmaps. */
- unsigned int n_bbs; /* Number of basic blocks. */
- unsigned int n_regs; /* Number of regs. */
- unsigned int def_id_save; /* Saved next def ID. */
- unsigned int use_id_save; /* Saved next use ID. */
- bitmap insns_modified; /* Insns that (may) have changed. */
- bitmap bbs_modified; /* Blocks that (may) have changed. */
- bitmap all_blocks; /* All blocks in CFG. */
- int *dfs_order; /* DFS order -> block number. */
- int *rc_order; /* Reverse completion order -> block number. */
- int *rts_order; /* Reverse top sort order -> block number. */
+
+ /* The set of problems to be solved is stored in two arrays. In
+ PROBLEMS_IN_ORDER, the problems are stored in the order that they
+ are solved. This is an internally dense array that may have
+ nulls at the end of it. In PROBLEMS_BY_INDEX, the problem is
+ stored by the value in df_problem.id. These are used to access
+ the problem local data without having to search the first
+ array. */
+
+ struct dataflow *problems_in_order [DF_LAST_PROBLEM_PLUS1];
+ struct dataflow *problems_by_index [DF_LAST_PROBLEM_PLUS1];
+ int num_problems_defined;
+
+ /* Set after calls to df_scan_blocks, this contains all of the
+ blocks that higher level problems must rescan before solving the
+ dataflow equations. If this is NULL, blocks_to_analyze is
+ used. */
+ bitmap blocks_to_scan;
+
+ /* If not NULL, the subset of blocks of the program to be considered
+ for analysis. */
+ bitmap blocks_to_analyze;
+
+ /* The following information is really the problem data for the
+ scanning instance but it is used too often by the other problems
+ to keep getting it from there. */
+ struct df_ref_info def_info; /* Def info. */
+ struct df_ref_info use_info; /* Use info. */
+ struct df_insn_info **insns; /* Insn table, indexed by insn UID. */
+ unsigned int insns_size; /* Size of insn table. */
+ bitmap hardware_regs_used; /* The set of hardware registers used. */
+ bitmap exit_block_uses; /* The set of hardware registers used in exit block. */
};
+#define DF_SCAN_BB_INFO(DF, BB) (df_scan_get_bb_info((DF)->problems_by_index[DF_SCAN],(BB)->index))
+#define DF_RU_BB_INFO(DF, BB) (df_ru_get_bb_info((DF)->problems_by_index[DF_RU],(BB)->index))
+#define DF_RD_BB_INFO(DF, BB) (df_rd_get_bb_info((DF)->problems_by_index[DF_RD],(BB)->index))
+#define DF_LR_BB_INFO(DF, BB) (df_lr_get_bb_info((DF)->problems_by_index[DF_LR],(BB)->index))
+#define DF_UR_BB_INFO(DF, BB) (df_ur_get_bb_info((DF)->problems_by_index[DF_UR],(BB)->index))
+#define DF_UREC_BB_INFO(DF, BB) (df_urec_get_bb_info((DF)->problems_by_index[DF_UREC],(BB)->index))
-struct df_map
-{
- rtx old;
- rtx new;
-};
+/* Most transformations that wish to use live register analysis will
+ use these macros. The DF_UPWARD_LIVE* macros below capture only
+ upward-exposed liveness and so are only half of the solution. */
+#define DF_LIVE_IN(DF, BB) (DF_UR_BB_INFO(DF, BB)->in)
+#define DF_LIVE_OUT(DF, BB) (DF_UR_BB_INFO(DF, BB)->out)
-#define DF_BB_INFO(REFS, BB) (&REFS->bbs[(BB)->index])
+/* Live in for register allocation also takes into account several other factors. */
+#define DF_RA_LIVE_IN(DF, BB) (DF_UREC_BB_INFO(DF, BB)->in)
+#define DF_RA_LIVE_OUT(DF, BB) (DF_UREC_BB_INFO(DF, BB)->out)
+
+/* These macros are currently used only by reg-stack, since it is not
+ tolerant of uninitialized variables. This intolerance should be
+ fixed because it causes other problems. */
+#define DF_UPWARD_LIVE_IN(DF, BB) (DF_LR_BB_INFO(DF, BB)->in)
+#define DF_UPWARD_LIVE_OUT(DF, BB) (DF_LR_BB_INFO(DF, BB)->out)
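A hedged sketch of the intended consumer pattern for DF_LIVE_IN, assuming the UR problem pulls in anything it needs via dependent_problem and that bitmap_bit_p is the usual GCC bitmap predicate:

    struct df *df = df_init (DF_HARD_REGS);
    df_ur_add_problem (df);
    df_analyze (df);

    if (bitmap_bit_p (DF_LIVE_IN (df, bb), regno))
      {
        /* REGNO may be live on entry to BB.  */
      }

    df_finish (df);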
/* Macros to access the elements within the ref structure. */
+
#define DF_REF_REAL_REG(REF) (GET_CODE ((REF)->reg) == SUBREG \
? SUBREG_REG ((REF)->reg) : ((REF)->reg))
-#define DF_REF_REGNO(REF) REGNO (DF_REF_REAL_REG (REF))
+#define DF_REF_REGNO(REF) ((REF)->regno)
#define DF_REF_REAL_LOC(REF) (GET_CODE ((REF)->reg) == SUBREG \
? &SUBREG_REG ((REF)->reg) : ((REF)->loc))
#define DF_REF_REG(REF) ((REF)->reg)
#define DF_REF_LOC(REF) ((REF)->loc)
-#define DF_REF_BB(REF) (BLOCK_FOR_INSN ((REF)->insn))
-#define DF_REF_BBNO(REF) (BLOCK_FOR_INSN ((REF)->insn)->index)
+#define DF_REF_BB(REF) ((REF)->bb)
+#define DF_REF_BBNO(REF) (DF_REF_BB (REF)->index)
#define DF_REF_INSN(REF) ((REF)->insn)
#define DF_REF_INSN_UID(REF) (INSN_UID ((REF)->insn))
#define DF_REF_TYPE(REF) ((REF)->type)
#define DF_REF_CHAIN(REF) ((REF)->chain)
#define DF_REF_ID(REF) ((REF)->id)
#define DF_REF_FLAGS(REF) ((REF)->flags)
+#define DF_REF_NEXT_REG(REF) ((REF)->next_reg)
+#define DF_REF_PREV_REG(REF) ((REF)->prev_reg)
+#define DF_REF_NEXT_REF(REF) ((REF)->next_ref)
#define DF_REF_DATA(REF) ((REF)->data)
/* Macros to determine the reference type. */
@@ -196,174 +367,213 @@ struct df_map
#define DF_REF_REG_MEM_P(REF) (DF_REF_REG_MEM_STORE_P (REF) \
|| DF_REF_REG_MEM_LOAD_P (REF))
+/* Macros to get the refs out of def_info or use_info refs table. */
+#define DF_DEFS_SIZE(DF) ((DF)->def_info.bitmap_size)
+#define DF_DEFS_GET(DF,ID) ((DF)->def_info.refs[(ID)])
+#define DF_DEFS_SET(DF,ID,VAL) ((DF)->def_info.refs[(ID)]=(VAL))
+#define DF_USES_SIZE(DF) ((DF)->use_info.bitmap_size)
+#define DF_USES_GET(DF,ID) ((DF)->use_info.refs[(ID)])
+#define DF_USES_SET(DF,ID,VAL) ((DF)->use_info.refs[(ID)]=(VAL))
+
+/* Macros to access the register information from scan dataflow record. */
+
+#define DF_REG_SIZE(DF) ((DF)->def_info.regs_size)
+#define DF_REG_DEF_GET(DF, REG) ((DF)->def_info.regs[(REG)])
+#define DF_REG_DEF_SET(DF, REG, VAL) ((DF)->def_info.regs[(REG)]=(VAL))
+#define DF_REG_USE_GET(DF, REG) ((DF)->use_info.regs[(REG)])
+#define DF_REG_USE_SET(DF, REG, VAL) ((DF)->use_info.regs[(REG)]=(VAL))
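These accessors replace direct indexing of the old def/use tables; the rewritten web.c at the end of this patch uses exactly this shape. A condensed sketch (entries can be NULL until df_reorganize_refs compacts the table, so they are checked first; visit_def is a placeholder):

    unsigned int i;
    for (i = 0; i < DF_DEFS_SIZE (df); i++)
      {
        struct df_ref *def = DF_DEFS_GET (df, i);
        if (def)
          visit_def (DF_REF_REGNO (def), def);
      }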
/* Macros to access the elements within the reg_info structure table. */
#define DF_REGNO_FIRST_DEF(DF, REGNUM) \
-((DF)->regs[REGNUM].defs ? (DF)->regs[REGNUM].defs->ref : 0)
+(DF_REG_DEF_GET(DF, REGNUM) ? DF_REG_DEF_GET(DF, REGNUM) : 0)
#define DF_REGNO_LAST_USE(DF, REGNUM) \
-((DF)->regs[REGNUM].uses ? (DF)->regs[REGNUM].uses->ref : 0)
-
-#define DF_REGNO_FIRST_BB(DF, REGNUM) \
-((DF)->regs[REGNUM].defs ? DF_REF_BB ((DF)->regs[REGNUM].defs->ref) : 0)
-#define DF_REGNO_LAST_BB(DF, REGNUM) \
-((DF)->regs[REGNUM].uses ? DF_REF_BB ((DF)->regs[REGNUM].uses->ref) : 0)
-
+(DF_REG_USE_GET(DF, REGNUM) ? DF_REG_USE_GET(DF, REGNUM) : 0)
/* Macros to access the elements within the insn_info structure table. */
-#define DF_INSN_LUID(DF, INSN) ((DF)->insns[INSN_UID (INSN)].luid)
-#define DF_INSN_DEFS(DF, INSN) ((DF)->insns[INSN_UID (INSN)].defs)
-#define DF_INSN_USES(DF, INSN) ((DF)->insns[INSN_UID (INSN)].uses)
-
-
-/* Functions to build and analyze dataflow information. */
-
-extern struct df *df_init (void);
-
-extern int df_analyze (struct df *, bitmap, int);
-extern void df_analyze_subcfg (struct df *, bitmap, int);
-
-extern void df_finish (struct df *);
-
-extern void df_dump (struct df *, int, FILE *);
-
-
-/* Functions to modify insns. */
-
-extern bool df_insn_modified_p (struct df *, rtx);
-
-extern void df_insn_modify (struct df *, basic_block, rtx);
-
-extern rtx df_insn_delete (struct df *, basic_block, rtx);
-
-extern rtx df_pattern_emit_before (struct df *, rtx, basic_block, rtx);
-
-extern rtx df_jump_pattern_emit_after (struct df *, rtx, basic_block, rtx);
-
-extern rtx df_pattern_emit_after (struct df *, rtx, basic_block, rtx);
+#define DF_INSN_SIZE(DF) ((DF)->insns_size)
+#define DF_INSN_GET(DF,INSN) ((DF)->insns[(INSN_UID(INSN))])
+#define DF_INSN_SET(DF,INSN,VAL) ((DF)->insns[(INSN_UID (INSN))]=(VAL))
+#define DF_INSN_CONTAINS_ASM(DF, INSN) (DF_INSN_GET(DF,INSN)->contains_asm)
+#define DF_INSN_LUID(DF, INSN) (DF_INSN_GET(DF,INSN)->luid)
+#define DF_INSN_DEFS(DF, INSN) (DF_INSN_GET(DF,INSN)->defs)
+#define DF_INSN_USES(DF, INSN) (DF_INSN_GET(DF,INSN)->uses)
-extern rtx df_insn_move_before (struct df *, basic_block, rtx, basic_block,
- rtx);
+#define DF_INSN_UID_GET(DF,UID) ((DF)->insns[(UID)])
+#define DF_INSN_UID_LUID(DF, INSN) (DF_INSN_UID_GET(DF,INSN)->luid)
+#define DF_INSN_UID_DEFS(DF, INSN) (DF_INSN_UID_GET(DF,INSN)->defs)
+#define DF_INSN_UID_USES(DF, INSN) (DF_INSN_UID_GET(DF,INSN)->uses)
-extern int df_reg_replace (struct df *, bitmap, rtx, rtx);
+/* This is a bitmap copy of regs_invalidated_by_call so that we can
+ easily add it into bitmaps, etc. */
-extern int df_ref_reg_replace (struct df *, struct ref *, rtx, rtx);
+extern bitmap df_invalidated_by_call;
-extern int df_ref_remove (struct df *, struct ref *);
+/* Initialize ur_in and ur_out as if all hard registers were partially
+ available. */
-extern int df_insn_mem_replace (struct df *, basic_block, rtx, rtx, rtx);
+extern bitmap df_all_hard_regs;
-extern struct ref *df_bb_def_use_swap (struct df *, basic_block, rtx, rtx,
- unsigned int);
+/* The way that registers are processed, especially hard registers,
+ changes as the compilation proceeds. These states are passed to
+ df_set_state to control this processing. */
+#define DF_SCAN_INITIAL 1 /* Processing from beginning of rtl to
+ global-alloc. */
+#define DF_SCAN_GLOBAL 2 /* Processing before global
+ allocation. */
+#define DF_SCAN_POST_ALLOC 4 /* Processing after register
+ allocation. */
+extern int df_state; /* Indicates where we are in the compilation. */
-/* Functions to query dataflow information. */
-extern basic_block df_regno_bb (struct df *, unsigned int);
-
-extern int df_reg_lifetime (struct df *, rtx);
-
-extern int df_reg_global_p (struct df *, rtx);
-
-extern int df_insn_regno_def_p (struct df *, basic_block, rtx, unsigned int);
-
-extern int df_insn_dominates_all_uses_p (struct df *, basic_block, rtx);
-
-extern int df_insn_dominates_uses_p (struct df *, basic_block, rtx, bitmap);
+/* One of these structures is allocated for every basic block. */
+struct df_scan_bb_info
+{
+ /* Defs at the start of a basic block that is the target of an
+ exception edge. */
+ struct df_ref *artificial_defs;
-extern int df_bb_reg_live_start_p (struct df *, basic_block, rtx);
+ /* Uses of hard registers that are live at every block. */
+ struct df_ref *artificial_uses;
+};
-extern int df_bb_reg_live_end_p (struct df *, basic_block, rtx);
-extern int df_bb_regs_lives_compare (struct df *, basic_block, rtx, rtx);
+/* Reaching uses. */
+struct df_ru_bb_info
+{
+ bitmap kill;
+ bitmap sparse_kill;
+ bitmap gen;
+ bitmap in;
+ bitmap out;
+};
-extern bool df_local_def_available_p (struct df *, struct ref *, struct ref *);
-extern rtx df_bb_single_def_use_insn_find (struct df *, basic_block, rtx,
- rtx);
-extern struct ref *df_bb_regno_last_use_find (struct df *, basic_block, unsigned int);
+/* Reaching definitions. */
+struct df_rd_bb_info
+{
+ bitmap kill;
+ bitmap sparse_kill;
+ bitmap gen;
+ bitmap in;
+ bitmap out;
+};
-extern struct ref *df_bb_regno_first_def_find (struct df *, basic_block, unsigned int);
-extern struct ref *df_bb_regno_last_def_find (struct df *, basic_block, unsigned int);
+/* Live registers. */
+struct df_lr_bb_info
+{
+ bitmap def;
+ bitmap use;
+ bitmap in;
+ bitmap out;
+};
-extern struct ref *df_find_def (struct df *, rtx, rtx);
-extern struct ref *df_find_use (struct df *, rtx, rtx);
+/* Uninitialized registers. */
+struct df_ur_bb_info
+{
+ bitmap kill;
+ bitmap gen;
+ bitmap in;
+ bitmap out;
+};
-extern int df_reg_used (struct df *, rtx, rtx);
+/* Uninitialized registers, augmented with early-clobber information
+ for the register allocator. */
+struct df_urec_bb_info
+{
+ bitmap earlyclobber;
+ bitmap kill;
+ bitmap gen;
+ bitmap in;
+ bitmap out;
+};
-/* Functions for debugging from GDB. */
+#define df_finish(df) do { df_finish1 (df); df = NULL; } while (0)
+
+/* Functions defined in df-core.c. */
+
+extern struct df *df_init (int);
+extern struct dataflow *df_add_problem (struct df *, struct df_problem *);
+extern void df_set_blocks (struct df*, bitmap);
+extern void df_finish1 (struct df *);
+extern void df_analyze (struct df *);
+extern void df_compact_blocks (struct df *);
+extern void df_bb_replace (struct df *, int, basic_block);
+extern struct df_ref *df_bb_regno_last_use_find (struct df *, basic_block, unsigned int);
+extern struct df_ref *df_bb_regno_first_def_find (struct df *, basic_block, unsigned int);
+extern struct df_ref *df_bb_regno_last_def_find (struct df *, basic_block, unsigned int);
+extern bool df_insn_regno_def_p (struct df *, rtx, unsigned int);
+extern struct df_ref *df_find_def (struct df *, rtx, rtx);
+extern bool df_reg_defined (struct df *, rtx, rtx);
+extern struct df_ref *df_find_use (struct df *, rtx, rtx);
+extern bool df_reg_used (struct df *, rtx, rtx);
+extern void df_iterative_dataflow (struct dataflow *, bitmap, bitmap, int *, int, bool);
+extern void df_dump (struct df *, FILE *);
+extern void df_chain_dump (struct df *, struct df_link *, FILE *);
+extern void df_refs_chain_dump (struct df *, struct df_ref *, bool, FILE *);
+extern void df_regs_chain_dump (struct df *, struct df_ref *, FILE *);
+extern void df_insn_debug (struct df *, rtx, bool, FILE *);
+extern void df_insn_debug_regno (struct df *, rtx, FILE *);
+extern void df_regno_debug (struct df *, unsigned int, FILE *);
+extern void df_ref_debug (struct df *, struct df_ref *, FILE *);
extern void debug_df_insn (rtx);
-
extern void debug_df_regno (unsigned int);
-
extern void debug_df_reg (rtx);
-
extern void debug_df_defno (unsigned int);
-
extern void debug_df_useno (unsigned int);
-
-extern void debug_df_ref (struct ref *);
-
+extern void debug_df_ref (struct df_ref *);
extern void debug_df_chain (struct df_link *);
+/* An instance of df that can be shared between passes. */
+extern struct df *shared_df;
+
+
+/* Functions defined in df-problems.c. */
+
+extern struct dataflow *df_get_dependent_problem (struct dataflow *);
+extern struct df_link *df_chain_create (struct dataflow *, struct df_ref *, struct df_ref *);
+extern void df_chain_unlink (struct dataflow *, struct df_ref *, struct df_link *);
+extern void df_chain_copy (struct dataflow *, struct df_ref *, struct df_link *);
+extern bitmap df_get_live_in (struct df *, basic_block);
+extern bitmap df_get_live_out (struct df *, basic_block);
+extern void df_grow_bb_info (struct dataflow *);
+extern void df_chain_dump (struct df *, struct df_link *, FILE *);
+extern void df_print_bb_index (basic_block bb, FILE *file);
+extern struct dataflow *df_ru_add_problem (struct df *);
+extern struct df_ru_bb_info *df_ru_get_bb_info (struct dataflow *, unsigned int);
+extern struct dataflow *df_rd_add_problem (struct df *);
+extern struct df_rd_bb_info *df_rd_get_bb_info (struct dataflow *, unsigned int);
+extern struct dataflow *df_lr_add_problem (struct df *);
+extern struct df_lr_bb_info *df_lr_get_bb_info (struct dataflow *, unsigned int);
+extern struct dataflow *df_ur_add_problem (struct df *);
+extern struct df_ur_bb_info *df_ur_get_bb_info (struct dataflow *, unsigned int);
+extern struct dataflow *df_urec_add_problem (struct df *);
+extern struct df_urec_bb_info *df_urec_get_bb_info (struct dataflow *, unsigned int);
+extern struct dataflow *df_chain_add_problem (struct df *, int flags);
+extern struct dataflow *df_ri_add_problem (struct df *);
+extern int df_reg_lifetime (struct df *, rtx reg);
+
+
+/* Functions defined in df-scan.c. */
+
+extern struct df_scan_bb_info *df_scan_get_bb_info (struct dataflow *, unsigned int);
+extern struct dataflow *df_scan_add_problem (struct df *);
+extern void df_rescan_blocks (struct df *, bitmap);
+extern struct df_ref *df_ref_create (struct df *, rtx, rtx *, rtx, basic_block, enum df_ref_type, enum df_ref_flags);
+extern struct df_ref *df_get_artificial_defs (struct df *, unsigned int);
+extern struct df_ref *df_get_artificial_uses (struct df *, unsigned int);
+extern void df_reg_chain_create (struct df_reg_info *, struct df_ref *);
+extern struct df_ref *df_reg_chain_unlink (struct dataflow *, struct df_ref *);
+extern void df_ref_remove (struct df *, struct df_ref *);
+extern void df_insn_refs_delete (struct dataflow *, rtx);
+extern void df_refs_delete (struct dataflow *, bitmap);
+extern void df_reorganize_refs (struct df_ref_info *);
+extern void df_set_state (int);
+extern void df_hard_reg_init (void);
+extern bool df_read_modify_subreg_p (rtx);
-extern void df_insn_debug (struct df *, rtx, FILE *);
-
-extern void df_insn_debug_regno (struct df *, rtx, FILE *);
-
-
-/* Meet over any path (UNION) or meet over all paths (INTERSECTION). */
-enum df_confluence_op
- {
- DF_UNION,
- DF_INTERSECTION
- };
-
-
-/* Dataflow direction. */
-enum df_flow_dir
- {
- DF_FORWARD,
- DF_BACKWARD
- };
-
-
-typedef void (*transfer_function) (int, int *, void *, void *,
- void *, void *, void *);
-
-/* The description of a dataflow problem to solve. */
-
-enum set_representation
-{
- SR_SBITMAP, /* Represent sets by bitmaps. */
- SR_BITMAP /* Represent sets by sbitmaps. */
-};
-
-struct dataflow
-{
- enum set_representation repr; /* The way the sets are represented. */
-
- /* The following arrays are indexed by block indices, so they must always
- be large enough even if we restrict ourselves just to a subset of cfg. */
- void **gen, **kill; /* Gen and kill sets. */
- void **in, **out; /* Results. */
-
- enum df_flow_dir dir; /* Dataflow direction. */
- enum df_confluence_op conf_op; /* Confluence operator. */
- unsigned n_blocks; /* Number of basic blocks in the
- order. */
- int *order; /* The list of basic blocks to work
- with, in the order they should
- be processed in. */
- transfer_function transfun; /* The transfer function. */
- void *data; /* Data used by the transfer
- function. */
-};
-
-extern void iterative_dataflow (struct dataflow *);
-extern bool read_modify_subreg_p (rtx);
#endif /* GCC_DF_H */
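The pass-side idiom that replaces the old df_analyze (df, blocks, flags) call is visible across the three callers updated below (loop-invariant.c, modulo-sched.c, web.c); condensed into one hedged sketch:

    struct df *df = df_init (DF_HARD_REGS | DF_EQUIV_NOTES);
    df_chain_add_problem (df, DF_UD_CHAIN); /* chains pull in their
                                               dependent problems.  */
    df_set_blocks (df, blocks);             /* optional: restrict to
                                               a sub-CFG.  */
    df_analyze (df);

    /* ... walk DF_INSN_USES / DF_REF_CHAIN and transform ...  */

    df_finish (df);                         /* also NULLs df.  */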
diff --git a/gcc/loop-invariant.c b/gcc/loop-invariant.c
index 8c3f2d23512..96d216e2672 100644
--- a/gcc/loop-invariant.c
+++ b/gcc/loop-invariant.c
@@ -1,5 +1,5 @@
/* RTL-level loop invariant motion.
- Copyright (C) 2004, 2005 Free Software Foundation, Inc.
+ Copyright (C) 2004, 2005, 2006 Free Software Foundation, Inc.
This file is part of GCC.
@@ -153,7 +153,7 @@ static VEC(invariant_p,heap) *invariants;
/* The dataflow object. */
-static struct df *df;
+static struct df *df = NULL;
/* Test for possibility of invariantness of X. */
@@ -226,10 +226,10 @@ check_maybe_invariant (rtx x)
invariant. */
static struct invariant *
-invariant_for_use (struct ref *use)
+invariant_for_use (struct df_ref *use)
{
struct df_link *defs;
- struct ref *def;
+ struct df_ref *def;
basic_block bb = BLOCK_FOR_INSN (use->insn), def_bb;
defs = DF_REF_CHAIN (use);
@@ -255,7 +255,7 @@ hash_invariant_expr_1 (rtx insn, rtx x)
const char *fmt;
hashval_t val = code;
int do_not_record_p;
- struct ref *use;
+ struct df_ref *use;
struct invariant *inv;
switch (code)
@@ -306,7 +306,7 @@ invariant_expr_equal_p (rtx insn1, rtx e1, rtx insn2, rtx e2)
enum rtx_code code = GET_CODE (e1);
int i, j;
const char *fmt;
- struct ref *use1, *use2;
+ struct df_ref *use1, *use2;
struct invariant *inv1 = NULL, *inv2 = NULL;
rtx sub1, sub2;
@@ -600,7 +600,8 @@ find_defs (struct loop *loop, basic_block *body)
for (i = 0; i < loop->num_nodes; i++)
bitmap_set_bit (blocks, body[i]->index);
- df_analyze_subcfg (df, blocks, DF_UD_CHAIN | DF_HARD_REGS | DF_EQUIV_NOTES);
+ df_set_blocks (df, blocks);
+ df_analyze (df);
BITMAP_FREE (blocks);
}
@@ -673,16 +674,14 @@ record_use (struct def *def, rtx *use, rtx insn)
static bool
check_dependencies (rtx insn, bitmap depends_on)
{
- struct df_link *uses, *defs;
- struct ref *use, *def;
+ struct df_link *defs;
+ struct df_ref *use, *def;
basic_block bb = BLOCK_FOR_INSN (insn), def_bb;
struct def *def_data;
struct invariant *inv;
- for (uses = DF_INSN_USES (df, insn); uses; uses = uses->next)
+ for (use = DF_INSN_GET (df, insn)->uses; use; use = use->next_ref)
{
- use = uses->ref;
-
defs = DF_REF_CHAIN (use);
if (!defs)
continue;
@@ -718,7 +717,7 @@ check_dependencies (rtx insn, bitmap depends_on)
static void
find_invariant_insn (rtx insn, bool always_reached, bool always_executed)
{
- struct ref *ref;
+ struct df_ref *ref;
struct def *def;
bitmap depends_on;
rtx set, dest;
@@ -781,13 +780,11 @@ find_invariant_insn (rtx insn, bool always_reached, bool always_executed)
static void
record_uses (rtx insn)
{
- struct df_link *uses;
- struct ref *use;
+ struct df_ref *use;
struct invariant *inv;
- for (uses = DF_INSN_USES (df, insn); uses; uses = uses->next)
+ for (use = DF_INSN_GET (df, insn)->uses; use; use = use->next_ref)
{
- use = uses->ref;
inv = invariant_for_use (use);
if (inv)
record_use (inv->def, DF_REF_LOC (use), DF_REF_INSN (use));
@@ -1025,6 +1022,7 @@ find_invariants_to_move (void)
{
unsigned i, regs_used, n_inv_uses, regs_needed = 0, new_regs;
struct invariant *inv = NULL;
+ unsigned int n_regs = DF_REG_SIZE (df);
if (!VEC_length (invariant_p, invariants))
return;
@@ -1037,7 +1035,7 @@ find_invariants_to_move (void)
here to stand for induction variables etc. that we do not detect. */
regs_used = 2;
- for (i = 0; i < df->n_regs; i++)
+ for (i = 0; i < n_regs; i++)
{
if (!DF_REGNO_FIRST_DEF (df, i) && DF_REGNO_LAST_USE (df, i))
{
@@ -1098,8 +1096,7 @@ move_invariant_reg (struct loop *loop, unsigned invno)
need to create a temporary register. */
set = single_set (inv->insn);
reg = gen_reg_rtx (GET_MODE (SET_DEST (set)));
- df_pattern_emit_after (df, gen_move_insn (SET_DEST (set), reg),
- BLOCK_FOR_INSN (inv->insn), inv->insn);
+ emit_insn_after (gen_move_insn (SET_DEST (set), reg), inv->insn);
/* If the SET_DEST of the invariant insn is a reg, we can just move
the insn out of the loop. Otherwise, we have to use gen_move_insn
@@ -1108,13 +1105,11 @@ move_invariant_reg (struct loop *loop, unsigned invno)
{
SET_DEST (set) = reg;
reorder_insns (inv->insn, inv->insn, BB_END (preheader));
- df_insn_modify (df, preheader, inv->insn);
}
else
{
- df_pattern_emit_after (df, gen_move_insn (reg, SET_SRC (set)),
- preheader, BB_END (preheader));
- df_insn_delete (df, BLOCK_FOR_INSN (inv->insn), inv->insn);
+ emit_insn_after (gen_move_insn (reg, SET_SRC (set)), BB_END (preheader));
+ delete_insn (inv->insn);
}
}
else
@@ -1122,9 +1117,8 @@ move_invariant_reg (struct loop *loop, unsigned invno)
move_invariant_reg (loop, repr->invno);
reg = repr->reg;
set = single_set (inv->insn);
- df_pattern_emit_after (df, gen_move_insn (SET_DEST (set), reg),
- BLOCK_FOR_INSN (inv->insn), inv->insn);
- df_insn_delete (df, BLOCK_FOR_INSN (inv->insn), inv->insn);
+ emit_insn_after (gen_move_insn (SET_DEST (set), reg), inv->insn);
+ delete_insn (inv->insn);
}
inv->reg = reg;
@@ -1135,10 +1129,7 @@ move_invariant_reg (struct loop *loop, unsigned invno)
if (inv->def)
{
for (use = inv->def->uses; use; use = use->next)
- {
- *use->pos = reg;
- df_insn_modify (df, BLOCK_FOR_INSN (use->insn), use->insn);
- }
+ *use->pos = reg;
}
}
@@ -1174,20 +1165,22 @@ free_inv_motion_data (void)
struct def *def;
struct invariant *inv;
- for (i = 0; i < df->n_defs; i++)
+ for (i = 0; i < DF_DEFS_SIZE (df); i++)
{
- if (!df->defs[i])
+ struct df_ref * ref = DF_DEFS_GET (df, i);
+ if (!ref)
continue;
- inv = DF_REF_DATA (df->defs[i]);
+ inv = DF_REF_DATA (ref);
if (!inv)
continue;
+
def = inv->def;
gcc_assert (def != NULL);
free_use_list (def->uses);
free (def);
- DF_REF_DATA (df->defs[i]) = NULL;
+ DF_REF_DATA (ref) = NULL;
}
for (i = 0; VEC_iterate (invariant_p, invariants, i, inv); i++)
@@ -1231,8 +1224,9 @@ move_loop_invariants (struct loops *loops)
struct loop *loop;
unsigned i;
- df = df_init ();
-
+ df = df_init (DF_HARD_REGS | DF_EQUIV_NOTES);
+ df_chain_add_problem (df, DF_UD_CHAIN);
+
/* Process the loops, innermost first. */
loop = loops->tree_root;
while (loop->inner)
diff --git a/gcc/modulo-sched.c b/gcc/modulo-sched.c
index ed06fc46620..a669bb87fdd 100644
--- a/gcc/modulo-sched.c
+++ b/gcc/modulo-sched.c
@@ -1,5 +1,5 @@
/* Swing Modulo Scheduling implementation.
- Copyright (C) 2004, 2005
+ Copyright (C) 2004, 2005, 2006
Free Software Foundation, Inc.
Contributed by Ayal Zaks and Mustafa Hagog <zaks,mustafa@il.ibm.com>
@@ -976,8 +976,11 @@ sms_schedule (FILE *dump_file)
sched_init (NULL);
/* Init Data Flow analysis, to be used in interloop dep calculation. */
- df = df_init ();
- df_analyze (df, 0, DF_ALL);
+ df = df_init (DF_HARD_REGS | DF_EQUIV_NOTES | DF_SUBREGS);
+ df_rd_add_problem (df);
+ df_ru_add_problem (df);
+ df_chain_add_problem (df, DF_DU_CHAIN | DF_UD_CHAIN);
+ df_analyze (df);
/* Allocate memory to hold the DDG array one entry for each loop.
We use loop->num as index into this array. */
@@ -1091,6 +1094,7 @@ sms_schedule (FILE *dump_file)
/* Release Data Flow analysis data structures. */
df_finish (df);
+ df = NULL;
/* We don't want to perform SMS on new loops - created by versioning. */
num_loops = loops->num;
@@ -2536,7 +2540,6 @@ rest_of_handle_sms (void)
{
#ifdef INSN_SCHEDULING
basic_block bb;
- sbitmap blocks;
/* We want to be able to create new pseudos. */
no_new_pseudos = 0;
@@ -2547,9 +2550,7 @@ rest_of_handle_sms (void)
/* Update the life information, because we add pseudos. */
max_regno = max_reg_num ();
allocate_reg_info (max_regno, FALSE, FALSE);
- blocks = sbitmap_alloc (last_basic_block);
- sbitmap_ones (blocks);
- update_life_info (blocks, UPDATE_LIFE_GLOBAL_RM_NOTES,
+ update_life_info (NULL, UPDATE_LIFE_GLOBAL_RM_NOTES,
(PROP_DEATH_NOTES
| PROP_REG_INFO
| PROP_KILL_DEAD_CODE
diff --git a/gcc/sched-deps.c b/gcc/sched-deps.c
index 9fa4fabd30c..4732529a7c5 100644
--- a/gcc/sched-deps.c
+++ b/gcc/sched-deps.c
@@ -1,7 +1,8 @@
/* Instruction scheduling pass. This file computes dependencies between
instructions.
Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998,
- 1999, 2000, 2001, 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
+ 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006
+ Free Software Foundation, Inc.
Contributed by Michael Tiemann (tiemann@cygnus.com) Enhanced by,
and currently maintained by, Jim Wilson (wilson@cygnus.com)
@@ -520,7 +521,7 @@ sched_analyze_1 (struct deps *deps, rtx x, rtx insn)
{
if (GET_CODE (dest) == STRICT_LOW_PART
|| GET_CODE (dest) == ZERO_EXTRACT
- || read_modify_subreg_p (dest))
+ || df_read_modify_subreg_p (dest))
{
/* These both read and modify the result. We must handle
them as writes to get proper dependencies for following
diff --git a/gcc/web.c b/gcc/web.c
index a52a17dfe9f..820288b7e51 100644
--- a/gcc/web.c
+++ b/gcc/web.c
@@ -1,6 +1,7 @@
/* Web construction code for GNU compiler.
Contributed by Jan Hubicka.
- Copyright (C) 2001, 2002, 2004 Free Software Foundation, Inc.
+ Copyright (C) 2001, 2002, 2004, 2006
+ Free Software Foundation, Inc.
This file is part of GCC.
@@ -71,10 +72,10 @@ struct web_entry
static struct web_entry *unionfind_root (struct web_entry *);
static void unionfind_union (struct web_entry *, struct web_entry *);
-static void union_defs (struct df *, struct ref *, struct web_entry *,
+static void union_defs (struct df *, struct df_ref *, struct web_entry *,
struct web_entry *);
-static rtx entry_register (struct web_entry *, struct ref *, char *);
-static void replace_ref (struct ref *, rtx);
+static rtx entry_register (struct web_entry *, struct df_ref *, char *);
+static void replace_ref (struct df_ref *, rtx);
/* Find the root of unionfind tree (the representative of set). */
@@ -110,13 +111,13 @@ unionfind_union (struct web_entry *first, struct web_entry *second)
register, union them. */
static void
-union_defs (struct df *df, struct ref *use, struct web_entry *def_entry,
+union_defs (struct df *df, struct df_ref *use, struct web_entry *def_entry,
struct web_entry *use_entry)
{
rtx insn = DF_REF_INSN (use);
struct df_link *link = DF_REF_CHAIN (use);
- struct df_link *use_link = DF_INSN_USES (df, insn);
- struct df_link *def_link = DF_INSN_DEFS (df, insn);
+ struct df_ref *use_link = DF_INSN_USES (df, insn);
+ struct df_ref *def_link = DF_INSN_DEFS (df, insn);
rtx set = single_set (insn);
/* Some instructions may use match_dup for their operands. In case the
@@ -126,11 +127,11 @@ union_defs (struct df *df, struct ref *use, struct web_entry *def_entry,
while (use_link)
{
- if (use != use_link->ref
- && DF_REF_REAL_REG (use) == DF_REF_REAL_REG (use_link->ref))
+ if (use != use_link
+ && DF_REF_REAL_REG (use) == DF_REF_REAL_REG (use_link))
unionfind_union (use_entry + DF_REF_ID (use),
- use_entry + DF_REF_ID (use_link->ref));
- use_link = use_link->next;
+ use_entry + DF_REF_ID (use_link));
+ use_link = use_link->next_ref;
}
/* Recognize trivial noop moves and attempt to keep them as noop.
@@ -143,10 +144,10 @@ union_defs (struct df *df, struct ref *use, struct web_entry *def_entry,
{
while (def_link)
{
- if (DF_REF_REAL_REG (use) == DF_REF_REAL_REG (def_link->ref))
+ if (DF_REF_REAL_REG (use) == DF_REF_REAL_REG (def_link))
unionfind_union (use_entry + DF_REF_ID (use),
- def_entry + DF_REF_ID (def_link->ref));
- def_link = def_link->next;
+ def_entry + DF_REF_ID (def_link));
+ def_link = def_link->next_ref;
}
}
while (link)
@@ -160,14 +161,14 @@ union_defs (struct df *df, struct ref *use, struct web_entry *def_entry,
register. Find it and union. */
if (use->flags & DF_REF_READ_WRITE)
{
- struct df_link *link = DF_INSN_DEFS (df, DF_REF_INSN (use));
+ struct df_ref *link = DF_INSN_DEFS (df, DF_REF_INSN (use));
while (link)
{
- if (DF_REF_REAL_REG (link->ref) == DF_REF_REAL_REG (use))
+ if (DF_REF_REAL_REG (link) == DF_REF_REAL_REG (use))
unionfind_union (use_entry + DF_REF_ID (use),
- def_entry + DF_REF_ID (link->ref));
- link = link->next;
+ def_entry + DF_REF_ID (link));
+ link = link->next_ref;
}
}
}
@@ -175,7 +176,7 @@ union_defs (struct df *df, struct ref *use, struct web_entry *def_entry,
/* Find the corresponding register for the given entry. */
static rtx
-entry_register (struct web_entry *entry, struct ref *ref, char *used)
+entry_register (struct web_entry *entry, struct df_ref *ref, char *used)
{
struct web_entry *root;
rtx reg, newreg;
@@ -217,7 +218,7 @@ entry_register (struct web_entry *entry, struct ref *ref, char *used)
/* Replace the reference by REG. */
static void
-replace_ref (struct ref *ref, rtx reg)
+replace_ref (struct df_ref *ref, rtx reg)
{
rtx oldreg = DF_REF_REAL_REG (ref);
rtx *loc = DF_REF_REAL_LOC (ref);
@@ -242,28 +243,31 @@ web_main (void)
int max = max_reg_num ();
char *used;
- df = df_init ();
- df_analyze (df, 0, DF_UD_CHAIN | DF_EQUIV_NOTES);
+ df = df_init (DF_EQUIV_NOTES);
+ df_chain_add_problem (df, DF_UD_CHAIN);
+ df_analyze (df);
+ df_reorganize_refs (&df->def_info);
+ df_reorganize_refs (&df->use_info);
- def_entry = xcalloc (df->n_defs, sizeof (struct web_entry));
- use_entry = xcalloc (df->n_uses, sizeof (struct web_entry));
+ def_entry = xcalloc (DF_DEFS_SIZE (df), sizeof (struct web_entry));
+ use_entry = xcalloc (DF_USES_SIZE (df), sizeof (struct web_entry));
used = xcalloc (max, sizeof (char));
if (dump_file)
- df_dump (df, DF_UD_CHAIN | DF_DU_CHAIN, dump_file);
+ df_dump (df, dump_file);
/* Produce the web. */
- for (i = 0; i < df->n_uses; i++)
- union_defs (df, df->uses[i], def_entry, use_entry);
+ for (i = 0; i < DF_USES_SIZE (df); i++)
+ union_defs (df, DF_USES_GET (df, i), def_entry, use_entry);
/* Update the instruction stream, allocating new registers for split pseudos
in progress. */
- for (i = 0; i < df->n_uses; i++)
- replace_ref (df->uses[i], entry_register (use_entry + i, df->uses[i],
- used));
- for (i = 0; i < df->n_defs; i++)
- replace_ref (df->defs[i], entry_register (def_entry + i, df->defs[i],
- used));
+ for (i = 0; i < DF_USES_SIZE (df); i++)
+ replace_ref (DF_USES_GET (df, i),
+ entry_register (use_entry + i, DF_USES_GET (df, i), used));
+ for (i = 0; i < DF_DEFS_SIZE (df); i++)
+ replace_ref (DF_DEFS_GET (df, i),
+ entry_register (def_entry + i, DF_DEFS_GET (df, i), used));
/* Dataflow information is corrupt here, but it can be easily updated
by creating new entries for new registers and updates or calling
@@ -272,6 +276,7 @@ web_main (void)
free (use_entry);
free (used);
df_finish (df);
+ df = NULL;
}
static bool