author     mkuvyrkov <mkuvyrkov@138bc75d-0d04-0410-961f-82ee72b054a4>  2006-03-16 05:27:03 +0000
committer  mkuvyrkov <mkuvyrkov@138bc75d-0d04-0410-961f-82ee72b054a4>  2006-03-16 05:27:03 +0000
commit     6a1cdb4db2389bca66bbe9c68d2452b4a36c64f8 (patch)
tree       89738976186be03d3235bbf23baa42f8eeb612ca
parent     e48970004af4707cf541fc319cb866638b29cb3a (diff)
2006-03-16 Maxim Kuvyrkov <mkuvyrkov@ispras.ru>
	* target.h (struct spec_info_def): New opaque declaration.
	(struct gcc_target.sched): New fields: adjust_cost_2, h_i_d_extended,
	speculate_insn, needs_block_p, gen_check,
	first_cycle_multipass_dfa_lookahead_guard_spec, set_sched_flags.
	* target-def.h (TARGET_SCHED_ADJUST_COST_2,
	TARGET_SCHED_H_I_D_EXTENDED, TARGET_SCHED_SPECULATE_INSN,
	TARGET_SCHED_NEEDS_BLOCK_P, TARGET_SCHED_GEN_CHECK,
	TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD_SPEC,
	TARGET_SCHED_SET_SCHED_FLAGS): New macros to initialize fields in
	gcc_target.sched.
	(TARGET_SCHED): Use new macros.
	* rtl.h (copy_DEPS_LIST_list): New prototype.
	* sched-int.h (struct sched_info): Change signature of new_ready field,
	adjust all initializations. New fields: add_remove_insn,
	begin_schedule_ready, add_block, advance_target_bb, fix_recovery_cfg,
	region_head_or_leaf_p.
	(struct spec_info_def): New structure declaration.
	(spec_info_t): New typedef.
	(struct haifa_insn_data): New fields: todo_spec, done_spec, check_spec,
	recovery_block, orig_pat.
	(glat_start, glat_end): New variable declarations.
	(TODO_SPEC, DONE_SPEC, CHECK_SPEC, RECOVERY_BLOCK, ORIG_PAT):
	New access macros.
	(enum SCHED_FLAGS): New constants: SCHED_RGN, SCHED_EBB,
	DETACH_LIFE_INFO, USE_GLAT.
	(enum SPEC_SCHED_FLAGS): New enumeration.
	(NOTE_NOT_BB_P): New macro.
	(extend_dependency_caches, xrecalloc, unlink_bb_notes, add_block,
	attach_life_info, debug_spec_status, check_reg_live): New functions.
	(get_block_head_tail): Change signature to get_ebb_head_tail, adjust
	all uses in ddg.c, modulo-sched.c, haifa-sched.c, sched-rgn.c,
	sched-ebb.c.
	(get_dep_weak, ds_merge): Prototype functions from sched-deps.c.
	* ddg.c (get_block_head_tail): Adjust all uses.
	* modulo-sched.c (get_block_head_tail): Adjust all uses.
	(sms_sched_info): Initialize new fields.
	(contributes_to_priority): Removed.
	* haifa-sched.c (params.h): New include.
	(get_block_head_tail): Adjust all uses.
	(ISSUE_POINTS): New macro.
	(glat_start, glat_end): New global variables.
	(spec_info_var, spec_info, added_recovery_block_p, nr_begin_data,
	nr_be_in_data, nr_begin_control, nr_be_in_control, bb_header,
	old_last_basic_block, before_recovery, current_sched_info_var,
	rgn_n_insns, luid): New static variables.
	(insn_cost1): New function. Move logic from insn_cost to here.
	(find_insn_reg_weight1): New function. Move logic from
	find_insn_reg_weight to here.
	(reemit_notes, move_insn, max_issue): Change signature.
	(move_insn1): Removed.
	(extend_h_i_d, extend_ready, extend_global, extend_all, init_h_i_d,
	extend_bb): New static functions to support extension of scheduler's
	data structures.
	(generate_recovery_code, process_insn_depend_be_in_spec,
	begin_speculative_block, add_to_speculative_block,
	init_before_recovery, create_recovery_block, create_check_block_twin,
	fix_recovery_deps): New static functions to support
	generation of recovery code.
	(fix_jump_move, find_fallthru_edge, dump_new_block_header,
	restore_bb_notes, move_block_after_check, move_succs): New static
	functions to support ebb scheduling.
	(init_glat, init_glat1, attach_life_info1, free_glat): New static
	functions to support handling of register live information.
	(associate_line_notes_with_blocks, change_pattern, speculate_insn,
	sched_remove_insn, clear_priorities, calc_priorities, bb_note,
	add_jump_dependencies): New static functions.
	(check_cfg, has_edge_p, check_sched_flags): New static functions for
	consistency checking.
	(debug_spec_status): New function to call from debugger.
	(priority): Added code to handle speculation checks.
	(rank_for_schedule): Added code to distinguish speculative instructions.
	(schedule_insn): Added code to handle speculation checks.
	(unlink_other_notes, rm_line_notes, restore_line_notes, rm_other_notes):
	Fixed to handle ebbs.
	(move_insn): Added code to handle ebb scheduling.
	(max_issue): Added code to use ISSUE_POINTS of instructions.
	(choose_ready): Added code to choose between speculative and
	non-speculative instructions.
	(schedule_block): Added code to handle ebb scheduling and scheduling of
	speculative instructions.
	(sched_init): Initialize new variables.
	(sched_finish): Free new variables. Print statistics.
	(try_ready): Added code to handle speculative instructions.
	* lists.c (copy_DEPS_LIST_list): New function.
	* sched-deps.c (extend_dependency_caches): New function. Move logic
	from create_dependency_caches to here.
	(get_dep_weak, ds_merge): Make global.
	* genattr.c (main): Code to output prototype for
	dfa_clear_single_insn_cache.
	* genautomata.c (DFA_CLEAR_SINGLE_INSN_CACHE_FUNC_NAME): New macro.
	(output_dfa_clean_insn_cache_func): Code to output
	dfa_clear_single_insn_cache function.
	* sched-ebb.c (target_n_insns): Remove. Adjust all users to use
	n_insns.
	(can_schedule_ready_p, fix_basic_block_boundaries, add_missing_bbs):
	Removed.
	(n_insns, dont_calc_deps, ebb_head, ebb_tail, last_bb):
	New static variables.
	(begin_schedule_ready, add_remove_insn, add_block1, advance_target_bb,
	fix_recovery_cfg, ebb_head_or_leaf_p): Implement hooks from
	struct sched_info.
	(ebb_sched_info): Initialize new fields.
	(get_block_head_tail): Adjust all uses.
	(compute_jump_reg_dependencies): Fixed to use glat_start.
	(schedule_ebb): Code to remove unreachable last block.
	(schedule_ebbs): Added code to update register live information.
	* sched-rgn.c (region_sched_info): Initialize new fields.
	(get_block_head_tail): Adjust all uses.
	(last_was_jump): Removed. Adjust users.
	(begin_schedule_ready, add_remove_insn, insn_points, extend_regions,
	add_block1, fix_recovery_cfg, advance_target_bb, region_head_or_leaf_p):
	Implement new hooks.
	(check_dead_notes1): New static function.
	(struct region): New fields: dont_calc_deps, has_real_ebb.
	(RGN_DONT_CALC_DEPS, RGN_HAS_REAL_EBB): New access macros.
	(BB_TO_BLOCK): Fixed to handle EBBs.
	(EBB_FIRST_BB, EBB_LAST_BB): New macros.
	(ebb_head): New static variable.
	(debug_regions, contributes_to_priority): Fixed to handle EBBs.
	(find_single_block_regions, find_rgns, find_more_rgns): Initialize
	new fields.
	(compute_dom_prob_ps): New assertion.
	(check_live_1, update_live_1): Fixed to work with glat_start instead of
	global_live_at_start.
	(init_ready_list): New assertions.
	(can_schedule_ready_p): Split update code to begin_schedule_ready.
	(new_ready): Add support for BEGIN_CONTROL speculation.
	(schedule_insns): Fixed code that updates register live information
	to handle EBBs.
	(schedule_region): Fixed to handle EBBs.
	(init_regions): Use extend_regions and check_dead_notes1.
	* params.def (PARAM_MAX_SCHED_INSN_CONFLICT_DELAY,
	PARAM_SCHED_SPEC_PROB_CUTOFF): New parameters.
	* doc/tm.texi (TARGET_SCHED_ADJUST_COST_2, TARGET_SCHED_H_I_D_EXTENDED,
	TARGET_SCHED_SPECULATE_INSN, TARGET_SCHED_NEEDS_BLOCK_P,
	TARGET_SCHED_GEN_CHECK,
	TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD_SPEC,
	TARGET_SCHED_SET_SCHED_FLAGS): Document.
	* doc/invoke.texi (max-sched-insn-conflict-delay,
	sched-spec-prob-cutoff): Document.

git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@112128 138bc75d-0d04-0410-961f-82ee72b054a4
-rw-r--r--  gcc/ChangeLog        143
-rw-r--r--  gcc/Makefile.in        3
-rw-r--r--  gcc/ddg.c              2
-rw-r--r--  gcc/doc/invoke.texi    9
-rw-r--r--  gcc/doc/tm.texi       68
-rw-r--r--  gcc/genattr.c          1
-rw-r--r--  gcc/genautomata.c     15
-rw-r--r--  gcc/haifa-sched.c   2447
-rw-r--r--  gcc/lists.c           16
-rw-r--r--  gcc/modulo-sched.c    20
-rw-r--r--  gcc/params.def        10
-rw-r--r--  gcc/rtl.h              1
-rw-r--r--  gcc/sched-deps.c      34
-rw-r--r--  gcc/sched-ebb.c      463
-rw-r--r--  gcc/sched-int.h      129
-rw-r--r--  gcc/sched-rgn.c      584
-rw-r--r--  gcc/target-def.h      17
-rw-r--r--  gcc/target.h          53
18 files changed, 3441 insertions, 574 deletions
diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index 826ea2784b7..b191bcffb60 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,5 +1,148 @@
2006-03-16 Maxim Kuvyrkov <mkuvyrkov@ispras.ru>
+ * target.h (struct spec_info_def): New opaque declaration.
+ (struct gcc_target.sched): New fields: adjust_cost_2, h_i_d_extended,
+ speculate_insn, needs_block_p, gen_check,
+ first_cycle_multipass_dfa_lookahead_guard_spec, set_sched_flags.
+ * target-def.h (TARGET_SCHED_ADJUST_COST_2,
+ TARGET_SCHED_H_I_D_EXTENDED, TARGET_SCHED_SPECULATE_INSN,
+ TARGET_SCHED_NEEDS_BLOCK_P, TARGET_SCHED_GEN_CHECK,
+ TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD_SPEC,
+ TARGET_SCHED_SET_SCHED_FLAGS): New macros to initialize fields in
+ gcc_target.sched.
+ (TARGET_SCHED): Use new macros.
+ * rtl.h (copy_DEPS_LIST_list): New prototype.
+ * sched-int.h (struct sched_info): Change signature of new_ready field,
+ adjust all initializations. New fields: add_remove_insn,
+ begin_schedule_ready, add_block, advance_target_bb, fix_recovery_cfg,
+ region_head_or_leaf_p.
+ (struct spec_info_def): New structure declaration.
+ (spec_info_t): New typedef.
+ (struct haifa_insn_data): New fields: todo_spec, done_spec, check_spec,
+ recovery_block, orig_pat.
+ (glat_start, glat_end): New variable declarations.
+ (TODO_SPEC, DONE_SPEC, CHECK_SPEC, RECOVERY_BLOCK, ORIG_PAT):
+ New access macros.
+ (enum SCHED_FLAGS): New constants: SCHED_RGN, SCHED_EBB,
+ DETACH_LIFE_INFO, USE_GLAT.
+ (enum SPEC_SCHED_FLAGS): New enumeration.
+ (NOTE_NOT_BB_P): New macro.
+ (extend_dependency_caches, xrecalloc, unlink_bb_notes, add_block,
+ attach_life_info, debug_spec_status, check_reg_live): New functions.
+ (get_block_head_tail): Change signature to get_ebb_head_tail, adjust
+ all uses in ddg.c, modulo-sched.c, haifa-sched.c, sched-rgn.c,
+ sched-ebb.c.
+ (get_dep_weak, ds_merge): Prototype functions from sched-deps.c.
+ * ddg.c (get_block_head_tail): Adjust all uses.
+ * modulo-sched.c (get_block_head_tail): Adjust all uses.
+ (sms_sched_info): Initialize new fields.
+ (contributes_to_priority): Removed.
+ * haifa-sched.c (params.h): New include.
+ (get_block_head_tail): Adjust all uses.
+ (ISSUE_POINTS): New macro.
+ (glat_start, glat_end): New global variables.
+ (spec_info_var, spec_info, added_recovery_block_p, nr_begin_data,
+ nr_be_in_data, nr_begin_control, nr_be_in_control, bb_header,
+ old_last_basic_block, before_recovery, current_sched_info_var,
+ rgn_n_insns, luid): New static variables.
+ (insn_cost1): New function. Move logic from insn_cost to here.
+ (find_insn_reg_weight1): New function. Move logic from
+ find_insn_reg_weight to here.
+ (reemit_notes, move_insn, max_issue): Change signature.
+ (move_insn1): Removed.
+ (extend_h_i_d, extend_ready, extend_global, extend_all, init_h_i_d,
+ extend_bb): New static functions to support extension of scheduler's
+ data structures.
+ (generate_recovery_code, process_insn_depend_be_in_spec,
+ begin_speculative_block, add_to_speculative_block,
+ init_before_recovery, create_recovery_block, create_check_block_twin,
+ fix_recovery_deps): New static functions to support
+ generation of recovery code.
+ (fix_jump_move, find_fallthru_edge, dump_new_block_header,
+ restore_bb_notes, move_block_after_check, move_succs): New static
+ functions to support ebb scheduling.
+ (init_glat, init_glat1, attach_life_info1, free_glat): New static
+ functions to support handling of register live information.
+ (associate_line_notes_with_blocks, change_pattern, speculate_insn,
+ sched_remove_insn, clear_priorities, calc_priorities, bb_note,
+ add_jump_dependencies): New static functions.
+ (check_cfg, has_edge_p, check_sched_flags): New static functions for
+ consistency checking.
+ (debug_spec_status): New function to call from debugger.
+ (priority): Added code to handle speculation checks.
+ (rank_for_schedule): Added code to distinguish speculative instructions.
+ (schedule_insn): Added code to handle speculation checks.
+ (unlink_other_notes, rm_line_notes, restore_line_notes, rm_other_notes):
+ Fixed to handle ebbs.
+ (move_insn): Added code to handle ebb scheduling.
+ (max_issue): Added code to use ISSUE_POINTS of instructions.
+ (choose_ready): Added code to choose between speculative and
+ non-speculative instructions.
+ (schedule_block): Added code to handle ebb scheduling and scheduling of
+ speculative instructions.
+ (sched_init): Initialize new variables.
+ (sched_finish): Free new variables. Print statistics.
+ (try_ready): Added code to handle speculative instructions.
+ * lists.c (copy_DEPS_LIST_list): New function.
+ * sched-deps.c (extend_dependency_caches): New function. Move logic
+ from create_dependency_caches to here.
+ (get_dep_weak, ds_merge): Make global.
+ * genattr.c (main): Code to output prototype for
+ dfa_clear_single_insn_cache.
+ * genautomata.c (DFA_CLEAR_SINGLE_INSN_CACHE_FUNC_NAME): New macro.
+ (output_dfa_clean_insn_cache_func): Code to output
+ dfa_clear_single_insn_cache function.
+ * sched-ebb.c (target_n_insns): Remove. Adjust all users to use
+ n_insns.
+ (can_schedule_ready_p, fix_basic_block_boundaries, add_missing_bbs):
+ Removed.
+ (n_insns, dont_calc_deps, ebb_head, ebb_tail, last_bb):
+ New static variables.
+ (begin_schedule_ready, add_remove_insn, add_block1, advance_target_bb,
+ fix_recovery_cfg, ebb_head_or_leaf_p): Implement hooks from
+ struct sched_info.
+ (ebb_sched_info): Initialize new fields.
+ (get_block_head_tail): Adjust all uses.
+ (compute_jump_reg_dependencies): Fixed to use glat_start.
+ (schedule_ebb): Code to remove unreachable last block.
+ (schedule_ebbs): Added code to update register live information.
+ * sched-rgn.c (region_sched_info): Initialize new fields.
+ (get_block_head_tail): Adjust all uses.
+ (last_was_jump): Removed. Adjust users.
+ (begin_schedule_ready, add_remove_insn, insn_points, extend_regions,
+ add_block1, fix_recovery_cfg, advance_target_bb, region_head_or_leaf_p):
+ Implement new hooks.
+ (check_dead_notes1): New static function.
+ (struct region): New fields: dont_calc_deps, has_real_ebb.
+ (RGN_DONT_CALC_DEPS, RGN_HAS_REAL_EBB): New access macros.
+ (BB_TO_BLOCK): Fixed to handle EBBs.
+ (EBB_FIRST_BB, EBB_LAST_BB): New macros.
+ (ebb_head): New static variable.
+ (debug_regions, contributes_to_priority): Fixed to handle EBBs.
+ (find_single_block_regions, find_rgns, find_more_rgns): Initialize
+ new fields.
+ (compute_dom_prob_ps): New assertion.
+ (check_live_1, update_live_1): Fixed to work with glat_start instead of
+ global_live_at_start.
+ (init_ready_list): New assertions.
+ (can_schedule_ready_p): Split update code to begin_schedule_ready.
+ (new_ready): Add support for BEGIN_CONTROL speculation.
+ (schedule_insns): Fixed code that updates register live information
+ to handle EBBs.
+ (schedule_region): Fixed to handle EBBs.
+ (init_regions): Use extend_regions and check_dead_notes1.
+ * params.def (PARAM_MAX_SCHED_INSN_CONFLICT_DELAY,
+ PARAM_SCHED_SPEC_PROB_CUTOFF): New parameters.
+ * doc/tm.texi (TARGET_SCHED_ADJUST_COST_2, TARGET_SCHED_H_I_D_EXTENDED,
+ TARGET_SCHED_SPECULATE_INSN, TARGET_SCHED_NEEDS_BLOCK_P,
+ TARGET_SCHED_GEN_CHECK,
+ TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD_SPEC,
+ TARGET_SCHED_SET_SCHED_FLAGS): Document.
+ * doc/invoke.texi (max-sched-insn-conflict-delay,
+ sched-spec-prob-cutoff): Document.
+
+2006-03-16 Maxim Kuvyrkov <mkuvyrkov@ispras.ru>
+
* sched-int.h (struct haifa_insn_data): New fields: resolved_deps,
inter_tick, queue_index.
(struct sched_info): Change signature of init_ready_list field.
diff --git a/gcc/Makefile.in b/gcc/Makefile.in
index 4552eff5d3b..fe7ffeef7e7 100644
--- a/gcc/Makefile.in
+++ b/gcc/Makefile.in
@@ -2515,7 +2515,8 @@ modulo-sched.o : modulo-sched.c $(DDG_H) $(CONFIG_H) $(CONFIG_H) $(SYSTEM_H) \
cfghooks.h $(DF_H) $(GCOV_IO_H) hard-reg-set.h $(TM_H) timevar.h tree-pass.h
haifa-sched.o : haifa-sched.c $(CONFIG_H) $(SYSTEM_H) coretypes.h $(TM_H) $(RTL_H) \
$(SCHED_INT_H) $(REGS_H) hard-reg-set.h $(FLAGS_H) insn-config.h $(FUNCTION_H) \
- $(INSN_ATTR_H) toplev.h $(RECOG_H) except.h $(TM_P_H) $(TARGET_H) output.h
+ $(INSN_ATTR_H) toplev.h $(RECOG_H) except.h $(TM_P_H) $(TARGET_H) output.h \
+ $(PARAMS_H)
sched-deps.o : sched-deps.c $(CONFIG_H) $(SYSTEM_H) coretypes.h $(TM_H) \
$(RTL_H) $(SCHED_INT_H) $(REGS_H) hard-reg-set.h $(FLAGS_H) insn-config.h \
$(FUNCTION_H) $(INSN_ATTR_H) toplev.h $(RECOG_H) except.h cselib.h \
diff --git a/gcc/ddg.c b/gcc/ddg.c
index 53cf6859c8a..c00e4991157 100644
--- a/gcc/ddg.c
+++ b/gcc/ddg.c
@@ -382,7 +382,7 @@ build_intra_loop_deps (ddg_ptr g)
init_deps (&tmp_deps);
/* Do the intra-block data dependence analysis for the given block. */
- get_block_head_tail (g->bb->index, &head, &tail);
+ get_ebb_head_tail (g->bb, g->bb, &head, &tail);
sched_analyze (&tmp_deps, head, tail);
/* Build intra-loop data dependencies using the scheduler dependency
diff --git a/gcc/doc/invoke.texi b/gcc/doc/invoke.texi
index 64df67239af..f037f4b0991 100644
--- a/gcc/doc/invoke.texi
+++ b/gcc/doc/invoke.texi
@@ -6183,6 +6183,15 @@ The maximum number of iterations through CFG to extend regions.
N - do at most N iterations.
The default value is 2.
+@item max-sched-insn-conflict-delay
+The maximum conflict delay for an insn to be considered for speculative motion.
+The default value is 3.
+
+@item sched-spec-prob-cutoff
+The minimal probability of speculation success (in percent) required for
+a speculative insn to be scheduled.
+The default value is 40.
+
@item max-last-value-rtl
The maximum size measured as number of RTLs that can be recorded in an expression
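As a usage illustration, both new parameters are passed like any other
--param; the values below are simply the documented defaults, and the
file name is hypothetical:

    gcc -O2 -fschedule-insns2 --param max-sched-insn-conflict-delay=3 \
        --param sched-spec-prob-cutoff=40 test.c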
diff --git a/gcc/doc/tm.texi b/gcc/doc/tm.texi
index 674cd95c694..d18b0ba3e39 100644
--- a/gcc/doc/tm.texi
+++ b/gcc/doc/tm.texi
@@ -5838,8 +5838,8 @@ acceptable, you could use the hook to modify them too. See also
@deftypefn {Target Hook} int TARGET_SCHED_ADJUST_PRIORITY (rtx @var{insn}, int @var{priority})
This hook adjusts the integer scheduling priority @var{priority} of
-@var{insn}. It should return the new priority. Reduce the priority to
-execute @var{insn} earlier, increase the priority to execute @var{insn}
+@var{insn}. It should return the new priority. Increase the priority to
+execute @var{insn} earlier, reduce the priority to execute @var{insn}
later. Do not define this hook if you do not need to adjust the
scheduling priorities of insns.
@end deftypefn
@@ -6014,6 +6014,70 @@ closer to one another---i.e., closer than the dependence distance; however,
not in cases of "costly dependences", which this hooks allows to define.
@end deftypefn
+@deftypefn {Target Hook} int TARGET_SCHED_ADJUST_COST_2 (rtx @var{insn}, int @var{dep_type}, rtx @var{dep_insn}, int @var{cost})
+This hook is a modified version of @samp{TARGET_SCHED_ADJUST_COST}. Instead
+of passing the dependence as a second parameter, it passes the type of that
+dependence. This is useful for calculating the cost of a dependence between
+insns that have no corresponding link. If @samp{TARGET_SCHED_ADJUST_COST_2}
+is defined, it is used instead of @samp{TARGET_SCHED_ADJUST_COST}.
+@end deftypefn
+
+@deftypefn {Target Hook} void TARGET_SCHED_H_I_D_EXTENDED (void)
+This hook is called by the insn scheduler after emitting a new instruction to
+the instruction stream. The hook notifies the target backend that it
+should extend its per-instruction data structures.
+@end deftypefn
+
+@deftypefn {Target Hook} int TARGET_SCHED_SPECULATE_INSN (rtx @var{insn}, int @var{request}, rtx *@var{new_pat})
+This hook is called by the insn scheduler when @var{insn} has only
+speculative dependencies and therefore can be scheduled speculatively.
+The hook is used to check whether the pattern of @var{insn} has a speculative
+version and, if the check succeeds, to generate that speculative
+pattern. The hook should return 1 if the instruction has a speculative form,
+or -1 if it doesn't. @var{request} describes the type of requested
+speculation. If the return value equals 1 then @var{new_pat} is assigned
+the generated speculative pattern.
+@end deftypefn
+
+@deftypefn {Target Hook} int TARGET_SCHED_NEEDS_BLOCK_P (rtx @var{insn})
+This hook is called by the insn scheduler during generation of recovery code
+for @var{insn}. It should return non-zero if the corresponding check
+instruction should branch to recovery code, and zero otherwise.
+@end deftypefn
+
+@deftypefn {Target Hook} rtx TARGET_SCHED_GEN_CHECK (rtx @var{insn}, rtx @var{label}, int @var{mutate_p})
+This hook is called by the insn scheduler to generate a pattern for a recovery
+check instruction. If @var{mutate_p} is zero, then @var{insn} is a
+speculative instruction for which the check should be generated.
+@var{label} is either the label of the basic block where recovery code should
+be emitted, or a null pointer if the requested check doesn't branch to
+recovery code (a simple check). If @var{mutate_p} is non-zero, then
+a pattern for a branchy check corresponding to the simple check denoted by
+@var{insn} should be generated. In this case @var{label} can't be null.
+@end deftypefn
+
+@deftypefn {Target Hook} int TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD_SPEC (rtx @var{insn})
+This hook is used as a workaround for
+@samp{TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD} not being
+called on the first instruction of the ready list. The hook is used to
+discard speculative instructions that stand first in the ready list from
+being scheduled on the current cycle. For non-speculative instructions,
+the hook should always return non-zero. For example, in the ia64 backend
+the hook is used to cancel data speculative insns when the ALAT table
+is nearly full.
+@end deftypefn
+
+@deftypefn {Target Hook} void TARGET_SCHED_SET_SCHED_FLAGS (spec_info_t @var{spec_info})
+This hook is used by the insn scheduler to find out what features should be
+enabled/used. The scheduler's flags word, reachable through
+current_sched_info, initially has either the SCHED_RGN or SCHED_EBB bit
+set, denoting the scheduler pass for which the data should be provided.
+The target backend should modify that word by setting the bits
+corresponding to the following features: USE_DEPS_LIST, USE_GLAT,
+DETACH_LIFE_INFO, and DO_SPECULATION. For the DO_SPECULATION feature
+an additional structure @var{spec_info} should be filled by the target.
+The structure describes speculation types that can be used in the scheduler.
+@end deftypefn
+
@node Sections
@section Dividing the Output into Sections (Texts, Data, @dots{})
@c the above section title is WAY too long. maybe cut the part between
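To make the new hooks concrete, here is a minimal sketch of a backend wiring
up three of them. The mymach backend and its policies are hypothetical; only
the hook signatures and the TARGET_SCHED_* initializer macros come from this
patch.

/* Cost adjustment keyed on the dependence type alone; unlike
   adjust_cost, no dependence link is required, so this also works
   for dependencies that have no link (e.g. between an insn and its
   recovery twin).  */
static int
mymach_sched_adjust_cost_2 (rtx insn ATTRIBUTE_UNUSED, int dep_type,
                            rtx dep_insn ATTRIBUTE_UNUSED, int cost)
{
  /* Hypothetical machine: anti dependencies are free.  */
  if ((enum reg_note) dep_type == REG_DEP_ANTI)
    return 0;
  return cost;
}

/* Request a branchy check with a recovery block for every check.  */
static int
mymach_sched_needs_block_p (rtx insn ATTRIBUTE_UNUSED)
{
  return 1;
}

/* This hypothetical machine provides no speculative patterns, so the
   scheduler will never actually move an insn speculatively.  */
static int
mymach_sched_speculate_insn (rtx insn ATTRIBUTE_UNUSED,
                             int request ATTRIBUTE_UNUSED,
                             rtx *new_pat ATTRIBUTE_UNUSED)
{
  return -1;
}

#undef TARGET_SCHED_ADJUST_COST_2
#define TARGET_SCHED_ADJUST_COST_2 mymach_sched_adjust_cost_2
#undef TARGET_SCHED_NEEDS_BLOCK_P
#define TARGET_SCHED_NEEDS_BLOCK_P mymach_sched_needs_block_p
#undef TARGET_SCHED_SPECULATE_INSN
#define TARGET_SCHED_SPECULATE_INSN mymach_sched_speculate_insn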
diff --git a/gcc/genattr.c b/gcc/genattr.c
index 9d8cf95f0ae..83fb5e88f2c 100644
--- a/gcc/genattr.c
+++ b/gcc/genattr.c
@@ -250,6 +250,7 @@ main (int argc, char **argv)
printf (" define_insn_reservation will be changed after\n");
printf (" last call of dfa_start. */\n");
printf ("extern void dfa_clean_insn_cache (void);\n\n");
+ printf ("extern void dfa_clear_single_insn_cache (rtx);\n\n");
printf ("/* Initiate and finish work with DFA. They should be\n");
printf (" called as the first and the last interface\n");
printf (" functions. */\n");
diff --git a/gcc/genautomata.c b/gcc/genautomata.c
index 2c17ee4b446..eb06e6ccff8 100644
--- a/gcc/genautomata.c
+++ b/gcc/genautomata.c
@@ -6852,6 +6852,8 @@ output_reserved_units_table_name (FILE *f, automaton_t automaton)
#define DFA_CLEAN_INSN_CACHE_FUNC_NAME "dfa_clean_insn_cache"
+#define DFA_CLEAR_SINGLE_INSN_CACHE_FUNC_NAME "dfa_clear_single_insn_cache"
+
#define DFA_START_FUNC_NAME "dfa_start"
#define DFA_FINISH_FUNC_NAME "dfa_finish"
@@ -8335,7 +8337,8 @@ output_cpu_unit_reservation_p (void)
fprintf (output_file, " return 0;\n}\n\n");
}
-/* The function outputs PHR interface function `dfa_clean_insn_cache'. */
+/* The function outputs PHR interface functions `dfa_clean_insn_cache'
+ and `dfa_clear_single_insn_cache'. */
static void
output_dfa_clean_insn_cache_func (void)
{
@@ -8347,6 +8350,16 @@ output_dfa_clean_insn_cache_func (void)
I_VARIABLE_NAME, I_VARIABLE_NAME,
DFA_INSN_CODES_LENGTH_VARIABLE_NAME, I_VARIABLE_NAME,
DFA_INSN_CODES_VARIABLE_NAME, I_VARIABLE_NAME);
+
+ fprintf (output_file,
+ "void\n%s (rtx %s)\n{\n int %s;\n\n",
+ DFA_CLEAR_SINGLE_INSN_CACHE_FUNC_NAME, INSN_PARAMETER_NAME,
+ I_VARIABLE_NAME);
+ fprintf (output_file,
+ " %s = INSN_UID (%s);\n if (%s < %s)\n %s [%s] = -1;\n}\n\n",
+ I_VARIABLE_NAME, INSN_PARAMETER_NAME, I_VARIABLE_NAME,
+ DFA_INSN_CODES_LENGTH_VARIABLE_NAME, DFA_INSN_CODES_VARIABLE_NAME,
+ I_VARIABLE_NAME);
}
/* The function outputs PHR interface function `dfa_start'. */
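For reference, the two fprintf calls above emit a function of roughly the
following shape into insn-automata.c; the identifier names are inferred from
the corresponding *_NAME macros rather than copied from generated output:

void
dfa_clear_single_insn_cache (rtx insn)
{
  int i;

  i = INSN_UID (insn);
  if (i < dfa_insn_codes_length)
    dfa_insn_codes [i] = -1;
}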
diff --git a/gcc/haifa-sched.c b/gcc/haifa-sched.c
index 75300b5f17d..84311b1ba34 100644
--- a/gcc/haifa-sched.c
+++ b/gcc/haifa-sched.c
@@ -143,6 +143,7 @@ Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
#include "sched-int.h"
#include "target.h"
#include "output.h"
+#include "params.h"
#ifdef INSN_SCHEDULING
@@ -195,6 +196,10 @@ struct haifa_insn_data *h_i_d;
/* The minimal value of the INSN_TICK of an instruction. */
#define MIN_TICK (-max_insn_queue_index)
+/* Issue points are used to distinguish between instructions in max_issue ().
+ For now, all instructions are equally good. */
+#define ISSUE_POINTS(INSN) 1
+
/* Vector indexed by basic block number giving the starting line-number
for each basic block. */
static rtx *line_note_head;
@@ -203,6 +208,30 @@ static rtx *line_note_head;
last element in the list. */
static rtx note_list;
+static struct spec_info_def spec_info_var;
+/* Description of the speculative part of the scheduling.
+ If NULL, there is no speculation. */
+static spec_info_t spec_info;
+
+/* True if a recovery block was added during scheduling of the current
+ block. Used to determine whether we need to fix INSN_TICKs. */
+static bool added_recovery_block_p;
+
+/* Counters of different types of speculative instructions. */
+static int nr_begin_data, nr_be_in_data, nr_begin_control, nr_be_in_control;
+
+/* Pointers to GLAT data. See init_glat for more information. */
+regset *glat_start, *glat_end;
+
+/* Array used in {unlink, restore}_bb_notes. */
+static rtx *bb_header = 0;
+
+/* Number of basic_blocks. */
+static int old_last_basic_block;
+
+/* Basic block after which recovery blocks will be created. */
+static basic_block before_recovery;
+
/* Queues, etc. */
/* An instruction is ready to be scheduled when all insns preceding it
@@ -299,6 +328,9 @@ static struct ready_list *readyp;
/* Scheduling clock. */
static int clock_var;
+/* Number of instructions in current scheduling region. */
+static int rgn_n_insns;
+
static int may_trap_exp (rtx, int);
/* Nonzero iff the address is comprised from at most 1 register. */
@@ -462,13 +494,15 @@ haifa_classify_insn (rtx insn)
/* Forward declarations. */
+HAIFA_INLINE static int insn_cost1 (rtx, enum reg_note, rtx, rtx);
static int priority (rtx);
static int rank_for_schedule (const void *, const void *);
static void swap_sort (rtx *, int);
static void queue_insn (rtx, int);
static int schedule_insn (rtx);
static int find_set_reg_weight (rtx);
-static void find_insn_reg_weight (int);
+static void find_insn_reg_weight (basic_block);
+static void find_insn_reg_weight1 (rtx);
static void adjust_priority (rtx);
static void advance_one_cycle (void);
@@ -497,7 +531,7 @@ static void advance_one_cycle (void);
static rtx unlink_other_notes (rtx, rtx);
static rtx unlink_line_notes (rtx, rtx);
-static rtx reemit_notes (rtx, rtx);
+static void reemit_notes (rtx);
static rtx *ready_lastpos (struct ready_list *);
static void ready_add (struct ready_list *, rtx, bool);
@@ -509,15 +543,14 @@ static int early_queue_to_ready (state_t, struct ready_list *);
static void debug_ready_list (struct ready_list *);
-static rtx move_insn1 (rtx, rtx);
-static rtx move_insn (rtx, rtx);
+static void move_insn (rtx);
/* The following functions are used to implement multi-pass scheduling
on the first cycle. */
static rtx ready_element (struct ready_list *, int);
static rtx ready_remove (struct ready_list *, int);
static void ready_remove_insn (rtx);
-static int max_issue (struct ready_list *, int *);
+static int max_issue (struct ready_list *, int *, int);
static rtx choose_ready (struct ready_list *);
@@ -526,6 +559,48 @@ static int fix_tick_ready (rtx);
static void change_queue_index (rtx, int);
static void resolve_dep (rtx, rtx);
+/* The following functions are used to implement scheduling of data/control
+ speculative instructions. */
+
+static void extend_h_i_d (void);
+static void extend_ready (int);
+static void extend_global (rtx);
+static void extend_all (rtx);
+static void init_h_i_d (rtx);
+static void generate_recovery_code (rtx);
+static void process_insn_depend_be_in_spec (rtx, rtx, ds_t);
+static void begin_speculative_block (rtx);
+static void add_to_speculative_block (rtx);
+static dw_t dep_weak (ds_t);
+static edge find_fallthru_edge (basic_block);
+static void init_before_recovery (void);
+static basic_block create_recovery_block (void);
+static void create_check_block_twin (rtx, bool);
+static void fix_recovery_deps (basic_block);
+static void associate_line_notes_with_blocks (basic_block);
+static void change_pattern (rtx, rtx);
+static int speculate_insn (rtx, ds_t, rtx *);
+static void dump_new_block_header (int, basic_block, rtx, rtx);
+static void restore_bb_notes (basic_block);
+static void extend_bb (basic_block);
+static void fix_jump_move (rtx);
+static void move_block_after_check (rtx);
+static void move_succs (VEC(edge,gc) **, basic_block);
+static void init_glat (void);
+static void init_glat1 (basic_block);
+static void attach_life_info1 (basic_block);
+static void free_glat (void);
+static void sched_remove_insn (rtx);
+static void clear_priorities (rtx);
+static void add_jump_dependencies (rtx, rtx);
+static rtx bb_note (basic_block);
+static void calc_priorities (rtx);
+#ifdef ENABLE_CHECKING
+static int has_edge_p (VEC(edge,gc) *, int);
+static void check_cfg (rtx, rtx);
+static void check_sched_flags (void);
+#endif
+
#endif /* INSN_SCHEDULING */
/* Point to state used for the current scheduling pass. */
@@ -538,6 +613,9 @@ schedule_insns (void)
}
#else
+/* Working copy of frontend's sched_info variable. */
+static struct sched_info current_sched_info_var;
+
/* Pointer to the last instruction scheduled. Used by rank_for_schedule,
so that insns independent of the last scheduled insn will be preferred
over dependent instructions. */
@@ -551,6 +629,21 @@ static rtx last_scheduled_insn;
HAIFA_INLINE int
insn_cost (rtx insn, rtx link, rtx used)
{
+ return insn_cost1 (insn, used ? REG_NOTE_KIND (link) : REG_NOTE_MAX,
+ link, used);
+}
+
+/* Compute cost of executing INSN given the dependence on the insn USED.
+ If LINK is not NULL, then its REG_NOTE_KIND is used as a dependence type.
+ Otherwise, dependence between INSN and USED is assumed to be of type
+ DEP_TYPE. This function was introduced as a workaround for the
+ targetm.adjust_cost hook.
+ The result is the number of cycles between instruction issue and
+ instruction results. */
+
+HAIFA_INLINE static int
+insn_cost1 (rtx insn, enum reg_note dep_type, rtx link, rtx used)
+{
int cost = INSN_COST (insn);
if (cost < 0)
@@ -575,7 +668,7 @@ insn_cost (rtx insn, rtx link, rtx used)
}
/* In this case estimate cost without caring how insn is used. */
- if (link == 0 || used == 0)
+ if (used == 0)
return cost;
/* A USE insn should never require the value used to be computed.
@@ -585,11 +678,13 @@ insn_cost (rtx insn, rtx link, rtx used)
cost = 0;
else
{
+ gcc_assert (!link || dep_type == REG_NOTE_KIND (link));
+
if (INSN_CODE (insn) >= 0)
{
- if (REG_NOTE_KIND (link) == REG_DEP_ANTI)
+ if (dep_type == REG_DEP_ANTI)
cost = 0;
- else if (REG_NOTE_KIND (link) == REG_DEP_OUTPUT)
+ else if (dep_type == REG_DEP_OUTPUT)
{
cost = (insn_default_latency (insn)
- insn_default_latency (used));
@@ -600,8 +695,14 @@ insn_cost (rtx insn, rtx link, rtx used)
cost = insn_latency (insn, used);
}
- if (targetm.sched.adjust_cost)
- cost = targetm.sched.adjust_cost (used, link, insn, cost);
+ if (targetm.sched.adjust_cost_2)
+ cost = targetm.sched.adjust_cost_2 (used, (int) dep_type, insn, cost);
+ else
+ {
+ gcc_assert (link);
+ if (targetm.sched.adjust_cost)
+ cost = targetm.sched.adjust_cost (used, link, insn, cost);
+ }
if (cost < 0)
cost = 0;
@@ -628,21 +729,68 @@ priority (rtx insn)
this_priority = insn_cost (insn, 0, 0);
else
{
- for (link = INSN_DEPEND (insn); link; link = XEXP (link, 1))
- {
- rtx next;
- int next_priority;
+ rtx prev_first, twin;
+ basic_block rec;
- next = XEXP (link, 0);
+ /* For recovery check instructions we calculate priority slightly
+ differently from normal instructions. Instead of walking
+ through the INSN_DEPEND list of the check, we walk through the
+ INSN_DEPEND list of each instruction in the corresponding
+ recovery block. */
- /* Critical path is meaningful in block boundaries only. */
- if (! (*current_sched_info->contributes_to_priority) (next, insn))
- continue;
+ rec = RECOVERY_BLOCK (insn);
+ if (!rec || rec == EXIT_BLOCK_PTR)
+ {
+ prev_first = PREV_INSN (insn);
+ twin = insn;
+ }
+ else
+ {
+ prev_first = NEXT_INSN (BB_HEAD (rec));
+ twin = PREV_INSN (BB_END (rec));
+ }
- next_priority = insn_cost (insn, link, next) + priority (next);
- if (next_priority > this_priority)
- this_priority = next_priority;
+ do
+ {
+ for (link = INSN_DEPEND (twin); link; link = XEXP (link, 1))
+ {
+ rtx next;
+ int next_priority;
+
+ next = XEXP (link, 0);
+
+ if (BLOCK_FOR_INSN (next) != rec)
+ {
+ /* Critical path is meaningful in block boundaries
+ only. */
+ if (! (*current_sched_info->contributes_to_priority)
+ (next, insn)
+ /* If flag COUNT_SPEC_IN_CRITICAL_PATH is set,
+ then speculative instructions are less likely to be
+ scheduled. That is because the priority of
+ their producers increases, so the producers are
+ more likely to be scheduled first, thus
+ resolving the dependence. */
+ || ((current_sched_info->flags & DO_SPECULATION)
+ && (DEP_STATUS (link) & SPECULATIVE)
+ && !(spec_info->flags
+ & COUNT_SPEC_IN_CRITICAL_PATH)))
+ continue;
+
+ next_priority = insn_cost1 (insn,
+ twin == insn ?
+ REG_NOTE_KIND (link) :
+ REG_DEP_ANTI,
+ twin == insn ? link : 0,
+ next) + priority (next);
+
+ if (next_priority > this_priority)
+ this_priority = next_priority;
+ }
+ }
+
+ twin = PREV_INSN (twin);
}
+ while (twin != prev_first);
}
INSN_PRIORITY (insn) = this_priority;
INSN_PRIORITY_KNOWN (insn) = 1;
@@ -684,6 +832,30 @@ rank_for_schedule (const void *x, const void *y)
if (priority_val)
return priority_val;
+ /* Prefer the insn whose speculative dependencies have greater weakness. */
+ if (spec_info)
+ {
+ ds_t ds1, ds2;
+ dw_t dw1, dw2;
+ int dw;
+
+ ds1 = TODO_SPEC (tmp) & SPECULATIVE;
+ if (ds1)
+ dw1 = dep_weak (ds1);
+ else
+ dw1 = NO_DEP_WEAK;
+
+ ds2 = TODO_SPEC (tmp2) & SPECULATIVE;
+ if (ds2)
+ dw2 = dep_weak (ds2);
+ else
+ dw2 = NO_DEP_WEAK;
+
+ dw = dw2 - dw1;
+ if (dw > (NO_DEP_WEAK / 8) || dw < -(NO_DEP_WEAK / 8))
+ return dw;
+ }
+
/* Prefer an insn with smaller contribution to registers-pressure. */
if (!reload_completed &&
(weight_val = INSN_REG_WEIGHT (tmp) - INSN_REG_WEIGHT (tmp2)))
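In the comparison above, dw1 and dw2 estimate how likely each insn's
speculative dependencies are to succeed, with NO_DEP_WEAK standing in for
non-speculative insns; the difference decides the order only when it exceeds
NO_DEP_WEAK / 8, i.e. an eighth of the scale, and closer pairs fall through
to the register-pressure heuristic that follows.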
@@ -1015,17 +1187,29 @@ schedule_insn (rtx insn)
/* Update dependent instructions. */
for (link = INSN_DEPEND (insn); link; link = XEXP (link, 1))
{
- int effective_cost;
rtx next = XEXP (link, 0);
resolve_dep (next, insn);
- effective_cost = try_ready (next);
-
- if (effective_cost >= 0
- && SCHED_GROUP_P (next)
- && advance < effective_cost)
- advance = effective_cost;
+ if (!RECOVERY_BLOCK (insn)
+ || RECOVERY_BLOCK (insn) == EXIT_BLOCK_PTR)
+ {
+ int effective_cost;
+
+ effective_cost = try_ready (next);
+
+ if (effective_cost >= 0
+ && SCHED_GROUP_P (next)
+ && advance < effective_cost)
+ advance = effective_cost;
+ }
+ else
+ /* A check always has only one forward dependence (to the first insn in
+ the recovery block), therefore this will be executed only once. */
+ {
+ gcc_assert (XEXP (link, 1) == 0);
+ fix_recovery_deps (RECOVERY_BLOCK (insn));
+ }
}
/* Annotate the instruction with issue information -- TImode
@@ -1056,7 +1240,7 @@ unlink_other_notes (rtx insn, rtx tail)
{
rtx prev = PREV_INSN (insn);
- while (insn != tail && NOTE_P (insn))
+ while (insn != tail && NOTE_NOT_BB_P (insn))
{
rtx next = NEXT_INSN (insn);
/* Delete the note from its current position. */
@@ -1066,8 +1250,7 @@ unlink_other_notes (rtx insn, rtx tail)
PREV_INSN (next) = prev;
/* See sched_analyze to see how these are handled. */
- if (NOTE_LINE_NUMBER (insn) != NOTE_INSN_BASIC_BLOCK
- && NOTE_LINE_NUMBER (insn) != NOTE_INSN_EH_REGION_BEG
+ if (NOTE_LINE_NUMBER (insn) != NOTE_INSN_EH_REGION_BEG
&& NOTE_LINE_NUMBER (insn) != NOTE_INSN_EH_REGION_END)
{
/* Insert the note at the end of the notes list. */
@@ -1113,31 +1296,43 @@ unlink_line_notes (rtx insn, rtx tail)
return insn;
}
-/* Return the head and tail pointers of BB. */
+/* Return the head and tail pointers of the ebb starting at BEG and ending
+ at END. */
void
-get_block_head_tail (int b, rtx *headp, rtx *tailp)
-{
- /* HEAD and TAIL delimit the basic block being scheduled. */
- rtx head = BB_HEAD (BASIC_BLOCK (b));
- rtx tail = BB_END (BASIC_BLOCK (b));
-
- /* Don't include any notes or labels at the beginning of the
- basic block, or notes at the ends of basic blocks. */
- while (head != tail)
- {
- if (NOTE_P (head))
- head = NEXT_INSN (head);
- else if (NOTE_P (tail))
- tail = PREV_INSN (tail);
- else if (LABEL_P (head))
- head = NEXT_INSN (head);
- else
- break;
- }
+get_ebb_head_tail (basic_block beg, basic_block end, rtx *headp, rtx *tailp)
+{
+ rtx beg_head = BB_HEAD (beg);
+ rtx beg_tail = BB_END (beg);
+ rtx end_head = BB_HEAD (end);
+ rtx end_tail = BB_END (end);
+
+ /* Don't include any notes or labels at the beginning of the BEG
+ basic block, or notes at the end of the END basic block. */
+
+ if (LABEL_P (beg_head))
+ beg_head = NEXT_INSN (beg_head);
+
+ while (beg_head != beg_tail)
+ if (NOTE_P (beg_head))
+ beg_head = NEXT_INSN (beg_head);
+ else
+ break;
+
+ *headp = beg_head;
- *headp = head;
- *tailp = tail;
+ if (beg == end)
+ end_head = beg_head;
+ else if (LABEL_P (end_head))
+ end_head = NEXT_INSN (end_head);
+
+ while (end_head != end_tail)
+ if (NOTE_P (end_tail))
+ end_tail = PREV_INSN (end_tail);
+ else
+ break;
+
+ *tailp = end_tail;
}
/* Return nonzero if there are no real insns in the range [ HEAD, TAIL ]. */
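Passing the same block for both ends, as the ddg.c hunk above does with
get_ebb_head_tail (g->bb, g->bb, &head, &tail), degenerates to the old
single-block get_block_head_tail behavior.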
@@ -1172,7 +1367,7 @@ rm_line_notes (rtx head, rtx tail)
/* Farm out notes, and maybe save them in NOTE_LIST.
This is needed to keep the debugger from
getting completely deranged. */
- if (NOTE_P (insn))
+ if (NOTE_NOT_BB_P (insn))
{
prev = insn;
insn = unlink_line_notes (insn, next_tail);
@@ -1263,6 +1458,7 @@ restore_line_notes (rtx head, rtx tail)
NEXT_INSN (prev) = note;
PREV_INSN (insn) = note;
NEXT_INSN (note) = insn;
+ set_block_for_insn (note, BLOCK_FOR_INSN (insn));
}
else
{
@@ -1350,7 +1546,7 @@ rm_other_notes (rtx head, rtx tail)
/* Farm out notes, and maybe save them in NOTE_LIST.
This is needed to keep the debugger from
getting completely deranged. */
- if (NOTE_P (insn))
+ if (NOTE_NOT_BB_P (insn))
{
prev = insn;
@@ -1391,44 +1587,51 @@ find_set_reg_weight (rtx x)
/* Calculate INSN_REG_WEIGHT for all insns of a block. */
static void
-find_insn_reg_weight (int b)
+find_insn_reg_weight (basic_block bb)
{
rtx insn, next_tail, head, tail;
- get_block_head_tail (b, &head, &tail);
+ get_ebb_head_tail (bb, bb, &head, &tail);
next_tail = NEXT_INSN (tail);
for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
- {
- int reg_weight = 0;
- rtx x;
-
- /* Handle register life information. */
- if (! INSN_P (insn))
- continue;
+ find_insn_reg_weight1 (insn);
+}
- /* Increment weight for each register born here. */
- x = PATTERN (insn);
- reg_weight += find_set_reg_weight (x);
- if (GET_CODE (x) == PARALLEL)
- {
- int j;
- for (j = XVECLEN (x, 0) - 1; j >= 0; j--)
- {
- x = XVECEXP (PATTERN (insn), 0, j);
- reg_weight += find_set_reg_weight (x);
- }
- }
- /* Decrement weight for each register that dies here. */
- for (x = REG_NOTES (insn); x; x = XEXP (x, 1))
+/* Calculate INSN_REG_WEIGHT for a single instruction.
+ Separated from find_insn_reg_weight because of the need
+ to initialize new instructions in generate_recovery_code. */
+static void
+find_insn_reg_weight1 (rtx insn)
+{
+ int reg_weight = 0;
+ rtx x;
+
+ /* Handle register life information. */
+ if (! INSN_P (insn))
+ return;
+
+ /* Increment weight for each register born here. */
+ x = PATTERN (insn);
+ reg_weight += find_set_reg_weight (x);
+ if (GET_CODE (x) == PARALLEL)
+ {
+ int j;
+ for (j = XVECLEN (x, 0) - 1; j >= 0; j--)
{
- if (REG_NOTE_KIND (x) == REG_DEAD
- || REG_NOTE_KIND (x) == REG_UNUSED)
- reg_weight--;
+ x = XVECEXP (PATTERN (insn), 0, j);
+ reg_weight += find_set_reg_weight (x);
}
-
- INSN_REG_WEIGHT (insn) = reg_weight;
}
+ /* Decrement weight for each register that dies here. */
+ for (x = REG_NOTES (insn); x; x = XEXP (x, 1))
+ {
+ if (REG_NOTE_KIND (x) == REG_DEAD
+ || REG_NOTE_KIND (x) == REG_UNUSED)
+ reg_weight--;
+ }
+
+ INSN_REG_WEIGHT (insn) = reg_weight;
}
/* Move insns that became ready to fire from queue to ready list. */
@@ -1670,36 +1873,17 @@ debug_ready_list (struct ready_list *ready)
fprintf (sched_dump, "\n");
}
-/* move_insn1: Remove INSN from insn chain, and link it after LAST insn. */
-
-static rtx
-move_insn1 (rtx insn, rtx last)
-{
- NEXT_INSN (PREV_INSN (insn)) = NEXT_INSN (insn);
- PREV_INSN (NEXT_INSN (insn)) = PREV_INSN (insn);
-
- NEXT_INSN (insn) = NEXT_INSN (last);
- PREV_INSN (NEXT_INSN (last)) = insn;
-
- NEXT_INSN (last) = insn;
- PREV_INSN (insn) = last;
-
- return insn;
-}
-
/* Search INSN for REG_SAVE_NOTE note pairs for
NOTE_INSN_EHREGION_{BEG,END}; and convert them back into
NOTEs. The REG_SAVE_NOTE following the first one contains the
saved value for NOTE_BLOCK_NUMBER which is useful for
- NOTE_INSN_EH_REGION_{BEG,END} NOTEs. LAST is the last instruction
- output by the instruction scheduler. Return the new value of LAST. */
+ NOTE_INSN_EH_REGION_{BEG,END} NOTEs. */
-static rtx
-reemit_notes (rtx insn, rtx last)
+static void
+reemit_notes (rtx insn)
{
- rtx note, retval;
+ rtx note, last = insn;
- retval = last;
for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
{
if (REG_NOTE_KIND (note) == REG_SAVE_NOTE)
@@ -1710,31 +1894,96 @@ reemit_notes (rtx insn, rtx last)
remove_note (insn, note);
}
}
- return retval;
}
-/* Move INSN. Reemit notes if needed.
+/* Move INSN. Reemit notes if needed. Update CFG, if needed. */
+static void
+move_insn (rtx insn)
+{
+ rtx last = last_scheduled_insn;
- Return the last insn emitted by the scheduler, which is the
- return value from the first call to reemit_notes. */
+ if (PREV_INSN (insn) != last)
+ {
+ basic_block bb;
+ rtx note;
+ int jump_p = 0;
-static rtx
-move_insn (rtx insn, rtx last)
-{
- rtx retval = NULL;
+ bb = BLOCK_FOR_INSN (insn);
+
+ /* BB_HEAD is either LABEL or NOTE. */
+ gcc_assert (BB_HEAD (bb) != insn);
- move_insn1 (insn, last);
+ if (BB_END (bb) == insn)
+ /* If this is the last instruction in the BB, move the end marker
+ one instruction up. */
+ {
+ /* Jumps are always placed at the end of basic block. */
+ jump_p = control_flow_insn_p (insn);
+
+ gcc_assert (!jump_p
+ || ((current_sched_info->flags & SCHED_RGN)
+ && RECOVERY_BLOCK (insn)
+ && RECOVERY_BLOCK (insn) != EXIT_BLOCK_PTR)
+ || (current_sched_info->flags & SCHED_EBB));
+
+ gcc_assert (BLOCK_FOR_INSN (PREV_INSN (insn)) == bb);
- /* If this is the first call to reemit_notes, then record
- its return value. */
- if (retval == NULL_RTX)
- retval = reemit_notes (insn, insn);
- else
- reemit_notes (insn, insn);
+ BB_END (bb) = PREV_INSN (insn);
+ }
+
+ gcc_assert (BB_END (bb) != last);
+
+ if (jump_p)
+ /* We move the block note along with the jump. */
+ {
+ /* NT is needed for assertion below. */
+ rtx nt = current_sched_info->next_tail;
+
+ note = NEXT_INSN (insn);
+ while (NOTE_NOT_BB_P (note) && note != nt)
+ note = NEXT_INSN (note);
+
+ if (note != nt
+ && (LABEL_P (note)
+ || BARRIER_P (note)))
+ note = NEXT_INSN (note);
+
+ gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
+ }
+ else
+ note = insn;
- SCHED_GROUP_P (insn) = 0;
+ NEXT_INSN (PREV_INSN (insn)) = NEXT_INSN (note);
+ PREV_INSN (NEXT_INSN (note)) = PREV_INSN (insn);
- return retval;
+ NEXT_INSN (note) = NEXT_INSN (last);
+ PREV_INSN (NEXT_INSN (last)) = note;
+
+ NEXT_INSN (last) = insn;
+ PREV_INSN (insn) = last;
+
+ bb = BLOCK_FOR_INSN (last);
+
+ if (jump_p)
+ {
+ fix_jump_move (insn);
+
+ if (BLOCK_FOR_INSN (insn) != bb)
+ move_block_after_check (insn);
+
+ gcc_assert (BB_END (bb) == last);
+ }
+
+ set_block_for_insn (insn, bb);
+
+ /* Update BB_END, if needed. */
+ if (BB_END (bb) == last)
+ BB_END (bb) = insn;
+ }
+
+ reemit_notes (insn);
+
+ SCHED_GROUP_P (insn) = 0;
}
/* The following structure describes an entry of the stack of choices. */
@@ -1784,13 +2033,15 @@ static int cached_issue_rate = 0;
is the insn with the best rank (the first insn in READY). To
do this, the function tries different samples of ready insns. READY
is the current queue `ready'. Global array READY_TRY reflects what
- insns are already issued in this try. INDEX will contain index
- of the best insn in READY. The following function is used only for
- first cycle multipass scheduling. */
+ insns are already issued in this try. MAX_POINTS is the sum of points
+ of all instructions in READY. The function stops immediately
+ when it reaches a solution in which all instructions can be issued.
+ INDEX will contain the index of the best insn in READY. The following
+ function is used only for first cycle multipass scheduling. */
static int
-max_issue (struct ready_list *ready, int *index)
+max_issue (struct ready_list *ready, int *index, int max_points)
{
- int n, i, all, n_ready, best, delay, tries_num;
+ int n, i, all, n_ready, best, delay, tries_num, points = -1;
struct choice_entry *top;
rtx insn;
@@ -1815,7 +2066,8 @@ max_issue (struct ready_list *ready, int *index)
{
best = top - choice_stack;
*index = choice_stack [1].index;
- if (top->n == issue_rate - cycle_issued_insns || best == all)
+ points = top->n;
+ if (top->n == max_points || best == all)
break;
}
i = top->index;
@@ -1838,7 +2090,7 @@ max_issue (struct ready_list *ready, int *index)
top->rest--;
n = top->n;
if (memcmp (top->state, curr_state, dfa_state_size) != 0)
- n++;
+ n += ISSUE_POINTS (insn);
top++;
top->rest = cached_first_cycle_multipass_dfa_lookahead;
top->index = i;
@@ -1855,7 +2107,14 @@ max_issue (struct ready_list *ready, int *index)
ready_try [top->index] = 0;
top--;
}
- memcpy (curr_state, choice_stack->state, dfa_state_size);
+ memcpy (curr_state, choice_stack->state, dfa_state_size);
+
+ if (sched_verbose >= 4)
+ fprintf (sched_dump, ";;\t\tChoosed insn : %s; points: %d/%d\n",
+ (*current_sched_info->print_insn) (ready_element (ready, *index),
+ 0),
+ points, max_points);
+
return best;
}
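Since ISSUE_POINTS is currently a constant 1, points simply counts issued
insns, and max_points (computed in choose_ready below from ISSUE_POINTS and
the free issue slots) effectively plays the role of the old
issue_rate - cycle_issued_insns bound; the top->n == max_points test then
stops the search as soon as some try issues everything that can still fit
in this cycle.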
@@ -1875,9 +2134,10 @@ choose_ready (struct ready_list *ready)
else
{
/* Try to choose the better insn. */
- int index = 0, i;
+ int index = 0, i, n;
rtx insn;
-
+ int more_issue, max_points, try_data = 1, try_control = 1;
+
if (cached_first_cycle_multipass_dfa_lookahead != lookahead)
{
cached_first_cycle_multipass_dfa_lookahead = lookahead;
@@ -1888,26 +2148,79 @@ choose_ready (struct ready_list *ready)
insn = ready_element (ready, 0);
if (INSN_CODE (insn) < 0)
return ready_remove_first (ready);
+
+ if (spec_info
+ && spec_info->flags & (PREFER_NON_DATA_SPEC
+ | PREFER_NON_CONTROL_SPEC))
+ {
+ rtx x;
+ int s;
+
+ for (i = 0, n = ready->n_ready; i < n; i++)
+ {
+ x = ready_element (ready, i);
+ s = TODO_SPEC (x);
+
+ if (spec_info->flags & PREFER_NON_DATA_SPEC
+ && !(s & DATA_SPEC))
+ {
+ try_data = 0;
+ if (!(spec_info->flags & PREFER_NON_CONTROL_SPEC)
+ || !try_control)
+ break;
+ }
+
+ if (spec_info->flags & PREFER_NON_CONTROL_SPEC
+ && !(s & CONTROL_SPEC))
+ {
+ try_control = 0;
+ if (!(spec_info->flags & PREFER_NON_DATA_SPEC) || !try_data)
+ break;
+ }
+ }
+ }
+
+ if ((!try_data && (TODO_SPEC (insn) & DATA_SPEC))
+ || (!try_control && (TODO_SPEC (insn) & CONTROL_SPEC))
+ || (targetm.sched.first_cycle_multipass_dfa_lookahead_guard_spec
+ && !targetm.sched.first_cycle_multipass_dfa_lookahead_guard_spec
+ (insn)))
+ {
+ change_queue_index (insn, 1);
+ return 0;
+ }
+
+ max_points = ISSUE_POINTS (insn);
+ more_issue = issue_rate - cycle_issued_insns - 1;
+
for (i = 1; i < ready->n_ready; i++)
{
insn = ready_element (ready, i);
ready_try [i]
= (INSN_CODE (insn) < 0
+ || (!try_data && (TODO_SPEC (insn) & DATA_SPEC))
+ || (!try_control && (TODO_SPEC (insn) & CONTROL_SPEC))
|| (targetm.sched.first_cycle_multipass_dfa_lookahead_guard
- && !targetm.sched.first_cycle_multipass_dfa_lookahead_guard (insn)));
+ && !targetm.sched.first_cycle_multipass_dfa_lookahead_guard
+ (insn)));
+
+ if (!ready_try [i] && more_issue-- > 0)
+ max_points += ISSUE_POINTS (insn);
}
- if (max_issue (ready, &index) == 0)
+
+ if (max_issue (ready, &index, max_points) == 0)
return ready_remove_first (ready);
else
return ready_remove (ready, index);
}
}
-/* Use forward list scheduling to rearrange insns of block B in region RGN,
- possibly bringing insns from subsequent blocks in the same region. */
+/* Use forward list scheduling to rearrange insns of block pointed to by
+ TARGET_BB, possibly bringing insns from subsequent blocks in the same
+ region. */
void
-schedule_block (int b, int rgn_n_insns)
+schedule_block (basic_block *target_bb, int rgn_n_insns1)
{
struct ready_list ready;
int i, first_cycle_insn_p;
@@ -1930,35 +2243,28 @@ schedule_block (int b, int rgn_n_insns)
gcc_assert (head != tail || INSN_P (head));
+ added_recovery_block_p = false;
+
/* Debug info. */
if (sched_verbose)
- {
- fprintf (sched_dump,
- ";; ======================================================\n");
- fprintf (sched_dump,
- ";; -- basic block %d from %d to %d -- %s reload\n",
- b, INSN_UID (head), INSN_UID (tail),
- (reload_completed ? "after" : "before"));
- fprintf (sched_dump,
- ";; ======================================================\n");
- fprintf (sched_dump, "\n");
- }
+ dump_new_block_header (0, *target_bb, head, tail);
state_reset (curr_state);
/* Allocate the ready list. */
readyp = &ready;
- ready.veclen = rgn_n_insns + 1 + issue_rate;
+ ready.vec = NULL;
+ ready_try = NULL;
+ choice_stack = NULL;
+
+ rgn_n_insns = -1;
+ extend_ready (rgn_n_insns1 + 1);
+
ready.first = ready.veclen - 1;
- ready.vec = XNEWVEC (rtx, ready.veclen);
ready.n_ready = 0;
/* It is used for first cycle multipass scheduling. */
temp_state = alloca (dfa_state_size);
- ready_try = XCNEWVEC (char, rgn_n_insns + 1);
- choice_stack = XNEWVEC (struct choice_entry, rgn_n_insns + 1);
- for (i = 0; i <= rgn_n_insns; i++)
- choice_stack[i].state = xmalloc (dfa_state_size);
if (targetm.sched.md_init)
targetm.sched.md_init (sched_dump, sched_verbose, ready.veclen);
@@ -1966,6 +2272,9 @@ schedule_block (int b, int rgn_n_insns)
/* We start inserting insns after PREV_HEAD. */
last_scheduled_insn = prev_head;
+ gcc_assert (NOTE_P (last_scheduled_insn)
+ && BLOCK_FOR_INSN (last_scheduled_insn) == *target_bb);
+
/* Initialize INSN_QUEUE. Q_SIZE is the total number of insns in the
queue. */
q_ptr = 0;
@@ -1981,6 +2290,9 @@ schedule_block (int b, int rgn_n_insns)
in try_ready () (which is called through init_ready_list ()). */
(*current_sched_info->init_ready_list) ();
+ /* Now we can restore basic block notes and maintain precise cfg. */
+ restore_bb_notes (*target_bb);
+
last_clock_var = -1;
advance = 0;
@@ -2048,7 +2360,7 @@ schedule_block (int b, int rgn_n_insns)
if (sched_verbose >= 2)
{
- fprintf (sched_dump, ";;\tReady list (t =%3d): ",
+ fprintf (sched_dump, ";;\tReady list (t = %3d): ",
clock_var);
debug_ready_list (&ready);
}
@@ -2074,7 +2386,11 @@ schedule_block (int b, int rgn_n_insns)
/* Select and remove the insn from the ready list. */
if (sort_p)
- insn = choose_ready (&ready);
+ {
+ insn = choose_ready (&ready);
+ if (!insn)
+ continue;
+ }
else
insn = ready_remove_first (&ready);
@@ -2136,11 +2452,50 @@ schedule_block (int b, int rgn_n_insns)
continue;
}
- if (! (*current_sched_info->can_schedule_ready_p) (insn))
- goto next;
+ if (current_sched_info->can_schedule_ready_p
+ && ! (*current_sched_info->can_schedule_ready_p) (insn))
+ /* We normally get here only if we don't want to move
+ the insn from the split block. */
+ {
+ TODO_SPEC (insn) = (TODO_SPEC (insn) & ~SPECULATIVE) | HARD_DEP;
+ continue;
+ }
+
+ /* DECISION is made. */
+
+ if (TODO_SPEC (insn) & SPECULATIVE)
+ generate_recovery_code (insn);
+
+ if (control_flow_insn_p (last_scheduled_insn)
+ /* This is used to switch basic blocks by request
+ from the scheduler front-end (actually, sched-ebb.c only).
+ This is used to process blocks with a single fallthru
+ edge. If the succeeding block has a jump, that jump would
+ be moved to the end of the current bb, corrupting the CFG. */
+ || current_sched_info->advance_target_bb (*target_bb, insn))
+ {
+ *target_bb = current_sched_info->advance_target_bb
+ (*target_bb, 0);
+
+ if (sched_verbose)
+ {
+ rtx x;
- last_scheduled_insn = move_insn (insn, last_scheduled_insn);
+ x = next_real_insn (last_scheduled_insn);
+ gcc_assert (x);
+ dump_new_block_header (1, *target_bb, x, tail);
+ }
+ last_scheduled_insn = bb_note (*target_bb);
+ }
+
+ /* Update counters, etc in the scheduler's front end. */
+ (*current_sched_info->begin_schedule_ready) (insn,
+ last_scheduled_insn);
+
+ move_insn (insn);
+ last_scheduled_insn = insn;
+
if (memcmp (curr_state, temp_state, dfa_state_size) != 0)
{
cycle_issued_insns++;
@@ -2165,7 +2520,6 @@ schedule_block (int b, int rgn_n_insns)
if (advance != 0)
break;
- next:
first_cycle_insn_p = 0;
/* Sort the ready list based on priority. This must be
@@ -2202,17 +2556,33 @@ schedule_block (int b, int rgn_n_insns)
{
/* We must maintain QUEUE_INDEX between blocks in region. */
for (i = ready.n_ready - 1; i >= 0; i--)
- QUEUE_INDEX (ready_element (&ready, i)) = QUEUE_NOWHERE;
+ {
+ rtx x;
+
+ x = ready_element (&ready, i);
+ QUEUE_INDEX (x) = QUEUE_NOWHERE;
+ TODO_SPEC (x) = (TODO_SPEC (x) & ~SPECULATIVE) | HARD_DEP;
+ }
if (q_size)
for (i = 0; i <= max_insn_queue_index; i++)
{
rtx link;
for (link = insn_queue[i]; link; link = XEXP (link, 1))
- QUEUE_INDEX (XEXP (link, 0)) = QUEUE_NOWHERE;
+ {
+ rtx x;
+
+ x = XEXP (link, 0);
+ QUEUE_INDEX (x) = QUEUE_NOWHERE;
+ TODO_SPEC (x) = (TODO_SPEC (x) & ~SPECULATIVE) | HARD_DEP;
+ }
free_INSN_LIST_list (&insn_queue[i]);
}
+ }
+ if (!current_sched_info->queue_must_finish_empty
+ || added_recovery_block_p)
+ {
/* INSN_TICK (minimum clock tick at which the insn becomes
ready) may be not correct for the insn in the subsequent
blocks of the region. We should use a correct value of
@@ -2222,6 +2592,14 @@ schedule_block (int b, int rgn_n_insns)
fix_inter_tick (NEXT_INSN (prev_head), last_scheduled_insn);
}
+#ifdef ENABLE_CHECKING
+ /* After reload the ia64 backend doesn't maintain BB_END, so
+ if we want to check anything, better do it now;
+ it has already clobbered previously scheduled code. */
+ if (reload_completed)
+ check_cfg (BB_HEAD (BLOCK_FOR_INSN (prev_head)), 0);
+#endif
+
if (targetm.sched.md_finish)
targetm.sched.md_finish (sched_dump, sched_verbose);
@@ -2234,12 +2612,16 @@ schedule_block (int b, int rgn_n_insns)
of the insns. */
if (note_list != 0)
{
+ basic_block head_bb = BLOCK_FOR_INSN (head);
rtx note_head = note_list;
while (PREV_INSN (note_head))
{
+ set_block_for_insn (note_head, head_bb);
note_head = PREV_INSN (note_head);
}
+ /* The loop above misses this note: */
+ set_block_for_insn (note_head, head_bb);
PREV_INSN (note_head) = PREV_INSN (head);
NEXT_INSN (PREV_INSN (head)) = note_head;
@@ -2303,16 +2685,23 @@ set_priorities (rtx head, rtx tail)
return n_insn;
}
+/* Next LUID to assign to an instruction. */
+static int luid;
+
/* Initialize some global state for the scheduler. */
void
sched_init (void)
{
- int luid;
basic_block b;
rtx insn;
int i;
+ /* Switch to working copy of sched_info. */
+ memcpy (&current_sched_info_var, current_sched_info,
+ sizeof (current_sched_info_var));
+ current_sched_info = &current_sched_info_var;
+
/* Disable speculative loads in their presence if cc0 defined. */
#ifdef HAVE_cc0
flag_schedule_speculative_load = 0;
@@ -2327,6 +2716,25 @@ sched_init (void)
sched_dump = ((sched_verbose_param >= 10 || !dump_file)
? stderr : dump_file);
+ /* Initialize SPEC_INFO. */
+ if (targetm.sched.set_sched_flags)
+ {
+ spec_info = &spec_info_var;
+ targetm.sched.set_sched_flags (spec_info);
+ if (current_sched_info->flags & DO_SPECULATION)
+ spec_info->weakness_cutoff =
+ (PARAM_VALUE (PARAM_SCHED_SPEC_PROB_CUTOFF) * MAX_DEP_WEAK) / 100;
+ else
+ /* So we won't read anything accidentally. */
+ spec_info = 0;
+#ifdef ENABLE_CHECKING
+ check_sched_flags ();
+#endif
+ }
+ else
+ /* So we won't read anything accidentally. */
+ spec_info = 0;
+
/* Initialize issue_rate. */
if (targetm.sched.issue_rate)
issue_rate = targetm.sched.issue_rate ();
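A quick arithmetic check of the weakness_cutoff computation above: with the
default sched-spec-prob-cutoff of 40 (see the invoke.texi hunk), the cutoff
becomes (40 * MAX_DEP_WEAK) / 100 = 0.4 * MAX_DEP_WEAK, so only speculative
dependencies whose estimated success probability is at least 40% qualify
for speculation.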
@@ -2340,15 +2748,14 @@ sched_init (void)
cached_first_cycle_multipass_dfa_lookahead = 0;
}
- /* We use LUID 0 for the fake insn (UID 0) which holds dependencies for
- pseudos which do not cross calls. */
- old_max_uid = get_max_uid () + 1;
-
- h_i_d = XCNEWVEC (struct haifa_insn_data, old_max_uid);
+ old_max_uid = 0;
+ h_i_d = 0;
+ extend_h_i_d ();
for (i = 0; i < old_max_uid; i++)
{
h_i_d[i].cost = -1;
+ h_i_d[i].todo_spec = HARD_DEP;
h_i_d[i].queue_index = QUEUE_NOWHERE;
h_i_d[i].tick = INVALID_TICK;
h_i_d[i].inter_tick = INVALID_TICK;
@@ -2387,59 +2794,30 @@ sched_init (void)
init_alias_analysis ();
- if (write_symbols != NO_DEBUG)
- {
- rtx line;
-
- line_note_head = XCNEWVEC (rtx, last_basic_block);
-
- /* Save-line-note-head:
- Determine the line-number at the start of each basic block.
- This must be computed and saved now, because after a basic block's
- predecessor has been scheduled, it is impossible to accurately
- determine the correct line number for the first insn of the block. */
-
- FOR_EACH_BB (b)
- {
- for (line = BB_HEAD (b); line; line = PREV_INSN (line))
- if (NOTE_P (line) && NOTE_LINE_NUMBER (line) > 0)
- {
- line_note_head[b->index] = line;
- break;
- }
- /* Do a forward search as well, since we won't get to see the first
- notes in a basic block. */
- for (line = BB_HEAD (b); line; line = NEXT_INSN (line))
- {
- if (INSN_P (line))
- break;
- if (NOTE_P (line) && NOTE_LINE_NUMBER (line) > 0)
- line_note_head[b->index] = line;
- }
- }
- }
+ line_note_head = 0;
+ old_last_basic_block = 0;
+ glat_start = 0;
+ glat_end = 0;
+ extend_bb (0);
- /* The following is done to keep current_sched_info->next_tail non null. */
-
- insn = BB_END (EXIT_BLOCK_PTR->prev_bb);
- if (NEXT_INSN (insn) == 0
- || (!NOTE_P (insn)
- && !LABEL_P (insn)
- /* Don't emit a NOTE if it would end up before a BARRIER. */
- && !BARRIER_P (NEXT_INSN (insn))))
- {
- emit_note_after (NOTE_INSN_DELETED, insn);
- /* Make insn to appear outside BB. */
- BB_END (EXIT_BLOCK_PTR->prev_bb) = insn;
- }
+ if (current_sched_info->flags & USE_GLAT)
+ init_glat ();
/* Compute INSN_REG_WEIGHT for all blocks. We must do this before
removing death notes. */
FOR_EACH_BB_REVERSE (b)
- find_insn_reg_weight (b->index);
+ find_insn_reg_weight (b);
if (targetm.sched.md_init_global)
targetm.sched.md_init_global (sched_dump, sched_verbose, old_max_uid);
+
+ nr_begin_data = nr_begin_control = nr_be_in_data = nr_be_in_control = 0;
+ before_recovery = 0;
+
+#ifdef ENABLE_CHECKING
+ /* This is used mainly for finding bugs in check_cfg () itself. */
+ check_cfg (0, 0);
+#endif
}
/* Free global data used during insn scheduling. */
@@ -2452,11 +2830,38 @@ sched_finish (void)
dfa_finish ();
free_dependency_caches ();
end_alias_analysis ();
- if (write_symbols != NO_DEBUG)
- free (line_note_head);
+ free (line_note_head);
+ free_glat ();
if (targetm.sched.md_finish_global)
- targetm.sched.md_finish_global (sched_dump, sched_verbose);
+ targetm.sched.md_finish_global (sched_dump, sched_verbose);
+
+ if (spec_info && spec_info->dump)
+ {
+ char c = reload_completed ? 'a' : 'b';
+
+ fprintf (spec_info->dump,
+ ";; %s:\n", current_function_name ());
+
+ fprintf (spec_info->dump,
+ ";; Procedure %cr-begin-data-spec motions == %d\n",
+ c, nr_begin_data);
+ fprintf (spec_info->dump,
+ ";; Procedure %cr-be-in-data-spec motions == %d\n",
+ c, nr_be_in_data);
+ fprintf (spec_info->dump,
+ ";; Procedure %cr-begin-control-spec motions == %d\n",
+ c, nr_begin_control);
+ fprintf (spec_info->dump,
+ ";; Procedure %cr-be-in-control-spec motions == %d\n",
+ c, nr_be_in_control);
+ }
+
+#ifdef ENABLE_CHECKING
+ /* After reload the ia64 backend clobbers the CFG, so we can't check anything. */
+ if (!reload_completed)
+ check_cfg (0, 0);
+#endif
current_sched_info = NULL;
}
@@ -2538,21 +2943,159 @@ fix_inter_tick (rtx head, rtx tail)
0 < N - queued for N cycles. */
int
try_ready (rtx next)
-{
- if (LOG_LINKS (next)
- || (current_sched_info->new_ready
- && !current_sched_info->new_ready (next)))
+{
+ ds_t old_ts, *ts;
+ rtx link;
+
+ ts = &TODO_SPEC (next);
+ old_ts = *ts;
+
+ gcc_assert (!(old_ts & ~(SPECULATIVE | HARD_DEP))
+ && ((old_ts & HARD_DEP)
+ || (old_ts & SPECULATIVE)));
+
+ if (!(current_sched_info->flags & DO_SPECULATION))
+ {
+ if (!LOG_LINKS (next))
+ *ts &= ~HARD_DEP;
+ }
+ else
{
- gcc_assert (QUEUE_INDEX (next) == QUEUE_NOWHERE);
+ *ts &= ~SPECULATIVE & ~HARD_DEP;
+
+ link = LOG_LINKS (next);
+ if (link)
+ {
+ /* LOG_LINKS are maintained sorted.
+ So if DEP_STATUS of the first dep is SPECULATIVE,
+ then all other deps are speculative too. */
+ if (DEP_STATUS (link) & SPECULATIVE)
+ {
+ /* Now we've got NEXT with speculative deps only.
+ 1. Look at the deps to see what we have to do.
+ 2. Check if we can do 'todo'. */
+ *ts = DEP_STATUS (link) & SPECULATIVE;
+ while ((link = XEXP (link, 1)))
+ *ts = ds_merge (*ts, DEP_STATUS (link) & SPECULATIVE);
+
+ if (dep_weak (*ts) < spec_info->weakness_cutoff)
+ /* Too few points. */
+ *ts = (*ts & ~SPECULATIVE) | HARD_DEP;
+ }
+ else
+ *ts |= HARD_DEP;
+ }
+ }
+
+ if (*ts & HARD_DEP)
+ gcc_assert (*ts == old_ts
+ && QUEUE_INDEX (next) == QUEUE_NOWHERE);
+ else if (current_sched_info->new_ready)
+ *ts = current_sched_info->new_ready (next, *ts);
+
+ /* * If !(old_ts & SPECULATIVE) (e.g. HARD_DEP or 0), then insn might
+ have either its original pattern or a changed (speculative) one. This
+ is due to changing ebbs in region scheduling.
+ * But if (old_ts & SPECULATIVE), then we are pretty sure that insn
+ has a speculative pattern.
+
+ We can't assert (!(*ts & HARD_DEP) || *ts == old_ts) here because
+ control-speculative NEXT could have been discarded by sched-rgn.c
+ (the same case as when discarded by can_schedule_ready_p ()). */
+
+ if ((*ts & SPECULATIVE)
+ /* If (old_ts == *ts), then (old_ts & SPECULATIVE) and we don't
+ need to change anything. */
+ && *ts != old_ts)
+ {
+ int res;
+ rtx new_pat;
+
+ gcc_assert ((*ts & SPECULATIVE) && !(*ts & ~SPECULATIVE));
+
+ res = speculate_insn (next, *ts, &new_pat);
+
+ switch (res)
+ {
+ case -1:
+ /* It would be nice to change DEP_STATUS of all dependences,
+ which have ((DEP_STATUS & SPECULATIVE) == *ts) to HARD_DEP,
+ so we won't reanalyze anything. */
+ *ts = (*ts & ~SPECULATIVE) | HARD_DEP;
+ break;
+
+ case 0:
+ /* We follow the rule that every speculative insn
+ has a non-null ORIG_PAT. */
+ if (!ORIG_PAT (next))
+ ORIG_PAT (next) = PATTERN (next);
+ break;
+
+ case 1:
+ if (!ORIG_PAT (next))
+ /* If we are going to overwrite the original pattern
+ of insn, save it. */
+ ORIG_PAT (next) = PATTERN (next);
+
+ change_pattern (next, new_pat);
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+ }
+
+ /* We need to restore pattern only if (*ts == 0), because otherwise it is
+ either correct (*ts & SPECULATIVE),
+ or we simply don't care (*ts & HARD_DEP). */
+
+ gcc_assert (!ORIG_PAT (next)
+ || !RECOVERY_BLOCK (next)
+ || RECOVERY_BLOCK (next) == EXIT_BLOCK_PTR);
+
+ if (*ts == 0 && ORIG_PAT (next) && !RECOVERY_BLOCK (next))
+ /* We should change the pattern of every previously speculative
+ instruction - and we determine if NEXT was speculative by using
+ the ORIG_PAT field. Except one case - simple checks have ORIG_PAT
+ too, hence we also check for the RECOVERY_BLOCK. */
+ {
+ change_pattern (next, ORIG_PAT (next));
+ ORIG_PAT (next) = 0;
+ }
+
+ if (*ts & HARD_DEP)
+ {
+ /* We can't assert (QUEUE_INDEX (next) == QUEUE_NOWHERE) here because
+ control-speculative NEXT could have been discarded by sched-rgn.c
+ (the same case as when discarded by can_schedule_ready_p ()). */
+ /*gcc_assert (QUEUE_INDEX (next) == QUEUE_NOWHERE);*/
+
+ change_queue_index (next, QUEUE_NOWHERE);
return -1;
}
if (sched_verbose >= 2)
- fprintf (sched_dump, ";;\t\tdependencies resolved: insn %s\n",
- (*current_sched_info->print_insn) (next, 0));
-
- adjust_priority (next);
+ {
+ int s = TODO_SPEC (next);
+
+ fprintf (sched_dump, ";;\t\tdependencies resolved: insn %s",
+ (*current_sched_info->print_insn) (next, 0));
+
+ if (spec_info && spec_info->dump)
+ {
+ if (s & BEGIN_DATA)
+ fprintf (spec_info->dump, "; data-spec;");
+ if (s & BEGIN_CONTROL)
+ fprintf (spec_info->dump, "; control-spec;");
+ if (s & BE_IN_CONTROL)
+ fprintf (spec_info->dump, "; in-control-spec;");
+ }
+ fprintf (sched_dump, "\n");
+ }
+
+ adjust_priority (next);
+
return fix_tick_ready (next);
}
@@ -2570,7 +3113,7 @@ fix_tick_ready (rtx next)
int full_p;
tick = INSN_TICK (next);
- /* if tick is note equals to INVALID_TICK, then update
+ /* if tick is not equal to INVALID_TICK, then update
INSN_TICK of NEXT with the most recent resolved dependence
cost. Otherwise, recalculate from scratch. */
full_p = tick == INVALID_TICK;
@@ -2581,10 +3124,7 @@ fix_tick_ready (rtx next)
pro = XEXP (link, 0);
gcc_assert (INSN_TICK (pro) >= MIN_TICK);
- /* We should specify FORWARD link to insn_cost,
- but are giving a BACKWARD one.
- This is ok, because only REG_NOTE_KIND of link is used.
- May be substitute LINK with REG_NOTE_KIND? */
+
tick1 = INSN_TICK (pro) + insn_cost (pro, link, next);
if (tick1 > tick)
tick = tick1;
@@ -2601,7 +3141,7 @@ fix_tick_ready (rtx next)
delay = QUEUE_READY;
change_queue_index (next, delay);
-
+
return delay;
}
@@ -2664,4 +3204,1447 @@ resolve_dep (rtx next, rtx insn)
&& (LOG_LINKS (next) || INSN_DEP_COUNT (next) == 0));
}
+/* Extend H_I_D data. */
+static void
+extend_h_i_d (void)
+{
+ /* We use LUID 0 for the fake insn (UID 0) which holds dependencies for
+ pseudos which do not cross calls. */
+ int new_max_uid = get_max_uid() + 1;
+
+ h_i_d = xrecalloc (h_i_d, new_max_uid, old_max_uid, sizeof (*h_i_d));
+ old_max_uid = new_max_uid;
+
+ if (targetm.sched.h_i_d_extended)
+ targetm.sched.h_i_d_extended ();
+}
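
A minimal sketch of how the lazy bootstrap in sched_init above uses this
function (names from the patch; the comment is illustrative):

  old_max_uid = 0;
  h_i_d = 0;
  extend_h_i_d ();  /* first call allocates h_i_d[0 .. get_max_uid ()], zeroed */

Later calls from extend_global then grow the same table incrementally as
new insns are emitted.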
+
+/* Extend READY, READY_TRY and CHOICE_STACK arrays.
+ N_NEW_INSNS is the number of additional elements to allocate. */
+static void
+extend_ready (int n_new_insns)
+{
+ int i;
+
+ readyp->veclen = rgn_n_insns + n_new_insns + 1 + issue_rate;
+ readyp->vec = XRESIZEVEC (rtx, readyp->vec, readyp->veclen);
+
+ ready_try = xrecalloc (ready_try, rgn_n_insns + n_new_insns + 1,
+ rgn_n_insns + 1, sizeof (char));
+
+ rgn_n_insns += n_new_insns;
+
+ choice_stack = XRESIZEVEC (struct choice_entry, choice_stack,
+ rgn_n_insns + 1);
+
+ for (i = rgn_n_insns; n_new_insns--; i--)
+ choice_stack[i].state = xmalloc (dfa_state_size);
+}
+
+/* Extend global scheduler structures (those that live across calls to
+ schedule_block) to include information about just emitted INSN. */
+static void
+extend_global (rtx insn)
+{
+ gcc_assert (INSN_P (insn));
+ /* These structures have scheduler scope. */
+ extend_h_i_d ();
+ init_h_i_d (insn);
+
+ extend_dependency_caches (1, 0);
+}
+
+/* Extends global and local scheduler structures to include information
+ about just emitted INSN. */
+static void
+extend_all (rtx insn)
+{
+ extend_global (insn);
+
+ /* These structures have block scope. */
+ extend_ready (1);
+
+ (*current_sched_info->add_remove_insn) (insn, 0);
+}
+
+/* Initialize h_i_d entry of the new INSN with default values.
+ Values that are not explicitly initialized here hold zero. */
+static void
+init_h_i_d (rtx insn)
+{
+ INSN_LUID (insn) = luid++;
+ INSN_COST (insn) = -1;
+ TODO_SPEC (insn) = HARD_DEP;
+ QUEUE_INDEX (insn) = QUEUE_NOWHERE;
+ INSN_TICK (insn) = INVALID_TICK;
+ INTER_TICK (insn) = INVALID_TICK;
+ find_insn_reg_weight1 (insn);
+}
+
+/* Generates recovery code for INSN. */
+static void
+generate_recovery_code (rtx insn)
+{
+ if (TODO_SPEC (insn) & BEGIN_SPEC)
+ begin_speculative_block (insn);
+
+ /* Here we have insn with no dependencies to
+ instructions other than CHECK_SPEC ones. */
+
+ if (TODO_SPEC (insn) & BE_IN_SPEC)
+ add_to_speculative_block (insn);
+}
+
+/* Helper function.
+ Tries to add speculative dependencies of type FS between instructions
+ in the LINK list and TWIN. */
+static void
+process_insn_depend_be_in_spec (rtx link, rtx twin, ds_t fs)
+{
+ for (; link; link = XEXP (link, 1))
+ {
+ ds_t ds;
+ rtx consumer;
+
+ consumer = XEXP (link, 0);
+
+ ds = DEP_STATUS (link);
+
+ if (fs && (ds & DEP_TYPES) == DEP_TRUE)
+ ds = (ds & ~BEGIN_SPEC) | fs;
+
+ add_back_forw_dep (consumer, twin, REG_NOTE_KIND (link), ds);
+ }
+}
+
+/* Generates recovery code for BEGIN speculative INSN. */
+static void
+begin_speculative_block (rtx insn)
+{
+ if (TODO_SPEC (insn) & BEGIN_DATA)
+ nr_begin_data++;
+ if (TODO_SPEC (insn) & BEGIN_CONTROL)
+ nr_begin_control++;
+
+ create_check_block_twin (insn, false);
+
+ TODO_SPEC (insn) &= ~BEGIN_SPEC;
+}
+
+/* Generates recovery code for BE_IN speculative INSN. */
+static void
+add_to_speculative_block (rtx insn)
+{
+ ds_t ts;
+ rtx link, twins = NULL;
+
+ ts = TODO_SPEC (insn);
+ gcc_assert (!(ts & ~BE_IN_SPEC));
+
+ if (ts & BE_IN_DATA)
+ nr_be_in_data++;
+ if (ts & BE_IN_CONTROL)
+ nr_be_in_control++;
+
+ TODO_SPEC (insn) &= ~BE_IN_SPEC;
+ gcc_assert (!TODO_SPEC (insn));
+
+ DONE_SPEC (insn) |= ts;
+
+ /* First we convert all simple checks to branchy. */
+ for (link = LOG_LINKS (insn); link;)
+ {
+ rtx check;
+
+ check = XEXP (link, 0);
+
+ if (RECOVERY_BLOCK (check))
+ {
+ create_check_block_twin (check, true);
+ link = LOG_LINKS (insn);
+ }
+ else
+ link = XEXP (link, 1);
+ }
+
+ clear_priorities (insn);
+
+ do
+ {
+ rtx link, check, twin;
+ basic_block rec;
+
+ link = LOG_LINKS (insn);
+ gcc_assert (!(DEP_STATUS (link) & BEGIN_SPEC)
+ && (DEP_STATUS (link) & BE_IN_SPEC)
+ && (DEP_STATUS (link) & DEP_TYPES) == DEP_TRUE);
+
+ check = XEXP (link, 0);
+ gcc_assert (!RECOVERY_BLOCK (check) && !ORIG_PAT (check)
+ && QUEUE_INDEX (check) == QUEUE_NOWHERE);
+
+ rec = BLOCK_FOR_INSN (check);
+
+ twin = emit_insn_before (copy_rtx (PATTERN (insn)), BB_END (rec));
+ extend_global (twin);
+
+ RESOLVED_DEPS (twin) = copy_DEPS_LIST_list (RESOLVED_DEPS (insn));
+
+ if (sched_verbose && spec_info->dump)
+ /* INSN_BB (insn) isn't determined for twin insns yet.
+ So we can't use current_sched_info->print_insn. */
+ fprintf (spec_info->dump, ";;\t\tGenerated twin insn : %d/rec%d\n",
+ INSN_UID (twin), rec->index);
+
+ twins = alloc_INSN_LIST (twin, twins);
+
+ /* Add dependences between TWIN and all appropriate
+ instructions from REC. */
+ do
+ {
+ add_back_forw_dep (twin, check, REG_DEP_TRUE, DEP_TRUE);
+
+ do
+ {
+ link = XEXP (link, 1);
+ if (link)
+ {
+ check = XEXP (link, 0);
+ if (BLOCK_FOR_INSN (check) == rec)
+ break;
+ }
+ else
+ break;
+ }
+ while (1);
+ }
+ while (link);
+
+ process_insn_depend_be_in_spec (INSN_DEPEND (insn), twin, ts);
+
+ for (link = LOG_LINKS (insn); link;)
+ {
+ check = XEXP (link, 0);
+
+ if (BLOCK_FOR_INSN (check) == rec)
+ {
+ delete_back_forw_dep (insn, check);
+ link = LOG_LINKS (insn);
+ }
+ else
+ link = XEXP (link, 1);
+ }
+ }
+ while (LOG_LINKS (insn));
+
+ /* We can't add the dependence between insn and twin earlier because
+ that would make twin appear in the INSN_DEPEND (insn). */
+ while (twins)
+ {
+ rtx twin;
+
+ twin = XEXP (twins, 0);
+ calc_priorities (twin);
+ add_back_forw_dep (twin, insn, REG_DEP_OUTPUT, DEP_OUTPUT);
+
+ twin = XEXP (twins, 1);
+ free_INSN_LIST_node (twins);
+ twins = twin;
+ }
+}
+
+/* Extend the array pointed to by P, filling only the new part with zeros. */
+void *
+xrecalloc (void *p, size_t new_nmemb, size_t old_nmemb, size_t size)
+{
+ gcc_assert (new_nmemb >= old_nmemb);
+ p = XRESIZEVAR (void, p, new_nmemb * size);
+ memset (((char *) p) + old_nmemb * size, 0, (new_nmemb - old_nmemb) * size);
+ return p;
+}
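
A minimal usage sketch, assuming only the definition above; the array name
and the wrapper are illustrative. Old entries survive the resize and only
the new tail reads as zero, which is what the h_i_d and line_note_head
extensions rely on:

  static int *costs;
  static size_t n_costs;

  static void
  grow_costs (size_t new_n)
  {
    /* Entries [0, n_costs) are preserved; [n_costs, new_n) are zeroed.  */
    costs = xrecalloc (costs, new_n, n_costs, sizeof (*costs));
    n_costs = new_n;
  }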
+
+/* Return the probability of speculation success for the speculation
+ status DS. */
+static dw_t
+dep_weak (ds_t ds)
+{
+ ds_t res = 1, dt;
+ int n = 0;
+
+ dt = FIRST_SPEC_TYPE;
+ do
+ {
+ if (ds & dt)
+ {
+ res *= (ds_t) get_dep_weak (ds, dt);
+ n++;
+ }
+
+ if (dt == LAST_SPEC_TYPE)
+ break;
+ dt <<= SPEC_TYPE_SHIFT;
+ }
+ while (1);
+
+ gcc_assert (n);
+ while (--n)
+ res /= MAX_DEP_WEAK;
+
+ if (res < MIN_DEP_WEAK)
+ res = MIN_DEP_WEAK;
+
+ gcc_assert (res <= MAX_DEP_WEAK);
+
+ return (dw_t) res;
+}
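
A worked example of the computation above; the concrete value of
MAX_DEP_WEAK is assumed here for illustration only:

  /* Assume MAX_DEP_WEAK == 255.  For DS with BEGIN_DATA weakness 200
     and BEGIN_CONTROL weakness 128:
       res = 200 * 128 = 25600, n = 2;
       one rescaling division gives res = 25600 / 255 = 100,
     i.e. roughly the product of the two success probabilities:
       (200/255) * (128/255) ~= 0.39, and 0.39 * 255 ~= 100.  */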
+
+/* Helper function.
+ Find fallthru edge from PRED. */
+static edge
+find_fallthru_edge (basic_block pred)
+{
+ edge e;
+ edge_iterator ei;
+ basic_block succ;
+
+ succ = pred->next_bb;
+ gcc_assert (succ->prev_bb == pred);
+
+ if (EDGE_COUNT (pred->succs) <= EDGE_COUNT (succ->preds))
+ {
+ FOR_EACH_EDGE (e, ei, pred->succs)
+ if (e->flags & EDGE_FALLTHRU)
+ {
+ gcc_assert (e->dest == succ);
+ return e;
+ }
+ }
+ else
+ {
+ FOR_EACH_EDGE (e, ei, succ->preds)
+ if (e->flags & EDGE_FALLTHRU)
+ {
+ gcc_assert (e->src == pred);
+ return e;
+ }
+ }
+
+ return NULL;
+}
+
+/* Initialize BEFORE_RECOVERY variable. */
+static void
+init_before_recovery (void)
+{
+ basic_block last;
+ edge e;
+
+ last = EXIT_BLOCK_PTR->prev_bb;
+ e = find_fallthru_edge (last);
+
+ if (e)
+ {
+ /* We create two basic blocks:
+ 1. A single-instruction block that is inserted right after E->SRC
+ and jumps to
+ 2. An empty block placed right before EXIT_BLOCK.
+ Recovery blocks will be emitted between these two blocks. */
+
+ basic_block single, empty;
+ rtx x, label;
+
+ single = create_empty_bb (last);
+ empty = create_empty_bb (single);
+
+ single->count = last->count;
+ empty->count = last->count;
+ single->frequency = last->frequency;
+ empty->frequency = last->frequency;
+ BB_COPY_PARTITION (single, last);
+ BB_COPY_PARTITION (empty, last);
+
+ redirect_edge_succ (e, single);
+ make_single_succ_edge (single, empty, 0);
+ make_single_succ_edge (empty, EXIT_BLOCK_PTR,
+ EDGE_FALLTHRU | EDGE_CAN_FALLTHRU);
+
+ label = block_label (empty);
+ x = emit_jump_insn_after (gen_jump (label), BB_END (single));
+ JUMP_LABEL (x) = label;
+ LABEL_NUSES (label)++;
+ extend_global (x);
+
+ emit_barrier_after (x);
+
+ add_block (empty, 0);
+ add_block (single, 0);
+
+ before_recovery = single;
+
+ if (sched_verbose >= 2 && spec_info->dump)
+ fprintf (spec_info->dump,
+ ";;\t\tFixed fallthru to EXIT : %d->>%d->%d->>EXIT\n",
+ last->index, single->index, empty->index);
+ }
+ else
+ before_recovery = last;
+}
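
A sketch of the CFG rewrite above when a fallthru edge to EXIT exists
(block names as in the comment; the picture is illustrative):

  before:  ... -> last -----------fallthru-----------> EXIT

  after:   ... -> last -> single --jump--> empty --fallthru--> EXIT

Recovery blocks are later emitted between SINGLE and EMPTY; SINGLE's
unconditional jump keeps them off the straight-line path, so they are
reached only through the jumps of speculation checks.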
+
+/* Returns new recovery block. */
+static basic_block
+create_recovery_block (void)
+{
+ rtx label;
+ basic_block rec;
+
+ added_recovery_block_p = true;
+
+ if (!before_recovery)
+ init_before_recovery ();
+
+ label = gen_label_rtx ();
+ gcc_assert (BARRIER_P (NEXT_INSN (BB_END (before_recovery))));
+ label = emit_label_after (label, NEXT_INSN (BB_END (before_recovery)));
+
+ rec = create_basic_block (label, label, before_recovery);
+ emit_barrier_after (BB_END (rec));
+
+ if (BB_PARTITION (before_recovery) != BB_UNPARTITIONED)
+ BB_SET_PARTITION (rec, BB_COLD_PARTITION);
+
+ if (sched_verbose && spec_info->dump)
+ fprintf (spec_info->dump, ";;\t\tGenerated recovery block rec%d\n",
+ rec->index);
+
+ before_recovery = rec;
+
+ return rec;
+}
+
+/* This function creates recovery code for INSN. If MUTATE_P is nonzero,
+ INSN is a simple check that should be converted to a branchy one. */
+static void
+create_check_block_twin (rtx insn, bool mutate_p)
+{
+ basic_block rec;
+ rtx label, check, twin, link;
+ ds_t fs;
+
+ gcc_assert (ORIG_PAT (insn)
+ && (!mutate_p
+ || (RECOVERY_BLOCK (insn) == EXIT_BLOCK_PTR
+ && !(TODO_SPEC (insn) & SPECULATIVE))));
+
+ /* Create recovery block. */
+ if (mutate_p || targetm.sched.needs_block_p (insn))
+ {
+ rec = create_recovery_block ();
+ label = BB_HEAD (rec);
+ }
+ else
+ {
+ rec = EXIT_BLOCK_PTR;
+ label = 0;
+ }
+
+ /* Emit CHECK. */
+ check = targetm.sched.gen_check (insn, label, mutate_p);
+
+ if (rec != EXIT_BLOCK_PTR)
+ {
+ /* To have mem_reg alive at the beginning of second_bb,
+ we emit check BEFORE insn, so that after splitting,
+ insn will be at the beginning of second_bb, which will
+ provide us with the correct life information. */
+ check = emit_jump_insn_before (check, insn);
+ JUMP_LABEL (check) = label;
+ LABEL_NUSES (label)++;
+ }
+ else
+ check = emit_insn_before (check, insn);
+
+ /* Extend data structures. */
+ extend_all (check);
+ RECOVERY_BLOCK (check) = rec;
+
+ if (sched_verbose && spec_info->dump)
+ fprintf (spec_info->dump, ";;\t\tGenerated check insn : %s\n",
+ (*current_sched_info->print_insn) (check, 0));
+
+ gcc_assert (ORIG_PAT (insn));
+
+ /* Initialize TWIN (twin is a duplicate of the original instruction
+ in the recovery block). */
+ if (rec != EXIT_BLOCK_PTR)
+ {
+ rtx link;
+
+ for (link = RESOLVED_DEPS (insn); link; link = XEXP (link, 1))
+ if (DEP_STATUS (link) & DEP_OUTPUT)
+ {
+ RESOLVED_DEPS (check) =
+ alloc_DEPS_LIST (XEXP (link, 0), RESOLVED_DEPS (check), DEP_TRUE);
+ PUT_REG_NOTE_KIND (RESOLVED_DEPS (check), REG_DEP_TRUE);
+ }
+
+ twin = emit_insn_after (ORIG_PAT (insn), BB_END (rec));
+ extend_global (twin);
+
+ if (sched_verbose && spec_info->dump)
+ /* INSN_BB (insn) isn't determined for twin insns yet.
+ So we can't use current_sched_info->print_insn. */
+ fprintf (spec_info->dump, ";;\t\tGenerated twin insn : %d/rec%d\n",
+ INSN_UID (twin), rec->index);
+ }
+ else
+ {
+ ORIG_PAT (check) = ORIG_PAT (insn);
+ HAS_INTERNAL_DEP (check) = 1;
+ twin = check;
+ /* ??? We probably should change all OUTPUT dependencies to
+ (TRUE | OUTPUT). */
+ }
+
+ RESOLVED_DEPS (twin) = copy_DEPS_LIST_list (RESOLVED_DEPS (insn));
+
+ if (rec != EXIT_BLOCK_PTR)
+ /* In case of branchy check, fix CFG. */
+ {
+ basic_block first_bb, second_bb;
+ rtx jump;
+ edge e;
+ int edge_flags;
+
+ first_bb = BLOCK_FOR_INSN (check);
+ e = split_block (first_bb, check);
+ /* split_block emits note if *check == BB_END. Probably it
+ is better to rip that note off. */
+ gcc_assert (e->src == first_bb);
+ second_bb = e->dest;
+
+ /* This fixes the incoming edge. */
+ /* ??? Which other flags should be specified? */
+ if (BB_PARTITION (first_bb) != BB_PARTITION (rec))
+ /* Partition type is the same if it is "unpartitioned". */
+ edge_flags = EDGE_CROSSING;
+ else
+ edge_flags = 0;
+
+ e = make_edge (first_bb, rec, edge_flags);
+
+ add_block (second_bb, first_bb);
+
+ gcc_assert (NOTE_INSN_BASIC_BLOCK_P (BB_HEAD (second_bb)));
+ label = block_label (second_bb);
+ jump = emit_jump_insn_after (gen_jump (label), BB_END (rec));
+ JUMP_LABEL (jump) = label;
+ LABEL_NUSES (label)++;
+ extend_global (jump);
+
+ if (BB_PARTITION (second_bb) != BB_PARTITION (rec))
+ /* Partition type is the same if it is "unpartitioned". */
+ {
+ /* Rewritten from cfgrtl.c. */
+ if (flag_reorder_blocks_and_partition
+ && targetm.have_named_sections
+ /*&& !any_condjump_p (jump)*/)
+ /* any_condjump_p (jump) == false.
+ We don't need the same note for the check because
+ any_condjump_p (check) == true. */
+ {
+ REG_NOTES (jump) = gen_rtx_EXPR_LIST (REG_CROSSING_JUMP,
+ NULL_RTX,
+ REG_NOTES (jump));
+ }
+ edge_flags = EDGE_CROSSING;
+ }
+ else
+ edge_flags = 0;
+
+ make_single_succ_edge (rec, second_bb, edge_flags);
+
+ add_block (rec, EXIT_BLOCK_PTR);
+ }
+
+ /* Move backward dependences from INSN to CHECK and
+ move forward dependences from INSN to TWIN. */
+ for (link = LOG_LINKS (insn); link; link = XEXP (link, 1))
+ {
+ ds_t ds;
+
+ /* If BEGIN_DATA: [insn ~~TRUE~~> producer]:
+ check --TRUE--> producer ??? or ANTI ???
+ twin --TRUE--> producer
+ twin --ANTI--> check
+
+ If BEGIN_CONTROL: [insn ~~ANTI~~> producer]:
+ check --ANTI--> producer
+ twin --ANTI--> producer
+ twin --ANTI--> check
+
+ If BE_IN_SPEC: [insn ~~TRUE~~> producer]:
+ check ~~TRUE~~> producer
+ twin ~~TRUE~~> producer
+ twin --ANTI--> check */
+
+ ds = DEP_STATUS (link);
+
+ if (ds & BEGIN_SPEC)
+ {
+ gcc_assert (!mutate_p);
+ ds &= ~BEGIN_SPEC;
+ }
+
+ if (rec != EXIT_BLOCK_PTR)
+ {
+ add_back_forw_dep (check, XEXP (link, 0), REG_NOTE_KIND (link), ds);
+ add_back_forw_dep (twin, XEXP (link, 0), REG_NOTE_KIND (link), ds);
+ }
+ else
+ add_back_forw_dep (check, XEXP (link, 0), REG_NOTE_KIND (link), ds);
+ }
+
+ for (link = LOG_LINKS (insn); link;)
+ if ((DEP_STATUS (link) & BEGIN_SPEC)
+ || mutate_p)
+ /* We can delete this dep only if we totally overcome it with
+ BEGIN_SPECULATION. */
+ {
+ delete_back_forw_dep (insn, XEXP (link, 0));
+ link = LOG_LINKS (insn);
+ }
+ else
+ link = XEXP (link, 1);
+
+ fs = 0;
+
+ /* Fields (DONE_SPEC (x) & BEGIN_SPEC) and CHECK_SPEC (x) are set only
+ here. */
+
+ gcc_assert (!DONE_SPEC (insn));
+
+ if (!mutate_p)
+ {
+ ds_t ts = TODO_SPEC (insn);
+
+ DONE_SPEC (insn) = ts & BEGIN_SPEC;
+ CHECK_SPEC (check) = ts & BEGIN_SPEC;
+
+ if (ts & BEGIN_DATA)
+ fs = set_dep_weak (fs, BE_IN_DATA, get_dep_weak (ts, BEGIN_DATA));
+ if (ts & BEGIN_CONTROL)
+ fs = set_dep_weak (fs, BE_IN_CONTROL, get_dep_weak (ts, BEGIN_CONTROL));
+ }
+ else
+ CHECK_SPEC (check) = CHECK_SPEC (insn);
+
+ /* Future speculations: call the helper. */
+ process_insn_depend_be_in_spec (INSN_DEPEND (insn), twin, fs);
+
+ if (rec != EXIT_BLOCK_PTR)
+ {
+ /* Which types of dependencies to use here is, generally,
+ a machine-dependent question... But, for now, it is not. */
+
+ if (!mutate_p)
+ {
+ add_back_forw_dep (check, insn, REG_DEP_TRUE, DEP_TRUE);
+ add_back_forw_dep (twin, insn, REG_DEP_OUTPUT, DEP_OUTPUT);
+ }
+ else
+ {
+ if (spec_info->dump)
+ fprintf (spec_info->dump, ";;\t\tRemoved simple check : %s\n",
+ (*current_sched_info->print_insn) (insn, 0));
+
+ for (link = INSN_DEPEND (insn); link; link = INSN_DEPEND (insn))
+ delete_back_forw_dep (XEXP (link, 0), insn);
+
+ if (QUEUE_INDEX (insn) != QUEUE_NOWHERE)
+ try_ready (check);
+
+ sched_remove_insn (insn);
+ }
+
+ add_back_forw_dep (twin, check, REG_DEP_ANTI, DEP_ANTI);
+ }
+ else
+ add_back_forw_dep (check, insn, REG_DEP_TRUE, DEP_TRUE | DEP_OUTPUT);
+
+ if (!mutate_p)
+ /* Fix priorities. If MUTATE_P is nonzero, this is not necessary,
+ because it'll be done later in add_to_speculative_block. */
+ {
+ clear_priorities (twin);
+ calc_priorities (twin);
+ }
+}
+
+/* Remove dependencies between instructions in the recovery block REC
+ and the usual region instructions. Inner dependences are kept so it
+ won't be necessary to recompute them. */
+static void
+fix_recovery_deps (basic_block rec)
+{
+ rtx note, insn, link, jump, ready_list = 0;
+ bitmap_head in_ready;
+
+ bitmap_initialize (&in_ready, 0);
+
+ /* NOTE - a basic block note. */
+ note = NEXT_INSN (BB_HEAD (rec));
+ gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
+ insn = BB_END (rec);
+ gcc_assert (JUMP_P (insn));
+ insn = PREV_INSN (insn);
+
+ do
+ {
+ for (link = INSN_DEPEND (insn); link;)
+ {
+ rtx consumer;
+
+ consumer = XEXP (link, 0);
+
+ if (BLOCK_FOR_INSN (consumer) != rec)
+ {
+ delete_back_forw_dep (consumer, insn);
+
+ if (!bitmap_bit_p (&in_ready, INSN_LUID (consumer)))
+ {
+ ready_list = alloc_INSN_LIST (consumer, ready_list);
+ bitmap_set_bit (&in_ready, INSN_LUID (consumer));
+ }
+
+ link = INSN_DEPEND (insn);
+ }
+ else
+ {
+ gcc_assert ((DEP_STATUS (link) & DEP_TYPES) == DEP_TRUE);
+
+ link = XEXP (link, 1);
+ }
+ }
+
+ insn = PREV_INSN (insn);
+ }
+ while (insn != note);
+
+ bitmap_clear (&in_ready);
+
+ /* Try to add instructions to the ready or queue list. */
+ for (link = ready_list; link; link = XEXP (link, 1))
+ try_ready (XEXP (link, 0));
+ free_INSN_LIST_list (&ready_list);
+
+ /* Fix the jump's dependences. */
+ insn = BB_HEAD (rec);
+ jump = BB_END (rec);
+
+ gcc_assert (LABEL_P (insn));
+ insn = NEXT_INSN (insn);
+
+ gcc_assert (NOTE_INSN_BASIC_BLOCK_P (insn));
+ add_jump_dependencies (insn, jump);
+}
+
+/* The function saves line notes at the beginning of block B. */
+static void
+associate_line_notes_with_blocks (basic_block b)
+{
+ rtx line;
+
+ for (line = BB_HEAD (b); line; line = PREV_INSN (line))
+ if (NOTE_P (line) && NOTE_LINE_NUMBER (line) > 0)
+ {
+ line_note_head[b->index] = line;
+ break;
+ }
+ /* Do a forward search as well, since we won't get to see the first
+ notes in a basic block. */
+ for (line = BB_HEAD (b); line; line = NEXT_INSN (line))
+ {
+ if (INSN_P (line))
+ break;
+ if (NOTE_P (line) && NOTE_LINE_NUMBER (line) > 0)
+ line_note_head[b->index] = line;
+ }
+}
+
+/* Change the pattern of INSN to NEW_PAT. */
+static void
+change_pattern (rtx insn, rtx new_pat)
+{
+ int t;
+
+ t = validate_change (insn, &PATTERN (insn), new_pat, 0);
+ gcc_assert (t);
+ /* Invalidate INSN_COST, so it'll be recalculated. */
+ INSN_COST (insn) = -1;
+ /* Invalidate INSN_TICK, so it'll be recalculated. */
+ INSN_TICK (insn) = INVALID_TICK;
+ dfa_clear_single_insn_cache (insn);
+}
+
+
+/* -1 - can't speculate,
+ 0 - for speculation with REQUEST mode it is OK to use
+ current instruction pattern,
+ 1 - need to change pattern for *NEW_PAT to be speculative. */
+static int
+speculate_insn (rtx insn, ds_t request, rtx *new_pat)
+{
+ gcc_assert (current_sched_info->flags & DO_SPECULATION
+ && (request & SPECULATIVE));
+
+ if (!NONJUMP_INSN_P (insn)
+ || HAS_INTERNAL_DEP (insn)
+ || SCHED_GROUP_P (insn)
+ || side_effects_p (PATTERN (insn))
+ || (request & spec_info->mask) != request)
+ return -1;
+
+ gcc_assert (!RECOVERY_BLOCK (insn));
+
+ if (request & BE_IN_SPEC)
+ {
+ if (may_trap_p (PATTERN (insn)))
+ return -1;
+
+ if (!(request & BEGIN_SPEC))
+ return 0;
+ }
+
+ return targetm.sched.speculate_insn (insn, request & BEGIN_SPEC, new_pat);
+}
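
The caller's handling of these three return values can be seen in
try_ready above; schematically (the comments are illustrative):

  switch (speculate_insn (next, *ts, &new_pat))
    {
    case -1: /* can't speculate: demote to HARD_DEP */ break;
    case 0:  /* current pattern already fits the request */ break;
    case 1:  /* save ORIG_PAT, then change_pattern to NEW_PAT */ break;
    }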
+
+/* Print some information about block BB, which starts with HEAD and
+ ends with TAIL, before scheduling it.
+ I is zero if the scheduler is about to start with a fresh ebb. */
+static void
+dump_new_block_header (int i, basic_block bb, rtx head, rtx tail)
+{
+ if (!i)
+ fprintf (sched_dump,
+ ";; ======================================================\n");
+ else
+ fprintf (sched_dump,
+ ";; =====================ADVANCING TO=====================\n");
+ fprintf (sched_dump,
+ ";; -- basic block %d from %d to %d -- %s reload\n",
+ bb->index, INSN_UID (head), INSN_UID (tail),
+ (reload_completed ? "after" : "before"));
+ fprintf (sched_dump,
+ ";; ======================================================\n");
+ fprintf (sched_dump, "\n");
+}
+
+/* Unlink basic block notes and labels and save them, so they
+ can be easily restored. We unlink basic block notes in the EBB to
+ provide backward compatibility with the previous code, as target backends
+ assume that there will be only instructions between
+ current_sched_info->{head and tail}. We restore these notes as soon
+ as we can.
+ FIRST (LAST) is the first (last) basic block in the ebb.
+ NB: In the usual case (FIRST == LAST) nothing is really done. */
+void
+unlink_bb_notes (basic_block first, basic_block last)
+{
+ /* We DON'T unlink basic block notes of the first block in the ebb. */
+ if (first == last)
+ return;
+
+ bb_header = xmalloc (last_basic_block * sizeof (*bb_header));
+
+ /* Make a sentinel. */
+ if (last->next_bb != EXIT_BLOCK_PTR)
+ bb_header[last->next_bb->index] = 0;
+
+ first = first->next_bb;
+ do
+ {
+ rtx prev, label, note, next;
+
+ label = BB_HEAD (last);
+ if (LABEL_P (label))
+ note = NEXT_INSN (label);
+ else
+ note = label;
+ gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
+
+ prev = PREV_INSN (label);
+ next = NEXT_INSN (note);
+ gcc_assert (prev && next);
+
+ NEXT_INSN (prev) = next;
+ PREV_INSN (next) = prev;
+
+ bb_header[last->index] = label;
+
+ if (last == first)
+ break;
+
+ last = last->prev_bb;
+ }
+ while (1);
+}
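
A hedged usage sketch of the unlink/restore pair (the block variables are
illustrative): the two calls bracket scheduling of a multi-block ebb so
that backends see only instructions between current_sched_info->{head and
tail}:

  unlink_bb_notes (first_bb, last_bb);
  /* ... schedule the ebb as one flat insn sequence ... */
  restore_bb_notes (first_bb);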
+
+/* Restore basic block notes.
+ FIRST is the first basic block in the ebb. */
+static void
+restore_bb_notes (basic_block first)
+{
+ if (!bb_header)
+ return;
+
+ /* We DON'T unlink basic block notes of the first block in the ebb. */
+ first = first->next_bb;
+ /* Remember: FIRST is actually the second basic block in the ebb. */
+
+ while (first != EXIT_BLOCK_PTR
+ && bb_header[first->index])
+ {
+ rtx prev, label, note, next;
+
+ label = bb_header[first->index];
+ prev = PREV_INSN (label);
+ next = NEXT_INSN (prev);
+
+ if (LABEL_P (label))
+ note = NEXT_INSN (label);
+ else
+ note = label;
+ gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
+
+ bb_header[first->index] = 0;
+
+ NEXT_INSN (prev) = label;
+ NEXT_INSN (note) = next;
+ PREV_INSN (next) = note;
+
+ first = first->next_bb;
+ }
+
+ free (bb_header);
+ bb_header = 0;
+}
+
+/* Extend per basic block data structures of the scheduler.
+ If BB is NULL, initialize structures for the whole CFG.
+ Otherwise, initialize them for the just created BB. */
+static void
+extend_bb (basic_block bb)
+{
+ rtx insn;
+
+ if (write_symbols != NO_DEBUG)
+ {
+ /* Save-line-note-head:
+ Determine the line-number at the start of each basic block.
+ This must be computed and saved now, because after a basic block's
+ predecessor has been scheduled, it is impossible to accurately
+ determine the correct line number for the first insn of the block. */
+ line_note_head = xrecalloc (line_note_head, last_basic_block,
+ old_last_basic_block,
+ sizeof (*line_note_head));
+
+ if (bb)
+ associate_line_notes_with_blocks (bb);
+ else
+ FOR_EACH_BB (bb)
+ associate_line_notes_with_blocks (bb);
+ }
+
+ old_last_basic_block = last_basic_block;
+
+ if (current_sched_info->flags & USE_GLAT)
+ {
+ glat_start = xrealloc (glat_start,
+ last_basic_block * sizeof (*glat_start));
+ glat_end = xrealloc (glat_end, last_basic_block * sizeof (*glat_end));
+ }
+
+ /* The following is done to keep current_sched_info->next_tail non-null. */
+
+ insn = BB_END (EXIT_BLOCK_PTR->prev_bb);
+ if (NEXT_INSN (insn) == 0
+ || (!NOTE_P (insn)
+ && !LABEL_P (insn)
+ /* Don't emit a NOTE if it would end up before a BARRIER. */
+ && !BARRIER_P (NEXT_INSN (insn))))
+ {
+ emit_note_after (NOTE_INSN_DELETED, insn);
+ /* Make insn appear outside BB. */
+ BB_END (EXIT_BLOCK_PTR->prev_bb) = insn;
+ }
+}
+
+/* Add a basic block BB to extended basic block EBB.
+ If EBB is EXIT_BLOCK_PTR, then BB is a recovery block.
+ If EBB is NULL, then BB should be a new region. */
+void
+add_block (basic_block bb, basic_block ebb)
+{
+ gcc_assert (current_sched_info->flags & DETACH_LIFE_INFO
+ && bb->il.rtl->global_live_at_start == 0
+ && bb->il.rtl->global_live_at_end == 0);
+
+ extend_bb (bb);
+
+ glat_start[bb->index] = 0;
+ glat_end[bb->index] = 0;
+
+ if (current_sched_info->add_block)
+ /* This changes only data structures of the front-end. */
+ current_sched_info->add_block (bb, ebb);
+}
+
+/* Helper function.
+ Fix the CFG after both intra- and inter-block movement of
+ control_flow_insn_p JUMP. */
+static void
+fix_jump_move (rtx jump)
+{
+ basic_block bb, jump_bb, jump_bb_next;
+
+ bb = BLOCK_FOR_INSN (PREV_INSN (jump));
+ jump_bb = BLOCK_FOR_INSN (jump);
+ jump_bb_next = jump_bb->next_bb;
+
+ gcc_assert (current_sched_info->flags & SCHED_EBB
+ || (RECOVERY_BLOCK (jump)
+ && RECOVERY_BLOCK (jump) != EXIT_BLOCK_PTR));
+
+ if (!NOTE_INSN_BASIC_BLOCK_P (BB_END (jump_bb_next)))
+ /* if jump_bb_next is not empty. */
+ BB_END (jump_bb) = BB_END (jump_bb_next);
+
+ if (BB_END (bb) != PREV_INSN (jump))
+ /* Then there are instructions after jump that should be placed
+ in jump_bb_next. */
+ BB_END (jump_bb_next) = BB_END (bb);
+ else
+ /* Otherwise jump_bb_next is empty. */
+ BB_END (jump_bb_next) = NEXT_INSN (BB_HEAD (jump_bb_next));
+
+ /* To make assertion in move_insn happy. */
+ BB_END (bb) = PREV_INSN (jump);
+
+ update_bb_for_insn (jump_bb_next);
+}
+
+/* Fix CFG after interblock movement of control_flow_insn_p JUMP. */
+static void
+move_block_after_check (rtx jump)
+{
+ basic_block bb, jump_bb, jump_bb_next;
+ VEC(edge,gc) *t;
+
+ bb = BLOCK_FOR_INSN (PREV_INSN (jump));
+ jump_bb = BLOCK_FOR_INSN (jump);
+ jump_bb_next = jump_bb->next_bb;
+
+ update_bb_for_insn (jump_bb);
+
+ gcc_assert (RECOVERY_BLOCK (jump)
+ || RECOVERY_BLOCK (BB_END (jump_bb_next)));
+
+ unlink_block (jump_bb_next);
+ link_block (jump_bb_next, bb);
+
+ t = bb->succs;
+ bb->succs = 0;
+ move_succs (&(jump_bb->succs), bb);
+ move_succs (&(jump_bb_next->succs), jump_bb);
+ move_succs (&t, jump_bb_next);
+
+ if (current_sched_info->fix_recovery_cfg)
+ current_sched_info->fix_recovery_cfg
+ (bb->index, jump_bb->index, jump_bb_next->index);
+}
+
+/* Helper function for move_block_after_check.
+ This function attaches the edge vector pointed to by SUCCSP to
+ block TO. */
+static void
+move_succs (VEC(edge,gc) **succsp, basic_block to)
+{
+ edge e;
+ edge_iterator ei;
+
+ gcc_assert (to->succs == 0);
+
+ to->succs = *succsp;
+
+ FOR_EACH_EDGE (e, ei, to->succs)
+ e->src = to;
+
+ *succsp = 0;
+}
+
+/* Initialize GLAT (global_live_at_{start, end}) structures.
+ GLAT structures are used to substitute for the global_live_at_{start, end}
+ regsets during scheduling. This is necessary to use such functions as
+ split_block (), as they assume consistency of register live information. */
+static void
+init_glat (void)
+{
+ basic_block bb;
+
+ FOR_ALL_BB (bb)
+ init_glat1 (bb);
+}
+
+/* Helper function for init_glat. */
+static void
+init_glat1 (basic_block bb)
+{
+ gcc_assert (bb->il.rtl->global_live_at_start != 0
+ && bb->il.rtl->global_live_at_end != 0);
+
+ glat_start[bb->index] = bb->il.rtl->global_live_at_start;
+ glat_end[bb->index] = bb->il.rtl->global_live_at_end;
+
+ if (current_sched_info->flags & DETACH_LIFE_INFO)
+ {
+ bb->il.rtl->global_live_at_start = 0;
+ bb->il.rtl->global_live_at_end = 0;
+ }
+}
+
+/* Attach reg_live_info back to basic blocks.
+ Also save regsets that should not have been changed during scheduling,
+ for checking purposes (see check_reg_live). */
+void
+attach_life_info (void)
+{
+ basic_block bb;
+
+ FOR_ALL_BB (bb)
+ attach_life_info1 (bb);
+}
+
+/* Helper function for attach_life_info. */
+static void
+attach_life_info1 (basic_block bb)
+{
+ gcc_assert (bb->il.rtl->global_live_at_start == 0
+ && bb->il.rtl->global_live_at_end == 0);
+
+ if (glat_start[bb->index])
+ {
+ gcc_assert (glat_end[bb->index]);
+
+ bb->il.rtl->global_live_at_start = glat_start[bb->index];
+ bb->il.rtl->global_live_at_end = glat_end[bb->index];
+
+ /* Make them NULL, so they won't be freed in free_glat. */
+ glat_start[bb->index] = 0;
+ glat_end[bb->index] = 0;
+
+#ifdef ENABLE_CHECKING
+ if (bb->index < NUM_FIXED_BLOCKS
+ || current_sched_info->region_head_or_leaf_p (bb, 0))
+ {
+ glat_start[bb->index] = ALLOC_REG_SET (&reg_obstack);
+ COPY_REG_SET (glat_start[bb->index],
+ bb->il.rtl->global_live_at_start);
+ }
+
+ if (bb->index < NUM_FIXED_BLOCKS
+ || current_sched_info->region_head_or_leaf_p (bb, 1))
+ {
+ glat_end[bb->index] = ALLOC_REG_SET (&reg_obstack);
+ COPY_REG_SET (glat_end[bb->index], bb->il.rtl->global_live_at_end);
+ }
+#endif
+ }
+ else
+ {
+ gcc_assert (!glat_end[bb->index]);
+
+ bb->il.rtl->global_live_at_start = ALLOC_REG_SET (&reg_obstack);
+ bb->il.rtl->global_live_at_end = ALLOC_REG_SET (&reg_obstack);
+ }
+}
+
+/* Free GLAT information. */
+static void
+free_glat (void)
+{
+#ifdef ENABLE_CHECKING
+ if (current_sched_info->flags & DETACH_LIFE_INFO)
+ {
+ basic_block bb;
+
+ FOR_ALL_BB (bb)
+ {
+ if (glat_start[bb->index])
+ FREE_REG_SET (glat_start[bb->index]);
+ if (glat_end[bb->index])
+ FREE_REG_SET (glat_end[bb->index]);
+ }
+ }
+#endif
+
+ free (glat_start);
+ free (glat_end);
+}
+
+/* Remove INSN from the instruction stream.
+ INSN should not have any dependencies. */
+static void
+sched_remove_insn (rtx insn)
+{
+ change_queue_index (insn, QUEUE_NOWHERE);
+ current_sched_info->add_remove_insn (insn, 1);
+ remove_insn (insn);
+}
+
+/* Clear priorities of all instructions that are
+ forward dependent on INSN. */
+static void
+clear_priorities (rtx insn)
+{
+ rtx link;
+
+ for (link = LOG_LINKS (insn); link; link = XEXP (link, 1))
+ {
+ rtx pro;
+
+ pro = XEXP (link, 0);
+ if (INSN_PRIORITY_KNOWN (pro))
+ {
+ INSN_PRIORITY_KNOWN (pro) = 0;
+ clear_priorities (pro);
+ }
+ }
+}
+
+/* Recompute priorities of instructions whose priorities might have been
+ changed due to changes in INSN. */
+static void
+calc_priorities (rtx insn)
+{
+ rtx link;
+
+ for (link = LOG_LINKS (insn); link; link = XEXP (link, 1))
+ {
+ rtx pro;
+
+ pro = XEXP (link, 0);
+ if (!INSN_PRIORITY_KNOWN (pro))
+ {
+ priority (pro);
+ calc_priorities (pro);
+ }
+ }
+}
+
+
+/* Add dependences between JUMP and other instructions in the recovery
+ block. INSN is the first insn in the recovery block. */
+static void
+add_jump_dependencies (rtx insn, rtx jump)
+{
+ do
+ {
+ insn = NEXT_INSN (insn);
+ if (insn == jump)
+ break;
+
+ if (!INSN_DEPEND (insn))
+ add_back_forw_dep (jump, insn, REG_DEP_ANTI, DEP_ANTI);
+ }
+ while (1);
+ gcc_assert (LOG_LINKS (jump));
+}
+
+/* Return the NOTE_INSN_BASIC_BLOCK of BB. */
+static rtx
+bb_note (basic_block bb)
+{
+ rtx note;
+
+ note = BB_HEAD (bb);
+ if (LABEL_P (note))
+ note = NEXT_INSN (note);
+
+ gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
+ return note;
+}
+
+#ifdef ENABLE_CHECKING
+extern void debug_spec_status (ds_t);
+
+/* Dump information about the dependence status S. */
+void
+debug_spec_status (ds_t s)
+{
+ FILE *f = stderr;
+
+ if (s & BEGIN_DATA)
+ fprintf (f, "BEGIN_DATA: %d; ", get_dep_weak (s, BEGIN_DATA));
+ if (s & BE_IN_DATA)
+ fprintf (f, "BE_IN_DATA: %d; ", get_dep_weak (s, BE_IN_DATA));
+ if (s & BEGIN_CONTROL)
+ fprintf (f, "BEGIN_CONTROL: %d; ", get_dep_weak (s, BEGIN_CONTROL));
+ if (s & BE_IN_CONTROL)
+ fprintf (f, "BE_IN_CONTROL: %d; ", get_dep_weak (s, BE_IN_CONTROL));
+
+ if (s & HARD_DEP)
+ fprintf (f, "HARD_DEP; ");
+
+ if (s & DEP_TRUE)
+ fprintf (f, "DEP_TRUE; ");
+ if (s & DEP_ANTI)
+ fprintf (f, "DEP_ANTI; ");
+ if (s & DEP_OUTPUT)
+ fprintf (f, "DEP_OUTPUT; ");
+
+ fprintf (f, "\n");
+}
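
This is intended to be called by hand, e.g. from gdb (the insn and the
printed weakness value are illustrative):

  (gdb) call debug_spec_status (TODO_SPEC (insn))
  BEGIN_DATA: 200;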
+
+/* Helper function for check_cfg.
+ Return nonzero if the edge vector pointed to by EL has an edge with TYPE in
+ its flags. */
+static int
+has_edge_p (VEC(edge,gc) *el, int type)
+{
+ edge e;
+ edge_iterator ei;
+
+ FOR_EACH_EDGE (e, ei, el)
+ if (e->flags & type)
+ return 1;
+ return 0;
+}
+
+/* Check a few properties of the CFG between HEAD and TAIL.
+ If HEAD (TAIL) is NULL, check from the beginning (to the end) of the
+ instruction stream. */
+static void
+check_cfg (rtx head, rtx tail)
+{
+ rtx next_tail;
+ basic_block bb = 0;
+ int not_first = 0, not_last;
+
+ if (head == NULL)
+ head = get_insns ();
+ if (tail == NULL)
+ tail = get_last_insn ();
+ next_tail = NEXT_INSN (tail);
+
+ do
+ {
+ not_last = head != tail;
+
+ if (not_first)
+ gcc_assert (NEXT_INSN (PREV_INSN (head)) == head);
+ if (not_last)
+ gcc_assert (PREV_INSN (NEXT_INSN (head)) == head);
+
+ if (LABEL_P (head)
+ || (NOTE_INSN_BASIC_BLOCK_P (head)
+ && (!not_first
+ || (not_first && !LABEL_P (PREV_INSN (head))))))
+ {
+ gcc_assert (bb == 0);
+ bb = BLOCK_FOR_INSN (head);
+ if (bb != 0)
+ gcc_assert (BB_HEAD (bb) == head);
+ else
+ /* This is the case of a jump table. See inside_basic_block_p (). */
+ gcc_assert (LABEL_P (head) && !inside_basic_block_p (head));
+ }
+
+ if (bb == 0)
+ {
+ gcc_assert (!inside_basic_block_p (head));
+ head = NEXT_INSN (head);
+ }
+ else
+ {
+ gcc_assert (inside_basic_block_p (head)
+ || NOTE_P (head));
+ gcc_assert (BLOCK_FOR_INSN (head) == bb);
+
+ if (LABEL_P (head))
+ {
+ head = NEXT_INSN (head);
+ gcc_assert (NOTE_INSN_BASIC_BLOCK_P (head));
+ }
+ else
+ {
+ if (control_flow_insn_p (head))
+ {
+ gcc_assert (BB_END (bb) == head);
+
+ if (any_uncondjump_p (head))
+ gcc_assert (EDGE_COUNT (bb->succs) == 1
+ && BARRIER_P (NEXT_INSN (head)));
+ else if (any_condjump_p (head))
+ gcc_assert (EDGE_COUNT (bb->succs) > 1
+ && !BARRIER_P (NEXT_INSN (head)));
+ }
+ if (BB_END (bb) == head)
+ {
+ if (EDGE_COUNT (bb->succs) > 1)
+ gcc_assert (control_flow_insn_p (head)
+ || has_edge_p (bb->succs, EDGE_COMPLEX));
+ bb = 0;
+ }
+
+ head = NEXT_INSN (head);
+ }
+ }
+
+ not_first = 1;
+ }
+ while (head != next_tail);
+
+ gcc_assert (bb == 0);
+}
+
+/* Perform a few consistency checks of flags in different data structures. */
+static void
+check_sched_flags (void)
+{
+ unsigned int f = current_sched_info->flags;
+
+ if (flag_sched_stalled_insns)
+ gcc_assert (!(f & DO_SPECULATION));
+ if (f & DO_SPECULATION)
+ gcc_assert (!flag_sched_stalled_insns
+ && (f & DETACH_LIFE_INFO)
+ && spec_info
+ && spec_info->mask);
+ if (f & DETACH_LIFE_INFO)
+ gcc_assert (f & USE_GLAT);
+}
+
+/* Checks global_live_at_{start, end} regsets. */
+void
+check_reg_live (void)
+{
+ basic_block bb;
+
+ FOR_ALL_BB (bb)
+ {
+ int i;
+
+ i = bb->index;
+
+ if (glat_start[i])
+ gcc_assert (bitmap_equal_p (bb->il.rtl->global_live_at_start,
+ glat_start[i]));
+ if (glat_end[i])
+ gcc_assert (bitmap_equal_p (bb->il.rtl->global_live_at_end,
+ glat_end[i]));
+ }
+}
+#endif /* ENABLE_CHECKING */
+
#endif /* INSN_SCHEDULING */
diff --git a/gcc/lists.c b/gcc/lists.c
index c9df54a779c..907ccf5ef2b 100644
--- a/gcc/lists.c
+++ b/gcc/lists.c
@@ -249,4 +249,20 @@ remove_free_INSN_LIST_elem (rtx elem, rtx *listp)
free_INSN_LIST_node (remove_list_elem (elem, listp));
}
+/* Create and return a copy of the DEPS_LIST LIST. */
+rtx
+copy_DEPS_LIST_list (rtx list)
+{
+ rtx res = NULL_RTX, *resp = &res;
+
+ while (list)
+ {
+ *resp = alloc_DEPS_LIST (XEXP (list, 0), 0, XWINT (list, 2));
+ PUT_REG_NOTE_KIND (*resp, REG_NOTE_KIND (list));
+ resp = &XEXP (*resp, 1);
+ list = XEXP (list, 1);
+ }
+ return res;
+}
+
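Within this patch the copy gives a twin insn its own list cells while
sharing the listed insns, as in create_check_block_twin and
add_to_speculative_block above:

  RESOLVED_DEPS (twin) = copy_DEPS_LIST_list (RESOLVED_DEPS (insn));
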
#include "gt-lists.h"
diff --git a/gcc/modulo-sched.c b/gcc/modulo-sched.c
index 3d8ee8d57b8..5a270339492 100644
--- a/gcc/modulo-sched.c
+++ b/gcc/modulo-sched.c
@@ -237,12 +237,6 @@ sms_print_insn (rtx insn, int aligned ATTRIBUTE_UNUSED)
return tmp;
}
-static int
-contributes_to_priority (rtx next, rtx insn)
-{
- return BLOCK_NUM (next) == BLOCK_NUM (insn);
-}
-
static void
compute_jump_reg_dependencies (rtx insn ATTRIBUTE_UNUSED,
regset cond_exec ATTRIBUTE_UNUSED,
@@ -259,12 +253,16 @@ static struct sched_info sms_sched_info =
NULL,
NULL,
sms_print_insn,
- contributes_to_priority,
+ NULL,
compute_jump_reg_dependencies,
NULL, NULL,
NULL, NULL,
0, 0, 0,
+ NULL, NULL, NULL, NULL, NULL,
+#ifdef ENABLE_CHECKING
+ NULL,
+#endif
0
};
@@ -314,7 +312,7 @@ const_iteration_count (rtx count_reg, basic_block pre_header,
if (! pre_header)
return NULL_RTX;
- get_block_head_tail (pre_header->index, &head, &tail);
+ get_ebb_head_tail (pre_header, pre_header, &head, &tail);
for (insn = tail; insn != PREV_INSN (head); insn = PREV_INSN (insn))
if (INSN_P (insn) && single_set (insn) &&
@@ -794,7 +792,7 @@ loop_single_full_bb_p (struct loop *loop)
/* Make sure that basic blocks other than the header
have only notes labels or jumps. */
- get_block_head_tail (bbs[i]->index, &head, &tail);
+ get_ebb_head_tail (bbs[i], bbs[i], &head, &tail);
for (; head != NEXT_INSN (tail); head = NEXT_INSN (head))
{
if (NOTE_P (head) || LABEL_P (head)
@@ -972,7 +970,7 @@ sms_schedule (void)
bb = loop->header;
- get_block_head_tail (bb->index, &head, &tail);
+ get_ebb_head_tail (bb, bb, &head, &tail);
latch_edge = loop_latch_edge (loop);
gcc_assert (loop->single_exit);
if (loop->single_exit->count)
@@ -1074,7 +1072,7 @@ sms_schedule (void)
if (dump_file)
print_ddg (dump_file, g);
- get_block_head_tail (loop->header->index, &head, &tail);
+ get_ebb_head_tail (loop->header, loop->header, &head, &tail);
latch_edge = loop_latch_edge (loop);
gcc_assert (loop->single_exit);
diff --git a/gcc/params.def b/gcc/params.def
index e34a50319c5..69241e77814 100644
--- a/gcc/params.def
+++ b/gcc/params.def
@@ -504,6 +504,16 @@ DEFPARAM(PARAM_MAX_SCHED_EXTEND_REGIONS_ITERS,
"The maximum number of iterations through CFG to extend regions",
2, 0, 0)
+DEFPARAM(PARAM_MAX_SCHED_INSN_CONFLICT_DELAY,
+ "max-sched-insn-conflict-delay",
+ "The maximum conflict delay for an insn to be considered for speculative motion",
+ 3, 1, 10)
+
+DEFPARAM(PARAM_SCHED_SPEC_PROB_CUTOFF,
+ "sched-spec-prob-cutoff",
+ "The minimal probability of speculation success (in percents), so that speculative insn will be scheduled.",
+ 40, 0, 100)
+
DEFPARAM(PARAM_MAX_LAST_VALUE_RTL,
"max-last-value-rtl",
"The maximum number of RTL nodes that can be recorded as combiner's last value",
diff --git a/gcc/rtl.h b/gcc/rtl.h
index 1ee04fc9382..22ecd16f77c 100644
--- a/gcc/rtl.h
+++ b/gcc/rtl.h
@@ -1758,6 +1758,7 @@ rtx alloc_DEPS_LIST (rtx, rtx, HOST_WIDE_INT);
void remove_free_DEPS_LIST_elem (rtx, rtx *);
void remove_free_INSN_LIST_elem (rtx, rtx *);
rtx remove_list_elem (rtx, rtx *);
+rtx copy_DEPS_LIST_list (rtx);
/* regclass.c */
diff --git a/gcc/sched-deps.c b/gcc/sched-deps.c
index 0597d304bbc..33ee695a930 100644
--- a/gcc/sched-deps.c
+++ b/gcc/sched-deps.c
@@ -112,8 +112,6 @@ static void adjust_add_sorted_back_dep (rtx, rtx, rtx *);
static void adjust_back_add_forw_dep (rtx, rtx *);
static void delete_forw_dep (rtx, rtx);
static dw_t estimate_dep_weak (rtx, rtx);
-static dw_t get_dep_weak (ds_t, ds_t);
-static ds_t ds_merge (ds_t, ds_t);
#ifdef INSN_SCHEDULING
#ifdef ENABLE_CHECKING
static void check_dep_status (enum reg_note, ds_t, bool);
@@ -1777,19 +1775,35 @@ init_dependency_caches (int luid)
what we consider "very high". */
if (luid / n_basic_blocks > 100 * 5)
{
- int i;
+ cache_size = 0;
+ extend_dependency_caches (luid, true);
+ }
+}
- true_dependency_cache = XNEWVEC (bitmap_head, luid);
- anti_dependency_cache = XNEWVEC (bitmap_head, luid);
- output_dependency_cache = XNEWVEC (bitmap_head, luid);
+/* Create or extend (depending on CREATE_P) dependency caches to
+ size N. */
+void
+extend_dependency_caches (int n, bool create_p)
+{
+ if (create_p || true_dependency_cache)
+ {
+ int i, luid = cache_size + n;
+
+ true_dependency_cache = XRESIZEVEC (bitmap_head, true_dependency_cache,
+ luid);
+ output_dependency_cache = XRESIZEVEC (bitmap_head,
+ output_dependency_cache, luid);
+ anti_dependency_cache = XRESIZEVEC (bitmap_head, anti_dependency_cache,
+ luid);
#ifdef ENABLE_CHECKING
- forward_dependency_cache = XNEWVEC (bitmap_head, luid);
+ forward_dependency_cache = XRESIZEVEC (bitmap_head,
+ forward_dependency_cache, luid);
#endif
if (current_sched_info->flags & DO_SPECULATION)
spec_dependency_cache = XRESIZEVEC (bitmap_head, spec_dependency_cache,
luid);
- for (i = 0; i < luid; i++)
+ for (i = cache_size; i < luid; i++)
{
bitmap_initialize (&true_dependency_cache[i], 0);
bitmap_initialize (&output_dependency_cache[i], 0);
@@ -2037,7 +2051,7 @@ delete_back_forw_dep (rtx insn, rtx elem)
}
/* Return weakness of speculative type TYPE in the dep_status DS. */
-static dw_t
+dw_t
get_dep_weak (ds_t ds, ds_t type)
{
ds = ds & type;
@@ -2074,7 +2088,7 @@ set_dep_weak (ds_t ds, ds_t type, dw_t dw)
}
/* Return the join of two dep_statuses DS1 and DS2. */
-static ds_t
+ds_t
ds_merge (ds_t ds1, ds_t ds2)
{
ds_t ds, t;
diff --git a/gcc/sched-ebb.c b/gcc/sched-ebb.c
index 0b2e1bf193d..4126a5d7561 100644
--- a/gcc/sched-ebb.c
+++ b/gcc/sched-ebb.c
@@ -43,14 +43,23 @@ Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
#include "target.h"
#include "output.h"
-/* The number of insns to be scheduled in total. */
-static int target_n_insns;
/* The number of insns scheduled so far. */
static int sched_n_insns;
+/* The number of insns to be scheduled in total. */
+static int n_insns;
+
+/* Set of blocks that already have their dependencies calculated. */
+static bitmap_head dont_calc_deps;
+/* Sets of basic blocks that are ebb heads or tails, respectively. */
+static bitmap_head ebb_head, ebb_tail;
+
+/* Last basic block in current ebb. */
+static basic_block last_bb;
+
/* Implementations of the sched_info functions for region scheduling. */
static void init_ready_list (void);
-static int can_schedule_ready_p (rtx);
+static void begin_schedule_ready (rtx, rtx);
static int schedule_more_p (void);
static const char *ebb_print_insn (rtx, int);
static int rank (rtx, rtx);
@@ -59,16 +68,22 @@ static void compute_jump_reg_dependencies (rtx, regset, regset, regset);
static basic_block earliest_block_with_similiar_load (basic_block, rtx);
static void add_deps_for_risky_insns (rtx, rtx);
static basic_block schedule_ebb (rtx, rtx);
-static basic_block fix_basic_block_boundaries (basic_block, basic_block, rtx,
- rtx);
-static void add_missing_bbs (rtx, basic_block, basic_block);
+
+static void add_remove_insn (rtx, int);
+static void add_block1 (basic_block, basic_block);
+static basic_block advance_target_bb (basic_block, rtx);
+static void fix_recovery_cfg (int, int, int);
+
+#ifdef ENABLE_CHECKING
+static int ebb_head_or_leaf_p (basic_block, int);
+#endif
/* Return nonzero if there are more insns that should be scheduled. */
static int
schedule_more_p (void)
{
- return sched_n_insns < target_n_insns;
+ return sched_n_insns < n_insns;
}
/* Add all insns that are initially ready to the ready list READY. Called
@@ -77,11 +92,11 @@ schedule_more_p (void)
static void
init_ready_list (void)
{
+ int n = 0;
rtx prev_head = current_sched_info->prev_head;
rtx next_tail = current_sched_info->next_tail;
rtx insn;
- target_n_insns = 0;
sched_n_insns = 0;
#if 0
@@ -95,18 +110,74 @@ init_ready_list (void)
for (insn = NEXT_INSN (prev_head); insn != next_tail; insn = NEXT_INSN (insn))
{
try_ready (insn);
- target_n_insns++;
+ n++;
}
-}
-/* Called after taking INSN from the ready list. Returns nonzero if this
- insn can be scheduled, nonzero if we should silently discard it. */
+ gcc_assert (n == n_insns);
+}
-static int
-can_schedule_ready_p (rtx insn ATTRIBUTE_UNUSED)
+/* INSN is being scheduled after LAST. Update counters. */
+static void
+begin_schedule_ready (rtx insn, rtx last)
{
sched_n_insns++;
- return 1;
+
+ if (BLOCK_FOR_INSN (insn) == last_bb
+ /* INSN is a jump in the last block, ... */
+ && control_flow_insn_p (insn)
+ /* that is going to be moved over some instructions. */
+ && last != PREV_INSN (insn))
+ {
+ edge e;
+ edge_iterator ei;
+ basic_block bb;
+
+ /* An obscure special case, where we do have a partially dead
+ instruction scheduled after the last control flow instruction.
+ In this case we can create a new basic block. It is
+ always exactly one basic block, the last in the sequence. */
+
+ FOR_EACH_EDGE (e, ei, last_bb->succs)
+ if (e->flags & EDGE_FALLTHRU)
+ break;
+
+#ifdef ENABLE_CHECKING
+ gcc_assert (!e || !(e->flags & EDGE_COMPLEX));
+
+ gcc_assert (BLOCK_FOR_INSN (insn) == last_bb
+ && !RECOVERY_BLOCK (insn)
+ && BB_HEAD (last_bb) != insn
+ && BB_END (last_bb) == insn);
+
+ {
+ rtx x;
+
+ x = NEXT_INSN (insn);
+ if (e)
+ gcc_assert (NOTE_P (x) || LABEL_P (x));
+ else
+ gcc_assert (BARRIER_P (x));
+ }
+#endif
+
+ if (e)
+ {
+ bb = split_edge (e);
+ gcc_assert (NOTE_INSN_BASIC_BLOCK_P (BB_END (bb)));
+ }
+ else
+ bb = create_basic_block (insn, 0, last_bb);
+
+ /* split_edge () creates BB before E->DEST. Keep in mind that
+ this operation extends the scheduling region to the end of BB.
+ Hence, we need to shift NEXT_TAIL, so haifa-sched.c won't go out
+ of the scheduling region. */
+ current_sched_info->next_tail = NEXT_INSN (BB_END (bb));
+ gcc_assert (current_sched_info->next_tail);
+
+ add_block (bb, last_bb);
+ gcc_assert (last_bb == bb);
+ }
}
/* Return a string that contains the insn uid and optionally anything else
@@ -173,9 +244,9 @@ compute_jump_reg_dependencies (rtx insn, regset cond_set, regset used,
it may guard the fallthrough block from using a value that has
conditionally overwritten that of the main codepath. So we
consider that it restores the value of the main codepath. */
- bitmap_and (set, e->dest->il.rtl->global_live_at_start, cond_set);
+ bitmap_and (set, glat_start [e->dest->index], cond_set);
else
- bitmap_ior_into (used, e->dest->il.rtl->global_live_at_start);
+ bitmap_ior_into (used, glat_start [e->dest->index]);
}
/* Used in schedule_insns to initialize current_sched_info for scheduling
@@ -184,7 +255,7 @@ compute_jump_reg_dependencies (rtx insn, regset cond_set, regset used,
static struct sched_info ebb_sched_info =
{
init_ready_list,
- can_schedule_ready_p,
+ NULL,
schedule_more_p,
NULL,
rank,
@@ -196,143 +267,19 @@ static struct sched_info ebb_sched_info =
NULL, NULL,
0, 1, 0,
- 0
+ add_remove_insn,
+ begin_schedule_ready,
+ add_block1,
+ advance_target_bb,
+ fix_recovery_cfg,
+#ifdef ENABLE_CHECKING
+ ebb_head_or_leaf_p,
+#endif
+ /* We need DETACH_LIFE_INFO to be able to create new basic blocks.
+ See begin_schedule_ready (). */
+ SCHED_EBB | USE_GLAT | DETACH_LIFE_INFO
};
-/* It is possible that ebb scheduling eliminated some blocks.
- Place blocks from FIRST to LAST before BEFORE. */
-
-static void
-add_missing_bbs (rtx before, basic_block first, basic_block last)
-{
- for (; last != first->prev_bb; last = last->prev_bb)
- {
- before = emit_note_before (NOTE_INSN_BASIC_BLOCK, before);
- NOTE_BASIC_BLOCK (before) = last;
- BB_HEAD (last) = before;
- BB_END (last) = before;
- update_bb_for_insn (last);
- }
-}
-
-/* Fixup the CFG after EBB scheduling. Re-recognize the basic
- block boundaries in between HEAD and TAIL and update basic block
- structures between BB and LAST. */
-
-static basic_block
-fix_basic_block_boundaries (basic_block bb, basic_block last, rtx head,
- rtx tail)
-{
- rtx insn = head;
- rtx last_inside = BB_HEAD (bb);
- rtx aftertail = NEXT_INSN (tail);
-
- head = BB_HEAD (bb);
-
- for (; insn != aftertail; insn = NEXT_INSN (insn))
- {
- gcc_assert (!LABEL_P (insn));
- /* Create new basic blocks just before first insn. */
- if (inside_basic_block_p (insn))
- {
- if (!last_inside)
- {
- rtx note;
-
- /* Re-emit the basic block note for newly found BB header. */
- if (LABEL_P (insn))
- {
- note = emit_note_after (NOTE_INSN_BASIC_BLOCK, insn);
- head = insn;
- last_inside = note;
- }
- else
- {
- note = emit_note_before (NOTE_INSN_BASIC_BLOCK, insn);
- head = note;
- last_inside = insn;
- }
- }
- else
- last_inside = insn;
- }
- /* Control flow instruction terminate basic block. It is possible
- that we've eliminated some basic blocks (made them empty).
- Find the proper basic block using BLOCK_FOR_INSN and arrange things in
- a sensible way by inserting empty basic blocks as needed. */
- if (control_flow_insn_p (insn) || (insn == tail && last_inside))
- {
- basic_block curr_bb = BLOCK_FOR_INSN (insn);
- rtx note;
-
- if (!control_flow_insn_p (insn))
- curr_bb = last;
- if (bb == last->next_bb)
- {
- edge f;
- rtx h;
- edge_iterator ei;
-
- /* An obscure special case, where we do have partially dead
- instruction scheduled after last control flow instruction.
- In this case we can create new basic block. It is
- always exactly one basic block last in the sequence. Handle
- it by splitting the edge and repositioning the block.
- This is somewhat hackish, but at least avoid cut&paste
-
- A safer solution can be to bring the code into sequence,
- do the split and re-emit it back in case this will ever
- trigger problem. */
-
- FOR_EACH_EDGE (f, ei, bb->prev_bb->succs)
- if (f->flags & EDGE_FALLTHRU)
- break;
-
- if (f)
- {
- last = curr_bb = split_edge (f);
- h = BB_HEAD (curr_bb);
- BB_HEAD (curr_bb) = head;
- BB_END (curr_bb) = insn;
- /* Edge splitting created misplaced BASIC_BLOCK note, kill
- it. */
- delete_insn (h);
- }
- /* It may happen that code got moved past unconditional jump in
- case the code is completely dead. Kill it. */
- else
- {
- rtx next = next_nonnote_insn (insn);
- delete_insn_chain (head, insn);
- /* We keep some notes in the way that may split barrier from the
- jump. */
- if (BARRIER_P (next))
- {
- emit_barrier_after (prev_nonnote_insn (head));
- delete_insn (next);
- }
- insn = NULL;
- }
- }
- else
- {
- BB_HEAD (curr_bb) = head;
- BB_END (curr_bb) = insn;
- add_missing_bbs (BB_HEAD (curr_bb), bb, curr_bb->prev_bb);
- }
- note = LABEL_P (head) ? NEXT_INSN (head) : head;
- NOTE_BASIC_BLOCK (note) = curr_bb;
- update_bb_for_insn (curr_bb);
- bb = curr_bb->next_bb;
- last_inside = NULL;
- if (!insn)
- break;
- }
- }
- add_missing_bbs (BB_HEAD (last->next_bb), bb, last);
- return bb->prev_bb;
-}
-
/* Returns the earliest block in EBB currently being processed where a
"similar load" 'insn2' is found, and hence LOAD_INSN can move
speculatively into the found block. All the following must hold:
@@ -488,29 +435,40 @@ add_deps_for_risky_insns (rtx head, rtx tail)
static basic_block
schedule_ebb (rtx head, rtx tail)
{
- int n_insns;
- basic_block b;
+ basic_block first_bb, target_bb;
struct deps tmp_deps;
- basic_block first_bb = BLOCK_FOR_INSN (head);
- basic_block last_bb = BLOCK_FOR_INSN (tail);
+
+ first_bb = BLOCK_FOR_INSN (head);
+ last_bb = BLOCK_FOR_INSN (tail);
if (no_real_insns_p (head, tail))
return BLOCK_FOR_INSN (tail);
- init_deps_global ();
+ gcc_assert (INSN_P (head) && INSN_P (tail));
+
+ if (!bitmap_bit_p (&dont_calc_deps, first_bb->index))
+ {
+ init_deps_global ();
- /* Compute LOG_LINKS. */
- init_deps (&tmp_deps);
- sched_analyze (&tmp_deps, head, tail);
- free_deps (&tmp_deps);
+ /* Compute LOG_LINKS. */
+ init_deps (&tmp_deps);
+ sched_analyze (&tmp_deps, head, tail);
+ free_deps (&tmp_deps);
- /* Compute INSN_DEPEND. */
- compute_forward_dependences (head, tail);
+ /* Compute INSN_DEPEND. */
+ compute_forward_dependences (head, tail);
- add_deps_for_risky_insns (head, tail);
+ add_deps_for_risky_insns (head, tail);
- if (targetm.sched.dependencies_evaluation_hook)
- targetm.sched.dependencies_evaluation_hook (head, tail);
+ if (targetm.sched.dependencies_evaluation_hook)
+ targetm.sched.dependencies_evaluation_hook (head, tail);
+
+ finish_deps_global ();
+ }
+ else
+ /* Only recovery blocks can have their dependencies already calculated,
+ and they are always single-block ebbs. */
+ gcc_assert (first_bb == last_bb);
/* Set priorities. */
current_sched_info->sched_max_insns_priority = 0;
@@ -546,10 +504,16 @@ schedule_ebb (rtx head, rtx tail)
schedule_block (). */
rm_other_notes (head, tail);
+ unlink_bb_notes (first_bb, last_bb);
+
current_sched_info->queue_must_finish_empty = 1;
- schedule_block (-1, n_insns);
+ target_bb = first_bb;
+ schedule_block (&target_bb, n_insns);
+ /* We might pack all instructions into fewer blocks, so we may have
+ made some of them empty. Can't assert (target_bb == last_bb). */
+
/* Sanity check: verify that all region insns were scheduled. */
gcc_assert (sched_n_insns == n_insns);
head = current_sched_info->head;
@@ -557,10 +521,17 @@ schedule_ebb (rtx head, rtx tail)
if (write_symbols != NO_DEBUG)
restore_line_notes (head, tail);
- b = fix_basic_block_boundaries (first_bb, last_bb, head, tail);
- finish_deps_global ();
- return b;
+ if (EDGE_COUNT (last_bb->preds) == 0)
+ /* LAST_BB is unreachable. */
+ {
+ gcc_assert (first_bb != last_bb
+ && EDGE_COUNT (last_bb->succs) == 0);
+ last_bb = last_bb->prev_bb;
+ delete_basic_block (last_bb->next_bb);
+ }
+
+ return last_bb;
}
/* The one entry point in this file. */
@@ -570,6 +541,9 @@ schedule_ebbs (void)
{
basic_block bb;
int probability_cutoff;
+ rtx tail;
+ sbitmap large_region_blocks, blocks;
+ int any_large_regions;
if (profile_info && flag_branch_probabilities)
probability_cutoff = PARAM_VALUE (TRACER_MIN_BRANCH_PROBABILITY_FEEDBACK);
@@ -590,11 +564,18 @@ schedule_ebbs (void)
compute_bb_for_insn ();
+ /* Initialize DONT_CALC_DEPS and ebb-{start, end} markers. */
+ bitmap_initialize (&dont_calc_deps, 0);
+ bitmap_clear (&dont_calc_deps);
+ bitmap_initialize (&ebb_head, 0);
+ bitmap_clear (&ebb_head);
+ bitmap_initialize (&ebb_tail, 0);
+ bitmap_clear (&ebb_tail);
+
/* Schedule every region in the subroutine. */
FOR_EACH_BB (bb)
{
rtx head = BB_HEAD (bb);
- rtx tail;
for (;;)
{
@@ -628,11 +609,71 @@ schedule_ebbs (void)
break;
}
+ bitmap_set_bit (&ebb_head, BLOCK_NUM (head));
bb = schedule_ebb (head, tail);
+ bitmap_set_bit (&ebb_tail, bb->index);
}
+ bitmap_clear (&dont_calc_deps);
- /* Updating life info can be done by local propagation over the modified
- superblocks. */
+ gcc_assert (current_sched_info->flags & DETACH_LIFE_INFO);
+ /* We can create new basic blocks during scheduling, and
+ attach_life_info () will create regsets for them
+ (along with attaching existing info back). */
+ attach_life_info ();
+
+ /* Updating register live information. */
+ allocate_reg_life_data ();
+
+ any_large_regions = 0;
+ large_region_blocks = sbitmap_alloc (last_basic_block);
+ sbitmap_zero (large_region_blocks);
+ FOR_EACH_BB (bb)
+ SET_BIT (large_region_blocks, bb->index);
+
+ blocks = sbitmap_alloc (last_basic_block);
+ sbitmap_zero (blocks);
+
+ /* Update life information. For regions consisting of multiple blocks
+ we've possibly done interblock scheduling that affects global liveness.
+ For regions consisting of single blocks we need to do only local
+ liveness. */
+ FOR_EACH_BB (bb)
+ {
+ int bbi;
+
+ bbi = bb->index;
+
+ if (!bitmap_bit_p (&ebb_head, bbi)
+ || !bitmap_bit_p (&ebb_tail, bbi)
+ /* New blocks (e.g. recovery blocks) should be processed
+ as parts of large regions. */
+ || !glat_start[bbi])
+ any_large_regions = 1;
+ else
+ {
+ SET_BIT (blocks, bbi);
+ RESET_BIT (large_region_blocks, bbi);
+ }
+ }
+
+ update_life_info (blocks, UPDATE_LIFE_LOCAL, 0);
+ sbitmap_free (blocks);
+
+ if (any_large_regions)
+ {
+ update_life_info (large_region_blocks, UPDATE_LIFE_GLOBAL, 0);
+
+#ifdef ENABLE_CHECKING
+ /* !!! We can't check reg_live_info here because destination
+ registers of COND_EXEC's may be dead before scheduling (while
+ they should be live). The reason for this is unknown. */
+ /*check_reg_live ();*/
+#endif
+ }
+ sbitmap_free (large_region_blocks);
+
+ bitmap_clear (&ebb_head);
+ bitmap_clear (&ebb_tail);
/* Reposition the prologue and epilogue notes in case we moved the
prologue/epilogue insns. */
@@ -644,3 +685,77 @@ schedule_ebbs (void)
sched_finish ();
}
+
+/* INSN has been added to/removed from current ebb. */
+static void
+add_remove_insn (rtx insn ATTRIBUTE_UNUSED, int remove_p)
+{
+ if (!remove_p)
+ n_insns++;
+ else
+ n_insns--;
+}
+
+/* BB was added to ebb after AFTER. */
+static void
+add_block1 (basic_block bb, basic_block after)
+{
+ /* Recovery blocks are always bounded by BARRIERs;
+ therefore they always form a single-block EBB,
+ and we can use bb->index to identify such EBBs. */
+ if (after == EXIT_BLOCK_PTR)
+ bitmap_set_bit (&dont_calc_deps, bb->index);
+ else if (after == last_bb)
+ last_bb = bb;
+}
+
+/* Return next block in ebb chain. For parameter meaning please refer to
+ sched-int.h: struct sched_info: advance_target_bb. */
+static basic_block
+advance_target_bb (basic_block bb, rtx insn)
+{
+ if (insn)
+ {
+ if (BLOCK_FOR_INSN (insn) != bb
+ && control_flow_insn_p (insn)
+ && !RECOVERY_BLOCK (insn)
+ && !RECOVERY_BLOCK (BB_END (bb)))
+ {
+ gcc_assert (!control_flow_insn_p (BB_END (bb))
+ && NOTE_INSN_BASIC_BLOCK_P (BB_HEAD (bb->next_bb)));
+ return bb;
+ }
+ else
+ return 0;
+ }
+ else if (bb != last_bb)
+ return bb->next_bb;
+ else
+ gcc_unreachable ();
+}
+
+/* Fix internal data after interblock movement of jump instruction.
+ For parameter meaning please refer to
+ sched-int.h: struct sched_info: fix_recovery_cfg. */
+static void
+fix_recovery_cfg (int bbi ATTRIBUTE_UNUSED, int jump_bbi, int jump_bb_nexti)
+{
+ gcc_assert (last_bb->index != bbi);
+
+ if (jump_bb_nexti == last_bb->index)
+ last_bb = BASIC_BLOCK (jump_bbi);
+}
+
+#ifdef ENABLE_CHECKING
+/* Return nonzero if BB is the first or the last (depending on LEAF_P)
+ block in the current ebb. For more information please refer to
+ sched-int.h: struct sched_info: region_head_or_leaf_p. */
+static int
+ebb_head_or_leaf_p (basic_block bb, int leaf_p)
+{
+ if (!leaf_p)
+ return bitmap_bit_p (&ebb_head, bb->index);
+ else
+ return bitmap_bit_p (&ebb_tail, bb->index);
+}
+#endif /* ENABLE_CHECKING */
diff --git a/gcc/sched-int.h b/gcc/sched-int.h
index 17c7f7bfb1e..cdaca1b83f0 100644
--- a/gcc/sched-int.h
+++ b/gcc/sched-int.h
@@ -148,10 +148,12 @@ struct sched_info
int (*can_schedule_ready_p) (rtx);
/* Return nonzero if there are more insns that should be scheduled. */
int (*schedule_more_p) (void);
- /* Called after an insn has all its dependencies resolved. Return nonzero
- if it should be moved to the ready list or the queue, or zero if we
- should silently discard it. */
- int (*new_ready) (rtx);
+ /* Called after an insn has all its hard dependencies resolved.
+ Adjusts the status of the instruction (which is passed through the
+ second parameter) to indicate whether the instruction should be moved
+ to the ready list or the queue, or whether it should be silently
+ discarded (until the next dependence is resolved). */
+ ds_t (*new_ready) (rtx, ds_t);
/* Compare priority of two insns. Return a positive number if the second
insn is to be preferred for scheduling, and a negative one if the first
is to be preferred. Zero if they are equally good. */
@@ -187,11 +189,73 @@ struct sched_info
/* Maximum priority that has been assigned to an insn. */
int sched_max_insns_priority;
+ /* Hooks to support speculative scheduling. */
+
+ /* Called to notify frontend that instruction is being added (second
+ parameter == 0) or removed (second parameter == 1). */
+ void (*add_remove_insn) (rtx, int);
+
+ /* Called to notify the frontend that an instruction is being scheduled.
+ The first parameter is the instruction being scheduled; the second
+ parameter is the last scheduled instruction. */
+ void (*begin_schedule_ready) (rtx, rtx);
+
+ /* Called to notify the frontend that a new basic block is being added.
+ The first parameter is the new basic block.
+ The second parameter is the block after which the new block is added,
+ or EXIT_BLOCK_PTR if a recovery block is being added,
+ or NULL if a standalone block is being added. */
+ void (*add_block) (basic_block, basic_block);
+
+ /* If the second parameter is not NULL, return a nonnull value if the
+ basic block should be advanced.
+ If the second parameter is NULL, return the next basic block in the EBB.
+ The first parameter is the current basic block in the EBB. */
+ basic_block (*advance_target_bb) (basic_block, rtx);
+
+ /* Called after blocks were rearranged due to the movement of a jump
+ instruction.
+ The first parameter is the index of the basic block in which the jump
+ currently is.
+ The second parameter is the index of the basic block in which the jump
+ used to be.
+ The third parameter is the index of the basic block that follows the
+ second parameter. */
+ void (*fix_recovery_cfg) (int, int, int);
+
+#ifdef ENABLE_CHECKING
+ /* If the second parameter is zero, return nonzero if the block is the
+ head of the region.
+ If the second parameter is nonzero, return nonzero if the block is a
+ leaf of the region.
+ global_live_at_start should not change in region heads, and
+ global_live_at_end should not change in region leaves due to
+ scheduling. */
+ int (*region_head_or_leaf_p) (basic_block, int);
+#endif
+
/* ??? FIXME: should use straight bitfields inside sched_info instead of
this flag field. */
unsigned int flags;
};
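
To make the contract of the two block-advancing hooks above concrete, here is a hedged sketch, not code from this patch, of how the scheduler core is expected to drive them when an insn is emitted; the helper name is invented, and the calls follow the comments on advance_target_bb and begin_schedule_ready:

/* Illustrative only.  With a nonnull INSN, advance_target_bb answers
   "should the target block be advanced?"; with a NULL insn it returns
   the next block of the ebb.  */
static void
advance_after_schedule_sketch (basic_block *target_bb, rtx insn, rtx last)
{
  if (current_sched_info->advance_target_bb (*target_bb, insn))
    *target_bb = current_sched_info->advance_target_bb (*target_bb, NULL);

  /* Let the frontend update its own counters and boundaries.  */
  if (current_sched_info->begin_schedule_ready)
    current_sched_info->begin_schedule_ready (insn, last);
}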
+/* This structure holds description of the properties for speculative
+ scheduling. */
+struct spec_info_def
+{
+ /* Holds types of allowed speculations: BEGIN_{DATA|CONTROL},
+ BE_IN_{DATA|CONTROL}. */
+ int mask;
+
+ /* A dump file for additional information on speculative scheduling. */
+ FILE *dump;
+
+ /* The minimal cumulative weakness of a speculative instruction's
+ dependencies required for the insn to be scheduled. */
+ dw_t weakness_cutoff;
+
+ /* Flags from the enum SPEC_SCHED_FLAGS. */
+ int flags;
+};
+typedef struct spec_info_def *spec_info_t;
+
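
As an illustration of how a target is expected to fill this structure, here is a minimal sketch of a set_sched_flags hook (see TARGET_SCHED_SET_SCHED_FLAGS in target-def.h); the target name and the chosen values are hypothetical:

/* Hypothetical target hook; values chosen for illustration only.  */
static void
foo_set_sched_flags (struct spec_info_def *spec_info)
{
  /* Allow begin-data speculation only.  */
  spec_info->mask = BEGIN_DATA;
  spec_info->dump = sched_dump;
  /* Require the cumulative weakness of an insn's speculative
     dependencies to be at least half of MAX_DEP_WEAK.  */
  spec_info->weakness_cutoff = MAX_DEP_WEAK / 2;
  spec_info->flags = COUNT_SPEC_IN_CRITICAL_PATH | PREFER_NON_DATA_SPEC;
}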
extern struct sched_info *current_sched_info;
/* Indexed by INSN_UID, the collection of all data associated with
@@ -256,9 +320,26 @@ struct haifa_insn_data
/* Nonzero if instruction has internal dependence
(e.g. add_dependence was invoked with (insn == elem)). */
unsigned int has_internal_dep : 1;
+
+ /* What speculations are necessary to apply to schedule the instruction. */
+ ds_t todo_spec;
+ /* What speculations were already applied. */
+ ds_t done_spec;
+ /* What speculations are checked by this instruction. */
+ ds_t check_spec;
+
+ /* Recovery block for speculation checks. */
+ basic_block recovery_block;
+
+ /* Original pattern of the instruction. */
+ rtx orig_pat;
};
extern struct haifa_insn_data *h_i_d;
+/* Used only if (current_sched_info->flags & USE_GLAT) != 0.
+ These regsets store global_live_at_{start, end} information
+ for each basic block. */
+extern regset *glat_start, *glat_end;
/* Accessor macros for h_i_d. There are more in haifa-sched.c and
sched-rgn.c. */
@@ -272,6 +353,11 @@ extern struct haifa_insn_data *h_i_d;
#define INSN_COST(INSN) (h_i_d[INSN_UID (INSN)].cost)
#define INSN_REG_WEIGHT(INSN) (h_i_d[INSN_UID (INSN)].reg_weight)
#define HAS_INTERNAL_DEP(INSN) (h_i_d[INSN_UID (INSN)].has_internal_dep)
+#define TODO_SPEC(INSN) (h_i_d[INSN_UID (INSN)].todo_spec)
+#define DONE_SPEC(INSN) (h_i_d[INSN_UID (INSN)].done_spec)
+#define CHECK_SPEC(INSN) (h_i_d[INSN_UID (INSN)].check_spec)
+#define RECOVERY_BLOCK(INSN) (h_i_d[INSN_UID (INSN)].recovery_block)
+#define ORIG_PAT(INSN) (h_i_d[INSN_UID (INSN)].orig_pat)
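
A usage note on the new fields, with an illustrative helper that is not part of the patch: TODO_SPEC holds the speculation still to be applied, DONE_SPEC what has already been applied, and a nonnull RECOVERY_BLOCK marks an insn as a speculation check, which is how new_ready () in sched-rgn.c tests for checks:

/* Illustrative only: recognize a speculation check by its nonnull
   recovery block.  */
static inline int
spec_check_p_sketch (rtx insn)
{
  return RECOVERY_BLOCK (insn) != NULL;
}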
/* DEP_STATUS of the link encapsulates information that is needed for
speculative scheduling. Namely, it is 4 integers in the range
@@ -400,9 +486,27 @@ enum SCHED_FLAGS {
/* Perform data or control (or both) speculation.
Results in generation of data and control speculative dependencies.
Requires USE_DEPS_LIST set. */
- DO_SPECULATION = USE_DEPS_LIST << 1
+ DO_SPECULATION = USE_DEPS_LIST << 1,
+ SCHED_RGN = DO_SPECULATION << 1,
+ SCHED_EBB = SCHED_RGN << 1,
+ /* Detach register live information from basic block headers.
+ This is necessary to invoke functions that change the CFG (e.g. split_edge).
+ Requires USE_GLAT. */
+ DETACH_LIFE_INFO = SCHED_EBB << 1,
+ /* Save register live information from basic block headers to
+ glat_{start, end} arrays. */
+ USE_GLAT = DETACH_LIFE_INFO << 1
+};
+
+enum SPEC_SCHED_FLAGS {
+ COUNT_SPEC_IN_CRITICAL_PATH = 1,
+ PREFER_NON_DATA_SPEC = COUNT_SPEC_IN_CRITICAL_PATH << 1,
+ PREFER_NON_CONTROL_SPEC = PREFER_NON_DATA_SPEC << 1
};
+#define NOTE_NOT_BB_P(NOTE) (NOTE_P (NOTE) && (NOTE_LINE_NUMBER (NOTE) \
+ != NOTE_INSN_BASIC_BLOCK))
+
extern FILE *sched_dump;
extern int sched_verbose;
@@ -500,16 +604,19 @@ extern void compute_forward_dependences (rtx, rtx);
extern rtx find_insn_list (rtx, rtx);
extern void init_dependency_caches (int);
extern void free_dependency_caches (void);
+extern void extend_dependency_caches (int, bool);
extern enum DEPS_ADJUST_RESULT add_or_update_back_dep (rtx, rtx,
enum reg_note, ds_t);
extern void add_or_update_back_forw_dep (rtx, rtx, enum reg_note, ds_t);
extern void add_back_forw_dep (rtx, rtx, enum reg_note, ds_t);
extern void delete_back_forw_dep (rtx, rtx);
+extern dw_t get_dep_weak (ds_t, ds_t);
extern ds_t set_dep_weak (ds_t, ds_t, dw_t);
+extern ds_t ds_merge (ds_t, ds_t);
/* Functions in haifa-sched.c. */
extern int haifa_classify_insn (rtx);
-extern void get_block_head_tail (int, rtx *, rtx *);
+extern void get_ebb_head_tail (basic_block, basic_block, rtx *, rtx *);
extern int no_real_insns_p (rtx, rtx);
extern void rm_line_notes (rtx, rtx);
@@ -521,10 +628,18 @@ extern void rm_other_notes (rtx, rtx);
extern int insn_cost (rtx, rtx, rtx);
extern int set_priorities (rtx, rtx);
-extern void schedule_block (int, int);
+extern void schedule_block (basic_block *, int);
extern void sched_init (void);
extern void sched_finish (void);
extern int try_ready (rtx);
+extern void * xrecalloc (void *, size_t, size_t, size_t);
+extern void unlink_bb_notes (basic_block, basic_block);
+extern void add_block (basic_block, basic_block);
+extern void attach_life_info (void);
+
+#ifdef ENABLE_CHECKING
+extern void check_reg_live (void);
+#endif
#endif /* GCC_SCHED_INT_H */
diff --git a/gcc/sched-rgn.c b/gcc/sched-rgn.c
index 314d72851db..77eec4b0df0 100644
--- a/gcc/sched-rgn.c
+++ b/gcc/sched-rgn.c
@@ -96,8 +96,15 @@ static bool sched_is_disabled_for_current_region_p (void);
control flow graph edges, in the 'up' direction. */
typedef struct
{
- int rgn_nr_blocks; /* Number of blocks in region. */
- int rgn_blocks; /* cblocks in the region (actually index in rgn_bb_table). */
+ /* Number of extended basic blocks in region. */
+ int rgn_nr_blocks;
+ /* cblocks in the region (actually index in rgn_bb_table). */
+ int rgn_blocks;
+ /* Dependencies for this region are already computed. Basically, this
+ indicates that it is a recovery block. */
+ unsigned int dont_calc_deps : 1;
+ /* This region has at least one non-trivial ebb. */
+ unsigned int has_real_ebb : 1;
}
region;
@@ -125,6 +132,8 @@ static int min_spec_prob;
#define RGN_NR_BLOCKS(rgn) (rgn_table[rgn].rgn_nr_blocks)
#define RGN_BLOCKS(rgn) (rgn_table[rgn].rgn_blocks)
+#define RGN_DONT_CALC_DEPS(rgn) (rgn_table[rgn].dont_calc_deps)
+#define RGN_HAS_REAL_EBB(rgn) (rgn_table[rgn].has_real_ebb)
#define BLOCK_TO_BB(block) (block_to_bb[block])
#define CONTAINING_RGN(block) (containing_rgn[block])
@@ -140,8 +149,15 @@ extern void debug_live (int, int);
static int current_nr_blocks;
static int current_blocks;
-/* The mapping from bb to block. */
-#define BB_TO_BLOCK(bb) (rgn_bb_table[current_blocks + (bb)])
+static int rgn_n_insns;
+
+/* The mapping from ebb to block. */
+/* ebb_head[i] is an index in rgn_bb_table, while
+ BB_TO_BLOCK (i) is a basic block index;
+ BASIC_BLOCK (BB_TO_BLOCK (i)) is the head of the ebb. */
+#define BB_TO_BLOCK(ebb) (rgn_bb_table[ebb_head[ebb]])
+#define EBB_FIRST_BB(ebb) BASIC_BLOCK (BB_TO_BLOCK (ebb))
+#define EBB_LAST_BB(ebb) BASIC_BLOCK (rgn_bb_table[ebb_head[ebb + 1] - 1])
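
A worked illustration of this mapping, with hypothetical indices: suppose a region occupies positions 10..12 of rgn_bb_table and consists of two ebbs, where basic block 9 was split off from block 5 during scheduling. The standalone snippet below models the lookups the macros perform:

#include <stdio.h>

int
main (void)
{
  /* Positions 10..12 hold blocks 3, 5 and 9; ebb 0 = {3},
     ebb 1 = {5, 9}.  ebb_head gets one extra entry (see add_block1
     below) pointing one past the region.  */
  int rgn_bb_table[13] = { [10] = 3, [11] = 5, [12] = 9 };
  int ebb_head[] = { 10, 11, 13 };

  /* BB_TO_BLOCK (ebb) is rgn_bb_table[ebb_head[ebb]].  */
  printf ("ebb 1: first bb %d, last bb %d\n",
          rgn_bb_table[ebb_head[1]],           /* 5, like EBB_FIRST_BB */
          rgn_bb_table[ebb_head[1 + 1] - 1]);  /* 9, like EBB_LAST_BB */
  return 0;
}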
/* Target info declarations.
@@ -244,6 +260,12 @@ static edgeset *pot_split;
/* For every bb, a set of its ancestor edges. */
static edgeset *ancestor_edges;
+/* Array of EBB start positions in rgn_bb_table. Currently we can get an
+ ebb only through the splitting of a currently scheduled block; therefore,
+ we don't need an ebb_head array for every region, and it is sufficient
+ to hold it only for the current one. */
+static int *ebb_head;
+
static void compute_dom_prob_ps (int);
#define INSN_PROBABILITY(INSN) (SRC_PROB (BLOCK_TO_BB (BLOCK_NUM (INSN))))
@@ -381,13 +403,12 @@ debug_regions (void)
rgn_table[rgn].rgn_nr_blocks);
fprintf (sched_dump, ";;\tbb/block: ");
- for (bb = 0; bb < rgn_table[rgn].rgn_nr_blocks; bb++)
- {
- current_blocks = RGN_BLOCKS (rgn);
+ /* We don't have ebb_head initialized yet, so we can't use
+ BB_TO_BLOCK (). */
+ current_blocks = RGN_BLOCKS (rgn);
- gcc_assert (bb == BLOCK_TO_BB (BB_TO_BLOCK (bb)));
- fprintf (sched_dump, " %d/%d ", bb, BB_TO_BLOCK (bb));
- }
+ for (bb = 0; bb < rgn_table[rgn].rgn_nr_blocks; bb++)
+ fprintf (sched_dump, " %d/%d ", bb, rgn_bb_table[current_blocks + bb]);
fprintf (sched_dump, "\n\n");
}
@@ -409,6 +430,8 @@ find_single_block_region (void)
rgn_bb_table[nr_regions] = bb->index;
RGN_NR_BLOCKS (nr_regions) = 1;
RGN_BLOCKS (nr_regions) = nr_regions;
+ RGN_DONT_CALC_DEPS (nr_regions) = 0;
+ RGN_HAS_REAL_EBB (nr_regions) = 0;
CONTAINING_RGN (bb->index) = nr_regions;
BLOCK_TO_BB (bb->index) = 0;
nr_regions++;
@@ -852,6 +875,8 @@ find_rgns (void)
rgn_bb_table[idx] = bb->index;
RGN_NR_BLOCKS (nr_regions) = num_bbs;
RGN_BLOCKS (nr_regions) = idx++;
+ RGN_DONT_CALC_DEPS (nr_regions) = 0;
+ RGN_HAS_REAL_EBB (nr_regions) = 0;
CONTAINING_RGN (bb->index) = nr_regions;
BLOCK_TO_BB (bb->index) = count = 0;
@@ -921,6 +946,8 @@ find_rgns (void)
rgn_bb_table[idx] = bb->index;
RGN_NR_BLOCKS (nr_regions) = 1;
RGN_BLOCKS (nr_regions) = idx++;
+ RGN_DONT_CALC_DEPS (nr_regions) = 0;
+ RGN_HAS_REAL_EBB (nr_regions) = 0;
CONTAINING_RGN (bb->index) = nr_regions++;
BLOCK_TO_BB (bb->index) = 0;
}
@@ -1152,6 +1179,8 @@ extend_rgns (int *degree, int *idxp, sbitmap header, int *loop_hdr)
degree[bbn] = -1;
rgn_bb_table[idx] = bbn;
RGN_BLOCKS (nr_regions) = idx++;
+ RGN_DONT_CALC_DEPS (nr_regions) = 0;
+ RGN_HAS_REAL_EBB (nr_regions) = 0;
CONTAINING_RGN (bbn) = nr_regions;
BLOCK_TO_BB (bbn) = 0;
@@ -1205,6 +1234,8 @@ extend_rgns (int *degree, int *idxp, sbitmap header, int *loop_hdr)
{
RGN_BLOCKS (nr_regions) = idx;
RGN_NR_BLOCKS (nr_regions) = 1;
+ RGN_DONT_CALC_DEPS (nr_regions) = 0;
+ RGN_HAS_REAL_EBB (nr_regions) = 0;
nr_regions++;
}
@@ -1254,6 +1285,9 @@ compute_dom_prob_ps (int bb)
edge_iterator in_ei;
edge in_edge;
+ /* We shouldn't have any real ebbs yet. */
+ gcc_assert (ebb_head [bb] == bb + current_blocks);
+
if (IS_RGN_ENTRY (bb))
{
SET_BIT (dom[bb], 0);
@@ -1519,8 +1553,14 @@ check_live_1 (int src, rtx x)
{
basic_block b = candidate_table[src].split_bbs.first_member[i];
- if (REGNO_REG_SET_P (b->il.rtl->global_live_at_start,
- regno + j))
+ /* We can have split blocks that were recently generated.
+ Such blocks are always outside the current region. */
+ gcc_assert (glat_start[b->index]
+ || CONTAINING_RGN (b->index)
+ != CONTAINING_RGN (BB_TO_BLOCK (src)));
+ if (!glat_start[b->index]
+ || REGNO_REG_SET_P (glat_start[b->index],
+ regno + j))
{
return 0;
}
@@ -1534,7 +1574,11 @@ check_live_1 (int src, rtx x)
{
basic_block b = candidate_table[src].split_bbs.first_member[i];
- if (REGNO_REG_SET_P (b->il.rtl->global_live_at_start, regno))
+ gcc_assert (glat_start[b->index]
+ || CONTAINING_RGN (b->index)
+ != CONTAINING_RGN (BB_TO_BLOCK (src)));
+ if (!glat_start[b->index]
+ || REGNO_REG_SET_P (glat_start[b->index], regno))
{
return 0;
}
@@ -1593,8 +1637,7 @@ update_live_1 (int src, rtx x)
{
basic_block b = candidate_table[src].update_bbs.first_member[i];
- SET_REGNO_REG_SET (b->il.rtl->global_live_at_start,
- regno + j);
+ SET_REGNO_REG_SET (glat_start[b->index], regno + j);
}
}
}
@@ -1604,7 +1647,7 @@ update_live_1 (int src, rtx x)
{
basic_block b = candidate_table[src].update_bbs.first_member[i];
- SET_REGNO_REG_SET (b->il.rtl->global_live_at_start, regno);
+ SET_REGNO_REG_SET (glat_start[b->index], regno);
}
}
}
@@ -1880,25 +1923,35 @@ static int sched_target_n_insns;
static int target_n_insns;
/* The number of insns from the entire region scheduled so far. */
static int sched_n_insns;
-/* Nonzero if the last scheduled insn was a jump. */
-static int last_was_jump;
/* Implementations of the sched_info functions for region scheduling. */
static void init_ready_list (void);
static int can_schedule_ready_p (rtx);
-static int new_ready (rtx);
+static void begin_schedule_ready (rtx, rtx);
+static ds_t new_ready (rtx, ds_t);
static int schedule_more_p (void);
static const char *rgn_print_insn (rtx, int);
static int rgn_rank (rtx, rtx);
static int contributes_to_priority (rtx, rtx);
static void compute_jump_reg_dependencies (rtx, regset, regset, regset);
+/* Functions for speculative scheduling. */
+static void add_remove_insn (rtx, int);
+static void extend_regions (void);
+static void add_block1 (basic_block, basic_block);
+static void fix_recovery_cfg (int, int, int);
+static basic_block advance_target_bb (basic_block, rtx);
+static void check_dead_notes1 (int, sbitmap);
+#ifdef ENABLE_CHECKING
+static int region_head_or_leaf_p (basic_block, int);
+#endif
+
/* Return nonzero if there are more insns that should be scheduled. */
static int
schedule_more_p (void)
{
- return ! last_was_jump && sched_target_n_insns < target_n_insns;
+ return sched_target_n_insns < target_n_insns;
}
/* Add all insns that are initially ready to the ready list READY. Called
@@ -1915,7 +1968,6 @@ init_ready_list (void)
target_n_insns = 0;
sched_target_n_insns = 0;
sched_n_insns = 0;
- last_was_jump = 0;
/* Print debugging information. */
if (sched_verbose >= 5)
@@ -1946,6 +1998,8 @@ init_ready_list (void)
{
try_ready (insn);
target_n_insns++;
+
+ gcc_assert (!(TODO_SPEC (insn) & BEGIN_CONTROL));
}
/* Add to ready list all 'ready' insns in valid source blocks.
@@ -1958,7 +2012,8 @@ init_ready_list (void)
rtx src_next_tail;
rtx tail, head;
- get_block_head_tail (BB_TO_BLOCK (bb_src), &head, &tail);
+ get_ebb_head_tail (EBB_FIRST_BB (bb_src), EBB_LAST_BB (bb_src),
+ &head, &tail);
src_next_tail = NEXT_INSN (tail);
src_head = head;
@@ -1974,18 +2029,29 @@ init_ready_list (void)
static int
can_schedule_ready_p (rtx insn)
{
- if (JUMP_P (insn))
- last_was_jump = 1;
+ /* An interblock motion? */
+ if (INSN_BB (insn) != target_bb
+ && IS_SPECULATIVE_INSN (insn)
+ && !check_live (insn, INSN_BB (insn)))
+ return 0;
+ else
+ return 1;
+}
+/* Updates counters and other information. Split from can_schedule_ready_p ()
+ because when we schedule an insn speculatively, the insn passed to
+ can_schedule_ready_p () differs from the one passed to
+ begin_schedule_ready (). */
+static void
+begin_schedule_ready (rtx insn, rtx last ATTRIBUTE_UNUSED)
+{
/* An interblock motion? */
if (INSN_BB (insn) != target_bb)
{
- basic_block b1;
-
if (IS_SPECULATIVE_INSN (insn))
{
- if (!check_live (insn, INSN_BB (insn)))
- return 0;
+ gcc_assert (check_live (insn, INSN_BB (insn)));
+
update_live (insn, INSN_BB (insn));
/* For speculative load, mark insns fed by it. */
@@ -1995,32 +2061,6 @@ can_schedule_ready_p (rtx insn)
nr_spec++;
}
nr_inter++;
-
- /* Update source block boundaries. */
- b1 = BLOCK_FOR_INSN (insn);
- if (insn == BB_HEAD (b1) && insn == BB_END (b1))
- {
- /* We moved all the insns in the basic block.
- Emit a note after the last insn and update the
- begin/end boundaries to point to the note. */
- rtx note = emit_note_after (NOTE_INSN_DELETED, insn);
- BB_HEAD (b1) = note;
- BB_END (b1) = note;
- }
- else if (insn == BB_END (b1))
- {
- /* We took insns from the end of the basic block,
- so update the end of block boundary so that it
- points to the first insn we did not move. */
- BB_END (b1) = PREV_INSN (insn);
- }
- else if (insn == BB_HEAD (b1))
- {
- /* We took insns from the start of the basic block,
- so update the start of block boundary so that
- it points to the first insn we did not move. */
- BB_HEAD (b1) = NEXT_INSN (insn);
- }
}
else
{
@@ -2028,28 +2068,44 @@ can_schedule_ready_p (rtx insn)
sched_target_n_insns++;
}
sched_n_insns++;
-
- return 1;
}
-/* Called after INSN has all its dependencies resolved. Return nonzero
- if it should be moved to the ready list or the queue, or zero if we
- should silently discard it. */
-static int
-new_ready (rtx next)
+/* Called after NEXT has all its hard dependencies resolved and the speculation
+ of type TS is enough to overcome them all.
+ Return the (possibly adjusted) status TS if NEXT should be moved to the
+ ready list or the queue, or a status with HARD_DEP set if it should be
+ silently discarded. */
+static ds_t
+new_ready (rtx next, ds_t ts)
{
- /* For speculative insns, before inserting to ready/queue,
- check live, exception-free, and issue-delay. */
- if (INSN_BB (next) != target_bb
- && (!IS_VALID (INSN_BB (next))
+ if (INSN_BB (next) != target_bb)
+ {
+ int not_ex_free = 0;
+
+ /* For speculative insns, before inserting to ready/queue,
+ check live, exception-free, and issue-delay. */
+ if (!IS_VALID (INSN_BB (next))
|| CANT_MOVE (next)
|| (IS_SPECULATIVE_INSN (next)
&& ((recog_memoized (next) >= 0
- && min_insn_conflict_delay (curr_state, next, next) > 3)
+ && min_insn_conflict_delay (curr_state, next, next)
+ > PARAM_VALUE (PARAM_MAX_SCHED_INSN_CONFLICT_DELAY))
+ || RECOVERY_BLOCK (next)
|| !check_live (next, INSN_BB (next))
- || !is_exception_free (next, INSN_BB (next), target_bb)))))
- return 0;
- return 1;
+ || (not_ex_free = !is_exception_free (next, INSN_BB (next),
+ target_bb)))))
+ {
+ if (not_ex_free
+ /* We are here because is_exception_free () == false.
+ But we possibly can handle that with control speculation. */
+ && current_sched_info->flags & DO_SPECULATION)
+ /* Here we got new control-speculative instruction. */
+ ts = set_dep_weak (ts, BEGIN_CONTROL, MAX_DEP_WEAK);
+ else
+ ts = (ts & ~SPECULATIVE) | HARD_DEP;
+ }
+ }
+
+ return ts;
}
/* Return a string that contains the insn uid and optionally anything else
@@ -2112,7 +2168,8 @@ rgn_rank (rtx insn1, rtx insn2)
static int
contributes_to_priority (rtx next, rtx insn)
{
- return BLOCK_NUM (next) == BLOCK_NUM (insn);
+ /* Check that NEXT and INSN reside in the same ebb. */
+ return BLOCK_TO_BB (BLOCK_NUM (next)) == BLOCK_TO_BB (BLOCK_NUM (insn));
}
/* INSN is a JUMP_INSN, COND_SET is the set of registers that are
@@ -2148,7 +2205,18 @@ static struct sched_info region_sched_info =
NULL, NULL,
0, 0, 0,
- 0
+ add_remove_insn,
+ begin_schedule_ready,
+ add_block1,
+ advance_target_bb,
+ fix_recovery_cfg,
+#ifdef ENABLE_CHECKING
+ region_head_or_leaf_p,
+#endif
+ SCHED_RGN | USE_GLAT
+#ifdef ENABLE_CHECKING
+ | DETACH_LIFE_INFO
+#endif
};
/* Determine if PAT sets a CLASS_LIKELY_SPILLED_P register. */
@@ -2447,7 +2515,8 @@ compute_block_backward_dependences (int bb)
tmp_deps = bb_deps[bb];
/* Do the analysis for this block. */
- get_block_head_tail (BB_TO_BLOCK (bb), &head, &tail);
+ gcc_assert (EBB_FIRST_BB (bb) == EBB_LAST_BB (bb));
+ get_ebb_head_tail (EBB_FIRST_BB (bb), EBB_LAST_BB (bb), &head, &tail);
sched_analyze (&tmp_deps, head, tail);
add_branch_dependences (head, tail);
@@ -2489,7 +2558,8 @@ debug_dependencies (void)
rtx next_tail;
rtx insn;
- get_block_head_tail (BB_TO_BLOCK (bb), &head, &tail);
+ gcc_assert (EBB_FIRST_BB (bb) == EBB_LAST_BB (bb));
+ get_ebb_head_tail (EBB_FIRST_BB (bb), EBB_LAST_BB (bb), &head, &tail);
next_tail = NEXT_INSN (tail);
fprintf (sched_dump, "\n;; --- Region Dependences --- b %d bb %d \n",
BB_TO_BLOCK (bb), bb);
@@ -2576,48 +2646,68 @@ schedule_region (int rgn)
edge_iterator ei;
edge e;
int bb;
- int rgn_n_insns = 0;
int sched_rgn_n_insns = 0;
+ rgn_n_insns = 0;
/* Set variables for the current region. */
current_nr_blocks = RGN_NR_BLOCKS (rgn);
current_blocks = RGN_BLOCKS (rgn);
+
+ /* See the comments in add_block1 for why we allocate one extra element. */
+ ebb_head = xrealloc (ebb_head, (current_nr_blocks + 1) * sizeof (*ebb_head));
+ for (bb = 0; bb <= current_nr_blocks; bb++)
+ ebb_head[bb] = current_blocks + bb;
/* Don't schedule region that is marked by
NOTE_DISABLE_SCHED_OF_BLOCK. */
if (sched_is_disabled_for_current_region_p ())
return;
- init_deps_global ();
+ if (!RGN_DONT_CALC_DEPS (rgn))
+ {
+ init_deps_global ();
- /* Initializations for region data dependence analysis. */
- bb_deps = XNEWVEC (struct deps, current_nr_blocks);
- for (bb = 0; bb < current_nr_blocks; bb++)
- init_deps (bb_deps + bb);
+ /* Initializations for region data dependence analysis. */
+ bb_deps = XNEWVEC (struct deps, current_nr_blocks);
+ for (bb = 0; bb < current_nr_blocks; bb++)
+ init_deps (bb_deps + bb);
- /* Compute LOG_LINKS. */
- for (bb = 0; bb < current_nr_blocks; bb++)
- compute_block_backward_dependences (bb);
+ /* Compute LOG_LINKS. */
+ for (bb = 0; bb < current_nr_blocks; bb++)
+ compute_block_backward_dependences (bb);
- /* Compute INSN_DEPEND. */
- for (bb = current_nr_blocks - 1; bb >= 0; bb--)
- {
- rtx head, tail;
- get_block_head_tail (BB_TO_BLOCK (bb), &head, &tail);
+ /* Compute INSN_DEPEND. */
+ for (bb = current_nr_blocks - 1; bb >= 0; bb--)
+ {
+ rtx head, tail;
- compute_forward_dependences (head, tail);
+ gcc_assert (EBB_FIRST_BB (bb) == EBB_LAST_BB (bb));
+ get_ebb_head_tail (EBB_FIRST_BB (bb), EBB_LAST_BB (bb), &head, &tail);
- if (targetm.sched.dependencies_evaluation_hook)
- targetm.sched.dependencies_evaluation_hook (head, tail);
+ compute_forward_dependences (head, tail);
- }
+ if (targetm.sched.dependencies_evaluation_hook)
+ targetm.sched.dependencies_evaluation_hook (head, tail);
+ }
+ free_pending_lists ();
+
+ finish_deps_global ();
+
+ free (bb_deps);
+ }
+ else
+ /* This is a recovery block. It is always a single-block region. */
+ gcc_assert (current_nr_blocks == 1);
+
/* Set priorities. */
current_sched_info->sched_max_insns_priority = 0;
for (bb = 0; bb < current_nr_blocks; bb++)
{
rtx head, tail;
- get_block_head_tail (BB_TO_BLOCK (bb), &head, &tail);
+
+ gcc_assert (EBB_FIRST_BB (bb) == EBB_LAST_BB (bb));
+ get_ebb_head_tail (EBB_FIRST_BB (bb), EBB_LAST_BB (bb), &head, &tail);
rgn_n_insns += set_priorities (head, tail);
}
@@ -2660,18 +2750,36 @@ schedule_region (int rgn)
/* Compute probabilities, dominators, split_edges. */
for (bb = 0; bb < current_nr_blocks; bb++)
compute_dom_prob_ps (bb);
+
+ /* Cleanup ->aux used for EDGE_TO_BIT mapping. */
+ /* We don't need them anymore, but we want to avoid duplication of
+ aux fields in the newly created edges. */
+ FOR_EACH_BB (block)
+ {
+ if (CONTAINING_RGN (block->index) != rgn)
+ continue;
+ FOR_EACH_EDGE (e, ei, block->succs)
+ e->aux = NULL;
+ }
}
/* Now we can schedule all blocks. */
for (bb = 0; bb < current_nr_blocks; bb++)
{
+ basic_block first_bb, last_bb, curr_bb;
rtx head, tail;
int b = BB_TO_BLOCK (bb);
- get_block_head_tail (b, &head, &tail);
+ first_bb = EBB_FIRST_BB (bb);
+ last_bb = EBB_LAST_BB (bb);
+
+ get_ebb_head_tail (first_bb, last_bb, &head, &tail);
if (no_real_insns_p (head, tail))
- continue;
+ {
+ gcc_assert (first_bb == last_bb);
+ continue;
+ }
current_sched_info->prev_head = PREV_INSN (head);
current_sched_info->next_tail = NEXT_INSN (tail);
@@ -2696,26 +2804,29 @@ schedule_region (int rgn)
if (REG_NOTE_KIND (note) == REG_SAVE_NOTE)
remove_note (head, note);
}
+ else
+ /* This means that the first block in the ebb is empty.
+ That should be impossible: there should at least be the
+ recovery check that caused the splitting. */
+ gcc_unreachable ();
/* Remove remaining note insns from the block, save them in
note_list. These notes are restored at the end of
schedule_block (). */
rm_other_notes (head, tail);
+ unlink_bb_notes (first_bb, last_bb);
+
target_bb = bb;
gcc_assert (flag_schedule_interblock || current_nr_blocks == 1);
current_sched_info->queue_must_finish_empty = current_nr_blocks == 1;
- schedule_block (b, rgn_n_insns);
+ curr_bb = first_bb;
+ schedule_block (&curr_bb, rgn_n_insns);
+ gcc_assert (EBB_FIRST_BB (bb) == first_bb);
sched_rgn_n_insns += sched_n_insns;
- /* Update target block boundaries. */
- if (head == BB_HEAD (BASIC_BLOCK (b)))
- BB_HEAD (BASIC_BLOCK (b)) = current_sched_info->head;
- if (tail == BB_END (BASIC_BLOCK (b)))
- BB_END (BASIC_BLOCK (b)) = current_sched_info->tail;
-
/* Clean up. */
if (current_nr_blocks > 1)
{
@@ -2734,29 +2845,16 @@ schedule_region (int rgn)
for (bb = 0; bb < current_nr_blocks; bb++)
{
rtx head, tail;
- get_block_head_tail (BB_TO_BLOCK (bb), &head, &tail);
+
+ get_ebb_head_tail (EBB_FIRST_BB (bb), EBB_LAST_BB (bb), &head, &tail);
restore_line_notes (head, tail);
}
}
/* Done with this region. */
- free_pending_lists ();
-
- finish_deps_global ();
-
- free (bb_deps);
if (current_nr_blocks > 1)
{
- /* Cleanup ->aux used for EDGE_TO_BIT mapping. */
- FOR_EACH_BB (block)
- {
- if (CONTAINING_RGN (block->index) != rgn)
- continue;
- FOR_EACH_EDGE (e, ei, block->succs)
- e->aux = NULL;
- }
-
free (prob);
sbitmap_vector_free (dom);
sbitmap_vector_free (pot_split);
@@ -2778,10 +2876,11 @@ init_regions (void)
int rgn;
nr_regions = 0;
- rgn_table = XNEWVEC (region, n_basic_blocks);
- rgn_bb_table = XNEWVEC (int, n_basic_blocks);
- block_to_bb = XNEWVEC (int, last_basic_block);
- containing_rgn = XNEWVEC (int, last_basic_block);
+ rgn_table = 0;
+ rgn_bb_table = 0;
+ block_to_bb = 0;
+ containing_rgn = 0;
+ extend_regions ();
/* Compute regions for scheduling. */
if (reload_completed
@@ -2806,6 +2905,8 @@ init_regions (void)
to using the cfg code in flow.c. */
free_dominance_info (CDI_DOMINATORS);
}
+ RGN_BLOCKS (nr_regions) = RGN_BLOCKS (nr_regions - 1) +
+ RGN_NR_BLOCKS (nr_regions - 1);
if (CHECK_DEAD_NOTES)
@@ -2814,15 +2915,8 @@ init_regions (void)
deaths_in_region = XNEWVEC (int, nr_regions);
/* Remove all death notes from the subroutine. */
for (rgn = 0; rgn < nr_regions; rgn++)
- {
- int b;
+ check_dead_notes1 (rgn, blocks);
- sbitmap_zero (blocks);
- for (b = RGN_NR_BLOCKS (rgn) - 1; b >= 0; --b)
- SET_BIT (blocks, rgn_bb_table[RGN_BLOCKS (rgn) + b]);
-
- deaths_in_region[rgn] = count_or_remove_death_notes (blocks, 1);
- }
sbitmap_free (blocks);
}
else
@@ -2858,9 +2952,15 @@ schedule_insns (void)
init_regions ();
+ /* ebb_head is a region-scope structure, but we realloc it for
+ each region to save time/memory/something else. */
+ ebb_head = 0;
+
/* Schedule every region in the subroutine. */
for (rgn = 0; rgn < nr_regions; rgn++)
schedule_region (rgn);
+
+ free (ebb_head);
/* Update life analysis for the subroutine. Do single block regions
first so that we can verify that live_at_start didn't change. Then
@@ -2875,8 +2975,11 @@ schedule_insns (void)
that live_at_start should change at region heads. Not sure what the
best way to test for this kind of thing... */
+ if (current_sched_info->flags & DETACH_LIFE_INFO)
+ /* This flag can be set either by the target or by ENABLE_CHECKING. */
+ attach_life_info ();
+
allocate_reg_life_data ();
- compute_bb_for_insn ();
any_large_regions = 0;
large_region_blocks = sbitmap_alloc (last_basic_block);
@@ -2891,8 +2994,13 @@ schedule_insns (void)
we've possibly done interblock scheduling that affects global liveness.
For regions consisting of single blocks we need to do only local
liveness. */
- for (rgn = 0; rgn < nr_regions; rgn++)
- if (RGN_NR_BLOCKS (rgn) > 1)
+ for (rgn = 0; rgn < nr_regions; rgn++)
+ if (RGN_NR_BLOCKS (rgn) > 1
+ /* Or the only block of this region has been splitted. */
+ || RGN_HAS_REAL_EBB (rgn)
+ /* New blocks (e.g. recovery blocks) should be processed
+ as parts of large regions. */
+ || !glat_start[rgn_bb_table[RGN_BLOCKS (rgn)]])
any_large_regions = 1;
else
{
@@ -2904,16 +3012,21 @@ schedule_insns (void)
regs_ever_live, which should not change after reload. */
update_life_info (blocks, UPDATE_LIFE_LOCAL,
(reload_completed ? PROP_DEATH_NOTES
- : PROP_DEATH_NOTES | PROP_REG_INFO));
+ : (PROP_DEATH_NOTES | PROP_REG_INFO)));
if (any_large_regions)
{
update_life_info (large_region_blocks, UPDATE_LIFE_GLOBAL,
- PROP_DEATH_NOTES | PROP_REG_INFO);
+ (reload_completed ? PROP_DEATH_NOTES
+ : (PROP_DEATH_NOTES | PROP_REG_INFO)));
+
+#ifdef ENABLE_CHECKING
+ check_reg_live ();
+#endif
}
if (CHECK_DEAD_NOTES)
{
- /* Verify the counts of basic block notes in single the basic block
+ /* Verify the counts of basic block notes in single basic block
regions. */
for (rgn = 0; rgn < nr_regions; rgn++)
if (RGN_NR_BLOCKS (rgn) == 1)
@@ -2960,6 +3073,209 @@ schedule_insns (void)
sbitmap_free (blocks);
sbitmap_free (large_region_blocks);
}
+
+/* INSN has been added to/removed from current region. */
+static void
+add_remove_insn (rtx insn, int remove_p)
+{
+ if (!remove_p)
+ rgn_n_insns++;
+ else
+ rgn_n_insns--;
+
+ if (INSN_BB (insn) == target_bb)
+ {
+ if (!remove_p)
+ target_n_insns++;
+ else
+ target_n_insns--;
+ }
+}
+
+/* Extend internal data structures. */
+static void
+extend_regions (void)
+{
+ rgn_table = XRESIZEVEC (region, rgn_table, n_basic_blocks);
+ rgn_bb_table = XRESIZEVEC (int, rgn_bb_table, n_basic_blocks);
+ block_to_bb = XRESIZEVEC (int, block_to_bb, last_basic_block);
+ containing_rgn = XRESIZEVEC (int, containing_rgn, last_basic_block);
+}
+
+/* BB was added to ebb after AFTER. */
+static void
+add_block1 (basic_block bb, basic_block after)
+{
+ extend_regions ();
+
+ if (after == 0 || after == EXIT_BLOCK_PTR)
+ {
+ int i;
+
+ i = RGN_BLOCKS (nr_regions);
+ /* I - first free position in rgn_bb_table. */
+
+ rgn_bb_table[i] = bb->index;
+ RGN_NR_BLOCKS (nr_regions) = 1;
+ RGN_DONT_CALC_DEPS (nr_regions) = after == EXIT_BLOCK_PTR;
+ RGN_HAS_REAL_EBB (nr_regions) = 0;
+ CONTAINING_RGN (bb->index) = nr_regions;
+ BLOCK_TO_BB (bb->index) = 0;
+
+ nr_regions++;
+
+ RGN_BLOCKS (nr_regions) = i + 1;
+
+ if (CHECK_DEAD_NOTES)
+ {
+ sbitmap blocks = sbitmap_alloc (last_basic_block);
+ deaths_in_region = xrealloc (deaths_in_region, nr_regions *
+ sizeof (*deaths_in_region));
+
+ check_dead_notes1 (nr_regions - 1, blocks);
+
+ sbitmap_free (blocks);
+ }
+ }
+ else
+ {
+ int i, pos;
+
+ /* We need to fix rgn_table, block_to_bb, containing_rgn
+ and ebb_head. */
+
+ BLOCK_TO_BB (bb->index) = BLOCK_TO_BB (after->index);
+
+ /* We extend ebb_head to one more position to
+ easily find the last position of the last ebb in
+ the current region. Thus, ebb_head[BLOCK_TO_BB (after) + 1]
+ is _always_ valid for access. */
+
+ i = BLOCK_TO_BB (after->index) + 1;
+ for (pos = ebb_head[i]; rgn_bb_table[pos] != after->index; pos--);
+ pos++;
+ gcc_assert (pos > ebb_head[i - 1]);
+ /* i - ebb right after "AFTER". */
+ /* ebb_head[i] - VALID. */
+
+ /* Source position: ebb_head[i]
+ Destination position: ebb_head[i] + 1
+ Last position: RGN_BLOCKS (nr_regions) - 1
+ Number of elements to copy:
+ (last position) - (source position) + 1. */
+
+ memmove (rgn_bb_table + pos + 1,
+ rgn_bb_table + pos,
+ ((RGN_BLOCKS (nr_regions) - 1) - (pos) + 1)
+ * sizeof (*rgn_bb_table));
+
+ rgn_bb_table[pos] = bb->index;
+
+ for (; i <= current_nr_blocks; i++)
+ ebb_head [i]++;
+
+ i = CONTAINING_RGN (after->index);
+ CONTAINING_RGN (bb->index) = i;
+
+ RGN_HAS_REAL_EBB (i) = 1;
+
+ for (++i; i <= nr_regions; i++)
+ RGN_BLOCKS (i)++;
+
+ /* We don't need to call check_dead_notes1 () because this new block
+ is just a split of the old. We don't want to count anything twice. */
+ }
+}
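
To make the position arithmetic above concrete, here is a standalone model, with hypothetical indices, of the shift add_block1 performs when block 9 is split off after block 5 in a region occupying positions 10..12:

#include <stdio.h>
#include <string.h>

int
main (void)
{
  /* Before: ebbs {3}, {5}, {8} at positions 10..12; one spare slot.  */
  int rgn_bb_table[14] = { [10] = 3, [11] = 5, [12] = 8 };
  int ebb_head[] = { 10, 11, 12, 13 };
  int last = 12;   /* RGN_BLOCKS (nr_regions) - 1 */
  int pos = 12;    /* one past block 5, as computed above */
  int i;

  /* Shift the tail of the table one slot right and insert block 9.  */
  memmove (rgn_bb_table + pos + 1, rgn_bb_table + pos,
           (last - pos + 1) * sizeof (*rgn_bb_table));
  rgn_bb_table[pos] = 9;

  /* All ebb heads after the split block move one position right,
     giving ebb_head == {10, 11, 13, 14} and ebbs {3}, {5, 9}, {8}.  */
  for (i = 2; i <= 3; i++)
    ebb_head[i]++;

  for (i = 10; i <= 13; i++)
    printf ("%d ", rgn_bb_table[i]);  /* prints: 3 5 9 8 */
  printf ("\n");
  return 0;
}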
+
+/* Fix internal data after interblock movement of jump instruction.
+ For parameter meaning please refer to
+ sched-int.h: struct sched_info: fix_recovery_cfg. */
+static void
+fix_recovery_cfg (int bbi, int check_bbi, int check_bb_nexti)
+{
+ int old_pos, new_pos, i;
+
+ BLOCK_TO_BB (check_bb_nexti) = BLOCK_TO_BB (bbi);
+
+ for (old_pos = ebb_head[BLOCK_TO_BB (check_bbi) + 1] - 1;
+ rgn_bb_table[old_pos] != check_bb_nexti;
+ old_pos--);
+ gcc_assert (old_pos > ebb_head[BLOCK_TO_BB (check_bbi)]);
+
+ for (new_pos = ebb_head[BLOCK_TO_BB (bbi) + 1] - 1;
+ rgn_bb_table[new_pos] != bbi;
+ new_pos--);
+ new_pos++;
+ gcc_assert (new_pos > ebb_head[BLOCK_TO_BB (bbi)]);
+
+ gcc_assert (new_pos < old_pos);
+
+ memmove (rgn_bb_table + new_pos + 1,
+ rgn_bb_table + new_pos,
+ (old_pos - new_pos) * sizeof (*rgn_bb_table));
+
+ rgn_bb_table[new_pos] = check_bb_nexti;
+
+ for (i = BLOCK_TO_BB (bbi) + 1; i <= BLOCK_TO_BB (check_bbi); i++)
+ ebb_head[i]++;
+}
+
+/* Return next block in ebb chain. For parameter meaning please refer to
+ sched-int.h: struct sched_info: advance_target_bb. */
+static basic_block
+advance_target_bb (basic_block bb, rtx insn)
+{
+ if (insn)
+ return 0;
+
+ gcc_assert (BLOCK_TO_BB (bb->index) == target_bb
+ && BLOCK_TO_BB (bb->next_bb->index) == target_bb);
+ return bb->next_bb;
+}
+
+/* Count and remove death notes in region RGN, which consists of blocks
+ with indices in BLOCKS. */
+static void
+check_dead_notes1 (int rgn, sbitmap blocks)
+{
+ int b;
+
+ sbitmap_zero (blocks);
+ for (b = RGN_NR_BLOCKS (rgn) - 1; b >= 0; --b)
+ SET_BIT (blocks, rgn_bb_table[RGN_BLOCKS (rgn) + b]);
+
+ deaths_in_region[rgn] = count_or_remove_death_notes (blocks, 1);
+}
+
+#ifdef ENABLE_CHECKING
+/* Return nonzero if BB is the head or a leaf (depending on LEAF_P) block
+ of the current region. For more information please refer to
+ sched-int.h: struct sched_info: region_head_or_leaf_p. */
+static int
+region_head_or_leaf_p (basic_block bb, int leaf_p)
+{
+ if (!leaf_p)
+ return bb->index == rgn_bb_table[RGN_BLOCKS (CONTAINING_RGN (bb->index))];
+ else
+ {
+ int i;
+ edge e;
+ edge_iterator ei;
+
+ i = CONTAINING_RGN (bb->index);
+
+ FOR_EACH_EDGE (e, ei, bb->succs)
+ if (CONTAINING_RGN (e->dest->index) == i
+ /* except self-loop. */
+ && e->dest != bb)
+ return 0;
+
+ return 1;
+ }
+}
+#endif /* ENABLE_CHECKING */
+
#endif
static bool
diff --git a/gcc/target-def.h b/gcc/target-def.h
index fa38166769c..690d0a50767 100644
--- a/gcc/target-def.h
+++ b/gcc/target-def.h
@@ -288,6 +288,14 @@ Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD 0
#define TARGET_SCHED_DFA_NEW_CYCLE 0
#define TARGET_SCHED_IS_COSTLY_DEPENDENCE 0
+#define TARGET_SCHED_ADJUST_COST_2 0
+#define TARGET_SCHED_H_I_D_EXTENDED 0
+#define TARGET_SCHED_SPECULATE_INSN 0
+#define TARGET_SCHED_NEEDS_BLOCK_P 0
+#define TARGET_SCHED_GEN_CHECK 0
+#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD_SPEC 0
+#define TARGET_SCHED_SET_SCHED_FLAGS 0
+
#define TARGET_SCHED \
{TARGET_SCHED_ADJUST_COST, \
@@ -308,7 +316,14 @@ Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD, \
TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD, \
TARGET_SCHED_DFA_NEW_CYCLE, \
- TARGET_SCHED_IS_COSTLY_DEPENDENCE}
+ TARGET_SCHED_IS_COSTLY_DEPENDENCE, \
+ TARGET_SCHED_ADJUST_COST_2, \
+ TARGET_SCHED_H_I_D_EXTENDED, \
+ TARGET_SCHED_SPECULATE_INSN, \
+ TARGET_SCHED_NEEDS_BLOCK_P, \
+ TARGET_SCHED_GEN_CHECK, \
+ TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD_SPEC, \
+ TARGET_SCHED_SET_SCHED_FLAGS}
#define TARGET_VECTORIZE_BUILTIN_MASK_FOR_LOAD 0
diff --git a/gcc/target.h b/gcc/target.h
index e6e6ba83155..1b768e4f49c 100644
--- a/gcc/target.h
+++ b/gcc/target.h
@@ -51,6 +51,7 @@ Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#include "insn-modes.h"
struct stdarg_info;
+struct spec_info_def;
/* The struct used by the secondary_reload target hook. */
typedef struct secondary_reload_info
@@ -306,6 +307,58 @@ struct gcc_target
between the already scheduled insn (first parameter) and the
second insn (second parameter). */
bool (* is_costly_dependence) (rtx, rtx, rtx, int, int);
+
+ /* Given the current cost, COST, of an insn, INSN, calculate and
+ return a new cost based on its relationship to DEP_INSN through the
+ dependence of type DEP_TYPE. The default is to make no adjustment. */
+ int (* adjust_cost_2) (rtx insn, int, rtx def_insn, int cost);
+
+ /* The following member value is a pointer to a function called
+ by the insn scheduler. This hook is called to notify the backend
+ that new instructions were emitted. */
+ void (* h_i_d_extended) (void);
+
+ /* The following member value is a pointer to a function called
+ by the insn scheduler.
+ The first parameter is an instruction, the second parameter is the type
+ of the requested speculation, and the third parameter is a pointer to the
+ speculative pattern of the corresponding type (set if return value == 1).
+ It should return
+ -1 if there is no pattern that will satisfy the requested speculation
+ type,
+ 0 if the current pattern satisfies the requested speculation type, or
+ 1 if the pattern of the instruction should be changed to the newly
+ generated one. */
+ int (* speculate_insn) (rtx, HOST_WIDE_INT, rtx *);
+
+ /* The following member value is a pointer to a function called
+ by the insn scheduler. It should return true if the check instruction
+ corresponding to the instruction passed as the parameter needs a
+ recovery block. */
+ bool (* needs_block_p) (rtx);
+
+ /* The following member value is a pointer to a function called
+ by the insn scheduler. It should return a pattern for the check
+ instruction.
+ The first parameter is a speculative instruction, the second parameter
+ is the label of the corresponding recovery block (or null if it is a
+ simple check). If mutation of the check is requested (e.g. from
+ ld.c to chk.a), the third parameter is true; in this case the first
+ parameter is the previous check. */
+ rtx (* gen_check) (rtx, rtx, bool);
+
+ /* The following member value is a pointer to a function controlling
+ what insns from the ready insn queue will be considered for the
+ multipass insn scheduling. If the hook returns zero for the insn
+ passed as the parameter, the insn will not be chosen to be
+ issued. This hook is used to discard speculative instructions
+ that stand at the first position of the ready list. */
+ bool (* first_cycle_multipass_dfa_lookahead_guard_spec) (rtx);
+
+ /* The following member value is a pointer to a function that provides
+ information about the speculation capabilities of the target.
+ The parameter is a pointer to the spec_info variable. */
+ void (* set_sched_flags) (struct spec_info_def *);
} sched;
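
To connect these hooks back to the TARGET_SCHED_* macros in target-def.h, here is a hedged sketch of a hypothetical port providing speculate_insn; gen_speculative_form () is an invented helper, only begin-data speculation is assumed, and a real port would also have to mask out the weakness bits of REQUEST:

/* Hypothetical port code, for illustration only.  */
static int
foo_speculate_insn (rtx insn, HOST_WIDE_INT request, rtx *new_pat)
{
  if (request & ~BEGIN_DATA)
    return -1;  /* No pattern satisfies the requested speculation.  */

  if (0 /* INSN is already in its speculative form.  */)
    return 0;   /* The current pattern satisfies REQUEST.  */

  *new_pat = gen_speculative_form (insn);  /* invented helper */
  return 1;     /* The pattern should be changed to *NEW_PAT.  */
}

#undef  TARGET_SCHED_SPECULATE_INSN
#define TARGET_SCHED_SPECULATE_INSN foo_speculate_insn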
/* Functions relating to vectorization. */