-rw-r--r--   gcc/ChangeLog        | 26
-rw-r--r--   gcc/common.opt       |  4
-rw-r--r--   gcc/doc/invoke.texi  |  8
-rw-r--r--   gcc/haifa-sched.c    | 51
-rw-r--r--   gcc/ira.c            | 11
-rw-r--r--   gcc/passes.def       |  1
-rw-r--r--   gcc/sched-deps.c     |  5
-rw-r--r--   gcc/sched-int.h      |  2
-rw-r--r--   gcc/sched-rgn.c      | 66
-rw-r--r--   gcc/timevar.def      |  1
-rw-r--r--   gcc/tree-pass.h      |  1
11 files changed, 160 insertions, 16 deletions
diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index cf1120099f9..dfaf4e38d14 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,3 +1,29 @@
+2013-11-06  Vladimir Makarov  <vmakarov@redhat.com>
+
+	* tree-pass.h (make_pass_live_range_shrinkage): New external.
+	* timevar.def (TV_LIVE_RANGE_SHRINKAGE): New.
+	* sched-rgn.c (gate_handle_live_range_shrinkage): New.
+	(rest_of_handle_live_range_shrinkage): Ditto
+	(class pass_live_range_shrinkage): Ditto.
+	(pass_data_live_range_shrinkage): Ditto.
+	(make_pass_live_range_shrinkage): Ditto.
+	* sched-int.h (initialize_live_range_shrinkage): New prototype.
+	(finish_live_range_shrinkage): Ditto.
+	* sched-deps.c (create_insn_reg_set): Make void return value.
+	* passes.def: Add pass_live_range_shrinkage.
+	* ira.c (update_equiv_regs): Don't move if
+	flag_live_range_shrinkage.
+	* haifa-sched.c (live_range_shrinkage_p): New.
+	(initialize_live_range_shrinkage, finish_live_range_shrinkage):
+	New functions.
+	(rank_for_schedule): Add code for pressure relief through live
+	range shrinkage.
+	(schedule_insn): Print more debug info.
+	(sched_init): Setup SCHED_PRESSURE_WEIGHTED for pressure relief
+	through live range shrinkage.
+	* doc/invoke.texi (-flive-range-shrinkage): New.
+	* common.opt (flive-range-shrinkage): New.
+
 2013-11-06  Uros Bizjak  <ubizjak@gmail.com>
 
 	PR target/59021
diff --git a/gcc/common.opt b/gcc/common.opt
index bda479071c4..7e1e3ded458 100644
--- a/gcc/common.opt
+++ b/gcc/common.opt
@@ -1738,6 +1738,10 @@ fregmove
 Common Ignore
 Does nothing. Preserved for backward compatibility.
 
+flive-range-shrinkage
+Common Report Var(flag_live_range_shrinkage) Init(0) Optimization
+Relief of register pressure through live range shrinkage
+
 frename-registers
 Common Report Var(flag_rename_registers) Init(2) Optimization
 Perform a register renaming optimization pass
diff --git a/gcc/doc/invoke.texi b/gcc/doc/invoke.texi
index b933b2a58c0..00634457f12 100644
--- a/gcc/doc/invoke.texi
+++ b/gcc/doc/invoke.texi
@@ -377,7 +377,7 @@ Objective-C and Objective-C++ Dialects}.
 -fira-region=@var{region} -fira-hoist-pressure @gol
 -fira-loop-pressure -fno-ira-share-save-slots @gol
 -fno-ira-share-spill-slots -fira-verbose=@var{n} @gol
--fivopts -fkeep-inline-functions -fkeep-static-consts @gol
+-fivopts -fkeep-inline-functions -fkeep-static-consts -flive-range-shrinkage @gol
 -floop-block -floop-interchange -floop-strip-mine -floop-nest-optimize @gol
 -floop-parallelize-all -flto -flto-compression-level @gol
 -flto-partition=@var{alg} -flto-report -flto-report-wpa -fmerge-all-constants @gol
@@ -7268,6 +7268,12 @@ registers after writing to their lower 32-bit half.
 
 Enabled for x86 at levels @option{-O2}, @option{-O3}.
 
+@item -flive-range-shrinkage
+@opindex flive-range-shrinkage
+Attempt to decrease register pressure through register live range
+shrinkage.  This is helpful for fast processors with small or moderate
+size register sets.
+
 @item -fira-algorithm=@var{algorithm}
 Use the specified coloring algorithm for the integrated register
 allocator.  The @var{algorithm} argument can be @samp{priority}, which
diff --git a/gcc/haifa-sched.c b/gcc/haifa-sched.c
index ab21d0d481b..728d51b7308 100644
--- a/gcc/haifa-sched.c
+++ b/gcc/haifa-sched.c
@@ -150,6 +150,24 @@ along with GCC; see the file COPYING3.  If not see
 
 #ifdef INSN_SCHEDULING
 
+/* True if we do register pressure relief through live-range
+   shrinkage.  */
+static bool live_range_shrinkage_p;
+
+/* Switch on live range shrinkage.  */
+void
+initialize_live_range_shrinkage (void)
+{
+  live_range_shrinkage_p = true;
+}
+
+/* Switch off live range shrinkage.  */
+void
+finish_live_range_shrinkage (void)
+{
+  live_range_shrinkage_p = false;
+}
+
 /* issue_rate is the number of insns that can be scheduled in the same
    machine cycle.  It can be defined in the config/mach/mach.h file,
    otherwise we set it to 1.  */
@@ -2519,7 +2537,7 @@ rank_for_schedule (const void *x, const void *y)
   rtx tmp = *(const rtx *) y;
   rtx tmp2 = *(const rtx *) x;
   int tmp_class, tmp2_class;
-  int val, priority_val, info_val;
+  int val, priority_val, info_val, diff;
 
   if (MAY_HAVE_DEBUG_INSNS)
     {
@@ -2532,6 +2550,22 @@ rank_for_schedule (const void *x, const void *y)
 	return INSN_LUID (tmp) - INSN_LUID (tmp2);
     }
 
+  if (live_range_shrinkage_p)
+    {
+      /* Don't use SCHED_PRESSURE_MODEL -- it results in much worse
+	 code.  */
+      gcc_assert (sched_pressure == SCHED_PRESSURE_WEIGHTED);
+      if ((INSN_REG_PRESSURE_EXCESS_COST_CHANGE (tmp) < 0
+	   || INSN_REG_PRESSURE_EXCESS_COST_CHANGE (tmp2) < 0)
+	  && (diff = (INSN_REG_PRESSURE_EXCESS_COST_CHANGE (tmp)
+		      - INSN_REG_PRESSURE_EXCESS_COST_CHANGE (tmp2))) != 0)
+	return diff;
+      /* Sort by INSN_LUID (original insn order), so that we make the
+	 sort stable.  This minimizes instruction movement, thus
+	 minimizing sched's effect on debugging and cross-jumping.  */
+      return INSN_LUID (tmp) - INSN_LUID (tmp2);
+    }
+
   /* The insn in a schedule group should be issued the first.  */
   if (flag_sched_group_heuristic &&
       SCHED_GROUP_P (tmp) != SCHED_GROUP_P (tmp2))
@@ -2542,8 +2576,6 @@ rank_for_schedule (const void *x, const void *y)
 
   if (sched_pressure != SCHED_PRESSURE_NONE)
     {
-      int diff;
-
       /* Prefer insn whose scheduling results in the smallest register
 	 pressure excess.  */
       if ((diff = (INSN_REG_PRESSURE_EXCESS_COST_CHANGE (tmp)
@@ -3731,7 +3763,10 @@ schedule_insn (rtx insn)
 	{
 	  fputc (':', sched_dump);
 	  for (i = 0; i < ira_pressure_classes_num; i++)
-	    fprintf (sched_dump, "%s%+d(%d)",
+	    fprintf (sched_dump, "%s%s%+d(%d)",
+		     scheduled_insns.length () > 1
+		     && INSN_LUID (insn)
+		     < INSN_LUID (scheduled_insns[scheduled_insns.length () - 2]) ? "@" : "",
 		     reg_class_names[ira_pressure_classes[i]],
 		     pressure_info[i].set_increase, pressure_info[i].change);
 	}
@@ -6578,9 +6613,11 @@ sched_init (void)
   if (targetm.sched.dispatch (NULL_RTX, IS_DISPATCH_ON))
     targetm.sched.dispatch_do (NULL_RTX, DISPATCH_INIT);
 
-  if (flag_sched_pressure
-      && !reload_completed
-      && common_sched_info->sched_pass_id == SCHED_RGN_PASS)
+  if (live_range_shrinkage_p)
+    sched_pressure = SCHED_PRESSURE_WEIGHTED;
+  else if (flag_sched_pressure
+	   && !reload_completed
+	   && common_sched_info->sched_pass_id == SCHED_RGN_PASS)
     sched_pressure = ((enum sched_pressure_algorithm)
 		      PARAM_VALUE (PARAM_SCHED_PRESSURE_ALGORITHM));
   else
diff --git a/gcc/ira.c b/gcc/ira.c
index 9e94704706c..10e71d97db3 100644
--- a/gcc/ira.c
+++ b/gcc/ira.c
@@ -3794,11 +3794,12 @@ update_equiv_regs (void)
 
 		  if (! reg_equiv[regno].replace
 		      || reg_equiv[regno].loop_depth < loop_depth
-		      /* There is no sense to move insns if we did
-			 register pressure-sensitive scheduling was
-			 done because it will not improve allocation
-			 but worsen insn schedule with a big
-			 probability.  */
+		      /* There is no sense to move insns if live range
+			 shrinkage or register pressure-sensitive
+			 scheduling were done because it will not
+			 improve allocation but worsen insn schedule
+			 with a big probability.  */
+		      || flag_live_range_shrinkage
 		      || (flag_sched_pressure && flag_schedule_insns))
 		    continue;
 
diff --git a/gcc/passes.def b/gcc/passes.def
index 1e2c4dc00ca..8d8dd800315 100644
--- a/gcc/passes.def
+++ b/gcc/passes.def
@@ -366,6 +366,7 @@ along with GCC; see the file COPYING3.  If not see
       NEXT_PASS (pass_mode_switching);
       NEXT_PASS (pass_match_asm_constraints);
       NEXT_PASS (pass_sms);
+      NEXT_PASS (pass_live_range_shrinkage);
       NEXT_PASS (pass_sched);
       NEXT_PASS (pass_ira);
       NEXT_PASS (pass_reload);
diff --git a/gcc/sched-deps.c b/gcc/sched-deps.c
index 353090157dd..8496014a72b 100644
--- a/gcc/sched-deps.c
+++ b/gcc/sched-deps.c
@@ -1938,8 +1938,8 @@ create_insn_reg_use (int regno, rtx insn)
   return use;
 }
 
-/* Allocate and return reg_set_data structure for REGNO and INSN.  */
-static struct reg_set_data *
+/* Allocate reg_set_data structure for REGNO and INSN.  */
+static void
 create_insn_reg_set (int regno, rtx insn)
 {
   struct reg_set_data *set;
@@ -1949,7 +1949,6 @@ create_insn_reg_set (int regno, rtx insn)
   set->insn = insn;
   set->next_insn_set = INSN_REG_SET_LIST (insn);
   INSN_REG_SET_LIST (insn) = set;
-  return set;
 }
 
 /* Set up insn register uses for INSN and dependency context DEPS.  */
diff --git a/gcc/sched-int.h b/gcc/sched-int.h
index 90d5b05c75e..33112eef075 100644
--- a/gcc/sched-int.h
+++ b/gcc/sched-int.h
@@ -1333,6 +1333,8 @@ extern void debug_ds (ds_t);
 
 /* Functions in haifa-sched.c.  */
 
+extern void initialize_live_range_shrinkage (void);
+extern void finish_live_range_shrinkage (void);
 extern void sched_init_region_reg_pressure_info (void);
 extern int haifa_classify_insn (const_rtx);
 extern void get_ebb_head_tail (basic_block, basic_block, rtx *, rtx *);
diff --git a/gcc/sched-rgn.c b/gcc/sched-rgn.c
index 73a236b5929..b2a7dbd4a94 100644
--- a/gcc/sched-rgn.c
+++ b/gcc/sched-rgn.c
@@ -3565,6 +3565,33 @@ advance_target_bb (basic_block bb, rtx insn)
 #endif
 
 static bool
+gate_handle_live_range_shrinkage (void)
+{
+#ifdef INSN_SCHEDULING
+  return flag_live_range_shrinkage;
+#else
+  return 0;
+#endif
+}
+
+/* Run instruction scheduler.  */
+static unsigned int
+rest_of_handle_live_range_shrinkage (void)
+{
+#ifdef INSN_SCHEDULING
+  int saved;
+
+  initialize_live_range_shrinkage ();
+  saved = flag_schedule_interblock;
+  flag_schedule_interblock = false;
+  schedule_insns ();
+  flag_schedule_interblock = saved;
+  finish_live_range_shrinkage ();
+#endif
+  return 0;
+}
+
+static bool
 gate_handle_sched (void)
 {
 #ifdef INSN_SCHEDULING
@@ -3622,6 +3649,45 @@ rest_of_handle_sched2 (void)
 
 namespace {
 
+const pass_data pass_data_live_range_shrinkage =
+{
+  RTL_PASS, /* type */
+  "lr_shrinkage", /* name */
+  OPTGROUP_NONE, /* optinfo_flags */
+  true, /* has_gate */
+  true, /* has_execute */
+  TV_LIVE_RANGE_SHRINKAGE, /* tv_id */
+  0, /* properties_required */
+  0, /* properties_provided */
+  0, /* properties_destroyed */
+  0, /* todo_flags_start */
+  ( TODO_df_finish | TODO_verify_rtl_sharing
+    | TODO_verify_flow ), /* todo_flags_finish */
+};
+
+class pass_live_range_shrinkage : public rtl_opt_pass
+{
+public:
+  pass_live_range_shrinkage(gcc::context *ctxt)
+    : rtl_opt_pass(pass_data_live_range_shrinkage, ctxt)
+  {}
+
+  /* opt_pass methods: */
+  bool gate () { return gate_handle_live_range_shrinkage (); }
+  unsigned int execute () { return rest_of_handle_live_range_shrinkage (); }
+
+}; // class pass_live_range_shrinkage
+
+} // anon namespace
+
+rtl_opt_pass *
+make_pass_live_range_shrinkage (gcc::context *ctxt)
+{
+  return new pass_live_range_shrinkage (ctxt);
+}
+
+namespace {
+
 const pass_data pass_data_sched =
 {
   RTL_PASS, /* type */
diff --git a/gcc/timevar.def b/gcc/timevar.def
index afdadb878a7..897f66dd82e 100644
--- a/gcc/timevar.def
+++ b/gcc/timevar.def
@@ -224,6 +224,7 @@ DEFTIMEVAR (TV_COMBINE               , "combiner")
 DEFTIMEVAR (TV_IFCVT                 , "if-conversion")
 DEFTIMEVAR (TV_MODE_SWITCH           , "mode switching")
 DEFTIMEVAR (TV_SMS                   , "sms modulo scheduling")
+DEFTIMEVAR (TV_LIVE_RANGE_SHRINKAGE  , "live range shrinkage")
 DEFTIMEVAR (TV_SCHED                 , "scheduling")
 DEFTIMEVAR (TV_IRA                   , "integrated RA")
 DEFTIMEVAR (TV_LRA                   , "LRA non-specific")
diff --git a/gcc/tree-pass.h b/gcc/tree-pass.h
index 3aeaeeb114b..9efee1e7e00 100644
--- a/gcc/tree-pass.h
+++ b/gcc/tree-pass.h
@@ -531,6 +531,7 @@ extern rtl_opt_pass *make_pass_lower_subreg2 (gcc::context *ctxt);
 extern rtl_opt_pass *make_pass_mode_switching (gcc::context *ctxt);
 extern rtl_opt_pass *make_pass_sms (gcc::context *ctxt);
 extern rtl_opt_pass *make_pass_sched (gcc::context *ctxt);
+extern rtl_opt_pass *make_pass_live_range_shrinkage (gcc::context *ctxt);
 extern rtl_opt_pass *make_pass_ira (gcc::context *ctxt);
 extern rtl_opt_pass *make_pass_reload (gcc::context *ctxt);
 extern rtl_opt_pass *make_pass_clean_state (gcc::context *ctxt);
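
The example below is not part of the commit.  It is a hypothetical C fragment (the function name, file name, and data are invented) sketching the kind of straight-line code with several independent, short-lived temporaries that the new -flive-range-shrinkage option is aimed at, together with how the lr_shrinkage pass added above would be enabled.

/* Hypothetical illustration, not from the patch.  Each product below
   is an independent temporary; a latency-driven pre-RA scheduler can
   pull all four multiplies ahead of the adds, keeping four pseudos
   live at once.  With live-range shrinkage enabled, rank_for_schedule
   orders the ready list by INSN_REG_PRESSURE_EXCESS_COST_CHANGE and
   then by INSN_LUID, which tends to keep each product next to its
   use and so lowers the register pressure seen by IRA.  */
#include <stdio.h>

static long
dot4 (const long *a, const long *b)
{
  long t0 = a[0] * b[0];
  long t1 = a[1] * b[1];
  long t2 = a[2] * b[2];
  long t3 = a[3] * b[3];
  return (t0 + t1) + (t2 + t3);
}

int
main (void)
{
  long a[4] = {1, 2, 3, 4};
  long b[4] = {5, 6, 7, 8};

  printf ("%ld\n", dot4 (a, b));  /* 1*5 + 2*6 + 3*7 + 4*8 = 70.  */
  return 0;
}

Compiling with something like "gcc -O2 -flive-range-shrinkage dot4.c" runs the new pass ahead of the first scheduling pass and register allocation (per the passes.def hunk above); on targets built without INSN_SCHEDULING, gate_handle_live_range_shrinkage returns 0 and the option has no effect.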