author     tbsaunde <tbsaunde@138bc75d-0d04-0410-961f-82ee72b054a4>  2015-07-09 02:49:51 +0000
committer  tbsaunde <tbsaunde@138bc75d-0d04-0410-961f-82ee72b054a4>  2015-07-09 02:49:51 +0000
commit     5fe18e785a87cd6fa3bef8df8c83adf50274f42d (patch)
tree       e4dc3a562a1e0900ee3ab1384aba87e00500bcef /gcc
parent     bddd01c17102927f2cab4b73704474ad913278c2 (diff)
download   gcc-5fe18e785a87cd6fa3bef8df8c83adf50274f42d.tar.gz
reduce conditional compilation for LOAD_EXTEND_OP
Provide a default in files where that is possible, so that everything else
there can be unconditionally compiled.  However, rtlanal.c and reload.c do
tricky things that break providing a global default, so we can't do that yet.

gcc/ChangeLog:

2015-07-08  Trevor Saunders  <tbsaunde+gcc@tbsaunde.org>

	* combine.c (try_combine): Don't check if LOAD_EXTEND_OP is
	defined.
	(simplify_set): Likewise.
	* cse.c (cse_insn): Likewise.
	* fold-const.c (fold_single_bit_test): Likewise.
	(fold_unary_loc): Likewise.
	* postreload.c (reload_cse_simplify_set): Likewise.
	(reload_cse_simplify_operands): Likewise.

git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@225591 138bc75d-0d04-0410-961f-82ee72b054a4
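The pattern is the same in each touched file: instead of wrapping every use
of LOAD_EXTEND_OP in #ifdef/#endif, the file supplies a fallback definition
once, and the uses become ordinary code that folds away on targets that do
not define the macro.  A minimal, self-contained sketch of the idiom follows
(plain C, not GCC source; the enum, "mode", and the messages are illustrative
stand-ins):

#include <stdio.h>

/* Stand-ins for GCC's rtx codes (in GCC these come from rtl.def).  */
enum rtx_code { UNKNOWN, SIGN_EXTEND, ZERO_EXTEND };

/* The fallback the patch adds near the top of each affected file;
   a target that widens subword loads would have defined
   LOAD_EXTEND_OP in its target headers before this point.  */
#ifndef LOAD_EXTEND_OP
#define LOAD_EXTEND_OP(M) UNKNOWN
#endif

int
main (void)
{
  int mode = 0;  /* placeholder for a machine mode */

  /* Before the patch this test had to sit inside #ifdef
     LOAD_EXTEND_OP ... #endif.  With the fallback it compiles on
     every target, and where the fallback is in effect the condition
     is the compile-time constant UNKNOWN == SIGN_EXTEND, which the
     compiler folds away.  */
  if (LOAD_EXTEND_OP (mode) == SIGN_EXTEND)
    puts ("subword loads sign-extend");
  else
    puts ("no load extension known");

  return 0;
}

As the commit message notes, rtlanal.c and reload.c do things that a single
global default would break, so the fallback is added per file rather than in
one central header.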
Diffstat (limited to 'gcc')
-rw-r--r--  gcc/ChangeLog     11
-rw-r--r--  gcc/combine.c      8
-rw-r--r--  gcc/cse.c          6
-rw-r--r--  gcc/fold-const.c  10
-rw-r--r--  gcc/postreload.c  18

5 files changed, 29 insertions, 24 deletions
diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index 3432adbdc7d..b8e63660bb0 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,3 +1,14 @@
+2015-07-08 Trevor Saunders <tbsaunde+gcc@tbsaunde.org>
+
+ * combine.c (try_combine): Don't check if LOAD_EXTEND_OP is
+ defined.
+ (simplify_set): Likewise.
+ * cse.c (cse_insn): Likewise.
+ * fold-const.c (fold_single_bit_test): Likewise.
+ (fold_unary_loc): Likewise.
+ * postreload.c (reload_cse_simplify_set): Likewise.
+ (reload_cse_simplify_operands): Likewise.
+
2015-07-08  Jiong Wang  <jiong.wang@arm.com>

	* config/aarch64/aarch64.c (aarch64_unspec_may_trap_p): New function.
diff --git a/gcc/combine.c b/gcc/combine.c
index 9be230a51e2..11cee8559c1 100644
--- a/gcc/combine.c
+++ b/gcc/combine.c
@@ -113,6 +113,10 @@ along with GCC; see the file COPYING3. If not see
#include "obstack.h"
#include "rtl-iter.h"
+#ifndef LOAD_EXTEND_OP
+#define LOAD_EXTEND_OP(M) UNKNOWN
+#endif
+
/* Number of attempts to combine instructions in this function. */
static int combine_attempts;
@@ -3744,7 +3748,6 @@ try_combine (rtx_insn *i3, rtx_insn *i2, rtx_insn *i1, rtx_insn *i0,
be written as a ZERO_EXTEND. */
if (split_code == SUBREG && MEM_P (SUBREG_REG (*split)))
{
-#ifdef LOAD_EXTEND_OP
/* Or as a SIGN_EXTEND if LOAD_EXTEND_OP says that that's
what it really is. */
if (LOAD_EXTEND_OP (GET_MODE (SUBREG_REG (*split)))
@@ -3752,7 +3755,6 @@ try_combine (rtx_insn *i3, rtx_insn *i2, rtx_insn *i1, rtx_insn *i0,
SUBST (*split, gen_rtx_SIGN_EXTEND (split_mode,
SUBREG_REG (*split)));
else
-#endif
SUBST (*split, gen_rtx_ZERO_EXTEND (split_mode,
SUBREG_REG (*split)));
}
@@ -6772,7 +6774,6 @@ simplify_set (rtx x)
}
}
-#ifdef LOAD_EXTEND_OP
/* If we have (set FOO (subreg:M (mem:N BAR) 0)) with M wider than N, this
would require a paradoxical subreg. Replace the subreg with a
zero_extend to avoid the reload that would otherwise be required. */
@@ -6790,7 +6791,6 @@ simplify_set (rtx x)
src = SET_SRC (x);
}
-#endif
/* If we don't have a conditional move, SET_SRC is an IF_THEN_ELSE, and we
are comparing an item known to be 0 or -1 against 0, use a logical
diff --git a/gcc/cse.c b/gcc/cse.c
index eefc7bc6656..af06543cf06 100644
--- a/gcc/cse.c
+++ b/gcc/cse.c
@@ -51,6 +51,10 @@ along with GCC; see the file COPYING3. If not see
#include "dbgcnt.h"
#include "rtl-iter.h"
+#ifndef LOAD_EXTEND_OP
+#define LOAD_EXTEND_OP(M) UNKNOWN
+#endif
+
/* The basic idea of common subexpression elimination is to go
through the code, keeping a record of expressions that would
have the same value at the current scan point, and replacing
@@ -4867,7 +4871,6 @@ cse_insn (rtx_insn *insn)
}
}
-#ifdef LOAD_EXTEND_OP
/* See if a MEM has already been loaded with a widening operation;
if it has, we can use a subreg of that. Many CISC machines
also have such operations, but this is only likely to be
@@ -4913,7 +4916,6 @@ cse_insn (rtx_insn *insn)
break;
}
}
-#endif /* LOAD_EXTEND_OP */
/* Try to express the constant using a register+offset expression
derived from a constant anchor. */
diff --git a/gcc/fold-const.c b/gcc/fold-const.c
index 0ad7d86ea1a..61eee4ae830 100644
--- a/gcc/fold-const.c
+++ b/gcc/fold-const.c
@@ -77,6 +77,10 @@ along with GCC; see the file COPYING3. If not see
#include "generic-match.h"
#include "optabs.h"
+#ifndef LOAD_EXTEND_OP
+#define LOAD_EXTEND_OP(M) UNKNOWN
+#endif
+
/* Nonzero if we are folding constants inside an initializer; zero
otherwise. */
int folding_initializer = 0;
@@ -6646,12 +6650,8 @@ fold_single_bit_test (location_t loc, enum tree_code code,
/* If we are going to be able to omit the AND below, we must do our
operations as unsigned. If we must use the AND, we have a choice.
Normally unsigned is faster, but for some machines signed is. */
-#ifdef LOAD_EXTEND_OP
ops_unsigned = (LOAD_EXTEND_OP (operand_mode) == SIGN_EXTEND
&& !flag_syntax_only) ? 0 : 1;
-#else
- ops_unsigned = 1;
-#endif
signed_type = lang_hooks.types.type_for_mode (operand_mode, 0);
unsigned_type = lang_hooks.types.type_for_mode (operand_mode, 1);
@@ -7815,7 +7815,6 @@ fold_unary_loc (location_t loc, enum tree_code code, tree type, tree op0)
cst &= HOST_WIDE_INT_M1U
<< (TYPE_PRECISION (TREE_TYPE (and1)) - 1);
change = (cst == 0);
-#ifdef LOAD_EXTEND_OP
if (change
&& !flag_syntax_only
&& (LOAD_EXTEND_OP (TYPE_MODE (TREE_TYPE (and0)))
@@ -7825,7 +7824,6 @@ fold_unary_loc (location_t loc, enum tree_code code, tree type, tree op0)
and0 = fold_convert_loc (loc, uns, and0);
and1 = fold_convert_loc (loc, uns, and1);
}
-#endif
}
if (change)
{
diff --git a/gcc/postreload.c b/gcc/postreload.c
index 3db2c07224a..03babc87695 100644
--- a/gcc/postreload.c
+++ b/gcc/postreload.c
@@ -54,6 +54,10 @@ along with GCC; see the file COPYING3. If not see
#include "tree-pass.h"
#include "dbgcnt.h"
+#ifndef LOAD_EXTEND_OP
+#define LOAD_EXTEND_OP(M) UNKNOWN
+#endif
+
static int reload_cse_noop_set_p (rtx);
static bool reload_cse_simplify (rtx_insn *, rtx);
static void reload_cse_regs_1 (void);
@@ -254,9 +258,7 @@ reload_cse_simplify_set (rtx set, rtx_insn *insn)
int old_cost;
cselib_val *val;
struct elt_loc_list *l;
-#ifdef LOAD_EXTEND_OP
enum rtx_code extend_op = UNKNOWN;
-#endif
bool speed = optimize_bb_for_speed_p (BLOCK_FOR_INSN (insn));
dreg = true_regnum (SET_DEST (set));
@@ -269,7 +271,6 @@ reload_cse_simplify_set (rtx set, rtx_insn *insn)
dclass = REGNO_REG_CLASS (dreg);
-#ifdef LOAD_EXTEND_OP
/* When replacing a memory with a register, we need to honor assumptions
that combine made wrt the contents of sign bits. We'll do this by
generating an extend instruction instead of a reg->reg copy. Thus
@@ -279,7 +280,6 @@ reload_cse_simplify_set (rtx set, rtx_insn *insn)
&& (extend_op = LOAD_EXTEND_OP (GET_MODE (src))) != UNKNOWN
&& !REG_P (SET_DEST (set)))
return 0;
-#endif
val = cselib_lookup (src, GET_MODE (SET_DEST (set)), 0, VOIDmode);
if (! val)
@@ -301,7 +301,6 @@ reload_cse_simplify_set (rtx set, rtx_insn *insn)
if (CONSTANT_P (this_rtx) && ! references_value_p (this_rtx, 0))
{
-#ifdef LOAD_EXTEND_OP
if (extend_op != UNKNOWN)
{
wide_int result;
@@ -326,19 +325,17 @@ reload_cse_simplify_set (rtx set, rtx_insn *insn)
}
this_rtx = immed_wide_int_const (result, word_mode);
}
-#endif
+
this_cost = set_src_cost (this_rtx, GET_MODE (SET_DEST (set)), speed);
}
else if (REG_P (this_rtx))
{
-#ifdef LOAD_EXTEND_OP
if (extend_op != UNKNOWN)
{
this_rtx = gen_rtx_fmt_e (extend_op, word_mode, this_rtx);
this_cost = set_src_cost (this_rtx, word_mode, speed);
}
else
-#endif
this_cost = register_move_cost (GET_MODE (this_rtx),
REGNO_REG_CLASS (REGNO (this_rtx)),
dclass);
@@ -353,7 +350,6 @@ reload_cse_simplify_set (rtx set, rtx_insn *insn)
&& REG_P (this_rtx)
&& !REG_P (SET_SRC (set))))
{
-#ifdef LOAD_EXTEND_OP
if (GET_MODE_BITSIZE (GET_MODE (SET_DEST (set))) < BITS_PER_WORD
&& extend_op != UNKNOWN
#ifdef CANNOT_CHANGE_MODE_CLASS
@@ -367,7 +363,6 @@ reload_cse_simplify_set (rtx set, rtx_insn *insn)
ORIGINAL_REGNO (wide_dest) = ORIGINAL_REGNO (SET_DEST (set));
validate_change (insn, &SET_DEST (set), wide_dest, 1);
}
-#endif
validate_unshare_change (insn, &SET_SRC (set), this_rtx, 1);
old_cost = this_cost, did_change = 1;
@@ -439,7 +434,6 @@ reload_cse_simplify_operands (rtx_insn *insn, rtx testreg)
continue;
op = recog_data.operand[i];
-#ifdef LOAD_EXTEND_OP
if (MEM_P (op)
&& GET_MODE_BITSIZE (GET_MODE (op)) < BITS_PER_WORD
&& LOAD_EXTEND_OP (GET_MODE (op)) != UNKNOWN)
@@ -490,7 +484,7 @@ reload_cse_simplify_operands (rtx_insn *insn, rtx testreg)
safe to optimize, but is it worth the trouble? */
continue;
}
-#endif /* LOAD_EXTEND_OP */
+
if (side_effects_p (op))
continue;
v = cselib_lookup (op, recog_data.operand_mode[i], 0, VOIDmode);