path: root/gcc/config/arm/arm.c
author     bstarynk <bstarynk@138bc75d-0d04-0410-961f-82ee72b054a4>   2011-08-25 19:29:43 +0000
committer  bstarynk <bstarynk@138bc75d-0d04-0410-961f-82ee72b054a4>   2011-08-25 19:29:43 +0000
commit     46dfcc3ee85a4a02abce4d45ee619f240c116af6 (patch)
tree       6c3dc3d53cd17d62447673b81abbcfc69bacd2f3 /gcc/config/arm/arm.c
parent     2a8624373adc103f943e22e781c2d6fadb828eae (diff)
download   gcc-46dfcc3ee85a4a02abce4d45ee619f240c116af6.tar.gz
2011-08-25  Basile Starynkevitch  <basile@starynkevitch.net>

	MELT branch merged with trunk rev 178073 using svnmerge.

2011-08-25  Basile Starynkevitch  <basile@starynkevitch.net>

	* gcc/melt-runtime.c (melt_linemap_compute_current_location): Use
	the linemap_position_for_column function for GCC 4.7 when merging
	with GCC trunk rev 178073.

git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/branches/melt-branch@178087 138bc75d-0d04-0410-961f-82ee72b054a4
Diffstat (limited to 'gcc/config/arm/arm.c')
-rw-r--r--  gcc/config/arm/arm.c  618
1 file changed, 487 insertions, 131 deletions
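
Most of the arm.c changes below register __gnu_-prefixed library functions for ARM fixed-point arithmetic and conversions. As a rough orientation before the diff, here is a small standalone C sketch (not GCC code; show_fixed_name is a simplified stand-in for arm_set_fixed_optab_libfunc, which passes the name to set_optab_libfunc instead of printing it) of how the helper builds names such as __gnu_addqq3 from an operation name, a mode suffix and an operand-count suffix. Conversion names such as __gnu_fractqqsf, or __gnu_fractqqhq2 when both modes are fixed-point of the same signedness and class, are built analogously by arm_set_fixed_conv_libfunc following the suffix logic visible in the patch.

/* Standalone sketch of the libcall-name scheme used by the new helpers
   below; it only prints the constructed names.  Compiles with any C
   compiler.  */
#include <stdio.h>

static void
show_fixed_name (const char *funcname, const char *modename, int num_suffix)
{
  char buffer[50];

  if (num_suffix == 0)
    sprintf (buffer, "__gnu_%s%s", funcname, modename);
  else
    sprintf (buffer, "__gnu_%s%s%d", funcname, modename, num_suffix);

  printf ("%s\n", buffer);
}

int
main (void)
{
  show_fixed_name ("add", "qq", 3);    /* __gnu_addqq3: QQmode addition */
  show_fixed_name ("ssneg", "sa", 2);  /* __gnu_ssnegsa2: saturating SAmode negate */
  return 0;
}
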
diff --git a/gcc/config/arm/arm.c b/gcc/config/arm/arm.c
index a51c87a078f..3162b30582e 100644
--- a/gcc/config/arm/arm.c
+++ b/gcc/config/arm/arm.c
@@ -151,7 +151,7 @@ static bool arm_slowmul_rtx_costs (rtx, enum rtx_code, enum rtx_code, int *, boo
static bool arm_fastmul_rtx_costs (rtx, enum rtx_code, enum rtx_code, int *, bool);
static bool arm_xscale_rtx_costs (rtx, enum rtx_code, enum rtx_code, int *, bool);
static bool arm_9e_rtx_costs (rtx, enum rtx_code, enum rtx_code, int *, bool);
-static bool arm_rtx_costs (rtx, int, int, int *, bool);
+static bool arm_rtx_costs (rtx, int, int, int, int *, bool);
static int arm_address_cost (rtx, bool);
static bool arm_memory_load_p (rtx);
static bool arm_cirrus_insn_p (rtx);
@@ -1038,6 +1038,49 @@ bit_count (unsigned long value)
return count;
}
+typedef struct
+{
+ enum machine_mode mode;
+ const char *name;
+} arm_fixed_mode_set;
+
+/* A small helper for setting fixed-point library libfuncs. */
+
+static void
+arm_set_fixed_optab_libfunc (optab optable, enum machine_mode mode,
+ const char *funcname, const char *modename,
+ int num_suffix)
+{
+ char buffer[50];
+
+ if (num_suffix == 0)
+ sprintf (buffer, "__gnu_%s%s", funcname, modename);
+ else
+ sprintf (buffer, "__gnu_%s%s%d", funcname, modename, num_suffix);
+
+ set_optab_libfunc (optable, mode, buffer);
+}
+
+static void
+arm_set_fixed_conv_libfunc (convert_optab optable, enum machine_mode to,
+ enum machine_mode from, const char *funcname,
+ const char *toname, const char *fromname)
+{
+ char buffer[50];
+ const char *maybe_suffix_2 = "";
+
+ /* Follow the logic for selecting a "2" suffix in fixed-bit.h. */
+ if (ALL_FIXED_POINT_MODE_P (from) && ALL_FIXED_POINT_MODE_P (to)
+ && UNSIGNED_FIXED_POINT_MODE_P (from) == UNSIGNED_FIXED_POINT_MODE_P (to)
+ && ALL_FRACT_MODE_P (from) == ALL_FRACT_MODE_P (to))
+ maybe_suffix_2 = "2";
+
+ sprintf (buffer, "__gnu_%s%s%s%s", funcname, fromname, toname,
+ maybe_suffix_2);
+
+ set_conv_libfunc (optable, to, from, buffer);
+}
+
/* Set up library functions unique to ARM. */
static void
@@ -1183,6 +1226,137 @@ arm_init_libfuncs (void)
break;
}
+ /* Use names prefixed with __gnu_ for fixed-point helper functions. */
+ {
+ const arm_fixed_mode_set fixed_arith_modes[] =
+ {
+ { QQmode, "qq" },
+ { UQQmode, "uqq" },
+ { HQmode, "hq" },
+ { UHQmode, "uhq" },
+ { SQmode, "sq" },
+ { USQmode, "usq" },
+ { DQmode, "dq" },
+ { UDQmode, "udq" },
+ { TQmode, "tq" },
+ { UTQmode, "utq" },
+ { HAmode, "ha" },
+ { UHAmode, "uha" },
+ { SAmode, "sa" },
+ { USAmode, "usa" },
+ { DAmode, "da" },
+ { UDAmode, "uda" },
+ { TAmode, "ta" },
+ { UTAmode, "uta" }
+ };
+ const arm_fixed_mode_set fixed_conv_modes[] =
+ {
+ { QQmode, "qq" },
+ { UQQmode, "uqq" },
+ { HQmode, "hq" },
+ { UHQmode, "uhq" },
+ { SQmode, "sq" },
+ { USQmode, "usq" },
+ { DQmode, "dq" },
+ { UDQmode, "udq" },
+ { TQmode, "tq" },
+ { UTQmode, "utq" },
+ { HAmode, "ha" },
+ { UHAmode, "uha" },
+ { SAmode, "sa" },
+ { USAmode, "usa" },
+ { DAmode, "da" },
+ { UDAmode, "uda" },
+ { TAmode, "ta" },
+ { UTAmode, "uta" },
+ { QImode, "qi" },
+ { HImode, "hi" },
+ { SImode, "si" },
+ { DImode, "di" },
+ { TImode, "ti" },
+ { SFmode, "sf" },
+ { DFmode, "df" }
+ };
+ unsigned int i, j;
+
+ for (i = 0; i < ARRAY_SIZE (fixed_arith_modes); i++)
+ {
+ arm_set_fixed_optab_libfunc (add_optab, fixed_arith_modes[i].mode,
+ "add", fixed_arith_modes[i].name, 3);
+ arm_set_fixed_optab_libfunc (ssadd_optab, fixed_arith_modes[i].mode,
+ "ssadd", fixed_arith_modes[i].name, 3);
+ arm_set_fixed_optab_libfunc (usadd_optab, fixed_arith_modes[i].mode,
+ "usadd", fixed_arith_modes[i].name, 3);
+ arm_set_fixed_optab_libfunc (sub_optab, fixed_arith_modes[i].mode,
+ "sub", fixed_arith_modes[i].name, 3);
+ arm_set_fixed_optab_libfunc (sssub_optab, fixed_arith_modes[i].mode,
+ "sssub", fixed_arith_modes[i].name, 3);
+ arm_set_fixed_optab_libfunc (ussub_optab, fixed_arith_modes[i].mode,
+ "ussub", fixed_arith_modes[i].name, 3);
+ arm_set_fixed_optab_libfunc (smul_optab, fixed_arith_modes[i].mode,
+ "mul", fixed_arith_modes[i].name, 3);
+ arm_set_fixed_optab_libfunc (ssmul_optab, fixed_arith_modes[i].mode,
+ "ssmul", fixed_arith_modes[i].name, 3);
+ arm_set_fixed_optab_libfunc (usmul_optab, fixed_arith_modes[i].mode,
+ "usmul", fixed_arith_modes[i].name, 3);
+ arm_set_fixed_optab_libfunc (sdiv_optab, fixed_arith_modes[i].mode,
+ "div", fixed_arith_modes[i].name, 3);
+ arm_set_fixed_optab_libfunc (udiv_optab, fixed_arith_modes[i].mode,
+ "udiv", fixed_arith_modes[i].name, 3);
+ arm_set_fixed_optab_libfunc (ssdiv_optab, fixed_arith_modes[i].mode,
+ "ssdiv", fixed_arith_modes[i].name, 3);
+ arm_set_fixed_optab_libfunc (usdiv_optab, fixed_arith_modes[i].mode,
+ "usdiv", fixed_arith_modes[i].name, 3);
+ arm_set_fixed_optab_libfunc (neg_optab, fixed_arith_modes[i].mode,
+ "neg", fixed_arith_modes[i].name, 2);
+ arm_set_fixed_optab_libfunc (ssneg_optab, fixed_arith_modes[i].mode,
+ "ssneg", fixed_arith_modes[i].name, 2);
+ arm_set_fixed_optab_libfunc (usneg_optab, fixed_arith_modes[i].mode,
+ "usneg", fixed_arith_modes[i].name, 2);
+ arm_set_fixed_optab_libfunc (ashl_optab, fixed_arith_modes[i].mode,
+ "ashl", fixed_arith_modes[i].name, 3);
+ arm_set_fixed_optab_libfunc (ashr_optab, fixed_arith_modes[i].mode,
+ "ashr", fixed_arith_modes[i].name, 3);
+ arm_set_fixed_optab_libfunc (lshr_optab, fixed_arith_modes[i].mode,
+ "lshr", fixed_arith_modes[i].name, 3);
+ arm_set_fixed_optab_libfunc (ssashl_optab, fixed_arith_modes[i].mode,
+ "ssashl", fixed_arith_modes[i].name, 3);
+ arm_set_fixed_optab_libfunc (usashl_optab, fixed_arith_modes[i].mode,
+ "usashl", fixed_arith_modes[i].name, 3);
+ arm_set_fixed_optab_libfunc (cmp_optab, fixed_arith_modes[i].mode,
+ "cmp", fixed_arith_modes[i].name, 2);
+ }
+
+ for (i = 0; i < ARRAY_SIZE (fixed_conv_modes); i++)
+ for (j = 0; j < ARRAY_SIZE (fixed_conv_modes); j++)
+ {
+ if (i == j
+ || (!ALL_FIXED_POINT_MODE_P (fixed_conv_modes[i].mode)
+ && !ALL_FIXED_POINT_MODE_P (fixed_conv_modes[j].mode)))
+ continue;
+
+ arm_set_fixed_conv_libfunc (fract_optab, fixed_conv_modes[i].mode,
+ fixed_conv_modes[j].mode, "fract",
+ fixed_conv_modes[i].name,
+ fixed_conv_modes[j].name);
+ arm_set_fixed_conv_libfunc (satfract_optab,
+ fixed_conv_modes[i].mode,
+ fixed_conv_modes[j].mode, "satfract",
+ fixed_conv_modes[i].name,
+ fixed_conv_modes[j].name);
+ arm_set_fixed_conv_libfunc (fractuns_optab,
+ fixed_conv_modes[i].mode,
+ fixed_conv_modes[j].mode, "fractuns",
+ fixed_conv_modes[i].name,
+ fixed_conv_modes[j].name);
+ arm_set_fixed_conv_libfunc (satfractuns_optab,
+ fixed_conv_modes[i].mode,
+ fixed_conv_modes[j].mode, "satfractuns",
+ fixed_conv_modes[i].name,
+ fixed_conv_modes[j].name);
+ }
+ }
+
if (TARGET_AAPCS_BASED)
synchronize_libfunc = init_one_libfunc ("__sync_synchronize");
}
@@ -4203,6 +4377,10 @@ aapcs_allocate_return_reg (enum machine_mode mode, const_tree type,
rtx
aapcs_libcall_value (enum machine_mode mode)
{
+ if (BYTES_BIG_ENDIAN && ALL_FIXED_POINT_MODE_P (mode)
+ && GET_MODE_SIZE (mode) <= 4)
+ mode = SImode;
+
return aapcs_allocate_return_reg (mode, NULL_TREE, NULL_TREE);
}
@@ -6703,7 +6881,7 @@ arm_rtx_costs_1 (rtx x, enum rtx_code outer, int* total, bool speed)
if (GET_CODE (XEXP (x, 1)) == REG)
*total = COSTS_N_INSNS (1); /* Need to subtract from 32 */
else if (GET_CODE (XEXP (x, 1)) != CONST_INT)
- *total = rtx_cost (XEXP (x, 1), code, speed);
+ *total = rtx_cost (XEXP (x, 1), code, 1, speed);
/* Fall through */
case ROTATERT:
@@ -6715,7 +6893,7 @@ arm_rtx_costs_1 (rtx x, enum rtx_code outer, int* total, bool speed)
/* Fall through */
case ASHIFT: case LSHIFTRT: case ASHIFTRT:
- *total += rtx_cost (XEXP (x, 0), code, speed);
+ *total += rtx_cost (XEXP (x, 0), code, 0, speed);
if (mode == DImode)
{
*total += COSTS_N_INSNS (3);
@@ -6738,14 +6916,14 @@ arm_rtx_costs_1 (rtx x, enum rtx_code outer, int* total, bool speed)
if (GET_CODE (XEXP (x, 0)) == CONST_INT
&& const_ok_for_arm (INTVAL (XEXP (x, 0))))
{
- *total += rtx_cost (XEXP (x, 1), code, speed);
+ *total += rtx_cost (XEXP (x, 1), code, 1, speed);
return true;
}
if (GET_CODE (XEXP (x, 1)) == CONST_INT
&& const_ok_for_arm (INTVAL (XEXP (x, 1))))
{
- *total += rtx_cost (XEXP (x, 0), code, speed);
+ *total += rtx_cost (XEXP (x, 0), code, 0, speed);
return true;
}
@@ -6762,14 +6940,14 @@ arm_rtx_costs_1 (rtx x, enum rtx_code outer, int* total, bool speed)
if (GET_CODE (XEXP (x, 0)) == CONST_DOUBLE
&& arm_const_double_rtx (XEXP (x, 0)))
{
- *total += rtx_cost (XEXP (x, 1), code, speed);
+ *total += rtx_cost (XEXP (x, 1), code, 1, speed);
return true;
}
if (GET_CODE (XEXP (x, 1)) == CONST_DOUBLE
&& arm_const_double_rtx (XEXP (x, 1)))
{
- *total += rtx_cost (XEXP (x, 0), code, speed);
+ *total += rtx_cost (XEXP (x, 0), code, 0, speed);
return true;
}
@@ -6783,7 +6961,7 @@ arm_rtx_costs_1 (rtx x, enum rtx_code outer, int* total, bool speed)
if (GET_CODE (XEXP (x, 0)) == CONST_INT
&& const_ok_for_arm (INTVAL (XEXP (x, 0))))
{
- *total += rtx_cost (XEXP (x, 1), code, speed);
+ *total += rtx_cost (XEXP (x, 1), code, 1, speed);
return true;
}
@@ -6792,8 +6970,8 @@ arm_rtx_costs_1 (rtx x, enum rtx_code outer, int* total, bool speed)
|| subcode == LSHIFTRT
|| subcode == ROTATE || subcode == ROTATERT)
{
- *total += rtx_cost (XEXP (x, 0), code, speed);
- *total += rtx_cost (XEXP (XEXP (x, 1), 0), subcode, speed);
+ *total += rtx_cost (XEXP (x, 0), code, 0, speed);
+ *total += rtx_cost (XEXP (XEXP (x, 1), 0), subcode, 0, speed);
return true;
}
@@ -6801,23 +6979,23 @@ arm_rtx_costs_1 (rtx x, enum rtx_code outer, int* total, bool speed)
if (GET_CODE (XEXP (x, 0)) == MULT
&& power_of_two_operand (XEXP (XEXP (x, 0), 1), SImode))
{
- *total += rtx_cost (XEXP (XEXP (x, 0), 0), code, speed);
- *total += rtx_cost (XEXP (x, 1), code, speed);
+ *total += rtx_cost (XEXP (XEXP (x, 0), 0), code, 0, speed);
+ *total += rtx_cost (XEXP (x, 1), code, 1, speed);
return true;
}
if (subcode == MULT
&& power_of_two_operand (XEXP (XEXP (x, 1), 1), SImode))
{
- *total += rtx_cost (XEXP (x, 0), code, speed);
- *total += rtx_cost (XEXP (XEXP (x, 1), 0), subcode, speed);
+ *total += rtx_cost (XEXP (x, 0), code, 0, speed);
+ *total += rtx_cost (XEXP (XEXP (x, 1), 0), subcode, 0, speed);
return true;
}
if (GET_RTX_CLASS (GET_CODE (XEXP (x, 1))) == RTX_COMPARE
|| GET_RTX_CLASS (GET_CODE (XEXP (x, 1))) == RTX_COMM_COMPARE)
{
- *total = COSTS_N_INSNS (1) + rtx_cost (XEXP (x, 0), code, speed);
+ *total = COSTS_N_INSNS (1) + rtx_cost (XEXP (x, 0), code, 0, speed);
if (GET_CODE (XEXP (XEXP (x, 1), 0)) == REG
&& REGNO (XEXP (XEXP (x, 1), 0)) != CC_REGNUM)
*total += COSTS_N_INSNS (1);
@@ -6834,8 +7012,8 @@ arm_rtx_costs_1 (rtx x, enum rtx_code outer, int* total, bool speed)
{
*total = COSTS_N_INSNS (1);
*total += rtx_cost (XEXP (XEXP (x, 0), 0), GET_CODE (XEXP (x, 0)),
- speed);
- *total += rtx_cost (XEXP (x, 1), code, speed);
+ 0, speed);
+ *total += rtx_cost (XEXP (x, 1), code, 1, speed);
return true;
}
@@ -6859,7 +7037,7 @@ arm_rtx_costs_1 (rtx x, enum rtx_code outer, int* total, bool speed)
if (GET_CODE (XEXP (x, 1)) == CONST_DOUBLE
&& arm_const_double_rtx (XEXP (x, 1)))
{
- *total += rtx_cost (XEXP (x, 0), code, speed);
+ *total += rtx_cost (XEXP (x, 0), code, 0, speed);
return true;
}
@@ -6873,7 +7051,7 @@ arm_rtx_costs_1 (rtx x, enum rtx_code outer, int* total, bool speed)
if (GET_RTX_CLASS (GET_CODE (XEXP (x, 0))) == RTX_COMPARE
|| GET_RTX_CLASS (GET_CODE (XEXP (x, 0))) == RTX_COMM_COMPARE)
{
- *total = COSTS_N_INSNS (1) + rtx_cost (XEXP (x, 1), code, speed);
+ *total = COSTS_N_INSNS (1) + rtx_cost (XEXP (x, 1), code, 1, speed);
if (GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
&& REGNO (XEXP (XEXP (x, 0), 0)) != CC_REGNUM)
*total += COSTS_N_INSNS (1);
@@ -6900,7 +7078,7 @@ arm_rtx_costs_1 (rtx x, enum rtx_code outer, int* total, bool speed)
if (GET_CODE (XEXP (x, 1)) == CONST_INT
&& const_ok_for_op (INTVAL (XEXP (x, 1)), code))
{
- *total += rtx_cost (XEXP (x, 0), code, speed);
+ *total += rtx_cost (XEXP (x, 0), code, 0, speed);
return true;
}
@@ -6911,7 +7089,7 @@ arm_rtx_costs_1 (rtx x, enum rtx_code outer, int* total, bool speed)
if (GET_CODE (XEXP (x, 1)) == CONST_INT
&& const_ok_for_op (INTVAL (XEXP (x, 1)), code))
{
- *total += rtx_cost (XEXP (x, 0), code, speed);
+ *total += rtx_cost (XEXP (x, 0), code, 0, speed);
return true;
}
subcode = GET_CODE (XEXP (x, 0));
@@ -6919,16 +7097,16 @@ arm_rtx_costs_1 (rtx x, enum rtx_code outer, int* total, bool speed)
|| subcode == LSHIFTRT
|| subcode == ROTATE || subcode == ROTATERT)
{
- *total += rtx_cost (XEXP (x, 1), code, speed);
- *total += rtx_cost (XEXP (XEXP (x, 0), 0), subcode, speed);
+ *total += rtx_cost (XEXP (x, 1), code, 1, speed);
+ *total += rtx_cost (XEXP (XEXP (x, 0), 0), subcode, 0, speed);
return true;
}
if (subcode == MULT
&& power_of_two_operand (XEXP (XEXP (x, 0), 1), SImode))
{
- *total += rtx_cost (XEXP (x, 1), code, speed);
- *total += rtx_cost (XEXP (XEXP (x, 0), 0), subcode, speed);
+ *total += rtx_cost (XEXP (x, 1), code, 1, speed);
+ *total += rtx_cost (XEXP (XEXP (x, 0), 0), subcode, 0, speed);
return true;
}
@@ -6954,7 +7132,7 @@ arm_rtx_costs_1 (rtx x, enum rtx_code outer, int* total, bool speed)
&& (GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0)) == ZERO_EXTEND
|| GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0)) == SIGN_EXTEND))
{
- *total = rtx_cost (XEXP (XEXP (x, 0), 0), LSHIFTRT, speed);
+ *total = rtx_cost (XEXP (XEXP (x, 0), 0), LSHIFTRT, 0, speed);
return true;
}
*total = COSTS_N_INSNS (2); /* Plus the cost of the MULT */
@@ -6986,11 +7164,11 @@ arm_rtx_costs_1 (rtx x, enum rtx_code outer, int* total, bool speed)
|| (subcode == MULT
&& power_of_two_operand (XEXP (XEXP (x, 0), 1), SImode)))
{
- *total += rtx_cost (XEXP (XEXP (x, 0), 0), subcode, speed);
+ *total += rtx_cost (XEXP (XEXP (x, 0), 0), subcode, 0, speed);
/* Register shifts cost an extra cycle. */
if (GET_CODE (XEXP (XEXP (x, 0), 1)) != CONST_INT)
*total += COSTS_N_INSNS (1) + rtx_cost (XEXP (XEXP (x, 0), 1),
- subcode, speed);
+ subcode, 1, speed);
return true;
}
}
@@ -7011,14 +7189,14 @@ arm_rtx_costs_1 (rtx x, enum rtx_code outer, int* total, bool speed)
&& GET_CODE (XEXP (operand, 0)) == REG
&& REGNO (XEXP (operand, 0)) == CC_REGNUM))
*total += COSTS_N_INSNS (1);
- *total += (rtx_cost (XEXP (x, 1), code, speed)
- + rtx_cost (XEXP (x, 2), code, speed));
+ *total += (rtx_cost (XEXP (x, 1), code, 1, speed)
+ + rtx_cost (XEXP (x, 2), code, 2, speed));
return true;
case NE:
if (mode == SImode && XEXP (x, 1) == const0_rtx)
{
- *total = COSTS_N_INSNS (2) + rtx_cost (XEXP (x, 0), code, speed);
+ *total = COSTS_N_INSNS (2) + rtx_cost (XEXP (x, 0), code, 0, speed);
return true;
}
goto scc_insn;
@@ -7027,7 +7205,7 @@ arm_rtx_costs_1 (rtx x, enum rtx_code outer, int* total, bool speed)
if ((GET_CODE (XEXP (x, 0)) != REG || REGNO (XEXP (x, 0)) != CC_REGNUM)
&& mode == SImode && XEXP (x, 1) == const0_rtx)
{
- *total = COSTS_N_INSNS (2) + rtx_cost (XEXP (x, 0), code, speed);
+ *total = COSTS_N_INSNS (2) + rtx_cost (XEXP (x, 0), code, 0, speed);
return true;
}
goto scc_insn;
@@ -7036,7 +7214,7 @@ arm_rtx_costs_1 (rtx x, enum rtx_code outer, int* total, bool speed)
if ((GET_CODE (XEXP (x, 0)) != REG || REGNO (XEXP (x, 0)) != CC_REGNUM)
&& mode == SImode && XEXP (x, 1) == const0_rtx)
{
- *total = COSTS_N_INSNS (1) + rtx_cost (XEXP (x, 0), code, speed);
+ *total = COSTS_N_INSNS (1) + rtx_cost (XEXP (x, 0), code, 0, speed);
return true;
}
goto scc_insn;
@@ -7077,7 +7255,7 @@ arm_rtx_costs_1 (rtx x, enum rtx_code outer, int* total, bool speed)
if (GET_CODE (XEXP (x, 1)) == CONST_INT
&& const_ok_for_op (INTVAL (XEXP (x, 1)), code))
{
- *total += rtx_cost (XEXP (x, 0), code, speed);
+ *total += rtx_cost (XEXP (x, 0), code, 0, speed);
return true;
}
@@ -7086,16 +7264,16 @@ arm_rtx_costs_1 (rtx x, enum rtx_code outer, int* total, bool speed)
|| subcode == LSHIFTRT
|| subcode == ROTATE || subcode == ROTATERT)
{
- *total += rtx_cost (XEXP (x, 1), code, speed);
- *total += rtx_cost (XEXP (XEXP (x, 0), 0), subcode, speed);
+ *total += rtx_cost (XEXP (x, 1), code, 1, speed);
+ *total += rtx_cost (XEXP (XEXP (x, 0), 0), subcode, 0, speed);
return true;
}
if (subcode == MULT
&& power_of_two_operand (XEXP (XEXP (x, 0), 1), SImode))
{
- *total += rtx_cost (XEXP (x, 1), code, speed);
- *total += rtx_cost (XEXP (XEXP (x, 0), 0), subcode, speed);
+ *total += rtx_cost (XEXP (x, 1), code, 1, speed);
+ *total += rtx_cost (XEXP (XEXP (x, 0), 0), subcode, 0, speed);
return true;
}
@@ -7105,10 +7283,10 @@ arm_rtx_costs_1 (rtx x, enum rtx_code outer, int* total, bool speed)
case UMAX:
case SMIN:
case SMAX:
- *total = COSTS_N_INSNS (2) + rtx_cost (XEXP (x, 0), code, speed);
+ *total = COSTS_N_INSNS (2) + rtx_cost (XEXP (x, 0), code, 0, speed);
if (GET_CODE (XEXP (x, 1)) != CONST_INT
|| !const_ok_for_arm (INTVAL (XEXP (x, 1))))
- *total += rtx_cost (XEXP (x, 1), code, speed);
+ *total += rtx_cost (XEXP (x, 1), code, 1, speed);
return true;
case ABS:
@@ -7185,7 +7363,7 @@ arm_rtx_costs_1 (rtx x, enum rtx_code outer, int* total, bool speed)
case ZERO_EXTRACT:
case SIGN_EXTRACT:
- *total = COSTS_N_INSNS (1) + rtx_cost (XEXP (x, 0), code, speed);
+ *total = COSTS_N_INSNS (1) + rtx_cost (XEXP (x, 0), code, 0, speed);
return true;
case CONST_INT:
@@ -7210,7 +7388,7 @@ arm_rtx_costs_1 (rtx x, enum rtx_code outer, int* total, bool speed)
case LO_SUM:
*total = COSTS_N_INSNS (1);
- *total += rtx_cost (XEXP (x, 0), code, speed);
+ *total += rtx_cost (XEXP (x, 0), code, 0, speed);
return true;
case CONST_DOUBLE:
@@ -7221,6 +7399,9 @@ arm_rtx_costs_1 (rtx x, enum rtx_code outer, int* total, bool speed)
*total = COSTS_N_INSNS (4);
return true;
+ case SET:
+ return false;
+
default:
*total = COSTS_N_INSNS (4);
return false;
@@ -7396,7 +7577,7 @@ arm_size_rtx_costs (rtx x, enum rtx_code code, enum rtx_code outer_code,
case ROTATE:
if (mode == SImode && GET_CODE (XEXP (x, 1)) == REG)
{
- *total = COSTS_N_INSNS (2) + rtx_cost (XEXP (x, 0), code, false);
+ *total = COSTS_N_INSNS (2) + rtx_cost (XEXP (x, 0), code, 0, false);
return true;
}
/* Fall through */
@@ -7406,15 +7587,15 @@ arm_size_rtx_costs (rtx x, enum rtx_code code, enum rtx_code outer_code,
case ASHIFTRT:
if (mode == DImode && GET_CODE (XEXP (x, 1)) == CONST_INT)
{
- *total = COSTS_N_INSNS (3) + rtx_cost (XEXP (x, 0), code, false);
+ *total = COSTS_N_INSNS (3) + rtx_cost (XEXP (x, 0), code, 0, false);
return true;
}
else if (mode == SImode)
{
- *total = COSTS_N_INSNS (1) + rtx_cost (XEXP (x, 0), code, false);
+ *total = COSTS_N_INSNS (1) + rtx_cost (XEXP (x, 0), code, 0, false);
/* Slightly disparage register shifts, but not by much. */
if (GET_CODE (XEXP (x, 1)) != CONST_INT)
- *total += 1 + rtx_cost (XEXP (x, 1), code, false);
+ *total += 1 + rtx_cost (XEXP (x, 1), code, 1, false);
return true;
}
@@ -7466,8 +7647,8 @@ arm_size_rtx_costs (rtx x, enum rtx_code code, enum rtx_code outer_code,
&& power_of_two_operand (XEXP (XEXP (x, 0), 1), SImode))
{
*total = COSTS_N_INSNS (TARGET_THUMB2 ? 2 : 1);
- *total += rtx_cost (XEXP (XEXP (x, 0), 0), code, false);
- *total += rtx_cost (XEXP (x, 1), code, false);
+ *total += rtx_cost (XEXP (XEXP (x, 0), 0), code, 0, false);
+ *total += rtx_cost (XEXP (x, 1), code, 1, false);
return true;
}
@@ -7568,6 +7749,9 @@ arm_size_rtx_costs (rtx x, enum rtx_code code, enum rtx_code outer_code,
*total = COSTS_N_INSNS (1) + 1;
return true;
+ case SET:
+ return false;
+
default:
if (mode != VOIDmode)
*total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
@@ -7579,8 +7763,8 @@ arm_size_rtx_costs (rtx x, enum rtx_code code, enum rtx_code outer_code,
/* RTX costs when optimizing for size. */
static bool
-arm_rtx_costs (rtx x, int code, int outer_code, int *total,
- bool speed)
+arm_rtx_costs (rtx x, int code, int outer_code, int opno ATTRIBUTE_UNUSED,
+ int *total, bool speed)
{
if (!speed)
return arm_size_rtx_costs (x, (enum rtx_code) code,
@@ -7633,7 +7817,7 @@ arm_slowmul_rtx_costs (rtx x, enum rtx_code code, enum rtx_code outer_code,
}
*total = COSTS_N_INSNS (cost);
- *total += rtx_cost (XEXP (x, 0), code, speed);
+ *total += rtx_cost (XEXP (x, 0), code, 0, speed);
return true;
}
@@ -9252,8 +9436,9 @@ arm_return_in_msb (const_tree valtype)
{
return (TARGET_AAPCS_BASED
&& BYTES_BIG_ENDIAN
- && (AGGREGATE_TYPE_P (valtype)
- || TREE_CODE (valtype) == COMPLEX_TYPE));
+ && (AGGREGATE_TYPE_P (valtype)
+ || TREE_CODE (valtype) == COMPLEX_TYPE
+ || FIXED_POINT_TYPE_P (valtype)));
}
/* Returns TRUE if INSN is an "LDR REG, ADDR" instruction.
@@ -11261,6 +11446,15 @@ arm_pad_arg_upward (enum machine_mode mode, const_tree type)
if (type && BYTES_BIG_ENDIAN && INTEGRAL_TYPE_P (type))
return false;
+ /* Half-float values are only passed to libcalls, not regular functions.
+ They should be passed and returned as "short"s (see RTABI). To achieve
+ that effect in big-endian mode, pad downwards so the value is passed in
+ the least-significant end of the register. ??? This needs to be here
+ rather than in arm_pad_reg_upward due to peculiarity in the handling of
+ libcall arguments. */
+ if (BYTES_BIG_ENDIAN && mode == HFmode)
+ return false;
+
return true;
}
@@ -11278,7 +11472,8 @@ arm_pad_reg_upward (enum machine_mode mode ATTRIBUTE_UNUSED,
{
if (TARGET_AAPCS_BASED
&& BYTES_BIG_ENDIAN
- && (AGGREGATE_TYPE_P (type) || TREE_CODE (type) == COMPLEX_TYPE)
+ && (AGGREGATE_TYPE_P (type) || TREE_CODE (type) == COMPLEX_TYPE
+ || FIXED_POINT_TYPE_P (type))
&& int_size_in_bytes (type) <= 4)
return true;
@@ -11477,8 +11672,7 @@ is_jump_table (rtx insn)
{
rtx table;
- if (GET_CODE (insn) == JUMP_INSN
- && JUMP_LABEL (insn) != NULL
+ if (jump_to_label_p (insn)
&& ((table = next_real_insn (JUMP_LABEL (insn)))
== next_real_insn (insn))
&& table != NULL
@@ -11531,6 +11725,19 @@ get_jump_table_size (rtx insn)
return 0;
}
+/* Return the maximum amount of padding that will be inserted before
+ label LABEL. */
+
+static HOST_WIDE_INT
+get_label_padding (rtx label)
+{
+ HOST_WIDE_INT align, min_insn_size;
+
+ align = 1 << label_to_alignment (label);
+ min_insn_size = TARGET_THUMB ? 2 : 4;
+ return align > min_insn_size ? align - min_insn_size : 0;
+}
+
/* Move a minipool fix MP from its current location to before MAX_MP.
If MAX_MP is NULL, then MP doesn't need moving, but the addressing
constraints may need updating. */
@@ -12077,8 +12284,12 @@ create_fix_barrier (Mfix *fix, HOST_WIDE_INT max_address)
within range. */
gcc_assert (GET_CODE (from) != BARRIER);
- /* Count the length of this insn. */
- count += get_attr_length (from);
+ /* Count the length of this insn. This must stay in sync with the
+ code that pushes minipool fixes. */
+ if (LABEL_P (from))
+ count += get_label_padding (from);
+ else
+ count += get_attr_length (from);
/* If there is a jump table, add its length. */
tmp = is_jump_table (from);
@@ -12508,6 +12719,11 @@ arm_reorg (void)
insn = table;
}
}
+ else if (LABEL_P (insn))
+ /* Add the worst-case padding due to alignment. We don't add
+ the _current_ padding because the minipool insertions
+ themselves might change it. */
+ address += get_label_padding (insn);
}
fix = minipool_fix_head;
@@ -13074,11 +13290,24 @@ output_mov_double_arm_from_fpa (rtx *operands)
/* Output a move between double words. It must be REG<-MEM
or MEM<-REG. */
const char *
-output_move_double (rtx *operands)
+output_move_double (rtx *operands, bool emit, int *count)
{
enum rtx_code code0 = GET_CODE (operands[0]);
enum rtx_code code1 = GET_CODE (operands[1]);
rtx otherops[3];
+ if (count)
+ *count = 1;
+
+ /* The only case when this might happen is when
+ you are looking at the length of a DImode instruction
+ that has an invalid constant in it. */
+ if (code0 == REG && code1 != MEM)
+ {
+ gcc_assert (!emit);
+ *count = 2;
+ return "";
+ }
+
if (code0 == REG)
{
@@ -13091,35 +13320,49 @@ output_move_double (rtx *operands)
switch (GET_CODE (XEXP (operands[1], 0)))
{
case REG:
- if (TARGET_LDRD
- && !(fix_cm3_ldrd && reg0 == REGNO(XEXP (operands[1], 0))))
- output_asm_insn ("ldr%(d%)\t%0, [%m1]", operands);
- else
- output_asm_insn ("ldm%(ia%)\t%m1, %M0", operands);
+
+ if (emit)
+ {
+ if (TARGET_LDRD
+ && !(fix_cm3_ldrd && reg0 == REGNO(XEXP (operands[1], 0))))
+ output_asm_insn ("ldr%(d%)\t%0, [%m1]", operands);
+ else
+ output_asm_insn ("ldm%(ia%)\t%m1, %M0", operands);
+ }
break;
case PRE_INC:
gcc_assert (TARGET_LDRD);
- output_asm_insn ("ldr%(d%)\t%0, [%m1, #8]!", operands);
+ if (emit)
+ output_asm_insn ("ldr%(d%)\t%0, [%m1, #8]!", operands);
+
break;
case PRE_DEC:
- if (TARGET_LDRD)
- output_asm_insn ("ldr%(d%)\t%0, [%m1, #-8]!", operands);
- else
- output_asm_insn ("ldm%(db%)\t%m1!, %M0", operands);
+ if (emit)
+ {
+ if (TARGET_LDRD)
+ output_asm_insn ("ldr%(d%)\t%0, [%m1, #-8]!", operands);
+ else
+ output_asm_insn ("ldm%(db%)\t%m1!, %M0", operands);
+ }
break;
case POST_INC:
- if (TARGET_LDRD)
- output_asm_insn ("ldr%(d%)\t%0, [%m1], #8", operands);
- else
- output_asm_insn ("ldm%(ia%)\t%m1!, %M0", operands);
+
+ if (emit)
+ {
+ if (TARGET_LDRD)
+ output_asm_insn ("ldr%(d%)\t%0, [%m1], #8", operands);
+ else
+ output_asm_insn ("ldm%(ia%)\t%m1!, %M0", operands);
+ }
break;
case POST_DEC:
gcc_assert (TARGET_LDRD);
- output_asm_insn ("ldr%(d%)\t%0, [%m1], #-8", operands);
+ if (emit)
+ output_asm_insn ("ldr%(d%)\t%0, [%m1], #-8", operands);
break;
case PRE_MODIFY:
@@ -13137,8 +13380,13 @@ output_move_double (rtx *operands)
if (reg_overlap_mentioned_p (otherops[0], otherops[2]))
{
/* Registers overlap so split out the increment. */
- output_asm_insn ("add%?\t%1, %1, %2", otherops);
- output_asm_insn ("ldr%(d%)\t%0, [%1] @split", otherops);
+ if (emit)
+ {
+ output_asm_insn ("add%?\t%1, %1, %2", otherops);
+ output_asm_insn ("ldr%(d%)\t%0, [%1] @split", otherops);
+ }
+ if (count)
+ *count = 2;
}
else
{
@@ -13149,11 +13397,20 @@ output_move_double (rtx *operands)
|| GET_CODE (otherops[2]) != CONST_INT
|| (INTVAL (otherops[2]) > -256
&& INTVAL (otherops[2]) < 256))
- output_asm_insn ("ldr%(d%)\t%0, [%1, %2]!", otherops);
+ {
+ if (emit)
+ output_asm_insn ("ldr%(d%)\t%0, [%1, %2]!", otherops);
+ }
else
{
- output_asm_insn ("ldr%?\t%0, [%1, %2]!", otherops);
- output_asm_insn ("ldr%?\t%H0, [%1, #4]", otherops);
+ if (emit)
+ {
+ output_asm_insn ("ldr%?\t%0, [%1, %2]!", otherops);
+ output_asm_insn ("ldr%?\t%H0, [%1, #4]", otherops);
+ }
+ if (count)
+ *count = 2;
+
}
}
}
@@ -13166,11 +13423,19 @@ output_move_double (rtx *operands)
|| GET_CODE (otherops[2]) != CONST_INT
|| (INTVAL (otherops[2]) > -256
&& INTVAL (otherops[2]) < 256))
- output_asm_insn ("ldr%(d%)\t%0, [%1], %2", otherops);
+ {
+ if (emit)
+ output_asm_insn ("ldr%(d%)\t%0, [%1], %2", otherops);
+ }
else
{
- output_asm_insn ("ldr%?\t%H0, [%1, #4]", otherops);
- output_asm_insn ("ldr%?\t%0, [%1], %2", otherops);
+ if (emit)
+ {
+ output_asm_insn ("ldr%?\t%H0, [%1, #4]", otherops);
+ output_asm_insn ("ldr%?\t%0, [%1], %2", otherops);
+ }
+ if (count)
+ *count = 2;
}
}
break;
@@ -13183,12 +13448,19 @@ output_move_double (rtx *operands)
/* Use the second register of the pair to avoid problematic
overlap. */
otherops[1] = operands[1];
- output_asm_insn ("adr%?\t%0, %1", otherops);
+ if (emit)
+ output_asm_insn ("adr%?\t%0, %1", otherops);
operands[1] = otherops[0];
- if (TARGET_LDRD)
- output_asm_insn ("ldr%(d%)\t%0, [%1]", operands);
- else
- output_asm_insn ("ldm%(ia%)\t%1, %M0", operands);
+ if (emit)
+ {
+ if (TARGET_LDRD)
+ output_asm_insn ("ldr%(d%)\t%0, [%1]", operands);
+ else
+ output_asm_insn ("ldm%(ia%)\t%1, %M0", operands);
+ }
+
+ if (count)
+ *count = 2;
break;
/* ??? This needs checking for thumb2. */
@@ -13207,17 +13479,20 @@ output_move_double (rtx *operands)
switch ((int) INTVAL (otherops[2]))
{
case -8:
- output_asm_insn ("ldm%(db%)\t%1, %M0", otherops);
+ if (emit)
+ output_asm_insn ("ldm%(db%)\t%1, %M0", otherops);
return "";
case -4:
if (TARGET_THUMB2)
break;
- output_asm_insn ("ldm%(da%)\t%1, %M0", otherops);
+ if (emit)
+ output_asm_insn ("ldm%(da%)\t%1, %M0", otherops);
return "";
case 4:
if (TARGET_THUMB2)
break;
- output_asm_insn ("ldm%(ib%)\t%1, %M0", otherops);
+ if (emit)
+ output_asm_insn ("ldm%(ib%)\t%1, %M0", otherops);
return "";
}
}
@@ -13245,34 +13520,50 @@ output_move_double (rtx *operands)
if (reg_overlap_mentioned_p (operands[0], otherops[2])
|| (fix_cm3_ldrd && reg0 == REGNO (otherops[1])))
{
- output_asm_insn ("add%?\t%0, %1, %2", otherops);
- output_asm_insn ("ldr%(d%)\t%0, [%1]", operands);
+ if (emit)
+ {
+ output_asm_insn ("add%?\t%0, %1, %2", otherops);
+ output_asm_insn ("ldr%(d%)\t%0, [%1]", operands);
+ }
+ if (count)
+ *count = 2;
}
else
{
otherops[0] = operands[0];
- output_asm_insn ("ldr%(d%)\t%0, [%1, %2]", otherops);
+ if (emit)
+ output_asm_insn ("ldr%(d%)\t%0, [%1, %2]", otherops);
}
return "";
}
if (GET_CODE (otherops[2]) == CONST_INT)
+ {
+ if (emit)
+ {
+ if (!(const_ok_for_arm (INTVAL (otherops[2]))))
+ output_asm_insn ("sub%?\t%0, %1, #%n2", otherops);
+ else
+ output_asm_insn ("add%?\t%0, %1, %2", otherops);
+ }
+
+ }
+ else
{
- if (!(const_ok_for_arm (INTVAL (otherops[2]))))
- output_asm_insn ("sub%?\t%0, %1, #%n2", otherops);
- else
+ if (emit)
output_asm_insn ("add%?\t%0, %1, %2", otherops);
}
- else
- output_asm_insn ("add%?\t%0, %1, %2", otherops);
}
else
- output_asm_insn ("sub%?\t%0, %1, %2", otherops);
+ {
+ if (emit)
+ output_asm_insn ("sub%?\t%0, %1, %2", otherops);
+ }
if (TARGET_LDRD)
return "ldr%(d%)\t%0, [%1]";
-
- return "ldm%(ia%)\t%1, %M0";
+
+ return "ldm%(ia%)\t%1, %M0";
}
else
{
@@ -13280,13 +13571,24 @@ output_move_double (rtx *operands)
/* Take care of overlapping base/data reg. */
if (reg_mentioned_p (operands[0], operands[1]))
{
- output_asm_insn ("ldr%?\t%0, %1", otherops);
- output_asm_insn ("ldr%?\t%0, %1", operands);
+ if (emit)
+ {
+ output_asm_insn ("ldr%?\t%0, %1", otherops);
+ output_asm_insn ("ldr%?\t%0, %1", operands);
+ }
+ if (count)
+ *count = 2;
+
}
else
{
- output_asm_insn ("ldr%?\t%0, %1", operands);
- output_asm_insn ("ldr%?\t%0, %1", otherops);
+ if (emit)
+ {
+ output_asm_insn ("ldr%?\t%0, %1", operands);
+ output_asm_insn ("ldr%?\t%0, %1", otherops);
+ }
+ if (count)
+ *count = 2;
}
}
}
@@ -13300,34 +13602,45 @@ output_move_double (rtx *operands)
switch (GET_CODE (XEXP (operands[0], 0)))
{
case REG:
- if (TARGET_LDRD)
- output_asm_insn ("str%(d%)\t%1, [%m0]", operands);
- else
- output_asm_insn ("stm%(ia%)\t%m0, %M1", operands);
+ if (emit)
+ {
+ if (TARGET_LDRD)
+ output_asm_insn ("str%(d%)\t%1, [%m0]", operands);
+ else
+ output_asm_insn ("stm%(ia%)\t%m0, %M1", operands);
+ }
break;
case PRE_INC:
gcc_assert (TARGET_LDRD);
- output_asm_insn ("str%(d%)\t%1, [%m0, #8]!", operands);
+ if (emit)
+ output_asm_insn ("str%(d%)\t%1, [%m0, #8]!", operands);
break;
case PRE_DEC:
- if (TARGET_LDRD)
- output_asm_insn ("str%(d%)\t%1, [%m0, #-8]!", operands);
- else
- output_asm_insn ("stm%(db%)\t%m0!, %M1", operands);
+ if (emit)
+ {
+ if (TARGET_LDRD)
+ output_asm_insn ("str%(d%)\t%1, [%m0, #-8]!", operands);
+ else
+ output_asm_insn ("stm%(db%)\t%m0!, %M1", operands);
+ }
break;
case POST_INC:
- if (TARGET_LDRD)
- output_asm_insn ("str%(d%)\t%1, [%m0], #8", operands);
- else
- output_asm_insn ("stm%(ia%)\t%m0!, %M1", operands);
+ if (emit)
+ {
+ if (TARGET_LDRD)
+ output_asm_insn ("str%(d%)\t%1, [%m0], #8", operands);
+ else
+ output_asm_insn ("stm%(ia%)\t%m0!, %M1", operands);
+ }
break;
case POST_DEC:
gcc_assert (TARGET_LDRD);
- output_asm_insn ("str%(d%)\t%1, [%m0], #-8", operands);
+ if (emit)
+ output_asm_insn ("str%(d%)\t%1, [%m0], #-8", operands);
break;
case PRE_MODIFY:
@@ -13345,19 +13658,35 @@ output_move_double (rtx *operands)
{
if (GET_CODE (XEXP (operands[0], 0)) == PRE_MODIFY)
{
- output_asm_insn ("str%?\t%0, [%1, %2]!", otherops);
- output_asm_insn ("str%?\t%H0, [%1, #4]", otherops);
+ if (emit)
+ {
+ output_asm_insn ("str%?\t%0, [%1, %2]!", otherops);
+ output_asm_insn ("str%?\t%H0, [%1, #4]", otherops);
+ }
+ if (count)
+ *count = 2;
}
else
{
- output_asm_insn ("str%?\t%H0, [%1, #4]", otherops);
- output_asm_insn ("str%?\t%0, [%1], %2", otherops);
+ if (emit)
+ {
+ output_asm_insn ("str%?\t%H0, [%1, #4]", otherops);
+ output_asm_insn ("str%?\t%0, [%1], %2", otherops);
+ }
+ if (count)
+ *count = 2;
}
}
else if (GET_CODE (XEXP (operands[0], 0)) == PRE_MODIFY)
- output_asm_insn ("str%(d%)\t%0, [%1, %2]!", otherops);
+ {
+ if (emit)
+ output_asm_insn ("str%(d%)\t%0, [%1, %2]!", otherops);
+ }
else
- output_asm_insn ("str%(d%)\t%0, [%1], %2", otherops);
+ {
+ if (emit)
+ output_asm_insn ("str%(d%)\t%0, [%1], %2", otherops);
+ }
break;
case PLUS:
@@ -13367,19 +13696,22 @@ output_move_double (rtx *operands)
switch ((int) INTVAL (XEXP (XEXP (operands[0], 0), 1)))
{
case -8:
- output_asm_insn ("stm%(db%)\t%m0, %M1", operands);
+ if (emit)
+ output_asm_insn ("stm%(db%)\t%m0, %M1", operands);
return "";
case -4:
if (TARGET_THUMB2)
break;
- output_asm_insn ("stm%(da%)\t%m0, %M1", operands);
+ if (emit)
+ output_asm_insn ("stm%(da%)\t%m0, %M1", operands);
return "";
case 4:
if (TARGET_THUMB2)
break;
- output_asm_insn ("stm%(ib%)\t%m0, %M1", operands);
+ if (emit)
+ output_asm_insn ("stm%(ib%)\t%m0, %M1", operands);
return "";
}
}
@@ -13392,7 +13724,8 @@ output_move_double (rtx *operands)
{
otherops[0] = operands[1];
otherops[1] = XEXP (XEXP (operands[0], 0), 0);
- output_asm_insn ("str%(d%)\t%0, [%1, %2]", otherops);
+ if (emit)
+ output_asm_insn ("str%(d%)\t%0, [%1, %2]", otherops);
return "";
}
/* Fall through */
@@ -13400,8 +13733,14 @@ output_move_double (rtx *operands)
default:
otherops[0] = adjust_address (operands[0], SImode, 4);
otherops[1] = operands[1];
- output_asm_insn ("str%?\t%1, %0", operands);
- output_asm_insn ("str%?\t%H1, %0", otherops);
+ if (emit)
+ {
+ output_asm_insn ("str%?\t%1, %0", operands);
+ output_asm_insn ("str%?\t%H1, %0", otherops);
+ }
+ if (count)
+ *count = 2;
+
}
}
@@ -14891,6 +15230,7 @@ arm_output_epilogue (rtx sibling)
&& !crtl->calls_eh_return
&& bit_count(saved_regs_mask) * 4 == count
&& !IS_INTERRUPT (func_type)
+ && !IS_STACKALIGN (func_type)
&& !crtl->tail_call_emit)
{
unsigned long mask;
@@ -19425,6 +19765,8 @@ arm_scalar_mode_supported_p (enum machine_mode mode)
{
if (mode == HFmode)
return (arm_fp16_format != ARM_FP16_FORMAT_NONE);
+ else if (ALL_FIXED_POINT_MODE_P (mode))
+ return true;
else
return default_scalar_mode_supported_p (mode);
}
@@ -22544,6 +22886,11 @@ arm_vector_mode_supported_p (enum machine_mode mode)
|| (mode == V8QImode)))
return true;
+ if (TARGET_INT_SIMD && (mode == V4UQQmode || mode == V4QQmode
+ || mode == V2UHQmode || mode == V2HQmode || mode == V2UHAmode
+ || mode == V2HAmode))
+ return true;
+
return false;
}
@@ -23988,4 +24335,13 @@ arm_attr_length_push_multi(rtx parallel_op, rtx first_op)
return 4;
}
+/* Compute the number of instructions emitted by output_move_double. */
+int
+arm_count_output_move_double_insns (rtx *operands)
+{
+ int count;
+ output_move_double (operands, false, &count);
+ return count;
+}
+
#include "gt-arm.h"