author		Nick Clifton <nickc@gcc.gnu.org>	2000-02-14 22:51:36 +0000
committer	Nick Clifton <nickc@gcc.gnu.org>	2000-02-14 22:51:36 +0000
commit		8f90be4c548e561185025888c42a986b10bfe3c2 (patch)
tree		46e2297952ebf8e12e5a9bdc2b6a9087d7412780
parent		77de5d85d0492c686d37f00851c2189ee71e6348 (diff)
Backend for Motorola's MCore processors.
From-SVN: r31972
-rw-r--r--	gcc/config/mcore/crti.asm	|   74
-rw-r--r--	gcc/config/mcore/crtn.asm	|   57
-rw-r--r--	gcc/config/mcore/gfloat.h	|   64
-rw-r--r--	gcc/config/mcore/lib1.asm	|  313
-rw-r--r--	gcc/config/mcore/mcore-elf.h	|  206
-rw-r--r--	gcc/config/mcore/mcore-pe.h	|  251
-rw-r--r--	gcc/config/mcore/mcore-protos.h	|  109
-rw-r--r--	gcc/config/mcore/mcore.c	| 3574
-rw-r--r--	gcc/config/mcore/mcore.h	| 1458
-rw-r--r--	gcc/config/mcore/mcore.md	| 3526
-rw-r--r--	gcc/config/mcore/t-mcore	|   64
-rw-r--r--	gcc/config/mcore/t-mcore-pe	|   47
-rw-r--r--	gcc/config/mcore/xm-mcore.h	|   42
13 files changed, 9785 insertions, 0 deletions
diff --git a/gcc/config/mcore/crti.asm b/gcc/config/mcore/crti.asm
new file mode 100644
index 00000000000..50a78146dec
--- /dev/null
+++ b/gcc/config/mcore/crti.asm
@@ -0,0 +1,74 @@
+# crti.asm for ELF based systems
+
+# Copyright (C) 1992, 1998, 1999 Free Software Foundation, Inc.
+# Written By David Vinayak Henkel-Wallace, June 1992
+#
+# This file is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 2, or (at your option) any
+# later version.
+#
+# In addition to the permissions in the GNU General Public License, the
+# Free Software Foundation gives you unlimited permission to link the
+# compiled version of this file with other programs, and to distribute
+# those programs without any restriction coming from the use of this
+# file. (The General Public License restrictions do apply in other
+# respects; for example, they cover modification of the file, and
+# distribution when not linked into another program.)
+#
+# This file is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; see the file COPYING. If not, write to
+# the Free Software Foundation, 59 Temple Place - Suite 330,
+# Boston, MA 02111-1307, USA.
+#
+# As a special exception, if you link this library with files
+# compiled with GCC to produce an executable, this does not cause
+# the resulting executable to be covered by the GNU General Public License.
+# This exception does not however invalidate any other reasons why
+# the executable file might be covered by the GNU General Public License.
+#
+
+# This file just makes a stack frame for the contents of the .fini and
+# .init sections. Users may put any desired instructions in those
+# sections.
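+#
+# Note that _init and _fini below contain only the prologue half of the
+# frame: they reserve 16 bytes of stack (r0 is the MCore stack pointer)
+# and save the return address register r15 into it.  The matching
+# epilogues, which reload r15 and return, are supplied by crtn.asm, so
+# whatever the linker places in ".init"/".fini" ends up sandwiched
+# between the two halves.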
+
+ .file "crti.asm"
+
+ .section ".init"
+ .global _init
+ .type _init,@function
+ .align 4
+_init:
+ subi r0, 16
+ st.w r15, (r0, 12)
+
+ # These nops are here to align the end of this code with a 16 byte
+ # boundary. The linker will start inserting code into the .init
+ # section at such a boundary.
+
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+
+
+ .section ".fini"
+ .global _fini
+ .type _fini,@function
+ .align 4
+_fini:
+ subi r0, 16
+ st.w r15, (r0, 12)
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
diff --git a/gcc/config/mcore/crtn.asm b/gcc/config/mcore/crtn.asm
new file mode 100644
index 00000000000..253d425d73c
--- /dev/null
+++ b/gcc/config/mcore/crtn.asm
@@ -0,0 +1,57 @@
+# crtn.asm for ELF based systems
+
+# Copyright (C) 1992, 1999, 2000 Free Software Foundation, Inc.
+# Written By David Vinayak Henkel-Wallace, June 1992
+#
+# This file is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 2, or (at your option) any
+# later version.
+#
+# In addition to the permissions in the GNU General Public License, the
+# Free Software Foundation gives you unlimited permission to link the
+# compiled version of this file with other programs, and to distribute
+# those programs without any restriction coming from the use of this
+# file. (The General Public License restrictions do apply in other
+# respects; for example, they cover modification of the file, and
+# distribution when not linked into another program.)
+#
+# This file is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; see the file COPYING. If not, write to
+# the Free Software Foundation, 59 Temple Place - Suite 330,
+# Boston, MA 02111-1307, USA.
+#
+# As a special exception, if you link this library with files
+# compiled with GCC to produce an executable, this does not cause
+# the resulting executable to be covered by the GNU General Public License.
+# This exception does not however invalidate any other reasons why
+# the executable file might be covered by the GNU General Public License.
+#
+
+# This file just makes sure that the .fini and .init sections do in
+# fact return. Users may put any desired instructions in those sections.
+# This file is the last thing linked into any executable.
+
+ .file "crtn.asm"
+
+ .section ".init"
+ .align 4
+
+ ldw r15,(r0, 12)
+ addi r0,16
+ jmp r15
+
+ .section ".fini"
+ .align 4
+
+ ldw r15, (r0, 12)
+ addi r0,16
+ jmp r15
+
+# Th-th-th-that is all folks!
+
diff --git a/gcc/config/mcore/gfloat.h b/gcc/config/mcore/gfloat.h
new file mode 100644
index 00000000000..4469ff44456
--- /dev/null
+++ b/gcc/config/mcore/gfloat.h
@@ -0,0 +1,64 @@
+/* Output routines for Motorola MCore processor
+ Copyright (C) 1993, 1999, 2000 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+/* float.h for the M*Core microprocessor. It uses IEEE floating point.
+ * float is 32 bit IEEE-754 format
+ * double is 64 bit IEEE-754 format
+ * long double is not defined right now...
+ */
+#ifndef __FLOAT_H___
+#define __FLOAT_H___
+
+#define FLT_RADIX 2
+#define FLT_ROUNDS 1
+
+#define FLT_MANT_DIG 24
+#define FLT_DIG 6
+#define FLT_EPSILON ((float)1.19209290e-07)
+#define FLT_MIN_EXP (-125)
+#define FLT_MIN ((float)1.17549435e-38)
+#define FLT_MIN_10_EXP (-37)
+#define FLT_MAX_EXP 128
+#define FLT_MAX ((float)3.40282347e+38)
+#define FLT_MAX_10_EXP 38
+
+#define DBL_MANT_DIG 53
+#define DBL_DIG 15
+#define DBL_EPSILON 2.2204460492503131e-16
+#define DBL_MIN_EXP (-1021)
+#define DBL_MIN 2.2250738585072014e-308
+#define DBL_MIN_10_EXP (-307)
+#define DBL_MAX_EXP 1024
+#define DBL_MAX 1.7976931348623157e+308
+#define DBL_MAX_10_EXP 308
+
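+/* Informal sanity check on the values above: with FLT_MANT_DIG == 24,
+   the gap between 1.0 and the next representable float is
+
+       2**(1-24) = 2**-23 = 1.19209290e-07   (FLT_EPSILON)
+
+   and likewise with DBL_MANT_DIG == 53,
+
+       2**(1-53) = 2**-52 = 2.2204460492503131e-16   (DBL_EPSILON).  */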
+
+/* No definitions for LDBL at this time. */
+
+#undef LDBL_MANT_DIG
+#undef LDBL_DIG
+#undef LDBL_EPSILON
+#undef LDBL_MIN_EXP
+#undef LDBL_MIN
+#undef LDBL_MIN_10_EXP
+#undef LDBL_MAX_EXP
+#undef LDBL_MAX
+#undef LDBL_MAX_10_EXP
+
+#endif /* __FLOAT_H___ */
diff --git a/gcc/config/mcore/lib1.asm b/gcc/config/mcore/lib1.asm
new file mode 100644
index 00000000000..f8a4a8fcff7
--- /dev/null
+++ b/gcc/config/mcore/lib1.asm
@@ -0,0 +1,313 @@
+/* libgcc1 routines for the MCore.
+ Copyright (C) 1993, 1999, 2000 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by the
+Free Software Foundation; either version 2, or (at your option) any
+later version.
+
+In addition to the permissions in the GNU General Public License, the
+Free Software Foundation gives you unlimited permission to link the
+compiled version of this file with other programs, and to distribute
+those programs without any restriction coming from the use of this
+file. (The General Public License restrictions do apply in other
+respects; for example, they cover modification of the file, and
+distribution when not linked into another program.)
+
+This file is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+/* As a special exception, if you link this library with files
+ compiled with GCC to produce an executable, this does not cause
+ the resulting executable to be covered by the GNU General Public License.
+ This exception does not however invalidate any other reasons why
+ the executable file might be covered by the GNU General Public License. */
+
+#define CONCAT1(a, b) CONCAT2(a, b)
+#define CONCAT2(a, b) a ## b
+
+/* Use the right prefix for global labels. */
+
+#define SYM(x) CONCAT1 (__, x)
+
+#ifdef __ELF__
+#define TYPE(x) .type SYM (x),@function
+#define SIZE(x) .size SYM (x), . - SYM (x)
+#else
+#define TYPE(x)
+#define SIZE(x)
+#endif
+
+.macro FUNC_START name
+ .text
+ .globl SYM (\name)
+ TYPE (\name)
+SYM (\name):
+.endm
+
+.macro FUNC_END name
+ SIZE (\name)
+.endm
+
+#ifdef L_udivsi3
+FUNC_START udiv32
+FUNC_START udivsi32
+
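+	// Overview: this is a conventional shift/subtract divider.  r1:r2
+	// holds the dividend, r3 the divisor, and r4 starts out as a lone
+	// sentinel bit.  Each pass shifts the dividend pair left by one,
+	// subtracts the divisor from the high word when it fits, and shifts
+	// the resulting quotient bit into r4; the loop stops once the
+	// sentinel bit has been shifted out of r4.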
+ movi r1,0 // r1-r2 form 64 bit dividend
+ movi r4,1 // r4 is quotient (1 for a sentinel)
+
+ cmpnei r3,0 // look for 0 divisor
+ bt 9f
+ trap 3 // divide by 0
+9:
+ // control iterations; skip across high order 0 bits in dividend
+ mov r7,r2
+ cmpnei r7,0
+ bt 8f
+ movi r2,0 // 0 dividend
+ jmp r15 // quick return
+8:
+ ff1 r7 // figure distance to skip
+ lsl r4,r7 // move the sentinel along (with 0's behind)
+ lsl r2,r7 // and the low 32 bits of numerator
+
+// appears to be wrong...
+// tested out incorrectly in our OS work...
+// mov r7,r3 // looking at divisor
+// ff1 r7 // I can move 32-r7 more bits to left.
+// addi r7,1 // ok, one short of that...
+// mov r1,r2
+// lsr r1,r7 // bits that came from low order...
+// rsubi r7,31 // r7 == "32-n" == LEFT distance
+// addi r7,1 // this is (32-n)
+// lsl r4,r7 // fixes the high 32 (quotient)
+// lsl r2,r7
+// cmpnei r4,0
+// bf 4f // the sentinel went away...
+
+ // run the remaining bits
+
+1: lslc r2,1 // 1 bit left shift of r1-r2
+ addc r1,r1
+ cmphs r1,r3 // upper 32 of dividend >= divisor?
+ bf 2f
+ sub r1,r3 // if yes, subtract divisor
+2: addc r4,r4 // shift by 1 and count subtracts
+ bf 1b // if sentinel falls out of quotient, stop
+
+4: mov r2,r4 // return quotient
+ mov r3,r1 // and piggyback the remainder
+ jmp r15
+FUNC_END udiv32
+FUNC_END udivsi32
+#endif
+
+#ifdef L_umodsi3
+FUNC_START urem32
+FUNC_START umodsi3
+ movi r1,0 // r1-r2 form 64 bit dividend
+ movi r4,1 // r4 is quotient (1 for a sentinel)
+ cmpnei r3,0 // look for 0 divisor
+ bt 9f
+ trap 3 // divide by 0
+9:
+ // control iterations; skip across high order 0 bits in dividend
+ mov r7,r2
+ cmpnei r7,0
+ bt 8f
+ movi r2,0 // 0 dividend
+ jmp r15 // quick return
+8:
+ ff1 r7 // figure distance to skip
+ lsl r4,r7 // move the sentinel along (with 0's behind)
+ lsl r2,r7 // and the low 32 bits of numerator
+
+1: lslc r2,1 // 1 bit left shift of r1-r2
+ addc r1,r1
+ cmphs r1,r3 // upper 32 of dividend >= divisor?
+ bf 2f
+ sub r1,r3 // if yes, subtract divisor
+2: addc r4,r4 // shift by 1 and count subtracts
+ bf 1b // if sentinel falls out of quotient, stop
+ mov r2,r1 // return remainder
+ jmp r15
+FUNC_END urem32
+FUNC_END umodsi3
+#endif
+
+#ifdef L_divsi3
+FUNC_START div32
+FUNC_START divsi3
+ mov r5,r2 // calc sign of quotient
+ xor r5,r3
+ abs r2 // do unsigned divide
+ abs r3
+ movi r1,0 // r1-r2 form 64 bit dividend
+ movi r4,1 // r4 is quotient (1 for a sentinel)
+ cmpnei r3,0 // look for 0 divisor
+ bt 9f
+ trap 3 // divide by 0
+9:
+ // control iterations; skip across high order 0 bits in dividend
+ mov r7,r2
+ cmpnei r7,0
+ bt 8f
+ movi r2,0 // 0 dividend
+ jmp r15 // quick return
+8:
+ ff1 r7 // figure distance to skip
+ lsl r4,r7 // move the sentinel along (with 0's behind)
+ lsl r2,r7 // and the low 32 bits of numerator
+
+// tested out incorrectly in our OS work...
+// mov r7,r3 // looking at divisor
+// ff1 r7 // I can move 32-r7 more bits to left.
+// addi r7,1 // ok, one short of that...
+// mov r1,r2
+// lsr r1,r7 // bits that came from low order...
+// rsubi r7,31 // r7 == "32-n" == LEFT distance
+// addi r7,1 // this is (32-n)
+// lsl r4,r7 // fixes the high 32 (quotient)
+// lsl r2,r7
+// cmpnei r4,0
+// bf 4f // the sentinel went away...
+
+ // run the remaining bits
+1: lslc r2,1 // 1 bit left shift of r1-r2
+ addc r1,r1
+ cmphs r1,r3 // upper 32 of dividend >= divisor?
+ bf 2f
+ sub r1,r3 // if yes, subtract divisor
+2: addc r4,r4 // shift by 1 and count subtracts
+ bf 1b // if sentinel falls out of quotient, stop
+
+4: mov r2,r4 // return quotient
+ mov r3,r1 // piggyback the remainder
+ btsti r5,31 // after adjusting for sign
+ bf 3f
+ rsubi r2,0
+ rsubi r3,0
+3: jmp r15
+FUNC_END div32
+FUNC_END divsi3
+#endif
+
+#ifdef L_modsi3
+FUNC_START rem32
+FUNC_START modsi3
+ mov r5,r2 // calc sign of remainder
+ abs r2 // do unsigned divide
+ abs r3
+ movi r1,0 // r1-r2 form 64 bit dividend
+ movi r4,1 // r4 is quotient (1 for a sentinel)
+ cmpnei r3,0 // look for 0 divisor
+ bt 9f
+ trap 3 // divide by 0
+9:
+ // control iterations; skip across high order 0 bits in dividend
+ mov r7,r2
+ cmpnei r7,0
+ bt 8f
+ movi r2,0 // 0 dividend
+ jmp r15 // quick return
+8:
+ ff1 r7 // figure distance to skip
+ lsl r4,r7 // move the sentinel along (with 0's behind)
+ lsl r2,r7 // and the low 32 bits of numerator
+
+1: lslc r2,1 // 1 bit left shift of r1-r2
+ addc r1,r1
+ cmphs r1,r3 // upper 32 of dividend >= divisor?
+ bf 2f
+ sub r1,r3 // if yes, subtract divisor
+2: addc r4,r4 // shift by 1 and count subtracts
+ bf 1b // if sentinel falls out of quotient, stop
+ mov r2,r1 // return remainder
+ btsti r5,31 // after adjusting for sign
+ bf 3f
+ rsubi r2,0
+3: jmp r15
+FUNC_END rem32
+FUNC_END modsi3
+#endif
+
+
+/* GCC expects that {__eq,__ne,__gt,__ge,__le,__lt}{df2,sf2}
+   will behave like __cmpdf2 and __cmpsf2 respectively.  So we stub the
+   implementations to jump straight to __cmpdf2 and __cmpsf2.
+
+   All of these short-circuit the return path so that __cmp{s,d}f2
+   will go directly back to the caller.  */
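+/* For example, a double-precision "a < b" is compiled into a call to
+   __ltdf2; the stub below simply tail-jumps (jmpi) to __cmpdf2, and the
+   caller then tests the sign of the returned value.  */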
+
+.macro COMPARE_DF_JUMP name
+ .import SYM (cmpdf2)
+FUNC_START \name
+ jmpi SYM (cmpdf2)
+FUNC_END \name
+.endm
+
+#ifdef L_eqdf2
+COMPARE_DF_JUMP eqdf2
+#endif /* L_eqdf2 */
+
+#ifdef L_nedf2
+COMPARE_DF_JUMP nedf2
+#endif /* L_nedf2 */
+
+#ifdef L_gtdf2
+COMPARE_DF_JUMP gtdf2
+#endif /* L_gtdf2 */
+
+#ifdef L_gedf2
+COMPARE_DF_JUMP gedf2
+#endif /* L_gedf2 */
+
+#ifdef L_ltdf2
+COMPARE_DF_JUMP ltdf2
+#endif /* L_ltdf2 */
+
+#ifdef L_ledf2
+COMPARE_DF_JUMP ledf2
+#endif /* L_ledf2 */
+
+/* SINGLE PRECISION FLOATING POINT STUBS */
+
+.macro COMPARE_SF_JUMP name
+ .import SYM (cmpsf2)
+FUNC_START \name
+ jmpi SYM (cmpsf2)
+FUNC_END \name
+.endm
+
+#ifdef L_eqsf2
+COMPARE_SF_JUMP eqsf2
+#endif /* L_eqsf2 */
+
+#ifdef L_nesf2
+COMPARE_SF_JUMP nesf2
+#endif /* L_nesf2 */
+
+#ifdef L_gtsf2
+COMPARE_SF_JUMP gtsf2
+#endif /* L_gtsf2 */
+
+#ifdef L_gesf2
+COMPARE_SF_JUMP gesf2
+#endif /* L_gesf2 */
+
+#ifdef L_ltsf2
+COMPARE_SF_JUMP ltsf2
+#endif /* L_ltsf2 */
+
+#ifdef L_lesf2
+COMPARE_SF_JUMP lesf2
+#endif /* L_lesf2 */
diff --git a/gcc/config/mcore/mcore-elf.h b/gcc/config/mcore/mcore-elf.h
new file mode 100644
index 00000000000..f8623904e14
--- /dev/null
+++ b/gcc/config/mcore/mcore-elf.h
@@ -0,0 +1,206 @@
+/* Definitions of MCore target.
+ Copyright (C) 1998, 1999, 2000 Free Software Foundation, Inc.
+ Contributed by Cygnus Solutions.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+#ifndef __MCORE_ELF_H__
+#define __MCORE_ELF_H__
+
+/* Run-time Target Specification. */
+#define TARGET_VERSION fputs (" (Motorola MCORE/elf)", stderr)
+
+#define SUBTARGET_CPP_PREDEFINES " -D__ELF__"
+
+#include "svr4.h"
+#include "mcore/mcore.h"
+
+/* Use DWARF2 debugging info. */
+#ifndef DWARF2_DEBUGGING_INFO
+#define DWARF2_DEBUGGING_INFO 1
+#endif
+
+#undef PREFERRED_DEBUGGING_TYPE
+#define PREFERRED_DEBUGGING_TYPE DWARF2_DEBUG
+
+/* But allow DWARF 1 if the user wants it. */
+#ifndef DWARF_DEBUGGING_INFO
+#define DWARF_DEBUGGING_INFO 1
+#endif
+
+/* The numbers used to denote specific machine registers in the System V
+ Release 4 DWARF debugging information are quite likely to be totally
+ different from the numbers used in BSD stabs debugging information
+ for the same kind of target machine. Thus, we undefine the macro
+ DBX_REGISTER_NUMBER here as an extra inducement to get people to
+ provide proper machine-specific definitions of DBX_REGISTER_NUMBER
+ (which is also used to provide DWARF registers numbers in dwarfout.c)
+ in their tm.h files which include this file. */
+
+#undef DBX_REGISTER_NUMBER
+#define DBX_REGISTER_NUMBER(REGNO) (REGNO)
+
+/* When using stabs, gcc2_compiled must be a stabs entry, not an
+ ordinary symbol, or gdb won't see it. The stabs entry must be
+ before the N_SO in order for gdb to find it. */
+#undef ASM_IDENTIFY_GCC
+#define ASM_IDENTIFY_GCC(FILE) \
+do \
+ { \
+ if (write_symbols != DBX_DEBUG) \
+ fputs ("gcc2_compiled.:\n", FILE); \
+ else \
+ fputs ("\t.stabs\t\"gcc2_compiled.\", 0x3c, 0, 0, 0\n", FILE); \
+ } \
+while (0)
+
+/* MCore defines .long and .short to NOT force any alignment.
+ This lets you misalign as much as you wish. */
+#define UNALIGNED_INT_ASM_OP ".long"
+#define UNALIGNED_SHORT_ASM_OP ".short"
+
+#define EXPORTS_SECTION_ASM_OP "\t.section .exports"
+
+#define SUBTARGET_EXTRA_SECTIONS in_const, in_exports
+
+#define SUBTARGET_EXTRA_SECTION_FUNCTIONS \
+ CONST_SECTION_FUNCTION \
+ EXPORT_SECTION_FUNCTION \
+
+/* CONST_SECTION_FUNCTION is defined in svr4.h. */
+
+#define EXPORT_SECTION_FUNCTION \
+void \
+exports_section () \
+{ \
+ if (in_section != in_exports) \
+ { \
+ fprintf (asm_out_file, "%s\n", EXPORTS_SECTION_ASM_OP); \
+ in_section = in_exports; \
+ } \
+}
+
+#define SUBTARGET_SWITCH_SECTIONS \
+ case in_exports: exports_section (); break; \
+ case in_const: const_section (); break;
+
+
+#define MCORE_EXPORT_NAME(STREAM, NAME) \
+ do \
+ { \
+ exports_section (); \
+ fprintf (STREAM, "\t.ascii \" -export:%s\"\n", \
+ MCORE_STRIP_NAME_ENCODING (NAME)); \
+ } \
+ while (0);
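+/* The net effect, for a function or variable foo marked dllexport
+   (foo being a name made up purely for illustration), is that the
+   assembler output gains:
+
+	.section .exports
+	.ascii " -export:foo"
+ */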
+
+/* Write the extra assembler code needed to declare a function properly.
+ Some svr4 assemblers need to also have something extra said about the
+ function's return value. We allow for that here. */
+#undef ASM_DECLARE_FUNCTION_NAME
+#define ASM_DECLARE_FUNCTION_NAME(FILE, NAME, DECL) \
+ do \
+ { \
+ if (mcore_dllexport_name_p (NAME)) \
+ { \
+ MCORE_EXPORT_NAME (FILE, NAME); \
+ function_section (DECL); \
+ } \
+ fprintf (FILE, "\t%s\t ", TYPE_ASM_OP); \
+ assemble_name (FILE, NAME); \
+ putc (',', FILE); \
+ fprintf (FILE, TYPE_OPERAND_FMT, "function"); \
+ putc ('\n', FILE); \
+ ASM_DECLARE_RESULT (FILE, DECL_RESULT (DECL)); \
+ ASM_OUTPUT_LABEL (FILE, NAME); \
+ } \
+ while (0)
+
+/* Write the extra assembler code needed to declare an object properly. */
+#undef ASM_DECLARE_OBJECT_NAME
+#define ASM_DECLARE_OBJECT_NAME(FILE, NAME, DECL) \
+ do \
+ { \
+ if (mcore_dllexport_name_p (NAME)) \
+ { \
+ enum in_section save_section = in_section; \
+ MCORE_EXPORT_NAME (FILE, NAME); \
+ switch_to_section (save_section, (DECL)); \
+ } \
+ fprintf (FILE, "\t%s\t ", TYPE_ASM_OP); \
+ assemble_name (FILE, NAME); \
+ putc (',', FILE); \
+ fprintf (FILE, TYPE_OPERAND_FMT, "object"); \
+ putc ('\n', FILE); \
+ size_directive_output = 0; \
+ if (!flag_inhibit_size_directive && DECL_SIZE (DECL)) \
+ { \
+ size_directive_output = 1; \
+ fprintf (FILE, "\t%s\t ", SIZE_ASM_OP); \
+ assemble_name (FILE, NAME); \
+ fprintf (FILE, ",%d\n", int_size_in_bytes (TREE_TYPE (DECL))); \
+ } \
+ ASM_OUTPUT_LABEL(FILE, NAME); \
+ } \
+ while (0)
+
+/* Output the size directive for a decl in rest_of_decl_compilation
+ in the case where we did not do so before the initializer.
+ Once we find the error_mark_node, we know that the value of
+ size_directive_output was set
+ by ASM_DECLARE_OBJECT_NAME when it was run for the same decl. */
+#undef ASM_FINISH_DECLARE_OBJECT
+#define ASM_FINISH_DECLARE_OBJECT(FILE, DECL, TOP_LEVEL, AT_END) \
+ do \
+ { \
+ char * name = XSTR (XEXP (DECL_RTL (DECL), 0), 0); \
+ if (!flag_inhibit_size_directive && DECL_SIZE (DECL) \
+ && ! AT_END && TOP_LEVEL \
+ && DECL_INITIAL (DECL) == error_mark_node \
+ && !size_directive_output) \
+ { \
+ fprintf (FILE, "\t%s\t ", SIZE_ASM_OP); \
+ assemble_name (FILE, name); \
+ fprintf (FILE, ",%d\n", int_size_in_bytes (TREE_TYPE (DECL)));\
+ } \
+ } \
+ while (0)
+
+
+#undef STARTFILE_SPEC
+#define STARTFILE_SPEC "crt0.o%s crti.o%s crtbegin.o%s"
+
+/* Include the OS stub library, so that the code can be simulated.
+ This is not the right way to do this. Ideally this kind of thing
+ should be done in the linker script - but I have not worked out how
+ to specify the location of a linker script in a gcc command line yet. */
+#undef ENDFILE_SPEC
+#define ENDFILE_SPEC "%{!mno-lsim:-lsim} crtend.o%s crtn.o%s"
+
+
+/* The subroutine calls in the .init and .fini sections create literal
+ pools which must be jumped around... */
+#define FORCE_INIT_SECTION_ALIGN asm ("br 1f ; .literals ; 1:")
+#define FORCE_FINI_SECTION_ALIGN asm ("br 1f ; .literals ; 1:")
+
+#undef CTORS_SECTION_ASM_OP
+#define CTORS_SECTION_ASM_OP ".section\t.ctors,\"aw\""
+#undef DTORS_SECTION_ASM_OP
+#define DTORS_SECTION_ASM_OP ".section\t.dtors,\"aw\""
+
+#endif /* __MCORE_ELF_H__ */
diff --git a/gcc/config/mcore/mcore-pe.h b/gcc/config/mcore/mcore-pe.h
new file mode 100644
index 00000000000..89d4e6128b5
--- /dev/null
+++ b/gcc/config/mcore/mcore-pe.h
@@ -0,0 +1,251 @@
+/* Definitions of target machine for GNU compiler, for MCore using COFF/PE.
+ Copyright (C) 1994, 1999, 2000 Free Software Foundation, Inc.
+ Contributed by Michael Tiemann (tiemann@cygnus.com).
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+/* Run-time Target Specification. */
+#define TARGET_VERSION fputs (" (MCORE/pe)", stderr)
+
+#define SUBTARGET_CPP_PREDEFINES " -D__pe__"
+
+#include "svr3.h"
+#include "mcore/mcore.h"
+#include "dbxcoff.h"
+
+#undef SDB_DEBUGGING_INFO
+#undef DBX_DEBUGGING_INFO
+#define DBX_DEBUGGING_INFO 1
+
+/* Computed in toplev.c. */
+#undef PREFERRED_DEBUGGING_TYPE
+
+/* Lay out additional 'sections' where we place things like code
+ and readonly data. This gets them out of default places. */
+
+#define SUBTARGET_SWITCH_SECTIONS \
+ case in_drectve: drectve_section (); break; \
+ case in_rdata: rdata_section (); break;
+
+#define DRECTVE_SECTION_ASM_OP "\t.section .drectve"
+#define RDATA_SECTION_ASM_OP "\t.section .rdata"
+
+#define SUBTARGET_EXTRA_SECTIONS in_drectve, in_rdata
+
+#define SUBTARGET_EXTRA_SECTION_FUNCTIONS \
+ DRECTVE_SECTION_FUNCTION \
+ RDATA_SECTION_FUNCTION
+
+#define DRECTVE_SECTION_FUNCTION \
+void \
+drectve_section () \
+{ \
+ if (in_section != in_drectve) \
+ { \
+ fprintf (asm_out_file, "%s\n", DRECTVE_SECTION_ASM_OP); \
+ in_section = in_drectve; \
+ } \
+}
+
+#define RDATA_SECTION_FUNCTION \
+void \
+rdata_section () \
+{ \
+ if (in_section != in_rdata) \
+ { \
+ fprintf (asm_out_file, "%s\n", RDATA_SECTION_ASM_OP); \
+ in_section = in_rdata; \
+ } \
+}
+
+#undef READONLY_DATA_SECTION
+#define READONLY_DATA_SECTION() rdata_section ()
+
+/* A C statement or statements to switch to the appropriate
+ section for output of DECL. DECL is either a `VAR_DECL' node
+ or a constant of some sort. RELOC indicates whether forming
+ the initial value of DECL requires link-time relocations. */
+#undef SELECT_SECTION
+#define SELECT_SECTION(DECL, RELOC) \
+{ \
+ if (TREE_CODE (DECL) == STRING_CST) \
+ { \
+ if (! flag_writable_strings) \
+ rdata_section (); \
+ else \
+ data_section (); \
+ } \
+ else if (TREE_CODE (DECL) == VAR_DECL) \
+ { \
+ if ((0 && RELOC) /* should be (flag_pic && RELOC) */ \
+ || !TREE_READONLY (DECL) || TREE_SIDE_EFFECTS (DECL) \
+ || !DECL_INITIAL (DECL) \
+ || (DECL_INITIAL (DECL) != error_mark_node \
+ && !TREE_CONSTANT (DECL_INITIAL (DECL)))) \
+ data_section (); \
+ else \
+ rdata_section (); \
+ } \
+ else \
+ rdata_section (); \
+}
+
+/* A C statement or statements to switch to the appropriate
+ section for output of RTX in mode MODE. RTX is some kind
+ of constant in RTL. The argument MODE is redundant except
+ in the case of a `const_int' rtx. Currently, these always
+ go into the const section. */
+#undef SELECT_RTX_SECTION
+#define SELECT_RTX_SECTION(MODE, RTX) rdata_section ()
+
+#define MCORE_EXPORT_NAME(STREAM, NAME) \
+ do \
+ { \
+ drectve_section (); \
+ fprintf (STREAM, "\t.ascii \" -export:%s\"\n", \
+ MCORE_STRIP_NAME_ENCODING (NAME)); \
+ } \
+ while (0);
+
+/* Output the label for an initialized variable. */
+#undef ASM_DECLARE_OBJECT_NAME
+#define ASM_DECLARE_OBJECT_NAME(STREAM, NAME, DECL) \
+ do \
+ { \
+ if (mcore_dllexport_name_p (NAME)) \
+ { \
+ enum in_section save_section = in_section; \
+ MCORE_EXPORT_NAME (STREAM, NAME); \
+ switch_to_section (save_section, (DECL)); \
+ } \
+ ASM_OUTPUT_LABEL ((STREAM), (NAME)); \
+ } \
+ while (0)
+
+/* Output a function label definition. */
+#define ASM_DECLARE_FUNCTION_NAME(STREAM, NAME, DECL) \
+ do \
+ { \
+ if (mcore_dllexport_name_p (NAME)) \
+ { \
+ MCORE_EXPORT_NAME (STREAM, NAME); \
+ function_section (DECL); \
+ } \
+ ASM_OUTPUT_LABEL ((STREAM), (NAME)); \
+ } \
+ while (0);
+
+#undef ASM_FILE_START
+#define ASM_FILE_START(STREAM) \
+ do \
+ { \
+ extern char * version_string; \
+ fprintf (STREAM, "%s Generated by gcc %s for MCore/pe\n", \
+ ASM_COMMENT_START, version_string); \
+ output_file_directive ((STREAM), main_input_filename); \
+ } \
+ while (0)
+
+#undef ASM_OUTPUT_SOURCE_LINE
+#define ASM_OUTPUT_SOURCE_LINE(FILE, LINE) \
+ { \
+ if (write_symbols == DBX_DEBUG) \
+ { \
+ static int sym_lineno = 1; \
+ char buffer[256]; \
+ \
+ ASM_GENERATE_INTERNAL_LABEL (buffer, "LM", sym_lineno); \
+ fprintf (FILE, ".stabn 68,0,%d,", LINE); \
+ assemble_name (FILE, buffer); \
+ putc ('-', FILE); \
+ assemble_name (FILE, \
+ XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0)); \
+ putc ('\n', FILE); \
+ ASM_OUTPUT_INTERNAL_LABEL (FILE, "LM", sym_lineno); \
+ sym_lineno ++; \
+ } \
+ }
+
+#define STARTFILE_SPEC "crt0.o%s"
+#define ENDFILE_SPEC "%{!mno-lsim:-lsim}"
+
+#undef CTORS_SECTION_ASM_OP
+#define CTORS_SECTION_ASM_OP ".section\t.ctors,\"x\""
+#undef DTORS_SECTION_ASM_OP
+#define DTORS_SECTION_ASM_OP ".section\t.dtors,\"x\""
+
+#define INT_ASM_OP ".long"
+
+#undef ASM_OUTPUT_CONSTRUCTOR
+#define ASM_OUTPUT_CONSTRUCTOR(STREAM, NAME) \
+ do \
+ { \
+ ctors_section (); \
+ fprintf (STREAM, "\t%s\t ", INT_ASM_OP); \
+ assemble_name (STREAM, NAME); \
+ fprintf (STREAM, "\n"); \
+ } \
+ while (0)
+
+/* A C statement (sans semicolon) to output an element in the table of
+ global destructors. */
+#undef ASM_OUTPUT_DESTRUCTOR
+#define ASM_OUTPUT_DESTRUCTOR(STREAM, NAME) \
+ do \
+ { \
+ dtors_section (); \
+ fprintf (STREAM, "\t%s\t ", INT_ASM_OP); \
+ assemble_name (STREAM, NAME); \
+ fprintf (STREAM, "\n"); \
+ } \
+ while (0)
+
+/* __CTOR_LIST__ and __DTOR_LIST__ must be defined by the linker script. */
+#define CTOR_LISTS_DEFINED_EXTERNALLY
+
+#undef DO_GLOBAL_CTORS_BODY
+#undef DO_GLOBAL_DTORS_BODY
+#undef INIT_SECTION_ASM_OP
+
+#define UNIQUE_SECTION_P(DECL) DECL_ONE_ONLY (DECL)
+
+#define SUPPORTS_ONE_ONLY 1
+
+/* A C statement to output something to the assembler file to switch to section
+ NAME for object DECL which is either a FUNCTION_DECL, a VAR_DECL or
+ NULL_TREE. Some target formats do not support arbitrary sections. Do not
+ define this macro in such cases. */
+#undef ASM_OUTPUT_SECTION_NAME
+#define ASM_OUTPUT_SECTION_NAME(STREAM, DECL, NAME, RELOC) \
+ do \
+ { \
+ if ((DECL) && TREE_CODE (DECL) == FUNCTION_DECL) \
+ fprintf (STREAM, "\t.section %s,\"x\"\n", NAME); \
+ else if ((DECL) && DECL_READONLY_SECTION (DECL, RELOC)) \
+ fprintf (STREAM, "\t.section %s,\"\"\n", NAME); \
+ else \
+ fprintf (STREAM, "\t.section %s,\"w\"\n", NAME); \
+ /* Functions may have been compiled at various levels of \
+ optimization so we can't use `same_size' here. \
+ Instead, have the linker pick one. */ \
+ if ((DECL) && DECL_ONE_ONLY (DECL)) \
+ fprintf (STREAM, "\t.linkonce %s\n", \
+ TREE_CODE (DECL) == FUNCTION_DECL \
+ ? "discard" : "same_size"); \
+ } \
+ while (0)
diff --git a/gcc/config/mcore/mcore-protos.h b/gcc/config/mcore/mcore-protos.h
new file mode 100644
index 00000000000..5069a62dbc1
--- /dev/null
+++ b/gcc/config/mcore/mcore-protos.h
@@ -0,0 +1,109 @@
+/* Prototypes for exported functions defined in mcore.c
+ Copyright (C) 2000 Free Software Foundation, Inc.
+ Contributed by Nick Clifton (nickc@cygnus.com)
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+extern char * mcore_output_jump_label_table PARAMS ((void));
+extern void mcore_expand_prolog PARAMS ((void));
+extern void mcore_expand_epilog PARAMS ((void));
+extern int mcore_const_ok_for_inline PARAMS ((long));
+extern int mcore_num_ones PARAMS ((int));
+extern int mcore_num_zeros PARAMS ((int));
+extern int mcore_initial_elimination_offset PARAMS ((int, int));
+extern int mcore_byte_offset PARAMS ((unsigned int));
+extern int mcore_halfword_offset PARAMS ((unsigned int));
+extern int mcore_const_trick_uses_not PARAMS ((long));
+extern void mcore_override_options PARAMS ((void));
+extern int mcore_dllexport_name_p PARAMS ((char *));
+extern int mcore_dllimport_name_p PARAMS ((char *));
+extern int mcore_naked_function_p PARAMS ((void));
+
+#ifdef TREE_CODE
+extern void mcore_unique_section PARAMS ((tree, int));
+extern void mcore_encode_section_info PARAMS ((tree));
+extern int mcore_valid_machine_decl_attribute PARAMS ((tree, tree, tree, tree));
+extern tree mcore_merge_machine_decl_attributes PARAMS ((tree, tree));
+
+#ifdef HAVE_MACHINE_MODES
+extern int mcore_function_arg_partial_nregs PARAMS ((CUMULATIVE_ARGS, enum machine_mode, tree, int));
+extern void mcore_setup_incoming_varargs PARAMS ((CUMULATIVE_ARGS, enum machine_mode, tree, int *));
+extern int mcore_num_arg_regs PARAMS ((enum machine_mode, tree));
+extern int mcore_must_pass_on_stack PARAMS ((enum machine_mode, tree));
+#endif /* HAVE_MACHINE_MODES */
+
+#ifdef RTX_CODE
+extern rtx mcore_function_value PARAMS ((tree, tree));
+#endif /* RTX_CODE */
+#endif /* TREE_CODE */
+
+#ifdef RTX_CODE
+
+extern rtx arch_compare_op0;
+extern rtx arch_compare_op1;
+
+extern char * mcore_output_bclri PARAMS ((rtx, int));
+extern char * mcore_output_bseti PARAMS ((rtx, int));
+extern char * mcore_output_cmov PARAMS ((rtx *, int, char *));
+extern char * mcore_output_call PARAMS ((rtx *, int));
+extern int mcore_is_dead PARAMS ((rtx, rtx));
+extern int mcore_expand_insv PARAMS ((rtx *));
+extern int mcore_modify_comparison PARAMS ((RTX_CODE));
+extern void mcore_expand_block_move PARAMS ((rtx, rtx, rtx *));
+extern rtx mcore_dependent_simplify_rtx PARAMS ((rtx, int, int, int, int *));
+extern void mcore_dependent_reorg PARAMS ((rtx));
+extern int mcore_const_costs PARAMS ((rtx, RTX_CODE));
+extern int mcore_and_cost PARAMS ((rtx));
+extern int mcore_ior_cost PARAMS ((rtx));
+extern char * mcore_output_andn PARAMS ((rtx, rtx *));
+extern void mcore_print_operand_address PARAMS ((FILE *, rtx));
+extern void mcore_print_operand PARAMS ((FILE *, rtx, int));
+extern rtx mcore_gen_compare_reg PARAMS ((RTX_CODE));
+extern int mcore_symbolic_address_p PARAMS ((rtx));
+extern enum reg_class mcore_reload_class PARAMS ((rtx, enum reg_class));
+extern int mcore_is_same_reg PARAMS ((rtx, rtx));
+extern int mcore_arith_S_operand PARAMS ((rtx));
+
+#ifdef HAVE_MACHINE_MODES
+extern char * mcore_output_move PARAMS ((rtx, rtx *, enum machine_mode));
+extern char * mcore_output_movedouble PARAMS ((rtx *, enum machine_mode));
+extern char * mcore_output_inline_const_forced PARAMS ((rtx, rtx *, enum machine_mode));
+extern int mcore_arith_reg_operand PARAMS ((rtx, enum machine_mode));
+extern int mcore_general_movsrc_operand PARAMS ((rtx, enum machine_mode));
+extern int mcore_general_movdst_operand PARAMS ((rtx, enum machine_mode));
+extern int mcore_reload_operand PARAMS ((rtx, enum machine_mode));
+extern int mcore_arith_J_operand PARAMS ((rtx, enum machine_mode));
+extern int mcore_arith_K_operand PARAMS ((rtx, enum machine_mode));
+extern int mcore_arith_K_operand_not_0 PARAMS ((rtx, enum machine_mode));
+extern int mcore_arith_M_operand PARAMS ((rtx, enum machine_mode));
+extern int mcore_arith_K_S_operand PARAMS ((rtx, enum machine_mode));
+extern int mcore_arith_imm_operand PARAMS ((rtx, enum machine_mode));
+extern int mcore_arith_any_imm_operand PARAMS ((rtx, enum machine_mode));
+extern int mcore_arith_O_operand PARAMS ((rtx, enum machine_mode));
+extern int mcore_literal_K_operand PARAMS ((rtx, enum machine_mode));
+extern int mcore_addsub_operand PARAMS ((rtx, enum machine_mode));
+extern int mcore_compare_operand PARAMS ((rtx, enum machine_mode));
+extern int mcore_load_multiple_operation PARAMS ((rtx, enum machine_mode));
+extern int mcore_store_multiple_operation PARAMS ((rtx, enum machine_mode));
+extern int mcore_call_address_operand PARAMS ((rtx, enum machine_mode));
+
+#ifdef TREE_CODE
+extern rtx mcore_function_arg PARAMS ((CUMULATIVE_ARGS, enum machine_mode, tree, int));
+#endif /* TREE_CODE */
+#endif /* HAVE_MACHINE_MODES */
+#endif /* RTX_CODE */
diff --git a/gcc/config/mcore/mcore.c b/gcc/config/mcore/mcore.c
new file mode 100644
index 00000000000..2fa8c818311
--- /dev/null
+++ b/gcc/config/mcore/mcore.c
@@ -0,0 +1,3574 @@
+/* Output routines for Motorola MCore processor
+ Copyright (C) 1993, 1999, 2000 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+#include <stdio.h>
+#include "assert.h"
+#include "gansidecl.h"
+
+#include "config.h"
+#include "rtl.h"
+#include "mcore.h"
+
+#include "regs.h"
+#include "hard-reg-set.h"
+#include "real.h"
+#include "insn-config.h"
+#include "conditions.h"
+#include "insn-flags.h"
+#include "tree.h"
+#include "output.h"
+#include "insn-attr.h"
+#include "flags.h"
+#include "obstack.h"
+#include "expr.h"
+#include "reload.h"
+#include "recog.h"
+#include "function.h"
+#include "ggc.h"
+#include "toplev.h"
+#include "mcore-protos.h"
+
+static int const_ok_for_mcore PARAMS ((int));
+static int try_constant_tricks PARAMS ((long, int *, int *));
+
+/* Maximum size we are allowed to grow the stack in a single operation.
+ If we want more, we must do it in increments of at most this size.
+ If this value is 0, we don't check at all. */
+const char * mcore_stack_increment_string = 0;
+int mcore_stack_increment = STACK_UNITS_MAXSTEP;
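+/* For instance, assuming the step size were 4096 bytes (the real value
+   comes from STACK_UNITS_MAXSTEP in mcore.h), a request to extend the
+   stack by 10000 bytes would be emitted by output_stack_adjust() below
+   as two 4096-byte decrements, each followed by a store to the new
+   stack top, and then a final 1808-byte decrement.  */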
+
+/* For dumping information about frame sizes. */
+char * mcore_current_function_name = 0;
+long mcore_current_compilation_timestamp = 0;
+
+/* Global variables for machine-dependent things. */
+
+/* Saved operands from the last compare to use when we generate an scc
+ or bcc insn. */
+rtx arch_compare_op0;
+rtx arch_compare_op1;
+
+/* Provides the class number of the smallest class containing
+ reg number. */
+int regno_reg_class[FIRST_PSEUDO_REGISTER] =
+{
+ GENERAL_REGS, ONLYR1_REGS, LRW_REGS, LRW_REGS,
+ LRW_REGS, LRW_REGS, LRW_REGS, LRW_REGS,
+ LRW_REGS, LRW_REGS, LRW_REGS, LRW_REGS,
+ LRW_REGS, LRW_REGS, LRW_REGS, GENERAL_REGS,
+ GENERAL_REGS, C_REGS, NO_REGS, NO_REGS,
+};
+
+/* Provide reg_class from a letter such as appears in the machine
+ description. */
+enum reg_class reg_class_from_letter[] =
+{
+ /* a */ LRW_REGS, /* b */ ONLYR1_REGS, /* c */ C_REGS, /* d */ NO_REGS,
+ /* e */ NO_REGS, /* f */ NO_REGS, /* g */ NO_REGS, /* h */ NO_REGS,
+ /* i */ NO_REGS, /* j */ NO_REGS, /* k */ NO_REGS, /* l */ NO_REGS,
+ /* m */ NO_REGS, /* n */ NO_REGS, /* o */ NO_REGS, /* p */ NO_REGS,
+ /* q */ NO_REGS, /* r */ GENERAL_REGS, /* s */ NO_REGS, /* t */ NO_REGS,
+ /* u */ NO_REGS, /* v */ NO_REGS, /* w */ NO_REGS, /* x */ ALL_REGS,
+ /* y */ NO_REGS, /* z */ NO_REGS
+};
+
+/* Adjust the stack in DIRECTION by SIZE bytes.  When extending the stack
+   by more than mcore_stack_increment bytes, do it in steps of at most
+   that size, storing to the new stack top after each step. */
+static void
+output_stack_adjust (direction, size)
+ int direction;
+ int size;
+{
+ /* If extending stack a lot, we do it incrementally. */
+ if (direction < 0 && size > mcore_stack_increment && mcore_stack_increment > 0)
+ {
+ rtx tmp = gen_rtx (REG, SImode, 1);
+ rtx memref;
+ emit_insn (gen_movsi (tmp, GEN_INT (mcore_stack_increment)));
+ do
+ {
+ emit_insn (gen_subsi3 (stack_pointer_rtx, stack_pointer_rtx, tmp));
+ memref = gen_rtx (MEM, SImode, stack_pointer_rtx);
+ MEM_VOLATILE_P (memref) = 1;
+ emit_insn (gen_movsi (memref, stack_pointer_rtx));
+ size -= mcore_stack_increment;
+ }
+ while (size > mcore_stack_increment);
+
+ /* 'size' is now the residual for the last adjustment, which doesn't
+ * require a probe. */
+ }
+
+ if (size)
+ {
+ rtx insn;
+ rtx val = GEN_INT (size);
+
+ if (size > 32)
+ {
+ rtx nval = gen_rtx (REG, SImode, 1);
+ emit_insn (gen_movsi (nval, val));
+ val = nval;
+ }
+
+ if (direction > 0)
+ insn = gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, val);
+ else
+ insn = gen_subsi3 (stack_pointer_rtx, stack_pointer_rtx, val);
+
+ emit_insn (insn);
+ }
+}
+
+/* Work out the registers which need to be saved, both as a mask and a
+ count. */
+static int
+calc_live_regs (count)
+ int * count;
+{
+ int reg;
+ int live_regs_mask = 0;
+
+ * count = 0;
+
+ for (reg = 0; reg < FIRST_PSEUDO_REGISTER; reg++)
+ {
+ if (regs_ever_live[reg] && !call_used_regs[reg])
+ {
+ (*count)++;
+ live_regs_mask |= (1 << reg);
+ }
+ }
+
+ return live_regs_mask;
+}
+
+/* Print the operand address in x to the stream. */
+void
+mcore_print_operand_address (stream, x)
+ FILE * stream;
+ rtx x;
+{
+ switch (GET_CODE (x))
+ {
+ case REG:
+ fprintf (stream, "(%s)", reg_names[REGNO (x)]);
+ break;
+
+ case PLUS:
+ {
+ rtx base = XEXP (x, 0);
+ rtx index = XEXP (x, 1);
+
+ if (GET_CODE (base) != REG)
+ {
+ /* Ensure that BASE is a register (one of them must be). */
+ rtx temp = base;
+ base = index;
+ index = temp;
+ }
+
+ switch (GET_CODE (index))
+ {
+ case CONST_INT:
+ fprintf (stream, "(%s,%d)", reg_names[REGNO(base)],
+ INTVAL (index));
+ break;
+
+ default:
+ debug_rtx (x);
+
+ abort ();
+ }
+ }
+
+ break;
+
+ default:
+ output_addr_const (stream, x);
+ break;
+ }
+}
+
+/* Print operand x (an rtx) in assembler syntax to file stream
+   according to modifier code.
+
+   'R'  print the next register or memory location along, i.e. the lsw in
+        a double word value
+   'O'  print a constant without the #
+   'M'  print a constant as its negative
+   'P'  print log2 of a power of two
+   'Q'  print log2 of an inverse of a power of two
+   'N'  print log2 of a constant plus one (-1 is printed as 32)
+   'U'  print register for ldm/stm instruction
+   'x'  print a constant in hexadecimal
+   'X'  print byte number for xtrbN instruction */
+void
+mcore_print_operand (stream, x, code)
+ FILE * stream;
+ rtx x;
+ int code;
+{
+ switch (code)
+ {
+ case 'N':
+ if (INTVAL(x) == -1)
+ fprintf (asm_out_file, "32");
+ else
+ fprintf (asm_out_file, "%d", exact_log2 (INTVAL (x) + 1));
+ break;
+ case 'P':
+ fprintf (asm_out_file, "%d", exact_log2 (INTVAL (x)));
+ break;
+ case 'Q':
+ fprintf (asm_out_file, "%d", exact_log2 (~INTVAL (x)));
+ break;
+ case 'O':
+ fprintf (asm_out_file, "%d", INTVAL (x));
+ break;
+ case 'M':
+ fprintf (asm_out_file, "%d", - INTVAL (x));
+ break;
+ case 'R':
+ /* Next location along in memory or register. */
+ switch (GET_CODE (x))
+ {
+ case REG:
+ fputs (reg_names[REGNO (x) + 1], (stream));
+ break;
+ case MEM:
+ mcore_print_operand_address (stream,
+ XEXP (adj_offsettable_operand (x, 4), 0));
+ break;
+ default:
+ abort ();
+ }
+ break;
+ case 'U':
+ fprintf (asm_out_file, "%s-%s", reg_names[REGNO (x)],
+ reg_names[REGNO (x) + 3]);
+ break;
+ case 'x':
+ fprintf (asm_out_file, "0x%x", INTVAL (x));
+ break;
+ case 'X':
+ fprintf (asm_out_file, "%d", 3 - INTVAL (x) / 8);
+ break;
+
+ default:
+ switch (GET_CODE (x))
+ {
+ case REG:
+ fputs (reg_names[REGNO (x)], (stream));
+ break;
+ case MEM:
+ output_address (XEXP (x, 0));
+ break;
+ default:
+ output_addr_const (stream, x);
+ break;
+ }
+ break;
+ }
+}
+
+/* What does a constant cost? */
+int
+mcore_const_costs (exp, code)
+ rtx exp;
+ enum rtx_code code;
+{
+
+ int val = INTVAL (exp);
+
+ /* Easy constants. */
+ if ( CONST_OK_FOR_I (val)
+ || CONST_OK_FOR_M (val)
+ || CONST_OK_FOR_N (val)
+ || (code == PLUS && CONST_OK_FOR_L (val)))
+ return 1;
+ else if (code == AND
+ && ( CONST_OK_FOR_M (~val)
+ || CONST_OK_FOR_N (~val)))
+ return 2;
+ else if (code == PLUS
+ && ( CONST_OK_FOR_I (-val)
+ || CONST_OK_FOR_M (-val)
+ || CONST_OK_FOR_N (-val)))
+ return 2;
+
+ return 5;
+}
+
+/* What does an AND instruction cost?  We do this because immediates may
+   have been relaxed.  We want to ensure that cse will cse relaxed immediates
+   out.  Otherwise we'll get bad code (multiple reloads of the same constant). */
+int
+mcore_and_cost (x)
+ rtx x;
+{
+ int val;
+
+ if (GET_CODE (XEXP (x, 1)) != CONST_INT)
+ return 2;
+
+ val = INTVAL (XEXP (x, 1));
+
+ /* Do it directly. */
+ if (CONST_OK_FOR_K (val) || CONST_OK_FOR_M (~val))
+ return 2;
+ /* Takes one instruction to load. */
+ else if (const_ok_for_mcore (val))
+ return 3;
+ /* Takes two instructions to load. */
+ else if (TARGET_HARDLIT && mcore_const_ok_for_inline (val))
+ return 4;
+
+  /* Takes a lrw to load. */
+ return 5;
+}
+
+/* What does an OR cost?  See mcore_and_cost() above. */
+int
+mcore_ior_cost (x)
+ rtx x;
+{
+ int val;
+
+ if (GET_CODE (XEXP (x, 1)) != CONST_INT)
+ return 2;
+
+ val = INTVAL (XEXP (x, 1));
+
+ /* Do it directly with bclri. */
+ if (CONST_OK_FOR_M (val))
+ return 2;
+ /* Takes one instruction to load. */
+ else if (const_ok_for_mcore (val))
+ return 3;
+ /* Takes two instructions to load. */
+ else if (TARGET_HARDLIT && mcore_const_ok_for_inline (val))
+ return 4;
+
+ /* Takes a lrw to load. */
+ return 5;
+}
+
+/* Check to see if a comparison against a constant can be made more efficient
+ by incrementing/decrementing the constant to get one that is more efficient
+ to load. */
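+/* For example, a signed "x <= 0" cannot use cmplti directly (its
+   immediate range is 1-32, see mcore_gen_compare_reg below), but bumping
+   the constant turns it into the equivalent "x < 1", which can.  */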
+int
+mcore_modify_comparison (code)
+ enum rtx_code code;
+{
+ rtx op1 = arch_compare_op1;
+
+ if (GET_CODE (op1) == CONST_INT)
+ {
+ int val = INTVAL (op1);
+
+ switch (code)
+ {
+ case LE:
+ if (CONST_OK_FOR_J (val + 1))
+ {
+ arch_compare_op1 = GEN_INT (val + 1);
+ return 1;
+ }
+ break;
+
+ default:
+ break;
+ }
+ }
+
+ return 0;
+}
+
+/* Prepare the operands for a comparison. */
+rtx
+mcore_gen_compare_reg (code)
+ enum rtx_code code;
+{
+ rtx op0 = arch_compare_op0;
+ rtx op1 = arch_compare_op1;
+ rtx cc_reg = gen_rtx (REG, CCmode, CC_REG);
+
+ if (CONSTANT_P (op1) && GET_CODE (op1) != CONST_INT)
+ op1 = force_reg (SImode, op1);
+
+ /* cmpnei: 0-31 (K immediate)
+ cmplti: 1-32 (J immediate, 0 using btsti x,31) */
+ switch (code)
+ {
+ case EQ: /* use inverted condition, cmpne */
+ code = NE;
+ /* drop through */
+ case NE: /* use normal condition, cmpne */
+ if (GET_CODE (op1) == CONST_INT && ! CONST_OK_FOR_K (INTVAL (op1)))
+ op1 = force_reg (SImode, op1);
+ break;
+
+ case LE: /* use inverted condition, reversed cmplt */
+ code = GT;
+ /* drop through */
+ case GT: /* use normal condition, reversed cmplt */
+ if (GET_CODE (op1) == CONST_INT)
+ op1 = force_reg (SImode, op1);
+ break;
+
+ case GE: /* use inverted condition, cmplt */
+ code = LT;
+ /* drop through */
+ case LT: /* use normal condition, cmplt */
+ if (GET_CODE (op1) == CONST_INT &&
+ /* covered by btsti x,31 */
+ INTVAL (op1) != 0 &&
+ ! CONST_OK_FOR_J (INTVAL (op1)))
+ op1 = force_reg (SImode, op1);
+ break;
+
+ case GTU: /* use inverted condition, cmple */
+ if (GET_CODE (op1) == CONST_INT && INTVAL (op1) == 0)
+ {
+ /* Unsigned > 0 is the same as != 0, but we need
+ to invert the condition, so we want to set
+ code = EQ. This cannot be done however, as the
+ mcore does not support such a test. Instead we
+ cope with this case in the "bgtu" pattern itself
+ so we should never reach this point. */
+ /* code = EQ; */
+ abort ();
+ break;
+ }
+ code = LEU;
+ /* drop through */
+ case LEU: /* use normal condition, reversed cmphs */
+ if (GET_CODE (op1) == CONST_INT && INTVAL (op1) != 0)
+ op1 = force_reg (SImode, op1);
+ break;
+
+ case LTU: /* use inverted condition, cmphs */
+ code = GEU;
+ /* drop through */
+ case GEU: /* use normal condition, cmphs */
+ if (GET_CODE (op1) == CONST_INT && INTVAL (op1) != 0)
+ op1 = force_reg (SImode, op1);
+ break;
+
+ default:
+ break;
+ }
+
+ emit_insn (gen_rtx (SET, VOIDmode, cc_reg, gen_rtx (code, CCmode, op0, op1)));
+
+ return cc_reg;
+}
+
+
+int
+mcore_symbolic_address_p (x)
+ rtx x;
+{
+ switch (GET_CODE (x))
+ {
+ case SYMBOL_REF:
+ case LABEL_REF:
+ return 1;
+ case CONST:
+ x = XEXP (x, 0);
+ return ( (GET_CODE (XEXP (x, 0)) == SYMBOL_REF
+ || GET_CODE (XEXP (x, 0)) == LABEL_REF)
+ && GET_CODE (XEXP (x, 1)) == CONST_INT);
+ default:
+ return 0;
+ }
+}
+
+int
+mcore_call_address_operand (x, mode)
+ rtx x;
+ enum machine_mode mode;
+{
+ return register_operand (x, mode) || CONSTANT_P (x);
+}
+
+/* Functions to output assembly code for a function call. */
+char *
+mcore_output_call (operands, index)
+ rtx operands[];
+ int index;
+{
+ static char buffer[20];
+ rtx addr = operands [index];
+
+ if (REG_P (addr))
+ {
+ if (TARGET_CG_DATA)
+ {
+ if (mcore_current_function_name == 0)
+ abort ();
+
+ ASM_OUTPUT_CG_EDGE (asm_out_file, mcore_current_function_name,
+ "unknown", 1);
+ }
+
+ sprintf (buffer, "jsr\t%%%d", index);
+ }
+ else
+ {
+ if (TARGET_CG_DATA)
+ {
+ if (mcore_current_function_name == 0)
+ abort ();
+
+ if (GET_CODE (addr) != SYMBOL_REF)
+ abort ();
+
+ ASM_OUTPUT_CG_EDGE (asm_out_file, mcore_current_function_name, XSTR (addr, 0), 0);
+ }
+
+ sprintf (buffer, "jbsr\t%%%d", index);
+ }
+
+ return buffer;
+}
+
+/* Can we load a constant with a single instruction? */
+static int
+const_ok_for_mcore (value)
+ int value;
+{
+ if (value >= 0 && value <= 127)
+ return 1;
+
+ /* Try exact power of two. */
+ if ((value & (value - 1)) == 0)
+ return 1;
+
+ /* Try exact power of two - 1. */
+ if ((value & (value + 1)) == 0)
+ return 1;
+
+ return 0;
+}
+
+/* Can we load a constant inline with up to 2 instructions? */
+int
+mcore_const_ok_for_inline (value)
+ long value;
+{
+ int x, y;
+
+ return try_constant_tricks (value, & x, & y) > 0;
+}
+
+/* Are we loading the constant using a not? */
+int
+mcore_const_trick_uses_not (value)
+ long value;
+{
+ int x, y;
+
+ return try_constant_tricks (value, & x, & y) == 2;
+}
+
+/* Try tricks to load a constant inline and return the trick number if
+ success (0 is non-inlinable).
+ *
+ * 0: not inlinable
+ * 1: single instruction (do the usual thing)
+ * 2: single insn followed by a 'not'
+ * 3: single insn followed by an addi
+ * 4: single insn followed by a subi
+ * 5: single insn followed by rsubi
+ * 6: single insn followed by bseti
+ * 7: single insn followed by bclri
+ * 8: single insn followed by rotli
+ * 9: single insn followed by lsli
+ * 10: single insn followed by ixh
+ * 11: single insn followed by ixw
+ */
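+/* Worked example: for the value 0xFFFFFF00 the code below settles on
+   trick 2, since ~0xFFFFFF00 == 0xFF is a power of two minus one and so
+   loadable in one instruction; output_inline_const() then emits
+   (destination register shown as rN for illustration):
+
+       bmaski  rN,8        // rN = 0x000000FF
+       not     rN          // rN = 0xFFFFFF00
+ */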
+
+static int
+try_constant_tricks (value, x, y)
+ long value;
+ int * x;
+ int * y;
+{
+ int i;
+ unsigned bit, shf, rot;
+
+ if (const_ok_for_mcore (value))
+ return 1; /* do the usual thing */
+
+ if (TARGET_HARDLIT)
+ {
+ if (const_ok_for_mcore (~value))
+ {
+ *x = ~value;
+ return 2;
+ }
+
+ for (i = 1; i <= 32; i++)
+ {
+ if (const_ok_for_mcore (value - i))
+ {
+ *x = value - i;
+ *y = i;
+
+ return 3;
+ }
+
+ if (const_ok_for_mcore (value + i))
+ {
+ *x = value + i;
+ *y = i;
+
+ return 4;
+ }
+ }
+
+ bit = 0x80000000UL;
+
+ for (i = 0; i <= 31; i++)
+ {
+ if (const_ok_for_mcore (i - value))
+ {
+ *x = i - value;
+ *y = i;
+
+ return 5;
+ }
+
+ if (const_ok_for_mcore (value & ~bit))
+ {
+ *y = bit;
+ *x = value & ~bit;
+
+ return 6;
+ }
+
+ if (const_ok_for_mcore (value | bit))
+ {
+ *y = ~bit;
+ *x = value | bit;
+
+ return 7;
+ }
+
+ bit >>= 1;
+ }
+
+ shf = value;
+ rot = value;
+
+ for (i = 1; i < 31; i++)
+ {
+ int c;
+
+ /* MCore has rotate left. */
+ c = rot << 31;
+ rot >>= 1;
+ rot &= 0x7FFFFFFF;
+ rot |= c; /* Simulate rotate. */
+
+ if (const_ok_for_mcore (rot))
+ {
+ *y = i;
+ *x = rot;
+
+ return 8;
+ }
+
+ if (shf & 1)
+ shf = 0; /* Can't use logical shift, low order bit is one. */
+
+ shf >>= 1;
+
+ if (shf != 0 && const_ok_for_mcore (shf))
+ {
+ *y = i;
+ *x = shf;
+
+ return 9;
+ }
+ }
+
+ if ((value % 3) == 0 && const_ok_for_mcore (value / 3))
+ {
+ *x = value / 3;
+
+ return 10;
+ }
+
+ if ((value % 5) == 0 && const_ok_for_mcore (value / 5))
+ {
+ *x = value / 5;
+
+ return 11;
+ }
+ }
+
+ return 0;
+}
+
+
+/* Check whether REG is dead at insn FIRST.  This is done by searching ahead
+   for either the next use (i.e., REG is live), a death note, or a set of
+   REG.  Don't just use dead_or_set_p() since reload does not always mark
+   deaths (especially if PRESERVE_DEATH_NOTES_REGNO_P is not defined).  We
+   can ignore subregs by extracting the actual register.  BRC */
+int
+mcore_is_dead (first, reg)
+ rtx first;
+ rtx reg;
+{
+ rtx insn;
+
+ /* For mcore, subregs can't live independently of their parent regs. */
+ if (GET_CODE (reg) == SUBREG)
+ reg = SUBREG_REG (reg);
+
+ /* Dies immediately. */
+ if (dead_or_set_p (first, reg))
+ return 1;
+
+ /* Look for conclusive evidence of live/death, otherwise we have
+ to assume that it is live. */
+ for (insn = NEXT_INSN (first); insn; insn = NEXT_INSN (insn))
+ {
+ if (GET_CODE (insn) == JUMP_INSN)
+ return 0; /* We lose track, assume it is alive. */
+
+ else if (GET_CODE(insn) == CALL_INSN)
+ {
+	  /* Calls might use it for target or register parms. */
+ if (reg_referenced_p (reg, PATTERN (insn))
+ || find_reg_fusage (insn, USE, reg))
+ return 0;
+ else if (dead_or_set_p (insn, reg))
+ return 1;
+ }
+ else if (GET_CODE (insn) == INSN)
+ {
+ if (reg_referenced_p (reg, PATTERN (insn)))
+ return 0;
+ else if (dead_or_set_p (insn, reg))
+ return 1;
+ }
+ }
+
+ /* No conclusive evidence either way, we can not take the chance
+ that control flow hid the use from us -- "I'm not dead yet". */
+ return 0;
+}
+
+
+/* Count the number of ones in mask. */
+int
+mcore_num_ones (mask)
+ int mask;
+{
+ /* A trick to count set bits recently posted on comp.compilers */
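+  /* Each step sums adjacent bit-fields in parallel: 1-bit fields into
+     2-bit sums, then 2-bit fields into 4-bit sums, then nibbles into
+     byte sums; the final two lines add the four byte counts together
+     and keep only the low byte, which holds the total. */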
+ mask = (mask >> 1 & 0x55555555) + (mask & 0x55555555);
+ mask = ((mask >> 2) & 0x33333333) + (mask & 0x33333333);
+ mask = ((mask >> 4) + mask) & 0x0f0f0f0f;
+ mask = ((mask >> 8) + mask);
+
+ return (mask + (mask >> 16)) & 0xff;
+}
+
+/* Count the number of zeros in mask. */
+int
+mcore_num_zeros (mask)
+ int mask;
+{
+ return 32 - mcore_num_ones (mask);
+}
+
+/* Determine byte being masked. */
+int
+mcore_byte_offset (mask)
+ unsigned int mask;
+{
+ if (mask == 0x00ffffffUL)
+ return 0;
+ else if (mask == 0xff00ffffUL)
+ return 1;
+ else if (mask == 0xffff00ffUL)
+ return 2;
+ else if (mask == 0xffffff00UL)
+ return 3;
+
+ return -1;
+}
+
+/* Determine halfword being masked. */
+int
+mcore_halfword_offset (mask)
+ unsigned int mask;
+{
+ if (mask == 0x0000ffffL)
+ return 0;
+ else if (mask == 0xffff0000UL)
+ return 1;
+
+ return -1;
+}
+
+/* Output a series of bseti's corresponding to mask. */
+char *
+mcore_output_bseti (dst, mask)
+ rtx dst;
+ int mask;
+{
+ rtx out_operands[2];
+ int bit;
+
+ out_operands[0] = dst;
+
+ for (bit = 0; bit < 32; bit++)
+ {
+ if ((mask & 0x1) == 0x1)
+ {
+ out_operands[1] = GEN_INT (bit);
+
+ output_asm_insn ("bseti\t%0,%1", out_operands);
+ }
+ mask >>= 1;
+ }
+
+ return "";
+}
+
+/* Output a series of bclri's corresponding to mask. */
+char *
+mcore_output_bclri (dst, mask)
+ rtx dst;
+ int mask;
+{
+ rtx out_operands[2];
+ int bit;
+
+ out_operands[0] = dst;
+
+ for (bit = 0; bit < 32; bit++)
+ {
+ if ((mask & 0x1) == 0x0)
+ {
+ out_operands[1] = GEN_INT (bit);
+
+ output_asm_insn ("bclri\t%0,%1", out_operands);
+ }
+
+ mask >>= 1;
+ }
+
+ return "";
+}
+
+/* Output a conditional move of two constants that are +/- 1 within each
+ other. See the "movtK" patterns in mcore.md. I'm not sure this is
+ really worth the effort. */
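+/* For example, with constants {5, 4} and cmp_t set, the emitted sequence
+   is (destination shown as rN for illustration):
+
+       movi    rN,5        // load the easily-loadable constant
+       decf    rN          // decrement it when the condition is false
+
+   so rN ends up as 5 when the condition holds and 4 otherwise.  */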
+char *
+mcore_output_cmov (operands, cmp_t, test)
+ rtx operands[];
+ int cmp_t;
+ char * test;
+{
+ int load_value;
+ int adjust_value;
+ rtx out_operands[4];
+
+ out_operands[0] = operands[0];
+
+ /* check to see which constant is loadable */
+
+ if (const_ok_for_mcore (INTVAL (operands[1])))
+ {
+ out_operands[1] = operands[1];
+ out_operands[2] = operands[2];
+ }
+ else if (const_ok_for_mcore (INTVAL (operands[2])))
+ {
+ out_operands[1] = operands[2];
+ out_operands[2] = operands[1];
+
+ /* complement test since constants are swapped */
+ cmp_t = (cmp_t == 0);
+ }
+ load_value = INTVAL (out_operands[1]);
+ adjust_value = INTVAL (out_operands[2]);
+
+ /* first output the test if folded into the pattern */
+
+ if (test)
+ output_asm_insn (test, operands);
+
+ /* load the constant - for now, only support constants that can be
+ generated with a single instruction. maybe add general inlinable
+ constants later (this will increase the # of patterns since the
+ instruction sequence has a different length attribute). */
+
+ if (load_value >= 0 && load_value <= 127)
+ output_asm_insn ("movi\t%0,%1", out_operands);
+ else if ((load_value & (load_value - 1)) == 0)
+ output_asm_insn ("bgeni\t%0,%P1", out_operands);
+ else if ((load_value & (load_value + 1)) == 0)
+ output_asm_insn ("bmaski\t%0,%N1", out_operands);
+
+ /* output the constant adjustment */
+
+ if (load_value > adjust_value)
+ {
+ if (cmp_t)
+ output_asm_insn ("decf\t%0", out_operands);
+ else
+ output_asm_insn ("dect\t%0", out_operands);
+ }
+ else
+ {
+ if (cmp_t)
+ output_asm_insn ("incf\t%0", out_operands);
+ else
+ output_asm_insn ("inct\t%0", out_operands);
+ }
+
+ return "";
+}
+
+/* Output the peephole for moving a constant that gets not'ed followed
+ by an and, i.e. combine the not and the and into a single andn. BRC */
+char *
+mcore_output_andn (insn, operands)
+ rtx insn ATTRIBUTE_UNUSED;
+ rtx operands[];
+{
+ int x, y;
+ rtx out_operands[3];
+ char * load_op;
+ char buf[256];
+
+ if (try_constant_tricks (INTVAL (operands[1]), &x, &y) != 2)
+ abort ();
+
+ out_operands[0] = operands[0];
+ out_operands[1] = GEN_INT(x);
+ out_operands[2] = operands[2];
+
+ if (x >= 0 && x <= 127)
+ load_op = "movi\t%0,%1";
+ /* Try an exact power of two. */
+ else if ((x & (x - 1)) == 0)
+ load_op = "bgeni\t%0,%P1";
+ /* Try an exact power of two - 1. */
+ else if ((x & (x + 1)) == 0)
+ load_op = "bmaski\t%0,%N1";
+ else
+ load_op = "BADMOVI\t%0,%1";
+
+ sprintf (buf, "%s\n\tandn\t%%2,%%0", load_op);
+ output_asm_insn (buf, out_operands);
+
+ return "";
+}
+
+/* Output an inline constant. */
+static char *
+output_inline_const (mode, operands)
+ enum machine_mode mode;
+ rtx operands[];
+{
+ int x = 0, y = 0;
+ int trick_no;
+ rtx out_operands[3];
+ char buf[256];
+ char load_op[256];
+ char *dst_fmt;
+ int value;
+
+ value = INTVAL (operands[1]);
+
+ if ((trick_no = try_constant_tricks (value, &x, &y)) == 0)
+ {
+ /* lrw's are handled separately: Large inlinable constants
+ never get turned into lrw's. Our caller uses try_constant_tricks
+ to back off to an lrw rather than calling this routine. */
+ abort ();
+ }
+
+ if (trick_no == 1)
+ x = value;
+
+ /* Operands: 0 = dst, 1 = load immediate, 2 = immediate adjustment. */
+
+ out_operands[0] = operands[0];
+ out_operands[1] = GEN_INT (x);
+
+ if (trick_no > 2)
+ out_operands[2] = GEN_INT (y);
+
+ /* Select dst format based on mode. */
+
+ if (mode == DImode && (! TARGET_LITTLE_END))
+ dst_fmt = "%R0";
+ else
+ dst_fmt = "%0";
+
+ if (x >= 0 && x <= 127)
+ sprintf (load_op, "movi\t%s,%%1", dst_fmt);
+ /* Try exact power of two. */
+ else if ((x & (x - 1)) == 0)
+ sprintf (load_op, "bgeni\t%s,%%P1", dst_fmt);
+ /* Try exact power of two - 1. */
+ else if ((x & (x + 1)) == 0)
+ sprintf (load_op, "bmaski\t%s,%%N1", dst_fmt);
+ else
+ sprintf (load_op, "BADMOVI\t%s,%%1", dst_fmt);
+
+ switch (trick_no)
+ {
+ case 1:
+ strcpy (buf, load_op);
+ break;
+ case 2: /* not */
+ sprintf (buf, "%s\n\tnot\t%s\t// %d 0x%x", load_op, dst_fmt, value, value);
+ break;
+ case 3: /* add */
+ sprintf (buf, "%s\n\taddi\t%s,%%2\t// %d 0x%x", load_op, dst_fmt, value, value);
+ break;
+ case 4: /* sub */
+ sprintf (buf, "%s\n\tsubi\t%s,%%2\t// %d 0x%x", load_op, dst_fmt, value, value);
+ break;
+ case 5: /* rsub */
+ /* Never happens unless -mrsubi; see try_constant_tricks (). */
+ sprintf (buf, "%s\n\trsubi\t%s,%%2\t// %d 0x%x", load_op, dst_fmt, value, value);
+ break;
+ case 6: /* bset */
+ sprintf (buf, "%s\n\tbseti\t%s,%%P2\t// %d 0x%x", load_op, dst_fmt, value, value);
+ break;
+ case 7: /* bclr */
+ sprintf (buf, "%s\n\tbclri\t%s,%%Q2\t// %d 0x%x", load_op, dst_fmt, value, value);
+ break;
+ case 8: /* rotl */
+ sprintf (buf, "%s\n\trotli\t%s,%%2\t// %d 0x%x", load_op, dst_fmt, value, value);
+ break;
+ case 9: /* lsl */
+ sprintf (buf, "%s\n\tlsli\t%s,%%2\t// %d 0x%x", load_op, dst_fmt, value, value);
+ break;
+ case 10: /* ixh */
+ sprintf (buf, "%s\n\tixh\t%s,%s\t// %d 0x%x", load_op, dst_fmt, dst_fmt, value, value);
+ break;
+ case 11: /* ixw */
+ sprintf (buf, "%s\n\tixw\t%s,%s\t// %d 0x%x", load_op, dst_fmt, dst_fmt, value, value);
+ break;
+ default:
+ return "";
+ }
+
+ output_asm_insn (buf, out_operands);
+
+ return "";
+}
+
+/* Output a move of a word or less value. */
+char *
+mcore_output_move (insn, operands, mode)
+ rtx insn ATTRIBUTE_UNUSED;
+ rtx operands[];
+ enum machine_mode mode ATTRIBUTE_UNUSED;
+{
+ rtx dst = operands[0];
+ rtx src = operands[1];
+
+ if (GET_CODE (dst) == REG)
+ {
+ if (GET_CODE (src) == REG)
+ {
+ if (REGNO (src) == CC_REG) /* r-c */
+ return "mvc\t%0";
+ else
+ return "mov\t%0,%1"; /* r-r */
+ }
+ else if (GET_CODE (src) == MEM)
+ {
+ if (GET_CODE (XEXP (src, 0)) == LABEL_REF)
+ return "lrw\t%0,[%1]"; /* a-R */
+ else
+ return "ldw\t%0,%1"; /* r-m */
+ }
+ else if (GET_CODE (src) == CONST_INT)
+ {
+ int x, y;
+
+ if (CONST_OK_FOR_I (INTVAL (src))) /* r-I */
+ return "movi\t%0,%1";
+ else if (CONST_OK_FOR_M (INTVAL (src))) /* r-M */
+ return "bgeni\t%0,%P1\t// %1 %x1";
+ else if (CONST_OK_FOR_N (INTVAL (src))) /* r-N */
+ return "bmaski\t%0,%N1\t// %1 %x1";
+ else if (try_constant_tricks (INTVAL (src), &x, &y)) /* R-P */
+ return output_inline_const (SImode, operands); /* 1-2 insns */
+ else
+ return "lrw\t%0,%x1\t// %1"; /* get it from literal pool */
+ }
+ else
+ return "lrw\t%0, %1"; /* into the literal pool */
+ }
+ else if (GET_CODE (dst) == MEM) /* m-r */
+ return "stw\t%1,%0";
+
+ abort ();
+}
+
+/* Outputs a constant inline -- regardless of the cost.
+ Useful for things where we've gotten into trouble and think we'd
+ be doing an lrw into r15 (forbidden). This lets us get out of
+ that pickle even after register allocation. */
+char *
+mcore_output_inline_const_forced (insn, operands, mode)
+ rtx insn ATTRIBUTE_UNUSED;
+ rtx operands[];
+ enum machine_mode mode ATTRIBUTE_UNUSED;
+{
+ unsigned long value = INTVAL (operands[1]);
+ unsigned long ovalue = value;
+ struct piece
+ {
+ int low;
+ int shift;
+ }
+ part[6];
+ int i;
+
+ if (mcore_const_ok_for_inline (value))
+ return output_inline_const (SImode, operands);
+
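+ /* Peel off the low 5 bits at a time, recording how far the remaining
+ value must be shifted back, until what is left can be generated inline.
+ The pieces are replayed in reverse below: load the remainder first,
+ then alternately shift and add each saved 5-bit chunk. */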
+ for (i = 0; (unsigned) i < sizeof (part) / sizeof (part[0]); i++)
+ {
+ part[i].shift = 0;
+ part[i].low = (value & 0x1F);
+ value -= part[i].low;
+
+ if (mcore_const_ok_for_inline (value))
+ break;
+ else
+ {
+ value >>= 5;
+ part[i].shift = 5;
+
+ while ((value & 1) == 0)
+ {
+ part[i].shift++;
+ value >>= 1;
+ }
+
+ if (mcore_const_ok_for_inline (value))
+ break;
+ }
+ }
+
+ /* 5 bits per iteration, a maximum of 5 times == 25 bits and leaves
+ 7 bits left in the constant -- which we know we can cover with
+ a movi. The final value can't be zero otherwise we'd have stopped
+ in the previous iteration. */
+ if (value == 0 || ! mcore_const_ok_for_inline (value))
+ abort ();
+
+ /* Now, work our way backwards emitting the constant. */
+
+ /* Emit the value that remains -- it will be non-zero. */
+ operands[1] = GEN_INT (value);
+ output_asm_insn (output_inline_const (SImode, operands), operands);
+
+ while (i >= 0)
+ {
+ /* Shift anything we've already loaded. */
+ if (part[i].shift)
+ {
+ operands[2] = GEN_INT (part[i].shift);
+ output_asm_insn ("lsli %0,%2", operands);
+ value <<= part[i].shift;
+ }
+
+ /* Add anything we need into the low 5 bits. */
+ if (part[i].low != 0)
+ {
+ operands[2] = GEN_INT (part[i].low);
+ output_asm_insn ("addi %0,%2", operands);
+ value += part[i].low;
+ }
+
+ i--;
+ }
+
+ if (value != ovalue) /* sanity */
+ abort ();
+
+ /* We've output all the instructions. */
+ return "";
+}
+
+/* Return a sequence of instructions to perform DI or DF move.
+ Since the MCORE cannot move a DI or DF in one instruction, we have
+ to take care when we see overlapping source and dest registers. */
+char *
+mcore_output_movedouble (operands, mode)
+ rtx operands[];
+ enum machine_mode mode ATTRIBUTE_UNUSED;
+{
+ rtx dst = operands[0];
+ rtx src = operands[1];
+
+ if (GET_CODE (dst) == REG)
+ {
+ if (GET_CODE (src) == REG)
+ {
+ int dstreg = REGNO (dst);
+ int srcreg = REGNO (src);
+ /* Ensure the second source is not overwritten. */
+ if (srcreg + 1 == dstreg)
+ return "mov %R0,%R1\n\tmov %0,%1";
+ else
+ return "mov %0,%1\n\tmov %R0,%R1";
+ }
+ else if (GET_CODE (src) == MEM)
+ {
+ rtx memexp = XEXP (src, 0);
+ int dstreg = REGNO (dst);
+ int basereg = -1;
+
+ if (GET_CODE (memexp) == LABEL_REF)
+ return "lrw\t%0,[%1]\n\tlrw\t%R0,[%R1]";
+ else if (GET_CODE (memexp) == REG)
+ basereg = REGNO (memexp);
+ else if (GET_CODE (memexp) == PLUS)
+ {
+ if (GET_CODE (XEXP (memexp, 0)) == REG)
+ basereg = REGNO (XEXP (memexp, 0));
+ else if (GET_CODE (XEXP (memexp, 1)) == REG)
+ basereg = REGNO (XEXP (memexp, 1));
+ else
+ abort ();
+ }
+ else
+ abort ();
+
+ /* ??? The length attribute is wrong here. */
+ if (dstreg == basereg)
+ {
+ /* Just load them in reverse order. */
+ return "ldw\t%R0,%R1\n\tldw\t%0,%1";
+ /* XXX: Alternative: move basereg to basereg + 1
+ and then fall through. */
+ }
+ else
+ return "ldw\t%0,%1\n\tldw\t%R0,%R1";
+ }
+ else if (GET_CODE (src) == CONST_INT)
+ {
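+ /* For a constant, the register holding the low-order word gets the
+ value and the other register gets its sign extension (all ones for
+ a negative value, zero otherwise). */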
+ if (TARGET_LITTLE_END)
+ {
+ if (CONST_OK_FOR_I (INTVAL (src)))
+ output_asm_insn ("movi %0,%1", operands);
+ else if (CONST_OK_FOR_M (INTVAL (src)))
+ output_asm_insn ("bgeni %0,%P1", operands);
+ else if (INTVAL (src) == -1)
+ output_asm_insn ("bmaski %0,32", operands);
+ else if (CONST_OK_FOR_N (INTVAL (src)))
+ output_asm_insn ("bmaski %0,%N1", operands);
+ else
+ abort ();
+
+ if (INTVAL (src) < 0)
+ return "bmaski %R0,32";
+ else
+ return "movi %R0,0";
+ }
+ else
+ {
+ if (CONST_OK_FOR_I (INTVAL (src)))
+ output_asm_insn ("movi %R0,%1", operands);
+ else if (CONST_OK_FOR_M (INTVAL (src)))
+ output_asm_insn ("bgeni %R0,%P1", operands);
+ else if (INTVAL (src) == -1)
+ output_asm_insn ("bmaski %R0,32", operands);
+ else if (CONST_OK_FOR_N (INTVAL (src)))
+ output_asm_insn ("bmaski %R0,%N1", operands);
+ else
+ abort ();
+
+ if (INTVAL (src) < 0)
+ return "bmaski %0,32";
+ else
+ return "movi %0,0";
+ }
+ }
+ else
+ abort ();
+ }
+ else if (GET_CODE (dst) == MEM && GET_CODE (src) == REG)
+ return "stw\t%1,%0\n\tstw\t%R1,%R0";
+ else
+ abort ();
+}
+
+/* Predicates used by the templates. */
+
+/* Nonzero if OP can be the source of a simple move operation. */
+int
+mcore_general_movsrc_operand (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ /* Any (MEM LABEL_REF) is OK. That is a pc-relative load. */
+ if (GET_CODE (op) == MEM && GET_CODE (XEXP (op, 0)) == LABEL_REF)
+ return 1;
+
+ return general_operand (op, mode);
+}
+
+/* Nonzero if OP can be the destination of a simple move operation. */
+int
+mcore_general_movdst_operand (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ if (GET_CODE (op) == REG && REGNO (op) == CC_REG)
+ return 0;
+
+ return general_operand (op, mode);
+}
+
+/* Nonzero if OP is a normal arithmetic register. */
+int
+mcore_arith_reg_operand (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ if (! register_operand (op, mode))
+ return 0;
+
+ if (GET_CODE (op) == SUBREG)
+ op = SUBREG_REG (op);
+
+ if (GET_CODE (op) == REG)
+ return REGNO (op) != CC_REG;
+
+ return 1;
+}
+
+/* Nonzero if OP should be recognized during reload for an ixh/ixw
+ operand. See the ixh/ixw patterns. */
+int
+mcore_reload_operand (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ if (mcore_arith_reg_operand (op, mode))
+ return 1;
+
+ if (! reload_in_progress)
+ return 0;
+
+ return GET_CODE (op) == MEM;
+}
+
+/* Nonzero if OP is a valid source operand for an arithmetic insn. */
+int
+mcore_arith_J_operand (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ if (register_operand (op, mode))
+ return 1;
+
+ if (GET_CODE (op) == CONST_INT && CONST_OK_FOR_J (INTVAL (op)))
+ return 1;
+
+ return 0;
+}
+
+/* Nonzero if OP is a valid source operand for an arithmetic insn. */
+int
+mcore_arith_K_operand (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ if (register_operand (op, mode))
+ return 1;
+
+ if (GET_CODE (op) == CONST_INT && CONST_OK_FOR_K (INTVAL (op)))
+ return 1;
+
+ return 0;
+}
+
+/* Nonzero if OP is a valid source operand for a shift or rotate insn. */
+int
+mcore_arith_K_operand_not_0 (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ if (register_operand (op, mode))
+ return 1;
+
+ if ( GET_CODE (op) == CONST_INT
+ && CONST_OK_FOR_K (INTVAL (op))
+ && INTVAL (op) != 0)
+ return 1;
+
+ return 0;
+}
+
+int
+mcore_arith_K_S_operand (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ if (register_operand (op, mode))
+ return 1;
+
+ if (GET_CODE (op) == CONST_INT)
+ {
+ if (CONST_OK_FOR_K (INTVAL (op)) || CONST_OK_FOR_M (~INTVAL (op)))
+ return 1;
+ }
+
+ return 0;
+}
+
+int
+mcore_arith_S_operand (op)
+ rtx op;
+{
+ if (GET_CODE (op) == CONST_INT && CONST_OK_FOR_M (~INTVAL (op)))
+ return 1;
+
+ return 0;
+}
+
+int
+mcore_arith_M_operand (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ if (register_operand (op, mode))
+ return 1;
+
+ if (GET_CODE (op) == CONST_INT && CONST_OK_FOR_M (INTVAL (op)))
+ return 1;
+
+ return 0;
+}
+
+/* Nonzero if OP is a valid source operand for loading. */
+int
+mcore_arith_imm_operand (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ if (register_operand (op, mode))
+ return 1;
+
+ if (GET_CODE (op) == CONST_INT && const_ok_for_mcore (INTVAL (op)))
+ return 1;
+
+ return 0;
+}
+
+int
+mcore_arith_any_imm_operand (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ if (register_operand (op, mode))
+ return 1;
+
+ if (GET_CODE (op) == CONST_INT)
+ return 1;
+
+ return 0;
+}
+
+/* Nonzero if OP is a valid source operand for a cmov with two consts +/- 1. */
+int
+mcore_arith_O_operand (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ if (register_operand (op, mode))
+ return 1;
+
+ if (GET_CODE (op) == CONST_INT && CONST_OK_FOR_O (INTVAL (op)))
+ return 1;
+
+ return 0;
+}
+
+/* Nonzero if OP is a valid source operand for a btsti. */
+int
+mcore_literal_K_operand (op, mode)
+ rtx op;
+ enum machine_mode mode ATTRIBUTE_UNUSED;
+{
+ if (GET_CODE (op) == CONST_INT && CONST_OK_FOR_K (INTVAL (op)))
+ return 1;
+
+ return 0;
+}
+
+/* Nonzero if OP is a valid source operand for an add/sub insn. */
+int
+mcore_addsub_operand (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ if (register_operand (op, mode))
+ return 1;
+
+ if (GET_CODE (op) == CONST_INT)
+ {
+ return 1;
+
+ /* The following is removed because it precludes large constants from being
+ returned as valid source operands for an add/sub insn. While large
+ constants may not directly be used in an add/sub, they may be if first
+ loaded into a register. Thus, this predicate should indicate that they are
+ valid, and the constraint in mcore.md should control whether an additional
+ load to a register is needed. (see mcore.md, addsi) -- DAC 4/2/1998 */
+ /*
+ if (CONST_OK_FOR_J(INTVAL(op)) || CONST_OK_FOR_L(INTVAL(op)))
+ return 1;
+ */
+ }
+
+ return 0;
+}
+
+/* Nonzero if OP is a valid source operand for a compare operation. */
+int
+mcore_compare_operand (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ if (register_operand (op, mode))
+ return 1;
+
+ if (GET_CODE (op) == CONST_INT && INTVAL (op) == 0)
+ return 1;
+
+ return 0;
+}
+
+/* Expand insert bit field. BRC */
+int
+mcore_expand_insv (operands)
+ rtx operands[];
+{
+ int width = INTVAL (operands[1]);
+ int posn = INTVAL (operands[2]);
+ int mask;
+ rtx mreg, sreg, ereg;
+
+ /* To get width 1 insv, the test in store_bit_field() (expmed.c, line 191)
+ for width==1 must be removed. Look around line 368. This is something
+ we really want the md part to do. */
+
+ if (width == 1 && GET_CODE (operands[3]) == CONST_INT)
+ {
+ /* Do directly with bseti or bclri. */
+ /* RBE: 2/97 consider only low bit of constant. */
+ if ((INTVAL (operands[3]) & 1) == 0)
+ {
+ mask = ~(1 << posn);
+ emit_insn (gen_rtx (SET, SImode, operands[0],
+ gen_rtx (AND, SImode, operands[0], GEN_INT (mask))));
+ }
+ else
+ {
+ mask = 1 << posn;
+ emit_insn (gen_rtx (SET, SImode, operands[0],
+ gen_rtx (IOR, SImode, operands[0], GEN_INT (mask))));
+ }
+
+ return 1;
+ }
+
+ /* Look at some bitfield placements that we aren't interested in
+ handling ourselves, unless specifically directed to do so. */
+ if (! TARGET_W_FIELD)
+ return 0; /* Generally, give up about now. */
+
+ if (width == 8 && posn % 8 == 0)
+ /* Byte sized and aligned; let caller break it up. */
+ return 0;
+
+ if (width == 16 && posn % 16 == 0)
+ /* Short sized and aligned; let caller break it up. */
+ return 0;
+
+ /* The general case - we can do this a little bit better than what the
+ machine independent part tries. This will get rid of all the subregs
+ that mess up constant folding in combine when working with relaxed
+ immediates. */
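+ /* The sequence below clears the destination field with an AND of the
+ inverted field mask, masks and shifts the source into position, and
+ finally ORs it into the destination. Setting the entire field to a
+ constant skips straight to the OR. */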
+
+ /* If setting the entire field, do it directly. */
+ if (GET_CODE (operands[3]) == CONST_INT &&
+ INTVAL (operands[3]) == ((1 << width) - 1))
+ {
+ mreg = force_reg (SImode, GEN_INT (INTVAL (operands[3]) << posn));
+ emit_insn (gen_rtx (SET, SImode, operands[0],
+ gen_rtx (IOR, SImode, operands[0], mreg)));
+ return 1;
+ }
+
+ /* Generate the clear mask. */
+ mreg = force_reg (SImode, GEN_INT (~(((1 << width) - 1) << posn)));
+
+ /* Clear the field, to overlay it later with the source. */
+ emit_insn (gen_rtx (SET, SImode, operands[0],
+ gen_rtx (AND, SImode, operands[0], mreg)));
+
+ /* If the source is constant 0, we've nothing to add back. */
+ if (GET_CODE (operands[3]) == CONST_INT && INTVAL (operands[3]) == 0)
+ return 1;
+
+ /* XXX: Should we worry about more games with constant values?
+ We've covered the high profile: set/clear single-bit and many-bit
+ fields. How often do we see "arbitrary bit pattern" constants? */
+ sreg = copy_to_mode_reg (SImode, operands[3]);
+
+ /* Extract src as same width as dst (needed for signed values). We
+ always have to do this since we widen everything to SImode.
+ We don't have to mask if we're shifting this up against the
+ MSB of the register (e.g., the shift will push out any hi-order
+ bits). */
+ if (width + posn != GET_MODE_BITSIZE (SImode))
+ {
+ ereg = force_reg (SImode, GEN_INT ((1 << width) - 1));
+ emit_insn (gen_rtx (SET, SImode, sreg,
+ gen_rtx (AND, SImode, sreg, ereg)));
+ }
+
+ /* Insert source value in dest. */
+ if (posn != 0)
+ emit_insn (gen_rtx (SET, SImode, sreg,
+ gen_rtx (ASHIFT, SImode, sreg, GEN_INT (posn))));
+
+ emit_insn (gen_rtx (SET, SImode, operands[0],
+ gen_rtx (IOR, SImode, operands[0], sreg)));
+
+ return 1;
+}
+
+/* Return 1 if OP is a load multiple operation. It is known to be a
+ PARALLEL and the first section will be tested. */
+int
+mcore_load_multiple_operation (op, mode)
+ rtx op;
+ enum machine_mode mode ATTRIBUTE_UNUSED;
+{
+ int count = XVECLEN (op, 0);
+ int dest_regno;
+ rtx src_addr;
+ int i;
+
+ /* Perform a quick check so we don't blow up below. */
+ if (count <= 1
+ || GET_CODE (XVECEXP (op, 0, 0)) != SET
+ || GET_CODE (SET_DEST (XVECEXP (op, 0, 0))) != REG
+ || GET_CODE (SET_SRC (XVECEXP (op, 0, 0))) != MEM)
+ return 0;
+
+ dest_regno = REGNO (SET_DEST (XVECEXP (op, 0, 0)));
+ src_addr = XEXP (SET_SRC (XVECEXP (op, 0, 0)), 0);
+
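+ /* Each subsequent element must load SImode register dest_regno + i
+ from the address src_addr + i * 4. */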
+ for (i = 1; i < count; i++)
+ {
+ rtx elt = XVECEXP (op, 0, i);
+
+ if (GET_CODE (elt) != SET
+ || GET_CODE (SET_DEST (elt)) != REG
+ || GET_MODE (SET_DEST (elt)) != SImode
+ || REGNO (SET_DEST (elt)) != dest_regno + i
+ || GET_CODE (SET_SRC (elt)) != MEM
+ || GET_MODE (SET_SRC (elt)) != SImode
+ || GET_CODE (XEXP (SET_SRC (elt), 0)) != PLUS
+ || ! rtx_equal_p (XEXP (XEXP (SET_SRC (elt), 0), 0), src_addr)
+ || GET_CODE (XEXP (XEXP (SET_SRC (elt), 0), 1)) != CONST_INT
+ || INTVAL (XEXP (XEXP (SET_SRC (elt), 0), 1)) != i * 4)
+ return 0;
+ }
+
+ return 1;
+}
+
+/* Similar, but tests for store multiple. */
+int
+mcore_store_multiple_operation (op, mode)
+ rtx op;
+ enum machine_mode mode ATTRIBUTE_UNUSED;
+{
+ int count = XVECLEN (op, 0);
+ int src_regno;
+ rtx dest_addr;
+ int i;
+
+ /* Perform a quick check so we don't blow up below. */
+ if (count <= 1
+ || GET_CODE (XVECEXP (op, 0, 0)) != SET
+ || GET_CODE (SET_DEST (XVECEXP (op, 0, 0))) != MEM
+ || GET_CODE (SET_SRC (XVECEXP (op, 0, 0))) != REG)
+ return 0;
+
+ src_regno = REGNO (SET_SRC (XVECEXP (op, 0, 0)));
+ dest_addr = XEXP (SET_DEST (XVECEXP (op, 0, 0)), 0);
+
+ for (i = 1; i < count; i++)
+ {
+ rtx elt = XVECEXP (op, 0, i);
+
+ if (GET_CODE (elt) != SET
+ || GET_CODE (SET_SRC (elt)) != REG
+ || GET_MODE (SET_SRC (elt)) != SImode
+ || REGNO (SET_SRC (elt)) != src_regno + i
+ || GET_CODE (SET_DEST (elt)) != MEM
+ || GET_MODE (SET_DEST (elt)) != SImode
+ || GET_CODE (XEXP (SET_DEST (elt), 0)) != PLUS
+ || ! rtx_equal_p (XEXP (XEXP (SET_DEST (elt), 0), 0), dest_addr)
+ || GET_CODE (XEXP (XEXP (SET_DEST (elt), 0), 1)) != CONST_INT
+ || INTVAL (XEXP (XEXP (SET_DEST (elt), 0), 1)) != i * 4)
+ return 0;
+ }
+
+ return 1;
+}
+
+/* ??? Block move stuff stolen from m88k. This code has not been
+ verified for correctness. */
+
+/* Emit code to perform a block move. Choose the best method.
+
+ OPERANDS[0] is the destination.
+ OPERANDS[1] is the source.
+ OPERANDS[2] is the size.
+ OPERANDS[3] is the alignment safe to use. */
+
+/* Emit code to perform a block move with an offset sequence of ldw/st
+ instructions (..., ldw 0, stw 1, ldw 1, stw 0, ...). SIZE and ALIGN are
+ known constants. DEST and SRC are registers. OFFSET is the known
+ starting point for the output pattern. */
+
+static enum machine_mode mode_from_align[] =
+{
+ VOIDmode, QImode, HImode, VOIDmode, SImode,
+ VOIDmode, VOIDmode, VOIDmode, DImode
+};
+
+static void
+block_move_sequence (dest, dst_mem, src, src_mem, size, align, offset)
+ rtx dest, dst_mem;
+ rtx src, src_mem;
+ int size;
+ int align;
+ int offset;
+{
+ rtx temp[2];
+ enum machine_mode mode[2];
+ int amount[2];
+ int active[2];
+ int phase = 0;
+ int next;
+ int offset_ld = offset;
+ int offset_st = offset;
+
+ active[0] = active[1] = FALSE;
+
+ /* Establish parameters for the first load and for the second load if
+ it is known to be the same mode as the first. */
+ amount[0] = amount[1] = align;
+
+ mode[0] = mode_from_align[align];
+
+ temp[0] = gen_reg_rtx (mode[0]);
+
+ if (size >= 2 * align)
+ {
+ mode[1] = mode[0];
+ temp[1] = gen_reg_rtx (mode[1]);
+ }
+
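+ /* Alternate between the two temporaries so that each iteration can
+ issue the load for the next chunk while storing the chunk that was
+ loaded on the previous pass. */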
+ do
+ {
+ rtx srcp, dstp;
+
+ next = phase;
+ phase = !phase;
+
+ if (size > 0)
+ {
+ /* Change modes as the sequence tails off. */
+ if (size < amount[next])
+ {
+ amount[next] = (size >= 4 ? 4 : (size >= 2 ? 2 : 1));
+ mode[next] = mode_from_align[amount[next]];
+ temp[next] = gen_reg_rtx (mode[next]);
+ }
+
+ size -= amount[next];
+ srcp = gen_rtx (MEM,
+#if 0
+ MEM_IN_STRUCT_P (src_mem) ? mode[next] : BLKmode,
+#else
+ mode[next],
+#endif
+ gen_rtx (PLUS, Pmode, src,
+ gen_rtx (CONST_INT, SImode, offset_ld)));
+
+ RTX_UNCHANGING_P (srcp) = RTX_UNCHANGING_P (src_mem);
+ MEM_VOLATILE_P (srcp) = MEM_VOLATILE_P (src_mem);
+ MEM_IN_STRUCT_P (srcp) = 1;
+ emit_insn (gen_rtx (SET, VOIDmode, temp[next], srcp));
+ offset_ld += amount[next];
+ active[next] = TRUE;
+ }
+
+ if (active[phase])
+ {
+ active[phase] = FALSE;
+
+ dstp = gen_rtx (MEM,
+#if 0
+ MEM_IN_STRUCT_P (dst_mem) ? mode[phase] : BLKmode,
+#else
+ mode[phase],
+#endif
+ gen_rtx (PLUS, Pmode, dest,
+ gen_rtx (CONST_INT, SImode, offset_st)));
+
+ RTX_UNCHANGING_P (dstp) = RTX_UNCHANGING_P (dst_mem);
+ MEM_VOLATILE_P (dstp) = MEM_VOLATILE_P (dst_mem);
+ MEM_IN_STRUCT_P (dstp) = 1;
+ emit_insn (gen_rtx (SET, VOIDmode, dstp, temp[phase]));
+ offset_st += amount[phase];
+ }
+ }
+ while (active[next]);
+}
+
+void
+mcore_expand_block_move (dst_mem, src_mem, operands)
+ rtx dst_mem;
+ rtx src_mem;
+ rtx * operands;
+{
+ int align = INTVAL (operands[3]);
+ int bytes;
+
+ if (GET_CODE (operands[2]) == CONST_INT)
+ {
+ bytes = INTVAL (operands[2]);
+
+ if (bytes <= 0)
+ return;
+ if (align > 4)
+ align = 4;
+
+ /* RBE: bumped 1 and 2 byte align from 1 and 2 to 4 and 8 bytes before
+ we give up and go to memcpy. */
+ if ((align == 4 && (bytes <= 4*4
+ || ((bytes & 01) == 0 && bytes <= 8*4)
+ || ((bytes & 03) == 0 && bytes <= 16*4)))
+ || (align == 2 && bytes <= 4*2)
+ || (align == 1 && bytes <= 4*1))
+ {
+ block_move_sequence (operands[0], dst_mem, operands[1], src_mem,
+ bytes, align, 0);
+ return;
+ }
+ }
+
+ /* If we get here, just use the library routine. */
+ emit_library_call (gen_rtx (SYMBOL_REF, Pmode, "memcpy"), 0, VOIDmode, 3,
+ operands[0], Pmode, operands[1], Pmode, operands[2],
+ SImode);
+}
+
+
+/* Code to generate prologue and epilogue sequences. */
+static int number_of_regs_before_varargs;
+/* Set by SETUP_INCOMING_VARARGS to indicate to prolog that this is
+ for a varargs function. */
+static int current_function_anonymous_args;
+
+
+#define STACK_BYTES (STACK_BOUNDARY/BITS_PER_UNIT)
+#define STORE_REACH (64) /* Maximum displacement of word store + 4. */
+#define ADDI_REACH (32) /* Maximum addi operand. */
+
+struct mcore_frame
+{
+ int arg_size; /* Stdarg spills (bytes). */
+ int reg_size; /* Non-volatile reg saves (bytes). */
+ int reg_mask; /* Non-volatile reg saves. */
+ int local_size; /* Locals. */
+ int outbound_size; /* Arg overflow on calls out. */
+ int pad_outbound;
+ int pad_local;
+ int pad_reg;
+
+ /* Describe the steps we'll use to grow the stack. */
+#define MAX_STACK_GROWS 4 /* Gives us some spare space. */
+ int growth[MAX_STACK_GROWS];
+ int arg_offset;
+ int reg_offset;
+ int reg_growth;
+ int local_growth;
+};
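+/* The frame is laid out, from the highest addresses downwards: the
+ stdarg/argument spill area, the saved registers, the locals (plus any
+ padding), and finally the outgoing argument area nearest the stack
+ pointer. layout_mcore_frame below fills in a struct mcore_frame
+ describing this layout and the steps used to allocate it. */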
+
+static void
+layout_mcore_frame (infp)
+ struct mcore_frame * infp;
+{
+ int n;
+ unsigned int i;
+ int nbytes;
+ int regarg;
+ int localregarg;
+ int localreg;
+ int outbounds;
+ unsigned int growths;
+ int step;
+
+ /* Might have to spill bytes to re-assemble a big argument that
+ was passed partially in registers and partially on the stack. */
+ nbytes = current_function_pretend_args_size;
+
+ /* Determine how much space for spilled anonymous args (e.g., stdarg). */
+ if (current_function_anonymous_args)
+ nbytes += (NPARM_REGS - number_of_regs_before_varargs) * UNITS_PER_WORD;
+
+ infp->arg_size = nbytes;
+
+ /* How much space to save non-volatile registers we stomp. */
+ infp->reg_mask = calc_live_regs (& n);
+ infp->reg_size = n * 4;
+
+ /* And the rest of it... locals and space for overflowed outbounds. */
+ infp->local_size = get_frame_size ();
+ infp->outbound_size = current_function_outgoing_args_size;
+
+ /* Make sure we have a whole number of words for the locals. */
+ if (infp->local_size % STACK_BYTES)
+ infp->local_size = (infp->local_size + STACK_BYTES - 1) & ~ (STACK_BYTES -1);
+
+ /* Only thing we know we have to pad is the outbound space, since
+ we've aligned our locals assuming that base of locals is aligned. */
+ infp->pad_local = 0;
+ infp->pad_reg = 0;
+ infp->pad_outbound = 0;
+ if (infp->outbound_size % STACK_BYTES)
+ infp->pad_outbound = STACK_BYTES - (infp->outbound_size % STACK_BYTES);
+
+ /* Now we see how we want to stage the prologue so that it does
+ the most appropriate stack growth and register saves to either:
+ (1) run fast,
+ (2) reduce instruction space, or
+ (3) reduce stack space. */
+ for (i = 0; i < sizeof (infp->growth) / sizeof (infp->growth[0]); i++)
+ infp->growth[i] = 0;
+
+ regarg = infp->reg_size + infp->arg_size;
+ localregarg = infp->local_size + regarg;
+ localreg = infp->local_size + infp->reg_size;
+ outbounds = infp->outbound_size + infp->pad_outbound;
+ growths = 0;
+
+ /* XXX: Consider one where we consider localregarg + outbound too! */
+
+ /* If the frame is <= 32 bytes and an stm would only cover <= 2 registers,
+ use stw's with offsets and buy the frame in one shot. */
+ if (localregarg <= ADDI_REACH
+ && (infp->reg_size <= 8 || (infp->reg_mask & 0xc000) != 0xc000))
+ {
+ /* Make sure we'll be aligned. */
+ if (localregarg % STACK_BYTES)
+ infp->pad_reg = STACK_BYTES - (localregarg % STACK_BYTES);
+
+ step = localregarg + infp->pad_reg;
+ infp->reg_offset = infp->local_size;
+
+ if (outbounds + step <= ADDI_REACH && !frame_pointer_needed)
+ {
+ step += outbounds;
+ infp->reg_offset += outbounds;
+ outbounds = 0;
+ }
+
+ infp->arg_offset = step - 4;
+ infp->growth[growths++] = step;
+ infp->reg_growth = growths;
+ infp->local_growth = growths;
+
+ /* If we haven't already folded it in... */
+ if (outbounds)
+ infp->growth[growths++] = outbounds;
+
+ goto finish;
+ }
+
+ /* Frame can't be done with a single subi, but can be done with 2
+ insns. If the 'stm' is getting <= 2 registers, we use stw's and
+ shift some of the stack purchase into the first subi, so both are
+ single instructions. */
+ if (localregarg <= STORE_REACH
+ && (infp->local_size > ADDI_REACH)
+ && (infp->reg_size <= 8 || (infp->reg_mask & 0xc000) != 0xc000))
+ {
+ int all;
+
+ /* Make sure we'll be aligned; use either pad_reg or pad_local. */
+ if (localregarg % STACK_BYTES)
+ infp->pad_reg = STACK_BYTES - (localregarg % STACK_BYTES);
+
+ all = localregarg + infp->pad_reg + infp->pad_local;
+ step = ADDI_REACH; /* As much up front as we can. */
+ if (step > all)
+ step = all;
+
+ /* XXX: Consider whether step will still be aligned; we believe so. */
+ infp->arg_offset = step - 4;
+ infp->growth[growths++] = step;
+ infp->reg_growth = growths;
+ infp->reg_offset = step - infp->pad_reg - infp->reg_size;
+ all -= step;
+
+ /* Can we fold in any space required for outbounds? */
+ if (outbounds + all <= ADDI_REACH && !frame_pointer_needed)
+ {
+ all += outbounds;
+ outbounds = 0;
+ }
+
+ /* Get the rest of the locals in place. */
+ step = all;
+ infp->growth[growths++] = step;
+ infp->local_growth = growths;
+ all -= step;
+
+ assert (all == 0);
+
+ /* Finish off if we need to do so... */
+ if (outbounds)
+ infp->growth[growths++] = outbounds;
+
+ goto finish;
+ }
+
+ /* Registers + args is nicely aligned, so we'll buy that in one shot.
+ Then we buy the rest of the frame in 1 or 2 steps depending on
+ whether we need a frame pointer. */
+ if ((regarg % STACK_BYTES) == 0)
+ {
+ infp->growth[growths++] = regarg;
+ infp->reg_growth = growths;
+ infp->arg_offset = regarg - 4;
+ infp->reg_offset = 0;
+
+ if (infp->local_size % STACK_BYTES)
+ infp->pad_local = STACK_BYTES - (infp->local_size % STACK_BYTES);
+
+ step = infp->local_size + infp->pad_local;
+
+ if (!frame_pointer_needed)
+ {
+ step += outbounds;
+ outbounds = 0;
+ }
+
+ infp->growth[growths++] = step;
+ infp->local_growth = growths;
+
+ /* If there's any left to be done... */
+ if (outbounds)
+ infp->growth[growths++] = outbounds;
+
+ goto finish;
+ }
+
+ /* XXX: optimizations that we'll want to play with....
+ * -- regarg is not aligned, but it's a small number of registers;
+ * use some of localsize so that regarg is aligned and then
+ * save the registers.
+ *
+ */
+
+ /* Simple encoding; plods down the stack buying the pieces as it goes.
+ * -- does not optimize space consumption.
+ * -- does not attempt to optimize instruction counts.
+ * -- but it is safe for all alignments.
+ */
+ if (regarg % STACK_BYTES != 0)
+ infp->pad_reg = STACK_BYTES - (regarg % STACK_BYTES);
+
+ infp->growth[growths++] = infp->arg_size + infp->reg_size + infp->pad_reg;
+ infp->reg_growth = growths;
+ infp->arg_offset = infp->growth[0] - 4;
+ infp->reg_offset = 0;
+
+ if (frame_pointer_needed)
+ {
+ if (infp->local_size % STACK_BYTES != 0)
+ infp->pad_local = STACK_BYTES - (infp->local_size % STACK_BYTES);
+
+ infp->growth[growths++] = infp->local_size + infp->pad_local;
+ infp->local_growth = growths;
+
+ infp->growth[growths++] = outbounds;
+ }
+ else
+ {
+ if ((infp->local_size + outbounds) % STACK_BYTES != 0)
+ infp->pad_local = STACK_BYTES - ((infp->local_size + outbounds) % STACK_BYTES);
+
+ infp->growth[growths++] = infp->local_size + infp->pad_local + outbounds;
+ infp->local_growth = growths;
+ }
+
+ /* Anything else that we've forgotten? Plus a few consistency checks. */
+ finish:
+ assert (infp->reg_offset >= 0);
+ assert (growths <= MAX_STACK_GROWS);
+
+ for (i = 0; i < growths; i++)
+ {
+ if (infp->growth[i] % STACK_BYTES)
+ {
+ fprintf (stderr, "stack growth of %d is not %d aligned\n",
+ infp->growth[i], STACK_BYTES);
+ abort ();
+ }
+ }
+}
+
+/* Define the offset between two registers, one to be eliminated, and
+ the other its replacement, at the start of a routine. */
+int
+mcore_initial_elimination_offset (from, to)
+ int from;
+ int to;
+{
+ int above_frame;
+ int below_frame;
+ struct mcore_frame fi;
+
+ layout_mcore_frame (& fi);
+
+ /* fp to ap */
+ above_frame = fi.local_size + fi.pad_local + fi.reg_size + fi.pad_reg;
+ /* sp to fp */
+ below_frame = fi.outbound_size + fi.pad_outbound;
+
+ if (from == ARG_POINTER_REGNUM && to == FRAME_POINTER_REGNUM)
+ return above_frame;
+
+ if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
+ return above_frame + below_frame;
+
+ if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
+ return below_frame;
+
+ abort ();
+
+ return 0;
+}
+
+/* Keep track of some information about varargs for the prolog. */
+void
+mcore_setup_incoming_varargs (args_so_far, mode, type, ptr_pretend_size)
+ CUMULATIVE_ARGS args_so_far;
+ enum machine_mode mode;
+ tree type;
+ int * ptr_pretend_size ATTRIBUTE_UNUSED;
+{
+ current_function_anonymous_args = 1;
+
+ /* We need to know how many argument registers are used before
+ the varargs start, so that we can push the remaining argument
+ registers during the prologue. */
+ number_of_regs_before_varargs = args_so_far + mcore_num_arg_regs (mode, type);
+
+ /* There is a bug somewhere in the arg handling code.
+ Until I can find it, this workaround always pushes the
+ last named argument onto the stack. */
+ number_of_regs_before_varargs = args_so_far;
+
+ /* The last named argument may be split between argument registers
+ and the stack. Allow for this here. */
+ if (number_of_regs_before_varargs > NPARM_REGS)
+ number_of_regs_before_varargs = NPARM_REGS;
+}
+
+void
+mcore_expand_prolog ()
+{
+ struct mcore_frame fi;
+ int space_allocated = 0;
+ int growth = 0;
+
+ /* Find out what we're doing. */
+ layout_mcore_frame (&fi);
+
+ space_allocated = fi.arg_size + fi.reg_size + fi.local_size +
+ fi.outbound_size + fi.pad_outbound + fi.pad_local + fi.pad_reg;
+
+ if (TARGET_CG_DATA)
+ {
+ /* Emit a symbol for this routine's frame size. */
+ rtx x;
+ int len;
+
+ x = DECL_RTL (current_function_decl);
+
+ if (GET_CODE (x) != MEM)
+ abort ();
+
+ x = XEXP (x, 0);
+
+ if (GET_CODE (x) != SYMBOL_REF)
+ abort ();
+
+ if (mcore_current_function_name)
+ free (mcore_current_function_name);
+
+ len = strlen (XSTR (x, 0)) + 1;
+ mcore_current_function_name = (char *) malloc (len);
+
+ memcpy (mcore_current_function_name, XSTR (x, 0), len);
+
+ ASM_OUTPUT_CG_NODE (asm_out_file, mcore_current_function_name, space_allocated);
+
+ if (current_function_calls_alloca)
+ ASM_OUTPUT_CG_EDGE (asm_out_file, mcore_current_function_name, "alloca", 1);
+
+ /* 970425: RBE:
+ We're looking at how the 8byte alignment affects stack layout
+ and where we had to pad things. This emits information we can
+ extract which tells us about frame sizes and the like. */
+ fprintf (asm_out_file,
+ "\t.equ\t__$frame$info$_%s_$_%d_%d_x%x_%d_%d_%d,0\n",
+ mcore_current_function_name,
+ fi.arg_size, fi.reg_size, fi.reg_mask,
+ fi.local_size, fi.outbound_size,
+ frame_pointer_needed);
+ }
+
+ if (mcore_naked_function_p ())
+ return;
+
+ /* Handle stdarg+regsaves in one shot: can't be more than 64 bytes. */
+ output_stack_adjust (-1, fi.growth[growth++]); /* grows it */
+
+ /* If we have a parameter passed partially in regs and partially in memory,
+ the registers will have been stored to memory already in function.c. So
+ we only need to do something here for varargs functions. */
+ if (fi.arg_size != 0 && current_function_pretend_args_size == 0)
+ {
+ int offset;
+ int rn = FIRST_PARM_REG + NPARM_REGS - 1;
+ int remaining = fi.arg_size;
+
+ for (offset = fi.arg_offset; remaining >= 4; offset -= 4, rn--, remaining -= 4)
+ {
+ emit_insn (gen_movsi
+ (gen_rtx (MEM, SImode,
+ plus_constant (stack_pointer_rtx, offset)),
+ gen_rtx (REG, SImode, rn)));
+ }
+ }
+
+ /* Do we need another stack adjustment before we do the register saves? */
+ if (growth < fi.reg_growth)
+ output_stack_adjust (-1, fi.growth[growth++]); /* grows it */
+
+ if (fi.reg_size != 0)
+ {
+ int i;
+ int offs = fi.reg_offset;
+
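+ /* Save the callee-saved registers. When r14 and r15 are both in the
+ mask and the save area starts at offset 0, a single store-multiple
+ covers the highest contiguous run of registers; anything else is
+ stored with individual stw's. */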
+ for (i = 15; i >= 0; i--)
+ {
+ if (offs == 0 && i == 15 && ((fi.reg_mask & 0xc000) == 0xc000))
+ {
+ int first_reg = 15;
+
+ while (fi.reg_mask & (1 << first_reg))
+ first_reg--;
+ first_reg++;
+
+ emit_insn (gen_store_multiple (gen_rtx (MEM, SImode, stack_pointer_rtx),
+ gen_rtx (REG, SImode, first_reg),
+ GEN_INT (16 - first_reg)));
+
+ i -= (15 - first_reg);
+ offs += (16 - first_reg) * 4;
+ }
+ else if (fi.reg_mask & (1 << i))
+ {
+ emit_insn (gen_movsi
+ (gen_rtx (MEM, SImode,
+ plus_constant (stack_pointer_rtx, offs)),
+ gen_rtx (REG, SImode, i)));
+ offs += 4;
+ }
+ }
+ }
+
+ /* Figure the locals + outbounds. */
+ if (frame_pointer_needed)
+ {
+ /* If we haven't already purchased to 'fp'. */
+ if (growth < fi.local_growth)
+ output_stack_adjust (-1, fi.growth[growth++]); /* grows it */
+
+ emit_insn (gen_movsi (frame_pointer_rtx, stack_pointer_rtx));
+
+ /* ... and then go any remaining distance for outbounds, etc. */
+ if (fi.growth[growth])
+ output_stack_adjust (-1, fi.growth[growth++]);
+ }
+ else
+ {
+ if (growth < fi.local_growth)
+ output_stack_adjust (-1, fi.growth[growth++]); /* grows it */
+ if (fi.growth[growth])
+ output_stack_adjust (-1, fi.growth[growth++]);
+ }
+}
+
+void
+mcore_expand_epilog ()
+{
+ struct mcore_frame fi;
+ int i;
+ int offs;
+ int growth = MAX_STACK_GROWS - 1;
+
+ /* Find out what we're doing. */
+ layout_mcore_frame (&fi);
+
+ if (mcore_naked_function_p ())
+ return;
+
+ /* If we had a frame pointer, restore the sp from that. */
+ if (frame_pointer_needed)
+ {
+ emit_insn (gen_movsi (stack_pointer_rtx, frame_pointer_rtx));
+ growth = fi.local_growth - 1;
+ }
+ else
+ {
+ /* XXX: while loop should accumulate and do a single sell. */
+ while (growth >= fi.local_growth)
+ {
+ if (fi.growth[growth] != 0)
+ output_stack_adjust (1, fi.growth[growth]);
+ growth--;
+ }
+ }
+
+ /* Make sure we've shrunk stack back to the point where the registers
+ were laid down. This is typically 0/1 iterations. Then pull the
+ register save information back off the stack. */
+
+ while (growth >= fi.reg_growth)
+ output_stack_adjust ( 1, fi.growth[growth--]);
+
+ offs = fi.reg_offset;
+
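+ /* Mirror of the prologue register saves: reload the contiguous run of
+ registers ending at r15 with a single load-multiple when it was saved
+ that way, and everything else with individual ldw's. */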
+ for (i = 15; i >= 0; i--)
+ {
+ if (offs == 0 && i == 15 && ((fi.reg_mask & 0xc000) == 0xc000))
+ {
+ int first_reg;
+
+ /* Find the starting register. */
+ first_reg = 15;
+
+ while (fi.reg_mask & (1 << first_reg))
+ first_reg--;
+
+ first_reg++;
+
+ emit_insn (gen_load_multiple (gen_rtx (REG, SImode, first_reg),
+ gen_rtx (MEM, SImode, stack_pointer_rtx),
+ GEN_INT (16 - first_reg)));
+
+ i -= (15 - first_reg);
+ offs += (16 - first_reg) * 4;
+ }
+ else if (fi.reg_mask & (1 << i))
+ {
+ emit_insn (gen_movsi
+ (gen_rtx (REG, SImode, i),
+ gen_rtx (MEM, SImode,
+ plus_constant (stack_pointer_rtx, offs))));
+ offs += 4;
+ }
+ }
+
+ /* Give back anything else. */
+ /* XXX: Should accumulate total and then give it back... */
+ while (growth >= 0)
+ output_stack_adjust ( 1, fi.growth[growth--]);
+}
+
+/* This code is borrowed from the SH port. */
+
+/* The MCORE cannot load a large constant into a register; constants have to
+ come from a pc relative load. The reference of a pc relative load
+ instruction must be less than 1k in front of the instruction. This
+ means that we often have to dump a constant inside a function, and
+ generate code to branch around it.
+
+ It is important to minimize this, since the branches will slow things
+ down and make things bigger.
+
+ Worst case code looks like:
+
+ lrw L1,r0
+ br L2
+ align
+ L1: .long value
+ L2:
+ ..
+
+ lrw L3,r0
+ br L4
+ align
+ L3: .long value
+ L4:
+ ..
+
+ We fix this by performing a scan before scheduling, which notices which
+ instructions need to have their operands fetched from the constant table
+ and builds the table.
+
+ The algorithm is:
+
+ Scan to find an instruction which needs a pcrel move. Look forward, find the
+ last barrier which is within MAX_COUNT bytes of the requirement.
+ If there isn't one, make one. Process all the instructions between
+ the find and the barrier.
+
+ In the above example, we can tell that L3 is within 1k of L1, so
+ the first move can be shrunk from the 2 insn+constant sequence into
+ just 1 insn, and the constant moved to L3 to make:
+
+ lrw L1,r0
+ ..
+ lrw L3,r0
+ br L4
+ align
+ L3:.long value
+ L4:.long value
+
+ Then the second move becomes the target for the shortening process. */
+
+typedef struct
+{
+ rtx value; /* Value in table. */
+ rtx label; /* Label of value. */
+} pool_node;
+
+/* The maximum number of constants that can fit into one pool, since
+ the pc relative range is 0...1020 bytes and constants are at least 4
+ bytes long. We subtract 4 from the range to allow for the case where
+ we need to add a branch/align before the constant pool. */
+
+#define MAX_COUNT 1016
+#define MAX_POOL_SIZE (MAX_COUNT/4)
+static pool_node pool_vector[MAX_POOL_SIZE];
+static int pool_size;
+
+/* Dump out any constants accumulated in the final pass. These
+ will only be labels. */
+char *
+mcore_output_jump_label_table ()
+{
+ int i;
+
+ if (pool_size)
+ {
+ fprintf (asm_out_file, "\t.align 2\n");
+
+ for (i = 0; i < pool_size; i++)
+ {
+ pool_node * p = pool_vector + i;
+
+ ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, "L", CODE_LABEL_NUMBER (p->label));
+
+ output_asm_insn (".long %0", &p->value);
+ }
+
+ pool_size = 0;
+ }
+
+ return "";
+}
+
+#if 0 /* XXX temporarily suppressed until I have time to look at what this code does. */
+
+/* We need these below. They use information stored in tables to figure out
+ what values are in what registers, etc. This is okay, since these tables
+ are valid at the time mcore_dependent_simplify_rtx() is invoked. Don't
+ use them anywhere else. BRC */
+
+extern unsigned HOST_WIDE_INT nonzero_bits PARAMS ((rtx, enum machine_mode));
+extern int num_sign_bit_copies PARAMS ((rtx, enum machine_mode));
+
+/* Do machine dependent simplifications: see simplify_rtx() in combine.c.
+ GENERAL_SIMPLIFY controls whether general machine independent
+ simplifications should be tried after machine dependent ones. Thus,
+ we can filter out certain simplifications and keep the simplify_rtx()
+ from changing things that we just simplified in a machine dependent
+ fashion. This is experimental. BRC */
+rtx
+mcore_dependent_simplify_rtx (x, int_op0_mode, last, in_dest, general_simplify)
+ rtx x;
+ int int_op0_mode;
+ int last;
+ int in_dest;
+ int * general_simplify;
+{
+ enum machine_mode mode = GET_MODE (x);
+ enum rtx_code code = GET_CODE (x);
+
+ /* Always simplify unless explicitly asked not to. */
+
+ * general_simplify = 1;
+
+ if (code == IF_THEN_ELSE)
+ {
+ int i;
+ rtx cond = XEXP(x, 0);
+ rtx true = XEXP(x, 1);
+ rtx false = XEXP(x, 2);
+ enum rtx_code true_code = GET_CODE (cond);
+
+ /* On the mcore, when doing -mcmov-one, we don't want to simplify:
+
+ (if_then_else (ne A 0) C1 0)
+
+ if it would be turned into a shift by simplify_if_then_else ().
+ Instead, leave it alone so that it will collapse into a conditional
+ move. Besides, at least for the mcore, doing this simplification does
+ not typically help. See combine.c, line 4217. BRC */
+
+ if (true_code == NE && XEXP (cond, 1) == const0_rtx
+ && false == const0_rtx && GET_CODE (true) == CONST_INT
+ && ((1 == nonzero_bits (XEXP (cond, 0), mode)
+ && (i = exact_log2 (INTVAL (true))) >= 0)
+ || ((num_sign_bit_copies (XEXP (cond, 0), mode)
+ == GET_MODE_BITSIZE (mode))
+ && (i = exact_log2 (- INTVAL (true))) >= 0)))
+ {
+ *general_simplify = 0;
+ return x;
+ }
+ }
+
+ return x;
+}
+#endif
+
+typedef enum
+{
+ COND_NO,
+ COND_MOV_INSN,
+ COND_CLR_INSN,
+ COND_INC_INSN,
+ COND_DEC_INSN,
+ COND_BRANCH_INSN
+}
+cond_type;
+
+/* Check whether insn is a candidate for a conditional. */
+static cond_type
+is_cond_candidate (insn)
+ rtx insn;
+{
+ /* The only things we conditionalize are those that can be directly
+ changed into a conditional. Only bother with SImode items. If
+ we wanted to be a little more aggressive, we could also do other
+ modes such as DImode with reg-reg move or load 0. */
+ if (GET_CODE (insn) == INSN)
+ {
+ rtx pat = PATTERN (insn);
+ rtx src, dst;
+
+ if (GET_CODE (pat) != SET)
+ return COND_NO;
+
+ dst = XEXP (pat, 0);
+
+ if ((GET_CODE (dst) != REG &&
+ GET_CODE (dst) != SUBREG) ||
+ GET_MODE (dst) != SImode)
+ return COND_NO;
+
+ src = XEXP (pat, 1);
+
+ if ((GET_CODE (src) == REG ||
+ (GET_CODE (src) == SUBREG &&
+ GET_CODE (SUBREG_REG (src)) == REG)) &&
+ GET_MODE (src) == SImode)
+ return COND_MOV_INSN;
+ else if (GET_CODE (src) == CONST_INT &&
+ INTVAL (src) == 0)
+ return COND_CLR_INSN;
+ else if (GET_CODE (src) == PLUS &&
+ (GET_CODE (XEXP (src, 0)) == REG ||
+ (GET_CODE (XEXP (src, 0)) == SUBREG &&
+ GET_CODE (SUBREG_REG (XEXP (src, 0))) == REG)) &&
+ GET_MODE (XEXP (src, 0)) == SImode &&
+ GET_CODE (XEXP (src, 1)) == CONST_INT &&
+ INTVAL (XEXP (src, 1)) == 1)
+ return COND_INC_INSN;
+ else if (((GET_CODE (src) == MINUS &&
+ GET_CODE (XEXP (src, 1)) == CONST_INT &&
+ INTVAL( XEXP (src, 1)) == 1) ||
+ (GET_CODE (src) == PLUS &&
+ GET_CODE (XEXP (src, 1)) == CONST_INT &&
+ INTVAL (XEXP (src, 1)) == -1)) &&
+ (GET_CODE (XEXP (src, 0)) == REG ||
+ (GET_CODE (XEXP (src, 0)) == SUBREG &&
+ GET_CODE (SUBREG_REG (XEXP (src, 0))) == REG)) &&
+ GET_MODE (XEXP (src, 0)) == SImode)
+ return COND_DEC_INSN;
+
+ /* Some insns that we don't bother with:
+ (set (rx:DI) (ry:DI))
+ (set (rx:DI) (const_int 0))
+ */
+
+ }
+ else if (GET_CODE (insn) == JUMP_INSN &&
+ GET_CODE (PATTERN (insn)) == SET &&
+ GET_CODE (XEXP (PATTERN (insn), 1)) == LABEL_REF)
+ return COND_BRANCH_INSN;
+
+ return COND_NO;
+}
+
+/* Emit a conditional version of insn and replace the old insn with the
+ new one. Return the new insn if emitted. */
+static rtx
+emit_new_cond_insn (insn, cond)
+ rtx insn;
+ int cond;
+{
+ rtx c_insn = 0;
+ rtx pat, dst, src;
+ cond_type num;
+
+ if ((num = is_cond_candidate (insn)) == COND_NO)
+ return NULL;
+
+ pat = PATTERN (insn);
+
+ if (GET_CODE (insn) == INSN)
+ {
+ dst = SET_DEST (pat);
+ src = SET_SRC (pat);
+ }
+ else
+ dst = JUMP_LABEL (insn);
+
+ switch (num)
+ {
+ case COND_MOV_INSN:
+ case COND_CLR_INSN:
+ if (cond)
+ c_insn = gen_movt0 (dst, src, dst);
+ else
+ c_insn = gen_movt0 (dst, dst, src);
+ break;
+
+ case COND_INC_INSN:
+ if (cond)
+ c_insn = gen_incscc (dst, dst);
+ else
+ c_insn = gen_incscc_false (dst, dst);
+ break;
+
+ case COND_DEC_INSN:
+ if (cond)
+ c_insn = gen_decscc (dst, dst);
+ else
+ c_insn = gen_decscc_false (dst, dst);
+ break;
+
+ case COND_BRANCH_INSN:
+ if (cond)
+ c_insn = gen_branch_true (dst);
+ else
+ c_insn = gen_branch_false (dst);
+ break;
+
+ default:
+ return NULL;
+ }
+
+ /* Only copy the notes if they exist. */
+ if (rtx_length [GET_CODE (c_insn)] >= 7 && rtx_length [GET_CODE (insn)] >= 7)
+ {
+ /* We really don't need to bother with the notes and links at this
+ point, but go ahead and save the notes. This will help is_dead()
+ when applying peepholes (links don't matter since they are not
+ used any more beyond this point for the mcore). */
+ REG_NOTES (c_insn) = REG_NOTES (insn);
+ }
+
+ if (num == COND_BRANCH_INSN)
+ {
+ /* For jumps, we need to be a little bit careful and emit the new jump
+ before the old one and to update the use count for the target label.
+ This way, the barrier following the old (uncond) jump will get
+ deleted, but the label won't. */
+ c_insn = emit_jump_insn_before (c_insn, insn);
+
+ ++ LABEL_NUSES (dst);
+
+ JUMP_LABEL (c_insn) = dst;
+ }
+ else
+ c_insn = emit_insn_after (c_insn, insn);
+
+ delete_insn (insn);
+
+ return c_insn;
+}
+
+/* Attempt to change a basic block into a series of conditional insns. This
+ works by taking the branch at the end of the 1st block and scanning for the
+ end of the 2nd block. If all instructions in the 2nd block have cond.
+ versions and the label at the start of block 3 is the same as the target
+ from the branch at block 1, then conditionalize all insns in block 2 using
+ the inverse condition of the branch at block 1. (Note I'm bending the
+ definition of basic block here.)
+
+ e.g., change:
+
+ bt L2 <-- end of block 1 (delete)
+ mov r7,r8
+ addu r7,1
+ br L3 <-- end of block 2
+
+ L2: ... <-- start of block 3 (NUSES==1)
+ L3: ...
+
+ to:
+
+ movf r7,r8
+ incf r7
+ bf L3
+
+ L3: ...
+
+ We can delete the L2 label if NUSES==1 and re-apply the optimization
+ starting at the last instruction of block 2. This may allow an entire
+ if-then-else statement to be conditionalized. BRC */
+static rtx
+conditionalize_block (first)
+ rtx first;
+{
+ rtx insn;
+ rtx br_pat;
+ rtx end_blk_1_br = 0;
+ rtx end_blk_2_insn = 0;
+ rtx start_blk_3_lab = 0;
+ int cond;
+ int br_lab_num;
+ int blk_size = 0;
+
+
+ /* Check that the first insn is a candidate conditional jump. This is
+ the one that we'll eliminate. If not, advance to the next insn to
+ try. */
+ if (GET_CODE (first) != JUMP_INSN ||
+ GET_CODE (PATTERN (first)) != SET ||
+ GET_CODE (XEXP (PATTERN (first), 1)) != IF_THEN_ELSE)
+ return NEXT_INSN (first);
+
+ /* Extract some information we need. */
+ end_blk_1_br = first;
+ br_pat = PATTERN (end_blk_1_br);
+
+ /* Complement the condition since we use the reverse cond. for the insns. */
+ cond = (GET_CODE (XEXP (XEXP (br_pat, 1), 0)) == EQ);
+
+ /* Determine what kind of branch we have. */
+ if (GET_CODE (XEXP (XEXP (br_pat, 1), 1)) == LABEL_REF)
+ {
+ /* A normal branch, so extract label out of first arm. */
+ br_lab_num = CODE_LABEL_NUMBER (XEXP (XEXP (XEXP (br_pat, 1), 1), 0));
+ }
+ else
+ {
+ /* An inverse branch, so extract the label out of the 2nd arm
+ and complement the condition. */
+ cond = (cond == 0);
+ br_lab_num = CODE_LABEL_NUMBER (XEXP (XEXP (XEXP (br_pat, 1), 2), 0));
+ }
+
+ /* Scan forward for the start of block 2: it must start with a
+ label and that label must be the same as the branch target
+ label from block 1. We don't care about whether block 2 actually
+ ends with a branch or a label (an uncond. branch is
+ conditionalizable). */
+ for (insn = NEXT_INSN (first); insn; insn = NEXT_INSN (insn))
+ {
+ enum rtx_code code;
+
+ code = GET_CODE (insn);
+
+ /* Look for the label at the start of block 3. */
+ if (code == CODE_LABEL && CODE_LABEL_NUMBER (insn) == br_lab_num)
+ break;
+
+ /* Skip barriers, notes, and conditionalizable insns. If the
+ insn is not conditionalizable or makes this optimization fail,
+ just return the next insn so we can start over from that point. */
+ if (code != BARRIER && code != NOTE && !is_cond_candidate (insn))
+ return NEXT_INSN (insn);
+
+ /* Remember the last real insn before the label (i.e. the end of block 2). */
+ if (code == JUMP_INSN || code == INSN)
+ {
+ blk_size ++;
+ end_blk_2_insn = insn;
+ }
+ }
+
+ if (!insn)
+ return insn;
+
+ /* It is possible for this optimization to slow performance if the blocks
+ are long. This really depends upon whether the branch is likely taken
+ or not. If the branch is taken, we slow performance in many cases. But,
+ if the branch is not taken, we always help performance (for a single
+ block, but for a double block (i.e. when the optimization is re-applied)
+ this is not true since the 'right thing' depends on the overall length of
+ the collapsed block). As a compromise, don't apply this optimization on
+ blocks larger than size 2 (unlikely for the mcore) when speed is important.
+ The best threshold depends on the latencies of the instructions (i.e.,
+ the branch penalty). */
+ if (optimize > 1 && blk_size > 2)
+ return insn;
+
+ /* At this point, we've found the start of block 3 and we know that
+ it is the destination of the branch from block 1. Also, all
+ instructions in the block 2 are conditionalizable. So, apply the
+ conditionalization and delete the branch. */
+ start_blk_3_lab = insn;
+
+ for (insn = NEXT_INSN (end_blk_1_br); insn != start_blk_3_lab;
+ insn = NEXT_INSN (insn))
+ {
+ rtx newinsn;
+
+ if (INSN_DELETED_P (insn))
+ continue;
+
+ /* Try to form a conditional variant of the instruction and emit it. */
+ if ((newinsn = emit_new_cond_insn (insn, cond)))
+ {
+ if (end_blk_2_insn == insn)
+ end_blk_2_insn = newinsn;
+
+ insn = newinsn;
+ }
+ }
+
+ /* Note whether we will delete the label starting blk 3 when the jump
+ gets deleted. If so, we want to re-apply this optimization at the
+ last real instruction right before the label. */
+ if (LABEL_NUSES (start_blk_3_lab) == 1)
+ {
+ start_blk_3_lab = 0;
+ }
+
+ /* ??? We probably should redistribute the death notes for this insn, esp.
+ the death of cc, but it doesn't really matter this late in the game.
+ The peepholes all use is_dead() which will find the correct death
+ regardless of whether there is a note. */
+ delete_insn (end_blk_1_br);
+
+ if (! start_blk_3_lab)
+ return end_blk_2_insn;
+
+ /* Return the insn right after the label at the start of block 3. */
+ return NEXT_INSN (start_blk_3_lab);
+}
+
+/* Apply the conditionalization of blocks optimization. This is the
+ outer loop that traverses through the insns scanning for a branch
+ that signifies an opportunity to apply the optimization. Note that
+ this optimization is applied late. If we could apply it earlier,
+ say before cse 2, it may expose more optimization opportunities.
+ But the payback probably isn't really worth the effort (we'd have
+ to update all reg/flow/notes/links/etc. to make it work - and stick it
+ in before cse 2). */
+static void
+conditionalize_optimization (first)
+ rtx first;
+{
+ rtx insn;
+
+ for (insn = first; insn; insn = conditionalize_block (insn))
+ continue;
+}
+
+static int saved_warn_return_type = -1;
+static int saved_warn_return_type_count = 0;
+
+/* This function is called from toplev.c before reorg. */
+void
+mcore_dependent_reorg (first)
+ rtx first;
+{
+ /* Reset this variable. */
+ current_function_anonymous_args = 0;
+
+ /* Restore the warn_return_type if it has been altered. */
+ if (saved_warn_return_type != -1)
+ {
+ /* Only restore the value if we have reached another function.
+ The test of warn_return_type occurs in finish_function () in
+ c-decl.c a long time after the code for the function is generated,
+ so we need a counter to tell us when we have finished parsing that
+ function and can restore the flag. */
+ if (--saved_warn_return_type_count == 0)
+ {
+ warn_return_type = saved_warn_return_type;
+ saved_warn_return_type = -1;
+ }
+ }
+
+ if (optimize == 0)
+ return;
+
+ /* Conditionalize blocks where we can. */
+ conditionalize_optimization (first);
+
+ /* Literal pool generation is now pushed off until the assembler. */
+}
+
+
+/* Return the reg_class to use when reloading the rtx X into the class
+ CLASS. */
+
+/* If the input is (PLUS REG CONSTANT) representing a stack slot address,
+ then we want to restrict the class to LRW_REGS since that ensures that
+ will be able to safely load the constant.
+
+ If the input is a constant that should be loaded with mvir1, then use
+ ONLYR1_REGS.
+
+ ??? We don't handle the case where we have (PLUS REG CONSTANT) and
+ the constant should be loaded with mvir1, because that can lead to cases
+ where an instruction needs two ONLYR1_REGS reloads. */
+enum reg_class
+mcore_reload_class (x, class)
+ rtx x;
+ enum reg_class class;
+{
+ enum reg_class new_class;
+
+ if (class == GENERAL_REGS && CONSTANT_P (x)
+ && (GET_CODE (x) != CONST_INT
+ || ( ! CONST_OK_FOR_I (INTVAL (x))
+ && ! CONST_OK_FOR_M (INTVAL (x))
+ && ! CONST_OK_FOR_N (INTVAL (x)))))
+ new_class = LRW_REGS;
+ else
+ new_class = class;
+
+ return new_class;
+}
+
+/* Tell me if a pair of reg/subreg rtx's actually refer to the same
+ register. Note that the current version doesn't worry about whether
+ they are the same mode or not (e.g., a QImode in r2 matches an HImode
+ in r2, which matches an SImode in r2). Might think in the future about whether
+ we want to be able to say something about modes. */
+int
+mcore_is_same_reg (x, y)
+ rtx x;
+ rtx y;
+{
+ /* Strip any and all of the subreg wrappers. */
+ while (GET_CODE (x) == SUBREG)
+ x = SUBREG_REG (x);
+
+ while (GET_CODE (y) == SUBREG)
+ y = SUBREG_REG (y);
+
+ if (GET_CODE(x) == REG && GET_CODE(y) == REG && REGNO(x) == REGNO(y))
+ return 1;
+
+ return 0;
+}
+
+/* Called to register all of our global variables with the garbage
+ collector. */
+static void
+mcore_add_gc_roots ()
+{
+ ggc_add_rtx_root (&arch_compare_op0, 1);
+ ggc_add_rtx_root (&arch_compare_op1, 1);
+}
+
+void
+mcore_override_options ()
+{
+ if (mcore_stack_increment_string)
+ {
+ mcore_stack_increment = atoi (mcore_stack_increment_string);
+
+ if (mcore_stack_increment < 0
+ || (mcore_stack_increment == 0
+ && (mcore_stack_increment_string[0] != '0'
+ || mcore_stack_increment_string[1] != 0)))
+ error ("Invalid option `-mstack-increment=%s'",
+ mcore_stack_increment_string);
+ }
+
+ /* Only the m340 supports little endian code, so turn on the m340
+ target if little endian code was requested. */
+ if (TARGET_LITTLE_END && ! TARGET_M340)
+ target_flags |= M340_BIT;
+
+ mcore_add_gc_roots ();
+}
+
+int
+mcore_must_pass_on_stack (mode, type)
+ enum machine_mode mode ATTRIBUTE_UNUSED;
+ tree type;
+{
+ if (type == NULL)
+ return 0;
+
+ /* If the argument can have its address taken, it must
+ be placed on the stack. */
+ if (TREE_ADDRESSABLE (type))
+ return 1;
+
+ return 0;
+}
+
+/* Compute the number of word sized registers needed to
+ hold a function argument of mode MODE and type TYPE. */
+int
+mcore_num_arg_regs (mode, type)
+ enum machine_mode mode;
+ tree type;
+{
+ int size;
+
+ if (MUST_PASS_IN_STACK (mode, type))
+ return 0;
+
+ if (type && mode == BLKmode)
+ size = int_size_in_bytes (type);
+ else
+ size = GET_MODE_SIZE (mode);
+
+ return ROUND_ADVANCE (size);
+}
+
+static rtx
+handle_structs_in_regs (mode, type, reg)
+ enum machine_mode mode;
+ tree type;
+ int reg;
+{
+ int size;
+
+  /* The MCore ABI defines that a structure whose size is not a whole number
+     of words is passed packed into registers (or spilled onto the stack if
+ not enough registers are available) with the last few bytes of the
+ structure being packed, left-justified, into the last register/stack slot.
+ GCC handles this correctly if the last word is in a stack slot, but we
+ have to generate a special, PARALLEL RTX if the last word is in an
+ argument register. */
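+  /* For example, a 10 byte structure whose first word is assigned to r2
+     fills r2 and r3 completely, with its last two bytes packed,
+     left-justified, into r4; the PARALLEL built below describes that
+     layout.  */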
+ if (type
+ && TYPE_MODE (type) == BLKmode
+ && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
+ && (size = int_size_in_bytes (type)) > UNITS_PER_WORD
+ && (size % UNITS_PER_WORD != 0)
+ && (reg + mcore_num_arg_regs (mode, type) <= (FIRST_PARM_REG + NPARM_REGS)))
+ {
+ rtx arg_regs [NPARM_REGS];
+ int nregs;
+ rtx result;
+ rtvec rtvec;
+
+ for (nregs = 0; size > 0; size -= UNITS_PER_WORD)
+ {
+ arg_regs [nregs] =
+ gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, reg ++),
+ GEN_INT (nregs * UNITS_PER_WORD));
+ nregs ++;
+ }
+
+ /* We assume here that NPARM_REGS == 6. The assert checks this. */
+ assert (sizeof (arg_regs) / sizeof (arg_regs[0]) == 6);
+ rtvec = gen_rtvec (nregs, arg_regs[0], arg_regs[1], arg_regs[2],
+ arg_regs[3], arg_regs[4], arg_regs[5]);
+
+ result = gen_rtx_PARALLEL (mode, rtvec);
+ return result;
+ }
+
+ return gen_rtx_REG (mode, reg);
+}
+
+rtx
+mcore_function_value (valtype, func)
+ tree valtype;
+ tree func ATTRIBUTE_UNUSED;
+{
+ enum machine_mode mode;
+ int unsigned_p;
+
+ mode = TYPE_MODE (valtype);
+
+ PROMOTE_MODE (mode, unsigned_p, NULL);
+
+ return handle_structs_in_regs (mode, valtype, FIRST_RET_REG);
+}
+
+/* Define where to put the arguments to a function.
+ Value is zero to push the argument on the stack,
+ or a hard register in which to store the argument.
+
+ MODE is the argument's machine mode.
+ TYPE is the data type of the argument (as a tree).
+ This is null for libcalls where that information may
+ not be available.
+ CUM is a variable of type CUMULATIVE_ARGS which gives info about
+ the preceding args and about the function being called.
+ NAMED is nonzero if this argument is a named parameter
+ (otherwise it is an extra parameter matching an ellipsis).
+
+ On MCore the first args are normally in registers
+ and the rest are pushed. Any arg that starts within the first
+ NPARM_REGS words is at least partially passed in a register unless
+ its data type forbids. */
+rtx
+mcore_function_arg (cum, mode, type, named)
+ CUMULATIVE_ARGS cum;
+ enum machine_mode mode;
+ tree type;
+ int named;
+{
+ int arg_reg;
+
+ if (! named)
+ return 0;
+
+ if (MUST_PASS_IN_STACK (mode, type))
+ return 0;
+
+ arg_reg = ROUND_REG (cum, mode);
+
+ if (arg_reg < NPARM_REGS)
+ return handle_structs_in_regs (mode, type, FIRST_PARM_REG + arg_reg);
+
+ return 0;
+}
+
+/* Implements the FUNCTION_ARG_PARTIAL_NREGS macro.
+ Returns the number of argument registers required to hold *part* of
+ a parameter of machine mode MODE and type TYPE (which may be NULL if
+   the type is not known).  If the argument fits entirely in the argument
+ registers, or entirely on the stack, then 0 is returned. CUM is the
+ number of argument registers already used by earlier parameters to
+ the function. */
+int
+mcore_function_arg_partial_nregs (cum, mode, type, named)
+ CUMULATIVE_ARGS cum;
+ enum machine_mode mode;
+ tree type;
+ int named;
+{
+ int reg = ROUND_REG (cum, mode);
+
+ if (named == 0)
+ return 0;
+
+ if (MUST_PASS_IN_STACK (mode, type))
+ return 0;
+
+ /* REG is not the *hardware* register number of the register that holds
+ the argument, it is the *argument* register number. So for example,
+ the first argument to a function goes in argument register 0, which
+ translates (for the MCore) into hardware register 2. The second
+ argument goes into argument register 1, which translates into hardware
+ register 3, and so on. NPARM_REGS is the number of argument registers
+ supported by the target, not the maximum hardware register number of
+ the target. */
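+  /* For example, with NPARM_REGS == 6, a two word argument whose first
+     word lands in argument register 5 gets one register and one stack
+     slot, so 1 is returned below.  */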
+ if (reg >= NPARM_REGS)
+ return 0;
+
+ /* If the argument fits entirely in registers, return 0. */
+ if (reg + mcore_num_arg_regs (mode, type) <= NPARM_REGS)
+ return 0;
+
+ /* The argument overflows the number of available argument registers.
+ Compute how many argument registers have not yet been assigned to
+ hold an argument. */
+ reg = NPARM_REGS - reg;
+
+ /* Return partially in registers and partially on the stack. */
+ return reg;
+}
+
+/* Return non-zero if SYMBOL is marked as being dllexport'd. */
+int
+mcore_dllexport_name_p (symbol)
+ char * symbol;
+{
+ return symbol[0] == '@' && symbol[1] == 'e' && symbol[2] == '.';
+}
+
+/* Return non-zero if SYMBOL is marked as being dllimport'd. */
+int
+mcore_dllimport_name_p (symbol)
+ char * symbol;
+{
+ return symbol[0] == '@' && symbol[1] == 'i' && symbol[2] == '.';
+}
+
+/* Mark a DECL as being dllexport'd. */
+static void
+mcore_mark_dllexport (decl)
+ tree decl;
+{
+ char * oldname;
+ char * newname;
+ rtx rtlname;
+ tree idp;
+
+ rtlname = XEXP (DECL_RTL (decl), 0);
+
+ if (GET_CODE (rtlname) == SYMBOL_REF)
+ oldname = XSTR (rtlname, 0);
+ else if ( GET_CODE (rtlname) == MEM
+ && GET_CODE (XEXP (rtlname, 0)) == SYMBOL_REF)
+ oldname = XSTR (XEXP (rtlname, 0), 0);
+ else
+ abort ();
+
+ if (mcore_dllexport_name_p (oldname))
+ return; /* Already done. */
+
+ newname = alloca (strlen (oldname) + 4);
+ sprintf (newname, "@e.%s", oldname);
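+  /* So, for example, the symbol "foo" becomes "@e.foo".  */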
+
+ /* We pass newname through get_identifier to ensure it has a unique
+ address. RTL processing can sometimes peek inside the symbol ref
+ and compare the string's addresses to see if two symbols are
+ identical. */
+ /* ??? At least I think that's why we do this. */
+ idp = get_identifier (newname);
+
+ XEXP (DECL_RTL (decl), 0) =
+ gen_rtx (SYMBOL_REF, Pmode, IDENTIFIER_POINTER (idp));
+}
+
+/* Mark a DECL as being dllimport'd. */
+static void
+mcore_mark_dllimport (decl)
+ tree decl;
+{
+ char * oldname;
+ char * newname;
+ tree idp;
+ rtx rtlname;
+ rtx newrtl;
+
+ rtlname = XEXP (DECL_RTL (decl), 0);
+
+ if (GET_CODE (rtlname) == SYMBOL_REF)
+ oldname = XSTR (rtlname, 0);
+ else if ( GET_CODE (rtlname) == MEM
+ && GET_CODE (XEXP (rtlname, 0)) == SYMBOL_REF)
+ oldname = XSTR (XEXP (rtlname, 0), 0);
+ else
+ abort ();
+
+ if (mcore_dllexport_name_p (oldname))
+ abort (); /* This shouldn't happen. */
+ else if (mcore_dllimport_name_p (oldname))
+ return; /* Already done. */
+
+ /* ??? One can well ask why we're making these checks here,
+ and that would be a good question. */
+
+ /* Imported variables can't be initialized. */
+ if (TREE_CODE (decl) == VAR_DECL
+ && !DECL_VIRTUAL_P (decl)
+ && DECL_INITIAL (decl))
+ {
+ error_with_decl (decl, "initialized variable `%s' is marked dllimport");
+ return;
+ }
+
+ /* `extern' needn't be specified with dllimport.
+ Specify `extern' now and hope for the best. Sigh. */
+ if (TREE_CODE (decl) == VAR_DECL
+ /* ??? Is this test for vtables needed? */
+ && !DECL_VIRTUAL_P (decl))
+ {
+ DECL_EXTERNAL (decl) = 1;
+ TREE_PUBLIC (decl) = 1;
+ }
+
+ newname = alloca (strlen (oldname) + 11);
+ sprintf (newname, "@i.__imp_%s", oldname);
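+  /* So, for example, the symbol "foo" becomes "@i.__imp_foo".  */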
+
+ /* We pass newname through get_identifier to ensure it has a unique
+ address. RTL processing can sometimes peek inside the symbol ref
+ and compare the string's addresses to see if two symbols are
+ identical. */
+ /* ??? At least I think that's why we do this. */
+ idp = get_identifier (newname);
+
+ newrtl = gen_rtx (MEM, Pmode,
+ gen_rtx (SYMBOL_REF, Pmode,
+ IDENTIFIER_POINTER (idp)));
+ XEXP (DECL_RTL (decl), 0) = newrtl;
+}
+
+static int
+mcore_dllexport_p (decl)
+ tree decl;
+{
+ if ( TREE_CODE (decl) != VAR_DECL
+ && TREE_CODE (decl) != FUNCTION_DECL)
+ return 0;
+
+ return lookup_attribute ("dllexport", DECL_MACHINE_ATTRIBUTES (decl)) != 0;
+}
+
+static int
+mcore_dllimport_p (decl)
+ tree decl;
+{
+ if ( TREE_CODE (decl) != VAR_DECL
+ && TREE_CODE (decl) != FUNCTION_DECL)
+ return 0;
+
+ return lookup_attribute ("dllimport", DECL_MACHINE_ATTRIBUTES (decl)) != 0;
+}
+
+/* Cover function to implement ENCODE_SECTION_INFO. */
+void
+mcore_encode_section_info (decl)
+ tree decl;
+{
+ /* This bit is copied from arm.h. */
+ if (optimize > 0
+ && TREE_CONSTANT (decl)
+ && (!flag_writable_strings || TREE_CODE (decl) != STRING_CST))
+ {
+ rtx rtl = (TREE_CODE_CLASS (TREE_CODE (decl)) != 'd'
+ ? TREE_CST_RTL (decl) : DECL_RTL (decl));
+ SYMBOL_REF_FLAG (XEXP (rtl, 0)) = 1;
+ }
+
+ /* Mark the decl so we can tell from the rtl whether the object is
+ dllexport'd or dllimport'd. */
+ if (mcore_dllexport_p (decl))
+ mcore_mark_dllexport (decl);
+ else if (mcore_dllimport_p (decl))
+ mcore_mark_dllimport (decl);
+
+ /* It might be that DECL has already been marked as dllimport, but
+ a subsequent definition nullified that. The attribute is gone
+ but DECL_RTL still has @i.__imp_foo. We need to remove that. */
+ else if ((TREE_CODE (decl) == FUNCTION_DECL
+ || TREE_CODE (decl) == VAR_DECL)
+ && DECL_RTL (decl) != NULL_RTX
+ && GET_CODE (DECL_RTL (decl)) == MEM
+ && GET_CODE (XEXP (DECL_RTL (decl), 0)) == MEM
+ && GET_CODE (XEXP (XEXP (DECL_RTL (decl), 0), 0)) == SYMBOL_REF
+ && mcore_dllimport_name_p (XSTR (XEXP (XEXP (DECL_RTL (decl), 0), 0), 0)))
+ {
+ char * oldname = XSTR (XEXP (XEXP (DECL_RTL (decl), 0), 0), 0);
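+      /* Skip over the 9 character "@i.__imp_" prefix that
+         mcore_mark_dllimport added to the name.  */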
+ tree idp = get_identifier (oldname + 9);
+ rtx newrtl = gen_rtx (SYMBOL_REF, Pmode, IDENTIFIER_POINTER (idp));
+
+ XEXP (DECL_RTL (decl), 0) = newrtl;
+
+ /* We previously set TREE_PUBLIC and DECL_EXTERNAL.
+ ??? We leave these alone for now. */
+ }
+}
+
+/* MCore specific attribute support.
+ dllexport - for exporting a function/variable that will live in a dll
+ dllimport - for importing a function/variable from a dll
+ naked - do not create a function prologue/epilogue. */
+int
+mcore_valid_machine_decl_attribute (decl, attributes, attr, args)
+ tree decl;
+ tree attributes ATTRIBUTE_UNUSED;
+ tree attr;
+ tree args;
+{
+ if (args != NULL_TREE)
+ return 0;
+
+ if (is_attribute_p ("dllexport", attr))
+ return 1;
+
+ if (is_attribute_p ("dllimport", attr))
+ return 1;
+
+ if (is_attribute_p ("naked", attr) &&
+ TREE_CODE (decl) == FUNCTION_DECL)
+ {
+ /* PR14310 - don't complain about lack of return statement
+ in naked functions. The solution here is a gross hack
+ but this is the only way to solve the problem without
+ adding a new feature to GCC. I did try submitting a patch
+ that would add such a new feature, but it was (rightfully)
+ rejected on the grounds that it was creeping featurism,
+ so hence this code. */
+ if (warn_return_type)
+ {
+ saved_warn_return_type = warn_return_type;
+ warn_return_type = 0;
+ saved_warn_return_type_count = 2;
+ }
+ else if (saved_warn_return_type_count)
+ saved_warn_return_type_count = 2;
+
+ return 1;
+ }
+
+ return 0;
+}
+
+/* Merge attributes in decls OLD and NEW.
+ This handles the following situation:
+
+ __declspec (dllimport) int foo;
+ int foo;
+
+ The second instance of `foo' nullifies the dllimport. */
+tree
+mcore_merge_machine_decl_attributes (old, new)
+ tree old;
+ tree new;
+{
+ tree a;
+ int delete_dllimport_p;
+
+ old = DECL_MACHINE_ATTRIBUTES (old);
+ new = DECL_MACHINE_ATTRIBUTES (new);
+
+ /* What we need to do here is remove from `old' dllimport if it doesn't
+ appear in `new'. dllimport behaves like extern: if a declaration is
+ marked dllimport and a definition appears later, then the object
+ is not dllimport'd. */
+ if ( lookup_attribute ("dllimport", old) != NULL_TREE
+ && lookup_attribute ("dllimport", new) == NULL_TREE)
+ delete_dllimport_p = 1;
+ else
+ delete_dllimport_p = 0;
+
+ a = merge_attributes (old, new);
+
+ if (delete_dllimport_p)
+ {
+ tree prev,t;
+
+ /* Scan the list for dllimport and delete it. */
+ for (prev = NULL_TREE, t = a; t; prev = t, t = TREE_CHAIN (t))
+ {
+ if (is_attribute_p ("dllimport", TREE_PURPOSE (t)))
+ {
+ if (prev == NULL_TREE)
+ a = TREE_CHAIN (a);
+ else
+ TREE_CHAIN (prev) = TREE_CHAIN (t);
+ break;
+ }
+ }
+ }
+
+ return a;
+}
+
+/* Cover function for UNIQUE_SECTION. */
+
+void
+mcore_unique_section (decl, reloc)
+ tree decl;
+ int reloc ATTRIBUTE_UNUSED;
+{
+ int len;
+ char * name;
+ char * string;
+ char * prefix;
+
+ name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
+
+ /* Strip off any encoding in name. */
+ STRIP_NAME_ENCODING (name, name);
+
+ /* The object is put in, for example, section .text$foo.
+     The linker will then ultimately place it in .text
+ (everything from the $ on is stripped). */
+ if (TREE_CODE (decl) == FUNCTION_DECL)
+ prefix = ".text$";
+  /* For compatibility with EPOC, we ignore the fact that the
+ section might have relocs against it. */
+ else if (DECL_READONLY_SECTION (decl, 0))
+ prefix = ".rdata$";
+ else
+ prefix = ".data$";
+
+ len = strlen (name) + strlen (prefix);
+ string = alloca (len + 1);
+
+ sprintf (string, "%s%s", prefix, name);
+
+ DECL_SECTION_NAME (decl) = build_string (len, string);
+}
+
+int
+mcore_naked_function_p ()
+{
+ return lookup_attribute ("naked", DECL_MACHINE_ATTRIBUTES (current_function_decl)) != NULL_TREE;
+}
diff --git a/gcc/config/mcore/mcore.h b/gcc/config/mcore/mcore.h
new file mode 100644
index 00000000000..a165004e014
--- /dev/null
+++ b/gcc/config/mcore/mcore.h
@@ -0,0 +1,1458 @@
+/* Definitions of target machine for GNU compiler,
+ for Motorola M*CORE Processor.
+ Copyright (C) 1993, 1999, 2000 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+#ifndef __MCORE__H
+#define __MCORE__H
+
+/* RBE: need to move these elsewhere. */
+#undef LIKE_PPC_ABI
+#define MCORE_STRUCT_ARGS
+/* RBE: end of "move elsewhere". */
+
+#include "hwint.h"
+
+#ifndef HAVE_MACHINE_MODES
+#include "machmode.h"
+#endif
+
+/* Run-time Target Specification. */
+#define TARGET_MCORE
+
+/* A C expression whose value is nonzero if IDENTIFIER with arguments ARGS
+ is a valid machine specific attribute for DECL.
+ The attributes in ATTRIBUTES have previously been assigned to DECL. */
+#undef VALID_MACHINE_DECL_ATTRIBUTE
+#define VALID_MACHINE_DECL_ATTRIBUTE(DECL, ATTRIBUTES, IDENTIFIER, ARGS) \
+ mcore_valid_machine_decl_attribute (DECL, ATTRIBUTES, IDENTIFIER, ARGS)
+
+#define MERGE_MACHINE_DECL_ATTRIBUTES(OLD, NEW) \
+ mcore_merge_machine_decl_attributes (OLD, NEW)
+
+/* Support the __declspec keyword by turning them into attributes.
+ We currently only support: dllexport and dllimport.
+ Note that the current way we do this may result in a collision with
+ predefined attributes later on. This can be solved by using one attribute,
+ say __declspec__, and passing args to it. The problem with that approach
+ is that args are not accumulated: each new appearance would clobber any
+ existing args. XXX- FIXME the definition below relies upon string
+ concatenation, which is non-portable. */
+#define CPP_PREDEFINES \
+ "-D__mcore__ -D__MCORE__=1 -D__declspec(x)=__attribute__((x))" SUBTARGET_CPP_PREDEFINES
+
+/* If -m4align is ever re-enabled then uncomment this line as well:
+ #define CPP_SPEC "%{!m4align:-D__MCORE_ALIGN_8__} %{m4align:-D__MCORE__ALIGN_4__}" */
+
+#undef CPP_SPEC
+#define CPP_SPEC " \
+%{mbig-endian: \
+ %{mlittle-endian:%echoose either big or little endian, not both} \
+ -D__MCOREBE__} \
+%{m210: \
+ %{m340:%echoose either m340 or m210 not both} \
+ %{mlittle-endian:%ethe m210 does not have little endian support} \
+ -D__M210__} \
+%{!mbig-endian: -D__MCORELE__} \
+%{!m210: -D__M340__} \
+"
+/* If -m4align is ever re-enabled then add this line to the definition of CPP_SPEC
+ %{!m4align:-D__MCORE_ALIGN_8__} %{m4align:-D__MCORE__ALIGN_4__} */
+
+/* We don't have a -lg library, so don't put it in the list. */
+#undef LIB_SPEC
+#define LIB_SPEC "%{!shared: %{!p:%{!pg:-lc}}%{p:-lc_p}%{pg:-lc_p}}"
+
+#undef ASM_SPEC
+#define ASM_SPEC "%{mbig-endian:-EB} %{m210:-cpu=210 -EB}"
+
+#undef LINK_SPEC
+#define LINK_SPEC "%{mbig-endian:-EB} %{m210:-EB} -X"
+
+/* Can only count on 16 bits of availability; change to long would affect
+ many architecture specific files (other architectures...). */
+extern int target_flags;
+
+#define HARDLIT_BIT (1 << 0) /* Build in-line literals using 2 insns */
+#define ALIGN8_BIT (1 << 1) /* Max alignment goes to 8 instead of 4 */
+#define DIV_BIT (1 << 2) /* Generate divide instructions */
+#define RELAX_IMM_BIT (1 << 3) /* Arbitrary immediates in and, or, tst */
+#define W_FIELD_BIT (1 << 4) /* Generate bit insv/extv using SImode */
+#define OVERALIGN_FUNC_BIT (1 << 5) /* Align functions to 4 byte boundary */
+#define CGDATA_BIT (1 << 6) /* Generate callgraph data */
+#define SLOW_BYTES_BIT (1 << 7) /* Slow byte access */
+#define LITTLE_END_BIT (1 << 8) /* Generate little endian code */
+#define M340_BIT (1 << 9) /* Generate code for the m340 */
+
+#define TARGET_DEFAULT \
+ (HARDLIT_BIT | ALIGN8_BIT | DIV_BIT | RELAX_IMM_BIT | M340_BIT | LITTLE_END_BIT)
+
+#ifndef MULTILIB_DEFAULTS
+#define MULTILIB_DEFAULTS { "mlittle-endian", "m340" }
+#endif
+
+#define TARGET_HARDLIT (target_flags & HARDLIT_BIT)
+/* The ability to have 4 byte alignment is being suppressed for now.
+ If this ability is reenabled, you must enable the definition below
+ *and* edit t-mcore to enable multilibs for 4 byte alignment code. */
+#if 0
+#define TARGET_8ALIGN (target_flags & ALIGN8_BIT)
+#else
+#define TARGET_8ALIGN 1
+#endif
+#define TARGET_DIV (target_flags & DIV_BIT)
+#define TARGET_RELAX_IMM (target_flags & RELAX_IMM_BIT)
+#define TARGET_W_FIELD (target_flags & W_FIELD_BIT)
+#define TARGET_OVERALIGN_FUNC (target_flags & OVERALIGN_FUNC_BIT)
+#define TARGET_CG_DATA (target_flags & CGDATA_BIT)
+#define TARGET_SLOW_BYTES (target_flags & SLOW_BYTES_BIT)
+#define TARGET_LITTLE_END (target_flags & LITTLE_END_BIT)
+#define TARGET_M340 (target_flags & M340_BIT)
+
+
+#define TARGET_SWITCHES \
+{ {"hardlit", HARDLIT_BIT, \
+ "Inline constants if it can be done in 2 insns or less" }, \
+ {"no-hardlit", - HARDLIT_BIT, \
+   "Inline constants if it only takes 1 instruction" }, \
+ {"4align", - ALIGN8_BIT, \
+ "Set maximum alignment to 4" }, \
+ {"8align", ALIGN8_BIT, \
+ "Set maximum alignment to 8" }, \
+ {"div", DIV_BIT, \
+ "" }, \
+ {"no-div", - DIV_BIT, \
+ "Do not use the divide instruction" }, \
+ {"relax-immediates", RELAX_IMM_BIT, \
+ "" }, \
+ {"no-relax-immediates", - RELAX_IMM_BIT, \
+   "Do not allow arbitrarily sized immediates in bit operations" }, \
+ {"wide-bitfields", W_FIELD_BIT, \
+ "Always treat bitfield as int-sized" }, \
+ {"no-wide-bitfields", - W_FIELD_BIT, \
+ "" }, \
+ {"4byte-functions", OVERALIGN_FUNC_BIT, \
+ "Force functions to be aligned to a 4 byte boundary" }, \
+ {"no-4byte-functions", - OVERALIGN_FUNC_BIT, \
+ "Force functions to be aligned to a 2 byte boundary" }, \
+ {"callgraph-data", CGDATA_BIT, \
+ "Emit call graph information" }, \
+ {"no-callgraph-data", - CGDATA_BIT, \
+ "" }, \
+ {"slow-bytes", SLOW_BYTES_BIT, \
+ "Prefer word accesses over byte accesses" }, \
+ {"no-slow-bytes", - SLOW_BYTES_BIT, \
+ "" }, \
+ { "no-lsim", 0, "" }, \
+ {"little-endian", LITTLE_END_BIT, \
+ "Generate little endian code" }, \
+ {"big-endian", - LITTLE_END_BIT, \
+ "" }, \
+ {"210", - M340_BIT, \
+ "" }, \
+ {"340", M340_BIT, \
+ "Generate code for the M*Core M340" }, \
+ {"", TARGET_DEFAULT, \
+ "" } \
+}
+
+extern char * mcore_current_function_name;
+
+/* Target specific options (as opposed to the switches above). */
+extern const char * mcore_stack_increment_string;
+
+#define TARGET_OPTIONS \
+{ \
+ {"stack-increment=", & mcore_stack_increment_string, \
+ "Maximum amount for a single stack increment operation"} \
+}
+
+/* The MCore ABI says that bitfields are unsigned by default. */
+/* The EPOC C++ environment does not support exceptions. */
+#define CC1_SPEC "-funsigned-bitfields %{!DIN_GCC:-fno-rtti} %{!DIN_GCC:-fno-exceptions}"
+
+/* Which options we default to specific settings when -O* is given;
+   the user can subsequently override these settings.
+
+ Omitting the frame pointer is a very good idea on the MCore.
+ Scheduling isn't worth anything on the current MCore implementation. */
+#define OPTIMIZATION_OPTIONS(LEVEL,SIZE) \
+{ \
+ if (LEVEL) \
+ { \
+ flag_no_function_cse = 1; \
+ flag_omit_frame_pointer = 1; \
+ \
+ if (LEVEL >= 2) \
+ { \
+ flag_caller_saves = 0; \
+ flag_schedule_insns = 0; \
+ flag_schedule_insns_after_reload = 0; \
+ } \
+ } \
+ if (SIZE) \
+ { \
+ target_flags &= ~ HARDLIT_BIT; \
+ } \
+}
+
+/* What options are we going to force to specific settings,
+ regardless of what the user thought he wanted.
+ We also use this for some post-processing of options. */
+#define OVERRIDE_OPTIONS mcore_override_options ()
+
+/* Target machine storage Layout. */
+
+/* Define to use software floating point emulator for REAL_ARITHMETIC and
+ decimal <-> binary conversion. */
+#define REAL_ARITHMETIC
+
+#define PROMOTE_MODE(MODE,UNSIGNEDP,TYPE) \
+ if (GET_MODE_CLASS (MODE) == MODE_INT \
+ && GET_MODE_SIZE (MODE) < UNITS_PER_WORD) \
+ { \
+ (MODE) = SImode; \
+ (UNSIGNEDP) = 1; \
+ }
+
+#define PROMOTE_FUNCTION_ARGS
+
+#define PROMOTE_FUNCTION_RETURN
+
+/* Define this if most significant bit is lowest numbered
+ in instructions that operate on numbered bit-fields. */
+#define BITS_BIG_ENDIAN 0
+
+/* Define this if most significant byte of a word is the lowest numbered. */
+#define BYTES_BIG_ENDIAN (! TARGET_LITTLE_END)
+
+/* Define this if most significant word of a multiword number is the lowest
+ numbered. */
+#define WORDS_BIG_ENDIAN (! TARGET_LITTLE_END)
+
+#define LIBGCC2_WORDS_BIG_ENDIAN 1
+#ifdef __MCORELE__
+#undef LIBGCC2_WORDS_BIG_ENDIAN
+#define LIBGCC2_WORDS_BIG_ENDIAN 0
+#endif
+
+/* Number of bits in an addressable storage unit. */
+#define BITS_PER_UNIT 8
+
+/* Width in bits of a "word", which is the contents of a machine register.
+ Note that this is not necessarily the width of data type `int';
+ if using 16-bit ints on a 68000, this would still be 32.
+ But on a machine with 16-bit registers, this would be 16. */
+#define BITS_PER_WORD 32
+#define MAX_BITS_PER_WORD 32
+
+/* Width of a word, in units (bytes). */
+#define UNITS_PER_WORD 4
+
+/* Width in bits of a pointer.
+ See also the macro `Pmode' defined below. */
+#define POINTER_SIZE 32
+
+/* A C expression for the size in bits of the type `long long' on the
+ target machine. If you don't define this, the default is two
+ words. */
+#define LONG_LONG_TYPE_SIZE 64
+
+/* The size of the boolean type in C++.  */
+#define BOOL_TYPE_SIZE 8
+
+/* Allocation boundary (in *bits*) for storing arguments in argument list. */
+#define PARM_BOUNDARY 32
+
+/* Doubles must be aligned to an 8 byte boundary.  */
+#define FUNCTION_ARG_BOUNDARY(MODE, TYPE) \
+ ((MODE != BLKmode && (GET_MODE_SIZE (MODE) == 8)) \
+ ? BIGGEST_ALIGNMENT : PARM_BOUNDARY)
+
+/* Boundary (in *bits*) on which stack pointer should be aligned. */
+#define STACK_BOUNDARY (TARGET_8ALIGN ? 64 : 32)
+
+/* Largest increment in UNITS we allow the stack to grow in a single operation. */
+extern int mcore_stack_increment;
+#define STACK_UNITS_MAXSTEP 4096
+
+/* Allocation boundary (in *bits*) for the code of a function. */
+#define FUNCTION_BOUNDARY ((TARGET_OVERALIGN_FUNC) ? 32 : 16)
+
+/* Alignment of field after `int : 0' in a structure. */
+#define EMPTY_FIELD_BOUNDARY 32
+
+/* No data type wants to be aligned rounder than this. */
+#define BIGGEST_ALIGNMENT (TARGET_8ALIGN ? 64 : 32)
+
+/* The best alignment to use in cases where we have a choice. */
+#define FASTEST_ALIGNMENT 32
+
+/* Every structure's size must be a multiple of 8 bits.  */
+#define STRUCTURE_SIZE_BOUNDARY 8
+
+/* Look at the fundamental type that is used for a bitfield and use
+ that to impose alignment on the enclosing structure.
+ struct s {int a:8}; should have same alignment as "int", not "char". */
+#define PCC_BITFIELD_TYPE_MATTERS 1
+
+/* Largest integer machine mode for structures. If undefined, the default
+ is GET_MODE_SIZE(DImode). */
+#define MAX_FIXED_MODE_SIZE 32
+
+/* Make strings word-aligned so strcpy from constants will be faster. */
+#define CONSTANT_ALIGNMENT(EXP, ALIGN) \
+ ((TREE_CODE (EXP) == STRING_CST \
+ && (ALIGN) < FASTEST_ALIGNMENT) \
+ ? FASTEST_ALIGNMENT : (ALIGN))
+
+/* Make arrays of chars word-aligned for the same reasons. */
+#define DATA_ALIGNMENT(TYPE, ALIGN) \
+ (TREE_CODE (TYPE) == ARRAY_TYPE \
+ && TYPE_MODE (TREE_TYPE (TYPE)) == QImode \
+ && (ALIGN) < FASTEST_ALIGNMENT ? FASTEST_ALIGNMENT : (ALIGN))
+
+/* Set this nonzero if move instructions will actually fail to work
+ when given unaligned data. */
+#define STRICT_ALIGNMENT 1
+
+/* Standard register usage. */
+
+/* Register allocation for our first guess
+
+ r0 stack pointer
+ r1 scratch, target reg for xtrb?
+ r2-r7 arguments.
+ r8-r14 call saved
+ r15 link register
+ ap arg pointer (doesn't really exist, always eliminated)
+ c c bit
+ fp frame pointer (doesn't really exist, always eliminated)
+ x19 two control registers */
+
+/* Number of actual hardware registers.
+ The hardware registers are assigned numbers for the compiler
+ from 0 to just below FIRST_PSEUDO_REGISTER.
+ All registers that the compiler knows about must be given numbers,
+ even those that are not normally considered general registers.
+
+ MCore has 16 integer registers and 2 control registers + the arg
+ pointer. */
+
+#define FIRST_PSEUDO_REGISTER 20
+
+#define R1_REG 1 /* where literals are forced */
+#define LK_REG 15 /* overloaded on general register */
+#define AP_REG 16 /* fake arg pointer register */
+/* RBE: mcore.md depends on CC_REG being set to 17 */
+#define CC_REG 17 /* cant name it C_REG */
+#define FP_REG 18 /* fake frame pointer register */
+
+/* Specify the registers used for certain standard purposes.
+ The values of these macros are register numbers. */
+
+
+#undef PC_REGNUM /* Define this if the program counter is overloaded on a register. */
+#define STACK_POINTER_REGNUM 0 /* Register to use for pushing function arguments. */
+#define FRAME_POINTER_REGNUM 8 /* When we need FP, use r8. */
+
+/* The assembler's names for the registers. RFP need not always be used as
+ the Real framepointer; it can also be used as a normal general register.
+ Note that the name `fp' is horribly misleading since `fp' is in fact only
+ the argument-and-return-context pointer. */
+#define REGISTER_NAMES \
+{ \
+ "sp", "r1", "r2", "r3", "r4", "r5", "r6", "r7", \
+ "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15", \
+ "apvirtual", "c", "fpvirtual", "x19" \
+}
+
+/* 1 for registers that have pervasive standard uses
+ and are not available for the register allocator. */
+#define FIXED_REGISTERS \
+ /* r0 r1 r2 r3 r4 r5 r6 r7 r8 r9 r10 r11 r12 r13 r14 r15 ap c fp x19 */ \
+ { 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1}
+
+/* 1 for registers not available across function calls.
+ These must include the FIXED_REGISTERS and also any
+ registers that can be used without being saved.
+ The latter must include the registers where values are returned
+ and the register where structure-value addresses are passed.
+ Aside from that, you can include as many other registers as you like. */
+
+/* RBE: r15 {link register} not available across calls,
+ * But we don't mark it that way here... */
+#define CALL_USED_REGISTERS \
+ /* r0 r1 r2 r3 r4 r5 r6 r7 r8 r9 r10 r11 r12 r13 r14 r15 ap c fp x19 */ \
+ { 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1}
+
+/* The order in which registers should be allocated.  */
+#define REG_ALLOC_ORDER \
+ /* r7 r6 r5 r4 r3 r2 r15 r14 r13 r12 r11 r10 r9 r8 r1 r0 ap c fp x19*/ \
+ { 7, 6, 5, 4, 3, 2, 15, 14, 13, 12, 11, 10, 9, 8, 1, 0, 16, 17, 18, 19}
+
+/* Return number of consecutive hard regs needed starting at reg REGNO
+ to hold something of mode MODE.
+ This is ordinarily the length in words of a value of mode MODE
+ but can be less for certain modes in special long registers.
+
+   On the MCore regs are UNITS_PER_WORD bytes wide.  */
+#define HARD_REGNO_NREGS(REGNO, MODE) \
+ (((GET_MODE_SIZE (MODE) + UNITS_PER_WORD - 1) / UNITS_PER_WORD))
+
+/* Value is 1 if hard register REGNO can hold a value of machine-mode MODE.
+ We may keep double values in even registers. */
+#define HARD_REGNO_MODE_OK(REGNO, MODE) \
+ ((TARGET_8ALIGN && GET_MODE_SIZE (MODE) > UNITS_PER_WORD) ? (((REGNO) & 1) == 0) : (REGNO < 18))
+
+/* Value is 1 if it is a good idea to tie two pseudo registers
+ when one has mode MODE1 and one has mode MODE2.
+ If HARD_REGNO_MODE_OK could produce different values for MODE1 and MODE2,
+ for any hard reg, then this must be 0 for correct output. */
+#define MODES_TIEABLE_P(MODE1, MODE2) \
+ ((MODE1) == (MODE2) || GET_MODE_CLASS (MODE1) == GET_MODE_CLASS (MODE2))
+
+/* Value should be nonzero if functions must have frame pointers.
+ Zero means the frame pointer need not be set up (and parms may be accessed
+ via the stack pointer) in functions that seem suitable. */
+#define FRAME_POINTER_REQUIRED 0
+
+/* Definitions for register eliminations.
+
+ We have two registers that can be eliminated on the MCore. First, the
+ frame pointer register can often be eliminated in favor of the stack
+ pointer register. Secondly, the argument pointer register can always be
+ eliminated; it is replaced with either the stack or frame pointer. */
+
+/* Base register for access to arguments of the function. */
+#define ARG_POINTER_REGNUM 16
+
+/* Register in which the static-chain is passed to a function. */
+#define STATIC_CHAIN_REGNUM 1
+
+/* This is an array of structures. Each structure initializes one pair
+ of eliminable registers. The "from" register number is given first,
+ followed by "to". Eliminations of the same "from" register are listed
+ in order of preference. */
+#define ELIMINABLE_REGS \
+{{ FRAME_POINTER_REGNUM, STACK_POINTER_REGNUM}, \
+ { ARG_POINTER_REGNUM, STACK_POINTER_REGNUM}, \
+ { ARG_POINTER_REGNUM, FRAME_POINTER_REGNUM},}
+
+/* Given FROM and TO register numbers, say whether this elimination
+ is allowed. */
+#define CAN_ELIMINATE(FROM, TO) \
+ (!((FROM) == FRAME_POINTER_REGNUM && FRAME_POINTER_REQUIRED))
+
+/* Define the offset between two registers, one to be eliminated, and the other
+ its replacement, at the start of a routine. */
+#define INITIAL_ELIMINATION_OFFSET(FROM, TO, OFFSET) \
+ OFFSET = mcore_initial_elimination_offset (FROM, TO)
+
+/* Place that structure value return address is placed. */
+#define STRUCT_VALUE 0
+
+/* Define the classes of registers for register constraints in the
+ machine description. Also define ranges of constants.
+
+ One of the classes must always be named ALL_REGS and include all hard regs.
+ If there is more than one class, another class must be named NO_REGS
+ and contain no registers.
+
+ The name GENERAL_REGS must be the name of a class (or an alias for
+ another name such as ALL_REGS). This is the class of registers
+ that is allowed by "g" or "r" in a register constraint.
+ Also, registers outside this class are allocated only when
+ instructions express preferences for them.
+
+ The classes must be numbered in nondecreasing order; that is,
+ a larger-numbered class must never be contained completely
+ in a smaller-numbered class.
+
+ For any two classes, it is very desirable that there be another
+ class that represents their union. */
+
+/* The MCore has only general registers. There are
+ also some special purpose registers: the T bit register, the
+ procedure Link and the Count Registers */
+enum reg_class
+{
+ NO_REGS,
+ ONLYR1_REGS,
+ LRW_REGS,
+ GENERAL_REGS,
+ C_REGS,
+ ALL_REGS,
+ LIM_REG_CLASSES
+};
+
+#define N_REG_CLASSES (int) LIM_REG_CLASSES
+
+/* Give names of register classes as strings for dump file. */
+#define REG_CLASS_NAMES \
+{ \
+ "NO_REGS", \
+ "ONLYR1_REGS", \
+ "LRW_REGS", \
+ "GENERAL_REGS", \
+ "C_REGS", \
+ "ALL_REGS", \
+}
+
+/* Define which registers fit in which classes.
+ This is an initializer for a vector of HARD_REG_SET
+ of length N_REG_CLASSES. */
+
+/* ??? STACK_POINTER_REGNUM should be excluded from LRW_REGS. */
+#define REG_CLASS_CONTENTS \
+{ \
+ {0x000000}, /* NO_REGS */ \
+ {0x000002}, /* ONLYR1_REGS */ \
+ {0x007FFE}, /* LRW_REGS */ \
+ {0x01FFFF}, /* GENERAL_REGS */ \
+ {0x020000}, /* C_REGS */ \
+ {0x0FFFFF} /* ALL_REGS */ \
+}
+
+/* The same information, inverted:
+ Return the class number of the smallest class containing
+ reg number REGNO. This could be a conditional expression
+ or could index an array. */
+
+extern int regno_reg_class[];
+#define REGNO_REG_CLASS(REGNO) regno_reg_class[REGNO]
+
+/* When defined, the compiler allows registers explicitly used in the
+ rtl to be used as spill registers but prevents the compiler from
+ extending the lifetime of these registers. */
+#define SMALL_REGISTER_CLASSES 1
+
+/* The class value for index registers, and the one for base regs. */
+#define INDEX_REG_CLASS NO_REGS
+#define BASE_REG_CLASS GENERAL_REGS
+
+/* Get reg_class from a letter such as appears in the machine
+ description. */
+extern enum reg_class reg_class_from_letter[];
+
+#define REG_CLASS_FROM_LETTER(C) \
+ ( (C) >= 'a' && (C) <= 'z' ? reg_class_from_letter[(C) - 'a'] : NO_REGS )
+
+/* The letters I, J, K, L, M, N, O, and P in a register constraint string
+ can be used to stand for particular ranges of immediate operands.
+ This macro defines what the ranges are.
+ C is the letter, and VALUE is a constant value.
+ Return 1 if VALUE is in the range specified by C.
+ I: loadable by movi (0..127)
+ J: arithmetic operand 1..32
+ K: shift operand 0..31
+ L: negative arithmetic operand -1..-32
+ M: powers of two, constants loadable by bgeni
+ N: powers of two minus 1, constants loadable by bmaski, including -1
+ O: allowed by cmov with two constants +/- 1 of each other
+ P: values we will generate 'inline' -- without an 'lrw'
+
+ Others defined for use after reload
+ Q: constant 1
+ R: a label
+ S: 0/1/2 cleared bits out of 32 [for bclri's]
+ T: 2 set bits out of 32 [for bseti's]
+ U: constant 0
+ xxxS: 1 cleared bit out of 32 (complement of power of 2). for bclri
+ xxxT: 2 cleared bits out of 32. for pairs of bclris. */
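+/* For example, 64 satisfies both I and M, 63 satisfies I and N, and -5
+   satisfies L.  */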
+#define CONST_OK_FOR_I(VALUE) (((int)(VALUE)) >= 0 && ((int)(VALUE)) <= 0x7f)
+#define CONST_OK_FOR_J(VALUE) (((int)(VALUE)) > 0 && ((int)(VALUE)) <= 32)
+#define CONST_OK_FOR_L(VALUE) (((int)(VALUE)) < 0 && ((int)(VALUE)) >= -32)
+#define CONST_OK_FOR_K(VALUE) (((int)(VALUE)) >= 0 && ((int)(VALUE)) <= 31)
+#define CONST_OK_FOR_M(VALUE) (exact_log2 (VALUE) >= 0)
+#define CONST_OK_FOR_N(VALUE) (((int)(VALUE)) == -1 || exact_log2 ((VALUE) + 1) >= 0)
+#define CONST_OK_FOR_O(VALUE) (CONST_OK_FOR_I(VALUE) || \
+ CONST_OK_FOR_M(VALUE) || \
+ CONST_OK_FOR_N(VALUE) || \
+ CONST_OK_FOR_M((int)(VALUE) - 1) || \
+ CONST_OK_FOR_N((int)(VALUE) + 1))
+
+#define CONST_OK_FOR_P(VALUE) (mcore_const_ok_for_inline (VALUE))
+
+#define CONST_OK_FOR_LETTER_P(VALUE, C) \
+ ((C) == 'I' ? CONST_OK_FOR_I (VALUE) \
+ : (C) == 'J' ? CONST_OK_FOR_J (VALUE) \
+ : (C) == 'L' ? CONST_OK_FOR_L (VALUE) \
+ : (C) == 'K' ? CONST_OK_FOR_K (VALUE) \
+ : (C) == 'M' ? CONST_OK_FOR_M (VALUE) \
+ : (C) == 'N' ? CONST_OK_FOR_N (VALUE) \
+ : (C) == 'P' ? CONST_OK_FOR_P (VALUE) \
+ : (C) == 'O' ? CONST_OK_FOR_O (VALUE) \
+ : 0)
+
+/* Similar, but for floating constants, and defining letters G and H.
+ Here VALUE is the CONST_DOUBLE rtx itself. */
+#define CONST_DOUBLE_OK_FOR_LETTER_P(VALUE, C) \
+ ((C) == 'G' ? CONST_OK_FOR_I (CONST_DOUBLE_HIGH (VALUE)) \
+ && CONST_OK_FOR_I (CONST_DOUBLE_LOW (VALUE)) \
+ : 0)
+
+/* Letters in the range `Q' through `U' in a register constraint string
+ may be defined in a machine-dependent fashion to stand for arbitrary
+ operand types. */
+#define EXTRA_CONSTRAINT(OP, C) \
+ ((C) == 'R' ? (GET_CODE (OP) == MEM \
+ && GET_CODE (XEXP (OP, 0)) == LABEL_REF) \
+ : (C) == 'S' ? (GET_CODE (OP) == CONST_INT \
+ && mcore_num_zeros (INTVAL (OP)) <= 2) \
+ : (C) == 'T' ? (GET_CODE (OP) == CONST_INT \
+ && mcore_num_ones (INTVAL (OP)) == 2) \
+ : (C) == 'Q' ? (GET_CODE (OP) == CONST_INT \
+ && INTVAL(OP) == 1) \
+ : (C) == 'U' ? (GET_CODE (OP) == CONST_INT \
+ && INTVAL(OP) == 0) \
+ : 0)
+
+/* Given an rtx X being reloaded into a reg required to be
+ in class CLASS, return the class of reg to actually use.
+ In general this is just CLASS; but on some machines
+ in some cases it is preferable to use a more restrictive class. */
+#define PREFERRED_RELOAD_CLASS(X, CLASS) mcore_reload_class (X, CLASS)
+
+/* Return the register class of a scratch register needed to copy IN into
+ or out of a register in CLASS in MODE. If it can be done directly,
+ NO_REGS is returned. */
+#define SECONDARY_RELOAD_CLASS(CLASS, MODE, X) NO_REGS
+
+/* Return the maximum number of consecutive registers
+ needed to represent mode MODE in a register of class CLASS.
+
+ On MCore this is the size of MODE in words. */
+#define CLASS_MAX_NREGS(CLASS, MODE) \
+ (ROUND_ADVANCE (GET_MODE_SIZE (MODE)))
+
+/* Stack layout; function entry, exit and calling. */
+
+/* Define the number of registers that can hold parameters.
+ These two macros are used only in other macro definitions below. */
+#define NPARM_REGS 6
+#define FIRST_PARM_REG 2
+#define FIRST_RET_REG 2
+
+/* Define this if pushing a word on the stack
+ makes the stack pointer a smaller address. */
+#define STACK_GROWS_DOWNWARD
+
+/* Define this if the nominal address of the stack frame
+ is at the high-address end of the local variables;
+ that is, each additional local variable allocated
+ goes at a more negative offset in the frame. */
+/* We don't define this, because the MCore does not support
+ addresses with negative offsets. */
+/* #define FRAME_GROWS_DOWNWARD */
+
+/* Offset within stack frame to start allocating local variables at.
+ If FRAME_GROWS_DOWNWARD, this is the offset to the END of the
+ first local allocated. Otherwise, it is the offset to the BEGINNING
+ of the first local allocated. */
+#define STARTING_FRAME_OFFSET 0
+
+/* If defined, the maximum amount of space required for outgoing arguments
+ will be computed and placed into the variable
+ `current_function_outgoing_args_size'. No space will be pushed
+ onto the stack for each call; instead, the function prologue should
+ increase the stack frame size by this amount. */
+#define ACCUMULATE_OUTGOING_ARGS
+
+/* Offset of first parameter from the argument pointer register value. */
+#define FIRST_PARM_OFFSET(FNDECL) 0
+
+/* Value is the number of bytes of arguments automatically
+ popped when returning from a subroutine call.
+ FUNTYPE is the data type of the function (as a tree),
+ or for a library call it is an identifier node for the subroutine name.
+ SIZE is the number of bytes of arguments passed on the stack.
+
+ On the MCore, the callee does not pop any of its arguments that were passed
+ on the stack. */
+#define RETURN_POPS_ARGS(FUNDECL,FUNTYPE,SIZE) 0
+
+/* Define how to find the value returned by a function.
+ VALTYPE is the data type of the value (as a tree).
+ If the precise function being called is known, FUNC is its FUNCTION_DECL;
+ otherwise, FUNC is 0. */
+#define FUNCTION_VALUE(VALTYPE, FUNC) mcore_function_value (VALTYPE, FUNC)
+
+/* Don't default to pcc-struct-return, because gcc is the only compiler, and
+ we want to retain compatibility with older gcc versions. */
+#define DEFAULT_PCC_STRUCT_RETURN 0
+
+/* How we are going to return big values.  */
+/*
+ * #define RETURN_IN_MEMORY(TYPE) \
+ * (TYPE_MODE (TYPE) == BLKmode \
+ * || ((TREE_CODE (TYPE) == RECORD_TYPE || TREE_CODE(TYPE) == UNION_TYPE) \
+ * && !(TYPE_MODE (TYPE) == SImode \
+ * || (TYPE_MODE (TYPE) == BLKmode \
+ * && TYPE_ALIGN (TYPE) == BITS_PER_WORD \
+ * && int_size_in_bytes (TYPE) == UNITS_PER_WORD))))
+ */
+
+
+/* Return values larger than two words go in memory.  */
+#define RETURN_IN_MEMORY(TYPE) (int_size_in_bytes (TYPE) > 2 * UNITS_PER_WORD)
+
+/* Define how to find the value returned by a library function
+ assuming the value has mode MODE. */
+#define LIBCALL_VALUE(MODE) gen_rtx (REG, MODE, FIRST_RET_REG)
+
+/* 1 if N is a possible register number for a function value.
+   On the MCore, only r2 (FIRST_RET_REG) can return results. */
+#define FUNCTION_VALUE_REGNO_P(REGNO) ((REGNO) == FIRST_RET_REG)
+
+#define MUST_PASS_IN_STACK(MODE,TYPE) \
+ mcore_must_pass_on_stack (MODE, TYPE)
+
+/* 1 if N is a possible register number for function argument passing. */
+#define FUNCTION_ARG_REGNO_P(REGNO) \
+ ((REGNO) >= FIRST_PARM_REG && (REGNO) < (NPARM_REGS + FIRST_PARM_REG))
+
+/* Define a data type for recording info about an argument list
+ during the scan of that argument list. This data type should
+ hold all necessary information about the function itself
+ and about the args processed so far, enough to enable macros
+ such as FUNCTION_ARG to determine where the next arg should go.
+
+ On MCore, this is a single integer, which is a number of words
+ of arguments scanned so far (including the invisible argument,
+ if any, which holds the structure-value-address).
+   Thus NPARM_REGS or more means all following args should go on the stack. */
+#define CUMULATIVE_ARGS int
+
+#define ROUND_ADVANCE(SIZE) \
+ ((SIZE + UNITS_PER_WORD - 1) / UNITS_PER_WORD)
+
+/* Round a register number up to a proper boundary for an arg of mode
+ MODE.
+
+ We round to an even reg for things larger than a word. */
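+/* For example, with TARGET_8ALIGN a DImode argument whose register number
+   would otherwise be 3 is rounded up to 4; word sized arguments are left
+   where they are.  */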
+#define ROUND_REG(X, MODE) \
+ ((TARGET_8ALIGN \
+ && GET_MODE_UNIT_SIZE ((MODE)) > UNITS_PER_WORD) \
+ ? ((X) + ((X) & 1)) : (X))
+
+
+/* Initialize a variable CUM of type CUMULATIVE_ARGS
+ for a call to a function whose data type is FNTYPE.
+ For a library call, FNTYPE is 0.
+
+ On MCore, the offset always starts at 0: the first parm reg is always
+ the same reg. */
+#define INIT_CUMULATIVE_ARGS(CUM, FNTYPE, LIBNAME, INDIRECT) \
+ ((CUM) = 0)
+
+/* Update the data in CUM to advance over an argument
+ of mode MODE and data type TYPE.
+ (TYPE is null for libcalls where that information may not be
+ available.) */
+#define FUNCTION_ARG_ADVANCE(CUM, MODE, TYPE, NAMED) \
+ ((CUM) = (ROUND_REG ((CUM), (MODE)) \
+ + ((NAMED) * mcore_num_arg_regs (MODE, TYPE)))) \
+
+/* Define where to put the arguments to a function. */
+#define FUNCTION_ARG(CUM, MODE, TYPE, NAMED) \
+ mcore_function_arg (CUM, MODE, TYPE, NAMED)
+
+/* A C expression that indicates when an argument must be passed by
+ reference. If nonzero for an argument, a copy of that argument is
+ made in memory and a pointer to the argument is passed instead of
+ the argument itself. The pointer is passed in whatever way is
+ appropriate for passing a pointer to that type. */
+#define FUNCTION_ARG_PASS_BY_REFERENCE(CUM, MODE, TYPE, NAMED) \
+ MUST_PASS_IN_STACK (MODE, TYPE)
+
+/* For an arg passed partly in registers and partly in memory,
+ this is the number of registers used.
+ For args passed entirely in registers or entirely in memory, zero.
+ Any arg that starts in the first NPARM_REGS regs but won't entirely
+ fit in them needs partial registers on the MCore. */
+#define FUNCTION_ARG_PARTIAL_NREGS(CUM, MODE, TYPE, NAMED) \
+ mcore_function_arg_partial_nregs (CUM, MODE, TYPE, NAMED)
+
+/* Perform any actions needed for a function that is receiving a
+ variable number of arguments. */
+#define SETUP_INCOMING_VARARGS(ASF, MODE, TYPE, PAS, ST) \
+ mcore_setup_incoming_varargs (ASF, MODE, TYPE, & PAS)
+
+/* Call the function profiler with a given profile label. */
+#define FUNCTION_PROFILER(STREAM,LABELNO) \
+{ \
+ fprintf (STREAM, " trap 1\n"); \
+ fprintf (STREAM, " .align 2\n"); \
+ fprintf (STREAM, " .long LP%d\n", (LABELNO)); \
+}
+
+/* EXIT_IGNORE_STACK should be nonzero if, when returning from a function,
+ the stack pointer does not matter. The value is tested only in
+ functions that have frame pointers.
+ No definition is equivalent to always zero. */
+#define EXIT_IGNORE_STACK 0
+
+/* Output assembler code for a block containing the constant parts
+ of a trampoline, leaving space for the variable parts.
+
+   On the MCore, the trampoline looks like:
+ lrw r1, function
+ lrw r13, area
+ jmp r13
+ or r0, r0
+ .literals */
+#define TRAMPOLINE_TEMPLATE(FILE) \
+{ \
+ fprintf ((FILE), " .short 0x7102\n"); \
+ fprintf ((FILE), " .short 0x7d02\n"); \
+ fprintf ((FILE), " .short 0x00cd\n"); \
+ fprintf ((FILE), " .short 0x1e00\n"); \
+ fprintf ((FILE), " .long 0\n"); \
+ fprintf ((FILE), " .long 0\n"); \
+}
+
+/* Length in units of the trampoline for entering a nested function. */
+#define TRAMPOLINE_SIZE 12
+
+/* Alignment required for a trampoline in units. */
+#define TRAMPOLINE_ALIGN 4
+
+/* Emit RTL insns to initialize the variable parts of a trampoline.
+ FNADDR is an RTX for the address of the function's pure code.
+ CXT is an RTX for the static chain value for the function. */
+#define INITIALIZE_TRAMPOLINE(TRAMP, FNADDR, CXT) \
+{ \
+ emit_move_insn (gen_rtx (MEM, SImode, plus_constant ((TRAMP), 8)), \
+ (CXT)); \
+ emit_move_insn (gen_rtx (MEM, SImode, plus_constant ((TRAMP), 12)), \
+ (FNADDR)); \
+}
+
+/* Macros to check register numbers against specific register classes. */
+
+/* These assume that REGNO is a hard or pseudo reg number.
+ They give nonzero only if REGNO is a hard reg of the suitable class
+ or a pseudo reg currently allocated to a suitable hard reg.
+ Since they use reg_renumber, they are safe only once reg_renumber
+ has been allocated, which happens in local-alloc.c. */
+#define REGNO_OK_FOR_BASE_P(REGNO) \
+ ((REGNO) < AP_REG || (unsigned) reg_renumber[(REGNO)] < AP_REG)
+
+#define REGNO_OK_FOR_INDEX_P(REGNO) 0
+
+/* Maximum number of registers that can appear in a valid memory
+ address. */
+#define MAX_REGS_PER_ADDRESS 1
+
+/* Recognize any constant value that is a valid address. */
+#define CONSTANT_ADDRESS_P(X) (GET_CODE (X) == LABEL_REF)
+
+/* Nonzero if the constant value X is a legitimate general operand.
+ It is given that X satisfies CONSTANT_P or is a CONST_DOUBLE.
+
+ On the MCore, allow anything but a double. */
+#define LEGITIMATE_CONSTANT_P(X) (GET_CODE(X) != CONST_DOUBLE)
+
+#define LEGITIMIZE_ADDRESS(X, OLDX, MODE, WIN)
+/* The macros REG_OK_FOR..._P assume that the arg is a REG rtx
+ and check its validity for a certain class.
+ We have two alternate definitions for each of them.
+ The usual definition accepts all pseudo regs; the other rejects
+ them unless they have been allocated suitable hard regs.
+ The symbol REG_OK_STRICT causes the latter definition to be used. */
+#ifndef REG_OK_STRICT
+
+/* Nonzero if X is a hard reg that can be used as a base reg
+ or if it is a pseudo reg. */
+#define REG_OK_FOR_BASE_P(X) \
+ (REGNO (X) <= 16 || REGNO (X) >= FIRST_PSEUDO_REGISTER)
+
+/* Nonzero if X is a hard reg that can be used as an index
+ or if it is a pseudo reg. */
+#define REG_OK_FOR_INDEX_P(X) 0
+
+#else
+
+/* Nonzero if X is a hard reg that can be used as a base reg. */
+#define REG_OK_FOR_BASE_P(X) \
+ REGNO_OK_FOR_BASE_P (REGNO (X))
+
+/* Nonzero if X is a hard reg that can be used as an index. */
+#define REG_OK_FOR_INDEX_P(X) 0
+
+#endif
+/* GO_IF_LEGITIMATE_ADDRESS recognizes an RTL expression
+ that is a valid memory address for an instruction.
+ The MODE argument is the machine mode for the MEM expression
+ that wants to use this address.
+
+ The other macros defined here are used only in GO_IF_LEGITIMATE_ADDRESS. */
+#define BASE_REGISTER_RTX_P(X) \
+ (GET_CODE (X) == REG && REG_OK_FOR_BASE_P (X))
+
+#define INDEX_REGISTER_RTX_P(X) \
+ (GET_CODE (X) == REG && REG_OK_FOR_INDEX_P (X))
+
+
+/* Jump to LABEL if X is a valid address RTX. This must also take
+ REG_OK_STRICT into account when deciding about valid registers, but it uses
+ the above macros so we are in luck.
+
+ Allow REG
+ REG+disp
+
+ A legitimate index for a QI is 0..15, for HI is 0..30, for SI is 0..60,
+ and for DI is 0..56 because we use two SI loads, etc. */
+#define GO_IF_LEGITIMATE_INDEX(MODE, REGNO, OP, LABEL) \
+ do \
+ { \
+ if (GET_CODE (OP) == CONST_INT) \
+ { \
+ if (GET_MODE_SIZE (MODE) >= 4 \
+ && (((unsigned)INTVAL (OP)) % 4) == 0 \
+ && ((unsigned)INTVAL (OP)) <= 64 - GET_MODE_SIZE (MODE)) \
+ goto LABEL; \
+ if (GET_MODE_SIZE (MODE) == 2 \
+ && (((unsigned)INTVAL (OP)) % 2) == 0 \
+ && ((unsigned)INTVAL (OP)) <= 30) \
+ goto LABEL; \
+ if (GET_MODE_SIZE (MODE) == 1 \
+ && ((unsigned)INTVAL (OP)) <= 15) \
+ goto LABEL; \
+ } \
+ } \
+ while (0)
+
+#define GO_IF_LEGITIMATE_ADDRESS(MODE, X, LABEL) \
+{ \
+ if (BASE_REGISTER_RTX_P (X)) \
+ goto LABEL; \
+ else if (GET_CODE (X) == PLUS || GET_CODE (X) == LO_SUM) \
+ { \
+ rtx xop0 = XEXP (X,0); \
+ rtx xop1 = XEXP (X,1); \
+ if (BASE_REGISTER_RTX_P (xop0)) \
+ GO_IF_LEGITIMATE_INDEX (MODE, REGNO (xop0), xop1, LABEL); \
+ if (BASE_REGISTER_RTX_P (xop1)) \
+ GO_IF_LEGITIMATE_INDEX (MODE, REGNO (xop1), xop0, LABEL); \
+ } \
+}
+
+/* Go to LABEL if ADDR (a legitimate address expression)
+ has an effect that depends on the machine mode it is used for. */
+#define GO_IF_MODE_DEPENDENT_ADDRESS(ADDR,LABEL) \
+{ \
+ if ( GET_CODE (ADDR) == PRE_DEC || GET_CODE (ADDR) == POST_DEC \
+ || GET_CODE (ADDR) == PRE_INC || GET_CODE (ADDR) == POST_INC) \
+ goto LABEL; \
+}
+
+/* Specify the machine mode that this machine uses
+ for the index in the tablejump instruction. */
+#define CASE_VECTOR_MODE SImode
+
+/* Define this if the tablejump instruction expects the table
+ to contain offsets from the address of the table.
+ Do not define this if the table should contain absolute addresses. */
+/* #define CASE_VECTOR_PC_RELATIVE */
+
+/* Specify the tree operation to be used to convert reals to integers. */
+#define IMPLICIT_FIX_EXPR FIX_ROUND_EXPR
+
+/* This is the kind of divide that is easiest to do in the general case. */
+#define EASY_DIV_EXPR TRUNC_DIV_EXPR
+
+/* 'char' is unsigned by default.  */
+#define DEFAULT_SIGNED_CHAR 0
+
+/* The type of size_t is unsigned int.  */
+#define SIZE_TYPE "unsigned int"
+
+/* Don't cse the address of the function being compiled. */
+#define NO_RECURSIVE_FUNCTION_CSE 1
+
+/* Max number of bytes we can move from memory to memory
+ in one reasonably fast instruction. */
+#define MOVE_MAX 4
+
+/* Define if operations between registers always perform the operation
+ on the full register even if a narrower mode is specified. */
+#define WORD_REGISTER_OPERATIONS
+
+/* Define if loading in MODE, an integral mode narrower than BITS_PER_WORD
+ will either zero-extend or sign-extend. The value of this macro should
+ be the code that says which one of the two operations is implicitly
+ done, NIL if none. */
+#define LOAD_EXTEND_OP(MODE) ZERO_EXTEND
+
+/* Nonzero if access to memory by bytes is slow and undesirable. */
+#define SLOW_BYTE_ACCESS TARGET_SLOW_BYTES
+
+/* We assume that the store-condition-codes instructions store 0 for false
+ and some other value for true. This is the value stored for true. */
+#define STORE_FLAG_VALUE 1
+
+/* Immediate shift counts are truncated by the output routines (or was it
+ the assembler?). Shift counts in a register are truncated by ARM. Note
+ that the native compiler puts too large (> 32) immediate shift counts
+ into a register and shifts by the register, letting the ARM decide what
+ to do instead of doing that itself. */
+#define SHIFT_COUNT_TRUNCATED 1
+
+/* All integers have the same format so truncation is easy. */
+#define TRULY_NOOP_TRUNCATION(OUTPREC,INPREC) 1
+
+/* Define this if addresses of constant functions
+ shouldn't be put through pseudo regs where they can be cse'd.
+ Desirable on machines where ordinary constants are expensive
+ but a CALL with constant address is cheap. */
+/* why is this defined??? -- dac */
+#define NO_FUNCTION_CSE 1
+
+/* Chars and shorts should be passed as ints. */
+#define PROMOTE_PROTOTYPES 1
+
+/* The machine modes of pointers and functions. */
+#define Pmode SImode
+#define FUNCTION_MODE Pmode
+
+/* The relative costs of various types of constants. Note that cse.c defines
+ REG = 1, SUBREG = 2, any node = (2 + sum of subnodes). */
+#define CONST_COSTS(RTX, CODE, OUTER_CODE) \
+ case CONST_INT: \
+ return mcore_const_costs (RTX, OUTER_CODE); \
+ case CONST: \
+ case LABEL_REF: \
+ case SYMBOL_REF: \
+ return 5; \
+ case CONST_DOUBLE: \
+ return 10;
+
+/* Provide the cost of an address calculation.
+ All addressing modes cost the same on the MCore. */
+#define ADDRESS_COST(RTX) 1
+
+/* Provide the cost of an rtl expression. */
+#define RTX_COSTS(X, CODE, OUTER_CODE) \
+ case AND: \
+ return COSTS_N_INSNS (mcore_and_cost (X)); \
+ case IOR: \
+ return COSTS_N_INSNS (mcore_ior_cost (X)); \
+ case DIV: \
+ case UDIV: \
+ case MOD: \
+ case UMOD: \
+ return COSTS_N_INSNS (100); \
+ case FLOAT: \
+ case FIX: \
+ return 100;
+
+/* Compute extra cost of moving data between one register class
+ and another. All register moves are cheap. */
+#define REGISTER_MOVE_COST(SRCCLASS, DSTCLASS) 2
+
+
+/* Provided in ANSI C MCore libraries. */
+#undef HAVE_ATEXIT
+#define HAVE_ATEXIT 1
+
+/* Implicit library calls should use memcpy, not bcopy, etc. */
+#define TARGET_MEM_FUNCTIONS
+
+/* Assembler output control. */
+#define ASM_COMMENT_START "\t//"
+
+#define ASM_APP_ON "// inline asm begin\n"
+#define ASM_APP_OFF "// inline asm end\n"
+
+#define FILE_ASM_OP "\t.file\n"
+
+/* Switch to the text or data segment. */
+#define TEXT_SECTION_ASM_OP "\t.text"
+#define DATA_SECTION_ASM_OP "\t.data"
+
+#undef EXTRA_SECTIONS
+#define EXTRA_SECTIONS in_ctors, in_dtors, SUBTARGET_EXTRA_SECTIONS
+
+#undef EXTRA_SECTION_FUNCTIONS
+#define EXTRA_SECTION_FUNCTIONS \
+ CTORS_SECTION_FUNCTION \
+ DTORS_SECTION_FUNCTION \
+ SUBTARGET_EXTRA_SECTION_FUNCTIONS \
+ SWITCH_SECTION_FUNCTION
+
+#ifndef CTORS_SECTION_FUNCTION
+#define CTORS_SECTION_FUNCTION \
+void \
+ctors_section () \
+{ \
+ if (in_section != in_ctors) \
+ { \
+ fprintf (asm_out_file, "%s\n", CTORS_SECTION_ASM_OP); \
+ in_section = in_ctors; \
+ } \
+}
+
+#define DTORS_SECTION_FUNCTION \
+void \
+dtors_section () \
+{ \
+ if (in_section != in_dtors) \
+ { \
+ fprintf (asm_out_file, "%s\n", DTORS_SECTION_ASM_OP); \
+ in_section = in_dtors; \
+ } \
+}
+#endif
+
+/* Switch to SECTION (an `enum in_section').
+
+ ??? This facility should be provided by GCC proper.
+ The problem is that we want to temporarily switch sections in
+ ASM_DECLARE_OBJECT_NAME and then switch back to the original section
+ afterwards. */
+#define SWITCH_SECTION_FUNCTION \
+void \
+switch_to_section (section, decl) \
+ enum in_section section; \
+ tree decl; \
+{ \
+ switch (section) \
+ { \
+ case in_text: text_section (); break; \
+ case in_data: data_section (); break; \
+ case in_named: named_section (decl, NULL, 0); break; \
+ case in_ctors: ctors_section (); break; \
+ case in_dtors: dtors_section (); break; \
+ SUBTARGET_SWITCH_SECTIONS \
+ default: abort (); break; \
+ } \
+}
+
+
+#define ASM_OUTPUT_SECTION(file, nam) \
+ do { fprintf (file, "\t.section\t%s\n", nam); } while (0)
+
+/* This is how to output an insn to push a register on the stack.
+ It need not be very fast code. */
+#define ASM_OUTPUT_REG_PUSH(FILE,REGNO) \
+ fprintf (FILE, "\tsubi\t %s,%d\n\tstw\t %s,(%s)\n", \
+ reg_names[STACK_POINTER_REGNUM], \
+ (STACK_BOUNDARY / BITS_PER_UNIT), \
+ reg_names[REGNO], \
+ reg_names[STACK_POINTER_REGNUM])
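+
+/* For example, pushing r8 with the (currently hardwired) 8 byte stack
+   alignment emits:
+       subi     sp,8
+       stw      r8,(sp)  */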
+
+/* Length in instructions of the code output by ASM_OUTPUT_REG_PUSH. */
+#define REG_PUSH_LENGTH 2
+
+/* This is how to output an insn to pop a register from the stack. */
+#define ASM_OUTPUT_REG_POP(FILE,REGNO) \
+ fprintf (FILE, "\tldw\t %s,(%s)\n\taddi\t %s,%d\n", \
+ reg_names[REGNO], \
+ reg_names[STACK_POINTER_REGNUM], \
+ reg_names[STACK_POINTER_REGNUM], \
+ (STACK_BOUNDARY / BITS_PER_UNIT))
+
+
+/* DBX register number for a given compiler register number. */
+#define DBX_REGISTER_NUMBER(REGNO) (REGNO)
+
+/* Output a label definition. */
+#define ASM_OUTPUT_LABEL(FILE,NAME) \
+ do { assemble_name (FILE, NAME); fputs (":\n", FILE); } while (0)
+
+/* Output a reference to a label. */
+#undef ASM_OUTPUT_LABELREF
+#define ASM_OUTPUT_LABELREF(STREAM, NAME) \
+ fprintf (STREAM, "%s%s", USER_LABEL_PREFIX, MCORE_STRIP_NAME_ENCODING (NAME))
+
+
+/* This is how to output an assembler line
+ that says to advance the location counter
+ to a multiple of 2**LOG bytes. */
+#define ASM_OUTPUT_ALIGN(FILE,LOG) \
+ if ((LOG) != 0) \
+ fprintf (FILE, "\t.align\t%d\n", LOG)
+
+#ifndef ASM_DECLARE_RESULT
+#define ASM_DECLARE_RESULT(FILE, RESULT)
+#endif
+
+/* Strip export encoding from a function name. */
+#define MCORE_STRIP_NAME_ENCODING(SYM_NAME) \
+ ((SYM_NAME) + ((SYM_NAME)[0] == '@' ? 3 : 0))
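+
+/* For example, given a hypothetical encoded name "@xxfoo" (the
+   three-character '@' prefix is whatever ENCODE_SECTION_INFO attached),
+   the macro returns a pointer past the prefix, i.e. "foo"; names that
+   do not start with '@' are returned unchanged.  */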
+
+/* Strip any text from SYM_NAME added by ENCODE_SECTION_INFO and store
+ the result in VAR. */
+#undef STRIP_NAME_ENCODING
+#define STRIP_NAME_ENCODING(VAR, SYM_NAME) \
+ (VAR) = MCORE_STRIP_NAME_ENCODING (SYM_NAME)
+
+#undef UNIQUE_SECTION
+#define UNIQUE_SECTION(DECL, RELOC) mcore_unique_section (DECL, RELOC)
+
+#define REDO_SECTION_INFO_P(DECL) 1
+
+#define MULTIPLE_SYMBOL_SPACES 1
+
+#define SUPPORTS_ONE_ONLY 1
+
+/* A pair of macros to output things for the callgraph data.
+   VALUE means (to the tools that read this info later):
+ 0 a call from src to dst
+ 1 the call is special (e.g. dst is "unknown" or "alloca")
+ 2 the call is special (e.g., the src is a table instead of routine)
+
+ Frame sizes are augmented with timestamps to help later tools
+   differentiate between static entities with the same names in different
+ files. */
+extern long mcore_current_compilation_timestamp;
+#define ASM_OUTPUT_CG_NODE(FILE,SRCNAME,VALUE) \
+ do \
+ { \
+ if (mcore_current_compilation_timestamp == 0) \
+ mcore_current_compilation_timestamp = time (0); \
+ fprintf ((FILE),"\t.equ\t__$frame$size$_%s_$_%08lx,%d\n", \
+ (SRCNAME), mcore_current_compilation_timestamp, (VALUE)); \
+ } \
+ while (0)
+
+#define ASM_OUTPUT_CG_EDGE(FILE,SRCNAME,DSTNAME,VALUE) \
+ do \
+ { \
+ fprintf ((FILE),"\t.equ\t__$function$call$_%s_$_%s,%d\n", \
+ (SRCNAME), (DSTNAME), (VALUE)); \
+ } \
+ while (0)
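+
+/* For illustration (the names, size and timestamp below are made up),
+   these two macros emit directives of the form:
+	.equ	__$frame$size$_foo_$_0034c2d1,24
+	.equ	__$function$call$_foo_$_bar,0
+   which the callgraph post-processing tools later pick apart.  */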
+
+/* Output a globalising directive for a label. */
+#define ASM_GLOBALIZE_LABEL(STREAM,NAME) \
+ (fprintf (STREAM, "\t.export\t"), \
+ assemble_name (STREAM, NAME), \
+ fputc ('\n',STREAM)) \
+
+/* The prefix to add to user-visible assembler symbols. */
+#undef USER_LABEL_PREFIX
+#define USER_LABEL_PREFIX ""
+
+/* Make an internal label into a string. */
+#undef ASM_GENERATE_INTERNAL_LABEL
+#define ASM_GENERATE_INTERNAL_LABEL(STRING, PREFIX, NUM) \
+ sprintf (STRING, "*.%s%ld", PREFIX, (long) NUM)
+
+/* Output an internal label definition. */
+#undef ASM_OUTPUT_INTERNAL_LABEL
+#define ASM_OUTPUT_INTERNAL_LABEL(FILE,PREFIX,NUM) \
+ fprintf (FILE, ".%s%d:\n", PREFIX, NUM)
+
+/* Construct a private name. */
+#define ASM_FORMAT_PRIVATE_NAME(OUTVAR,NAME,NUMBER) \
+ ((OUTVAR) = (char *) alloca (strlen (NAME) + 10), \
+ sprintf ((OUTVAR), "%s.%d", (NAME), (NUMBER)))
+
+/* Jump tables must be 32 bit aligned. */
+#undef ASM_OUTPUT_CASE_LABEL
+#define ASM_OUTPUT_CASE_LABEL(STREAM,PREFIX,NUM,TABLE) \
+ fprintf (STREAM, "\t.align 2\n.%s%d:\n", PREFIX, NUM);
+
+/* Output a relative address. Not needed since jump tables are absolute
+ but we must define it anyway. */
+#define ASM_OUTPUT_ADDR_DIFF_ELT(STREAM,BODY,VALUE,REL) \
+ fputs ("- - - ASM_OUTPUT_ADDR_DIFF_ELT called!\n", STREAM)
+
+/* Output an element of a dispatch table. */
+#define ASM_OUTPUT_ADDR_VEC_ELT(STREAM,VALUE) \
+ fprintf (STREAM, "\t.long\t.L%d\n", VALUE)
+
+/* Output various types of constants. */
+
+/* This is how to output an assembler line defining a `double'. */
+#define ASM_OUTPUT_DOUBLE(FILE,VALUE) \
+ do \
+ { \
+ char dstr[30]; \
+ REAL_VALUE_TO_DECIMAL ((VALUE), "%.20e", dstr); \
+ fprintf (FILE, "\t.double %s\n", dstr); \
+ } \
+ while (0)
+
+
+/* This is how to output an assembler line defining a `float' constant. */
+#define ASM_OUTPUT_FLOAT(FILE,VALUE) \
+ do \
+ { \
+ char dstr[30]; \
+ REAL_VALUE_TO_DECIMAL ((VALUE), "%.20e", dstr); \
+ fprintf (FILE, "\t.float %s\n", dstr); \
+ } \
+ while (0)
+
+#define ASM_OUTPUT_INT(STREAM, EXP) \
+ (fprintf (STREAM, "\t.long\t"), \
+ output_addr_const (STREAM, (EXP)), \
+ fputc ('\n', STREAM))
+
+#define ASM_OUTPUT_SHORT(STREAM, EXP) \
+ (fprintf (STREAM, "\t.short\t"), \
+ output_addr_const (STREAM, (EXP)), \
+ fputc ('\n', STREAM))
+
+#define ASM_OUTPUT_CHAR(STREAM, EXP) \
+ (fprintf (STREAM, "\t.byte\t"), \
+ output_addr_const (STREAM, (EXP)), \
+ fputc ('\n', STREAM))
+
+#define ASM_OUTPUT_BYTE(STREAM, VALUE) \
+ fprintf (STREAM, "\t.byte\t%d\n", VALUE) \
+
+/* This is how to output an assembler line
+ that says to advance the location counter by SIZE bytes. */
+#undef ASM_OUTPUT_SKIP
+#define ASM_OUTPUT_SKIP(FILE,SIZE) \
+ fprintf (FILE, "\t.fill %d, 1\n", (SIZE))
+
+/* This says how to output an assembler line
+ to define a global common symbol, with alignment information. */
+/* XXX - for now we ignore the alignment. */
+#undef ASM_OUTPUT_ALIGNED_COMMON
+#define ASM_OUTPUT_ALIGNED_COMMON(FILE, NAME, SIZE, ALIGN) \
+ do \
+ { \
+ if (mcore_dllexport_name_p (NAME)) \
+ MCORE_EXPORT_NAME (FILE, NAME) \
+ if (! mcore_dllimport_name_p (NAME)) \
+ { \
+ fputs ("\t.comm\t", FILE); \
+ assemble_name (FILE, NAME); \
+ fprintf (FILE, ",%d\n", SIZE); \
+ } \
+ } \
+ while (0)
+
+/* This says how to output an assembler line
+ to define an external symbol. */
+#define ASM_OUTPUT_EXTERNAL(FILE, DECL, NAME) \
+ do \
+ { \
+ fputs ("\t.import\t", (FILE)); \
+ assemble_name ((FILE), (NAME)); \
+ fputs ("\n", (FILE)); \
+ } \
+ while (0)
+
+#undef ASM_OUTPUT_EXTERNAL
+/* RBE: we undefined this and let gas do its "undefined is imported"
+ games. This is because when we use this, we get a marked
+ reference through the call to assemble_name and this forces C++
+ inlined member functions (or any inlined function) to be instantiated
+ regardless of whether any callsites remain.
+ This makes this aspect of the compiler non-ABI compliant. */
+
+/* Similar, but for libcall. FUN is an rtx. */
+#undef ASM_OUTPUT_EXTERNAL_LIBCALL
+#define ASM_OUTPUT_EXTERNAL_LIBCALL(FILE, FUN) \
+ do \
+ { \
+ fprintf (FILE, "\t.import\t"); \
+ assemble_name (FILE, XSTR (FUN, 0)); \
+ fprintf (FILE, "\n"); \
+ } \
+ while (0)
+
+
+/* This says how to output an assembler line
+ to define a local common symbol... */
+#undef ASM_OUTPUT_LOCAL
+#define ASM_OUTPUT_LOCAL(FILE, NAME, SIZE, ROUNDED) \
+ (fputs ("\t.lcomm\t", FILE), \
+ assemble_name (FILE, NAME), \
+ fprintf (FILE, ",%d\n", SIZE))
+
+/* ... and how to define a local common symbol whose alignment
+ we wish to specify. ALIGN comes in as bits, we have to turn
+ it into bytes. */
+#undef ASM_OUTPUT_ALIGNED_LOCAL
+#define ASM_OUTPUT_ALIGNED_LOCAL(FILE, NAME, SIZE, ALIGN) \
+ do \
+ { \
+ fputs ("\t.bss\t", (FILE)); \
+ assemble_name ((FILE), (NAME)); \
+ fprintf ((FILE), ",%d,%d\n", (SIZE), (ALIGN) / BITS_PER_UNIT); \
+ } \
+ while (0)
+
+/* We must mark dll symbols specially. Definitions of dllexport'd objects
+   install some info in the .drectve (PE) or .exports (ELF) sections.  */
+#undef ENCODE_SECTION_INFO
+#define ENCODE_SECTION_INFO(DECL) mcore_encode_section_info (DECL)
+
+/* The assembler's parentheses characters. */
+#define ASM_OPEN_PAREN "("
+#define ASM_CLOSE_PAREN ")"
+
+/* Target characters. */
+#define TARGET_BELL 007
+#define TARGET_BS 010
+#define TARGET_TAB 011
+#define TARGET_NEWLINE 012
+#define TARGET_VT 013
+#define TARGET_FF 014
+#define TARGET_CR 015
+
+/* Print operand X (an rtx) in assembler syntax to file FILE.
+ CODE is a letter or dot (`z' in `%z0') or 0 if no letter was specified.
+ For `%' followed by punctuation, CODE is the punctuation and X is null. */
+#define PRINT_OPERAND(STREAM, X, CODE) mcore_print_operand (STREAM, X, CODE)
+
+/* Print a memory address as an operand to reference that memory location. */
+#define PRINT_OPERAND_ADDRESS(STREAM,X) mcore_print_operand_address (STREAM, X)
+
+#define PRINT_OPERAND_PUNCT_VALID_P(CHAR) \
+ ((CHAR)=='.' || (CHAR) == '#' || (CHAR) == '*' || (CHAR) == '^' || (CHAR) == '!')
+
+/* This is to handle loads from the constant pool. */
+#define MACHINE_DEPENDENT_REORG(X) mcore_dependent_reorg (X)
+
+/* This handles MCore dependent rtl simplifications. */
+#define MACHINE_DEPENDENT_SIMPLIFY(X,M,L,I,S) \
+ mcore_dependent_simplify_rtx (X, M, L, I, S)
+
+#define PREDICATE_CODES \
+ { "mcore_arith_reg_operand", { REG, SUBREG }}, \
+ { "mcore_general_movsrc_operand", { MEM, CONST_INT, REG, SUBREG }},\
+ { "mcore_general_movdst_operand", { MEM, CONST_INT, REG, SUBREG }},\
+ { "mcore_reload_operand", { MEM, REG, SUBREG }}, \
+ { "mcore_arith_J_operand", { CONST_INT, REG, SUBREG }}, \
+ { "mcore_arith_K_operand", { CONST_INT, REG, SUBREG }}, \
+ { "mcore_arith_K_operand_not_0", { CONST_INT, REG, SUBREG }}, \
+ { "mcore_arith_M_operand", { CONST_INT, REG, SUBREG }}, \
+ { "mcore_arith_K_S_operand", { CONST_INT, REG, SUBREG }}, \
+ { "mcore_arith_O_operand", { CONST_INT, REG, SUBREG }}, \
+ { "mcore_arith_imm_operand", { CONST_INT, REG, SUBREG }}, \
+ { "mcore_arith_any_imm_operand", { CONST_INT, REG, SUBREG }}, \
+ { "mcore_literal_K_operand", { CONST_INT }}, \
+ { "mcore_addsub_operand", { CONST_INT, REG, SUBREG }}, \
+ { "mcore_compare_operand", { CONST_INT, REG, SUBREG }}, \
+ { "mcore_load_multiple_operation", { PARALLEL }}, \
+ { "mcore_store_multiple_operation", { PARALLEL }}, \
+ { "mcore_call_address_operand", { REG, SUBREG, CONST_INT }}, \
+
+#endif /* __MCORE__H */
diff --git a/gcc/config/mcore/mcore.md b/gcc/config/mcore/mcore.md
new file mode 100644
index 00000000000..6c64561cfe4
--- /dev/null
+++ b/gcc/config/mcore/mcore.md
@@ -0,0 +1,3526 @@
+;; Machine description for the Motorola MCore
+;; Copyright (C) 1993, 1999, 2000 Free Software Foundation, Inc.
+;; Contributed by Motorola.
+
+;; This file is part of GNU CC.
+
+;; GNU CC is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 2, or (at your option)
+;; any later version.
+
+;; GNU CC is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+;; GNU General Public License for more details.
+
+;; You should have received a copy of the GNU General Public License
+;; along with GNU CC; see the file COPYING. If not, write to
+;; the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+
+;;- See file "rtl.def" for documentation on define_insn, match_*, et al.
+
+
+
+;; -------------------------------------------------------------------------
+;; Attributes
+;; -------------------------------------------------------------------------
+
+; Target CPU.
+
+(define_attr "type" "brcond,branch,jmp,load,store,move,alu,shift"
+ (const_string "alu"))
+
+;; If a branch destination is within -2048..2047 bytes of the
+;; instruction it can be 2 bytes long.  All other conditional branches
+;; are 10 bytes long, and all other unconditional branches are 8 bytes.
+;;
+;; The assembler handles the long-branch span case for us if we use
+;; the "jb*" mnemonics for jumps/branches.  This pushes the span
+;; calculations and the literal table placement into the assembler,
+;; where their interactions can be managed in a single place.
+
+; All MCORE instructions are two bytes long.
+
+(define_attr "length" "" (const_int 2))
+
+;; (define_function_unit {name} {num-units} {n-users} {test}
+;; {ready-delay} {issue-delay} [{conflict-list}])
+
+
+(define_function_unit "memory" 1 1 (eq_attr "type" "load") 2 0)
+
+;; -------------------------------------------------------------------------
+;; Test and bit test
+;; -------------------------------------------------------------------------
+
+(define_insn ""
+ [(set (reg:SI 17)
+ (sign_extract:SI (match_operand:SI 0 "mcore_arith_reg_operand" "r")
+ (const_int 1)
+ (match_operand:SI 1 "mcore_literal_K_operand" "K")))]
+ ""
+ "btsti %0,%1"
+ [(set_attr "type" "shift")])
+
+(define_insn ""
+ [(set (reg:SI 17)
+ (zero_extract:SI (match_operand:SI 0 "mcore_arith_reg_operand" "r")
+ (const_int 1)
+ (match_operand:SI 1 "mcore_literal_K_operand" "K")))]
+ ""
+ "btsti %0,%1"
+ [(set_attr "type" "shift")])
+
+;;; This is created by combine.
+(define_insn ""
+ [(set (reg:CC 17)
+ (ne:CC (zero_extract:SI (match_operand:SI 0 "mcore_arith_reg_operand" "r")
+ (const_int 1)
+ (match_operand:SI 1 "mcore_literal_K_operand" "K"))
+ (const_int 0)))]
+ ""
+ "btsti %0,%1"
+ [(set_attr "type" "shift")])
+
+
+;; Created by combine from conditional patterns below (see sextb/btsti rx,31)
+
+(define_insn ""
+ [(set (reg:CC 17)
+ (ne:CC (lshiftrt:SI (match_operand:SI 0 "mcore_arith_reg_operand" "r")
+ (const_int 7))
+ (const_int 0)))]
+ "GET_CODE(operands[0]) == SUBREG &&
+ GET_MODE(SUBREG_REG(operands[0])) == QImode"
+ "btsti %0,7"
+ [(set_attr "type" "shift")])
+
+(define_insn ""
+ [(set (reg:CC 17)
+ (ne:CC (lshiftrt:SI (match_operand:SI 0 "mcore_arith_reg_operand" "r")
+ (const_int 15))
+ (const_int 0)))]
+ "GET_CODE(operands[0]) == SUBREG &&
+ GET_MODE(SUBREG_REG(operands[0])) == HImode"
+ "btsti %0,15"
+ [(set_attr "type" "shift")])
+
+(define_split
+ [(set (pc)
+ (if_then_else (ne (eq:CC (zero_extract:SI
+ (match_operand:SI 0 "mcore_arith_reg_operand" "")
+ (const_int 1)
+ (match_operand:SI 1 "mcore_literal_K_operand" ""))
+ (const_int 0))
+ (const_int 0))
+ (label_ref (match_operand 2 "" ""))
+ (pc)))]
+ ""
+ [(set (reg:CC 17)
+ (zero_extract:SI (match_dup 0) (const_int 1) (match_dup 1)))
+ (set (pc) (if_then_else (eq (reg:CC 17) (const_int 0))
+ (label_ref (match_dup 2))
+ (pc)))]
+ "")
+
+(define_split
+ [(set (pc)
+ (if_then_else (eq (ne:CC (zero_extract:SI
+ (match_operand:SI 0 "mcore_arith_reg_operand" "")
+ (const_int 1)
+ (match_operand:SI 1 "mcore_literal_K_operand" ""))
+ (const_int 0))
+ (const_int 0))
+ (label_ref (match_operand 2 "" ""))
+ (pc)))]
+ ""
+ [(set (reg:CC 17)
+ (zero_extract:SI (match_dup 0) (const_int 1) (match_dup 1)))
+ (set (pc) (if_then_else (eq (reg:CC 17) (const_int 0))
+ (label_ref (match_dup 2))
+ (pc)))]
+ "")
+
+;; XXX - disabled by nickc because it fails on libiberty/fnmatch.c
+;;
+;; ; Experimental - relax immediates for and, andn, or, and tst to allow
+;; ; any immediate value (or an immediate at all -- or, andn, & tst).
+;; ; This is done to allow bit field masks to fold together in combine.
+;; ; The reload phase will force the immediate into a register at the
+;; ; very end. This helps in some cases, but hurts in others: we'd
+;; ; really like to cse these immediates.  However, there is a phase
+;; ; ordering problem here. cse picks up individual masks and cse's
+;; ; those, but not folded masks (cse happens before combine). It's
+;; ; not clear what the best solution is because we really want cse
+;; ; before combine (leaving the bit field masks alone). To pick up
+;; ; relaxed immediates use -mrelax-immediates. It might take some
+;; ; experimenting to see which does better (i.e. regular imms vs.
+;; ; arbitrary imms) for a particular code. BRC
+;;
+;; (define_insn ""
+;; [(set (reg:CC 17)
+;; (ne:CC (and:SI (match_operand:SI 0 "mcore_arith_reg_operand" "r")
+;; (match_operand:SI 1 "mcore_arith_any_imm_operand" "rI"))
+;; (const_int 0)))]
+;; "TARGET_RELAX_IMM"
+;; "tst %0,%1")
+;;
+;; (define_insn ""
+;; [(set (reg:CC 17)
+;; (ne:CC (and:SI (match_operand:SI 0 "mcore_arith_reg_operand" "r")
+;; (match_operand:SI 1 "mcore_arith_M_operand" "r"))
+;; (const_int 0)))]
+;; "!TARGET_RELAX_IMM"
+;; "tst %0,%1")
+
+(define_insn ""
+ [(set (reg:CC 17)
+ (ne:CC (and:SI (match_operand:SI 0 "mcore_arith_reg_operand" "r")
+ (match_operand:SI 1 "mcore_arith_M_operand" "r"))
+ (const_int 0)))]
+ ""
+ "tst %0,%1")
+
+
+(define_split
+ [(parallel[
+ (set (reg:CC 17)
+ (ne:CC (ne:SI (leu:CC (match_operand:SI 0 "mcore_arith_reg_operand" "r")
+ (match_operand:SI 1 "mcore_arith_reg_operand" "r"))
+ (const_int 0))
+ (const_int 0)))
+ (clobber (match_operand:CC 2 "mcore_arith_reg_operand" "=r"))])]
+ ""
+ [(set (reg:CC 17) (ne:SI (match_dup 0) (const_int 0)))
+ (set (reg:CC 17) (leu:CC (match_dup 0) (match_dup 1)))])
+
+;; -------------------------------------------------------------------------
+;; SImode signed integer comparisons
+;; -------------------------------------------------------------------------
+
+(define_insn "decne_t"
+ [(set (reg:CC 17) (ne:CC (plus:SI (match_operand:SI 0 "mcore_arith_reg_operand" "+r")
+ (const_int -1))
+ (const_int 0)))
+ (set (match_dup 0)
+ (plus:SI (match_dup 0)
+ (const_int -1)))]
+ ""
+ "decne %0")
+
+;; The combiner seems to prefer the following to the former.
+;;
+(define_insn ""
+ [(set (reg:CC 17) (ne:CC (match_operand:SI 0 "mcore_arith_reg_operand" "+r")
+ (const_int 1)))
+ (set (match_dup 0)
+ (plus:SI (match_dup 0)
+ (const_int -1)))]
+ ""
+ "decne %0")
+
+(define_insn "cmpnesi_t"
+ [(set (reg:CC 17) (ne:CC (match_operand:SI 0 "mcore_arith_reg_operand" "r")
+ (match_operand:SI 1 "mcore_arith_reg_operand" "r")))]
+ ""
+ "cmpne %0,%1")
+
+(define_insn "cmpneisi_t"
+ [(set (reg:CC 17) (ne:CC (match_operand:SI 0 "mcore_arith_reg_operand" "r")
+ (match_operand:SI 1 "mcore_arith_K_operand" "K")))]
+ ""
+ "cmpnei %0,%1")
+
+(define_insn "cmpgtsi_t"
+ [(set (reg:CC 17) (gt:CC (match_operand:SI 0 "mcore_arith_reg_operand" "r")
+ (match_operand:SI 1 "mcore_arith_reg_operand" "r")))]
+ ""
+ "cmplt %1,%0")
+
+(define_insn ""
+ [(set (reg:CC 17) (gt:CC (plus:SI
+ (match_operand:SI 0 "mcore_arith_reg_operand" "+r")
+ (const_int -1))
+ (const_int 0)))
+ (set (match_dup 0) (plus:SI (match_dup 0) (const_int -1)))]
+ ""
+ "decgt %0")
+
+(define_insn "cmpltsi_t"
+ [(set (reg:CC 17) (lt:CC (match_operand:SI 0 "mcore_arith_reg_operand" "r")
+ (match_operand:SI 1 "mcore_arith_reg_operand" "r")))]
+ ""
+ "cmplt %0,%1")
+
+; cmplti is 1-32
+(define_insn "cmpltisi_t"
+ [(set (reg:CC 17) (lt:CC (match_operand:SI 0 "mcore_arith_reg_operand" "r")
+ (match_operand:SI 1 "mcore_arith_J_operand" "J")))]
+ ""
+ "cmplti %0,%1")
+
+; covers cmplti x,0
+(define_insn ""
+ [(set (reg:CC 17) (lt:CC (match_operand:SI 0 "mcore_arith_reg_operand" "r")
+ (const_int 0)))]
+ ""
+ "btsti %0,31")
+
+(define_insn ""
+ [(set (reg:CC 17) (lt:CC (plus:SI
+ (match_operand:SI 0 "mcore_arith_reg_operand" "+r")
+ (const_int -1))
+ (const_int 0)))
+ (set (match_dup 0) (plus:SI (match_dup 0) (const_int -1)))]
+ ""
+ "declt %0")
+
+;; -------------------------------------------------------------------------
+;; SImode unsigned integer comparisons
+;; -------------------------------------------------------------------------
+
+(define_insn "cmpgeusi_t"
+ [(set (reg:CC 17) (geu:CC (match_operand:SI 0 "mcore_arith_reg_operand" "r")
+ (match_operand:SI 1 "mcore_arith_reg_operand" "r")))]
+ ""
+ "cmphs %0,%1")
+
+(define_insn "cmpgeusi_0"
+ [(set (reg:CC 17) (geu:CC (match_operand:SI 0 "mcore_arith_reg_operand" "r")
+ (const_int 0)))]
+ ""
+ "cmpnei %0, 0")
+
+(define_insn "cmpleusi_t"
+ [(set (reg:CC 17) (leu:CC (match_operand:SI 0 "mcore_arith_reg_operand" "r")
+ (match_operand:SI 1 "mcore_arith_reg_operand" "r")))]
+ ""
+ "cmphs %1,%0")
+
+;; We save the compare operands in the cmpxx patterns and use them when
+;; we generate the branch.
+
+;; We accept constants here, in case we can modify them to ones which
+;; are more efficient to load. E.g. change 'x <= 62' to 'x < 63'.
+
+(define_expand "cmpsi"
+ [(set (reg:CC 17) (compare:CC (match_operand:SI 0 "mcore_compare_operand" "")
+ (match_operand:SI 1 "nonmemory_operand" "")))]
+ ""
+ "
+{ arch_compare_op0 = operands[0];
+ arch_compare_op1 = operands[1];
+ DONE;
+}")
+
+;; -------------------------------------------------------------------------
+;; Logical operations
+;; -------------------------------------------------------------------------
+
+;; Logical AND clearing a single bit. andsi3 knows that we have this
+;; pattern and allows the constant literal to pass through.
+;;
+
+;; RBE 2/97: don't need this pattern any longer...
+;; RBE: I don't think we need both "S" and exact_log2() clauses.
+;;(define_insn ""
+;; [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
+;; (and:SI (match_operand:SI 1 "mcore_arith_reg_operand" "%0")
+;; (match_operand:SI 2 "const_int_operand" "S")))]
+;; "mcore_arith_S_operand (operands[2])"
+;; "bclri %0,%Q2")
+;;
+
+(define_insn "andnsi3"
+ [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
+ (and:SI (not:SI (match_operand:SI 1 "mcore_arith_reg_operand" "r"))
+ (match_operand:SI 2 "mcore_arith_reg_operand" "0")))]
+ ""
+ "andn %0,%1")
+
+(define_expand "andsi3"
+ [(set (match_operand:SI 0 "mcore_arith_reg_operand" "")
+ (and:SI (match_operand:SI 1 "mcore_arith_reg_operand" "")
+ (match_operand:SI 2 "nonmemory_operand" "")))]
+ ""
+ "
+{
+ if (GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) < 0
+ && ! mcore_arith_S_operand (operands[2]))
+ {
+ int not_value = ~ INTVAL (operands[2]);
+ if ( CONST_OK_FOR_I (not_value)
+ || CONST_OK_FOR_M (not_value)
+ || CONST_OK_FOR_N (not_value))
+ {
+ operands[2] = copy_to_mode_reg (SImode, GEN_INT (not_value));
+ emit_insn (gen_andnsi3 (operands[0], operands[2], operands[1]));
+ DONE;
+ }
+ }
+
+ if (! mcore_arith_K_S_operand (operands[2], SImode))
+ operands[2] = copy_to_mode_reg (SImode, operands[2]);
+}")
+
+(define_insn ""
+ [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r,r,r,r")
+ (and:SI (match_operand:SI 1 "mcore_arith_reg_operand" "0,0,r,0")
+ (match_operand:SI 2 "mcore_arith_any_imm_operand" "r,K,0,S")))]
+ "TARGET_RELAX_IMM"
+ "*
+{
+ switch (which_alternative)
+ {
+ case 0: return \"and %0,%2\";
+ case 1: return \"andi %0,%2\";
+ case 2: return \"and %0,%1\";
+ /* case -1: return \"bclri %0,%Q2\"; will not happen */
+ case 3: return mcore_output_bclri (operands[0], INTVAL (operands[2]));
+ default: abort ();
+ }
+}")
+
+;; This was the old "S" which was "!(2^n)" */
+;; case -1: return \"bclri %0,%Q2\"; will not happen */
+
+(define_insn ""
+ [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r,r,r,r")
+ (and:SI (match_operand:SI 1 "mcore_arith_reg_operand" "0,0,r,0")
+ (match_operand:SI 2 "mcore_arith_K_S_operand" "r,K,0,S")))]
+ "!TARGET_RELAX_IMM"
+ "*
+{
+ switch (which_alternative)
+ {
+ case 0: return \"and %0,%2\";
+ case 1: return \"andi %0,%2\";
+ case 2: return \"and %0,%1\";
+ case 3: return mcore_output_bclri (operands[0], INTVAL (operands[2]));
+ default: abort ();
+ }
+}")
+
+;(define_insn "iorsi3"
+; [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
+; (ior:SI (match_operand:SI 1 "mcore_arith_reg_operand" "%0")
+; (match_operand:SI 2 "mcore_arith_reg_operand" "r")))]
+; ""
+; "or %0,%2")
+
+; Need an expand to resolve the ambiguity between the two ior patterns below.
+(define_expand "iorsi3"
+ [(set (match_operand:SI 0 "mcore_arith_reg_operand" "")
+ (ior:SI (match_operand:SI 1 "mcore_arith_reg_operand" "")
+ (match_operand:SI 2 "nonmemory_operand" "")))]
+ ""
+ "
+{
+ if (! mcore_arith_M_operand (operands[2], SImode))
+ operands[2] = copy_to_mode_reg (SImode, operands[2]);
+}")
+
+(define_insn ""
+ [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r,r,r")
+ (ior:SI (match_operand:SI 1 "mcore_arith_reg_operand" "%0,0,0")
+ (match_operand:SI 2 "mcore_arith_any_imm_operand" "r,M,T")))]
+ "TARGET_RELAX_IMM"
+ "*
+{
+ switch (which_alternative)
+ {
+ case 0: return \"or %0,%2\";
+ case 1: return \"bseti %0,%P2\";
+ case 2: return mcore_output_bseti (operands[0], INTVAL (operands[2]));
+ default: abort ();
+ }
+}")
+
+(define_insn ""
+ [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r,r,r")
+ (ior:SI (match_operand:SI 1 "mcore_arith_reg_operand" "%0,0,0")
+ (match_operand:SI 2 "mcore_arith_M_operand" "r,M,T")))]
+ "!TARGET_RELAX_IMM"
+ "*
+{
+ switch (which_alternative)
+ {
+ case 0: return \"or %0,%2\";
+ case 1: return \"bseti %0,%P2\";
+ case 2: return mcore_output_bseti (operands[0], INTVAL (operands[2]));
+ default: abort ();
+ }
+}")
+
+;(define_insn ""
+; [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
+; (ior:SI (match_operand:SI 1 "mcore_arith_reg_operand" "0")
+; (match_operand:SI 2 "const_int_operand" "M")))]
+; "exact_log2 (INTVAL (operands[2])) >= 0"
+; "bseti %0,%P2")
+
+;(define_insn ""
+; [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
+; (ior:SI (match_operand:SI 1 "mcore_arith_reg_operand" "0")
+; (match_operand:SI 2 "const_int_operand" "i")))]
+; "mcore_num_ones (INTVAL (operands[2])) < 3"
+; "* return mcore_output_bseti (operands[0], INTVAL (operands[2]));")
+
+(define_insn "xorsi3"
+ [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
+ (xor:SI (match_operand:SI 1 "mcore_arith_reg_operand" "%0")
+ (match_operand:SI 2 "mcore_arith_reg_operand" "r")))]
+ ""
+ "xor %0,%2")
+
+; These patterns give better code than gcc invents if
+; left to its own devices.
+
+(define_insn "anddi3"
+ [(set (match_operand:DI 0 "mcore_arith_reg_operand" "=r")
+ (and:DI (match_operand:DI 1 "mcore_arith_reg_operand" "%0")
+ (match_operand:DI 2 "mcore_arith_reg_operand" "r")))]
+ ""
+ "and %0,%2\;and %R0,%R2"
+ [(set_attr "length" "4")])
+
+(define_insn "iordi3"
+ [(set (match_operand:DI 0 "mcore_arith_reg_operand" "=r")
+ (ior:DI (match_operand:DI 1 "mcore_arith_reg_operand" "%0")
+ (match_operand:DI 2 "mcore_arith_reg_operand" "r")))]
+ ""
+ "or %0,%2\;or %R0,%R2"
+ [(set_attr "length" "4")])
+
+(define_insn "xordi3"
+ [(set (match_operand:DI 0 "mcore_arith_reg_operand" "=r")
+ (xor:DI (match_operand:DI 1 "mcore_arith_reg_operand" "%0")
+ (match_operand:DI 2 "mcore_arith_reg_operand" "r")))]
+ ""
+ "xor %0,%2\;xor %R0,%R2"
+ [(set_attr "length" "4")])
+
+;; -------------------------------------------------------------------------
+;; Shifts and rotates
+;; -------------------------------------------------------------------------
+
+;;; ??? The reg case may never match.
+(define_insn "rotlsi3"
+ [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r,r")
+ (rotate:SI (match_operand:SI 1 "mcore_arith_reg_operand" "0,0")
+ (match_operand:SI 2 "mcore_arith_K_operand_not_0" "r,K")))]
+ ""
+ "@
+ rotl %0,%2
+ rotli %0,%2"
+ [(set_attr "type" "shift")])
+
+(define_insn "ashlsi3"
+ [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r,r")
+ (ashift:SI (match_operand:SI 1 "mcore_arith_reg_operand" "0,0")
+ (match_operand:SI 2 "mcore_arith_K_operand_not_0" "r,K")))]
+ ""
+ "@
+ lsl %0,%2
+ lsli %0,%2"
+ [(set_attr "type" "shift")])
+
+(define_insn ""
+ [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
+ (ashift:SI (const_int 1)
+ (match_operand:SI 1 "mcore_arith_reg_operand" "r")))]
+ ""
+ "bgenr %0,%1"
+ [(set_attr "type" "shift")])
+
+(define_insn "ashrsi3"
+ [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r,r")
+ (ashiftrt:SI (match_operand:SI 1 "mcore_arith_reg_operand" "0,0")
+ (match_operand:SI 2 "mcore_arith_K_operand_not_0" "r,K")))]
+ ""
+ "@
+ asr %0,%2
+ asri %0,%2"
+ [(set_attr "type" "shift")])
+
+(define_insn "lshrsi3"
+ [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r,r")
+ (lshiftrt:SI (match_operand:SI 1 "mcore_arith_reg_operand" "0,0")
+ (match_operand:SI 2 "mcore_arith_K_operand_not_0" "r,K")))]
+ ""
+ "@
+ lsr %0,%2
+ lsri %0,%2"
+ [(set_attr "type" "shift")])
+
+;(define_expand "ashldi3"
+; [(parallel[(set (match_operand:DI 0 "mcore_arith_reg_operand" "")
+; (ashift:DI (match_operand:DI 1 "mcore_arith_reg_operand" "")
+; (match_operand:DI 2 "immediate_operand" "")))
+;
+; (clobber (reg:CC 17))])]
+;
+; ""
+; "
+;{
+; if (GET_CODE (operands[2]) != CONST_INT
+; || INTVAL (operands[2]) != 1)
+; FAIL;
+;}")
+;
+;(define_insn ""
+; [(set (match_operand:DI 0 "mcore_arith_reg_operand" "=r")
+; (ashift:DI (match_operand:DI 1 "mcore_arith_reg_operand" "0")
+; (const_int 1)))
+; (clobber (reg:CC 17))]
+; ""
+; "lsli %R0,0\;rotli %0,0"
+; [(set_attr "length" "4") (set_attr "type" "shift")])
+
+;; -------------------------------------------------------------------------
+;; Index instructions
+;; -------------------------------------------------------------------------
+;; The second of each set of patterns is borrowed from the alpha.md file.
+;; These variants of the above insns can occur if the second operand
+;; is the frame pointer. This is a kludge, but there doesn't
+;; seem to be a way around it. Only recognize them while reloading.
+
+;; We must use reload_operand for some operands in case frame pointer
+;; elimination put a MEM with invalid address there. Otherwise,
+;; the result of the substitution will not match this pattern, and reload
+;; will not be able to correctly fix the result.
+
+;; indexing longlongs or doubles (8 bytes)
+
+(define_insn "indexdi_t"
+ [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
+ (plus:SI (mult:SI (match_operand:SI 1 "mcore_arith_reg_operand" "r")
+ (const_int 8))
+ (match_operand:SI 2 "mcore_arith_reg_operand" "0")))]
+ ""
+ "*
+ if (! mcore_is_same_reg (operands[1], operands[2]))
+ {
+ output_asm_insn (\"ixw\\t%0,%1\", operands);
+ output_asm_insn (\"ixw\\t%0,%1\", operands);
+ }
+ else
+ {
+ output_asm_insn (\"ixh\\t%0,%1\", operands);
+ output_asm_insn (\"ixh\\t%0,%1\", operands);
+ }
+ return \"\";
+ "
+;; if operands[1] == operands[2], the first option above is wrong! -- dac
+;; was this... -- dac
+;; ixw %0,%1\;ixw %0,%1"
+
+ [(set_attr "length" "4")])
+
+(define_insn ""
+ [(set (match_operand:SI 0 "mcore_reload_operand" "=r,r,r")
+ (plus:SI (plus:SI (mult:SI (match_operand:SI 1 "mcore_reload_operand" "r,r,r")
+ (const_int 8))
+ (match_operand:SI 2 "mcore_arith_reg_operand" "0,0,0"))
+ (match_operand:SI 3 "mcore_addsub_operand" "r,J,L")))]
+ "reload_in_progress"
+ "@
+ ixw %0,%1\;ixw %0,%1\;addu %0,%3
+ ixw %0,%1\;ixw %0,%1\;addi %0,%3
+ ixw %0,%1\;ixw %0,%1\;subi %0,%M3"
+ [(set_attr "length" "6")])
+
+;; indexing longs (4 bytes)
+
+(define_insn "indexsi_t"
+ [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
+ (plus:SI (mult:SI (match_operand:SI 1 "mcore_arith_reg_operand" "r")
+ (const_int 4))
+ (match_operand:SI 2 "mcore_arith_reg_operand" "0")))]
+ ""
+ "ixw %0,%1")
+
+(define_insn ""
+ [(set (match_operand:SI 0 "mcore_reload_operand" "=r,r,r")
+ (plus:SI (plus:SI (mult:SI (match_operand:SI 1 "mcore_reload_operand" "r,r,r")
+ (const_int 4))
+ (match_operand:SI 2 "mcore_arith_reg_operand" "0,0,0"))
+ (match_operand:SI 3 "mcore_addsub_operand" "r,J,L")))]
+ "reload_in_progress"
+ "@
+ ixw %0,%1\;addu %0,%3
+ ixw %0,%1\;addi %0,%3
+ ixw %0,%1\;subi %0,%M3"
+ [(set_attr "length" "4")])
+
+;; indexing shorts (2 bytes)
+
+(define_insn "indexhi_t"
+ [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
+ (plus:SI (mult:SI (match_operand:SI 1 "mcore_arith_reg_operand" "r")
+ (const_int 2))
+ (match_operand:SI 2 "mcore_arith_reg_operand" "0")))]
+ ""
+ "ixh %0,%1")
+
+(define_insn ""
+ [(set (match_operand:SI 0 "mcore_reload_operand" "=r,r,r")
+ (plus:SI (plus:SI (mult:SI (match_operand:SI 1 "mcore_reload_operand" "r,r,r")
+ (const_int 2))
+ (match_operand:SI 2 "mcore_arith_reg_operand" "0,0,0"))
+ (match_operand:SI 3 "mcore_addsub_operand" "r,J,L")))]
+ "reload_in_progress"
+ "@
+ ixh %0,%1\;addu %0,%3
+ ixh %0,%1\;addi %0,%3
+ ixh %0,%1\;subi %0,%M3"
+ [(set_attr "length" "4")])
+
+;;
+;; Other sizes may be handy for indexing.
+;; The tradeoffs to consider when adding these are
+;; code size, execution time (vs. a mul it is easy to win),
+;; register pressure (these patterns don't use an extra
+;; register to build the offset from the base),
+;; and whether the compiler will come up with some other idiom instead.
+;;
+
+;; -------------------------------------------------------------------------
+;; Addition, Subtraction instructions
+;; -------------------------------------------------------------------------
+
+(define_expand "addsi3"
+ [(set (match_operand:SI 0 "mcore_arith_reg_operand" "")
+ (plus:SI (match_operand:SI 1 "mcore_arith_reg_operand" "")
+ (match_operand:SI 2 "nonmemory_operand" "")))]
+ ""
+ "
+{
+ extern int flag_omit_frame_pointer;
+
+ /* If this is an add to the frame pointer, then accept it as is so
+ that we can later fold in the fp/sp offset from frame pointer
+ elimination. */
+ if (flag_omit_frame_pointer
+ && GET_CODE (operands[1]) == REG
+ && (REGNO (operands[1]) == VIRTUAL_STACK_VARS_REGNUM
+ || REGNO (operands[1]) == FRAME_POINTER_REGNUM))
+ {
+ emit_insn (gen_addsi3_fp (operands[0], operands[1], operands[2]));
+ DONE;
+ }
+
+ /* Convert adds to subtracts if this makes loading the constant cheaper.
+ But only if we are allowed to generate new pseudos. */
+ if (! (reload_in_progress || reload_completed)
+ && GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) < -32)
+ {
+ int neg_value = - INTVAL (operands[2]);
+ if ( CONST_OK_FOR_I (neg_value)
+ || CONST_OK_FOR_M (neg_value)
+ || CONST_OK_FOR_N (neg_value))
+ {
+ operands[2] = copy_to_mode_reg (SImode, GEN_INT (neg_value));
+ emit_insn (gen_subsi3 (operands[0], operands[1], operands[2]));
+ DONE;
+ }
+ }
+
+ if (! mcore_addsub_operand (operands[2], SImode))
+ operands[2] = copy_to_mode_reg (SImode, operands[2]);
+}")
+
+;; RBE: for some constants which are not in the range which allows
+;; us to do a single operation, we will try a paired addi/addi instead
+;; of a movi/addi. This relieves some register pressure at the expense
+;; of giving away some potential constant reuse.
+;;
+;; RBE 6/17/97: this didn't buy us anything, but I keep the pattern
+;; for later reference
+;;
+;; (define_insn "addsi3_i2"
+;; [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
+;; (plus:SI (match_operand:SI 1 "mcore_arith_reg_operand" "%0")
+;; (match_operand:SI 2 "const_int_operand" "g")))]
+;; "GET_CODE(operands[2]) == CONST_INT
+;; && ((INTVAL (operands[2]) > 32 && INTVAL(operands[2]) <= 64)
+;; || (INTVAL (operands[2]) < -32 && INTVAL(operands[2]) >= -64))"
+;; "*
+;; {
+;; int n = INTVAL(operands[2]);
+;; if (n > 0)
+;; {
+;; operands[2] = GEN_INT(n - 32);
+;; return \"addi\\t%0,32\;addi\\t%0,%2\";
+;; }
+;; else
+;; {
+;; n = (-n);
+;; operands[2] = GEN_INT(n - 32);
+;; return \"subi\\t%0,32\;subi\\t%0,%2\";
+;; }
+;; }"
+;; [(set_attr "length" "4")])
+
+(define_insn "addsi3_i"
+ [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r,r,r")
+ (plus:SI (match_operand:SI 1 "mcore_arith_reg_operand" "%0,0,0")
+ (match_operand:SI 2 "mcore_addsub_operand" "r,J,L")))]
+ ""
+ "@
+ addu %0,%2
+ addi %0,%2
+ subi %0,%M2")
+
+;; This exists so that address computations based on the frame pointer
+;; can be folded in when frame pointer elimination occurs. Ordinarily
+;; this would be bad because it allows insns which would require reloading,
+;; but without it, we get multiple adds where one would do.
+
+(define_insn "addsi3_fp"
+ [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r,r,r")
+ (plus:SI (match_operand:SI 1 "mcore_arith_reg_operand" "%0,0,0")
+ (match_operand:SI 2 "immediate_operand" "r,J,L")))]
+ "flag_omit_frame_pointer
+ && (reload_in_progress || reload_completed || REGNO (operands[1]) == FRAME_POINTER_REGNUM)"
+ "@
+ addu %0,%2
+ addi %0,%2
+ subi %0,%M2")
+
+;; RBE: for some constants which are not in the range which allows
+;; us to do a single operation, we will try a paired addi/addi instead
+;; of a movi/addi. This relieves some register pressure at the expense
+;; of giving away some potential constant reuse.
+;;
+;; RBE 6/17/97: this didn't buy us anything, but I keep the pattern
+;; for later reference
+;;
+;; (define_insn "subsi3_i2"
+;; [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
+;; (plus:SI (match_operand:SI 1 "mcore_arith_reg_operand" "%0")
+;; (match_operand:SI 2 "const_int_operand" "g")))]
+;; "TARGET_RBETEST && GET_CODE(operands[2]) == CONST_INT
+;; && ((INTVAL (operands[2]) > 32 && INTVAL(operands[2]) <= 64)
+;; || (INTVAL (operands[2]) < -32 && INTVAL(operands[2]) >= -64))"
+;; "*
+;; {
+;; int n = INTVAL(operands[2]);
+;; if ( n > 0)
+;; {
+;; operands[2] = GEN_INT( n - 32);
+;; return \"subi\\t%0,32\;subi\\t%0,%2\";
+;; }
+;; else
+;; {
+;; n = (-n);
+;; operands[2] = GEN_INT(n - 32);
+;; return \"addi\\t%0,32\;addi\\t%0,%2\";
+;; }
+;; }"
+;; [(set_attr "length" "4")])
+
+;(define_insn "subsi3"
+; [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r,r,r,r")
+; (minus:SI (match_operand:SI 1 "mcore_arith_K_operand" "0,0,r,K")
+; (match_operand:SI 2 "mcore_arith_J_operand" "r,J,0,0")))]
+; ""
+; "@
+; sub %0,%2
+; subi %0,%2
+; rsub %0,%1
+; rsubi %0,%1")
+
+(define_insn "subsi3"
+ [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r,r,r")
+ (minus:SI (match_operand:SI 1 "mcore_arith_reg_operand" "0,0,r")
+ (match_operand:SI 2 "mcore_arith_J_operand" "r,J,0")))]
+ ""
+ "@
+ subu %0,%2
+ subi %0,%2
+ rsub %0,%1")
+
+(define_insn ""
+ [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
+ (minus:SI (match_operand:SI 1 "mcore_literal_K_operand" "K")
+ (match_operand:SI 2 "mcore_arith_reg_operand" "0")))]
+ ""
+ "rsubi %0,%1")
+
+(define_insn "adddi3"
+ [(set (match_operand:DI 0 "mcore_arith_reg_operand" "=&r")
+ (plus:DI (match_operand:DI 1 "mcore_arith_reg_operand" "%0")
+ (match_operand:DI 2 "mcore_arith_reg_operand" "r")))
+ (clobber (reg:CC 17))]
+ ""
+ "*
+ {
+ if (TARGET_LITTLE_END)
+ return \"cmplt %0,%0\;addc %0,%2\;addc %R0,%R2\";
+ return \"cmplt %R0,%R0\;addc %R0,%R2\;addc %0,%2\";
+ }"
+ [(set_attr "length" "6")])
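+
+;; The leading compare of a register with itself is always false, so it
+;; clears the condition/carry bit before the two addc instructions
+;; propagate the carry from the low word into the high word.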
+
+;; special case for "longlong += 1"
+(define_insn ""
+ [(set (match_operand:DI 0 "mcore_arith_reg_operand" "=&r")
+ (plus:DI (match_operand:DI 1 "mcore_arith_reg_operand" "0")
+ (const_int 1)))
+ (clobber (reg:CC 17))]
+ ""
+ "*
+ {
+ if (TARGET_LITTLE_END)
+ return \"addi %0,1\;cmpnei %0,0\;incf %R0\";
+ return \"addi %R0,1\;cmpnei %R0,0\;incf %0\";
+ }"
+ [(set_attr "length" "6")])
+
+;; special case for "longlong -= 1"
+(define_insn ""
+ [(set (match_operand:DI 0 "mcore_arith_reg_operand" "=&r")
+ (plus:DI (match_operand:DI 1 "mcore_arith_reg_operand" "0")
+ (const_int -1)))
+ (clobber (reg:CC 17))]
+ ""
+ "*
+ {
+ if (TARGET_LITTLE_END)
+ return \"cmpnei %0,0\;decf %R0\;subi %0,1\";
+ return \"cmpnei %R0,0\;decf %0\;subi %R0,1\";
+ }"
+ [(set_attr "length" "6")])
+
+;; special case for "longlong += const_int"
+;; we have to use a register for the const_int because we don't
+;; have an unsigned compare immediate... only +/- 1 get to
+;; play the no-extra register game because they compare with 0.
+;; This winds up working out for any literal that is synthesized
+;; with a single instruction. The more complicated ones look
+;; like they get broken into subregs to get initialized too soon
+;; for us to catch here. -- RBE 4/25/96
+;; Only allow for-sure positive values.
+
+(define_insn ""
+ [(set (match_operand:DI 0 "mcore_arith_reg_operand" "=&r")
+ (plus:DI (match_operand:DI 1 "mcore_arith_reg_operand" "0")
+ (match_operand:SI 2 "const_int_operand" "r")))
+ (clobber (reg:CC 17))]
+ "GET_CODE (operands[2]) == CONST_INT
+ && INTVAL (operands[2]) > 0 && ! (INTVAL (operands[2]) & 0x80000000)"
+ "*
+{
+ if (GET_MODE (operands[2]) != SImode)
+ abort ();
+ if (TARGET_LITTLE_END)
+ return \"addu %0,%2\;cmphs %0,%2\;incf %R0\";
+ return \"addu %R0,%2\;cmphs %R0,%2\;incf %0\";
+}"
+ [(set_attr "length" "6")])
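+
+;; How the carry is detected above: if the addu wrapped around, the
+;; unsigned sum is smaller than the addend, cmphs leaves the condition
+;; bit clear, and incf (increment if the condition bit is clear) bumps
+;; the high word; otherwise the high word is untouched.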
+
+;; optimize "long long" + "unsigned long"
+;; won't trigger because of how the extension is expanded upstream.
+;; (define_insn ""
+;; [(set (match_operand:DI 0 "mcore_arith_reg_operand" "=&r")
+;; (plus:DI (match_operand:DI 1 "mcore_arith_reg_operand" "%0")
+;; (zero_extend:DI (match_operand:SI 2 "mcore_arith_reg_operand" "r"))))
+;; (clobber (reg:CC 17))]
+;; "0"
+;; "cmplt %R0,%R0\;addc %R0,%2\;inct %0"
+;; [(set_attr "length" "6")])
+
+;; optimize "long long" + "signed long"
+;; won't trigger because of how the extension is expanded upstream.
+;; (define_insn ""
+;; [(set (match_operand:DI 0 "mcore_arith_reg_operand" "=&r")
+;; (plus:DI (match_operand:DI 1 "mcore_arith_reg_operand" "%0")
+;; (sign_extend:DI (match_operand:SI 2 "mcore_arith_reg_operand" "r"))))
+;; (clobber (reg:CC 17))]
+;; "0"
+;; "cmplt %R0,%R0\;addc %R0,%2\;inct %0\;btsti %2,31\;dect %0"
+;; [(set_attr "length" "6")])
+
+(define_insn "subdi3"
+ [(set (match_operand:DI 0 "mcore_arith_reg_operand" "=&r")
+ (minus:DI (match_operand:DI 1 "mcore_arith_reg_operand" "0")
+ (match_operand:DI 2 "mcore_arith_reg_operand" "r")))
+ (clobber (reg:CC 17))]
+ ""
+ "*
+ {
+ if (TARGET_LITTLE_END)
+ return \"cmphs %0,%0\;subc %0,%2\;subc %R0,%R2\";
+ return \"cmphs %R0,%R0\;subc %R0,%R2\;subc %0,%2\";
+ }"
+ [(set_attr "length" "6")])
+
+;; -------------------------------------------------------------------------
+;; Multiplication instructions
+;; -------------------------------------------------------------------------
+
+(define_insn "mulsi3"
+ [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
+ (mult:SI (match_operand:SI 1 "mcore_arith_reg_operand" "%0")
+ (match_operand:SI 2 "mcore_arith_reg_operand" "r")))]
+ ""
+ "mult %0,%2")
+
+;;
+;; 32/32 signed division -- added to the MCORE instruction set spring 1997
+;;
+;; Different constraints based on the architecture revision...
+;;
+(define_expand "divsi3"
+ [(set (match_operand:SI 0 "mcore_arith_reg_operand" "")
+ (div:SI (match_operand:SI 1 "mcore_arith_reg_operand" "")
+ (match_operand:SI 2 "mcore_arith_reg_operand" "")))]
+ "TARGET_DIV"
+ "")
+
+;; MCORE Revision 1.50: restricts the divisor to be in r1. (6/97)
+;;
+(define_insn ""
+ [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
+ (div:SI (match_operand:SI 1 "mcore_arith_reg_operand" "0")
+ (match_operand:SI 2 "mcore_arith_reg_operand" "b")))]
+ "TARGET_DIV"
+ "divs %0,%2")
+
+;;
+;; 32/32 signed division -- added to the MCORE instruction set spring 1997
+;;
+;; Different constraints based on the architecture revision...
+;;
+(define_expand "udivsi3"
+ [(set (match_operand:SI 0 "mcore_arith_reg_operand" "")
+ (udiv:SI (match_operand:SI 1 "mcore_arith_reg_operand" "")
+ (match_operand:SI 2 "mcore_arith_reg_operand" "")))]
+ "TARGET_DIV"
+ "")
+
+;; MCORE Revision 1.50: restricts the divisor to be in r1. (6/97)
+(define_insn ""
+ [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
+ (udiv:SI (match_operand:SI 1 "mcore_arith_reg_operand" "0")
+ (match_operand:SI 2 "mcore_arith_reg_operand" "b")))]
+ "TARGET_DIV"
+ "divu %0,%2")
+
+;; -------------------------------------------------------------------------
+;; Unary arithmetic
+;; -------------------------------------------------------------------------
+
+(define_insn "negsi2"
+ [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
+ (neg:SI (match_operand:SI 1 "mcore_arith_reg_operand" "0")))]
+ ""
+ "*
+{
+ return \"rsubi %0,0\";
+}")
+
+
+(define_insn "abssi2"
+ [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
+ (abs:SI (match_operand:SI 1 "mcore_arith_reg_operand" "0")))]
+ ""
+ "abs %0")
+
+(define_insn "negdi2"
+ [(set (match_operand:DI 0 "mcore_arith_reg_operand" "=&r")
+ (neg:DI (match_operand:DI 1 "mcore_arith_reg_operand" "0")))
+ (clobber (reg:CC 17))]
+ ""
+ "*
+{
+ if (TARGET_LITTLE_END)
+ return \"cmpnei %0,0\\n\\trsubi %0,0\\n\\tnot %R0\\n\\tincf %R0\";
+ return \"cmpnei %R0,0\\n\\trsubi %R0,0\\n\\tnot %0\\n\\tincf %0\";
+}"
+ [(set_attr "length" "8")])
+
+(define_insn "one_cmplsi2"
+ [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
+ (not:SI (match_operand:SI 1 "mcore_arith_reg_operand" "0")))]
+ ""
+ "not %0")
+
+;; -------------------------------------------------------------------------
+;; Zero extension instructions
+;; -------------------------------------------------------------------------
+
+(define_expand "zero_extendhisi2"
+ [(set (match_operand:SI 0 "mcore_arith_reg_operand" "")
+ (zero_extend:SI (match_operand:HI 1 "mcore_arith_reg_operand" "")))]
+ ""
+ "")
+
+(define_insn ""
+ [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r,r")
+ (zero_extend:SI (match_operand:HI 1 "general_operand" "0,m")))]
+ ""
+ "@
+ zexth %0
+ ld.h %0,%1"
+ [(set_attr "type" "shift,load")])
+
+;; ldh gives us a free zero-extension. The combiner picks up on this.
+(define_insn ""
+ [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
+ (zero_extend:SI (mem:HI (match_operand:SI 1 "mcore_arith_reg_operand" "r"))))]
+ ""
+ "ld.h %0,(%1)"
+ [(set_attr "type" "load")])
+
+(define_insn ""
+ [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
+ (zero_extend:SI (mem:HI (plus:SI (match_operand:SI 1 "mcore_arith_reg_operand" "r")
+ (match_operand:SI 2 "const_int_operand" "")))))]
+ "(INTVAL (operands[2]) >= 0) &&
+ (INTVAL (operands[2]) < 32) &&
+ ((INTVAL (operands[2])&1) == 0)"
+ "ld.h %0,(%1,%2)"
+ [(set_attr "type" "load")])
+
+(define_expand "zero_extendqisi2"
+ [(set (match_operand:SI 0 "mcore_arith_reg_operand" "")
+ (zero_extend:SI (match_operand:QI 1 "general_operand" "")))]
+ ""
+ "")
+
+;; RBE: XXX: we don't recognize that the xtrb3 kills the CC register.
+(define_insn ""
+ [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r,b,r")
+ (zero_extend:SI (match_operand:QI 1 "general_operand" "0,r,m")))]
+ ""
+ "@
+ zextb %0
+ xtrb3 %0,%1
+ ld.b %0,%1"
+ [(set_attr "type" "shift,shift,load")])
+
+;; ldb gives us a free zero-extension. The combiner picks up on this.
+(define_insn ""
+ [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
+ (zero_extend:SI (mem:QI (match_operand:SI 1 "mcore_arith_reg_operand" "r"))))]
+ ""
+ "ld.b %0,(%1)"
+ [(set_attr "type" "load")])
+
+(define_insn ""
+ [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
+ (zero_extend:SI (mem:QI (plus:SI (match_operand:SI 1 "mcore_arith_reg_operand" "r")
+ (match_operand:SI 2 "const_int_operand" "")))))]
+ "(INTVAL (operands[2]) >= 0) &&
+ (INTVAL (operands[2]) < 16)"
+ "ld.b %0,(%1,%2)"
+ [(set_attr "type" "load")])
+
+(define_expand "zero_extendqihi2"
+ [(set (match_operand:HI 0 "mcore_arith_reg_operand" "")
+ (zero_extend:HI (match_operand:QI 1 "general_operand" "")))]
+ ""
+ "")
+
+;; RBE: XXX: we don't recognize that the xtrb3 kills the CC register.
+(define_insn ""
+ [(set (match_operand:HI 0 "mcore_arith_reg_operand" "=r,b,r")
+ (zero_extend:HI (match_operand:QI 1 "general_operand" "0,r,m")))]
+ ""
+ "@
+ zextb %0
+ xtrb3 %0,%1
+ ld.b %0,%1"
+ [(set_attr "type" "shift,shift,load")])
+
+;; ldb gives us a free zero-extension. The combiner picks up on this.
+;; This doesn't catch references into a structure.
+;; Note that normally the compiler uses the above insn, unless it turns
+;; out that we're dealing with a volatile...
+(define_insn ""
+ [(set (match_operand:HI 0 "mcore_arith_reg_operand" "=r")
+ (zero_extend:HI (mem:QI (match_operand:SI 1 "mcore_arith_reg_operand" "r"))))]
+ ""
+ "ld.b %0,(%1)"
+ [(set_attr "type" "load")])
+
+(define_insn ""
+ [(set (match_operand:HI 0 "mcore_arith_reg_operand" "=r")
+ (zero_extend:HI (mem:QI (plus:SI (match_operand:SI 1 "mcore_arith_reg_operand" "r")
+ (match_operand:SI 2 "const_int_operand" "")))))]
+ "(INTVAL (operands[2]) >= 0) &&
+ (INTVAL (operands[2]) < 16)"
+ "ld.b %0,(%1,%2)"
+ [(set_attr "type" "load")])
+
+
+;; -------------------------------------------------------------------------
+;; Sign extension instructions
+;; -------------------------------------------------------------------------
+
+(define_expand "extendsidi2"
+ [(set (match_operand:DI 0 "mcore_arith_reg_operand" "=r")
+ (match_operand:SI 1 "mcore_arith_reg_operand" "r"))]
+ ""
+ "
+ {
+ int low, high;
+
+ if (TARGET_LITTLE_END)
+ low = 0, high = 1;
+ else
+ low = 1, high = 0;
+
+ emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_SUBREG (SImode, operands[0], low),
+ operands[1]));
+ emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_SUBREG (SImode, operands[0], high),
+ gen_rtx_ASHIFTRT (SImode,
+ gen_rtx_SUBREG (SImode, operands[0], low),
+ GEN_INT (31))));
+ DONE;
+ }"
+)
+
+(define_insn "extendhisi2"
+ [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
+ (sign_extend:SI (match_operand:HI 1 "mcore_arith_reg_operand" "0")))]
+ ""
+ "sexth %0")
+
+(define_insn "extendqisi2"
+ [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
+ (sign_extend:SI (match_operand:QI 1 "mcore_arith_reg_operand" "0")))]
+ ""
+ "sextb %0")
+
+(define_insn "extendqihi2"
+ [(set (match_operand:HI 0 "mcore_arith_reg_operand" "=r")
+ (sign_extend:HI (match_operand:QI 1 "mcore_arith_reg_operand" "0")))]
+ ""
+ "sextb %0")
+
+;; -------------------------------------------------------------------------
+;; Move instructions
+;; -------------------------------------------------------------------------
+
+;; SImode
+
+(define_expand "movsi"
+ [(set (match_operand:SI 0 "general_operand" "")
+ (match_operand:SI 1 "general_operand" ""))]
+ ""
+ "
+{
+ if (GET_CODE (operands[0]) == MEM)
+ operands[1] = force_reg (SImode, operands[1]);
+ else if (CONSTANT_P (operands[1])
+ && (GET_CODE (operands[1]) != CONST_INT
+ || ( ! CONST_OK_FOR_I (INTVAL (operands[1]))
+ && ! CONST_OK_FOR_M (INTVAL (operands[1]))
+ && ! CONST_OK_FOR_N (INTVAL (operands[1]))
+ && (! TARGET_HARDLIT ||
+ ! mcore_const_ok_for_inline (INTVAL (operands[1])))))
+ && ! reload_completed
+ && ! reload_in_progress
+ && GET_CODE (operands[0]) == REG
+ && REGNO (operands[0]) < FIRST_PSEUDO_REGISTER
+ && (REGNO (operands[0]) == STACK_POINTER_REGNUM
+ || REGNO (operands[0]) == LK_REG))
+ operands[1] = force_reg (SImode, operands[1]);
+}")
+
+;;; Must put a/i before r/r so that it will be preferred when the dest is
+;;; a hard register. Must put a/R before r/m.
+;;; DO WE NEED a/i ANYMORE?
+
+(define_insn ""
+ [(set (match_operand:SI 0 "mcore_general_movdst_operand" "=r,r,r,a,r,r,a,r,m")
+ (match_operand:SI 1 "mcore_general_movsrc_operand" "I,M,N,i,r,c,R,m,r"))]
+ "(register_operand (operands[0], SImode)
+ || register_operand (operands[1], SImode))
+ && ! (CONSTANT_P (operands[1])
+ && (GET_CODE (operands[1]) != CONST_INT
+ || ( ! CONST_OK_FOR_I (INTVAL (operands[1]))
+ && ! CONST_OK_FOR_M (INTVAL (operands[1]))
+ && ! CONST_OK_FOR_N (INTVAL (operands[1]))))
+ && GET_CODE (operands[0]) == REG
+ && REGNO (operands[0]) < FIRST_PSEUDO_REGISTER
+ && (REGNO (operands[0]) == STACK_POINTER_REGNUM
+ || REGNO (operands[0]) == LK_REG))"
+ "* return mcore_output_move (insn, operands, SImode);"
+ [(set_attr "type" "move,move,move,move,move,move,load,load,store")])
+
+;; This is to work around a bug in reload.
+(define_insn ""
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (match_operand:SI 1 "immediate_operand" "i"))]
+ "((reload_in_progress || reload_completed)
+ && CONSTANT_P (operands[1])
+ && GET_CODE (operands[1]) == CONST_INT
+ && ! CONST_OK_FOR_I (INTVAL (operands[1]))
+ && ! CONST_OK_FOR_M (INTVAL (operands[1]))
+ && ! CONST_OK_FOR_N (INTVAL (operands[1]))
+ && GET_CODE (operands[0]) == REG
+ && REGNO (operands[0]) == LK_REG)"
+ "* return mcore_output_inline_const_forced (insn, operands, SImode);"
+ [(set_attr "type" "load")])
+
+;; (define_expand "reload_insi"
+;; [(parallel [(match_operand:SI 0 "register_operand" "=r")
+;; (match_operand:SI 1 "general_operand" "")
+;; (match_operand:DI 2 "register_operand" "=&r")])]
+;; ""
+;; "
+;; {
+;; if (CONSTANT_P (operands[1])
+;; && GET_CODE (operands[1]) == CONST_INT
+;; && ! CONST_OK_FOR_I (INTVAL (operands[1]))
+;; && ! CONST_OK_FOR_M (INTVAL (operands[1]))
+;; && ! CONST_OK_FOR_N (INTVAL (operands[1]))
+;; && GET_CODE (operands[0]) == REG
+;; && (REGNO (operands[0]) == STACK_POINTER_REGNUM
+;; || REGNO (operands[0]) == LK_REG))
+;; {
+;; rtx tmp;
+;;
+;; if ( REGNO (operands[2]) == REGNO (operands[0])
+;; || REGNO (operands[2]) == STACK_POINTER_REGNUM
+;; || REGNO (operands[2]) == LK_REG)
+;; tmp = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
+;; else
+;; tmp = gen_rtx_REG (SImode, REGNO (operands[2]));
+;;
+;; emit_insn (gen_movsi (tmp, operands[1]));
+;; emit_insn (gen_movsi (operands[0], tmp));
+;; DONE;
+;; }
+;; emit_insn (gen_movsi (operands[0], operands[1]));
+;; DONE;
+;; }"
+;; )
+
+
+
+;;
+;; HImode
+;;
+
+;;; ??? This isn't guaranteed to work. It should be more like the SImode
+;;; patterns.
+
+(define_expand "movhi"
+ [(set (match_operand:HI 0 "general_operand" "")
+ (match_operand:HI 1 "general_operand" ""))]
+ ""
+ "
+{
+ if (GET_CODE (operands[0]) == MEM)
+ operands[1] = force_reg (HImode, operands[1]);
+ else if (CONSTANT_P (operands[1])
+ && (GET_CODE (operands[1]) != CONST_INT
+ || (! CONST_OK_FOR_I (INTVAL (operands[1]))
+ && ! CONST_OK_FOR_M (INTVAL (operands[1]))
+ && ! CONST_OK_FOR_N (INTVAL (operands[1]))))
+ && ! reload_completed && ! reload_in_progress)
+ {
+ rtx reg = gen_reg_rtx (SImode);
+ emit_insn (gen_movsi (reg, operands[1]));
+ operands[1] = gen_rtx (SUBREG, HImode, reg, 0);
+ }
+}")
+
+(define_insn ""
+ [(set (match_operand:HI 0 "mcore_general_movdst_operand" "=r,r,r,r,r,r,m")
+ (match_operand:HI 1 "mcore_general_movsrc_operand" "r,I,M,N,c,m,r"))]
+ "(register_operand (operands[0], HImode)
+ || register_operand (operands[1], HImode))
+ && (GET_CODE (operands[1]) != CONST_INT
+ || CONST_OK_FOR_M (INTVAL (operands[1]))
+ || CONST_OK_FOR_N (INTVAL (operands[1]))
+ || CONST_OK_FOR_I (INTVAL (operands[1])))"
+ "@
+ mov %0,%1
+ movi %0,%1
+ bgeni %0,%P1
+ bmaski %0,%N1
+ mvc %0
+ ld.h %0,%1
+ st.h %1,%0"
+ [(set_attr "type" "move,move,move,move,move,load,store")])
+
+;; Like movhi, but the const_int source can't be synthesized in
+;; a single instruction.  Fall back to the same things that
+;; are done for movsi in such cases. Presumes that we can
+;; modify any parts of the register that we wish.
+
+(define_insn ""
+ [(set (match_operand:HI 0 "mcore_general_movdst_operand" "=r,a")
+ (match_operand:HI 1 "const_int_operand" "P,i"))]
+ "GET_CODE (operands[1]) == CONST_INT
+ && INTVAL (operands[1]) > 127 && INTVAL (operands[1]) < 65536"
+ "*
+{
+ if (GET_CODE (operands[0])== REG && REGNO (operands[0]) == 15
+ && !mcore_const_ok_for_inline (INTVAL (operands[1])))
+ {
+ /* mcore_output_move would generate lrw r15 -- a forbidden combo */
+ return mcore_output_inline_const_forced (insn, operands, SImode);
+ }
+ else
+ return mcore_output_move (insn, operands, SImode);
+}"
+ [(set_attr "type" "move")])
+
+
+;; If we're still looking around for things to use, here's a last-ditch
+;; effort that just calls the move.  We only let this happen
+;; if we're in the reload pass.
+;;
+(define_insn ""
+ [(set (match_operand:HI 0 "mcore_general_movdst_operand" "=r,a")
+ (match_operand:HI 1 "const_int_operand" "P,i"))]
+ "reload_in_progress || reload_completed"
+ "*
+{
+ if (GET_CODE (operands[0])== REG && REGNO (operands[0]) == 15
+ && !mcore_const_ok_for_inline (INTVAL (operands[1])))
+ {
+ /* mcore_output_move would generate lrw r15 -- a forbidden combo */
+ return mcore_output_inline_const_forced (insn, operands, SImode);
+ }
+ else
+ return mcore_output_move (insn, operands, HImode);
+}"
+ [(set_attr "type" "move")])
+
+;;
+;; QImode
+;;
+
+(define_expand "movqi"
+ [(set (match_operand:QI 0 "general_operand" "")
+ (match_operand:QI 1 "general_operand" ""))]
+ ""
+ "
+{
+ if (GET_CODE (operands[0]) == MEM)
+ operands[1] = force_reg (QImode, operands[1]);
+ else if (CONSTANT_P (operands[1])
+ && (GET_CODE (operands[1]) != CONST_INT
+ || (! CONST_OK_FOR_I (INTVAL (operands[1]))
+ && ! CONST_OK_FOR_M (INTVAL (operands[1]))
+ && ! CONST_OK_FOR_N (INTVAL (operands[1]))))
+ && ! reload_completed && ! reload_in_progress)
+ {
+ rtx reg = gen_reg_rtx (SImode);
+ emit_insn (gen_movsi (reg, operands[1]));
+ operands[1] = gen_rtx (SUBREG, QImode, reg, 0);
+ }
+}")
+
+(define_insn ""
+ [(set (match_operand:QI 0 "mcore_general_movdst_operand" "=r,r,r,r,r,r,m")
+ (match_operand:QI 1 "mcore_general_movsrc_operand" "r,I,M,N,c,m,r"))]
+ "(register_operand (operands[0], QImode)
+ || register_operand (operands[1], QImode))
+ && (GET_CODE (operands[1]) != CONST_INT
+ || CONST_OK_FOR_M (INTVAL (operands[1]))
+ || CONST_OK_FOR_N (INTVAL (operands[1]))
+ || CONST_OK_FOR_I (INTVAL (operands[1])))"
+ "@
+ mov %0,%1
+ movi %0,%1
+ bgeni %0,%P1
+ bmaski %0,%N1
+ mvc %0
+ ld.b %0,%1
+ st.b %1,%0"
+ [(set_attr "type" "move,move,move,move,move,load,store")])
+
+;; cover the case where the constant is 128..255; this isn't handled
+;; in the above case. We could if we wanted to mess with adding a
+;; new constraint class like M,N,I.
+(define_insn ""
+ [(set (match_operand:QI 0 "mcore_general_movdst_operand" "=r")
+ (match_operand:QI 1 "const_int_operand" ""))]
+ "GET_CODE (operands[1]) == CONST_INT
+ && INTVAL (operands[1]) > 127 && INTVAL (operands[1]) < 256"
+ "*
+{
+  /* We have a constant in the range 128..255; it takes 2 insns; we can
+   * do this with a movi followed by a bseti.
+ */
+ operands[2] = GEN_INT (INTVAL (operands[1]) & 0x7f);
+ return \"movi\\t%0,%2\;bseti\\t%0,7\";
+}"
+ [(set_attr "type" "move")])
+
+;; If we're still looking around for things to use, here's a last-ditch
+;; effort that just calls the move.  We only let this happen
+;; if we're in the reload pass.
+;;
+(define_insn ""
+ [(set (match_operand:QI 0 "mcore_general_movdst_operand" "=r,a")
+ (match_operand:QI 1 "const_int_operand" "P,i"))]
+ "(reload_in_progress || reload_completed)"
+ "*
+{
+ if (GET_CODE (operands[0])== REG && REGNO (operands[0]) == 15
+ && ! mcore_const_ok_for_inline (INTVAL (operands[1])))
+ {
+ /* mcore_output_move would generate lrw r15 -- a forbidden combo */
+ return mcore_output_inline_const_forced (insn, operands, SImode);
+ }
+ else
+ return mcore_output_move (insn, operands, QImode);
+}"
+ [(set_attr "type" "move")])
+
+;; DImode
+
+(define_expand "movdi"
+ [(set (match_operand:DI 0 "general_operand" "")
+ (match_operand:DI 1 "general_operand" ""))]
+ ""
+ "
+{
+ if (GET_CODE (operands[0]) == MEM)
+ operands[1] = force_reg (DImode, operands[1]);
+ else if (GET_CODE (operands[1]) == CONST_INT
+ && ! CONST_OK_FOR_I (INTVAL (operands[1]))
+ && ! CONST_OK_FOR_M (INTVAL (operands[1]))
+ && ! CONST_OK_FOR_N (INTVAL (operands[1]))
+ && ! reload_completed
+ && ! reload_in_progress
+ && GET_CODE (operands[0]) == REG)
+ {
+ emit_move_insn (operand_subword (operands[0], 0, 1, DImode),
+ operand_subword_force (operands[1], 0, DImode));
+ emit_move_insn (operand_subword (operands[0], 1, 1, DImode),
+ operand_subword_force (operands[1], 1, DImode));
+ DONE;
+ }
+}")
+
+(define_insn "movdi_i"
+ [(set (match_operand:DI 0 "general_operand" "=r,r,r,r,a,r,m")
+ (match_operand:DI 1 "mcore_general_movsrc_operand" "I,M,N,r,R,m,r"))]
+ ""
+ "* return mcore_output_movedouble (operands, DImode);"
+ [(set_attr "length" "4") (set_attr "type" "move,move,move,move,load,load,store")])
+
+;; SFmode
+
+(define_expand "movsf"
+ [(set (match_operand:SF 0 "general_operand" "")
+ (match_operand:SF 1 "general_operand" ""))]
+ ""
+ "
+{
+ if (GET_CODE (operands[0]) == MEM)
+ operands[1] = force_reg (SFmode, operands[1]);
+}")
+
+(define_insn "movsf_i"
+ [(set (match_operand:SF 0 "general_operand" "=r,r,m")
+ (match_operand:SF 1 "general_operand" "r,m,r"))]
+ ""
+ "@
+ mov %0,%1
+ ld.w %0,%1
+ st.w %1,%0"
+ [(set_attr "type" "move,load,store")])
+
+;; DFmode
+
+(define_expand "movdf"
+ [(set (match_operand:DF 0 "general_operand" "")
+ (match_operand:DF 1 "general_operand" ""))]
+ ""
+ "
+{
+ if (GET_CODE (operands[0]) == MEM)
+ operands[1] = force_reg (DFmode, operands[1]);
+}")
+
+(define_insn "movdf_k"
+ [(set (match_operand:DF 0 "general_operand" "=r,r,m")
+ (match_operand:DF 1 "general_operand" "r,m,r"))]
+ ""
+ "* return mcore_output_movedouble (operands, DFmode);"
+ [(set_attr "length" "4") (set_attr "type" "move,load,store")])
+
+
+;; Load/store multiple
+
+;; ??? This is not currently used.
+(define_insn "ldm"
+ [(set (match_operand:TI 0 "mcore_arith_reg_operand" "=r")
+ (mem:TI (match_operand:SI 1 "mcore_arith_reg_operand" "r")))]
+ ""
+ "ldq %U0,(%1)")
+
+;; ??? This is not currently used.
+(define_insn "stm"
+ [(set (mem:TI (match_operand:SI 0 "mcore_arith_reg_operand" "r"))
+ (match_operand:TI 1 "mcore_arith_reg_operand" "r"))]
+ ""
+ "stq %U1,(%0)")
+
+(define_expand "load_multiple"
+ [(match_par_dup 3 [(set (match_operand:SI 0 "" "")
+ (match_operand:SI 1 "" ""))
+ (use (match_operand:SI 2 "" ""))])]
+ ""
+ "
+{
+ int regno, count, i;
+
+  /* Support only loading a constant number of registers from memory, and
+     only if there are at least two of them.  The last register must be r15.  */
+ if (GET_CODE (operands[2]) != CONST_INT
+ || INTVAL (operands[2]) < 2
+ || GET_CODE (operands[1]) != MEM
+ || XEXP (operands[1], 0) != stack_pointer_rtx
+ || GET_CODE (operands[0]) != REG
+ || REGNO (operands[0]) + INTVAL (operands[2]) != 16)
+ FAIL;
+
+ count = INTVAL (operands[2]);
+ regno = REGNO (operands[0]);
+
+ operands[3] = gen_rtx (PARALLEL, VOIDmode, rtvec_alloc (count));
+
+ for (i = 0; i < count; i++)
+ XVECEXP (operands[3], 0, i)
+ = gen_rtx (SET, VOIDmode,
+ gen_rtx (REG, SImode, regno + i),
+ gen_rtx (MEM, SImode, plus_constant (stack_pointer_rtx,
+ i * 4)));
+}")
+
+(define_insn ""
+ [(match_parallel 0 "mcore_load_multiple_operation"
+ [(set (match_operand:SI 1 "mcore_arith_reg_operand" "=r")
+ (mem:SI (match_operand:SI 2 "register_operand" "r")))])]
+ "GET_CODE (operands[2]) == REG && REGNO (operands[2]) == STACK_POINTER_REGNUM"
+ "ldm %1-r15,(%2)")
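+
+;; Illustrative only: asking for 4 registers starting at r12 passes the
+;; expander's check above (12 + 4 == 16), builds a four-element PARALLEL of
+;; loads from consecutive words at the stack pointer, and the insn above
+;; then prints a single "ldm r12-r15,(...)" with the stack pointer as the
+;; address.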
+
+(define_expand "store_multiple"
+ [(match_par_dup 3 [(set (match_operand:SI 0 "" "")
+ (match_operand:SI 1 "" ""))
+ (use (match_operand:SI 2 "" ""))])]
+ ""
+ "
+{
+ int regno, count, i;
+
+  /* Support only storing a constant number of registers to memory, and
+     only if there are at least two of them.  The last register must be r15.  */
+ if (GET_CODE (operands[2]) != CONST_INT
+ || INTVAL (operands[2]) < 2
+ || GET_CODE (operands[0]) != MEM
+ || XEXP (operands[0], 0) != stack_pointer_rtx
+ || GET_CODE (operands[1]) != REG
+ || REGNO (operands[1]) + INTVAL (operands[2]) != 16)
+ FAIL;
+
+ count = INTVAL (operands[2]);
+ regno = REGNO (operands[1]);
+
+ operands[3] = gen_rtx (PARALLEL, VOIDmode, rtvec_alloc (count));
+
+ for (i = 0; i < count; i++)
+ XVECEXP (operands[3], 0, i)
+ = gen_rtx (SET, VOIDmode,
+ gen_rtx (MEM, SImode, plus_constant (stack_pointer_rtx,
+ i * 4)),
+ gen_rtx (REG, SImode, regno + i));
+}")
+
+(define_insn ""
+ [(match_parallel 0 "mcore_store_multiple_operation"
+ [(set (mem:SI (match_operand:SI 2 "register_operand" "r"))
+ (match_operand:SI 1 "mcore_arith_reg_operand" "r"))])]
+ "GET_CODE (operands[2]) == REG && REGNO (operands[2]) == STACK_POINTER_REGNUM"
+ "stm %1-r15,(%2)")
+
+;; ------------------------------------------------------------------------
+;; Define the real conditional branch instructions.
+;; ------------------------------------------------------------------------
+
+(define_insn "branch_true"
+ [(set (pc) (if_then_else (ne (reg:CC 17) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "jbt %l0"
+ [(set_attr "type" "brcond")])
+
+(define_insn "branch_false"
+ [(set (pc) (if_then_else (eq (reg:CC 17) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "jbf %l0"
+ [(set_attr "type" "brcond")])
+
+(define_insn "inverse_branch_true"
+ [(set (pc) (if_then_else (ne (reg:CC 17) (const_int 0))
+ (pc)
+ (label_ref (match_operand 0 "" ""))))]
+ ""
+ "jbf %l0"
+ [(set_attr "type" "brcond")])
+
+(define_insn "inverse_branch_false"
+ [(set (pc) (if_then_else (eq (reg:CC 17) (const_int 0))
+ (pc)
+ (label_ref (match_operand 0 "" ""))))]
+ ""
+ "jbt %l0"
+ [(set_attr "type" "brcond")])
+
+;; Conditional branch insns
+
+;; At the top level, the condition tests are eq/ne, because we
+;; are comparing against the condition register (which
+;; holds the result of the true relational test).
+
+; There is no beq compare, so we reverse the branch arms.
+
+(define_expand "beq"
+ [(set (pc) (if_then_else (ne (match_dup 1) (const_int 0))
+ (pc)
+ (label_ref (match_operand 0 "" ""))))]
+ ""
+ "
+{
+ operands[1] = mcore_gen_compare_reg (EQ);
+}")
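+
+;; An illustrative note (assuming mcore_gen_compare_reg implements eq via
+;; its cmpne inverse, as the comment above suggests): a "beq label" ends up
+;; as a cmpne-style compare that sets C to the != result, followed by
+;; "jbf label", which branches when C is clear -- i.e. when the operands
+;; were equal.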
+
+(define_expand "bne"
+ [(set (pc) (if_then_else (ne (match_dup 1) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "
+{
+ operands[1] = mcore_gen_compare_reg (NE);
+}")
+
+; Check whether (GT A imm) can become (LE A imm) with the branch reversed.
+; If so, emit a (LT A imm + 1) in place of the (LE A imm).  BRC
+
+(define_expand "bgt"
+ [(set (pc) (if_then_else (ne (match_dup 1) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "
+{
+ if (mcore_modify_comparison (LE))
+ {
+ emit_jump_insn (gen_reverse_blt (operands[0]));
+ DONE;
+ }
+ operands[1] = mcore_gen_compare_reg (GT);
+}")
+
+; There is no ble compare, so we reverse the branch arms.
+; We reversed the condition and branch arms for ble -- the check_dbra_loop()
+; transformation assumes that ble uses a branch-true with the label
+; as the target.  BRC
+
+; Check whether (LE A imm) can become (LT A imm + 1).
+
+(define_expand "ble"
+ [(set (pc) (if_then_else (eq (match_dup 1) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "
+{
+ if (mcore_modify_comparison (LE))
+ {
+ emit_jump_insn (gen_blt (operands[0]));
+ DONE;
+ }
+ operands[1] = mcore_gen_compare_reg (LE);
+}")
+
+; Make generating a reversed blt simple.
+(define_expand "reverse_blt"
+ [(set (pc) (if_then_else (ne (match_dup 1) (const_int 0))
+ (pc)
+ (label_ref (match_operand 0 "" ""))))]
+ ""
+ "
+{
+ operands[1] = mcore_gen_compare_reg (LT);
+}")
+
+(define_expand "blt"
+ [(set (pc) (if_then_else (ne (match_dup 1) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "
+{
+ operands[1] = mcore_gen_compare_reg (LT);
+}")
+
+; There is no bge compare, so we reverse the branch arms.
+
+(define_expand "bge"
+ [(set (pc) (if_then_else (ne (match_dup 1) (const_int 0))
+ (pc)
+ (label_ref (match_operand 0 "" ""))))]
+ ""
+ "
+{
+ operands[1] = mcore_gen_compare_reg (GE);
+}")
+
+; There is no gtu compare, so we reverse the branch arms
+
+;(define_expand "bgtu"
+; [(set (pc) (if_then_else (ne (match_dup 1) (const_int 0))
+; (pc)
+; (label_ref (match_operand 0 "" ""))))]
+; ""
+; "
+;{
+; if (GET_CODE (arch_compare_op1) == CONST_INT
+; && INTVAL (arch_compare_op1) == 0)
+; operands[1] = mcore_gen_compare_reg (NE);
+; else
+; { if (mcore_modify_comparison (GTU))
+; {
+; emit_jump_insn (gen_bgeu (operands[0]));
+; DONE;
+; }
+; operands[1] = mcore_gen_compare_reg (LEU);
+; }
+;}")
+
+(define_expand "bgtu"
+ [(set (pc) (if_then_else (ne (match_dup 1) (const_int 0))
+ (pc)
+ (label_ref (match_operand 0 "" ""))))]
+ ""
+ "
+{
+ if (GET_CODE (arch_compare_op1) == CONST_INT
+ && INTVAL (arch_compare_op1) == 0)
+ {
+ /* The inverse of '> 0' for an unsigned test is
+ '== 0' but we do not have such an instruction available.
+ Instead we must reverse the branch (back to the normal
+ ordering) and test '!= 0'. */
+
+ operands[1] = mcore_gen_compare_reg (NE);
+
+ emit_jump_insn (gen_rtx_SET (VOIDmode,
+ pc_rtx,
+ gen_rtx_IF_THEN_ELSE (VOIDmode,
+ gen_rtx_NE (VOIDmode,
+ operands[1],
+ const0_rtx),
+ gen_rtx_LABEL_REF (VOIDmode,operands[0]),
+ pc_rtx)));
+ DONE;
+ }
+ operands[1] = mcore_gen_compare_reg (GTU);
+}")
+
+
+(define_expand "bleu"
+ [(set (pc) (if_then_else (ne (match_dup 1) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "
+{
+ operands[1] = mcore_gen_compare_reg (LEU);
+}")
+
+; There is no bltu compare, so we reverse the branch arms
+(define_expand "bltu"
+ [(set (pc) (if_then_else (ne (match_dup 1) (const_int 0))
+ (pc)
+ (label_ref (match_operand 0 "" ""))))]
+ ""
+ "
+{
+ operands[1] = mcore_gen_compare_reg (LTU);
+}")
+
+(define_expand "bgeu"
+ [(set (pc) (if_then_else (ne (match_dup 1) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "
+{
+ operands[1] = mcore_gen_compare_reg (GEU);
+}")
+
+;; ------------------------------------------------------------------------
+;; Jump and linkage insns
+;; ------------------------------------------------------------------------
+
+(define_insn "jump_real"
+ [(set (pc)
+ (label_ref (match_operand 0 "" "")))]
+ ""
+ "jbr %l0"
+ [(set_attr "type" "branch")])
+
+(define_expand "jump"
+ [(set (pc) (label_ref (match_operand 0 "" "")))]
+ ""
+ "
+{
+ emit_insn (gen_jump_real (operand0));
+ DONE;
+}
+")
+
+(define_insn "indirect_jump"
+ [(set (pc)
+ (match_operand:SI 0 "mcore_arith_reg_operand" "r"))]
+ ""
+ "jmp %0"
+ [(set_attr "type" "jmp")])
+
+(define_expand "call"
+ [(parallel[(call (match_operand:SI 0 "" "")
+ (match_operand 1 "" ""))
+ (clobber (reg:SI 15))])]
+ ""
+ "
+{
+ if (GET_CODE (operands[0]) == MEM
+ && ! register_operand (XEXP (operands[0], 0), SImode)
+ && ! mcore_symbolic_address_p (XEXP (operands[0], 0)))
+ operands[0] = gen_rtx (MEM, GET_MODE (operands[0]),
+ force_reg (Pmode, XEXP (operands[0], 0)));
+}")
+
+(define_insn "call_internal"
+ [(call (mem:SI (match_operand:SI 0 "mcore_call_address_operand" "riR"))
+ (match_operand 1 "" ""))
+ (clobber (reg:SI 15))]
+ ""
+ "* return mcore_output_call (operands, 0);")
+
+(define_expand "call_value"
+ [(parallel[(set (match_operand 0 "register_operand" "")
+ (call (match_operand:SI 1 "" "")
+ (match_operand 2 "" "")))
+ (clobber (reg:SI 15))])]
+ ""
+ "
+{
+  if (GET_CODE (operands[1]) == MEM
+      && ! register_operand (XEXP (operands[1], 0), SImode)
+      && ! mcore_symbolic_address_p (XEXP (operands[1], 0)))
+ operands[1] = gen_rtx (MEM, GET_MODE (operands[1]),
+ force_reg (Pmode, XEXP (operands[1], 0)));
+}")
+
+(define_insn "call_value_internal"
+ [(set (match_operand 0 "register_operand" "=r")
+ (call (mem:SI (match_operand:SI 1 "mcore_call_address_operand" "riR"))
+ (match_operand 2 "" "")))
+ (clobber (reg:SI 15))]
+ ""
+ "* return mcore_output_call (operands, 1);")
+
+(define_insn "call_value_struct"
+ [(parallel [(set (match_parallel 0 ""
+ [(expr_list (match_operand 3 "register_operand" "") (match_operand 4 "immediate_operand" ""))
+ (expr_list (match_operand 5 "register_operand" "") (match_operand 6 "immediate_operand" ""))])
+ (call (match_operand:SI 1 "" "")
+ (match_operand 2 "" "")))
+ (clobber (reg:SI 15))])]
+ ""
+ "* return mcore_output_call (operands, 1);"
+)
+
+
+;; ------------------------------------------------------------------------
+;; Misc insns
+;; ------------------------------------------------------------------------
+
+(define_insn "nop"
+ [(const_int 0)]
+ ""
+ "or r0,r0")
+
+(define_insn "tablejump"
+ [(set (pc)
+ (match_operand:SI 0 "mcore_arith_reg_operand" "r"))
+ (use (label_ref (match_operand 1 "" "")))]
+ ""
+ "jmp %0"
+ [(set_attr "type" "jmp")])
+
+(define_insn "return"
+ [(return)]
+ "reload_completed && ! mcore_naked_function_p ()"
+ "jmp r15"
+ [(set_attr "type" "jmp")])
+
+(define_insn "*no_return"
+ [(return)]
+ "reload_completed && mcore_naked_function_p ()"
+ ""
+ [(set_attr "length" "0")]
+)
+
+(define_expand "prologue"
+ [(const_int 0)]
+ ""
+ "mcore_expand_prolog (); DONE;")
+
+(define_expand "epilogue"
+ [(return)]
+ ""
+ "mcore_expand_epilog ();")
+
+;; ------------------------------------------------------------------------
+;; Scc instructions
+;; ------------------------------------------------------------------------
+
+(define_insn "mvc"
+ [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
+ (ne:SI (reg:CC 17) (const_int 0)))]
+ ""
+ "mvc %0"
+ [(set_attr "type" "move")])
+
+(define_insn "mvcv"
+ [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
+ (eq:SI (reg:CC 17) (const_int 0)))]
+ ""
+ "mvcv %0"
+ [(set_attr "type" "move")])
+
+; In 0.97, use (LE 0) -- i.e. (LT 1) -- and complement the C bit.  BRC
+(define_split
+ [(parallel[
+ (set (match_operand:SI 0 "mcore_arith_reg_operand" "")
+ (ne:SI (gt:CC (match_operand:SI 1 "mcore_arith_reg_operand" "")
+ (const_int 0))
+ (const_int 0)))
+ (clobber (reg:SI 17))])]
+ ""
+ [(set (reg:CC 17)
+ (lt:CC (match_dup 1) (const_int 1)))
+ (set (match_dup 0) (eq:SI (reg:CC 17) (const_int 0)))])
+
+
+(define_expand "seq"
+ [(set (match_operand:SI 0 "mcore_arith_reg_operand" "")
+ (eq:SI (match_dup 1) (const_int 0)))]
+ ""
+ "
+{
+ operands[1] = mcore_gen_compare_reg (NE);
+}")
+
+(define_expand "sne"
+ [(set (match_operand:SI 0 "mcore_arith_reg_operand" "")
+ (ne:SI (match_dup 1) (const_int 0)))]
+ ""
+ "
+{
+ operands[1] = mcore_gen_compare_reg (NE);
+}")
+
+(define_expand "slt"
+ [(set (match_operand:SI 0 "mcore_arith_reg_operand" "")
+ (ne:SI (match_dup 1) (const_int 0)))]
+ ""
+ "
+{
+ operands[1] = mcore_gen_compare_reg (LT);
+}")
+
+; Make generating an LT with the comparison reversed easy.  BRC
+(define_expand "reverse_slt"
+ [(set (match_operand:SI 0 "mcore_arith_reg_operand" "")
+ (eq:SI (match_dup 1) (const_int 0)))]
+ ""
+ "
+{
+ operands[1] = mcore_gen_compare_reg (LT);
+}")
+
+(define_expand "sge"
+ [(set (match_operand:SI 0 "mcore_arith_reg_operand" "")
+ (eq:SI (match_dup 1) (const_int 0)))]
+ ""
+ "
+{
+ operands[1] = mcore_gen_compare_reg (LT);
+}")
+
+; Check whether (GT A imm) can become (LE A imm) with the comparison
+; reversed.  If so, emit a (LT A imm + 1) in place of the (LE A imm).  BRC
+
+(define_expand "sgt"
+ [(set (match_operand:SI 0 "mcore_arith_reg_operand" "")
+ (ne:SI (match_dup 1) (const_int 0)))]
+ ""
+ "
+{
+ if (mcore_modify_comparison (LE))
+ {
+ emit_insn (gen_reverse_slt (operands[0]));
+ DONE;
+ }
+
+ operands[1] = mcore_gen_compare_reg (GT);
+}")
+
+(define_expand "sle"
+ [(set (match_operand:SI 0 "mcore_arith_reg_operand" "")
+ (eq:SI (match_dup 1) (const_int 0)))]
+ ""
+ "
+{
+ if (mcore_modify_comparison (LE))
+ {
+ emit_insn (gen_slt (operands[0]));
+ DONE;
+ }
+ operands[1] = mcore_gen_compare_reg (GT);
+}")
+
+(define_expand "sltu"
+ [(set (match_operand:SI 0 "mcore_arith_reg_operand" "")
+ (eq:SI (match_dup 1) (const_int 0)))]
+ ""
+ "
+{
+ operands[1] = mcore_gen_compare_reg (GEU);
+}")
+
+(define_expand "sgeu"
+ [(set (match_operand:SI 0 "mcore_arith_reg_operand" "")
+ (ne:SI (match_dup 1) (const_int 0)))]
+ ""
+ "
+{
+ operands[1] = mcore_gen_compare_reg (GEU);
+}")
+
+(define_expand "sgtu"
+ [(set (match_operand:SI 0 "mcore_arith_reg_operand" "")
+ (eq:SI (match_dup 1) (const_int 0)))]
+ ""
+ "
+{
+ operands[1] = mcore_gen_compare_reg (LEU);
+}")
+
+(define_expand "sleu"
+ [(set (match_operand:SI 0 "mcore_arith_reg_operand" "")
+ (ne:SI (match_dup 1) (const_int 0)))]
+ ""
+ "
+{
+ operands[1] = mcore_gen_compare_reg (LEU);
+}")
+
+(define_insn "incscc"
+ [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
+ (plus:SI (ne (reg:CC 17) (const_int 0))
+ (match_operand:SI 1 "mcore_arith_reg_operand" "0")))]
+ ""
+ "inct %0")
+
+(define_insn "incscc_false"
+ [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
+ (plus:SI (eq (reg:CC 17) (const_int 0))
+ (match_operand:SI 1 "mcore_arith_reg_operand" "0")))]
+ ""
+ "incf %0")
+
+(define_insn "decscc"
+ [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
+ (minus:SI (match_operand:SI 1 "mcore_arith_reg_operand" "0")
+ (ne (reg:CC 17) (const_int 0))))]
+ ""
+ "dect %0")
+
+(define_insn "decscc_false"
+ [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
+ (minus:SI (match_operand:SI 1 "mcore_arith_reg_operand" "0")
+ (eq (reg:CC 17) (const_int 0))))]
+ ""
+ "decf %0")
+
+;; ------------------------------------------------------------------------
+;; Conditional move patterns.
+;; ------------------------------------------------------------------------
+
+(define_expand "smaxsi3"
+ [(set (reg:CC 17)
+ (lt:CC (match_operand:SI 1 "mcore_arith_reg_operand" "")
+ (match_operand:SI 2 "mcore_arith_reg_operand" "")))
+ (set (match_operand:SI 0 "mcore_arith_reg_operand" "")
+ (if_then_else:SI (eq (reg:CC 17) (const_int 0))
+ (match_dup 1) (match_dup 2)))]
+ ""
+ "")
+
+(define_split
+ [(set (match_operand:SI 0 "mcore_arith_reg_operand" "")
+ (smax:SI (match_operand:SI 1 "mcore_arith_reg_operand" "")
+ (match_operand:SI 2 "mcore_arith_reg_operand" "")))]
+ ""
+ [(set (reg:CC 17)
+ (lt:SI (match_dup 1) (match_dup 2)))
+ (set (match_dup 0)
+ (if_then_else:SI (eq (reg:CC 17) (const_int 0))
+ (match_dup 1) (match_dup 2)))]
+ "")
+
+; There is no tstgt in 0.97, so just use cmplti (btsti x,31) and reverse
+; the move condition.  BRC
+(define_split
+ [(set (match_operand:SI 0 "mcore_arith_reg_operand" "")
+ (smax:SI (match_operand:SI 1 "mcore_arith_reg_operand" "")
+ (const_int 0)))]
+ ""
+ [(set (reg:CC 17)
+ (lt:CC (match_dup 1) (const_int 0)))
+ (set (match_dup 0)
+ (if_then_else:SI (eq (reg:CC 17) (const_int 0))
+ (match_dup 1) (const_int 0)))]
+ "")
+
+(define_expand "sminsi3"
+ [(set (reg:CC 17)
+ (lt:CC (match_operand:SI 1 "mcore_arith_reg_operand" "")
+ (match_operand:SI 2 "mcore_arith_reg_operand" "")))
+ (set (match_operand:SI 0 "mcore_arith_reg_operand" "")
+ (if_then_else:SI (ne (reg:CC 17) (const_int 0))
+ (match_dup 1) (match_dup 2)))]
+ ""
+ "")
+
+(define_split
+ [(set (match_operand:SI 0 "mcore_arith_reg_operand" "")
+ (smin:SI (match_operand:SI 1 "mcore_arith_reg_operand" "")
+ (match_operand:SI 2 "mcore_arith_reg_operand" "")))]
+ ""
+ [(set (reg:CC 17)
+ (lt:SI (match_dup 1) (match_dup 2)))
+ (set (match_dup 0)
+ (if_then_else:SI (ne (reg:CC 17) (const_int 0))
+ (match_dup 1) (match_dup 2)))]
+ "")
+
+;(define_split
+; [(set (match_operand:SI 0 "mcore_arith_reg_operand" "")
+; (smin:SI (match_operand:SI 1 "mcore_arith_reg_operand" "")
+; (const_int 0)))]
+; ""
+; [(set (reg:CC 17)
+; (gt:CC (match_dup 1) (const_int 0)))
+; (set (match_dup 0)
+; (if_then_else:SI (eq (reg:CC 17) (const_int 0))
+; (match_dup 1) (const_int 0)))]
+; "")
+
+; Changed these unsigned patterns to use geu instead of ltu.  It appears
+; that the c-torture & ssrl test suites didn't catch these!  They only
+; showed up in Friedman's clib work.  BRC 7/7/95
+
+(define_expand "umaxsi3"
+ [(set (reg:CC 17)
+ (geu:CC (match_operand:SI 1 "mcore_arith_reg_operand" "")
+ (match_operand:SI 2 "mcore_arith_reg_operand" "")))
+ (set (match_operand:SI 0 "mcore_arith_reg_operand" "")
+ (if_then_else:SI (eq (reg:CC 17) (const_int 0))
+ (match_dup 2) (match_dup 1)))]
+ ""
+ "")
+
+(define_split
+ [(set (match_operand:SI 0 "mcore_arith_reg_operand" "")
+ (umax:SI (match_operand:SI 1 "mcore_arith_reg_operand" "")
+ (match_operand:SI 2 "mcore_arith_reg_operand" "")))]
+ ""
+ [(set (reg:CC 17)
+ (geu:SI (match_dup 1) (match_dup 2)))
+ (set (match_dup 0)
+ (if_then_else:SI (eq (reg:CC 17) (const_int 0))
+ (match_dup 2) (match_dup 1)))]
+ "")
+
+(define_expand "uminsi3"
+ [(set (reg:CC 17)
+ (geu:CC (match_operand:SI 1 "mcore_arith_reg_operand" "")
+ (match_operand:SI 2 "mcore_arith_reg_operand" "")))
+ (set (match_operand:SI 0 "mcore_arith_reg_operand" "")
+ (if_then_else:SI (ne (reg:CC 17) (const_int 0))
+ (match_dup 2) (match_dup 1)))]
+ ""
+ "")
+
+(define_split
+ [(set (match_operand:SI 0 "mcore_arith_reg_operand" "")
+ (umin:SI (match_operand:SI 1 "mcore_arith_reg_operand" "")
+ (match_operand:SI 2 "mcore_arith_reg_operand" "")))]
+ ""
+ [(set (reg:CC 17)
+ (geu:SI (match_dup 1) (match_dup 2)))
+ (set (match_dup 0)
+ (if_then_else:SI (ne (reg:CC 17) (const_int 0))
+ (match_dup 2) (match_dup 1)))]
+ "")
+
+;; ------------------------------------------------------------------------
+;; Conditional move patterns really start here
+;; ------------------------------------------------------------------------
+
+;; The "movtK" patterns are experimental.  They are intended to account for
+;; gcc's mucking about with code such as:
+;;
+;;            free_ent = ((block_compress) ? 257 : 256 );
+;;
+;; These patterns help to get a tstne/bgeni/inct (or equivalent) sequence
+;; when both arms have constants that are +/- 1 of each other.
+;;
+;; Note in the following patterns that the "movtK" ones should be the first
+;; one defined in each sequence.  This is because the general pattern also
+;; matches, so use ordering to determine priority (it's easier this way than
+;; adding conditions to the general patterns).  BRC
+;;
+;; The U and Q constraints are necessary to ensure that reload does the
+;; 'right thing'.  U constrains the operand to 0 and Q to 1 for use in the
+;; clrt & clrf and clrt/inct & clrf/incf patterns.  BRC 6/26
+;;
+;; ??? There appear to be some problems with these movtK patterns for ops
+;; other than eq & ne.  They need to be fixed.  6/30 BRC
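+;;
+;; A hedged illustration of the intent: for "x = cond ? 257 : 256" the
+;; constant 256 can be built with "bgeni rN,8", and the movtK patterns then
+;; let a conditional increment ("inct rN") supply the 257 arm, instead of
+;; materialising both constants and doing a conditional move.  (rN is a
+;; placeholder register.)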
+
+;; ------------------------------------------------------------------------
+;; ne
+;; ------------------------------------------------------------------------
+
+; experimental conditional move with two constants +/- 1 BRC
+
+(define_insn "movtK_1"
+ [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
+ (if_then_else:SI
+ (ne (reg:CC 17) (const_int 0))
+ (match_operand:SI 1 "mcore_arith_O_operand" "O")
+ (match_operand:SI 2 "mcore_arith_O_operand" "O")))]
+ " GET_CODE (operands[1]) == CONST_INT
+ && GET_CODE (operands[2]) == CONST_INT
+ && ( (INTVAL (operands[1]) - INTVAL (operands[2]) == 1)
+ || (INTVAL (operands[2]) - INTVAL (operands[1]) == 1))"
+ "* return mcore_output_cmov (operands, 1, NULL);"
+ [(set_attr "length" "4")])
+
+(define_insn "movt0"
+ [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r,r,r,r")
+ (if_then_else:SI
+ (ne (reg:CC 17) (const_int 0))
+ (match_operand:SI 1 "mcore_arith_imm_operand" "r,0,U,0")
+ (match_operand:SI 2 "mcore_arith_imm_operand" "0,r,0,U")))]
+ ""
+ "@
+ movt %0,%1
+ movf %0,%2
+ clrt %0
+ clrf %0")
+
+;; ------------------------------------------------------------------------
+;; eq
+;; ------------------------------------------------------------------------
+
+; experimental conditional move with two constants +/- 1 BRC
+(define_insn "movtK_2"
+ [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
+ (if_then_else:SI
+ (eq (reg:CC 17) (const_int 0))
+ (match_operand:SI 1 "mcore_arith_O_operand" "O")
+ (match_operand:SI 2 "mcore_arith_O_operand" "O")))]
+ " GET_CODE (operands[1]) == CONST_INT
+ && GET_CODE (operands[2]) == CONST_INT
+ && ( (INTVAL (operands[1]) - INTVAL (operands[2]) == 1)
+ || (INTVAL (operands[2]) - INTVAL (operands[1]) == 1))"
+ "* return mcore_output_cmov (operands, 0, NULL);"
+ [(set_attr "length" "4")])
+
+(define_insn "movf0"
+ [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r,r,r,r")
+ (if_then_else:SI
+ (eq (reg:CC 17) (const_int 0))
+ (match_operand:SI 1 "mcore_arith_imm_operand" "r,0,U,0")
+ (match_operand:SI 2 "mcore_arith_imm_operand" "0,r,0,U")))]
+ ""
+ "@
+ movf %0,%1
+ movt %0,%2
+ clrf %0
+ clrt %0")
+
+; Turns lsli rx,imm / btsti rx,31 into btsti rx,imm.  This is not done by a
+; peephole because the instructions are not adjacent (peepholes match by
+; position, not by dataflow).  BRC
+
+(define_insn ""
+ [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r,r,r,r")
+ (if_then_else:SI (eq (zero_extract:SI
+ (match_operand:SI 1 "mcore_arith_reg_operand" "r,r,r,r")
+ (const_int 1)
+ (match_operand:SI 2 "mcore_literal_K_operand" "K,K,K,K"))
+ (const_int 0))
+ (match_operand:SI 3 "mcore_arith_imm_operand" "r,0,U,0")
+ (match_operand:SI 4 "mcore_arith_imm_operand" "0,r,0,U")))]
+ ""
+ "@
+ btsti %1,%2\;movf %0,%3
+ btsti %1,%2\;movt %0,%4
+ btsti %1,%2\;clrf %0
+ btsti %1,%2\;clrt %0"
+ [(set_attr "length" "4")])
+
+; Turns sextb rx / btsti rx,31 into btsti rx,7.  The operand must be QImode
+; to be safe.  BRC
+
+(define_insn ""
+ [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r,r,r,r")
+ (if_then_else:SI (eq (lshiftrt:SI
+ (match_operand:SI 1 "mcore_arith_reg_operand" "r,r,r,r")
+ (const_int 7))
+ (const_int 0))
+ (match_operand:SI 2 "mcore_arith_imm_operand" "r,0,U,0")
+ (match_operand:SI 3 "mcore_arith_imm_operand" "0,r,0,U")))]
+ "GET_CODE (operands[1]) == SUBREG &&
+ GET_MODE (SUBREG_REG (operands[1])) == QImode"
+ "@
+ btsti %1,7\;movf %0,%2
+ btsti %1,7\;movt %0,%3
+ btsti %1,7\;clrf %0
+ btsti %1,7\;clrt %0"
+ [(set_attr "length" "4")])
+
+
+;; ------------------------------------------------------------------------
+;; ne
+;; ------------------------------------------------------------------------
+
+;; Combine creates this from an andn instruction in a scc sequence.
+;; We must recognize it to get conditional moves generated.
+
+; experimental conditional move with two constants +/- 1 BRC
+(define_insn "movtK_3"
+ [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
+ (if_then_else:SI
+ (ne (match_operand:SI 1 "mcore_arith_reg_operand" "r")
+ (const_int 0))
+ (match_operand:SI 2 "mcore_arith_O_operand" "O")
+ (match_operand:SI 3 "mcore_arith_O_operand" "O")))]
+ " GET_CODE (operands[2]) == CONST_INT
+ && GET_CODE (operands[3]) == CONST_INT
+ && ( (INTVAL (operands[2]) - INTVAL (operands[3]) == 1)
+ || (INTVAL (operands[3]) - INTVAL (operands[2]) == 1))"
+ "*
+{
+ rtx out_operands[4];
+ out_operands[0] = operands[0];
+ out_operands[1] = operands[2];
+ out_operands[2] = operands[3];
+ out_operands[3] = operands[1];
+
+ return mcore_output_cmov (out_operands, 1, \"cmpnei %3,0\");
+
+}"
+ [(set_attr "length" "6")])
+
+(define_insn "movt2"
+ [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r,r,r,r")
+ (if_then_else:SI (ne (match_operand:SI 1 "mcore_arith_reg_operand" "r,r,r,r")
+ (const_int 0))
+ (match_operand:SI 2 "mcore_arith_imm_operand" "r,0,U,0")
+ (match_operand:SI 3 "mcore_arith_imm_operand" "0,r,0,U")))]
+ ""
+ "@
+ cmpnei %1,0\;movt %0,%2
+ cmpnei %1,0\;movf %0,%3
+ cmpnei %1,0\;clrt %0
+ cmpnei %1,0\;clrf %0"
+ [(set_attr "length" "4")])
+
+; Turns lsli rx,imm / btsti rx,31 into btsti rx,imm.  This is not done by a
+; peephole because the instructions are not adjacent (peepholes match by
+; position, not by dataflow).  BRC
+
+(define_insn ""
+ [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r,r,r,r")
+ (if_then_else:SI (ne (zero_extract:SI
+ (match_operand:SI 1 "mcore_arith_reg_operand" "r,r,r,r")
+ (const_int 1)
+ (match_operand:SI 2 "mcore_literal_K_operand" "K,K,K,K"))
+ (const_int 0))
+ (match_operand:SI 3 "mcore_arith_imm_operand" "r,0,U,0")
+ (match_operand:SI 4 "mcore_arith_imm_operand" "0,r,0,U")))]
+ ""
+ "@
+ btsti %1,%2\;movt %0,%3
+ btsti %1,%2\;movf %0,%4
+ btsti %1,%2\;clrt %0
+ btsti %1,%2\;clrf %0"
+ [(set_attr "length" "4")])
+
+; Turns sextb rx / btsti rx,31 into btsti rx,7.  The operand must be QImode
+; to be safe.  BRC
+
+(define_insn ""
+ [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r,r,r,r")
+ (if_then_else:SI (ne (lshiftrt:SI
+ (match_operand:SI 1 "mcore_arith_reg_operand" "r,r,r,r")
+ (const_int 7))
+ (const_int 0))
+ (match_operand:SI 2 "mcore_arith_imm_operand" "r,0,U,0")
+ (match_operand:SI 3 "mcore_arith_imm_operand" "0,r,0,U")))]
+ "GET_CODE (operands[1]) == SUBREG &&
+ GET_MODE (SUBREG_REG (operands[1])) == QImode"
+ "@
+ btsti %1,7\;movt %0,%2
+ btsti %1,7\;movf %0,%3
+ btsti %1,7\;clrt %0
+ btsti %1,7\;clrf %0"
+ [(set_attr "length" "4")])
+
+;; ------------------------------------------------------------------------
+;; eq/eq
+;; ------------------------------------------------------------------------
+
+; experimental conditional move with two constants +/- 1 BRC
+(define_insn "movtK_4"
+ [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
+ (if_then_else:SI
+ (eq (eq:SI (reg:CC 17) (const_int 0)) (const_int 0))
+ (match_operand:SI 1 "mcore_arith_O_operand" "O")
+ (match_operand:SI 2 "mcore_arith_O_operand" "O")))]
+ "GET_CODE (operands[1]) == CONST_INT &&
+ GET_CODE (operands[2]) == CONST_INT &&
+ ((INTVAL (operands[1]) - INTVAL (operands[2]) == 1) ||
+ (INTVAL (operands[2]) - INTVAL (operands[1]) == 1))"
+ "* return mcore_output_cmov(operands, 1, NULL);"
+ [(set_attr "length" "4")])
+
+(define_insn "movt3"
+ [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r,r,r,r")
+ (if_then_else:SI
+ (eq (eq:SI (reg:CC 17) (const_int 0)) (const_int 0))
+ (match_operand:SI 1 "mcore_arith_imm_operand" "r,0,U,0")
+ (match_operand:SI 2 "mcore_arith_imm_operand" "0,r,0,U")))]
+ ""
+ "@
+ movt %0,%1
+ movf %0,%2
+ clrt %0
+ clrf %0")
+
+;; ------------------------------------------------------------------------
+;; eq/ne
+;; ------------------------------------------------------------------------
+
+; experimental conditional move with two constants +/- 1 BRC
+(define_insn "movtK_5"
+ [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
+ (if_then_else:SI
+ (eq (ne:SI (reg:CC 17) (const_int 0)) (const_int 0))
+ (match_operand:SI 1 "mcore_arith_O_operand" "O")
+ (match_operand:SI 2 "mcore_arith_O_operand" "O")))]
+ "GET_CODE (operands[1]) == CONST_INT &&
+ GET_CODE (operands[2]) == CONST_INT &&
+ ((INTVAL (operands[1]) - INTVAL (operands[2]) == 1) ||
+ (INTVAL (operands[2]) - INTVAL (operands[1]) == 1))"
+ "* return mcore_output_cmov (operands, 0, NULL);"
+ [(set_attr "length" "4")])
+
+(define_insn "movf1"
+ [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r,r,r,r")
+ (if_then_else:SI
+ (eq (ne:SI (reg:CC 17) (const_int 0)) (const_int 0))
+ (match_operand:SI 1 "mcore_arith_imm_operand" "r,0,U,0")
+ (match_operand:SI 2 "mcore_arith_imm_operand" "0,r,0,U")))]
+ ""
+ "@
+ movf %0,%1
+ movt %0,%2
+ clrf %0
+ clrt %0")
+
+;; ------------------------------------------------------------------------
+;; eq
+;; ------------------------------------------------------------------------
+
+;; Combine creates this from an andn instruction in a scc sequence.
+;; We must recognize it to get conditional moves generated.
+
+; experimental conditional move with two constants +/- 1 BRC
+
+(define_insn "movtK_6"
+ [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
+ (if_then_else:SI
+ (eq (match_operand:SI 1 "mcore_arith_reg_operand" "r")
+ (const_int 0))
+ (match_operand:SI 2 "mcore_arith_O_operand" "O")
+ (match_operand:SI 3 "mcore_arith_O_operand" "O")))]
+  "GET_CODE (operands[2]) == CONST_INT &&
+   GET_CODE (operands[3]) == CONST_INT &&
+ ((INTVAL (operands[2]) - INTVAL (operands[3]) == 1) ||
+ (INTVAL (operands[3]) - INTVAL (operands[2]) == 1))"
+ "*
+{
+ rtx out_operands[4];
+ out_operands[0] = operands[0];
+ out_operands[1] = operands[2];
+ out_operands[2] = operands[3];
+ out_operands[3] = operands[1];
+
+ return mcore_output_cmov (out_operands, 0, \"cmpnei %3,0\");
+}"
+ [(set_attr "length" "6")])
+
+(define_insn "movf3"
+ [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r,r,r,r")
+ (if_then_else:SI (eq (match_operand:SI 1 "mcore_arith_reg_operand" "r,r,r,r")
+ (const_int 0))
+ (match_operand:SI 2 "mcore_arith_imm_operand" "r,0,U,0")
+ (match_operand:SI 3 "mcore_arith_imm_operand" "0,r,0,U")))]
+ ""
+ "@
+ cmpnei %1,0\;movf %0,%2
+ cmpnei %1,0\;movt %0,%3
+ cmpnei %1,0\;clrf %0
+ cmpnei %1,0\;clrt %0"
+ [(set_attr "length" "4")])
+
+;; ------------------------------------------------------------------------
+;; ne/eq
+;; ------------------------------------------------------------------------
+
+; experimental conditional move with two constants +/- 1 BRC
+(define_insn "movtK_7"
+ [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
+ (if_then_else:SI
+ (ne (eq:SI (reg:CC 17) (const_int 0)) (const_int 0))
+ (match_operand:SI 1 "mcore_arith_O_operand" "O")
+ (match_operand:SI 2 "mcore_arith_O_operand" "O")))]
+ "GET_CODE (operands[1]) == CONST_INT &&
+ GET_CODE (operands[2]) == CONST_INT &&
+ ((INTVAL (operands[1]) - INTVAL (operands[2]) == 1) ||
+ (INTVAL (operands[2]) - INTVAL (operands[1]) == 1))"
+ "* return mcore_output_cmov (operands, 0, NULL);"
+ [(set_attr "length" "4")])
+
+(define_insn "movf4"
+ [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r,r,r,r")
+ (if_then_else:SI
+ (ne (eq:SI (reg:CC 17) (const_int 0)) (const_int 0))
+ (match_operand:SI 1 "mcore_arith_imm_operand" "r,0,U,0")
+ (match_operand:SI 2 "mcore_arith_imm_operand" "0,r,0,U")))]
+ ""
+ "@
+ movf %0,%1
+ movt %0,%2
+ clrf %0
+ clrt %0")
+
+;; ------------------------------------------------------------------------
+;; ne/ne
+;; ------------------------------------------------------------------------
+
+; experimental conditional move with two constants +/- 1 BRC
+(define_insn "movtK_8"
+ [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
+ (if_then_else:SI
+ (ne (ne:SI (reg:CC 17) (const_int 0)) (const_int 0))
+ (match_operand:SI 1 "mcore_arith_O_operand" "O")
+ (match_operand:SI 2 "mcore_arith_O_operand" "O")))]
+ "GET_CODE (operands[1]) == CONST_INT &&
+ GET_CODE (operands[2]) == CONST_INT &&
+ ((INTVAL (operands[1]) - INTVAL (operands[2]) == 1) ||
+ (INTVAL (operands[2]) - INTVAL (operands[1]) == 1))"
+ "* return mcore_output_cmov (operands, 1, NULL);"
+ [(set_attr "length" "4")])
+
+(define_insn "movt4"
+ [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r,r,r,r")
+ (if_then_else:SI
+ (ne (ne:SI (reg:CC 17) (const_int 0)) (const_int 0))
+ (match_operand:SI 1 "mcore_arith_imm_operand" "r,0,U,0")
+ (match_operand:SI 2 "mcore_arith_imm_operand" "0,r,0,U")))]
+ ""
+ "@
+ movt %0,%1
+ movf %0,%2
+ clrt %0
+ clrf %0")
+
+;; Also need patterns to recognize lt/ge, since otherwise the compiler will
+;; try to output not/asri/tstne/movf.
+
+;; ------------------------------------------------------------------------
+;; lt
+;; ------------------------------------------------------------------------
+
+; experimental conditional move with two constants +/- 1 BRC
+(define_insn "movtK_9"
+ [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
+ (if_then_else:SI
+ (lt (match_operand:SI 1 "mcore_arith_reg_operand" "r")
+ (const_int 0))
+ (match_operand:SI 2 "mcore_arith_O_operand" "O")
+ (match_operand:SI 3 "mcore_arith_O_operand" "O")))]
+ "GET_CODE (operands[2]) == CONST_INT &&
+ GET_CODE (operands[3]) == CONST_INT &&
+ ((INTVAL (operands[2]) - INTVAL (operands[3]) == 1) ||
+ (INTVAL (operands[3]) - INTVAL (operands[2]) == 1))"
+ "*
+{
+ rtx out_operands[4];
+ out_operands[0] = operands[0];
+ out_operands[1] = operands[2];
+ out_operands[2] = operands[3];
+ out_operands[3] = operands[1];
+
+ return mcore_output_cmov (out_operands, 1, \"btsti %3,31\");
+}"
+ [(set_attr "length" "6")])
+
+(define_insn "movt5"
+ [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r,r,r,r")
+ (if_then_else:SI (lt (match_operand:SI 1 "mcore_arith_reg_operand" "r,r,r,r")
+ (const_int 0))
+ (match_operand:SI 2 "mcore_arith_imm_operand" "r,0,U,0")
+ (match_operand:SI 3 "mcore_arith_imm_operand" "0,r,0,U")))]
+ ""
+ "@
+ btsti %1,31\;movt %0,%2
+ btsti %1,31\;movf %0,%3
+ btsti %1,31\;clrt %0
+ btsti %1,31\;clrf %0"
+ [(set_attr "length" "4")])
+
+
+;; ------------------------------------------------------------------------
+;; ge
+;; ------------------------------------------------------------------------
+
+; experimental conditional move with two constants +/- 1 BRC
+(define_insn "movtK_10"
+ [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
+ (if_then_else:SI
+ (ge (match_operand:SI 1 "mcore_arith_reg_operand" "r")
+ (const_int 0))
+ (match_operand:SI 2 "mcore_arith_O_operand" "O")
+ (match_operand:SI 3 "mcore_arith_O_operand" "O")))]
+ "GET_CODE (operands[2]) == CONST_INT &&
+ GET_CODE (operands[3]) == CONST_INT &&
+ ((INTVAL (operands[2]) - INTVAL (operands[3]) == 1) ||
+ (INTVAL (operands[3]) - INTVAL (operands[2]) == 1))"
+ "*
+{
+ rtx out_operands[4];
+ out_operands[0] = operands[0];
+ out_operands[1] = operands[2];
+ out_operands[2] = operands[3];
+ out_operands[3] = operands[1];
+
+ return mcore_output_cmov (out_operands, 0, \"btsti %3,31\");
+}"
+ [(set_attr "length" "6")])
+
+(define_insn "movf5"
+ [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r,r,r,r")
+ (if_then_else:SI (ge (match_operand:SI 1 "mcore_arith_reg_operand" "r,r,r,r")
+ (const_int 0))
+ (match_operand:SI 2 "mcore_arith_imm_operand" "r,0,U,0")
+ (match_operand:SI 3 "mcore_arith_imm_operand" "0,r,0,U")))]
+ ""
+ "@
+ btsti %1,31\;movf %0,%2
+ btsti %1,31\;movt %0,%3
+ btsti %1,31\;clrf %0
+ btsti %1,31\;clrt %0"
+ [(set_attr "length" "4")])
+
+;; ------------------------------------------------------------------------
+;; Bitfield extract (xtrbN)
+;; ------------------------------------------------------------------------
+
+; Sometimes we're better off using QI/HI mode and letting the
+; machine-independent part expand insv and extv.
+;
+; e.g., sequences like the following [an insertion]:
+;
+;	ldw	r8,(r6)
+;	movi	r7,0x00ffffff
+;	and	r8,r7		r7 dead
+;	stw	r8,(r6)		r8 dead
+;
+; become:
+;
+;	movi	r8,0
+;	stb	r8,(r6)		r8 dead
+;
+; It looks like always using SI mode is a win except in this type of code
+; (when adjacent bit fields collapse on a byte or halfword boundary).  When
+; expanding with SI mode, non-adjacent bit-field masks fold, but with QI/HI
+; mode they do not.  One thought is to add some peepholes to cover cases
+; like the above, but this is not a general solution.
+;
+; -mwide-bitfields expands/inserts using SI mode.  Otherwise, do it with
+; the smallest mode possible (using the machine-independent expansions).  BRC
+;(define_expand "extv"
+; [(set (match_operand:SI 0 "mcore_arith_reg_operand" "")
+; (sign_extract:SI (match_operand:SI 1 "mcore_arith_reg_operand" "")
+; (match_operand:SI 2 "const_int_operand" "")
+; (match_operand:SI 3 "const_int_operand" "")))
+; (clobber (reg:CC 17))]
+; ""
+; "
+;{
+; if (INTVAL (operands[1]) != 8 || INTVAL (operands[2]) % 8 != 0)
+; {
+; if (TARGET_W_FIELD)
+; {
+; rtx lshft = GEN_INT (32 - (INTVAL (operands[2]) + INTVAL (operands[3])));
+; rtx rshft = GEN_INT (32 - INTVAL (operands[2]));
+;
+; emit_insn (gen_rtx (SET, SImode, operands[0], operands[1]));
+; emit_insn (gen_rtx (SET, SImode, operands[0],
+; gen_rtx (ASHIFT, SImode, operands[0], lshft)));
+; emit_insn (gen_rtx (SET, SImode, operands[0],
+; gen_rtx (ASHIFTRT, SImode, operands[0], rshft)));
+; DONE;
+; }
+; else
+; FAIL;
+; }
+;}")
+
+(define_expand "extv"
+ [(set (match_operand:SI 0 "mcore_arith_reg_operand" "")
+ (sign_extract:SI (match_operand:SI 1 "mcore_arith_reg_operand" "")
+ (match_operand:SI 2 "const_int_operand" "")
+ (match_operand:SI 3 "const_int_operand" "")))
+ (clobber (reg:CC 17))]
+ ""
+ "
+{
+ if (INTVAL (operands[2]) == 8 && INTVAL (operands[3]) % 8 == 0)
+ {
+      /* An 8-bit field, aligned properly: use the xtrb[0123]+sextb
+         sequence.  Neither DONE nor FAIL -- just let the RTL be
+         generated.  */
+ }
+ else if (TARGET_W_FIELD)
+ {
+ /* Arbitrary placement; note that the tree->rtl generator will make
+ something close to this if we return FAIL */
+ rtx lshft = GEN_INT (32 - (INTVAL (operands[2]) + INTVAL (operands[3])));
+ rtx rshft = GEN_INT (32 - INTVAL (operands[2]));
+ rtx tmp1 = gen_reg_rtx (SImode);
+ rtx tmp2 = gen_reg_rtx (SImode);
+
+ emit_insn (gen_rtx (SET, SImode, tmp1, operands[1]));
+ emit_insn (gen_rtx (SET, SImode, tmp2,
+ gen_rtx (ASHIFT, SImode, tmp1, lshft)));
+ emit_insn (gen_rtx (SET, SImode, operands[0],
+ gen_rtx (ASHIFTRT, SImode, tmp2, rshft)));
+ DONE;
+ }
+ else
+ {
+      /* Let the caller choose an alternate sequence.  */
+ FAIL;
+ }
+}")
+
+(define_expand "extzv"
+ [(set (match_operand:SI 0 "mcore_arith_reg_operand" "")
+ (zero_extract:SI (match_operand:SI 1 "mcore_arith_reg_operand" "")
+ (match_operand:SI 2 "const_int_operand" "")
+ (match_operand:SI 3 "const_int_operand" "")))
+ (clobber (reg:CC 17))]
+ ""
+ "
+{
+ if (INTVAL (operands[2]) == 8 && INTVAL (operands[3]) % 8 == 0)
+ {
+      /* An 8-bit field, aligned properly: use the xtrb[0123] sequence
+         and let the template generate some RTL.  */
+ }
+ else if (CONST_OK_FOR_K ((1 << INTVAL (operands[2])) - 1))
+ {
+ /* A narrow bitfield (<=5 bits) means we can do a shift to put
+ it in place and then use an andi to extract it.
+ This is as good as a shiftleft/shiftright. */
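+
+      /* Illustrative sketch, not verified compiler output: a hypothetical
+         3-bit field at bit offset 4 would become "lsri rN,4" followed by
+         "andi rN,7", since the mask (1 << 3) - 1 == 7 is small enough for
+         the andi.  */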
+
+ rtx shifted;
+ rtx mask = GEN_INT ((1 << INTVAL (operands[2])) - 1);
+
+ if (INTVAL (operands[3]) == 0)
+ {
+ shifted = operands[1];
+ }
+ else
+ {
+ rtx rshft = GEN_INT (INTVAL (operands[3]));
+ shifted = gen_reg_rtx (SImode);
+ emit_insn (gen_rtx (SET, SImode, shifted,
+ gen_rtx (LSHIFTRT, SImode, operands[1], rshft)));
+ }
+ emit_insn (gen_rtx (SET, SImode, operands[0],
+ gen_rtx (AND, SImode, shifted, mask)));
+ DONE;
+ }
+ else if (TARGET_W_FIELD)
+ {
+      /* Arbitrary pattern; play shift/shift games to get it.  This is
+         pretty much what the caller will do if we say FAIL.  */
+ rtx lshft = GEN_INT (32 - (INTVAL (operands[2]) + INTVAL (operands[3])));
+ rtx rshft = GEN_INT (32 - INTVAL (operands[2]));
+ rtx tmp1 = gen_reg_rtx (SImode);
+ rtx tmp2 = gen_reg_rtx (SImode);
+
+ emit_insn (gen_rtx (SET, SImode, tmp1, operands[1]));
+ emit_insn (gen_rtx (SET, SImode, tmp2,
+ gen_rtx (ASHIFT, SImode, tmp1, lshft)));
+ emit_insn (gen_rtx (SET, SImode, operands[0],
+ gen_rtx (LSHIFTRT, SImode, tmp2, rshft)));
+ DONE;
+ }
+ else
+ {
+ /* Make the compiler figure out some alternative mechanism. */
+ FAIL;
+ }
+
+ /* Emit the RTL pattern; something will match it later. */
+}")
+
+(define_expand "insv"
+ [(set (zero_extract:SI (match_operand:SI 0 "mcore_arith_reg_operand" "")
+ (match_operand:SI 1 "const_int_operand" "")
+ (match_operand:SI 2 "const_int_operand" ""))
+ (match_operand:SI 3 "general_operand" ""))
+ (clobber (reg:CC 17))]
+ ""
+ "
+{
+ if (mcore_expand_insv (operands))
+ {
+ DONE;
+ }
+ else
+ {
+ FAIL;
+ }
+}")
+
+;;
+;; The xtrb[0123] instructions handily get at 8-bit fields on nice boundaries,
+;; but they do force you through r1.
+;;
+;; The combiner will build such patterns for us, so we'll make them available
+;; for its use.
+;;
+;; Note that we have both SIGNED and UNSIGNED versions of these...
+;;
+
+;;
+;; These no longer worry about clobbering the CC bit; it is not clear that
+;; this is a good thing...
+;;
+;; The SIGNED versions of these:
+;;
+(define_insn ""
+ [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r,b")
+ (sign_extract:SI (match_operand:SI 1 "mcore_arith_reg_operand" "0,r") (const_int 8) (const_int 24)))]
+ ""
+ "@
+ asri %0,24
+ xtrb0 %0,%1\;sextb %0"
+ [(set_attr "type" "shift")])
+
+(define_insn ""
+ [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=b")
+ (sign_extract:SI (match_operand:SI 1 "mcore_arith_reg_operand" "r") (const_int 8) (const_int 16)))]
+ ""
+ "xtrb1 %0,%1\;sextb %0"
+ [(set_attr "type" "shift")])
+
+(define_insn ""
+ [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=b")
+ (sign_extract:SI (match_operand:SI 1 "mcore_arith_reg_operand" "r") (const_int 8) (const_int 8)))]
+ ""
+ "xtrb2 %0,%1\;sextb %0"
+ [(set_attr "type" "shift")])
+
+(define_insn ""
+ [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
+ (sign_extract:SI (match_operand:SI 1 "mcore_arith_reg_operand" "0") (const_int 8) (const_int 0)))]
+ ""
+ "sextb %0"
+ [(set_attr "type" "shift")])
+
+;; The UNSIGNED uses of xtrb[0123]:
+;;
+(define_insn ""
+ [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r,b")
+ (zero_extract:SI (match_operand:SI 1 "mcore_arith_reg_operand" "0,r") (const_int 8) (const_int 24)))]
+ ""
+ "@
+ lsri %0,24
+ xtrb0 %0,%1"
+ [(set_attr "type" "shift")])
+
+(define_insn ""
+ [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=b")
+ (zero_extract:SI (match_operand:SI 1 "mcore_arith_reg_operand" "r") (const_int 8) (const_int 16)))]
+ ""
+ "xtrb1 %0,%1"
+ [(set_attr "type" "shift")])
+
+(define_insn ""
+ [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=b")
+ (zero_extract:SI (match_operand:SI 1 "mcore_arith_reg_operand" "r") (const_int 8) (const_int 8)))]
+ ""
+ "xtrb2 %0,%1"
+ [(set_attr "type" "shift")])
+
+;; This can be peepholed if it follows an ld.b ...
+(define_insn ""
+ [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r,b")
+ (zero_extract:SI (match_operand:SI 1 "mcore_arith_reg_operand" "0,r") (const_int 8) (const_int 0)))]
+ ""
+ "@
+ zextb %0
+ xtrb3 %0,%1\;zextb %0"
+ [(set_attr "type" "shift")])
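+
+;; To recap (informational note only): xtrb0 grabs bits 31..24, xtrb1 bits
+;; 23..16, xtrb2 bits 15..8 and xtrb3 bits 7..0 of the source word, which is
+;; why each pattern above pairs an xtrbN with the matching extract position.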
+
+
+;; ------------------------------------------------------------------------
+;; Block move - adapted from m88k.md
+;; ------------------------------------------------------------------------
+
+(define_expand "movstrsi"
+ [(parallel [(set (mem:BLK (match_operand:BLK 0 "" ""))
+ (mem:BLK (match_operand:BLK 1 "" "")))
+ (use (match_operand:SI 2 "general_operand" ""))
+ (use (match_operand:SI 3 "immediate_operand" ""))])]
+ ""
+ "
+{
+ rtx dest_mem = operands[0];
+ rtx src_mem = operands[1];
+ operands[0] = copy_to_mode_reg (SImode, XEXP (operands[0], 0));
+ operands[1] = copy_to_mode_reg (SImode, XEXP (operands[1], 0));
+ mcore_expand_block_move (dest_mem, src_mem, operands);
+ DONE;
+}")
+
+;; ;;; ??? These patterns are meant to be generated from expand_block_move,
+;; ;;; but they currently are not.
+;;
+;; (define_insn ""
+;; [(set (match_operand:QI 0 "mcore_arith_reg_operand" "=r")
+;; (match_operand:BLK 1 "mcore_general_movsrc_operand" "m"))]
+;; ""
+;; "ld.b %0,%1"
+;; [(set_attr "type" "load")])
+;;
+;; (define_insn ""
+;; [(set (match_operand:HI 0 "mcore_arith_reg_operand" "=r")
+;; (match_operand:BLK 1 "mcore_general_movsrc_operand" "m"))]
+;; ""
+;; "ld.h %0,%1"
+;; [(set_attr "type" "load")])
+;;
+;; (define_insn ""
+;; [(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
+;; (match_operand:BLK 1 "mcore_general_movsrc_operand" "m"))]
+;; ""
+;; "ld.w %0,%1"
+;; [(set_attr "type" "load")])
+;;
+;; (define_insn ""
+;; [(set (match_operand:BLK 0 "mcore_general_movdst_operand" "=m")
+;; (match_operand:QI 1 "mcore_arith_reg_operand" "r"))]
+;; ""
+;; "st.b %1,%0"
+;; [(set_attr "type" "store")])
+;;
+;; (define_insn ""
+;; [(set (match_operand:BLK 0 "mcore_general_movdst_operand" "=m")
+;; (match_operand:HI 1 "mcore_arith_reg_operand" "r"))]
+;; ""
+;; "st.h %1,%0"
+;; [(set_attr "type" "store")])
+;;
+;; (define_insn ""
+;; [(set (match_operand:BLK 0 "mcore_general_movdst_operand" "=m")
+;; (match_operand:SI 1 "mcore_arith_reg_operand" "r"))]
+;; ""
+;; "st.w %1,%0"
+;; [(set_attr "type" "store")])
+
+;; ------------------------------------------------------------------------
+;; Misc Optimizing quirks
+;; ------------------------------------------------------------------------
+
+;; An insn/split pair to catch constructs like (int *)((p+=4)-4), which
+;; happen in stdarg/varargs traversal.  This changes a 3-insn sequence into
+;; a 2-insn sequence (see the illustrative note after the split below).
+;; -- RBE 11/30/95
+(define_insn ""
+ [(parallel[
+ (set (match_operand:SI 0 "mcore_arith_reg_operand" "=r")
+ (match_operand:SI 1 "mcore_arith_reg_operand" "+r"))
+ (set (match_dup 1) (plus:SI (match_dup 1) (match_operand 2 "mcore_arith_any_imm_operand" "")))])]
+  "GET_CODE (operands[2]) == CONST_INT"
+ "#"
+ [(set_attr "length" "4")])
+
+(define_split
+ [(parallel[
+ (set (match_operand:SI 0 "mcore_arith_reg_operand" "")
+ (match_operand:SI 1 "mcore_arith_reg_operand" ""))
+ (set (match_dup 1) (plus:SI (match_dup 1) (match_operand 2 "mcore_arith_any_imm_operand" "")))])]
+  "GET_CODE (operands[2]) == CONST_INT &&
+ operands[0] != operands[1]"
+ [(set (match_dup 0) (match_dup 1))
+ (set (match_dup 1) (plus:SI (match_dup 1) (match_dup 2)))])
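+
+;; A rough illustration of the split above: a copy of rY into rX that is
+;; paired with "rY += 4" comes out as "mov rX,rY" followed by "addi rY,4"
+;; -- the 2-insn form the comment before the pattern refers to (register
+;; names are placeholders).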
+
+
+;;; Peepholes
+
+; Note: in the following patterns, use mcore_is_dead() to ensure that the
+; register we may be trashing really is dead.  Reload doesn't always mark
+; deaths, so mcore_is_dead() (see mcore.c) scans forward to find the death.  BRC
+
+;;; A peephole to convert the 3 instruction sequence generated by reload
+;;; to load a FP-offset address into a 2 instruction sequence.
+;;; ??? This probably never matches anymore.
+(define_peephole
+ [(set (match_operand:SI 0 "mcore_arith_reg_operand" "r")
+ (match_operand:SI 1 "const_int_operand" "J"))
+ (set (match_dup 0) (neg:SI (match_dup 0)))
+ (set (match_dup 0)
+ (plus:SI (match_dup 0)
+ (match_operand:SI 2 "mcore_arith_reg_operand" "r")))]
+ "CONST_OK_FOR_J (INTVAL (operands[1]))"
+ "error\;mov %0,%2\;subi %0,%1")
+
+;; Moves of inlinable constants are done late, so when a 'not' is generated
+;; it is never combined with the following 'and' to generate an 'andn',
+;; because the combiner never sees it.  Use a peephole to pick up this case
+;; (it happens mostly with bitfields).  BRC
+
+(define_peephole
+ [(set (match_operand:SI 0 "mcore_arith_reg_operand" "r")
+ (match_operand:SI 1 "const_int_operand" "i"))
+ (set (match_operand:SI 2 "mcore_arith_reg_operand" "r")
+ (and:SI (match_dup 2) (match_dup 0)))]
+ "mcore_const_trick_uses_not (INTVAL (operands[1])) &&
+ operands[0] != operands[2] &&
+ mcore_is_dead (insn, operands[0])"
+ "* return mcore_output_andn (insn, operands);")
+
+; When setting or clearing just two bits, it's cheapest to use two bseti's
+; or bclri's.  This only happens when relaxing immediates.  BRC
+
+(define_peephole
+ [(set (match_operand:SI 0 "mcore_arith_reg_operand" "")
+ (match_operand:SI 1 "const_int_operand" ""))
+ (set (match_operand:SI 2 "mcore_arith_reg_operand" "")
+ (ior:SI (match_dup 2) (match_dup 0)))]
+ "TARGET_HARDLIT && mcore_num_ones (INTVAL (operands[1])) == 2 &&
+ mcore_is_dead (insn, operands[0])"
+ "* return mcore_output_bseti (operands[2], INTVAL (operands[1]));")
+
+(define_peephole
+ [(set (match_operand:SI 0 "mcore_arith_reg_operand" "")
+ (match_operand:SI 1 "const_int_operand" ""))
+ (set (match_operand:SI 2 "mcore_arith_reg_operand" "")
+ (and:SI (match_dup 2) (match_dup 0)))]
+ "TARGET_HARDLIT && mcore_num_zeros (INTVAL (operands[1])) == 2 &&
+ mcore_is_dead (insn, operands[0])"
+ "* return mcore_output_bclri (operands[2], INTVAL (operands[1]));")
+
+; Change an and with a mask that has a single cleared bit into a bclri.  This
+; handles QI and HI mode values using the knowledge that the most significant
+; bits don't matter.
+
+(define_peephole
+ [(set (match_operand:SI 0 "mcore_arith_reg_operand" "")
+ (match_operand:SI 1 "const_int_operand" ""))
+ (set (match_operand:SI 2 "mcore_arith_reg_operand" "")
+ (and:SI (match_operand:SI 3 "mcore_arith_reg_operand" "")
+ (match_dup 0)))]
+ "GET_CODE (operands[3]) == SUBREG &&
+ GET_MODE (SUBREG_REG (operands[3])) == QImode &&
+ mcore_num_zeros (INTVAL (operands[1]) | 0xffffff00) == 1 &&
+ mcore_is_dead (insn, operands[0])"
+"*
+ if (! mcore_is_same_reg (operands[2], operands[3]))
+ output_asm_insn (\"mov\\t%2,%3\", operands);
+ return mcore_output_bclri (operands[2], INTVAL (operands[1]) | 0xffffff00);")
+
+/* Do not fold these together -- the mode is lost at the final output phase.  */
+
+(define_peephole
+ [(set (match_operand:SI 0 "mcore_arith_reg_operand" "")
+ (match_operand:SI 1 "const_int_operand" ""))
+ (set (match_operand:SI 2 "mcore_arith_reg_operand" "")
+ (and:SI (match_operand:SI 3 "mcore_arith_reg_operand" "")
+ (match_dup 0)))]
+ "GET_CODE (operands[3]) == SUBREG &&
+ GET_MODE (SUBREG_REG (operands[3])) == HImode &&
+ mcore_num_zeros (INTVAL (operands[1]) | 0xffff0000) == 1 &&
+ operands[2] == operands[3] &&
+ mcore_is_dead (insn, operands[0])"
+"*
+ if (! mcore_is_same_reg (operands[2], operands[3]))
+ output_asm_insn (\"mov\\t%2,%3\", operands);
+ return mcore_output_bclri (operands[2], INTVAL (operands[1]) | 0xffff0000);")
+
+; This peephole helps when using -mwide-bitfields to widen fields so they
+; collapse. This, however, has the effect that a narrower mode is not used
+; when desirable.
+;
+; e.g., sequences like:
+;
+; ldw r8,(r6)
+; movi r7,0x00ffffff
+; and r8,r7 r7 dead
+; stw r8,(r6) r8 dead
+;
+; get peepholed to become:
+;
+; movi r8,0
+; stb r8,(r6) r8 dead
+;
+; Do only easy addresses that have no offset. This peephole is also applied
+; to halfwords. We need to check that the load is non-volatile before we get
+; rid of it.
+
+(define_peephole
+ [(set (match_operand:SI 0 "mcore_arith_reg_operand" "")
+ (match_operand:SI 1 "memory_operand" ""))
+ (set (match_operand:SI 2 "mcore_arith_reg_operand" "")
+ (match_operand:SI 3 "const_int_operand" ""))
+ (set (match_dup 0) (and:SI (match_dup 0) (match_dup 2)))
+ (set (match_operand:SI 4 "memory_operand" "") (match_dup 0))]
+ "mcore_is_dead (insn, operands[0]) &&
+ ! MEM_VOLATILE_P (operands[1]) &&
+ mcore_is_dead (insn, operands[2]) &&
+ (mcore_byte_offset (INTVAL (operands[3])) > -1 ||
+ mcore_halfword_offset (INTVAL (operands[3])) > -1) &&
+ ! MEM_VOLATILE_P (operands[4]) &&
+ GET_CODE (XEXP (operands[4], 0)) == REG"
+"*
+{
+ int ofs;
+ enum machine_mode mode;
+ rtx base_reg = XEXP (operands[4], 0);
+
+ if ((ofs = mcore_byte_offset (INTVAL (operands[3]))) > -1)
+ mode = QImode;
+ else if ((ofs = mcore_halfword_offset (INTVAL (operands[3]))) > -1)
+ mode = HImode;
+ else
+ abort ();
+
+ if (ofs > 0)
+ operands[4] = gen_rtx (MEM, mode,
+ gen_rtx (PLUS, SImode, base_reg, GEN_INT(ofs)));
+ else
+ operands[4] = gen_rtx (MEM, mode, base_reg);
+
+ if (mode == QImode)
+ return \"movi %0,0\\n\\tst.b %0,%4\";
+
+ return \"movi %0,0\\n\\tst.h %0,%4\";
+}")
+
+; From sop11.  Get btsti's for (LT A 0) where A is a QI or HI value.
+
+(define_peephole
+ [(set (match_operand:SI 0 "mcore_arith_reg_operand" "r")
+ (sign_extend:SI (match_operand:QI 1 "mcore_arith_reg_operand" "0")))
+ (set (reg:CC 17)
+ (lt:CC (match_dup 0)
+ (const_int 0)))]
+ "mcore_is_dead (insn, operands[0])"
+ "btsti %0,7")
+
+(define_peephole
+ [(set (match_operand:SI 0 "mcore_arith_reg_operand" "r")
+ (sign_extend:SI (match_operand:HI 1 "mcore_arith_reg_operand" "0")))
+ (set (reg:CC 17)
+ (lt:CC (match_dup 0)
+ (const_int 0)))]
+ "mcore_is_dead (insn, operands[0])"
+ "btsti %0,15")
+
+; Pick up a tst.  This combination happens because the immediate is not
+; allowed to fold into one of the operands of the tst.  It does not happen
+; when relaxing immediates.  BRC
+
+(define_peephole
+ [(set (match_operand:SI 0 "mcore_arith_reg_operand" "")
+ (match_operand:SI 1 "mcore_arith_reg_operand" ""))
+ (set (match_dup 0)
+ (and:SI (match_dup 0)
+ (match_operand:SI 2 "mcore_literal_K_operand" "")))
+ (set (reg:CC 17) (ne:CC (match_dup 0) (const_int 0)))]
+ "mcore_is_dead (insn, operands[0])"
+ "movi %0,%2\;tst %1,%0")
+
+(define_peephole
+ [(set (match_operand:SI 0 "mcore_arith_reg_operand" "")
+ (if_then_else:SI (ne (zero_extract:SI
+ (match_operand:SI 1 "mcore_arith_reg_operand" "")
+ (const_int 1)
+ (match_operand:SI 2 "mcore_literal_K_operand" ""))
+ (const_int 0))
+ (match_operand:SI 3 "mcore_arith_imm_operand" "")
+ (match_operand:SI 4 "mcore_arith_imm_operand" "")))
+ (set (reg:CC 17) (ne:CC (match_dup 0) (const_int 0)))]
+ ""
+"*
+{
+ int op0 = REGNO (operands[0]);
+
+ if (GET_CODE (operands[3]) == REG)
+ {
+ if (REGNO (operands[3]) == op0 && GET_CODE (operands[4]) == CONST_INT
+ && INTVAL (operands[4]) == 0)
+ return \"btsti %1,%2\\n\\tclrf %0\";
+ else if (GET_CODE (operands[4]) == REG)
+ {
+ if (REGNO (operands[4]) == op0)
+ return \"btsti %1,%2\\n\\tmovf %0,%3\";
+ else if (REGNO (operands[3]) == op0)
+ return \"btsti %1,%2\\n\\tmovt %0,%4\";
+ }
+
+ abort ();
+ }
+ else if (GET_CODE (operands[3]) == CONST_INT
+ && INTVAL (operands[3]) == 0
+ && GET_CODE (operands[4]) == REG)
+ return \"btsti %1,%2\\n\\tclrt %0\";
+
+ abort ();
+ return \"\";
+}")
+
+; Experimental - do the constant folding ourselves.  Note that this isn't
+; re-applied the way we'd really want; i.e., four ands collapse into two
+; instead of one.  This is because peepholes are applied as a sliding
+; window.  The peephole does not generate new rtl's, but instead slides
+; across the rtl's, generating machine instructions.  It would be nice
+; if the peephole optimizer were changed to re-apply patterns and to
+; generate new rtl's; that would be more flexible.  The pattern below helps
+; when we're not using relaxed immediates.  BRC
+
+;(define_peephole
+; [(set (match_operand:SI 0 "mcore_arith_reg_operand" "")
+; (match_operand:SI 1 "const_int_operand" ""))
+; (set (match_operand:SI 2 "mcore_arith_reg_operand" "")
+; (and:SI (match_dup 2) (match_dup 0)))
+; (set (match_dup 0)
+; (match_operand:SI 3 "const_int_operand" ""))
+; (set (match_dup 2)
+; (and:SI (match_dup 2) (match_dup 0)))]
+; "!TARGET_RELAX_IMM && mcore_is_dead (insn, operands[0]) &&
+; mcore_const_ok_for_inline (INTVAL (operands[1]) & INTVAL (operands[3]))"
+; "*
+;{
+; rtx out_operands[2];
+; out_operands[0] = operands[0];
+; out_operands[1] = GEN_INT (INTVAL (operands[1]) & INTVAL (operands[3]));
+;
+; output_inline_const (SImode, out_operands);
+;
+; output_asm_insn (\"and %2,%0\", operands);
+;
+; return \"\";
+;}")
+
+; BRC: for inlining get rid of extra test - experimental
+;(define_peephole
+; [(set (match_operand:SI 0 "mcore_arith_reg_operand" "r")
+; (ne:SI (reg:CC 17) (const_int 0)))
+; (set (reg:CC 17) (ne:CC (match_dup 0) (const_int 0)))
+; (set (pc)
+; (if_then_else (eq (reg:CC 17) (const_int 0))
+; (label_ref (match_operand 1 "" ""))
+; (pc)))]
+; ""
+; "*
+;{
+; if (get_attr_length (insn) == 10)
+; {
+; output_asm_insn (\"bt 2f\\n\\tjmpi [1f]\", operands);
+; output_asm_insn (\".align 2\\n1:\", operands);
+; output_asm_insn (\".long %1\\n2:\", operands);
+; return \"\";
+; }
+; return \"bf %l1\";
+;}")
+
+
+;;; Special patterns for dealing with the constant pool.
+
+;;; 4 byte integer in line.
+
+(define_insn "consttable_4"
+ [(unspec_volatile [(match_operand:SI 0 "general_operand" "=g")] 0)]
+ ""
+ "*
+{
+ assemble_integer (operands[0], 4, 1);
+ return \"\";
+}"
+ [(set_attr "length" "4")])
+
+;;; Align to a four byte boundary.
+
+(define_insn "align_4"
+ [(unspec_volatile [(const_int 0)] 1)]
+ ""
+ ".align 2")
+
+;;; Handle extra constant pool entries created during final pass.
+
+(define_insn "consttable_end"
+ [(unspec_volatile [(const_int 0)] 2)]
+ ""
+ "* return mcore_output_jump_label_table ();")
+
+;;
+;; Stack allocation -- in particular, for alloca().
+;; This is *not* what we use for entry into functions.
+;;
+;; This is how we allocate stack space. If we are allocating a
+;; constant amount of space and we know it is less than 4096
+;; bytes, we need do nothing.
+;;
+;; If it is more than 4096 bytes, we need to probe the stack
+;; periodically.
+;;
+;; operands[1], the distance, is a POSITIVE number indicating that we
+;; are allocating stack space.
+;;
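+;;
+;; As a rough sketch (assuming STACK_UNITS_MAXSTEP is the 4096-byte step
+;; described above, and a hypothetical constant request of 10000 bytes),
+;; the unrolled form behaves like:
+;;
+;;   sp -= 4096;  *sp = sp;	/* probe */
+;;   sp -= 4096;  *sp = sp;	/* probe */
+;;   sp -= 1808;		/* final adjustment, no probe needed */
+;;
+;; Larger or non-constant requests instead use the loop emitted below,
+;; which steps the stack pointer down and probes once per step.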
+(define_expand "allocate_stack"
+ [(set (reg:SI 0)
+ (plus:SI (reg:SI 0)
+ (match_operand:SI 1 "general_operand" "")))
+ (set (match_operand:SI 0 "register_operand" "=r")
+ (match_dup 2))]
+ ""
+ "
+{
+  /* If no probing is wanted, just adjust the stack pointer directly.  */
+ if (mcore_stack_increment == 0)
+ {
+ emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,operands[1]));
+;; emit_move_insn (operands[0], virtual_stack_dynamic_rtx);
+ DONE;
+ }
+
+  /* For a small constant growth, unroll the adjustment code.  */
+ if (GET_CODE (operands[1]) == CONST_INT
+ && INTVAL (operands[1]) < 8*STACK_UNITS_MAXSTEP)
+ {
+ int left = INTVAL(operands[1]);
+
+      /* If the distance is large, step down until only a final adjustment remains.  */
+ if (left >= STACK_UNITS_MAXSTEP)
+ {
+ rtx tmp = gen_reg_rtx (Pmode);
+ emit_insn (gen_movsi (tmp, GEN_INT(STACK_UNITS_MAXSTEP)));
+ do
+ {
+ rtx memref = gen_rtx (MEM, SImode, stack_pointer_rtx);
+ MEM_VOLATILE_P (memref) = 1;
+ emit_insn(gen_subsi3 (stack_pointer_rtx, stack_pointer_rtx, tmp));
+ emit_insn(gen_movsi (memref, stack_pointer_rtx));
+ left -= STACK_UNITS_MAXSTEP;
+ } while (left > STACK_UNITS_MAXSTEP);
+ }
+      /* Perform the final adjustment.  */
+ emit_insn(gen_addsi3(stack_pointer_rtx,stack_pointer_rtx,GEN_INT(-left)));
+;; emit_move_insn (operands[0], virtual_stack_dynamic_rtx);
+ DONE;
+ }
+ else
+ {
+ rtx out_label = 0;
+ rtx loop_label = gen_label_rtx ();
+ rtx step = gen_reg_rtx (Pmode);
+ rtx tmp = gen_reg_rtx (Pmode);
+ rtx memref;
+
+#if 1
+ emit_insn(gen_movsi(tmp, operands[1]));
+ emit_insn(gen_movsi(step, GEN_INT(STACK_UNITS_MAXSTEP)));
+
+ if (GET_CODE (operands[1]) != CONST_INT)
+ {
+ out_label = gen_label_rtx ();
+ emit_insn (gen_cmpsi (step, tmp)); /* quick out */
+ emit_jump_insn (gen_bgeu (out_label));
+ }
+
+ /* run a loop that steps it incrementally */
+ emit_label (loop_label);
+
+ /* extend a step, probe, and adjust remaining count */
+ emit_insn(gen_subsi3(stack_pointer_rtx, stack_pointer_rtx, step));
+ memref = gen_rtx (MEM, SImode, stack_pointer_rtx);
+ MEM_VOLATILE_P (memref) = 1;
+ emit_insn(gen_movsi(memref, stack_pointer_rtx));
+ emit_insn(gen_subsi3(tmp, tmp, step));
+
+ /* loop condition -- going back up */
+ emit_insn (gen_cmpsi (step, tmp));
+ emit_jump_insn (gen_bltu (loop_label));
+
+ if (out_label)
+ emit_label (out_label);
+
+ /* bump the residual */
+ emit_insn(gen_subsi3(stack_pointer_rtx, stack_pointer_rtx, tmp));
+;; emit_move_insn (operands[0], virtual_stack_dynamic_rtx);
+ DONE;
+#else
+ /* simple one-shot -- ensure register and do a subtract.
+ * this does NOT comply with the ABI. */
+ emit_insn(gen_movsi(tmp, operands[1]));
+ emit_insn(gen_subsi3(stack_pointer_rtx, stack_pointer_rtx, tmp));
+;; emit_move_insn (operands[0], virtual_stack_dynamic_rtx);
+ DONE;
+#endif
+ }
+}")
diff --git a/gcc/config/mcore/t-mcore b/gcc/config/mcore/t-mcore
new file mode 100644
index 00000000000..baeb9d2780a
--- /dev/null
+++ b/gcc/config/mcore/t-mcore
@@ -0,0 +1,64 @@
+# Name of assembly file containing libgcc1 functions.
+# This entry must be present, but it can be empty if the target does
+# not need any assembler functions to support its code generation.
+
+CROSS_LIBGCC1 = libgcc1-asm.a
+LIB1ASMSRC = mcore/lib1.asm
+LIB1ASMFUNCS = _divsi3 _udivsi3 _modsi3 _umodsi3
+
+# Assemble startup files.
+$(T)crti.o: $(srcdir)/config/mcore/crti.asm $(GCC_PASSES)
+ $(GCC_FOR_TARGET) $(GCC_CFLAGS) $(MULTILIB_CFLAGS) $(INCLUDES) \
+ -c -o $(T)crti.o -x assembler-with-cpp $(srcdir)/config/mcore/crti.asm
+
+$(T)crtn.o: $(srcdir)/config/mcore/crtn.asm $(GCC_PASSES)
+ $(GCC_FOR_TARGET) $(GCC_CFLAGS) $(MULTILIB_CFLAGS) $(INCLUDES) \
+ -c -o $(T)crtn.o -x assembler-with-cpp $(srcdir)/config/mcore/crtn.asm
+
+# We want fine grained libraries, so use the new code to build the
+# floating point emulation libraries.
+FPBIT = fp-bit.c
+DPBIT = dp-bit.c
+
+dp-bit.c: $(srcdir)/config/fp-bit.c $(srcdir)/config/mcore/t-mcore
+ rm -f dp-bit.c
+ echo '' > dp-bit.c
+ cat $(srcdir)/config/fp-bit.c >> dp-bit.c
+
+fp-bit.c: $(srcdir)/config/fp-bit.c $(srcdir)/config/mcore/t-mcore
+ rm -f fp-bit.c
+ echo '' > fp-bit.c
+ echo '#define FLOAT' > fp-bit.c
+ cat $(srcdir)/config/fp-bit.c >> fp-bit.c
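+
+# The generated sources are thin wrappers around the generic emulation
+# code: fp-bit.c starts with "#define FLOAT" so that config/fp-bit.c
+# builds its single precision routines, while dp-bit.c includes it
+# unmodified to get the double precision routines.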
+
+T_CFLAGS = -DDONT_HAVE_STDIO -DDONT_HAVE_SETJMP -Dinhibit_libc
+# could use -msifilter to be safe from interrupt/jmp interactions and others.
+TARGET_LIBGCC2_CFLAGS=-O3 -DNO_FLOATLIB_FIXUNSDFSI #-msifilter
+
+# We have values for float.h.
+CROSS_FLOAT_H = $(srcdir)/config/mcore/gfloat.h
+
+# let the library provider supply an <assert.h>
+INSTALL_ASSERT_H=
+
+# If support for -m4align is ever re-enabled then comment out the
+# following line and uncomment the multilib lines below.
+
+EXTRA_PARTS = crtbegin.o crtend.o crti.o crtn.o
+
+# MULTILIB_OPTIONS = m8align/m4align
+# MULTILIB_DIRNAMES = align8 align4
+# MULTILIB_MATCHES =
+# MULTILIB_EXTRA_OPTS =
+# MULTILIB_EXCEPTIONS =
+# EXTRA_MULTILIB_PARTS = crtbegin.o crtend.o crti.o crtn.o
+# LIBGCC = stmp-multilib
+# INSTALL_LIBGCC = install-multilib
+
+MULTILIB_OPTIONS = mbig-endian/mlittle-endian m210/m340
+MULTILIB_DIRNAMES = big little m210 m340
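+# The options above multiply out so that libgcc is built roughly once per
+# endianness/CPU combination (directory names are illustrative, e.g.
+# big/m210 or little/m340), in addition to the default configuration.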
+
+EXTRA_PARTS =
+EXTRA_MULTILIB_PARTS = crtbegin.o crtend.o crti.o crtn.o
+LIBGCC = stmp-multilib
+INSTALL_LIBGCC = install-multilib
diff --git a/gcc/config/mcore/t-mcore-pe b/gcc/config/mcore/t-mcore-pe
new file mode 100644
index 00000000000..6955c9905fa
--- /dev/null
+++ b/gcc/config/mcore/t-mcore-pe
@@ -0,0 +1,47 @@
+# Name of assembly file containing libgcc1 functions.
+# This entry must be present, but it can be empty if the target does
+# not need any assembler functions to support its code generation.
+
+CROSS_LIBGCC1 = libgcc1-asm.a
+LIB1ASMSRC = mcore/lib1.asm
+LIB1ASMFUNCS = _divsi3 _udivsi3 _modsi3 _umodsi3
+
+# We want fine grained libraries, so use the new code to build the
+# floating point emulation libraries.
+FPBIT = fp-bit.c
+DPBIT = dp-bit.c
+
+dp-bit.c: $(srcdir)/config/fp-bit.c $(srcdir)/config/mcore/t-mcore-pe
+ rm -f dp-bit.c
+ echo '' > dp-bit.c
+ cat $(srcdir)/config/fp-bit.c >> dp-bit.c
+
+fp-bit.c: $(srcdir)/config/fp-bit.c $(srcdir)/config/mcore/t-mcore-pe
+ rm -f fp-bit.c
+ echo '' > fp-bit.c
+ echo '#define FLOAT' > fp-bit.c
+ cat $(srcdir)/config/fp-bit.c >> fp-bit.c
+
+T_CFLAGS = -DDONT_HAVE_STDIO -DDONT_HAVE_SETJMP -Dinhibit_libc
+# could use -msifilter to be safe from interrupt/jmp interactions and others.
+TARGET_LIBGCC2_CFLAGS=-O3 -DNO_FLOATLIB_FIXUNSDFSI #-msifilter
+
+# We have values for float.h.
+CROSS_FLOAT_H = $(srcdir)/config/mcore/gfloat.h
+
+# let the library provider supply an <assert.h>
+INSTALL_ASSERT_H=
+
+MULTILIB_OPTIONS = mbig-endian/mlittle-endian m210/m340
+MULTILIB_DIRNAMES = big little m210 m340
+MULTILIB_MATCHES =
+MULTILIB_EXTRA_OPTS =
+MULTILIB_EXCEPTIONS =
+
+EXTRA_MULTILIB_PARTS = crtbegin.o crtend.o
+LIBGCC = stmp-multilib
+INSTALL_LIBGCC = install-multilib
+
+# If EXTRA_MULTILIB_PARTS is not defined above then define EXTRA_PARTS here
+# EXTRA_PARTS = crtbegin.o crtend.o
+
diff --git a/gcc/config/mcore/xm-mcore.h b/gcc/config/mcore/xm-mcore.h
new file mode 100644
index 00000000000..46b4eff5fd0
--- /dev/null
+++ b/gcc/config/mcore/xm-mcore.h
@@ -0,0 +1,42 @@
+/* Configuration for GNU C-compiler for the Motorola M*Core.
+ Copyright (C) 1993, 1999, 2000 Free Software Foundation, Inc.
+
+ This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA.  */
+
+/* #defines that need visibility everywhere. */
+#define FALSE 0
+#define TRUE 1
+
+/* This describes the machine the compiler is hosted on. */
+#define HOST_BITS_PER_CHAR 8
+#define HOST_BITS_PER_SHORT 16
+#define HOST_BITS_PER_INT 32
+#define HOST_BITS_PER_LONG 32
+
+/* If compiled with GNU C, use the built-in alloca. */
+#ifdef __GNUC__
+#define alloca __builtin_alloca
+#endif
+
+/* Target machine dependencies.
+ tm.h is a symbolic link to the actual target specific file. */
+#include "tm.h"
+
+/* Arguments to use with `exit'. */
+#define SUCCESS_EXIT_CODE 0
+#define FATAL_EXIT_CODE 33
+