author     Jason Molenda <jsm@bugshack.cygnus.com>   1999-10-12 04:37:53 +0000
committer  Jason Molenda <jsm@bugshack.cygnus.com>   1999-10-12 04:37:53 +0000
commit     f4e2ab8d6023f442c422f4f883c9c96f0106190f (patch)
tree       00bae8f1462117e273e686d43b7d2ea51942d508 /sim/m32r
parent     ad85739dd1557a2714913e53e2cbbf9dfd7361d4 (diff)
download   gdb-f4e2ab8d6023f442c422f4f883c9c96f0106190f.tar.gz
Initial revision
Diffstat (limited to 'sim/m32r')
-rw-r--r--   sim/m32r/cpux.c          197
-rw-r--r--   sim/m32r/cpux.h          945
-rw-r--r--   sim/m32r/decodex.c      2223
-rw-r--r--   sim/m32r/decodex.h       143
-rw-r--r--   sim/m32r/m32rx.c         311
-rw-r--r--   sim/m32r/mloopx.in       484
-rw-r--r--   sim/m32r/modelx.c       2899
-rw-r--r--   sim/m32r/semx-switch.c  6266
8 files changed, 13468 insertions, 0 deletions
diff --git a/sim/m32r/cpux.c b/sim/m32r/cpux.c
new file mode 100644
index 00000000000..47aa0b7f84c
--- /dev/null
+++ b/sim/m32r/cpux.c
@@ -0,0 +1,197 @@
+/* Misc. support for CPU family m32rxf.
+
+THIS FILE IS MACHINE GENERATED WITH CGEN.
+
+Copyright (C) 1996, 1997, 1998, 1999 Free Software Foundation, Inc.
+
+This file is part of the GNU Simulators.
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License along
+with this program; if not, write to the Free Software Foundation, Inc.,
+59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+
+*/
+
+#define WANT_CPU m32rxf
+#define WANT_CPU_M32RXF
+
+#include "sim-main.h"
+#include "cgen-ops.h"
+
+/* Get the value of h-pc. */
+
+USI
+m32rxf_h_pc_get (SIM_CPU *current_cpu)
+{
+ return CPU (h_pc);
+}
+
+/* Set a value for h-pc. */
+
+void
+m32rxf_h_pc_set (SIM_CPU *current_cpu, USI newval)
+{
+ CPU (h_pc) = newval;
+}
+
+/* Get the value of h-gr. */
+
+SI
+m32rxf_h_gr_get (SIM_CPU *current_cpu, UINT regno)
+{
+ return CPU (h_gr[regno]);
+}
+
+/* Set a value for h-gr. */
+
+void
+m32rxf_h_gr_set (SIM_CPU *current_cpu, UINT regno, SI newval)
+{
+ CPU (h_gr[regno]) = newval;
+}
+
+/* Get the value of h-cr. */
+
+USI
+m32rxf_h_cr_get (SIM_CPU *current_cpu, UINT regno)
+{
+ return GET_H_CR (regno);
+}
+
+/* Set a value for h-cr. */
+
+void
+m32rxf_h_cr_set (SIM_CPU *current_cpu, UINT regno, USI newval)
+{
+ SET_H_CR (regno, newval);
+}
+
+/* Get the value of h-accum. */
+
+DI
+m32rxf_h_accum_get (SIM_CPU *current_cpu)
+{
+ return GET_H_ACCUM ();
+}
+
+/* Set a value for h-accum. */
+
+void
+m32rxf_h_accum_set (SIM_CPU *current_cpu, DI newval)
+{
+ SET_H_ACCUM (newval);
+}
+
+/* Get the value of h-accums. */
+
+DI
+m32rxf_h_accums_get (SIM_CPU *current_cpu, UINT regno)
+{
+ return GET_H_ACCUMS (regno);
+}
+
+/* Set a value for h-accums. */
+
+void
+m32rxf_h_accums_set (SIM_CPU *current_cpu, UINT regno, DI newval)
+{
+ SET_H_ACCUMS (regno, newval);
+}
+
+/* Get the value of h-cond. */
+
+BI
+m32rxf_h_cond_get (SIM_CPU *current_cpu)
+{
+ return CPU (h_cond);
+}
+
+/* Set a value for h-cond. */
+
+void
+m32rxf_h_cond_set (SIM_CPU *current_cpu, BI newval)
+{
+ CPU (h_cond) = newval;
+}
+
+/* Get the value of h-psw. */
+
+UQI
+m32rxf_h_psw_get (SIM_CPU *current_cpu)
+{
+ return GET_H_PSW ();
+}
+
+/* Set a value for h-psw. */
+
+void
+m32rxf_h_psw_set (SIM_CPU *current_cpu, UQI newval)
+{
+ SET_H_PSW (newval);
+}
+
+/* Get the value of h-bpsw. */
+
+UQI
+m32rxf_h_bpsw_get (SIM_CPU *current_cpu)
+{
+ return CPU (h_bpsw);
+}
+
+/* Set a value for h-bpsw. */
+
+void
+m32rxf_h_bpsw_set (SIM_CPU *current_cpu, UQI newval)
+{
+ CPU (h_bpsw) = newval;
+}
+
+/* Get the value of h-bbpsw. */
+
+UQI
+m32rxf_h_bbpsw_get (SIM_CPU *current_cpu)
+{
+ return CPU (h_bbpsw);
+}
+
+/* Set a value for h-bbpsw. */
+
+void
+m32rxf_h_bbpsw_set (SIM_CPU *current_cpu, UQI newval)
+{
+ CPU (h_bbpsw) = newval;
+}
+
+/* Get the value of h-lock. */
+
+BI
+m32rxf_h_lock_get (SIM_CPU *current_cpu)
+{
+ return CPU (h_lock);
+}
+
+/* Set a value for h-lock. */
+
+void
+m32rxf_h_lock_set (SIM_CPU *current_cpu, BI newval)
+{
+ CPU (h_lock) = newval;
+}
+
+/* Record trace results for INSN. */
+
+void
+m32rxf_record_trace_results (SIM_CPU *current_cpu, CGEN_INSN *insn,
+ int *indices, TRACE_RECORD *tr)
+{
+}
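
The cover functions in cpux.c give the rest of the simulator (and the hand-written m32rxf_fetch_register / m32rxf_store_register declared in cpux.h below) a single call-level interface to the CPU (h_*) hardware state. A minimal sketch of how such accessors are typically used, assuming the same sim-main.h environment as the generated code; the dump_gr_and_pc helper is illustrative only and is not part of this commit:

#define WANT_CPU m32rxf
#define WANT_CPU_M32RXF

#include <stdio.h>
#include "sim-main.h"

/* Illustrative only: print the general registers, pc and condition
   bit of one m32rxf cpu through the generated cover functions.  */

static void
dump_gr_and_pc (SIM_CPU *current_cpu)
{
  int regno;

  /* General registers r0..r15 go through m32rxf_h_gr_get.  */
  for (regno = 0; regno < 16; ++regno)
    printf ("r%-2d = 0x%08x\n", regno,
            (unsigned int) m32rxf_h_gr_get (current_cpu, regno));

  /* The pc and the condition bit have their own accessors.  */
  printf ("pc = 0x%08x  cond = %d\n",
          (unsigned int) m32rxf_h_pc_get (current_cpu),
          (int) m32rxf_h_cond_get (current_cpu));
}
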
diff --git a/sim/m32r/cpux.h b/sim/m32r/cpux.h
new file mode 100644
index 00000000000..71b14fa0b56
--- /dev/null
+++ b/sim/m32r/cpux.h
@@ -0,0 +1,945 @@
+/* CPU family header for m32rxf.
+
+THIS FILE IS MACHINE GENERATED WITH CGEN.
+
+Copyright (C) 1996, 1997, 1998, 1999 Free Software Foundation, Inc.
+
+This file is part of the GNU Simulators.
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License along
+with this program; if not, write to the Free Software Foundation, Inc.,
+59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+
+*/
+
+#ifndef CPU_M32RXF_H
+#define CPU_M32RXF_H
+
+/* Maximum number of instructions that are fetched at a time.
+ This is for LIW-type instruction sets (e.g. m32r). */
+#define MAX_LIW_INSNS 2
+
+/* Maximum number of instructions that can be executed in parallel. */
+#define MAX_PARALLEL_INSNS 2
+
+/* CPU state information. */
+typedef struct {
+ /* Hardware elements. */
+ struct {
+ /* program counter */
+ USI h_pc;
+#define GET_H_PC() CPU (h_pc)
+#define SET_H_PC(x) (CPU (h_pc) = (x))
+ /* general registers */
+ SI h_gr[16];
+#define GET_H_GR(a1) CPU (h_gr)[a1]
+#define SET_H_GR(a1, x) (CPU (h_gr)[a1] = (x))
+ /* control registers */
+ USI h_cr[16];
+#define GET_H_CR(index) m32rxf_h_cr_get_handler (current_cpu, index)
+#define SET_H_CR(index, x) \
+do { \
+m32rxf_h_cr_set_handler (current_cpu, (index), (x));\
+} while (0)
+ /* accumulator */
+ DI h_accum;
+#define GET_H_ACCUM() m32rxf_h_accum_get_handler (current_cpu)
+#define SET_H_ACCUM(x) \
+do { \
+m32rxf_h_accum_set_handler (current_cpu, (x));\
+} while (0)
+ /* accumulators */
+ DI h_accums[2];
+#define GET_H_ACCUMS(index) m32rxf_h_accums_get_handler (current_cpu, index)
+#define SET_H_ACCUMS(index, x) \
+do { \
+m32rxf_h_accums_set_handler (current_cpu, (index), (x));\
+} while (0)
+ /* condition bit */
+ BI h_cond;
+#define GET_H_COND() CPU (h_cond)
+#define SET_H_COND(x) (CPU (h_cond) = (x))
+ /* psw part of psw */
+ UQI h_psw;
+#define GET_H_PSW() m32rxf_h_psw_get_handler (current_cpu)
+#define SET_H_PSW(x) \
+do { \
+m32rxf_h_psw_set_handler (current_cpu, (x));\
+} while (0)
+ /* backup psw */
+ UQI h_bpsw;
+#define GET_H_BPSW() CPU (h_bpsw)
+#define SET_H_BPSW(x) (CPU (h_bpsw) = (x))
+ /* backup bpsw */
+ UQI h_bbpsw;
+#define GET_H_BBPSW() CPU (h_bbpsw)
+#define SET_H_BBPSW(x) (CPU (h_bbpsw) = (x))
+ /* lock */
+ BI h_lock;
+#define GET_H_LOCK() CPU (h_lock)
+#define SET_H_LOCK(x) (CPU (h_lock) = (x))
+ } hardware;
+#define CPU_CGEN_HW(cpu) (& (cpu)->cpu_data.hardware)
+} M32RXF_CPU_DATA;
+
+/* Cover fns for register access. */
+USI m32rxf_h_pc_get (SIM_CPU *);
+void m32rxf_h_pc_set (SIM_CPU *, USI);
+SI m32rxf_h_gr_get (SIM_CPU *, UINT);
+void m32rxf_h_gr_set (SIM_CPU *, UINT, SI);
+USI m32rxf_h_cr_get (SIM_CPU *, UINT);
+void m32rxf_h_cr_set (SIM_CPU *, UINT, USI);
+DI m32rxf_h_accum_get (SIM_CPU *);
+void m32rxf_h_accum_set (SIM_CPU *, DI);
+DI m32rxf_h_accums_get (SIM_CPU *, UINT);
+void m32rxf_h_accums_set (SIM_CPU *, UINT, DI);
+BI m32rxf_h_cond_get (SIM_CPU *);
+void m32rxf_h_cond_set (SIM_CPU *, BI);
+UQI m32rxf_h_psw_get (SIM_CPU *);
+void m32rxf_h_psw_set (SIM_CPU *, UQI);
+UQI m32rxf_h_bpsw_get (SIM_CPU *);
+void m32rxf_h_bpsw_set (SIM_CPU *, UQI);
+UQI m32rxf_h_bbpsw_get (SIM_CPU *);
+void m32rxf_h_bbpsw_set (SIM_CPU *, UQI);
+BI m32rxf_h_lock_get (SIM_CPU *);
+void m32rxf_h_lock_set (SIM_CPU *, BI);
+
+/* These must be hand-written. */
+extern CPUREG_FETCH_FN m32rxf_fetch_register;
+extern CPUREG_STORE_FN m32rxf_store_register;
+
+typedef struct {
+ int empty;
+} MODEL_M32RX_DATA;
+
+/* Instruction argument buffer. */
+
+union sem_fields {
+ struct { /* no operands */
+ int empty;
+ } fmt_empty;
+ struct { /* */
+ UINT f_uimm4;
+ } sfmt_trap;
+ struct { /* */
+ IADDR i_disp24;
+ unsigned char out_h_gr_14;
+ } sfmt_bl24;
+ struct { /* */
+ IADDR i_disp8;
+ unsigned char out_h_gr_14;
+ } sfmt_bl8;
+ struct { /* */
+ SI* i_dr;
+ UINT f_hi16;
+ unsigned char out_dr;
+ } sfmt_seth;
+ struct { /* */
+ SI f_imm1;
+ UINT f_accd;
+ UINT f_accs;
+ } sfmt_rac_dsi;
+ struct { /* */
+ SI* i_sr;
+ UINT f_r1;
+ unsigned char in_sr;
+ } sfmt_mvtc;
+ struct { /* */
+ SI* i_src1;
+ UINT f_accs;
+ unsigned char in_src1;
+ } sfmt_mvtachi_a;
+ struct { /* */
+ SI* i_dr;
+ UINT f_r2;
+ unsigned char out_dr;
+ } sfmt_mvfc;
+ struct { /* */
+ SI* i_dr;
+ UINT f_accs;
+ unsigned char out_dr;
+ } sfmt_mvfachi_a;
+ struct { /* */
+ ADDR i_uimm24;
+ SI* i_dr;
+ unsigned char out_dr;
+ } sfmt_ld24;
+ struct { /* */
+ SI* i_sr;
+ unsigned char in_sr;
+ unsigned char out_h_gr_14;
+ } sfmt_jl;
+ struct { /* */
+ SI* i_dr;
+ UINT f_uimm5;
+ unsigned char in_dr;
+ unsigned char out_dr;
+ } sfmt_slli;
+ struct { /* */
+ SI* i_dr;
+ INT f_simm8;
+ unsigned char in_dr;
+ unsigned char out_dr;
+ } sfmt_addi;
+ struct { /* */
+ SI* i_src1;
+ SI* i_src2;
+ unsigned char in_src1;
+ unsigned char in_src2;
+ unsigned char out_src2;
+ } sfmt_st_plus;
+ struct { /* */
+ SI* i_src1;
+ SI* i_src2;
+ INT f_simm16;
+ unsigned char in_src1;
+ unsigned char in_src2;
+ } sfmt_st_d;
+ struct { /* */
+ SI* i_src1;
+ SI* i_src2;
+ UINT f_acc;
+ unsigned char in_src1;
+ unsigned char in_src2;
+ } sfmt_machi_a;
+ struct { /* */
+ SI* i_dr;
+ SI* i_sr;
+ unsigned char in_sr;
+ unsigned char out_dr;
+ unsigned char out_sr;
+ } sfmt_ld_plus;
+ struct { /* */
+ IADDR i_disp16;
+ SI* i_src1;
+ SI* i_src2;
+ unsigned char in_src1;
+ unsigned char in_src2;
+ } sfmt_beq;
+ struct { /* */
+ SI* i_dr;
+ SI* i_sr;
+ UINT f_uimm16;
+ unsigned char in_sr;
+ unsigned char out_dr;
+ } sfmt_and3;
+ struct { /* */
+ SI* i_dr;
+ SI* i_sr;
+ INT f_simm16;
+ unsigned char in_sr;
+ unsigned char out_dr;
+ } sfmt_add3;
+ struct { /* */
+ SI* i_dr;
+ SI* i_sr;
+ unsigned char in_dr;
+ unsigned char in_sr;
+ unsigned char out_dr;
+ } sfmt_add;
+#if WITH_SCACHE_PBB
+ /* Writeback handler. */
+ struct {
+ /* Pointer to argbuf entry for insn whose results need writing back. */
+ const struct argbuf *abuf;
+ } write;
+ /* x-before handler */
+ struct {
+ /*const SCACHE *insns[MAX_PARALLEL_INSNS];*/
+ int first_p;
+ } before;
+ /* x-after handler */
+ struct {
+ int empty;
+ } after;
+ /* This entry is used to terminate each pbb. */
+ struct {
+ /* Number of insns in pbb. */
+ int insn_count;
+ /* Next pbb to execute. */
+ SCACHE *next;
+ SCACHE *branch_target;
+ } chain;
+#endif
+};
+
+/* The ARGBUF struct. */
+struct argbuf {
+ /* These are the baseclass definitions. */
+ IADDR addr;
+ const IDESC *idesc;
+ char trace_p;
+ char profile_p;
+ /* ??? Temporary hack for skip insns. */
+ char skip_count;
+ char unused;
+ /* cpu specific data follows */
+ union sem semantic;
+ int written;
+ union sem_fields fields;
+};
+
+/* A cached insn.
+
+ ??? SCACHE used to contain more than just argbuf. We could delete the
+ type entirely and always just use ARGBUF, but for future concerns and as
+ a level of abstraction it is left in. */
+
+struct scache {
+ struct argbuf argbuf;
+};
+
+/* Macros to simplify extraction, reading and semantic code.
+ These define and assign the local vars that contain the insn's fields. */
+
+#define EXTRACT_IFMT_EMPTY_VARS \
+ unsigned int length;
+#define EXTRACT_IFMT_EMPTY_CODE \
+ length = 0; \
+
+#define EXTRACT_IFMT_ADD_VARS \
+ UINT f_op1; \
+ UINT f_r1; \
+ UINT f_op2; \
+ UINT f_r2; \
+ unsigned int length;
+#define EXTRACT_IFMT_ADD_CODE \
+ length = 2; \
+ f_op1 = EXTRACT_MSB0_UINT (insn, 16, 0, 4); \
+ f_r1 = EXTRACT_MSB0_UINT (insn, 16, 4, 4); \
+ f_op2 = EXTRACT_MSB0_UINT (insn, 16, 8, 4); \
+ f_r2 = EXTRACT_MSB0_UINT (insn, 16, 12, 4); \
+
+#define EXTRACT_IFMT_ADD3_VARS \
+ UINT f_op1; \
+ UINT f_r1; \
+ UINT f_op2; \
+ UINT f_r2; \
+ INT f_simm16; \
+ unsigned int length;
+#define EXTRACT_IFMT_ADD3_CODE \
+ length = 4; \
+ f_op1 = EXTRACT_MSB0_UINT (insn, 32, 0, 4); \
+ f_r1 = EXTRACT_MSB0_UINT (insn, 32, 4, 4); \
+ f_op2 = EXTRACT_MSB0_UINT (insn, 32, 8, 4); \
+ f_r2 = EXTRACT_MSB0_UINT (insn, 32, 12, 4); \
+ f_simm16 = EXTRACT_MSB0_INT (insn, 32, 16, 16); \
+
+#define EXTRACT_IFMT_AND3_VARS \
+ UINT f_op1; \
+ UINT f_r1; \
+ UINT f_op2; \
+ UINT f_r2; \
+ UINT f_uimm16; \
+ unsigned int length;
+#define EXTRACT_IFMT_AND3_CODE \
+ length = 4; \
+ f_op1 = EXTRACT_MSB0_UINT (insn, 32, 0, 4); \
+ f_r1 = EXTRACT_MSB0_UINT (insn, 32, 4, 4); \
+ f_op2 = EXTRACT_MSB0_UINT (insn, 32, 8, 4); \
+ f_r2 = EXTRACT_MSB0_UINT (insn, 32, 12, 4); \
+ f_uimm16 = EXTRACT_MSB0_UINT (insn, 32, 16, 16); \
+
+#define EXTRACT_IFMT_OR3_VARS \
+ UINT f_op1; \
+ UINT f_r1; \
+ UINT f_op2; \
+ UINT f_r2; \
+ UINT f_uimm16; \
+ unsigned int length;
+#define EXTRACT_IFMT_OR3_CODE \
+ length = 4; \
+ f_op1 = EXTRACT_MSB0_UINT (insn, 32, 0, 4); \
+ f_r1 = EXTRACT_MSB0_UINT (insn, 32, 4, 4); \
+ f_op2 = EXTRACT_MSB0_UINT (insn, 32, 8, 4); \
+ f_r2 = EXTRACT_MSB0_UINT (insn, 32, 12, 4); \
+ f_uimm16 = EXTRACT_MSB0_UINT (insn, 32, 16, 16); \
+
+#define EXTRACT_IFMT_ADDI_VARS \
+ UINT f_op1; \
+ UINT f_r1; \
+ INT f_simm8; \
+ unsigned int length;
+#define EXTRACT_IFMT_ADDI_CODE \
+ length = 2; \
+ f_op1 = EXTRACT_MSB0_UINT (insn, 16, 0, 4); \
+ f_r1 = EXTRACT_MSB0_UINT (insn, 16, 4, 4); \
+ f_simm8 = EXTRACT_MSB0_INT (insn, 16, 8, 8); \
+
+#define EXTRACT_IFMT_ADDV3_VARS \
+ UINT f_op1; \
+ UINT f_r1; \
+ UINT f_op2; \
+ UINT f_r2; \
+ INT f_simm16; \
+ unsigned int length;
+#define EXTRACT_IFMT_ADDV3_CODE \
+ length = 4; \
+ f_op1 = EXTRACT_MSB0_UINT (insn, 32, 0, 4); \
+ f_r1 = EXTRACT_MSB0_UINT (insn, 32, 4, 4); \
+ f_op2 = EXTRACT_MSB0_UINT (insn, 32, 8, 4); \
+ f_r2 = EXTRACT_MSB0_UINT (insn, 32, 12, 4); \
+ f_simm16 = EXTRACT_MSB0_INT (insn, 32, 16, 16); \
+
+#define EXTRACT_IFMT_BC8_VARS \
+ UINT f_op1; \
+ UINT f_r1; \
+ SI f_disp8; \
+ unsigned int length;
+#define EXTRACT_IFMT_BC8_CODE \
+ length = 2; \
+ f_op1 = EXTRACT_MSB0_UINT (insn, 16, 0, 4); \
+ f_r1 = EXTRACT_MSB0_UINT (insn, 16, 4, 4); \
+ f_disp8 = ((((EXTRACT_MSB0_INT (insn, 16, 8, 8)) << (2))) + (((pc) & (-4)))); \
+
+#define EXTRACT_IFMT_BC24_VARS \
+ UINT f_op1; \
+ UINT f_r1; \
+ SI f_disp24; \
+ unsigned int length;
+#define EXTRACT_IFMT_BC24_CODE \
+ length = 4; \
+ f_op1 = EXTRACT_MSB0_UINT (insn, 32, 0, 4); \
+ f_r1 = EXTRACT_MSB0_UINT (insn, 32, 4, 4); \
+ f_disp24 = ((((EXTRACT_MSB0_INT (insn, 32, 8, 24)) << (2))) + (pc)); \
+
+#define EXTRACT_IFMT_BEQ_VARS \
+ UINT f_op1; \
+ UINT f_r1; \
+ UINT f_op2; \
+ UINT f_r2; \
+ SI f_disp16; \
+ unsigned int length;
+#define EXTRACT_IFMT_BEQ_CODE \
+ length = 4; \
+ f_op1 = EXTRACT_MSB0_UINT (insn, 32, 0, 4); \
+ f_r1 = EXTRACT_MSB0_UINT (insn, 32, 4, 4); \
+ f_op2 = EXTRACT_MSB0_UINT (insn, 32, 8, 4); \
+ f_r2 = EXTRACT_MSB0_UINT (insn, 32, 12, 4); \
+ f_disp16 = ((((EXTRACT_MSB0_INT (insn, 32, 16, 16)) << (2))) + (pc)); \
+
+#define EXTRACT_IFMT_BEQZ_VARS \
+ UINT f_op1; \
+ UINT f_r1; \
+ UINT f_op2; \
+ UINT f_r2; \
+ SI f_disp16; \
+ unsigned int length;
+#define EXTRACT_IFMT_BEQZ_CODE \
+ length = 4; \
+ f_op1 = EXTRACT_MSB0_UINT (insn, 32, 0, 4); \
+ f_r1 = EXTRACT_MSB0_UINT (insn, 32, 4, 4); \
+ f_op2 = EXTRACT_MSB0_UINT (insn, 32, 8, 4); \
+ f_r2 = EXTRACT_MSB0_UINT (insn, 32, 12, 4); \
+ f_disp16 = ((((EXTRACT_MSB0_INT (insn, 32, 16, 16)) << (2))) + (pc)); \
+
+#define EXTRACT_IFMT_CMP_VARS \
+ UINT f_op1; \
+ UINT f_r1; \
+ UINT f_op2; \
+ UINT f_r2; \
+ unsigned int length;
+#define EXTRACT_IFMT_CMP_CODE \
+ length = 2; \
+ f_op1 = EXTRACT_MSB0_UINT (insn, 16, 0, 4); \
+ f_r1 = EXTRACT_MSB0_UINT (insn, 16, 4, 4); \
+ f_op2 = EXTRACT_MSB0_UINT (insn, 16, 8, 4); \
+ f_r2 = EXTRACT_MSB0_UINT (insn, 16, 12, 4); \
+
+#define EXTRACT_IFMT_CMPI_VARS \
+ UINT f_op1; \
+ UINT f_r1; \
+ UINT f_op2; \
+ UINT f_r2; \
+ INT f_simm16; \
+ unsigned int length;
+#define EXTRACT_IFMT_CMPI_CODE \
+ length = 4; \
+ f_op1 = EXTRACT_MSB0_UINT (insn, 32, 0, 4); \
+ f_r1 = EXTRACT_MSB0_UINT (insn, 32, 4, 4); \
+ f_op2 = EXTRACT_MSB0_UINT (insn, 32, 8, 4); \
+ f_r2 = EXTRACT_MSB0_UINT (insn, 32, 12, 4); \
+ f_simm16 = EXTRACT_MSB0_INT (insn, 32, 16, 16); \
+
+#define EXTRACT_IFMT_CMPZ_VARS \
+ UINT f_op1; \
+ UINT f_r1; \
+ UINT f_op2; \
+ UINT f_r2; \
+ unsigned int length;
+#define EXTRACT_IFMT_CMPZ_CODE \
+ length = 2; \
+ f_op1 = EXTRACT_MSB0_UINT (insn, 16, 0, 4); \
+ f_r1 = EXTRACT_MSB0_UINT (insn, 16, 4, 4); \
+ f_op2 = EXTRACT_MSB0_UINT (insn, 16, 8, 4); \
+ f_r2 = EXTRACT_MSB0_UINT (insn, 16, 12, 4); \
+
+#define EXTRACT_IFMT_DIV_VARS \
+ UINT f_op1; \
+ UINT f_r1; \
+ UINT f_op2; \
+ UINT f_r2; \
+ INT f_simm16; \
+ unsigned int length;
+#define EXTRACT_IFMT_DIV_CODE \
+ length = 4; \
+ f_op1 = EXTRACT_MSB0_UINT (insn, 32, 0, 4); \
+ f_r1 = EXTRACT_MSB0_UINT (insn, 32, 4, 4); \
+ f_op2 = EXTRACT_MSB0_UINT (insn, 32, 8, 4); \
+ f_r2 = EXTRACT_MSB0_UINT (insn, 32, 12, 4); \
+ f_simm16 = EXTRACT_MSB0_INT (insn, 32, 16, 16); \
+
+#define EXTRACT_IFMT_JC_VARS \
+ UINT f_op1; \
+ UINT f_r1; \
+ UINT f_op2; \
+ UINT f_r2; \
+ unsigned int length;
+#define EXTRACT_IFMT_JC_CODE \
+ length = 2; \
+ f_op1 = EXTRACT_MSB0_UINT (insn, 16, 0, 4); \
+ f_r1 = EXTRACT_MSB0_UINT (insn, 16, 4, 4); \
+ f_op2 = EXTRACT_MSB0_UINT (insn, 16, 8, 4); \
+ f_r2 = EXTRACT_MSB0_UINT (insn, 16, 12, 4); \
+
+#define EXTRACT_IFMT_LD24_VARS \
+ UINT f_op1; \
+ UINT f_r1; \
+ UINT f_uimm24; \
+ unsigned int length;
+#define EXTRACT_IFMT_LD24_CODE \
+ length = 4; \
+ f_op1 = EXTRACT_MSB0_UINT (insn, 32, 0, 4); \
+ f_r1 = EXTRACT_MSB0_UINT (insn, 32, 4, 4); \
+ f_uimm24 = EXTRACT_MSB0_UINT (insn, 32, 8, 24); \
+
+#define EXTRACT_IFMT_LDI16_VARS \
+ UINT f_op1; \
+ UINT f_r1; \
+ UINT f_op2; \
+ UINT f_r2; \
+ INT f_simm16; \
+ unsigned int length;
+#define EXTRACT_IFMT_LDI16_CODE \
+ length = 4; \
+ f_op1 = EXTRACT_MSB0_UINT (insn, 32, 0, 4); \
+ f_r1 = EXTRACT_MSB0_UINT (insn, 32, 4, 4); \
+ f_op2 = EXTRACT_MSB0_UINT (insn, 32, 8, 4); \
+ f_r2 = EXTRACT_MSB0_UINT (insn, 32, 12, 4); \
+ f_simm16 = EXTRACT_MSB0_INT (insn, 32, 16, 16); \
+
+#define EXTRACT_IFMT_MACHI_A_VARS \
+ UINT f_op1; \
+ UINT f_r1; \
+ UINT f_acc; \
+ UINT f_op23; \
+ UINT f_r2; \
+ unsigned int length;
+#define EXTRACT_IFMT_MACHI_A_CODE \
+ length = 2; \
+ f_op1 = EXTRACT_MSB0_UINT (insn, 16, 0, 4); \
+ f_r1 = EXTRACT_MSB0_UINT (insn, 16, 4, 4); \
+ f_acc = EXTRACT_MSB0_UINT (insn, 16, 8, 1); \
+ f_op23 = EXTRACT_MSB0_UINT (insn, 16, 9, 3); \
+ f_r2 = EXTRACT_MSB0_UINT (insn, 16, 12, 4); \
+
+#define EXTRACT_IFMT_MVFACHI_A_VARS \
+ UINT f_op1; \
+ UINT f_r1; \
+ UINT f_op2; \
+ UINT f_accs; \
+ UINT f_op3; \
+ unsigned int length;
+#define EXTRACT_IFMT_MVFACHI_A_CODE \
+ length = 2; \
+ f_op1 = EXTRACT_MSB0_UINT (insn, 16, 0, 4); \
+ f_r1 = EXTRACT_MSB0_UINT (insn, 16, 4, 4); \
+ f_op2 = EXTRACT_MSB0_UINT (insn, 16, 8, 4); \
+ f_accs = EXTRACT_MSB0_UINT (insn, 16, 12, 2); \
+ f_op3 = EXTRACT_MSB0_UINT (insn, 16, 14, 2); \
+
+#define EXTRACT_IFMT_MVFC_VARS \
+ UINT f_op1; \
+ UINT f_r1; \
+ UINT f_op2; \
+ UINT f_r2; \
+ unsigned int length;
+#define EXTRACT_IFMT_MVFC_CODE \
+ length = 2; \
+ f_op1 = EXTRACT_MSB0_UINT (insn, 16, 0, 4); \
+ f_r1 = EXTRACT_MSB0_UINT (insn, 16, 4, 4); \
+ f_op2 = EXTRACT_MSB0_UINT (insn, 16, 8, 4); \
+ f_r2 = EXTRACT_MSB0_UINT (insn, 16, 12, 4); \
+
+#define EXTRACT_IFMT_MVTACHI_A_VARS \
+ UINT f_op1; \
+ UINT f_r1; \
+ UINT f_op2; \
+ UINT f_accs; \
+ UINT f_op3; \
+ unsigned int length;
+#define EXTRACT_IFMT_MVTACHI_A_CODE \
+ length = 2; \
+ f_op1 = EXTRACT_MSB0_UINT (insn, 16, 0, 4); \
+ f_r1 = EXTRACT_MSB0_UINT (insn, 16, 4, 4); \
+ f_op2 = EXTRACT_MSB0_UINT (insn, 16, 8, 4); \
+ f_accs = EXTRACT_MSB0_UINT (insn, 16, 12, 2); \
+ f_op3 = EXTRACT_MSB0_UINT (insn, 16, 14, 2); \
+
+#define EXTRACT_IFMT_MVTC_VARS \
+ UINT f_op1; \
+ UINT f_r1; \
+ UINT f_op2; \
+ UINT f_r2; \
+ unsigned int length;
+#define EXTRACT_IFMT_MVTC_CODE \
+ length = 2; \
+ f_op1 = EXTRACT_MSB0_UINT (insn, 16, 0, 4); \
+ f_r1 = EXTRACT_MSB0_UINT (insn, 16, 4, 4); \
+ f_op2 = EXTRACT_MSB0_UINT (insn, 16, 8, 4); \
+ f_r2 = EXTRACT_MSB0_UINT (insn, 16, 12, 4); \
+
+#define EXTRACT_IFMT_NOP_VARS \
+ UINT f_op1; \
+ UINT f_r1; \
+ UINT f_op2; \
+ UINT f_r2; \
+ unsigned int length;
+#define EXTRACT_IFMT_NOP_CODE \
+ length = 2; \
+ f_op1 = EXTRACT_MSB0_UINT (insn, 16, 0, 4); \
+ f_r1 = EXTRACT_MSB0_UINT (insn, 16, 4, 4); \
+ f_op2 = EXTRACT_MSB0_UINT (insn, 16, 8, 4); \
+ f_r2 = EXTRACT_MSB0_UINT (insn, 16, 12, 4); \
+
+#define EXTRACT_IFMT_RAC_DSI_VARS \
+ UINT f_op1; \
+ UINT f_accd; \
+ UINT f_bits67; \
+ UINT f_op2; \
+ UINT f_accs; \
+ UINT f_bit14; \
+ SI f_imm1; \
+ unsigned int length;
+#define EXTRACT_IFMT_RAC_DSI_CODE \
+ length = 2; \
+ f_op1 = EXTRACT_MSB0_UINT (insn, 16, 0, 4); \
+ f_accd = EXTRACT_MSB0_UINT (insn, 16, 4, 2); \
+ f_bits67 = EXTRACT_MSB0_UINT (insn, 16, 6, 2); \
+ f_op2 = EXTRACT_MSB0_UINT (insn, 16, 8, 4); \
+ f_accs = EXTRACT_MSB0_UINT (insn, 16, 12, 2); \
+ f_bit14 = EXTRACT_MSB0_UINT (insn, 16, 14, 1); \
+ f_imm1 = ((EXTRACT_MSB0_UINT (insn, 16, 15, 1)) + (1)); \
+
+#define EXTRACT_IFMT_SETH_VARS \
+ UINT f_op1; \
+ UINT f_r1; \
+ UINT f_op2; \
+ UINT f_r2; \
+ UINT f_hi16; \
+ unsigned int length;
+#define EXTRACT_IFMT_SETH_CODE \
+ length = 4; \
+ f_op1 = EXTRACT_MSB0_UINT (insn, 32, 0, 4); \
+ f_r1 = EXTRACT_MSB0_UINT (insn, 32, 4, 4); \
+ f_op2 = EXTRACT_MSB0_UINT (insn, 32, 8, 4); \
+ f_r2 = EXTRACT_MSB0_UINT (insn, 32, 12, 4); \
+ f_hi16 = EXTRACT_MSB0_UINT (insn, 32, 16, 16); \
+
+#define EXTRACT_IFMT_SLLI_VARS \
+ UINT f_op1; \
+ UINT f_r1; \
+ UINT f_shift_op2; \
+ UINT f_uimm5; \
+ unsigned int length;
+#define EXTRACT_IFMT_SLLI_CODE \
+ length = 2; \
+ f_op1 = EXTRACT_MSB0_UINT (insn, 16, 0, 4); \
+ f_r1 = EXTRACT_MSB0_UINT (insn, 16, 4, 4); \
+ f_shift_op2 = EXTRACT_MSB0_UINT (insn, 16, 8, 3); \
+ f_uimm5 = EXTRACT_MSB0_UINT (insn, 16, 11, 5); \
+
+#define EXTRACT_IFMT_ST_D_VARS \
+ UINT f_op1; \
+ UINT f_r1; \
+ UINT f_op2; \
+ UINT f_r2; \
+ INT f_simm16; \
+ unsigned int length;
+#define EXTRACT_IFMT_ST_D_CODE \
+ length = 4; \
+ f_op1 = EXTRACT_MSB0_UINT (insn, 32, 0, 4); \
+ f_r1 = EXTRACT_MSB0_UINT (insn, 32, 4, 4); \
+ f_op2 = EXTRACT_MSB0_UINT (insn, 32, 8, 4); \
+ f_r2 = EXTRACT_MSB0_UINT (insn, 32, 12, 4); \
+ f_simm16 = EXTRACT_MSB0_INT (insn, 32, 16, 16); \
+
+#define EXTRACT_IFMT_TRAP_VARS \
+ UINT f_op1; \
+ UINT f_r1; \
+ UINT f_op2; \
+ UINT f_uimm4; \
+ unsigned int length;
+#define EXTRACT_IFMT_TRAP_CODE \
+ length = 2; \
+ f_op1 = EXTRACT_MSB0_UINT (insn, 16, 0, 4); \
+ f_r1 = EXTRACT_MSB0_UINT (insn, 16, 4, 4); \
+ f_op2 = EXTRACT_MSB0_UINT (insn, 16, 8, 4); \
+ f_uimm4 = EXTRACT_MSB0_UINT (insn, 16, 12, 4); \
+
+#define EXTRACT_IFMT_SATB_VARS \
+ UINT f_op1; \
+ UINT f_r1; \
+ UINT f_op2; \
+ UINT f_r2; \
+ UINT f_uimm16; \
+ unsigned int length;
+#define EXTRACT_IFMT_SATB_CODE \
+ length = 4; \
+ f_op1 = EXTRACT_MSB0_UINT (insn, 32, 0, 4); \
+ f_r1 = EXTRACT_MSB0_UINT (insn, 32, 4, 4); \
+ f_op2 = EXTRACT_MSB0_UINT (insn, 32, 8, 4); \
+ f_r2 = EXTRACT_MSB0_UINT (insn, 32, 12, 4); \
+ f_uimm16 = EXTRACT_MSB0_UINT (insn, 32, 16, 16); \
+
+/* Queued output values of an instruction. */
+
+struct parexec {
+ union {
+ struct { /* empty sformat for unspecified field list */
+ int empty;
+ } sfmt_empty;
+ struct { /* e.g. add $dr,$sr */
+ SI dr;
+ } sfmt_add;
+ struct { /* e.g. add3 $dr,$sr,$hash$slo16 */
+ SI dr;
+ } sfmt_add3;
+ struct { /* e.g. and3 $dr,$sr,$uimm16 */
+ SI dr;
+ } sfmt_and3;
+ struct { /* e.g. or3 $dr,$sr,$hash$ulo16 */
+ SI dr;
+ } sfmt_or3;
+ struct { /* e.g. addi $dr,$simm8 */
+ SI dr;
+ } sfmt_addi;
+ struct { /* e.g. addv $dr,$sr */
+ BI condbit;
+ SI dr;
+ } sfmt_addv;
+ struct { /* e.g. addv3 $dr,$sr,$simm16 */
+ BI condbit;
+ SI dr;
+ } sfmt_addv3;
+ struct { /* e.g. addx $dr,$sr */
+ BI condbit;
+ SI dr;
+ } sfmt_addx;
+ struct { /* e.g. bc.s $disp8 */
+ USI pc;
+ } sfmt_bc8;
+ struct { /* e.g. bc.l $disp24 */
+ USI pc;
+ } sfmt_bc24;
+ struct { /* e.g. beq $src1,$src2,$disp16 */
+ USI pc;
+ } sfmt_beq;
+ struct { /* e.g. beqz $src2,$disp16 */
+ USI pc;
+ } sfmt_beqz;
+ struct { /* e.g. bl.s $disp8 */
+ SI h_gr_14;
+ USI pc;
+ } sfmt_bl8;
+ struct { /* e.g. bl.l $disp24 */
+ SI h_gr_14;
+ USI pc;
+ } sfmt_bl24;
+ struct { /* e.g. bcl.s $disp8 */
+ SI h_gr_14;
+ USI pc;
+ } sfmt_bcl8;
+ struct { /* e.g. bcl.l $disp24 */
+ SI h_gr_14;
+ USI pc;
+ } sfmt_bcl24;
+ struct { /* e.g. bra.s $disp8 */
+ USI pc;
+ } sfmt_bra8;
+ struct { /* e.g. bra.l $disp24 */
+ USI pc;
+ } sfmt_bra24;
+ struct { /* e.g. cmp $src1,$src2 */
+ BI condbit;
+ } sfmt_cmp;
+ struct { /* e.g. cmpi $src2,$simm16 */
+ BI condbit;
+ } sfmt_cmpi;
+ struct { /* e.g. cmpz $src2 */
+ BI condbit;
+ } sfmt_cmpz;
+ struct { /* e.g. div $dr,$sr */
+ SI dr;
+ } sfmt_div;
+ struct { /* e.g. jc $sr */
+ USI pc;
+ } sfmt_jc;
+ struct { /* e.g. jl $sr */
+ SI h_gr_14;
+ USI pc;
+ } sfmt_jl;
+ struct { /* e.g. jmp $sr */
+ USI pc;
+ } sfmt_jmp;
+ struct { /* e.g. ld $dr,@$sr */
+ SI dr;
+ } sfmt_ld;
+ struct { /* e.g. ld $dr,@($slo16,$sr) */
+ SI dr;
+ } sfmt_ld_d;
+ struct { /* e.g. ld $dr,@$sr+ */
+ SI dr;
+ SI sr;
+ } sfmt_ld_plus;
+ struct { /* e.g. ld24 $dr,$uimm24 */
+ SI dr;
+ } sfmt_ld24;
+ struct { /* e.g. ldi8 $dr,$simm8 */
+ SI dr;
+ } sfmt_ldi8;
+ struct { /* e.g. ldi16 $dr,$hash$slo16 */
+ SI dr;
+ } sfmt_ldi16;
+ struct { /* e.g. lock $dr,@$sr */
+ SI dr;
+ BI h_lock;
+ } sfmt_lock;
+ struct { /* e.g. machi $src1,$src2,$acc */
+ DI acc;
+ } sfmt_machi_a;
+ struct { /* e.g. mulhi $src1,$src2,$acc */
+ DI acc;
+ } sfmt_mulhi_a;
+ struct { /* e.g. mv $dr,$sr */
+ SI dr;
+ } sfmt_mv;
+ struct { /* e.g. mvfachi $dr,$accs */
+ SI dr;
+ } sfmt_mvfachi_a;
+ struct { /* e.g. mvfc $dr,$scr */
+ SI dr;
+ } sfmt_mvfc;
+ struct { /* e.g. mvtachi $src1,$accs */
+ DI accs;
+ } sfmt_mvtachi_a;
+ struct { /* e.g. mvtc $sr,$dcr */
+ USI dcr;
+ } sfmt_mvtc;
+ struct { /* e.g. nop */
+ int empty;
+ } sfmt_nop;
+ struct { /* e.g. rac $accd,$accs,$imm1 */
+ DI accd;
+ } sfmt_rac_dsi;
+ struct { /* e.g. rte */
+ UQI h_bpsw;
+ USI h_cr_6;
+ UQI h_psw;
+ USI pc;
+ } sfmt_rte;
+ struct { /* e.g. seth $dr,$hash$hi16 */
+ SI dr;
+ } sfmt_seth;
+ struct { /* e.g. sll3 $dr,$sr,$simm16 */
+ SI dr;
+ } sfmt_sll3;
+ struct { /* e.g. slli $dr,$uimm5 */
+ SI dr;
+ } sfmt_slli;
+ struct { /* e.g. st $src1,@$src2 */
+ SI h_memory_src2;
+ USI h_memory_src2_idx;
+ } sfmt_st;
+ struct { /* e.g. st $src1,@($slo16,$src2) */
+ SI h_memory_add__DFLT_src2_slo16;
+ USI h_memory_add__DFLT_src2_slo16_idx;
+ } sfmt_st_d;
+ struct { /* e.g. stb $src1,@$src2 */
+ QI h_memory_src2;
+ USI h_memory_src2_idx;
+ } sfmt_stb;
+ struct { /* e.g. stb $src1,@($slo16,$src2) */
+ QI h_memory_add__DFLT_src2_slo16;
+ USI h_memory_add__DFLT_src2_slo16_idx;
+ } sfmt_stb_d;
+ struct { /* e.g. sth $src1,@$src2 */
+ HI h_memory_src2;
+ USI h_memory_src2_idx;
+ } sfmt_sth;
+ struct { /* e.g. sth $src1,@($slo16,$src2) */
+ HI h_memory_add__DFLT_src2_slo16;
+ USI h_memory_add__DFLT_src2_slo16_idx;
+ } sfmt_sth_d;
+ struct { /* e.g. st $src1,@+$src2 */
+ SI h_memory_new_src2;
+ USI h_memory_new_src2_idx;
+ SI src2;
+ } sfmt_st_plus;
+ struct { /* e.g. trap $uimm4 */
+ UQI h_bbpsw;
+ UQI h_bpsw;
+ USI h_cr_14;
+ USI h_cr_6;
+ UQI h_psw;
+ SI pc;
+ } sfmt_trap;
+ struct { /* e.g. unlock $src1,@$src2 */
+ BI h_lock;
+ SI h_memory_src2;
+ USI h_memory_src2_idx;
+ } sfmt_unlock;
+ struct { /* e.g. satb $dr,$sr */
+ SI dr;
+ } sfmt_satb;
+ struct { /* e.g. sat $dr,$sr */
+ SI dr;
+ } sfmt_sat;
+ struct { /* e.g. sadd */
+ DI h_accums_0;
+ } sfmt_sadd;
+ struct { /* e.g. macwu1 $src1,$src2 */
+ DI h_accums_1;
+ } sfmt_macwu1;
+ struct { /* e.g. msblo $src1,$src2 */
+ DI accum;
+ } sfmt_msblo;
+ struct { /* e.g. mulwu1 $src1,$src2 */
+ DI h_accums_1;
+ } sfmt_mulwu1;
+ struct { /* e.g. sc */
+ int empty;
+ } sfmt_sc;
+ } operands;
+ /* For conditionally written operands, bitmask of which ones were. */
+ int written;
+};
+
+/* Collection of various things for the trace handler to use. */
+
+typedef struct trace_record {
+ IADDR pc;
+ /* FIXME:wip */
+} TRACE_RECORD;
+
+#endif /* CPU_M32RXF_H */
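
The EXTRACT_IFMT_*_VARS / EXTRACT_IFMT_*_CODE macro pairs above declare and fill the per-format fields used by the decoder in decodex.c below. They all bottom out in EXTRACT_MSB0_UINT / EXTRACT_MSB0_INT, which read a bit-field out of the instruction word with bit 0 counted as the most significant bit. A stand-alone sketch of that convention follows; the extract_msb0_uint helper and the add r10,r2 worked example are illustrative and not part of this commit:

#include <stdio.h>

/* Illustrative reimplementation of the MSB0 field extraction used by
   the EXTRACT_IFMT_* macros: take LENGTH bits starting at bit START,
   where bit 0 is the most significant bit of a TOTAL-bit word.  */

static unsigned int
extract_msb0_uint (unsigned int insn, int total, int start, int length)
{
  return (insn >> (total - start - length)) & ((1u << length) - 1u);
}

int
main (void)
{
  /* 0x0aa2 encodes "add r10,r2": op1=0, dr=10, op2=0xa, sr=2.  */
  unsigned int insn = 0x0aa2;
  unsigned int f_r1  = extract_msb0_uint (insn, 16, 4, 4);   /* == 10  (dr) */
  unsigned int f_op2 = extract_msb0_uint (insn, 16, 8, 4);   /* == 0xa      */
  unsigned int f_r2  = extract_msb0_uint (insn, 16, 12, 4);  /* == 2   (sr) */

  printf ("dr=r%u op2=0x%x sr=r%u\n", f_r1, f_op2, f_r2);
  return 0;
}
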
diff --git a/sim/m32r/decodex.c b/sim/m32r/decodex.c
new file mode 100644
index 00000000000..3a8884a46a1
--- /dev/null
+++ b/sim/m32r/decodex.c
@@ -0,0 +1,2223 @@
+/* Simulator instruction decoder for m32rxf.
+
+THIS FILE IS MACHINE GENERATED WITH CGEN.
+
+Copyright (C) 1996, 1997, 1998, 1999 Free Software Foundation, Inc.
+
+This file is part of the GNU Simulators.
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License along
+with this program; if not, write to the Free Software Foundation, Inc.,
+59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+
+*/
+
+#define WANT_CPU m32rxf
+#define WANT_CPU_M32RXF
+
+#include "sim-main.h"
+#include "sim-assert.h"
+
+/* Insn can't be executed in parallel.
+ Or is that "do NOt Pass to Air defense Radar"? :-) */
+#define NOPAR (-1)
+
+/* The instruction descriptor array.
+ This is computed at runtime. Space for it is not malloc'd to save a
+ teensy bit of cpu in the decoder. Moving it to malloc space is trivial
+ but won't be done until necessary (we don't currently support the runtime
+ addition of instructions nor an SMP machine with different cpus). */
+static IDESC m32rxf_insn_data[M32RXF_INSN_MAX];
+
+/* Commas between elements are contained in the macros.
+ Some of these are conditionally compiled out. */
+
+static const struct insn_sem m32rxf_insn_sem[] =
+{
+ { VIRTUAL_INSN_X_INVALID, M32RXF_INSN_X_INVALID, M32RXF_SFMT_EMPTY, NOPAR, NOPAR },
+ { VIRTUAL_INSN_X_AFTER, M32RXF_INSN_X_AFTER, M32RXF_SFMT_EMPTY, NOPAR, NOPAR },
+ { VIRTUAL_INSN_X_BEFORE, M32RXF_INSN_X_BEFORE, M32RXF_SFMT_EMPTY, NOPAR, NOPAR },
+ { VIRTUAL_INSN_X_CTI_CHAIN, M32RXF_INSN_X_CTI_CHAIN, M32RXF_SFMT_EMPTY, NOPAR, NOPAR },
+ { VIRTUAL_INSN_X_CHAIN, M32RXF_INSN_X_CHAIN, M32RXF_SFMT_EMPTY, NOPAR, NOPAR },
+ { VIRTUAL_INSN_X_BEGIN, M32RXF_INSN_X_BEGIN, M32RXF_SFMT_EMPTY, NOPAR, NOPAR },
+ { M32R_INSN_ADD, M32RXF_INSN_ADD, M32RXF_SFMT_ADD, M32RXF_INSN_PAR_ADD, M32RXF_INSN_WRITE_ADD },
+ { M32R_INSN_ADD3, M32RXF_INSN_ADD3, M32RXF_SFMT_ADD3, NOPAR, NOPAR },
+ { M32R_INSN_AND, M32RXF_INSN_AND, M32RXF_SFMT_ADD, M32RXF_INSN_PAR_AND, M32RXF_INSN_WRITE_AND },
+ { M32R_INSN_AND3, M32RXF_INSN_AND3, M32RXF_SFMT_AND3, NOPAR, NOPAR },
+ { M32R_INSN_OR, M32RXF_INSN_OR, M32RXF_SFMT_ADD, M32RXF_INSN_PAR_OR, M32RXF_INSN_WRITE_OR },
+ { M32R_INSN_OR3, M32RXF_INSN_OR3, M32RXF_SFMT_OR3, NOPAR, NOPAR },
+ { M32R_INSN_XOR, M32RXF_INSN_XOR, M32RXF_SFMT_ADD, M32RXF_INSN_PAR_XOR, M32RXF_INSN_WRITE_XOR },
+ { M32R_INSN_XOR3, M32RXF_INSN_XOR3, M32RXF_SFMT_AND3, NOPAR, NOPAR },
+ { M32R_INSN_ADDI, M32RXF_INSN_ADDI, M32RXF_SFMT_ADDI, M32RXF_INSN_PAR_ADDI, M32RXF_INSN_WRITE_ADDI },
+ { M32R_INSN_ADDV, M32RXF_INSN_ADDV, M32RXF_SFMT_ADDV, M32RXF_INSN_PAR_ADDV, M32RXF_INSN_WRITE_ADDV },
+ { M32R_INSN_ADDV3, M32RXF_INSN_ADDV3, M32RXF_SFMT_ADDV3, NOPAR, NOPAR },
+ { M32R_INSN_ADDX, M32RXF_INSN_ADDX, M32RXF_SFMT_ADDX, M32RXF_INSN_PAR_ADDX, M32RXF_INSN_WRITE_ADDX },
+ { M32R_INSN_BC8, M32RXF_INSN_BC8, M32RXF_SFMT_BC8, M32RXF_INSN_PAR_BC8, M32RXF_INSN_WRITE_BC8 },
+ { M32R_INSN_BC24, M32RXF_INSN_BC24, M32RXF_SFMT_BC24, NOPAR, NOPAR },
+ { M32R_INSN_BEQ, M32RXF_INSN_BEQ, M32RXF_SFMT_BEQ, NOPAR, NOPAR },
+ { M32R_INSN_BEQZ, M32RXF_INSN_BEQZ, M32RXF_SFMT_BEQZ, NOPAR, NOPAR },
+ { M32R_INSN_BGEZ, M32RXF_INSN_BGEZ, M32RXF_SFMT_BEQZ, NOPAR, NOPAR },
+ { M32R_INSN_BGTZ, M32RXF_INSN_BGTZ, M32RXF_SFMT_BEQZ, NOPAR, NOPAR },
+ { M32R_INSN_BLEZ, M32RXF_INSN_BLEZ, M32RXF_SFMT_BEQZ, NOPAR, NOPAR },
+ { M32R_INSN_BLTZ, M32RXF_INSN_BLTZ, M32RXF_SFMT_BEQZ, NOPAR, NOPAR },
+ { M32R_INSN_BNEZ, M32RXF_INSN_BNEZ, M32RXF_SFMT_BEQZ, NOPAR, NOPAR },
+ { M32R_INSN_BL8, M32RXF_INSN_BL8, M32RXF_SFMT_BL8, M32RXF_INSN_PAR_BL8, M32RXF_INSN_WRITE_BL8 },
+ { M32R_INSN_BL24, M32RXF_INSN_BL24, M32RXF_SFMT_BL24, NOPAR, NOPAR },
+ { M32R_INSN_BCL8, M32RXF_INSN_BCL8, M32RXF_SFMT_BCL8, M32RXF_INSN_PAR_BCL8, M32RXF_INSN_WRITE_BCL8 },
+ { M32R_INSN_BCL24, M32RXF_INSN_BCL24, M32RXF_SFMT_BCL24, NOPAR, NOPAR },
+ { M32R_INSN_BNC8, M32RXF_INSN_BNC8, M32RXF_SFMT_BC8, M32RXF_INSN_PAR_BNC8, M32RXF_INSN_WRITE_BNC8 },
+ { M32R_INSN_BNC24, M32RXF_INSN_BNC24, M32RXF_SFMT_BC24, NOPAR, NOPAR },
+ { M32R_INSN_BNE, M32RXF_INSN_BNE, M32RXF_SFMT_BEQ, NOPAR, NOPAR },
+ { M32R_INSN_BRA8, M32RXF_INSN_BRA8, M32RXF_SFMT_BRA8, M32RXF_INSN_PAR_BRA8, M32RXF_INSN_WRITE_BRA8 },
+ { M32R_INSN_BRA24, M32RXF_INSN_BRA24, M32RXF_SFMT_BRA24, NOPAR, NOPAR },
+ { M32R_INSN_BNCL8, M32RXF_INSN_BNCL8, M32RXF_SFMT_BCL8, M32RXF_INSN_PAR_BNCL8, M32RXF_INSN_WRITE_BNCL8 },
+ { M32R_INSN_BNCL24, M32RXF_INSN_BNCL24, M32RXF_SFMT_BCL24, NOPAR, NOPAR },
+ { M32R_INSN_CMP, M32RXF_INSN_CMP, M32RXF_SFMT_CMP, M32RXF_INSN_PAR_CMP, M32RXF_INSN_WRITE_CMP },
+ { M32R_INSN_CMPI, M32RXF_INSN_CMPI, M32RXF_SFMT_CMPI, NOPAR, NOPAR },
+ { M32R_INSN_CMPU, M32RXF_INSN_CMPU, M32RXF_SFMT_CMP, M32RXF_INSN_PAR_CMPU, M32RXF_INSN_WRITE_CMPU },
+ { M32R_INSN_CMPUI, M32RXF_INSN_CMPUI, M32RXF_SFMT_CMPI, NOPAR, NOPAR },
+ { M32R_INSN_CMPEQ, M32RXF_INSN_CMPEQ, M32RXF_SFMT_CMP, M32RXF_INSN_PAR_CMPEQ, M32RXF_INSN_WRITE_CMPEQ },
+ { M32R_INSN_CMPZ, M32RXF_INSN_CMPZ, M32RXF_SFMT_CMPZ, M32RXF_INSN_PAR_CMPZ, M32RXF_INSN_WRITE_CMPZ },
+ { M32R_INSN_DIV, M32RXF_INSN_DIV, M32RXF_SFMT_DIV, NOPAR, NOPAR },
+ { M32R_INSN_DIVU, M32RXF_INSN_DIVU, M32RXF_SFMT_DIV, NOPAR, NOPAR },
+ { M32R_INSN_REM, M32RXF_INSN_REM, M32RXF_SFMT_DIV, NOPAR, NOPAR },
+ { M32R_INSN_REMU, M32RXF_INSN_REMU, M32RXF_SFMT_DIV, NOPAR, NOPAR },
+ { M32R_INSN_DIVH, M32RXF_INSN_DIVH, M32RXF_SFMT_DIV, NOPAR, NOPAR },
+ { M32R_INSN_JC, M32RXF_INSN_JC, M32RXF_SFMT_JC, M32RXF_INSN_PAR_JC, M32RXF_INSN_WRITE_JC },
+ { M32R_INSN_JNC, M32RXF_INSN_JNC, M32RXF_SFMT_JC, M32RXF_INSN_PAR_JNC, M32RXF_INSN_WRITE_JNC },
+ { M32R_INSN_JL, M32RXF_INSN_JL, M32RXF_SFMT_JL, M32RXF_INSN_PAR_JL, M32RXF_INSN_WRITE_JL },
+ { M32R_INSN_JMP, M32RXF_INSN_JMP, M32RXF_SFMT_JMP, M32RXF_INSN_PAR_JMP, M32RXF_INSN_WRITE_JMP },
+ { M32R_INSN_LD, M32RXF_INSN_LD, M32RXF_SFMT_LD, M32RXF_INSN_PAR_LD, M32RXF_INSN_WRITE_LD },
+ { M32R_INSN_LD_D, M32RXF_INSN_LD_D, M32RXF_SFMT_LD_D, NOPAR, NOPAR },
+ { M32R_INSN_LDB, M32RXF_INSN_LDB, M32RXF_SFMT_LD, M32RXF_INSN_PAR_LDB, M32RXF_INSN_WRITE_LDB },
+ { M32R_INSN_LDB_D, M32RXF_INSN_LDB_D, M32RXF_SFMT_LD_D, NOPAR, NOPAR },
+ { M32R_INSN_LDH, M32RXF_INSN_LDH, M32RXF_SFMT_LD, M32RXF_INSN_PAR_LDH, M32RXF_INSN_WRITE_LDH },
+ { M32R_INSN_LDH_D, M32RXF_INSN_LDH_D, M32RXF_SFMT_LD_D, NOPAR, NOPAR },
+ { M32R_INSN_LDUB, M32RXF_INSN_LDUB, M32RXF_SFMT_LD, M32RXF_INSN_PAR_LDUB, M32RXF_INSN_WRITE_LDUB },
+ { M32R_INSN_LDUB_D, M32RXF_INSN_LDUB_D, M32RXF_SFMT_LD_D, NOPAR, NOPAR },
+ { M32R_INSN_LDUH, M32RXF_INSN_LDUH, M32RXF_SFMT_LD, M32RXF_INSN_PAR_LDUH, M32RXF_INSN_WRITE_LDUH },
+ { M32R_INSN_LDUH_D, M32RXF_INSN_LDUH_D, M32RXF_SFMT_LD_D, NOPAR, NOPAR },
+ { M32R_INSN_LD_PLUS, M32RXF_INSN_LD_PLUS, M32RXF_SFMT_LD_PLUS, M32RXF_INSN_PAR_LD_PLUS, M32RXF_INSN_WRITE_LD_PLUS },
+ { M32R_INSN_LD24, M32RXF_INSN_LD24, M32RXF_SFMT_LD24, NOPAR, NOPAR },
+ { M32R_INSN_LDI8, M32RXF_INSN_LDI8, M32RXF_SFMT_LDI8, M32RXF_INSN_PAR_LDI8, M32RXF_INSN_WRITE_LDI8 },
+ { M32R_INSN_LDI16, M32RXF_INSN_LDI16, M32RXF_SFMT_LDI16, NOPAR, NOPAR },
+ { M32R_INSN_LOCK, M32RXF_INSN_LOCK, M32RXF_SFMT_LOCK, M32RXF_INSN_PAR_LOCK, M32RXF_INSN_WRITE_LOCK },
+ { M32R_INSN_MACHI_A, M32RXF_INSN_MACHI_A, M32RXF_SFMT_MACHI_A, M32RXF_INSN_PAR_MACHI_A, M32RXF_INSN_WRITE_MACHI_A },
+ { M32R_INSN_MACLO_A, M32RXF_INSN_MACLO_A, M32RXF_SFMT_MACHI_A, M32RXF_INSN_PAR_MACLO_A, M32RXF_INSN_WRITE_MACLO_A },
+ { M32R_INSN_MACWHI_A, M32RXF_INSN_MACWHI_A, M32RXF_SFMT_MACHI_A, M32RXF_INSN_PAR_MACWHI_A, M32RXF_INSN_WRITE_MACWHI_A },
+ { M32R_INSN_MACWLO_A, M32RXF_INSN_MACWLO_A, M32RXF_SFMT_MACHI_A, M32RXF_INSN_PAR_MACWLO_A, M32RXF_INSN_WRITE_MACWLO_A },
+ { M32R_INSN_MUL, M32RXF_INSN_MUL, M32RXF_SFMT_ADD, M32RXF_INSN_PAR_MUL, M32RXF_INSN_WRITE_MUL },
+ { M32R_INSN_MULHI_A, M32RXF_INSN_MULHI_A, M32RXF_SFMT_MULHI_A, M32RXF_INSN_PAR_MULHI_A, M32RXF_INSN_WRITE_MULHI_A },
+ { M32R_INSN_MULLO_A, M32RXF_INSN_MULLO_A, M32RXF_SFMT_MULHI_A, M32RXF_INSN_PAR_MULLO_A, M32RXF_INSN_WRITE_MULLO_A },
+ { M32R_INSN_MULWHI_A, M32RXF_INSN_MULWHI_A, M32RXF_SFMT_MULHI_A, M32RXF_INSN_PAR_MULWHI_A, M32RXF_INSN_WRITE_MULWHI_A },
+ { M32R_INSN_MULWLO_A, M32RXF_INSN_MULWLO_A, M32RXF_SFMT_MULHI_A, M32RXF_INSN_PAR_MULWLO_A, M32RXF_INSN_WRITE_MULWLO_A },
+ { M32R_INSN_MV, M32RXF_INSN_MV, M32RXF_SFMT_MV, M32RXF_INSN_PAR_MV, M32RXF_INSN_WRITE_MV },
+ { M32R_INSN_MVFACHI_A, M32RXF_INSN_MVFACHI_A, M32RXF_SFMT_MVFACHI_A, M32RXF_INSN_PAR_MVFACHI_A, M32RXF_INSN_WRITE_MVFACHI_A },
+ { M32R_INSN_MVFACLO_A, M32RXF_INSN_MVFACLO_A, M32RXF_SFMT_MVFACHI_A, M32RXF_INSN_PAR_MVFACLO_A, M32RXF_INSN_WRITE_MVFACLO_A },
+ { M32R_INSN_MVFACMI_A, M32RXF_INSN_MVFACMI_A, M32RXF_SFMT_MVFACHI_A, M32RXF_INSN_PAR_MVFACMI_A, M32RXF_INSN_WRITE_MVFACMI_A },
+ { M32R_INSN_MVFC, M32RXF_INSN_MVFC, M32RXF_SFMT_MVFC, M32RXF_INSN_PAR_MVFC, M32RXF_INSN_WRITE_MVFC },
+ { M32R_INSN_MVTACHI_A, M32RXF_INSN_MVTACHI_A, M32RXF_SFMT_MVTACHI_A, M32RXF_INSN_PAR_MVTACHI_A, M32RXF_INSN_WRITE_MVTACHI_A },
+ { M32R_INSN_MVTACLO_A, M32RXF_INSN_MVTACLO_A, M32RXF_SFMT_MVTACHI_A, M32RXF_INSN_PAR_MVTACLO_A, M32RXF_INSN_WRITE_MVTACLO_A },
+ { M32R_INSN_MVTC, M32RXF_INSN_MVTC, M32RXF_SFMT_MVTC, M32RXF_INSN_PAR_MVTC, M32RXF_INSN_WRITE_MVTC },
+ { M32R_INSN_NEG, M32RXF_INSN_NEG, M32RXF_SFMT_MV, M32RXF_INSN_PAR_NEG, M32RXF_INSN_WRITE_NEG },
+ { M32R_INSN_NOP, M32RXF_INSN_NOP, M32RXF_SFMT_NOP, M32RXF_INSN_PAR_NOP, M32RXF_INSN_WRITE_NOP },
+ { M32R_INSN_NOT, M32RXF_INSN_NOT, M32RXF_SFMT_MV, M32RXF_INSN_PAR_NOT, M32RXF_INSN_WRITE_NOT },
+ { M32R_INSN_RAC_DSI, M32RXF_INSN_RAC_DSI, M32RXF_SFMT_RAC_DSI, M32RXF_INSN_PAR_RAC_DSI, M32RXF_INSN_WRITE_RAC_DSI },
+ { M32R_INSN_RACH_DSI, M32RXF_INSN_RACH_DSI, M32RXF_SFMT_RAC_DSI, M32RXF_INSN_PAR_RACH_DSI, M32RXF_INSN_WRITE_RACH_DSI },
+ { M32R_INSN_RTE, M32RXF_INSN_RTE, M32RXF_SFMT_RTE, M32RXF_INSN_PAR_RTE, M32RXF_INSN_WRITE_RTE },
+ { M32R_INSN_SETH, M32RXF_INSN_SETH, M32RXF_SFMT_SETH, NOPAR, NOPAR },
+ { M32R_INSN_SLL, M32RXF_INSN_SLL, M32RXF_SFMT_ADD, M32RXF_INSN_PAR_SLL, M32RXF_INSN_WRITE_SLL },
+ { M32R_INSN_SLL3, M32RXF_INSN_SLL3, M32RXF_SFMT_SLL3, NOPAR, NOPAR },
+ { M32R_INSN_SLLI, M32RXF_INSN_SLLI, M32RXF_SFMT_SLLI, M32RXF_INSN_PAR_SLLI, M32RXF_INSN_WRITE_SLLI },
+ { M32R_INSN_SRA, M32RXF_INSN_SRA, M32RXF_SFMT_ADD, M32RXF_INSN_PAR_SRA, M32RXF_INSN_WRITE_SRA },
+ { M32R_INSN_SRA3, M32RXF_INSN_SRA3, M32RXF_SFMT_SLL3, NOPAR, NOPAR },
+ { M32R_INSN_SRAI, M32RXF_INSN_SRAI, M32RXF_SFMT_SLLI, M32RXF_INSN_PAR_SRAI, M32RXF_INSN_WRITE_SRAI },
+ { M32R_INSN_SRL, M32RXF_INSN_SRL, M32RXF_SFMT_ADD, M32RXF_INSN_PAR_SRL, M32RXF_INSN_WRITE_SRL },
+ { M32R_INSN_SRL3, M32RXF_INSN_SRL3, M32RXF_SFMT_SLL3, NOPAR, NOPAR },
+ { M32R_INSN_SRLI, M32RXF_INSN_SRLI, M32RXF_SFMT_SLLI, M32RXF_INSN_PAR_SRLI, M32RXF_INSN_WRITE_SRLI },
+ { M32R_INSN_ST, M32RXF_INSN_ST, M32RXF_SFMT_ST, M32RXF_INSN_PAR_ST, M32RXF_INSN_WRITE_ST },
+ { M32R_INSN_ST_D, M32RXF_INSN_ST_D, M32RXF_SFMT_ST_D, NOPAR, NOPAR },
+ { M32R_INSN_STB, M32RXF_INSN_STB, M32RXF_SFMT_STB, M32RXF_INSN_PAR_STB, M32RXF_INSN_WRITE_STB },
+ { M32R_INSN_STB_D, M32RXF_INSN_STB_D, M32RXF_SFMT_STB_D, NOPAR, NOPAR },
+ { M32R_INSN_STH, M32RXF_INSN_STH, M32RXF_SFMT_STH, M32RXF_INSN_PAR_STH, M32RXF_INSN_WRITE_STH },
+ { M32R_INSN_STH_D, M32RXF_INSN_STH_D, M32RXF_SFMT_STH_D, NOPAR, NOPAR },
+ { M32R_INSN_ST_PLUS, M32RXF_INSN_ST_PLUS, M32RXF_SFMT_ST_PLUS, M32RXF_INSN_PAR_ST_PLUS, M32RXF_INSN_WRITE_ST_PLUS },
+ { M32R_INSN_ST_MINUS, M32RXF_INSN_ST_MINUS, M32RXF_SFMT_ST_PLUS, M32RXF_INSN_PAR_ST_MINUS, M32RXF_INSN_WRITE_ST_MINUS },
+ { M32R_INSN_SUB, M32RXF_INSN_SUB, M32RXF_SFMT_ADD, M32RXF_INSN_PAR_SUB, M32RXF_INSN_WRITE_SUB },
+ { M32R_INSN_SUBV, M32RXF_INSN_SUBV, M32RXF_SFMT_ADDV, M32RXF_INSN_PAR_SUBV, M32RXF_INSN_WRITE_SUBV },
+ { M32R_INSN_SUBX, M32RXF_INSN_SUBX, M32RXF_SFMT_ADDX, M32RXF_INSN_PAR_SUBX, M32RXF_INSN_WRITE_SUBX },
+ { M32R_INSN_TRAP, M32RXF_INSN_TRAP, M32RXF_SFMT_TRAP, M32RXF_INSN_PAR_TRAP, M32RXF_INSN_WRITE_TRAP },
+ { M32R_INSN_UNLOCK, M32RXF_INSN_UNLOCK, M32RXF_SFMT_UNLOCK, M32RXF_INSN_PAR_UNLOCK, M32RXF_INSN_WRITE_UNLOCK },
+ { M32R_INSN_SATB, M32RXF_INSN_SATB, M32RXF_SFMT_SATB, NOPAR, NOPAR },
+ { M32R_INSN_SATH, M32RXF_INSN_SATH, M32RXF_SFMT_SATB, NOPAR, NOPAR },
+ { M32R_INSN_SAT, M32RXF_INSN_SAT, M32RXF_SFMT_SAT, NOPAR, NOPAR },
+ { M32R_INSN_PCMPBZ, M32RXF_INSN_PCMPBZ, M32RXF_SFMT_CMPZ, M32RXF_INSN_PAR_PCMPBZ, M32RXF_INSN_WRITE_PCMPBZ },
+ { M32R_INSN_SADD, M32RXF_INSN_SADD, M32RXF_SFMT_SADD, M32RXF_INSN_PAR_SADD, M32RXF_INSN_WRITE_SADD },
+ { M32R_INSN_MACWU1, M32RXF_INSN_MACWU1, M32RXF_SFMT_MACWU1, M32RXF_INSN_PAR_MACWU1, M32RXF_INSN_WRITE_MACWU1 },
+ { M32R_INSN_MSBLO, M32RXF_INSN_MSBLO, M32RXF_SFMT_MSBLO, M32RXF_INSN_PAR_MSBLO, M32RXF_INSN_WRITE_MSBLO },
+ { M32R_INSN_MULWU1, M32RXF_INSN_MULWU1, M32RXF_SFMT_MULWU1, M32RXF_INSN_PAR_MULWU1, M32RXF_INSN_WRITE_MULWU1 },
+ { M32R_INSN_MACLH1, M32RXF_INSN_MACLH1, M32RXF_SFMT_MACWU1, M32RXF_INSN_PAR_MACLH1, M32RXF_INSN_WRITE_MACLH1 },
+ { M32R_INSN_SC, M32RXF_INSN_SC, M32RXF_SFMT_SC, M32RXF_INSN_PAR_SC, M32RXF_INSN_WRITE_SC },
+ { M32R_INSN_SNC, M32RXF_INSN_SNC, M32RXF_SFMT_SC, M32RXF_INSN_PAR_SNC, M32RXF_INSN_WRITE_SNC },
+};
+
+static const struct insn_sem m32rxf_insn_sem_invalid = {
+ VIRTUAL_INSN_X_INVALID, M32RXF_INSN_X_INVALID, M32RXF_SFMT_EMPTY, NOPAR, NOPAR
+};
+
+/* Initialize an IDESC from the compile-time computable parts. */
+
+static INLINE void
+init_idesc (SIM_CPU *cpu, IDESC *id, const struct insn_sem *t)
+{
+ const CGEN_INSN *insn_table = CGEN_CPU_INSN_TABLE (CPU_CPU_DESC (cpu))->init_entries;
+
+ id->num = t->index;
+ id->sfmt = t->sfmt;
+ if ((int) t->type <= 0)
+ id->idata = & cgen_virtual_insn_table[- (int) t->type];
+ else
+ id->idata = & insn_table[t->type];
+ id->attrs = CGEN_INSN_ATTRS (id->idata);
+ /* Oh my god, a magic number. */
+ id->length = CGEN_INSN_BITSIZE (id->idata) / 8;
+
+#if WITH_PROFILE_MODEL_P
+ id->timing = & MODEL_TIMING (CPU_MODEL (cpu)) [t->index];
+ {
+ SIM_DESC sd = CPU_STATE (cpu);
+ SIM_ASSERT (t->index == id->timing->num);
+ }
+#endif
+
+ /* Semantic pointers are initialized elsewhere. */
+}
+
+/* Initialize the instruction descriptor table. */
+
+void
+m32rxf_init_idesc_table (SIM_CPU *cpu)
+{
+ IDESC *id,*tabend;
+ const struct insn_sem *t,*tend;
+ int tabsize = M32RXF_INSN_MAX;
+ IDESC *table = m32rxf_insn_data;
+
+ memset (table, 0, tabsize * sizeof (IDESC));
+
+ /* First set all entries to the `invalid insn'. */
+ t = & m32rxf_insn_sem_invalid;
+ for (id = table, tabend = table + tabsize; id < tabend; ++id)
+ init_idesc (cpu, id, t);
+
+ /* Now fill in the values for the chosen cpu. */
+ for (t = m32rxf_insn_sem, tend = t + sizeof (m32rxf_insn_sem) / sizeof (*t);
+ t != tend; ++t)
+ {
+ init_idesc (cpu, & table[t->index], t);
+ if (t->par_index != NOPAR)
+ {
+ init_idesc (cpu, &table[t->par_index], t);
+ table[t->index].par_idesc = &table[t->par_index];
+ }
+ if (t->par_index != NOPAR)
+ {
+ init_idesc (cpu, &table[t->write_index], t);
+ table[t->par_index].par_idesc = &table[t->write_index];
+ }
+ }
+
+ /* Link the IDESC table into the cpu. */
+ CPU_IDESC (cpu) = table;
+}
+
+/* Given an instruction, return a pointer to its IDESC entry. */
+
+const IDESC *
+m32rxf_decode (SIM_CPU *current_cpu, IADDR pc,
+ CGEN_INSN_INT base_insn, CGEN_INSN_INT entire_insn,
+ ARGBUF *abuf)
+{
+ /* Result of decoder. */
+ M32RXF_INSN_TYPE itype;
+
+ {
+ CGEN_INSN_INT insn = base_insn;
+
+ {
+ unsigned int val = (((insn >> 8) & (15 << 4)) | ((insn >> 4) & (15 << 0)));
+ switch (val)
+ {
+ case 0 : itype = M32RXF_INSN_SUBV; goto extract_sfmt_addv;
+ case 1 : itype = M32RXF_INSN_SUBX; goto extract_sfmt_addx;
+ case 2 : itype = M32RXF_INSN_SUB; goto extract_sfmt_add;
+ case 3 : itype = M32RXF_INSN_NEG; goto extract_sfmt_mv;
+ case 4 : itype = M32RXF_INSN_CMP; goto extract_sfmt_cmp;
+ case 5 : itype = M32RXF_INSN_CMPU; goto extract_sfmt_cmp;
+ case 6 : itype = M32RXF_INSN_CMPEQ; goto extract_sfmt_cmp;
+ case 7 :
+ {
+ unsigned int val = (((insn >> 8) & (15 << 0)));
+ switch (val)
+ {
+ case 0 : itype = M32RXF_INSN_CMPZ; goto extract_sfmt_cmpz;
+ case 3 : itype = M32RXF_INSN_PCMPBZ; goto extract_sfmt_cmpz;
+ default : itype = M32RXF_INSN_X_INVALID; goto extract_sfmt_empty;
+ }
+ }
+ case 8 : itype = M32RXF_INSN_ADDV; goto extract_sfmt_addv;
+ case 9 : itype = M32RXF_INSN_ADDX; goto extract_sfmt_addx;
+ case 10 : itype = M32RXF_INSN_ADD; goto extract_sfmt_add;
+ case 11 : itype = M32RXF_INSN_NOT; goto extract_sfmt_mv;
+ case 12 : itype = M32RXF_INSN_AND; goto extract_sfmt_add;
+ case 13 : itype = M32RXF_INSN_XOR; goto extract_sfmt_add;
+ case 14 : itype = M32RXF_INSN_OR; goto extract_sfmt_add;
+ case 16 : itype = M32RXF_INSN_SRL; goto extract_sfmt_add;
+ case 18 : itype = M32RXF_INSN_SRA; goto extract_sfmt_add;
+ case 20 : itype = M32RXF_INSN_SLL; goto extract_sfmt_add;
+ case 22 : itype = M32RXF_INSN_MUL; goto extract_sfmt_add;
+ case 24 : itype = M32RXF_INSN_MV; goto extract_sfmt_mv;
+ case 25 : itype = M32RXF_INSN_MVFC; goto extract_sfmt_mvfc;
+ case 26 : itype = M32RXF_INSN_MVTC; goto extract_sfmt_mvtc;
+ case 28 :
+ {
+ unsigned int val = (((insn >> 8) & (15 << 0)));
+ switch (val)
+ {
+ case 12 : itype = M32RXF_INSN_JC; goto extract_sfmt_jc;
+ case 13 : itype = M32RXF_INSN_JNC; goto extract_sfmt_jc;
+ case 14 : itype = M32RXF_INSN_JL; goto extract_sfmt_jl;
+ case 15 : itype = M32RXF_INSN_JMP; goto extract_sfmt_jmp;
+ default : itype = M32RXF_INSN_X_INVALID; goto extract_sfmt_empty;
+ }
+ }
+ case 29 : itype = M32RXF_INSN_RTE; goto extract_sfmt_rte;
+ case 31 : itype = M32RXF_INSN_TRAP; goto extract_sfmt_trap;
+ case 32 : itype = M32RXF_INSN_STB; goto extract_sfmt_stb;
+ case 34 : itype = M32RXF_INSN_STH; goto extract_sfmt_sth;
+ case 36 : itype = M32RXF_INSN_ST; goto extract_sfmt_st;
+ case 37 : itype = M32RXF_INSN_UNLOCK; goto extract_sfmt_unlock;
+ case 38 : itype = M32RXF_INSN_ST_PLUS; goto extract_sfmt_st_plus;
+ case 39 : itype = M32RXF_INSN_ST_MINUS; goto extract_sfmt_st_plus;
+ case 40 : itype = M32RXF_INSN_LDB; goto extract_sfmt_ld;
+ case 41 : itype = M32RXF_INSN_LDUB; goto extract_sfmt_ld;
+ case 42 : itype = M32RXF_INSN_LDH; goto extract_sfmt_ld;
+ case 43 : itype = M32RXF_INSN_LDUH; goto extract_sfmt_ld;
+ case 44 : itype = M32RXF_INSN_LD; goto extract_sfmt_ld;
+ case 45 : itype = M32RXF_INSN_LOCK; goto extract_sfmt_lock;
+ case 46 : itype = M32RXF_INSN_LD_PLUS; goto extract_sfmt_ld_plus;
+ case 48 : /* fall through */
+ case 56 : itype = M32RXF_INSN_MULHI_A; goto extract_sfmt_mulhi_a;
+ case 49 : /* fall through */
+ case 57 : itype = M32RXF_INSN_MULLO_A; goto extract_sfmt_mulhi_a;
+ case 50 : /* fall through */
+ case 58 : itype = M32RXF_INSN_MULWHI_A; goto extract_sfmt_mulhi_a;
+ case 51 : /* fall through */
+ case 59 : itype = M32RXF_INSN_MULWLO_A; goto extract_sfmt_mulhi_a;
+ case 52 : /* fall through */
+ case 60 : itype = M32RXF_INSN_MACHI_A; goto extract_sfmt_machi_a;
+ case 53 : /* fall through */
+ case 61 : itype = M32RXF_INSN_MACLO_A; goto extract_sfmt_machi_a;
+ case 54 : /* fall through */
+ case 62 : itype = M32RXF_INSN_MACWHI_A; goto extract_sfmt_machi_a;
+ case 55 : /* fall through */
+ case 63 : itype = M32RXF_INSN_MACWLO_A; goto extract_sfmt_machi_a;
+ case 64 : /* fall through */
+ case 65 : /* fall through */
+ case 66 : /* fall through */
+ case 67 : /* fall through */
+ case 68 : /* fall through */
+ case 69 : /* fall through */
+ case 70 : /* fall through */
+ case 71 : /* fall through */
+ case 72 : /* fall through */
+ case 73 : /* fall through */
+ case 74 : /* fall through */
+ case 75 : /* fall through */
+ case 76 : /* fall through */
+ case 77 : /* fall through */
+ case 78 : /* fall through */
+ case 79 : itype = M32RXF_INSN_ADDI; goto extract_sfmt_addi;
+ case 80 : /* fall through */
+ case 81 : itype = M32RXF_INSN_SRLI; goto extract_sfmt_slli;
+ case 82 : /* fall through */
+ case 83 : itype = M32RXF_INSN_SRAI; goto extract_sfmt_slli;
+ case 84 : /* fall through */
+ case 85 : itype = M32RXF_INSN_SLLI; goto extract_sfmt_slli;
+ case 87 :
+ {
+ unsigned int val = (((insn >> 0) & (3 << 0)));
+ switch (val)
+ {
+ case 0 : itype = M32RXF_INSN_MVTACHI_A; goto extract_sfmt_mvtachi_a;
+ case 1 : itype = M32RXF_INSN_MVTACLO_A; goto extract_sfmt_mvtachi_a;
+ default : itype = M32RXF_INSN_X_INVALID; goto extract_sfmt_empty;
+ }
+ }
+ case 88 : itype = M32RXF_INSN_RACH_DSI; goto extract_sfmt_rac_dsi;
+ case 89 : itype = M32RXF_INSN_RAC_DSI; goto extract_sfmt_rac_dsi;
+ case 90 : itype = M32RXF_INSN_MULWU1; goto extract_sfmt_mulwu1;
+ case 91 : itype = M32RXF_INSN_MACWU1; goto extract_sfmt_macwu1;
+ case 92 : itype = M32RXF_INSN_MACLH1; goto extract_sfmt_macwu1;
+ case 93 : itype = M32RXF_INSN_MSBLO; goto extract_sfmt_msblo;
+ case 94 : itype = M32RXF_INSN_SADD; goto extract_sfmt_sadd;
+ case 95 :
+ {
+ unsigned int val = (((insn >> 0) & (3 << 0)));
+ switch (val)
+ {
+ case 0 : itype = M32RXF_INSN_MVFACHI_A; goto extract_sfmt_mvfachi_a;
+ case 1 : itype = M32RXF_INSN_MVFACLO_A; goto extract_sfmt_mvfachi_a;
+ case 2 : itype = M32RXF_INSN_MVFACMI_A; goto extract_sfmt_mvfachi_a;
+ default : itype = M32RXF_INSN_X_INVALID; goto extract_sfmt_empty;
+ }
+ }
+ case 96 : /* fall through */
+ case 97 : /* fall through */
+ case 98 : /* fall through */
+ case 99 : /* fall through */
+ case 100 : /* fall through */
+ case 101 : /* fall through */
+ case 102 : /* fall through */
+ case 103 : /* fall through */
+ case 104 : /* fall through */
+ case 105 : /* fall through */
+ case 106 : /* fall through */
+ case 107 : /* fall through */
+ case 108 : /* fall through */
+ case 109 : /* fall through */
+ case 110 : /* fall through */
+ case 111 : itype = M32RXF_INSN_LDI8; goto extract_sfmt_ldi8;
+ case 112 :
+ {
+ unsigned int val = (((insn >> 8) & (15 << 0)));
+ switch (val)
+ {
+ case 0 : itype = M32RXF_INSN_NOP; goto extract_sfmt_nop;
+ case 4 : itype = M32RXF_INSN_SC; goto extract_sfmt_sc;
+ case 5 : itype = M32RXF_INSN_SNC; goto extract_sfmt_sc;
+ case 8 : itype = M32RXF_INSN_BCL8; goto extract_sfmt_bcl8;
+ case 9 : itype = M32RXF_INSN_BNCL8; goto extract_sfmt_bcl8;
+ case 12 : itype = M32RXF_INSN_BC8; goto extract_sfmt_bc8;
+ case 13 : itype = M32RXF_INSN_BNC8; goto extract_sfmt_bc8;
+ case 14 : itype = M32RXF_INSN_BL8; goto extract_sfmt_bl8;
+ case 15 : itype = M32RXF_INSN_BRA8; goto extract_sfmt_bra8;
+ default : itype = M32RXF_INSN_X_INVALID; goto extract_sfmt_empty;
+ }
+ }
+ case 113 : /* fall through */
+ case 114 : /* fall through */
+ case 115 : /* fall through */
+ case 116 : /* fall through */
+ case 117 : /* fall through */
+ case 118 : /* fall through */
+ case 119 : /* fall through */
+ case 120 : /* fall through */
+ case 121 : /* fall through */
+ case 122 : /* fall through */
+ case 123 : /* fall through */
+ case 124 : /* fall through */
+ case 125 : /* fall through */
+ case 126 : /* fall through */
+ case 127 :
+ {
+ unsigned int val = (((insn >> 8) & (15 << 0)));
+ switch (val)
+ {
+ case 8 : itype = M32RXF_INSN_BCL8; goto extract_sfmt_bcl8;
+ case 9 : itype = M32RXF_INSN_BNCL8; goto extract_sfmt_bcl8;
+ case 12 : itype = M32RXF_INSN_BC8; goto extract_sfmt_bc8;
+ case 13 : itype = M32RXF_INSN_BNC8; goto extract_sfmt_bc8;
+ case 14 : itype = M32RXF_INSN_BL8; goto extract_sfmt_bl8;
+ case 15 : itype = M32RXF_INSN_BRA8; goto extract_sfmt_bra8;
+ default : itype = M32RXF_INSN_X_INVALID; goto extract_sfmt_empty;
+ }
+ }
+ case 132 : itype = M32RXF_INSN_CMPI; goto extract_sfmt_cmpi;
+ case 133 : itype = M32RXF_INSN_CMPUI; goto extract_sfmt_cmpi;
+ case 134 :
+ {
+ unsigned int val;
+ /* Must fetch more bits. */
+ insn = GETIMEMUHI (current_cpu, pc + 2);
+ val = (((insn >> 12) & (15 << 0)));
+ switch (val)
+ {
+ case 0 :
+ {
+ unsigned int val = (((insn >> 8) & (15 << 0)));
+ switch (val)
+ {
+ case 0 : itype = M32RXF_INSN_SAT; goto extract_sfmt_sat;
+ case 2 : itype = M32RXF_INSN_SATH; goto extract_sfmt_satb;
+ case 3 : itype = M32RXF_INSN_SATB; goto extract_sfmt_satb;
+ default : itype = M32RXF_INSN_X_INVALID; goto extract_sfmt_empty;
+ }
+ }
+ default : itype = M32RXF_INSN_X_INVALID; goto extract_sfmt_empty;
+ }
+ }
+ case 136 : itype = M32RXF_INSN_ADDV3; goto extract_sfmt_addv3;
+ case 138 : itype = M32RXF_INSN_ADD3; goto extract_sfmt_add3;
+ case 140 : itype = M32RXF_INSN_AND3; goto extract_sfmt_and3;
+ case 141 : itype = M32RXF_INSN_XOR3; goto extract_sfmt_and3;
+ case 142 : itype = M32RXF_INSN_OR3; goto extract_sfmt_or3;
+ case 144 :
+ {
+ unsigned int val;
+ /* Must fetch more bits. */
+ insn = GETIMEMUHI (current_cpu, pc + 2);
+ val = (((insn >> 12) & (15 << 0)));
+ switch (val)
+ {
+ case 0 :
+ {
+ unsigned int val = (((insn >> 8) & (15 << 0)));
+ switch (val)
+ {
+ case 0 :
+ {
+ unsigned int val = (((insn >> 4) & (15 << 0)));
+ switch (val)
+ {
+ case 0 : itype = M32RXF_INSN_DIV; goto extract_sfmt_div;
+ case 1 : itype = M32RXF_INSN_DIVH; goto extract_sfmt_div;
+ default : itype = M32RXF_INSN_X_INVALID; goto extract_sfmt_empty;
+ }
+ }
+ default : itype = M32RXF_INSN_X_INVALID; goto extract_sfmt_empty;
+ }
+ }
+ default : itype = M32RXF_INSN_X_INVALID; goto extract_sfmt_empty;
+ }
+ }
+ case 145 : itype = M32RXF_INSN_DIVU; goto extract_sfmt_div;
+ case 146 : itype = M32RXF_INSN_REM; goto extract_sfmt_div;
+ case 147 : itype = M32RXF_INSN_REMU; goto extract_sfmt_div;
+ case 152 : itype = M32RXF_INSN_SRL3; goto extract_sfmt_sll3;
+ case 154 : itype = M32RXF_INSN_SRA3; goto extract_sfmt_sll3;
+ case 156 : itype = M32RXF_INSN_SLL3; goto extract_sfmt_sll3;
+ case 159 : itype = M32RXF_INSN_LDI16; goto extract_sfmt_ldi16;
+ case 160 : itype = M32RXF_INSN_STB_D; goto extract_sfmt_stb_d;
+ case 162 : itype = M32RXF_INSN_STH_D; goto extract_sfmt_sth_d;
+ case 164 : itype = M32RXF_INSN_ST_D; goto extract_sfmt_st_d;
+ case 168 : itype = M32RXF_INSN_LDB_D; goto extract_sfmt_ld_d;
+ case 169 : itype = M32RXF_INSN_LDUB_D; goto extract_sfmt_ld_d;
+ case 170 : itype = M32RXF_INSN_LDH_D; goto extract_sfmt_ld_d;
+ case 171 : itype = M32RXF_INSN_LDUH_D; goto extract_sfmt_ld_d;
+ case 172 : itype = M32RXF_INSN_LD_D; goto extract_sfmt_ld_d;
+ case 176 : itype = M32RXF_INSN_BEQ; goto extract_sfmt_beq;
+ case 177 : itype = M32RXF_INSN_BNE; goto extract_sfmt_beq;
+ case 184 : itype = M32RXF_INSN_BEQZ; goto extract_sfmt_beqz;
+ case 185 : itype = M32RXF_INSN_BNEZ; goto extract_sfmt_beqz;
+ case 186 : itype = M32RXF_INSN_BLTZ; goto extract_sfmt_beqz;
+ case 187 : itype = M32RXF_INSN_BGEZ; goto extract_sfmt_beqz;
+ case 188 : itype = M32RXF_INSN_BLEZ; goto extract_sfmt_beqz;
+ case 189 : itype = M32RXF_INSN_BGTZ; goto extract_sfmt_beqz;
+ case 220 : itype = M32RXF_INSN_SETH; goto extract_sfmt_seth;
+ case 224 : /* fall through */
+ case 225 : /* fall through */
+ case 226 : /* fall through */
+ case 227 : /* fall through */
+ case 228 : /* fall through */
+ case 229 : /* fall through */
+ case 230 : /* fall through */
+ case 231 : /* fall through */
+ case 232 : /* fall through */
+ case 233 : /* fall through */
+ case 234 : /* fall through */
+ case 235 : /* fall through */
+ case 236 : /* fall through */
+ case 237 : /* fall through */
+ case 238 : /* fall through */
+ case 239 : itype = M32RXF_INSN_LD24; goto extract_sfmt_ld24;
+ case 240 : /* fall through */
+ case 241 : /* fall through */
+ case 242 : /* fall through */
+ case 243 : /* fall through */
+ case 244 : /* fall through */
+ case 245 : /* fall through */
+ case 246 : /* fall through */
+ case 247 : /* fall through */
+ case 248 : /* fall through */
+ case 249 : /* fall through */
+ case 250 : /* fall through */
+ case 251 : /* fall through */
+ case 252 : /* fall through */
+ case 253 : /* fall through */
+ case 254 : /* fall through */
+ case 255 :
+ {
+ unsigned int val = (((insn >> 8) & (15 << 0)));
+ switch (val)
+ {
+ case 8 : itype = M32RXF_INSN_BCL24; goto extract_sfmt_bcl24;
+ case 9 : itype = M32RXF_INSN_BNCL24; goto extract_sfmt_bcl24;
+ case 12 : itype = M32RXF_INSN_BC24; goto extract_sfmt_bc24;
+ case 13 : itype = M32RXF_INSN_BNC24; goto extract_sfmt_bc24;
+ case 14 : itype = M32RXF_INSN_BL24; goto extract_sfmt_bl24;
+ case 15 : itype = M32RXF_INSN_BRA24; goto extract_sfmt_bra24;
+ default : itype = M32RXF_INSN_X_INVALID; goto extract_sfmt_empty;
+ }
+ }
+ default : itype = M32RXF_INSN_X_INVALID; goto extract_sfmt_empty;
+ }
+ }
+ }
+
+ /* The instruction has been decoded, now extract the fields. */
+
+ extract_sfmt_empty:
+ {
+ const IDESC *idesc = &m32rxf_insn_data[itype];
+ CGEN_INSN_INT insn = entire_insn;
+#define FLD(f) abuf->fields.fmt_empty.f
+
+
+ /* Record the fields for the semantic handler. */
+ TRACE_EXTRACT (current_cpu, abuf, (current_cpu, pc, "sfmt_empty", (char *) 0));
+
+#undef FLD
+ return idesc;
+ }
+
+ extract_sfmt_add:
+ {
+ const IDESC *idesc = &m32rxf_insn_data[itype];
+ CGEN_INSN_INT insn = entire_insn;
+#define FLD(f) abuf->fields.sfmt_add.f
+ UINT f_r1;
+ UINT f_r2;
+
+ f_r1 = EXTRACT_MSB0_UINT (insn, 16, 4, 4);
+ f_r2 = EXTRACT_MSB0_UINT (insn, 16, 12, 4);
+
+ /* Record the fields for the semantic handler. */
+ FLD (i_dr) = & CPU (h_gr)[f_r1];
+ FLD (i_sr) = & CPU (h_gr)[f_r2];
+ TRACE_EXTRACT (current_cpu, abuf, (current_cpu, pc, "sfmt_add", "dr 0x%x", 'x', f_r1, "sr 0x%x", 'x', f_r2, (char *) 0));
+
+#if WITH_PROFILE_MODEL_P
+ /* Record the fields for profiling. */
+ if (PROFILE_MODEL_P (current_cpu))
+ {
+ FLD (in_dr) = f_r1;
+ FLD (in_sr) = f_r2;
+ FLD (out_dr) = f_r1;
+ }
+#endif
+#undef FLD
+ return idesc;
+ }
+
+ extract_sfmt_add3:
+ {
+ const IDESC *idesc = &m32rxf_insn_data[itype];
+ CGEN_INSN_INT insn = entire_insn;
+#define FLD(f) abuf->fields.sfmt_add3.f
+ UINT f_r1;
+ UINT f_r2;
+ INT f_simm16;
+
+ f_r1 = EXTRACT_MSB0_UINT (insn, 32, 4, 4);
+ f_r2 = EXTRACT_MSB0_UINT (insn, 32, 12, 4);
+ f_simm16 = EXTRACT_MSB0_INT (insn, 32, 16, 16);
+
+ /* Record the fields for the semantic handler. */
+ FLD (f_simm16) = f_simm16;
+ FLD (i_sr) = & CPU (h_gr)[f_r2];
+ FLD (i_dr) = & CPU (h_gr)[f_r1];
+ TRACE_EXTRACT (current_cpu, abuf, (current_cpu, pc, "sfmt_add3", "f_simm16 0x%x", 'x', f_simm16, "sr 0x%x", 'x', f_r2, "dr 0x%x", 'x', f_r1, (char *) 0));
+
+#if WITH_PROFILE_MODEL_P
+ /* Record the fields for profiling. */
+ if (PROFILE_MODEL_P (current_cpu))
+ {
+ FLD (in_sr) = f_r2;
+ FLD (out_dr) = f_r1;
+ }
+#endif
+#undef FLD
+ return idesc;
+ }
+
+ extract_sfmt_and3:
+ {
+ const IDESC *idesc = &m32rxf_insn_data[itype];
+ CGEN_INSN_INT insn = entire_insn;
+#define FLD(f) abuf->fields.sfmt_and3.f
+ UINT f_r1;
+ UINT f_r2;
+ UINT f_uimm16;
+
+ f_r1 = EXTRACT_MSB0_UINT (insn, 32, 4, 4);
+ f_r2 = EXTRACT_MSB0_UINT (insn, 32, 12, 4);
+ f_uimm16 = EXTRACT_MSB0_UINT (insn, 32, 16, 16);
+
+ /* Record the fields for the semantic handler. */
+ FLD (f_uimm16) = f_uimm16;
+ FLD (i_sr) = & CPU (h_gr)[f_r2];
+ FLD (i_dr) = & CPU (h_gr)[f_r1];
+ TRACE_EXTRACT (current_cpu, abuf, (current_cpu, pc, "sfmt_and3", "f_uimm16 0x%x", 'x', f_uimm16, "sr 0x%x", 'x', f_r2, "dr 0x%x", 'x', f_r1, (char *) 0));
+
+#if WITH_PROFILE_MODEL_P
+ /* Record the fields for profiling. */
+ if (PROFILE_MODEL_P (current_cpu))
+ {
+ FLD (in_sr) = f_r2;
+ FLD (out_dr) = f_r1;
+ }
+#endif
+#undef FLD
+ return idesc;
+ }
+
+ extract_sfmt_or3:
+ {
+ const IDESC *idesc = &m32rxf_insn_data[itype];
+ CGEN_INSN_INT insn = entire_insn;
+#define FLD(f) abuf->fields.sfmt_and3.f
+ UINT f_r1;
+ UINT f_r2;
+ UINT f_uimm16;
+
+ f_r1 = EXTRACT_MSB0_UINT (insn, 32, 4, 4);
+ f_r2 = EXTRACT_MSB0_UINT (insn, 32, 12, 4);
+ f_uimm16 = EXTRACT_MSB0_UINT (insn, 32, 16, 16);
+
+ /* Record the fields for the semantic handler. */
+ FLD (f_uimm16) = f_uimm16;
+ FLD (i_sr) = & CPU (h_gr)[f_r2];
+ FLD (i_dr) = & CPU (h_gr)[f_r1];
+ TRACE_EXTRACT (current_cpu, abuf, (current_cpu, pc, "sfmt_or3", "f_uimm16 0x%x", 'x', f_uimm16, "sr 0x%x", 'x', f_r2, "dr 0x%x", 'x', f_r1, (char *) 0));
+
+#if WITH_PROFILE_MODEL_P
+ /* Record the fields for profiling. */
+ if (PROFILE_MODEL_P (current_cpu))
+ {
+ FLD (in_sr) = f_r2;
+ FLD (out_dr) = f_r1;
+ }
+#endif
+#undef FLD
+ return idesc;
+ }
+
+ extract_sfmt_addi:
+ {
+ const IDESC *idesc = &m32rxf_insn_data[itype];
+ CGEN_INSN_INT insn = entire_insn;
+#define FLD(f) abuf->fields.sfmt_addi.f
+ UINT f_r1;
+ INT f_simm8;
+
+ f_r1 = EXTRACT_MSB0_UINT (insn, 16, 4, 4);
+ f_simm8 = EXTRACT_MSB0_INT (insn, 16, 8, 8);
+
+ /* Record the fields for the semantic handler. */
+ FLD (f_simm8) = f_simm8;
+ FLD (i_dr) = & CPU (h_gr)[f_r1];
+ TRACE_EXTRACT (current_cpu, abuf, (current_cpu, pc, "sfmt_addi", "f_simm8 0x%x", 'x', f_simm8, "dr 0x%x", 'x', f_r1, (char *) 0));
+
+#if WITH_PROFILE_MODEL_P
+ /* Record the fields for profiling. */
+ if (PROFILE_MODEL_P (current_cpu))
+ {
+ FLD (in_dr) = f_r1;
+ FLD (out_dr) = f_r1;
+ }
+#endif
+#undef FLD
+ return idesc;
+ }
+
+ extract_sfmt_addv:
+ {
+ const IDESC *idesc = &m32rxf_insn_data[itype];
+ CGEN_INSN_INT insn = entire_insn;
+#define FLD(f) abuf->fields.sfmt_add.f
+ UINT f_r1;
+ UINT f_r2;
+
+ f_r1 = EXTRACT_MSB0_UINT (insn, 16, 4, 4);
+ f_r2 = EXTRACT_MSB0_UINT (insn, 16, 12, 4);
+
+ /* Record the fields for the semantic handler. */
+ FLD (i_dr) = & CPU (h_gr)[f_r1];
+ FLD (i_sr) = & CPU (h_gr)[f_r2];
+ TRACE_EXTRACT (current_cpu, abuf, (current_cpu, pc, "sfmt_addv", "dr 0x%x", 'x', f_r1, "sr 0x%x", 'x', f_r2, (char *) 0));
+
+#if WITH_PROFILE_MODEL_P
+ /* Record the fields for profiling. */
+ if (PROFILE_MODEL_P (current_cpu))
+ {
+ FLD (in_dr) = f_r1;
+ FLD (in_sr) = f_r2;
+ FLD (out_dr) = f_r1;
+ }
+#endif
+#undef FLD
+ return idesc;
+ }
+
+ extract_sfmt_addv3:
+ {
+ const IDESC *idesc = &m32rxf_insn_data[itype];
+ CGEN_INSN_INT insn = entire_insn;
+#define FLD(f) abuf->fields.sfmt_add3.f
+ UINT f_r1;
+ UINT f_r2;
+ INT f_simm16;
+
+ f_r1 = EXTRACT_MSB0_UINT (insn, 32, 4, 4);
+ f_r2 = EXTRACT_MSB0_UINT (insn, 32, 12, 4);
+ f_simm16 = EXTRACT_MSB0_INT (insn, 32, 16, 16);
+
+ /* Record the fields for the semantic handler. */
+ FLD (f_simm16) = f_simm16;
+ FLD (i_sr) = & CPU (h_gr)[f_r2];
+ FLD (i_dr) = & CPU (h_gr)[f_r1];
+ TRACE_EXTRACT (current_cpu, abuf, (current_cpu, pc, "sfmt_addv3", "f_simm16 0x%x", 'x', f_simm16, "sr 0x%x", 'x', f_r2, "dr 0x%x", 'x', f_r1, (char *) 0));
+
+#if WITH_PROFILE_MODEL_P
+ /* Record the fields for profiling. */
+ if (PROFILE_MODEL_P (current_cpu))
+ {
+ FLD (in_sr) = f_r2;
+ FLD (out_dr) = f_r1;
+ }
+#endif
+#undef FLD
+ return idesc;
+ }
+
+ extract_sfmt_addx:
+ {
+ const IDESC *idesc = &m32rxf_insn_data[itype];
+ CGEN_INSN_INT insn = entire_insn;
+#define FLD(f) abuf->fields.sfmt_add.f
+ UINT f_r1;
+ UINT f_r2;
+
+ f_r1 = EXTRACT_MSB0_UINT (insn, 16, 4, 4);
+ f_r2 = EXTRACT_MSB0_UINT (insn, 16, 12, 4);
+
+ /* Record the fields for the semantic handler. */
+ FLD (i_dr) = & CPU (h_gr)[f_r1];
+ FLD (i_sr) = & CPU (h_gr)[f_r2];
+ TRACE_EXTRACT (current_cpu, abuf, (current_cpu, pc, "sfmt_addx", "dr 0x%x", 'x', f_r1, "sr 0x%x", 'x', f_r2, (char *) 0));
+
+#if WITH_PROFILE_MODEL_P
+ /* Record the fields for profiling. */
+ if (PROFILE_MODEL_P (current_cpu))
+ {
+ FLD (in_dr) = f_r1;
+ FLD (in_sr) = f_r2;
+ FLD (out_dr) = f_r1;
+ }
+#endif
+#undef FLD
+ return idesc;
+ }
+
+ extract_sfmt_bc8:
+ {
+ const IDESC *idesc = &m32rxf_insn_data[itype];
+ CGEN_INSN_INT insn = entire_insn;
+#define FLD(f) abuf->fields.sfmt_bl8.f
+ SI f_disp8;
+
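+ /* The 8 bit displacement is in words and is taken relative to the insn's address rounded down to a 4 byte boundary.  */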
+ f_disp8 = ((((EXTRACT_MSB0_INT (insn, 16, 8, 8)) << (2))) + (((pc) & (-4))));
+
+ /* Record the fields for the semantic handler. */
+ FLD (i_disp8) = f_disp8;
+ TRACE_EXTRACT (current_cpu, abuf, (current_cpu, pc, "sfmt_bc8", "disp8 0x%x", 'x', f_disp8, (char *) 0));
+
+#if WITH_PROFILE_MODEL_P
+ /* Record the fields for profiling. */
+ if (PROFILE_MODEL_P (current_cpu))
+ {
+ }
+#endif
+#undef FLD
+ return idesc;
+ }
+
+ extract_sfmt_bc24:
+ {
+ const IDESC *idesc = &m32rxf_insn_data[itype];
+ CGEN_INSN_INT insn = entire_insn;
+#define FLD(f) abuf->fields.sfmt_bl24.f
+ SI f_disp24;
+
+ f_disp24 = ((((EXTRACT_MSB0_INT (insn, 32, 8, 24)) << (2))) + (pc));
+
+ /* Record the fields for the semantic handler. */
+ FLD (i_disp24) = f_disp24;
+ TRACE_EXTRACT (current_cpu, abuf, (current_cpu, pc, "sfmt_bc24", "disp24 0x%x", 'x', f_disp24, (char *) 0));
+
+#if WITH_PROFILE_MODEL_P
+ /* Record the fields for profiling. */
+ if (PROFILE_MODEL_P (current_cpu))
+ {
+ }
+#endif
+#undef FLD
+ return idesc;
+ }
+
+ extract_sfmt_beq:
+ {
+ const IDESC *idesc = &m32rxf_insn_data[itype];
+ CGEN_INSN_INT insn = entire_insn;
+#define FLD(f) abuf->fields.sfmt_beq.f
+ UINT f_r1;
+ UINT f_r2;
+ SI f_disp16;
+
+ f_r1 = EXTRACT_MSB0_UINT (insn, 32, 4, 4);
+ f_r2 = EXTRACT_MSB0_UINT (insn, 32, 12, 4);
+ f_disp16 = ((((EXTRACT_MSB0_INT (insn, 32, 16, 16)) << (2))) + (pc));
+
+ /* Record the fields for the semantic handler. */
+ FLD (i_disp16) = f_disp16;
+ FLD (i_src1) = & CPU (h_gr)[f_r1];
+ FLD (i_src2) = & CPU (h_gr)[f_r2];
+ TRACE_EXTRACT (current_cpu, abuf, (current_cpu, pc, "sfmt_beq", "disp16 0x%x", 'x', f_disp16, "src1 0x%x", 'x', f_r1, "src2 0x%x", 'x', f_r2, (char *) 0));
+
+#if WITH_PROFILE_MODEL_P
+ /* Record the fields for profiling. */
+ if (PROFILE_MODEL_P (current_cpu))
+ {
+ FLD (in_src1) = f_r1;
+ FLD (in_src2) = f_r2;
+ }
+#endif
+#undef FLD
+ return idesc;
+ }
+
+ extract_sfmt_beqz:
+ {
+ const IDESC *idesc = &m32rxf_insn_data[itype];
+ CGEN_INSN_INT insn = entire_insn;
+#define FLD(f) abuf->fields.sfmt_beq.f
+ UINT f_r2;
+ SI f_disp16;
+
+ f_r2 = EXTRACT_MSB0_UINT (insn, 32, 12, 4);
+ f_disp16 = ((((EXTRACT_MSB0_INT (insn, 32, 16, 16)) << (2))) + (pc));
+
+ /* Record the fields for the semantic handler. */
+ FLD (i_disp16) = f_disp16;
+ FLD (i_src2) = & CPU (h_gr)[f_r2];
+ TRACE_EXTRACT (current_cpu, abuf, (current_cpu, pc, "sfmt_beqz", "disp16 0x%x", 'x', f_disp16, "src2 0x%x", 'x', f_r2, (char *) 0));
+
+#if WITH_PROFILE_MODEL_P
+ /* Record the fields for profiling. */
+ if (PROFILE_MODEL_P (current_cpu))
+ {
+ FLD (in_src2) = f_r2;
+ }
+#endif
+#undef FLD
+ return idesc;
+ }
+
+ extract_sfmt_bl8:
+ {
+ const IDESC *idesc = &m32rxf_insn_data[itype];
+ CGEN_INSN_INT insn = entire_insn;
+#define FLD(f) abuf->fields.sfmt_bl8.f
+ SI f_disp8;
+
+ f_disp8 = ((((EXTRACT_MSB0_INT (insn, 16, 8, 8)) << (2))) + (((pc) & (-4))));
+
+ /* Record the fields for the semantic handler. */
+ FLD (i_disp8) = f_disp8;
+ TRACE_EXTRACT (current_cpu, abuf, (current_cpu, pc, "sfmt_bl8", "disp8 0x%x", 'x', f_disp8, (char *) 0));
+
+#if WITH_PROFILE_MODEL_P
+ /* Record the fields for profiling. */
+ if (PROFILE_MODEL_P (current_cpu))
+ {
+ FLD (out_h_gr_14) = 14;
+ }
+#endif
+#undef FLD
+ return idesc;
+ }
+
+ extract_sfmt_bl24:
+ {
+ const IDESC *idesc = &m32rxf_insn_data[itype];
+ CGEN_INSN_INT insn = entire_insn;
+#define FLD(f) abuf->fields.sfmt_bl24.f
+ SI f_disp24;
+
+ f_disp24 = ((((EXTRACT_MSB0_INT (insn, 32, 8, 24)) << (2))) + (pc));
+
+ /* Record the fields for the semantic handler. */
+ FLD (i_disp24) = f_disp24;
+ TRACE_EXTRACT (current_cpu, abuf, (current_cpu, pc, "sfmt_bl24", "disp24 0x%x", 'x', f_disp24, (char *) 0));
+
+#if WITH_PROFILE_MODEL_P
+ /* Record the fields for profiling. */
+ if (PROFILE_MODEL_P (current_cpu))
+ {
+ FLD (out_h_gr_14) = 14;
+ }
+#endif
+#undef FLD
+ return idesc;
+ }
+
+ extract_sfmt_bcl8:
+ {
+ const IDESC *idesc = &m32rxf_insn_data[itype];
+ CGEN_INSN_INT insn = entire_insn;
+#define FLD(f) abuf->fields.sfmt_bl8.f
+ SI f_disp8;
+
+ f_disp8 = ((((EXTRACT_MSB0_INT (insn, 16, 8, 8)) << (2))) + (((pc) & (-4))));
+
+ /* Record the fields for the semantic handler. */
+ FLD (i_disp8) = f_disp8;
+ TRACE_EXTRACT (current_cpu, abuf, (current_cpu, pc, "sfmt_bcl8", "disp8 0x%x", 'x', f_disp8, (char *) 0));
+
+#if WITH_PROFILE_MODEL_P
+ /* Record the fields for profiling. */
+ if (PROFILE_MODEL_P (current_cpu))
+ {
+ FLD (out_h_gr_14) = 14;
+ }
+#endif
+#undef FLD
+ return idesc;
+ }
+
+ extract_sfmt_bcl24:
+ {
+ const IDESC *idesc = &m32rxf_insn_data[itype];
+ CGEN_INSN_INT insn = entire_insn;
+#define FLD(f) abuf->fields.sfmt_bl24.f
+ SI f_disp24;
+
+ f_disp24 = ((((EXTRACT_MSB0_INT (insn, 32, 8, 24)) << (2))) + (pc));
+
+ /* Record the fields for the semantic handler. */
+ FLD (i_disp24) = f_disp24;
+ TRACE_EXTRACT (current_cpu, abuf, (current_cpu, pc, "sfmt_bcl24", "disp24 0x%x", 'x', f_disp24, (char *) 0));
+
+#if WITH_PROFILE_MODEL_P
+ /* Record the fields for profiling. */
+ if (PROFILE_MODEL_P (current_cpu))
+ {
+ FLD (out_h_gr_14) = 14;
+ }
+#endif
+#undef FLD
+ return idesc;
+ }
+
+ extract_sfmt_bra8:
+ {
+ const IDESC *idesc = &m32rxf_insn_data[itype];
+ CGEN_INSN_INT insn = entire_insn;
+#define FLD(f) abuf->fields.sfmt_bl8.f
+ SI f_disp8;
+
+ f_disp8 = ((((EXTRACT_MSB0_INT (insn, 16, 8, 8)) << (2))) + (((pc) & (-4))));
+
+ /* Record the fields for the semantic handler. */
+ FLD (i_disp8) = f_disp8;
+ TRACE_EXTRACT (current_cpu, abuf, (current_cpu, pc, "sfmt_bra8", "disp8 0x%x", 'x', f_disp8, (char *) 0));
+
+#if WITH_PROFILE_MODEL_P
+ /* Record the fields for profiling. */
+ if (PROFILE_MODEL_P (current_cpu))
+ {
+ }
+#endif
+#undef FLD
+ return idesc;
+ }
+
+ extract_sfmt_bra24:
+ {
+ const IDESC *idesc = &m32rxf_insn_data[itype];
+ CGEN_INSN_INT insn = entire_insn;
+#define FLD(f) abuf->fields.sfmt_bl24.f
+ SI f_disp24;
+
+ f_disp24 = ((((EXTRACT_MSB0_INT (insn, 32, 8, 24)) << (2))) + (pc));
+
+ /* Record the fields for the semantic handler. */
+ FLD (i_disp24) = f_disp24;
+ TRACE_EXTRACT (current_cpu, abuf, (current_cpu, pc, "sfmt_bra24", "disp24 0x%x", 'x', f_disp24, (char *) 0));
+
+#if WITH_PROFILE_MODEL_P
+ /* Record the fields for profiling. */
+ if (PROFILE_MODEL_P (current_cpu))
+ {
+ }
+#endif
+#undef FLD
+ return idesc;
+ }
+
+ extract_sfmt_cmp:
+ {
+ const IDESC *idesc = &m32rxf_insn_data[itype];
+ CGEN_INSN_INT insn = entire_insn;
+#define FLD(f) abuf->fields.sfmt_st_plus.f
+ UINT f_r1;
+ UINT f_r2;
+
+ f_r1 = EXTRACT_MSB0_UINT (insn, 16, 4, 4);
+ f_r2 = EXTRACT_MSB0_UINT (insn, 16, 12, 4);
+
+ /* Record the fields for the semantic handler. */
+ FLD (i_src1) = & CPU (h_gr)[f_r1];
+ FLD (i_src2) = & CPU (h_gr)[f_r2];
+ TRACE_EXTRACT (current_cpu, abuf, (current_cpu, pc, "sfmt_cmp", "src1 0x%x", 'x', f_r1, "src2 0x%x", 'x', f_r2, (char *) 0));
+
+#if WITH_PROFILE_MODEL_P
+ /* Record the fields for profiling. */
+ if (PROFILE_MODEL_P (current_cpu))
+ {
+ FLD (in_src1) = f_r1;
+ FLD (in_src2) = f_r2;
+ }
+#endif
+#undef FLD
+ return idesc;
+ }
+
+ extract_sfmt_cmpi:
+ {
+ const IDESC *idesc = &m32rxf_insn_data[itype];
+ CGEN_INSN_INT insn = entire_insn;
+#define FLD(f) abuf->fields.sfmt_st_d.f
+ UINT f_r2;
+ INT f_simm16;
+
+ f_r2 = EXTRACT_MSB0_UINT (insn, 32, 12, 4);
+ f_simm16 = EXTRACT_MSB0_INT (insn, 32, 16, 16);
+
+ /* Record the fields for the semantic handler. */
+ FLD (f_simm16) = f_simm16;
+ FLD (i_src2) = & CPU (h_gr)[f_r2];
+ TRACE_EXTRACT (current_cpu, abuf, (current_cpu, pc, "sfmt_cmpi", "f_simm16 0x%x", 'x', f_simm16, "src2 0x%x", 'x', f_r2, (char *) 0));
+
+#if WITH_PROFILE_MODEL_P
+ /* Record the fields for profiling. */
+ if (PROFILE_MODEL_P (current_cpu))
+ {
+ FLD (in_src2) = f_r2;
+ }
+#endif
+#undef FLD
+ return idesc;
+ }
+
+ extract_sfmt_cmpz:
+ {
+ const IDESC *idesc = &m32rxf_insn_data[itype];
+ CGEN_INSN_INT insn = entire_insn;
+#define FLD(f) abuf->fields.sfmt_st_plus.f
+ UINT f_r2;
+
+ f_r2 = EXTRACT_MSB0_UINT (insn, 16, 12, 4);
+
+ /* Record the fields for the semantic handler. */
+ FLD (i_src2) = & CPU (h_gr)[f_r2];
+ TRACE_EXTRACT (current_cpu, abuf, (current_cpu, pc, "sfmt_cmpz", "src2 0x%x", 'x', f_r2, (char *) 0));
+
+#if WITH_PROFILE_MODEL_P
+ /* Record the fields for profiling. */
+ if (PROFILE_MODEL_P (current_cpu))
+ {
+ FLD (in_src2) = f_r2;
+ }
+#endif
+#undef FLD
+ return idesc;
+ }
+
+ extract_sfmt_div:
+ {
+ const IDESC *idesc = &m32rxf_insn_data[itype];
+ CGEN_INSN_INT insn = entire_insn;
+#define FLD(f) abuf->fields.sfmt_add.f
+ UINT f_r1;
+ UINT f_r2;
+
+ f_r1 = EXTRACT_MSB0_UINT (insn, 32, 4, 4);
+ f_r2 = EXTRACT_MSB0_UINT (insn, 32, 12, 4);
+
+ /* Record the fields for the semantic handler. */
+ FLD (i_dr) = & CPU (h_gr)[f_r1];
+ FLD (i_sr) = & CPU (h_gr)[f_r2];
+ TRACE_EXTRACT (current_cpu, abuf, (current_cpu, pc, "sfmt_div", "dr 0x%x", 'x', f_r1, "sr 0x%x", 'x', f_r2, (char *) 0));
+
+#if WITH_PROFILE_MODEL_P
+ /* Record the fields for profiling. */
+ if (PROFILE_MODEL_P (current_cpu))
+ {
+ FLD (in_dr) = f_r1;
+ FLD (in_sr) = f_r2;
+ FLD (out_dr) = f_r1;
+ }
+#endif
+#undef FLD
+ return idesc;
+ }
+
+ extract_sfmt_jc:
+ {
+ const IDESC *idesc = &m32rxf_insn_data[itype];
+ CGEN_INSN_INT insn = entire_insn;
+#define FLD(f) abuf->fields.sfmt_mvtc.f
+ UINT f_r2;
+
+ f_r2 = EXTRACT_MSB0_UINT (insn, 16, 12, 4);
+
+ /* Record the fields for the semantic handler. */
+ FLD (i_sr) = & CPU (h_gr)[f_r2];
+ TRACE_EXTRACT (current_cpu, abuf, (current_cpu, pc, "sfmt_jc", "sr 0x%x", 'x', f_r2, (char *) 0));
+
+#if WITH_PROFILE_MODEL_P
+ /* Record the fields for profiling. */
+ if (PROFILE_MODEL_P (current_cpu))
+ {
+ FLD (in_sr) = f_r2;
+ }
+#endif
+#undef FLD
+ return idesc;
+ }
+
+ extract_sfmt_jl:
+ {
+ const IDESC *idesc = &m32rxf_insn_data[itype];
+ CGEN_INSN_INT insn = entire_insn;
+#define FLD(f) abuf->fields.sfmt_jl.f
+ UINT f_r2;
+
+ f_r2 = EXTRACT_MSB0_UINT (insn, 16, 12, 4);
+
+ /* Record the fields for the semantic handler. */
+ FLD (i_sr) = & CPU (h_gr)[f_r2];
+ TRACE_EXTRACT (current_cpu, abuf, (current_cpu, pc, "sfmt_jl", "sr 0x%x", 'x', f_r2, (char *) 0));
+
+#if WITH_PROFILE_MODEL_P
+ /* Record the fields for profiling. */
+ if (PROFILE_MODEL_P (current_cpu))
+ {
+ FLD (in_sr) = f_r2;
+ FLD (out_h_gr_14) = 14;
+ }
+#endif
+#undef FLD
+ return idesc;
+ }
+
+ extract_sfmt_jmp:
+ {
+ const IDESC *idesc = &m32rxf_insn_data[itype];
+ CGEN_INSN_INT insn = entire_insn;
+#define FLD(f) abuf->fields.sfmt_mvtc.f
+ UINT f_r2;
+
+ f_r2 = EXTRACT_MSB0_UINT (insn, 16, 12, 4);
+
+ /* Record the fields for the semantic handler. */
+ FLD (i_sr) = & CPU (h_gr)[f_r2];
+ TRACE_EXTRACT (current_cpu, abuf, (current_cpu, pc, "sfmt_jmp", "sr 0x%x", 'x', f_r2, (char *) 0));
+
+#if WITH_PROFILE_MODEL_P
+ /* Record the fields for profiling. */
+ if (PROFILE_MODEL_P (current_cpu))
+ {
+ FLD (in_sr) = f_r2;
+ }
+#endif
+#undef FLD
+ return idesc;
+ }
+
+ extract_sfmt_ld:
+ {
+ const IDESC *idesc = &m32rxf_insn_data[itype];
+ CGEN_INSN_INT insn = entire_insn;
+#define FLD(f) abuf->fields.sfmt_ld_plus.f
+ UINT f_r1;
+ UINT f_r2;
+
+ f_r1 = EXTRACT_MSB0_UINT (insn, 16, 4, 4);
+ f_r2 = EXTRACT_MSB0_UINT (insn, 16, 12, 4);
+
+ /* Record the fields for the semantic handler. */
+ FLD (i_sr) = & CPU (h_gr)[f_r2];
+ FLD (i_dr) = & CPU (h_gr)[f_r1];
+ TRACE_EXTRACT (current_cpu, abuf, (current_cpu, pc, "sfmt_ld", "sr 0x%x", 'x', f_r2, "dr 0x%x", 'x', f_r1, (char *) 0));
+
+#if WITH_PROFILE_MODEL_P
+ /* Record the fields for profiling. */
+ if (PROFILE_MODEL_P (current_cpu))
+ {
+ FLD (in_sr) = f_r2;
+ FLD (out_dr) = f_r1;
+ }
+#endif
+#undef FLD
+ return idesc;
+ }
+
+ extract_sfmt_ld_d:
+ {
+ const IDESC *idesc = &m32rxf_insn_data[itype];
+ CGEN_INSN_INT insn = entire_insn;
+#define FLD(f) abuf->fields.sfmt_add3.f
+ UINT f_r1;
+ UINT f_r2;
+ INT f_simm16;
+
+ f_r1 = EXTRACT_MSB0_UINT (insn, 32, 4, 4);
+ f_r2 = EXTRACT_MSB0_UINT (insn, 32, 12, 4);
+ f_simm16 = EXTRACT_MSB0_INT (insn, 32, 16, 16);
+
+ /* Record the fields for the semantic handler. */
+ FLD (f_simm16) = f_simm16;
+ FLD (i_sr) = & CPU (h_gr)[f_r2];
+ FLD (i_dr) = & CPU (h_gr)[f_r1];
+ TRACE_EXTRACT (current_cpu, abuf, (current_cpu, pc, "sfmt_ld_d", "f_simm16 0x%x", 'x', f_simm16, "sr 0x%x", 'x', f_r2, "dr 0x%x", 'x', f_r1, (char *) 0));
+
+#if WITH_PROFILE_MODEL_P
+ /* Record the fields for profiling. */
+ if (PROFILE_MODEL_P (current_cpu))
+ {
+ FLD (in_sr) = f_r2;
+ FLD (out_dr) = f_r1;
+ }
+#endif
+#undef FLD
+ return idesc;
+ }
+
+ extract_sfmt_ld_plus:
+ {
+ const IDESC *idesc = &m32rxf_insn_data[itype];
+ CGEN_INSN_INT insn = entire_insn;
+#define FLD(f) abuf->fields.sfmt_ld_plus.f
+ UINT f_r1;
+ UINT f_r2;
+
+ f_r1 = EXTRACT_MSB0_UINT (insn, 16, 4, 4);
+ f_r2 = EXTRACT_MSB0_UINT (insn, 16, 12, 4);
+
+ /* Record the fields for the semantic handler. */
+ FLD (i_sr) = & CPU (h_gr)[f_r2];
+ FLD (i_dr) = & CPU (h_gr)[f_r1];
+ TRACE_EXTRACT (current_cpu, abuf, (current_cpu, pc, "sfmt_ld_plus", "sr 0x%x", 'x', f_r2, "dr 0x%x", 'x', f_r1, (char *) 0));
+
+#if WITH_PROFILE_MODEL_P
+ /* Record the fields for profiling. */
+ if (PROFILE_MODEL_P (current_cpu))
+ {
+ FLD (in_sr) = f_r2;
+ FLD (out_dr) = f_r1;
+ FLD (out_sr) = f_r2;
+ }
+#endif
+#undef FLD
+ return idesc;
+ }
+
+ extract_sfmt_ld24:
+ {
+ const IDESC *idesc = &m32rxf_insn_data[itype];
+ CGEN_INSN_INT insn = entire_insn;
+#define FLD(f) abuf->fields.sfmt_ld24.f
+ UINT f_r1;
+ UINT f_uimm24;
+
+ f_r1 = EXTRACT_MSB0_UINT (insn, 32, 4, 4);
+ f_uimm24 = EXTRACT_MSB0_UINT (insn, 32, 8, 24);
+
+ /* Record the fields for the semantic handler. */
+ FLD (i_uimm24) = f_uimm24;
+ FLD (i_dr) = & CPU (h_gr)[f_r1];
+ TRACE_EXTRACT (current_cpu, abuf, (current_cpu, pc, "sfmt_ld24", "uimm24 0x%x", 'x', f_uimm24, "dr 0x%x", 'x', f_r1, (char *) 0));
+
+#if WITH_PROFILE_MODEL_P
+ /* Record the fields for profiling. */
+ if (PROFILE_MODEL_P (current_cpu))
+ {
+ FLD (out_dr) = f_r1;
+ }
+#endif
+#undef FLD
+ return idesc;
+ }
+
+ extract_sfmt_ldi8:
+ {
+ const IDESC *idesc = &m32rxf_insn_data[itype];
+ CGEN_INSN_INT insn = entire_insn;
+#define FLD(f) abuf->fields.sfmt_addi.f
+ UINT f_r1;
+ INT f_simm8;
+
+ f_r1 = EXTRACT_MSB0_UINT (insn, 16, 4, 4);
+ f_simm8 = EXTRACT_MSB0_INT (insn, 16, 8, 8);
+
+ /* Record the fields for the semantic handler. */
+ FLD (f_simm8) = f_simm8;
+ FLD (i_dr) = & CPU (h_gr)[f_r1];
+ TRACE_EXTRACT (current_cpu, abuf, (current_cpu, pc, "sfmt_ldi8", "f_simm8 0x%x", 'x', f_simm8, "dr 0x%x", 'x', f_r1, (char *) 0));
+
+#if WITH_PROFILE_MODEL_P
+ /* Record the fields for profiling. */
+ if (PROFILE_MODEL_P (current_cpu))
+ {
+ FLD (out_dr) = f_r1;
+ }
+#endif
+#undef FLD
+ return idesc;
+ }
+
+ extract_sfmt_ldi16:
+ {
+ const IDESC *idesc = &m32rxf_insn_data[itype];
+ CGEN_INSN_INT insn = entire_insn;
+#define FLD(f) abuf->fields.sfmt_add3.f
+ UINT f_r1;
+ INT f_simm16;
+
+ f_r1 = EXTRACT_MSB0_UINT (insn, 32, 4, 4);
+ f_simm16 = EXTRACT_MSB0_INT (insn, 32, 16, 16);
+
+ /* Record the fields for the semantic handler. */
+ FLD (f_simm16) = f_simm16;
+ FLD (i_dr) = & CPU (h_gr)[f_r1];
+ TRACE_EXTRACT (current_cpu, abuf, (current_cpu, pc, "sfmt_ldi16", "f_simm16 0x%x", 'x', f_simm16, "dr 0x%x", 'x', f_r1, (char *) 0));
+
+#if WITH_PROFILE_MODEL_P
+ /* Record the fields for profiling. */
+ if (PROFILE_MODEL_P (current_cpu))
+ {
+ FLD (out_dr) = f_r1;
+ }
+#endif
+#undef FLD
+ return idesc;
+ }
+
+ extract_sfmt_lock:
+ {
+ const IDESC *idesc = &m32rxf_insn_data[itype];
+ CGEN_INSN_INT insn = entire_insn;
+#define FLD(f) abuf->fields.sfmt_ld_plus.f
+ UINT f_r1;
+ UINT f_r2;
+
+ f_r1 = EXTRACT_MSB0_UINT (insn, 16, 4, 4);
+ f_r2 = EXTRACT_MSB0_UINT (insn, 16, 12, 4);
+
+ /* Record the fields for the semantic handler. */
+ FLD (i_sr) = & CPU (h_gr)[f_r2];
+ FLD (i_dr) = & CPU (h_gr)[f_r1];
+ TRACE_EXTRACT (current_cpu, abuf, (current_cpu, pc, "sfmt_lock", "sr 0x%x", 'x', f_r2, "dr 0x%x", 'x', f_r1, (char *) 0));
+
+#if WITH_PROFILE_MODEL_P
+ /* Record the fields for profiling. */
+ if (PROFILE_MODEL_P (current_cpu))
+ {
+ FLD (in_sr) = f_r2;
+ FLD (out_dr) = f_r1;
+ }
+#endif
+#undef FLD
+ return idesc;
+ }
+
+ extract_sfmt_machi_a:
+ {
+ const IDESC *idesc = &m32rxf_insn_data[itype];
+ CGEN_INSN_INT insn = entire_insn;
+#define FLD(f) abuf->fields.sfmt_machi_a.f
+ UINT f_r1;
+ UINT f_acc;
+ UINT f_r2;
+
+ f_r1 = EXTRACT_MSB0_UINT (insn, 16, 4, 4);
+ f_acc = EXTRACT_MSB0_UINT (insn, 16, 8, 1);
+ f_r2 = EXTRACT_MSB0_UINT (insn, 16, 12, 4);
+
+ /* Record the fields for the semantic handler. */
+ FLD (f_acc) = f_acc;
+ FLD (i_src1) = & CPU (h_gr)[f_r1];
+ FLD (i_src2) = & CPU (h_gr)[f_r2];
+ TRACE_EXTRACT (current_cpu, abuf, (current_cpu, pc, "sfmt_machi_a", "f_acc 0x%x", 'x', f_acc, "src1 0x%x", 'x', f_r1, "src2 0x%x", 'x', f_r2, (char *) 0));
+
+#if WITH_PROFILE_MODEL_P
+ /* Record the fields for profiling. */
+ if (PROFILE_MODEL_P (current_cpu))
+ {
+ FLD (in_src1) = f_r1;
+ FLD (in_src2) = f_r2;
+ }
+#endif
+#undef FLD
+ return idesc;
+ }
+
+ extract_sfmt_mulhi_a:
+ {
+ const IDESC *idesc = &m32rxf_insn_data[itype];
+ CGEN_INSN_INT insn = entire_insn;
+#define FLD(f) abuf->fields.sfmt_machi_a.f
+ UINT f_r1;
+ UINT f_acc;
+ UINT f_r2;
+
+ f_r1 = EXTRACT_MSB0_UINT (insn, 16, 4, 4);
+ f_acc = EXTRACT_MSB0_UINT (insn, 16, 8, 1);
+ f_r2 = EXTRACT_MSB0_UINT (insn, 16, 12, 4);
+
+ /* Record the fields for the semantic handler. */
+ FLD (f_acc) = f_acc;
+ FLD (i_src1) = & CPU (h_gr)[f_r1];
+ FLD (i_src2) = & CPU (h_gr)[f_r2];
+ TRACE_EXTRACT (current_cpu, abuf, (current_cpu, pc, "sfmt_mulhi_a", "f_acc 0x%x", 'x', f_acc, "src1 0x%x", 'x', f_r1, "src2 0x%x", 'x', f_r2, (char *) 0));
+
+#if WITH_PROFILE_MODEL_P
+ /* Record the fields for profiling. */
+ if (PROFILE_MODEL_P (current_cpu))
+ {
+ FLD (in_src1) = f_r1;
+ FLD (in_src2) = f_r2;
+ }
+#endif
+#undef FLD
+ return idesc;
+ }
+
+ extract_sfmt_mv:
+ {
+ const IDESC *idesc = &m32rxf_insn_data[itype];
+ CGEN_INSN_INT insn = entire_insn;
+#define FLD(f) abuf->fields.sfmt_ld_plus.f
+ UINT f_r1;
+ UINT f_r2;
+
+ f_r1 = EXTRACT_MSB0_UINT (insn, 16, 4, 4);
+ f_r2 = EXTRACT_MSB0_UINT (insn, 16, 12, 4);
+
+ /* Record the fields for the semantic handler. */
+ FLD (i_sr) = & CPU (h_gr)[f_r2];
+ FLD (i_dr) = & CPU (h_gr)[f_r1];
+ TRACE_EXTRACT (current_cpu, abuf, (current_cpu, pc, "sfmt_mv", "sr 0x%x", 'x', f_r2, "dr 0x%x", 'x', f_r1, (char *) 0));
+
+#if WITH_PROFILE_MODEL_P
+ /* Record the fields for profiling. */
+ if (PROFILE_MODEL_P (current_cpu))
+ {
+ FLD (in_sr) = f_r2;
+ FLD (out_dr) = f_r1;
+ }
+#endif
+#undef FLD
+ return idesc;
+ }
+
+ extract_sfmt_mvfachi_a:
+ {
+ const IDESC *idesc = &m32rxf_insn_data[itype];
+ CGEN_INSN_INT insn = entire_insn;
+#define FLD(f) abuf->fields.sfmt_mvfachi_a.f
+ UINT f_r1;
+ UINT f_accs;
+
+ f_r1 = EXTRACT_MSB0_UINT (insn, 16, 4, 4);
+ f_accs = EXTRACT_MSB0_UINT (insn, 16, 12, 2);
+
+ /* Record the fields for the semantic handler. */
+ FLD (f_accs) = f_accs;
+ FLD (i_dr) = & CPU (h_gr)[f_r1];
+ TRACE_EXTRACT (current_cpu, abuf, (current_cpu, pc, "sfmt_mvfachi_a", "f_accs 0x%x", 'x', f_accs, "dr 0x%x", 'x', f_r1, (char *) 0));
+
+#if WITH_PROFILE_MODEL_P
+ /* Record the fields for profiling. */
+ if (PROFILE_MODEL_P (current_cpu))
+ {
+ FLD (out_dr) = f_r1;
+ }
+#endif
+#undef FLD
+ return idesc;
+ }
+
+ extract_sfmt_mvfc:
+ {
+ const IDESC *idesc = &m32rxf_insn_data[itype];
+ CGEN_INSN_INT insn = entire_insn;
+#define FLD(f) abuf->fields.sfmt_mvfc.f
+ UINT f_r1;
+ UINT f_r2;
+
+ f_r1 = EXTRACT_MSB0_UINT (insn, 16, 4, 4);
+ f_r2 = EXTRACT_MSB0_UINT (insn, 16, 12, 4);
+
+ /* Record the fields for the semantic handler. */
+ FLD (f_r2) = f_r2;
+ FLD (i_dr) = & CPU (h_gr)[f_r1];
+ TRACE_EXTRACT (current_cpu, abuf, (current_cpu, pc, "sfmt_mvfc", "f_r2 0x%x", 'x', f_r2, "dr 0x%x", 'x', f_r1, (char *) 0));
+
+#if WITH_PROFILE_MODEL_P
+ /* Record the fields for profiling. */
+ if (PROFILE_MODEL_P (current_cpu))
+ {
+ FLD (out_dr) = f_r1;
+ }
+#endif
+#undef FLD
+ return idesc;
+ }
+
+ extract_sfmt_mvtachi_a:
+ {
+ const IDESC *idesc = &m32rxf_insn_data[itype];
+ CGEN_INSN_INT insn = entire_insn;
+#define FLD(f) abuf->fields.sfmt_mvtachi_a.f
+ UINT f_r1;
+ UINT f_accs;
+
+ f_r1 = EXTRACT_MSB0_UINT (insn, 16, 4, 4);
+ f_accs = EXTRACT_MSB0_UINT (insn, 16, 12, 2);
+
+ /* Record the fields for the semantic handler. */
+ FLD (f_accs) = f_accs;
+ FLD (i_src1) = & CPU (h_gr)[f_r1];
+ TRACE_EXTRACT (current_cpu, abuf, (current_cpu, pc, "sfmt_mvtachi_a", "f_accs 0x%x", 'x', f_accs, "src1 0x%x", 'x', f_r1, (char *) 0));
+
+#if WITH_PROFILE_MODEL_P
+ /* Record the fields for profiling. */
+ if (PROFILE_MODEL_P (current_cpu))
+ {
+ FLD (in_src1) = f_r1;
+ }
+#endif
+#undef FLD
+ return idesc;
+ }
+
+ extract_sfmt_mvtc:
+ {
+ const IDESC *idesc = &m32rxf_insn_data[itype];
+ CGEN_INSN_INT insn = entire_insn;
+#define FLD(f) abuf->fields.sfmt_mvtc.f
+ UINT f_r1;
+ UINT f_r2;
+
+ f_r1 = EXTRACT_MSB0_UINT (insn, 16, 4, 4);
+ f_r2 = EXTRACT_MSB0_UINT (insn, 16, 12, 4);
+
+ /* Record the fields for the semantic handler. */
+ FLD (f_r1) = f_r1;
+ FLD (i_sr) = & CPU (h_gr)[f_r2];
+ TRACE_EXTRACT (current_cpu, abuf, (current_cpu, pc, "sfmt_mvtc", "f_r1 0x%x", 'x', f_r1, "sr 0x%x", 'x', f_r2, (char *) 0));
+
+#if WITH_PROFILE_MODEL_P
+ /* Record the fields for profiling. */
+ if (PROFILE_MODEL_P (current_cpu))
+ {
+ FLD (in_sr) = f_r2;
+ }
+#endif
+#undef FLD
+ return idesc;
+ }
+
+ extract_sfmt_nop:
+ {
+ const IDESC *idesc = &m32rxf_insn_data[itype];
+ CGEN_INSN_INT insn = entire_insn;
+#define FLD(f) abuf->fields.fmt_empty.f
+
+
+ /* Record the fields for the semantic handler. */
+ TRACE_EXTRACT (current_cpu, abuf, (current_cpu, pc, "sfmt_nop", (char *) 0));
+
+#undef FLD
+ return idesc;
+ }
+
+ extract_sfmt_rac_dsi:
+ {
+ const IDESC *idesc = &m32rxf_insn_data[itype];
+ CGEN_INSN_INT insn = entire_insn;
+#define FLD(f) abuf->fields.sfmt_rac_dsi.f
+ UINT f_accd;
+ UINT f_accs;
+ SI f_imm1;
+
+ f_accd = EXTRACT_MSB0_UINT (insn, 16, 4, 2);
+ f_accs = EXTRACT_MSB0_UINT (insn, 16, 12, 2);
+ f_imm1 = ((EXTRACT_MSB0_UINT (insn, 16, 15, 1)) + (1));
+
+ /* Record the fields for the semantic handler. */
+ FLD (f_accs) = f_accs;
+ FLD (f_imm1) = f_imm1;
+ FLD (f_accd) = f_accd;
+ TRACE_EXTRACT (current_cpu, abuf, (current_cpu, pc, "sfmt_rac_dsi", "f_accs 0x%x", 'x', f_accs, "f_imm1 0x%x", 'x', f_imm1, "f_accd 0x%x", 'x', f_accd, (char *) 0));
+
+#undef FLD
+ return idesc;
+ }
+
+ extract_sfmt_rte:
+ {
+ const IDESC *idesc = &m32rxf_insn_data[itype];
+ CGEN_INSN_INT insn = entire_insn;
+#define FLD(f) abuf->fields.fmt_empty.f
+
+
+ /* Record the fields for the semantic handler. */
+ TRACE_EXTRACT (current_cpu, abuf, (current_cpu, pc, "sfmt_rte", (char *) 0));
+
+#if WITH_PROFILE_MODEL_P
+ /* Record the fields for profiling. */
+ if (PROFILE_MODEL_P (current_cpu))
+ {
+ }
+#endif
+#undef FLD
+ return idesc;
+ }
+
+ extract_sfmt_seth:
+ {
+ const IDESC *idesc = &m32rxf_insn_data[itype];
+ CGEN_INSN_INT insn = entire_insn;
+#define FLD(f) abuf->fields.sfmt_seth.f
+ UINT f_r1;
+ UINT f_hi16;
+
+ f_r1 = EXTRACT_MSB0_UINT (insn, 32, 4, 4);
+ f_hi16 = EXTRACT_MSB0_UINT (insn, 32, 16, 16);
+
+ /* Record the fields for the semantic handler. */
+ FLD (f_hi16) = f_hi16;
+ FLD (i_dr) = & CPU (h_gr)[f_r1];
+ TRACE_EXTRACT (current_cpu, abuf, (current_cpu, pc, "sfmt_seth", "f_hi16 0x%x", 'x', f_hi16, "dr 0x%x", 'x', f_r1, (char *) 0));
+
+#if WITH_PROFILE_MODEL_P
+ /* Record the fields for profiling. */
+ if (PROFILE_MODEL_P (current_cpu))
+ {
+ FLD (out_dr) = f_r1;
+ }
+#endif
+#undef FLD
+ return idesc;
+ }
+
+ extract_sfmt_sll3:
+ {
+ const IDESC *idesc = &m32rxf_insn_data[itype];
+ CGEN_INSN_INT insn = entire_insn;
+#define FLD(f) abuf->fields.sfmt_add3.f
+ UINT f_r1;
+ UINT f_r2;
+ INT f_simm16;
+
+ f_r1 = EXTRACT_MSB0_UINT (insn, 32, 4, 4);
+ f_r2 = EXTRACT_MSB0_UINT (insn, 32, 12, 4);
+ f_simm16 = EXTRACT_MSB0_INT (insn, 32, 16, 16);
+
+ /* Record the fields for the semantic handler. */
+ FLD (f_simm16) = f_simm16;
+ FLD (i_sr) = & CPU (h_gr)[f_r2];
+ FLD (i_dr) = & CPU (h_gr)[f_r1];
+ TRACE_EXTRACT (current_cpu, abuf, (current_cpu, pc, "sfmt_sll3", "f_simm16 0x%x", 'x', f_simm16, "sr 0x%x", 'x', f_r2, "dr 0x%x", 'x', f_r1, (char *) 0));
+
+#if WITH_PROFILE_MODEL_P
+ /* Record the fields for profiling. */
+ if (PROFILE_MODEL_P (current_cpu))
+ {
+ FLD (in_sr) = f_r2;
+ FLD (out_dr) = f_r1;
+ }
+#endif
+#undef FLD
+ return idesc;
+ }
+
+ extract_sfmt_slli:
+ {
+ const IDESC *idesc = &m32rxf_insn_data[itype];
+ CGEN_INSN_INT insn = entire_insn;
+#define FLD(f) abuf->fields.sfmt_slli.f
+ UINT f_r1;
+ UINT f_uimm5;
+
+ f_r1 = EXTRACT_MSB0_UINT (insn, 16, 4, 4);
+ f_uimm5 = EXTRACT_MSB0_UINT (insn, 16, 11, 5);
+
+ /* Record the fields for the semantic handler. */
+ FLD (f_uimm5) = f_uimm5;
+ FLD (i_dr) = & CPU (h_gr)[f_r1];
+ TRACE_EXTRACT (current_cpu, abuf, (current_cpu, pc, "sfmt_slli", "f_uimm5 0x%x", 'x', f_uimm5, "dr 0x%x", 'x', f_r1, (char *) 0));
+
+#if WITH_PROFILE_MODEL_P
+ /* Record the fields for profiling. */
+ if (PROFILE_MODEL_P (current_cpu))
+ {
+ FLD (in_dr) = f_r1;
+ FLD (out_dr) = f_r1;
+ }
+#endif
+#undef FLD
+ return idesc;
+ }
+
+ extract_sfmt_st:
+ {
+ const IDESC *idesc = &m32rxf_insn_data[itype];
+ CGEN_INSN_INT insn = entire_insn;
+#define FLD(f) abuf->fields.sfmt_st_plus.f
+ UINT f_r1;
+ UINT f_r2;
+
+ f_r1 = EXTRACT_MSB0_UINT (insn, 16, 4, 4);
+ f_r2 = EXTRACT_MSB0_UINT (insn, 16, 12, 4);
+
+ /* Record the fields for the semantic handler. */
+ FLD (i_src1) = & CPU (h_gr)[f_r1];
+ FLD (i_src2) = & CPU (h_gr)[f_r2];
+ TRACE_EXTRACT (current_cpu, abuf, (current_cpu, pc, "sfmt_st", "src1 0x%x", 'x', f_r1, "src2 0x%x", 'x', f_r2, (char *) 0));
+
+#if WITH_PROFILE_MODEL_P
+ /* Record the fields for profiling. */
+ if (PROFILE_MODEL_P (current_cpu))
+ {
+ FLD (in_src1) = f_r1;
+ FLD (in_src2) = f_r2;
+ }
+#endif
+#undef FLD
+ return idesc;
+ }
+
+ extract_sfmt_st_d:
+ {
+ const IDESC *idesc = &m32rxf_insn_data[itype];
+ CGEN_INSN_INT insn = entire_insn;
+#define FLD(f) abuf->fields.sfmt_st_d.f
+ UINT f_r1;
+ UINT f_r2;
+ INT f_simm16;
+
+ f_r1 = EXTRACT_MSB0_UINT (insn, 32, 4, 4);
+ f_r2 = EXTRACT_MSB0_UINT (insn, 32, 12, 4);
+ f_simm16 = EXTRACT_MSB0_INT (insn, 32, 16, 16);
+
+ /* Record the fields for the semantic handler. */
+ FLD (f_simm16) = f_simm16;
+ FLD (i_src1) = & CPU (h_gr)[f_r1];
+ FLD (i_src2) = & CPU (h_gr)[f_r2];
+ TRACE_EXTRACT (current_cpu, abuf, (current_cpu, pc, "sfmt_st_d", "f_simm16 0x%x", 'x', f_simm16, "src1 0x%x", 'x', f_r1, "src2 0x%x", 'x', f_r2, (char *) 0));
+
+#if WITH_PROFILE_MODEL_P
+ /* Record the fields for profiling. */
+ if (PROFILE_MODEL_P (current_cpu))
+ {
+ FLD (in_src1) = f_r1;
+ FLD (in_src2) = f_r2;
+ }
+#endif
+#undef FLD
+ return idesc;
+ }
+
+ extract_sfmt_stb:
+ {
+ const IDESC *idesc = &m32rxf_insn_data[itype];
+ CGEN_INSN_INT insn = entire_insn;
+#define FLD(f) abuf->fields.sfmt_st_plus.f
+ UINT f_r1;
+ UINT f_r2;
+
+ f_r1 = EXTRACT_MSB0_UINT (insn, 16, 4, 4);
+ f_r2 = EXTRACT_MSB0_UINT (insn, 16, 12, 4);
+
+ /* Record the fields for the semantic handler. */
+ FLD (i_src1) = & CPU (h_gr)[f_r1];
+ FLD (i_src2) = & CPU (h_gr)[f_r2];
+ TRACE_EXTRACT (current_cpu, abuf, (current_cpu, pc, "sfmt_stb", "src1 0x%x", 'x', f_r1, "src2 0x%x", 'x', f_r2, (char *) 0));
+
+#if WITH_PROFILE_MODEL_P
+ /* Record the fields for profiling. */
+ if (PROFILE_MODEL_P (current_cpu))
+ {
+ FLD (in_src1) = f_r1;
+ FLD (in_src2) = f_r2;
+ }
+#endif
+#undef FLD
+ return idesc;
+ }
+
+ extract_sfmt_stb_d:
+ {
+ const IDESC *idesc = &m32rxf_insn_data[itype];
+ CGEN_INSN_INT insn = entire_insn;
+#define FLD(f) abuf->fields.sfmt_st_d.f
+ UINT f_r1;
+ UINT f_r2;
+ INT f_simm16;
+
+ f_r1 = EXTRACT_MSB0_UINT (insn, 32, 4, 4);
+ f_r2 = EXTRACT_MSB0_UINT (insn, 32, 12, 4);
+ f_simm16 = EXTRACT_MSB0_INT (insn, 32, 16, 16);
+
+ /* Record the fields for the semantic handler. */
+ FLD (f_simm16) = f_simm16;
+ FLD (i_src1) = & CPU (h_gr)[f_r1];
+ FLD (i_src2) = & CPU (h_gr)[f_r2];
+ TRACE_EXTRACT (current_cpu, abuf, (current_cpu, pc, "sfmt_stb_d", "f_simm16 0x%x", 'x', f_simm16, "src1 0x%x", 'x', f_r1, "src2 0x%x", 'x', f_r2, (char *) 0));
+
+#if WITH_PROFILE_MODEL_P
+ /* Record the fields for profiling. */
+ if (PROFILE_MODEL_P (current_cpu))
+ {
+ FLD (in_src1) = f_r1;
+ FLD (in_src2) = f_r2;
+ }
+#endif
+#undef FLD
+ return idesc;
+ }
+
+ extract_sfmt_sth:
+ {
+ const IDESC *idesc = &m32rxf_insn_data[itype];
+ CGEN_INSN_INT insn = entire_insn;
+#define FLD(f) abuf->fields.sfmt_st_plus.f
+ UINT f_r1;
+ UINT f_r2;
+
+ f_r1 = EXTRACT_MSB0_UINT (insn, 16, 4, 4);
+ f_r2 = EXTRACT_MSB0_UINT (insn, 16, 12, 4);
+
+ /* Record the fields for the semantic handler. */
+ FLD (i_src1) = & CPU (h_gr)[f_r1];
+ FLD (i_src2) = & CPU (h_gr)[f_r2];
+ TRACE_EXTRACT (current_cpu, abuf, (current_cpu, pc, "sfmt_sth", "src1 0x%x", 'x', f_r1, "src2 0x%x", 'x', f_r2, (char *) 0));
+
+#if WITH_PROFILE_MODEL_P
+ /* Record the fields for profiling. */
+ if (PROFILE_MODEL_P (current_cpu))
+ {
+ FLD (in_src1) = f_r1;
+ FLD (in_src2) = f_r2;
+ }
+#endif
+#undef FLD
+ return idesc;
+ }
+
+ extract_sfmt_sth_d:
+ {
+ const IDESC *idesc = &m32rxf_insn_data[itype];
+ CGEN_INSN_INT insn = entire_insn;
+#define FLD(f) abuf->fields.sfmt_st_d.f
+ UINT f_r1;
+ UINT f_r2;
+ INT f_simm16;
+
+ f_r1 = EXTRACT_MSB0_UINT (insn, 32, 4, 4);
+ f_r2 = EXTRACT_MSB0_UINT (insn, 32, 12, 4);
+ f_simm16 = EXTRACT_MSB0_INT (insn, 32, 16, 16);
+
+ /* Record the fields for the semantic handler. */
+ FLD (f_simm16) = f_simm16;
+ FLD (i_src1) = & CPU (h_gr)[f_r1];
+ FLD (i_src2) = & CPU (h_gr)[f_r2];
+ TRACE_EXTRACT (current_cpu, abuf, (current_cpu, pc, "sfmt_sth_d", "f_simm16 0x%x", 'x', f_simm16, "src1 0x%x", 'x', f_r1, "src2 0x%x", 'x', f_r2, (char *) 0));
+
+#if WITH_PROFILE_MODEL_P
+ /* Record the fields for profiling. */
+ if (PROFILE_MODEL_P (current_cpu))
+ {
+ FLD (in_src1) = f_r1;
+ FLD (in_src2) = f_r2;
+ }
+#endif
+#undef FLD
+ return idesc;
+ }
+
+ extract_sfmt_st_plus:
+ {
+ const IDESC *idesc = &m32rxf_insn_data[itype];
+ CGEN_INSN_INT insn = entire_insn;
+#define FLD(f) abuf->fields.sfmt_st_plus.f
+ UINT f_r1;
+ UINT f_r2;
+
+ f_r1 = EXTRACT_MSB0_UINT (insn, 16, 4, 4);
+ f_r2 = EXTRACT_MSB0_UINT (insn, 16, 12, 4);
+
+ /* Record the fields for the semantic handler. */
+ FLD (i_src1) = & CPU (h_gr)[f_r1];
+ FLD (i_src2) = & CPU (h_gr)[f_r2];
+ TRACE_EXTRACT (current_cpu, abuf, (current_cpu, pc, "sfmt_st_plus", "src1 0x%x", 'x', f_r1, "src2 0x%x", 'x', f_r2, (char *) 0));
+
+#if WITH_PROFILE_MODEL_P
+ /* Record the fields for profiling. */
+ if (PROFILE_MODEL_P (current_cpu))
+ {
+ FLD (in_src1) = f_r1;
+ FLD (in_src2) = f_r2;
+ FLD (out_src2) = f_r2;
+ }
+#endif
+#undef FLD
+ return idesc;
+ }
+
+ extract_sfmt_trap:
+ {
+ const IDESC *idesc = &m32rxf_insn_data[itype];
+ CGEN_INSN_INT insn = entire_insn;
+#define FLD(f) abuf->fields.sfmt_trap.f
+ UINT f_uimm4;
+
+ f_uimm4 = EXTRACT_MSB0_UINT (insn, 16, 12, 4);
+
+ /* Record the fields for the semantic handler. */
+ FLD (f_uimm4) = f_uimm4;
+ TRACE_EXTRACT (current_cpu, abuf, (current_cpu, pc, "sfmt_trap", "f_uimm4 0x%x", 'x', f_uimm4, (char *) 0));
+
+#if WITH_PROFILE_MODEL_P
+ /* Record the fields for profiling. */
+ if (PROFILE_MODEL_P (current_cpu))
+ {
+ }
+#endif
+#undef FLD
+ return idesc;
+ }
+
+ extract_sfmt_unlock:
+ {
+ const IDESC *idesc = &m32rxf_insn_data[itype];
+ CGEN_INSN_INT insn = entire_insn;
+#define FLD(f) abuf->fields.sfmt_st_plus.f
+ UINT f_r1;
+ UINT f_r2;
+
+ f_r1 = EXTRACT_MSB0_UINT (insn, 16, 4, 4);
+ f_r2 = EXTRACT_MSB0_UINT (insn, 16, 12, 4);
+
+ /* Record the fields for the semantic handler. */
+ FLD (i_src1) = & CPU (h_gr)[f_r1];
+ FLD (i_src2) = & CPU (h_gr)[f_r2];
+ TRACE_EXTRACT (current_cpu, abuf, (current_cpu, pc, "sfmt_unlock", "src1 0x%x", 'x', f_r1, "src2 0x%x", 'x', f_r2, (char *) 0));
+
+#if WITH_PROFILE_MODEL_P
+ /* Record the fields for profiling. */
+ if (PROFILE_MODEL_P (current_cpu))
+ {
+ FLD (in_src1) = f_r1;
+ FLD (in_src2) = f_r2;
+ }
+#endif
+#undef FLD
+ return idesc;
+ }
+
+ extract_sfmt_satb:
+ {
+ const IDESC *idesc = &m32rxf_insn_data[itype];
+ CGEN_INSN_INT insn = entire_insn;
+#define FLD(f) abuf->fields.sfmt_ld_plus.f
+ UINT f_r1;
+ UINT f_r2;
+
+ f_r1 = EXTRACT_MSB0_UINT (insn, 32, 4, 4);
+ f_r2 = EXTRACT_MSB0_UINT (insn, 32, 12, 4);
+
+ /* Record the fields for the semantic handler. */
+ FLD (i_sr) = & CPU (h_gr)[f_r2];
+ FLD (i_dr) = & CPU (h_gr)[f_r1];
+ TRACE_EXTRACT (current_cpu, abuf, (current_cpu, pc, "sfmt_satb", "sr 0x%x", 'x', f_r2, "dr 0x%x", 'x', f_r1, (char *) 0));
+
+#if WITH_PROFILE_MODEL_P
+ /* Record the fields for profiling. */
+ if (PROFILE_MODEL_P (current_cpu))
+ {
+ FLD (in_sr) = f_r2;
+ FLD (out_dr) = f_r1;
+ }
+#endif
+#undef FLD
+ return idesc;
+ }
+
+ extract_sfmt_sat:
+ {
+ const IDESC *idesc = &m32rxf_insn_data[itype];
+ CGEN_INSN_INT insn = entire_insn;
+#define FLD(f) abuf->fields.sfmt_ld_plus.f
+ UINT f_r1;
+ UINT f_r2;
+
+ f_r1 = EXTRACT_MSB0_UINT (insn, 32, 4, 4);
+ f_r2 = EXTRACT_MSB0_UINT (insn, 32, 12, 4);
+
+ /* Record the fields for the semantic handler. */
+ FLD (i_sr) = & CPU (h_gr)[f_r2];
+ FLD (i_dr) = & CPU (h_gr)[f_r1];
+ TRACE_EXTRACT (current_cpu, abuf, (current_cpu, pc, "sfmt_sat", "sr 0x%x", 'x', f_r2, "dr 0x%x", 'x', f_r1, (char *) 0));
+
+#if WITH_PROFILE_MODEL_P
+ /* Record the fields for profiling. */
+ if (PROFILE_MODEL_P (current_cpu))
+ {
+ FLD (in_sr) = f_r2;
+ FLD (out_dr) = f_r1;
+ }
+#endif
+#undef FLD
+ return idesc;
+ }
+
+ extract_sfmt_sadd:
+ {
+ const IDESC *idesc = &m32rxf_insn_data[itype];
+ CGEN_INSN_INT insn = entire_insn;
+#define FLD(f) abuf->fields.fmt_empty.f
+
+
+ /* Record the fields for the semantic handler. */
+ TRACE_EXTRACT (current_cpu, abuf, (current_cpu, pc, "sfmt_sadd", (char *) 0));
+
+#undef FLD
+ return idesc;
+ }
+
+ extract_sfmt_macwu1:
+ {
+ const IDESC *idesc = &m32rxf_insn_data[itype];
+ CGEN_INSN_INT insn = entire_insn;
+#define FLD(f) abuf->fields.sfmt_st_plus.f
+ UINT f_r1;
+ UINT f_r2;
+
+ f_r1 = EXTRACT_MSB0_UINT (insn, 16, 4, 4);
+ f_r2 = EXTRACT_MSB0_UINT (insn, 16, 12, 4);
+
+ /* Record the fields for the semantic handler. */
+ FLD (i_src1) = & CPU (h_gr)[f_r1];
+ FLD (i_src2) = & CPU (h_gr)[f_r2];
+ TRACE_EXTRACT (current_cpu, abuf, (current_cpu, pc, "sfmt_macwu1", "src1 0x%x", 'x', f_r1, "src2 0x%x", 'x', f_r2, (char *) 0));
+
+#if WITH_PROFILE_MODEL_P
+ /* Record the fields for profiling. */
+ if (PROFILE_MODEL_P (current_cpu))
+ {
+ FLD (in_src1) = f_r1;
+ FLD (in_src2) = f_r2;
+ }
+#endif
+#undef FLD
+ return idesc;
+ }
+
+ extract_sfmt_msblo:
+ {
+ const IDESC *idesc = &m32rxf_insn_data[itype];
+ CGEN_INSN_INT insn = entire_insn;
+#define FLD(f) abuf->fields.sfmt_st_plus.f
+ UINT f_r1;
+ UINT f_r2;
+
+ f_r1 = EXTRACT_MSB0_UINT (insn, 16, 4, 4);
+ f_r2 = EXTRACT_MSB0_UINT (insn, 16, 12, 4);
+
+ /* Record the fields for the semantic handler. */
+ FLD (i_src1) = & CPU (h_gr)[f_r1];
+ FLD (i_src2) = & CPU (h_gr)[f_r2];
+ TRACE_EXTRACT (current_cpu, abuf, (current_cpu, pc, "sfmt_msblo", "src1 0x%x", 'x', f_r1, "src2 0x%x", 'x', f_r2, (char *) 0));
+
+#if WITH_PROFILE_MODEL_P
+ /* Record the fields for profiling. */
+ if (PROFILE_MODEL_P (current_cpu))
+ {
+ FLD (in_src1) = f_r1;
+ FLD (in_src2) = f_r2;
+ }
+#endif
+#undef FLD
+ return idesc;
+ }
+
+ extract_sfmt_mulwu1:
+ {
+ const IDESC *idesc = &m32rxf_insn_data[itype];
+ CGEN_INSN_INT insn = entire_insn;
+#define FLD(f) abuf->fields.sfmt_st_plus.f
+ UINT f_r1;
+ UINT f_r2;
+
+ f_r1 = EXTRACT_MSB0_UINT (insn, 16, 4, 4);
+ f_r2 = EXTRACT_MSB0_UINT (insn, 16, 12, 4);
+
+ /* Record the fields for the semantic handler. */
+ FLD (i_src1) = & CPU (h_gr)[f_r1];
+ FLD (i_src2) = & CPU (h_gr)[f_r2];
+ TRACE_EXTRACT (current_cpu, abuf, (current_cpu, pc, "sfmt_mulwu1", "src1 0x%x", 'x', f_r1, "src2 0x%x", 'x', f_r2, (char *) 0));
+
+#if WITH_PROFILE_MODEL_P
+ /* Record the fields for profiling. */
+ if (PROFILE_MODEL_P (current_cpu))
+ {
+ FLD (in_src1) = f_r1;
+ FLD (in_src2) = f_r2;
+ }
+#endif
+#undef FLD
+ return idesc;
+ }
+
+ extract_sfmt_sc:
+ {
+ const IDESC *idesc = &m32rxf_insn_data[itype];
+ CGEN_INSN_INT insn = entire_insn;
+#define FLD(f) abuf->fields.fmt_empty.f
+
+
+ /* Record the fields for the semantic handler. */
+ TRACE_EXTRACT (current_cpu, abuf, (current_cpu, pc, "sfmt_sc", (char *) 0));
+
+#undef FLD
+ return idesc;
+ }
+
+}
diff --git a/sim/m32r/decodex.h b/sim/m32r/decodex.h
new file mode 100644
index 00000000000..7c6c2185847
--- /dev/null
+++ b/sim/m32r/decodex.h
@@ -0,0 +1,143 @@
+/* Decode header for m32rxf.
+
+THIS FILE IS MACHINE GENERATED WITH CGEN.
+
+Copyright (C) 1996, 1997, 1998, 1999 Free Software Foundation, Inc.
+
+This file is part of the GNU Simulators.
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License along
+with this program; if not, write to the Free Software Foundation, Inc.,
+59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+
+*/
+
+#ifndef M32RXF_DECODE_H
+#define M32RXF_DECODE_H
+
+extern const IDESC *m32rxf_decode (SIM_CPU *, IADDR,
+ CGEN_INSN_INT, CGEN_INSN_INT,
+ ARGBUF *);
+extern void m32rxf_init_idesc_table (SIM_CPU *);
+extern void m32rxf_sem_init_idesc_table (SIM_CPU *);
+extern void m32rxf_semf_init_idesc_table (SIM_CPU *);
+
+/* Enum declaration for instructions in cpu family m32rxf. */
+typedef enum m32rxf_insn_type {
+ M32RXF_INSN_X_INVALID, M32RXF_INSN_X_AFTER, M32RXF_INSN_X_BEFORE, M32RXF_INSN_X_CTI_CHAIN
+ , M32RXF_INSN_X_CHAIN, M32RXF_INSN_X_BEGIN, M32RXF_INSN_ADD, M32RXF_INSN_ADD3
+ , M32RXF_INSN_AND, M32RXF_INSN_AND3, M32RXF_INSN_OR, M32RXF_INSN_OR3
+ , M32RXF_INSN_XOR, M32RXF_INSN_XOR3, M32RXF_INSN_ADDI, M32RXF_INSN_ADDV
+ , M32RXF_INSN_ADDV3, M32RXF_INSN_ADDX, M32RXF_INSN_BC8, M32RXF_INSN_BC24
+ , M32RXF_INSN_BEQ, M32RXF_INSN_BEQZ, M32RXF_INSN_BGEZ, M32RXF_INSN_BGTZ
+ , M32RXF_INSN_BLEZ, M32RXF_INSN_BLTZ, M32RXF_INSN_BNEZ, M32RXF_INSN_BL8
+ , M32RXF_INSN_BL24, M32RXF_INSN_BCL8, M32RXF_INSN_BCL24, M32RXF_INSN_BNC8
+ , M32RXF_INSN_BNC24, M32RXF_INSN_BNE, M32RXF_INSN_BRA8, M32RXF_INSN_BRA24
+ , M32RXF_INSN_BNCL8, M32RXF_INSN_BNCL24, M32RXF_INSN_CMP, M32RXF_INSN_CMPI
+ , M32RXF_INSN_CMPU, M32RXF_INSN_CMPUI, M32RXF_INSN_CMPEQ, M32RXF_INSN_CMPZ
+ , M32RXF_INSN_DIV, M32RXF_INSN_DIVU, M32RXF_INSN_REM, M32RXF_INSN_REMU
+ , M32RXF_INSN_DIVH, M32RXF_INSN_JC, M32RXF_INSN_JNC, M32RXF_INSN_JL
+ , M32RXF_INSN_JMP, M32RXF_INSN_LD, M32RXF_INSN_LD_D, M32RXF_INSN_LDB
+ , M32RXF_INSN_LDB_D, M32RXF_INSN_LDH, M32RXF_INSN_LDH_D, M32RXF_INSN_LDUB
+ , M32RXF_INSN_LDUB_D, M32RXF_INSN_LDUH, M32RXF_INSN_LDUH_D, M32RXF_INSN_LD_PLUS
+ , M32RXF_INSN_LD24, M32RXF_INSN_LDI8, M32RXF_INSN_LDI16, M32RXF_INSN_LOCK
+ , M32RXF_INSN_MACHI_A, M32RXF_INSN_MACLO_A, M32RXF_INSN_MACWHI_A, M32RXF_INSN_MACWLO_A
+ , M32RXF_INSN_MUL, M32RXF_INSN_MULHI_A, M32RXF_INSN_MULLO_A, M32RXF_INSN_MULWHI_A
+ , M32RXF_INSN_MULWLO_A, M32RXF_INSN_MV, M32RXF_INSN_MVFACHI_A, M32RXF_INSN_MVFACLO_A
+ , M32RXF_INSN_MVFACMI_A, M32RXF_INSN_MVFC, M32RXF_INSN_MVTACHI_A, M32RXF_INSN_MVTACLO_A
+ , M32RXF_INSN_MVTC, M32RXF_INSN_NEG, M32RXF_INSN_NOP, M32RXF_INSN_NOT
+ , M32RXF_INSN_RAC_DSI, M32RXF_INSN_RACH_DSI, M32RXF_INSN_RTE, M32RXF_INSN_SETH
+ , M32RXF_INSN_SLL, M32RXF_INSN_SLL3, M32RXF_INSN_SLLI, M32RXF_INSN_SRA
+ , M32RXF_INSN_SRA3, M32RXF_INSN_SRAI, M32RXF_INSN_SRL, M32RXF_INSN_SRL3
+ , M32RXF_INSN_SRLI, M32RXF_INSN_ST, M32RXF_INSN_ST_D, M32RXF_INSN_STB
+ , M32RXF_INSN_STB_D, M32RXF_INSN_STH, M32RXF_INSN_STH_D, M32RXF_INSN_ST_PLUS
+ , M32RXF_INSN_ST_MINUS, M32RXF_INSN_SUB, M32RXF_INSN_SUBV, M32RXF_INSN_SUBX
+ , M32RXF_INSN_TRAP, M32RXF_INSN_UNLOCK, M32RXF_INSN_SATB, M32RXF_INSN_SATH
+ , M32RXF_INSN_SAT, M32RXF_INSN_PCMPBZ, M32RXF_INSN_SADD, M32RXF_INSN_MACWU1
+ , M32RXF_INSN_MSBLO, M32RXF_INSN_MULWU1, M32RXF_INSN_MACLH1, M32RXF_INSN_SC
+ , M32RXF_INSN_SNC, M32RXF_INSN_PAR_ADD, M32RXF_INSN_WRITE_ADD, M32RXF_INSN_PAR_AND
+ , M32RXF_INSN_WRITE_AND, M32RXF_INSN_PAR_OR, M32RXF_INSN_WRITE_OR, M32RXF_INSN_PAR_XOR
+ , M32RXF_INSN_WRITE_XOR, M32RXF_INSN_PAR_ADDI, M32RXF_INSN_WRITE_ADDI, M32RXF_INSN_PAR_ADDV
+ , M32RXF_INSN_WRITE_ADDV, M32RXF_INSN_PAR_ADDX, M32RXF_INSN_WRITE_ADDX, M32RXF_INSN_PAR_BC8
+ , M32RXF_INSN_WRITE_BC8, M32RXF_INSN_PAR_BL8, M32RXF_INSN_WRITE_BL8, M32RXF_INSN_PAR_BCL8
+ , M32RXF_INSN_WRITE_BCL8, M32RXF_INSN_PAR_BNC8, M32RXF_INSN_WRITE_BNC8, M32RXF_INSN_PAR_BRA8
+ , M32RXF_INSN_WRITE_BRA8, M32RXF_INSN_PAR_BNCL8, M32RXF_INSN_WRITE_BNCL8, M32RXF_INSN_PAR_CMP
+ , M32RXF_INSN_WRITE_CMP, M32RXF_INSN_PAR_CMPU, M32RXF_INSN_WRITE_CMPU, M32RXF_INSN_PAR_CMPEQ
+ , M32RXF_INSN_WRITE_CMPEQ, M32RXF_INSN_PAR_CMPZ, M32RXF_INSN_WRITE_CMPZ, M32RXF_INSN_PAR_JC
+ , M32RXF_INSN_WRITE_JC, M32RXF_INSN_PAR_JNC, M32RXF_INSN_WRITE_JNC, M32RXF_INSN_PAR_JL
+ , M32RXF_INSN_WRITE_JL, M32RXF_INSN_PAR_JMP, M32RXF_INSN_WRITE_JMP, M32RXF_INSN_PAR_LD
+ , M32RXF_INSN_WRITE_LD, M32RXF_INSN_PAR_LDB, M32RXF_INSN_WRITE_LDB, M32RXF_INSN_PAR_LDH
+ , M32RXF_INSN_WRITE_LDH, M32RXF_INSN_PAR_LDUB, M32RXF_INSN_WRITE_LDUB, M32RXF_INSN_PAR_LDUH
+ , M32RXF_INSN_WRITE_LDUH, M32RXF_INSN_PAR_LD_PLUS, M32RXF_INSN_WRITE_LD_PLUS, M32RXF_INSN_PAR_LDI8
+ , M32RXF_INSN_WRITE_LDI8, M32RXF_INSN_PAR_LOCK, M32RXF_INSN_WRITE_LOCK, M32RXF_INSN_PAR_MACHI_A
+ , M32RXF_INSN_WRITE_MACHI_A, M32RXF_INSN_PAR_MACLO_A, M32RXF_INSN_WRITE_MACLO_A, M32RXF_INSN_PAR_MACWHI_A
+ , M32RXF_INSN_WRITE_MACWHI_A, M32RXF_INSN_PAR_MACWLO_A, M32RXF_INSN_WRITE_MACWLO_A, M32RXF_INSN_PAR_MUL
+ , M32RXF_INSN_WRITE_MUL, M32RXF_INSN_PAR_MULHI_A, M32RXF_INSN_WRITE_MULHI_A, M32RXF_INSN_PAR_MULLO_A
+ , M32RXF_INSN_WRITE_MULLO_A, M32RXF_INSN_PAR_MULWHI_A, M32RXF_INSN_WRITE_MULWHI_A, M32RXF_INSN_PAR_MULWLO_A
+ , M32RXF_INSN_WRITE_MULWLO_A, M32RXF_INSN_PAR_MV, M32RXF_INSN_WRITE_MV, M32RXF_INSN_PAR_MVFACHI_A
+ , M32RXF_INSN_WRITE_MVFACHI_A, M32RXF_INSN_PAR_MVFACLO_A, M32RXF_INSN_WRITE_MVFACLO_A, M32RXF_INSN_PAR_MVFACMI_A
+ , M32RXF_INSN_WRITE_MVFACMI_A, M32RXF_INSN_PAR_MVFC, M32RXF_INSN_WRITE_MVFC, M32RXF_INSN_PAR_MVTACHI_A
+ , M32RXF_INSN_WRITE_MVTACHI_A, M32RXF_INSN_PAR_MVTACLO_A, M32RXF_INSN_WRITE_MVTACLO_A, M32RXF_INSN_PAR_MVTC
+ , M32RXF_INSN_WRITE_MVTC, M32RXF_INSN_PAR_NEG, M32RXF_INSN_WRITE_NEG, M32RXF_INSN_PAR_NOP
+ , M32RXF_INSN_WRITE_NOP, M32RXF_INSN_PAR_NOT, M32RXF_INSN_WRITE_NOT, M32RXF_INSN_PAR_RAC_DSI
+ , M32RXF_INSN_WRITE_RAC_DSI, M32RXF_INSN_PAR_RACH_DSI, M32RXF_INSN_WRITE_RACH_DSI, M32RXF_INSN_PAR_RTE
+ , M32RXF_INSN_WRITE_RTE, M32RXF_INSN_PAR_SLL, M32RXF_INSN_WRITE_SLL, M32RXF_INSN_PAR_SLLI
+ , M32RXF_INSN_WRITE_SLLI, M32RXF_INSN_PAR_SRA, M32RXF_INSN_WRITE_SRA, M32RXF_INSN_PAR_SRAI
+ , M32RXF_INSN_WRITE_SRAI, M32RXF_INSN_PAR_SRL, M32RXF_INSN_WRITE_SRL, M32RXF_INSN_PAR_SRLI
+ , M32RXF_INSN_WRITE_SRLI, M32RXF_INSN_PAR_ST, M32RXF_INSN_WRITE_ST, M32RXF_INSN_PAR_STB
+ , M32RXF_INSN_WRITE_STB, M32RXF_INSN_PAR_STH, M32RXF_INSN_WRITE_STH, M32RXF_INSN_PAR_ST_PLUS
+ , M32RXF_INSN_WRITE_ST_PLUS, M32RXF_INSN_PAR_ST_MINUS, M32RXF_INSN_WRITE_ST_MINUS, M32RXF_INSN_PAR_SUB
+ , M32RXF_INSN_WRITE_SUB, M32RXF_INSN_PAR_SUBV, M32RXF_INSN_WRITE_SUBV, M32RXF_INSN_PAR_SUBX
+ , M32RXF_INSN_WRITE_SUBX, M32RXF_INSN_PAR_TRAP, M32RXF_INSN_WRITE_TRAP, M32RXF_INSN_PAR_UNLOCK
+ , M32RXF_INSN_WRITE_UNLOCK, M32RXF_INSN_PAR_PCMPBZ, M32RXF_INSN_WRITE_PCMPBZ, M32RXF_INSN_PAR_SADD
+ , M32RXF_INSN_WRITE_SADD, M32RXF_INSN_PAR_MACWU1, M32RXF_INSN_WRITE_MACWU1, M32RXF_INSN_PAR_MSBLO
+ , M32RXF_INSN_WRITE_MSBLO, M32RXF_INSN_PAR_MULWU1, M32RXF_INSN_WRITE_MULWU1, M32RXF_INSN_PAR_MACLH1
+ , M32RXF_INSN_WRITE_MACLH1, M32RXF_INSN_PAR_SC, M32RXF_INSN_WRITE_SC, M32RXF_INSN_PAR_SNC
+ , M32RXF_INSN_WRITE_SNC, M32RXF_INSN_MAX
+} M32RXF_INSN_TYPE;
+
+/* Enum declaration for semantic formats in cpu family m32rxf. */
+typedef enum m32rxf_sfmt_type {
+ M32RXF_SFMT_EMPTY, M32RXF_SFMT_ADD, M32RXF_SFMT_ADD3, M32RXF_SFMT_AND3
+ , M32RXF_SFMT_OR3, M32RXF_SFMT_ADDI, M32RXF_SFMT_ADDV, M32RXF_SFMT_ADDV3
+ , M32RXF_SFMT_ADDX, M32RXF_SFMT_BC8, M32RXF_SFMT_BC24, M32RXF_SFMT_BEQ
+ , M32RXF_SFMT_BEQZ, M32RXF_SFMT_BL8, M32RXF_SFMT_BL24, M32RXF_SFMT_BCL8
+ , M32RXF_SFMT_BCL24, M32RXF_SFMT_BRA8, M32RXF_SFMT_BRA24, M32RXF_SFMT_CMP
+ , M32RXF_SFMT_CMPI, M32RXF_SFMT_CMPZ, M32RXF_SFMT_DIV, M32RXF_SFMT_JC
+ , M32RXF_SFMT_JL, M32RXF_SFMT_JMP, M32RXF_SFMT_LD, M32RXF_SFMT_LD_D
+ , M32RXF_SFMT_LD_PLUS, M32RXF_SFMT_LD24, M32RXF_SFMT_LDI8, M32RXF_SFMT_LDI16
+ , M32RXF_SFMT_LOCK, M32RXF_SFMT_MACHI_A, M32RXF_SFMT_MULHI_A, M32RXF_SFMT_MV
+ , M32RXF_SFMT_MVFACHI_A, M32RXF_SFMT_MVFC, M32RXF_SFMT_MVTACHI_A, M32RXF_SFMT_MVTC
+ , M32RXF_SFMT_NOP, M32RXF_SFMT_RAC_DSI, M32RXF_SFMT_RTE, M32RXF_SFMT_SETH
+ , M32RXF_SFMT_SLL3, M32RXF_SFMT_SLLI, M32RXF_SFMT_ST, M32RXF_SFMT_ST_D
+ , M32RXF_SFMT_STB, M32RXF_SFMT_STB_D, M32RXF_SFMT_STH, M32RXF_SFMT_STH_D
+ , M32RXF_SFMT_ST_PLUS, M32RXF_SFMT_TRAP, M32RXF_SFMT_UNLOCK, M32RXF_SFMT_SATB
+ , M32RXF_SFMT_SAT, M32RXF_SFMT_SADD, M32RXF_SFMT_MACWU1, M32RXF_SFMT_MSBLO
+ , M32RXF_SFMT_MULWU1, M32RXF_SFMT_SC
+} M32RXF_SFMT_TYPE;
+
+/* Function unit handlers (user written). */
+
+extern int m32rxf_model_m32rx_u_store (SIM_CPU *, const IDESC *, int /*unit_num*/, int /*referenced*/, INT /*src1*/, INT /*src2*/);
+extern int m32rxf_model_m32rx_u_load (SIM_CPU *, const IDESC *, int /*unit_num*/, int /*referenced*/, INT /*sr*/, INT /*dr*/);
+extern int m32rxf_model_m32rx_u_cti (SIM_CPU *, const IDESC *, int /*unit_num*/, int /*referenced*/, INT /*sr*/);
+extern int m32rxf_model_m32rx_u_mac (SIM_CPU *, const IDESC *, int /*unit_num*/, int /*referenced*/, INT /*src1*/, INT /*src2*/);
+extern int m32rxf_model_m32rx_u_cmp (SIM_CPU *, const IDESC *, int /*unit_num*/, int /*referenced*/, INT /*src1*/, INT /*src2*/);
+extern int m32rxf_model_m32rx_u_exec (SIM_CPU *, const IDESC *, int /*unit_num*/, int /*referenced*/, INT /*sr*/, INT /*sr2*/, INT /*dr*/);
+
+/* Profiling before/after handlers (user written) */
+
+extern void m32rxf_model_insn_before (SIM_CPU *, int /*first_p*/);
+extern void m32rxf_model_insn_after (SIM_CPU *, int /*last_p*/, int /*cycles*/);
+
+#endif /* M32RXF_DECODE_H */
diff --git a/sim/m32r/m32rx.c b/sim/m32r/m32rx.c
new file mode 100644
index 00000000000..cb319f6b024
--- /dev/null
+++ b/sim/m32r/m32rx.c
@@ -0,0 +1,311 @@
+/* m32rx simulator support code
+ Copyright (C) 1997, 1998 Free Software Foundation, Inc.
+ Contributed by Cygnus Support.
+
+This file is part of GDB, the GNU debugger.
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License along
+with this program; if not, write to the Free Software Foundation, Inc.,
+59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */
+
+#define WANT_CPU m32rxf
+#define WANT_CPU_M32RXF
+
+#include "sim-main.h"
+#include "cgen-mem.h"
+#include "cgen-ops.h"
+
+/* The contents of BUF are in target byte order. */
+
+int
+m32rxf_fetch_register (SIM_CPU *current_cpu, int rn, unsigned char *buf, int len)
+{
+ return m32rbf_fetch_register (current_cpu, rn, buf, len);
+}
+
+/* The contents of BUF are in target byte order. */
+
+int
+m32rxf_store_register (SIM_CPU *current_cpu, int rn, unsigned char *buf, int len)
+{
+ return m32rbf_store_register (current_cpu, rn, buf, len);
+}
+
+/* Cover fns to get/set the control registers.
+ FIXME: Duplicated from m32r.c. The issue is structure offsets. */
+
+USI
+m32rxf_h_cr_get_handler (SIM_CPU *current_cpu, UINT cr)
+{
+ switch (cr)
+ {
+ case H_CR_PSW : /* psw */
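+ /* Compose CR0 from the backup PSW bits (BSM, BIE, BC -> bits 15, 14, 8), the PSW bits (SM, IE -> bits 7, 6) and the condition bit (bit 0).  */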
+ return (((CPU (h_bpsw) & 0xc1) << 8)
+ | ((CPU (h_psw) & 0xc0) << 0)
+ | GET_H_COND ());
+ case H_CR_BBPSW : /* backup backup psw */
+ return CPU (h_bbpsw) & 0xc1;
+ case H_CR_CBR : /* condition bit */
+ return GET_H_COND ();
+ case H_CR_SPI : /* interrupt stack pointer */
+ if (! GET_H_SM ())
+ return CPU (h_gr[H_GR_SP]);
+ else
+ return CPU (h_cr[H_CR_SPI]);
+ case H_CR_SPU : /* user stack pointer */
+ if (GET_H_SM ())
+ return CPU (h_gr[H_GR_SP]);
+ else
+ return CPU (h_cr[H_CR_SPU]);
+ case H_CR_BPC : /* backup pc */
+ return CPU (h_cr[H_CR_BPC]) & 0xfffffffe;
+ case H_CR_BBPC : /* backup backup pc */
+ return CPU (h_cr[H_CR_BBPC]) & 0xfffffffe;
+ case 4 : /* ??? unspecified, but apparently available */
+ case 5 : /* ??? unspecified, but apparently available */
+ return CPU (h_cr[cr]);
+ default :
+ return 0;
+ }
+}
+
+void
+m32rxf_h_cr_set_handler (SIM_CPU *current_cpu, UINT cr, USI newval)
+{
+ switch (cr)
+ {
+ case H_CR_PSW : /* psw */
+ {
+ int old_sm = (CPU (h_psw) & 0x80) != 0;
+ int new_sm = (newval & 0x80) != 0;
+ CPU (h_bpsw) = (newval >> 8) & 0xff;
+ CPU (h_psw) = newval & 0xff;
+ SET_H_COND (newval & 1);
+ /* When switching stack modes, update the registers. */
+ if (old_sm != new_sm)
+ {
+ if (old_sm)
+ {
+ /* Switching user -> system. */
+ CPU (h_cr[H_CR_SPU]) = CPU (h_gr[H_GR_SP]);
+ CPU (h_gr[H_GR_SP]) = CPU (h_cr[H_CR_SPI]);
+ }
+ else
+ {
+ /* Switching system -> user. */
+ CPU (h_cr[H_CR_SPI]) = CPU (h_gr[H_GR_SP]);
+ CPU (h_gr[H_GR_SP]) = CPU (h_cr[H_CR_SPU]);
+ }
+ }
+ break;
+ }
+ case H_CR_BBPSW : /* backup backup psw */
+ CPU (h_bbpsw) = newval & 0xff;
+ break;
+ case H_CR_CBR : /* condition bit */
+ SET_H_COND (newval & 1);
+ break;
+ case H_CR_SPI : /* interrupt stack pointer */
+ if (! GET_H_SM ())
+ CPU (h_gr[H_GR_SP]) = newval;
+ else
+ CPU (h_cr[H_CR_SPI]) = newval;
+ break;
+ case H_CR_SPU : /* user stack pointer */
+ if (GET_H_SM ())
+ CPU (h_gr[H_GR_SP]) = newval;
+ else
+ CPU (h_cr[H_CR_SPU]) = newval;
+ break;
+ case H_CR_BPC : /* backup pc */
+ CPU (h_cr[H_CR_BPC]) = newval;
+ break;
+ case H_CR_BBPC : /* backup backup pc */
+ CPU (h_cr[H_CR_BBPC]) = newval;
+ break;
+ case 4 : /* ??? unspecified, but apparently available */
+ case 5 : /* ??? unspecified, but apparently available */
+ CPU (h_cr[cr]) = newval;
+ break;
+ default :
+ /* ignore */
+ break;
+ }
+}
+
+/* Cover fns to access h-psw. */
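+/* Bit 0 of the PSW is the condition bit and is kept separately in h_cond;
+   reads splice it back into bit 0 and writes mirror it into h_cond.  */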
+
+UQI
+m32rxf_h_psw_get_handler (SIM_CPU *current_cpu)
+{
+ return (CPU (h_psw) & 0xfe) | (CPU (h_cond) & 1);
+}
+
+void
+m32rxf_h_psw_set_handler (SIM_CPU *current_cpu, UQI newval)
+{
+ CPU (h_psw) = newval;
+ CPU (h_cond) = newval & 1;
+}
+
+/* Cover fns to access h-accum. */
+
+DI
+m32rxf_h_accum_get_handler (SIM_CPU *current_cpu)
+{
+  /* Sign extend the top 8 bits: mask the value to its low 56 bits, then
+     XORing in and subtracting bit 55 propagates the accumulator's sign
+     through bits 56..63. */
+ DI r;
+ r = ANDDI (CPU (h_accum), MAKEDI (0xffffff, 0xffffffff));
+ r = XORDI (r, MAKEDI (0x800000, 0));
+ r = SUBDI (r, MAKEDI (0x800000, 0));
+ return r;
+}
+
+void
+m32rxf_h_accum_set_handler (SIM_CPU *current_cpu, DI newval)
+{
+ CPU (h_accum) = newval;
+}
+
+/* Cover fns to access h-accums. */
+
+DI
+m32rxf_h_accums_get_handler (SIM_CPU *current_cpu, UINT regno)
+{
+ /* FIXME: Yes, this is just a quick hack. */
+ DI r;
+ if (regno == 0)
+ r = CPU (h_accum);
+ else
+ r = CPU (h_accums[1]);
+ /* Sign extend the top 8 bits. */
+ r = ANDDI (r, MAKEDI (0xffffff, 0xffffffff));
+ r = XORDI (r, MAKEDI (0x800000, 0));
+ r = SUBDI (r, MAKEDI (0x800000, 0));
+ return r;
+}
+
+void
+m32rxf_h_accums_set_handler (SIM_CPU *current_cpu, UINT regno, DI newval)
+{
+ /* FIXME: Yes, this is just a quick hack. */
+ if (regno == 0)
+ CPU (h_accum) = newval;
+ else
+ CPU (h_accums[1]) = newval;
+}
+
+#if WITH_PROFILE_MODEL_P
+
+/* Initialize cycle counting for an insn.
+ FIRST_P is non-zero if this is the first insn in a set of parallel
+ insns. */
+
+void
+m32rxf_model_insn_before (SIM_CPU *cpu, int first_p)
+{
+ m32rbf_model_insn_before (cpu, first_p);
+}
+
+/* Record the cycles computed for an insn.
+ LAST_P is non-zero if this is the last insn in a set of parallel insns,
+ and we update the total cycle count.
+ CYCLES is the cycle count of the insn. */
+
+void
+m32rxf_model_insn_after (SIM_CPU *cpu, int last_p, int cycles)
+{
+ m32rbf_model_insn_after (cpu, last_p, cycles);
+}
+
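+/* Charge a two cycle stall when REGNO was the target of a load in the
+   previous insn.  load_regs is the mask of such registers; u_load below
+   records new load targets in load_regs_pending, which the shared m32rbf
+   model hooks are assumed to promote to load_regs once the insn retires.  */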
+static INLINE void
+check_load_stall (SIM_CPU *cpu, int regno)
+{
+ UINT h_gr = CPU_M32R_MISC_PROFILE (cpu)->load_regs;
+
+ if (regno != -1
+ && (h_gr & (1 << regno)) != 0)
+ {
+ CPU_M32R_MISC_PROFILE (cpu)->load_stall += 2;
+ if (TRACE_INSN_P (cpu))
+ cgen_trace_printf (cpu, " ; Load stall of 2 cycles.");
+ }
+}
+
+int
+m32rxf_model_m32rx_u_exec (SIM_CPU *cpu, const IDESC *idesc,
+ int unit_num, int referenced,
+ INT sr, INT sr2, INT dr)
+{
+ check_load_stall (cpu, sr);
+ check_load_stall (cpu, sr2);
+ return idesc->timing->units[unit_num].done;
+}
+
+int
+m32rxf_model_m32rx_u_cmp (SIM_CPU *cpu, const IDESC *idesc,
+ int unit_num, int referenced,
+ INT src1, INT src2)
+{
+ check_load_stall (cpu, src1);
+ check_load_stall (cpu, src2);
+ return idesc->timing->units[unit_num].done;
+}
+
+int
+m32rxf_model_m32rx_u_mac (SIM_CPU *cpu, const IDESC *idesc,
+ int unit_num, int referenced,
+ INT src1, INT src2)
+{
+ check_load_stall (cpu, src1);
+ check_load_stall (cpu, src2);
+ return idesc->timing->units[unit_num].done;
+}
+
+int
+m32rxf_model_m32rx_u_cti (SIM_CPU *cpu, const IDESC *idesc,
+ int unit_num, int referenced,
+ INT sr)
+{
+ PROFILE_DATA *profile = CPU_PROFILE_DATA (cpu);
+ int taken_p = (referenced & (1 << 1)) != 0;
+
+ check_load_stall (cpu, sr);
+ if (taken_p)
+ {
+ CPU_M32R_MISC_PROFILE (cpu)->cti_stall += 2;
+ PROFILE_MODEL_TAKEN_COUNT (profile) += 1;
+ }
+ else
+ PROFILE_MODEL_UNTAKEN_COUNT (profile) += 1;
+ return idesc->timing->units[unit_num].done;
+}
+
+int
+m32rxf_model_m32rx_u_load (SIM_CPU *cpu, const IDESC *idesc,
+ int unit_num, int referenced,
+ INT sr, INT dr)
+{
+ CPU_M32R_MISC_PROFILE (cpu)->load_regs_pending |= (1 << dr);
+ return idesc->timing->units[unit_num].done;
+}
+
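+/* Stores never stall a following insn in this model; just charge the
+   unit's base cycle count.  */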
+int
+m32rxf_model_m32rx_u_store (SIM_CPU *cpu, const IDESC *idesc,
+ int unit_num, int referenced,
+ INT src1, INT src2)
+{
+ return idesc->timing->units[unit_num].done;
+}
+
+#endif /* WITH_PROFILE_MODEL_P */
diff --git a/sim/m32r/mloopx.in b/sim/m32r/mloopx.in
new file mode 100644
index 00000000000..e1663f799f2
--- /dev/null
+++ b/sim/m32r/mloopx.in
@@ -0,0 +1,484 @@
+# Simulator main loop for m32rx. -*- C -*-
+# Copyright (C) 1996, 1997, 1998 Free Software Foundation, Inc.
+#
+# This file is part of the GNU Simulators.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2, or (at your option)
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+
+# Syntax:
+# /bin/sh mainloop.in command
+#
+# Command is one of:
+#
+# init
+# support
+# extract-{simple,scache,pbb}
+# {full,fast}-exec-{simple,scache,pbb}
+#
+# A target need only provide a "full" version of one of simple,scache,pbb.
+# If the target wants, it can also provide a fast version of the same, or if
+# the slow (full featured) version is `simple', then the fast version can be
+# one of scache/pbb.
+# A target can't provide more than this.
+
+# ??? After a few more ports are done, revisit.
+# Will eventually need to machine generate a lot of this.
+
+case "x$1" in
+
+xsupport)
+
+cat <<EOF
+
+/* Emit insns to write back the results of insns executed in parallel.
+ SC points to a sufficient number of scache entries for the writeback
+ handlers.
+ SC1/ID1 is the first insn (left slot, lower address).
+ SC2/ID2 is the second insn (right slot, higher address). */
+
+static INLINE void
+emit_par_finish (SIM_CPU *current_cpu, PCADDR pc, SCACHE *sc,
+ SCACHE *sc1, const IDESC *id1, SCACHE *sc2, const IDESC *id2)
+{
+ ARGBUF *abuf;
+
+ abuf = &sc->argbuf;
+ id1 = id1->par_idesc;
+ abuf->fields.write.abuf = &sc1->argbuf;
+ @cpu@_fill_argbuf (current_cpu, abuf, id1, pc, 0);
+ /* no need to set trace_p,profile_p */
+#if 0 /* not currently needed for id2 since results written directly */
+ abuf = &sc[1].argbuf;
+ id2 = id2->par_idesc;
+ abuf->fields.write.abuf = &sc2->argbuf;
+ @cpu@_fill_argbuf (current_cpu, abuf, id2, pc + 2, 0);
+ /* no need to set trace_p,profile_p */
+#endif
+}
+
+static INLINE const IDESC *
+emit_16 (SIM_CPU *current_cpu, PCADDR pc, CGEN_INSN_INT insn,
+ SCACHE *sc, int fast_p, int parallel_p)
+{
+ ARGBUF *abuf = &sc->argbuf;
+ const IDESC *id = @cpu@_decode (current_cpu, pc, insn, insn, abuf);
+
+ if (parallel_p)
+ id = id->par_idesc;
+ @cpu@_fill_argbuf (current_cpu, abuf, id, pc, fast_p);
+ return id;
+}
+
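+/* The "full" emitters below bracket each insn with before/after handlers,
+   so they consume three scache entries per insn (six for a parallel pair,
+   which also needs a writeback finisher); the pbb extract loops account
+   for this when decrementing max_insns.  */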
+static INLINE const IDESC *
+emit_full16 (SIM_CPU *current_cpu, PCADDR pc, CGEN_INSN_INT insn, SCACHE *sc,
+ int trace_p, int profile_p)
+{
+ const IDESC *id;
+
+ @cpu@_emit_before (current_cpu, sc, pc, 1);
+ id = emit_16 (current_cpu, pc, insn, sc + 1, 0, 0);
+ @cpu@_emit_after (current_cpu, sc + 2, pc);
+ sc[1].argbuf.trace_p = trace_p;
+ sc[1].argbuf.profile_p = profile_p;
+ return id;
+}
+
+static INLINE const IDESC *
+emit_parallel (SIM_CPU *current_cpu, PCADDR pc, CGEN_INSN_INT insn,
+ SCACHE *sc, int fast_p)
+{
+ const IDESC *id,*id2;
+
+ /* Emit both insns, then emit a finisher-upper.
+ We speed things up by handling the second insn serially
+     [not in parallel]. Then the writeback only has to deal
+ with the first insn. */
+ /* ??? Revisit to handle exceptions right. */
+
+  /* FIXME: No need to handle this as parallel if the second insn is a nop. */
+ id = emit_16 (current_cpu, pc, insn >> 16, sc, fast_p, 1);
+
+ /* Note that this can never be a cti. No cti's go in the S pipeline. */
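+  /* Masking with 0x7fff clears the parallel-execute bit (the MSB of the
+     second halfword) so the right-slot insn decodes as a plain insn.  */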
+ id2 = emit_16 (current_cpu, pc + 2, insn & 0x7fff, sc + 1, fast_p, 0);
+
+  /* Set the sc/snc insns' notion of where to skip to. */
+ if (IDESC_SKIP_P (id))
+ SEM_SKIP_COMPILE (current_cpu, sc, 1);
+
+ /* Emit code to finish executing the semantics
+ (write back the results). */
+ emit_par_finish (current_cpu, pc, sc + 2, sc, id, sc + 1, id2);
+
+ return id;
+}
+
+static INLINE const IDESC *
+emit_full_parallel (SIM_CPU *current_cpu, PCADDR pc, CGEN_INSN_INT insn,
+ SCACHE *sc, int trace_p, int profile_p)
+{
+ const IDESC *id,*id2;
+
+ /* Emit both insns, then emit a finisher-upper.
+ We speed things up by handling the second insn serially
+     [not in parallel]. Then the writeback only has to deal
+ with the first insn. */
+ /* ??? Revisit to handle exceptions right. */
+
+ @cpu@_emit_before (current_cpu, sc, pc, 1);
+
+  /* FIXME: No need to handle this as parallel if the second insn is a nop. */
+ id = emit_16 (current_cpu, pc, insn >> 16, sc + 1, 0, 1);
+ sc[1].argbuf.trace_p = trace_p;
+ sc[1].argbuf.profile_p = profile_p;
+
+ @cpu@_emit_before (current_cpu, sc + 2, pc, 0);
+
+ /* Note that this can never be a cti. No cti's go in the S pipeline. */
+ id2 = emit_16 (current_cpu, pc + 2, insn & 0x7fff, sc + 3, 0, 0);
+ sc[3].argbuf.trace_p = trace_p;
+ sc[3].argbuf.profile_p = profile_p;
+
+  /* Set the sc/snc insns' notion of where to skip to. */
+ if (IDESC_SKIP_P (id))
+ SEM_SKIP_COMPILE (current_cpu, sc, 4);
+
+ /* Emit code to finish executing the semantics
+ (write back the results). */
+ emit_par_finish (current_cpu, pc, sc + 4, sc + 1, id, sc + 3, id2);
+
+ @cpu@_emit_after (current_cpu, sc + 5, pc);
+
+ return id;
+}
+
+static INLINE const IDESC *
+emit_32 (SIM_CPU *current_cpu, PCADDR pc, CGEN_INSN_INT insn,
+ SCACHE *sc, int fast_p)
+{
+ ARGBUF *abuf = &sc->argbuf;
+ const IDESC *id = @cpu@_decode (current_cpu, pc,
+ (USI) insn >> 16, insn, abuf);
+
+ @cpu@_fill_argbuf (current_cpu, abuf, id, pc, fast_p);
+ return id;
+}
+
+static INLINE const IDESC *
+emit_full32 (SIM_CPU *current_cpu, PCADDR pc, CGEN_INSN_INT insn, SCACHE *sc,
+ int trace_p, int profile_p)
+{
+ const IDESC *id;
+
+ @cpu@_emit_before (current_cpu, sc, pc, 1);
+ id = emit_32 (current_cpu, pc, insn, sc + 1, 0);
+ @cpu@_emit_after (current_cpu, sc + 2, pc);
+ sc[1].argbuf.trace_p = trace_p;
+ sc[1].argbuf.profile_p = profile_p;
+ return id;
+}
+
+EOF
+
+;;
+
+xinit)
+
+# Nothing needed.
+
+;;
+
+xextract-pbb)
+
+# Inputs: current_cpu, pc, sc, max_insns, FAST_P
+# Outputs: sc, pc
+# sc must be left pointing past the last created entry.
+# pc must be left pointing past the last created entry.
+# If the pbb is terminated by a cti insn, SET_CTI_VPC(sc) must be called
+# to record the vpc of the cti insn.
+# SET_INSN_COUNT(n) must be called to record the number of real insns.
+
+cat <<EOF
+{
+ const IDESC *idesc;
+ int icount = 0;
+
+ if ((pc & 3) != 0)
+ {
+ /* This occurs when single stepping and when compiling the not-taken
+ part of conditional branches. */
+ UHI insn = GETIMEMUHI (current_cpu, pc);
+ int trace_p = PC_IN_TRACE_RANGE_P (current_cpu, pc);
+ int profile_p = PC_IN_PROFILE_RANGE_P (current_cpu, pc);
+ SCACHE *cti_sc; /* ??? tmp hack */
+
+ /* A parallel insn isn't allowed here, but we don't mind nops.
+ ??? We need to wait until the insn is executed before signalling
+ the error, for situations where such signalling is wanted. */
+#if 0
+ if ((insn & 0x8000) != 0
+ && (insn & 0x7fff) != 0x7000) /* parallel nops are ok */
+ sim_engine_invalid_insn (current_cpu, pc, 0);
+#endif
+
+ /* Only emit before/after handlers if necessary. */
+ if (FAST_P || (! trace_p && ! profile_p))
+ {
+ idesc = emit_16 (current_cpu, pc, insn & 0x7fff, sc, FAST_P, 0);
+ cti_sc = sc;
+ ++sc;
+ --max_insns;
+ }
+ else
+ {
+ idesc = emit_full16 (current_cpu, pc, insn & 0x7fff, sc,
+ trace_p, profile_p);
+ cti_sc = sc + 1;
+ sc += 3;
+ max_insns -= 3;
+ }
+ ++icount;
+ pc += 2;
+ if (IDESC_CTI_P (idesc))
+ {
+ SET_CTI_VPC (cti_sc);
+ goto Finish;
+ }
+ }
+
+ /* There are two copies of the compiler: full(!fast) and fast.
+ The "full" case emits before/after handlers for each insn.
+     Having two copies of this code is a tradeoff; having one copy
+ seemed a bit more difficult to read (due to constantly testing
+ FAST_P). ??? On the other hand, with address ranges we'll want to
+ omit before/after handlers for unwanted insns. Having separate loops
+ for FAST/!FAST avoids constantly doing the test in the loop, but
+ typically FAST_P is a constant and such tests will get optimized out. */
+
+ if (FAST_P)
+ {
+ while (max_insns > 0)
+ {
+ USI insn = GETIMEMUSI (current_cpu, pc);
+ if ((SI) insn < 0)
+ {
+ /* 32 bit insn */
+ idesc = emit_32 (current_cpu, pc, insn, sc, 1);
+ ++sc;
+ --max_insns;
+ ++icount;
+ pc += 4;
+ if (IDESC_CTI_P (idesc))
+ {
+ SET_CTI_VPC (sc - 1);
+ break;
+ }
+ }
+ else
+ {
+ if ((insn & 0x8000) != 0) /* parallel? */
+ {
+ /* Yep. Here's the "interesting" [sic] part. */
+ idesc = emit_parallel (current_cpu, pc, insn, sc, 1);
+ sc += 3;
+ max_insns -= 3;
+ icount += 2;
+ pc += 4;
+ if (IDESC_CTI_P (idesc))
+ {
+ SET_CTI_VPC (sc - 3);
+ break;
+ }
+ }
+ else /* 2 serial 16 bit insns */
+ {
+ idesc = emit_16 (current_cpu, pc, insn >> 16, sc, 1, 0);
+ ++sc;
+ --max_insns;
+ ++icount;
+ pc += 2;
+ if (IDESC_CTI_P (idesc))
+ {
+ SET_CTI_VPC (sc - 1);
+ break;
+ }
+ /* While we're guaranteed that there's room to extract the
+ insn, when single stepping we can't; the pbb must stop
+ after the first insn. */
+ if (max_insns == 0)
+ break;
+ idesc = emit_16 (current_cpu, pc, insn & 0x7fff, sc, 1, 0);
+ ++sc;
+ --max_insns;
+ ++icount;
+ pc += 2;
+ if (IDESC_CTI_P (idesc))
+ {
+ SET_CTI_VPC (sc - 1);
+ break;
+ }
+ }
+ }
+ }
+ }
+ else /* ! FAST_P */
+ {
+ while (max_insns > 0)
+ {
+ USI insn = GETIMEMUSI (current_cpu, pc);
+ int trace_p = PC_IN_TRACE_RANGE_P (current_cpu, pc);
+ int profile_p = PC_IN_PROFILE_RANGE_P (current_cpu, pc);
+ SCACHE *cti_sc; /* ??? tmp hack */
+ if ((SI) insn < 0)
+ {
+ /* 32 bit insn
+ Only emit before/after handlers if necessary. */
+ if (trace_p || profile_p)
+ {
+ idesc = emit_full32 (current_cpu, pc, insn, sc,
+ trace_p, profile_p);
+ cti_sc = sc + 1;
+ sc += 3;
+ max_insns -= 3;
+ }
+ else
+ {
+ idesc = emit_32 (current_cpu, pc, insn, sc, 0);
+ cti_sc = sc;
+ ++sc;
+ --max_insns;
+ }
+ ++icount;
+ pc += 4;
+ if (IDESC_CTI_P (idesc))
+ {
+ SET_CTI_VPC (cti_sc);
+ break;
+ }
+ }
+ else
+ {
+ if ((insn & 0x8000) != 0) /* parallel? */
+ {
+ /* Yep. Here's the "interesting" [sic] part.
+ Only emit before/after handlers if necessary. */
+ if (trace_p || profile_p)
+ {
+ idesc = emit_full_parallel (current_cpu, pc, insn, sc,
+ trace_p, profile_p);
+ cti_sc = sc + 1;
+ sc += 6;
+ max_insns -= 6;
+ }
+ else
+ {
+ idesc = emit_parallel (current_cpu, pc, insn, sc, 0);
+ cti_sc = sc;
+ sc += 3;
+ max_insns -= 3;
+ }
+ icount += 2;
+ pc += 4;
+ if (IDESC_CTI_P (idesc))
+ {
+ SET_CTI_VPC (cti_sc);
+ break;
+ }
+ }
+ else /* 2 serial 16 bit insns */
+ {
+ /* Only emit before/after handlers if necessary. */
+ if (trace_p || profile_p)
+ {
+ idesc = emit_full16 (current_cpu, pc, insn >> 16, sc,
+ trace_p, profile_p);
+ cti_sc = sc + 1;
+ sc += 3;
+ max_insns -= 3;
+ }
+ else
+ {
+ idesc = emit_16 (current_cpu, pc, insn >> 16, sc, 0, 0);
+ cti_sc = sc;
+ ++sc;
+ --max_insns;
+ }
+ ++icount;
+ pc += 2;
+ if (IDESC_CTI_P (idesc))
+ {
+ SET_CTI_VPC (cti_sc);
+ break;
+ }
+ /* While we're guaranteed that there's room to extract the
+ insn, when single stepping we can't; the pbb must stop
+ after the first insn. */
+ if (max_insns <= 0)
+ break;
+ /* Use the same trace/profile address for the 2nd insn.
+ Saves us having to compute it and they come in pairs
+ anyway (e.g. can never branch to the 2nd insn). */
+ if (trace_p || profile_p)
+ {
+ idesc = emit_full16 (current_cpu, pc, insn & 0x7fff, sc,
+ trace_p, profile_p);
+ cti_sc = sc + 1;
+ sc += 3;
+ max_insns -= 3;
+ }
+ else
+ {
+ idesc = emit_16 (current_cpu, pc, insn & 0x7fff, sc, 0, 0);
+ cti_sc = sc;
+ ++sc;
+ --max_insns;
+ }
+ ++icount;
+ pc += 2;
+ if (IDESC_CTI_P (idesc))
+ {
+ SET_CTI_VPC (cti_sc);
+ break;
+ }
+ }
+ }
+ }
+ }
+
+ Finish:
+ SET_INSN_COUNT (icount);
+}
+EOF
+
+;;
+
+xfull-exec-pbb)
+
+# Inputs: current_cpu, vpc, FAST_P
+# Outputs: vpc
+# vpc is the virtual program counter.
+
+cat <<EOF
+#define DEFINE_SWITCH
+#include "semx-switch.c"
+EOF
+
+;;
+
+*)
+ echo "Invalid argument to mainloop.in: $1" >&2
+ exit 1
+ ;;
+
+esac
diff --git a/sim/m32r/modelx.c b/sim/m32r/modelx.c
new file mode 100644
index 00000000000..93e1522d41e
--- /dev/null
+++ b/sim/m32r/modelx.c
@@ -0,0 +1,2899 @@
+/* Simulator model support for m32rxf.
+
+THIS FILE IS MACHINE GENERATED WITH CGEN.
+
+Copyright (C) 1996, 1997, 1998, 1999 Free Software Foundation, Inc.
+
+This file is part of the GNU Simulators.
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License along
+with this program; if not, write to the Free Software Foundation, Inc.,
+59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+
+*/
+
+#define WANT_CPU m32rxf
+#define WANT_CPU_M32RXF
+
+#include "sim-main.h"
+
+/* The profiling data is recorded here, but is accessed via the profiling
+ mechanism. After all, this is information for profiling. */
+
+#if WITH_PROFILE_MODEL_P
+
+/* Model handlers for each insn. */
+
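+/* Each handler has the same shape: pull the operand register numbers out
+   of the ARGBUF fields, build a `referenced' bitmask recording which
+   operands were actually used (for the CTI unit, bit 1 marks a taken
+   branch), and add up the cycles charged by the corresponding
+   m32rxf_model_m32rx_u_* functions in m32rx.c.  */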
+static int
+model_m32rx_add (SIM_CPU *current_cpu, void *sem_arg)
+{
+#define FLD(f) abuf->fields.sfmt_add.f
+ const ARGBUF * UNUSED abuf = SEM_ARGBUF ((SEM_ARG) sem_arg);
+ const IDESC * UNUSED idesc = abuf->idesc;
+ int cycles = 0;
+ {
+ int referenced = 0;
+ int UNUSED insn_referenced = abuf->written;
+ INT in_sr = -1;
+ INT in_dr = -1;
+ INT out_dr = -1;
+ in_sr = FLD (in_sr);
+ in_dr = FLD (in_dr);
+ out_dr = FLD (out_dr);
+ referenced |= 1 << 0;
+ referenced |= 1 << 1;
+ referenced |= 1 << 2;
+ cycles += m32rxf_model_m32rx_u_exec (current_cpu, idesc, 0, referenced, in_sr, in_dr, out_dr);
+ }
+ return cycles;
+#undef FLD
+}
+
+static int
+model_m32rx_add3 (SIM_CPU *current_cpu, void *sem_arg)
+{
+#define FLD(f) abuf->fields.sfmt_add3.f
+ const ARGBUF * UNUSED abuf = SEM_ARGBUF ((SEM_ARG) sem_arg);
+ const IDESC * UNUSED idesc = abuf->idesc;
+ int cycles = 0;
+ {
+ int referenced = 0;
+ int UNUSED insn_referenced = abuf->written;
+ INT in_sr = -1;
+ INT in_dr = -1;
+ INT out_dr = -1;
+ in_sr = FLD (in_sr);
+ out_dr = FLD (out_dr);
+ referenced |= 1 << 0;
+ referenced |= 1 << 2;
+ cycles += m32rxf_model_m32rx_u_exec (current_cpu, idesc, 0, referenced, in_sr, in_dr, out_dr);
+ }
+ return cycles;
+#undef FLD
+}
+
+static int
+model_m32rx_and (SIM_CPU *current_cpu, void *sem_arg)
+{
+#define FLD(f) abuf->fields.sfmt_add.f
+ const ARGBUF * UNUSED abuf = SEM_ARGBUF ((SEM_ARG) sem_arg);
+ const IDESC * UNUSED idesc = abuf->idesc;
+ int cycles = 0;
+ {
+ int referenced = 0;
+ int UNUSED insn_referenced = abuf->written;
+ INT in_sr = -1;
+ INT in_dr = -1;
+ INT out_dr = -1;
+ in_sr = FLD (in_sr);
+ in_dr = FLD (in_dr);
+ out_dr = FLD (out_dr);
+ referenced |= 1 << 0;
+ referenced |= 1 << 1;
+ referenced |= 1 << 2;
+ cycles += m32rxf_model_m32rx_u_exec (current_cpu, idesc, 0, referenced, in_sr, in_dr, out_dr);
+ }
+ return cycles;
+#undef FLD
+}
+
+static int
+model_m32rx_and3 (SIM_CPU *current_cpu, void *sem_arg)
+{
+#define FLD(f) abuf->fields.sfmt_and3.f
+ const ARGBUF * UNUSED abuf = SEM_ARGBUF ((SEM_ARG) sem_arg);
+ const IDESC * UNUSED idesc = abuf->idesc;
+ int cycles = 0;
+ {
+ int referenced = 0;
+ int UNUSED insn_referenced = abuf->written;
+ INT in_sr = -1;
+ INT in_dr = -1;
+ INT out_dr = -1;
+ in_sr = FLD (in_sr);
+ out_dr = FLD (out_dr);
+ referenced |= 1 << 0;
+ referenced |= 1 << 2;
+ cycles += m32rxf_model_m32rx_u_exec (current_cpu, idesc, 0, referenced, in_sr, in_dr, out_dr);
+ }
+ return cycles;
+#undef FLD
+}
+
+static int
+model_m32rx_or (SIM_CPU *current_cpu, void *sem_arg)
+{
+#define FLD(f) abuf->fields.sfmt_add.f
+ const ARGBUF * UNUSED abuf = SEM_ARGBUF ((SEM_ARG) sem_arg);
+ const IDESC * UNUSED idesc = abuf->idesc;
+ int cycles = 0;
+ {
+ int referenced = 0;
+ int UNUSED insn_referenced = abuf->written;
+ INT in_sr = -1;
+ INT in_dr = -1;
+ INT out_dr = -1;
+ in_sr = FLD (in_sr);
+ in_dr = FLD (in_dr);
+ out_dr = FLD (out_dr);
+ referenced |= 1 << 0;
+ referenced |= 1 << 1;
+ referenced |= 1 << 2;
+ cycles += m32rxf_model_m32rx_u_exec (current_cpu, idesc, 0, referenced, in_sr, in_dr, out_dr);
+ }
+ return cycles;
+#undef FLD
+}
+
+static int
+model_m32rx_or3 (SIM_CPU *current_cpu, void *sem_arg)
+{
+#define FLD(f) abuf->fields.sfmt_and3.f
+ const ARGBUF * UNUSED abuf = SEM_ARGBUF ((SEM_ARG) sem_arg);
+ const IDESC * UNUSED idesc = abuf->idesc;
+ int cycles = 0;
+ {
+ int referenced = 0;
+ int UNUSED insn_referenced = abuf->written;
+ INT in_sr = -1;
+ INT in_dr = -1;
+ INT out_dr = -1;
+ in_sr = FLD (in_sr);
+ out_dr = FLD (out_dr);
+ referenced |= 1 << 0;
+ referenced |= 1 << 2;
+ cycles += m32rxf_model_m32rx_u_exec (current_cpu, idesc, 0, referenced, in_sr, in_dr, out_dr);
+ }
+ return cycles;
+#undef FLD
+}
+
+static int
+model_m32rx_xor (SIM_CPU *current_cpu, void *sem_arg)
+{
+#define FLD(f) abuf->fields.sfmt_add.f
+ const ARGBUF * UNUSED abuf = SEM_ARGBUF ((SEM_ARG) sem_arg);
+ const IDESC * UNUSED idesc = abuf->idesc;
+ int cycles = 0;
+ {
+ int referenced = 0;
+ int UNUSED insn_referenced = abuf->written;
+ INT in_sr = -1;
+ INT in_dr = -1;
+ INT out_dr = -1;
+ in_sr = FLD (in_sr);
+ in_dr = FLD (in_dr);
+ out_dr = FLD (out_dr);
+ referenced |= 1 << 0;
+ referenced |= 1 << 1;
+ referenced |= 1 << 2;
+ cycles += m32rxf_model_m32rx_u_exec (current_cpu, idesc, 0, referenced, in_sr, in_dr, out_dr);
+ }
+ return cycles;
+#undef FLD
+}
+
+static int
+model_m32rx_xor3 (SIM_CPU *current_cpu, void *sem_arg)
+{
+#define FLD(f) abuf->fields.sfmt_and3.f
+ const ARGBUF * UNUSED abuf = SEM_ARGBUF ((SEM_ARG) sem_arg);
+ const IDESC * UNUSED idesc = abuf->idesc;
+ int cycles = 0;
+ {
+ int referenced = 0;
+ int UNUSED insn_referenced = abuf->written;
+ INT in_sr = -1;
+ INT in_dr = -1;
+ INT out_dr = -1;
+ in_sr = FLD (in_sr);
+ out_dr = FLD (out_dr);
+ referenced |= 1 << 0;
+ referenced |= 1 << 2;
+ cycles += m32rxf_model_m32rx_u_exec (current_cpu, idesc, 0, referenced, in_sr, in_dr, out_dr);
+ }
+ return cycles;
+#undef FLD
+}
+
+static int
+model_m32rx_addi (SIM_CPU *current_cpu, void *sem_arg)
+{
+#define FLD(f) abuf->fields.sfmt_addi.f
+ const ARGBUF * UNUSED abuf = SEM_ARGBUF ((SEM_ARG) sem_arg);
+ const IDESC * UNUSED idesc = abuf->idesc;
+ int cycles = 0;
+ {
+ int referenced = 0;
+ int UNUSED insn_referenced = abuf->written;
+ INT in_sr = -1;
+ INT in_dr = -1;
+ INT out_dr = -1;
+ in_dr = FLD (in_dr);
+ out_dr = FLD (out_dr);
+ referenced |= 1 << 1;
+ referenced |= 1 << 2;
+ cycles += m32rxf_model_m32rx_u_exec (current_cpu, idesc, 0, referenced, in_sr, in_dr, out_dr);
+ }
+ return cycles;
+#undef FLD
+}
+
+static int
+model_m32rx_addv (SIM_CPU *current_cpu, void *sem_arg)
+{
+#define FLD(f) abuf->fields.sfmt_add.f
+ const ARGBUF * UNUSED abuf = SEM_ARGBUF ((SEM_ARG) sem_arg);
+ const IDESC * UNUSED idesc = abuf->idesc;
+ int cycles = 0;
+ {
+ int referenced = 0;
+ int UNUSED insn_referenced = abuf->written;
+ INT in_sr = -1;
+ INT in_dr = -1;
+ INT out_dr = -1;
+ in_sr = FLD (in_sr);
+ in_dr = FLD (in_dr);
+ out_dr = FLD (out_dr);
+ referenced |= 1 << 0;
+ referenced |= 1 << 1;
+ referenced |= 1 << 2;
+ cycles += m32rxf_model_m32rx_u_exec (current_cpu, idesc, 0, referenced, in_sr, in_dr, out_dr);
+ }
+ return cycles;
+#undef FLD
+}
+
+static int
+model_m32rx_addv3 (SIM_CPU *current_cpu, void *sem_arg)
+{
+#define FLD(f) abuf->fields.sfmt_add3.f
+ const ARGBUF * UNUSED abuf = SEM_ARGBUF ((SEM_ARG) sem_arg);
+ const IDESC * UNUSED idesc = abuf->idesc;
+ int cycles = 0;
+ {
+ int referenced = 0;
+ int UNUSED insn_referenced = abuf->written;
+ INT in_sr = -1;
+ INT in_dr = -1;
+ INT out_dr = -1;
+ in_sr = FLD (in_sr);
+ out_dr = FLD (out_dr);
+ referenced |= 1 << 0;
+ referenced |= 1 << 2;
+ cycles += m32rxf_model_m32rx_u_exec (current_cpu, idesc, 0, referenced, in_sr, in_dr, out_dr);
+ }
+ return cycles;
+#undef FLD
+}
+
+static int
+model_m32rx_addx (SIM_CPU *current_cpu, void *sem_arg)
+{
+#define FLD(f) abuf->fields.sfmt_add.f
+ const ARGBUF * UNUSED abuf = SEM_ARGBUF ((SEM_ARG) sem_arg);
+ const IDESC * UNUSED idesc = abuf->idesc;
+ int cycles = 0;
+ {
+ int referenced = 0;
+ int UNUSED insn_referenced = abuf->written;
+ INT in_sr = -1;
+ INT in_dr = -1;
+ INT out_dr = -1;
+ in_sr = FLD (in_sr);
+ in_dr = FLD (in_dr);
+ out_dr = FLD (out_dr);
+ referenced |= 1 << 0;
+ referenced |= 1 << 1;
+ referenced |= 1 << 2;
+ cycles += m32rxf_model_m32rx_u_exec (current_cpu, idesc, 0, referenced, in_sr, in_dr, out_dr);
+ }
+ return cycles;
+#undef FLD
+}
+
+static int
+model_m32rx_bc8 (SIM_CPU *current_cpu, void *sem_arg)
+{
+#define FLD(f) abuf->fields.sfmt_bl8.f
+ const ARGBUF * UNUSED abuf = SEM_ARGBUF ((SEM_ARG) sem_arg);
+ const IDESC * UNUSED idesc = abuf->idesc;
+ int cycles = 0;
+ {
+ int referenced = 0;
+ int UNUSED insn_referenced = abuf->written;
+ INT in_sr = -1;
+ if (insn_referenced & (1 << 2)) referenced |= 1 << 1;
+ cycles += m32rxf_model_m32rx_u_cti (current_cpu, idesc, 0, referenced, in_sr);
+ }
+ return cycles;
+#undef FLD
+}
+
+static int
+model_m32rx_bc24 (SIM_CPU *current_cpu, void *sem_arg)
+{
+#define FLD(f) abuf->fields.sfmt_bl24.f
+ const ARGBUF * UNUSED abuf = SEM_ARGBUF ((SEM_ARG) sem_arg);
+ const IDESC * UNUSED idesc = abuf->idesc;
+ int cycles = 0;
+ {
+ int referenced = 0;
+ int UNUSED insn_referenced = abuf->written;
+ INT in_sr = -1;
+ if (insn_referenced & (1 << 2)) referenced |= 1 << 1;
+ cycles += m32rxf_model_m32rx_u_cti (current_cpu, idesc, 0, referenced, in_sr);
+ }
+ return cycles;
+#undef FLD
+}
+
+static int
+model_m32rx_beq (SIM_CPU *current_cpu, void *sem_arg)
+{
+#define FLD(f) abuf->fields.sfmt_beq.f
+ const ARGBUF * UNUSED abuf = SEM_ARGBUF ((SEM_ARG) sem_arg);
+ const IDESC * UNUSED idesc = abuf->idesc;
+ int cycles = 0;
+ {
+ int referenced = 0;
+ int UNUSED insn_referenced = abuf->written;
+ INT in_sr = -1;
+ if (insn_referenced & (1 << 3)) referenced |= 1 << 1;
+ cycles += m32rxf_model_m32rx_u_cti (current_cpu, idesc, 0, referenced, in_sr);
+ }
+ {
+ int referenced = 0;
+ int UNUSED insn_referenced = abuf->written;
+ INT in_src1 = -1;
+ INT in_src2 = -1;
+ in_src1 = FLD (in_src1);
+ in_src2 = FLD (in_src2);
+ referenced |= 1 << 0;
+ referenced |= 1 << 1;
+ cycles += m32rxf_model_m32rx_u_cmp (current_cpu, idesc, 1, referenced, in_src1, in_src2);
+ }
+ return cycles;
+#undef FLD
+}
+
+static int
+model_m32rx_beqz (SIM_CPU *current_cpu, void *sem_arg)
+{
+#define FLD(f) abuf->fields.sfmt_beq.f
+ const ARGBUF * UNUSED abuf = SEM_ARGBUF ((SEM_ARG) sem_arg);
+ const IDESC * UNUSED idesc = abuf->idesc;
+ int cycles = 0;
+ {
+ int referenced = 0;
+ int UNUSED insn_referenced = abuf->written;
+ INT in_sr = -1;
+ if (insn_referenced & (1 << 2)) referenced |= 1 << 1;
+ cycles += m32rxf_model_m32rx_u_cti (current_cpu, idesc, 0, referenced, in_sr);
+ }
+ {
+ int referenced = 0;
+ int UNUSED insn_referenced = abuf->written;
+ INT in_src1 = -1;
+ INT in_src2 = -1;
+ in_src2 = FLD (in_src2);
+ referenced |= 1 << 1;
+ cycles += m32rxf_model_m32rx_u_cmp (current_cpu, idesc, 1, referenced, in_src1, in_src2);
+ }
+ return cycles;
+#undef FLD
+}
+
+static int
+model_m32rx_bgez (SIM_CPU *current_cpu, void *sem_arg)
+{
+#define FLD(f) abuf->fields.sfmt_beq.f
+ const ARGBUF * UNUSED abuf = SEM_ARGBUF ((SEM_ARG) sem_arg);
+ const IDESC * UNUSED idesc = abuf->idesc;
+ int cycles = 0;
+ {
+ int referenced = 0;
+ int UNUSED insn_referenced = abuf->written;
+ INT in_sr = -1;
+ if (insn_referenced & (1 << 2)) referenced |= 1 << 1;
+ cycles += m32rxf_model_m32rx_u_cti (current_cpu, idesc, 0, referenced, in_sr);
+ }
+ {
+ int referenced = 0;
+ int UNUSED insn_referenced = abuf->written;
+ INT in_src1 = -1;
+ INT in_src2 = -1;
+ in_src2 = FLD (in_src2);
+ referenced |= 1 << 1;
+ cycles += m32rxf_model_m32rx_u_cmp (current_cpu, idesc, 1, referenced, in_src1, in_src2);
+ }
+ return cycles;
+#undef FLD
+}
+
+static int
+model_m32rx_bgtz (SIM_CPU *current_cpu, void *sem_arg)
+{
+#define FLD(f) abuf->fields.sfmt_beq.f
+ const ARGBUF * UNUSED abuf = SEM_ARGBUF ((SEM_ARG) sem_arg);
+ const IDESC * UNUSED idesc = abuf->idesc;
+ int cycles = 0;
+ {
+ int referenced = 0;
+ int UNUSED insn_referenced = abuf->written;
+ INT in_sr = -1;
+ if (insn_referenced & (1 << 2)) referenced |= 1 << 1;
+ cycles += m32rxf_model_m32rx_u_cti (current_cpu, idesc, 0, referenced, in_sr);
+ }
+ {
+ int referenced = 0;
+ int UNUSED insn_referenced = abuf->written;
+ INT in_src1 = -1;
+ INT in_src2 = -1;
+ in_src2 = FLD (in_src2);
+ referenced |= 1 << 1;
+ cycles += m32rxf_model_m32rx_u_cmp (current_cpu, idesc, 1, referenced, in_src1, in_src2);
+ }
+ return cycles;
+#undef FLD
+}
+
+static int
+model_m32rx_blez (SIM_CPU *current_cpu, void *sem_arg)
+{
+#define FLD(f) abuf->fields.sfmt_beq.f
+ const ARGBUF * UNUSED abuf = SEM_ARGBUF ((SEM_ARG) sem_arg);
+ const IDESC * UNUSED idesc = abuf->idesc;
+ int cycles = 0;
+ {
+ int referenced = 0;
+ int UNUSED insn_referenced = abuf->written;
+ INT in_sr = -1;
+ if (insn_referenced & (1 << 2)) referenced |= 1 << 1;
+ cycles += m32rxf_model_m32rx_u_cti (current_cpu, idesc, 0, referenced, in_sr);
+ }
+ {
+ int referenced = 0;
+ int UNUSED insn_referenced = abuf->written;
+ INT in_src1 = -1;
+ INT in_src2 = -1;
+ in_src2 = FLD (in_src2);
+ referenced |= 1 << 1;
+ cycles += m32rxf_model_m32rx_u_cmp (current_cpu, idesc, 1, referenced, in_src1, in_src2);
+ }
+ return cycles;
+#undef FLD
+}
+
+static int
+model_m32rx_bltz (SIM_CPU *current_cpu, void *sem_arg)
+{
+#define FLD(f) abuf->fields.sfmt_beq.f
+ const ARGBUF * UNUSED abuf = SEM_ARGBUF ((SEM_ARG) sem_arg);
+ const IDESC * UNUSED idesc = abuf->idesc;
+ int cycles = 0;
+ {
+ int referenced = 0;
+ int UNUSED insn_referenced = abuf->written;
+ INT in_sr = -1;
+ if (insn_referenced & (1 << 2)) referenced |= 1 << 1;
+ cycles += m32rxf_model_m32rx_u_cti (current_cpu, idesc, 0, referenced, in_sr);
+ }
+ {
+ int referenced = 0;
+ int UNUSED insn_referenced = abuf->written;
+ INT in_src1 = -1;
+ INT in_src2 = -1;
+ in_src2 = FLD (in_src2);
+ referenced |= 1 << 1;
+ cycles += m32rxf_model_m32rx_u_cmp (current_cpu, idesc, 1, referenced, in_src1, in_src2);
+ }
+ return cycles;
+#undef FLD
+}
+
+static int
+model_m32rx_bnez (SIM_CPU *current_cpu, void *sem_arg)
+{
+#define FLD(f) abuf->fields.sfmt_beq.f
+ const ARGBUF * UNUSED abuf = SEM_ARGBUF ((SEM_ARG) sem_arg);
+ const IDESC * UNUSED idesc = abuf->idesc;
+ int cycles = 0;
+ {
+ int referenced = 0;
+ int UNUSED insn_referenced = abuf->written;
+ INT in_sr = -1;
+ if (insn_referenced & (1 << 2)) referenced |= 1 << 1;
+ cycles += m32rxf_model_m32rx_u_cti (current_cpu, idesc, 0, referenced, in_sr);
+ }
+ {
+ int referenced = 0;
+ int UNUSED insn_referenced = abuf->written;
+ INT in_src1 = -1;
+ INT in_src2 = -1;
+ in_src2 = FLD (in_src2);
+ referenced |= 1 << 1;
+ cycles += m32rxf_model_m32rx_u_cmp (current_cpu, idesc, 1, referenced, in_src1, in_src2);
+ }
+ return cycles;
+#undef FLD
+}
+
+static int
+model_m32rx_bl8 (SIM_CPU *current_cpu, void *sem_arg)
+{
+#define FLD(f) abuf->fields.sfmt_bl8.f
+ const ARGBUF * UNUSED abuf = SEM_ARGBUF ((SEM_ARG) sem_arg);
+ const IDESC * UNUSED idesc = abuf->idesc;
+ int cycles = 0;
+ {
+ int referenced = 0;
+ int UNUSED insn_referenced = abuf->written;
+ INT in_sr = -1;
+ referenced |= 1 << 1;
+ cycles += m32rxf_model_m32rx_u_cti (current_cpu, idesc, 0, referenced, in_sr);
+ }
+ return cycles;
+#undef FLD
+}
+
+static int
+model_m32rx_bl24 (SIM_CPU *current_cpu, void *sem_arg)
+{
+#define FLD(f) abuf->fields.sfmt_bl24.f
+ const ARGBUF * UNUSED abuf = SEM_ARGBUF ((SEM_ARG) sem_arg);
+ const IDESC * UNUSED idesc = abuf->idesc;
+ int cycles = 0;
+ {
+ int referenced = 0;
+ int UNUSED insn_referenced = abuf->written;
+ INT in_sr = -1;
+ referenced |= 1 << 1;
+ cycles += m32rxf_model_m32rx_u_cti (current_cpu, idesc, 0, referenced, in_sr);
+ }
+ return cycles;
+#undef FLD
+}
+
+static int
+model_m32rx_bcl8 (SIM_CPU *current_cpu, void *sem_arg)
+{
+#define FLD(f) abuf->fields.sfmt_bl8.f
+ const ARGBUF * UNUSED abuf = SEM_ARGBUF ((SEM_ARG) sem_arg);
+ const IDESC * UNUSED idesc = abuf->idesc;
+ int cycles = 0;
+ {
+ int referenced = 0;
+ int UNUSED insn_referenced = abuf->written;
+ INT in_sr = -1;
+ if (insn_referenced & (1 << 4)) referenced |= 1 << 1;
+ cycles += m32rxf_model_m32rx_u_cti (current_cpu, idesc, 0, referenced, in_sr);
+ }
+ return cycles;
+#undef FLD
+}
+
+static int
+model_m32rx_bcl24 (SIM_CPU *current_cpu, void *sem_arg)
+{
+#define FLD(f) abuf->fields.sfmt_bl24.f
+ const ARGBUF * UNUSED abuf = SEM_ARGBUF ((SEM_ARG) sem_arg);
+ const IDESC * UNUSED idesc = abuf->idesc;
+ int cycles = 0;
+ {
+ int referenced = 0;
+ int UNUSED insn_referenced = abuf->written;
+ INT in_sr = -1;
+ if (insn_referenced & (1 << 4)) referenced |= 1 << 1;
+ cycles += m32rxf_model_m32rx_u_cti (current_cpu, idesc, 0, referenced, in_sr);
+ }
+ return cycles;
+#undef FLD
+}
+
+static int
+model_m32rx_bnc8 (SIM_CPU *current_cpu, void *sem_arg)
+{
+#define FLD(f) abuf->fields.sfmt_bl8.f
+ const ARGBUF * UNUSED abuf = SEM_ARGBUF ((SEM_ARG) sem_arg);
+ const IDESC * UNUSED idesc = abuf->idesc;
+ int cycles = 0;
+ {
+ int referenced = 0;
+ int UNUSED insn_referenced = abuf->written;
+ INT in_sr = -1;
+ if (insn_referenced & (1 << 2)) referenced |= 1 << 1;
+ cycles += m32rxf_model_m32rx_u_cti (current_cpu, idesc, 0, referenced, in_sr);
+ }
+ return cycles;
+#undef FLD
+}
+
+static int
+model_m32rx_bnc24 (SIM_CPU *current_cpu, void *sem_arg)
+{
+#define FLD(f) abuf->fields.sfmt_bl24.f
+ const ARGBUF * UNUSED abuf = SEM_ARGBUF ((SEM_ARG) sem_arg);
+ const IDESC * UNUSED idesc = abuf->idesc;
+ int cycles = 0;
+ {
+ int referenced = 0;
+ int UNUSED insn_referenced = abuf->written;
+ INT in_sr = -1;
+ if (insn_referenced & (1 << 2)) referenced |= 1 << 1;
+ cycles += m32rxf_model_m32rx_u_cti (current_cpu, idesc, 0, referenced, in_sr);
+ }
+ return cycles;
+#undef FLD
+}
+
+static int
+model_m32rx_bne (SIM_CPU *current_cpu, void *sem_arg)
+{
+#define FLD(f) abuf->fields.sfmt_beq.f
+ const ARGBUF * UNUSED abuf = SEM_ARGBUF ((SEM_ARG) sem_arg);
+ const IDESC * UNUSED idesc = abuf->idesc;
+ int cycles = 0;
+ {
+ int referenced = 0;
+ int UNUSED insn_referenced = abuf->written;
+ INT in_sr = -1;
+ if (insn_referenced & (1 << 3)) referenced |= 1 << 1;
+ cycles += m32rxf_model_m32rx_u_cti (current_cpu, idesc, 0, referenced, in_sr);
+ }
+ {
+ int referenced = 0;
+ int UNUSED insn_referenced = abuf->written;
+ INT in_src1 = -1;
+ INT in_src2 = -1;
+ in_src1 = FLD (in_src1);
+ in_src2 = FLD (in_src2);
+ referenced |= 1 << 0;
+ referenced |= 1 << 1;
+ cycles += m32rxf_model_m32rx_u_cmp (current_cpu, idesc, 1, referenced, in_src1, in_src2);
+ }
+ return cycles;
+#undef FLD
+}
+
+static int
+model_m32rx_bra8 (SIM_CPU *current_cpu, void *sem_arg)
+{
+#define FLD(f) abuf->fields.sfmt_bl8.f
+ const ARGBUF * UNUSED abuf = SEM_ARGBUF ((SEM_ARG) sem_arg);
+ const IDESC * UNUSED idesc = abuf->idesc;
+ int cycles = 0;
+ {
+ int referenced = 0;
+ int UNUSED insn_referenced = abuf->written;
+ INT in_sr = -1;
+ referenced |= 1 << 1;
+ cycles += m32rxf_model_m32rx_u_cti (current_cpu, idesc, 0, referenced, in_sr);
+ }
+ return cycles;
+#undef FLD
+}
+
+static int
+model_m32rx_bra24 (SIM_CPU *current_cpu, void *sem_arg)
+{
+#define FLD(f) abuf->fields.sfmt_bl24.f
+ const ARGBUF * UNUSED abuf = SEM_ARGBUF ((SEM_ARG) sem_arg);
+ const IDESC * UNUSED idesc = abuf->idesc;
+ int cycles = 0;
+ {
+ int referenced = 0;
+ int UNUSED insn_referenced = abuf->written;
+ INT in_sr = -1;
+ referenced |= 1 << 1;
+ cycles += m32rxf_model_m32rx_u_cti (current_cpu, idesc, 0, referenced, in_sr);
+ }
+ return cycles;
+#undef FLD
+}
+
+static int
+model_m32rx_bncl8 (SIM_CPU *current_cpu, void *sem_arg)
+{
+#define FLD(f) abuf->fields.sfmt_bl8.f
+ const ARGBUF * UNUSED abuf = SEM_ARGBUF ((SEM_ARG) sem_arg);
+ const IDESC * UNUSED idesc = abuf->idesc;
+ int cycles = 0;
+ {
+ int referenced = 0;
+ int UNUSED insn_referenced = abuf->written;
+ INT in_sr = -1;
+ if (insn_referenced & (1 << 4)) referenced |= 1 << 1;
+ cycles += m32rxf_model_m32rx_u_cti (current_cpu, idesc, 0, referenced, in_sr);
+ }
+ return cycles;
+#undef FLD
+}
+
+static int
+model_m32rx_bncl24 (SIM_CPU *current_cpu, void *sem_arg)
+{
+#define FLD(f) abuf->fields.sfmt_bl24.f
+ const ARGBUF * UNUSED abuf = SEM_ARGBUF ((SEM_ARG) sem_arg);
+ const IDESC * UNUSED idesc = abuf->idesc;
+ int cycles = 0;
+ {
+ int referenced = 0;
+ int UNUSED insn_referenced = abuf->written;
+ INT in_sr = -1;
+ if (insn_referenced & (1 << 4)) referenced |= 1 << 1;
+ cycles += m32rxf_model_m32rx_u_cti (current_cpu, idesc, 0, referenced, in_sr);
+ }
+ return cycles;
+#undef FLD
+}
+
+static int
+model_m32rx_cmp (SIM_CPU *current_cpu, void *sem_arg)
+{
+#define FLD(f) abuf->fields.sfmt_st_plus.f
+ const ARGBUF * UNUSED abuf = SEM_ARGBUF ((SEM_ARG) sem_arg);
+ const IDESC * UNUSED idesc = abuf->idesc;
+ int cycles = 0;
+ {
+ int referenced = 0;
+ int UNUSED insn_referenced = abuf->written;
+ INT in_src1 = -1;
+ INT in_src2 = -1;
+ in_src1 = FLD (in_src1);
+ in_src2 = FLD (in_src2);
+ referenced |= 1 << 0;
+ referenced |= 1 << 1;
+ cycles += m32rxf_model_m32rx_u_cmp (current_cpu, idesc, 0, referenced, in_src1, in_src2);
+ }
+ return cycles;
+#undef FLD
+}
+
+static int
+model_m32rx_cmpi (SIM_CPU *current_cpu, void *sem_arg)
+{
+#define FLD(f) abuf->fields.sfmt_st_d.f
+ const ARGBUF * UNUSED abuf = SEM_ARGBUF ((SEM_ARG) sem_arg);
+ const IDESC * UNUSED idesc = abuf->idesc;
+ int cycles = 0;
+ {
+ int referenced = 0;
+ int UNUSED insn_referenced = abuf->written;
+ INT in_src1 = -1;
+ INT in_src2 = -1;
+ in_src2 = FLD (in_src2);
+ referenced |= 1 << 1;
+ cycles += m32rxf_model_m32rx_u_cmp (current_cpu, idesc, 0, referenced, in_src1, in_src2);
+ }
+ return cycles;
+#undef FLD
+}
+
+static int
+model_m32rx_cmpu (SIM_CPU *current_cpu, void *sem_arg)
+{
+#define FLD(f) abuf->fields.sfmt_st_plus.f
+ const ARGBUF * UNUSED abuf = SEM_ARGBUF ((SEM_ARG) sem_arg);
+ const IDESC * UNUSED idesc = abuf->idesc;
+ int cycles = 0;
+ {
+ int referenced = 0;
+ int UNUSED insn_referenced = abuf->written;
+ INT in_src1 = -1;
+ INT in_src2 = -1;
+ in_src1 = FLD (in_src1);
+ in_src2 = FLD (in_src2);
+ referenced |= 1 << 0;
+ referenced |= 1 << 1;
+ cycles += m32rxf_model_m32rx_u_cmp (current_cpu, idesc, 0, referenced, in_src1, in_src2);
+ }
+ return cycles;
+#undef FLD
+}
+
+static int
+model_m32rx_cmpui (SIM_CPU *current_cpu, void *sem_arg)
+{
+#define FLD(f) abuf->fields.sfmt_st_d.f
+ const ARGBUF * UNUSED abuf = SEM_ARGBUF ((SEM_ARG) sem_arg);
+ const IDESC * UNUSED idesc = abuf->idesc;
+ int cycles = 0;
+ {
+ int referenced = 0;
+ int UNUSED insn_referenced = abuf->written;
+ INT in_src1 = -1;
+ INT in_src2 = -1;
+ in_src2 = FLD (in_src2);
+ referenced |= 1 << 1;
+ cycles += m32rxf_model_m32rx_u_cmp (current_cpu, idesc, 0, referenced, in_src1, in_src2);
+ }
+ return cycles;
+#undef FLD
+}
+
+static int
+model_m32rx_cmpeq (SIM_CPU *current_cpu, void *sem_arg)
+{
+#define FLD(f) abuf->fields.sfmt_st_plus.f
+ const ARGBUF * UNUSED abuf = SEM_ARGBUF ((SEM_ARG) sem_arg);
+ const IDESC * UNUSED idesc = abuf->idesc;
+ int cycles = 0;
+ {
+ int referenced = 0;
+ int UNUSED insn_referenced = abuf->written;
+ INT in_src1 = -1;
+ INT in_src2 = -1;
+ in_src1 = FLD (in_src1);
+ in_src2 = FLD (in_src2);
+ referenced |= 1 << 0;
+ referenced |= 1 << 1;
+ cycles += m32rxf_model_m32rx_u_cmp (current_cpu, idesc, 0, referenced, in_src1, in_src2);
+ }
+ return cycles;
+#undef FLD
+}
+
+static int
+model_m32rx_cmpz (SIM_CPU *current_cpu, void *sem_arg)
+{
+#define FLD(f) abuf->fields.sfmt_st_plus.f
+ const ARGBUF * UNUSED abuf = SEM_ARGBUF ((SEM_ARG) sem_arg);
+ const IDESC * UNUSED idesc = abuf->idesc;
+ int cycles = 0;
+ {
+ int referenced = 0;
+ int UNUSED insn_referenced = abuf->written;
+ INT in_src1 = -1;
+ INT in_src2 = -1;
+ in_src2 = FLD (in_src2);
+ referenced |= 1 << 1;
+ cycles += m32rxf_model_m32rx_u_cmp (current_cpu, idesc, 0, referenced, in_src1, in_src2);
+ }
+ return cycles;
+#undef FLD
+}
+
+static int
+model_m32rx_div (SIM_CPU *current_cpu, void *sem_arg)
+{
+#define FLD(f) abuf->fields.sfmt_add.f
+ const ARGBUF * UNUSED abuf = SEM_ARGBUF ((SEM_ARG) sem_arg);
+ const IDESC * UNUSED idesc = abuf->idesc;
+ int cycles = 0;
+ {
+ int referenced = 0;
+ int UNUSED insn_referenced = abuf->written;
+ INT in_sr = -1;
+ INT in_dr = -1;
+ INT out_dr = -1;
+ in_sr = FLD (in_sr);
+ in_dr = FLD (in_dr);
+ out_dr = FLD (out_dr);
+ referenced |= 1 << 0;
+ if (insn_referenced & (1 << 0)) referenced |= 1 << 1;
+ if (insn_referenced & (1 << 2)) referenced |= 1 << 2;
+ cycles += m32rxf_model_m32rx_u_exec (current_cpu, idesc, 0, referenced, in_sr, in_dr, out_dr);
+ }
+ return cycles;
+#undef FLD
+}
+
+static int
+model_m32rx_divu (SIM_CPU *current_cpu, void *sem_arg)
+{
+#define FLD(f) abuf->fields.sfmt_add.f
+ const ARGBUF * UNUSED abuf = SEM_ARGBUF ((SEM_ARG) sem_arg);
+ const IDESC * UNUSED idesc = abuf->idesc;
+ int cycles = 0;
+ {
+ int referenced = 0;
+ int UNUSED insn_referenced = abuf->written;
+ INT in_sr = -1;
+ INT in_dr = -1;
+ INT out_dr = -1;
+ in_sr = FLD (in_sr);
+ in_dr = FLD (in_dr);
+ out_dr = FLD (out_dr);
+ referenced |= 1 << 0;
+ if (insn_referenced & (1 << 0)) referenced |= 1 << 1;
+ if (insn_referenced & (1 << 2)) referenced |= 1 << 2;
+ cycles += m32rxf_model_m32rx_u_exec (current_cpu, idesc, 0, referenced, in_sr, in_dr, out_dr);
+ }
+ return cycles;
+#undef FLD
+}
+
+static int
+model_m32rx_rem (SIM_CPU *current_cpu, void *sem_arg)
+{
+#define FLD(f) abuf->fields.sfmt_add.f
+ const ARGBUF * UNUSED abuf = SEM_ARGBUF ((SEM_ARG) sem_arg);
+ const IDESC * UNUSED idesc = abuf->idesc;
+ int cycles = 0;
+ {
+ int referenced = 0;
+ int UNUSED insn_referenced = abuf->written;
+ INT in_sr = -1;
+ INT in_dr = -1;
+ INT out_dr = -1;
+ in_sr = FLD (in_sr);
+ in_dr = FLD (in_dr);
+ out_dr = FLD (out_dr);
+ referenced |= 1 << 0;
+ if (insn_referenced & (1 << 0)) referenced |= 1 << 1;
+ if (insn_referenced & (1 << 2)) referenced |= 1 << 2;
+ cycles += m32rxf_model_m32rx_u_exec (current_cpu, idesc, 0, referenced, in_sr, in_dr, out_dr);
+ }
+ return cycles;
+#undef FLD
+}
+
+static int
+model_m32rx_remu (SIM_CPU *current_cpu, void *sem_arg)
+{
+#define FLD(f) abuf->fields.sfmt_add.f
+ const ARGBUF * UNUSED abuf = SEM_ARGBUF ((SEM_ARG) sem_arg);
+ const IDESC * UNUSED idesc = abuf->idesc;
+ int cycles = 0;
+ {
+ int referenced = 0;
+ int UNUSED insn_referenced = abuf->written;
+ INT in_sr = -1;
+ INT in_dr = -1;
+ INT out_dr = -1;
+ in_sr = FLD (in_sr);
+ in_dr = FLD (in_dr);
+ out_dr = FLD (out_dr);
+ referenced |= 1 << 0;
+ if (insn_referenced & (1 << 0)) referenced |= 1 << 1;
+ if (insn_referenced & (1 << 2)) referenced |= 1 << 2;
+ cycles += m32rxf_model_m32rx_u_exec (current_cpu, idesc, 0, referenced, in_sr, in_dr, out_dr);
+ }
+ return cycles;
+#undef FLD
+}
+
+static int
+model_m32rx_divh (SIM_CPU *current_cpu, void *sem_arg)
+{
+#define FLD(f) abuf->fields.sfmt_add.f
+ const ARGBUF * UNUSED abuf = SEM_ARGBUF ((SEM_ARG) sem_arg);
+ const IDESC * UNUSED idesc = abuf->idesc;
+ int cycles = 0;
+ {
+ int referenced = 0;
+ int UNUSED insn_referenced = abuf->written;
+ INT in_sr = -1;
+ INT in_dr = -1;
+ INT out_dr = -1;
+ in_sr = FLD (in_sr);
+ in_dr = FLD (in_dr);
+ out_dr = FLD (out_dr);
+ referenced |= 1 << 0;
+ if (insn_referenced & (1 << 0)) referenced |= 1 << 1;
+ if (insn_referenced & (1 << 2)) referenced |= 1 << 2;
+ cycles += m32rxf_model_m32rx_u_exec (current_cpu, idesc, 0, referenced, in_sr, in_dr, out_dr);
+ }
+ return cycles;
+#undef FLD
+}
+
+static int
+model_m32rx_jc (SIM_CPU *current_cpu, void *sem_arg)
+{
+#define FLD(f) abuf->fields.sfmt_mvtc.f
+ const ARGBUF * UNUSED abuf = SEM_ARGBUF ((SEM_ARG) sem_arg);
+ const IDESC * UNUSED idesc = abuf->idesc;
+ int cycles = 0;
+ {
+ int referenced = 0;
+ int UNUSED insn_referenced = abuf->written;
+ INT in_sr = -1;
+ in_sr = FLD (in_sr);
+ if (insn_referenced & (1 << 1)) referenced |= 1 << 0;
+ if (insn_referenced & (1 << 2)) referenced |= 1 << 1;
+ cycles += m32rxf_model_m32rx_u_cti (current_cpu, idesc, 0, referenced, in_sr);
+ }
+ return cycles;
+#undef FLD
+}
+
+static int
+model_m32rx_jnc (SIM_CPU *current_cpu, void *sem_arg)
+{
+#define FLD(f) abuf->fields.sfmt_mvtc.f
+ const ARGBUF * UNUSED abuf = SEM_ARGBUF ((SEM_ARG) sem_arg);
+ const IDESC * UNUSED idesc = abuf->idesc;
+ int cycles = 0;
+ {
+ int referenced = 0;
+ int UNUSED insn_referenced = abuf->written;
+ INT in_sr = -1;
+ in_sr = FLD (in_sr);
+ if (insn_referenced & (1 << 1)) referenced |= 1 << 0;
+ if (insn_referenced & (1 << 2)) referenced |= 1 << 1;
+ cycles += m32rxf_model_m32rx_u_cti (current_cpu, idesc, 0, referenced, in_sr);
+ }
+ return cycles;
+#undef FLD
+}
+
+static int
+model_m32rx_jl (SIM_CPU *current_cpu, void *sem_arg)
+{
+#define FLD(f) abuf->fields.sfmt_jl.f
+ const ARGBUF * UNUSED abuf = SEM_ARGBUF ((SEM_ARG) sem_arg);
+ const IDESC * UNUSED idesc = abuf->idesc;
+ int cycles = 0;
+ {
+ int referenced = 0;
+ int UNUSED insn_referenced = abuf->written;
+ INT in_sr = -1;
+ in_sr = FLD (in_sr);
+ referenced |= 1 << 0;
+ referenced |= 1 << 1;
+ cycles += m32rxf_model_m32rx_u_cti (current_cpu, idesc, 0, referenced, in_sr);
+ }
+ return cycles;
+#undef FLD
+}
+
+static int
+model_m32rx_jmp (SIM_CPU *current_cpu, void *sem_arg)
+{
+#define FLD(f) abuf->fields.sfmt_mvtc.f
+ const ARGBUF * UNUSED abuf = SEM_ARGBUF ((SEM_ARG) sem_arg);
+ const IDESC * UNUSED idesc = abuf->idesc;
+ int cycles = 0;
+ {
+ int referenced = 0;
+ int UNUSED insn_referenced = abuf->written;
+ INT in_sr = -1;
+ in_sr = FLD (in_sr);
+ referenced |= 1 << 0;
+ referenced |= 1 << 1;
+ cycles += m32rxf_model_m32rx_u_cti (current_cpu, idesc, 0, referenced, in_sr);
+ }
+ return cycles;
+#undef FLD
+}
+
+static int
+model_m32rx_ld (SIM_CPU *current_cpu, void *sem_arg)
+{
+#define FLD(f) abuf->fields.sfmt_ld_plus.f
+ const ARGBUF * UNUSED abuf = SEM_ARGBUF ((SEM_ARG) sem_arg);
+ const IDESC * UNUSED idesc = abuf->idesc;
+ int cycles = 0;
+ {
+ int referenced = 0;
+ int UNUSED insn_referenced = abuf->written;
+ INT in_sr = 0;
+ INT out_dr = 0;
+ in_sr = FLD (in_sr);
+ out_dr = FLD (out_dr);
+ referenced |= 1 << 0;
+ referenced |= 1 << 1;
+ cycles += m32rxf_model_m32rx_u_load (current_cpu, idesc, 0, referenced, in_sr, out_dr);
+ }
+ return cycles;
+#undef FLD
+}
+
+static int
+model_m32rx_ld_d (SIM_CPU *current_cpu, void *sem_arg)
+{
+#define FLD(f) abuf->fields.sfmt_add3.f
+ const ARGBUF * UNUSED abuf = SEM_ARGBUF ((SEM_ARG) sem_arg);
+ const IDESC * UNUSED idesc = abuf->idesc;
+ int cycles = 0;
+ {
+ int referenced = 0;
+ int UNUSED insn_referenced = abuf->written;
+ INT in_sr = 0;
+ INT out_dr = 0;
+ in_sr = FLD (in_sr);
+ out_dr = FLD (out_dr);
+ referenced |= 1 << 0;
+ referenced |= 1 << 1;
+ cycles += m32rxf_model_m32rx_u_load (current_cpu, idesc, 0, referenced, in_sr, out_dr);
+ }
+ return cycles;
+#undef FLD
+}
+
+static int
+model_m32rx_ldb (SIM_CPU *current_cpu, void *sem_arg)
+{
+#define FLD(f) abuf->fields.sfmt_ld_plus.f
+ const ARGBUF * UNUSED abuf = SEM_ARGBUF ((SEM_ARG) sem_arg);
+ const IDESC * UNUSED idesc = abuf->idesc;
+ int cycles = 0;
+ {
+ int referenced = 0;
+ int UNUSED insn_referenced = abuf->written;
+ INT in_sr = 0;
+ INT out_dr = 0;
+ in_sr = FLD (in_sr);
+ out_dr = FLD (out_dr);
+ referenced |= 1 << 0;
+ referenced |= 1 << 1;
+ cycles += m32rxf_model_m32rx_u_load (current_cpu, idesc, 0, referenced, in_sr, out_dr);
+ }
+ return cycles;
+#undef FLD
+}
+
+static int
+model_m32rx_ldb_d (SIM_CPU *current_cpu, void *sem_arg)
+{
+#define FLD(f) abuf->fields.sfmt_add3.f
+ const ARGBUF * UNUSED abuf = SEM_ARGBUF ((SEM_ARG) sem_arg);
+ const IDESC * UNUSED idesc = abuf->idesc;
+ int cycles = 0;
+ {
+ int referenced = 0;
+ int UNUSED insn_referenced = abuf->written;
+ INT in_sr = 0;
+ INT out_dr = 0;
+ in_sr = FLD (in_sr);
+ out_dr = FLD (out_dr);
+ referenced |= 1 << 0;
+ referenced |= 1 << 1;
+ cycles += m32rxf_model_m32rx_u_load (current_cpu, idesc, 0, referenced, in_sr, out_dr);
+ }
+ return cycles;
+#undef FLD
+}
+
+static int
+model_m32rx_ldh (SIM_CPU *current_cpu, void *sem_arg)
+{
+#define FLD(f) abuf->fields.sfmt_ld_plus.f
+ const ARGBUF * UNUSED abuf = SEM_ARGBUF ((SEM_ARG) sem_arg);
+ const IDESC * UNUSED idesc = abuf->idesc;
+ int cycles = 0;
+ {
+ int referenced = 0;
+ int UNUSED insn_referenced = abuf->written;
+ INT in_sr = 0;
+ INT out_dr = 0;
+ in_sr = FLD (in_sr);
+ out_dr = FLD (out_dr);
+ referenced |= 1 << 0;
+ referenced |= 1 << 1;
+ cycles += m32rxf_model_m32rx_u_load (current_cpu, idesc, 0, referenced, in_sr, out_dr);
+ }
+ return cycles;
+#undef FLD
+}
+
+static int
+model_m32rx_ldh_d (SIM_CPU *current_cpu, void *sem_arg)
+{
+#define FLD(f) abuf->fields.sfmt_add3.f
+ const ARGBUF * UNUSED abuf = SEM_ARGBUF ((SEM_ARG) sem_arg);
+ const IDESC * UNUSED idesc = abuf->idesc;
+ int cycles = 0;
+ {
+ int referenced = 0;
+ int UNUSED insn_referenced = abuf->written;
+ INT in_sr = 0;
+ INT out_dr = 0;
+ in_sr = FLD (in_sr);
+ out_dr = FLD (out_dr);
+ referenced |= 1 << 0;
+ referenced |= 1 << 1;
+ cycles += m32rxf_model_m32rx_u_load (current_cpu, idesc, 0, referenced, in_sr, out_dr);
+ }
+ return cycles;
+#undef FLD
+}
+
+static int
+model_m32rx_ldub (SIM_CPU *current_cpu, void *sem_arg)
+{
+#define FLD(f) abuf->fields.sfmt_ld_plus.f
+ const ARGBUF * UNUSED abuf = SEM_ARGBUF ((SEM_ARG) sem_arg);
+ const IDESC * UNUSED idesc = abuf->idesc;
+ int cycles = 0;
+ {
+ int referenced = 0;
+ int UNUSED insn_referenced = abuf->written;
+ INT in_sr = 0;
+ INT out_dr = 0;
+ in_sr = FLD (in_sr);
+ out_dr = FLD (out_dr);
+ referenced |= 1 << 0;
+ referenced |= 1 << 1;
+ cycles += m32rxf_model_m32rx_u_load (current_cpu, idesc, 0, referenced, in_sr, out_dr);
+ }
+ return cycles;
+#undef FLD
+}
+
+static int
+model_m32rx_ldub_d (SIM_CPU *current_cpu, void *sem_arg)
+{
+#define FLD(f) abuf->fields.sfmt_add3.f
+ const ARGBUF * UNUSED abuf = SEM_ARGBUF ((SEM_ARG) sem_arg);
+ const IDESC * UNUSED idesc = abuf->idesc;
+ int cycles = 0;
+ {
+ int referenced = 0;
+ int UNUSED insn_referenced = abuf->written;
+ INT in_sr = 0;
+ INT out_dr = 0;
+ in_sr = FLD (in_sr);
+ out_dr = FLD (out_dr);
+ referenced |= 1 << 0;
+ referenced |= 1 << 1;
+ cycles += m32rxf_model_m32rx_u_load (current_cpu, idesc, 0, referenced, in_sr, out_dr);
+ }
+ return cycles;
+#undef FLD
+}
+
+static int
+model_m32rx_lduh (SIM_CPU *current_cpu, void *sem_arg)
+{
+#define FLD(f) abuf->fields.sfmt_ld_plus.f
+ const ARGBUF * UNUSED abuf = SEM_ARGBUF ((SEM_ARG) sem_arg);
+ const IDESC * UNUSED idesc = abuf->idesc;
+ int cycles = 0;
+ {
+ int referenced = 0;
+ int UNUSED insn_referenced = abuf->written;
+ INT in_sr = 0;
+ INT out_dr = 0;
+ in_sr = FLD (in_sr);
+ out_dr = FLD (out_dr);
+ referenced |= 1 << 0;
+ referenced |= 1 << 1;
+ cycles += m32rxf_model_m32rx_u_load (current_cpu, idesc, 0, referenced, in_sr, out_dr);
+ }
+ return cycles;
+#undef FLD
+}
+
+static int
+model_m32rx_lduh_d (SIM_CPU *current_cpu, void *sem_arg)
+{
+#define FLD(f) abuf->fields.sfmt_add3.f
+ const ARGBUF * UNUSED abuf = SEM_ARGBUF ((SEM_ARG) sem_arg);
+ const IDESC * UNUSED idesc = abuf->idesc;
+ int cycles = 0;
+ {
+ int referenced = 0;
+ int UNUSED insn_referenced = abuf->written;
+ INT in_sr = 0;
+ INT out_dr = 0;
+ in_sr = FLD (in_sr);
+ out_dr = FLD (out_dr);
+ referenced |= 1 << 0;
+ referenced |= 1 << 1;
+ cycles += m32rxf_model_m32rx_u_load (current_cpu, idesc, 0, referenced, in_sr, out_dr);
+ }
+ return cycles;
+#undef FLD
+}
+
+static int
+model_m32rx_ld_plus (SIM_CPU *current_cpu, void *sem_arg)
+{
+#define FLD(f) abuf->fields.sfmt_ld_plus.f
+ const ARGBUF * UNUSED abuf = SEM_ARGBUF ((SEM_ARG) sem_arg);
+ const IDESC * UNUSED idesc = abuf->idesc;
+ int cycles = 0;
+ {
+ int referenced = 0;
+ int UNUSED insn_referenced = abuf->written;
+ INT in_sr = 0;
+ INT out_dr = 0;
+ in_sr = FLD (in_sr);
+ out_dr = FLD (out_dr);
+ referenced |= 1 << 0;
+ referenced |= 1 << 1;
+ cycles += m32rxf_model_m32rx_u_load (current_cpu, idesc, 0, referenced, in_sr, out_dr);
+ }
+ {
+ int referenced = 0;
+ int UNUSED insn_referenced = abuf->written;
+ INT in_sr = -1;
+ INT in_dr = -1;
+ INT out_dr = -1;
+ in_dr = FLD (in_sr);
+ out_dr = FLD (out_sr);
+ referenced |= 1 << 0;
+ referenced |= 1 << 2;
+ cycles += m32rxf_model_m32rx_u_exec (current_cpu, idesc, 1, referenced, in_sr, in_dr, out_dr);
+ }
+ return cycles;
+#undef FLD
+}
+
+static int
+model_m32rx_ld24 (SIM_CPU *current_cpu, void *sem_arg)
+{
+#define FLD(f) abuf->fields.sfmt_ld24.f
+ const ARGBUF * UNUSED abuf = SEM_ARGBUF ((SEM_ARG) sem_arg);
+ const IDESC * UNUSED idesc = abuf->idesc;
+ int cycles = 0;
+ {
+ int referenced = 0;
+ int UNUSED insn_referenced = abuf->written;
+ INT in_sr = -1;
+ INT in_dr = -1;
+ INT out_dr = -1;
+ out_dr = FLD (out_dr);
+ referenced |= 1 << 2;
+ cycles += m32rxf_model_m32rx_u_exec (current_cpu, idesc, 0, referenced, in_sr, in_dr, out_dr);
+ }
+ return cycles;
+#undef FLD
+}
+
+static int
+model_m32rx_ldi8 (SIM_CPU *current_cpu, void *sem_arg)
+{
+#define FLD(f) abuf->fields.sfmt_addi.f
+ const ARGBUF * UNUSED abuf = SEM_ARGBUF ((SEM_ARG) sem_arg);
+ const IDESC * UNUSED idesc = abuf->idesc;
+ int cycles = 0;
+ {
+ int referenced = 0;
+ int UNUSED insn_referenced = abuf->written;
+ INT in_sr = -1;
+ INT in_dr = -1;
+ INT out_dr = -1;
+ out_dr = FLD (out_dr);
+ referenced |= 1 << 2;
+ cycles += m32rxf_model_m32rx_u_exec (current_cpu, idesc, 0, referenced, in_sr, in_dr, out_dr);
+ }
+ return cycles;
+#undef FLD
+}
+
+static int
+model_m32rx_ldi16 (SIM_CPU *current_cpu, void *sem_arg)
+{
+#define FLD(f) abuf->fields.sfmt_add3.f
+ const ARGBUF * UNUSED abuf = SEM_ARGBUF ((SEM_ARG) sem_arg);
+ const IDESC * UNUSED idesc = abuf->idesc;
+ int cycles = 0;
+ {
+ int referenced = 0;
+ int UNUSED insn_referenced = abuf->written;
+ INT in_sr = -1;
+ INT in_dr = -1;
+ INT out_dr = -1;
+ out_dr = FLD (out_dr);
+ referenced |= 1 << 2;
+ cycles += m32rxf_model_m32rx_u_exec (current_cpu, idesc, 0, referenced, in_sr, in_dr, out_dr);
+ }
+ return cycles;
+#undef FLD
+}
+
+static int
+model_m32rx_lock (SIM_CPU *current_cpu, void *sem_arg)
+{
+#define FLD(f) abuf->fields.sfmt_ld_plus.f
+ const ARGBUF * UNUSED abuf = SEM_ARGBUF ((SEM_ARG) sem_arg);
+ const IDESC * UNUSED idesc = abuf->idesc;
+ int cycles = 0;
+ {
+ int referenced = 0;
+ int UNUSED insn_referenced = abuf->written;
+ INT in_sr = 0;
+ INT out_dr = 0;
+ in_sr = FLD (in_sr);
+ out_dr = FLD (out_dr);
+ referenced |= 1 << 0;
+ referenced |= 1 << 1;
+ cycles += m32rxf_model_m32rx_u_load (current_cpu, idesc, 0, referenced, in_sr, out_dr);
+ }
+ return cycles;
+#undef FLD
+}
+
+static int
+model_m32rx_machi_a (SIM_CPU *current_cpu, void *sem_arg)
+{
+#define FLD(f) abuf->fields.sfmt_machi_a.f
+ const ARGBUF * UNUSED abuf = SEM_ARGBUF ((SEM_ARG) sem_arg);
+ const IDESC * UNUSED idesc = abuf->idesc;
+ int cycles = 0;
+ {
+ int referenced = 0;
+ int UNUSED insn_referenced = abuf->written;
+ INT in_src1 = -1;
+ INT in_src2 = -1;
+ in_src1 = FLD (in_src1);
+ in_src2 = FLD (in_src2);
+ referenced |= 1 << 0;
+ referenced |= 1 << 1;
+ cycles += m32rxf_model_m32rx_u_mac (current_cpu, idesc, 0, referenced, in_src1, in_src2);
+ }
+ return cycles;
+#undef FLD
+}
+
+static int
+model_m32rx_maclo_a (SIM_CPU *current_cpu, void *sem_arg)
+{
+#define FLD(f) abuf->fields.sfmt_machi_a.f
+ const ARGBUF * UNUSED abuf = SEM_ARGBUF ((SEM_ARG) sem_arg);
+ const IDESC * UNUSED idesc = abuf->idesc;
+ int cycles = 0;
+ {
+ int referenced = 0;
+ int UNUSED insn_referenced = abuf->written;
+ INT in_src1 = -1;
+ INT in_src2 = -1;
+ in_src1 = FLD (in_src1);
+ in_src2 = FLD (in_src2);
+ referenced |= 1 << 0;
+ referenced |= 1 << 1;
+ cycles += m32rxf_model_m32rx_u_mac (current_cpu, idesc, 0, referenced, in_src1, in_src2);
+ }
+ return cycles;
+#undef FLD
+}
+
+static int
+model_m32rx_macwhi_a (SIM_CPU *current_cpu, void *sem_arg)
+{
+#define FLD(f) abuf->fields.sfmt_machi_a.f
+ const ARGBUF * UNUSED abuf = SEM_ARGBUF ((SEM_ARG) sem_arg);
+ const IDESC * UNUSED idesc = abuf->idesc;
+ int cycles = 0;
+ {
+ int referenced = 0;
+ int UNUSED insn_referenced = abuf->written;
+ INT in_src1 = -1;
+ INT in_src2 = -1;
+ in_src1 = FLD (in_src1);
+ in_src2 = FLD (in_src2);
+ referenced |= 1 << 0;
+ referenced |= 1 << 1;
+ cycles += m32rxf_model_m32rx_u_mac (current_cpu, idesc, 0, referenced, in_src1, in_src2);
+ }
+ return cycles;
+#undef FLD
+}
+
+static int
+model_m32rx_macwlo_a (SIM_CPU *current_cpu, void *sem_arg)
+{
+#define FLD(f) abuf->fields.sfmt_machi_a.f
+ const ARGBUF * UNUSED abuf = SEM_ARGBUF ((SEM_ARG) sem_arg);
+ const IDESC * UNUSED idesc = abuf->idesc;
+ int cycles = 0;
+ {
+ int referenced = 0;
+ int UNUSED insn_referenced = abuf->written;
+ INT in_src1 = -1;
+ INT in_src2 = -1;
+ in_src1 = FLD (in_src1);
+ in_src2 = FLD (in_src2);
+ referenced |= 1 << 0;
+ referenced |= 1 << 1;
+ cycles += m32rxf_model_m32rx_u_mac (current_cpu, idesc, 0, referenced, in_src1, in_src2);
+ }
+ return cycles;
+#undef FLD
+}
+
+static int
+model_m32rx_mul (SIM_CPU *current_cpu, void *sem_arg)
+{
+#define FLD(f) abuf->fields.sfmt_add.f
+ const ARGBUF * UNUSED abuf = SEM_ARGBUF ((SEM_ARG) sem_arg);
+ const IDESC * UNUSED idesc = abuf->idesc;
+ int cycles = 0;
+ {
+ int referenced = 0;
+ int UNUSED insn_referenced = abuf->written;
+ INT in_sr = -1;
+ INT in_dr = -1;
+ INT out_dr = -1;
+ in_sr = FLD (in_sr);
+ in_dr = FLD (in_dr);
+ out_dr = FLD (out_dr);
+ referenced |= 1 << 0;
+ referenced |= 1 << 1;
+ referenced |= 1 << 2;
+ cycles += m32rxf_model_m32rx_u_exec (current_cpu, idesc, 0, referenced, in_sr, in_dr, out_dr);
+ }
+ return cycles;
+#undef FLD
+}
+
+static int
+model_m32rx_mulhi_a (SIM_CPU *current_cpu, void *sem_arg)
+{
+#define FLD(f) abuf->fields.sfmt_machi_a.f
+ const ARGBUF * UNUSED abuf = SEM_ARGBUF ((SEM_ARG) sem_arg);
+ const IDESC * UNUSED idesc = abuf->idesc;
+ int cycles = 0;
+ {
+ int referenced = 0;
+ int UNUSED insn_referenced = abuf->written;
+ INT in_src1 = -1;
+ INT in_src2 = -1;
+ in_src1 = FLD (in_src1);
+ in_src2 = FLD (in_src2);
+ referenced |= 1 << 0;
+ referenced |= 1 << 1;
+ cycles += m32rxf_model_m32rx_u_mac (current_cpu, idesc, 0, referenced, in_src1, in_src2);
+ }
+ return cycles;
+#undef FLD
+}
+
+static int
+model_m32rx_mullo_a (SIM_CPU *current_cpu, void *sem_arg)
+{
+#define FLD(f) abuf->fields.sfmt_machi_a.f
+ const ARGBUF * UNUSED abuf = SEM_ARGBUF ((SEM_ARG) sem_arg);
+ const IDESC * UNUSED idesc = abuf->idesc;
+ int cycles = 0;
+ {
+ int referenced = 0;
+ int UNUSED insn_referenced = abuf->written;
+ INT in_src1 = -1;
+ INT in_src2 = -1;
+ in_src1 = FLD (in_src1);
+ in_src2 = FLD (in_src2);
+ referenced |= 1 << 0;
+ referenced |= 1 << 1;
+ cycles += m32rxf_model_m32rx_u_mac (current_cpu, idesc, 0, referenced, in_src1, in_src2);
+ }
+ return cycles;
+#undef FLD
+}
+
+static int
+model_m32rx_mulwhi_a (SIM_CPU *current_cpu, void *sem_arg)
+{
+#define FLD(f) abuf->fields.sfmt_machi_a.f
+ const ARGBUF * UNUSED abuf = SEM_ARGBUF ((SEM_ARG) sem_arg);
+ const IDESC * UNUSED idesc = abuf->idesc;
+ int cycles = 0;
+ {
+ int referenced = 0;
+ int UNUSED insn_referenced = abuf->written;
+ INT in_src1 = -1;
+ INT in_src2 = -1;
+ in_src1 = FLD (in_src1);
+ in_src2 = FLD (in_src2);
+ referenced |= 1 << 0;
+ referenced |= 1 << 1;
+ cycles += m32rxf_model_m32rx_u_mac (current_cpu, idesc, 0, referenced, in_src1, in_src2);
+ }
+ return cycles;
+#undef FLD
+}
+
+static int
+model_m32rx_mulwlo_a (SIM_CPU *current_cpu, void *sem_arg)
+{
+#define FLD(f) abuf->fields.sfmt_machi_a.f
+ const ARGBUF * UNUSED abuf = SEM_ARGBUF ((SEM_ARG) sem_arg);
+ const IDESC * UNUSED idesc = abuf->idesc;
+ int cycles = 0;
+ {
+ int referenced = 0;
+ int UNUSED insn_referenced = abuf->written;
+ INT in_src1 = -1;
+ INT in_src2 = -1;
+ in_src1 = FLD (in_src1);
+ in_src2 = FLD (in_src2);
+ referenced |= 1 << 0;
+ referenced |= 1 << 1;
+ cycles += m32rxf_model_m32rx_u_mac (current_cpu, idesc, 0, referenced, in_src1, in_src2);
+ }
+ return cycles;
+#undef FLD
+}
+
+static int
+model_m32rx_mv (SIM_CPU *current_cpu, void *sem_arg)
+{
+#define FLD(f) abuf->fields.sfmt_ld_plus.f
+ const ARGBUF * UNUSED abuf = SEM_ARGBUF ((SEM_ARG) sem_arg);
+ const IDESC * UNUSED idesc = abuf->idesc;
+ int cycles = 0;
+ {
+ int referenced = 0;
+ int UNUSED insn_referenced = abuf->written;
+ INT in_sr = -1;
+ INT in_dr = -1;
+ INT out_dr = -1;
+ in_sr = FLD (in_sr);
+ out_dr = FLD (out_dr);
+ referenced |= 1 << 0;
+ referenced |= 1 << 2;
+ cycles += m32rxf_model_m32rx_u_exec (current_cpu, idesc, 0, referenced, in_sr, in_dr, out_dr);
+ }
+ return cycles;
+#undef FLD
+}
+
+static int
+model_m32rx_mvfachi_a (SIM_CPU *current_cpu, void *sem_arg)
+{
+#define FLD(f) abuf->fields.sfmt_mvfachi_a.f
+ const ARGBUF * UNUSED abuf = SEM_ARGBUF ((SEM_ARG) sem_arg);
+ const IDESC * UNUSED idesc = abuf->idesc;
+ int cycles = 0;
+ {
+ int referenced = 0;
+ int UNUSED insn_referenced = abuf->written;
+ INT in_sr = -1;
+ INT in_dr = -1;
+ INT out_dr = -1;
+ out_dr = FLD (out_dr);
+ referenced |= 1 << 2;
+ cycles += m32rxf_model_m32rx_u_exec (current_cpu, idesc, 0, referenced, in_sr, in_dr, out_dr);
+ }
+ return cycles;
+#undef FLD
+}
+
+static int
+model_m32rx_mvfaclo_a (SIM_CPU *current_cpu, void *sem_arg)
+{
+#define FLD(f) abuf->fields.sfmt_mvfachi_a.f
+ const ARGBUF * UNUSED abuf = SEM_ARGBUF ((SEM_ARG) sem_arg);
+ const IDESC * UNUSED idesc = abuf->idesc;
+ int cycles = 0;
+ {
+ int referenced = 0;
+ int UNUSED insn_referenced = abuf->written;
+ INT in_sr = -1;
+ INT in_dr = -1;
+ INT out_dr = -1;
+ out_dr = FLD (out_dr);
+ referenced |= 1 << 2;
+ cycles += m32rxf_model_m32rx_u_exec (current_cpu, idesc, 0, referenced, in_sr, in_dr, out_dr);
+ }
+ return cycles;
+#undef FLD
+}
+
+static int
+model_m32rx_mvfacmi_a (SIM_CPU *current_cpu, void *sem_arg)
+{
+#define FLD(f) abuf->fields.sfmt_mvfachi_a.f
+ const ARGBUF * UNUSED abuf = SEM_ARGBUF ((SEM_ARG) sem_arg);
+ const IDESC * UNUSED idesc = abuf->idesc;
+ int cycles = 0;
+ {
+ int referenced = 0;
+ int UNUSED insn_referenced = abuf->written;
+ INT in_sr = -1;
+ INT in_dr = -1;
+ INT out_dr = -1;
+ out_dr = FLD (out_dr);
+ referenced |= 1 << 2;
+ cycles += m32rxf_model_m32rx_u_exec (current_cpu, idesc, 0, referenced, in_sr, in_dr, out_dr);
+ }
+ return cycles;
+#undef FLD
+}
+
+static int
+model_m32rx_mvfc (SIM_CPU *current_cpu, void *sem_arg)
+{
+#define FLD(f) abuf->fields.sfmt_mvfc.f
+ const ARGBUF * UNUSED abuf = SEM_ARGBUF ((SEM_ARG) sem_arg);
+ const IDESC * UNUSED idesc = abuf->idesc;
+ int cycles = 0;
+ {
+ int referenced = 0;
+ int UNUSED insn_referenced = abuf->written;
+ INT in_sr = -1;
+ INT in_dr = -1;
+ INT out_dr = -1;
+ out_dr = FLD (out_dr);
+ referenced |= 1 << 2;
+ cycles += m32rxf_model_m32rx_u_exec (current_cpu, idesc, 0, referenced, in_sr, in_dr, out_dr);
+ }
+ return cycles;
+#undef FLD
+}
+
+static int
+model_m32rx_mvtachi_a (SIM_CPU *current_cpu, void *sem_arg)
+{
+#define FLD(f) abuf->fields.sfmt_mvtachi_a.f
+ const ARGBUF * UNUSED abuf = SEM_ARGBUF ((SEM_ARG) sem_arg);
+ const IDESC * UNUSED idesc = abuf->idesc;
+ int cycles = 0;
+ {
+ int referenced = 0;
+ int UNUSED insn_referenced = abuf->written;
+ INT in_sr = -1;
+ INT in_dr = -1;
+ INT out_dr = -1;
+ in_sr = FLD (in_src1);
+ cycles += m32rxf_model_m32rx_u_exec (current_cpu, idesc, 0, referenced, in_sr, in_dr, out_dr);
+ }
+ return cycles;
+#undef FLD
+}
+
+static int
+model_m32rx_mvtaclo_a (SIM_CPU *current_cpu, void *sem_arg)
+{
+#define FLD(f) abuf->fields.sfmt_mvtachi_a.f
+ const ARGBUF * UNUSED abuf = SEM_ARGBUF ((SEM_ARG) sem_arg);
+ const IDESC * UNUSED idesc = abuf->idesc;
+ int cycles = 0;
+ {
+ int referenced = 0;
+ int UNUSED insn_referenced = abuf->written;
+ INT in_sr = -1;
+ INT in_dr = -1;
+ INT out_dr = -1;
+ in_sr = FLD (in_src1);
+ cycles += m32rxf_model_m32rx_u_exec (current_cpu, idesc, 0, referenced, in_sr, in_dr, out_dr);
+ }
+ return cycles;
+#undef FLD
+}
+
+static int
+model_m32rx_mvtc (SIM_CPU *current_cpu, void *sem_arg)
+{
+#define FLD(f) abuf->fields.sfmt_mvtc.f
+ const ARGBUF * UNUSED abuf = SEM_ARGBUF ((SEM_ARG) sem_arg);
+ const IDESC * UNUSED idesc = abuf->idesc;
+ int cycles = 0;
+ {
+ int referenced = 0;
+ int UNUSED insn_referenced = abuf->written;
+ INT in_sr = -1;
+ INT in_dr = -1;
+ INT out_dr = -1;
+ in_sr = FLD (in_sr);
+ referenced |= 1 << 0;
+ cycles += m32rxf_model_m32rx_u_exec (current_cpu, idesc, 0, referenced, in_sr, in_dr, out_dr);
+ }
+ return cycles;
+#undef FLD
+}
+
+static int
+model_m32rx_neg (SIM_CPU *current_cpu, void *sem_arg)
+{
+#define FLD(f) abuf->fields.sfmt_ld_plus.f
+ const ARGBUF * UNUSED abuf = SEM_ARGBUF ((SEM_ARG) sem_arg);
+ const IDESC * UNUSED idesc = abuf->idesc;
+ int cycles = 0;
+ {
+ int referenced = 0;
+ int UNUSED insn_referenced = abuf->written;
+ INT in_sr = -1;
+ INT in_dr = -1;
+ INT out_dr = -1;
+ in_sr = FLD (in_sr);
+ out_dr = FLD (out_dr);
+ referenced |= 1 << 0;
+ referenced |= 1 << 2;
+ cycles += m32rxf_model_m32rx_u_exec (current_cpu, idesc, 0, referenced, in_sr, in_dr, out_dr);
+ }
+ return cycles;
+#undef FLD
+}
+
+static int
+model_m32rx_nop (SIM_CPU *current_cpu, void *sem_arg)
+{
+#define FLD(f) abuf->fields.fmt_empty.f
+ const ARGBUF * UNUSED abuf = SEM_ARGBUF ((SEM_ARG) sem_arg);
+ const IDESC * UNUSED idesc = abuf->idesc;
+ int cycles = 0;
+ {
+ int referenced = 0;
+ int UNUSED insn_referenced = abuf->written;
+ INT in_sr = -1;
+ INT in_dr = -1;
+ INT out_dr = -1;
+ cycles += m32rxf_model_m32rx_u_exec (current_cpu, idesc, 0, referenced, in_sr, in_dr, out_dr);
+ }
+ return cycles;
+#undef FLD
+}
+
+static int
+model_m32rx_not (SIM_CPU *current_cpu, void *sem_arg)
+{
+#define FLD(f) abuf->fields.sfmt_ld_plus.f
+ const ARGBUF * UNUSED abuf = SEM_ARGBUF ((SEM_ARG) sem_arg);
+ const IDESC * UNUSED idesc = abuf->idesc;
+ int cycles = 0;
+ {
+ int referenced = 0;
+ int UNUSED insn_referenced = abuf->written;
+ INT in_sr = -1;
+ INT in_dr = -1;
+ INT out_dr = -1;
+ in_sr = FLD (in_sr);
+ out_dr = FLD (out_dr);
+ referenced |= 1 << 0;
+ referenced |= 1 << 2;
+ cycles += m32rxf_model_m32rx_u_exec (current_cpu, idesc, 0, referenced, in_sr, in_dr, out_dr);
+ }
+ return cycles;
+#undef FLD
+}
+
+static int
+model_m32rx_rac_dsi (SIM_CPU *current_cpu, void *sem_arg)
+{
+#define FLD(f) abuf->fields.sfmt_rac_dsi.f
+ const ARGBUF * UNUSED abuf = SEM_ARGBUF ((SEM_ARG) sem_arg);
+ const IDESC * UNUSED idesc = abuf->idesc;
+ int cycles = 0;
+ {
+ int referenced = 0;
+ int UNUSED insn_referenced = abuf->written;
+ INT in_src1 = -1;
+ INT in_src2 = -1;
+ cycles += m32rxf_model_m32rx_u_mac (current_cpu, idesc, 0, referenced, in_src1, in_src2);
+ }
+ return cycles;
+#undef FLD
+}
+
+static int
+model_m32rx_rach_dsi (SIM_CPU *current_cpu, void *sem_arg)
+{
+#define FLD(f) abuf->fields.sfmt_rac_dsi.f
+ const ARGBUF * UNUSED abuf = SEM_ARGBUF ((SEM_ARG) sem_arg);
+ const IDESC * UNUSED idesc = abuf->idesc;
+ int cycles = 0;
+ {
+ int referenced = 0;
+ int UNUSED insn_referenced = abuf->written;
+ INT in_src1 = -1;
+ INT in_src2 = -1;
+ cycles += m32rxf_model_m32rx_u_mac (current_cpu, idesc, 0, referenced, in_src1, in_src2);
+ }
+ return cycles;
+#undef FLD
+}
+
+static int
+model_m32rx_rte (SIM_CPU *current_cpu, void *sem_arg)
+{
+#define FLD(f) abuf->fields.fmt_empty.f
+ const ARGBUF * UNUSED abuf = SEM_ARGBUF ((SEM_ARG) sem_arg);
+ const IDESC * UNUSED idesc = abuf->idesc;
+ int cycles = 0;
+ {
+ int referenced = 0;
+ int UNUSED insn_referenced = abuf->written;
+ INT in_sr = -1;
+ INT in_dr = -1;
+ INT out_dr = -1;
+ cycles += m32rxf_model_m32rx_u_exec (current_cpu, idesc, 0, referenced, in_sr, in_dr, out_dr);
+ }
+ return cycles;
+#undef FLD
+}
+
+static int
+model_m32rx_seth (SIM_CPU *current_cpu, void *sem_arg)
+{
+#define FLD(f) abuf->fields.sfmt_seth.f
+ const ARGBUF * UNUSED abuf = SEM_ARGBUF ((SEM_ARG) sem_arg);
+ const IDESC * UNUSED idesc = abuf->idesc;
+ int cycles = 0;
+ {
+ int referenced = 0;
+ int UNUSED insn_referenced = abuf->written;
+ INT in_sr = -1;
+ INT in_dr = -1;
+ INT out_dr = -1;
+ out_dr = FLD (out_dr);
+ referenced |= 1 << 2;
+ cycles += m32rxf_model_m32rx_u_exec (current_cpu, idesc, 0, referenced, in_sr, in_dr, out_dr);
+ }
+ return cycles;
+#undef FLD
+}
+
+static int
+model_m32rx_sll (SIM_CPU *current_cpu, void *sem_arg)
+{
+#define FLD(f) abuf->fields.sfmt_add.f
+ const ARGBUF * UNUSED abuf = SEM_ARGBUF ((SEM_ARG) sem_arg);
+ const IDESC * UNUSED idesc = abuf->idesc;
+ int cycles = 0;
+ {
+ int referenced = 0;
+ int UNUSED insn_referenced = abuf->written;
+ INT in_sr = -1;
+ INT in_dr = -1;
+ INT out_dr = -1;
+ in_sr = FLD (in_sr);
+ in_dr = FLD (in_dr);
+ out_dr = FLD (out_dr);
+ referenced |= 1 << 0;
+ referenced |= 1 << 1;
+ referenced |= 1 << 2;
+ cycles += m32rxf_model_m32rx_u_exec (current_cpu, idesc, 0, referenced, in_sr, in_dr, out_dr);
+ }
+ return cycles;
+#undef FLD
+}
+
+static int
+model_m32rx_sll3 (SIM_CPU *current_cpu, void *sem_arg)
+{
+#define FLD(f) abuf->fields.sfmt_add3.f
+ const ARGBUF * UNUSED abuf = SEM_ARGBUF ((SEM_ARG) sem_arg);
+ const IDESC * UNUSED idesc = abuf->idesc;
+ int cycles = 0;
+ {
+ int referenced = 0;
+ int UNUSED insn_referenced = abuf->written;
+ INT in_sr = -1;
+ INT in_dr = -1;
+ INT out_dr = -1;
+ in_sr = FLD (in_sr);
+ out_dr = FLD (out_dr);
+ referenced |= 1 << 0;
+ referenced |= 1 << 2;
+ cycles += m32rxf_model_m32rx_u_exec (current_cpu, idesc, 0, referenced, in_sr, in_dr, out_dr);
+ }
+ return cycles;
+#undef FLD
+}
+
+static int
+model_m32rx_slli (SIM_CPU *current_cpu, void *sem_arg)
+{
+#define FLD(f) abuf->fields.sfmt_slli.f
+ const ARGBUF * UNUSED abuf = SEM_ARGBUF ((SEM_ARG) sem_arg);
+ const IDESC * UNUSED idesc = abuf->idesc;
+ int cycles = 0;
+ {
+ int referenced = 0;
+ int UNUSED insn_referenced = abuf->written;
+ INT in_sr = -1;
+ INT in_dr = -1;
+ INT out_dr = -1;
+ in_dr = FLD (in_dr);
+ out_dr = FLD (out_dr);
+ referenced |= 1 << 1;
+ referenced |= 1 << 2;
+ cycles += m32rxf_model_m32rx_u_exec (current_cpu, idesc, 0, referenced, in_sr, in_dr, out_dr);
+ }
+ return cycles;
+#undef FLD
+}
+
+static int
+model_m32rx_sra (SIM_CPU *current_cpu, void *sem_arg)
+{
+#define FLD(f) abuf->fields.sfmt_add.f
+ const ARGBUF * UNUSED abuf = SEM_ARGBUF ((SEM_ARG) sem_arg);
+ const IDESC * UNUSED idesc = abuf->idesc;
+ int cycles = 0;
+ {
+ int referenced = 0;
+ int UNUSED insn_referenced = abuf->written;
+ INT in_sr = -1;
+ INT in_dr = -1;
+ INT out_dr = -1;
+ in_sr = FLD (in_sr);
+ in_dr = FLD (in_dr);
+ out_dr = FLD (out_dr);
+ referenced |= 1 << 0;
+ referenced |= 1 << 1;
+ referenced |= 1 << 2;
+ cycles += m32rxf_model_m32rx_u_exec (current_cpu, idesc, 0, referenced, in_sr, in_dr, out_dr);
+ }
+ return cycles;
+#undef FLD
+}
+
+static int
+model_m32rx_sra3 (SIM_CPU *current_cpu, void *sem_arg)
+{
+#define FLD(f) abuf->fields.sfmt_add3.f
+ const ARGBUF * UNUSED abuf = SEM_ARGBUF ((SEM_ARG) sem_arg);
+ const IDESC * UNUSED idesc = abuf->idesc;
+ int cycles = 0;
+ {
+ int referenced = 0;
+ int UNUSED insn_referenced = abuf->written;
+ INT in_sr = -1;
+ INT in_dr = -1;
+ INT out_dr = -1;
+ in_sr = FLD (in_sr);
+ out_dr = FLD (out_dr);
+ referenced |= 1 << 0;
+ referenced |= 1 << 2;
+ cycles += m32rxf_model_m32rx_u_exec (current_cpu, idesc, 0, referenced, in_sr, in_dr, out_dr);
+ }
+ return cycles;
+#undef FLD
+}
+
+static int
+model_m32rx_srai (SIM_CPU *current_cpu, void *sem_arg)
+{
+#define FLD(f) abuf->fields.sfmt_slli.f
+ const ARGBUF * UNUSED abuf = SEM_ARGBUF ((SEM_ARG) sem_arg);
+ const IDESC * UNUSED idesc = abuf->idesc;
+ int cycles = 0;
+ {
+ int referenced = 0;
+ int UNUSED insn_referenced = abuf->written;
+ INT in_sr = -1;
+ INT in_dr = -1;
+ INT out_dr = -1;
+ in_dr = FLD (in_dr);
+ out_dr = FLD (out_dr);
+ referenced |= 1 << 1;
+ referenced |= 1 << 2;
+ cycles += m32rxf_model_m32rx_u_exec (current_cpu, idesc, 0, referenced, in_sr, in_dr, out_dr);
+ }
+ return cycles;
+#undef FLD
+}
+
+static int
+model_m32rx_srl (SIM_CPU *current_cpu, void *sem_arg)
+{
+#define FLD(f) abuf->fields.sfmt_add.f
+ const ARGBUF * UNUSED abuf = SEM_ARGBUF ((SEM_ARG) sem_arg);
+ const IDESC * UNUSED idesc = abuf->idesc;
+ int cycles = 0;
+ {
+ int referenced = 0;
+ int UNUSED insn_referenced = abuf->written;
+ INT in_sr = -1;
+ INT in_dr = -1;
+ INT out_dr = -1;
+ in_sr = FLD (in_sr);
+ in_dr = FLD (in_dr);
+ out_dr = FLD (out_dr);
+ referenced |= 1 << 0;
+ referenced |= 1 << 1;
+ referenced |= 1 << 2;
+ cycles += m32rxf_model_m32rx_u_exec (current_cpu, idesc, 0, referenced, in_sr, in_dr, out_dr);
+ }
+ return cycles;
+#undef FLD
+}
+
+static int
+model_m32rx_srl3 (SIM_CPU *current_cpu, void *sem_arg)
+{
+#define FLD(f) abuf->fields.sfmt_add3.f
+ const ARGBUF * UNUSED abuf = SEM_ARGBUF ((SEM_ARG) sem_arg);
+ const IDESC * UNUSED idesc = abuf->idesc;
+ int cycles = 0;
+ {
+ int referenced = 0;
+ int UNUSED insn_referenced = abuf->written;
+ INT in_sr = -1;
+ INT in_dr = -1;
+ INT out_dr = -1;
+ in_sr = FLD (in_sr);
+ out_dr = FLD (out_dr);
+ referenced |= 1 << 0;
+ referenced |= 1 << 2;
+ cycles += m32rxf_model_m32rx_u_exec (current_cpu, idesc, 0, referenced, in_sr, in_dr, out_dr);
+ }
+ return cycles;
+#undef FLD
+}
+
+static int
+model_m32rx_srli (SIM_CPU *current_cpu, void *sem_arg)
+{
+#define FLD(f) abuf->fields.sfmt_slli.f
+ const ARGBUF * UNUSED abuf = SEM_ARGBUF ((SEM_ARG) sem_arg);
+ const IDESC * UNUSED idesc = abuf->idesc;
+ int cycles = 0;
+ {
+ int referenced = 0;
+ int UNUSED insn_referenced = abuf->written;
+ INT in_sr = -1;
+ INT in_dr = -1;
+ INT out_dr = -1;
+ in_dr = FLD (in_dr);
+ out_dr = FLD (out_dr);
+ referenced |= 1 << 1;
+ referenced |= 1 << 2;
+ cycles += m32rxf_model_m32rx_u_exec (current_cpu, idesc, 0, referenced, in_sr, in_dr, out_dr);
+ }
+ return cycles;
+#undef FLD
+}
+
+static int
+model_m32rx_st (SIM_CPU *current_cpu, void *sem_arg)
+{
+#define FLD(f) abuf->fields.sfmt_st_plus.f
+ const ARGBUF * UNUSED abuf = SEM_ARGBUF ((SEM_ARG) sem_arg);
+ const IDESC * UNUSED idesc = abuf->idesc;
+ int cycles = 0;
+ {
+ int referenced = 0;
+ int UNUSED insn_referenced = abuf->written;
+ INT in_src1 = 0;
+ INT in_src2 = 0;
+ in_src1 = FLD (in_src1);
+ in_src2 = FLD (in_src2);
+ referenced |= 1 << 0;
+ referenced |= 1 << 1;
+ cycles += m32rxf_model_m32rx_u_store (current_cpu, idesc, 0, referenced, in_src1, in_src2);
+ }
+ return cycles;
+#undef FLD
+}
+
+static int
+model_m32rx_st_d (SIM_CPU *current_cpu, void *sem_arg)
+{
+#define FLD(f) abuf->fields.sfmt_st_d.f
+ const ARGBUF * UNUSED abuf = SEM_ARGBUF ((SEM_ARG) sem_arg);
+ const IDESC * UNUSED idesc = abuf->idesc;
+ int cycles = 0;
+ {
+ int referenced = 0;
+ int UNUSED insn_referenced = abuf->written;
+ INT in_src1 = 0;
+ INT in_src2 = 0;
+ in_src1 = FLD (in_src1);
+ in_src2 = FLD (in_src2);
+ referenced |= 1 << 0;
+ referenced |= 1 << 1;
+ cycles += m32rxf_model_m32rx_u_store (current_cpu, idesc, 0, referenced, in_src1, in_src2);
+ }
+ return cycles;
+#undef FLD
+}
+
+static int
+model_m32rx_stb (SIM_CPU *current_cpu, void *sem_arg)
+{
+#define FLD(f) abuf->fields.sfmt_st_plus.f
+ const ARGBUF * UNUSED abuf = SEM_ARGBUF ((SEM_ARG) sem_arg);
+ const IDESC * UNUSED idesc = abuf->idesc;
+ int cycles = 0;
+ {
+ int referenced = 0;
+ int UNUSED insn_referenced = abuf->written;
+ INT in_src1 = 0;
+ INT in_src2 = 0;
+ in_src1 = FLD (in_src1);
+ in_src2 = FLD (in_src2);
+ referenced |= 1 << 0;
+ referenced |= 1 << 1;
+ cycles += m32rxf_model_m32rx_u_store (current_cpu, idesc, 0, referenced, in_src1, in_src2);
+ }
+ return cycles;
+#undef FLD
+}
+
+static int
+model_m32rx_stb_d (SIM_CPU *current_cpu, void *sem_arg)
+{
+#define FLD(f) abuf->fields.sfmt_st_d.f
+ const ARGBUF * UNUSED abuf = SEM_ARGBUF ((SEM_ARG) sem_arg);
+ const IDESC * UNUSED idesc = abuf->idesc;
+ int cycles = 0;
+ {
+ int referenced = 0;
+ int UNUSED insn_referenced = abuf->written;
+ INT in_src1 = 0;
+ INT in_src2 = 0;
+ in_src1 = FLD (in_src1);
+ in_src2 = FLD (in_src2);
+ referenced |= 1 << 0;
+ referenced |= 1 << 1;
+ cycles += m32rxf_model_m32rx_u_store (current_cpu, idesc, 0, referenced, in_src1, in_src2);
+ }
+ return cycles;
+#undef FLD
+}
+
+static int
+model_m32rx_sth (SIM_CPU *current_cpu, void *sem_arg)
+{
+#define FLD(f) abuf->fields.sfmt_st_plus.f
+ const ARGBUF * UNUSED abuf = SEM_ARGBUF ((SEM_ARG) sem_arg);
+ const IDESC * UNUSED idesc = abuf->idesc;
+ int cycles = 0;
+ {
+ int referenced = 0;
+ int UNUSED insn_referenced = abuf->written;
+ INT in_src1 = 0;
+ INT in_src2 = 0;
+ in_src1 = FLD (in_src1);
+ in_src2 = FLD (in_src2);
+ referenced |= 1 << 0;
+ referenced |= 1 << 1;
+ cycles += m32rxf_model_m32rx_u_store (current_cpu, idesc, 0, referenced, in_src1, in_src2);
+ }
+ return cycles;
+#undef FLD
+}
+
+static int
+model_m32rx_sth_d (SIM_CPU *current_cpu, void *sem_arg)
+{
+#define FLD(f) abuf->fields.sfmt_st_d.f
+ const ARGBUF * UNUSED abuf = SEM_ARGBUF ((SEM_ARG) sem_arg);
+ const IDESC * UNUSED idesc = abuf->idesc;
+ int cycles = 0;
+ {
+ int referenced = 0;
+ int UNUSED insn_referenced = abuf->written;
+ INT in_src1 = 0;
+ INT in_src2 = 0;
+ in_src1 = FLD (in_src1);
+ in_src2 = FLD (in_src2);
+ referenced |= 1 << 0;
+ referenced |= 1 << 1;
+ cycles += m32rxf_model_m32rx_u_store (current_cpu, idesc, 0, referenced, in_src1, in_src2);
+ }
+ return cycles;
+#undef FLD
+}
+
+static int
+model_m32rx_st_plus (SIM_CPU *current_cpu, void *sem_arg)
+{
+#define FLD(f) abuf->fields.sfmt_st_plus.f
+ const ARGBUF * UNUSED abuf = SEM_ARGBUF ((SEM_ARG) sem_arg);
+ const IDESC * UNUSED idesc = abuf->idesc;
+ int cycles = 0;
+ {
+ int referenced = 0;
+ int UNUSED insn_referenced = abuf->written;
+ INT in_src1 = 0;
+ INT in_src2 = 0;
+ in_src1 = FLD (in_src1);
+ in_src2 = FLD (in_src2);
+ referenced |= 1 << 0;
+ referenced |= 1 << 1;
+ cycles += m32rxf_model_m32rx_u_store (current_cpu, idesc, 0, referenced, in_src1, in_src2);
+ }
+ {
+ int referenced = 0;
+ int UNUSED insn_referenced = abuf->written;
+ INT in_sr = -1;
+ INT in_dr = -1;
+ INT out_dr = -1;
+ in_dr = FLD (in_src2);
+ out_dr = FLD (out_src2);
+ cycles += m32rxf_model_m32rx_u_exec (current_cpu, idesc, 1, referenced, in_sr, in_dr, out_dr);
+ }
+ return cycles;
+#undef FLD
+}
+
+static int
+model_m32rx_st_minus (SIM_CPU *current_cpu, void *sem_arg)
+{
+#define FLD(f) abuf->fields.sfmt_st_plus.f
+ const ARGBUF * UNUSED abuf = SEM_ARGBUF ((SEM_ARG) sem_arg);
+ const IDESC * UNUSED idesc = abuf->idesc;
+ int cycles = 0;
+ {
+ int referenced = 0;
+ int UNUSED insn_referenced = abuf->written;
+ INT in_src1 = 0;
+ INT in_src2 = 0;
+ in_src1 = FLD (in_src1);
+ in_src2 = FLD (in_src2);
+ referenced |= 1 << 0;
+ referenced |= 1 << 1;
+ cycles += m32rxf_model_m32rx_u_store (current_cpu, idesc, 0, referenced, in_src1, in_src2);
+ }
+ {
+ int referenced = 0;
+ int UNUSED insn_referenced = abuf->written;
+ INT in_sr = -1;
+ INT in_dr = -1;
+ INT out_dr = -1;
+ in_dr = FLD (in_src2);
+ out_dr = FLD (out_src2);
+ cycles += m32rxf_model_m32rx_u_exec (current_cpu, idesc, 1, referenced, in_sr, in_dr, out_dr);
+ }
+ return cycles;
+#undef FLD
+}
+
+static int
+model_m32rx_sub (SIM_CPU *current_cpu, void *sem_arg)
+{
+#define FLD(f) abuf->fields.sfmt_add.f
+ const ARGBUF * UNUSED abuf = SEM_ARGBUF ((SEM_ARG) sem_arg);
+ const IDESC * UNUSED idesc = abuf->idesc;
+ int cycles = 0;
+ {
+ int referenced = 0;
+ int UNUSED insn_referenced = abuf->written;
+ INT in_sr = -1;
+ INT in_dr = -1;
+ INT out_dr = -1;
+ in_sr = FLD (in_sr);
+ in_dr = FLD (in_dr);
+ out_dr = FLD (out_dr);
+ referenced |= 1 << 0;
+ referenced |= 1 << 1;
+ referenced |= 1 << 2;
+ cycles += m32rxf_model_m32rx_u_exec (current_cpu, idesc, 0, referenced, in_sr, in_dr, out_dr);
+ }
+ return cycles;
+#undef FLD
+}
+
+static int
+model_m32rx_subv (SIM_CPU *current_cpu, void *sem_arg)
+{
+#define FLD(f) abuf->fields.sfmt_add.f
+ const ARGBUF * UNUSED abuf = SEM_ARGBUF ((SEM_ARG) sem_arg);
+ const IDESC * UNUSED idesc = abuf->idesc;
+ int cycles = 0;
+ {
+ int referenced = 0;
+ int UNUSED insn_referenced = abuf->written;
+ INT in_sr = -1;
+ INT in_dr = -1;
+ INT out_dr = -1;
+ in_sr = FLD (in_sr);
+ in_dr = FLD (in_dr);
+ out_dr = FLD (out_dr);
+ referenced |= 1 << 0;
+ referenced |= 1 << 1;
+ referenced |= 1 << 2;
+ cycles += m32rxf_model_m32rx_u_exec (current_cpu, idesc, 0, referenced, in_sr, in_dr, out_dr);
+ }
+ return cycles;
+#undef FLD
+}
+
+static int
+model_m32rx_subx (SIM_CPU *current_cpu, void *sem_arg)
+{
+#define FLD(f) abuf->fields.sfmt_add.f
+ const ARGBUF * UNUSED abuf = SEM_ARGBUF ((SEM_ARG) sem_arg);
+ const IDESC * UNUSED idesc = abuf->idesc;
+ int cycles = 0;
+ {
+ int referenced = 0;
+ int UNUSED insn_referenced = abuf->written;
+ INT in_sr = -1;
+ INT in_dr = -1;
+ INT out_dr = -1;
+ in_sr = FLD (in_sr);
+ in_dr = FLD (in_dr);
+ out_dr = FLD (out_dr);
+ referenced |= 1 << 0;
+ referenced |= 1 << 1;
+ referenced |= 1 << 2;
+ cycles += m32rxf_model_m32rx_u_exec (current_cpu, idesc, 0, referenced, in_sr, in_dr, out_dr);
+ }
+ return cycles;
+#undef FLD
+}
+
+static int
+model_m32rx_trap (SIM_CPU *current_cpu, void *sem_arg)
+{
+#define FLD(f) abuf->fields.sfmt_trap.f
+ const ARGBUF * UNUSED abuf = SEM_ARGBUF ((SEM_ARG) sem_arg);
+ const IDESC * UNUSED idesc = abuf->idesc;
+ int cycles = 0;
+ {
+ int referenced = 0;
+ int UNUSED insn_referenced = abuf->written;
+ INT in_sr = -1;
+ INT in_dr = -1;
+ INT out_dr = -1;
+ cycles += m32rxf_model_m32rx_u_exec (current_cpu, idesc, 0, referenced, in_sr, in_dr, out_dr);
+ }
+ return cycles;
+#undef FLD
+}
+
+static int
+model_m32rx_unlock (SIM_CPU *current_cpu, void *sem_arg)
+{
+#define FLD(f) abuf->fields.sfmt_st_plus.f
+ const ARGBUF * UNUSED abuf = SEM_ARGBUF ((SEM_ARG) sem_arg);
+ const IDESC * UNUSED idesc = abuf->idesc;
+ int cycles = 0;
+ {
+ int referenced = 0;
+ int UNUSED insn_referenced = abuf->written;
+ INT in_sr = 0;
+ INT out_dr = 0;
+ cycles += m32rxf_model_m32rx_u_load (current_cpu, idesc, 0, referenced, in_sr, out_dr);
+ }
+ return cycles;
+#undef FLD
+}
+
+static int
+model_m32rx_satb (SIM_CPU *current_cpu, void *sem_arg)
+{
+#define FLD(f) abuf->fields.sfmt_ld_plus.f
+ const ARGBUF * UNUSED abuf = SEM_ARGBUF ((SEM_ARG) sem_arg);
+ const IDESC * UNUSED idesc = abuf->idesc;
+ int cycles = 0;
+ {
+ int referenced = 0;
+ int UNUSED insn_referenced = abuf->written;
+ INT in_sr = -1;
+ INT in_dr = -1;
+ INT out_dr = -1;
+ in_sr = FLD (in_sr);
+ out_dr = FLD (out_dr);
+ referenced |= 1 << 0;
+ referenced |= 1 << 2;
+ cycles += m32rxf_model_m32rx_u_exec (current_cpu, idesc, 0, referenced, in_sr, in_dr, out_dr);
+ }
+ return cycles;
+#undef FLD
+}
+
+static int
+model_m32rx_sath (SIM_CPU *current_cpu, void *sem_arg)
+{
+#define FLD(f) abuf->fields.sfmt_ld_plus.f
+ const ARGBUF * UNUSED abuf = SEM_ARGBUF ((SEM_ARG) sem_arg);
+ const IDESC * UNUSED idesc = abuf->idesc;
+ int cycles = 0;
+ {
+ int referenced = 0;
+ int UNUSED insn_referenced = abuf->written;
+ INT in_sr = -1;
+ INT in_dr = -1;
+ INT out_dr = -1;
+ in_sr = FLD (in_sr);
+ out_dr = FLD (out_dr);
+ referenced |= 1 << 0;
+ referenced |= 1 << 2;
+ cycles += m32rxf_model_m32rx_u_exec (current_cpu, idesc, 0, referenced, in_sr, in_dr, out_dr);
+ }
+ return cycles;
+#undef FLD
+}
+
+static int
+model_m32rx_sat (SIM_CPU *current_cpu, void *sem_arg)
+{
+#define FLD(f) abuf->fields.sfmt_ld_plus.f
+ const ARGBUF * UNUSED abuf = SEM_ARGBUF ((SEM_ARG) sem_arg);
+ const IDESC * UNUSED idesc = abuf->idesc;
+ int cycles = 0;
+ {
+ int referenced = 0;
+ int UNUSED insn_referenced = abuf->written;
+ INT in_sr = -1;
+ INT in_dr = -1;
+ INT out_dr = -1;
+ in_sr = FLD (in_sr);
+ out_dr = FLD (out_dr);
+ if (insn_referenced & (1 << 1)) referenced |= 1 << 0;
+ referenced |= 1 << 2;
+ cycles += m32rxf_model_m32rx_u_exec (current_cpu, idesc, 0, referenced, in_sr, in_dr, out_dr);
+ }
+ return cycles;
+#undef FLD
+}
+
+static int
+model_m32rx_pcmpbz (SIM_CPU *current_cpu, void *sem_arg)
+{
+#define FLD(f) abuf->fields.sfmt_st_plus.f
+ const ARGBUF * UNUSED abuf = SEM_ARGBUF ((SEM_ARG) sem_arg);
+ const IDESC * UNUSED idesc = abuf->idesc;
+ int cycles = 0;
+ {
+ int referenced = 0;
+ int UNUSED insn_referenced = abuf->written;
+ INT in_src1 = -1;
+ INT in_src2 = -1;
+ in_src2 = FLD (in_src2);
+ referenced |= 1 << 1;
+ cycles += m32rxf_model_m32rx_u_cmp (current_cpu, idesc, 0, referenced, in_src1, in_src2);
+ }
+ return cycles;
+#undef FLD
+}
+
+static int
+model_m32rx_sadd (SIM_CPU *current_cpu, void *sem_arg)
+{
+#define FLD(f) abuf->fields.fmt_empty.f
+ const ARGBUF * UNUSED abuf = SEM_ARGBUF ((SEM_ARG) sem_arg);
+ const IDESC * UNUSED idesc = abuf->idesc;
+ int cycles = 0;
+ {
+ int referenced = 0;
+ int UNUSED insn_referenced = abuf->written;
+ INT in_src1 = -1;
+ INT in_src2 = -1;
+ cycles += m32rxf_model_m32rx_u_mac (current_cpu, idesc, 0, referenced, in_src1, in_src2);
+ }
+ return cycles;
+#undef FLD
+}
+
+static int
+model_m32rx_macwu1 (SIM_CPU *current_cpu, void *sem_arg)
+{
+#define FLD(f) abuf->fields.sfmt_st_plus.f
+ const ARGBUF * UNUSED abuf = SEM_ARGBUF ((SEM_ARG) sem_arg);
+ const IDESC * UNUSED idesc = abuf->idesc;
+ int cycles = 0;
+ {
+ int referenced = 0;
+ int UNUSED insn_referenced = abuf->written;
+ INT in_src1 = -1;
+ INT in_src2 = -1;
+ in_src1 = FLD (in_src1);
+ in_src2 = FLD (in_src2);
+ referenced |= 1 << 0;
+ referenced |= 1 << 1;
+ cycles += m32rxf_model_m32rx_u_mac (current_cpu, idesc, 0, referenced, in_src1, in_src2);
+ }
+ return cycles;
+#undef FLD
+}
+
+static int
+model_m32rx_msblo (SIM_CPU *current_cpu, void *sem_arg)
+{
+#define FLD(f) abuf->fields.sfmt_st_plus.f
+ const ARGBUF * UNUSED abuf = SEM_ARGBUF ((SEM_ARG) sem_arg);
+ const IDESC * UNUSED idesc = abuf->idesc;
+ int cycles = 0;
+ {
+ int referenced = 0;
+ int UNUSED insn_referenced = abuf->written;
+ INT in_src1 = -1;
+ INT in_src2 = -1;
+ in_src1 = FLD (in_src1);
+ in_src2 = FLD (in_src2);
+ referenced |= 1 << 0;
+ referenced |= 1 << 1;
+ cycles += m32rxf_model_m32rx_u_mac (current_cpu, idesc, 0, referenced, in_src1, in_src2);
+ }
+ return cycles;
+#undef FLD
+}
+
+static int
+model_m32rx_mulwu1 (SIM_CPU *current_cpu, void *sem_arg)
+{
+#define FLD(f) abuf->fields.sfmt_st_plus.f
+ const ARGBUF * UNUSED abuf = SEM_ARGBUF ((SEM_ARG) sem_arg);
+ const IDESC * UNUSED idesc = abuf->idesc;
+ int cycles = 0;
+ {
+ int referenced = 0;
+ int UNUSED insn_referenced = abuf->written;
+ INT in_src1 = -1;
+ INT in_src2 = -1;
+ in_src1 = FLD (in_src1);
+ in_src2 = FLD (in_src2);
+ referenced |= 1 << 0;
+ referenced |= 1 << 1;
+ cycles += m32rxf_model_m32rx_u_mac (current_cpu, idesc, 0, referenced, in_src1, in_src2);
+ }
+ return cycles;
+#undef FLD
+}
+
+static int
+model_m32rx_maclh1 (SIM_CPU *current_cpu, void *sem_arg)
+{
+#define FLD(f) abuf->fields.sfmt_st_plus.f
+ const ARGBUF * UNUSED abuf = SEM_ARGBUF ((SEM_ARG) sem_arg);
+ const IDESC * UNUSED idesc = abuf->idesc;
+ int cycles = 0;
+ {
+ int referenced = 0;
+ int UNUSED insn_referenced = abuf->written;
+ INT in_src1 = -1;
+ INT in_src2 = -1;
+ in_src1 = FLD (in_src1);
+ in_src2 = FLD (in_src2);
+ referenced |= 1 << 0;
+ referenced |= 1 << 1;
+ cycles += m32rxf_model_m32rx_u_mac (current_cpu, idesc, 0, referenced, in_src1, in_src2);
+ }
+ return cycles;
+#undef FLD
+}
+
+static int
+model_m32rx_sc (SIM_CPU *current_cpu, void *sem_arg)
+{
+#define FLD(f) abuf->fields.fmt_empty.f
+ const ARGBUF * UNUSED abuf = SEM_ARGBUF ((SEM_ARG) sem_arg);
+ const IDESC * UNUSED idesc = abuf->idesc;
+ int cycles = 0;
+ {
+ int referenced = 0;
+ int UNUSED insn_referenced = abuf->written;
+ INT in_sr = -1;
+ INT in_dr = -1;
+ INT out_dr = -1;
+ cycles += m32rxf_model_m32rx_u_exec (current_cpu, idesc, 0, referenced, in_sr, in_dr, out_dr);
+ }
+ return cycles;
+#undef FLD
+}
+
+static int
+model_m32rx_snc (SIM_CPU *current_cpu, void *sem_arg)
+{
+#define FLD(f) abuf->fields.fmt_empty.f
+ const ARGBUF * UNUSED abuf = SEM_ARGBUF ((SEM_ARG) sem_arg);
+ const IDESC * UNUSED idesc = abuf->idesc;
+ int cycles = 0;
+ {
+ int referenced = 0;
+ int UNUSED insn_referenced = abuf->written;
+ INT in_sr = -1;
+ INT in_dr = -1;
+ INT out_dr = -1;
+ cycles += m32rxf_model_m32rx_u_exec (current_cpu, idesc, 0, referenced, in_sr, in_dr, out_dr);
+ }
+ return cycles;
+#undef FLD
+}
+
+/* We assume UNIT_NONE == 0 because the tables don't always terminate
+ entries with it. */
+
+/* Model timing data for `m32rx'. */
+
+static const INSN_TIMING m32rx_timing[] = {
+ { M32RXF_INSN_X_INVALID, 0, { { (int) UNIT_M32RX_U_EXEC, 1, 1 } } },
+ { M32RXF_INSN_X_AFTER, 0, { { (int) UNIT_M32RX_U_EXEC, 1, 1 } } },
+ { M32RXF_INSN_X_BEFORE, 0, { { (int) UNIT_M32RX_U_EXEC, 1, 1 } } },
+ { M32RXF_INSN_X_CTI_CHAIN, 0, { { (int) UNIT_M32RX_U_EXEC, 1, 1 } } },
+ { M32RXF_INSN_X_CHAIN, 0, { { (int) UNIT_M32RX_U_EXEC, 1, 1 } } },
+ { M32RXF_INSN_X_BEGIN, 0, { { (int) UNIT_M32RX_U_EXEC, 1, 1 } } },
+ { M32RXF_INSN_ADD, model_m32rx_add, { { (int) UNIT_M32RX_U_EXEC, 1, 1 } } },
+ { M32RXF_INSN_ADD3, model_m32rx_add3, { { (int) UNIT_M32RX_U_EXEC, 1, 1 } } },
+ { M32RXF_INSN_AND, model_m32rx_and, { { (int) UNIT_M32RX_U_EXEC, 1, 1 } } },
+ { M32RXF_INSN_AND3, model_m32rx_and3, { { (int) UNIT_M32RX_U_EXEC, 1, 1 } } },
+ { M32RXF_INSN_OR, model_m32rx_or, { { (int) UNIT_M32RX_U_EXEC, 1, 1 } } },
+ { M32RXF_INSN_OR3, model_m32rx_or3, { { (int) UNIT_M32RX_U_EXEC, 1, 1 } } },
+ { M32RXF_INSN_XOR, model_m32rx_xor, { { (int) UNIT_M32RX_U_EXEC, 1, 1 } } },
+ { M32RXF_INSN_XOR3, model_m32rx_xor3, { { (int) UNIT_M32RX_U_EXEC, 1, 1 } } },
+ { M32RXF_INSN_ADDI, model_m32rx_addi, { { (int) UNIT_M32RX_U_EXEC, 1, 1 } } },
+ { M32RXF_INSN_ADDV, model_m32rx_addv, { { (int) UNIT_M32RX_U_EXEC, 1, 1 } } },
+ { M32RXF_INSN_ADDV3, model_m32rx_addv3, { { (int) UNIT_M32RX_U_EXEC, 1, 1 } } },
+ { M32RXF_INSN_ADDX, model_m32rx_addx, { { (int) UNIT_M32RX_U_EXEC, 1, 1 } } },
+ { M32RXF_INSN_BC8, model_m32rx_bc8, { { (int) UNIT_M32RX_U_CTI, 1, 1 } } },
+ { M32RXF_INSN_BC24, model_m32rx_bc24, { { (int) UNIT_M32RX_U_CTI, 1, 1 } } },
+ { M32RXF_INSN_BEQ, model_m32rx_beq, { { (int) UNIT_M32RX_U_CTI, 1, 1 }, { (int) UNIT_M32RX_U_CMP, 1, 0 } } },
+ { M32RXF_INSN_BEQZ, model_m32rx_beqz, { { (int) UNIT_M32RX_U_CTI, 1, 1 }, { (int) UNIT_M32RX_U_CMP, 1, 0 } } },
+ { M32RXF_INSN_BGEZ, model_m32rx_bgez, { { (int) UNIT_M32RX_U_CTI, 1, 1 }, { (int) UNIT_M32RX_U_CMP, 1, 0 } } },
+ { M32RXF_INSN_BGTZ, model_m32rx_bgtz, { { (int) UNIT_M32RX_U_CTI, 1, 1 }, { (int) UNIT_M32RX_U_CMP, 1, 0 } } },
+ { M32RXF_INSN_BLEZ, model_m32rx_blez, { { (int) UNIT_M32RX_U_CTI, 1, 1 }, { (int) UNIT_M32RX_U_CMP, 1, 0 } } },
+ { M32RXF_INSN_BLTZ, model_m32rx_bltz, { { (int) UNIT_M32RX_U_CTI, 1, 1 }, { (int) UNIT_M32RX_U_CMP, 1, 0 } } },
+ { M32RXF_INSN_BNEZ, model_m32rx_bnez, { { (int) UNIT_M32RX_U_CTI, 1, 1 }, { (int) UNIT_M32RX_U_CMP, 1, 0 } } },
+ { M32RXF_INSN_BL8, model_m32rx_bl8, { { (int) UNIT_M32RX_U_CTI, 1, 1 } } },
+ { M32RXF_INSN_BL24, model_m32rx_bl24, { { (int) UNIT_M32RX_U_CTI, 1, 1 } } },
+ { M32RXF_INSN_BCL8, model_m32rx_bcl8, { { (int) UNIT_M32RX_U_CTI, 1, 1 } } },
+ { M32RXF_INSN_BCL24, model_m32rx_bcl24, { { (int) UNIT_M32RX_U_CTI, 1, 1 } } },
+ { M32RXF_INSN_BNC8, model_m32rx_bnc8, { { (int) UNIT_M32RX_U_CTI, 1, 1 } } },
+ { M32RXF_INSN_BNC24, model_m32rx_bnc24, { { (int) UNIT_M32RX_U_CTI, 1, 1 } } },
+ { M32RXF_INSN_BNE, model_m32rx_bne, { { (int) UNIT_M32RX_U_CTI, 1, 1 }, { (int) UNIT_M32RX_U_CMP, 1, 0 } } },
+ { M32RXF_INSN_BRA8, model_m32rx_bra8, { { (int) UNIT_M32RX_U_CTI, 1, 1 } } },
+ { M32RXF_INSN_BRA24, model_m32rx_bra24, { { (int) UNIT_M32RX_U_CTI, 1, 1 } } },
+ { M32RXF_INSN_BNCL8, model_m32rx_bncl8, { { (int) UNIT_M32RX_U_CTI, 1, 1 } } },
+ { M32RXF_INSN_BNCL24, model_m32rx_bncl24, { { (int) UNIT_M32RX_U_CTI, 1, 1 } } },
+ { M32RXF_INSN_CMP, model_m32rx_cmp, { { (int) UNIT_M32RX_U_CMP, 1, 1 } } },
+ { M32RXF_INSN_CMPI, model_m32rx_cmpi, { { (int) UNIT_M32RX_U_CMP, 1, 1 } } },
+ { M32RXF_INSN_CMPU, model_m32rx_cmpu, { { (int) UNIT_M32RX_U_CMP, 1, 1 } } },
+ { M32RXF_INSN_CMPUI, model_m32rx_cmpui, { { (int) UNIT_M32RX_U_CMP, 1, 1 } } },
+ { M32RXF_INSN_CMPEQ, model_m32rx_cmpeq, { { (int) UNIT_M32RX_U_CMP, 1, 1 } } },
+ { M32RXF_INSN_CMPZ, model_m32rx_cmpz, { { (int) UNIT_M32RX_U_CMP, 1, 1 } } },
+ { M32RXF_INSN_DIV, model_m32rx_div, { { (int) UNIT_M32RX_U_EXEC, 1, 37 } } },
+ { M32RXF_INSN_DIVU, model_m32rx_divu, { { (int) UNIT_M32RX_U_EXEC, 1, 37 } } },
+ { M32RXF_INSN_REM, model_m32rx_rem, { { (int) UNIT_M32RX_U_EXEC, 1, 37 } } },
+ { M32RXF_INSN_REMU, model_m32rx_remu, { { (int) UNIT_M32RX_U_EXEC, 1, 37 } } },
+ { M32RXF_INSN_DIVH, model_m32rx_divh, { { (int) UNIT_M32RX_U_EXEC, 1, 21 } } },
+ { M32RXF_INSN_JC, model_m32rx_jc, { { (int) UNIT_M32RX_U_CTI, 1, 1 } } },
+ { M32RXF_INSN_JNC, model_m32rx_jnc, { { (int) UNIT_M32RX_U_CTI, 1, 1 } } },
+ { M32RXF_INSN_JL, model_m32rx_jl, { { (int) UNIT_M32RX_U_CTI, 1, 1 } } },
+ { M32RXF_INSN_JMP, model_m32rx_jmp, { { (int) UNIT_M32RX_U_CTI, 1, 1 } } },
+ { M32RXF_INSN_LD, model_m32rx_ld, { { (int) UNIT_M32RX_U_LOAD, 1, 1 } } },
+ { M32RXF_INSN_LD_D, model_m32rx_ld_d, { { (int) UNIT_M32RX_U_LOAD, 1, 2 } } },
+ { M32RXF_INSN_LDB, model_m32rx_ldb, { { (int) UNIT_M32RX_U_LOAD, 1, 1 } } },
+ { M32RXF_INSN_LDB_D, model_m32rx_ldb_d, { { (int) UNIT_M32RX_U_LOAD, 1, 2 } } },
+ { M32RXF_INSN_LDH, model_m32rx_ldh, { { (int) UNIT_M32RX_U_LOAD, 1, 1 } } },
+ { M32RXF_INSN_LDH_D, model_m32rx_ldh_d, { { (int) UNIT_M32RX_U_LOAD, 1, 2 } } },
+ { M32RXF_INSN_LDUB, model_m32rx_ldub, { { (int) UNIT_M32RX_U_LOAD, 1, 1 } } },
+ { M32RXF_INSN_LDUB_D, model_m32rx_ldub_d, { { (int) UNIT_M32RX_U_LOAD, 1, 2 } } },
+ { M32RXF_INSN_LDUH, model_m32rx_lduh, { { (int) UNIT_M32RX_U_LOAD, 1, 1 } } },
+ { M32RXF_INSN_LDUH_D, model_m32rx_lduh_d, { { (int) UNIT_M32RX_U_LOAD, 1, 2 } } },
+ { M32RXF_INSN_LD_PLUS, model_m32rx_ld_plus, { { (int) UNIT_M32RX_U_LOAD, 1, 1 }, { (int) UNIT_M32RX_U_EXEC, 1, 0 } } },
+ { M32RXF_INSN_LD24, model_m32rx_ld24, { { (int) UNIT_M32RX_U_EXEC, 1, 1 } } },
+ { M32RXF_INSN_LDI8, model_m32rx_ldi8, { { (int) UNIT_M32RX_U_EXEC, 1, 1 } } },
+ { M32RXF_INSN_LDI16, model_m32rx_ldi16, { { (int) UNIT_M32RX_U_EXEC, 1, 1 } } },
+ { M32RXF_INSN_LOCK, model_m32rx_lock, { { (int) UNIT_M32RX_U_LOAD, 1, 1 } } },
+ { M32RXF_INSN_MACHI_A, model_m32rx_machi_a, { { (int) UNIT_M32RX_U_MAC, 1, 1 } } },
+ { M32RXF_INSN_MACLO_A, model_m32rx_maclo_a, { { (int) UNIT_M32RX_U_MAC, 1, 1 } } },
+ { M32RXF_INSN_MACWHI_A, model_m32rx_macwhi_a, { { (int) UNIT_M32RX_U_MAC, 1, 1 } } },
+ { M32RXF_INSN_MACWLO_A, model_m32rx_macwlo_a, { { (int) UNIT_M32RX_U_MAC, 1, 1 } } },
+ { M32RXF_INSN_MUL, model_m32rx_mul, { { (int) UNIT_M32RX_U_EXEC, 1, 4 } } },
+ { M32RXF_INSN_MULHI_A, model_m32rx_mulhi_a, { { (int) UNIT_M32RX_U_MAC, 1, 1 } } },
+ { M32RXF_INSN_MULLO_A, model_m32rx_mullo_a, { { (int) UNIT_M32RX_U_MAC, 1, 1 } } },
+ { M32RXF_INSN_MULWHI_A, model_m32rx_mulwhi_a, { { (int) UNIT_M32RX_U_MAC, 1, 1 } } },
+ { M32RXF_INSN_MULWLO_A, model_m32rx_mulwlo_a, { { (int) UNIT_M32RX_U_MAC, 1, 1 } } },
+ { M32RXF_INSN_MV, model_m32rx_mv, { { (int) UNIT_M32RX_U_EXEC, 1, 1 } } },
+ { M32RXF_INSN_MVFACHI_A, model_m32rx_mvfachi_a, { { (int) UNIT_M32RX_U_EXEC, 1, 2 } } },
+ { M32RXF_INSN_MVFACLO_A, model_m32rx_mvfaclo_a, { { (int) UNIT_M32RX_U_EXEC, 1, 2 } } },
+ { M32RXF_INSN_MVFACMI_A, model_m32rx_mvfacmi_a, { { (int) UNIT_M32RX_U_EXEC, 1, 2 } } },
+ { M32RXF_INSN_MVFC, model_m32rx_mvfc, { { (int) UNIT_M32RX_U_EXEC, 1, 1 } } },
+ { M32RXF_INSN_MVTACHI_A, model_m32rx_mvtachi_a, { { (int) UNIT_M32RX_U_EXEC, 1, 1 } } },
+ { M32RXF_INSN_MVTACLO_A, model_m32rx_mvtaclo_a, { { (int) UNIT_M32RX_U_EXEC, 1, 1 } } },
+ { M32RXF_INSN_MVTC, model_m32rx_mvtc, { { (int) UNIT_M32RX_U_EXEC, 1, 1 } } },
+ { M32RXF_INSN_NEG, model_m32rx_neg, { { (int) UNIT_M32RX_U_EXEC, 1, 1 } } },
+ { M32RXF_INSN_NOP, model_m32rx_nop, { { (int) UNIT_M32RX_U_EXEC, 1, 0 } } },
+ { M32RXF_INSN_NOT, model_m32rx_not, { { (int) UNIT_M32RX_U_EXEC, 1, 1 } } },
+ { M32RXF_INSN_RAC_DSI, model_m32rx_rac_dsi, { { (int) UNIT_M32RX_U_MAC, 1, 1 } } },
+ { M32RXF_INSN_RACH_DSI, model_m32rx_rach_dsi, { { (int) UNIT_M32RX_U_MAC, 1, 1 } } },
+ { M32RXF_INSN_RTE, model_m32rx_rte, { { (int) UNIT_M32RX_U_EXEC, 1, 1 } } },
+ { M32RXF_INSN_SETH, model_m32rx_seth, { { (int) UNIT_M32RX_U_EXEC, 1, 1 } } },
+ { M32RXF_INSN_SLL, model_m32rx_sll, { { (int) UNIT_M32RX_U_EXEC, 1, 1 } } },
+ { M32RXF_INSN_SLL3, model_m32rx_sll3, { { (int) UNIT_M32RX_U_EXEC, 1, 1 } } },
+ { M32RXF_INSN_SLLI, model_m32rx_slli, { { (int) UNIT_M32RX_U_EXEC, 1, 1 } } },
+ { M32RXF_INSN_SRA, model_m32rx_sra, { { (int) UNIT_M32RX_U_EXEC, 1, 1 } } },
+ { M32RXF_INSN_SRA3, model_m32rx_sra3, { { (int) UNIT_M32RX_U_EXEC, 1, 1 } } },
+ { M32RXF_INSN_SRAI, model_m32rx_srai, { { (int) UNIT_M32RX_U_EXEC, 1, 1 } } },
+ { M32RXF_INSN_SRL, model_m32rx_srl, { { (int) UNIT_M32RX_U_EXEC, 1, 1 } } },
+ { M32RXF_INSN_SRL3, model_m32rx_srl3, { { (int) UNIT_M32RX_U_EXEC, 1, 1 } } },
+ { M32RXF_INSN_SRLI, model_m32rx_srli, { { (int) UNIT_M32RX_U_EXEC, 1, 1 } } },
+ { M32RXF_INSN_ST, model_m32rx_st, { { (int) UNIT_M32RX_U_STORE, 1, 1 } } },
+ { M32RXF_INSN_ST_D, model_m32rx_st_d, { { (int) UNIT_M32RX_U_STORE, 1, 2 } } },
+ { M32RXF_INSN_STB, model_m32rx_stb, { { (int) UNIT_M32RX_U_STORE, 1, 1 } } },
+ { M32RXF_INSN_STB_D, model_m32rx_stb_d, { { (int) UNIT_M32RX_U_STORE, 1, 2 } } },
+ { M32RXF_INSN_STH, model_m32rx_sth, { { (int) UNIT_M32RX_U_STORE, 1, 1 } } },
+ { M32RXF_INSN_STH_D, model_m32rx_sth_d, { { (int) UNIT_M32RX_U_STORE, 1, 2 } } },
+ { M32RXF_INSN_ST_PLUS, model_m32rx_st_plus, { { (int) UNIT_M32RX_U_STORE, 1, 1 }, { (int) UNIT_M32RX_U_EXEC, 1, 0 } } },
+ { M32RXF_INSN_ST_MINUS, model_m32rx_st_minus, { { (int) UNIT_M32RX_U_STORE, 1, 1 }, { (int) UNIT_M32RX_U_EXEC, 1, 0 } } },
+ { M32RXF_INSN_SUB, model_m32rx_sub, { { (int) UNIT_M32RX_U_EXEC, 1, 1 } } },
+ { M32RXF_INSN_SUBV, model_m32rx_subv, { { (int) UNIT_M32RX_U_EXEC, 1, 1 } } },
+ { M32RXF_INSN_SUBX, model_m32rx_subx, { { (int) UNIT_M32RX_U_EXEC, 1, 1 } } },
+ { M32RXF_INSN_TRAP, model_m32rx_trap, { { (int) UNIT_M32RX_U_EXEC, 1, 1 } } },
+ { M32RXF_INSN_UNLOCK, model_m32rx_unlock, { { (int) UNIT_M32RX_U_LOAD, 1, 1 } } },
+ { M32RXF_INSN_SATB, model_m32rx_satb, { { (int) UNIT_M32RX_U_EXEC, 1, 1 } } },
+ { M32RXF_INSN_SATH, model_m32rx_sath, { { (int) UNIT_M32RX_U_EXEC, 1, 1 } } },
+ { M32RXF_INSN_SAT, model_m32rx_sat, { { (int) UNIT_M32RX_U_EXEC, 1, 1 } } },
+ { M32RXF_INSN_PCMPBZ, model_m32rx_pcmpbz, { { (int) UNIT_M32RX_U_CMP, 1, 1 } } },
+ { M32RXF_INSN_SADD, model_m32rx_sadd, { { (int) UNIT_M32RX_U_MAC, 1, 1 } } },
+ { M32RXF_INSN_MACWU1, model_m32rx_macwu1, { { (int) UNIT_M32RX_U_MAC, 1, 1 } } },
+ { M32RXF_INSN_MSBLO, model_m32rx_msblo, { { (int) UNIT_M32RX_U_MAC, 1, 1 } } },
+ { M32RXF_INSN_MULWU1, model_m32rx_mulwu1, { { (int) UNIT_M32RX_U_MAC, 1, 1 } } },
+ { M32RXF_INSN_MACLH1, model_m32rx_maclh1, { { (int) UNIT_M32RX_U_MAC, 1, 1 } } },
+ { M32RXF_INSN_SC, model_m32rx_sc, { { (int) UNIT_M32RX_U_EXEC, 1, 1 } } },
+ { M32RXF_INSN_SNC, model_m32rx_snc, { { (int) UNIT_M32RX_U_EXEC, 1, 1 } } },
+};
+
+#endif /* WITH_PROFILE_MODEL_P */
+
+static void
+m32rx_model_init (SIM_CPU *cpu)
+{
+ CPU_MODEL_DATA (cpu) = (void *) zalloc (sizeof (MODEL_M32RX_DATA));
+}
+
+#if WITH_PROFILE_MODEL_P
+#define TIMING_DATA(td) td
+#else
+#define TIMING_DATA(td) 0
+#endif
+
+static const MODEL m32rx_models[] =
+{
+ { "m32rx", & m32rx_mach, MODEL_M32RX, TIMING_DATA (& m32rx_timing[0]), m32rx_model_init },
+ { 0 }
+};
+
+/* The properties of this cpu's implementation. */
+
+static const MACH_IMP_PROPERTIES m32rxf_imp_properties =
+{
+ sizeof (SIM_CPU),
+#if WITH_SCACHE
+ sizeof (SCACHE)
+#else
+ 0
+#endif
+};
+
+
+static void
+m32rxf_prepare_run (SIM_CPU *cpu)
+{
+ if (CPU_IDESC (cpu) == NULL)
+ m32rxf_init_idesc_table (cpu);
+}
+
+static const CGEN_INSN *
+m32rxf_get_idata (SIM_CPU *cpu, int inum)
+{
+ return CPU_IDESC (cpu) [inum].idata;
+}
+
+static void
+m32rx_init_cpu (SIM_CPU *cpu)
+{
+ CPU_REG_FETCH (cpu) = m32rxf_fetch_register;
+ CPU_REG_STORE (cpu) = m32rxf_store_register;
+ CPU_PC_FETCH (cpu) = m32rxf_h_pc_get;
+ CPU_PC_STORE (cpu) = m32rxf_h_pc_set;
+ CPU_GET_IDATA (cpu) = m32rxf_get_idata;
+ CPU_MAX_INSNS (cpu) = M32RXF_INSN_MAX;
+ CPU_INSN_NAME (cpu) = cgen_insn_name;
+ CPU_FULL_ENGINE_FN (cpu) = m32rxf_engine_run_full;
+#if WITH_FAST
+ CPU_FAST_ENGINE_FN (cpu) = m32rxf_engine_run_fast;
+#else
+ CPU_FAST_ENGINE_FN (cpu) = m32rxf_engine_run_full;
+#endif
+}
+
+const MACH m32rx_mach =
+{
+ "m32rx", "m32rx", MACH_M32RX,
+ 32, 32, & m32rx_models[0], & m32rxf_imp_properties,
+ m32rx_init_cpu,
+ m32rxf_prepare_run
+};
+
diff --git a/sim/m32r/semx-switch.c b/sim/m32r/semx-switch.c
new file mode 100644
index 00000000000..a68d18bbf6d
--- /dev/null
+++ b/sim/m32r/semx-switch.c
@@ -0,0 +1,6266 @@
+/* Simulator instruction semantics for m32rxf.
+
+THIS FILE IS MACHINE GENERATED WITH CGEN.
+
+Copyright (C) 1996, 1997, 1998, 1999 Free Software Foundation, Inc.
+
+This file is part of the GNU Simulators.
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License along
+with this program; if not, write to the Free Software Foundation, Inc.,
+59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+
+*/
+
+#ifdef DEFINE_LABELS
+
+ /* The labels have the case they have because the enum of insn types
+ is all uppercase and in the non-stdc case the insn symbol is built
+ into the enum name. */
+
+ static struct {
+ int index;
+ void *label;
+ } labels[] = {
+ { M32RXF_INSN_X_INVALID, && case_sem_INSN_X_INVALID },
+ { M32RXF_INSN_X_AFTER, && case_sem_INSN_X_AFTER },
+ { M32RXF_INSN_X_BEFORE, && case_sem_INSN_X_BEFORE },
+ { M32RXF_INSN_X_CTI_CHAIN, && case_sem_INSN_X_CTI_CHAIN },
+ { M32RXF_INSN_X_CHAIN, && case_sem_INSN_X_CHAIN },
+ { M32RXF_INSN_X_BEGIN, && case_sem_INSN_X_BEGIN },
+ { M32RXF_INSN_ADD, && case_sem_INSN_ADD },
+ { M32RXF_INSN_ADD3, && case_sem_INSN_ADD3 },
+ { M32RXF_INSN_AND, && case_sem_INSN_AND },
+ { M32RXF_INSN_AND3, && case_sem_INSN_AND3 },
+ { M32RXF_INSN_OR, && case_sem_INSN_OR },
+ { M32RXF_INSN_OR3, && case_sem_INSN_OR3 },
+ { M32RXF_INSN_XOR, && case_sem_INSN_XOR },
+ { M32RXF_INSN_XOR3, && case_sem_INSN_XOR3 },
+ { M32RXF_INSN_ADDI, && case_sem_INSN_ADDI },
+ { M32RXF_INSN_ADDV, && case_sem_INSN_ADDV },
+ { M32RXF_INSN_ADDV3, && case_sem_INSN_ADDV3 },
+ { M32RXF_INSN_ADDX, && case_sem_INSN_ADDX },
+ { M32RXF_INSN_BC8, && case_sem_INSN_BC8 },
+ { M32RXF_INSN_BC24, && case_sem_INSN_BC24 },
+ { M32RXF_INSN_BEQ, && case_sem_INSN_BEQ },
+ { M32RXF_INSN_BEQZ, && case_sem_INSN_BEQZ },
+ { M32RXF_INSN_BGEZ, && case_sem_INSN_BGEZ },
+ { M32RXF_INSN_BGTZ, && case_sem_INSN_BGTZ },
+ { M32RXF_INSN_BLEZ, && case_sem_INSN_BLEZ },
+ { M32RXF_INSN_BLTZ, && case_sem_INSN_BLTZ },
+ { M32RXF_INSN_BNEZ, && case_sem_INSN_BNEZ },
+ { M32RXF_INSN_BL8, && case_sem_INSN_BL8 },
+ { M32RXF_INSN_BL24, && case_sem_INSN_BL24 },
+ { M32RXF_INSN_BCL8, && case_sem_INSN_BCL8 },
+ { M32RXF_INSN_BCL24, && case_sem_INSN_BCL24 },
+ { M32RXF_INSN_BNC8, && case_sem_INSN_BNC8 },
+ { M32RXF_INSN_BNC24, && case_sem_INSN_BNC24 },
+ { M32RXF_INSN_BNE, && case_sem_INSN_BNE },
+ { M32RXF_INSN_BRA8, && case_sem_INSN_BRA8 },
+ { M32RXF_INSN_BRA24, && case_sem_INSN_BRA24 },
+ { M32RXF_INSN_BNCL8, && case_sem_INSN_BNCL8 },
+ { M32RXF_INSN_BNCL24, && case_sem_INSN_BNCL24 },
+ { M32RXF_INSN_CMP, && case_sem_INSN_CMP },
+ { M32RXF_INSN_CMPI, && case_sem_INSN_CMPI },
+ { M32RXF_INSN_CMPU, && case_sem_INSN_CMPU },
+ { M32RXF_INSN_CMPUI, && case_sem_INSN_CMPUI },
+ { M32RXF_INSN_CMPEQ, && case_sem_INSN_CMPEQ },
+ { M32RXF_INSN_CMPZ, && case_sem_INSN_CMPZ },
+ { M32RXF_INSN_DIV, && case_sem_INSN_DIV },
+ { M32RXF_INSN_DIVU, && case_sem_INSN_DIVU },
+ { M32RXF_INSN_REM, && case_sem_INSN_REM },
+ { M32RXF_INSN_REMU, && case_sem_INSN_REMU },
+ { M32RXF_INSN_DIVH, && case_sem_INSN_DIVH },
+ { M32RXF_INSN_JC, && case_sem_INSN_JC },
+ { M32RXF_INSN_JNC, && case_sem_INSN_JNC },
+ { M32RXF_INSN_JL, && case_sem_INSN_JL },
+ { M32RXF_INSN_JMP, && case_sem_INSN_JMP },
+ { M32RXF_INSN_LD, && case_sem_INSN_LD },
+ { M32RXF_INSN_LD_D, && case_sem_INSN_LD_D },
+ { M32RXF_INSN_LDB, && case_sem_INSN_LDB },
+ { M32RXF_INSN_LDB_D, && case_sem_INSN_LDB_D },
+ { M32RXF_INSN_LDH, && case_sem_INSN_LDH },
+ { M32RXF_INSN_LDH_D, && case_sem_INSN_LDH_D },
+ { M32RXF_INSN_LDUB, && case_sem_INSN_LDUB },
+ { M32RXF_INSN_LDUB_D, && case_sem_INSN_LDUB_D },
+ { M32RXF_INSN_LDUH, && case_sem_INSN_LDUH },
+ { M32RXF_INSN_LDUH_D, && case_sem_INSN_LDUH_D },
+ { M32RXF_INSN_LD_PLUS, && case_sem_INSN_LD_PLUS },
+ { M32RXF_INSN_LD24, && case_sem_INSN_LD24 },
+ { M32RXF_INSN_LDI8, && case_sem_INSN_LDI8 },
+ { M32RXF_INSN_LDI16, && case_sem_INSN_LDI16 },
+ { M32RXF_INSN_LOCK, && case_sem_INSN_LOCK },
+ { M32RXF_INSN_MACHI_A, && case_sem_INSN_MACHI_A },
+ { M32RXF_INSN_MACLO_A, && case_sem_INSN_MACLO_A },
+ { M32RXF_INSN_MACWHI_A, && case_sem_INSN_MACWHI_A },
+ { M32RXF_INSN_MACWLO_A, && case_sem_INSN_MACWLO_A },
+ { M32RXF_INSN_MUL, && case_sem_INSN_MUL },
+ { M32RXF_INSN_MULHI_A, && case_sem_INSN_MULHI_A },
+ { M32RXF_INSN_MULLO_A, && case_sem_INSN_MULLO_A },
+ { M32RXF_INSN_MULWHI_A, && case_sem_INSN_MULWHI_A },
+ { M32RXF_INSN_MULWLO_A, && case_sem_INSN_MULWLO_A },
+ { M32RXF_INSN_MV, && case_sem_INSN_MV },
+ { M32RXF_INSN_MVFACHI_A, && case_sem_INSN_MVFACHI_A },
+ { M32RXF_INSN_MVFACLO_A, && case_sem_INSN_MVFACLO_A },
+ { M32RXF_INSN_MVFACMI_A, && case_sem_INSN_MVFACMI_A },
+ { M32RXF_INSN_MVFC, && case_sem_INSN_MVFC },
+ { M32RXF_INSN_MVTACHI_A, && case_sem_INSN_MVTACHI_A },
+ { M32RXF_INSN_MVTACLO_A, && case_sem_INSN_MVTACLO_A },
+ { M32RXF_INSN_MVTC, && case_sem_INSN_MVTC },
+ { M32RXF_INSN_NEG, && case_sem_INSN_NEG },
+ { M32RXF_INSN_NOP, && case_sem_INSN_NOP },
+ { M32RXF_INSN_NOT, && case_sem_INSN_NOT },
+ { M32RXF_INSN_RAC_DSI, && case_sem_INSN_RAC_DSI },
+ { M32RXF_INSN_RACH_DSI, && case_sem_INSN_RACH_DSI },
+ { M32RXF_INSN_RTE, && case_sem_INSN_RTE },
+ { M32RXF_INSN_SETH, && case_sem_INSN_SETH },
+ { M32RXF_INSN_SLL, && case_sem_INSN_SLL },
+ { M32RXF_INSN_SLL3, && case_sem_INSN_SLL3 },
+ { M32RXF_INSN_SLLI, && case_sem_INSN_SLLI },
+ { M32RXF_INSN_SRA, && case_sem_INSN_SRA },
+ { M32RXF_INSN_SRA3, && case_sem_INSN_SRA3 },
+ { M32RXF_INSN_SRAI, && case_sem_INSN_SRAI },
+ { M32RXF_INSN_SRL, && case_sem_INSN_SRL },
+ { M32RXF_INSN_SRL3, && case_sem_INSN_SRL3 },
+ { M32RXF_INSN_SRLI, && case_sem_INSN_SRLI },
+ { M32RXF_INSN_ST, && case_sem_INSN_ST },
+ { M32RXF_INSN_ST_D, && case_sem_INSN_ST_D },
+ { M32RXF_INSN_STB, && case_sem_INSN_STB },
+ { M32RXF_INSN_STB_D, && case_sem_INSN_STB_D },
+ { M32RXF_INSN_STH, && case_sem_INSN_STH },
+ { M32RXF_INSN_STH_D, && case_sem_INSN_STH_D },
+ { M32RXF_INSN_ST_PLUS, && case_sem_INSN_ST_PLUS },
+ { M32RXF_INSN_ST_MINUS, && case_sem_INSN_ST_MINUS },
+ { M32RXF_INSN_SUB, && case_sem_INSN_SUB },
+ { M32RXF_INSN_SUBV, && case_sem_INSN_SUBV },
+ { M32RXF_INSN_SUBX, && case_sem_INSN_SUBX },
+ { M32RXF_INSN_TRAP, && case_sem_INSN_TRAP },
+ { M32RXF_INSN_UNLOCK, && case_sem_INSN_UNLOCK },
+ { M32RXF_INSN_SATB, && case_sem_INSN_SATB },
+ { M32RXF_INSN_SATH, && case_sem_INSN_SATH },
+ { M32RXF_INSN_SAT, && case_sem_INSN_SAT },
+ { M32RXF_INSN_PCMPBZ, && case_sem_INSN_PCMPBZ },
+ { M32RXF_INSN_SADD, && case_sem_INSN_SADD },
+ { M32RXF_INSN_MACWU1, && case_sem_INSN_MACWU1 },
+ { M32RXF_INSN_MSBLO, && case_sem_INSN_MSBLO },
+ { M32RXF_INSN_MULWU1, && case_sem_INSN_MULWU1 },
+ { M32RXF_INSN_MACLH1, && case_sem_INSN_MACLH1 },
+ { M32RXF_INSN_SC, && case_sem_INSN_SC },
+ { M32RXF_INSN_SNC, && case_sem_INSN_SNC },
+ { M32RXF_INSN_PAR_ADD, && case_sem_INSN_PAR_ADD },
+ { M32RXF_INSN_WRITE_ADD, && case_sem_INSN_WRITE_ADD },
+ { M32RXF_INSN_PAR_AND, && case_sem_INSN_PAR_AND },
+ { M32RXF_INSN_WRITE_AND, && case_sem_INSN_WRITE_AND },
+ { M32RXF_INSN_PAR_OR, && case_sem_INSN_PAR_OR },
+ { M32RXF_INSN_WRITE_OR, && case_sem_INSN_WRITE_OR },
+ { M32RXF_INSN_PAR_XOR, && case_sem_INSN_PAR_XOR },
+ { M32RXF_INSN_WRITE_XOR, && case_sem_INSN_WRITE_XOR },
+ { M32RXF_INSN_PAR_ADDI, && case_sem_INSN_PAR_ADDI },
+ { M32RXF_INSN_WRITE_ADDI, && case_sem_INSN_WRITE_ADDI },
+ { M32RXF_INSN_PAR_ADDV, && case_sem_INSN_PAR_ADDV },
+ { M32RXF_INSN_WRITE_ADDV, && case_sem_INSN_WRITE_ADDV },
+ { M32RXF_INSN_PAR_ADDX, && case_sem_INSN_PAR_ADDX },
+ { M32RXF_INSN_WRITE_ADDX, && case_sem_INSN_WRITE_ADDX },
+ { M32RXF_INSN_PAR_BC8, && case_sem_INSN_PAR_BC8 },
+ { M32RXF_INSN_WRITE_BC8, && case_sem_INSN_WRITE_BC8 },
+ { M32RXF_INSN_PAR_BL8, && case_sem_INSN_PAR_BL8 },
+ { M32RXF_INSN_WRITE_BL8, && case_sem_INSN_WRITE_BL8 },
+ { M32RXF_INSN_PAR_BCL8, && case_sem_INSN_PAR_BCL8 },
+ { M32RXF_INSN_WRITE_BCL8, && case_sem_INSN_WRITE_BCL8 },
+ { M32RXF_INSN_PAR_BNC8, && case_sem_INSN_PAR_BNC8 },
+ { M32RXF_INSN_WRITE_BNC8, && case_sem_INSN_WRITE_BNC8 },
+ { M32RXF_INSN_PAR_BRA8, && case_sem_INSN_PAR_BRA8 },
+ { M32RXF_INSN_WRITE_BRA8, && case_sem_INSN_WRITE_BRA8 },
+ { M32RXF_INSN_PAR_BNCL8, && case_sem_INSN_PAR_BNCL8 },
+ { M32RXF_INSN_WRITE_BNCL8, && case_sem_INSN_WRITE_BNCL8 },
+ { M32RXF_INSN_PAR_CMP, && case_sem_INSN_PAR_CMP },
+ { M32RXF_INSN_WRITE_CMP, && case_sem_INSN_WRITE_CMP },
+ { M32RXF_INSN_PAR_CMPU, && case_sem_INSN_PAR_CMPU },
+ { M32RXF_INSN_WRITE_CMPU, && case_sem_INSN_WRITE_CMPU },
+ { M32RXF_INSN_PAR_CMPEQ, && case_sem_INSN_PAR_CMPEQ },
+ { M32RXF_INSN_WRITE_CMPEQ, && case_sem_INSN_WRITE_CMPEQ },
+ { M32RXF_INSN_PAR_CMPZ, && case_sem_INSN_PAR_CMPZ },
+ { M32RXF_INSN_WRITE_CMPZ, && case_sem_INSN_WRITE_CMPZ },
+ { M32RXF_INSN_PAR_JC, && case_sem_INSN_PAR_JC },
+ { M32RXF_INSN_WRITE_JC, && case_sem_INSN_WRITE_JC },
+ { M32RXF_INSN_PAR_JNC, && case_sem_INSN_PAR_JNC },
+ { M32RXF_INSN_WRITE_JNC, && case_sem_INSN_WRITE_JNC },
+ { M32RXF_INSN_PAR_JL, && case_sem_INSN_PAR_JL },
+ { M32RXF_INSN_WRITE_JL, && case_sem_INSN_WRITE_JL },
+ { M32RXF_INSN_PAR_JMP, && case_sem_INSN_PAR_JMP },
+ { M32RXF_INSN_WRITE_JMP, && case_sem_INSN_WRITE_JMP },
+ { M32RXF_INSN_PAR_LD, && case_sem_INSN_PAR_LD },
+ { M32RXF_INSN_WRITE_LD, && case_sem_INSN_WRITE_LD },
+ { M32RXF_INSN_PAR_LDB, && case_sem_INSN_PAR_LDB },
+ { M32RXF_INSN_WRITE_LDB, && case_sem_INSN_WRITE_LDB },
+ { M32RXF_INSN_PAR_LDH, && case_sem_INSN_PAR_LDH },
+ { M32RXF_INSN_WRITE_LDH, && case_sem_INSN_WRITE_LDH },
+ { M32RXF_INSN_PAR_LDUB, && case_sem_INSN_PAR_LDUB },
+ { M32RXF_INSN_WRITE_LDUB, && case_sem_INSN_WRITE_LDUB },
+ { M32RXF_INSN_PAR_LDUH, && case_sem_INSN_PAR_LDUH },
+ { M32RXF_INSN_WRITE_LDUH, && case_sem_INSN_WRITE_LDUH },
+ { M32RXF_INSN_PAR_LD_PLUS, && case_sem_INSN_PAR_LD_PLUS },
+ { M32RXF_INSN_WRITE_LD_PLUS, && case_sem_INSN_WRITE_LD_PLUS },
+ { M32RXF_INSN_PAR_LDI8, && case_sem_INSN_PAR_LDI8 },
+ { M32RXF_INSN_WRITE_LDI8, && case_sem_INSN_WRITE_LDI8 },
+ { M32RXF_INSN_PAR_LOCK, && case_sem_INSN_PAR_LOCK },
+ { M32RXF_INSN_WRITE_LOCK, && case_sem_INSN_WRITE_LOCK },
+ { M32RXF_INSN_PAR_MACHI_A, && case_sem_INSN_PAR_MACHI_A },
+ { M32RXF_INSN_WRITE_MACHI_A, && case_sem_INSN_WRITE_MACHI_A },
+ { M32RXF_INSN_PAR_MACLO_A, && case_sem_INSN_PAR_MACLO_A },
+ { M32RXF_INSN_WRITE_MACLO_A, && case_sem_INSN_WRITE_MACLO_A },
+ { M32RXF_INSN_PAR_MACWHI_A, && case_sem_INSN_PAR_MACWHI_A },
+ { M32RXF_INSN_WRITE_MACWHI_A, && case_sem_INSN_WRITE_MACWHI_A },
+ { M32RXF_INSN_PAR_MACWLO_A, && case_sem_INSN_PAR_MACWLO_A },
+ { M32RXF_INSN_WRITE_MACWLO_A, && case_sem_INSN_WRITE_MACWLO_A },
+ { M32RXF_INSN_PAR_MUL, && case_sem_INSN_PAR_MUL },
+ { M32RXF_INSN_WRITE_MUL, && case_sem_INSN_WRITE_MUL },
+ { M32RXF_INSN_PAR_MULHI_A, && case_sem_INSN_PAR_MULHI_A },
+ { M32RXF_INSN_WRITE_MULHI_A, && case_sem_INSN_WRITE_MULHI_A },
+ { M32RXF_INSN_PAR_MULLO_A, && case_sem_INSN_PAR_MULLO_A },
+ { M32RXF_INSN_WRITE_MULLO_A, && case_sem_INSN_WRITE_MULLO_A },
+ { M32RXF_INSN_PAR_MULWHI_A, && case_sem_INSN_PAR_MULWHI_A },
+ { M32RXF_INSN_WRITE_MULWHI_A, && case_sem_INSN_WRITE_MULWHI_A },
+ { M32RXF_INSN_PAR_MULWLO_A, && case_sem_INSN_PAR_MULWLO_A },
+ { M32RXF_INSN_WRITE_MULWLO_A, && case_sem_INSN_WRITE_MULWLO_A },
+ { M32RXF_INSN_PAR_MV, && case_sem_INSN_PAR_MV },
+ { M32RXF_INSN_WRITE_MV, && case_sem_INSN_WRITE_MV },
+ { M32RXF_INSN_PAR_MVFACHI_A, && case_sem_INSN_PAR_MVFACHI_A },
+ { M32RXF_INSN_WRITE_MVFACHI_A, && case_sem_INSN_WRITE_MVFACHI_A },
+ { M32RXF_INSN_PAR_MVFACLO_A, && case_sem_INSN_PAR_MVFACLO_A },
+ { M32RXF_INSN_WRITE_MVFACLO_A, && case_sem_INSN_WRITE_MVFACLO_A },
+ { M32RXF_INSN_PAR_MVFACMI_A, && case_sem_INSN_PAR_MVFACMI_A },
+ { M32RXF_INSN_WRITE_MVFACMI_A, && case_sem_INSN_WRITE_MVFACMI_A },
+ { M32RXF_INSN_PAR_MVFC, && case_sem_INSN_PAR_MVFC },
+ { M32RXF_INSN_WRITE_MVFC, && case_sem_INSN_WRITE_MVFC },
+ { M32RXF_INSN_PAR_MVTACHI_A, && case_sem_INSN_PAR_MVTACHI_A },
+ { M32RXF_INSN_WRITE_MVTACHI_A, && case_sem_INSN_WRITE_MVTACHI_A },
+ { M32RXF_INSN_PAR_MVTACLO_A, && case_sem_INSN_PAR_MVTACLO_A },
+ { M32RXF_INSN_WRITE_MVTACLO_A, && case_sem_INSN_WRITE_MVTACLO_A },
+ { M32RXF_INSN_PAR_MVTC, && case_sem_INSN_PAR_MVTC },
+ { M32RXF_INSN_WRITE_MVTC, && case_sem_INSN_WRITE_MVTC },
+ { M32RXF_INSN_PAR_NEG, && case_sem_INSN_PAR_NEG },
+ { M32RXF_INSN_WRITE_NEG, && case_sem_INSN_WRITE_NEG },
+ { M32RXF_INSN_PAR_NOP, && case_sem_INSN_PAR_NOP },
+ { M32RXF_INSN_WRITE_NOP, && case_sem_INSN_WRITE_NOP },
+ { M32RXF_INSN_PAR_NOT, && case_sem_INSN_PAR_NOT },
+ { M32RXF_INSN_WRITE_NOT, && case_sem_INSN_WRITE_NOT },
+ { M32RXF_INSN_PAR_RAC_DSI, && case_sem_INSN_PAR_RAC_DSI },
+ { M32RXF_INSN_WRITE_RAC_DSI, && case_sem_INSN_WRITE_RAC_DSI },
+ { M32RXF_INSN_PAR_RACH_DSI, && case_sem_INSN_PAR_RACH_DSI },
+ { M32RXF_INSN_WRITE_RACH_DSI, && case_sem_INSN_WRITE_RACH_DSI },
+ { M32RXF_INSN_PAR_RTE, && case_sem_INSN_PAR_RTE },
+ { M32RXF_INSN_WRITE_RTE, && case_sem_INSN_WRITE_RTE },
+ { M32RXF_INSN_PAR_SLL, && case_sem_INSN_PAR_SLL },
+ { M32RXF_INSN_WRITE_SLL, && case_sem_INSN_WRITE_SLL },
+ { M32RXF_INSN_PAR_SLLI, && case_sem_INSN_PAR_SLLI },
+ { M32RXF_INSN_WRITE_SLLI, && case_sem_INSN_WRITE_SLLI },
+ { M32RXF_INSN_PAR_SRA, && case_sem_INSN_PAR_SRA },
+ { M32RXF_INSN_WRITE_SRA, && case_sem_INSN_WRITE_SRA },
+ { M32RXF_INSN_PAR_SRAI, && case_sem_INSN_PAR_SRAI },
+ { M32RXF_INSN_WRITE_SRAI, && case_sem_INSN_WRITE_SRAI },
+ { M32RXF_INSN_PAR_SRL, && case_sem_INSN_PAR_SRL },
+ { M32RXF_INSN_WRITE_SRL, && case_sem_INSN_WRITE_SRL },
+ { M32RXF_INSN_PAR_SRLI, && case_sem_INSN_PAR_SRLI },
+ { M32RXF_INSN_WRITE_SRLI, && case_sem_INSN_WRITE_SRLI },
+ { M32RXF_INSN_PAR_ST, && case_sem_INSN_PAR_ST },
+ { M32RXF_INSN_WRITE_ST, && case_sem_INSN_WRITE_ST },
+ { M32RXF_INSN_PAR_STB, && case_sem_INSN_PAR_STB },
+ { M32RXF_INSN_WRITE_STB, && case_sem_INSN_WRITE_STB },
+ { M32RXF_INSN_PAR_STH, && case_sem_INSN_PAR_STH },
+ { M32RXF_INSN_WRITE_STH, && case_sem_INSN_WRITE_STH },
+ { M32RXF_INSN_PAR_ST_PLUS, && case_sem_INSN_PAR_ST_PLUS },
+ { M32RXF_INSN_WRITE_ST_PLUS, && case_sem_INSN_WRITE_ST_PLUS },
+ { M32RXF_INSN_PAR_ST_MINUS, && case_sem_INSN_PAR_ST_MINUS },
+ { M32RXF_INSN_WRITE_ST_MINUS, && case_sem_INSN_WRITE_ST_MINUS },
+ { M32RXF_INSN_PAR_SUB, && case_sem_INSN_PAR_SUB },
+ { M32RXF_INSN_WRITE_SUB, && case_sem_INSN_WRITE_SUB },
+ { M32RXF_INSN_PAR_SUBV, && case_sem_INSN_PAR_SUBV },
+ { M32RXF_INSN_WRITE_SUBV, && case_sem_INSN_WRITE_SUBV },
+ { M32RXF_INSN_PAR_SUBX, && case_sem_INSN_PAR_SUBX },
+ { M32RXF_INSN_WRITE_SUBX, && case_sem_INSN_WRITE_SUBX },
+ { M32RXF_INSN_PAR_TRAP, && case_sem_INSN_PAR_TRAP },
+ { M32RXF_INSN_WRITE_TRAP, && case_sem_INSN_WRITE_TRAP },
+ { M32RXF_INSN_PAR_UNLOCK, && case_sem_INSN_PAR_UNLOCK },
+ { M32RXF_INSN_WRITE_UNLOCK, && case_sem_INSN_WRITE_UNLOCK },
+ { M32RXF_INSN_PAR_PCMPBZ, && case_sem_INSN_PAR_PCMPBZ },
+ { M32RXF_INSN_WRITE_PCMPBZ, && case_sem_INSN_WRITE_PCMPBZ },
+ { M32RXF_INSN_PAR_SADD, && case_sem_INSN_PAR_SADD },
+ { M32RXF_INSN_WRITE_SADD, && case_sem_INSN_WRITE_SADD },
+ { M32RXF_INSN_PAR_MACWU1, && case_sem_INSN_PAR_MACWU1 },
+ { M32RXF_INSN_WRITE_MACWU1, && case_sem_INSN_WRITE_MACWU1 },
+ { M32RXF_INSN_PAR_MSBLO, && case_sem_INSN_PAR_MSBLO },
+ { M32RXF_INSN_WRITE_MSBLO, && case_sem_INSN_WRITE_MSBLO },
+ { M32RXF_INSN_PAR_MULWU1, && case_sem_INSN_PAR_MULWU1 },
+ { M32RXF_INSN_WRITE_MULWU1, && case_sem_INSN_WRITE_MULWU1 },
+ { M32RXF_INSN_PAR_MACLH1, && case_sem_INSN_PAR_MACLH1 },
+ { M32RXF_INSN_WRITE_MACLH1, && case_sem_INSN_WRITE_MACLH1 },
+ { M32RXF_INSN_PAR_SC, && case_sem_INSN_PAR_SC },
+ { M32RXF_INSN_WRITE_SC, && case_sem_INSN_WRITE_SC },
+ { M32RXF_INSN_PAR_SNC, && case_sem_INSN_PAR_SNC },
+ { M32RXF_INSN_WRITE_SNC, && case_sem_INSN_WRITE_SNC },
+ { 0, 0 }
+ };
+ int i;
+
+ for (i = 0; labels[i].label != 0; ++i)
+ {
+#if FAST_P
+ CPU_IDESC (current_cpu) [labels[i].index].sem_fast_lab = labels[i].label;
+#else
+ CPU_IDESC (current_cpu) [labels[i].index].sem_full_lab = labels[i].label;
+#endif
+ }
+
+#undef DEFINE_LABELS
+#endif /* DEFINE_LABELS */
+
+#ifdef DEFINE_SWITCH
+
+/* If fast (i.e. not unnecessarily slow) execution is selected, turn
+   off frills like tracing and profiling.  */
+/* FIXME: A better way would be to have TRACE_RESULT check for something
+ that can cause it to be optimized out. Another way would be to emit
+ special handlers into the instruction "stream". */
+
+#if FAST_P
+#undef TRACE_RESULT
+#define TRACE_RESULT(cpu, abuf, name, type, val)
+#endif
+
+#undef GET_ATTR
+#define GET_ATTR(cpu, num, attr) CGEN_ATTR_VALUE (NULL, abuf->idesc->attrs, CGEN_INSN_##attr)
+
+{
+
+#if WITH_SCACHE_PBB
+
+/* Branch to next handler without going around main loop. */
+#define NEXT(vpc) goto * SEM_ARGBUF (vpc) -> semantic.sem_case
+SWITCH (sem, SEM_ARGBUF (vpc) -> semantic.sem_case)
+
+#else /* ! WITH_SCACHE_PBB */
+
+#define NEXT(vpc) BREAK (sem)
+#ifdef __GNUC__
+#if FAST_P
+ SWITCH (sem, SEM_ARGBUF (sc) -> idesc->sem_fast_lab)
+#else
+ SWITCH (sem, SEM_ARGBUF (sc) -> idesc->sem_full_lab)
+#endif
+#else
+ SWITCH (sem, SEM_ARGBUF (sc) -> idesc->num)
+#endif
+
+#endif /* ! WITH_SCACHE_PBB */
+
+ {
+
+ CASE (sem, INSN_X_INVALID) : /* --invalid-- */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.fmt_empty.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 0);
+
+ {
+ /* Update the recorded pc in the cpu state struct.
+ Only necessary for WITH_SCACHE case, but to avoid the
+ conditional compilation .... */
+ SET_H_PC (pc);
+ /* Virtual insns have zero size. Overwrite vpc with address of next insn
+ using the default-insn-bitsize spec. When executing insns in parallel
+ we may want to queue the fault and continue execution. */
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 4);
+ vpc = sim_engine_invalid_insn (current_cpu, pc, vpc);
+ }
+
+#undef FLD
+}
+ NEXT (vpc);
+
+ CASE (sem, INSN_X_AFTER) : /* --after-- */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.fmt_empty.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 0);
+
+ {
+#if WITH_SCACHE_PBB_M32RXF
+ m32rxf_pbb_after (current_cpu, sem_arg);
+#endif
+ }
+
+#undef FLD
+}
+ NEXT (vpc);
+
+ CASE (sem, INSN_X_BEFORE) : /* --before-- */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.fmt_empty.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 0);
+
+ {
+#if WITH_SCACHE_PBB_M32RXF
+ m32rxf_pbb_before (current_cpu, sem_arg);
+#endif
+ }
+
+#undef FLD
+}
+ NEXT (vpc);
+
+ CASE (sem, INSN_X_CTI_CHAIN) : /* --cti-chain-- */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.fmt_empty.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 0);
+
+ {
+#if WITH_SCACHE_PBB_M32RXF
+#ifdef DEFINE_SWITCH
+ vpc = m32rxf_pbb_cti_chain (current_cpu, sem_arg,
+ pbb_br_type, pbb_br_npc);
+ BREAK (sem);
+#else
+ /* FIXME: Allow provision of explicit ifmt spec in insn spec. */
+ vpc = m32rxf_pbb_cti_chain (current_cpu, sem_arg,
+ CPU_PBB_BR_TYPE (current_cpu),
+ CPU_PBB_BR_NPC (current_cpu));
+#endif
+#endif
+ }
+
+#undef FLD
+}
+ NEXT (vpc);
+
+ CASE (sem, INSN_X_CHAIN) : /* --chain-- */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.fmt_empty.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 0);
+
+ {
+#if WITH_SCACHE_PBB_M32RXF
+ vpc = m32rxf_pbb_chain (current_cpu, sem_arg);
+#ifdef DEFINE_SWITCH
+ BREAK (sem);
+#endif
+#endif
+ }
+
+#undef FLD
+}
+ NEXT (vpc);
+
+ CASE (sem, INSN_X_BEGIN) : /* --begin-- */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.fmt_empty.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 0);
+
+ {
+#if WITH_SCACHE_PBB_M32RXF
+#ifdef DEFINE_SWITCH
+ /* In the switch case FAST_P is a constant, allowing several optimizations
+ in any called inline functions. */
+ vpc = m32rxf_pbb_begin (current_cpu, FAST_P);
+#else
+ vpc = m32rxf_pbb_begin (current_cpu, STATE_RUN_FAST_P (CPU_STATE (current_cpu)));
+#endif
+#endif
+ }
+
+#undef FLD
+}
+ NEXT (vpc);
+
+ CASE (sem, INSN_ADD) : /* add $dr,$sr */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_add.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 2);
+
+ {
+ SI opval = ADDSI (* FLD (i_dr), * FLD (i_sr));
+ * FLD (i_dr) = opval;
+ TRACE_RESULT (current_cpu, abuf, "dr", 'x', opval);
+ }
+
+#undef FLD
+}
+ NEXT (vpc);
+
+ CASE (sem, INSN_ADD3) : /* add3 $dr,$sr,$hash$slo16 */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_add3.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 4);
+
+ {
+ SI opval = ADDSI (* FLD (i_sr), FLD (f_simm16));
+ * FLD (i_dr) = opval;
+ TRACE_RESULT (current_cpu, abuf, "dr", 'x', opval);
+ }
+
+#undef FLD
+}
+ NEXT (vpc);
+
+ CASE (sem, INSN_AND) : /* and $dr,$sr */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_add.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 2);
+
+ {
+ SI opval = ANDSI (* FLD (i_dr), * FLD (i_sr));
+ * FLD (i_dr) = opval;
+ TRACE_RESULT (current_cpu, abuf, "dr", 'x', opval);
+ }
+
+#undef FLD
+}
+ NEXT (vpc);
+
+ CASE (sem, INSN_AND3) : /* and3 $dr,$sr,$uimm16 */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_and3.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 4);
+
+ {
+ SI opval = ANDSI (* FLD (i_sr), FLD (f_uimm16));
+ * FLD (i_dr) = opval;
+ TRACE_RESULT (current_cpu, abuf, "dr", 'x', opval);
+ }
+
+#undef FLD
+}
+ NEXT (vpc);
+
+ CASE (sem, INSN_OR) : /* or $dr,$sr */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_add.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 2);
+
+ {
+ SI opval = ORSI (* FLD (i_dr), * FLD (i_sr));
+ * FLD (i_dr) = opval;
+ TRACE_RESULT (current_cpu, abuf, "dr", 'x', opval);
+ }
+
+#undef FLD
+}
+ NEXT (vpc);
+
+ CASE (sem, INSN_OR3) : /* or3 $dr,$sr,$hash$ulo16 */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_and3.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 4);
+
+ {
+ SI opval = ORSI (* FLD (i_sr), FLD (f_uimm16));
+ * FLD (i_dr) = opval;
+ TRACE_RESULT (current_cpu, abuf, "dr", 'x', opval);
+ }
+
+#undef FLD
+}
+ NEXT (vpc);
+
+ CASE (sem, INSN_XOR) : /* xor $dr,$sr */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_add.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 2);
+
+ {
+ SI opval = XORSI (* FLD (i_dr), * FLD (i_sr));
+ * FLD (i_dr) = opval;
+ TRACE_RESULT (current_cpu, abuf, "dr", 'x', opval);
+ }
+
+#undef FLD
+}
+ NEXT (vpc);
+
+ CASE (sem, INSN_XOR3) : /* xor3 $dr,$sr,$uimm16 */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_and3.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 4);
+
+ {
+ SI opval = XORSI (* FLD (i_sr), FLD (f_uimm16));
+ * FLD (i_dr) = opval;
+ TRACE_RESULT (current_cpu, abuf, "dr", 'x', opval);
+ }
+
+#undef FLD
+}
+ NEXT (vpc);
+
+ CASE (sem, INSN_ADDI) : /* addi $dr,$simm8 */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_addi.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 2);
+
+ {
+ SI opval = ADDSI (* FLD (i_dr), FLD (f_simm8));
+ * FLD (i_dr) = opval;
+ TRACE_RESULT (current_cpu, abuf, "dr", 'x', opval);
+ }
+
+#undef FLD
+}
+ NEXT (vpc);
+
+ CASE (sem, INSN_ADDV) : /* addv $dr,$sr */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_add.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 2);
+
+{
+ SI temp0;BI temp1;
+ temp0 = ADDSI (* FLD (i_dr), * FLD (i_sr));
+ temp1 = ADDOFSI (* FLD (i_dr), * FLD (i_sr), 0);
+ {
+ SI opval = temp0;
+ * FLD (i_dr) = opval;
+ TRACE_RESULT (current_cpu, abuf, "dr", 'x', opval);
+ }
+ {
+ BI opval = temp1;
+ CPU (h_cond) = opval;
+ TRACE_RESULT (current_cpu, abuf, "condbit", 'x', opval);
+ }
+}
+
+#undef FLD
+}
+ NEXT (vpc);
+
+ CASE (sem, INSN_ADDV3) : /* addv3 $dr,$sr,$simm16 */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_add3.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 4);
+
+{
+ SI temp0;BI temp1;
+ temp0 = ADDSI (* FLD (i_sr), FLD (f_simm16));
+ temp1 = ADDOFSI (* FLD (i_sr), FLD (f_simm16), 0);
+ {
+ SI opval = temp0;
+ * FLD (i_dr) = opval;
+ TRACE_RESULT (current_cpu, abuf, "dr", 'x', opval);
+ }
+ {
+ BI opval = temp1;
+ CPU (h_cond) = opval;
+ TRACE_RESULT (current_cpu, abuf, "condbit", 'x', opval);
+ }
+}
+
+#undef FLD
+}
+ NEXT (vpc);
+
+ CASE (sem, INSN_ADDX) : /* addx $dr,$sr */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_add.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 2);
+
+{
+ SI temp0;BI temp1;
+ temp0 = ADDCSI (* FLD (i_dr), * FLD (i_sr), CPU (h_cond));
+ temp1 = ADDCFSI (* FLD (i_dr), * FLD (i_sr), CPU (h_cond));
+ {
+ SI opval = temp0;
+ * FLD (i_dr) = opval;
+ TRACE_RESULT (current_cpu, abuf, "dr", 'x', opval);
+ }
+ {
+ BI opval = temp1;
+ CPU (h_cond) = opval;
+ TRACE_RESULT (current_cpu, abuf, "condbit", 'x', opval);
+ }
+}
+
+#undef FLD
+}
+ NEXT (vpc);
+
+ CASE (sem, INSN_BC8) : /* bc.s $disp8 */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_bl8.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ SEM_BRANCH_INIT
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 2);
+
+if (CPU (h_cond)) {
+ {
+ USI opval = FLD (i_disp8);
+ SEM_BRANCH_VIA_CACHE (current_cpu, sem_arg, opval, vpc);
+ written |= (1 << 2);
+ TRACE_RESULT (current_cpu, abuf, "pc", 'x', opval);
+ }
+}
+
+ abuf->written = written;
+ SEM_BRANCH_FINI (vpc);
+#undef FLD
+}
+ NEXT (vpc);
+
+ CASE (sem, INSN_BC24) : /* bc.l $disp24 */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_bl24.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ SEM_BRANCH_INIT
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 4);
+
+if (CPU (h_cond)) {
+ {
+ USI opval = FLD (i_disp24);
+ SEM_BRANCH_VIA_CACHE (current_cpu, sem_arg, opval, vpc);
+ written |= (1 << 2);
+ TRACE_RESULT (current_cpu, abuf, "pc", 'x', opval);
+ }
+}
+
+ abuf->written = written;
+ SEM_BRANCH_FINI (vpc);
+#undef FLD
+}
+ NEXT (vpc);
+
+ CASE (sem, INSN_BEQ) : /* beq $src1,$src2,$disp16 */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_beq.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ SEM_BRANCH_INIT
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 4);
+
+if (EQSI (* FLD (i_src1), * FLD (i_src2))) {
+ {
+ USI opval = FLD (i_disp16);
+ SEM_BRANCH_VIA_CACHE (current_cpu, sem_arg, opval, vpc);
+ written |= (1 << 3);
+ TRACE_RESULT (current_cpu, abuf, "pc", 'x', opval);
+ }
+}
+
+ abuf->written = written;
+ SEM_BRANCH_FINI (vpc);
+#undef FLD
+}
+ NEXT (vpc);
+
+ CASE (sem, INSN_BEQZ) : /* beqz $src2,$disp16 */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_beq.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ SEM_BRANCH_INIT
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 4);
+
+if (EQSI (* FLD (i_src2), 0)) {
+ {
+ USI opval = FLD (i_disp16);
+ SEM_BRANCH_VIA_CACHE (current_cpu, sem_arg, opval, vpc);
+ written |= (1 << 2);
+ TRACE_RESULT (current_cpu, abuf, "pc", 'x', opval);
+ }
+}
+
+ abuf->written = written;
+ SEM_BRANCH_FINI (vpc);
+#undef FLD
+}
+ NEXT (vpc);
+
+ CASE (sem, INSN_BGEZ) : /* bgez $src2,$disp16 */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_beq.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ SEM_BRANCH_INIT
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 4);
+
+if (GESI (* FLD (i_src2), 0)) {
+ {
+ USI opval = FLD (i_disp16);
+ SEM_BRANCH_VIA_CACHE (current_cpu, sem_arg, opval, vpc);
+ written |= (1 << 2);
+ TRACE_RESULT (current_cpu, abuf, "pc", 'x', opval);
+ }
+}
+
+ abuf->written = written;
+ SEM_BRANCH_FINI (vpc);
+#undef FLD
+}
+ NEXT (vpc);
+
+ CASE (sem, INSN_BGTZ) : /* bgtz $src2,$disp16 */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_beq.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ SEM_BRANCH_INIT
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 4);
+
+if (GTSI (* FLD (i_src2), 0)) {
+ {
+ USI opval = FLD (i_disp16);
+ SEM_BRANCH_VIA_CACHE (current_cpu, sem_arg, opval, vpc);
+ written |= (1 << 2);
+ TRACE_RESULT (current_cpu, abuf, "pc", 'x', opval);
+ }
+}
+
+ abuf->written = written;
+ SEM_BRANCH_FINI (vpc);
+#undef FLD
+}
+ NEXT (vpc);
+
+ CASE (sem, INSN_BLEZ) : /* blez $src2,$disp16 */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_beq.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ SEM_BRANCH_INIT
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 4);
+
+if (LESI (* FLD (i_src2), 0)) {
+ {
+ USI opval = FLD (i_disp16);
+ SEM_BRANCH_VIA_CACHE (current_cpu, sem_arg, opval, vpc);
+ written |= (1 << 2);
+ TRACE_RESULT (current_cpu, abuf, "pc", 'x', opval);
+ }
+}
+
+ abuf->written = written;
+ SEM_BRANCH_FINI (vpc);
+#undef FLD
+}
+ NEXT (vpc);
+
+ CASE (sem, INSN_BLTZ) : /* bltz $src2,$disp16 */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_beq.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ SEM_BRANCH_INIT
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 4);
+
+if (LTSI (* FLD (i_src2), 0)) {
+ {
+ USI opval = FLD (i_disp16);
+ SEM_BRANCH_VIA_CACHE (current_cpu, sem_arg, opval, vpc);
+ written |= (1 << 2);
+ TRACE_RESULT (current_cpu, abuf, "pc", 'x', opval);
+ }
+}
+
+ abuf->written = written;
+ SEM_BRANCH_FINI (vpc);
+#undef FLD
+}
+ NEXT (vpc);
+
+ CASE (sem, INSN_BNEZ) : /* bnez $src2,$disp16 */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_beq.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ SEM_BRANCH_INIT
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 4);
+
+if (NESI (* FLD (i_src2), 0)) {
+ {
+ USI opval = FLD (i_disp16);
+ SEM_BRANCH_VIA_CACHE (current_cpu, sem_arg, opval, vpc);
+ written |= (1 << 2);
+ TRACE_RESULT (current_cpu, abuf, "pc", 'x', opval);
+ }
+}
+
+ abuf->written = written;
+ SEM_BRANCH_FINI (vpc);
+#undef FLD
+}
+ NEXT (vpc);
+
+ CASE (sem, INSN_BL8) : /* bl.s $disp8 */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_bl8.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ SEM_BRANCH_INIT
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 2);
+
+{
+ {
+ SI opval = ADDSI (ANDSI (pc, -4), 4);
+ CPU (h_gr[((UINT) 14)]) = opval;
+ TRACE_RESULT (current_cpu, abuf, "gr-14", 'x', opval);
+ }
+ {
+ USI opval = FLD (i_disp8);
+ SEM_BRANCH_VIA_CACHE (current_cpu, sem_arg, opval, vpc);
+ TRACE_RESULT (current_cpu, abuf, "pc", 'x', opval);
+ }
+}
+
+ SEM_BRANCH_FINI (vpc);
+#undef FLD
+}
+ NEXT (vpc);
+
+ CASE (sem, INSN_BL24) : /* bl.l $disp24 */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_bl24.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ SEM_BRANCH_INIT
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 4);
+
+{
+ {
+ SI opval = ADDSI (pc, 4);
+ CPU (h_gr[((UINT) 14)]) = opval;
+ TRACE_RESULT (current_cpu, abuf, "gr-14", 'x', opval);
+ }
+ {
+ USI opval = FLD (i_disp24);
+ SEM_BRANCH_VIA_CACHE (current_cpu, sem_arg, opval, vpc);
+ TRACE_RESULT (current_cpu, abuf, "pc", 'x', opval);
+ }
+}
+
+ SEM_BRANCH_FINI (vpc);
+#undef FLD
+}
+ NEXT (vpc);
+
+ CASE (sem, INSN_BCL8) : /* bcl.s $disp8 */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_bl8.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ SEM_BRANCH_INIT
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 2);
+
+if (CPU (h_cond)) {
+{
+ {
+ SI opval = ADDSI (ANDSI (pc, -4), 4);
+ CPU (h_gr[((UINT) 14)]) = opval;
+ written |= (1 << 3);
+ TRACE_RESULT (current_cpu, abuf, "gr-14", 'x', opval);
+ }
+ {
+ USI opval = FLD (i_disp8);
+ SEM_BRANCH_VIA_CACHE (current_cpu, sem_arg, opval, vpc);
+ written |= (1 << 4);
+ TRACE_RESULT (current_cpu, abuf, "pc", 'x', opval);
+ }
+}
+}
+
+ abuf->written = written;
+ SEM_BRANCH_FINI (vpc);
+#undef FLD
+}
+ NEXT (vpc);
+
+ CASE (sem, INSN_BCL24) : /* bcl.l $disp24 */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_bl24.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ SEM_BRANCH_INIT
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 4);
+
+if (CPU (h_cond)) {
+{
+ {
+ SI opval = ADDSI (pc, 4);
+ CPU (h_gr[((UINT) 14)]) = opval;
+ written |= (1 << 3);
+ TRACE_RESULT (current_cpu, abuf, "gr-14", 'x', opval);
+ }
+ {
+ USI opval = FLD (i_disp24);
+ SEM_BRANCH_VIA_CACHE (current_cpu, sem_arg, opval, vpc);
+ written |= (1 << 4);
+ TRACE_RESULT (current_cpu, abuf, "pc", 'x', opval);
+ }
+}
+}
+
+ abuf->written = written;
+ SEM_BRANCH_FINI (vpc);
+#undef FLD
+}
+ NEXT (vpc);
+
+ CASE (sem, INSN_BNC8) : /* bnc.s $disp8 */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_bl8.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ SEM_BRANCH_INIT
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 2);
+
+if (NOTBI (CPU (h_cond))) {
+ {
+ USI opval = FLD (i_disp8);
+ SEM_BRANCH_VIA_CACHE (current_cpu, sem_arg, opval, vpc);
+ written |= (1 << 2);
+ TRACE_RESULT (current_cpu, abuf, "pc", 'x', opval);
+ }
+}
+
+ abuf->written = written;
+ SEM_BRANCH_FINI (vpc);
+#undef FLD
+}
+ NEXT (vpc);
+
+ CASE (sem, INSN_BNC24) : /* bnc.l $disp24 */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_bl24.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ SEM_BRANCH_INIT
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 4);
+
+if (NOTBI (CPU (h_cond))) {
+ {
+ USI opval = FLD (i_disp24);
+ SEM_BRANCH_VIA_CACHE (current_cpu, sem_arg, opval, vpc);
+ written |= (1 << 2);
+ TRACE_RESULT (current_cpu, abuf, "pc", 'x', opval);
+ }
+}
+
+ abuf->written = written;
+ SEM_BRANCH_FINI (vpc);
+#undef FLD
+}
+ NEXT (vpc);
+
+ CASE (sem, INSN_BNE) : /* bne $src1,$src2,$disp16 */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_beq.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ SEM_BRANCH_INIT
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 4);
+
+if (NESI (* FLD (i_src1), * FLD (i_src2))) {
+ {
+ USI opval = FLD (i_disp16);
+ SEM_BRANCH_VIA_CACHE (current_cpu, sem_arg, opval, vpc);
+ written |= (1 << 3);
+ TRACE_RESULT (current_cpu, abuf, "pc", 'x', opval);
+ }
+}
+
+ abuf->written = written;
+ SEM_BRANCH_FINI (vpc);
+#undef FLD
+}
+ NEXT (vpc);
+
+ CASE (sem, INSN_BRA8) : /* bra.s $disp8 */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_bl8.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ SEM_BRANCH_INIT
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 2);
+
+ {
+ USI opval = FLD (i_disp8);
+ SEM_BRANCH_VIA_CACHE (current_cpu, sem_arg, opval, vpc);
+ TRACE_RESULT (current_cpu, abuf, "pc", 'x', opval);
+ }
+
+ SEM_BRANCH_FINI (vpc);
+#undef FLD
+}
+ NEXT (vpc);
+
+ CASE (sem, INSN_BRA24) : /* bra.l $disp24 */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_bl24.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ SEM_BRANCH_INIT
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 4);
+
+ {
+ USI opval = FLD (i_disp24);
+ SEM_BRANCH_VIA_CACHE (current_cpu, sem_arg, opval, vpc);
+ TRACE_RESULT (current_cpu, abuf, "pc", 'x', opval);
+ }
+
+ SEM_BRANCH_FINI (vpc);
+#undef FLD
+}
+ NEXT (vpc);
+
+ CASE (sem, INSN_BNCL8) : /* bncl.s $disp8 */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_bl8.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ SEM_BRANCH_INIT
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 2);
+
+if (NOTBI (CPU (h_cond))) {
+{
+ {
+ SI opval = ADDSI (ANDSI (pc, -4), 4);
+ CPU (h_gr[((UINT) 14)]) = opval;
+ written |= (1 << 3);
+ TRACE_RESULT (current_cpu, abuf, "gr-14", 'x', opval);
+ }
+ {
+ USI opval = FLD (i_disp8);
+ SEM_BRANCH_VIA_CACHE (current_cpu, sem_arg, opval, vpc);
+ written |= (1 << 4);
+ TRACE_RESULT (current_cpu, abuf, "pc", 'x', opval);
+ }
+}
+}
+
+ abuf->written = written;
+ SEM_BRANCH_FINI (vpc);
+#undef FLD
+}
+ NEXT (vpc);
+
+ CASE (sem, INSN_BNCL24) : /* bncl.l $disp24 */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_bl24.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ SEM_BRANCH_INIT
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 4);
+
+if (NOTBI (CPU (h_cond))) {
+{
+ {
+ SI opval = ADDSI (pc, 4);
+ CPU (h_gr[((UINT) 14)]) = opval;
+ written |= (1 << 3);
+ TRACE_RESULT (current_cpu, abuf, "gr-14", 'x', opval);
+ }
+ {
+ USI opval = FLD (i_disp24);
+ SEM_BRANCH_VIA_CACHE (current_cpu, sem_arg, opval, vpc);
+ written |= (1 << 4);
+ TRACE_RESULT (current_cpu, abuf, "pc", 'x', opval);
+ }
+}
+}
+
+ abuf->written = written;
+ SEM_BRANCH_FINI (vpc);
+#undef FLD
+}
+ NEXT (vpc);
+
+ CASE (sem, INSN_CMP) : /* cmp $src1,$src2 */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_st_plus.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 2);
+
+ {
+ BI opval = LTSI (* FLD (i_src1), * FLD (i_src2));
+ CPU (h_cond) = opval;
+ TRACE_RESULT (current_cpu, abuf, "condbit", 'x', opval);
+ }
+
+#undef FLD
+}
+ NEXT (vpc);
+
+ CASE (sem, INSN_CMPI) : /* cmpi $src2,$simm16 */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_st_d.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 4);
+
+ {
+ BI opval = LTSI (* FLD (i_src2), FLD (f_simm16));
+ CPU (h_cond) = opval;
+ TRACE_RESULT (current_cpu, abuf, "condbit", 'x', opval);
+ }
+
+#undef FLD
+}
+ NEXT (vpc);
+
+ CASE (sem, INSN_CMPU) : /* cmpu $src1,$src2 */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_st_plus.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 2);
+
+ {
+ BI opval = LTUSI (* FLD (i_src1), * FLD (i_src2));
+ CPU (h_cond) = opval;
+ TRACE_RESULT (current_cpu, abuf, "condbit", 'x', opval);
+ }
+
+#undef FLD
+}
+ NEXT (vpc);
+
+ CASE (sem, INSN_CMPUI) : /* cmpui $src2,$simm16 */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_st_d.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 4);
+
+ {
+ BI opval = LTUSI (* FLD (i_src2), FLD (f_simm16));
+ CPU (h_cond) = opval;
+ TRACE_RESULT (current_cpu, abuf, "condbit", 'x', opval);
+ }
+
+#undef FLD
+}
+ NEXT (vpc);
+
+ CASE (sem, INSN_CMPEQ) : /* cmpeq $src1,$src2 */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_st_plus.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 2);
+
+ {
+ BI opval = EQSI (* FLD (i_src1), * FLD (i_src2));
+ CPU (h_cond) = opval;
+ TRACE_RESULT (current_cpu, abuf, "condbit", 'x', opval);
+ }
+
+#undef FLD
+}
+ NEXT (vpc);
+
+ CASE (sem, INSN_CMPZ) : /* cmpz $src2 */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_st_plus.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 2);
+
+ {
+ BI opval = EQSI (* FLD (i_src2), 0);
+ CPU (h_cond) = opval;
+ TRACE_RESULT (current_cpu, abuf, "condbit", 'x', opval);
+ }
+
+#undef FLD
+}
+ NEXT (vpc);
+
+ CASE (sem, INSN_DIV) : /* div $dr,$sr */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_add.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 4);
+
+if (NESI (* FLD (i_sr), 0)) {
+ {
+ SI opval = DIVSI (* FLD (i_dr), * FLD (i_sr));
+ * FLD (i_dr) = opval;
+ written |= (1 << 2);
+ TRACE_RESULT (current_cpu, abuf, "dr", 'x', opval);
+ }
+}
+
+ abuf->written = written;
+#undef FLD
+}
+ NEXT (vpc);
+
+ CASE (sem, INSN_DIVU) : /* divu $dr,$sr */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_add.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 4);
+
+if (NESI (* FLD (i_sr), 0)) {
+ {
+ SI opval = UDIVSI (* FLD (i_dr), * FLD (i_sr));
+ * FLD (i_dr) = opval;
+ written |= (1 << 2);
+ TRACE_RESULT (current_cpu, abuf, "dr", 'x', opval);
+ }
+}
+
+ abuf->written = written;
+#undef FLD
+}
+ NEXT (vpc);
+
+ CASE (sem, INSN_REM) : /* rem $dr,$sr */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_add.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 4);
+
+if (NESI (* FLD (i_sr), 0)) {
+ {
+ SI opval = MODSI (* FLD (i_dr), * FLD (i_sr));
+ * FLD (i_dr) = opval;
+ written |= (1 << 2);
+ TRACE_RESULT (current_cpu, abuf, "dr", 'x', opval);
+ }
+}
+
+ abuf->written = written;
+#undef FLD
+}
+ NEXT (vpc);
+
+ CASE (sem, INSN_REMU) : /* remu $dr,$sr */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_add.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 4);
+
+if (NESI (* FLD (i_sr), 0)) {
+ {
+ SI opval = UMODSI (* FLD (i_dr), * FLD (i_sr));
+ * FLD (i_dr) = opval;
+ written |= (1 << 2);
+ TRACE_RESULT (current_cpu, abuf, "dr", 'x', opval);
+ }
+}
+
+ abuf->written = written;
+#undef FLD
+}
+ NEXT (vpc);
+
+ CASE (sem, INSN_DIVH) : /* divh $dr,$sr */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_add.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 4);
+
+if (NESI (* FLD (i_sr), 0)) {
+ {
+ SI opval = DIVSI (EXTHISI (TRUNCSIHI (* FLD (i_dr))), * FLD (i_sr));
+ * FLD (i_dr) = opval;
+ written |= (1 << 2);
+ TRACE_RESULT (current_cpu, abuf, "dr", 'x', opval);
+ }
+}
+
+ abuf->written = written;
+#undef FLD
+}
+ NEXT (vpc);
+
+ CASE (sem, INSN_JC) : /* jc $sr */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_mvtc.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ SEM_BRANCH_INIT
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 2);
+
+if (CPU (h_cond)) {
+ {
+ USI opval = ANDSI (* FLD (i_sr), -4);
+ SEM_BRANCH_VIA_ADDR (current_cpu, sem_arg, opval, vpc);
+ written |= (1 << 2);
+ TRACE_RESULT (current_cpu, abuf, "pc", 'x', opval);
+ }
+}
+
+ abuf->written = written;
+ SEM_BRANCH_FINI (vpc);
+#undef FLD
+}
+ NEXT (vpc);
+
+ CASE (sem, INSN_JNC) : /* jnc $sr */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_mvtc.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ SEM_BRANCH_INIT
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 2);
+
+if (NOTBI (CPU (h_cond))) {
+ {
+ USI opval = ANDSI (* FLD (i_sr), -4);
+ SEM_BRANCH_VIA_ADDR (current_cpu, sem_arg, opval, vpc);
+ written |= (1 << 2);
+ TRACE_RESULT (current_cpu, abuf, "pc", 'x', opval);
+ }
+}
+
+ abuf->written = written;
+ SEM_BRANCH_FINI (vpc);
+#undef FLD
+}
+ NEXT (vpc);
+
+ CASE (sem, INSN_JL) : /* jl $sr */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_jl.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ SEM_BRANCH_INIT
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 2);
+
+{
+ SI temp0;USI temp1;
+ temp0 = ADDSI (ANDSI (pc, -4), 4);
+ temp1 = ANDSI (* FLD (i_sr), -4);
+ {
+ SI opval = temp0;
+ CPU (h_gr[((UINT) 14)]) = opval;
+ TRACE_RESULT (current_cpu, abuf, "gr-14", 'x', opval);
+ }
+ {
+ USI opval = temp1;
+ SEM_BRANCH_VIA_ADDR (current_cpu, sem_arg, opval, vpc);
+ TRACE_RESULT (current_cpu, abuf, "pc", 'x', opval);
+ }
+}
+
+ SEM_BRANCH_FINI (vpc);
+#undef FLD
+}
+ NEXT (vpc);
+
+ CASE (sem, INSN_JMP) : /* jmp $sr */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_mvtc.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ SEM_BRANCH_INIT
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 2);
+
+ {
+ USI opval = ANDSI (* FLD (i_sr), -4);
+ SEM_BRANCH_VIA_ADDR (current_cpu, sem_arg, opval, vpc);
+ TRACE_RESULT (current_cpu, abuf, "pc", 'x', opval);
+ }
+
+ SEM_BRANCH_FINI (vpc);
+#undef FLD
+}
+ NEXT (vpc);
+
+ CASE (sem, INSN_LD) : /* ld $dr,@$sr */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_ld_plus.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 2);
+
+ {
+ SI opval = GETMEMSI (current_cpu, pc, * FLD (i_sr));
+ * FLD (i_dr) = opval;
+ TRACE_RESULT (current_cpu, abuf, "dr", 'x', opval);
+ }
+
+#undef FLD
+}
+ NEXT (vpc);
+
+ CASE (sem, INSN_LD_D) : /* ld $dr,@($slo16,$sr) */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_add3.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 4);
+
+ {
+ SI opval = GETMEMSI (current_cpu, pc, ADDSI (* FLD (i_sr), FLD (f_simm16)));
+ * FLD (i_dr) = opval;
+ TRACE_RESULT (current_cpu, abuf, "dr", 'x', opval);
+ }
+
+#undef FLD
+}
+ NEXT (vpc);
+
+ CASE (sem, INSN_LDB) : /* ldb $dr,@$sr */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_ld_plus.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 2);
+
+ {
+ SI opval = EXTQISI (GETMEMQI (current_cpu, pc, * FLD (i_sr)));
+ * FLD (i_dr) = opval;
+ TRACE_RESULT (current_cpu, abuf, "dr", 'x', opval);
+ }
+
+#undef FLD
+}
+ NEXT (vpc);
+
+ CASE (sem, INSN_LDB_D) : /* ldb $dr,@($slo16,$sr) */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_add3.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 4);
+
+ {
+ SI opval = EXTQISI (GETMEMQI (current_cpu, pc, ADDSI (* FLD (i_sr), FLD (f_simm16))));
+ * FLD (i_dr) = opval;
+ TRACE_RESULT (current_cpu, abuf, "dr", 'x', opval);
+ }
+
+#undef FLD
+}
+ NEXT (vpc);
+
+ CASE (sem, INSN_LDH) : /* ldh $dr,@$sr */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_ld_plus.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 2);
+
+ {
+ SI opval = EXTHISI (GETMEMHI (current_cpu, pc, * FLD (i_sr)));
+ * FLD (i_dr) = opval;
+ TRACE_RESULT (current_cpu, abuf, "dr", 'x', opval);
+ }
+
+#undef FLD
+}
+ NEXT (vpc);
+
+ CASE (sem, INSN_LDH_D) : /* ldh $dr,@($slo16,$sr) */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_add3.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 4);
+
+ {
+ SI opval = EXTHISI (GETMEMHI (current_cpu, pc, ADDSI (* FLD (i_sr), FLD (f_simm16))));
+ * FLD (i_dr) = opval;
+ TRACE_RESULT (current_cpu, abuf, "dr", 'x', opval);
+ }
+
+#undef FLD
+}
+ NEXT (vpc);
+
+ CASE (sem, INSN_LDUB) : /* ldub $dr,@$sr */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_ld_plus.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 2);
+
+ {
+ SI opval = ZEXTQISI (GETMEMQI (current_cpu, pc, * FLD (i_sr)));
+ * FLD (i_dr) = opval;
+ TRACE_RESULT (current_cpu, abuf, "dr", 'x', opval);
+ }
+
+#undef FLD
+}
+ NEXT (vpc);
+
+ CASE (sem, INSN_LDUB_D) : /* ldub $dr,@($slo16,$sr) */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_add3.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 4);
+
+ {
+ SI opval = ZEXTQISI (GETMEMQI (current_cpu, pc, ADDSI (* FLD (i_sr), FLD (f_simm16))));
+ * FLD (i_dr) = opval;
+ TRACE_RESULT (current_cpu, abuf, "dr", 'x', opval);
+ }
+
+#undef FLD
+}
+ NEXT (vpc);
+
+ CASE (sem, INSN_LDUH) : /* lduh $dr,@$sr */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_ld_plus.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 2);
+
+ {
+ SI opval = ZEXTHISI (GETMEMHI (current_cpu, pc, * FLD (i_sr)));
+ * FLD (i_dr) = opval;
+ TRACE_RESULT (current_cpu, abuf, "dr", 'x', opval);
+ }
+
+#undef FLD
+}
+ NEXT (vpc);
+
+ CASE (sem, INSN_LDUH_D) : /* lduh $dr,@($slo16,$sr) */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_add3.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 4);
+
+ {
+ SI opval = ZEXTHISI (GETMEMHI (current_cpu, pc, ADDSI (* FLD (i_sr), FLD (f_simm16))));
+ * FLD (i_dr) = opval;
+ TRACE_RESULT (current_cpu, abuf, "dr", 'x', opval);
+ }
+
+#undef FLD
+}
+ NEXT (vpc);
+
+ CASE (sem, INSN_LD_PLUS) : /* ld $dr,@$sr+ */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_ld_plus.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 2);
+
+{
+ SI temp0;SI temp1;
+ temp0 = GETMEMSI (current_cpu, pc, * FLD (i_sr));
+ temp1 = ADDSI (* FLD (i_sr), 4);
+ {
+ SI opval = temp0;
+ * FLD (i_dr) = opval;
+ TRACE_RESULT (current_cpu, abuf, "dr", 'x', opval);
+ }
+ {
+ SI opval = temp1;
+ * FLD (i_sr) = opval;
+ TRACE_RESULT (current_cpu, abuf, "sr", 'x', opval);
+ }
+}
+
+#undef FLD
+}
+ NEXT (vpc);
+
+ CASE (sem, INSN_LD24) : /* ld24 $dr,$uimm24 */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_ld24.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 4);
+
+ {
+ SI opval = FLD (i_uimm24);
+ * FLD (i_dr) = opval;
+ TRACE_RESULT (current_cpu, abuf, "dr", 'x', opval);
+ }
+
+#undef FLD
+}
+ NEXT (vpc);
+
+ CASE (sem, INSN_LDI8) : /* ldi8 $dr,$simm8 */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_addi.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 2);
+
+ {
+ SI opval = FLD (f_simm8);
+ * FLD (i_dr) = opval;
+ TRACE_RESULT (current_cpu, abuf, "dr", 'x', opval);
+ }
+
+#undef FLD
+}
+ NEXT (vpc);
+
+ CASE (sem, INSN_LDI16) : /* ldi16 $dr,$hash$slo16 */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_add3.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 4);
+
+ {
+ SI opval = FLD (f_simm16);
+ * FLD (i_dr) = opval;
+ TRACE_RESULT (current_cpu, abuf, "dr", 'x', opval);
+ }
+
+#undef FLD
+}
+ NEXT (vpc);
+
+ CASE (sem, INSN_LOCK) : /* lock $dr,@$sr */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_ld_plus.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 2);
+
+{
+ {
+ BI opval = 1;
+ CPU (h_lock) = opval;
+ TRACE_RESULT (current_cpu, abuf, "lock", 'x', opval);
+ }
+ {
+ SI opval = GETMEMSI (current_cpu, pc, * FLD (i_sr));
+ * FLD (i_dr) = opval;
+ TRACE_RESULT (current_cpu, abuf, "dr", 'x', opval);
+ }
+}
+
+#undef FLD
+}
+ NEXT (vpc);
+
+ CASE (sem, INSN_MACHI_A) : /* machi $src1,$src2,$acc */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_machi_a.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 2);
+
+ {
+ DI opval = SRADI (SLLDI (ADDDI (GET_H_ACCUMS (FLD (f_acc)), MULDI (EXTSIDI (ANDSI (* FLD (i_src1), 0xffff0000)), EXTHIDI (TRUNCSIHI (SRASI (* FLD (i_src2), 16))))), 8), 8);
+ SET_H_ACCUMS (FLD (f_acc), opval);
+ TRACE_RESULT (current_cpu, abuf, "acc", 'D', opval);
+ }
+
+#undef FLD
+}
+ NEXT (vpc);
+
+ CASE (sem, INSN_MACLO_A) : /* maclo $src1,$src2,$acc */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_machi_a.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 2);
+
+ {
+ DI opval = SRADI (SLLDI (ADDDI (GET_H_ACCUMS (FLD (f_acc)), MULDI (EXTSIDI (SLLSI (* FLD (i_src1), 16)), EXTHIDI (TRUNCSIHI (* FLD (i_src2))))), 8), 8);
+ SET_H_ACCUMS (FLD (f_acc), opval);
+ TRACE_RESULT (current_cpu, abuf, "acc", 'D', opval);
+ }
+
+#undef FLD
+}
+ NEXT (vpc);
+
+ CASE (sem, INSN_MACWHI_A) : /* macwhi $src1,$src2,$acc */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_machi_a.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 2);
+
+ {
+ DI opval = ADDDI (GET_H_ACCUMS (FLD (f_acc)), MULDI (EXTSIDI (* FLD (i_src1)), EXTHIDI (TRUNCSIHI (SRASI (* FLD (i_src2), 16)))));
+ SET_H_ACCUMS (FLD (f_acc), opval);
+ TRACE_RESULT (current_cpu, abuf, "acc", 'D', opval);
+ }
+
+#undef FLD
+}
+ NEXT (vpc);
+
+ CASE (sem, INSN_MACWLO_A) : /* macwlo $src1,$src2,$acc */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_machi_a.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 2);
+
+ {
+ DI opval = ADDDI (GET_H_ACCUMS (FLD (f_acc)), MULDI (EXTSIDI (* FLD (i_src1)), EXTHIDI (TRUNCSIHI (* FLD (i_src2)))));
+ SET_H_ACCUMS (FLD (f_acc), opval);
+ TRACE_RESULT (current_cpu, abuf, "acc", 'D', opval);
+ }
+
+#undef FLD
+}
+ NEXT (vpc);
+
+ CASE (sem, INSN_MUL) : /* mul $dr,$sr */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_add.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 2);
+
+ {
+ SI opval = MULSI (* FLD (i_dr), * FLD (i_sr));
+ * FLD (i_dr) = opval;
+ TRACE_RESULT (current_cpu, abuf, "dr", 'x', opval);
+ }
+
+#undef FLD
+}
+ NEXT (vpc);
+
+ CASE (sem, INSN_MULHI_A) : /* mulhi $src1,$src2,$acc */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_machi_a.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 2);
+
+ {
+ DI opval = SRADI (SLLDI (MULDI (EXTSIDI (ANDSI (* FLD (i_src1), 0xffff0000)), EXTHIDI (TRUNCSIHI (SRASI (* FLD (i_src2), 16)))), 16), 16);
+ SET_H_ACCUMS (FLD (f_acc), opval);
+ TRACE_RESULT (current_cpu, abuf, "acc", 'D', opval);
+ }
+
+#undef FLD
+}
+ NEXT (vpc);
+
+ CASE (sem, INSN_MULLO_A) : /* mullo $src1,$src2,$acc */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_machi_a.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 2);
+
+ {
+ DI opval = SRADI (SLLDI (MULDI (EXTSIDI (SLLSI (* FLD (i_src1), 16)), EXTHIDI (TRUNCSIHI (* FLD (i_src2)))), 16), 16);
+ SET_H_ACCUMS (FLD (f_acc), opval);
+ TRACE_RESULT (current_cpu, abuf, "acc", 'D', opval);
+ }
+
+#undef FLD
+}
+ NEXT (vpc);
+
+ CASE (sem, INSN_MULWHI_A) : /* mulwhi $src1,$src2,$acc */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_machi_a.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 2);
+
+ {
+ DI opval = MULDI (EXTSIDI (* FLD (i_src1)), EXTHIDI (TRUNCSIHI (SRASI (* FLD (i_src2), 16))));
+ SET_H_ACCUMS (FLD (f_acc), opval);
+ TRACE_RESULT (current_cpu, abuf, "acc", 'D', opval);
+ }
+
+#undef FLD
+}
+ NEXT (vpc);
+
+ CASE (sem, INSN_MULWLO_A) : /* mulwlo $src1,$src2,$acc */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_machi_a.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 2);
+
+ {
+ DI opval = MULDI (EXTSIDI (* FLD (i_src1)), EXTHIDI (TRUNCSIHI (* FLD (i_src2))));
+ SET_H_ACCUMS (FLD (f_acc), opval);
+ TRACE_RESULT (current_cpu, abuf, "acc", 'D', opval);
+ }
+
+#undef FLD
+}
+ NEXT (vpc);
+
+ CASE (sem, INSN_MV) : /* mv $dr,$sr */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_ld_plus.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 2);
+
+ {
+ SI opval = * FLD (i_sr);
+ * FLD (i_dr) = opval;
+ TRACE_RESULT (current_cpu, abuf, "dr", 'x', opval);
+ }
+
+#undef FLD
+}
+ NEXT (vpc);
+
+ CASE (sem, INSN_MVFACHI_A) : /* mvfachi $dr,$accs */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_mvfachi_a.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 2);
+
+ {
+ SI opval = TRUNCDISI (SRADI (GET_H_ACCUMS (FLD (f_accs)), 32));
+ * FLD (i_dr) = opval;
+ TRACE_RESULT (current_cpu, abuf, "dr", 'x', opval);
+ }
+
+#undef FLD
+}
+ NEXT (vpc);
+
+ CASE (sem, INSN_MVFACLO_A) : /* mvfaclo $dr,$accs */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_mvfachi_a.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 2);
+
+ {
+ SI opval = TRUNCDISI (GET_H_ACCUMS (FLD (f_accs)));
+ * FLD (i_dr) = opval;
+ TRACE_RESULT (current_cpu, abuf, "dr", 'x', opval);
+ }
+
+#undef FLD
+}
+ NEXT (vpc);
+
+ CASE (sem, INSN_MVFACMI_A) : /* mvfacmi $dr,$accs */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_mvfachi_a.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 2);
+
+ {
+ SI opval = TRUNCDISI (SRADI (GET_H_ACCUMS (FLD (f_accs)), 16));
+ * FLD (i_dr) = opval;
+ TRACE_RESULT (current_cpu, abuf, "dr", 'x', opval);
+ }
+
+#undef FLD
+}
+ NEXT (vpc);
+
+ CASE (sem, INSN_MVFC) : /* mvfc $dr,$scr */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_mvfc.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 2);
+
+ {
+ SI opval = GET_H_CR (FLD (f_r2));
+ * FLD (i_dr) = opval;
+ TRACE_RESULT (current_cpu, abuf, "dr", 'x', opval);
+ }
+
+#undef FLD
+}
+ NEXT (vpc);
+
+ CASE (sem, INSN_MVTACHI_A) : /* mvtachi $src1,$accs */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_mvtachi_a.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 2);
+
+ {
+ DI opval = ORDI (ANDDI (GET_H_ACCUMS (FLD (f_accs)), MAKEDI (0, 0xffffffff)), SLLDI (EXTSIDI (* FLD (i_src1)), 32));
+ SET_H_ACCUMS (FLD (f_accs), opval);
+ TRACE_RESULT (current_cpu, abuf, "accs", 'D', opval);
+ }
+
+#undef FLD
+}
+ NEXT (vpc);
+
+ CASE (sem, INSN_MVTACLO_A) : /* mvtaclo $src1,$accs */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_mvtachi_a.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 2);
+
+ {
+ DI opval = ORDI (ANDDI (GET_H_ACCUMS (FLD (f_accs)), MAKEDI (0xffffffff, 0)), ZEXTSIDI (* FLD (i_src1)));
+ SET_H_ACCUMS (FLD (f_accs), opval);
+ TRACE_RESULT (current_cpu, abuf, "accs", 'D', opval);
+ }
+
+#undef FLD
+}
+ NEXT (vpc);
+
+ CASE (sem, INSN_MVTC) : /* mvtc $sr,$dcr */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_mvtc.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 2);
+
+ {
+ USI opval = * FLD (i_sr);
+ SET_H_CR (FLD (f_r1), opval);
+ TRACE_RESULT (current_cpu, abuf, "dcr", 'x', opval);
+ }
+
+#undef FLD
+}
+ NEXT (vpc);
+
+ CASE (sem, INSN_NEG) : /* neg $dr,$sr */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_ld_plus.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 2);
+
+ {
+ SI opval = NEGSI (* FLD (i_sr));
+ * FLD (i_dr) = opval;
+ TRACE_RESULT (current_cpu, abuf, "dr", 'x', opval);
+ }
+
+#undef FLD
+}
+ NEXT (vpc);
+
+ CASE (sem, INSN_NOP) : /* nop */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.fmt_empty.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 2);
+
+PROFILE_COUNT_FILLNOPS (current_cpu, abuf->addr);
+
+#undef FLD
+}
+ NEXT (vpc);
+
+ CASE (sem, INSN_NOT) : /* not $dr,$sr */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_ld_plus.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 2);
+
+ {
+ SI opval = INVSI (* FLD (i_sr));
+ * FLD (i_dr) = opval;
+ TRACE_RESULT (current_cpu, abuf, "dr", 'x', opval);
+ }
+
+#undef FLD
+}
+ NEXT (vpc);
+
+ CASE (sem, INSN_RAC_DSI) : /* rac $accd,$accs,$imm1 */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_rac_dsi.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 2);
+
+{
+ DI tmp_tmp1;
+ tmp_tmp1 = SLLDI (GET_H_ACCUMS (FLD (f_accs)), FLD (f_imm1));
+ tmp_tmp1 = ADDDI (tmp_tmp1, MAKEDI (0, 32768));
+ {
+ DI opval = (GTDI (tmp_tmp1, MAKEDI (32767, 0xffff0000))) ? (MAKEDI (32767, 0xffff0000)) : (LTDI (tmp_tmp1, MAKEDI (0xffff8000, 0))) ? (MAKEDI (0xffff8000, 0)) : (ANDDI (tmp_tmp1, MAKEDI (0xffffffff, 0xffff0000)));
+ SET_H_ACCUMS (FLD (f_accd), opval);
+ TRACE_RESULT (current_cpu, abuf, "accd", 'D', opval);
+ }
+}
+
+#undef FLD
+}
+ NEXT (vpc);
+
+ CASE (sem, INSN_RACH_DSI) : /* rach $accd,$accs,$imm1 */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_rac_dsi.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 2);
+
+{
+ DI tmp_tmp1;
+ tmp_tmp1 = SLLDI (GET_H_ACCUMS (FLD (f_accs)), FLD (f_imm1));
+ tmp_tmp1 = ADDDI (tmp_tmp1, MAKEDI (0, 0x80000000));
+ {
+ DI opval = (GTDI (tmp_tmp1, MAKEDI (32767, 0))) ? (MAKEDI (32767, 0)) : (LTDI (tmp_tmp1, MAKEDI (0xffff8000, 0))) ? (MAKEDI (0xffff8000, 0)) : (ANDDI (tmp_tmp1, MAKEDI (0xffffffff, 0)));
+ SET_H_ACCUMS (FLD (f_accd), opval);
+ TRACE_RESULT (current_cpu, abuf, "accd", 'D', opval);
+ }
+}
+
+#undef FLD
+}
+ NEXT (vpc);
+
+ CASE (sem, INSN_RTE) : /* rte */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.fmt_empty.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ SEM_BRANCH_INIT
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 2);
+
+{
+ {
+ USI opval = ANDSI (GET_H_CR (((UINT) 6)), -4);
+ SEM_BRANCH_VIA_ADDR (current_cpu, sem_arg, opval, vpc);
+ TRACE_RESULT (current_cpu, abuf, "pc", 'x', opval);
+ }
+ {
+ USI opval = GET_H_CR (((UINT) 14));
+ SET_H_CR (((UINT) 6), opval);
+ TRACE_RESULT (current_cpu, abuf, "cr-6", 'x', opval);
+ }
+ {
+ UQI opval = CPU (h_bpsw);
+ SET_H_PSW (opval);
+ TRACE_RESULT (current_cpu, abuf, "psw", 'x', opval);
+ }
+ {
+ UQI opval = CPU (h_bbpsw);
+ CPU (h_bpsw) = opval;
+ TRACE_RESULT (current_cpu, abuf, "bpsw", 'x', opval);
+ }
+}
+
+ SEM_BRANCH_FINI (vpc);
+#undef FLD
+}
+ NEXT (vpc);
+
+ CASE (sem, INSN_SETH) : /* seth $dr,$hash$hi16 */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_seth.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 4);
+
+ {
+ SI opval = SLLSI (FLD (f_hi16), 16);
+ * FLD (i_dr) = opval;
+ TRACE_RESULT (current_cpu, abuf, "dr", 'x', opval);
+ }
+
+#undef FLD
+}
+ NEXT (vpc);
+
+ CASE (sem, INSN_SLL) : /* sll $dr,$sr */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_add.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 2);
+
+ {
+ SI opval = SLLSI (* FLD (i_dr), ANDSI (* FLD (i_sr), 31));
+ * FLD (i_dr) = opval;
+ TRACE_RESULT (current_cpu, abuf, "dr", 'x', opval);
+ }
+
+#undef FLD
+}
+ NEXT (vpc);
+
+ CASE (sem, INSN_SLL3) : /* sll3 $dr,$sr,$simm16 */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_add3.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 4);
+
+ {
+ SI opval = SLLSI (* FLD (i_sr), ANDSI (FLD (f_simm16), 31));
+ * FLD (i_dr) = opval;
+ TRACE_RESULT (current_cpu, abuf, "dr", 'x', opval);
+ }
+
+#undef FLD
+}
+ NEXT (vpc);
+
+ CASE (sem, INSN_SLLI) : /* slli $dr,$uimm5 */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_slli.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 2);
+
+ {
+ SI opval = SLLSI (* FLD (i_dr), FLD (f_uimm5));
+ * FLD (i_dr) = opval;
+ TRACE_RESULT (current_cpu, abuf, "dr", 'x', opval);
+ }
+
+#undef FLD
+}
+ NEXT (vpc);
+
+ CASE (sem, INSN_SRA) : /* sra $dr,$sr */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_add.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 2);
+
+ {
+ SI opval = SRASI (* FLD (i_dr), ANDSI (* FLD (i_sr), 31));
+ * FLD (i_dr) = opval;
+ TRACE_RESULT (current_cpu, abuf, "dr", 'x', opval);
+ }
+
+#undef FLD
+}
+ NEXT (vpc);
+
+ CASE (sem, INSN_SRA3) : /* sra3 $dr,$sr,$simm16 */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_add3.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 4);
+
+ {
+ SI opval = SRASI (* FLD (i_sr), ANDSI (FLD (f_simm16), 31));
+ * FLD (i_dr) = opval;
+ TRACE_RESULT (current_cpu, abuf, "dr", 'x', opval);
+ }
+
+#undef FLD
+}
+ NEXT (vpc);
+
+ CASE (sem, INSN_SRAI) : /* srai $dr,$uimm5 */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_slli.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 2);
+
+ {
+ SI opval = SRASI (* FLD (i_dr), FLD (f_uimm5));
+ * FLD (i_dr) = opval;
+ TRACE_RESULT (current_cpu, abuf, "dr", 'x', opval);
+ }
+
+#undef FLD
+}
+ NEXT (vpc);
+
+ CASE (sem, INSN_SRL) : /* srl $dr,$sr */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_add.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 2);
+
+ {
+ SI opval = SRLSI (* FLD (i_dr), ANDSI (* FLD (i_sr), 31));
+ * FLD (i_dr) = opval;
+ TRACE_RESULT (current_cpu, abuf, "dr", 'x', opval);
+ }
+
+#undef FLD
+}
+ NEXT (vpc);
+
+ CASE (sem, INSN_SRL3) : /* srl3 $dr,$sr,$simm16 */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_add3.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 4);
+
+ {
+ SI opval = SRLSI (* FLD (i_sr), ANDSI (FLD (f_simm16), 31));
+ * FLD (i_dr) = opval;
+ TRACE_RESULT (current_cpu, abuf, "dr", 'x', opval);
+ }
+
+#undef FLD
+}
+ NEXT (vpc);
+
+ CASE (sem, INSN_SRLI) : /* srli $dr,$uimm5 */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_slli.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 2);
+
+ {
+ SI opval = SRLSI (* FLD (i_dr), FLD (f_uimm5));
+ * FLD (i_dr) = opval;
+ TRACE_RESULT (current_cpu, abuf, "dr", 'x', opval);
+ }
+
+#undef FLD
+}
+ NEXT (vpc);
+
+ CASE (sem, INSN_ST) : /* st $src1,@$src2 */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_st_plus.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 2);
+
+ {
+ SI opval = * FLD (i_src1);
+ SETMEMSI (current_cpu, pc, * FLD (i_src2), opval);
+ TRACE_RESULT (current_cpu, abuf, "memory", 'x', opval);
+ }
+
+#undef FLD
+}
+ NEXT (vpc);
+
+ CASE (sem, INSN_ST_D) : /* st $src1,@($slo16,$src2) */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_st_d.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 4);
+
+ {
+ SI opval = * FLD (i_src1);
+ SETMEMSI (current_cpu, pc, ADDSI (* FLD (i_src2), FLD (f_simm16)), opval);
+ TRACE_RESULT (current_cpu, abuf, "memory", 'x', opval);
+ }
+
+#undef FLD
+}
+ NEXT (vpc);
+
+ CASE (sem, INSN_STB) : /* stb $src1,@$src2 */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_st_plus.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 2);
+
+ {
+ QI opval = * FLD (i_src1);
+ SETMEMQI (current_cpu, pc, * FLD (i_src2), opval);
+ TRACE_RESULT (current_cpu, abuf, "memory", 'x', opval);
+ }
+
+#undef FLD
+}
+ NEXT (vpc);
+
+ CASE (sem, INSN_STB_D) : /* stb $src1,@($slo16,$src2) */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_st_d.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 4);
+
+ {
+ QI opval = * FLD (i_src1);
+ SETMEMQI (current_cpu, pc, ADDSI (* FLD (i_src2), FLD (f_simm16)), opval);
+ TRACE_RESULT (current_cpu, abuf, "memory", 'x', opval);
+ }
+
+#undef FLD
+}
+ NEXT (vpc);
+
+ CASE (sem, INSN_STH) : /* sth $src1,@$src2 */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_st_plus.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 2);
+
+ {
+ HI opval = * FLD (i_src1);
+ SETMEMHI (current_cpu, pc, * FLD (i_src2), opval);
+ TRACE_RESULT (current_cpu, abuf, "memory", 'x', opval);
+ }
+
+#undef FLD
+}
+ NEXT (vpc);
+
+ CASE (sem, INSN_STH_D) : /* sth $src1,@($slo16,$src2) */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_st_d.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 4);
+
+ {
+ HI opval = * FLD (i_src1);
+ SETMEMHI (current_cpu, pc, ADDSI (* FLD (i_src2), FLD (f_simm16)), opval);
+ TRACE_RESULT (current_cpu, abuf, "memory", 'x', opval);
+ }
+
+#undef FLD
+}
+ NEXT (vpc);
+
+ CASE (sem, INSN_ST_PLUS) : /* st $src1,@+$src2 */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_st_plus.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 2);
+
+{
+ SI tmp_new_src2;
+ tmp_new_src2 = ADDSI (* FLD (i_src2), 4);
+ {
+ SI opval = * FLD (i_src1);
+ SETMEMSI (current_cpu, pc, tmp_new_src2, opval);
+ TRACE_RESULT (current_cpu, abuf, "memory", 'x', opval);
+ }
+ {
+ SI opval = tmp_new_src2;
+ * FLD (i_src2) = opval;
+ TRACE_RESULT (current_cpu, abuf, "src2", 'x', opval);
+ }
+}
+
+#undef FLD
+}
+ NEXT (vpc);
+
+ CASE (sem, INSN_ST_MINUS) : /* st $src1,@-$src2 */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_st_plus.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 2);
+
+{
+ SI tmp_new_src2;
+ tmp_new_src2 = SUBSI (* FLD (i_src2), 4);
+ {
+ SI opval = * FLD (i_src1);
+ SETMEMSI (current_cpu, pc, tmp_new_src2, opval);
+ TRACE_RESULT (current_cpu, abuf, "memory", 'x', opval);
+ }
+ {
+ SI opval = tmp_new_src2;
+ * FLD (i_src2) = opval;
+ TRACE_RESULT (current_cpu, abuf, "src2", 'x', opval);
+ }
+}
+
+#undef FLD
+}
+ NEXT (vpc);
+
+ CASE (sem, INSN_SUB) : /* sub $dr,$sr */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_add.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 2);
+
+ {
+ SI opval = SUBSI (* FLD (i_dr), * FLD (i_sr));
+ * FLD (i_dr) = opval;
+ TRACE_RESULT (current_cpu, abuf, "dr", 'x', opval);
+ }
+
+#undef FLD
+}
+ NEXT (vpc);
+
+ CASE (sem, INSN_SUBV) : /* subv $dr,$sr */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_add.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 2);
+
+{
+ SI temp0;BI temp1;
+ temp0 = SUBSI (* FLD (i_dr), * FLD (i_sr));
+ temp1 = SUBOFSI (* FLD (i_dr), * FLD (i_sr), 0);
+ {
+ SI opval = temp0;
+ * FLD (i_dr) = opval;
+ TRACE_RESULT (current_cpu, abuf, "dr", 'x', opval);
+ }
+ {
+ BI opval = temp1;
+ CPU (h_cond) = opval;
+ TRACE_RESULT (current_cpu, abuf, "condbit", 'x', opval);
+ }
+}
+
+#undef FLD
+}
+ NEXT (vpc);
+
+ CASE (sem, INSN_SUBX) : /* subx $dr,$sr */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_add.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 2);
+
+{
+ SI temp0;BI temp1;
+ temp0 = SUBCSI (* FLD (i_dr), * FLD (i_sr), CPU (h_cond));
+ temp1 = SUBCFSI (* FLD (i_dr), * FLD (i_sr), CPU (h_cond));
+ {
+ SI opval = temp0;
+ * FLD (i_dr) = opval;
+ TRACE_RESULT (current_cpu, abuf, "dr", 'x', opval);
+ }
+ {
+ BI opval = temp1;
+ CPU (h_cond) = opval;
+ TRACE_RESULT (current_cpu, abuf, "condbit", 'x', opval);
+ }
+}
+
+#undef FLD
+}
+ NEXT (vpc);
+
+ CASE (sem, INSN_TRAP) : /* trap $uimm4 */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_trap.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ SEM_BRANCH_INIT
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 2);
+
+{
+ {
+ USI opval = GET_H_CR (((UINT) 6));
+ SET_H_CR (((UINT) 14), opval);
+ TRACE_RESULT (current_cpu, abuf, "cr-14", 'x', opval);
+ }
+ {
+ USI opval = ADDSI (pc, 4);
+ SET_H_CR (((UINT) 6), opval);
+ TRACE_RESULT (current_cpu, abuf, "cr-6", 'x', opval);
+ }
+ {
+ UQI opval = CPU (h_bpsw);
+ CPU (h_bbpsw) = opval;
+ TRACE_RESULT (current_cpu, abuf, "bbpsw", 'x', opval);
+ }
+ {
+ UQI opval = GET_H_PSW ();
+ CPU (h_bpsw) = opval;
+ TRACE_RESULT (current_cpu, abuf, "bpsw", 'x', opval);
+ }
+ {
+ UQI opval = ANDQI (GET_H_PSW (), 128);
+ SET_H_PSW (opval);
+ TRACE_RESULT (current_cpu, abuf, "psw", 'x', opval);
+ }
+ {
+ SI opval = m32r_trap (current_cpu, pc, FLD (f_uimm4));
+ SEM_BRANCH_VIA_ADDR (current_cpu, sem_arg, opval, vpc);
+ TRACE_RESULT (current_cpu, abuf, "pc", 'x', opval);
+ }
+}
+
+ SEM_BRANCH_FINI (vpc);
+#undef FLD
+}
+ NEXT (vpc);
+
+ CASE (sem, INSN_UNLOCK) : /* unlock $src1,@$src2 */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_st_plus.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 2);
+
+{
+if (CPU (h_lock)) {
+ {
+ SI opval = * FLD (i_src1);
+ SETMEMSI (current_cpu, pc, * FLD (i_src2), opval);
+ written |= (1 << 4);
+ TRACE_RESULT (current_cpu, abuf, "memory", 'x', opval);
+ }
+}
+ {
+ BI opval = 0;
+ CPU (h_lock) = opval;
+ TRACE_RESULT (current_cpu, abuf, "lock", 'x', opval);
+ }
+}
+
+ abuf->written = written;
+#undef FLD
+}
+ NEXT (vpc);
+
+ CASE (sem, INSN_SATB) : /* satb $dr,$sr */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_ld_plus.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 4);
+
+ {
+ SI opval = (GESI (* FLD (i_sr), 127)) ? (127) : (LESI (* FLD (i_sr), -128)) ? (-128) : (* FLD (i_sr));
+ * FLD (i_dr) = opval;
+ TRACE_RESULT (current_cpu, abuf, "dr", 'x', opval);
+ }
+
+#undef FLD
+}
+ NEXT (vpc);
+
+ CASE (sem, INSN_SATH) : /* sath $dr,$sr */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_ld_plus.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 4);
+
+ {
+ SI opval = (GESI (* FLD (i_sr), 32767)) ? (32767) : (LESI (* FLD (i_sr), -32768)) ? (-32768) : (* FLD (i_sr));
+ * FLD (i_dr) = opval;
+ TRACE_RESULT (current_cpu, abuf, "dr", 'x', opval);
+ }
+
+#undef FLD
+}
+ NEXT (vpc);
+
+ CASE (sem, INSN_SAT) : /* sat $dr,$sr */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_ld_plus.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 4);
+
+ {
+ SI opval = ((CPU (h_cond)) ? (((LTSI (* FLD (i_sr), 0)) ? (2147483647) : (0x80000000))) : (* FLD (i_sr)));
+ * FLD (i_dr) = opval;
+ TRACE_RESULT (current_cpu, abuf, "dr", 'x', opval);
+ }
+
+#undef FLD
+}
+ NEXT (vpc);
+
+ CASE (sem, INSN_PCMPBZ) : /* pcmpbz $src2 */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_st_plus.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 2);
+
+ {
+ BI opval = (EQSI (ANDSI (* FLD (i_src2), 255), 0)) ? (1) : (EQSI (ANDSI (* FLD (i_src2), 65280), 0)) ? (1) : (EQSI (ANDSI (* FLD (i_src2), 16711680), 0)) ? (1) : (EQSI (ANDSI (* FLD (i_src2), 0xff000000), 0)) ? (1) : (0);
+ CPU (h_cond) = opval;
+ TRACE_RESULT (current_cpu, abuf, "condbit", 'x', opval);
+ }
+
+#undef FLD
+}
+ NEXT (vpc);
+
+ CASE (sem, INSN_SADD) : /* sadd */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.fmt_empty.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 2);
+
+ {
+ DI opval = ADDDI (SRADI (GET_H_ACCUMS (((UINT) 1)), 16), GET_H_ACCUMS (((UINT) 0)));
+ SET_H_ACCUMS (((UINT) 0), opval);
+ TRACE_RESULT (current_cpu, abuf, "accums-0", 'D', opval);
+ }
+
+#undef FLD
+}
+ NEXT (vpc);
+
+ CASE (sem, INSN_MACWU1) : /* macwu1 $src1,$src2 */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_st_plus.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 2);
+
+ {
+ DI opval = SRADI (SLLDI (ADDDI (GET_H_ACCUMS (((UINT) 1)), MULDI (EXTSIDI (* FLD (i_src1)), EXTSIDI (ANDSI (* FLD (i_src2), 65535)))), 8), 8);
+ SET_H_ACCUMS (((UINT) 1), opval);
+ TRACE_RESULT (current_cpu, abuf, "accums-1", 'D', opval);
+ }
+
+#undef FLD
+}
+ NEXT (vpc);
+
+ CASE (sem, INSN_MSBLO) : /* msblo $src1,$src2 */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_st_plus.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 2);
+
+ {
+ DI opval = SRADI (SLLDI (SUBDI (GET_H_ACCUM (), SRADI (SLLDI (MULDI (EXTHIDI (TRUNCSIHI (* FLD (i_src1))), EXTHIDI (TRUNCSIHI (* FLD (i_src2)))), 32), 16)), 8), 8);
+ SET_H_ACCUM (opval);
+ TRACE_RESULT (current_cpu, abuf, "accum", 'D', opval);
+ }
+
+#undef FLD
+}
+ NEXT (vpc);
+
+ CASE (sem, INSN_MULWU1) : /* mulwu1 $src1,$src2 */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_st_plus.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 2);
+
+ {
+ DI opval = SRADI (SLLDI (MULDI (EXTSIDI (* FLD (i_src1)), EXTSIDI (ANDSI (* FLD (i_src2), 65535))), 16), 16);
+ SET_H_ACCUMS (((UINT) 1), opval);
+ TRACE_RESULT (current_cpu, abuf, "accums-1", 'D', opval);
+ }
+
+#undef FLD
+}
+ NEXT (vpc);
+
+ CASE (sem, INSN_MACLH1) : /* maclh1 $src1,$src2 */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_st_plus.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 2);
+
+ {
+ DI opval = SRADI (SLLDI (ADDDI (GET_H_ACCUMS (((UINT) 1)), SLLDI (EXTSIDI (MULSI (EXTHISI (TRUNCSIHI (* FLD (i_src1))), SRASI (* FLD (i_src2), 16))), 16)), 8), 8);
+ SET_H_ACCUMS (((UINT) 1), opval);
+ TRACE_RESULT (current_cpu, abuf, "accums-1", 'D', opval);
+ }
+
+#undef FLD
+}
+ NEXT (vpc);
+
+ CASE (sem, INSN_SC) : /* sc */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.fmt_empty.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 2);
+
+if (ZEXTBISI (CPU (h_cond)))
+ SEM_SKIP_INSN (current_cpu, sem_arg, vpc);
+
+#undef FLD
+}
+ NEXT (vpc);
+
+ CASE (sem, INSN_SNC) : /* snc */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.fmt_empty.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 2);
+
+if (ZEXTBISI (NOTBI (CPU (h_cond))))
+ SEM_SKIP_INSN (current_cpu, sem_arg, vpc);
+
+#undef FLD
+}
+ NEXT (vpc);
+
+ CASE (sem, INSN_PAR_ADD) : /* add $dr,$sr */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_add.f
+#define OPRND(f) par_exec->operands.sfmt_add.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 2);
+
+ {
+ SI opval = ADDSI (* FLD (i_dr), * FLD (i_sr));
+ OPRND (dr) = opval;
+ TRACE_RESULT (current_cpu, abuf, "dr", 'x', opval);
+ }
+
+#undef OPRND
+#undef FLD
+}
+ NEXT (vpc);
+
+CASE (sem, INSN_WRITE_ADD) : /* add $dr,$sr */
+ {
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ const ARGBUF *abuf = SEM_ARGBUF (sem_arg)->fields.write.abuf;
+#define FLD(f) abuf->fields.sfmt_add.f
+#define OPRND(f) par_exec->operands.sfmt_add.f
+ int UNUSED written = abuf->written;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 0);
+
+ * FLD (i_dr) = OPRND (dr);
+
+#undef OPRND
+#undef FLD
+ }
+ NEXT (vpc);
+
+ CASE (sem, INSN_PAR_AND) : /* and $dr,$sr */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_add.f
+#define OPRND(f) par_exec->operands.sfmt_add.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 2);
+
+ {
+ SI opval = ANDSI (* FLD (i_dr), * FLD (i_sr));
+ OPRND (dr) = opval;
+ TRACE_RESULT (current_cpu, abuf, "dr", 'x', opval);
+ }
+
+#undef OPRND
+#undef FLD
+}
+ NEXT (vpc);
+
+CASE (sem, INSN_WRITE_AND) : /* and $dr,$sr */
+ {
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ const ARGBUF *abuf = SEM_ARGBUF (sem_arg)->fields.write.abuf;
+#define FLD(f) abuf->fields.sfmt_add.f
+#define OPRND(f) par_exec->operands.sfmt_add.f
+ int UNUSED written = abuf->written;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 0);
+
+ * FLD (i_dr) = OPRND (dr);
+
+#undef OPRND
+#undef FLD
+ }
+ NEXT (vpc);
+
+ CASE (sem, INSN_PAR_OR) : /* or $dr,$sr */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_add.f
+#define OPRND(f) par_exec->operands.sfmt_add.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 2);
+
+ {
+ SI opval = ORSI (* FLD (i_dr), * FLD (i_sr));
+ OPRND (dr) = opval;
+ TRACE_RESULT (current_cpu, abuf, "dr", 'x', opval);
+ }
+
+#undef OPRND
+#undef FLD
+}
+ NEXT (vpc);
+
+CASE (sem, INSN_WRITE_OR) : /* or $dr,$sr */
+ {
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ const ARGBUF *abuf = SEM_ARGBUF (sem_arg)->fields.write.abuf;
+#define FLD(f) abuf->fields.sfmt_add.f
+#define OPRND(f) par_exec->operands.sfmt_add.f
+ int UNUSED written = abuf->written;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 0);
+
+ * FLD (i_dr) = OPRND (dr);
+
+#undef OPRND
+#undef FLD
+ }
+ NEXT (vpc);
+
+ CASE (sem, INSN_PAR_XOR) : /* xor $dr,$sr */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_add.f
+#define OPRND(f) par_exec->operands.sfmt_add.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 2);
+
+ {
+ SI opval = XORSI (* FLD (i_dr), * FLD (i_sr));
+ OPRND (dr) = opval;
+ TRACE_RESULT (current_cpu, abuf, "dr", 'x', opval);
+ }
+
+#undef OPRND
+#undef FLD
+}
+ NEXT (vpc);
+
+CASE (sem, INSN_WRITE_XOR) : /* xor $dr,$sr */
+ {
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ const ARGBUF *abuf = SEM_ARGBUF (sem_arg)->fields.write.abuf;
+#define FLD(f) abuf->fields.sfmt_add.f
+#define OPRND(f) par_exec->operands.sfmt_add.f
+ int UNUSED written = abuf->written;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 0);
+
+ * FLD (i_dr) = OPRND (dr);
+
+#undef OPRND
+#undef FLD
+ }
+ NEXT (vpc);
+
+ CASE (sem, INSN_PAR_ADDI) : /* addi $dr,$simm8 */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_addi.f
+#define OPRND(f) par_exec->operands.sfmt_addi.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 2);
+
+ {
+ SI opval = ADDSI (* FLD (i_dr), FLD (f_simm8));
+ OPRND (dr) = opval;
+ TRACE_RESULT (current_cpu, abuf, "dr", 'x', opval);
+ }
+
+#undef OPRND
+#undef FLD
+}
+ NEXT (vpc);
+
+CASE (sem, INSN_WRITE_ADDI) : /* addi $dr,$simm8 */
+ {
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ const ARGBUF *abuf = SEM_ARGBUF (sem_arg)->fields.write.abuf;
+#define FLD(f) abuf->fields.sfmt_addi.f
+#define OPRND(f) par_exec->operands.sfmt_addi.f
+ int UNUSED written = abuf->written;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 0);
+
+ * FLD (i_dr) = OPRND (dr);
+
+#undef OPRND
+#undef FLD
+ }
+ NEXT (vpc);
+
+ CASE (sem, INSN_PAR_ADDV) : /* addv $dr,$sr */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_add.f
+#define OPRND(f) par_exec->operands.sfmt_addv.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 2);
+
+{
+ SI temp0;BI temp1;
+ temp0 = ADDSI (* FLD (i_dr), * FLD (i_sr));
+ temp1 = ADDOFSI (* FLD (i_dr), * FLD (i_sr), 0);
+ {
+ SI opval = temp0;
+ OPRND (dr) = opval;
+ TRACE_RESULT (current_cpu, abuf, "dr", 'x', opval);
+ }
+ {
+ BI opval = temp1;
+ OPRND (condbit) = opval;
+ TRACE_RESULT (current_cpu, abuf, "condbit", 'x', opval);
+ }
+}
+
+#undef OPRND
+#undef FLD
+}
+ NEXT (vpc);
+
+CASE (sem, INSN_WRITE_ADDV) : /* addv $dr,$sr */
+ {
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ const ARGBUF *abuf = SEM_ARGBUF (sem_arg)->fields.write.abuf;
+#define FLD(f) abuf->fields.sfmt_add.f
+#define OPRND(f) par_exec->operands.sfmt_addv.f
+ int UNUSED written = abuf->written;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 0);
+
+ CPU (h_cond) = OPRND (condbit);
+ * FLD (i_dr) = OPRND (dr);
+
+#undef OPRND
+#undef FLD
+ }
+ NEXT (vpc);
+
+ CASE (sem, INSN_PAR_ADDX) : /* addx $dr,$sr */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_add.f
+#define OPRND(f) par_exec->operands.sfmt_addx.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 2);
+
+{
+ SI temp0;BI temp1;
+ temp0 = ADDCSI (* FLD (i_dr), * FLD (i_sr), CPU (h_cond));
+ temp1 = ADDCFSI (* FLD (i_dr), * FLD (i_sr), CPU (h_cond));
+ {
+ SI opval = temp0;
+ OPRND (dr) = opval;
+ TRACE_RESULT (current_cpu, abuf, "dr", 'x', opval);
+ }
+ {
+ BI opval = temp1;
+ OPRND (condbit) = opval;
+ TRACE_RESULT (current_cpu, abuf, "condbit", 'x', opval);
+ }
+}
+
+#undef OPRND
+#undef FLD
+}
+ NEXT (vpc);
+
+CASE (sem, INSN_WRITE_ADDX) : /* addx $dr,$sr */
+ {
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ const ARGBUF *abuf = SEM_ARGBUF (sem_arg)->fields.write.abuf;
+#define FLD(f) abuf->fields.sfmt_add.f
+#define OPRND(f) par_exec->operands.sfmt_addx.f
+ int UNUSED written = abuf->written;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 0);
+
+ CPU (h_cond) = OPRND (condbit);
+ * FLD (i_dr) = OPRND (dr);
+
+#undef OPRND
+#undef FLD
+ }
+ NEXT (vpc);
+
+ CASE (sem, INSN_PAR_BC8) : /* bc.s $disp8 */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_bl8.f
+#define OPRND(f) par_exec->operands.sfmt_bc8.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 2);
+
+if (CPU (h_cond)) {
+ {
+ USI opval = FLD (i_disp8);
+ OPRND (pc) = opval;
+ written |= (1 << 2);
+ TRACE_RESULT (current_cpu, abuf, "pc", 'x', opval);
+ }
+}
+
+ abuf->written = written;
+#undef OPRND
+#undef FLD
+}
+ NEXT (vpc);
+
+CASE (sem, INSN_WRITE_BC8) : /* bc.s $disp8 */
+ {
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ const ARGBUF *abuf = SEM_ARGBUF (sem_arg)->fields.write.abuf;
+#define FLD(f) abuf->fields.sfmt_bl8.f
+#define OPRND(f) par_exec->operands.sfmt_bc8.f
+ int UNUSED written = abuf->written;
+ IADDR UNUSED pc = abuf->addr;
+ SEM_BRANCH_INIT
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 0);
+
+ if (written & (1 << 2))
+ {
+ SEM_BRANCH_VIA_ADDR (current_cpu, sem_arg, OPRND (pc), vpc);
+ }
+
+ SEM_BRANCH_FINI (vpc);
+#undef OPRND
+#undef FLD
+ }
+ NEXT (vpc);
+
+ CASE (sem, INSN_PAR_BL8) : /* bl.s $disp8 */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_bl8.f
+#define OPRND(f) par_exec->operands.sfmt_bl8.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 2);
+
+{
+ {
+ SI opval = ADDSI (ANDSI (pc, -4), 4);
+ OPRND (h_gr_14) = opval;
+ TRACE_RESULT (current_cpu, abuf, "gr-14", 'x', opval);
+ }
+ {
+ USI opval = FLD (i_disp8);
+ OPRND (pc) = opval;
+ TRACE_RESULT (current_cpu, abuf, "pc", 'x', opval);
+ }
+}
+
+#undef OPRND
+#undef FLD
+}
+ NEXT (vpc);
+
+CASE (sem, INSN_WRITE_BL8) : /* bl.s $disp8 */
+ {
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ const ARGBUF *abuf = SEM_ARGBUF (sem_arg)->fields.write.abuf;
+#define FLD(f) abuf->fields.sfmt_bl8.f
+#define OPRND(f) par_exec->operands.sfmt_bl8.f
+ int UNUSED written = abuf->written;
+ IADDR UNUSED pc = abuf->addr;
+ SEM_BRANCH_INIT
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 0);
+
+ CPU (h_gr[((UINT) 14)]) = OPRND (h_gr_14);
+ SEM_BRANCH_VIA_ADDR (current_cpu, sem_arg, OPRND (pc), vpc);
+
+ SEM_BRANCH_FINI (vpc);
+#undef OPRND
+#undef FLD
+ }
+ NEXT (vpc);
+
+ CASE (sem, INSN_PAR_BCL8) : /* bcl.s $disp8 */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_bl8.f
+#define OPRND(f) par_exec->operands.sfmt_bcl8.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 2);
+
+if (CPU (h_cond)) {
+{
+ {
+ SI opval = ADDSI (ANDSI (pc, -4), 4);
+ OPRND (h_gr_14) = opval;
+ written |= (1 << 3);
+ TRACE_RESULT (current_cpu, abuf, "gr-14", 'x', opval);
+ }
+ {
+ USI opval = FLD (i_disp8);
+ OPRND (pc) = opval;
+ written |= (1 << 4);
+ TRACE_RESULT (current_cpu, abuf, "pc", 'x', opval);
+ }
+}
+}
+
+ abuf->written = written;
+#undef OPRND
+#undef FLD
+}
+ NEXT (vpc);
+
+CASE (sem, INSN_WRITE_BCL8) : /* bcl.s $disp8 */
+ {
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ const ARGBUF *abuf = SEM_ARGBUF (sem_arg)->fields.write.abuf;
+#define FLD(f) abuf->fields.sfmt_bl8.f
+#define OPRND(f) par_exec->operands.sfmt_bcl8.f
+ int UNUSED written = abuf->written;
+ IADDR UNUSED pc = abuf->addr;
+ SEM_BRANCH_INIT
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 0);
+
+ if (written & (1 << 3))
+ {
+ CPU (h_gr[((UINT) 14)]) = OPRND (h_gr_14);
+ }
+ if (written & (1 << 4))
+ {
+ SEM_BRANCH_VIA_ADDR (current_cpu, sem_arg, OPRND (pc), vpc);
+ }
+
+ SEM_BRANCH_FINI (vpc);
+#undef OPRND
+#undef FLD
+ }
+ NEXT (vpc);
+
+ CASE (sem, INSN_PAR_BNC8) : /* bnc.s $disp8 */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_bl8.f
+#define OPRND(f) par_exec->operands.sfmt_bc8.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 2);
+
+if (NOTBI (CPU (h_cond))) {
+ {
+ USI opval = FLD (i_disp8);
+ OPRND (pc) = opval;
+ written |= (1 << 2);
+ TRACE_RESULT (current_cpu, abuf, "pc", 'x', opval);
+ }
+}
+
+ abuf->written = written;
+#undef OPRND
+#undef FLD
+}
+ NEXT (vpc);
+
+CASE (sem, INSN_WRITE_BNC8) : /* bnc.s $disp8 */
+ {
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ const ARGBUF *abuf = SEM_ARGBUF (sem_arg)->fields.write.abuf;
+#define FLD(f) abuf->fields.sfmt_bl8.f
+#define OPRND(f) par_exec->operands.sfmt_bc8.f
+ int UNUSED written = abuf->written;
+ IADDR UNUSED pc = abuf->addr;
+ SEM_BRANCH_INIT
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 0);
+
+ if (written & (1 << 2))
+ {
+ SEM_BRANCH_VIA_ADDR (current_cpu, sem_arg, OPRND (pc), vpc);
+ }
+
+ SEM_BRANCH_FINI (vpc);
+#undef OPRND
+#undef FLD
+ }
+ NEXT (vpc);
+
+ CASE (sem, INSN_PAR_BRA8) : /* bra.s $disp8 */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_bl8.f
+#define OPRND(f) par_exec->operands.sfmt_bra8.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 2);
+
+ {
+ USI opval = FLD (i_disp8);
+ OPRND (pc) = opval;
+ TRACE_RESULT (current_cpu, abuf, "pc", 'x', opval);
+ }
+
+#undef OPRND
+#undef FLD
+}
+ NEXT (vpc);
+
+CASE (sem, INSN_WRITE_BRA8) : /* bra.s $disp8 */
+ {
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ const ARGBUF *abuf = SEM_ARGBUF (sem_arg)->fields.write.abuf;
+#define FLD(f) abuf->fields.sfmt_bl8.f
+#define OPRND(f) par_exec->operands.sfmt_bra8.f
+ int UNUSED written = abuf->written;
+ IADDR UNUSED pc = abuf->addr;
+ SEM_BRANCH_INIT
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 0);
+
+ SEM_BRANCH_VIA_ADDR (current_cpu, sem_arg, OPRND (pc), vpc);
+
+ SEM_BRANCH_FINI (vpc);
+#undef OPRND
+#undef FLD
+ }
+ NEXT (vpc);
+
+ CASE (sem, INSN_PAR_BNCL8) : /* bncl.s $disp8 */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_bl8.f
+#define OPRND(f) par_exec->operands.sfmt_bcl8.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 2);
+
+if (NOTBI (CPU (h_cond))) {
+{
+ {
+ SI opval = ADDSI (ANDSI (pc, -4), 4);
+ OPRND (h_gr_14) = opval;
+ written |= (1 << 3);
+ TRACE_RESULT (current_cpu, abuf, "gr-14", 'x', opval);
+ }
+ {
+ USI opval = FLD (i_disp8);
+ OPRND (pc) = opval;
+ written |= (1 << 4);
+ TRACE_RESULT (current_cpu, abuf, "pc", 'x', opval);
+ }
+}
+}
+
+ abuf->written = written;
+#undef OPRND
+#undef FLD
+}
+ NEXT (vpc);
+
+CASE (sem, INSN_WRITE_BNCL8) : /* bncl.s $disp8 */
+ {
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ const ARGBUF *abuf = SEM_ARGBUF (sem_arg)->fields.write.abuf;
+#define FLD(f) abuf->fields.sfmt_bl8.f
+#define OPRND(f) par_exec->operands.sfmt_bcl8.f
+ int UNUSED written = abuf->written;
+ IADDR UNUSED pc = abuf->addr;
+ SEM_BRANCH_INIT
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 0);
+
+ if (written & (1 << 3))
+ {
+ CPU (h_gr[((UINT) 14)]) = OPRND (h_gr_14);
+ }
+ if (written & (1 << 4))
+ {
+ SEM_BRANCH_VIA_ADDR (current_cpu, sem_arg, OPRND (pc), vpc);
+ }
+
+ SEM_BRANCH_FINI (vpc);
+#undef OPRND
+#undef FLD
+ }
+ NEXT (vpc);
+
+ CASE (sem, INSN_PAR_CMP) : /* cmp $src1,$src2 */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_st_plus.f
+#define OPRND(f) par_exec->operands.sfmt_cmp.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 2);
+
+ {
+ BI opval = LTSI (* FLD (i_src1), * FLD (i_src2));
+ OPRND (condbit) = opval;
+ TRACE_RESULT (current_cpu, abuf, "condbit", 'x', opval);
+ }
+
+#undef OPRND
+#undef FLD
+}
+ NEXT (vpc);
+
+CASE (sem, INSN_WRITE_CMP) : /* cmp $src1,$src2 */
+ {
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ const ARGBUF *abuf = SEM_ARGBUF (sem_arg)->fields.write.abuf;
+#define FLD(f) abuf->fields.sfmt_st_plus.f
+#define OPRND(f) par_exec->operands.sfmt_cmp.f
+ int UNUSED written = abuf->written;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 0);
+
+ CPU (h_cond) = OPRND (condbit);
+
+#undef OPRND
+#undef FLD
+ }
+ NEXT (vpc);
+
+ CASE (sem, INSN_PAR_CMPU) : /* cmpu $src1,$src2 */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_st_plus.f
+#define OPRND(f) par_exec->operands.sfmt_cmp.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 2);
+
+ {
+ BI opval = LTUSI (* FLD (i_src1), * FLD (i_src2));
+ OPRND (condbit) = opval;
+ TRACE_RESULT (current_cpu, abuf, "condbit", 'x', opval);
+ }
+
+#undef OPRND
+#undef FLD
+}
+ NEXT (vpc);
+
+CASE (sem, INSN_WRITE_CMPU) : /* cmpu $src1,$src2 */
+ {
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ const ARGBUF *abuf = SEM_ARGBUF (sem_arg)->fields.write.abuf;
+#define FLD(f) abuf->fields.sfmt_st_plus.f
+#define OPRND(f) par_exec->operands.sfmt_cmp.f
+ int UNUSED written = abuf->written;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 0);
+
+ CPU (h_cond) = OPRND (condbit);
+
+#undef OPRND
+#undef FLD
+ }
+ NEXT (vpc);
+
+ CASE (sem, INSN_PAR_CMPEQ) : /* cmpeq $src1,$src2 */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_st_plus.f
+#define OPRND(f) par_exec->operands.sfmt_cmp.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 2);
+
+ {
+ BI opval = EQSI (* FLD (i_src1), * FLD (i_src2));
+ OPRND (condbit) = opval;
+ TRACE_RESULT (current_cpu, abuf, "condbit", 'x', opval);
+ }
+
+#undef OPRND
+#undef FLD
+}
+ NEXT (vpc);
+
+CASE (sem, INSN_WRITE_CMPEQ) : /* cmpeq $src1,$src2 */
+ {
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ const ARGBUF *abuf = SEM_ARGBUF (sem_arg)->fields.write.abuf;
+#define FLD(f) abuf->fields.sfmt_st_plus.f
+#define OPRND(f) par_exec->operands.sfmt_cmp.f
+ int UNUSED written = abuf->written;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 0);
+
+ CPU (h_cond) = OPRND (condbit);
+
+#undef OPRND
+#undef FLD
+ }
+ NEXT (vpc);
+
+ CASE (sem, INSN_PAR_CMPZ) : /* cmpz $src2 */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_st_plus.f
+#define OPRND(f) par_exec->operands.sfmt_cmpz.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 2);
+
+ {
+ BI opval = EQSI (* FLD (i_src2), 0);
+ OPRND (condbit) = opval;
+ TRACE_RESULT (current_cpu, abuf, "condbit", 'x', opval);
+ }
+
+#undef OPRND
+#undef FLD
+}
+ NEXT (vpc);
+
+CASE (sem, INSN_WRITE_CMPZ) : /* cmpz $src2 */
+ {
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ const ARGBUF *abuf = SEM_ARGBUF (sem_arg)->fields.write.abuf;
+#define FLD(f) abuf->fields.sfmt_st_plus.f
+#define OPRND(f) par_exec->operands.sfmt_cmpz.f
+ int UNUSED written = abuf->written;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 0);
+
+ CPU (h_cond) = OPRND (condbit);
+
+#undef OPRND
+#undef FLD
+ }
+ NEXT (vpc);
+
+ CASE (sem, INSN_PAR_JC) : /* jc $sr */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_mvtc.f
+#define OPRND(f) par_exec->operands.sfmt_jc.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 2);
+
+if (CPU (h_cond)) {
+ {
+ USI opval = ANDSI (* FLD (i_sr), -4);
+ OPRND (pc) = opval;
+ written |= (1 << 2);
+ TRACE_RESULT (current_cpu, abuf, "pc", 'x', opval);
+ }
+}
+
+ abuf->written = written;
+#undef OPRND
+#undef FLD
+}
+ NEXT (vpc);
+
+CASE (sem, INSN_WRITE_JC) : /* jc $sr */
+ {
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ const ARGBUF *abuf = SEM_ARGBUF (sem_arg)->fields.write.abuf;
+#define FLD(f) abuf->fields.sfmt_mvtc.f
+#define OPRND(f) par_exec->operands.sfmt_jc.f
+ int UNUSED written = abuf->written;
+ IADDR UNUSED pc = abuf->addr;
+ SEM_BRANCH_INIT
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 0);
+
+ if (written & (1 << 2))
+ {
+ SEM_BRANCH_VIA_ADDR (current_cpu, sem_arg, OPRND (pc), vpc);
+ }
+
+ SEM_BRANCH_FINI (vpc);
+#undef OPRND
+#undef FLD
+ }
+ NEXT (vpc);
+
+ CASE (sem, INSN_PAR_JNC) : /* jnc $sr */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_mvtc.f
+#define OPRND(f) par_exec->operands.sfmt_jc.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 2);
+
+if (NOTBI (CPU (h_cond))) {
+ {
+ USI opval = ANDSI (* FLD (i_sr), -4);
+ OPRND (pc) = opval;
+ written |= (1 << 2);
+ TRACE_RESULT (current_cpu, abuf, "pc", 'x', opval);
+ }
+}
+
+ abuf->written = written;
+#undef OPRND
+#undef FLD
+}
+ NEXT (vpc);
+
+CASE (sem, INSN_WRITE_JNC) : /* jnc $sr */
+ {
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ const ARGBUF *abuf = SEM_ARGBUF (sem_arg)->fields.write.abuf;
+#define FLD(f) abuf->fields.sfmt_mvtc.f
+#define OPRND(f) par_exec->operands.sfmt_jc.f
+ int UNUSED written = abuf->written;
+ IADDR UNUSED pc = abuf->addr;
+ SEM_BRANCH_INIT
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 0);
+
+ if (written & (1 << 2))
+ {
+ SEM_BRANCH_VIA_ADDR (current_cpu, sem_arg, OPRND (pc), vpc);
+ }
+
+ SEM_BRANCH_FINI (vpc);
+#undef OPRND
+#undef FLD
+ }
+ NEXT (vpc);
+
+ CASE (sem, INSN_PAR_JL) : /* jl $sr */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_jl.f
+#define OPRND(f) par_exec->operands.sfmt_jl.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 2);
+
+{
+ SI temp0;USI temp1;
+ temp0 = ADDSI (ANDSI (pc, -4), 4);
+ temp1 = ANDSI (* FLD (i_sr), -4);
+ {
+ SI opval = temp0;
+ OPRND (h_gr_14) = opval;
+ TRACE_RESULT (current_cpu, abuf, "gr-14", 'x', opval);
+ }
+ {
+ USI opval = temp1;
+ OPRND (pc) = opval;
+ TRACE_RESULT (current_cpu, abuf, "pc", 'x', opval);
+ }
+}
+
+#undef OPRND
+#undef FLD
+}
+ NEXT (vpc);
+
+CASE (sem, INSN_WRITE_JL) : /* jl $sr */
+ {
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ const ARGBUF *abuf = SEM_ARGBUF (sem_arg)->fields.write.abuf;
+#define FLD(f) abuf->fields.sfmt_jl.f
+#define OPRND(f) par_exec->operands.sfmt_jl.f
+ int UNUSED written = abuf->written;
+ IADDR UNUSED pc = abuf->addr;
+ SEM_BRANCH_INIT
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 0);
+
+ CPU (h_gr[((UINT) 14)]) = OPRND (h_gr_14);
+ SEM_BRANCH_VIA_ADDR (current_cpu, sem_arg, OPRND (pc), vpc);
+
+ SEM_BRANCH_FINI (vpc);
+#undef OPRND
+#undef FLD
+ }
+ NEXT (vpc);
+
+ CASE (sem, INSN_PAR_JMP) : /* jmp $sr */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_mvtc.f
+#define OPRND(f) par_exec->operands.sfmt_jmp.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 2);
+
+ {
+ USI opval = ANDSI (* FLD (i_sr), -4);
+ OPRND (pc) = opval;
+ TRACE_RESULT (current_cpu, abuf, "pc", 'x', opval);
+ }
+
+#undef OPRND
+#undef FLD
+}
+ NEXT (vpc);
+
+CASE (sem, INSN_WRITE_JMP) : /* jmp $sr */
+ {
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ const ARGBUF *abuf = SEM_ARGBUF (sem_arg)->fields.write.abuf;
+#define FLD(f) abuf->fields.sfmt_mvtc.f
+#define OPRND(f) par_exec->operands.sfmt_jmp.f
+ int UNUSED written = abuf->written;
+ IADDR UNUSED pc = abuf->addr;
+ SEM_BRANCH_INIT
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 0);
+
+ SEM_BRANCH_VIA_ADDR (current_cpu, sem_arg, OPRND (pc), vpc);
+
+ SEM_BRANCH_FINI (vpc);
+#undef OPRND
+#undef FLD
+ }
+ NEXT (vpc);
+
+ CASE (sem, INSN_PAR_LD) : /* ld $dr,@$sr */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_ld_plus.f
+#define OPRND(f) par_exec->operands.sfmt_ld.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 2);
+
+ {
+ SI opval = GETMEMSI (current_cpu, pc, * FLD (i_sr));
+ OPRND (dr) = opval;
+ TRACE_RESULT (current_cpu, abuf, "dr", 'x', opval);
+ }
+
+#undef OPRND
+#undef FLD
+}
+ NEXT (vpc);
+
+CASE (sem, INSN_WRITE_LD) : /* ld $dr,@$sr */
+ {
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ const ARGBUF *abuf = SEM_ARGBUF (sem_arg)->fields.write.abuf;
+#define FLD(f) abuf->fields.sfmt_ld_plus.f
+#define OPRND(f) par_exec->operands.sfmt_ld.f
+ int UNUSED written = abuf->written;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 0);
+
+ * FLD (i_dr) = OPRND (dr);
+
+#undef OPRND
+#undef FLD
+ }
+ NEXT (vpc);
+
+ CASE (sem, INSN_PAR_LDB) : /* ldb $dr,@$sr */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_ld_plus.f
+#define OPRND(f) par_exec->operands.sfmt_ld.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 2);
+
+ {
+ SI opval = EXTQISI (GETMEMQI (current_cpu, pc, * FLD (i_sr)));
+ OPRND (dr) = opval;
+ TRACE_RESULT (current_cpu, abuf, "dr", 'x', opval);
+ }
+
+#undef OPRND
+#undef FLD
+}
+ NEXT (vpc);
+
+CASE (sem, INSN_WRITE_LDB) : /* ldb $dr,@$sr */
+ {
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ const ARGBUF *abuf = SEM_ARGBUF (sem_arg)->fields.write.abuf;
+#define FLD(f) abuf->fields.sfmt_ld_plus.f
+#define OPRND(f) par_exec->operands.sfmt_ld.f
+ int UNUSED written = abuf->written;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 0);
+
+ * FLD (i_dr) = OPRND (dr);
+
+#undef OPRND
+#undef FLD
+ }
+ NEXT (vpc);
+
+ CASE (sem, INSN_PAR_LDH) : /* ldh $dr,@$sr */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_ld_plus.f
+#define OPRND(f) par_exec->operands.sfmt_ld.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 2);
+
+ {
+ SI opval = EXTHISI (GETMEMHI (current_cpu, pc, * FLD (i_sr)));
+ OPRND (dr) = opval;
+ TRACE_RESULT (current_cpu, abuf, "dr", 'x', opval);
+ }
+
+#undef OPRND
+#undef FLD
+}
+ NEXT (vpc);
+
+CASE (sem, INSN_WRITE_LDH) : /* ldh $dr,@$sr */
+ {
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ const ARGBUF *abuf = SEM_ARGBUF (sem_arg)->fields.write.abuf;
+#define FLD(f) abuf->fields.sfmt_ld_plus.f
+#define OPRND(f) par_exec->operands.sfmt_ld.f
+ int UNUSED written = abuf->written;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 0);
+
+ * FLD (i_dr) = OPRND (dr);
+
+#undef OPRND
+#undef FLD
+ }
+ NEXT (vpc);
+
+ CASE (sem, INSN_PAR_LDUB) : /* ldub $dr,@$sr */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_ld_plus.f
+#define OPRND(f) par_exec->operands.sfmt_ld.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 2);
+
+ {
+ SI opval = ZEXTQISI (GETMEMQI (current_cpu, pc, * FLD (i_sr)));
+ OPRND (dr) = opval;
+ TRACE_RESULT (current_cpu, abuf, "dr", 'x', opval);
+ }
+
+#undef OPRND
+#undef FLD
+}
+ NEXT (vpc);
+
+CASE (sem, INSN_WRITE_LDUB) : /* ldub $dr,@$sr */
+ {
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ const ARGBUF *abuf = SEM_ARGBUF (sem_arg)->fields.write.abuf;
+#define FLD(f) abuf->fields.sfmt_ld_plus.f
+#define OPRND(f) par_exec->operands.sfmt_ld.f
+ int UNUSED written = abuf->written;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 0);
+
+ * FLD (i_dr) = OPRND (dr);
+
+#undef OPRND
+#undef FLD
+ }
+ NEXT (vpc);
+
+ CASE (sem, INSN_PAR_LDUH) : /* lduh $dr,@$sr */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_ld_plus.f
+#define OPRND(f) par_exec->operands.sfmt_ld.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 2);
+
+ {
+ SI opval = ZEXTHISI (GETMEMHI (current_cpu, pc, * FLD (i_sr)));
+ OPRND (dr) = opval;
+ TRACE_RESULT (current_cpu, abuf, "dr", 'x', opval);
+ }
+
+#undef OPRND
+#undef FLD
+}
+ NEXT (vpc);
+
+CASE (sem, INSN_WRITE_LDUH) : /* lduh $dr,@$sr */
+ {
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ const ARGBUF *abuf = SEM_ARGBUF (sem_arg)->fields.write.abuf;
+#define FLD(f) abuf->fields.sfmt_ld_plus.f
+#define OPRND(f) par_exec->operands.sfmt_ld.f
+ int UNUSED written = abuf->written;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 0);
+
+ * FLD (i_dr) = OPRND (dr);
+
+#undef OPRND
+#undef FLD
+ }
+ NEXT (vpc);
+
+ CASE (sem, INSN_PAR_LD_PLUS) : /* ld $dr,@$sr+ */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_ld_plus.f
+#define OPRND(f) par_exec->operands.sfmt_ld_plus.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 2);
+
+{
+ SI temp0;SI temp1;
+ temp0 = GETMEMSI (current_cpu, pc, * FLD (i_sr));
+ temp1 = ADDSI (* FLD (i_sr), 4);
+ {
+ SI opval = temp0;
+ OPRND (dr) = opval;
+ TRACE_RESULT (current_cpu, abuf, "dr", 'x', opval);
+ }
+ {
+ SI opval = temp1;
+ OPRND (sr) = opval;
+ TRACE_RESULT (current_cpu, abuf, "sr", 'x', opval);
+ }
+}
+
+#undef OPRND
+#undef FLD
+}
+ NEXT (vpc);
+
+CASE (sem, INSN_WRITE_LD_PLUS) : /* ld $dr,@$sr+ */
+ {
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ const ARGBUF *abuf = SEM_ARGBUF (sem_arg)->fields.write.abuf;
+#define FLD(f) abuf->fields.sfmt_ld_plus.f
+#define OPRND(f) par_exec->operands.sfmt_ld_plus.f
+ int UNUSED written = abuf->written;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 0);
+
+ * FLD (i_dr) = OPRND (dr);
+ * FLD (i_sr) = OPRND (sr);
+
+#undef OPRND
+#undef FLD
+ }
+ NEXT (vpc);
+
+ CASE (sem, INSN_PAR_LDI8) : /* ldi8 $dr,$simm8 */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_addi.f
+#define OPRND(f) par_exec->operands.sfmt_ldi8.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 2);
+
+ {
+ SI opval = FLD (f_simm8);
+ OPRND (dr) = opval;
+ TRACE_RESULT (current_cpu, abuf, "dr", 'x', opval);
+ }
+
+#undef OPRND
+#undef FLD
+}
+ NEXT (vpc);
+
+CASE (sem, INSN_WRITE_LDI8) : /* ldi8 $dr,$simm8 */
+ {
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ const ARGBUF *abuf = SEM_ARGBUF (sem_arg)->fields.write.abuf;
+#define FLD(f) abuf->fields.sfmt_addi.f
+#define OPRND(f) par_exec->operands.sfmt_ldi8.f
+ int UNUSED written = abuf->written;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 0);
+
+ * FLD (i_dr) = OPRND (dr);
+
+#undef OPRND
+#undef FLD
+ }
+ NEXT (vpc);
+
+ CASE (sem, INSN_PAR_LOCK) : /* lock $dr,@$sr */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_ld_plus.f
+#define OPRND(f) par_exec->operands.sfmt_lock.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 2);
+
+{
+ {
+ BI opval = 1;
+ OPRND (h_lock) = opval;
+ TRACE_RESULT (current_cpu, abuf, "lock", 'x', opval);
+ }
+ {
+ SI opval = GETMEMSI (current_cpu, pc, * FLD (i_sr));
+ OPRND (dr) = opval;
+ TRACE_RESULT (current_cpu, abuf, "dr", 'x', opval);
+ }
+}
+
+#undef OPRND
+#undef FLD
+}
+ NEXT (vpc);
+
+CASE (sem, INSN_WRITE_LOCK) : /* lock $dr,@$sr */
+ {
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ const ARGBUF *abuf = SEM_ARGBUF (sem_arg)->fields.write.abuf;
+#define FLD(f) abuf->fields.sfmt_ld_plus.f
+#define OPRND(f) par_exec->operands.sfmt_lock.f
+ int UNUSED written = abuf->written;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 0);
+
+ * FLD (i_dr) = OPRND (dr);
+ CPU (h_lock) = OPRND (h_lock);
+
+#undef OPRND
+#undef FLD
+ }
+ NEXT (vpc);
+
+ CASE (sem, INSN_PAR_MACHI_A) : /* machi $src1,$src2,$acc */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_machi_a.f
+#define OPRND(f) par_exec->operands.sfmt_machi_a.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 2);
+
+ {
+ DI opval = SRADI (SLLDI (ADDDI (GET_H_ACCUMS (FLD (f_acc)), MULDI (EXTSIDI (ANDSI (* FLD (i_src1), 0xffff0000)), EXTHIDI (TRUNCSIHI (SRASI (* FLD (i_src2), 16))))), 8), 8);
+ OPRND (acc) = opval;
+ TRACE_RESULT (current_cpu, abuf, "acc", 'D', opval);
+ }
+
+#undef OPRND
+#undef FLD
+}
+ NEXT (vpc);
+
+CASE (sem, INSN_WRITE_MACHI_A) : /* machi $src1,$src2,$acc */
+ {
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ const ARGBUF *abuf = SEM_ARGBUF (sem_arg)->fields.write.abuf;
+#define FLD(f) abuf->fields.sfmt_machi_a.f
+#define OPRND(f) par_exec->operands.sfmt_machi_a.f
+ int UNUSED written = abuf->written;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 0);
+
+ SET_H_ACCUMS (FLD (f_acc), OPRND (acc));
+
+#undef OPRND
+#undef FLD
+ }
+ NEXT (vpc);
+
+ CASE (sem, INSN_PAR_MACLO_A) : /* maclo $src1,$src2,$acc */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_machi_a.f
+#define OPRND(f) par_exec->operands.sfmt_machi_a.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 2);
+
+ {
+ DI opval = SRADI (SLLDI (ADDDI (GET_H_ACCUMS (FLD (f_acc)), MULDI (EXTSIDI (SLLSI (* FLD (i_src1), 16)), EXTHIDI (TRUNCSIHI (* FLD (i_src2))))), 8), 8);
+ OPRND (acc) = opval;
+ TRACE_RESULT (current_cpu, abuf, "acc", 'D', opval);
+ }
+
+#undef OPRND
+#undef FLD
+}
+ NEXT (vpc);
+
+CASE (sem, INSN_WRITE_MACLO_A) : /* maclo $src1,$src2,$acc */
+ {
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ const ARGBUF *abuf = SEM_ARGBUF (sem_arg)->fields.write.abuf;
+#define FLD(f) abuf->fields.sfmt_machi_a.f
+#define OPRND(f) par_exec->operands.sfmt_machi_a.f
+ int UNUSED written = abuf->written;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 0);
+
+ SET_H_ACCUMS (FLD (f_acc), OPRND (acc));
+
+#undef OPRND
+#undef FLD
+ }
+ NEXT (vpc);
+
+ CASE (sem, INSN_PAR_MACWHI_A) : /* macwhi $src1,$src2,$acc */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_machi_a.f
+#define OPRND(f) par_exec->operands.sfmt_machi_a.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 2);
+
+ {
+ DI opval = ADDDI (GET_H_ACCUMS (FLD (f_acc)), MULDI (EXTSIDI (* FLD (i_src1)), EXTHIDI (TRUNCSIHI (SRASI (* FLD (i_src2), 16)))));
+ OPRND (acc) = opval;
+ TRACE_RESULT (current_cpu, abuf, "acc", 'D', opval);
+ }
+
+#undef OPRND
+#undef FLD
+}
+ NEXT (vpc);
+
+CASE (sem, INSN_WRITE_MACWHI_A) : /* macwhi $src1,$src2,$acc */
+ {
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ const ARGBUF *abuf = SEM_ARGBUF (sem_arg)->fields.write.abuf;
+#define FLD(f) abuf->fields.sfmt_machi_a.f
+#define OPRND(f) par_exec->operands.sfmt_machi_a.f
+ int UNUSED written = abuf->written;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 0);
+
+ SET_H_ACCUMS (FLD (f_acc), OPRND (acc));
+
+#undef OPRND
+#undef FLD
+ }
+ NEXT (vpc);
+
+ CASE (sem, INSN_PAR_MACWLO_A) : /* macwlo $src1,$src2,$acc */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_machi_a.f
+#define OPRND(f) par_exec->operands.sfmt_machi_a.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 2);
+
+ {
+ DI opval = ADDDI (GET_H_ACCUMS (FLD (f_acc)), MULDI (EXTSIDI (* FLD (i_src1)), EXTHIDI (TRUNCSIHI (* FLD (i_src2)))));
+ OPRND (acc) = opval;
+ TRACE_RESULT (current_cpu, abuf, "acc", 'D', opval);
+ }
+
+#undef OPRND
+#undef FLD
+}
+ NEXT (vpc);
+
+CASE (sem, INSN_WRITE_MACWLO_A) : /* macwlo $src1,$src2,$acc */
+ {
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ const ARGBUF *abuf = SEM_ARGBUF (sem_arg)->fields.write.abuf;
+#define FLD(f) abuf->fields.sfmt_machi_a.f
+#define OPRND(f) par_exec->operands.sfmt_machi_a.f
+ int UNUSED written = abuf->written;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 0);
+
+ SET_H_ACCUMS (FLD (f_acc), OPRND (acc));
+
+#undef OPRND
+#undef FLD
+ }
+ NEXT (vpc);
+
+ CASE (sem, INSN_PAR_MUL) : /* mul $dr,$sr */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_add.f
+#define OPRND(f) par_exec->operands.sfmt_add.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 2);
+
+ {
+ SI opval = MULSI (* FLD (i_dr), * FLD (i_sr));
+ OPRND (dr) = opval;
+ TRACE_RESULT (current_cpu, abuf, "dr", 'x', opval);
+ }
+
+#undef OPRND
+#undef FLD
+}
+ NEXT (vpc);
+
+CASE (sem, INSN_WRITE_MUL) : /* mul $dr,$sr */
+ {
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ const ARGBUF *abuf = SEM_ARGBUF (sem_arg)->fields.write.abuf;
+#define FLD(f) abuf->fields.sfmt_add.f
+#define OPRND(f) par_exec->operands.sfmt_add.f
+ int UNUSED written = abuf->written;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 0);
+
+ * FLD (i_dr) = OPRND (dr);
+
+#undef OPRND
+#undef FLD
+ }
+ NEXT (vpc);
+
+ CASE (sem, INSN_PAR_MULHI_A) : /* mulhi $src1,$src2,$acc */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_machi_a.f
+#define OPRND(f) par_exec->operands.sfmt_mulhi_a.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 2);
+
+ {
+ DI opval = SRADI (SLLDI (MULDI (EXTSIDI (ANDSI (* FLD (i_src1), 0xffff0000)), EXTHIDI (TRUNCSIHI (SRASI (* FLD (i_src2), 16)))), 16), 16);
+ OPRND (acc) = opval;
+ TRACE_RESULT (current_cpu, abuf, "acc", 'D', opval);
+ }
+
+#undef OPRND
+#undef FLD
+}
+ NEXT (vpc);
+
+CASE (sem, INSN_WRITE_MULHI_A) : /* mulhi $src1,$src2,$acc */
+ {
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ const ARGBUF *abuf = SEM_ARGBUF (sem_arg)->fields.write.abuf;
+#define FLD(f) abuf->fields.sfmt_machi_a.f
+#define OPRND(f) par_exec->operands.sfmt_mulhi_a.f
+ int UNUSED written = abuf->written;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 0);
+
+ SET_H_ACCUMS (FLD (f_acc), OPRND (acc));
+
+#undef OPRND
+#undef FLD
+ }
+ NEXT (vpc);
+
+ CASE (sem, INSN_PAR_MULLO_A) : /* mullo $src1,$src2,$acc */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_machi_a.f
+#define OPRND(f) par_exec->operands.sfmt_mulhi_a.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 2);
+
+ {
+ DI opval = SRADI (SLLDI (MULDI (EXTSIDI (SLLSI (* FLD (i_src1), 16)), EXTHIDI (TRUNCSIHI (* FLD (i_src2)))), 16), 16);
+ OPRND (acc) = opval;
+ TRACE_RESULT (current_cpu, abuf, "acc", 'D', opval);
+ }
+
+#undef OPRND
+#undef FLD
+}
+ NEXT (vpc);
+
+CASE (sem, INSN_WRITE_MULLO_A) : /* mullo $src1,$src2,$acc */
+ {
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ const ARGBUF *abuf = SEM_ARGBUF (sem_arg)->fields.write.abuf;
+#define FLD(f) abuf->fields.sfmt_machi_a.f
+#define OPRND(f) par_exec->operands.sfmt_mulhi_a.f
+ int UNUSED written = abuf->written;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 0);
+
+ SET_H_ACCUMS (FLD (f_acc), OPRND (acc));
+
+#undef OPRND
+#undef FLD
+ }
+ NEXT (vpc);
+
+ CASE (sem, INSN_PAR_MULWHI_A) : /* mulwhi $src1,$src2,$acc */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_machi_a.f
+#define OPRND(f) par_exec->operands.sfmt_mulhi_a.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 2);
+
+ {
+ DI opval = MULDI (EXTSIDI (* FLD (i_src1)), EXTHIDI (TRUNCSIHI (SRASI (* FLD (i_src2), 16))));
+ OPRND (acc) = opval;
+ TRACE_RESULT (current_cpu, abuf, "acc", 'D', opval);
+ }
+
+#undef OPRND
+#undef FLD
+}
+ NEXT (vpc);
+
+CASE (sem, INSN_WRITE_MULWHI_A) : /* mulwhi $src1,$src2,$acc */
+ {
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ const ARGBUF *abuf = SEM_ARGBUF (sem_arg)->fields.write.abuf;
+#define FLD(f) abuf->fields.sfmt_machi_a.f
+#define OPRND(f) par_exec->operands.sfmt_mulhi_a.f
+ int UNUSED written = abuf->written;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 0);
+
+ SET_H_ACCUMS (FLD (f_acc), OPRND (acc));
+
+#undef OPRND
+#undef FLD
+ }
+ NEXT (vpc);
+
+ CASE (sem, INSN_PAR_MULWLO_A) : /* mulwlo $src1,$src2,$acc */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_machi_a.f
+#define OPRND(f) par_exec->operands.sfmt_mulhi_a.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 2);
+
+ {
+ DI opval = MULDI (EXTSIDI (* FLD (i_src1)), EXTHIDI (TRUNCSIHI (* FLD (i_src2))));
+ OPRND (acc) = opval;
+ TRACE_RESULT (current_cpu, abuf, "acc", 'D', opval);
+ }
+
+#undef OPRND
+#undef FLD
+}
+ NEXT (vpc);
+
+CASE (sem, INSN_WRITE_MULWLO_A) : /* mulwlo $src1,$src2,$acc */
+ {
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ const ARGBUF *abuf = SEM_ARGBUF (sem_arg)->fields.write.abuf;
+#define FLD(f) abuf->fields.sfmt_machi_a.f
+#define OPRND(f) par_exec->operands.sfmt_mulhi_a.f
+ int UNUSED written = abuf->written;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 0);
+
+ SET_H_ACCUMS (FLD (f_acc), OPRND (acc));
+
+#undef OPRND
+#undef FLD
+ }
+ NEXT (vpc);
+
+ CASE (sem, INSN_PAR_MV) : /* mv $dr,$sr */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_ld_plus.f
+#define OPRND(f) par_exec->operands.sfmt_mv.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 2);
+
+ {
+ SI opval = * FLD (i_sr);
+ OPRND (dr) = opval;
+ TRACE_RESULT (current_cpu, abuf, "dr", 'x', opval);
+ }
+
+#undef OPRND
+#undef FLD
+}
+ NEXT (vpc);
+
+CASE (sem, INSN_WRITE_MV) : /* mv $dr,$sr */
+ {
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ const ARGBUF *abuf = SEM_ARGBUF (sem_arg)->fields.write.abuf;
+#define FLD(f) abuf->fields.sfmt_ld_plus.f
+#define OPRND(f) par_exec->operands.sfmt_mv.f
+ int UNUSED written = abuf->written;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 0);
+
+ * FLD (i_dr) = OPRND (dr);
+
+#undef OPRND
+#undef FLD
+ }
+ NEXT (vpc);
+
+ CASE (sem, INSN_PAR_MVFACHI_A) : /* mvfachi $dr,$accs */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_mvfachi_a.f
+#define OPRND(f) par_exec->operands.sfmt_mvfachi_a.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 2);
+
+ {
+ SI opval = TRUNCDISI (SRADI (GET_H_ACCUMS (FLD (f_accs)), 32));
+ OPRND (dr) = opval;
+ TRACE_RESULT (current_cpu, abuf, "dr", 'x', opval);
+ }
+
+#undef OPRND
+#undef FLD
+}
+ NEXT (vpc);
+
+CASE (sem, INSN_WRITE_MVFACHI_A) : /* mvfachi $dr,$accs */
+ {
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ const ARGBUF *abuf = SEM_ARGBUF (sem_arg)->fields.write.abuf;
+#define FLD(f) abuf->fields.sfmt_mvfachi_a.f
+#define OPRND(f) par_exec->operands.sfmt_mvfachi_a.f
+ int UNUSED written = abuf->written;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 0);
+
+ * FLD (i_dr) = OPRND (dr);
+
+#undef OPRND
+#undef FLD
+ }
+ NEXT (vpc);
+
+ CASE (sem, INSN_PAR_MVFACLO_A) : /* mvfaclo $dr,$accs */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_mvfachi_a.f
+#define OPRND(f) par_exec->operands.sfmt_mvfachi_a.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 2);
+
+ {
+ SI opval = TRUNCDISI (GET_H_ACCUMS (FLD (f_accs)));
+ OPRND (dr) = opval;
+ TRACE_RESULT (current_cpu, abuf, "dr", 'x', opval);
+ }
+
+#undef OPRND
+#undef FLD
+}
+ NEXT (vpc);
+
+CASE (sem, INSN_WRITE_MVFACLO_A) : /* mvfaclo $dr,$accs */
+ {
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ const ARGBUF *abuf = SEM_ARGBUF (sem_arg)->fields.write.abuf;
+#define FLD(f) abuf->fields.sfmt_mvfachi_a.f
+#define OPRND(f) par_exec->operands.sfmt_mvfachi_a.f
+ int UNUSED written = abuf->written;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 0);
+
+ * FLD (i_dr) = OPRND (dr);
+
+#undef OPRND
+#undef FLD
+ }
+ NEXT (vpc);
+
+ CASE (sem, INSN_PAR_MVFACMI_A) : /* mvfacmi $dr,$accs */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_mvfachi_a.f
+#define OPRND(f) par_exec->operands.sfmt_mvfachi_a.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 2);
+
+ {
+ SI opval = TRUNCDISI (SRADI (GET_H_ACCUMS (FLD (f_accs)), 16));
+ OPRND (dr) = opval;
+ TRACE_RESULT (current_cpu, abuf, "dr", 'x', opval);
+ }
+
+#undef OPRND
+#undef FLD
+}
+ NEXT (vpc);
+
+CASE (sem, INSN_WRITE_MVFACMI_A) : /* mvfacmi $dr,$accs */
+ {
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ const ARGBUF *abuf = SEM_ARGBUF (sem_arg)->fields.write.abuf;
+#define FLD(f) abuf->fields.sfmt_mvfachi_a.f
+#define OPRND(f) par_exec->operands.sfmt_mvfachi_a.f
+ int UNUSED written = abuf->written;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 0);
+
+ * FLD (i_dr) = OPRND (dr);
+
+#undef OPRND
+#undef FLD
+ }
+ NEXT (vpc);
+
+ CASE (sem, INSN_PAR_MVFC) : /* mvfc $dr,$scr */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_mvfc.f
+#define OPRND(f) par_exec->operands.sfmt_mvfc.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 2);
+
+ {
+ SI opval = GET_H_CR (FLD (f_r2));
+ OPRND (dr) = opval;
+ TRACE_RESULT (current_cpu, abuf, "dr", 'x', opval);
+ }
+
+#undef OPRND
+#undef FLD
+}
+ NEXT (vpc);
+
+CASE (sem, INSN_WRITE_MVFC) : /* mvfc $dr,$scr */
+ {
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ const ARGBUF *abuf = SEM_ARGBUF (sem_arg)->fields.write.abuf;
+#define FLD(f) abuf->fields.sfmt_mvfc.f
+#define OPRND(f) par_exec->operands.sfmt_mvfc.f
+ int UNUSED written = abuf->written;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 0);
+
+ * FLD (i_dr) = OPRND (dr);
+
+#undef OPRND
+#undef FLD
+ }
+ NEXT (vpc);
+
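+ /* mvtachi/mvtaclo replace the high/low 32 bits of accumulator $accs
+    with $src1, leaving the other half unchanged.  */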
+ CASE (sem, INSN_PAR_MVTACHI_A) : /* mvtachi $src1,$accs */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_mvtachi_a.f
+#define OPRND(f) par_exec->operands.sfmt_mvtachi_a.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 2);
+
+ {
+ DI opval = ORDI (ANDDI (GET_H_ACCUMS (FLD (f_accs)), MAKEDI (0, 0xffffffff)), SLLDI (EXTSIDI (* FLD (i_src1)), 32));
+ OPRND (accs) = opval;
+ TRACE_RESULT (current_cpu, abuf, "accs", 'D', opval);
+ }
+
+#undef OPRND
+#undef FLD
+}
+ NEXT (vpc);
+
+CASE (sem, INSN_WRITE_MVTACHI_A) : /* mvtachi $src1,$accs */
+ {
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ const ARGBUF *abuf = SEM_ARGBUF (sem_arg)->fields.write.abuf;
+#define FLD(f) abuf->fields.sfmt_mvtachi_a.f
+#define OPRND(f) par_exec->operands.sfmt_mvtachi_a.f
+ int UNUSED written = abuf->written;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 0);
+
+ SET_H_ACCUMS (FLD (f_accs), OPRND (accs));
+
+#undef OPRND
+#undef FLD
+ }
+ NEXT (vpc);
+
+ CASE (sem, INSN_PAR_MVTACLO_A) : /* mvtaclo $src1,$accs */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_mvtachi_a.f
+#define OPRND(f) par_exec->operands.sfmt_mvtachi_a.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 2);
+
+ {
+ DI opval = ORDI (ANDDI (GET_H_ACCUMS (FLD (f_accs)), MAKEDI (0xffffffff, 0)), ZEXTSIDI (* FLD (i_src1)));
+ OPRND (accs) = opval;
+ TRACE_RESULT (current_cpu, abuf, "accs", 'D', opval);
+ }
+
+#undef OPRND
+#undef FLD
+}
+ NEXT (vpc);
+
+CASE (sem, INSN_WRITE_MVTACLO_A) : /* mvtaclo $src1,$accs */
+ {
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ const ARGBUF *abuf = SEM_ARGBUF (sem_arg)->fields.write.abuf;
+#define FLD(f) abuf->fields.sfmt_mvtachi_a.f
+#define OPRND(f) par_exec->operands.sfmt_mvtachi_a.f
+ int UNUSED written = abuf->written;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 0);
+
+ SET_H_ACCUMS (FLD (f_accs), OPRND (accs));
+
+#undef OPRND
+#undef FLD
+ }
+ NEXT (vpc);
+
+ CASE (sem, INSN_PAR_MVTC) : /* mvtc $sr,$dcr */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_mvtc.f
+#define OPRND(f) par_exec->operands.sfmt_mvtc.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 2);
+
+ {
+ USI opval = * FLD (i_sr);
+ OPRND (dcr) = opval;
+ TRACE_RESULT (current_cpu, abuf, "dcr", 'x', opval);
+ }
+
+#undef OPRND
+#undef FLD
+}
+ NEXT (vpc);
+
+CASE (sem, INSN_WRITE_MVTC) : /* mvtc $sr,$dcr */
+ {
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ const ARGBUF *abuf = SEM_ARGBUF (sem_arg)->fields.write.abuf;
+#define FLD(f) abuf->fields.sfmt_mvtc.f
+#define OPRND(f) par_exec->operands.sfmt_mvtc.f
+ int UNUSED written = abuf->written;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 0);
+
+ SET_H_CR (FLD (f_r1), OPRND (dcr));
+
+#undef OPRND
+#undef FLD
+ }
+ NEXT (vpc);
+
+ CASE (sem, INSN_PAR_NEG) : /* neg $dr,$sr */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_ld_plus.f
+#define OPRND(f) par_exec->operands.sfmt_mv.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 2);
+
+ {
+ SI opval = NEGSI (* FLD (i_sr));
+ OPRND (dr) = opval;
+ TRACE_RESULT (current_cpu, abuf, "dr", 'x', opval);
+ }
+
+#undef OPRND
+#undef FLD
+}
+ NEXT (vpc);
+
+CASE (sem, INSN_WRITE_NEG) : /* neg $dr,$sr */
+ {
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ const ARGBUF *abuf = SEM_ARGBUF (sem_arg)->fields.write.abuf;
+#define FLD(f) abuf->fields.sfmt_ld_plus.f
+#define OPRND(f) par_exec->operands.sfmt_mv.f
+ int UNUSED written = abuf->written;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 0);
+
+ * FLD (i_dr) = OPRND (dr);
+
+#undef OPRND
+#undef FLD
+ }
+ NEXT (vpc);
+
+ CASE (sem, INSN_PAR_NOP) : /* nop */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.fmt_empty.f
+#define OPRND(f) par_exec->operands.sfmt_nop.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 2);
+
+PROFILE_COUNT_FILLNOPS (current_cpu, abuf->addr);
+
+#undef OPRND
+#undef FLD
+}
+ NEXT (vpc);
+
+CASE (sem, INSN_WRITE_NOP) : /* nop */
+ {
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ const ARGBUF *abuf = SEM_ARGBUF (sem_arg)->fields.write.abuf;
+#define FLD(f) abuf->fields.fmt_empty.f
+#define OPRND(f) par_exec->operands.sfmt_nop.f
+ int UNUSED written = abuf->written;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 0);
+
+
+#undef OPRND
+#undef FLD
+ }
+ NEXT (vpc);
+
+ CASE (sem, INSN_PAR_NOT) : /* not $dr,$sr */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_ld_plus.f
+#define OPRND(f) par_exec->operands.sfmt_mv.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 2);
+
+ {
+ SI opval = INVSI (* FLD (i_sr));
+ OPRND (dr) = opval;
+ TRACE_RESULT (current_cpu, abuf, "dr", 'x', opval);
+ }
+
+#undef OPRND
+#undef FLD
+}
+ NEXT (vpc);
+
+CASE (sem, INSN_WRITE_NOT) : /* not $dr,$sr */
+ {
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ const ARGBUF *abuf = SEM_ARGBUF (sem_arg)->fields.write.abuf;
+#define FLD(f) abuf->fields.sfmt_ld_plus.f
+#define OPRND(f) par_exec->operands.sfmt_mv.f
+ int UNUSED written = abuf->written;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 0);
+
+ * FLD (i_dr) = OPRND (dr);
+
+#undef OPRND
+#undef FLD
+ }
+ NEXT (vpc);
+
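+ /* rac/rach shift accumulator $accs left by $imm1, round (by adding
+    0x8000 or 0x80000000 respectively), saturate the result, mask off
+    the discarded low bits and write it to $accd.  */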
+ CASE (sem, INSN_PAR_RAC_DSI) : /* rac $accd,$accs,$imm1 */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_rac_dsi.f
+#define OPRND(f) par_exec->operands.sfmt_rac_dsi.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 2);
+
+{
+ DI tmp_tmp1;
+ tmp_tmp1 = SLLDI (GET_H_ACCUMS (FLD (f_accs)), FLD (f_imm1));
+ tmp_tmp1 = ADDDI (tmp_tmp1, MAKEDI (0, 32768));
+ {
+ DI opval = (GTDI (tmp_tmp1, MAKEDI (32767, 0xffff0000))) ? (MAKEDI (32767, 0xffff0000)) : (LTDI (tmp_tmp1, MAKEDI (0xffff8000, 0))) ? (MAKEDI (0xffff8000, 0)) : (ANDDI (tmp_tmp1, MAKEDI (0xffffffff, 0xffff0000)));
+ OPRND (accd) = opval;
+ TRACE_RESULT (current_cpu, abuf, "accd", 'D', opval);
+ }
+}
+
+#undef OPRND
+#undef FLD
+}
+ NEXT (vpc);
+
+CASE (sem, INSN_WRITE_RAC_DSI) : /* rac $accd,$accs,$imm1 */
+ {
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ const ARGBUF *abuf = SEM_ARGBUF (sem_arg)->fields.write.abuf;
+#define FLD(f) abuf->fields.sfmt_rac_dsi.f
+#define OPRND(f) par_exec->operands.sfmt_rac_dsi.f
+ int UNUSED written = abuf->written;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 0);
+
+ SET_H_ACCUMS (FLD (f_accd), OPRND (accd));
+
+#undef OPRND
+#undef FLD
+ }
+ NEXT (vpc);
+
+ CASE (sem, INSN_PAR_RACH_DSI) : /* rach $accd,$accs,$imm1 */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_rac_dsi.f
+#define OPRND(f) par_exec->operands.sfmt_rac_dsi.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 2);
+
+{
+ DI tmp_tmp1;
+ tmp_tmp1 = SLLDI (GET_H_ACCUMS (FLD (f_accs)), FLD (f_imm1));
+ tmp_tmp1 = ADDDI (tmp_tmp1, MAKEDI (0, 0x80000000));
+ {
+ DI opval = (GTDI (tmp_tmp1, MAKEDI (32767, 0))) ? (MAKEDI (32767, 0)) : (LTDI (tmp_tmp1, MAKEDI (0xffff8000, 0))) ? (MAKEDI (0xffff8000, 0)) : (ANDDI (tmp_tmp1, MAKEDI (0xffffffff, 0)));
+ OPRND (accd) = opval;
+ TRACE_RESULT (current_cpu, abuf, "accd", 'D', opval);
+ }
+}
+
+#undef OPRND
+#undef FLD
+}
+ NEXT (vpc);
+
+CASE (sem, INSN_WRITE_RACH_DSI) : /* rach $accd,$accs,$imm1 */
+ {
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ const ARGBUF *abuf = SEM_ARGBUF (sem_arg)->fields.write.abuf;
+#define FLD(f) abuf->fields.sfmt_rac_dsi.f
+#define OPRND(f) par_exec->operands.sfmt_rac_dsi.f
+ int UNUSED written = abuf->written;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 0);
+
+ SET_H_ACCUMS (FLD (f_accd), OPRND (accd));
+
+#undef OPRND
+#undef FLD
+ }
+ NEXT (vpc);
+
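+ /* rte returns from an exception: pc <- CR6 (BPC) & ~3, CR6 <- CR14,
+    PSW <- BPSW, BPSW <- BBPSW.  */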
+ CASE (sem, INSN_PAR_RTE) : /* rte */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.fmt_empty.f
+#define OPRND(f) par_exec->operands.sfmt_rte.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 2);
+
+{
+ {
+ USI opval = ANDSI (GET_H_CR (((UINT) 6)), -4);
+ OPRND (pc) = opval;
+ TRACE_RESULT (current_cpu, abuf, "pc", 'x', opval);
+ }
+ {
+ USI opval = GET_H_CR (((UINT) 14));
+ OPRND (h_cr_6) = opval;
+ TRACE_RESULT (current_cpu, abuf, "cr-6", 'x', opval);
+ }
+ {
+ UQI opval = CPU (h_bpsw);
+ OPRND (h_psw) = opval;
+ TRACE_RESULT (current_cpu, abuf, "psw", 'x', opval);
+ }
+ {
+ UQI opval = CPU (h_bbpsw);
+ OPRND (h_bpsw) = opval;
+ TRACE_RESULT (current_cpu, abuf, "bpsw", 'x', opval);
+ }
+}
+
+#undef OPRND
+#undef FLD
+}
+ NEXT (vpc);
+
+CASE (sem, INSN_WRITE_RTE) : /* rte */
+ {
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ const ARGBUF *abuf = SEM_ARGBUF (sem_arg)->fields.write.abuf;
+#define FLD(f) abuf->fields.fmt_empty.f
+#define OPRND(f) par_exec->operands.sfmt_rte.f
+ int UNUSED written = abuf->written;
+ IADDR UNUSED pc = abuf->addr;
+ SEM_BRANCH_INIT
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 0);
+
+ CPU (h_bpsw) = OPRND (h_bpsw);
+ SET_H_CR (((UINT) 6), OPRND (h_cr_6));
+ SET_H_PSW (OPRND (h_psw));
+ SEM_BRANCH_VIA_ADDR (current_cpu, sem_arg, OPRND (pc), vpc);
+
+ SEM_BRANCH_FINI (vpc);
+#undef OPRND
+#undef FLD
+ }
+ NEXT (vpc);
+
+ CASE (sem, INSN_PAR_SLL) : /* sll $dr,$sr */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_add.f
+#define OPRND(f) par_exec->operands.sfmt_add.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 2);
+
+ {
+ SI opval = SLLSI (* FLD (i_dr), ANDSI (* FLD (i_sr), 31));
+ OPRND (dr) = opval;
+ TRACE_RESULT (current_cpu, abuf, "dr", 'x', opval);
+ }
+
+#undef OPRND
+#undef FLD
+}
+ NEXT (vpc);
+
+CASE (sem, INSN_WRITE_SLL) : /* sll $dr,$sr */
+ {
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ const ARGBUF *abuf = SEM_ARGBUF (sem_arg)->fields.write.abuf;
+#define FLD(f) abuf->fields.sfmt_add.f
+#define OPRND(f) par_exec->operands.sfmt_add.f
+ int UNUSED written = abuf->written;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 0);
+
+ * FLD (i_dr) = OPRND (dr);
+
+#undef OPRND
+#undef FLD
+ }
+ NEXT (vpc);
+
+ CASE (sem, INSN_PAR_SLLI) : /* slli $dr,$uimm5 */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_slli.f
+#define OPRND(f) par_exec->operands.sfmt_slli.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 2);
+
+ {
+ SI opval = SLLSI (* FLD (i_dr), FLD (f_uimm5));
+ OPRND (dr) = opval;
+ TRACE_RESULT (current_cpu, abuf, "dr", 'x', opval);
+ }
+
+#undef OPRND
+#undef FLD
+}
+ NEXT (vpc);
+
+CASE (sem, INSN_WRITE_SLLI) : /* slli $dr,$uimm5 */
+ {
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ const ARGBUF *abuf = SEM_ARGBUF (sem_arg)->fields.write.abuf;
+#define FLD(f) abuf->fields.sfmt_slli.f
+#define OPRND(f) par_exec->operands.sfmt_slli.f
+ int UNUSED written = abuf->written;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 0);
+
+ * FLD (i_dr) = OPRND (dr);
+
+#undef OPRND
+#undef FLD
+ }
+ NEXT (vpc);
+
+ CASE (sem, INSN_PAR_SRA) : /* sra $dr,$sr */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_add.f
+#define OPRND(f) par_exec->operands.sfmt_add.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 2);
+
+ {
+ SI opval = SRASI (* FLD (i_dr), ANDSI (* FLD (i_sr), 31));
+ OPRND (dr) = opval;
+ TRACE_RESULT (current_cpu, abuf, "dr", 'x', opval);
+ }
+
+#undef OPRND
+#undef FLD
+}
+ NEXT (vpc);
+
+CASE (sem, INSN_WRITE_SRA) : /* sra $dr,$sr */
+ {
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ const ARGBUF *abuf = SEM_ARGBUF (sem_arg)->fields.write.abuf;
+#define FLD(f) abuf->fields.sfmt_add.f
+#define OPRND(f) par_exec->operands.sfmt_add.f
+ int UNUSED written = abuf->written;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 0);
+
+ * FLD (i_dr) = OPRND (dr);
+
+#undef OPRND
+#undef FLD
+ }
+ NEXT (vpc);
+
+ CASE (sem, INSN_PAR_SRAI) : /* srai $dr,$uimm5 */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_slli.f
+#define OPRND(f) par_exec->operands.sfmt_slli.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 2);
+
+ {
+ SI opval = SRASI (* FLD (i_dr), FLD (f_uimm5));
+ OPRND (dr) = opval;
+ TRACE_RESULT (current_cpu, abuf, "dr", 'x', opval);
+ }
+
+#undef OPRND
+#undef FLD
+}
+ NEXT (vpc);
+
+CASE (sem, INSN_WRITE_SRAI) : /* srai $dr,$uimm5 */
+ {
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ const ARGBUF *abuf = SEM_ARGBUF (sem_arg)->fields.write.abuf;
+#define FLD(f) abuf->fields.sfmt_slli.f
+#define OPRND(f) par_exec->operands.sfmt_slli.f
+ int UNUSED written = abuf->written;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 0);
+
+ * FLD (i_dr) = OPRND (dr);
+
+#undef OPRND
+#undef FLD
+ }
+ NEXT (vpc);
+
+ CASE (sem, INSN_PAR_SRL) : /* srl $dr,$sr */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_add.f
+#define OPRND(f) par_exec->operands.sfmt_add.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 2);
+
+ {
+ SI opval = SRLSI (* FLD (i_dr), ANDSI (* FLD (i_sr), 31));
+ OPRND (dr) = opval;
+ TRACE_RESULT (current_cpu, abuf, "dr", 'x', opval);
+ }
+
+#undef OPRND
+#undef FLD
+}
+ NEXT (vpc);
+
+CASE (sem, INSN_WRITE_SRL) : /* srl $dr,$sr */
+ {
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ const ARGBUF *abuf = SEM_ARGBUF (sem_arg)->fields.write.abuf;
+#define FLD(f) abuf->fields.sfmt_add.f
+#define OPRND(f) par_exec->operands.sfmt_add.f
+ int UNUSED written = abuf->written;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 0);
+
+ * FLD (i_dr) = OPRND (dr);
+
+#undef OPRND
+#undef FLD
+ }
+ NEXT (vpc);
+
+ CASE (sem, INSN_PAR_SRLI) : /* srli $dr,$uimm5 */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_slli.f
+#define OPRND(f) par_exec->operands.sfmt_slli.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 2);
+
+ {
+ SI opval = SRLSI (* FLD (i_dr), FLD (f_uimm5));
+ OPRND (dr) = opval;
+ TRACE_RESULT (current_cpu, abuf, "dr", 'x', opval);
+ }
+
+#undef OPRND
+#undef FLD
+}
+ NEXT (vpc);
+
+CASE (sem, INSN_WRITE_SRLI) : /* srli $dr,$uimm5 */
+ {
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ const ARGBUF *abuf = SEM_ARGBUF (sem_arg)->fields.write.abuf;
+#define FLD(f) abuf->fields.sfmt_slli.f
+#define OPRND(f) par_exec->operands.sfmt_slli.f
+ int UNUSED written = abuf->written;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 0);
+
+ * FLD (i_dr) = OPRND (dr);
+
+#undef OPRND
+#undef FLD
+ }
+ NEXT (vpc);
+
+ CASE (sem, INSN_PAR_ST) : /* st $src1,@$src2 */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_st_plus.f
+#define OPRND(f) par_exec->operands.sfmt_st.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 2);
+
+ {
+ SI opval = * FLD (i_src1);
+ OPRND (h_memory_src2_idx) = * FLD (i_src2);
+ OPRND (h_memory_src2) = opval;
+ TRACE_RESULT (current_cpu, abuf, "memory", 'x', opval);
+ }
+
+#undef OPRND
+#undef FLD
+}
+ NEXT (vpc);
+
+CASE (sem, INSN_WRITE_ST) : /* st $src1,@$src2 */
+ {
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ const ARGBUF *abuf = SEM_ARGBUF (sem_arg)->fields.write.abuf;
+#define FLD(f) abuf->fields.sfmt_st_plus.f
+#define OPRND(f) par_exec->operands.sfmt_st.f
+ int UNUSED written = abuf->written;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 0);
+
+ SETMEMSI (current_cpu, pc, OPRND (h_memory_src2_idx), OPRND (h_memory_src2));
+
+#undef OPRND
+#undef FLD
+ }
+ NEXT (vpc);
+
+ CASE (sem, INSN_PAR_STB) : /* stb $src1,@$src2 */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_st_plus.f
+#define OPRND(f) par_exec->operands.sfmt_stb.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 2);
+
+ {
+ QI opval = * FLD (i_src1);
+ OPRND (h_memory_src2_idx) = * FLD (i_src2);
+ OPRND (h_memory_src2) = opval;
+ TRACE_RESULT (current_cpu, abuf, "memory", 'x', opval);
+ }
+
+#undef OPRND
+#undef FLD
+}
+ NEXT (vpc);
+
+CASE (sem, INSN_WRITE_STB) : /* stb $src1,@$src2 */
+ {
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ const ARGBUF *abuf = SEM_ARGBUF (sem_arg)->fields.write.abuf;
+#define FLD(f) abuf->fields.sfmt_st_plus.f
+#define OPRND(f) par_exec->operands.sfmt_stb.f
+ int UNUSED written = abuf->written;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 0);
+
+ SETMEMQI (current_cpu, pc, OPRND (h_memory_src2_idx), OPRND (h_memory_src2));
+
+#undef OPRND
+#undef FLD
+ }
+ NEXT (vpc);
+
+ CASE (sem, INSN_PAR_STH) : /* sth $src1,@$src2 */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_st_plus.f
+#define OPRND(f) par_exec->operands.sfmt_sth.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 2);
+
+ {
+ HI opval = * FLD (i_src1);
+ OPRND (h_memory_src2_idx) = * FLD (i_src2);
+ OPRND (h_memory_src2) = opval;
+ TRACE_RESULT (current_cpu, abuf, "memory", 'x', opval);
+ }
+
+#undef OPRND
+#undef FLD
+}
+ NEXT (vpc);
+
+CASE (sem, INSN_WRITE_STH) : /* sth $src1,@$src2 */
+ {
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ const ARGBUF *abuf = SEM_ARGBUF (sem_arg)->fields.write.abuf;
+#define FLD(f) abuf->fields.sfmt_st_plus.f
+#define OPRND(f) par_exec->operands.sfmt_sth.f
+ int UNUSED written = abuf->written;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 0);
+
+ SETMEMHI (current_cpu, pc, OPRND (h_memory_src2_idx), OPRND (h_memory_src2));
+
+#undef OPRND
+#undef FLD
+ }
+ NEXT (vpc);
+
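+ /* st @+/@- use pre-modify addressing: $src2 is first adjusted by +/-4,
+    $src1 is stored at the new address, and the updated pointer is
+    written back to $src2.  */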
+ CASE (sem, INSN_PAR_ST_PLUS) : /* st $src1,@+$src2 */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_st_plus.f
+#define OPRND(f) par_exec->operands.sfmt_st_plus.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 2);
+
+{
+ SI tmp_new_src2;
+ tmp_new_src2 = ADDSI (* FLD (i_src2), 4);
+ {
+ SI opval = * FLD (i_src1);
+ OPRND (h_memory_new_src2_idx) = tmp_new_src2;
+ OPRND (h_memory_new_src2) = opval;
+ TRACE_RESULT (current_cpu, abuf, "memory", 'x', opval);
+ }
+ {
+ SI opval = tmp_new_src2;
+ OPRND (src2) = opval;
+ TRACE_RESULT (current_cpu, abuf, "src2", 'x', opval);
+ }
+}
+
+#undef OPRND
+#undef FLD
+}
+ NEXT (vpc);
+
+CASE (sem, INSN_WRITE_ST_PLUS) : /* st $src1,@+$src2 */
+ {
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ const ARGBUF *abuf = SEM_ARGBUF (sem_arg)->fields.write.abuf;
+#define FLD(f) abuf->fields.sfmt_st_plus.f
+#define OPRND(f) par_exec->operands.sfmt_st_plus.f
+ int UNUSED written = abuf->written;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 0);
+
+ SETMEMSI (current_cpu, pc, OPRND (h_memory_new_src2_idx), OPRND (h_memory_new_src2));
+ * FLD (i_src2) = OPRND (src2);
+
+#undef OPRND
+#undef FLD
+ }
+ NEXT (vpc);
+
+ CASE (sem, INSN_PAR_ST_MINUS) : /* st $src1,@-$src2 */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_st_plus.f
+#define OPRND(f) par_exec->operands.sfmt_st_plus.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 2);
+
+{
+ SI tmp_new_src2;
+ tmp_new_src2 = SUBSI (* FLD (i_src2), 4);
+ {
+ SI opval = * FLD (i_src1);
+ OPRND (h_memory_new_src2_idx) = tmp_new_src2;
+ OPRND (h_memory_new_src2) = opval;
+ TRACE_RESULT (current_cpu, abuf, "memory", 'x', opval);
+ }
+ {
+ SI opval = tmp_new_src2;
+ OPRND (src2) = opval;
+ TRACE_RESULT (current_cpu, abuf, "src2", 'x', opval);
+ }
+}
+
+#undef OPRND
+#undef FLD
+}
+ NEXT (vpc);
+
+CASE (sem, INSN_WRITE_ST_MINUS) : /* st $src1,@-$src2 */
+ {
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ const ARGBUF *abuf = SEM_ARGBUF (sem_arg)->fields.write.abuf;
+#define FLD(f) abuf->fields.sfmt_st_plus.f
+#define OPRND(f) par_exec->operands.sfmt_st_plus.f
+ int UNUSED written = abuf->written;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 0);
+
+ SETMEMSI (current_cpu, pc, OPRND (h_memory_new_src2_idx), OPRND (h_memory_new_src2));
+ * FLD (i_src2) = OPRND (src2);
+
+#undef OPRND
+#undef FLD
+ }
+ NEXT (vpc);
+
+ CASE (sem, INSN_PAR_SUB) : /* sub $dr,$sr */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_add.f
+#define OPRND(f) par_exec->operands.sfmt_add.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 2);
+
+ {
+ SI opval = SUBSI (* FLD (i_dr), * FLD (i_sr));
+ OPRND (dr) = opval;
+ TRACE_RESULT (current_cpu, abuf, "dr", 'x', opval);
+ }
+
+#undef OPRND
+#undef FLD
+}
+ NEXT (vpc);
+
+CASE (sem, INSN_WRITE_SUB) : /* sub $dr,$sr */
+ {
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ const ARGBUF *abuf = SEM_ARGBUF (sem_arg)->fields.write.abuf;
+#define FLD(f) abuf->fields.sfmt_add.f
+#define OPRND(f) par_exec->operands.sfmt_add.f
+ int UNUSED written = abuf->written;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 0);
+
+ * FLD (i_dr) = OPRND (dr);
+
+#undef OPRND
+#undef FLD
+ }
+ NEXT (vpc);
+
+ CASE (sem, INSN_PAR_SUBV) : /* subv $dr,$sr */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_add.f
+#define OPRND(f) par_exec->operands.sfmt_addv.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 2);
+
+{
+ SI temp0; BI temp1;
+ temp0 = SUBSI (* FLD (i_dr), * FLD (i_sr));
+ temp1 = SUBOFSI (* FLD (i_dr), * FLD (i_sr), 0);
+ {
+ SI opval = temp0;
+ OPRND (dr) = opval;
+ TRACE_RESULT (current_cpu, abuf, "dr", 'x', opval);
+ }
+ {
+ BI opval = temp1;
+ OPRND (condbit) = opval;
+ TRACE_RESULT (current_cpu, abuf, "condbit", 'x', opval);
+ }
+}
+
+#undef OPRND
+#undef FLD
+}
+ NEXT (vpc);
+
+CASE (sem, INSN_WRITE_SUBV) : /* subv $dr,$sr */
+ {
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ const ARGBUF *abuf = SEM_ARGBUF (sem_arg)->fields.write.abuf;
+#define FLD(f) abuf->fields.sfmt_add.f
+#define OPRND(f) par_exec->operands.sfmt_addv.f
+ int UNUSED written = abuf->written;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 0);
+
+ CPU (h_cond) = OPRND (condbit);
+ * FLD (i_dr) = OPRND (dr);
+
+#undef OPRND
+#undef FLD
+ }
+ NEXT (vpc);
+
+ CASE (sem, INSN_PAR_SUBX) : /* subx $dr,$sr */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_add.f
+#define OPRND(f) par_exec->operands.sfmt_addx.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 2);
+
+{
+ SI temp0; BI temp1;
+ temp0 = SUBCSI (* FLD (i_dr), * FLD (i_sr), CPU (h_cond));
+ temp1 = SUBCFSI (* FLD (i_dr), * FLD (i_sr), CPU (h_cond));
+ {
+ SI opval = temp0;
+ OPRND (dr) = opval;
+ TRACE_RESULT (current_cpu, abuf, "dr", 'x', opval);
+ }
+ {
+ BI opval = temp1;
+ OPRND (condbit) = opval;
+ TRACE_RESULT (current_cpu, abuf, "condbit", 'x', opval);
+ }
+}
+
+#undef OPRND
+#undef FLD
+}
+ NEXT (vpc);
+
+CASE (sem, INSN_WRITE_SUBX) : /* subx $dr,$sr */
+ {
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ const ARGBUF *abuf = SEM_ARGBUF (sem_arg)->fields.write.abuf;
+#define FLD(f) abuf->fields.sfmt_add.f
+#define OPRND(f) par_exec->operands.sfmt_addx.f
+ int UNUSED written = abuf->written;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 0);
+
+ CPU (h_cond) = OPRND (condbit);
+ * FLD (i_dr) = OPRND (dr);
+
+#undef OPRND
+#undef FLD
+ }
+ NEXT (vpc);
+
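+ /* trap saves CR6 into CR14 and the return address (pc + 4) into CR6,
+    backs up PSW into BPSW (and BPSW into BBPSW), masks PSW down to
+    bit 0x80, then branches to the handler address from m32r_trap.  */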
+ CASE (sem, INSN_PAR_TRAP) : /* trap $uimm4 */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_trap.f
+#define OPRND(f) par_exec->operands.sfmt_trap.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 2);
+
+{
+ {
+ USI opval = GET_H_CR (((UINT) 6));
+ OPRND (h_cr_14) = opval;
+ TRACE_RESULT (current_cpu, abuf, "cr-14", 'x', opval);
+ }
+ {
+ USI opval = ADDSI (pc, 4);
+ OPRND (h_cr_6) = opval;
+ TRACE_RESULT (current_cpu, abuf, "cr-6", 'x', opval);
+ }
+ {
+ UQI opval = CPU (h_bpsw);
+ OPRND (h_bbpsw) = opval;
+ TRACE_RESULT (current_cpu, abuf, "bbpsw", 'x', opval);
+ }
+ {
+ UQI opval = GET_H_PSW ();
+ OPRND (h_bpsw) = opval;
+ TRACE_RESULT (current_cpu, abuf, "bpsw", 'x', opval);
+ }
+ {
+ UQI opval = ANDQI (GET_H_PSW (), 128);
+ OPRND (h_psw) = opval;
+ TRACE_RESULT (current_cpu, abuf, "psw", 'x', opval);
+ }
+ {
+ SI opval = m32r_trap (current_cpu, pc, FLD (f_uimm4));
+ OPRND (pc) = opval;
+ TRACE_RESULT (current_cpu, abuf, "pc", 'x', opval);
+ }
+}
+
+#undef OPRND
+#undef FLD
+}
+ NEXT (vpc);
+
+CASE (sem, INSN_WRITE_TRAP) : /* trap $uimm4 */
+ {
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ const ARGBUF *abuf = SEM_ARGBUF (sem_arg)->fields.write.abuf;
+#define FLD(f) abuf->fields.sfmt_trap.f
+#define OPRND(f) par_exec->operands.sfmt_trap.f
+ int UNUSED written = abuf->written;
+ IADDR UNUSED pc = abuf->addr;
+ SEM_BRANCH_INIT
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 0);
+
+ CPU (h_bbpsw) = OPRND (h_bbpsw);
+ CPU (h_bpsw) = OPRND (h_bpsw);
+ SET_H_CR (((UINT) 14), OPRND (h_cr_14));
+ SET_H_CR (((UINT) 6), OPRND (h_cr_6));
+ SET_H_PSW (OPRND (h_psw));
+ SEM_BRANCH_VIA_ADDR (current_cpu, sem_arg, OPRND (pc), vpc);
+
+ SEM_BRANCH_FINI (vpc);
+#undef OPRND
+#undef FLD
+ }
+ NEXT (vpc);
+
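+ /* unlock performs the store to @$src2 only while the lock bit is set,
+    flagging the conditional write in abuf->written so the WRITE case
+    knows whether to commit it; the lock bit is always cleared.  */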
+ CASE (sem, INSN_PAR_UNLOCK) : /* unlock $src1,@$src2 */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_st_plus.f
+#define OPRND(f) par_exec->operands.sfmt_unlock.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 2);
+
+{
+if (CPU (h_lock)) {
+ {
+ SI opval = * FLD (i_src1);
+ OPRND (h_memory_src2_idx) = * FLD (i_src2);
+ OPRND (h_memory_src2) = opval;
+ written |= (1 << 4);
+ TRACE_RESULT (current_cpu, abuf, "memory", 'x', opval);
+ }
+}
+ {
+ BI opval = 0;
+ OPRND (h_lock) = opval;
+ TRACE_RESULT (current_cpu, abuf, "lock", 'x', opval);
+ }
+}
+
+ abuf->written = written;
+#undef OPRND
+#undef FLD
+}
+ NEXT (vpc);
+
+CASE (sem, INSN_WRITE_UNLOCK) : /* unlock $src1,@$src2 */
+ {
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ const ARGBUF *abuf = SEM_ARGBUF (sem_arg)->fields.write.abuf;
+#define FLD(f) abuf->fields.sfmt_st_plus.f
+#define OPRND(f) par_exec->operands.sfmt_unlock.f
+ int UNUSED written = abuf->written;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 0);
+
+ CPU (h_lock) = OPRND (h_lock);
+ if (written & (1 << 4))
+ {
+ SETMEMSI (current_cpu, pc, OPRND (h_memory_src2_idx), OPRND (h_memory_src2));
+ }
+
+#undef OPRND
+#undef FLD
+ }
+ NEXT (vpc);
+
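+ /* pcmpbz sets the condition bit if any of the four bytes of $src2
+    is zero.  */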
+ CASE (sem, INSN_PAR_PCMPBZ) : /* pcmpbz $src2 */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_st_plus.f
+#define OPRND(f) par_exec->operands.sfmt_cmpz.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 2);
+
+ {
+ BI opval = (EQSI (ANDSI (* FLD (i_src2), 255), 0)) ? (1) : (EQSI (ANDSI (* FLD (i_src2), 65280), 0)) ? (1) : (EQSI (ANDSI (* FLD (i_src2), 16711680), 0)) ? (1) : (EQSI (ANDSI (* FLD (i_src2), 0xff000000), 0)) ? (1) : (0);
+ OPRND (condbit) = opval;
+ TRACE_RESULT (current_cpu, abuf, "condbit", 'x', opval);
+ }
+
+#undef OPRND
+#undef FLD
+}
+ NEXT (vpc);
+
+CASE (sem, INSN_WRITE_PCMPBZ) : /* pcmpbz $src2 */
+ {
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ const ARGBUF *abuf = SEM_ARGBUF (sem_arg)->fields.write.abuf;
+#define FLD(f) abuf->fields.sfmt_st_plus.f
+#define OPRND(f) par_exec->operands.sfmt_cmpz.f
+ int UNUSED written = abuf->written;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 0);
+
+ CPU (h_cond) = OPRND (condbit);
+
+#undef OPRND
+#undef FLD
+ }
+ NEXT (vpc);
+
+ CASE (sem, INSN_PAR_SADD) : /* sadd */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.fmt_empty.f
+#define OPRND(f) par_exec->operands.sfmt_sadd.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 2);
+
+ {
+ DI opval = ADDDI (SRADI (GET_H_ACCUMS (((UINT) 1)), 16), GET_H_ACCUMS (((UINT) 0)));
+ OPRND (h_accums_0) = opval;
+ TRACE_RESULT (current_cpu, abuf, "accums-0", 'D', opval);
+ }
+
+#undef OPRND
+#undef FLD
+}
+ NEXT (vpc);
+
+CASE (sem, INSN_WRITE_SADD) : /* sadd */
+ {
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ const ARGBUF *abuf = SEM_ARGBUF (sem_arg)->fields.write.abuf;
+#define FLD(f) abuf->fields.fmt_empty.f
+#define OPRND(f) par_exec->operands.sfmt_sadd.f
+ int UNUSED written = abuf->written;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 0);
+
+ SET_H_ACCUMS (((UINT) 0), OPRND (h_accums_0));
+
+#undef OPRND
+#undef FLD
+ }
+ NEXT (vpc);
+
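+ /* macwu1 multiplies $src1 by the unsigned low halfword of $src2 and
+    adds the product to accumulator 1; the shift-left/shift-right pair
+    sign-extends the sum before it is written back.  */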
+ CASE (sem, INSN_PAR_MACWU1) : /* macwu1 $src1,$src2 */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_st_plus.f
+#define OPRND(f) par_exec->operands.sfmt_macwu1.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 2);
+
+ {
+ DI opval = SRADI (SLLDI (ADDDI (GET_H_ACCUMS (((UINT) 1)), MULDI (EXTSIDI (* FLD (i_src1)), EXTSIDI (ANDSI (* FLD (i_src2), 65535)))), 8), 8);
+ OPRND (h_accums_1) = opval;
+ TRACE_RESULT (current_cpu, abuf, "accums-1", 'D', opval);
+ }
+
+#undef OPRND
+#undef FLD
+}
+ NEXT (vpc);
+
+CASE (sem, INSN_WRITE_MACWU1) : /* macwu1 $src1,$src2 */
+ {
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ const ARGBUF *abuf = SEM_ARGBUF (sem_arg)->fields.write.abuf;
+#define FLD(f) abuf->fields.sfmt_st_plus.f
+#define OPRND(f) par_exec->operands.sfmt_macwu1.f
+ int UNUSED written = abuf->written;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 0);
+
+ SET_H_ACCUMS (((UINT) 1), OPRND (h_accums_1));
+
+#undef OPRND
+#undef FLD
+ }
+ NEXT (vpc);
+
+ CASE (sem, INSN_PAR_MSBLO) : /* msblo $src1,$src2 */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_st_plus.f
+#define OPRND(f) par_exec->operands.sfmt_msblo.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 2);
+
+ {
+ DI opval = SRADI (SLLDI (SUBDI (GET_H_ACCUM (), SRADI (SLLDI (MULDI (EXTHIDI (TRUNCSIHI (* FLD (i_src1))), EXTHIDI (TRUNCSIHI (* FLD (i_src2)))), 32), 16)), 8), 8);
+ OPRND (accum) = opval;
+ TRACE_RESULT (current_cpu, abuf, "accum", 'D', opval);
+ }
+
+#undef OPRND
+#undef FLD
+}
+ NEXT (vpc);
+
+CASE (sem, INSN_WRITE_MSBLO) : /* msblo $src1,$src2 */
+ {
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ const ARGBUF *abuf = SEM_ARGBUF (sem_arg)->fields.write.abuf;
+#define FLD(f) abuf->fields.sfmt_st_plus.f
+#define OPRND(f) par_exec->operands.sfmt_msblo.f
+ int UNUSED written = abuf->written;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 0);
+
+ SET_H_ACCUM (OPRND (accum));
+
+#undef OPRND
+#undef FLD
+ }
+ NEXT (vpc);
+
+ CASE (sem, INSN_PAR_MULWU1) : /* mulwu1 $src1,$src2 */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_st_plus.f
+#define OPRND(f) par_exec->operands.sfmt_mulwu1.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 2);
+
+ {
+ DI opval = SRADI (SLLDI (MULDI (EXTSIDI (* FLD (i_src1)), EXTSIDI (ANDSI (* FLD (i_src2), 65535))), 16), 16);
+ OPRND (h_accums_1) = opval;
+ TRACE_RESULT (current_cpu, abuf, "accums-1", 'D', opval);
+ }
+
+#undef OPRND
+#undef FLD
+}
+ NEXT (vpc);
+
+CASE (sem, INSN_WRITE_MULWU1) : /* mulwu1 $src1,$src2 */
+ {
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ const ARGBUF *abuf = SEM_ARGBUF (sem_arg)->fields.write.abuf;
+#define FLD(f) abuf->fields.sfmt_st_plus.f
+#define OPRND(f) par_exec->operands.sfmt_mulwu1.f
+ int UNUSED written = abuf->written;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 0);
+
+ SET_H_ACCUMS (((UINT) 1), OPRND (h_accums_1));
+
+#undef OPRND
+#undef FLD
+ }
+ NEXT (vpc);
+
+ CASE (sem, INSN_PAR_MACLH1) : /* maclh1 $src1,$src2 */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.sfmt_st_plus.f
+#define OPRND(f) par_exec->operands.sfmt_macwu1.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 2);
+
+ {
+ DI opval = SRADI (SLLDI (ADDDI (GET_H_ACCUMS (((UINT) 1)), SLLDI (EXTSIDI (MULSI (EXTHISI (TRUNCSIHI (* FLD (i_src1))), SRASI (* FLD (i_src2), 16))), 16)), 8), 8);
+ OPRND (h_accums_1) = opval;
+ TRACE_RESULT (current_cpu, abuf, "accums-1", 'D', opval);
+ }
+
+#undef OPRND
+#undef FLD
+}
+ NEXT (vpc);
+
+CASE (sem, INSN_WRITE_MACLH1) : /* maclh1 $src1,$src2 */
+ {
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ const ARGBUF *abuf = SEM_ARGBUF (sem_arg)->fields.write.abuf;
+#define FLD(f) abuf->fields.sfmt_st_plus.f
+#define OPRND(f) par_exec->operands.sfmt_macwu1.f
+ int UNUSED written = abuf->written;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 0);
+
+ SET_H_ACCUMS (((UINT) 1), OPRND (h_accums_1));
+
+#undef OPRND
+#undef FLD
+ }
+ NEXT (vpc);
+
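+ /* sc and snc conditionally nullify their companion insn: sc skips it
+    when the condition bit is set, snc when it is clear.  The WRITE
+    cases have nothing to commit.  */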
+ CASE (sem, INSN_PAR_SC) : /* sc */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.fmt_empty.f
+#define OPRND(f) par_exec->operands.sfmt_sc.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 2);
+
+if (ZEXTBISI (CPU (h_cond)))
+ SEM_SKIP_INSN (current_cpu, sem_arg, vpc);
+
+#undef OPRND
+#undef FLD
+}
+ NEXT (vpc);
+
+CASE (sem, INSN_WRITE_SC) : /* sc */
+ {
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ const ARGBUF *abuf = SEM_ARGBUF (sem_arg)->fields.write.abuf;
+#define FLD(f) abuf->fields.fmt_empty.f
+#define OPRND(f) par_exec->operands.sfmt_sc.f
+ int UNUSED written = abuf->written;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 0);
+
+
+#undef OPRND
+#undef FLD
+ }
+ NEXT (vpc);
+
+ CASE (sem, INSN_PAR_SNC) : /* snc */
+{
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ ARGBUF *abuf = SEM_ARGBUF (sem_arg);
+#define FLD(f) abuf->fields.fmt_empty.f
+#define OPRND(f) par_exec->operands.sfmt_sc.f
+ int UNUSED written = 0;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 2);
+
+if (ZEXTBISI (NOTBI (CPU (h_cond))))
+ SEM_SKIP_INSN (current_cpu, sem_arg, vpc);
+
+#undef OPRND
+#undef FLD
+}
+ NEXT (vpc);
+
+CASE (sem, INSN_WRITE_SNC) : /* snc */
+ {
+ SEM_ARG sem_arg = SEM_SEM_ARG (vpc, sc);
+ const ARGBUF *abuf = SEM_ARGBUF (sem_arg)->fields.write.abuf;
+#define FLD(f) abuf->fields.fmt_empty.f
+#define OPRND(f) par_exec->operands.sfmt_sc.f
+ int UNUSED written = abuf->written;
+ IADDR UNUSED pc = abuf->addr;
+ vpc = SEM_NEXT_VPC (sem_arg, pc, 0);
+
+
+#undef OPRND
+#undef FLD
+ }
+ NEXT (vpc);
+
+
+ }
+ ENDSWITCH (sem) /* End of semantic switch. */
+
+ /* At this point `vpc' contains the next insn to execute. */
+}
+
+#undef DEFINE_SWITCH
+#endif /* DEFINE_SWITCH */