Diffstat (limited to 'deps/v8/src/compiler/mips64')
-rw-r--r--  deps/v8/src/compiler/mips64/code-generator-mips64.cc        | 354
-rw-r--r--  deps/v8/src/compiler/mips64/instruction-codes-mips64.h      | 628
-rw-r--r--  deps/v8/src/compiler/mips64/instruction-scheduler-mips64.cc |  37
-rw-r--r--  deps/v8/src/compiler/mips64/instruction-selector-mips64.cc  | 324
4 files changed, 830 insertions(+), 513 deletions(-)
diff --git a/deps/v8/src/compiler/mips64/code-generator-mips64.cc b/deps/v8/src/compiler/mips64/code-generator-mips64.cc
index 7beb887b53..0e2b508a29 100644
--- a/deps/v8/src/compiler/mips64/code-generator-mips64.cc
+++ b/deps/v8/src/compiler/mips64/code-generator-mips64.cc
@@ -87,6 +87,9 @@ class MipsOperandConverter final : public InstructionOperandConverter {
// TODO(plind): Maybe we should handle ExtRef & HeapObj here?
// maybe not done on arm due to const pool ??
break;
+ case Constant::kDelayedStringConstant:
+ return Operand::EmbeddedStringConstant(
+ constant.ToDelayedStringConstant());
case Constant::kRpoNumber:
UNREACHABLE(); // TODO(titzer): RPO immediates on mips?
break;
@@ -349,116 +352,120 @@ void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
__ sync(); \
} while (0)
-#define ASSEMBLE_ATOMIC_BINOP(bin_instr) \
- do { \
- Label binop; \
- __ Daddu(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
- __ sync(); \
- __ bind(&binop); \
- __ Ll(i.OutputRegister(0), MemOperand(i.TempRegister(0), 0)); \
- __ bin_instr(i.TempRegister(1), i.OutputRegister(0), \
- Operand(i.InputRegister(2))); \
- __ Sc(i.TempRegister(1), MemOperand(i.TempRegister(0), 0)); \
- __ BranchShort(&binop, eq, i.TempRegister(1), Operand(zero_reg)); \
- __ sync(); \
+#define ASSEMBLE_ATOMIC_BINOP(load_linked, store_conditional, bin_instr) \
+ do { \
+ Label binop; \
+ __ Daddu(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
+ __ sync(); \
+ __ bind(&binop); \
+ __ load_linked(i.OutputRegister(0), MemOperand(i.TempRegister(0), 0)); \
+ __ bin_instr(i.TempRegister(1), i.OutputRegister(0), \
+ Operand(i.InputRegister(2))); \
+ __ store_conditional(i.TempRegister(1), MemOperand(i.TempRegister(0), 0)); \
+ __ BranchShort(&binop, eq, i.TempRegister(1), Operand(zero_reg)); \
+ __ sync(); \
} while (0)
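
Note: the binop macro is now parameterized over the load-linked/store-conditional
pair, so the same retry loop serves both the 32-bit (Ll/Sc) and the new 64-bit
(Lld/Scd) atomics. As a rough sketch (register names below are placeholders for
the operand converter's picks, not the actual allocation), an instantiation such
as ASSEMBLE_ATOMIC_BINOP(Lld, Scd, Daddu) emits:

    Label binop;
    __ Daddu(addr, base, index);                // effective address of the cell
    __ sync();                                  // full barrier before the loop
    __ bind(&binop);
    __ Lld(old_val, MemOperand(addr, 0));       // load-linked the current value
    __ Daddu(result, old_val, Operand(value));  // apply the binop
    __ Scd(result, MemOperand(addr, 0));        // store-conditional writes 1/0
    __ BranchShort(&binop, eq, result, Operand(zero_reg));  // 0 means retry
    __ sync();                                  // barrier after success
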
-#define ASSEMBLE_ATOMIC_BINOP_EXT(sign_extend, size, bin_instr) \
- do { \
- Label binop; \
- __ daddu(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
- __ andi(i.TempRegister(3), i.TempRegister(0), 0x3); \
- __ Dsubu(i.TempRegister(0), i.TempRegister(0), \
- Operand(i.TempRegister(3))); \
- __ sll(i.TempRegister(3), i.TempRegister(3), 3); \
- __ sync(); \
- __ bind(&binop); \
- __ Ll(i.TempRegister(1), MemOperand(i.TempRegister(0), 0)); \
- __ ExtractBits(i.OutputRegister(0), i.TempRegister(1), i.TempRegister(3), \
- size, sign_extend); \
- __ bin_instr(i.TempRegister(2), i.OutputRegister(0), \
- Operand(i.InputRegister(2))); \
- __ InsertBits(i.TempRegister(1), i.TempRegister(2), i.TempRegister(3), \
- size); \
- __ Sc(i.TempRegister(1), MemOperand(i.TempRegister(0), 0)); \
- __ BranchShort(&binop, eq, i.TempRegister(1), Operand(zero_reg)); \
- __ sync(); \
+#define ASSEMBLE_ATOMIC_BINOP_EXT(load_linked, store_conditional, sign_extend, \
+ size, bin_instr) \
+ do { \
+ Label binop; \
+ __ daddu(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
+ __ andi(i.TempRegister(3), i.TempRegister(0), 0x3); \
+ __ Dsubu(i.TempRegister(0), i.TempRegister(0), \
+ Operand(i.TempRegister(3))); \
+ __ sll(i.TempRegister(3), i.TempRegister(3), 3); \
+ __ sync(); \
+ __ bind(&binop); \
+ __ load_linked(i.TempRegister(1), MemOperand(i.TempRegister(0), 0)); \
+ __ ExtractBits(i.OutputRegister(0), i.TempRegister(1), i.TempRegister(3), \
+ size, sign_extend); \
+ __ bin_instr(i.TempRegister(2), i.OutputRegister(0), \
+ Operand(i.InputRegister(2))); \
+ __ InsertBits(i.TempRegister(1), i.TempRegister(2), i.TempRegister(3), \
+ size); \
+ __ store_conditional(i.TempRegister(1), MemOperand(i.TempRegister(0), 0)); \
+ __ BranchShort(&binop, eq, i.TempRegister(1), Operand(zero_reg)); \
+ __ sync(); \
} while (0)
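
MIPS LL/SC only operates on naturally aligned words, so the _EXT variant
implements byte and halfword atomics by aligning the address down, computing
the lane's bit offset, and editing that lane in place with ExtractBits and
InsertBits. The address arithmetic at the top of the macro amounts to the
following (a C sketch of the Ll/Sc case; variable names are illustrative):

    uint64_t addr  = base + index;  // daddu temp0, base, index
    uint64_t byte  = addr & 0x3;    // andi  temp3, temp0, 0x3
    uint64_t word  = addr - byte;   // Dsubu temp0: aligned word address
    uint64_t shift = byte * 8;      // sll   temp3, temp3, 3: lane bit offset
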
-#define ASSEMBLE_ATOMIC_EXCHANGE_INTEGER() \
- do { \
- Label exchange; \
- __ sync(); \
- __ bind(&exchange); \
- __ daddu(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
- __ Ll(i.OutputRegister(0), MemOperand(i.TempRegister(0), 0)); \
- __ mov(i.TempRegister(1), i.InputRegister(2)); \
- __ Sc(i.TempRegister(1), MemOperand(i.TempRegister(0), 0)); \
- __ BranchShort(&exchange, eq, i.TempRegister(1), Operand(zero_reg)); \
- __ sync(); \
+#define ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(load_linked, store_conditional) \
+ do { \
+ Label exchange; \
+ __ sync(); \
+ __ bind(&exchange); \
+ __ daddu(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
+ __ load_linked(i.OutputRegister(0), MemOperand(i.TempRegister(0), 0)); \
+ __ mov(i.TempRegister(1), i.InputRegister(2)); \
+ __ store_conditional(i.TempRegister(1), MemOperand(i.TempRegister(0), 0)); \
+ __ BranchShort(&exchange, eq, i.TempRegister(1), Operand(zero_reg)); \
+ __ sync(); \
} while (0)
-#define ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(sign_extend, size) \
- do { \
- Label exchange; \
- __ daddu(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
- __ andi(i.TempRegister(1), i.TempRegister(0), 0x3); \
- __ Dsubu(i.TempRegister(0), i.TempRegister(0), \
- Operand(i.TempRegister(1))); \
- __ sll(i.TempRegister(1), i.TempRegister(1), 3); \
- __ sync(); \
- __ bind(&exchange); \
- __ Ll(i.TempRegister(2), MemOperand(i.TempRegister(0), 0)); \
- __ ExtractBits(i.OutputRegister(0), i.TempRegister(2), i.TempRegister(1), \
- size, sign_extend); \
- __ InsertBits(i.TempRegister(2), i.InputRegister(2), i.TempRegister(1), \
- size); \
- __ Sc(i.TempRegister(2), MemOperand(i.TempRegister(0), 0)); \
- __ BranchShort(&exchange, eq, i.TempRegister(2), Operand(zero_reg)); \
- __ sync(); \
+#define ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(load_linked, store_conditional, \
+ sign_extend, size) \
+ do { \
+ Label exchange; \
+ __ daddu(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
+ __ andi(i.TempRegister(1), i.TempRegister(0), 0x3); \
+ __ Dsubu(i.TempRegister(0), i.TempRegister(0), \
+ Operand(i.TempRegister(1))); \
+ __ sll(i.TempRegister(1), i.TempRegister(1), 3); \
+ __ sync(); \
+ __ bind(&exchange); \
+ __ load_linked(i.TempRegister(2), MemOperand(i.TempRegister(0), 0)); \
+ __ ExtractBits(i.OutputRegister(0), i.TempRegister(2), i.TempRegister(1), \
+ size, sign_extend); \
+ __ InsertBits(i.TempRegister(2), i.InputRegister(2), i.TempRegister(1), \
+ size); \
+ __ store_conditional(i.TempRegister(2), MemOperand(i.TempRegister(0), 0)); \
+ __ BranchShort(&exchange, eq, i.TempRegister(2), Operand(zero_reg)); \
+ __ sync(); \
} while (0)
-#define ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER() \
- do { \
- Label compareExchange; \
- Label exit; \
- __ daddu(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
- __ sync(); \
- __ bind(&compareExchange); \
- __ Ll(i.OutputRegister(0), MemOperand(i.TempRegister(0), 0)); \
- __ BranchShort(&exit, ne, i.InputRegister(2), \
- Operand(i.OutputRegister(0))); \
- __ mov(i.TempRegister(2), i.InputRegister(3)); \
- __ Sc(i.TempRegister(2), MemOperand(i.TempRegister(0), 0)); \
- __ BranchShort(&compareExchange, eq, i.TempRegister(2), \
- Operand(zero_reg)); \
- __ bind(&exit); \
- __ sync(); \
+#define ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(load_linked, \
+ store_conditional) \
+ do { \
+ Label compareExchange; \
+ Label exit; \
+ __ daddu(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
+ __ sync(); \
+ __ bind(&compareExchange); \
+ __ load_linked(i.OutputRegister(0), MemOperand(i.TempRegister(0), 0)); \
+ __ BranchShort(&exit, ne, i.InputRegister(2), \
+ Operand(i.OutputRegister(0))); \
+ __ mov(i.TempRegister(2), i.InputRegister(3)); \
+ __ store_conditional(i.TempRegister(2), MemOperand(i.TempRegister(0), 0)); \
+ __ BranchShort(&compareExchange, eq, i.TempRegister(2), \
+ Operand(zero_reg)); \
+ __ bind(&exit); \
+ __ sync(); \
} while (0)
-#define ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(sign_extend, size) \
- do { \
- Label compareExchange; \
- Label exit; \
- __ daddu(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
- __ andi(i.TempRegister(1), i.TempRegister(0), 0x3); \
- __ Dsubu(i.TempRegister(0), i.TempRegister(0), \
- Operand(i.TempRegister(1))); \
- __ sll(i.TempRegister(1), i.TempRegister(1), 3); \
- __ sync(); \
- __ bind(&compareExchange); \
- __ Ll(i.TempRegister(2), MemOperand(i.TempRegister(0), 0)); \
- __ ExtractBits(i.OutputRegister(0), i.TempRegister(2), i.TempRegister(1), \
- size, sign_extend); \
- __ BranchShort(&exit, ne, i.InputRegister(2), \
- Operand(i.OutputRegister(0))); \
- __ InsertBits(i.TempRegister(2), i.InputRegister(3), i.TempRegister(1), \
- size); \
- __ Sc(i.TempRegister(2), MemOperand(i.TempRegister(0), 0)); \
- __ BranchShort(&compareExchange, eq, i.TempRegister(2), \
- Operand(zero_reg)); \
- __ bind(&exit); \
- __ sync(); \
+#define ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT( \
+ load_linked, store_conditional, sign_extend, size) \
+ do { \
+ Label compareExchange; \
+ Label exit; \
+ __ daddu(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
+ __ andi(i.TempRegister(1), i.TempRegister(0), 0x3); \
+ __ Dsubu(i.TempRegister(0), i.TempRegister(0), \
+ Operand(i.TempRegister(1))); \
+ __ sll(i.TempRegister(1), i.TempRegister(1), 3); \
+ __ sync(); \
+ __ bind(&compareExchange); \
+ __ load_linked(i.TempRegister(2), MemOperand(i.TempRegister(0), 0)); \
+ __ ExtractBits(i.OutputRegister(0), i.TempRegister(2), i.TempRegister(1), \
+ size, sign_extend); \
+ __ BranchShort(&exit, ne, i.InputRegister(2), \
+ Operand(i.OutputRegister(0))); \
+ __ InsertBits(i.TempRegister(2), i.InputRegister(3), i.TempRegister(1), \
+ size); \
+ __ store_conditional(i.TempRegister(2), MemOperand(i.TempRegister(0), 0)); \
+ __ BranchShort(&compareExchange, eq, i.TempRegister(2), \
+ Operand(zero_reg)); \
+ __ bind(&exit); \
+ __ sync(); \
} while (0)
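
For reference, the compare-exchange loop realizes the usual CAS contract: the
output register always ends up holding the observed value, a mismatch exits
early without storing, and a failed store-conditional retries the whole loop.
In C-style pseudocode (load_linked and store_conditional are hypothetical
stand-ins; a real LL/SC pair cannot be split across calls like this):

    uint32_t cas(uint32_t* p, uint32_t expected, uint32_t desired) {
      uint32_t observed;
      do {
        observed = load_linked(p);               // Ll / Lld
        if (observed != expected) break;         // BranchShort to exit
      } while (!store_conditional(p, desired));  // Sc / Scd failed: retry
      return observed;                           // the value the loop saw
    }
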
#define ASSEMBLE_IEEE754_BINOP(name) \
@@ -1845,6 +1852,18 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kWord32AtomicLoadWord32:
ASSEMBLE_ATOMIC_LOAD_INTEGER(Lw);
break;
+ case kMips64Word64AtomicLoadUint8:
+ ASSEMBLE_ATOMIC_LOAD_INTEGER(Lbu);
+ break;
+ case kMips64Word64AtomicLoadUint16:
+ ASSEMBLE_ATOMIC_LOAD_INTEGER(Lhu);
+ break;
+ case kMips64Word64AtomicLoadUint32:
+ ASSEMBLE_ATOMIC_LOAD_INTEGER(Lwu);
+ break;
+ case kMips64Word64AtomicLoadUint64:
+ ASSEMBLE_ATOMIC_LOAD_INTEGER(Ld);
+ break;
case kWord32AtomicStoreWord8:
ASSEMBLE_ATOMIC_STORE_INTEGER(Sb);
break;
@@ -1854,52 +1873,88 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kWord32AtomicStoreWord32:
ASSEMBLE_ATOMIC_STORE_INTEGER(Sw);
break;
+ case kMips64Word64AtomicStoreWord8:
+ ASSEMBLE_ATOMIC_STORE_INTEGER(Sb);
+ break;
+ case kMips64Word64AtomicStoreWord16:
+ ASSEMBLE_ATOMIC_STORE_INTEGER(Sh);
+ break;
+ case kMips64Word64AtomicStoreWord32:
+ ASSEMBLE_ATOMIC_STORE_INTEGER(Sw);
+ break;
+ case kMips64Word64AtomicStoreWord64:
+ ASSEMBLE_ATOMIC_STORE_INTEGER(Sd);
+ break;
case kWord32AtomicExchangeInt8:
- ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(true, 8);
+ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Ll, Sc, true, 8);
break;
case kWord32AtomicExchangeUint8:
- ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(false, 8);
+ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Ll, Sc, false, 8);
break;
case kWord32AtomicExchangeInt16:
- ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(true, 16);
+ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Ll, Sc, true, 16);
break;
case kWord32AtomicExchangeUint16:
- ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(false, 16);
+ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Ll, Sc, false, 16);
break;
case kWord32AtomicExchangeWord32:
- ASSEMBLE_ATOMIC_EXCHANGE_INTEGER();
+ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(Ll, Sc);
+ break;
+ case kMips64Word64AtomicExchangeUint8:
+ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Lld, Scd, false, 8);
+ break;
+ case kMips64Word64AtomicExchangeUint16:
+ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Lld, Scd, false, 16);
+ break;
+ case kMips64Word64AtomicExchangeUint32:
+ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Lld, Scd, false, 32);
+ break;
+ case kMips64Word64AtomicExchangeUint64:
+ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(Lld, Scd);
break;
case kWord32AtomicCompareExchangeInt8:
- ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(true, 8);
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Ll, Sc, true, 8);
break;
case kWord32AtomicCompareExchangeUint8:
- ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(false, 8);
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Ll, Sc, false, 8);
break;
case kWord32AtomicCompareExchangeInt16:
- ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(true, 16);
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Ll, Sc, true, 16);
break;
case kWord32AtomicCompareExchangeUint16:
- ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(false, 16);
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Ll, Sc, false, 16);
break;
case kWord32AtomicCompareExchangeWord32:
__ sll(i.InputRegister(2), i.InputRegister(2), 0);
- ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER();
- break;
-#define ATOMIC_BINOP_CASE(op, inst) \
- case kWord32Atomic##op##Int8: \
- ASSEMBLE_ATOMIC_BINOP_EXT(true, 8, inst); \
- break; \
- case kWord32Atomic##op##Uint8: \
- ASSEMBLE_ATOMIC_BINOP_EXT(false, 8, inst); \
- break; \
- case kWord32Atomic##op##Int16: \
- ASSEMBLE_ATOMIC_BINOP_EXT(true, 16, inst); \
- break; \
- case kWord32Atomic##op##Uint16: \
- ASSEMBLE_ATOMIC_BINOP_EXT(false, 16, inst); \
- break; \
- case kWord32Atomic##op##Word32: \
- ASSEMBLE_ATOMIC_BINOP(inst); \
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(Ll, Sc);
+ break;
+ case kMips64Word64AtomicCompareExchangeUint8:
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Lld, Scd, false, 8);
+ break;
+ case kMips64Word64AtomicCompareExchangeUint16:
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Lld, Scd, false, 16);
+ break;
+ case kMips64Word64AtomicCompareExchangeUint32:
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Lld, Scd, false, 32);
+ break;
+ case kMips64Word64AtomicCompareExchangeUint64:
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(Lld, Scd);
+ break;
+#define ATOMIC_BINOP_CASE(op, inst) \
+ case kWord32Atomic##op##Int8: \
+ ASSEMBLE_ATOMIC_BINOP_EXT(Ll, Sc, true, 8, inst); \
+ break; \
+ case kWord32Atomic##op##Uint8: \
+ ASSEMBLE_ATOMIC_BINOP_EXT(Ll, Sc, false, 8, inst); \
+ break; \
+ case kWord32Atomic##op##Int16: \
+ ASSEMBLE_ATOMIC_BINOP_EXT(Ll, Sc, true, 16, inst); \
+ break; \
+ case kWord32Atomic##op##Uint16: \
+ ASSEMBLE_ATOMIC_BINOP_EXT(Ll, Sc, false, 16, inst); \
+ break; \
+ case kWord32Atomic##op##Word32: \
+ ASSEMBLE_ATOMIC_BINOP(Ll, Sc, inst); \
break;
ATOMIC_BINOP_CASE(Add, Addu)
ATOMIC_BINOP_CASE(Sub, Subu)
@@ -1907,6 +1962,25 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ATOMIC_BINOP_CASE(Or, Or)
ATOMIC_BINOP_CASE(Xor, Xor)
#undef ATOMIC_BINOP_CASE
+#define ATOMIC_BINOP_CASE(op, inst) \
+ case kMips64Word64Atomic##op##Uint8: \
+ ASSEMBLE_ATOMIC_BINOP_EXT(Lld, Scd, false, 8, inst); \
+ break; \
+ case kMips64Word64Atomic##op##Uint16: \
+ ASSEMBLE_ATOMIC_BINOP_EXT(Lld, Scd, false, 16, inst); \
+ break; \
+ case kMips64Word64Atomic##op##Uint32: \
+ ASSEMBLE_ATOMIC_BINOP_EXT(Lld, Scd, false, 32, inst); \
+ break; \
+ case kMips64Word64Atomic##op##Uint64: \
+ ASSEMBLE_ATOMIC_BINOP(Lld, Scd, inst); \
+ break;
+ ATOMIC_BINOP_CASE(Add, Daddu)
+ ATOMIC_BINOP_CASE(Sub, Dsubu)
+ ATOMIC_BINOP_CASE(And, And)
+ ATOMIC_BINOP_CASE(Or, Or)
+ ATOMIC_BINOP_CASE(Xor, Xor)
+#undef ATOMIC_BINOP_CASE
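
The second ATOMIC_BINOP_CASE table stamps out the Word64 cases: Add and Sub map
to the doubleword Daddu/Dsubu, while the bitwise ops reuse the width-agnostic
And/Or/Xor. For example, ATOMIC_BINOP_CASE(Sub, Dsubu) expands to cases such as:

    case kMips64Word64AtomicSubUint64:
      ASSEMBLE_ATOMIC_BINOP(Lld, Scd, Dsubu);
      break;
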
case kMips64AssertEqual:
__ Assert(eq, static_cast<AbortReason>(i.InputOperand(2).immediate()),
i.InputRegister(0), Operand(i.InputRegister(1)));
@@ -2496,7 +2570,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Label all_false;
__ BranchMSA(&all_false, MSA_BRANCH_V, all_zero,
i.InputSimd128Register(0), USE_DELAY_SLOT);
- __ li(dst, 0); // branch delay slot
+ __ li(dst, 0l); // branch delay slot
__ li(dst, -1);
__ bind(&all_false);
break;
@@ -2508,7 +2582,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ BranchMSA(&all_true, MSA_BRANCH_W, all_not_zero,
i.InputSimd128Register(0), USE_DELAY_SLOT);
__ li(dst, -1); // branch delay slot
- __ li(dst, 0);
+ __ li(dst, 0l);
__ bind(&all_true);
break;
}
@@ -2519,7 +2593,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ BranchMSA(&all_true, MSA_BRANCH_H, all_not_zero,
i.InputSimd128Register(0), USE_DELAY_SLOT);
__ li(dst, -1); // branch delay slot
- __ li(dst, 0);
+ __ li(dst, 0l);
__ bind(&all_true);
break;
}
@@ -2530,7 +2604,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ BranchMSA(&all_true, MSA_BRANCH_B, all_not_zero,
i.InputSimd128Register(0), USE_DELAY_SLOT);
__ li(dst, -1); // branch delay slot
- __ li(dst, 0);
+ __ li(dst, 0l);
__ bind(&all_true);
break;
}
@@ -3174,6 +3248,8 @@ void CodeGenerator::AssembleBranchPoisoning(FlagsCondition condition,
}
}
+#undef UNSUPPORTED_COND
+
void CodeGenerator::AssembleArchDeoptBranch(Instruction* instr,
BranchInfo* branch) {
AssembleArchBranch(instr, branch);
@@ -3621,9 +3697,12 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
case Constant::kExternalReference:
__ li(dst, src.ToExternalReference());
break;
+ case Constant::kDelayedStringConstant:
+ __ li(dst, src.ToDelayedStringConstant());
+ break;
case Constant::kHeapObject: {
Handle<HeapObject> src_object = src.ToHeapObject();
- Heap::RootListIndex index;
+ RootIndex index;
if (IsMaterializableFromRoot(src_object, &index)) {
__ LoadRoot(dst, index);
} else {
@@ -3823,6 +3902,19 @@ void CodeGenerator::AssembleJumpTable(Label** targets, size_t target_count) {
UNREACHABLE();
}
+#undef ASSEMBLE_ATOMIC_LOAD_INTEGER
+#undef ASSEMBLE_ATOMIC_STORE_INTEGER
+#undef ASSEMBLE_ATOMIC_BINOP
+#undef ASSEMBLE_ATOMIC_BINOP_EXT
+#undef ASSEMBLE_ATOMIC_EXCHANGE_INTEGER
+#undef ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT
+#undef ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER
+#undef ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT
+#undef ASSEMBLE_IEEE754_BINOP
+#undef ASSEMBLE_IEEE754_UNOP
+
+#undef TRACE_MSG
+#undef TRACE_UNIMPL
#undef __
} // namespace compiler
diff --git a/deps/v8/src/compiler/mips64/instruction-codes-mips64.h b/deps/v8/src/compiler/mips64/instruction-codes-mips64.h
index a50d294013..7ea707db53 100644
--- a/deps/v8/src/compiler/mips64/instruction-codes-mips64.h
+++ b/deps/v8/src/compiler/mips64/instruction-codes-mips64.h
@@ -11,302 +11,338 @@ namespace compiler {
// MIPS64-specific opcodes that specify which assembly sequence to emit.
// Most opcodes specify a single instruction.
-#define TARGET_ARCH_OPCODE_LIST(V) \
- V(Mips64Add) \
- V(Mips64Dadd) \
- V(Mips64DaddOvf) \
- V(Mips64Sub) \
- V(Mips64Dsub) \
- V(Mips64DsubOvf) \
- V(Mips64Mul) \
- V(Mips64MulOvf) \
- V(Mips64MulHigh) \
- V(Mips64DMulHigh) \
- V(Mips64MulHighU) \
- V(Mips64Dmul) \
- V(Mips64Div) \
- V(Mips64Ddiv) \
- V(Mips64DivU) \
- V(Mips64DdivU) \
- V(Mips64Mod) \
- V(Mips64Dmod) \
- V(Mips64ModU) \
- V(Mips64DmodU) \
- V(Mips64And) \
- V(Mips64And32) \
- V(Mips64Or) \
- V(Mips64Or32) \
- V(Mips64Nor) \
- V(Mips64Nor32) \
- V(Mips64Xor) \
- V(Mips64Xor32) \
- V(Mips64Clz) \
- V(Mips64Lsa) \
- V(Mips64Dlsa) \
- V(Mips64Shl) \
- V(Mips64Shr) \
- V(Mips64Sar) \
- V(Mips64Ext) \
- V(Mips64Ins) \
- V(Mips64Dext) \
- V(Mips64Dins) \
- V(Mips64Dclz) \
- V(Mips64Ctz) \
- V(Mips64Dctz) \
- V(Mips64Popcnt) \
- V(Mips64Dpopcnt) \
- V(Mips64Dshl) \
- V(Mips64Dshr) \
- V(Mips64Dsar) \
- V(Mips64Ror) \
- V(Mips64Dror) \
- V(Mips64Mov) \
- V(Mips64Tst) \
- V(Mips64Cmp) \
- V(Mips64CmpS) \
- V(Mips64AddS) \
- V(Mips64SubS) \
- V(Mips64MulS) \
- V(Mips64DivS) \
- V(Mips64ModS) \
- V(Mips64AbsS) \
- V(Mips64NegS) \
- V(Mips64SqrtS) \
- V(Mips64MaxS) \
- V(Mips64MinS) \
- V(Mips64CmpD) \
- V(Mips64AddD) \
- V(Mips64SubD) \
- V(Mips64MulD) \
- V(Mips64DivD) \
- V(Mips64ModD) \
- V(Mips64AbsD) \
- V(Mips64NegD) \
- V(Mips64SqrtD) \
- V(Mips64MaxD) \
- V(Mips64MinD) \
- V(Mips64Float64RoundDown) \
- V(Mips64Float64RoundTruncate) \
- V(Mips64Float64RoundUp) \
- V(Mips64Float64RoundTiesEven) \
- V(Mips64Float32RoundDown) \
- V(Mips64Float32RoundTruncate) \
- V(Mips64Float32RoundUp) \
- V(Mips64Float32RoundTiesEven) \
- V(Mips64CvtSD) \
- V(Mips64CvtDS) \
- V(Mips64TruncWD) \
- V(Mips64RoundWD) \
- V(Mips64FloorWD) \
- V(Mips64CeilWD) \
- V(Mips64TruncWS) \
- V(Mips64RoundWS) \
- V(Mips64FloorWS) \
- V(Mips64CeilWS) \
- V(Mips64TruncLS) \
- V(Mips64TruncLD) \
- V(Mips64TruncUwD) \
- V(Mips64TruncUwS) \
- V(Mips64TruncUlS) \
- V(Mips64TruncUlD) \
- V(Mips64CvtDW) \
- V(Mips64CvtSL) \
- V(Mips64CvtSW) \
- V(Mips64CvtSUw) \
- V(Mips64CvtSUl) \
- V(Mips64CvtDL) \
- V(Mips64CvtDUw) \
- V(Mips64CvtDUl) \
- V(Mips64Lb) \
- V(Mips64Lbu) \
- V(Mips64Sb) \
- V(Mips64Lh) \
- V(Mips64Ulh) \
- V(Mips64Lhu) \
- V(Mips64Ulhu) \
- V(Mips64Sh) \
- V(Mips64Ush) \
- V(Mips64Ld) \
- V(Mips64Uld) \
- V(Mips64Lw) \
- V(Mips64Ulw) \
- V(Mips64Lwu) \
- V(Mips64Ulwu) \
- V(Mips64Sw) \
- V(Mips64Usw) \
- V(Mips64Sd) \
- V(Mips64Usd) \
- V(Mips64Lwc1) \
- V(Mips64Ulwc1) \
- V(Mips64Swc1) \
- V(Mips64Uswc1) \
- V(Mips64Ldc1) \
- V(Mips64Uldc1) \
- V(Mips64Sdc1) \
- V(Mips64Usdc1) \
- V(Mips64BitcastDL) \
- V(Mips64BitcastLD) \
- V(Mips64Float64ExtractLowWord32) \
- V(Mips64Float64ExtractHighWord32) \
- V(Mips64Float64InsertLowWord32) \
- V(Mips64Float64InsertHighWord32) \
- V(Mips64Float32Max) \
- V(Mips64Float64Max) \
- V(Mips64Float32Min) \
- V(Mips64Float64Min) \
- V(Mips64Float64SilenceNaN) \
- V(Mips64Push) \
- V(Mips64Peek) \
- V(Mips64StoreToStackSlot) \
- V(Mips64ByteSwap64) \
- V(Mips64ByteSwap32) \
- V(Mips64StackClaim) \
- V(Mips64Seb) \
- V(Mips64Seh) \
- V(Mips64AssertEqual) \
- V(Mips64S128Zero) \
- V(Mips64I32x4Splat) \
- V(Mips64I32x4ExtractLane) \
- V(Mips64I32x4ReplaceLane) \
- V(Mips64I32x4Add) \
- V(Mips64I32x4AddHoriz) \
- V(Mips64I32x4Sub) \
- V(Mips64F32x4Splat) \
- V(Mips64F32x4ExtractLane) \
- V(Mips64F32x4ReplaceLane) \
- V(Mips64F32x4SConvertI32x4) \
- V(Mips64F32x4UConvertI32x4) \
- V(Mips64I32x4Mul) \
- V(Mips64I32x4MaxS) \
- V(Mips64I32x4MinS) \
- V(Mips64I32x4Eq) \
- V(Mips64I32x4Ne) \
- V(Mips64I32x4Shl) \
- V(Mips64I32x4ShrS) \
- V(Mips64I32x4ShrU) \
- V(Mips64I32x4MaxU) \
- V(Mips64I32x4MinU) \
- V(Mips64F32x4Abs) \
- V(Mips64F32x4Neg) \
- V(Mips64F32x4RecipApprox) \
- V(Mips64F32x4RecipSqrtApprox) \
- V(Mips64F32x4Add) \
- V(Mips64F32x4AddHoriz) \
- V(Mips64F32x4Sub) \
- V(Mips64F32x4Mul) \
- V(Mips64F32x4Max) \
- V(Mips64F32x4Min) \
- V(Mips64F32x4Eq) \
- V(Mips64F32x4Ne) \
- V(Mips64F32x4Lt) \
- V(Mips64F32x4Le) \
- V(Mips64I32x4SConvertF32x4) \
- V(Mips64I32x4UConvertF32x4) \
- V(Mips64I32x4Neg) \
- V(Mips64I32x4GtS) \
- V(Mips64I32x4GeS) \
- V(Mips64I32x4GtU) \
- V(Mips64I32x4GeU) \
- V(Mips64I16x8Splat) \
- V(Mips64I16x8ExtractLane) \
- V(Mips64I16x8ReplaceLane) \
- V(Mips64I16x8Neg) \
- V(Mips64I16x8Shl) \
- V(Mips64I16x8ShrS) \
- V(Mips64I16x8ShrU) \
- V(Mips64I16x8Add) \
- V(Mips64I16x8AddSaturateS) \
- V(Mips64I16x8AddHoriz) \
- V(Mips64I16x8Sub) \
- V(Mips64I16x8SubSaturateS) \
- V(Mips64I16x8Mul) \
- V(Mips64I16x8MaxS) \
- V(Mips64I16x8MinS) \
- V(Mips64I16x8Eq) \
- V(Mips64I16x8Ne) \
- V(Mips64I16x8GtS) \
- V(Mips64I16x8GeS) \
- V(Mips64I16x8AddSaturateU) \
- V(Mips64I16x8SubSaturateU) \
- V(Mips64I16x8MaxU) \
- V(Mips64I16x8MinU) \
- V(Mips64I16x8GtU) \
- V(Mips64I16x8GeU) \
- V(Mips64I8x16Splat) \
- V(Mips64I8x16ExtractLane) \
- V(Mips64I8x16ReplaceLane) \
- V(Mips64I8x16Neg) \
- V(Mips64I8x16Shl) \
- V(Mips64I8x16ShrS) \
- V(Mips64I8x16Add) \
- V(Mips64I8x16AddSaturateS) \
- V(Mips64I8x16Sub) \
- V(Mips64I8x16SubSaturateS) \
- V(Mips64I8x16Mul) \
- V(Mips64I8x16MaxS) \
- V(Mips64I8x16MinS) \
- V(Mips64I8x16Eq) \
- V(Mips64I8x16Ne) \
- V(Mips64I8x16GtS) \
- V(Mips64I8x16GeS) \
- V(Mips64I8x16ShrU) \
- V(Mips64I8x16AddSaturateU) \
- V(Mips64I8x16SubSaturateU) \
- V(Mips64I8x16MaxU) \
- V(Mips64I8x16MinU) \
- V(Mips64I8x16GtU) \
- V(Mips64I8x16GeU) \
- V(Mips64S128And) \
- V(Mips64S128Or) \
- V(Mips64S128Xor) \
- V(Mips64S128Not) \
- V(Mips64S128Select) \
- V(Mips64S1x4AnyTrue) \
- V(Mips64S1x4AllTrue) \
- V(Mips64S1x8AnyTrue) \
- V(Mips64S1x8AllTrue) \
- V(Mips64S1x16AnyTrue) \
- V(Mips64S1x16AllTrue) \
- V(Mips64S32x4InterleaveRight) \
- V(Mips64S32x4InterleaveLeft) \
- V(Mips64S32x4PackEven) \
- V(Mips64S32x4PackOdd) \
- V(Mips64S32x4InterleaveEven) \
- V(Mips64S32x4InterleaveOdd) \
- V(Mips64S32x4Shuffle) \
- V(Mips64S16x8InterleaveRight) \
- V(Mips64S16x8InterleaveLeft) \
- V(Mips64S16x8PackEven) \
- V(Mips64S16x8PackOdd) \
- V(Mips64S16x8InterleaveEven) \
- V(Mips64S16x8InterleaveOdd) \
- V(Mips64S16x4Reverse) \
- V(Mips64S16x2Reverse) \
- V(Mips64S8x16InterleaveRight) \
- V(Mips64S8x16InterleaveLeft) \
- V(Mips64S8x16PackEven) \
- V(Mips64S8x16PackOdd) \
- V(Mips64S8x16InterleaveEven) \
- V(Mips64S8x16InterleaveOdd) \
- V(Mips64S8x16Shuffle) \
- V(Mips64S8x16Concat) \
- V(Mips64S8x8Reverse) \
- V(Mips64S8x4Reverse) \
- V(Mips64S8x2Reverse) \
- V(Mips64MsaLd) \
- V(Mips64MsaSt) \
- V(Mips64I32x4SConvertI16x8Low) \
- V(Mips64I32x4SConvertI16x8High) \
- V(Mips64I32x4UConvertI16x8Low) \
- V(Mips64I32x4UConvertI16x8High) \
- V(Mips64I16x8SConvertI8x16Low) \
- V(Mips64I16x8SConvertI8x16High) \
- V(Mips64I16x8SConvertI32x4) \
- V(Mips64I16x8UConvertI32x4) \
- V(Mips64I16x8UConvertI8x16Low) \
- V(Mips64I16x8UConvertI8x16High) \
- V(Mips64I8x16SConvertI16x8) \
- V(Mips64I8x16UConvertI16x8)
+#define TARGET_ARCH_OPCODE_LIST(V) \
+ V(Mips64Add) \
+ V(Mips64Dadd) \
+ V(Mips64DaddOvf) \
+ V(Mips64Sub) \
+ V(Mips64Dsub) \
+ V(Mips64DsubOvf) \
+ V(Mips64Mul) \
+ V(Mips64MulOvf) \
+ V(Mips64MulHigh) \
+ V(Mips64DMulHigh) \
+ V(Mips64MulHighU) \
+ V(Mips64Dmul) \
+ V(Mips64Div) \
+ V(Mips64Ddiv) \
+ V(Mips64DivU) \
+ V(Mips64DdivU) \
+ V(Mips64Mod) \
+ V(Mips64Dmod) \
+ V(Mips64ModU) \
+ V(Mips64DmodU) \
+ V(Mips64And) \
+ V(Mips64And32) \
+ V(Mips64Or) \
+ V(Mips64Or32) \
+ V(Mips64Nor) \
+ V(Mips64Nor32) \
+ V(Mips64Xor) \
+ V(Mips64Xor32) \
+ V(Mips64Clz) \
+ V(Mips64Lsa) \
+ V(Mips64Dlsa) \
+ V(Mips64Shl) \
+ V(Mips64Shr) \
+ V(Mips64Sar) \
+ V(Mips64Ext) \
+ V(Mips64Ins) \
+ V(Mips64Dext) \
+ V(Mips64Dins) \
+ V(Mips64Dclz) \
+ V(Mips64Ctz) \
+ V(Mips64Dctz) \
+ V(Mips64Popcnt) \
+ V(Mips64Dpopcnt) \
+ V(Mips64Dshl) \
+ V(Mips64Dshr) \
+ V(Mips64Dsar) \
+ V(Mips64Ror) \
+ V(Mips64Dror) \
+ V(Mips64Mov) \
+ V(Mips64Tst) \
+ V(Mips64Cmp) \
+ V(Mips64CmpS) \
+ V(Mips64AddS) \
+ V(Mips64SubS) \
+ V(Mips64MulS) \
+ V(Mips64DivS) \
+ V(Mips64ModS) \
+ V(Mips64AbsS) \
+ V(Mips64NegS) \
+ V(Mips64SqrtS) \
+ V(Mips64MaxS) \
+ V(Mips64MinS) \
+ V(Mips64CmpD) \
+ V(Mips64AddD) \
+ V(Mips64SubD) \
+ V(Mips64MulD) \
+ V(Mips64DivD) \
+ V(Mips64ModD) \
+ V(Mips64AbsD) \
+ V(Mips64NegD) \
+ V(Mips64SqrtD) \
+ V(Mips64MaxD) \
+ V(Mips64MinD) \
+ V(Mips64Float64RoundDown) \
+ V(Mips64Float64RoundTruncate) \
+ V(Mips64Float64RoundUp) \
+ V(Mips64Float64RoundTiesEven) \
+ V(Mips64Float32RoundDown) \
+ V(Mips64Float32RoundTruncate) \
+ V(Mips64Float32RoundUp) \
+ V(Mips64Float32RoundTiesEven) \
+ V(Mips64CvtSD) \
+ V(Mips64CvtDS) \
+ V(Mips64TruncWD) \
+ V(Mips64RoundWD) \
+ V(Mips64FloorWD) \
+ V(Mips64CeilWD) \
+ V(Mips64TruncWS) \
+ V(Mips64RoundWS) \
+ V(Mips64FloorWS) \
+ V(Mips64CeilWS) \
+ V(Mips64TruncLS) \
+ V(Mips64TruncLD) \
+ V(Mips64TruncUwD) \
+ V(Mips64TruncUwS) \
+ V(Mips64TruncUlS) \
+ V(Mips64TruncUlD) \
+ V(Mips64CvtDW) \
+ V(Mips64CvtSL) \
+ V(Mips64CvtSW) \
+ V(Mips64CvtSUw) \
+ V(Mips64CvtSUl) \
+ V(Mips64CvtDL) \
+ V(Mips64CvtDUw) \
+ V(Mips64CvtDUl) \
+ V(Mips64Lb) \
+ V(Mips64Lbu) \
+ V(Mips64Sb) \
+ V(Mips64Lh) \
+ V(Mips64Ulh) \
+ V(Mips64Lhu) \
+ V(Mips64Ulhu) \
+ V(Mips64Sh) \
+ V(Mips64Ush) \
+ V(Mips64Ld) \
+ V(Mips64Uld) \
+ V(Mips64Lw) \
+ V(Mips64Ulw) \
+ V(Mips64Lwu) \
+ V(Mips64Ulwu) \
+ V(Mips64Sw) \
+ V(Mips64Usw) \
+ V(Mips64Sd) \
+ V(Mips64Usd) \
+ V(Mips64Lwc1) \
+ V(Mips64Ulwc1) \
+ V(Mips64Swc1) \
+ V(Mips64Uswc1) \
+ V(Mips64Ldc1) \
+ V(Mips64Uldc1) \
+ V(Mips64Sdc1) \
+ V(Mips64Usdc1) \
+ V(Mips64BitcastDL) \
+ V(Mips64BitcastLD) \
+ V(Mips64Float64ExtractLowWord32) \
+ V(Mips64Float64ExtractHighWord32) \
+ V(Mips64Float64InsertLowWord32) \
+ V(Mips64Float64InsertHighWord32) \
+ V(Mips64Float32Max) \
+ V(Mips64Float64Max) \
+ V(Mips64Float32Min) \
+ V(Mips64Float64Min) \
+ V(Mips64Float64SilenceNaN) \
+ V(Mips64Push) \
+ V(Mips64Peek) \
+ V(Mips64StoreToStackSlot) \
+ V(Mips64ByteSwap64) \
+ V(Mips64ByteSwap32) \
+ V(Mips64StackClaim) \
+ V(Mips64Seb) \
+ V(Mips64Seh) \
+ V(Mips64AssertEqual) \
+ V(Mips64S128Zero) \
+ V(Mips64I32x4Splat) \
+ V(Mips64I32x4ExtractLane) \
+ V(Mips64I32x4ReplaceLane) \
+ V(Mips64I32x4Add) \
+ V(Mips64I32x4AddHoriz) \
+ V(Mips64I32x4Sub) \
+ V(Mips64F32x4Splat) \
+ V(Mips64F32x4ExtractLane) \
+ V(Mips64F32x4ReplaceLane) \
+ V(Mips64F32x4SConvertI32x4) \
+ V(Mips64F32x4UConvertI32x4) \
+ V(Mips64I32x4Mul) \
+ V(Mips64I32x4MaxS) \
+ V(Mips64I32x4MinS) \
+ V(Mips64I32x4Eq) \
+ V(Mips64I32x4Ne) \
+ V(Mips64I32x4Shl) \
+ V(Mips64I32x4ShrS) \
+ V(Mips64I32x4ShrU) \
+ V(Mips64I32x4MaxU) \
+ V(Mips64I32x4MinU) \
+ V(Mips64F32x4Abs) \
+ V(Mips64F32x4Neg) \
+ V(Mips64F32x4RecipApprox) \
+ V(Mips64F32x4RecipSqrtApprox) \
+ V(Mips64F32x4Add) \
+ V(Mips64F32x4AddHoriz) \
+ V(Mips64F32x4Sub) \
+ V(Mips64F32x4Mul) \
+ V(Mips64F32x4Max) \
+ V(Mips64F32x4Min) \
+ V(Mips64F32x4Eq) \
+ V(Mips64F32x4Ne) \
+ V(Mips64F32x4Lt) \
+ V(Mips64F32x4Le) \
+ V(Mips64I32x4SConvertF32x4) \
+ V(Mips64I32x4UConvertF32x4) \
+ V(Mips64I32x4Neg) \
+ V(Mips64I32x4GtS) \
+ V(Mips64I32x4GeS) \
+ V(Mips64I32x4GtU) \
+ V(Mips64I32x4GeU) \
+ V(Mips64I16x8Splat) \
+ V(Mips64I16x8ExtractLane) \
+ V(Mips64I16x8ReplaceLane) \
+ V(Mips64I16x8Neg) \
+ V(Mips64I16x8Shl) \
+ V(Mips64I16x8ShrS) \
+ V(Mips64I16x8ShrU) \
+ V(Mips64I16x8Add) \
+ V(Mips64I16x8AddSaturateS) \
+ V(Mips64I16x8AddHoriz) \
+ V(Mips64I16x8Sub) \
+ V(Mips64I16x8SubSaturateS) \
+ V(Mips64I16x8Mul) \
+ V(Mips64I16x8MaxS) \
+ V(Mips64I16x8MinS) \
+ V(Mips64I16x8Eq) \
+ V(Mips64I16x8Ne) \
+ V(Mips64I16x8GtS) \
+ V(Mips64I16x8GeS) \
+ V(Mips64I16x8AddSaturateU) \
+ V(Mips64I16x8SubSaturateU) \
+ V(Mips64I16x8MaxU) \
+ V(Mips64I16x8MinU) \
+ V(Mips64I16x8GtU) \
+ V(Mips64I16x8GeU) \
+ V(Mips64I8x16Splat) \
+ V(Mips64I8x16ExtractLane) \
+ V(Mips64I8x16ReplaceLane) \
+ V(Mips64I8x16Neg) \
+ V(Mips64I8x16Shl) \
+ V(Mips64I8x16ShrS) \
+ V(Mips64I8x16Add) \
+ V(Mips64I8x16AddSaturateS) \
+ V(Mips64I8x16Sub) \
+ V(Mips64I8x16SubSaturateS) \
+ V(Mips64I8x16Mul) \
+ V(Mips64I8x16MaxS) \
+ V(Mips64I8x16MinS) \
+ V(Mips64I8x16Eq) \
+ V(Mips64I8x16Ne) \
+ V(Mips64I8x16GtS) \
+ V(Mips64I8x16GeS) \
+ V(Mips64I8x16ShrU) \
+ V(Mips64I8x16AddSaturateU) \
+ V(Mips64I8x16SubSaturateU) \
+ V(Mips64I8x16MaxU) \
+ V(Mips64I8x16MinU) \
+ V(Mips64I8x16GtU) \
+ V(Mips64I8x16GeU) \
+ V(Mips64S128And) \
+ V(Mips64S128Or) \
+ V(Mips64S128Xor) \
+ V(Mips64S128Not) \
+ V(Mips64S128Select) \
+ V(Mips64S1x4AnyTrue) \
+ V(Mips64S1x4AllTrue) \
+ V(Mips64S1x8AnyTrue) \
+ V(Mips64S1x8AllTrue) \
+ V(Mips64S1x16AnyTrue) \
+ V(Mips64S1x16AllTrue) \
+ V(Mips64S32x4InterleaveRight) \
+ V(Mips64S32x4InterleaveLeft) \
+ V(Mips64S32x4PackEven) \
+ V(Mips64S32x4PackOdd) \
+ V(Mips64S32x4InterleaveEven) \
+ V(Mips64S32x4InterleaveOdd) \
+ V(Mips64S32x4Shuffle) \
+ V(Mips64S16x8InterleaveRight) \
+ V(Mips64S16x8InterleaveLeft) \
+ V(Mips64S16x8PackEven) \
+ V(Mips64S16x8PackOdd) \
+ V(Mips64S16x8InterleaveEven) \
+ V(Mips64S16x8InterleaveOdd) \
+ V(Mips64S16x4Reverse) \
+ V(Mips64S16x2Reverse) \
+ V(Mips64S8x16InterleaveRight) \
+ V(Mips64S8x16InterleaveLeft) \
+ V(Mips64S8x16PackEven) \
+ V(Mips64S8x16PackOdd) \
+ V(Mips64S8x16InterleaveEven) \
+ V(Mips64S8x16InterleaveOdd) \
+ V(Mips64S8x16Shuffle) \
+ V(Mips64S8x16Concat) \
+ V(Mips64S8x8Reverse) \
+ V(Mips64S8x4Reverse) \
+ V(Mips64S8x2Reverse) \
+ V(Mips64MsaLd) \
+ V(Mips64MsaSt) \
+ V(Mips64I32x4SConvertI16x8Low) \
+ V(Mips64I32x4SConvertI16x8High) \
+ V(Mips64I32x4UConvertI16x8Low) \
+ V(Mips64I32x4UConvertI16x8High) \
+ V(Mips64I16x8SConvertI8x16Low) \
+ V(Mips64I16x8SConvertI8x16High) \
+ V(Mips64I16x8SConvertI32x4) \
+ V(Mips64I16x8UConvertI32x4) \
+ V(Mips64I16x8UConvertI8x16Low) \
+ V(Mips64I16x8UConvertI8x16High) \
+ V(Mips64I8x16SConvertI16x8) \
+ V(Mips64I8x16UConvertI16x8) \
+ V(Mips64Word64AtomicLoadUint8) \
+ V(Mips64Word64AtomicLoadUint16) \
+ V(Mips64Word64AtomicLoadUint32) \
+ V(Mips64Word64AtomicLoadUint64) \
+ V(Mips64Word64AtomicStoreWord8) \
+ V(Mips64Word64AtomicStoreWord16) \
+ V(Mips64Word64AtomicStoreWord32) \
+ V(Mips64Word64AtomicStoreWord64) \
+ V(Mips64Word64AtomicAddUint8) \
+ V(Mips64Word64AtomicAddUint16) \
+ V(Mips64Word64AtomicAddUint32) \
+ V(Mips64Word64AtomicAddUint64) \
+ V(Mips64Word64AtomicSubUint8) \
+ V(Mips64Word64AtomicSubUint16) \
+ V(Mips64Word64AtomicSubUint32) \
+ V(Mips64Word64AtomicSubUint64) \
+ V(Mips64Word64AtomicAndUint8) \
+ V(Mips64Word64AtomicAndUint16) \
+ V(Mips64Word64AtomicAndUint32) \
+ V(Mips64Word64AtomicAndUint64) \
+ V(Mips64Word64AtomicOrUint8) \
+ V(Mips64Word64AtomicOrUint16) \
+ V(Mips64Word64AtomicOrUint32) \
+ V(Mips64Word64AtomicOrUint64) \
+ V(Mips64Word64AtomicXorUint8) \
+ V(Mips64Word64AtomicXorUint16) \
+ V(Mips64Word64AtomicXorUint32) \
+ V(Mips64Word64AtomicXorUint64) \
+ V(Mips64Word64AtomicExchangeUint8) \
+ V(Mips64Word64AtomicExchangeUint16) \
+ V(Mips64Word64AtomicExchangeUint32) \
+ V(Mips64Word64AtomicExchangeUint64) \
+ V(Mips64Word64AtomicCompareExchangeUint8) \
+ V(Mips64Word64AtomicCompareExchangeUint16) \
+ V(Mips64Word64AtomicCompareExchangeUint32) \
+ V(Mips64Word64AtomicCompareExchangeUint64)
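
TARGET_ARCH_OPCODE_LIST is an X-macro: each consumer supplies its own V to
stamp out enum values, name strings, or switch cases for every opcode, which is
why listing the new Word64 atomic opcodes here is enough to make them visible
to the rest of the backend. An illustrative consumer (the real enum lives in
the shared instruction-codes.h, not in this header):

    #define DECLARE_OPCODE(Name) k##Name,
    enum ExampleArchOpcode { TARGET_ARCH_OPCODE_LIST(DECLARE_OPCODE) };
    #undef DECLARE_OPCODE
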
// Addressing modes represent the "shape" of inputs to an instruction.
// Many instructions support multiple addressing modes. Addressing modes
diff --git a/deps/v8/src/compiler/mips64/instruction-scheduler-mips64.cc b/deps/v8/src/compiler/mips64/instruction-scheduler-mips64.cc
index b0f6d65bfe..8fe669fe02 100644
--- a/deps/v8/src/compiler/mips64/instruction-scheduler-mips64.cc
+++ b/deps/v8/src/compiler/mips64/instruction-scheduler-mips64.cc
@@ -293,6 +293,11 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kMips64Ulw:
case kMips64Ulwu:
case kMips64Ulwc1:
+ case kMips64Word64AtomicLoadUint8:
+ case kMips64Word64AtomicLoadUint16:
+ case kMips64Word64AtomicLoadUint32:
+ case kMips64Word64AtomicLoadUint64:
+
return kIsLoadOperation;
case kMips64ModD:
@@ -312,6 +317,38 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kMips64Ush:
case kMips64Usw:
case kMips64Uswc1:
+ case kMips64Word64AtomicStoreWord8:
+ case kMips64Word64AtomicStoreWord16:
+ case kMips64Word64AtomicStoreWord32:
+ case kMips64Word64AtomicStoreWord64:
+ case kMips64Word64AtomicAddUint8:
+ case kMips64Word64AtomicAddUint16:
+ case kMips64Word64AtomicAddUint32:
+ case kMips64Word64AtomicAddUint64:
+ case kMips64Word64AtomicSubUint8:
+ case kMips64Word64AtomicSubUint16:
+ case kMips64Word64AtomicSubUint32:
+ case kMips64Word64AtomicSubUint64:
+ case kMips64Word64AtomicAndUint8:
+ case kMips64Word64AtomicAndUint16:
+ case kMips64Word64AtomicAndUint32:
+ case kMips64Word64AtomicAndUint64:
+ case kMips64Word64AtomicOrUint8:
+ case kMips64Word64AtomicOrUint16:
+ case kMips64Word64AtomicOrUint32:
+ case kMips64Word64AtomicOrUint64:
+ case kMips64Word64AtomicXorUint8:
+ case kMips64Word64AtomicXorUint16:
+ case kMips64Word64AtomicXorUint32:
+ case kMips64Word64AtomicXorUint64:
+ case kMips64Word64AtomicExchangeUint8:
+ case kMips64Word64AtomicExchangeUint16:
+ case kMips64Word64AtomicExchangeUint32:
+ case kMips64Word64AtomicExchangeUint64:
+ case kMips64Word64AtomicCompareExchangeUint8:
+ case kMips64Word64AtomicCompareExchangeUint16:
+ case kMips64Word64AtomicCompareExchangeUint32:
+ case kMips64Word64AtomicCompareExchangeUint64:
return kHasSideEffect;
#define CASE(Name) case k##Name:
diff --git a/deps/v8/src/compiler/mips64/instruction-selector-mips64.cc b/deps/v8/src/compiler/mips64/instruction-selector-mips64.cc
index 9f9aebc145..f27ad218fd 100644
--- a/deps/v8/src/compiler/mips64/instruction-selector-mips64.cc
+++ b/deps/v8/src/compiler/mips64/instruction-selector-mips64.cc
@@ -1160,6 +1160,9 @@ void InstructionSelector::VisitChangeInt32ToFloat64(Node* node) {
VisitRR(this, kMips64CvtDW, node);
}
+void InstructionSelector::VisitChangeInt64ToFloat64(Node* node) {
+ VisitRR(this, kMips64CvtDL, node);
+}
void InstructionSelector::VisitChangeUint32ToFloat64(Node* node) {
VisitRR(this, kMips64CvtDUw, node);
@@ -1239,6 +1242,9 @@ void InstructionSelector::VisitChangeFloat64ToInt32(Node* node) {
VisitRR(this, kMips64TruncWD, node);
}
+void InstructionSelector::VisitChangeFloat64ToInt64(Node* node) {
+ VisitRR(this, kMips64TruncLD, node);
+}
void InstructionSelector::VisitChangeFloat64ToUint32(Node* node) {
VisitRR(this, kMips64TruncUwD, node);
@@ -2022,6 +2028,119 @@ void EmitWordCompareZero(InstructionSelector* selector, Node* value,
g.TempImmediate(0), cont);
}
+void VisitAtomicLoad(InstructionSelector* selector, Node* node,
+ ArchOpcode opcode) {
+ Mips64OperandGenerator g(selector);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ if (g.CanBeImmediate(index, opcode)) {
+ selector->Emit(opcode | AddressingModeField::encode(kMode_MRI),
+ g.DefineAsRegister(node), g.UseRegister(base),
+ g.UseImmediate(index));
+ } else {
+ InstructionOperand addr_reg = g.TempRegister();
+ selector->Emit(kMips64Dadd | AddressingModeField::encode(kMode_None),
+ addr_reg, g.UseRegister(index), g.UseRegister(base));
+ // Emit desired load opcode, using temp addr_reg.
+ selector->Emit(opcode | AddressingModeField::encode(kMode_MRI),
+ g.DefineAsRegister(node), addr_reg, g.TempImmediate(0));
+ }
+}
+
+void VisitAtomicStore(InstructionSelector* selector, Node* node,
+ ArchOpcode opcode) {
+ Mips64OperandGenerator g(selector);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ Node* value = node->InputAt(2);
+
+ if (g.CanBeImmediate(index, opcode)) {
+ selector->Emit(opcode | AddressingModeField::encode(kMode_MRI),
+ g.NoOutput(), g.UseRegister(base), g.UseImmediate(index),
+ g.UseRegisterOrImmediateZero(value));
+ } else {
+ InstructionOperand addr_reg = g.TempRegister();
+ selector->Emit(kMips64Dadd | AddressingModeField::encode(kMode_None),
+ addr_reg, g.UseRegister(index), g.UseRegister(base));
+ // Emit desired store opcode, using temp addr_reg.
+ selector->Emit(opcode | AddressingModeField::encode(kMode_MRI),
+ g.NoOutput(), addr_reg, g.TempImmediate(0),
+ g.UseRegisterOrImmediateZero(value));
+ }
+}
+
+void VisitAtomicExchange(InstructionSelector* selector, Node* node,
+ ArchOpcode opcode) {
+ Mips64OperandGenerator g(selector);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ Node* value = node->InputAt(2);
+
+ AddressingMode addressing_mode = kMode_MRI;
+ InstructionOperand inputs[3];
+ size_t input_count = 0;
+ inputs[input_count++] = g.UseUniqueRegister(base);
+ inputs[input_count++] = g.UseUniqueRegister(index);
+ inputs[input_count++] = g.UseUniqueRegister(value);
+ InstructionOperand outputs[1];
+ outputs[0] = g.UseUniqueRegister(node);
+ InstructionOperand temp[3];
+ temp[0] = g.TempRegister();
+ temp[1] = g.TempRegister();
+ temp[2] = g.TempRegister();
+ InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
+ selector->Emit(code, 1, outputs, input_count, inputs, 3, temp);
+}
+
+void VisitAtomicCompareExchange(InstructionSelector* selector, Node* node,
+ ArchOpcode opcode) {
+ Mips64OperandGenerator g(selector);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ Node* old_value = node->InputAt(2);
+ Node* new_value = node->InputAt(3);
+
+ AddressingMode addressing_mode = kMode_MRI;
+ InstructionOperand inputs[4];
+ size_t input_count = 0;
+ inputs[input_count++] = g.UseUniqueRegister(base);
+ inputs[input_count++] = g.UseUniqueRegister(index);
+ inputs[input_count++] = g.UseUniqueRegister(old_value);
+ inputs[input_count++] = g.UseUniqueRegister(new_value);
+ InstructionOperand outputs[1];
+ outputs[0] = g.UseUniqueRegister(node);
+ InstructionOperand temp[3];
+ temp[0] = g.TempRegister();
+ temp[1] = g.TempRegister();
+ temp[2] = g.TempRegister();
+ InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
+ selector->Emit(code, 1, outputs, input_count, inputs, 3, temp);
+}
+
+void VisitAtomicBinop(InstructionSelector* selector, Node* node,
+ ArchOpcode opcode) {
+ Mips64OperandGenerator g(selector);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ Node* value = node->InputAt(2);
+
+ AddressingMode addressing_mode = kMode_MRI;
+ InstructionOperand inputs[3];
+ size_t input_count = 0;
+ inputs[input_count++] = g.UseUniqueRegister(base);
+ inputs[input_count++] = g.UseUniqueRegister(index);
+ inputs[input_count++] = g.UseUniqueRegister(value);
+ InstructionOperand outputs[1];
+ outputs[0] = g.UseUniqueRegister(node);
+ InstructionOperand temps[4];
+ temps[0] = g.TempRegister();
+ temps[1] = g.TempRegister();
+ temps[2] = g.TempRegister();
+ temps[3] = g.TempRegister();
+ InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
+ selector->Emit(code, 1, outputs, input_count, inputs, 4, temps);
+}
+
} // namespace
// Shared routine for word comparisons against zero.
@@ -2366,9 +2485,6 @@ void InstructionSelector::VisitFloat64InsertHighWord32(Node* node) {
void InstructionSelector::VisitWord32AtomicLoad(Node* node) {
LoadRepresentation load_rep = LoadRepresentationOf(node->op());
- Mips64OperandGenerator g(this);
- Node* base = node->InputAt(0);
- Node* index = node->InputAt(1);
ArchOpcode opcode = kArchNop;
switch (load_rep.representation()) {
case MachineRepresentation::kWord8:
@@ -2386,25 +2502,11 @@ void InstructionSelector::VisitWord32AtomicLoad(Node* node) {
UNREACHABLE();
return;
}
- if (g.CanBeImmediate(index, opcode)) {
- Emit(opcode | AddressingModeField::encode(kMode_MRI),
- g.DefineAsRegister(node), g.UseRegister(base), g.UseImmediate(index));
- } else {
- InstructionOperand addr_reg = g.TempRegister();
- Emit(kMips64Dadd | AddressingModeField::encode(kMode_None), addr_reg,
- g.UseRegister(index), g.UseRegister(base));
- // Emit desired load opcode, using temp addr_reg.
- Emit(opcode | AddressingModeField::encode(kMode_MRI),
- g.DefineAsRegister(node), addr_reg, g.TempImmediate(0));
- }
+ VisitAtomicLoad(this, node, opcode);
}
void InstructionSelector::VisitWord32AtomicStore(Node* node) {
MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
- Mips64OperandGenerator g(this);
- Node* base = node->InputAt(0);
- Node* index = node->InputAt(1);
- Node* value = node->InputAt(2);
ArchOpcode opcode = kArchNop;
switch (rep) {
case MachineRepresentation::kWord8:
@@ -2421,25 +2523,57 @@ void InstructionSelector::VisitWord32AtomicStore(Node* node) {
return;
}
- if (g.CanBeImmediate(index, opcode)) {
- Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
- g.UseRegister(base), g.UseImmediate(index),
- g.UseRegisterOrImmediateZero(value));
- } else {
- InstructionOperand addr_reg = g.TempRegister();
- Emit(kMips64Dadd | AddressingModeField::encode(kMode_None), addr_reg,
- g.UseRegister(index), g.UseRegister(base));
- // Emit desired store opcode, using temp addr_reg.
- Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
- addr_reg, g.TempImmediate(0), g.UseRegisterOrImmediateZero(value));
+ VisitAtomicStore(this, node, opcode);
+}
+
+void InstructionSelector::VisitWord64AtomicLoad(Node* node) {
+ LoadRepresentation load_rep = LoadRepresentationOf(node->op());
+ ArchOpcode opcode = kArchNop;
+ switch (load_rep.representation()) {
+ case MachineRepresentation::kWord8:
+ opcode = kMips64Word64AtomicLoadUint8;
+ break;
+ case MachineRepresentation::kWord16:
+ opcode = kMips64Word64AtomicLoadUint16;
+ break;
+ case MachineRepresentation::kWord32:
+ opcode = kMips64Word64AtomicLoadUint32;
+ break;
+ case MachineRepresentation::kWord64:
+ opcode = kMips64Word64AtomicLoadUint64;
+ break;
+ default:
+ UNREACHABLE();
+ return;
+ }
+ VisitAtomicLoad(this, node, opcode);
+}
+
+void InstructionSelector::VisitWord64AtomicStore(Node* node) {
+ MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
+ ArchOpcode opcode = kArchNop;
+ switch (rep) {
+ case MachineRepresentation::kWord8:
+ opcode = kMips64Word64AtomicStoreWord8;
+ break;
+ case MachineRepresentation::kWord16:
+ opcode = kMips64Word64AtomicStoreWord16;
+ break;
+ case MachineRepresentation::kWord32:
+ opcode = kMips64Word64AtomicStoreWord32;
+ break;
+ case MachineRepresentation::kWord64:
+ opcode = kMips64Word64AtomicStoreWord64;
+ break;
+ default:
+ UNREACHABLE();
+ return;
}
+
+ VisitAtomicStore(this, node, opcode);
}
void InstructionSelector::VisitWord32AtomicExchange(Node* node) {
- Mips64OperandGenerator g(this);
- Node* base = node->InputAt(0);
- Node* index = node->InputAt(1);
- Node* value = node->InputAt(2);
ArchOpcode opcode = kArchNop;
MachineType type = AtomicOpType(node->op());
if (type == MachineType::Int8()) {
@@ -2457,28 +2591,28 @@ void InstructionSelector::VisitWord32AtomicExchange(Node* node) {
return;
}
- AddressingMode addressing_mode = kMode_MRI;
- InstructionOperand inputs[3];
- size_t input_count = 0;
- inputs[input_count++] = g.UseUniqueRegister(base);
- inputs[input_count++] = g.UseUniqueRegister(index);
- inputs[input_count++] = g.UseUniqueRegister(value);
- InstructionOperand outputs[1];
- outputs[0] = g.UseUniqueRegister(node);
- InstructionOperand temp[3];
- temp[0] = g.TempRegister();
- temp[1] = g.TempRegister();
- temp[2] = g.TempRegister();
- InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
- Emit(code, 1, outputs, input_count, inputs, 3, temp);
+ VisitAtomicExchange(this, node, opcode);
+}
+
+void InstructionSelector::VisitWord64AtomicExchange(Node* node) {
+ ArchOpcode opcode = kArchNop;
+ MachineType type = AtomicOpType(node->op());
+ if (type == MachineType::Uint8()) {
+ opcode = kMips64Word64AtomicExchangeUint8;
+ } else if (type == MachineType::Uint16()) {
+ opcode = kMips64Word64AtomicExchangeUint16;
+ } else if (type == MachineType::Uint32()) {
+ opcode = kMips64Word64AtomicExchangeUint32;
+ } else if (type == MachineType::Uint64()) {
+ opcode = kMips64Word64AtomicExchangeUint64;
+ } else {
+ UNREACHABLE();
+ return;
+ }
+ VisitAtomicExchange(this, node, opcode);
}
void InstructionSelector::VisitWord32AtomicCompareExchange(Node* node) {
- Mips64OperandGenerator g(this);
- Node* base = node->InputAt(0);
- Node* index = node->InputAt(1);
- Node* old_value = node->InputAt(2);
- Node* new_value = node->InputAt(3);
ArchOpcode opcode = kArchNop;
MachineType type = AtomicOpType(node->op());
if (type == MachineType::Int8()) {
@@ -2496,30 +2630,29 @@ void InstructionSelector::VisitWord32AtomicCompareExchange(Node* node) {
return;
}
- AddressingMode addressing_mode = kMode_MRI;
- InstructionOperand inputs[4];
- size_t input_count = 0;
- inputs[input_count++] = g.UseUniqueRegister(base);
- inputs[input_count++] = g.UseUniqueRegister(index);
- inputs[input_count++] = g.UseUniqueRegister(old_value);
- inputs[input_count++] = g.UseUniqueRegister(new_value);
- InstructionOperand outputs[1];
- outputs[0] = g.UseUniqueRegister(node);
- InstructionOperand temp[3];
- temp[0] = g.TempRegister();
- temp[1] = g.TempRegister();
- temp[2] = g.TempRegister();
- InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
- Emit(code, 1, outputs, input_count, inputs, 3, temp);
+ VisitAtomicCompareExchange(this, node, opcode);
}
+void InstructionSelector::VisitWord64AtomicCompareExchange(Node* node) {
+ ArchOpcode opcode = kArchNop;
+ MachineType type = AtomicOpType(node->op());
+ if (type == MachineType::Uint8()) {
+ opcode = kMips64Word64AtomicCompareExchangeUint8;
+ } else if (type == MachineType::Uint16()) {
+ opcode = kMips64Word64AtomicCompareExchangeUint16;
+ } else if (type == MachineType::Uint32()) {
+ opcode = kMips64Word64AtomicCompareExchangeUint32;
+ } else if (type == MachineType::Uint64()) {
+ opcode = kMips64Word64AtomicCompareExchangeUint64;
+ } else {
+ UNREACHABLE();
+ return;
+ }
+ VisitAtomicCompareExchange(this, node, opcode);
+}
void InstructionSelector::VisitWord32AtomicBinaryOperation(
Node* node, ArchOpcode int8_op, ArchOpcode uint8_op, ArchOpcode int16_op,
ArchOpcode uint16_op, ArchOpcode word32_op) {
- Mips64OperandGenerator g(this);
- Node* base = node->InputAt(0);
- Node* index = node->InputAt(1);
- Node* value = node->InputAt(2);
ArchOpcode opcode = kArchNop;
MachineType type = AtomicOpType(node->op());
if (type == MachineType::Int8()) {
@@ -2537,21 +2670,7 @@ void InstructionSelector::VisitWord32AtomicBinaryOperation(
return;
}
- AddressingMode addressing_mode = kMode_MRI;
- InstructionOperand inputs[3];
- size_t input_count = 0;
- inputs[input_count++] = g.UseUniqueRegister(base);
- inputs[input_count++] = g.UseUniqueRegister(index);
- inputs[input_count++] = g.UseUniqueRegister(value);
- InstructionOperand outputs[1];
- outputs[0] = g.UseUniqueRegister(node);
- InstructionOperand temps[4];
- temps[0] = g.TempRegister();
- temps[1] = g.TempRegister();
- temps[2] = g.TempRegister();
- temps[3] = g.TempRegister();
- InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
- Emit(code, 1, outputs, input_count, inputs, 4, temps);
+ VisitAtomicBinop(this, node, opcode);
}
#define VISIT_ATOMIC_BINOP(op) \
@@ -2568,6 +2687,39 @@ VISIT_ATOMIC_BINOP(Or)
VISIT_ATOMIC_BINOP(Xor)
#undef VISIT_ATOMIC_BINOP
+void InstructionSelector::VisitWord64AtomicBinaryOperation(
+ Node* node, ArchOpcode uint8_op, ArchOpcode uint16_op, ArchOpcode uint32_op,
+ ArchOpcode uint64_op) {
+ ArchOpcode opcode = kArchNop;
+ MachineType type = AtomicOpType(node->op());
+ if (type == MachineType::Uint8()) {
+ opcode = uint8_op;
+ } else if (type == MachineType::Uint16()) {
+ opcode = uint16_op;
+ } else if (type == MachineType::Uint32()) {
+ opcode = uint32_op;
+ } else if (type == MachineType::Uint64()) {
+ opcode = uint64_op;
+ } else {
+ UNREACHABLE();
+ return;
+ }
+ VisitAtomicBinop(this, node, opcode);
+}
+
+#define VISIT_ATOMIC_BINOP(op) \
+ void InstructionSelector::VisitWord64Atomic##op(Node* node) { \
+ VisitWord64AtomicBinaryOperation( \
+ node, kMips64Word64Atomic##op##Uint8, kMips64Word64Atomic##op##Uint16, \
+ kMips64Word64Atomic##op##Uint32, kMips64Word64Atomic##op##Uint64); \
+ }
+VISIT_ATOMIC_BINOP(Add)
+VISIT_ATOMIC_BINOP(Sub)
+VISIT_ATOMIC_BINOP(And)
+VISIT_ATOMIC_BINOP(Or)
+VISIT_ATOMIC_BINOP(Xor)
+#undef VISIT_ATOMIC_BINOP
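
Each VISIT_ATOMIC_BINOP line stamps out one visitor that forwards to the shared
dispatch above. For instance, VISIT_ATOMIC_BINOP(Add) expands to roughly:

    void InstructionSelector::VisitWord64AtomicAdd(Node* node) {
      VisitWord64AtomicBinaryOperation(
          node, kMips64Word64AtomicAddUint8, kMips64Word64AtomicAddUint16,
          kMips64Word64AtomicAddUint32, kMips64Word64AtomicAddUint64);
    }
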
+
void InstructionSelector::VisitInt32AbsWithOverflow(Node* node) {
UNREACHABLE();
}