Diffstat (limited to 'deps/v8/src/compiler')
-rw-r--r--  deps/v8/src/compiler/access-info.cc | 9
-rw-r--r--  deps/v8/src/compiler/backend/arm/code-generator-arm.cc | 11
-rw-r--r--  deps/v8/src/compiler/backend/arm/instruction-codes-arm.h | 707
-rw-r--r--  deps/v8/src/compiler/backend/arm/instruction-selector-arm.cc | 4
-rw-r--r--  deps/v8/src/compiler/backend/arm64/code-generator-arm64.cc | 65
-rw-r--r--  deps/v8/src/compiler/backend/arm64/instruction-codes-arm64.h | 669
-rw-r--r--  deps/v8/src/compiler/backend/arm64/instruction-scheduler-arm64.cc | 2
-rw-r--r--  deps/v8/src/compiler/backend/arm64/instruction-selector-arm64.cc | 50
-rw-r--r--  deps/v8/src/compiler/backend/ia32/code-generator-ia32.cc | 332
-rw-r--r--  deps/v8/src/compiler/backend/ia32/instruction-codes-ia32.h | 705
-rw-r--r--  deps/v8/src/compiler/backend/ia32/instruction-scheduler-ia32.cc | 88
-rw-r--r--  deps/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc | 160
-rw-r--r--  deps/v8/src/compiler/backend/instruction-codes.h | 43
-rw-r--r--  deps/v8/src/compiler/backend/instruction-scheduler.cc | 8
-rw-r--r--  deps/v8/src/compiler/backend/instruction-scheduler.h | 8
-rw-r--r--  deps/v8/src/compiler/backend/instruction-selector.cc | 24
-rw-r--r--  deps/v8/src/compiler/backend/instruction.cc | 38
-rw-r--r--  deps/v8/src/compiler/backend/instruction.h | 7
-rw-r--r--  deps/v8/src/compiler/backend/loong64/code-generator-loong64.cc | 6
-rw-r--r--  deps/v8/src/compiler/backend/loong64/instruction-codes-loong64.h | 723
-rw-r--r--  deps/v8/src/compiler/backend/loong64/instruction-selector-loong64.cc | 40
-rw-r--r--  deps/v8/src/compiler/backend/mips/code-generator-mips.cc | 6
-rw-r--r--  deps/v8/src/compiler/backend/mips/instruction-codes-mips.h | 731
-rw-r--r--  deps/v8/src/compiler/backend/mips/instruction-scheduler-mips.cc | 2
-rw-r--r--  deps/v8/src/compiler/backend/mips/instruction-selector-mips.cc | 4
-rw-r--r--  deps/v8/src/compiler/backend/mips64/code-generator-mips64.cc | 18
-rw-r--r--  deps/v8/src/compiler/backend/mips64/instruction-codes-mips64.h | 779
-rw-r--r--  deps/v8/src/compiler/backend/mips64/instruction-scheduler-mips64.cc | 2
-rw-r--r--  deps/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc | 103
-rw-r--r--  deps/v8/src/compiler/backend/ppc/code-generator-ppc.cc | 219
-rw-r--r--  deps/v8/src/compiler/backend/ppc/instruction-codes-ppc.h | 805
-rw-r--r--  deps/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc | 4
-rw-r--r--  deps/v8/src/compiler/backend/riscv64/code-generator-riscv64.cc | 204
-rw-r--r--  deps/v8/src/compiler/backend/riscv64/instruction-codes-riscv64.h | 780
-rw-r--r--  deps/v8/src/compiler/backend/riscv64/instruction-scheduler-riscv64.cc | 2
-rw-r--r--  deps/v8/src/compiler/backend/riscv64/instruction-selector-riscv64.cc | 9
-rw-r--r--  deps/v8/src/compiler/backend/s390/code-generator-s390.cc | 19
-rw-r--r--  deps/v8/src/compiler/backend/s390/instruction-codes-s390.h | 777
-rw-r--r--  deps/v8/src/compiler/backend/s390/instruction-selector-s390.cc | 4
-rw-r--r--  deps/v8/src/compiler/backend/x64/code-generator-x64.cc | 98
-rw-r--r--  deps/v8/src/compiler/backend/x64/instruction-codes-x64.h | 771
-rw-r--r--  deps/v8/src/compiler/backend/x64/instruction-selector-x64.cc | 52
-rw-r--r--  deps/v8/src/compiler/branch-elimination.cc | 126
-rw-r--r--  deps/v8/src/compiler/branch-elimination.h | 5
-rw-r--r--  deps/v8/src/compiler/c-linkage.cc | 2
-rw-r--r--  deps/v8/src/compiler/code-assembler.cc | 10
-rw-r--r--  deps/v8/src/compiler/code-assembler.h | 2
-rw-r--r--  deps/v8/src/compiler/csa-load-elimination.cc | 2
-rw-r--r--  deps/v8/src/compiler/effect-control-linearizer.cc | 14
-rw-r--r--  deps/v8/src/compiler/escape-analysis.cc | 15
-rw-r--r--  deps/v8/src/compiler/escape-analysis.h | 5
-rw-r--r--  deps/v8/src/compiler/globals.h | 3
-rw-r--r--  deps/v8/src/compiler/heap-refs.cc | 13
-rw-r--r--  deps/v8/src/compiler/heap-refs.h | 2
-rw-r--r--  deps/v8/src/compiler/js-call-reducer.cc | 13
-rw-r--r--  deps/v8/src/compiler/js-create-lowering.cc | 2
-rw-r--r--  deps/v8/src/compiler/js-inlining.cc | 15
-rw-r--r--  deps/v8/src/compiler/js-native-context-specialization.cc | 4
-rw-r--r--  deps/v8/src/compiler/js-type-hint-lowering.cc | 19
-rw-r--r--  deps/v8/src/compiler/linkage.cc | 12
-rw-r--r--  deps/v8/src/compiler/linkage.h | 25
-rw-r--r--  deps/v8/src/compiler/loop-unrolling.cc | 10
-rw-r--r--  deps/v8/src/compiler/machine-graph-verifier.cc | 2
-rw-r--r--  deps/v8/src/compiler/machine-operator-reducer.cc | 15
-rw-r--r--  deps/v8/src/compiler/machine-operator.cc | 14
-rw-r--r--  deps/v8/src/compiler/machine-operator.h | 6
-rw-r--r--  deps/v8/src/compiler/memory-optimizer.cc | 2
-rw-r--r--  deps/v8/src/compiler/node-matchers.h | 9
-rw-r--r--  deps/v8/src/compiler/opcodes.h | 2
-rw-r--r--  deps/v8/src/compiler/pipeline.cc | 28
-rw-r--r--  deps/v8/src/compiler/pipeline.h | 3
-rw-r--r--  deps/v8/src/compiler/raw-machine-assembler.cc | 18
-rw-r--r--  deps/v8/src/compiler/raw-machine-assembler.h | 2
-rw-r--r--  deps/v8/src/compiler/scheduler.cc | 23
-rw-r--r--  deps/v8/src/compiler/types.cc | 1
-rw-r--r--  deps/v8/src/compiler/types.h | 4
-rw-r--r--  deps/v8/src/compiler/verifier.cc | 2
-rw-r--r--  deps/v8/src/compiler/wasm-compiler.cc | 123
-rw-r--r--  deps/v8/src/compiler/wasm-compiler.h | 11
-rw-r--r--  deps/v8/src/compiler/wasm-inlining.cc | 184
-rw-r--r--  deps/v8/src/compiler/wasm-inlining.h | 53
81 files changed, 5235 insertions, 4654 deletions
diff --git a/deps/v8/src/compiler/access-info.cc b/deps/v8/src/compiler/access-info.cc
index e68ced7460..2ad2c9e945 100644
--- a/deps/v8/src/compiler/access-info.cc
+++ b/deps/v8/src/compiler/access-info.cc
@@ -836,7 +836,7 @@ PropertyAccessInfo AccessInfoFactory::ComputePropertyAccessInfo(
// occuring before a fast mode holder on the chain.
return Invalid();
}
- if (details.location() == kField) {
+ if (details.location() == PropertyLocation::kField) {
if (details.kind() == kData) {
return ComputeDataFieldAccessInfo(receiver_map, map, holder, index,
access_mode);
@@ -846,7 +846,7 @@ PropertyAccessInfo AccessInfoFactory::ComputePropertyAccessInfo(
return Invalid();
}
} else {
- DCHECK_EQ(kDescriptor, details.location());
+ DCHECK_EQ(PropertyLocation::kDescriptor, details.location());
DCHECK_EQ(kAccessor, details.kind());
return ComputeAccessorDescriptorAccessInfo(receiver_map, name, map,
holder, index, access_mode);
@@ -1130,7 +1130,7 @@ PropertyAccessInfo AccessInfoFactory::LookupTransition(
if (details.IsReadOnly()) return Invalid();
// TODO(bmeurer): Handle transition to data constant?
- if (details.location() != kField) return Invalid();
+ if (details.location() != PropertyLocation::kField) return Invalid();
int const index = details.field_index();
Representation details_representation = details.representation();
@@ -1172,8 +1172,7 @@ PropertyAccessInfo AccessInfoFactory::LookupTransition(
if (descriptors_field_type->IsClass()) {
unrecorded_dependencies.push_back(
dependencies()->FieldTypeDependencyOffTheRecord(
- transition_map, number,
- MakeRef<Object>(broker(), descriptors_field_type)));
+ transition_map, number, *descriptors_field_type_ref));
// Remember the field map, and try to infer a useful type.
base::Optional<MapRef> maybe_field_map =
TryMakeRef(broker(), descriptors_field_type->AsClass());
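
Aside: the kField/kDescriptor call sites in this file now have to spell out PropertyLocation::, which is the usual fallout when an unscoped enum becomes an enum class (or a using-declaration is dropped). A minimal, self-contained C++ sketch of why the qualification becomes mandatory; the type names here are illustrative, not V8's actual definitions:

#include <iostream>

// Unscoped enum: enumerators leak into the enclosing scope.
enum UnscopedLocation { kField, kDescriptor };

// Scoped enum: enumerators must be qualified and never convert implicitly.
enum class PropertyLocation { kField, kDescriptor };

int main() {
  UnscopedLocation a = kField;                    // unqualified name is visible
  PropertyLocation b = PropertyLocation::kField;  // qualification is required
  // int c = b;  // error: no implicit conversion from a scoped enum
  std::cout << (a == kField) << ' ' << (b == PropertyLocation::kField) << '\n';
}
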
diff --git a/deps/v8/src/compiler/backend/arm/code-generator-arm.cc b/deps/v8/src/compiler/backend/arm/code-generator-arm.cc
index 7bc90fd822..b70c641db8 100644
--- a/deps/v8/src/compiler/backend/arm/code-generator-arm.cc
+++ b/deps/v8/src/compiler/backend/arm/code-generator-arm.cc
@@ -767,8 +767,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kArchPrepareCallCFunction: {
- int const num_parameters = MiscField::decode(instr->opcode());
- __ PrepareCallCFunction(num_parameters);
+ int const num_gp_parameters = ParamField::decode(instr->opcode());
+ int const num_fp_parameters = FPParamField::decode(instr->opcode());
+ __ PrepareCallCFunction(num_gp_parameters + num_fp_parameters);
// Frame alignment requires using FP-relative frame addressing.
frame_access_state()->SetFrameAccessToFP();
break;
@@ -853,13 +854,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
AssembleArchTableSwitch(instr);
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
- case kArchAbortCSAAssert:
+ case kArchAbortCSADcheck:
DCHECK(i.InputRegister(0) == r1);
{
// We don't actually want to generate a pile of code for this, so just
// claim there is a stack frame, without generating one.
- FrameScope scope(tasm(), StackFrame::NONE);
- __ Call(isolate()->builtins()->code_handle(Builtin::kAbortCSAAssert),
+ FrameScope scope(tasm(), StackFrame::NO_FRAME_TYPE);
+ __ Call(isolate()->builtins()->code_handle(Builtin::kAbortCSADcheck),
RelocInfo::CODE_TARGET);
}
__ stop();
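
Note: this hunk (and the matching IA32/ARM64 hunks below) replaces the single MiscField parameter count on kArchPrepareCallCFunction / kArchCallCFunction with separately decoded general-purpose and floating-point counts, ParamField and FPParamField. The actual field layout lives in instruction-codes.h and is not shown in this diff, so the following is only a sketch of the encoding idea, with invented shifts and widths:

#include <cstdint>
#include <iostream>

// Illustrative only: pack two small argument counts into one 32-bit
// instruction code, in the spirit of V8's base::BitField. The offsets and
// widths below are made up for the example.
template <int kShift, int kSize>
struct BitField {
  static constexpr uint32_t kMask = ((1u << kSize) - 1) << kShift;
  static constexpr uint32_t encode(uint32_t value) {
    return (value << kShift) & kMask;
  }
  static constexpr uint32_t decode(uint32_t word) {
    return (word & kMask) >> kShift;
  }
};

using ParamField = BitField<22, 5>;    // hypothetical GP parameter count
using FPParamField = BitField<27, 5>;  // hypothetical FP parameter count

int main() {
  uint32_t opcode = ParamField::encode(3) | FPParamField::encode(2);
  std::cout << "gp=" << ParamField::decode(opcode)
            << " fp=" << FPParamField::decode(opcode) << '\n';  // gp=3 fp=2
}
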
diff --git a/deps/v8/src/compiler/backend/arm/instruction-codes-arm.h b/deps/v8/src/compiler/backend/arm/instruction-codes-arm.h
index c0200917b9..d4e0c2c457 100644
--- a/deps/v8/src/compiler/backend/arm/instruction-codes-arm.h
+++ b/deps/v8/src/compiler/backend/arm/instruction-codes-arm.h
@@ -11,357 +11,362 @@ namespace compiler {
// ARM-specific opcodes that specify which assembly sequence to emit.
// Most opcodes specify a single instruction.
-#define TARGET_ARCH_OPCODE_LIST(V) \
- V(ArmAdd) \
- V(ArmAnd) \
- V(ArmBic) \
- V(ArmClz) \
- V(ArmCmp) \
- V(ArmCmn) \
- V(ArmTst) \
- V(ArmTeq) \
- V(ArmOrr) \
- V(ArmEor) \
- V(ArmSub) \
- V(ArmRsb) \
- V(ArmMul) \
- V(ArmMla) \
- V(ArmMls) \
- V(ArmSmull) \
- V(ArmSmmul) \
- V(ArmSmmla) \
- V(ArmUmull) \
- V(ArmSdiv) \
- V(ArmUdiv) \
- V(ArmMov) \
- V(ArmMvn) \
- V(ArmBfc) \
- V(ArmUbfx) \
- V(ArmSbfx) \
- V(ArmSxtb) \
- V(ArmSxth) \
- V(ArmSxtab) \
- V(ArmSxtah) \
- V(ArmUxtb) \
- V(ArmUxth) \
- V(ArmUxtab) \
- V(ArmRbit) \
- V(ArmRev) \
- V(ArmUxtah) \
- V(ArmAddPair) \
- V(ArmSubPair) \
- V(ArmMulPair) \
- V(ArmLslPair) \
- V(ArmLsrPair) \
- V(ArmAsrPair) \
- V(ArmVcmpF32) \
- V(ArmVaddF32) \
- V(ArmVsubF32) \
- V(ArmVmulF32) \
- V(ArmVmlaF32) \
- V(ArmVmlsF32) \
- V(ArmVdivF32) \
- V(ArmVabsF32) \
- V(ArmVnegF32) \
- V(ArmVsqrtF32) \
- V(ArmVcmpF64) \
- V(ArmVaddF64) \
- V(ArmVsubF64) \
- V(ArmVmulF64) \
- V(ArmVmlaF64) \
- V(ArmVmlsF64) \
- V(ArmVdivF64) \
- V(ArmVmodF64) \
- V(ArmVabsF64) \
- V(ArmVnegF64) \
- V(ArmVsqrtF64) \
- V(ArmVmullLow) \
- V(ArmVmullHigh) \
- V(ArmVrintmF32) \
- V(ArmVrintmF64) \
- V(ArmVrintpF32) \
- V(ArmVrintpF64) \
- V(ArmVrintzF32) \
- V(ArmVrintzF64) \
- V(ArmVrintaF64) \
- V(ArmVrintnF32) \
- V(ArmVrintnF64) \
- V(ArmVcvtF32F64) \
- V(ArmVcvtF64F32) \
- V(ArmVcvtF32S32) \
- V(ArmVcvtF32U32) \
- V(ArmVcvtF64S32) \
- V(ArmVcvtF64U32) \
- V(ArmVcvtS32F32) \
- V(ArmVcvtU32F32) \
- V(ArmVcvtS32F64) \
- V(ArmVcvtU32F64) \
- V(ArmVmovU32F32) \
- V(ArmVmovF32U32) \
- V(ArmVmovLowU32F64) \
- V(ArmVmovLowF64U32) \
- V(ArmVmovHighU32F64) \
- V(ArmVmovHighF64U32) \
- V(ArmVmovF64U32U32) \
- V(ArmVmovU32U32F64) \
- V(ArmVldrF32) \
- V(ArmVstrF32) \
- V(ArmVldrF64) \
- V(ArmVld1F64) \
- V(ArmVstrF64) \
- V(ArmVst1F64) \
- V(ArmVld1S128) \
- V(ArmVst1S128) \
- V(ArmVcnt) \
- V(ArmVpadal) \
- V(ArmVpaddl) \
- V(ArmFloat32Max) \
- V(ArmFloat64Max) \
- V(ArmFloat32Min) \
- V(ArmFloat64Min) \
- V(ArmFloat64SilenceNaN) \
- V(ArmLdrb) \
- V(ArmLdrsb) \
- V(ArmStrb) \
- V(ArmLdrh) \
- V(ArmLdrsh) \
- V(ArmStrh) \
- V(ArmLdr) \
- V(ArmStr) \
- V(ArmPush) \
- V(ArmPoke) \
- V(ArmPeek) \
- V(ArmDmbIsh) \
- V(ArmDsbIsb) \
- V(ArmF64x2Splat) \
- V(ArmF64x2ExtractLane) \
- V(ArmF64x2ReplaceLane) \
- V(ArmF64x2Abs) \
- V(ArmF64x2Neg) \
- V(ArmF64x2Sqrt) \
- V(ArmF64x2Add) \
- V(ArmF64x2Sub) \
- V(ArmF64x2Mul) \
- V(ArmF64x2Div) \
- V(ArmF64x2Min) \
- V(ArmF64x2Max) \
- V(ArmF64x2Eq) \
- V(ArmF64x2Ne) \
- V(ArmF64x2Lt) \
- V(ArmF64x2Le) \
- V(ArmF64x2Pmin) \
- V(ArmF64x2Pmax) \
- V(ArmF64x2Ceil) \
- V(ArmF64x2Floor) \
- V(ArmF64x2Trunc) \
- V(ArmF64x2NearestInt) \
- V(ArmF64x2ConvertLowI32x4S) \
- V(ArmF64x2ConvertLowI32x4U) \
- V(ArmF64x2PromoteLowF32x4) \
- V(ArmF32x4Splat) \
- V(ArmF32x4ExtractLane) \
- V(ArmF32x4ReplaceLane) \
- V(ArmF32x4SConvertI32x4) \
- V(ArmF32x4UConvertI32x4) \
- V(ArmF32x4Abs) \
- V(ArmF32x4Neg) \
- V(ArmF32x4Sqrt) \
- V(ArmF32x4RecipApprox) \
- V(ArmF32x4RecipSqrtApprox) \
- V(ArmF32x4Add) \
- V(ArmF32x4Sub) \
- V(ArmF32x4Mul) \
- V(ArmF32x4Div) \
- V(ArmF32x4Min) \
- V(ArmF32x4Max) \
- V(ArmF32x4Eq) \
- V(ArmF32x4Ne) \
- V(ArmF32x4Lt) \
- V(ArmF32x4Le) \
- V(ArmF32x4Pmin) \
- V(ArmF32x4Pmax) \
- V(ArmF32x4DemoteF64x2Zero) \
- V(ArmI64x2SplatI32Pair) \
- V(ArmI64x2ReplaceLaneI32Pair) \
- V(ArmI64x2Abs) \
- V(ArmI64x2Neg) \
- V(ArmI64x2Shl) \
- V(ArmI64x2ShrS) \
- V(ArmI64x2Add) \
- V(ArmI64x2Sub) \
- V(ArmI64x2Mul) \
- V(ArmI64x2ShrU) \
- V(ArmI64x2BitMask) \
- V(ArmI64x2Eq) \
- V(ArmI64x2Ne) \
- V(ArmI64x2GtS) \
- V(ArmI64x2GeS) \
- V(ArmI64x2SConvertI32x4Low) \
- V(ArmI64x2SConvertI32x4High) \
- V(ArmI64x2UConvertI32x4Low) \
- V(ArmI64x2UConvertI32x4High) \
- V(ArmI32x4Splat) \
- V(ArmI32x4ExtractLane) \
- V(ArmI32x4ReplaceLane) \
- V(ArmI32x4SConvertF32x4) \
- V(ArmI32x4SConvertI16x8Low) \
- V(ArmI32x4SConvertI16x8High) \
- V(ArmI32x4Neg) \
- V(ArmI32x4Shl) \
- V(ArmI32x4ShrS) \
- V(ArmI32x4Add) \
- V(ArmI32x4Sub) \
- V(ArmI32x4Mul) \
- V(ArmI32x4MinS) \
- V(ArmI32x4MaxS) \
- V(ArmI32x4Eq) \
- V(ArmI32x4Ne) \
- V(ArmI32x4GtS) \
- V(ArmI32x4GeS) \
- V(ArmI32x4UConvertF32x4) \
- V(ArmI32x4UConvertI16x8Low) \
- V(ArmI32x4UConvertI16x8High) \
- V(ArmI32x4ShrU) \
- V(ArmI32x4MinU) \
- V(ArmI32x4MaxU) \
- V(ArmI32x4GtU) \
- V(ArmI32x4GeU) \
- V(ArmI32x4Abs) \
- V(ArmI32x4BitMask) \
- V(ArmI32x4DotI16x8S) \
- V(ArmI32x4TruncSatF64x2SZero) \
- V(ArmI32x4TruncSatF64x2UZero) \
- V(ArmI16x8Splat) \
- V(ArmI16x8ExtractLaneS) \
- V(ArmI16x8ReplaceLane) \
- V(ArmI16x8SConvertI8x16Low) \
- V(ArmI16x8SConvertI8x16High) \
- V(ArmI16x8Neg) \
- V(ArmI16x8Shl) \
- V(ArmI16x8ShrS) \
- V(ArmI16x8SConvertI32x4) \
- V(ArmI16x8Add) \
- V(ArmI16x8AddSatS) \
- V(ArmI16x8Sub) \
- V(ArmI16x8SubSatS) \
- V(ArmI16x8Mul) \
- V(ArmI16x8MinS) \
- V(ArmI16x8MaxS) \
- V(ArmI16x8Eq) \
- V(ArmI16x8Ne) \
- V(ArmI16x8GtS) \
- V(ArmI16x8GeS) \
- V(ArmI16x8ExtractLaneU) \
- V(ArmI16x8UConvertI8x16Low) \
- V(ArmI16x8UConvertI8x16High) \
- V(ArmI16x8ShrU) \
- V(ArmI16x8UConvertI32x4) \
- V(ArmI16x8AddSatU) \
- V(ArmI16x8SubSatU) \
- V(ArmI16x8MinU) \
- V(ArmI16x8MaxU) \
- V(ArmI16x8GtU) \
- V(ArmI16x8GeU) \
- V(ArmI16x8RoundingAverageU) \
- V(ArmI16x8Abs) \
- V(ArmI16x8BitMask) \
- V(ArmI16x8Q15MulRSatS) \
- V(ArmI8x16Splat) \
- V(ArmI8x16ExtractLaneS) \
- V(ArmI8x16ReplaceLane) \
- V(ArmI8x16Neg) \
- V(ArmI8x16Shl) \
- V(ArmI8x16ShrS) \
- V(ArmI8x16SConvertI16x8) \
- V(ArmI8x16Add) \
- V(ArmI8x16AddSatS) \
- V(ArmI8x16Sub) \
- V(ArmI8x16SubSatS) \
- V(ArmI8x16MinS) \
- V(ArmI8x16MaxS) \
- V(ArmI8x16Eq) \
- V(ArmI8x16Ne) \
- V(ArmI8x16GtS) \
- V(ArmI8x16GeS) \
- V(ArmI8x16ExtractLaneU) \
- V(ArmI8x16ShrU) \
- V(ArmI8x16UConvertI16x8) \
- V(ArmI8x16AddSatU) \
- V(ArmI8x16SubSatU) \
- V(ArmI8x16MinU) \
- V(ArmI8x16MaxU) \
- V(ArmI8x16GtU) \
- V(ArmI8x16GeU) \
- V(ArmI8x16RoundingAverageU) \
- V(ArmI8x16Abs) \
- V(ArmI8x16BitMask) \
- V(ArmS128Const) \
- V(ArmS128Zero) \
- V(ArmS128AllOnes) \
- V(ArmS128Dup) \
- V(ArmS128And) \
- V(ArmS128Or) \
- V(ArmS128Xor) \
- V(ArmS128Not) \
- V(ArmS128Select) \
- V(ArmS128AndNot) \
- V(ArmS32x4ZipLeft) \
- V(ArmS32x4ZipRight) \
- V(ArmS32x4UnzipLeft) \
- V(ArmS32x4UnzipRight) \
- V(ArmS32x4TransposeLeft) \
- V(ArmS32x4TransposeRight) \
- V(ArmS32x4Shuffle) \
- V(ArmS16x8ZipLeft) \
- V(ArmS16x8ZipRight) \
- V(ArmS16x8UnzipLeft) \
- V(ArmS16x8UnzipRight) \
- V(ArmS16x8TransposeLeft) \
- V(ArmS16x8TransposeRight) \
- V(ArmS8x16ZipLeft) \
- V(ArmS8x16ZipRight) \
- V(ArmS8x16UnzipLeft) \
- V(ArmS8x16UnzipRight) \
- V(ArmS8x16TransposeLeft) \
- V(ArmS8x16TransposeRight) \
- V(ArmS8x16Concat) \
- V(ArmI8x16Swizzle) \
- V(ArmI8x16Shuffle) \
- V(ArmS32x2Reverse) \
- V(ArmS16x4Reverse) \
- V(ArmS16x2Reverse) \
- V(ArmS8x8Reverse) \
- V(ArmS8x4Reverse) \
- V(ArmS8x2Reverse) \
- V(ArmI64x2AllTrue) \
- V(ArmI32x4AllTrue) \
- V(ArmI16x8AllTrue) \
- V(ArmV128AnyTrue) \
- V(ArmI8x16AllTrue) \
- V(ArmS128Load8Splat) \
- V(ArmS128Load16Splat) \
- V(ArmS128Load32Splat) \
- V(ArmS128Load64Splat) \
- V(ArmS128Load8x8S) \
- V(ArmS128Load8x8U) \
- V(ArmS128Load16x4S) \
- V(ArmS128Load16x4U) \
- V(ArmS128Load32x2S) \
- V(ArmS128Load32x2U) \
- V(ArmS128Load32Zero) \
- V(ArmS128Load64Zero) \
- V(ArmS128LoadLaneLow) \
- V(ArmS128LoadLaneHigh) \
- V(ArmS128StoreLaneLow) \
- V(ArmS128StoreLaneHigh) \
- V(ArmWord32AtomicPairLoad) \
- V(ArmWord32AtomicPairStore) \
- V(ArmWord32AtomicPairAdd) \
- V(ArmWord32AtomicPairSub) \
- V(ArmWord32AtomicPairAnd) \
- V(ArmWord32AtomicPairOr) \
- V(ArmWord32AtomicPairXor) \
- V(ArmWord32AtomicPairExchange) \
+
+// Opcodes that support a MemoryAccessMode.
+#define TARGET_ARCH_OPCODE_WITH_MEMORY_ACCESS_MODE_LIST(V) // None.
+
+#define TARGET_ARCH_OPCODE_LIST(V) \
+ TARGET_ARCH_OPCODE_WITH_MEMORY_ACCESS_MODE_LIST(V) \
+ V(ArmAdd) \
+ V(ArmAnd) \
+ V(ArmBic) \
+ V(ArmClz) \
+ V(ArmCmp) \
+ V(ArmCmn) \
+ V(ArmTst) \
+ V(ArmTeq) \
+ V(ArmOrr) \
+ V(ArmEor) \
+ V(ArmSub) \
+ V(ArmRsb) \
+ V(ArmMul) \
+ V(ArmMla) \
+ V(ArmMls) \
+ V(ArmSmull) \
+ V(ArmSmmul) \
+ V(ArmSmmla) \
+ V(ArmUmull) \
+ V(ArmSdiv) \
+ V(ArmUdiv) \
+ V(ArmMov) \
+ V(ArmMvn) \
+ V(ArmBfc) \
+ V(ArmUbfx) \
+ V(ArmSbfx) \
+ V(ArmSxtb) \
+ V(ArmSxth) \
+ V(ArmSxtab) \
+ V(ArmSxtah) \
+ V(ArmUxtb) \
+ V(ArmUxth) \
+ V(ArmUxtab) \
+ V(ArmRbit) \
+ V(ArmRev) \
+ V(ArmUxtah) \
+ V(ArmAddPair) \
+ V(ArmSubPair) \
+ V(ArmMulPair) \
+ V(ArmLslPair) \
+ V(ArmLsrPair) \
+ V(ArmAsrPair) \
+ V(ArmVcmpF32) \
+ V(ArmVaddF32) \
+ V(ArmVsubF32) \
+ V(ArmVmulF32) \
+ V(ArmVmlaF32) \
+ V(ArmVmlsF32) \
+ V(ArmVdivF32) \
+ V(ArmVabsF32) \
+ V(ArmVnegF32) \
+ V(ArmVsqrtF32) \
+ V(ArmVcmpF64) \
+ V(ArmVaddF64) \
+ V(ArmVsubF64) \
+ V(ArmVmulF64) \
+ V(ArmVmlaF64) \
+ V(ArmVmlsF64) \
+ V(ArmVdivF64) \
+ V(ArmVmodF64) \
+ V(ArmVabsF64) \
+ V(ArmVnegF64) \
+ V(ArmVsqrtF64) \
+ V(ArmVmullLow) \
+ V(ArmVmullHigh) \
+ V(ArmVrintmF32) \
+ V(ArmVrintmF64) \
+ V(ArmVrintpF32) \
+ V(ArmVrintpF64) \
+ V(ArmVrintzF32) \
+ V(ArmVrintzF64) \
+ V(ArmVrintaF64) \
+ V(ArmVrintnF32) \
+ V(ArmVrintnF64) \
+ V(ArmVcvtF32F64) \
+ V(ArmVcvtF64F32) \
+ V(ArmVcvtF32S32) \
+ V(ArmVcvtF32U32) \
+ V(ArmVcvtF64S32) \
+ V(ArmVcvtF64U32) \
+ V(ArmVcvtS32F32) \
+ V(ArmVcvtU32F32) \
+ V(ArmVcvtS32F64) \
+ V(ArmVcvtU32F64) \
+ V(ArmVmovU32F32) \
+ V(ArmVmovF32U32) \
+ V(ArmVmovLowU32F64) \
+ V(ArmVmovLowF64U32) \
+ V(ArmVmovHighU32F64) \
+ V(ArmVmovHighF64U32) \
+ V(ArmVmovF64U32U32) \
+ V(ArmVmovU32U32F64) \
+ V(ArmVldrF32) \
+ V(ArmVstrF32) \
+ V(ArmVldrF64) \
+ V(ArmVld1F64) \
+ V(ArmVstrF64) \
+ V(ArmVst1F64) \
+ V(ArmVld1S128) \
+ V(ArmVst1S128) \
+ V(ArmVcnt) \
+ V(ArmVpadal) \
+ V(ArmVpaddl) \
+ V(ArmFloat32Max) \
+ V(ArmFloat64Max) \
+ V(ArmFloat32Min) \
+ V(ArmFloat64Min) \
+ V(ArmFloat64SilenceNaN) \
+ V(ArmLdrb) \
+ V(ArmLdrsb) \
+ V(ArmStrb) \
+ V(ArmLdrh) \
+ V(ArmLdrsh) \
+ V(ArmStrh) \
+ V(ArmLdr) \
+ V(ArmStr) \
+ V(ArmPush) \
+ V(ArmPoke) \
+ V(ArmPeek) \
+ V(ArmDmbIsh) \
+ V(ArmDsbIsb) \
+ V(ArmF64x2Splat) \
+ V(ArmF64x2ExtractLane) \
+ V(ArmF64x2ReplaceLane) \
+ V(ArmF64x2Abs) \
+ V(ArmF64x2Neg) \
+ V(ArmF64x2Sqrt) \
+ V(ArmF64x2Add) \
+ V(ArmF64x2Sub) \
+ V(ArmF64x2Mul) \
+ V(ArmF64x2Div) \
+ V(ArmF64x2Min) \
+ V(ArmF64x2Max) \
+ V(ArmF64x2Eq) \
+ V(ArmF64x2Ne) \
+ V(ArmF64x2Lt) \
+ V(ArmF64x2Le) \
+ V(ArmF64x2Pmin) \
+ V(ArmF64x2Pmax) \
+ V(ArmF64x2Ceil) \
+ V(ArmF64x2Floor) \
+ V(ArmF64x2Trunc) \
+ V(ArmF64x2NearestInt) \
+ V(ArmF64x2ConvertLowI32x4S) \
+ V(ArmF64x2ConvertLowI32x4U) \
+ V(ArmF64x2PromoteLowF32x4) \
+ V(ArmF32x4Splat) \
+ V(ArmF32x4ExtractLane) \
+ V(ArmF32x4ReplaceLane) \
+ V(ArmF32x4SConvertI32x4) \
+ V(ArmF32x4UConvertI32x4) \
+ V(ArmF32x4Abs) \
+ V(ArmF32x4Neg) \
+ V(ArmF32x4Sqrt) \
+ V(ArmF32x4RecipApprox) \
+ V(ArmF32x4RecipSqrtApprox) \
+ V(ArmF32x4Add) \
+ V(ArmF32x4Sub) \
+ V(ArmF32x4Mul) \
+ V(ArmF32x4Div) \
+ V(ArmF32x4Min) \
+ V(ArmF32x4Max) \
+ V(ArmF32x4Eq) \
+ V(ArmF32x4Ne) \
+ V(ArmF32x4Lt) \
+ V(ArmF32x4Le) \
+ V(ArmF32x4Pmin) \
+ V(ArmF32x4Pmax) \
+ V(ArmF32x4DemoteF64x2Zero) \
+ V(ArmI64x2SplatI32Pair) \
+ V(ArmI64x2ReplaceLaneI32Pair) \
+ V(ArmI64x2Abs) \
+ V(ArmI64x2Neg) \
+ V(ArmI64x2Shl) \
+ V(ArmI64x2ShrS) \
+ V(ArmI64x2Add) \
+ V(ArmI64x2Sub) \
+ V(ArmI64x2Mul) \
+ V(ArmI64x2ShrU) \
+ V(ArmI64x2BitMask) \
+ V(ArmI64x2Eq) \
+ V(ArmI64x2Ne) \
+ V(ArmI64x2GtS) \
+ V(ArmI64x2GeS) \
+ V(ArmI64x2SConvertI32x4Low) \
+ V(ArmI64x2SConvertI32x4High) \
+ V(ArmI64x2UConvertI32x4Low) \
+ V(ArmI64x2UConvertI32x4High) \
+ V(ArmI32x4Splat) \
+ V(ArmI32x4ExtractLane) \
+ V(ArmI32x4ReplaceLane) \
+ V(ArmI32x4SConvertF32x4) \
+ V(ArmI32x4SConvertI16x8Low) \
+ V(ArmI32x4SConvertI16x8High) \
+ V(ArmI32x4Neg) \
+ V(ArmI32x4Shl) \
+ V(ArmI32x4ShrS) \
+ V(ArmI32x4Add) \
+ V(ArmI32x4Sub) \
+ V(ArmI32x4Mul) \
+ V(ArmI32x4MinS) \
+ V(ArmI32x4MaxS) \
+ V(ArmI32x4Eq) \
+ V(ArmI32x4Ne) \
+ V(ArmI32x4GtS) \
+ V(ArmI32x4GeS) \
+ V(ArmI32x4UConvertF32x4) \
+ V(ArmI32x4UConvertI16x8Low) \
+ V(ArmI32x4UConvertI16x8High) \
+ V(ArmI32x4ShrU) \
+ V(ArmI32x4MinU) \
+ V(ArmI32x4MaxU) \
+ V(ArmI32x4GtU) \
+ V(ArmI32x4GeU) \
+ V(ArmI32x4Abs) \
+ V(ArmI32x4BitMask) \
+ V(ArmI32x4DotI16x8S) \
+ V(ArmI32x4TruncSatF64x2SZero) \
+ V(ArmI32x4TruncSatF64x2UZero) \
+ V(ArmI16x8Splat) \
+ V(ArmI16x8ExtractLaneS) \
+ V(ArmI16x8ReplaceLane) \
+ V(ArmI16x8SConvertI8x16Low) \
+ V(ArmI16x8SConvertI8x16High) \
+ V(ArmI16x8Neg) \
+ V(ArmI16x8Shl) \
+ V(ArmI16x8ShrS) \
+ V(ArmI16x8SConvertI32x4) \
+ V(ArmI16x8Add) \
+ V(ArmI16x8AddSatS) \
+ V(ArmI16x8Sub) \
+ V(ArmI16x8SubSatS) \
+ V(ArmI16x8Mul) \
+ V(ArmI16x8MinS) \
+ V(ArmI16x8MaxS) \
+ V(ArmI16x8Eq) \
+ V(ArmI16x8Ne) \
+ V(ArmI16x8GtS) \
+ V(ArmI16x8GeS) \
+ V(ArmI16x8ExtractLaneU) \
+ V(ArmI16x8UConvertI8x16Low) \
+ V(ArmI16x8UConvertI8x16High) \
+ V(ArmI16x8ShrU) \
+ V(ArmI16x8UConvertI32x4) \
+ V(ArmI16x8AddSatU) \
+ V(ArmI16x8SubSatU) \
+ V(ArmI16x8MinU) \
+ V(ArmI16x8MaxU) \
+ V(ArmI16x8GtU) \
+ V(ArmI16x8GeU) \
+ V(ArmI16x8RoundingAverageU) \
+ V(ArmI16x8Abs) \
+ V(ArmI16x8BitMask) \
+ V(ArmI16x8Q15MulRSatS) \
+ V(ArmI8x16Splat) \
+ V(ArmI8x16ExtractLaneS) \
+ V(ArmI8x16ReplaceLane) \
+ V(ArmI8x16Neg) \
+ V(ArmI8x16Shl) \
+ V(ArmI8x16ShrS) \
+ V(ArmI8x16SConvertI16x8) \
+ V(ArmI8x16Add) \
+ V(ArmI8x16AddSatS) \
+ V(ArmI8x16Sub) \
+ V(ArmI8x16SubSatS) \
+ V(ArmI8x16MinS) \
+ V(ArmI8x16MaxS) \
+ V(ArmI8x16Eq) \
+ V(ArmI8x16Ne) \
+ V(ArmI8x16GtS) \
+ V(ArmI8x16GeS) \
+ V(ArmI8x16ExtractLaneU) \
+ V(ArmI8x16ShrU) \
+ V(ArmI8x16UConvertI16x8) \
+ V(ArmI8x16AddSatU) \
+ V(ArmI8x16SubSatU) \
+ V(ArmI8x16MinU) \
+ V(ArmI8x16MaxU) \
+ V(ArmI8x16GtU) \
+ V(ArmI8x16GeU) \
+ V(ArmI8x16RoundingAverageU) \
+ V(ArmI8x16Abs) \
+ V(ArmI8x16BitMask) \
+ V(ArmS128Const) \
+ V(ArmS128Zero) \
+ V(ArmS128AllOnes) \
+ V(ArmS128Dup) \
+ V(ArmS128And) \
+ V(ArmS128Or) \
+ V(ArmS128Xor) \
+ V(ArmS128Not) \
+ V(ArmS128Select) \
+ V(ArmS128AndNot) \
+ V(ArmS32x4ZipLeft) \
+ V(ArmS32x4ZipRight) \
+ V(ArmS32x4UnzipLeft) \
+ V(ArmS32x4UnzipRight) \
+ V(ArmS32x4TransposeLeft) \
+ V(ArmS32x4TransposeRight) \
+ V(ArmS32x4Shuffle) \
+ V(ArmS16x8ZipLeft) \
+ V(ArmS16x8ZipRight) \
+ V(ArmS16x8UnzipLeft) \
+ V(ArmS16x8UnzipRight) \
+ V(ArmS16x8TransposeLeft) \
+ V(ArmS16x8TransposeRight) \
+ V(ArmS8x16ZipLeft) \
+ V(ArmS8x16ZipRight) \
+ V(ArmS8x16UnzipLeft) \
+ V(ArmS8x16UnzipRight) \
+ V(ArmS8x16TransposeLeft) \
+ V(ArmS8x16TransposeRight) \
+ V(ArmS8x16Concat) \
+ V(ArmI8x16Swizzle) \
+ V(ArmI8x16Shuffle) \
+ V(ArmS32x2Reverse) \
+ V(ArmS16x4Reverse) \
+ V(ArmS16x2Reverse) \
+ V(ArmS8x8Reverse) \
+ V(ArmS8x4Reverse) \
+ V(ArmS8x2Reverse) \
+ V(ArmI64x2AllTrue) \
+ V(ArmI32x4AllTrue) \
+ V(ArmI16x8AllTrue) \
+ V(ArmV128AnyTrue) \
+ V(ArmI8x16AllTrue) \
+ V(ArmS128Load8Splat) \
+ V(ArmS128Load16Splat) \
+ V(ArmS128Load32Splat) \
+ V(ArmS128Load64Splat) \
+ V(ArmS128Load8x8S) \
+ V(ArmS128Load8x8U) \
+ V(ArmS128Load16x4S) \
+ V(ArmS128Load16x4U) \
+ V(ArmS128Load32x2S) \
+ V(ArmS128Load32x2U) \
+ V(ArmS128Load32Zero) \
+ V(ArmS128Load64Zero) \
+ V(ArmS128LoadLaneLow) \
+ V(ArmS128LoadLaneHigh) \
+ V(ArmS128StoreLaneLow) \
+ V(ArmS128StoreLaneHigh) \
+ V(ArmWord32AtomicPairLoad) \
+ V(ArmWord32AtomicPairStore) \
+ V(ArmWord32AtomicPairAdd) \
+ V(ArmWord32AtomicPairSub) \
+ V(ArmWord32AtomicPairAnd) \
+ V(ArmWord32AtomicPairOr) \
+ V(ArmWord32AtomicPairXor) \
+ V(ArmWord32AtomicPairExchange) \
V(ArmWord32AtomicPairCompareExchange)
// Addressing modes represent the "shape" of inputs to an instruction.
diff --git a/deps/v8/src/compiler/backend/arm/instruction-selector-arm.cc b/deps/v8/src/compiler/backend/arm/instruction-selector-arm.cc
index 3de9b2aab6..d0511ae62b 100644
--- a/deps/v8/src/compiler/backend/arm/instruction-selector-arm.cc
+++ b/deps/v8/src/compiler/backend/arm/instruction-selector-arm.cc
@@ -498,9 +498,9 @@ void InstructionSelector::VisitStackSlot(Node* node) {
sequence()->AddImmediate(Constant(slot)), 0, nullptr);
}
-void InstructionSelector::VisitAbortCSAAssert(Node* node) {
+void InstructionSelector::VisitAbortCSADcheck(Node* node) {
ArmOperandGenerator g(this);
- Emit(kArchAbortCSAAssert, g.NoOutput(), g.UseFixed(node->InputAt(0), r1));
+ Emit(kArchAbortCSADcheck, g.NoOutput(), g.UseFixed(node->InputAt(0), r1));
}
void InstructionSelector::VisitStoreLane(Node* node) {
diff --git a/deps/v8/src/compiler/backend/arm64/code-generator-arm64.cc b/deps/v8/src/compiler/backend/arm64/code-generator-arm64.cc
index fcab0a739b..d04bcf245c 100644
--- a/deps/v8/src/compiler/backend/arm64/code-generator-arm64.cc
+++ b/deps/v8/src/compiler/backend/arm64/code-generator-arm64.cc
@@ -821,7 +821,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
AssemblePrepareTailCall();
break;
case kArchCallCFunction: {
- int const num_parameters = MiscField::decode(instr->opcode());
+ int const num_gp_parameters = ParamField::decode(instr->opcode());
+ int const num_fp_parameters = FPParamField::decode(instr->opcode());
Label return_location;
#if V8_ENABLE_WEBASSEMBLY
if (linkage()->GetIncomingDescriptor()->IsWasmCapiFunction()) {
@@ -832,10 +833,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (instr->InputAt(0)->IsImmediate()) {
ExternalReference ref = i.InputExternalReference(0);
- __ CallCFunction(ref, num_parameters, 0);
+ __ CallCFunction(ref, num_gp_parameters, num_fp_parameters);
} else {
Register func = i.InputRegister(0);
- __ CallCFunction(func, num_parameters, 0);
+ __ CallCFunction(func, num_gp_parameters, num_fp_parameters);
}
__ Bind(&return_location);
#if V8_ENABLE_WEBASSEMBLY
@@ -871,16 +872,16 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArchBinarySearchSwitch:
AssembleArchBinarySearchSwitch(instr);
break;
- case kArchAbortCSAAssert:
+ case kArchAbortCSADcheck:
DCHECK_EQ(i.InputRegister(0), x1);
{
// We don't actually want to generate a pile of code for this, so just
// claim there is a stack frame, without generating one.
- FrameScope scope(tasm(), StackFrame::NONE);
- __ Call(isolate()->builtins()->code_handle(Builtin::kAbortCSAAssert),
+ FrameScope scope(tasm(), StackFrame::NO_FRAME_TYPE);
+ __ Call(isolate()->builtins()->code_handle(Builtin::kAbortCSADcheck),
RelocInfo::CODE_TARGET);
}
- __ Debug("kArchAbortCSAAssert", 0, BREAK);
+ __ Debug("kArchAbortCSADcheck", 0, BREAK);
unwinding_info_writer_.MarkBlockWillExit();
break;
case kArchDebugBreak:
@@ -2077,6 +2078,28 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputSimd128Register(1).Format(f)); \
break; \
}
+#define SIMD_FCM_L_CASE(Op, ImmOp, RegOp) \
+ case Op: { \
+ VectorFormat f = VectorFormatFillQ(LaneSizeField::decode(opcode)); \
+ if (instr->InputCount() == 1) { \
+ __ Fcm##ImmOp(i.OutputSimd128Register().Format(f), \
+ i.InputSimd128Register(0).Format(f), +0.0); \
+ } else { \
+ __ Fcm##RegOp(i.OutputSimd128Register().Format(f), \
+ i.InputSimd128Register(1).Format(f), \
+ i.InputSimd128Register(0).Format(f)); \
+ } \
+ break; \
+ }
+#define SIMD_FCM_G_CASE(Op, ImmOp) \
+ case Op: { \
+ VectorFormat f = VectorFormatFillQ(LaneSizeField::decode(opcode)); \
+ /* Currently Gt/Ge instructions are only used with zero */ \
+ DCHECK_EQ(instr->InputCount(), 1); \
+ __ Fcm##ImmOp(i.OutputSimd128Register().Format(f), \
+ i.InputSimd128Register(0).Format(f), +0.0); \
+ break; \
+ }
#define SIMD_DESTRUCTIVE_BINOP_CASE(Op, Instr, FORMAT) \
case Op: { \
VRegister dst = i.OutputSimd128Register().V##FORMAT(); \
@@ -2192,29 +2215,23 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Mov(dst, i.InputInt8(1), i.InputSimd128Register(2).Format(f), 0);
break;
}
- SIMD_BINOP_LANE_SIZE_CASE(kArm64FEq, Fcmeq);
+ SIMD_FCM_L_CASE(kArm64FEq, eq, eq);
case kArm64FNe: {
VectorFormat f = VectorFormatFillQ(LaneSizeField::decode(opcode));
VRegister dst = i.OutputSimd128Register().Format(f);
- __ Fcmeq(dst, i.InputSimd128Register(0).Format(f),
- i.InputSimd128Register(1).Format(f));
+ if (instr->InputCount() == 1) {
+ __ Fcmeq(dst, i.InputSimd128Register(0).Format(f), +0.0);
+ } else {
+ __ Fcmeq(dst, i.InputSimd128Register(0).Format(f),
+ i.InputSimd128Register(1).Format(f));
+ }
__ Mvn(dst, dst);
break;
}
- case kArm64FLt: {
- VectorFormat f = VectorFormatFillQ(LaneSizeField::decode(opcode));
- __ Fcmgt(i.OutputSimd128Register().Format(f),
- i.InputSimd128Register(1).Format(f),
- i.InputSimd128Register(0).Format(f));
- break;
- }
- case kArm64FLe: {
- VectorFormat f = VectorFormatFillQ(LaneSizeField::decode(opcode));
- __ Fcmge(i.OutputSimd128Register().Format(f),
- i.InputSimd128Register(1).Format(f),
- i.InputSimd128Register(0).Format(f));
- break;
- }
+ SIMD_FCM_L_CASE(kArm64FLt, lt, gt);
+ SIMD_FCM_L_CASE(kArm64FLe, le, ge);
+ SIMD_FCM_G_CASE(kArm64FGt, gt);
+ SIMD_FCM_G_CASE(kArm64FGe, ge);
SIMD_DESTRUCTIVE_BINOP_CASE(kArm64F64x2Qfma, Fmla, 2D);
SIMD_DESTRUCTIVE_BINOP_CASE(kArm64F64x2Qfms, Fmls, 2D);
case kArm64F64x2Pmin: {
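
The two new macros above consolidate how the ARM64 code generator lowers SIMD float comparisons: with two register inputs, FLt/FLe still come out as FCMGT/FCMGE with the operands swapped (AArch64 only provides register-register FCMEQ/FCMGE/FCMGT), while a single input means comparing against +0.0 with the immediate-zero forms, which is also the only way kArm64FGt/kArm64FGe are reached in this diff. A rough sketch of that dispatch, with plain strings standing in for the macro-assembler calls:

#include <cstdio>

enum class FCmp { Eq, Ne, Lt, Le, Gt, Ge };

// Sketch of the SIMD_FCM_L_CASE / SIMD_FCM_G_CASE selection:
//  - one input  -> compare against +0.0 (immediate-zero FCM* forms),
//  - two inputs -> register form, swapping operands for Lt/Le because
//    AArch64 has no register-register FCMLT/FCMLE.
void EmitFcm(FCmp op, int input_count) {
  const bool vs_zero = (input_count == 1);
  switch (op) {
    case FCmp::Eq:
      std::puts(vs_zero ? "fcmeq v0, v1, #0.0" : "fcmeq v0, v1, v2");
      break;
    case FCmp::Ne:  // fcmeq followed by a bitwise NOT (mvn)
      std::puts(vs_zero ? "fcmeq v0, v1, #0.0 ; mvn v0, v0"
                        : "fcmeq v0, v1, v2 ; mvn v0, v0");
      break;
    case FCmp::Lt:
      std::puts(vs_zero ? "fcmlt v0, v1, #0.0" : "fcmgt v0, v2, v1");
      break;
    case FCmp::Le:
      std::puts(vs_zero ? "fcmle v0, v1, #0.0" : "fcmge v0, v2, v1");
      break;
    case FCmp::Gt:  // only reached with a zero operand in this diff
      std::puts("fcmgt v0, v1, #0.0");
      break;
    case FCmp::Ge:  // only reached with a zero operand in this diff
      std::puts("fcmge v0, v1, #0.0");
      break;
  }
}

int main() {
  EmitFcm(FCmp::Lt, 2);  // register form, operands swapped
  EmitFcm(FCmp::Gt, 1);  // immediate-zero form
}
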
diff --git a/deps/v8/src/compiler/backend/arm64/instruction-codes-arm64.h b/deps/v8/src/compiler/backend/arm64/instruction-codes-arm64.h
index d57203639e..d8ee809918 100644
--- a/deps/v8/src/compiler/backend/arm64/instruction-codes-arm64.h
+++ b/deps/v8/src/compiler/backend/arm64/instruction-codes-arm64.h
@@ -11,337 +11,344 @@ namespace compiler {
// ARM64-specific opcodes that specify which assembly sequence to emit.
// Most opcodes specify a single instruction.
-#define TARGET_ARCH_OPCODE_LIST(V) \
- V(Arm64Add) \
- V(Arm64Add32) \
- V(Arm64And) \
- V(Arm64And32) \
- V(Arm64Bic) \
- V(Arm64Bic32) \
- V(Arm64Clz) \
- V(Arm64Clz32) \
- V(Arm64Cmp) \
- V(Arm64Cmp32) \
- V(Arm64Cmn) \
- V(Arm64Cmn32) \
- V(Arm64Cnt) \
- V(Arm64Cnt32) \
- V(Arm64Cnt64) \
- V(Arm64Tst) \
- V(Arm64Tst32) \
- V(Arm64Or) \
- V(Arm64Or32) \
- V(Arm64Orn) \
- V(Arm64Orn32) \
- V(Arm64Eor) \
- V(Arm64Eor32) \
- V(Arm64Eon) \
- V(Arm64Eon32) \
- V(Arm64Sadalp) \
- V(Arm64Saddlp) \
- V(Arm64Sub) \
- V(Arm64Sub32) \
- V(Arm64Mul) \
- V(Arm64Mul32) \
- V(Arm64Smlal) \
- V(Arm64Smlal2) \
- V(Arm64Smull) \
- V(Arm64Smull2) \
- V(Arm64Uadalp) \
- V(Arm64Uaddlp) \
- V(Arm64Umlal) \
- V(Arm64Umlal2) \
- V(Arm64Umull) \
- V(Arm64Umull2) \
- V(Arm64Madd) \
- V(Arm64Madd32) \
- V(Arm64Msub) \
- V(Arm64Msub32) \
- V(Arm64Mneg) \
- V(Arm64Mneg32) \
- V(Arm64Idiv) \
- V(Arm64Idiv32) \
- V(Arm64Udiv) \
- V(Arm64Udiv32) \
- V(Arm64Imod) \
- V(Arm64Imod32) \
- V(Arm64Umod) \
- V(Arm64Umod32) \
- V(Arm64Not) \
- V(Arm64Not32) \
- V(Arm64Lsl) \
- V(Arm64Lsl32) \
- V(Arm64Lsr) \
- V(Arm64Lsr32) \
- V(Arm64Asr) \
- V(Arm64Asr32) \
- V(Arm64Ror) \
- V(Arm64Ror32) \
- V(Arm64Mov32) \
- V(Arm64Sxtb32) \
- V(Arm64Sxth32) \
- V(Arm64Sxtb) \
- V(Arm64Sxth) \
- V(Arm64Sxtw) \
- V(Arm64Sbfx) \
- V(Arm64Sbfx32) \
- V(Arm64Ubfx) \
- V(Arm64Ubfx32) \
- V(Arm64Ubfiz32) \
- V(Arm64Bfi) \
- V(Arm64Rbit) \
- V(Arm64Rbit32) \
- V(Arm64Rev) \
- V(Arm64Rev32) \
- V(Arm64TestAndBranch32) \
- V(Arm64TestAndBranch) \
- V(Arm64CompareAndBranch32) \
- V(Arm64CompareAndBranch) \
- V(Arm64Claim) \
- V(Arm64Poke) \
- V(Arm64PokePair) \
- V(Arm64Peek) \
- V(Arm64Float32Cmp) \
- V(Arm64Float32Add) \
- V(Arm64Float32Sub) \
- V(Arm64Float32Mul) \
- V(Arm64Float32Div) \
- V(Arm64Float32Abs) \
- V(Arm64Float32Abd) \
- V(Arm64Float32Neg) \
- V(Arm64Float32Sqrt) \
- V(Arm64Float32Fnmul) \
- V(Arm64Float32RoundDown) \
- V(Arm64Float32Max) \
- V(Arm64Float32Min) \
- V(Arm64Float64Cmp) \
- V(Arm64Float64Add) \
- V(Arm64Float64Sub) \
- V(Arm64Float64Mul) \
- V(Arm64Float64Div) \
- V(Arm64Float64Mod) \
- V(Arm64Float64Max) \
- V(Arm64Float64Min) \
- V(Arm64Float64Abs) \
- V(Arm64Float64Abd) \
- V(Arm64Float64Neg) \
- V(Arm64Float64Sqrt) \
- V(Arm64Float64Fnmul) \
- V(Arm64Float64RoundDown) \
- V(Arm64Float32RoundUp) \
- V(Arm64Float64RoundUp) \
- V(Arm64Float64RoundTiesAway) \
- V(Arm64Float32RoundTruncate) \
- V(Arm64Float64RoundTruncate) \
- V(Arm64Float32RoundTiesEven) \
- V(Arm64Float64RoundTiesEven) \
- V(Arm64Float64SilenceNaN) \
- V(Arm64Float32ToFloat64) \
- V(Arm64Float64ToFloat32) \
- V(Arm64Float32ToInt32) \
- V(Arm64Float64ToInt32) \
- V(Arm64Float32ToUint32) \
- V(Arm64Float64ToUint32) \
- V(Arm64Float32ToInt64) \
- V(Arm64Float64ToInt64) \
- V(Arm64Float32ToUint64) \
- V(Arm64Float64ToUint64) \
- V(Arm64Int32ToFloat32) \
- V(Arm64Int32ToFloat64) \
- V(Arm64Int64ToFloat32) \
- V(Arm64Int64ToFloat64) \
- V(Arm64Uint32ToFloat32) \
- V(Arm64Uint32ToFloat64) \
- V(Arm64Uint64ToFloat32) \
- V(Arm64Uint64ToFloat64) \
- V(Arm64Float64ExtractLowWord32) \
- V(Arm64Float64ExtractHighWord32) \
- V(Arm64Float64InsertLowWord32) \
- V(Arm64Float64InsertHighWord32) \
- V(Arm64Float64MoveU64) \
- V(Arm64U64MoveFloat64) \
- V(Arm64LdrS) \
- V(Arm64StrS) \
- V(Arm64LdrD) \
- V(Arm64StrD) \
- V(Arm64LdrQ) \
- V(Arm64StrQ) \
- V(Arm64Ldrb) \
- V(Arm64Ldrsb) \
- V(Arm64LdrsbW) \
- V(Arm64Strb) \
- V(Arm64Ldrh) \
- V(Arm64Ldrsh) \
- V(Arm64LdrshW) \
- V(Arm64Strh) \
- V(Arm64Ldrsw) \
- V(Arm64LdrW) \
- V(Arm64StrW) \
- V(Arm64Ldr) \
- V(Arm64LdrDecompressTaggedSigned) \
- V(Arm64LdrDecompressTaggedPointer) \
- V(Arm64LdrDecompressAnyTagged) \
- V(Arm64LdarDecompressTaggedSigned) \
- V(Arm64LdarDecompressTaggedPointer) \
- V(Arm64LdarDecompressAnyTagged) \
- V(Arm64Str) \
- V(Arm64StrCompressTagged) \
- V(Arm64StlrCompressTagged) \
- V(Arm64DmbIsh) \
- V(Arm64DsbIsb) \
- V(Arm64Sxtl) \
- V(Arm64Sxtl2) \
- V(Arm64Uxtl) \
- V(Arm64Uxtl2) \
- V(Arm64FSplat) \
- V(Arm64FAbs) \
- V(Arm64FSqrt) \
- V(Arm64FNeg) \
- V(Arm64FExtractLane) \
- V(Arm64FReplaceLane) \
- V(Arm64FAdd) \
- V(Arm64FSub) \
- V(Arm64FMul) \
- V(Arm64FMulElement) \
- V(Arm64FDiv) \
- V(Arm64FMin) \
- V(Arm64FMax) \
- V(Arm64FEq) \
- V(Arm64FNe) \
- V(Arm64FLt) \
- V(Arm64FLe) \
- V(Arm64F64x2Qfma) \
- V(Arm64F64x2Qfms) \
- V(Arm64F64x2Pmin) \
- V(Arm64F64x2Pmax) \
- V(Arm64F64x2ConvertLowI32x4S) \
- V(Arm64F64x2ConvertLowI32x4U) \
- V(Arm64F64x2PromoteLowF32x4) \
- V(Arm64F32x4SConvertI32x4) \
- V(Arm64F32x4UConvertI32x4) \
- V(Arm64F32x4RecipApprox) \
- V(Arm64F32x4RecipSqrtApprox) \
- V(Arm64F32x4Qfma) \
- V(Arm64F32x4Qfms) \
- V(Arm64F32x4Pmin) \
- V(Arm64F32x4Pmax) \
- V(Arm64F32x4DemoteF64x2Zero) \
- V(Arm64ISplat) \
- V(Arm64IAbs) \
- V(Arm64INeg) \
- V(Arm64IExtractLane) \
- V(Arm64IReplaceLane) \
- V(Arm64I64x2Shl) \
- V(Arm64I64x2ShrS) \
- V(Arm64IAdd) \
- V(Arm64ISub) \
- V(Arm64I64x2Mul) \
- V(Arm64IEq) \
- V(Arm64INe) \
- V(Arm64IGtS) \
- V(Arm64IGeS) \
- V(Arm64I64x2ShrU) \
- V(Arm64I64x2BitMask) \
- V(Arm64I32x4SConvertF32x4) \
- V(Arm64I32x4Shl) \
- V(Arm64I32x4ShrS) \
- V(Arm64I32x4Mul) \
- V(Arm64Mla) \
- V(Arm64Mls) \
- V(Arm64IMinS) \
- V(Arm64IMaxS) \
- V(Arm64I32x4UConvertF32x4) \
- V(Arm64I32x4ShrU) \
- V(Arm64IMinU) \
- V(Arm64IMaxU) \
- V(Arm64IGtU) \
- V(Arm64IGeU) \
- V(Arm64I32x4BitMask) \
- V(Arm64I32x4DotI16x8S) \
- V(Arm64I32x4TruncSatF64x2SZero) \
- V(Arm64I32x4TruncSatF64x2UZero) \
- V(Arm64IExtractLaneU) \
- V(Arm64IExtractLaneS) \
- V(Arm64I16x8Shl) \
- V(Arm64I16x8ShrS) \
- V(Arm64I16x8SConvertI32x4) \
- V(Arm64IAddSatS) \
- V(Arm64ISubSatS) \
- V(Arm64I16x8Mul) \
- V(Arm64I16x8ShrU) \
- V(Arm64I16x8UConvertI32x4) \
- V(Arm64IAddSatU) \
- V(Arm64ISubSatU) \
- V(Arm64RoundingAverageU) \
- V(Arm64I16x8Q15MulRSatS) \
- V(Arm64I16x8BitMask) \
- V(Arm64I8x16Shl) \
- V(Arm64I8x16ShrS) \
- V(Arm64I8x16SConvertI16x8) \
- V(Arm64I8x16ShrU) \
- V(Arm64I8x16UConvertI16x8) \
- V(Arm64I8x16BitMask) \
- V(Arm64S128Const) \
- V(Arm64S128Zero) \
- V(Arm64S128Dup) \
- V(Arm64S128And) \
- V(Arm64S128Or) \
- V(Arm64S128Xor) \
- V(Arm64S128Not) \
- V(Arm64S128Select) \
- V(Arm64S128AndNot) \
- V(Arm64Ssra) \
- V(Arm64Usra) \
- V(Arm64S32x4ZipLeft) \
- V(Arm64S32x4ZipRight) \
- V(Arm64S32x4UnzipLeft) \
- V(Arm64S32x4UnzipRight) \
- V(Arm64S32x4TransposeLeft) \
- V(Arm64S32x4TransposeRight) \
- V(Arm64S32x4Shuffle) \
- V(Arm64S16x8ZipLeft) \
- V(Arm64S16x8ZipRight) \
- V(Arm64S16x8UnzipLeft) \
- V(Arm64S16x8UnzipRight) \
- V(Arm64S16x8TransposeLeft) \
- V(Arm64S16x8TransposeRight) \
- V(Arm64S8x16ZipLeft) \
- V(Arm64S8x16ZipRight) \
- V(Arm64S8x16UnzipLeft) \
- V(Arm64S8x16UnzipRight) \
- V(Arm64S8x16TransposeLeft) \
- V(Arm64S8x16TransposeRight) \
- V(Arm64S8x16Concat) \
- V(Arm64I8x16Swizzle) \
- V(Arm64I8x16Shuffle) \
- V(Arm64S32x2Reverse) \
- V(Arm64S16x4Reverse) \
- V(Arm64S16x2Reverse) \
- V(Arm64S8x8Reverse) \
- V(Arm64S8x4Reverse) \
- V(Arm64S8x2Reverse) \
- V(Arm64V128AnyTrue) \
- V(Arm64I64x2AllTrue) \
- V(Arm64I32x4AllTrue) \
- V(Arm64I16x8AllTrue) \
- V(Arm64I8x16AllTrue) \
- V(Arm64LoadSplat) \
- V(Arm64LoadLane) \
- V(Arm64StoreLane) \
- V(Arm64S128Load8x8S) \
- V(Arm64S128Load8x8U) \
- V(Arm64S128Load16x4S) \
- V(Arm64S128Load16x4U) \
- V(Arm64S128Load32x2S) \
- V(Arm64S128Load32x2U) \
- V(Arm64Word64AtomicLoadUint64) \
- V(Arm64Word64AtomicStoreWord64) \
- V(Arm64Word64AtomicAddUint64) \
- V(Arm64Word64AtomicSubUint64) \
- V(Arm64Word64AtomicAndUint64) \
- V(Arm64Word64AtomicOrUint64) \
- V(Arm64Word64AtomicXorUint64) \
- V(Arm64Word64AtomicExchangeUint64) \
+
+// Opcodes that support a MemoryAccessMode.
+#define TARGET_ARCH_OPCODE_WITH_MEMORY_ACCESS_MODE_LIST(V) \
+ V(Arm64Ldr) \
+ V(Arm64Ldrb) \
+ V(Arm64LdrD) \
+ V(Arm64Ldrh) \
+ V(Arm64LdrQ) \
+ V(Arm64LdrS) \
+ V(Arm64Ldrsb) \
+ V(Arm64LdrsbW) \
+ V(Arm64Ldrsh) \
+ V(Arm64LdrshW) \
+ V(Arm64Ldrsw) \
+ V(Arm64LdrW) \
+ V(Arm64LoadLane) \
+ V(Arm64LoadSplat) \
+ V(Arm64S128Load16x4S) \
+ V(Arm64S128Load16x4U) \
+ V(Arm64S128Load32x2S) \
+ V(Arm64S128Load32x2U) \
+ V(Arm64S128Load8x8S) \
+ V(Arm64S128Load8x8U) \
+ V(Arm64StoreLane) \
+ V(Arm64Str) \
+ V(Arm64Strb) \
+ V(Arm64StrD) \
+ V(Arm64Strh) \
+ V(Arm64StrQ) \
+ V(Arm64StrS) \
+ V(Arm64StrW)
+
+#define TARGET_ARCH_OPCODE_LIST(V) \
+ TARGET_ARCH_OPCODE_WITH_MEMORY_ACCESS_MODE_LIST(V) \
+ V(Arm64Add) \
+ V(Arm64Add32) \
+ V(Arm64And) \
+ V(Arm64And32) \
+ V(Arm64Bic) \
+ V(Arm64Bic32) \
+ V(Arm64Clz) \
+ V(Arm64Clz32) \
+ V(Arm64Cmp) \
+ V(Arm64Cmp32) \
+ V(Arm64Cmn) \
+ V(Arm64Cmn32) \
+ V(Arm64Cnt) \
+ V(Arm64Cnt32) \
+ V(Arm64Cnt64) \
+ V(Arm64Tst) \
+ V(Arm64Tst32) \
+ V(Arm64Or) \
+ V(Arm64Or32) \
+ V(Arm64Orn) \
+ V(Arm64Orn32) \
+ V(Arm64Eor) \
+ V(Arm64Eor32) \
+ V(Arm64Eon) \
+ V(Arm64Eon32) \
+ V(Arm64Sadalp) \
+ V(Arm64Saddlp) \
+ V(Arm64Sub) \
+ V(Arm64Sub32) \
+ V(Arm64Mul) \
+ V(Arm64Mul32) \
+ V(Arm64Smlal) \
+ V(Arm64Smlal2) \
+ V(Arm64Smull) \
+ V(Arm64Smull2) \
+ V(Arm64Uadalp) \
+ V(Arm64Uaddlp) \
+ V(Arm64Umlal) \
+ V(Arm64Umlal2) \
+ V(Arm64Umull) \
+ V(Arm64Umull2) \
+ V(Arm64Madd) \
+ V(Arm64Madd32) \
+ V(Arm64Msub) \
+ V(Arm64Msub32) \
+ V(Arm64Mneg) \
+ V(Arm64Mneg32) \
+ V(Arm64Idiv) \
+ V(Arm64Idiv32) \
+ V(Arm64Udiv) \
+ V(Arm64Udiv32) \
+ V(Arm64Imod) \
+ V(Arm64Imod32) \
+ V(Arm64Umod) \
+ V(Arm64Umod32) \
+ V(Arm64Not) \
+ V(Arm64Not32) \
+ V(Arm64Lsl) \
+ V(Arm64Lsl32) \
+ V(Arm64Lsr) \
+ V(Arm64Lsr32) \
+ V(Arm64Asr) \
+ V(Arm64Asr32) \
+ V(Arm64Ror) \
+ V(Arm64Ror32) \
+ V(Arm64Mov32) \
+ V(Arm64Sxtb32) \
+ V(Arm64Sxth32) \
+ V(Arm64Sxtb) \
+ V(Arm64Sxth) \
+ V(Arm64Sxtw) \
+ V(Arm64Sbfx) \
+ V(Arm64Sbfx32) \
+ V(Arm64Ubfx) \
+ V(Arm64Ubfx32) \
+ V(Arm64Ubfiz32) \
+ V(Arm64Bfi) \
+ V(Arm64Rbit) \
+ V(Arm64Rbit32) \
+ V(Arm64Rev) \
+ V(Arm64Rev32) \
+ V(Arm64TestAndBranch32) \
+ V(Arm64TestAndBranch) \
+ V(Arm64CompareAndBranch32) \
+ V(Arm64CompareAndBranch) \
+ V(Arm64Claim) \
+ V(Arm64Poke) \
+ V(Arm64PokePair) \
+ V(Arm64Peek) \
+ V(Arm64Float32Cmp) \
+ V(Arm64Float32Add) \
+ V(Arm64Float32Sub) \
+ V(Arm64Float32Mul) \
+ V(Arm64Float32Div) \
+ V(Arm64Float32Abs) \
+ V(Arm64Float32Abd) \
+ V(Arm64Float32Neg) \
+ V(Arm64Float32Sqrt) \
+ V(Arm64Float32Fnmul) \
+ V(Arm64Float32RoundDown) \
+ V(Arm64Float32Max) \
+ V(Arm64Float32Min) \
+ V(Arm64Float64Cmp) \
+ V(Arm64Float64Add) \
+ V(Arm64Float64Sub) \
+ V(Arm64Float64Mul) \
+ V(Arm64Float64Div) \
+ V(Arm64Float64Mod) \
+ V(Arm64Float64Max) \
+ V(Arm64Float64Min) \
+ V(Arm64Float64Abs) \
+ V(Arm64Float64Abd) \
+ V(Arm64Float64Neg) \
+ V(Arm64Float64Sqrt) \
+ V(Arm64Float64Fnmul) \
+ V(Arm64Float64RoundDown) \
+ V(Arm64Float32RoundUp) \
+ V(Arm64Float64RoundUp) \
+ V(Arm64Float64RoundTiesAway) \
+ V(Arm64Float32RoundTruncate) \
+ V(Arm64Float64RoundTruncate) \
+ V(Arm64Float32RoundTiesEven) \
+ V(Arm64Float64RoundTiesEven) \
+ V(Arm64Float64SilenceNaN) \
+ V(Arm64Float32ToFloat64) \
+ V(Arm64Float64ToFloat32) \
+ V(Arm64Float32ToInt32) \
+ V(Arm64Float64ToInt32) \
+ V(Arm64Float32ToUint32) \
+ V(Arm64Float64ToUint32) \
+ V(Arm64Float32ToInt64) \
+ V(Arm64Float64ToInt64) \
+ V(Arm64Float32ToUint64) \
+ V(Arm64Float64ToUint64) \
+ V(Arm64Int32ToFloat32) \
+ V(Arm64Int32ToFloat64) \
+ V(Arm64Int64ToFloat32) \
+ V(Arm64Int64ToFloat64) \
+ V(Arm64Uint32ToFloat32) \
+ V(Arm64Uint32ToFloat64) \
+ V(Arm64Uint64ToFloat32) \
+ V(Arm64Uint64ToFloat64) \
+ V(Arm64Float64ExtractLowWord32) \
+ V(Arm64Float64ExtractHighWord32) \
+ V(Arm64Float64InsertLowWord32) \
+ V(Arm64Float64InsertHighWord32) \
+ V(Arm64Float64MoveU64) \
+ V(Arm64U64MoveFloat64) \
+ V(Arm64LdrDecompressTaggedSigned) \
+ V(Arm64LdrDecompressTaggedPointer) \
+ V(Arm64LdrDecompressAnyTagged) \
+ V(Arm64LdarDecompressTaggedSigned) \
+ V(Arm64LdarDecompressTaggedPointer) \
+ V(Arm64LdarDecompressAnyTagged) \
+ V(Arm64StrCompressTagged) \
+ V(Arm64StlrCompressTagged) \
+ V(Arm64DmbIsh) \
+ V(Arm64DsbIsb) \
+ V(Arm64Sxtl) \
+ V(Arm64Sxtl2) \
+ V(Arm64Uxtl) \
+ V(Arm64Uxtl2) \
+ V(Arm64FSplat) \
+ V(Arm64FAbs) \
+ V(Arm64FSqrt) \
+ V(Arm64FNeg) \
+ V(Arm64FExtractLane) \
+ V(Arm64FReplaceLane) \
+ V(Arm64FAdd) \
+ V(Arm64FSub) \
+ V(Arm64FMul) \
+ V(Arm64FMulElement) \
+ V(Arm64FDiv) \
+ V(Arm64FMin) \
+ V(Arm64FMax) \
+ V(Arm64FEq) \
+ V(Arm64FNe) \
+ V(Arm64FLt) \
+ V(Arm64FLe) \
+ V(Arm64FGt) \
+ V(Arm64FGe) \
+ V(Arm64F64x2Qfma) \
+ V(Arm64F64x2Qfms) \
+ V(Arm64F64x2Pmin) \
+ V(Arm64F64x2Pmax) \
+ V(Arm64F64x2ConvertLowI32x4S) \
+ V(Arm64F64x2ConvertLowI32x4U) \
+ V(Arm64F64x2PromoteLowF32x4) \
+ V(Arm64F32x4SConvertI32x4) \
+ V(Arm64F32x4UConvertI32x4) \
+ V(Arm64F32x4RecipApprox) \
+ V(Arm64F32x4RecipSqrtApprox) \
+ V(Arm64F32x4Qfma) \
+ V(Arm64F32x4Qfms) \
+ V(Arm64F32x4Pmin) \
+ V(Arm64F32x4Pmax) \
+ V(Arm64F32x4DemoteF64x2Zero) \
+ V(Arm64ISplat) \
+ V(Arm64IAbs) \
+ V(Arm64INeg) \
+ V(Arm64IExtractLane) \
+ V(Arm64IReplaceLane) \
+ V(Arm64I64x2Shl) \
+ V(Arm64I64x2ShrS) \
+ V(Arm64IAdd) \
+ V(Arm64ISub) \
+ V(Arm64I64x2Mul) \
+ V(Arm64IEq) \
+ V(Arm64INe) \
+ V(Arm64IGtS) \
+ V(Arm64IGeS) \
+ V(Arm64I64x2ShrU) \
+ V(Arm64I64x2BitMask) \
+ V(Arm64I32x4SConvertF32x4) \
+ V(Arm64I32x4Shl) \
+ V(Arm64I32x4ShrS) \
+ V(Arm64I32x4Mul) \
+ V(Arm64Mla) \
+ V(Arm64Mls) \
+ V(Arm64IMinS) \
+ V(Arm64IMaxS) \
+ V(Arm64I32x4UConvertF32x4) \
+ V(Arm64I32x4ShrU) \
+ V(Arm64IMinU) \
+ V(Arm64IMaxU) \
+ V(Arm64IGtU) \
+ V(Arm64IGeU) \
+ V(Arm64I32x4BitMask) \
+ V(Arm64I32x4DotI16x8S) \
+ V(Arm64I32x4TruncSatF64x2SZero) \
+ V(Arm64I32x4TruncSatF64x2UZero) \
+ V(Arm64IExtractLaneU) \
+ V(Arm64IExtractLaneS) \
+ V(Arm64I16x8Shl) \
+ V(Arm64I16x8ShrS) \
+ V(Arm64I16x8SConvertI32x4) \
+ V(Arm64IAddSatS) \
+ V(Arm64ISubSatS) \
+ V(Arm64I16x8Mul) \
+ V(Arm64I16x8ShrU) \
+ V(Arm64I16x8UConvertI32x4) \
+ V(Arm64IAddSatU) \
+ V(Arm64ISubSatU) \
+ V(Arm64RoundingAverageU) \
+ V(Arm64I16x8Q15MulRSatS) \
+ V(Arm64I16x8BitMask) \
+ V(Arm64I8x16Shl) \
+ V(Arm64I8x16ShrS) \
+ V(Arm64I8x16SConvertI16x8) \
+ V(Arm64I8x16ShrU) \
+ V(Arm64I8x16UConvertI16x8) \
+ V(Arm64I8x16BitMask) \
+ V(Arm64S128Const) \
+ V(Arm64S128Zero) \
+ V(Arm64S128Dup) \
+ V(Arm64S128And) \
+ V(Arm64S128Or) \
+ V(Arm64S128Xor) \
+ V(Arm64S128Not) \
+ V(Arm64S128Select) \
+ V(Arm64S128AndNot) \
+ V(Arm64Ssra) \
+ V(Arm64Usra) \
+ V(Arm64S32x4ZipLeft) \
+ V(Arm64S32x4ZipRight) \
+ V(Arm64S32x4UnzipLeft) \
+ V(Arm64S32x4UnzipRight) \
+ V(Arm64S32x4TransposeLeft) \
+ V(Arm64S32x4TransposeRight) \
+ V(Arm64S32x4Shuffle) \
+ V(Arm64S16x8ZipLeft) \
+ V(Arm64S16x8ZipRight) \
+ V(Arm64S16x8UnzipLeft) \
+ V(Arm64S16x8UnzipRight) \
+ V(Arm64S16x8TransposeLeft) \
+ V(Arm64S16x8TransposeRight) \
+ V(Arm64S8x16ZipLeft) \
+ V(Arm64S8x16ZipRight) \
+ V(Arm64S8x16UnzipLeft) \
+ V(Arm64S8x16UnzipRight) \
+ V(Arm64S8x16TransposeLeft) \
+ V(Arm64S8x16TransposeRight) \
+ V(Arm64S8x16Concat) \
+ V(Arm64I8x16Swizzle) \
+ V(Arm64I8x16Shuffle) \
+ V(Arm64S32x2Reverse) \
+ V(Arm64S16x4Reverse) \
+ V(Arm64S16x2Reverse) \
+ V(Arm64S8x8Reverse) \
+ V(Arm64S8x4Reverse) \
+ V(Arm64S8x2Reverse) \
+ V(Arm64V128AnyTrue) \
+ V(Arm64I64x2AllTrue) \
+ V(Arm64I32x4AllTrue) \
+ V(Arm64I16x8AllTrue) \
+ V(Arm64I8x16AllTrue) \
+ V(Arm64Word64AtomicLoadUint64) \
+ V(Arm64Word64AtomicStoreWord64) \
+ V(Arm64Word64AtomicAddUint64) \
+ V(Arm64Word64AtomicSubUint64) \
+ V(Arm64Word64AtomicAndUint64) \
+ V(Arm64Word64AtomicOrUint64) \
+ V(Arm64Word64AtomicXorUint64) \
+ V(Arm64Word64AtomicExchangeUint64) \
V(Arm64Word64AtomicCompareExchangeUint64)
// Addressing modes represent the "shape" of inputs to an instruction.
diff --git a/deps/v8/src/compiler/backend/arm64/instruction-scheduler-arm64.cc b/deps/v8/src/compiler/backend/arm64/instruction-scheduler-arm64.cc
index bb16b76aaf..4d123050ec 100644
--- a/deps/v8/src/compiler/backend/arm64/instruction-scheduler-arm64.cc
+++ b/deps/v8/src/compiler/backend/arm64/instruction-scheduler-arm64.cc
@@ -170,6 +170,8 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArm64FNe:
case kArm64FLt:
case kArm64FLe:
+ case kArm64FGt:
+ case kArm64FGe:
case kArm64F64x2Qfma:
case kArm64F64x2Qfms:
case kArm64F64x2Pmin:
diff --git a/deps/v8/src/compiler/backend/arm64/instruction-selector-arm64.cc b/deps/v8/src/compiler/backend/arm64/instruction-selector-arm64.cc
index d102ecabb2..5dec14b998 100644
--- a/deps/v8/src/compiler/backend/arm64/instruction-selector-arm64.cc
+++ b/deps/v8/src/compiler/backend/arm64/instruction-selector-arm64.cc
@@ -579,9 +579,9 @@ void InstructionSelector::VisitStackSlot(Node* node) {
sequence()->AddImmediate(Constant(slot)), 0, nullptr);
}
-void InstructionSelector::VisitAbortCSAAssert(Node* node) {
+void InstructionSelector::VisitAbortCSADcheck(Node* node) {
Arm64OperandGenerator g(this);
- Emit(kArchAbortCSAAssert, g.NoOutput(), g.UseFixed(node->InputAt(0), x1));
+ Emit(kArchAbortCSADcheck, g.NoOutput(), g.UseFixed(node->InputAt(0), x1));
}
void EmitLoad(InstructionSelector* selector, Node* node, InstructionCode opcode,
@@ -3538,19 +3538,11 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
V(F64x2Add, kArm64FAdd, 64) \
V(F64x2Sub, kArm64FSub, 64) \
V(F64x2Div, kArm64FDiv, 64) \
- V(F64x2Eq, kArm64FEq, 64) \
- V(F64x2Ne, kArm64FNe, 64) \
- V(F64x2Lt, kArm64FLt, 64) \
- V(F64x2Le, kArm64FLe, 64) \
V(F32x4Min, kArm64FMin, 32) \
V(F32x4Max, kArm64FMax, 32) \
V(F32x4Add, kArm64FAdd, 32) \
V(F32x4Sub, kArm64FSub, 32) \
V(F32x4Div, kArm64FDiv, 32) \
- V(F32x4Eq, kArm64FEq, 32) \
- V(F32x4Ne, kArm64FNe, 32) \
- V(F32x4Lt, kArm64FLt, 32) \
- V(F32x4Le, kArm64FLe, 32) \
V(I64x2Sub, kArm64ISub, 64) \
V(I64x2Eq, kArm64IEq, 64) \
V(I64x2Ne, kArm64INe, 64) \
@@ -3951,6 +3943,44 @@ VISIT_SIMD_SUB(I32x4, 32)
VISIT_SIMD_SUB(I16x8, 16)
#undef VISIT_SIMD_SUB
+namespace {
+bool isSimdZero(Arm64OperandGenerator& g, Node* node) {
+ auto m = V128ConstMatcher(node);
+ if (m.HasResolvedValue()) {
+ auto imms = m.ResolvedValue().immediate();
+ return (std::all_of(imms.begin(), imms.end(), std::logical_not<uint8_t>()));
+ }
+ return node->opcode() == IrOpcode::kS128Zero;
+}
+} // namespace
+
+#define VISIT_SIMD_FCM(Type, CmOp, CmOpposite, LaneSize) \
+ void InstructionSelector::Visit##Type##CmOp(Node* node) { \
+ Arm64OperandGenerator g(this); \
+ Node* left = node->InputAt(0); \
+ Node* right = node->InputAt(1); \
+ if (isSimdZero(g, left)) { \
+ Emit(kArm64F##CmOpposite | LaneSizeField::encode(LaneSize), \
+ g.DefineAsRegister(node), g.UseRegister(right)); \
+ return; \
+ } else if (isSimdZero(g, right)) { \
+ Emit(kArm64F##CmOp | LaneSizeField::encode(LaneSize), \
+ g.DefineAsRegister(node), g.UseRegister(left)); \
+ return; \
+ } \
+ VisitRRR(this, kArm64F##CmOp | LaneSizeField::encode(LaneSize), node); \
+ }
+
+VISIT_SIMD_FCM(F64x2, Eq, Eq, 64)
+VISIT_SIMD_FCM(F64x2, Ne, Ne, 64)
+VISIT_SIMD_FCM(F64x2, Lt, Gt, 64)
+VISIT_SIMD_FCM(F64x2, Le, Ge, 64)
+VISIT_SIMD_FCM(F32x4, Eq, Eq, 32)
+VISIT_SIMD_FCM(F32x4, Ne, Ne, 32)
+VISIT_SIMD_FCM(F32x4, Lt, Gt, 32)
+VISIT_SIMD_FCM(F32x4, Le, Ge, 32)
+#undef VISIT_SIMD_FCM
+
void InstructionSelector::VisitS128Select(Node* node) {
Arm64OperandGenerator g(this);
Emit(kArm64S128Select, g.DefineSameAsFirst(node),
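
The VISIT_SIMD_FCM macro just above matches a zero vector on either side of the comparison (an all-zero V128Const or an S128Zero node) and, if found, emits the single-input opcode, flipping Lt/Le to Gt/Ge when the zero sits on the left (0 < x is x > 0); that flip is what the new kArm64FGt/kArm64FGe opcodes exist for. A hedged, standalone sketch of that decision, where the types and the returned pair are stand-ins rather than V8's API:

#include <iostream>
#include <string>

// Stand-in for the VISIT_SIMD_FCM selection logic: given a float SIMD
// comparison and which side (if any) is a constant zero vector, pick the
// opcode to emit and the operand that survives. Flipping Lt->Gt / Le->Ge
// mirrors 0 < x  <=>  x > 0; Eq and Ne are their own opposites.
struct Selection {
  std::string opcode;
  const char* operands;
};

Selection SelectFcm(const std::string& cmp, bool left_is_zero,
                    bool right_is_zero) {
  auto opposite = [](const std::string& c) -> std::string {
    if (c == "Lt") return "Gt";
    if (c == "Le") return "Ge";
    return c;
  };
  if (left_is_zero) return {"kArm64F" + opposite(cmp), "right input only"};
  if (right_is_zero) return {"kArm64F" + cmp, "left input only"};
  return {"kArm64F" + cmp, "both inputs (register form)"};
}

int main() {
  Selection s = SelectFcm("Lt", /*left_is_zero=*/true, /*right_is_zero=*/false);
  std::cout << s.opcode << ": " << s.operands << '\n';  // kArm64FGt: right input only
}
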
diff --git a/deps/v8/src/compiler/backend/ia32/code-generator-ia32.cc b/deps/v8/src/compiler/backend/ia32/code-generator-ia32.cc
index 1cd78b4359..da6a9a81e3 100644
--- a/deps/v8/src/compiler/backend/ia32/code-generator-ia32.cc
+++ b/deps/v8/src/compiler/backend/ia32/code-generator-ia32.cc
@@ -791,8 +791,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArchPrepareCallCFunction: {
// Frame alignment requires using FP-relative frame addressing.
frame_access_state()->SetFrameAccessToFP();
- int const num_parameters = MiscField::decode(instr->opcode());
- __ PrepareCallCFunction(num_parameters, i.TempRegister(0));
+ int const num_gp_parameters = ParamField::decode(instr->opcode());
+ int const num_fp_parameters = FPParamField::decode(instr->opcode());
+ __ PrepareCallCFunction(num_gp_parameters + num_fp_parameters,
+ i.TempRegister(0));
break;
}
case kArchSaveCallerRegisters: {
@@ -887,13 +889,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArchComment:
__ RecordComment(reinterpret_cast<const char*>(i.InputInt32(0)));
break;
- case kArchAbortCSAAssert:
+ case kArchAbortCSADcheck:
DCHECK(i.InputRegister(0) == edx);
{
// We don't actually want to generate a pile of code for this, so just
// claim there is a stack frame, without generating one.
- FrameScope scope(tasm(), StackFrame::NONE);
- __ Call(isolate()->builtins()->code_handle(Builtin::kAbortCSAAssert),
+ FrameScope scope(tasm(), StackFrame::NO_FRAME_TYPE);
+ __ Call(isolate()->builtins()->code_handle(Builtin::kAbortCSADcheck),
RelocInfo::CODE_TARGET);
}
__ int3();
@@ -1255,79 +1257,79 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kIA32LFence:
__ lfence();
break;
- case kSSEFloat32Cmp:
- __ ucomiss(i.InputDoubleRegister(0), i.InputOperand(1));
+ case kIA32Float32Cmp:
+ __ Ucomiss(i.InputDoubleRegister(0), i.InputOperand(1));
break;
- case kSSEFloat32Sqrt:
- __ sqrtss(i.OutputDoubleRegister(), i.InputOperand(0));
+ case kIA32Float32Sqrt:
+ __ Sqrtss(i.OutputDoubleRegister(), i.InputOperand(0));
break;
- case kSSEFloat32Round: {
+ case kIA32Float32Round: {
CpuFeatureScope sse_scope(tasm(), SSE4_1);
RoundingMode const mode =
static_cast<RoundingMode>(MiscField::decode(instr->opcode()));
- __ roundss(i.OutputDoubleRegister(), i.InputDoubleRegister(0), mode);
+ __ Roundss(i.OutputDoubleRegister(), i.InputDoubleRegister(0), mode);
break;
}
- case kSSEFloat64Cmp:
- __ ucomisd(i.InputDoubleRegister(0), i.InputOperand(1));
+ case kIA32Float64Cmp:
+ __ Ucomisd(i.InputDoubleRegister(0), i.InputOperand(1));
break;
- case kSSEFloat32Max: {
+ case kIA32Float32Max: {
Label compare_swap, done_compare;
if (instr->InputAt(1)->IsFPRegister()) {
- __ ucomiss(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
+ __ Ucomiss(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
} else {
- __ ucomiss(i.InputDoubleRegister(0), i.InputOperand(1));
+ __ Ucomiss(i.InputDoubleRegister(0), i.InputOperand(1));
}
auto ool =
zone()->New<OutOfLineLoadFloat32NaN>(this, i.OutputDoubleRegister());
__ j(parity_even, ool->entry());
__ j(above, &done_compare, Label::kNear);
__ j(below, &compare_swap, Label::kNear);
- __ movmskps(i.TempRegister(0), i.InputDoubleRegister(0));
+ __ Movmskps(i.TempRegister(0), i.InputDoubleRegister(0));
__ test(i.TempRegister(0), Immediate(1));
__ j(zero, &done_compare, Label::kNear);
__ bind(&compare_swap);
if (instr->InputAt(1)->IsFPRegister()) {
- __ movss(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
+ __ Movss(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
} else {
- __ movss(i.InputDoubleRegister(0), i.InputOperand(1));
+ __ Movss(i.InputDoubleRegister(0), i.InputOperand(1));
}
__ bind(&done_compare);
__ bind(ool->exit());
break;
}
- case kSSEFloat64Max: {
+ case kIA32Float64Max: {
Label compare_swap, done_compare;
if (instr->InputAt(1)->IsFPRegister()) {
- __ ucomisd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
+ __ Ucomisd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
} else {
- __ ucomisd(i.InputDoubleRegister(0), i.InputOperand(1));
+ __ Ucomisd(i.InputDoubleRegister(0), i.InputOperand(1));
}
auto ool =
zone()->New<OutOfLineLoadFloat64NaN>(this, i.OutputDoubleRegister());
__ j(parity_even, ool->entry());
__ j(above, &done_compare, Label::kNear);
__ j(below, &compare_swap, Label::kNear);
- __ movmskpd(i.TempRegister(0), i.InputDoubleRegister(0));
+ __ Movmskpd(i.TempRegister(0), i.InputDoubleRegister(0));
__ test(i.TempRegister(0), Immediate(1));
__ j(zero, &done_compare, Label::kNear);
__ bind(&compare_swap);
if (instr->InputAt(1)->IsFPRegister()) {
- __ movsd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
+ __ Movsd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
} else {
- __ movsd(i.InputDoubleRegister(0), i.InputOperand(1));
+ __ Movsd(i.InputDoubleRegister(0), i.InputOperand(1));
}
__ bind(&done_compare);
__ bind(ool->exit());
break;
}
- case kSSEFloat32Min: {
+ case kIA32Float32Min: {
Label compare_swap, done_compare;
if (instr->InputAt(1)->IsFPRegister()) {
- __ ucomiss(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
+ __ Ucomiss(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
} else {
- __ ucomiss(i.InputDoubleRegister(0), i.InputOperand(1));
+ __ Ucomiss(i.InputDoubleRegister(0), i.InputOperand(1));
}
auto ool =
zone()->New<OutOfLineLoadFloat32NaN>(this, i.OutputDoubleRegister());
@@ -1335,29 +1337,29 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ j(below, &done_compare, Label::kNear);
__ j(above, &compare_swap, Label::kNear);
if (instr->InputAt(1)->IsFPRegister()) {
- __ movmskps(i.TempRegister(0), i.InputDoubleRegister(1));
+ __ Movmskps(i.TempRegister(0), i.InputDoubleRegister(1));
} else {
- __ movss(kScratchDoubleReg, i.InputOperand(1));
- __ movmskps(i.TempRegister(0), kScratchDoubleReg);
+ __ Movss(kScratchDoubleReg, i.InputOperand(1));
+ __ Movmskps(i.TempRegister(0), kScratchDoubleReg);
}
__ test(i.TempRegister(0), Immediate(1));
__ j(zero, &done_compare, Label::kNear);
__ bind(&compare_swap);
if (instr->InputAt(1)->IsFPRegister()) {
- __ movss(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
+ __ Movss(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
} else {
- __ movss(i.InputDoubleRegister(0), i.InputOperand(1));
+ __ Movss(i.InputDoubleRegister(0), i.InputOperand(1));
}
__ bind(&done_compare);
__ bind(ool->exit());
break;
}
- case kSSEFloat64Min: {
+ case kIA32Float64Min: {
Label compare_swap, done_compare;
if (instr->InputAt(1)->IsFPRegister()) {
- __ ucomisd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
+ __ Ucomisd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
} else {
- __ ucomisd(i.InputDoubleRegister(0), i.InputOperand(1));
+ __ Ucomisd(i.InputDoubleRegister(0), i.InputOperand(1));
}
auto ool =
zone()->New<OutOfLineLoadFloat64NaN>(this, i.OutputDoubleRegister());
@@ -1365,32 +1367,32 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ j(below, &done_compare, Label::kNear);
__ j(above, &compare_swap, Label::kNear);
if (instr->InputAt(1)->IsFPRegister()) {
- __ movmskpd(i.TempRegister(0), i.InputDoubleRegister(1));
+ __ Movmskpd(i.TempRegister(0), i.InputDoubleRegister(1));
} else {
- __ movsd(kScratchDoubleReg, i.InputOperand(1));
- __ movmskpd(i.TempRegister(0), kScratchDoubleReg);
+ __ Movsd(kScratchDoubleReg, i.InputOperand(1));
+ __ Movmskpd(i.TempRegister(0), kScratchDoubleReg);
}
__ test(i.TempRegister(0), Immediate(1));
__ j(zero, &done_compare, Label::kNear);
__ bind(&compare_swap);
if (instr->InputAt(1)->IsFPRegister()) {
- __ movsd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
+ __ Movsd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
} else {
- __ movsd(i.InputDoubleRegister(0), i.InputOperand(1));
+ __ Movsd(i.InputDoubleRegister(0), i.InputOperand(1));
}
__ bind(&done_compare);
__ bind(ool->exit());
break;
}
- case kSSEFloat64Mod: {
+ case kIA32Float64Mod: {
Register tmp = i.TempRegister(1);
__ mov(tmp, esp);
__ AllocateStackSpace(kDoubleSize);
__ and_(esp, -8); // align to 8 byte boundary.
// Move values to st(0) and st(1).
- __ movsd(Operand(esp, 0), i.InputDoubleRegister(1));
+ __ Movsd(Operand(esp, 0), i.InputDoubleRegister(1));
__ fld_d(Operand(esp, 0));
- __ movsd(Operand(esp, 0), i.InputDoubleRegister(0));
+ __ Movsd(Operand(esp, 0), i.InputDoubleRegister(0));
__ fld_d(Operand(esp, 0));
// Loop while fprem isn't done.
Label mod_loop;
@@ -1406,76 +1408,77 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
// Move output to stack and clean up.
__ fstp(1);
__ fstp_d(Operand(esp, 0));
- __ movsd(i.OutputDoubleRegister(), Operand(esp, 0));
+ __ Movsd(i.OutputDoubleRegister(), Operand(esp, 0));
__ mov(esp, tmp);
break;
}
- case kSSEFloat64Sqrt:
- __ sqrtsd(i.OutputDoubleRegister(), i.InputOperand(0));
+ case kIA32Float64Sqrt:
+ __ Sqrtsd(i.OutputDoubleRegister(), i.InputOperand(0));
break;
- case kSSEFloat64Round: {
- CpuFeatureScope sse_scope(tasm(), SSE4_1);
+ case kIA32Float64Round: {
RoundingMode const mode =
static_cast<RoundingMode>(MiscField::decode(instr->opcode()));
- __ roundsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0), mode);
+ __ Roundsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0), mode);
break;
}
- case kSSEFloat32ToFloat64:
- __ cvtss2sd(i.OutputDoubleRegister(), i.InputOperand(0));
+ case kIA32Float32ToFloat64:
+ __ Cvtss2sd(i.OutputDoubleRegister(), i.InputOperand(0));
break;
- case kSSEFloat64ToFloat32:
- __ cvtsd2ss(i.OutputDoubleRegister(), i.InputOperand(0));
+ case kIA32Float64ToFloat32:
+ __ Cvtsd2ss(i.OutputDoubleRegister(), i.InputOperand(0));
break;
- case kSSEFloat32ToInt32:
- __ cvttss2si(i.OutputRegister(), i.InputOperand(0));
+ case kIA32Float32ToInt32:
+ __ Cvttss2si(i.OutputRegister(), i.InputOperand(0));
break;
- case kSSEFloat32ToUint32:
+ case kIA32Float32ToUint32:
__ Cvttss2ui(i.OutputRegister(), i.InputOperand(0),
i.TempSimd128Register(0));
break;
- case kSSEFloat64ToInt32:
- __ cvttsd2si(i.OutputRegister(), i.InputOperand(0));
+ case kIA32Float64ToInt32:
+ __ Cvttsd2si(i.OutputRegister(), i.InputOperand(0));
break;
- case kSSEFloat64ToUint32:
+ case kIA32Float64ToUint32:
__ Cvttsd2ui(i.OutputRegister(), i.InputOperand(0),
i.TempSimd128Register(0));
break;
case kSSEInt32ToFloat32:
+ // Calling Cvtsi2ss (which does a xor) regresses some benchmarks.
__ cvtsi2ss(i.OutputDoubleRegister(), i.InputOperand(0));
break;
- case kSSEUint32ToFloat32:
+ case kIA32Uint32ToFloat32:
__ Cvtui2ss(i.OutputDoubleRegister(), i.InputOperand(0),
i.TempRegister(0));
break;
case kSSEInt32ToFloat64:
+ // Calling Cvtsi2sd (which does a xor) regresses some benchmarks.
__ cvtsi2sd(i.OutputDoubleRegister(), i.InputOperand(0));
break;
- case kSSEUint32ToFloat64:
+ case kIA32Uint32ToFloat64:
__ Cvtui2sd(i.OutputDoubleRegister(), i.InputOperand(0),
i.TempRegister(0));
break;
- case kSSEFloat64ExtractLowWord32:
+ case kIA32Float64ExtractLowWord32:
if (instr->InputAt(0)->IsFPStackSlot()) {
__ mov(i.OutputRegister(), i.InputOperand(0));
} else {
- __ movd(i.OutputRegister(), i.InputDoubleRegister(0));
+ __ Movd(i.OutputRegister(), i.InputDoubleRegister(0));
}
break;
- case kSSEFloat64ExtractHighWord32:
+ case kIA32Float64ExtractHighWord32:
if (instr->InputAt(0)->IsFPStackSlot()) {
__ mov(i.OutputRegister(), i.InputOperand(0, kDoubleSize / 2));
} else {
__ Pextrd(i.OutputRegister(), i.InputDoubleRegister(0), 1);
}
break;
- case kSSEFloat64InsertLowWord32:
+ case kIA32Float64InsertLowWord32:
__ Pinsrd(i.OutputDoubleRegister(), i.InputOperand(1), 0);
break;
- case kSSEFloat64InsertHighWord32:
+ case kIA32Float64InsertHighWord32:
__ Pinsrd(i.OutputDoubleRegister(), i.InputOperand(1), 1);
break;
- case kSSEFloat64LoadLowWord32:
- __ movd(i.OutputDoubleRegister(), i.InputOperand(0));
+ case kIA32Float64LoadLowWord32:
+ __ Movd(i.OutputDoubleRegister(), i.InputOperand(0));
break;
case kFloat32Add: {
__ Addss(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
@@ -1524,64 +1527,28 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kFloat32Abs: {
- // TODO(bmeurer): Use RIP relative 128-bit constants.
- __ Pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
- __ Psrlq(kScratchDoubleReg, byte{33});
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vandps(i.OutputDoubleRegister(), kScratchDoubleReg,
- i.InputOperand(0));
- } else {
- DCHECK_EQ(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
- __ andps(i.OutputDoubleRegister(), kScratchDoubleReg);
- }
+ __ Absps(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.TempRegister(0));
break;
}
case kFloat32Neg: {
- // TODO(bmeurer): Use RIP relative 128-bit constants.
- __ Pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
- __ Psllq(kScratchDoubleReg, byte{31});
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vxorps(i.OutputDoubleRegister(), kScratchDoubleReg,
- i.InputOperand(0));
- } else {
- DCHECK_EQ(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
- __ xorps(i.OutputDoubleRegister(), kScratchDoubleReg);
- }
+ __ Negps(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.TempRegister(0));
break;
}
case kFloat64Abs: {
- // TODO(bmeurer): Use RIP relative 128-bit constants.
- __ Pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
- __ Psrlq(kScratchDoubleReg, byte{1});
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vandpd(i.OutputDoubleRegister(), kScratchDoubleReg,
- i.InputOperand(0));
- } else {
- DCHECK_EQ(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
- __ andps(i.OutputDoubleRegister(), kScratchDoubleReg);
- }
+ __ Abspd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.TempRegister(0));
break;
}
case kFloat64Neg: {
- // TODO(bmeurer): Use RIP relative 128-bit constants.
- __ Pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
- __ Psllq(kScratchDoubleReg, byte{63});
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vxorpd(i.OutputDoubleRegister(), kScratchDoubleReg,
- i.InputOperand(0));
- } else {
- DCHECK_EQ(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
- __ xorps(i.OutputDoubleRegister(), kScratchDoubleReg);
- }
+ __ Negpd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.TempRegister(0));
break;
}
- case kSSEFloat64SilenceNaN:
- __ xorps(kScratchDoubleReg, kScratchDoubleReg);
- __ subsd(i.InputDoubleRegister(0), kScratchDoubleReg);
+ case kIA32Float64SilenceNaN:
+ __ Xorps(kScratchDoubleReg, kScratchDoubleReg);
+ __ Subsd(i.InputDoubleRegister(0), kScratchDoubleReg);
break;
case kIA32Movsxbl:
ASSEMBLE_MOVX(movsx_b);
@@ -1873,7 +1840,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kIA32F64x2PromoteLowF32x4: {
- __ Cvtps2pd(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ if (HasAddressingMode(instr)) {
+ __ Cvtps2pd(i.OutputSimd128Register(), i.MemoryOperand());
+ } else {
+ __ Cvtps2pd(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ }
break;
}
case kIA32F32x4DemoteF64x2Zero: {
@@ -2020,28 +1991,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kIA32I64x2Mul: {
- XMMRegister dst = i.OutputSimd128Register();
- XMMRegister left = i.InputSimd128Register(0);
- XMMRegister right = i.InputSimd128Register(1);
- XMMRegister tmp1 = i.TempSimd128Register(0);
- XMMRegister tmp2 = i.TempSimd128Register(1);
-
- __ Movaps(tmp1, left);
- __ Movaps(tmp2, right);
-
- // Multiply high dword of each qword of left with right.
- __ Psrlq(tmp1, byte{32});
- __ Pmuludq(tmp1, tmp1, right);
-
- // Multiply high dword of each qword of right with left.
- __ Psrlq(tmp2, byte{32});
- __ Pmuludq(tmp2, tmp2, left);
-
- __ Paddq(tmp2, tmp2, tmp1);
- __ Psllq(tmp2, tmp2, byte{32});
-
- __ Pmuludq(dst, left, right);
- __ Paddq(dst, dst, tmp2);
+ __ I64x2Mul(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), i.TempSimd128Register(0),
+ i.TempSimd128Register(1));
break;
}
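      // Sketch of the arithmetic, assuming the I64x2Mul helper matches the
      // removed sequence: per 64-bit lane,
      //   a * b (mod 2^64) = lo(a)*lo(b) + ((hi(a)*lo(b) + lo(a)*hi(b)) << 32)
      // which is exactly what the Psrlq/Pmuludq/Paddq/Psllq steps computed.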
case kIA32I64x2ShrU: {
@@ -2160,34 +2112,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Addps(dst, dst, kScratchDoubleReg); // add hi and lo, may round.
break;
}
- case kIA32F32x4Abs: {
- XMMRegister dst = i.OutputSimd128Register();
- XMMRegister src = i.InputSimd128Register(0);
- if (dst == src) {
- __ Pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
- __ Psrld(kScratchDoubleReg, kScratchDoubleReg, byte{1});
- __ Andps(dst, kScratchDoubleReg);
- } else {
- __ Pcmpeqd(dst, dst);
- __ Psrld(dst, dst, byte{1});
- __ Andps(dst, src);
- }
- break;
- }
- case kIA32F32x4Neg: {
- XMMRegister dst = i.OutputSimd128Register();
- XMMRegister src = i.InputSimd128Register(0);
- if (dst == src) {
- __ Pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
- __ Pslld(kScratchDoubleReg, kScratchDoubleReg, byte{31});
- __ Xorps(dst, kScratchDoubleReg);
- } else {
- __ Pcmpeqd(dst, dst);
- __ Pslld(dst, dst, byte{31});
- __ Xorps(dst, src);
- }
- break;
- }
case kIA32F32x4Sqrt: {
__ Sqrtps(i.OutputSimd128Register(), i.InputSimd128Register(0));
break;
@@ -2220,76 +2144,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputOperand(1));
break;
}
- case kSSEF32x4Min: {
- XMMRegister src1 = i.InputSimd128Register(1),
- dst = i.OutputSimd128Register();
- DCHECK_EQ(dst, i.InputSimd128Register(0));
- // The minps instruction doesn't propagate NaNs and +0's in its first
-    // operand. Perform minps in both orders, merge the results, and adjust.
- __ movaps(kScratchDoubleReg, src1);
- __ minps(kScratchDoubleReg, dst);
- __ minps(dst, src1);
- // propagate -0's and NaNs, which may be non-canonical.
- __ orps(kScratchDoubleReg, dst);
- // Canonicalize NaNs by quieting and clearing the payload.
- __ cmpps(dst, kScratchDoubleReg, 3);
- __ orps(kScratchDoubleReg, dst);
- __ psrld(dst, 10);
- __ andnps(dst, kScratchDoubleReg);
- break;
- }
- case kAVXF32x4Min: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- XMMRegister dst = i.OutputSimd128Register();
- XMMRegister src0 = i.InputSimd128Register(0);
- Operand src1 = i.InputOperand(1);
- // See comment above for correction of minps.
- __ vmovups(kScratchDoubleReg, src1);
- __ vminps(kScratchDoubleReg, kScratchDoubleReg, src0);
- __ vminps(dst, src0, src1);
- __ vorps(dst, dst, kScratchDoubleReg);
- __ vcmpneqps(kScratchDoubleReg, dst, dst);
- __ vorps(dst, dst, kScratchDoubleReg);
- __ vpsrld(kScratchDoubleReg, kScratchDoubleReg, 10);
- __ vandnps(dst, kScratchDoubleReg, dst);
- break;
- }
- case kSSEF32x4Max: {
- XMMRegister src1 = i.InputSimd128Register(1),
- dst = i.OutputSimd128Register();
- DCHECK_EQ(dst, i.InputSimd128Register(0));
- // The maxps instruction doesn't propagate NaNs and +0's in its first
-    // operand. Perform maxps in both orders, merge the results, and adjust.
- __ movaps(kScratchDoubleReg, src1);
- __ maxps(kScratchDoubleReg, dst);
- __ maxps(dst, src1);
- // Find discrepancies.
- __ xorps(dst, kScratchDoubleReg);
- // Propagate NaNs, which may be non-canonical.
- __ orps(kScratchDoubleReg, dst);
- // Propagate sign discrepancy and (subtle) quiet NaNs.
- __ subps(kScratchDoubleReg, dst);
- // Canonicalize NaNs by clearing the payload.
- __ cmpps(dst, kScratchDoubleReg, 3);
- __ psrld(dst, 10);
- __ andnps(dst, kScratchDoubleReg);
+ case kIA32F32x4Min: {
+ __ F32x4Min(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), kScratchDoubleReg);
break;
}
- case kAVXF32x4Max: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- XMMRegister dst = i.OutputSimd128Register();
- XMMRegister src0 = i.InputSimd128Register(0);
- Operand src1 = i.InputOperand(1);
- // See comment above for correction of maxps.
- __ vmovups(kScratchDoubleReg, src1);
- __ vmaxps(kScratchDoubleReg, kScratchDoubleReg, src0);
- __ vmaxps(dst, src0, src1);
- __ vxorps(dst, dst, kScratchDoubleReg);
- __ vorps(kScratchDoubleReg, kScratchDoubleReg, dst);
- __ vsubps(kScratchDoubleReg, kScratchDoubleReg, dst);
- __ vcmpneqps(dst, kScratchDoubleReg, kScratchDoubleReg);
- __ vpsrld(dst, dst, 10);
- __ vandnps(dst, dst, kScratchDoubleReg);
+ case kIA32F32x4Max: {
+ __ F32x4Max(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), kScratchDoubleReg);
break;
}
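      // Assumption: the shared F32x4Min/F32x4Max helpers keep the semantics
      // the removed sequences implemented by hand; a NaN in either lane yields
      // a canonical NaN, and -0.0 orders below +0.0 (min picks -0.0, max picks
      // +0.0), neither of which plain minps/maxps guarantees on its own.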
case kIA32F32x4Eq: {
diff --git a/deps/v8/src/compiler/backend/ia32/instruction-codes-ia32.h b/deps/v8/src/compiler/backend/ia32/instruction-codes-ia32.h
index bb54c726aa..ca15054763 100644
--- a/deps/v8/src/compiler/backend/ia32/instruction-codes-ia32.h
+++ b/deps/v8/src/compiler/backend/ia32/instruction-codes-ia32.h
@@ -11,358 +11,359 @@ namespace compiler {
// IA32-specific opcodes that specify which assembly sequence to emit.
// Most opcodes specify a single instruction.
-#define TARGET_ARCH_OPCODE_LIST(V) \
- V(IA32Add) \
- V(IA32And) \
- V(IA32Cmp) \
- V(IA32Cmp16) \
- V(IA32Cmp8) \
- V(IA32Test) \
- V(IA32Test16) \
- V(IA32Test8) \
- V(IA32Or) \
- V(IA32Xor) \
- V(IA32Sub) \
- V(IA32Imul) \
- V(IA32ImulHigh) \
- V(IA32UmulHigh) \
- V(IA32Idiv) \
- V(IA32Udiv) \
- V(IA32Not) \
- V(IA32Neg) \
- V(IA32Shl) \
- V(IA32Shr) \
- V(IA32Sar) \
- V(IA32AddPair) \
- V(IA32SubPair) \
- V(IA32MulPair) \
- V(IA32ShlPair) \
- V(IA32ShrPair) \
- V(IA32SarPair) \
- V(IA32Rol) \
- V(IA32Ror) \
- V(IA32Lzcnt) \
- V(IA32Tzcnt) \
- V(IA32Popcnt) \
- V(IA32Bswap) \
- V(IA32MFence) \
- V(IA32LFence) \
- V(SSEFloat32Cmp) \
- V(SSEFloat32Sqrt) \
- V(SSEFloat32Round) \
- V(SSEFloat64Cmp) \
- V(SSEFloat64Mod) \
- V(SSEFloat32Max) \
- V(SSEFloat64Max) \
- V(SSEFloat32Min) \
- V(SSEFloat64Min) \
- V(SSEFloat64Sqrt) \
- V(SSEFloat64Round) \
- V(SSEFloat32ToFloat64) \
- V(SSEFloat64ToFloat32) \
- V(SSEFloat32ToInt32) \
- V(SSEFloat32ToUint32) \
- V(SSEFloat64ToInt32) \
- V(SSEFloat64ToUint32) \
- V(SSEInt32ToFloat32) \
- V(SSEUint32ToFloat32) \
- V(SSEInt32ToFloat64) \
- V(SSEUint32ToFloat64) \
- V(SSEFloat64ExtractLowWord32) \
- V(SSEFloat64ExtractHighWord32) \
- V(SSEFloat64InsertLowWord32) \
- V(SSEFloat64InsertHighWord32) \
- V(SSEFloat64LoadLowWord32) \
- V(SSEFloat64SilenceNaN) \
- V(Float32Add) \
- V(Float32Sub) \
- V(Float64Add) \
- V(Float64Sub) \
- V(Float32Mul) \
- V(Float32Div) \
- V(Float64Mul) \
- V(Float64Div) \
- V(Float64Abs) \
- V(Float64Neg) \
- V(Float32Abs) \
- V(Float32Neg) \
- V(IA32Movsxbl) \
- V(IA32Movzxbl) \
- V(IA32Movb) \
- V(IA32Movsxwl) \
- V(IA32Movzxwl) \
- V(IA32Movw) \
- V(IA32Movl) \
- V(IA32Movss) \
- V(IA32Movsd) \
- V(IA32Movdqu) \
- V(IA32Movlps) \
- V(IA32Movhps) \
- V(IA32BitcastFI) \
- V(IA32BitcastIF) \
- V(IA32Lea) \
- V(IA32Push) \
- V(IA32Poke) \
- V(IA32Peek) \
- V(IA32F64x2Splat) \
- V(F64x2ExtractLane) \
- V(F64x2ReplaceLane) \
- V(IA32F64x2Sqrt) \
- V(IA32F64x2Add) \
- V(IA32F64x2Sub) \
- V(IA32F64x2Mul) \
- V(IA32F64x2Div) \
- V(IA32F64x2Min) \
- V(IA32F64x2Max) \
- V(IA32F64x2Eq) \
- V(IA32F64x2Ne) \
- V(IA32F64x2Lt) \
- V(IA32F64x2Le) \
- V(IA32F64x2Pmin) \
- V(IA32F64x2Pmax) \
- V(IA32F64x2Round) \
- V(IA32F64x2ConvertLowI32x4S) \
- V(IA32F64x2ConvertLowI32x4U) \
- V(IA32F64x2PromoteLowF32x4) \
- V(IA32I64x2SplatI32Pair) \
- V(IA32I64x2ReplaceLaneI32Pair) \
- V(IA32I64x2Abs) \
- V(IA32I64x2Neg) \
- V(IA32I64x2Shl) \
- V(IA32I64x2ShrS) \
- V(IA32I64x2Add) \
- V(IA32I64x2Sub) \
- V(IA32I64x2Mul) \
- V(IA32I64x2ShrU) \
- V(IA32I64x2BitMask) \
- V(IA32I64x2Eq) \
- V(IA32I64x2Ne) \
- V(IA32I64x2GtS) \
- V(IA32I64x2GeS) \
- V(IA32I64x2ExtMulLowI32x4S) \
- V(IA32I64x2ExtMulHighI32x4S) \
- V(IA32I64x2ExtMulLowI32x4U) \
- V(IA32I64x2ExtMulHighI32x4U) \
- V(IA32I64x2SConvertI32x4Low) \
- V(IA32I64x2SConvertI32x4High) \
- V(IA32I64x2UConvertI32x4Low) \
- V(IA32I64x2UConvertI32x4High) \
- V(IA32F32x4Splat) \
- V(IA32F32x4ExtractLane) \
- V(IA32Insertps) \
- V(IA32F32x4SConvertI32x4) \
- V(IA32F32x4UConvertI32x4) \
- V(IA32F32x4Abs) \
- V(IA32F32x4Neg) \
- V(IA32F32x4Sqrt) \
- V(IA32F32x4RecipApprox) \
- V(IA32F32x4RecipSqrtApprox) \
- V(IA32F32x4Add) \
- V(IA32F32x4Sub) \
- V(IA32F32x4Mul) \
- V(IA32F32x4Div) \
- V(SSEF32x4Min) \
- V(AVXF32x4Min) \
- V(SSEF32x4Max) \
- V(AVXF32x4Max) \
- V(IA32F32x4Eq) \
- V(IA32F32x4Ne) \
- V(IA32F32x4Lt) \
- V(IA32F32x4Le) \
- V(IA32F32x4Pmin) \
- V(IA32F32x4Pmax) \
- V(IA32F32x4Round) \
- V(IA32F32x4DemoteF64x2Zero) \
- V(IA32I32x4Splat) \
- V(IA32I32x4ExtractLane) \
- V(IA32I32x4SConvertF32x4) \
- V(IA32I32x4SConvertI16x8Low) \
- V(IA32I32x4SConvertI16x8High) \
- V(IA32I32x4Neg) \
- V(IA32I32x4Shl) \
- V(IA32I32x4ShrS) \
- V(IA32I32x4Add) \
- V(IA32I32x4Sub) \
- V(IA32I32x4Mul) \
- V(IA32I32x4MinS) \
- V(IA32I32x4MaxS) \
- V(IA32I32x4Eq) \
- V(IA32I32x4Ne) \
- V(IA32I32x4GtS) \
- V(IA32I32x4GeS) \
- V(SSEI32x4UConvertF32x4) \
- V(AVXI32x4UConvertF32x4) \
- V(IA32I32x4UConvertI16x8Low) \
- V(IA32I32x4UConvertI16x8High) \
- V(IA32I32x4ShrU) \
- V(IA32I32x4MinU) \
- V(IA32I32x4MaxU) \
- V(SSEI32x4GtU) \
- V(AVXI32x4GtU) \
- V(SSEI32x4GeU) \
- V(AVXI32x4GeU) \
- V(IA32I32x4Abs) \
- V(IA32I32x4BitMask) \
- V(IA32I32x4DotI16x8S) \
- V(IA32I32x4ExtMulLowI16x8S) \
- V(IA32I32x4ExtMulHighI16x8S) \
- V(IA32I32x4ExtMulLowI16x8U) \
- V(IA32I32x4ExtMulHighI16x8U) \
- V(IA32I32x4ExtAddPairwiseI16x8S) \
- V(IA32I32x4ExtAddPairwiseI16x8U) \
- V(IA32I32x4TruncSatF64x2SZero) \
- V(IA32I32x4TruncSatF64x2UZero) \
- V(IA32I16x8Splat) \
- V(IA32I16x8ExtractLaneS) \
- V(IA32I16x8SConvertI8x16Low) \
- V(IA32I16x8SConvertI8x16High) \
- V(IA32I16x8Neg) \
- V(IA32I16x8Shl) \
- V(IA32I16x8ShrS) \
- V(IA32I16x8SConvertI32x4) \
- V(IA32I16x8Add) \
- V(IA32I16x8AddSatS) \
- V(IA32I16x8Sub) \
- V(IA32I16x8SubSatS) \
- V(IA32I16x8Mul) \
- V(IA32I16x8MinS) \
- V(IA32I16x8MaxS) \
- V(IA32I16x8Eq) \
- V(SSEI16x8Ne) \
- V(AVXI16x8Ne) \
- V(IA32I16x8GtS) \
- V(SSEI16x8GeS) \
- V(AVXI16x8GeS) \
- V(IA32I16x8UConvertI8x16Low) \
- V(IA32I16x8UConvertI8x16High) \
- V(IA32I16x8ShrU) \
- V(IA32I16x8UConvertI32x4) \
- V(IA32I16x8AddSatU) \
- V(IA32I16x8SubSatU) \
- V(IA32I16x8MinU) \
- V(IA32I16x8MaxU) \
- V(SSEI16x8GtU) \
- V(AVXI16x8GtU) \
- V(SSEI16x8GeU) \
- V(AVXI16x8GeU) \
- V(IA32I16x8RoundingAverageU) \
- V(IA32I16x8Abs) \
- V(IA32I16x8BitMask) \
- V(IA32I16x8ExtMulLowI8x16S) \
- V(IA32I16x8ExtMulHighI8x16S) \
- V(IA32I16x8ExtMulLowI8x16U) \
- V(IA32I16x8ExtMulHighI8x16U) \
- V(IA32I16x8ExtAddPairwiseI8x16S) \
- V(IA32I16x8ExtAddPairwiseI8x16U) \
- V(IA32I16x8Q15MulRSatS) \
- V(IA32I8x16Splat) \
- V(IA32I8x16ExtractLaneS) \
- V(IA32Pinsrb) \
- V(IA32Pinsrw) \
- V(IA32Pinsrd) \
- V(IA32Pextrb) \
- V(IA32Pextrw) \
- V(IA32S128Store32Lane) \
- V(IA32I8x16SConvertI16x8) \
- V(IA32I8x16Neg) \
- V(IA32I8x16Shl) \
- V(IA32I8x16ShrS) \
- V(IA32I8x16Add) \
- V(IA32I8x16AddSatS) \
- V(IA32I8x16Sub) \
- V(IA32I8x16SubSatS) \
- V(IA32I8x16MinS) \
- V(IA32I8x16MaxS) \
- V(IA32I8x16Eq) \
- V(SSEI8x16Ne) \
- V(AVXI8x16Ne) \
- V(IA32I8x16GtS) \
- V(SSEI8x16GeS) \
- V(AVXI8x16GeS) \
- V(IA32I8x16UConvertI16x8) \
- V(IA32I8x16AddSatU) \
- V(IA32I8x16SubSatU) \
- V(IA32I8x16ShrU) \
- V(IA32I8x16MinU) \
- V(IA32I8x16MaxU) \
- V(SSEI8x16GtU) \
- V(AVXI8x16GtU) \
- V(SSEI8x16GeU) \
- V(AVXI8x16GeU) \
- V(IA32I8x16RoundingAverageU) \
- V(IA32I8x16Abs) \
- V(IA32I8x16BitMask) \
- V(IA32I8x16Popcnt) \
- V(IA32S128Const) \
- V(IA32S128Zero) \
- V(IA32S128AllOnes) \
- V(IA32S128Not) \
- V(IA32S128And) \
- V(IA32S128Or) \
- V(IA32S128Xor) \
- V(IA32S128Select) \
- V(IA32S128AndNot) \
- V(IA32I8x16Swizzle) \
- V(IA32I8x16Shuffle) \
- V(IA32S128Load8Splat) \
- V(IA32S128Load16Splat) \
- V(IA32S128Load32Splat) \
- V(IA32S128Load64Splat) \
- V(IA32S128Load8x8S) \
- V(IA32S128Load8x8U) \
- V(IA32S128Load16x4S) \
- V(IA32S128Load16x4U) \
- V(IA32S128Load32x2S) \
- V(IA32S128Load32x2U) \
- V(IA32S32x4Rotate) \
- V(IA32S32x4Swizzle) \
- V(IA32S32x4Shuffle) \
- V(IA32S16x8Blend) \
- V(IA32S16x8HalfShuffle1) \
- V(IA32S16x8HalfShuffle2) \
- V(IA32S8x16Alignr) \
- V(IA32S16x8Dup) \
- V(IA32S8x16Dup) \
- V(SSES16x8UnzipHigh) \
- V(AVXS16x8UnzipHigh) \
- V(SSES16x8UnzipLow) \
- V(AVXS16x8UnzipLow) \
- V(SSES8x16UnzipHigh) \
- V(AVXS8x16UnzipHigh) \
- V(SSES8x16UnzipLow) \
- V(AVXS8x16UnzipLow) \
- V(IA32S64x2UnpackHigh) \
- V(IA32S32x4UnpackHigh) \
- V(IA32S16x8UnpackHigh) \
- V(IA32S8x16UnpackHigh) \
- V(IA32S64x2UnpackLow) \
- V(IA32S32x4UnpackLow) \
- V(IA32S16x8UnpackLow) \
- V(IA32S8x16UnpackLow) \
- V(SSES8x16TransposeLow) \
- V(AVXS8x16TransposeLow) \
- V(SSES8x16TransposeHigh) \
- V(AVXS8x16TransposeHigh) \
- V(SSES8x8Reverse) \
- V(AVXS8x8Reverse) \
- V(SSES8x4Reverse) \
- V(AVXS8x4Reverse) \
- V(SSES8x2Reverse) \
- V(AVXS8x2Reverse) \
- V(IA32S128AnyTrue) \
- V(IA32I64x2AllTrue) \
- V(IA32I32x4AllTrue) \
- V(IA32I16x8AllTrue) \
- V(IA32I8x16AllTrue) \
- V(IA32Word32AtomicPairLoad) \
- V(IA32Word32ReleasePairStore) \
- V(IA32Word32SeqCstPairStore) \
- V(IA32Word32AtomicPairAdd) \
- V(IA32Word32AtomicPairSub) \
- V(IA32Word32AtomicPairAnd) \
- V(IA32Word32AtomicPairOr) \
- V(IA32Word32AtomicPairXor) \
- V(IA32Word32AtomicPairExchange) \
+
+// Opcodes that support a MemoryAccessMode.
+#define TARGET_ARCH_OPCODE_WITH_MEMORY_ACCESS_MODE_LIST(V) // None.
+
+#define TARGET_ARCH_OPCODE_LIST(V) \
+ TARGET_ARCH_OPCODE_WITH_MEMORY_ACCESS_MODE_LIST(V) \
+ V(IA32Add) \
+ V(IA32And) \
+ V(IA32Cmp) \
+ V(IA32Cmp16) \
+ V(IA32Cmp8) \
+ V(IA32Test) \
+ V(IA32Test16) \
+ V(IA32Test8) \
+ V(IA32Or) \
+ V(IA32Xor) \
+ V(IA32Sub) \
+ V(IA32Imul) \
+ V(IA32ImulHigh) \
+ V(IA32UmulHigh) \
+ V(IA32Idiv) \
+ V(IA32Udiv) \
+ V(IA32Not) \
+ V(IA32Neg) \
+ V(IA32Shl) \
+ V(IA32Shr) \
+ V(IA32Sar) \
+ V(IA32AddPair) \
+ V(IA32SubPair) \
+ V(IA32MulPair) \
+ V(IA32ShlPair) \
+ V(IA32ShrPair) \
+ V(IA32SarPair) \
+ V(IA32Rol) \
+ V(IA32Ror) \
+ V(IA32Lzcnt) \
+ V(IA32Tzcnt) \
+ V(IA32Popcnt) \
+ V(IA32Bswap) \
+ V(IA32MFence) \
+ V(IA32LFence) \
+ V(IA32Float32Cmp) \
+ V(IA32Float32Sqrt) \
+ V(IA32Float32Round) \
+ V(IA32Float64Cmp) \
+ V(IA32Float64Mod) \
+ V(IA32Float32Max) \
+ V(IA32Float64Max) \
+ V(IA32Float32Min) \
+ V(IA32Float64Min) \
+ V(IA32Float64Sqrt) \
+ V(IA32Float64Round) \
+ V(IA32Float32ToFloat64) \
+ V(IA32Float64ToFloat32) \
+ V(IA32Float32ToInt32) \
+ V(IA32Float32ToUint32) \
+ V(IA32Float64ToInt32) \
+ V(IA32Float64ToUint32) \
+ V(SSEInt32ToFloat32) \
+ V(IA32Uint32ToFloat32) \
+ V(SSEInt32ToFloat64) \
+ V(IA32Uint32ToFloat64) \
+ V(IA32Float64ExtractLowWord32) \
+ V(IA32Float64ExtractHighWord32) \
+ V(IA32Float64InsertLowWord32) \
+ V(IA32Float64InsertHighWord32) \
+ V(IA32Float64LoadLowWord32) \
+ V(IA32Float64SilenceNaN) \
+ V(Float32Add) \
+ V(Float32Sub) \
+ V(Float64Add) \
+ V(Float64Sub) \
+ V(Float32Mul) \
+ V(Float32Div) \
+ V(Float64Mul) \
+ V(Float64Div) \
+ V(Float64Abs) \
+ V(Float64Neg) \
+ V(Float32Abs) \
+ V(Float32Neg) \
+ V(IA32Movsxbl) \
+ V(IA32Movzxbl) \
+ V(IA32Movb) \
+ V(IA32Movsxwl) \
+ V(IA32Movzxwl) \
+ V(IA32Movw) \
+ V(IA32Movl) \
+ V(IA32Movss) \
+ V(IA32Movsd) \
+ V(IA32Movdqu) \
+ V(IA32Movlps) \
+ V(IA32Movhps) \
+ V(IA32BitcastFI) \
+ V(IA32BitcastIF) \
+ V(IA32Lea) \
+ V(IA32Push) \
+ V(IA32Poke) \
+ V(IA32Peek) \
+ V(IA32F64x2Splat) \
+ V(F64x2ExtractLane) \
+ V(F64x2ReplaceLane) \
+ V(IA32F64x2Sqrt) \
+ V(IA32F64x2Add) \
+ V(IA32F64x2Sub) \
+ V(IA32F64x2Mul) \
+ V(IA32F64x2Div) \
+ V(IA32F64x2Min) \
+ V(IA32F64x2Max) \
+ V(IA32F64x2Eq) \
+ V(IA32F64x2Ne) \
+ V(IA32F64x2Lt) \
+ V(IA32F64x2Le) \
+ V(IA32F64x2Pmin) \
+ V(IA32F64x2Pmax) \
+ V(IA32F64x2Round) \
+ V(IA32F64x2ConvertLowI32x4S) \
+ V(IA32F64x2ConvertLowI32x4U) \
+ V(IA32F64x2PromoteLowF32x4) \
+ V(IA32I64x2SplatI32Pair) \
+ V(IA32I64x2ReplaceLaneI32Pair) \
+ V(IA32I64x2Abs) \
+ V(IA32I64x2Neg) \
+ V(IA32I64x2Shl) \
+ V(IA32I64x2ShrS) \
+ V(IA32I64x2Add) \
+ V(IA32I64x2Sub) \
+ V(IA32I64x2Mul) \
+ V(IA32I64x2ShrU) \
+ V(IA32I64x2BitMask) \
+ V(IA32I64x2Eq) \
+ V(IA32I64x2Ne) \
+ V(IA32I64x2GtS) \
+ V(IA32I64x2GeS) \
+ V(IA32I64x2ExtMulLowI32x4S) \
+ V(IA32I64x2ExtMulHighI32x4S) \
+ V(IA32I64x2ExtMulLowI32x4U) \
+ V(IA32I64x2ExtMulHighI32x4U) \
+ V(IA32I64x2SConvertI32x4Low) \
+ V(IA32I64x2SConvertI32x4High) \
+ V(IA32I64x2UConvertI32x4Low) \
+ V(IA32I64x2UConvertI32x4High) \
+ V(IA32F32x4Splat) \
+ V(IA32F32x4ExtractLane) \
+ V(IA32Insertps) \
+ V(IA32F32x4SConvertI32x4) \
+ V(IA32F32x4UConvertI32x4) \
+ V(IA32F32x4Sqrt) \
+ V(IA32F32x4RecipApprox) \
+ V(IA32F32x4RecipSqrtApprox) \
+ V(IA32F32x4Add) \
+ V(IA32F32x4Sub) \
+ V(IA32F32x4Mul) \
+ V(IA32F32x4Div) \
+ V(IA32F32x4Min) \
+ V(IA32F32x4Max) \
+ V(IA32F32x4Eq) \
+ V(IA32F32x4Ne) \
+ V(IA32F32x4Lt) \
+ V(IA32F32x4Le) \
+ V(IA32F32x4Pmin) \
+ V(IA32F32x4Pmax) \
+ V(IA32F32x4Round) \
+ V(IA32F32x4DemoteF64x2Zero) \
+ V(IA32I32x4Splat) \
+ V(IA32I32x4ExtractLane) \
+ V(IA32I32x4SConvertF32x4) \
+ V(IA32I32x4SConvertI16x8Low) \
+ V(IA32I32x4SConvertI16x8High) \
+ V(IA32I32x4Neg) \
+ V(IA32I32x4Shl) \
+ V(IA32I32x4ShrS) \
+ V(IA32I32x4Add) \
+ V(IA32I32x4Sub) \
+ V(IA32I32x4Mul) \
+ V(IA32I32x4MinS) \
+ V(IA32I32x4MaxS) \
+ V(IA32I32x4Eq) \
+ V(IA32I32x4Ne) \
+ V(IA32I32x4GtS) \
+ V(IA32I32x4GeS) \
+ V(SSEI32x4UConvertF32x4) \
+ V(AVXI32x4UConvertF32x4) \
+ V(IA32I32x4UConvertI16x8Low) \
+ V(IA32I32x4UConvertI16x8High) \
+ V(IA32I32x4ShrU) \
+ V(IA32I32x4MinU) \
+ V(IA32I32x4MaxU) \
+ V(SSEI32x4GtU) \
+ V(AVXI32x4GtU) \
+ V(SSEI32x4GeU) \
+ V(AVXI32x4GeU) \
+ V(IA32I32x4Abs) \
+ V(IA32I32x4BitMask) \
+ V(IA32I32x4DotI16x8S) \
+ V(IA32I32x4ExtMulLowI16x8S) \
+ V(IA32I32x4ExtMulHighI16x8S) \
+ V(IA32I32x4ExtMulLowI16x8U) \
+ V(IA32I32x4ExtMulHighI16x8U) \
+ V(IA32I32x4ExtAddPairwiseI16x8S) \
+ V(IA32I32x4ExtAddPairwiseI16x8U) \
+ V(IA32I32x4TruncSatF64x2SZero) \
+ V(IA32I32x4TruncSatF64x2UZero) \
+ V(IA32I16x8Splat) \
+ V(IA32I16x8ExtractLaneS) \
+ V(IA32I16x8SConvertI8x16Low) \
+ V(IA32I16x8SConvertI8x16High) \
+ V(IA32I16x8Neg) \
+ V(IA32I16x8Shl) \
+ V(IA32I16x8ShrS) \
+ V(IA32I16x8SConvertI32x4) \
+ V(IA32I16x8Add) \
+ V(IA32I16x8AddSatS) \
+ V(IA32I16x8Sub) \
+ V(IA32I16x8SubSatS) \
+ V(IA32I16x8Mul) \
+ V(IA32I16x8MinS) \
+ V(IA32I16x8MaxS) \
+ V(IA32I16x8Eq) \
+ V(SSEI16x8Ne) \
+ V(AVXI16x8Ne) \
+ V(IA32I16x8GtS) \
+ V(SSEI16x8GeS) \
+ V(AVXI16x8GeS) \
+ V(IA32I16x8UConvertI8x16Low) \
+ V(IA32I16x8UConvertI8x16High) \
+ V(IA32I16x8ShrU) \
+ V(IA32I16x8UConvertI32x4) \
+ V(IA32I16x8AddSatU) \
+ V(IA32I16x8SubSatU) \
+ V(IA32I16x8MinU) \
+ V(IA32I16x8MaxU) \
+ V(SSEI16x8GtU) \
+ V(AVXI16x8GtU) \
+ V(SSEI16x8GeU) \
+ V(AVXI16x8GeU) \
+ V(IA32I16x8RoundingAverageU) \
+ V(IA32I16x8Abs) \
+ V(IA32I16x8BitMask) \
+ V(IA32I16x8ExtMulLowI8x16S) \
+ V(IA32I16x8ExtMulHighI8x16S) \
+ V(IA32I16x8ExtMulLowI8x16U) \
+ V(IA32I16x8ExtMulHighI8x16U) \
+ V(IA32I16x8ExtAddPairwiseI8x16S) \
+ V(IA32I16x8ExtAddPairwiseI8x16U) \
+ V(IA32I16x8Q15MulRSatS) \
+ V(IA32I8x16Splat) \
+ V(IA32I8x16ExtractLaneS) \
+ V(IA32Pinsrb) \
+ V(IA32Pinsrw) \
+ V(IA32Pinsrd) \
+ V(IA32Pextrb) \
+ V(IA32Pextrw) \
+ V(IA32S128Store32Lane) \
+ V(IA32I8x16SConvertI16x8) \
+ V(IA32I8x16Neg) \
+ V(IA32I8x16Shl) \
+ V(IA32I8x16ShrS) \
+ V(IA32I8x16Add) \
+ V(IA32I8x16AddSatS) \
+ V(IA32I8x16Sub) \
+ V(IA32I8x16SubSatS) \
+ V(IA32I8x16MinS) \
+ V(IA32I8x16MaxS) \
+ V(IA32I8x16Eq) \
+ V(SSEI8x16Ne) \
+ V(AVXI8x16Ne) \
+ V(IA32I8x16GtS) \
+ V(SSEI8x16GeS) \
+ V(AVXI8x16GeS) \
+ V(IA32I8x16UConvertI16x8) \
+ V(IA32I8x16AddSatU) \
+ V(IA32I8x16SubSatU) \
+ V(IA32I8x16ShrU) \
+ V(IA32I8x16MinU) \
+ V(IA32I8x16MaxU) \
+ V(SSEI8x16GtU) \
+ V(AVXI8x16GtU) \
+ V(SSEI8x16GeU) \
+ V(AVXI8x16GeU) \
+ V(IA32I8x16RoundingAverageU) \
+ V(IA32I8x16Abs) \
+ V(IA32I8x16BitMask) \
+ V(IA32I8x16Popcnt) \
+ V(IA32S128Const) \
+ V(IA32S128Zero) \
+ V(IA32S128AllOnes) \
+ V(IA32S128Not) \
+ V(IA32S128And) \
+ V(IA32S128Or) \
+ V(IA32S128Xor) \
+ V(IA32S128Select) \
+ V(IA32S128AndNot) \
+ V(IA32I8x16Swizzle) \
+ V(IA32I8x16Shuffle) \
+ V(IA32S128Load8Splat) \
+ V(IA32S128Load16Splat) \
+ V(IA32S128Load32Splat) \
+ V(IA32S128Load64Splat) \
+ V(IA32S128Load8x8S) \
+ V(IA32S128Load8x8U) \
+ V(IA32S128Load16x4S) \
+ V(IA32S128Load16x4U) \
+ V(IA32S128Load32x2S) \
+ V(IA32S128Load32x2U) \
+ V(IA32S32x4Rotate) \
+ V(IA32S32x4Swizzle) \
+ V(IA32S32x4Shuffle) \
+ V(IA32S16x8Blend) \
+ V(IA32S16x8HalfShuffle1) \
+ V(IA32S16x8HalfShuffle2) \
+ V(IA32S8x16Alignr) \
+ V(IA32S16x8Dup) \
+ V(IA32S8x16Dup) \
+ V(SSES16x8UnzipHigh) \
+ V(AVXS16x8UnzipHigh) \
+ V(SSES16x8UnzipLow) \
+ V(AVXS16x8UnzipLow) \
+ V(SSES8x16UnzipHigh) \
+ V(AVXS8x16UnzipHigh) \
+ V(SSES8x16UnzipLow) \
+ V(AVXS8x16UnzipLow) \
+ V(IA32S64x2UnpackHigh) \
+ V(IA32S32x4UnpackHigh) \
+ V(IA32S16x8UnpackHigh) \
+ V(IA32S8x16UnpackHigh) \
+ V(IA32S64x2UnpackLow) \
+ V(IA32S32x4UnpackLow) \
+ V(IA32S16x8UnpackLow) \
+ V(IA32S8x16UnpackLow) \
+ V(SSES8x16TransposeLow) \
+ V(AVXS8x16TransposeLow) \
+ V(SSES8x16TransposeHigh) \
+ V(AVXS8x16TransposeHigh) \
+ V(SSES8x8Reverse) \
+ V(AVXS8x8Reverse) \
+ V(SSES8x4Reverse) \
+ V(AVXS8x4Reverse) \
+ V(SSES8x2Reverse) \
+ V(AVXS8x2Reverse) \
+ V(IA32S128AnyTrue) \
+ V(IA32I64x2AllTrue) \
+ V(IA32I32x4AllTrue) \
+ V(IA32I16x8AllTrue) \
+ V(IA32I8x16AllTrue) \
+ V(IA32Word32AtomicPairLoad) \
+ V(IA32Word32ReleasePairStore) \
+ V(IA32Word32SeqCstPairStore) \
+ V(IA32Word32AtomicPairAdd) \
+ V(IA32Word32AtomicPairSub) \
+ V(IA32Word32AtomicPairAnd) \
+ V(IA32Word32AtomicPairOr) \
+ V(IA32Word32AtomicPairXor) \
+ V(IA32Word32AtomicPairExchange) \
V(IA32Word32AtomicPairCompareExchange)
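// Illustration (a sketch of how clients expand this X-macro; the exact
// expansion lives in instruction-codes.h):
//
//   #define DECLARE_ARCH_OPCODE(Name) k##Name,
//   enum ArchOpcode { TARGET_ARCH_OPCODE_LIST(DECLARE_ARCH_OPCODE) };
//   #undef DECLARE_ARCH_OPCODE
//
// so renaming an entry here (e.g. kSSEFloat64Min -> kIA32Float64Min) renames
// the enumerator at every switch that uses it.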
// Addressing modes represent the "shape" of inputs to an instruction.
diff --git a/deps/v8/src/compiler/backend/ia32/instruction-scheduler-ia32.cc b/deps/v8/src/compiler/backend/ia32/instruction-scheduler-ia32.cc
index 3910d45195..01e4f8faa8 100644
--- a/deps/v8/src/compiler/backend/ia32/instruction-scheduler-ia32.cc
+++ b/deps/v8/src/compiler/backend/ia32/instruction-scheduler-ia32.cc
@@ -48,33 +48,33 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kIA32Popcnt:
case kIA32Bswap:
case kIA32Lea:
- case kSSEFloat32Cmp:
- case kSSEFloat32Sqrt:
- case kSSEFloat32Round:
- case kSSEFloat64Cmp:
- case kSSEFloat64Mod:
- case kSSEFloat32Max:
- case kSSEFloat64Max:
- case kSSEFloat32Min:
- case kSSEFloat64Min:
- case kSSEFloat64Sqrt:
- case kSSEFloat64Round:
- case kSSEFloat32ToFloat64:
- case kSSEFloat64ToFloat32:
- case kSSEFloat32ToInt32:
- case kSSEFloat32ToUint32:
- case kSSEFloat64ToInt32:
- case kSSEFloat64ToUint32:
+ case kIA32Float32Cmp:
+ case kIA32Float32Sqrt:
+ case kIA32Float32Round:
+ case kIA32Float64Cmp:
+ case kIA32Float64Mod:
+ case kIA32Float32Max:
+ case kIA32Float64Max:
+ case kIA32Float32Min:
+ case kIA32Float64Min:
+ case kIA32Float64Sqrt:
+ case kIA32Float64Round:
+ case kIA32Float32ToFloat64:
+ case kIA32Float64ToFloat32:
+ case kIA32Float32ToInt32:
+ case kIA32Float32ToUint32:
+ case kIA32Float64ToInt32:
+ case kIA32Float64ToUint32:
case kSSEInt32ToFloat32:
- case kSSEUint32ToFloat32:
+ case kIA32Uint32ToFloat32:
case kSSEInt32ToFloat64:
- case kSSEUint32ToFloat64:
- case kSSEFloat64ExtractLowWord32:
- case kSSEFloat64ExtractHighWord32:
- case kSSEFloat64InsertLowWord32:
- case kSSEFloat64InsertHighWord32:
- case kSSEFloat64LoadLowWord32:
- case kSSEFloat64SilenceNaN:
+ case kIA32Uint32ToFloat64:
+ case kIA32Float64ExtractLowWord32:
+ case kIA32Float64ExtractHighWord32:
+ case kIA32Float64InsertLowWord32:
+ case kIA32Float64InsertHighWord32:
+ case kIA32Float64LoadLowWord32:
+ case kIA32Float64SilenceNaN:
case kFloat32Add:
case kFloat32Sub:
case kFloat64Add:
@@ -137,8 +137,6 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kIA32Insertps:
case kIA32F32x4SConvertI32x4:
case kIA32F32x4UConvertI32x4:
- case kIA32F32x4Abs:
- case kIA32F32x4Neg:
case kIA32F32x4Sqrt:
case kIA32F32x4RecipApprox:
case kIA32F32x4RecipSqrtApprox:
@@ -146,10 +144,8 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kIA32F32x4Sub:
case kIA32F32x4Mul:
case kIA32F32x4Div:
- case kSSEF32x4Min:
- case kAVXF32x4Min:
- case kSSEF32x4Max:
- case kAVXF32x4Max:
+ case kIA32F32x4Min:
+ case kIA32F32x4Max:
case kIA32F32x4Eq:
case kIA32F32x4Ne:
case kIA32F32x4Lt:
@@ -406,8 +402,8 @@ int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
case kIA32Imul:
case kIA32ImulHigh:
return 5;
- case kSSEFloat32Cmp:
- case kSSEFloat64Cmp:
+ case kIA32Float32Cmp:
+ case kIA32Float64Cmp:
return 9;
case kFloat32Add:
case kFloat32Sub:
@@ -415,24 +411,24 @@ int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
case kFloat64Sub:
case kFloat32Abs:
case kFloat32Neg:
- case kSSEFloat64Max:
- case kSSEFloat64Min:
+ case kIA32Float64Max:
+ case kIA32Float64Min:
case kFloat64Abs:
case kFloat64Neg:
return 5;
case kFloat32Mul:
return 4;
- case kSSEFloat32ToFloat64:
- case kSSEFloat64ToFloat32:
+ case kIA32Float32ToFloat64:
+ case kIA32Float64ToFloat32:
return 6;
- case kSSEFloat32Round:
- case kSSEFloat64Round:
- case kSSEFloat32ToInt32:
- case kSSEFloat64ToInt32:
+ case kIA32Float32Round:
+ case kIA32Float64Round:
+ case kIA32Float32ToInt32:
+ case kIA32Float64ToInt32:
return 8;
- case kSSEFloat32ToUint32:
+ case kIA32Float32ToUint32:
return 21;
- case kSSEFloat64ToUint32:
+ case kIA32Float64ToUint32:
return 15;
case kIA32Idiv:
return 33;
@@ -442,10 +438,10 @@ int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
return 35;
case kFloat64Div:
return 63;
- case kSSEFloat32Sqrt:
- case kSSEFloat64Sqrt:
+ case kIA32Float32Sqrt:
+ case kIA32Float64Sqrt:
return 25;
- case kSSEFloat64Mod:
+ case kIA32Float64Mod:
return 50;
case kArchTruncateDoubleToI:
return 9;
diff --git a/deps/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc b/deps/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc
index ce792692f0..8c2b58564a 100644
--- a/deps/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc
+++ b/deps/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc
@@ -99,11 +99,14 @@ class IA32OperandGenerator final : public OperandGenerator {
bool CanBeImmediate(Node* node) {
switch (node->opcode()) {
case IrOpcode::kInt32Constant:
- case IrOpcode::kNumberConstant:
case IrOpcode::kExternalConstant:
case IrOpcode::kRelocatableInt32Constant:
case IrOpcode::kRelocatableInt64Constant:
return true;
+ case IrOpcode::kNumberConstant: {
+ const double value = OpParameter<double>(node->op());
+ return bit_cast<int64_t>(value) == 0;
+ }
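      // Worked example: only +0.0 has an all-zero bit pattern, so it is the
      // single NumberConstant accepted as an immediate here:
      //   bit_cast<uint64_t>(0.0)  == 0                   // accepted
      //   bit_cast<uint64_t>(-0.0) == 0x8000000000000000  // rejected
      // Every other double falls through to a register use.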
case IrOpcode::kHeapConstant: {
// TODO(bmeurer): We must not dereference handles concurrently. If we
// really have to this here, then we need to find a way to put this
@@ -329,10 +332,13 @@ void VisitRROFloat(InstructionSelector* selector, Node* node,
void VisitFloatUnop(InstructionSelector* selector, Node* node, Node* input,
ArchOpcode opcode) {
IA32OperandGenerator g(selector);
+ InstructionOperand temps[] = {g.TempRegister()};
if (selector->IsSupported(AVX)) {
- selector->Emit(opcode, g.DefineAsRegister(node), g.Use(input));
+ selector->Emit(opcode, g.DefineAsRegister(node), g.UseRegister(input),
+ arraysize(temps), temps);
} else {
- selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(input));
+ selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(input),
+ arraysize(temps), temps);
}
}
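// Note: both the AVX and the non-AVX path now reserve a general-purpose temp.
// It is the scratch register the code generator passes to the new
// Absps/Abspd/Negps/Negpd helpers (i.TempRegister(0) in the kFloat32Abs
// through kFloat64Neg cases above), presumably used to address the
// sign/abs-mask constant.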
@@ -455,9 +461,9 @@ void InstructionSelector::VisitStackSlot(Node* node) {
sequence()->AddImmediate(Constant(slot)), 0, nullptr);
}
-void InstructionSelector::VisitAbortCSAAssert(Node* node) {
+void InstructionSelector::VisitAbortCSADcheck(Node* node) {
IA32OperandGenerator g(this);
- Emit(kArchAbortCSAAssert, g.NoOutput(), g.UseFixed(node->InputAt(0), edx));
+ Emit(kArchAbortCSADcheck, g.NoOutput(), g.UseFixed(node->InputAt(0), edx));
}
void InstructionSelector::VisitLoadLane(Node* node) {
@@ -575,7 +581,7 @@ void InstructionSelector::VisitLoad(Node* node, Node* value,
InstructionOperand inputs[3];
size_t input_count = 0;
AddressingMode mode =
- g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
+ g.GetEffectiveAddressMemoryOperand(value, inputs, &input_count);
InstructionCode code = opcode | AddressingModeField::encode(mode);
Emit(code, 1, outputs, input_count, inputs);
}
@@ -1123,53 +1129,53 @@ void InstructionSelector::VisitWord32Ror(Node* node) {
VisitShift(this, node, kIA32Ror);
}
-#define RO_OP_LIST(V) \
- V(Word32Clz, kIA32Lzcnt) \
- V(Word32Ctz, kIA32Tzcnt) \
- V(Word32Popcnt, kIA32Popcnt) \
- V(ChangeFloat32ToFloat64, kSSEFloat32ToFloat64) \
- V(RoundInt32ToFloat32, kSSEInt32ToFloat32) \
- V(ChangeInt32ToFloat64, kSSEInt32ToFloat64) \
- V(TruncateFloat32ToInt32, kSSEFloat32ToInt32) \
- V(ChangeFloat64ToInt32, kSSEFloat64ToInt32) \
- V(TruncateFloat64ToFloat32, kSSEFloat64ToFloat32) \
- V(RoundFloat64ToInt32, kSSEFloat64ToInt32) \
- V(BitcastFloat32ToInt32, kIA32BitcastFI) \
- V(BitcastInt32ToFloat32, kIA32BitcastIF) \
- V(Float32Sqrt, kSSEFloat32Sqrt) \
- V(Float64Sqrt, kSSEFloat64Sqrt) \
- V(Float64ExtractLowWord32, kSSEFloat64ExtractLowWord32) \
- V(Float64ExtractHighWord32, kSSEFloat64ExtractHighWord32) \
- V(SignExtendWord8ToInt32, kIA32Movsxbl) \
- V(SignExtendWord16ToInt32, kIA32Movsxwl) \
+#define RO_OP_LIST(V) \
+ V(Word32Clz, kIA32Lzcnt) \
+ V(Word32Ctz, kIA32Tzcnt) \
+ V(Word32Popcnt, kIA32Popcnt) \
+ V(ChangeFloat32ToFloat64, kIA32Float32ToFloat64) \
+ V(RoundInt32ToFloat32, kSSEInt32ToFloat32) \
+ V(ChangeInt32ToFloat64, kSSEInt32ToFloat64) \
+ V(TruncateFloat32ToInt32, kIA32Float32ToInt32) \
+ V(ChangeFloat64ToInt32, kIA32Float64ToInt32) \
+ V(TruncateFloat64ToFloat32, kIA32Float64ToFloat32) \
+ V(RoundFloat64ToInt32, kIA32Float64ToInt32) \
+ V(BitcastFloat32ToInt32, kIA32BitcastFI) \
+ V(BitcastInt32ToFloat32, kIA32BitcastIF) \
+ V(Float32Sqrt, kIA32Float32Sqrt) \
+ V(Float64Sqrt, kIA32Float64Sqrt) \
+ V(Float64ExtractLowWord32, kIA32Float64ExtractLowWord32) \
+ V(Float64ExtractHighWord32, kIA32Float64ExtractHighWord32) \
+ V(SignExtendWord8ToInt32, kIA32Movsxbl) \
+ V(SignExtendWord16ToInt32, kIA32Movsxwl) \
V(F64x2Sqrt, kIA32F64x2Sqrt)
-#define RO_WITH_TEMP_OP_LIST(V) V(ChangeUint32ToFloat64, kSSEUint32ToFloat64)
-
-#define RO_WITH_TEMP_SIMD_OP_LIST(V) \
- V(TruncateFloat32ToUint32, kSSEFloat32ToUint32) \
- V(ChangeFloat64ToUint32, kSSEFloat64ToUint32) \
- V(TruncateFloat64ToUint32, kSSEFloat64ToUint32)
-
-#define RR_OP_LIST(V) \
- V(TruncateFloat64ToWord32, kArchTruncateDoubleToI) \
- V(Float32RoundDown, kSSEFloat32Round | MiscField::encode(kRoundDown)) \
- V(Float64RoundDown, kSSEFloat64Round | MiscField::encode(kRoundDown)) \
- V(Float32RoundUp, kSSEFloat32Round | MiscField::encode(kRoundUp)) \
- V(Float64RoundUp, kSSEFloat64Round | MiscField::encode(kRoundUp)) \
- V(Float32RoundTruncate, kSSEFloat32Round | MiscField::encode(kRoundToZero)) \
- V(Float64RoundTruncate, kSSEFloat64Round | MiscField::encode(kRoundToZero)) \
- V(Float32RoundTiesEven, \
- kSSEFloat32Round | MiscField::encode(kRoundToNearest)) \
- V(Float64RoundTiesEven, \
- kSSEFloat64Round | MiscField::encode(kRoundToNearest)) \
- V(F32x4Ceil, kIA32F32x4Round | MiscField::encode(kRoundUp)) \
- V(F32x4Floor, kIA32F32x4Round | MiscField::encode(kRoundDown)) \
- V(F32x4Trunc, kIA32F32x4Round | MiscField::encode(kRoundToZero)) \
- V(F32x4NearestInt, kIA32F32x4Round | MiscField::encode(kRoundToNearest)) \
- V(F64x2Ceil, kIA32F64x2Round | MiscField::encode(kRoundUp)) \
- V(F64x2Floor, kIA32F64x2Round | MiscField::encode(kRoundDown)) \
- V(F64x2Trunc, kIA32F64x2Round | MiscField::encode(kRoundToZero)) \
+#define RO_WITH_TEMP_OP_LIST(V) V(ChangeUint32ToFloat64, kIA32Uint32ToFloat64)
+
+#define RO_WITH_TEMP_SIMD_OP_LIST(V) \
+ V(TruncateFloat32ToUint32, kIA32Float32ToUint32) \
+ V(ChangeFloat64ToUint32, kIA32Float64ToUint32) \
+ V(TruncateFloat64ToUint32, kIA32Float64ToUint32)
+
+#define RR_OP_LIST(V) \
+ V(TruncateFloat64ToWord32, kArchTruncateDoubleToI) \
+ V(Float32RoundDown, kIA32Float32Round | MiscField::encode(kRoundDown)) \
+ V(Float64RoundDown, kIA32Float64Round | MiscField::encode(kRoundDown)) \
+ V(Float32RoundUp, kIA32Float32Round | MiscField::encode(kRoundUp)) \
+ V(Float64RoundUp, kIA32Float64Round | MiscField::encode(kRoundUp)) \
+ V(Float32RoundTruncate, kIA32Float32Round | MiscField::encode(kRoundToZero)) \
+ V(Float64RoundTruncate, kIA32Float64Round | MiscField::encode(kRoundToZero)) \
+ V(Float32RoundTiesEven, \
+ kIA32Float32Round | MiscField::encode(kRoundToNearest)) \
+ V(Float64RoundTiesEven, \
+ kIA32Float64Round | MiscField::encode(kRoundToNearest)) \
+ V(F32x4Ceil, kIA32F32x4Round | MiscField::encode(kRoundUp)) \
+ V(F32x4Floor, kIA32F32x4Round | MiscField::encode(kRoundDown)) \
+ V(F32x4Trunc, kIA32F32x4Round | MiscField::encode(kRoundToZero)) \
+ V(F32x4NearestInt, kIA32F32x4Round | MiscField::encode(kRoundToNearest)) \
+ V(F64x2Ceil, kIA32F64x2Round | MiscField::encode(kRoundUp)) \
+ V(F64x2Floor, kIA32F64x2Round | MiscField::encode(kRoundDown)) \
+ V(F64x2Trunc, kIA32F64x2Round | MiscField::encode(kRoundToZero)) \
V(F64x2NearestInt, kIA32F64x2Round | MiscField::encode(kRoundToNearest))
#define RRO_FLOAT_OP_LIST(V) \
@@ -1195,6 +1201,8 @@ void InstructionSelector::VisitWord32Ror(Node* node) {
V(Float64Abs, kFloat64Abs) \
V(Float32Neg, kFloat32Neg) \
V(Float64Neg, kFloat64Neg) \
+ V(F32x4Abs, kFloat32Abs) \
+ V(F32x4Neg, kFloat32Neg) \
V(F64x2Abs, kFloat64Abs) \
V(F64x2Neg, kFloat64Neg)
@@ -1347,14 +1355,14 @@ void InstructionSelector::VisitUint32Mod(Node* node) {
void InstructionSelector::VisitRoundUint32ToFloat32(Node* node) {
IA32OperandGenerator g(this);
InstructionOperand temps[] = {g.TempRegister()};
- Emit(kSSEUint32ToFloat32, g.DefineAsRegister(node), g.Use(node->InputAt(0)),
+ Emit(kIA32Uint32ToFloat32, g.DefineAsRegister(node), g.Use(node->InputAt(0)),
arraysize(temps), temps);
}
void InstructionSelector::VisitFloat64Mod(Node* node) {
IA32OperandGenerator g(this);
InstructionOperand temps[] = {g.TempRegister(eax), g.TempRegister()};
- Emit(kSSEFloat64Mod, g.DefineSameAsFirst(node),
+ Emit(kIA32Float64Mod, g.DefineSameAsFirst(node),
g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)),
arraysize(temps), temps);
}
@@ -1362,7 +1370,7 @@ void InstructionSelector::VisitFloat64Mod(Node* node) {
void InstructionSelector::VisitFloat32Max(Node* node) {
IA32OperandGenerator g(this);
InstructionOperand temps[] = {g.TempRegister()};
- Emit(kSSEFloat32Max, g.DefineSameAsFirst(node),
+ Emit(kIA32Float32Max, g.DefineSameAsFirst(node),
g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)),
arraysize(temps), temps);
}
@@ -1370,7 +1378,7 @@ void InstructionSelector::VisitFloat32Max(Node* node) {
void InstructionSelector::VisitFloat64Max(Node* node) {
IA32OperandGenerator g(this);
InstructionOperand temps[] = {g.TempRegister()};
- Emit(kSSEFloat64Max, g.DefineSameAsFirst(node),
+ Emit(kIA32Float64Max, g.DefineSameAsFirst(node),
g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)),
arraysize(temps), temps);
}
@@ -1378,7 +1386,7 @@ void InstructionSelector::VisitFloat64Max(Node* node) {
void InstructionSelector::VisitFloat32Min(Node* node) {
IA32OperandGenerator g(this);
InstructionOperand temps[] = {g.TempRegister()};
- Emit(kSSEFloat32Min, g.DefineSameAsFirst(node),
+ Emit(kIA32Float32Min, g.DefineSameAsFirst(node),
g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)),
arraysize(temps), temps);
}
@@ -1386,7 +1394,7 @@ void InstructionSelector::VisitFloat32Min(Node* node) {
void InstructionSelector::VisitFloat64Min(Node* node) {
IA32OperandGenerator g(this);
InstructionOperand temps[] = {g.TempRegister()};
- Emit(kSSEFloat64Min, g.DefineSameAsFirst(node),
+ Emit(kIA32Float64Min, g.DefineSameAsFirst(node),
g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)),
arraysize(temps), temps);
}
@@ -1622,7 +1630,7 @@ void VisitFloat32Compare(InstructionSelector* selector, Node* node,
FlagsContinuation* cont) {
Node* const left = node->InputAt(0);
Node* const right = node->InputAt(1);
- VisitCompare(selector, kSSEFloat32Cmp, right, left, cont, false);
+ VisitCompare(selector, kIA32Float32Cmp, right, left, cont, false);
}
// Shared routine for multiple float64 compare operations (inputs commuted).
@@ -1630,7 +1638,7 @@ void VisitFloat64Compare(InstructionSelector* selector, Node* node,
FlagsContinuation* cont) {
Node* const left = node->InputAt(0);
Node* const right = node->InputAt(1);
- VisitCompare(selector, kSSEFloat64Cmp, right, left, cont, false);
+ VisitCompare(selector, kIA32Float64Cmp, right, left, cont, false);
}
// Shared routine for multiple word compare operations.
@@ -1965,10 +1973,10 @@ void InstructionSelector::VisitFloat64InsertLowWord32(Node* node) {
Float64Matcher mleft(left);
if (mleft.HasResolvedValue() &&
(bit_cast<uint64_t>(mleft.ResolvedValue()) >> 32) == 0u) {
- Emit(kSSEFloat64LoadLowWord32, g.DefineAsRegister(node), g.Use(right));
+ Emit(kIA32Float64LoadLowWord32, g.DefineAsRegister(node), g.Use(right));
return;
}
- Emit(kSSEFloat64InsertLowWord32, g.DefineSameAsFirst(node),
+ Emit(kIA32Float64InsertLowWord32, g.DefineSameAsFirst(node),
g.UseRegister(left), g.Use(right));
}
@@ -1976,13 +1984,13 @@ void InstructionSelector::VisitFloat64InsertHighWord32(Node* node) {
IA32OperandGenerator g(this);
Node* left = node->InputAt(0);
Node* right = node->InputAt(1);
- Emit(kSSEFloat64InsertHighWord32, g.DefineSameAsFirst(node),
+ Emit(kIA32Float64InsertHighWord32, g.DefineSameAsFirst(node),
g.UseRegister(left), g.Use(right));
}
void InstructionSelector::VisitFloat64SilenceNaN(Node* node) {
IA32OperandGenerator g(this);
- Emit(kSSEFloat64SilenceNaN, g.DefineSameAsFirst(node),
+ Emit(kIA32Float64SilenceNaN, g.DefineSameAsFirst(node),
g.UseRegister(node->InputAt(0)));
}
@@ -2247,8 +2255,6 @@ void InstructionSelector::VisitWord32AtomicPairCompareExchange(Node* node) {
V(I8x16)
#define SIMD_BINOP_LIST(V) \
- V(F32x4Min) \
- V(F32x4Max) \
V(I32x4GtU) \
V(I32x4GeU) \
V(I16x8Ne) \
@@ -2269,6 +2275,8 @@ void InstructionSelector::VisitWord32AtomicPairCompareExchange(Node* node) {
V(F32x4Ne) \
V(F32x4Lt) \
V(F32x4Le) \
+ V(F32x4Min) \
+ V(F32x4Max) \
V(I64x2Add) \
V(I64x2Sub) \
V(I64x2Eq) \
@@ -2339,10 +2347,7 @@ void InstructionSelector::VisitWord32AtomicPairCompareExchange(Node* node) {
#define SIMD_UNOP_LIST(V) \
V(F64x2ConvertLowI32x4S) \
- V(F64x2PromoteLowF32x4) \
V(F32x4DemoteF64x2Zero) \
- V(F32x4Abs) \
- V(F32x4Neg) \
V(F32x4Sqrt) \
V(F32x4SConvertI32x4) \
V(F32x4RecipApprox) \
@@ -3169,6 +3174,25 @@ void InstructionSelector::VisitI64x2Abs(Node* node) {
VisitRRSimd(this, node, kIA32I64x2Abs, kIA32I64x2Abs);
}
+void InstructionSelector::VisitF64x2PromoteLowF32x4(Node* node) {
+ IA32OperandGenerator g(this);
+ InstructionCode code = kIA32F64x2PromoteLowF32x4;
+ Node* input = node->InputAt(0);
+ LoadTransformMatcher m(input);
+
+ if (m.Is(LoadTransformation::kS128Load64Zero) && CanCover(node, input)) {
+ // Trap handler is not supported on IA32.
+ DCHECK_NE(m.ResolvedValue().kind, MemoryAccessKind::kProtected);
+ // LoadTransforms cannot be eliminated, so they are visited even if
+ // unused. Mark it as defined so that we don't visit it.
+ MarkAsDefined(input);
+ VisitLoad(node, input, code);
+ return;
+ }
+
+ VisitRR(this, node, code);
+}
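// Illustration: with this folding, F64x2PromoteLowF32x4(S128Load64Zero(m))
// is selected as a single kIA32F64x2PromoteLowF32x4 whose input is the memory
// operand, and the code generator (see the HasAddressingMode branch added
// above) emits Cvtps2pd straight from memory instead of a separate 64-bit
// load followed by a register-to-register Cvtps2pd.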
+
void InstructionSelector::AddOutputToSelectContinuation(OperandGenerator* g,
int first_input_index,
Node* node) {
diff --git a/deps/v8/src/compiler/backend/instruction-codes.h b/deps/v8/src/compiler/backend/instruction-codes.h
index 63cf3ca06f..56d4d960bd 100644
--- a/deps/v8/src/compiler/backend/instruction-codes.h
+++ b/deps/v8/src/compiler/backend/instruction-codes.h
@@ -92,7 +92,7 @@ inline RecordWriteMode WriteBarrierKindToRecordWriteMode(
V(ArchBinarySearchSwitch) \
V(ArchTableSwitch) \
V(ArchNop) \
- V(ArchAbortCSAAssert) \
+ V(ArchAbortCSADcheck) \
V(ArchDebugBreak) \
V(ArchComment) \
V(ArchThrowTerminator) \
@@ -296,23 +296,58 @@ static_assert(
"All addressing modes must fit in the 5-bit AddressingModeField.");
using FlagsModeField = base::BitField<FlagsMode, 14, 3>;
using FlagsConditionField = base::BitField<FlagsCondition, 17, 5>;
-using DeoptImmedArgsCountField = base::BitField<int, 22, 2>;
-using DeoptFrameStateOffsetField = base::BitField<int, 24, 8>;
+using MiscField = base::BitField<int, 22, 10>;
+
+// {MiscField} is used for a variety of things, depending on the opcode.
+// TODO(turbofan): There should be an abstraction that ensures safe encoding and
+// decoding. {HasMemoryAccessMode} and its uses are a small step in that
+// direction.
+
// LaneSizeField and AccessModeField are helper types to encode/decode a lane
// size, an access mode, or both inside the overlapping MiscField.
using LaneSizeField = base::BitField<int, 22, 8>;
using AccessModeField = base::BitField<MemoryAccessMode, 30, 2>;
+// TODO(turbofan): {HasMemoryAccessMode} is currently only used to guard
+// decoding (in CodeGenerator and InstructionScheduler). Encoding (in
+// InstructionSelector) is not yet guarded. There are in fact instructions for
+// which InstructionSelector does set a MemoryAccessMode but CodeGenerator
+// doesn't care to consume it (e.g. kArm64LdrDecompressTaggedSigned). This is
+// scary. {HasMemoryAccessMode} does not include these instructions, so they can
+// be easily found by guarding encoding.
+inline bool HasMemoryAccessMode(ArchOpcode opcode) {
+ switch (opcode) {
+#define CASE(Name) \
+ case k##Name: \
+ return true;
+ TARGET_ARCH_OPCODE_WITH_MEMORY_ACCESS_MODE_LIST(CASE)
+#undef CASE
+ default:
+ return false;
+ }
+}
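// Usage sketch: consumers pair this predicate with the AccessModeField decode
// that Instruction now exposes (see instruction.h below), e.g. the scheduler's
// new CanTrap check:
//
//   if (instr->HasMemoryAccessMode() &&
//       instr->memory_access_mode() == kMemoryAccessProtected) {
//     // may fault: keep it ordered relative to deopts and side effects
//   }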
+
+using DeoptImmedArgsCountField = base::BitField<int, 22, 2>;
+using DeoptFrameStateOffsetField = base::BitField<int, 24, 8>;
+
// AtomicWidthField overlaps with MiscField and is used for the various Atomic
// opcodes. Only used on 64bit architectures. All atomic instructions on 32bit
// architectures are assumed to be 32bit wide.
using AtomicWidthField = base::BitField<AtomicWidth, 22, 2>;
+
// AtomicMemoryOrderField overlaps with MiscField and is used for the various
// Atomic opcodes. This field is not used on all architectures. It is used on
// architectures where the codegen for kSeqCst and kAcqRel differ only by
// emitting fences.
using AtomicMemoryOrderField = base::BitField<AtomicMemoryOrder, 24, 2>;
using AtomicStoreRecordWriteModeField = base::BitField<RecordWriteMode, 26, 4>;
-using MiscField = base::BitField<int, 22, 10>;
+
+// ParamField and FPParamField overlap with MiscField, as the latter is never
+// used for Call instructions. These 2 fields represent the general purpose
+// and floating point parameter counts of a direct call into C and are given 5
+// bits each, which allow storing a number up to the current maximum parameter
+// count, which is 20 (see kMaxCParameters defined in macro-assembler.h).
+using ParamField = base::BitField<int, 22, 5>;
+using FPParamField = base::BitField<int, 27, 5>;
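// Usage sketch (mirrors VisitCall in instruction-selector.cc below): the
// selector packs both parameter counts into the opcode and the code generator
// decodes them again, roughly
//
//   InstructionCode opcode = kArchCallCFunction |
//                            ParamField::encode(gp_param_count) |
//                            FPParamField::encode(fp_param_count);
//   int gp_params = ParamField::decode(opcode);
//   int fp_params = FPParamField::decode(opcode);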
// This static assertion serves as an early warning if we are about to exhaust
// the available opcode space. If we are about to exhaust it, we should start
diff --git a/deps/v8/src/compiler/backend/instruction-scheduler.cc b/deps/v8/src/compiler/backend/instruction-scheduler.cc
index bdad838f3e..3d0be78262 100644
--- a/deps/v8/src/compiler/backend/instruction-scheduler.cc
+++ b/deps/v8/src/compiler/backend/instruction-scheduler.cc
@@ -167,12 +167,16 @@ void InstructionScheduler::AddInstruction(Instruction* instr) {
last_side_effect_instr_->AddSuccessor(new_node);
}
pending_loads_.push_back(new_node);
- } else if (instr->IsDeoptimizeCall() || instr->IsTrap()) {
+ } else if (instr->IsDeoptimizeCall() || CanTrap(instr)) {
// Ensure that deopts or traps are not reordered with respect to
// side-effect instructions.
if (last_side_effect_instr_ != nullptr) {
last_side_effect_instr_->AddSuccessor(new_node);
}
+ }
+
+ // Update last deoptimization or trap point.
+ if (instr->IsDeoptimizeCall() || CanTrap(instr)) {
last_deopt_or_trap_ = new_node;
}
@@ -304,7 +308,7 @@ int InstructionScheduler::GetInstructionFlags(const Instruction* instr) const {
#if V8_ENABLE_WEBASSEMBLY
case kArchTailCallWasm:
#endif // V8_ENABLE_WEBASSEMBLY
- case kArchAbortCSAAssert:
+ case kArchAbortCSADcheck:
return kHasSideEffect;
case kArchDebugBreak:
diff --git a/deps/v8/src/compiler/backend/instruction-scheduler.h b/deps/v8/src/compiler/backend/instruction-scheduler.h
index c22190bd50..d4c08a033d 100644
--- a/deps/v8/src/compiler/backend/instruction-scheduler.h
+++ b/deps/v8/src/compiler/backend/instruction-scheduler.h
@@ -169,6 +169,12 @@ class InstructionScheduler final : public ZoneObject {
return (GetInstructionFlags(instr) & kIsLoadOperation) != 0;
}
+ bool CanTrap(const Instruction* instr) const {
+ return instr->IsTrap() ||
+ (instr->HasMemoryAccessMode() &&
+ instr->memory_access_mode() == kMemoryAccessProtected);
+ }
+
// The scheduler will not move the following instructions before the last
// deopt/trap check:
// * loads (this is conservative)
@@ -184,7 +190,7 @@ class InstructionScheduler final : public ZoneObject {
// trap point we encountered.
bool DependsOnDeoptOrTrap(const Instruction* instr) const {
return MayNeedDeoptOrTrapCheck(instr) || instr->IsDeoptimizeCall() ||
- instr->IsTrap() || HasSideEffect(instr) || IsLoadOperation(instr);
+ CanTrap(instr) || HasSideEffect(instr) || IsLoadOperation(instr);
}
// Identify nops used as a definition point for live-in registers at
diff --git a/deps/v8/src/compiler/backend/instruction-selector.cc b/deps/v8/src/compiler/backend/instruction-selector.cc
index cd2b83ac3d..beb716abbe 100644
--- a/deps/v8/src/compiler/backend/instruction-selector.cc
+++ b/deps/v8/src/compiler/backend/instruction-selector.cc
@@ -1195,9 +1195,7 @@ void InstructionSelector::VisitBlock(BasicBlock* block) {
if (node->opcode() == IrOpcode::kStore ||
node->opcode() == IrOpcode::kUnalignedStore ||
node->opcode() == IrOpcode::kCall ||
- node->opcode() == IrOpcode::kProtectedLoad ||
node->opcode() == IrOpcode::kProtectedStore ||
- node->opcode() == IrOpcode::kLoadTransform ||
#define ADD_EFFECT_FOR_ATOMIC_OP(Opcode) \
node->opcode() == IrOpcode::k##Opcode ||
MACHINE_ATOMIC_OP_LIST(ADD_EFFECT_FOR_ATOMIC_OP)
@@ -1454,8 +1452,8 @@ void InstructionSelector::VisitNode(Node* node) {
case IrOpcode::kStateValues:
case IrOpcode::kObjectState:
return;
- case IrOpcode::kAbortCSAAssert:
- VisitAbortCSAAssert(node);
+ case IrOpcode::kAbortCSADcheck:
+ VisitAbortCSADcheck(node);
return;
case IrOpcode::kDebugBreak:
VisitDebugBreak(node);
@@ -2786,7 +2784,7 @@ namespace {
LinkageLocation ExceptionLocation() {
return LinkageLocation::ForRegister(kReturnRegister0.code(),
- MachineType::IntPtr());
+ MachineType::TaggedPointer());
}
constexpr InstructionCode EncodeCallDescriptorFlags(
@@ -2916,16 +2914,20 @@ void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) {
InstructionCode opcode;
switch (call_descriptor->kind()) {
case CallDescriptor::kCallAddress: {
- int misc_field = static_cast<int>(call_descriptor->ParameterCount());
+ int gp_param_count =
+ static_cast<int>(call_descriptor->GPParameterCount());
+ int fp_param_count =
+ static_cast<int>(call_descriptor->FPParameterCount());
#if ABI_USES_FUNCTION_DESCRIPTORS
- // Highest misc_field bit is used on AIX to indicate if a CFunction call
- // has function descriptor or not.
- STATIC_ASSERT(MiscField::kSize == kHasFunctionDescriptorBitShift + 1);
+ // Highest fp_param_count bit is used on AIX to indicate if a CFunction
+ // call has function descriptor or not.
+ STATIC_ASSERT(FPParamField::kSize == kHasFunctionDescriptorBitShift + 1);
if (!call_descriptor->NoFunctionDescriptor()) {
- misc_field |= 1 << kHasFunctionDescriptorBitShift;
+ fp_param_count |= 1 << kHasFunctionDescriptorBitShift;
}
#endif
- opcode = kArchCallCFunction | MiscField::encode(misc_field);
+ opcode = kArchCallCFunction | ParamField::encode(gp_param_count) |
+ FPParamField::encode(fp_param_count);
break;
}
case CallDescriptor::kCallCodeObject:
diff --git a/deps/v8/src/compiler/backend/instruction.cc b/deps/v8/src/compiler/backend/instruction.cc
index 0da8e054ae..a5c008bad5 100644
--- a/deps/v8/src/compiler/backend/instruction.cc
+++ b/deps/v8/src/compiler/backend/instruction.cc
@@ -7,7 +7,9 @@
#include <cstddef>
#include <iomanip>
+#include "src/codegen/aligned-slot-allocator.h"
#include "src/codegen/interface-descriptors.h"
+#include "src/codegen/machine-type.h"
#include "src/codegen/register-configuration.h"
#include "src/codegen/source-position.h"
#include "src/compiler/common-operator.h"
@@ -77,10 +79,15 @@ FlagsCondition CommuteFlagsCondition(FlagsCondition condition) {
}
bool InstructionOperand::InterferesWith(const InstructionOperand& other) const {
- if (kSimpleFPAliasing || !this->IsFPLocationOperand() ||
- !other.IsFPLocationOperand())
+ const bool kComplexFPAliasing = !kSimpleFPAliasing &&
+ this->IsFPLocationOperand() &&
+ other.IsFPLocationOperand();
+ const bool kComplexS128SlotAliasing =
+ (this->IsSimd128StackSlot() && other.IsAnyStackSlot()) ||
+ (other.IsSimd128StackSlot() && this->IsAnyStackSlot());
+ if (!kComplexFPAliasing && !kComplexS128SlotAliasing) {
return EqualsCanonicalized(other);
- // Aliasing is complex and both operands are fp locations.
+ }
const LocationOperand& loc = *LocationOperand::cast(this);
const LocationOperand& other_loc = LocationOperand::cast(other);
LocationOperand::LocationKind kind = loc.location_kind();
@@ -88,22 +95,29 @@ bool InstructionOperand::InterferesWith(const InstructionOperand& other) const {
if (kind != other_kind) return false;
MachineRepresentation rep = loc.representation();
MachineRepresentation other_rep = other_loc.representation();
- if (rep == other_rep) return EqualsCanonicalized(other);
- if (kind == LocationOperand::REGISTER) {
- // FP register-register interference.
- return GetRegConfig()->AreAliases(rep, loc.register_code(), other_rep,
- other_loc.register_code());
+
+ if (kComplexFPAliasing && !kComplexS128SlotAliasing) {
+ if (rep == other_rep) return EqualsCanonicalized(other);
+ if (kind == LocationOperand::REGISTER) {
+ // FP register-register interference.
+ return GetRegConfig()->AreAliases(rep, loc.register_code(), other_rep,
+ other_loc.register_code());
+ }
}
- // FP slot-slot interference. Slots of different FP reps can alias because
- // the gap resolver may break a move into 2 or 4 equivalent smaller moves.
+
+ // Complex multi-slot operand interference:
+ // - slots of different FP reps can alias because the gap resolver may break a
+ // move into 2 or 4 equivalent smaller moves,
+ // - stack layout can be rearranged for tail calls
DCHECK_EQ(LocationOperand::STACK_SLOT, kind);
int index_hi = loc.index();
int index_lo =
- index_hi - (1 << ElementSizeLog2Of(rep)) / kSystemPointerSize + 1;
+ index_hi -
+ AlignedSlotAllocator::NumSlotsForWidth(ElementSizeInBytes(rep)) + 1;
int other_index_hi = other_loc.index();
int other_index_lo =
other_index_hi -
- (1 << ElementSizeLog2Of(other_rep)) / kSystemPointerSize + 1;
+ AlignedSlotAllocator::NumSlotsForWidth(ElementSizeInBytes(other_rep)) + 1;
return other_index_hi >= index_lo && index_hi >= other_index_lo;
}
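
The slot-slot interference test above reduces to a closed-interval overlap check: each operand occupies the slot indices [index_lo, index_hi], where the number of slots comes from the representation's byte width. A minimal sketch of that check, assuming NumSlotsForWidth rounds the byte width up to whole pointer-sized slots (an assumption about AlignedSlotAllocator's behaviour, not a quote of it):

    #include <cassert>

    const int kSystemPointerSize = 8;  // assumption: a 64-bit target

    // Assumed behaviour: round a byte width up to whole pointer-sized slots.
    int NumSlotsForWidth(int byte_width) {
      return (byte_width + kSystemPointerSize - 1) / kSystemPointerSize;
    }

    // Two stack operands interfere iff their slot index ranges overlap.
    bool SlotsInterfere(int index_hi, int byte_width, int other_index_hi,
                        int other_byte_width) {
      int index_lo = index_hi - NumSlotsForWidth(byte_width) + 1;
      int other_index_lo = other_index_hi - NumSlotsForWidth(other_byte_width) + 1;
      return other_index_hi >= index_lo && index_hi >= other_index_lo;
    }

    int main() {
      // A Simd128 slot (16 bytes, 2 slots) ending at index 5 covers {4, 5}, so
      // it interferes with an 8-byte slot at index 4 but not with one at 3.
      assert(SlotsInterfere(5, 16, 4, 8));
      assert(!SlotsInterfere(5, 16, 3, 8));
    }
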
diff --git a/deps/v8/src/compiler/backend/instruction.h b/deps/v8/src/compiler/backend/instruction.h
index 8698ed8a98..7372a5160d 100644
--- a/deps/v8/src/compiler/backend/instruction.h
+++ b/deps/v8/src/compiler/backend/instruction.h
@@ -882,6 +882,13 @@ class V8_EXPORT_PRIVATE Instruction final {
return FlagsConditionField::decode(opcode());
}
int misc() const { return MiscField::decode(opcode()); }
+ bool HasMemoryAccessMode() const {
+ return compiler::HasMemoryAccessMode(arch_opcode());
+ }
+ MemoryAccessMode memory_access_mode() const {
+ DCHECK(HasMemoryAccessMode());
+ return AccessModeField::decode(opcode());
+ }
static Instruction* New(Zone* zone, InstructionCode opcode) {
return New(zone, opcode, 0, nullptr, 0, nullptr, 0, nullptr);
diff --git a/deps/v8/src/compiler/backend/loong64/code-generator-loong64.cc b/deps/v8/src/compiler/backend/loong64/code-generator-loong64.cc
index 0397a36145..33226126cd 100644
--- a/deps/v8/src/compiler/backend/loong64/code-generator-loong64.cc
+++ b/deps/v8/src/compiler/backend/loong64/code-generator-loong64.cc
@@ -748,13 +748,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArchTableSwitch:
AssembleArchTableSwitch(instr);
break;
- case kArchAbortCSAAssert:
+ case kArchAbortCSADcheck:
DCHECK(i.InputRegister(0) == a0);
{
// We don't actually want to generate a pile of code for this, so just
// claim there is a stack frame, without generating one.
- FrameScope scope(tasm(), StackFrame::NONE);
- __ Call(isolate()->builtins()->code_handle(Builtin::kAbortCSAAssert),
+ FrameScope scope(tasm(), StackFrame::NO_FRAME_TYPE);
+ __ Call(isolate()->builtins()->code_handle(Builtin::kAbortCSADcheck),
RelocInfo::CODE_TARGET);
}
__ stop();
diff --git a/deps/v8/src/compiler/backend/loong64/instruction-codes-loong64.h b/deps/v8/src/compiler/backend/loong64/instruction-codes-loong64.h
index f31818cac2..e38d37451d 100644
--- a/deps/v8/src/compiler/backend/loong64/instruction-codes-loong64.h
+++ b/deps/v8/src/compiler/backend/loong64/instruction-codes-loong64.h
@@ -11,365 +11,370 @@ namespace compiler {
// LOONG64-specific opcodes that specify which assembly sequence to emit.
// Most opcodes specify a single instruction.
-#define TARGET_ARCH_OPCODE_LIST(V) \
- V(Loong64Add_d) \
- V(Loong64Add_w) \
- V(Loong64AddOvf_d) \
- V(Loong64Sub_d) \
- V(Loong64Sub_w) \
- V(Loong64SubOvf_d) \
- V(Loong64Mul_d) \
- V(Loong64MulOvf_w) \
- V(Loong64Mulh_d) \
- V(Loong64Mulh_w) \
- V(Loong64Mulh_wu) \
- V(Loong64Mul_w) \
- V(Loong64Div_d) \
- V(Loong64Div_w) \
- V(Loong64Div_du) \
- V(Loong64Div_wu) \
- V(Loong64Mod_d) \
- V(Loong64Mod_w) \
- V(Loong64Mod_du) \
- V(Loong64Mod_wu) \
- V(Loong64And) \
- V(Loong64And32) \
- V(Loong64Or) \
- V(Loong64Or32) \
- V(Loong64Nor) \
- V(Loong64Nor32) \
- V(Loong64Xor) \
- V(Loong64Xor32) \
- V(Loong64Alsl_d) \
- V(Loong64Alsl_w) \
- V(Loong64Sll_d) \
- V(Loong64Sll_w) \
- V(Loong64Srl_d) \
- V(Loong64Srl_w) \
- V(Loong64Sra_d) \
- V(Loong64Sra_w) \
- V(Loong64Rotr_d) \
- V(Loong64Rotr_w) \
- V(Loong64Bstrpick_d) \
- V(Loong64Bstrpick_w) \
- V(Loong64Bstrins_d) \
- V(Loong64Bstrins_w) \
- V(Loong64ByteSwap64) \
- V(Loong64ByteSwap32) \
- V(Loong64Clz_d) \
- V(Loong64Clz_w) \
- V(Loong64Mov) \
- V(Loong64Tst) \
- V(Loong64Cmp) \
- V(Loong64Float32Cmp) \
- V(Loong64Float32Add) \
- V(Loong64Float32Sub) \
- V(Loong64Float32Mul) \
- V(Loong64Float32Div) \
- V(Loong64Float32Abs) \
- V(Loong64Float32Neg) \
- V(Loong64Float32Sqrt) \
- V(Loong64Float32Max) \
- V(Loong64Float32Min) \
- V(Loong64Float32ToFloat64) \
- V(Loong64Float32RoundDown) \
- V(Loong64Float32RoundUp) \
- V(Loong64Float32RoundTruncate) \
- V(Loong64Float32RoundTiesEven) \
- V(Loong64Float32ToInt32) \
- V(Loong64Float32ToInt64) \
- V(Loong64Float32ToUint32) \
- V(Loong64Float32ToUint64) \
- V(Loong64Float64Cmp) \
- V(Loong64Float64Add) \
- V(Loong64Float64Sub) \
- V(Loong64Float64Mul) \
- V(Loong64Float64Div) \
- V(Loong64Float64Mod) \
- V(Loong64Float64Abs) \
- V(Loong64Float64Neg) \
- V(Loong64Float64Sqrt) \
- V(Loong64Float64Max) \
- V(Loong64Float64Min) \
- V(Loong64Float64ToFloat32) \
- V(Loong64Float64RoundDown) \
- V(Loong64Float64RoundUp) \
- V(Loong64Float64RoundTruncate) \
- V(Loong64Float64RoundTiesEven) \
- V(Loong64Float64ToInt32) \
- V(Loong64Float64ToInt64) \
- V(Loong64Float64ToUint32) \
- V(Loong64Float64ToUint64) \
- V(Loong64Int32ToFloat32) \
- V(Loong64Int32ToFloat64) \
- V(Loong64Int64ToFloat32) \
- V(Loong64Int64ToFloat64) \
- V(Loong64Uint32ToFloat32) \
- V(Loong64Uint32ToFloat64) \
- V(Loong64Uint64ToFloat32) \
- V(Loong64Uint64ToFloat64) \
- V(Loong64Float64ExtractLowWord32) \
- V(Loong64Float64ExtractHighWord32) \
- V(Loong64Float64InsertLowWord32) \
- V(Loong64Float64InsertHighWord32) \
- V(Loong64BitcastDL) \
- V(Loong64BitcastLD) \
- V(Loong64Float64SilenceNaN) \
- V(Loong64Ld_b) \
- V(Loong64Ld_bu) \
- V(Loong64St_b) \
- V(Loong64Ld_h) \
- V(Loong64Ld_hu) \
- V(Loong64St_h) \
- V(Loong64Ld_w) \
- V(Loong64Ld_wu) \
- V(Loong64St_w) \
- V(Loong64Ld_d) \
- V(Loong64St_d) \
- V(Loong64Fld_s) \
- V(Loong64Fst_s) \
- V(Loong64Fld_d) \
- V(Loong64Fst_d) \
- V(Loong64Push) \
- V(Loong64Peek) \
- V(Loong64Poke) \
- V(Loong64StackClaim) \
- V(Loong64Ext_w_b) \
- V(Loong64Ext_w_h) \
- V(Loong64Dbar) \
- V(Loong64S128Const) \
- V(Loong64S128Zero) \
- V(Loong64S128AllOnes) \
- V(Loong64I32x4Splat) \
- V(Loong64I32x4ExtractLane) \
- V(Loong64I32x4ReplaceLane) \
- V(Loong64I32x4Add) \
- V(Loong64I32x4Sub) \
- V(Loong64F64x2Abs) \
- V(Loong64F64x2Neg) \
- V(Loong64F32x4Splat) \
- V(Loong64F32x4ExtractLane) \
- V(Loong64F32x4ReplaceLane) \
- V(Loong64F32x4SConvertI32x4) \
- V(Loong64F32x4UConvertI32x4) \
- V(Loong64I32x4Mul) \
- V(Loong64I32x4MaxS) \
- V(Loong64I32x4MinS) \
- V(Loong64I32x4Eq) \
- V(Loong64I32x4Ne) \
- V(Loong64I32x4Shl) \
- V(Loong64I32x4ShrS) \
- V(Loong64I32x4ShrU) \
- V(Loong64I32x4MaxU) \
- V(Loong64I32x4MinU) \
- V(Loong64F64x2Sqrt) \
- V(Loong64F64x2Add) \
- V(Loong64F64x2Sub) \
- V(Loong64F64x2Mul) \
- V(Loong64F64x2Div) \
- V(Loong64F64x2Min) \
- V(Loong64F64x2Max) \
- V(Loong64F64x2Eq) \
- V(Loong64F64x2Ne) \
- V(Loong64F64x2Lt) \
- V(Loong64F64x2Le) \
- V(Loong64F64x2Splat) \
- V(Loong64F64x2ExtractLane) \
- V(Loong64F64x2ReplaceLane) \
- V(Loong64F64x2Pmin) \
- V(Loong64F64x2Pmax) \
- V(Loong64F64x2Ceil) \
- V(Loong64F64x2Floor) \
- V(Loong64F64x2Trunc) \
- V(Loong64F64x2NearestInt) \
- V(Loong64F64x2ConvertLowI32x4S) \
- V(Loong64F64x2ConvertLowI32x4U) \
- V(Loong64F64x2PromoteLowF32x4) \
- V(Loong64I64x2Splat) \
- V(Loong64I64x2ExtractLane) \
- V(Loong64I64x2ReplaceLane) \
- V(Loong64I64x2Add) \
- V(Loong64I64x2Sub) \
- V(Loong64I64x2Mul) \
- V(Loong64I64x2Neg) \
- V(Loong64I64x2Shl) \
- V(Loong64I64x2ShrS) \
- V(Loong64I64x2ShrU) \
- V(Loong64I64x2BitMask) \
- V(Loong64I64x2Eq) \
- V(Loong64I64x2Ne) \
- V(Loong64I64x2GtS) \
- V(Loong64I64x2GeS) \
- V(Loong64I64x2Abs) \
- V(Loong64I64x2SConvertI32x4Low) \
- V(Loong64I64x2SConvertI32x4High) \
- V(Loong64I64x2UConvertI32x4Low) \
- V(Loong64I64x2UConvertI32x4High) \
- V(Loong64ExtMulLow) \
- V(Loong64ExtMulHigh) \
- V(Loong64ExtAddPairwise) \
- V(Loong64F32x4Abs) \
- V(Loong64F32x4Neg) \
- V(Loong64F32x4Sqrt) \
- V(Loong64F32x4RecipApprox) \
- V(Loong64F32x4RecipSqrtApprox) \
- V(Loong64F32x4Add) \
- V(Loong64F32x4Sub) \
- V(Loong64F32x4Mul) \
- V(Loong64F32x4Div) \
- V(Loong64F32x4Max) \
- V(Loong64F32x4Min) \
- V(Loong64F32x4Eq) \
- V(Loong64F32x4Ne) \
- V(Loong64F32x4Lt) \
- V(Loong64F32x4Le) \
- V(Loong64F32x4Pmin) \
- V(Loong64F32x4Pmax) \
- V(Loong64F32x4Ceil) \
- V(Loong64F32x4Floor) \
- V(Loong64F32x4Trunc) \
- V(Loong64F32x4NearestInt) \
- V(Loong64F32x4DemoteF64x2Zero) \
- V(Loong64I32x4SConvertF32x4) \
- V(Loong64I32x4UConvertF32x4) \
- V(Loong64I32x4Neg) \
- V(Loong64I32x4GtS) \
- V(Loong64I32x4GeS) \
- V(Loong64I32x4GtU) \
- V(Loong64I32x4GeU) \
- V(Loong64I32x4Abs) \
- V(Loong64I32x4BitMask) \
- V(Loong64I32x4DotI16x8S) \
- V(Loong64I32x4TruncSatF64x2SZero) \
- V(Loong64I32x4TruncSatF64x2UZero) \
- V(Loong64I16x8Splat) \
- V(Loong64I16x8ExtractLaneU) \
- V(Loong64I16x8ExtractLaneS) \
- V(Loong64I16x8ReplaceLane) \
- V(Loong64I16x8Neg) \
- V(Loong64I16x8Shl) \
- V(Loong64I16x8ShrS) \
- V(Loong64I16x8ShrU) \
- V(Loong64I16x8Add) \
- V(Loong64I16x8AddSatS) \
- V(Loong64I16x8Sub) \
- V(Loong64I16x8SubSatS) \
- V(Loong64I16x8Mul) \
- V(Loong64I16x8MaxS) \
- V(Loong64I16x8MinS) \
- V(Loong64I16x8Eq) \
- V(Loong64I16x8Ne) \
- V(Loong64I16x8GtS) \
- V(Loong64I16x8GeS) \
- V(Loong64I16x8AddSatU) \
- V(Loong64I16x8SubSatU) \
- V(Loong64I16x8MaxU) \
- V(Loong64I16x8MinU) \
- V(Loong64I16x8GtU) \
- V(Loong64I16x8GeU) \
- V(Loong64I16x8RoundingAverageU) \
- V(Loong64I16x8Abs) \
- V(Loong64I16x8BitMask) \
- V(Loong64I16x8Q15MulRSatS) \
- V(Loong64I8x16Splat) \
- V(Loong64I8x16ExtractLaneU) \
- V(Loong64I8x16ExtractLaneS) \
- V(Loong64I8x16ReplaceLane) \
- V(Loong64I8x16Neg) \
- V(Loong64I8x16Shl) \
- V(Loong64I8x16ShrS) \
- V(Loong64I8x16Add) \
- V(Loong64I8x16AddSatS) \
- V(Loong64I8x16Sub) \
- V(Loong64I8x16SubSatS) \
- V(Loong64I8x16MaxS) \
- V(Loong64I8x16MinS) \
- V(Loong64I8x16Eq) \
- V(Loong64I8x16Ne) \
- V(Loong64I8x16GtS) \
- V(Loong64I8x16GeS) \
- V(Loong64I8x16ShrU) \
- V(Loong64I8x16AddSatU) \
- V(Loong64I8x16SubSatU) \
- V(Loong64I8x16MaxU) \
- V(Loong64I8x16MinU) \
- V(Loong64I8x16GtU) \
- V(Loong64I8x16GeU) \
- V(Loong64I8x16RoundingAverageU) \
- V(Loong64I8x16Abs) \
- V(Loong64I8x16Popcnt) \
- V(Loong64I8x16BitMask) \
- V(Loong64S128And) \
- V(Loong64S128Or) \
- V(Loong64S128Xor) \
- V(Loong64S128Not) \
- V(Loong64S128Select) \
- V(Loong64S128AndNot) \
- V(Loong64I64x2AllTrue) \
- V(Loong64I32x4AllTrue) \
- V(Loong64I16x8AllTrue) \
- V(Loong64I8x16AllTrue) \
- V(Loong64V128AnyTrue) \
- V(Loong64S32x4InterleaveRight) \
- V(Loong64S32x4InterleaveLeft) \
- V(Loong64S32x4PackEven) \
- V(Loong64S32x4PackOdd) \
- V(Loong64S32x4InterleaveEven) \
- V(Loong64S32x4InterleaveOdd) \
- V(Loong64S32x4Shuffle) \
- V(Loong64S16x8InterleaveRight) \
- V(Loong64S16x8InterleaveLeft) \
- V(Loong64S16x8PackEven) \
- V(Loong64S16x8PackOdd) \
- V(Loong64S16x8InterleaveEven) \
- V(Loong64S16x8InterleaveOdd) \
- V(Loong64S16x4Reverse) \
- V(Loong64S16x2Reverse) \
- V(Loong64S8x16InterleaveRight) \
- V(Loong64S8x16InterleaveLeft) \
- V(Loong64S8x16PackEven) \
- V(Loong64S8x16PackOdd) \
- V(Loong64S8x16InterleaveEven) \
- V(Loong64S8x16InterleaveOdd) \
- V(Loong64I8x16Shuffle) \
- V(Loong64I8x16Swizzle) \
- V(Loong64S8x16Concat) \
- V(Loong64S8x8Reverse) \
- V(Loong64S8x4Reverse) \
- V(Loong64S8x2Reverse) \
- V(Loong64S128LoadSplat) \
- V(Loong64S128Load8x8S) \
- V(Loong64S128Load8x8U) \
- V(Loong64S128Load16x4S) \
- V(Loong64S128Load16x4U) \
- V(Loong64S128Load32x2S) \
- V(Loong64S128Load32x2U) \
- V(Loong64S128Load32Zero) \
- V(Loong64S128Load64Zero) \
- V(Loong64LoadLane) \
- V(Loong64StoreLane) \
- V(Loong64I32x4SConvertI16x8Low) \
- V(Loong64I32x4SConvertI16x8High) \
- V(Loong64I32x4UConvertI16x8Low) \
- V(Loong64I32x4UConvertI16x8High) \
- V(Loong64I16x8SConvertI8x16Low) \
- V(Loong64I16x8SConvertI8x16High) \
- V(Loong64I16x8SConvertI32x4) \
- V(Loong64I16x8UConvertI32x4) \
- V(Loong64I16x8UConvertI8x16Low) \
- V(Loong64I16x8UConvertI8x16High) \
- V(Loong64I8x16SConvertI16x8) \
- V(Loong64I8x16UConvertI16x8) \
- V(Loong64StoreCompressTagged) \
- V(Loong64Word64AtomicLoadUint32) \
- V(Loong64Word64AtomicLoadUint64) \
- V(Loong64Word64AtomicStoreWord64) \
- V(Loong64Word64AtomicAddUint64) \
- V(Loong64Word64AtomicSubUint64) \
- V(Loong64Word64AtomicAndUint64) \
- V(Loong64Word64AtomicOrUint64) \
- V(Loong64Word64AtomicXorUint64) \
- V(Loong64Word64AtomicExchangeUint64) \
+
+// Opcodes that support a MemoryAccessMode.
+#define TARGET_ARCH_OPCODE_WITH_MEMORY_ACCESS_MODE_LIST(V) // None.
+
+#define TARGET_ARCH_OPCODE_LIST(V) \
+ TARGET_ARCH_OPCODE_WITH_MEMORY_ACCESS_MODE_LIST(V) \
+ V(Loong64Add_d) \
+ V(Loong64Add_w) \
+ V(Loong64AddOvf_d) \
+ V(Loong64Sub_d) \
+ V(Loong64Sub_w) \
+ V(Loong64SubOvf_d) \
+ V(Loong64Mul_d) \
+ V(Loong64MulOvf_w) \
+ V(Loong64Mulh_d) \
+ V(Loong64Mulh_w) \
+ V(Loong64Mulh_wu) \
+ V(Loong64Mul_w) \
+ V(Loong64Div_d) \
+ V(Loong64Div_w) \
+ V(Loong64Div_du) \
+ V(Loong64Div_wu) \
+ V(Loong64Mod_d) \
+ V(Loong64Mod_w) \
+ V(Loong64Mod_du) \
+ V(Loong64Mod_wu) \
+ V(Loong64And) \
+ V(Loong64And32) \
+ V(Loong64Or) \
+ V(Loong64Or32) \
+ V(Loong64Nor) \
+ V(Loong64Nor32) \
+ V(Loong64Xor) \
+ V(Loong64Xor32) \
+ V(Loong64Alsl_d) \
+ V(Loong64Alsl_w) \
+ V(Loong64Sll_d) \
+ V(Loong64Sll_w) \
+ V(Loong64Srl_d) \
+ V(Loong64Srl_w) \
+ V(Loong64Sra_d) \
+ V(Loong64Sra_w) \
+ V(Loong64Rotr_d) \
+ V(Loong64Rotr_w) \
+ V(Loong64Bstrpick_d) \
+ V(Loong64Bstrpick_w) \
+ V(Loong64Bstrins_d) \
+ V(Loong64Bstrins_w) \
+ V(Loong64ByteSwap64) \
+ V(Loong64ByteSwap32) \
+ V(Loong64Clz_d) \
+ V(Loong64Clz_w) \
+ V(Loong64Mov) \
+ V(Loong64Tst) \
+ V(Loong64Cmp) \
+ V(Loong64Float32Cmp) \
+ V(Loong64Float32Add) \
+ V(Loong64Float32Sub) \
+ V(Loong64Float32Mul) \
+ V(Loong64Float32Div) \
+ V(Loong64Float32Abs) \
+ V(Loong64Float32Neg) \
+ V(Loong64Float32Sqrt) \
+ V(Loong64Float32Max) \
+ V(Loong64Float32Min) \
+ V(Loong64Float32ToFloat64) \
+ V(Loong64Float32RoundDown) \
+ V(Loong64Float32RoundUp) \
+ V(Loong64Float32RoundTruncate) \
+ V(Loong64Float32RoundTiesEven) \
+ V(Loong64Float32ToInt32) \
+ V(Loong64Float32ToInt64) \
+ V(Loong64Float32ToUint32) \
+ V(Loong64Float32ToUint64) \
+ V(Loong64Float64Cmp) \
+ V(Loong64Float64Add) \
+ V(Loong64Float64Sub) \
+ V(Loong64Float64Mul) \
+ V(Loong64Float64Div) \
+ V(Loong64Float64Mod) \
+ V(Loong64Float64Abs) \
+ V(Loong64Float64Neg) \
+ V(Loong64Float64Sqrt) \
+ V(Loong64Float64Max) \
+ V(Loong64Float64Min) \
+ V(Loong64Float64ToFloat32) \
+ V(Loong64Float64RoundDown) \
+ V(Loong64Float64RoundUp) \
+ V(Loong64Float64RoundTruncate) \
+ V(Loong64Float64RoundTiesEven) \
+ V(Loong64Float64ToInt32) \
+ V(Loong64Float64ToInt64) \
+ V(Loong64Float64ToUint32) \
+ V(Loong64Float64ToUint64) \
+ V(Loong64Int32ToFloat32) \
+ V(Loong64Int32ToFloat64) \
+ V(Loong64Int64ToFloat32) \
+ V(Loong64Int64ToFloat64) \
+ V(Loong64Uint32ToFloat32) \
+ V(Loong64Uint32ToFloat64) \
+ V(Loong64Uint64ToFloat32) \
+ V(Loong64Uint64ToFloat64) \
+ V(Loong64Float64ExtractLowWord32) \
+ V(Loong64Float64ExtractHighWord32) \
+ V(Loong64Float64InsertLowWord32) \
+ V(Loong64Float64InsertHighWord32) \
+ V(Loong64BitcastDL) \
+ V(Loong64BitcastLD) \
+ V(Loong64Float64SilenceNaN) \
+ V(Loong64Ld_b) \
+ V(Loong64Ld_bu) \
+ V(Loong64St_b) \
+ V(Loong64Ld_h) \
+ V(Loong64Ld_hu) \
+ V(Loong64St_h) \
+ V(Loong64Ld_w) \
+ V(Loong64Ld_wu) \
+ V(Loong64St_w) \
+ V(Loong64Ld_d) \
+ V(Loong64St_d) \
+ V(Loong64Fld_s) \
+ V(Loong64Fst_s) \
+ V(Loong64Fld_d) \
+ V(Loong64Fst_d) \
+ V(Loong64Push) \
+ V(Loong64Peek) \
+ V(Loong64Poke) \
+ V(Loong64StackClaim) \
+ V(Loong64Ext_w_b) \
+ V(Loong64Ext_w_h) \
+ V(Loong64Dbar) \
+ V(Loong64S128Const) \
+ V(Loong64S128Zero) \
+ V(Loong64S128AllOnes) \
+ V(Loong64I32x4Splat) \
+ V(Loong64I32x4ExtractLane) \
+ V(Loong64I32x4ReplaceLane) \
+ V(Loong64I32x4Add) \
+ V(Loong64I32x4Sub) \
+ V(Loong64F64x2Abs) \
+ V(Loong64F64x2Neg) \
+ V(Loong64F32x4Splat) \
+ V(Loong64F32x4ExtractLane) \
+ V(Loong64F32x4ReplaceLane) \
+ V(Loong64F32x4SConvertI32x4) \
+ V(Loong64F32x4UConvertI32x4) \
+ V(Loong64I32x4Mul) \
+ V(Loong64I32x4MaxS) \
+ V(Loong64I32x4MinS) \
+ V(Loong64I32x4Eq) \
+ V(Loong64I32x4Ne) \
+ V(Loong64I32x4Shl) \
+ V(Loong64I32x4ShrS) \
+ V(Loong64I32x4ShrU) \
+ V(Loong64I32x4MaxU) \
+ V(Loong64I32x4MinU) \
+ V(Loong64F64x2Sqrt) \
+ V(Loong64F64x2Add) \
+ V(Loong64F64x2Sub) \
+ V(Loong64F64x2Mul) \
+ V(Loong64F64x2Div) \
+ V(Loong64F64x2Min) \
+ V(Loong64F64x2Max) \
+ V(Loong64F64x2Eq) \
+ V(Loong64F64x2Ne) \
+ V(Loong64F64x2Lt) \
+ V(Loong64F64x2Le) \
+ V(Loong64F64x2Splat) \
+ V(Loong64F64x2ExtractLane) \
+ V(Loong64F64x2ReplaceLane) \
+ V(Loong64F64x2Pmin) \
+ V(Loong64F64x2Pmax) \
+ V(Loong64F64x2Ceil) \
+ V(Loong64F64x2Floor) \
+ V(Loong64F64x2Trunc) \
+ V(Loong64F64x2NearestInt) \
+ V(Loong64F64x2ConvertLowI32x4S) \
+ V(Loong64F64x2ConvertLowI32x4U) \
+ V(Loong64F64x2PromoteLowF32x4) \
+ V(Loong64I64x2Splat) \
+ V(Loong64I64x2ExtractLane) \
+ V(Loong64I64x2ReplaceLane) \
+ V(Loong64I64x2Add) \
+ V(Loong64I64x2Sub) \
+ V(Loong64I64x2Mul) \
+ V(Loong64I64x2Neg) \
+ V(Loong64I64x2Shl) \
+ V(Loong64I64x2ShrS) \
+ V(Loong64I64x2ShrU) \
+ V(Loong64I64x2BitMask) \
+ V(Loong64I64x2Eq) \
+ V(Loong64I64x2Ne) \
+ V(Loong64I64x2GtS) \
+ V(Loong64I64x2GeS) \
+ V(Loong64I64x2Abs) \
+ V(Loong64I64x2SConvertI32x4Low) \
+ V(Loong64I64x2SConvertI32x4High) \
+ V(Loong64I64x2UConvertI32x4Low) \
+ V(Loong64I64x2UConvertI32x4High) \
+ V(Loong64ExtMulLow) \
+ V(Loong64ExtMulHigh) \
+ V(Loong64ExtAddPairwise) \
+ V(Loong64F32x4Abs) \
+ V(Loong64F32x4Neg) \
+ V(Loong64F32x4Sqrt) \
+ V(Loong64F32x4RecipApprox) \
+ V(Loong64F32x4RecipSqrtApprox) \
+ V(Loong64F32x4Add) \
+ V(Loong64F32x4Sub) \
+ V(Loong64F32x4Mul) \
+ V(Loong64F32x4Div) \
+ V(Loong64F32x4Max) \
+ V(Loong64F32x4Min) \
+ V(Loong64F32x4Eq) \
+ V(Loong64F32x4Ne) \
+ V(Loong64F32x4Lt) \
+ V(Loong64F32x4Le) \
+ V(Loong64F32x4Pmin) \
+ V(Loong64F32x4Pmax) \
+ V(Loong64F32x4Ceil) \
+ V(Loong64F32x4Floor) \
+ V(Loong64F32x4Trunc) \
+ V(Loong64F32x4NearestInt) \
+ V(Loong64F32x4DemoteF64x2Zero) \
+ V(Loong64I32x4SConvertF32x4) \
+ V(Loong64I32x4UConvertF32x4) \
+ V(Loong64I32x4Neg) \
+ V(Loong64I32x4GtS) \
+ V(Loong64I32x4GeS) \
+ V(Loong64I32x4GtU) \
+ V(Loong64I32x4GeU) \
+ V(Loong64I32x4Abs) \
+ V(Loong64I32x4BitMask) \
+ V(Loong64I32x4DotI16x8S) \
+ V(Loong64I32x4TruncSatF64x2SZero) \
+ V(Loong64I32x4TruncSatF64x2UZero) \
+ V(Loong64I16x8Splat) \
+ V(Loong64I16x8ExtractLaneU) \
+ V(Loong64I16x8ExtractLaneS) \
+ V(Loong64I16x8ReplaceLane) \
+ V(Loong64I16x8Neg) \
+ V(Loong64I16x8Shl) \
+ V(Loong64I16x8ShrS) \
+ V(Loong64I16x8ShrU) \
+ V(Loong64I16x8Add) \
+ V(Loong64I16x8AddSatS) \
+ V(Loong64I16x8Sub) \
+ V(Loong64I16x8SubSatS) \
+ V(Loong64I16x8Mul) \
+ V(Loong64I16x8MaxS) \
+ V(Loong64I16x8MinS) \
+ V(Loong64I16x8Eq) \
+ V(Loong64I16x8Ne) \
+ V(Loong64I16x8GtS) \
+ V(Loong64I16x8GeS) \
+ V(Loong64I16x8AddSatU) \
+ V(Loong64I16x8SubSatU) \
+ V(Loong64I16x8MaxU) \
+ V(Loong64I16x8MinU) \
+ V(Loong64I16x8GtU) \
+ V(Loong64I16x8GeU) \
+ V(Loong64I16x8RoundingAverageU) \
+ V(Loong64I16x8Abs) \
+ V(Loong64I16x8BitMask) \
+ V(Loong64I16x8Q15MulRSatS) \
+ V(Loong64I8x16Splat) \
+ V(Loong64I8x16ExtractLaneU) \
+ V(Loong64I8x16ExtractLaneS) \
+ V(Loong64I8x16ReplaceLane) \
+ V(Loong64I8x16Neg) \
+ V(Loong64I8x16Shl) \
+ V(Loong64I8x16ShrS) \
+ V(Loong64I8x16Add) \
+ V(Loong64I8x16AddSatS) \
+ V(Loong64I8x16Sub) \
+ V(Loong64I8x16SubSatS) \
+ V(Loong64I8x16MaxS) \
+ V(Loong64I8x16MinS) \
+ V(Loong64I8x16Eq) \
+ V(Loong64I8x16Ne) \
+ V(Loong64I8x16GtS) \
+ V(Loong64I8x16GeS) \
+ V(Loong64I8x16ShrU) \
+ V(Loong64I8x16AddSatU) \
+ V(Loong64I8x16SubSatU) \
+ V(Loong64I8x16MaxU) \
+ V(Loong64I8x16MinU) \
+ V(Loong64I8x16GtU) \
+ V(Loong64I8x16GeU) \
+ V(Loong64I8x16RoundingAverageU) \
+ V(Loong64I8x16Abs) \
+ V(Loong64I8x16Popcnt) \
+ V(Loong64I8x16BitMask) \
+ V(Loong64S128And) \
+ V(Loong64S128Or) \
+ V(Loong64S128Xor) \
+ V(Loong64S128Not) \
+ V(Loong64S128Select) \
+ V(Loong64S128AndNot) \
+ V(Loong64I64x2AllTrue) \
+ V(Loong64I32x4AllTrue) \
+ V(Loong64I16x8AllTrue) \
+ V(Loong64I8x16AllTrue) \
+ V(Loong64V128AnyTrue) \
+ V(Loong64S32x4InterleaveRight) \
+ V(Loong64S32x4InterleaveLeft) \
+ V(Loong64S32x4PackEven) \
+ V(Loong64S32x4PackOdd) \
+ V(Loong64S32x4InterleaveEven) \
+ V(Loong64S32x4InterleaveOdd) \
+ V(Loong64S32x4Shuffle) \
+ V(Loong64S16x8InterleaveRight) \
+ V(Loong64S16x8InterleaveLeft) \
+ V(Loong64S16x8PackEven) \
+ V(Loong64S16x8PackOdd) \
+ V(Loong64S16x8InterleaveEven) \
+ V(Loong64S16x8InterleaveOdd) \
+ V(Loong64S16x4Reverse) \
+ V(Loong64S16x2Reverse) \
+ V(Loong64S8x16InterleaveRight) \
+ V(Loong64S8x16InterleaveLeft) \
+ V(Loong64S8x16PackEven) \
+ V(Loong64S8x16PackOdd) \
+ V(Loong64S8x16InterleaveEven) \
+ V(Loong64S8x16InterleaveOdd) \
+ V(Loong64I8x16Shuffle) \
+ V(Loong64I8x16Swizzle) \
+ V(Loong64S8x16Concat) \
+ V(Loong64S8x8Reverse) \
+ V(Loong64S8x4Reverse) \
+ V(Loong64S8x2Reverse) \
+ V(Loong64S128LoadSplat) \
+ V(Loong64S128Load8x8S) \
+ V(Loong64S128Load8x8U) \
+ V(Loong64S128Load16x4S) \
+ V(Loong64S128Load16x4U) \
+ V(Loong64S128Load32x2S) \
+ V(Loong64S128Load32x2U) \
+ V(Loong64S128Load32Zero) \
+ V(Loong64S128Load64Zero) \
+ V(Loong64LoadLane) \
+ V(Loong64StoreLane) \
+ V(Loong64I32x4SConvertI16x8Low) \
+ V(Loong64I32x4SConvertI16x8High) \
+ V(Loong64I32x4UConvertI16x8Low) \
+ V(Loong64I32x4UConvertI16x8High) \
+ V(Loong64I16x8SConvertI8x16Low) \
+ V(Loong64I16x8SConvertI8x16High) \
+ V(Loong64I16x8SConvertI32x4) \
+ V(Loong64I16x8UConvertI32x4) \
+ V(Loong64I16x8UConvertI8x16Low) \
+ V(Loong64I16x8UConvertI8x16High) \
+ V(Loong64I8x16SConvertI16x8) \
+ V(Loong64I8x16UConvertI16x8) \
+ V(Loong64StoreCompressTagged) \
+ V(Loong64Word64AtomicLoadUint32) \
+ V(Loong64Word64AtomicLoadUint64) \
+ V(Loong64Word64AtomicStoreWord64) \
+ V(Loong64Word64AtomicAddUint64) \
+ V(Loong64Word64AtomicSubUint64) \
+ V(Loong64Word64AtomicAndUint64) \
+ V(Loong64Word64AtomicOrUint64) \
+ V(Loong64Word64AtomicXorUint64) \
+ V(Loong64Word64AtomicExchangeUint64) \
V(Loong64Word64AtomicCompareExchangeUint64)
// Addressing modes represent the "shape" of inputs to an instruction.
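
The split above follows the usual X-macro pattern: opcodes that carry a MemoryAccessMode live in their own sub-list (empty on this target), and the main list folds that sub-list in before enumerating everything else. A simplified, self-contained sketch of how such lists expand into an enum plus a predicate (names are invented for illustration, not the real V8 macros):

    #include <cstdio>

    // Sub-list of opcodes that support a MemoryAccessMode (empty here).
    #define DEMO_OPCODE_WITH_MEMORY_ACCESS_MODE_LIST(V) /* none */

    // Main list folds the sub-list in first, then adds the remaining opcodes.
    #define DEMO_OPCODE_LIST(V)                   \
      DEMO_OPCODE_WITH_MEMORY_ACCESS_MODE_LIST(V) \
      V(DemoAdd)                                  \
      V(DemoSub)

    #define DECLARE_OPCODE(Name) k##Name,
    enum DemoOpcode { DEMO_OPCODE_LIST(DECLARE_OPCODE) kLastDemoOpcode };
    #undef DECLARE_OPCODE

    // Because the sub-list is empty, no opcode reports a memory access mode.
    bool HasMemoryAccessMode(DemoOpcode opcode) {
      switch (opcode) {
    #define CASE(Name) \
      case k##Name:    \
        return true;
        DEMO_OPCODE_WITH_MEMORY_ACCESS_MODE_LIST(CASE)
    #undef CASE
        default:
          return false;
      }
    }

    int main() {
      std::printf("%d %d\n", kDemoAdd, HasMemoryAccessMode(kDemoSub));  // 0 0
    }

Keeping the sub-list separate, even when empty, lets architecture-independent code generate the HasMemoryAccessMode predicate uniformly across all backends.
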
diff --git a/deps/v8/src/compiler/backend/loong64/instruction-selector-loong64.cc b/deps/v8/src/compiler/backend/loong64/instruction-selector-loong64.cc
index 454bfa9986..29f9b111db 100644
--- a/deps/v8/src/compiler/backend/loong64/instruction-selector-loong64.cc
+++ b/deps/v8/src/compiler/backend/loong64/instruction-selector-loong64.cc
@@ -345,9 +345,9 @@ void InstructionSelector::VisitStackSlot(Node* node) {
sequence()->AddImmediate(Constant(slot)), 0, nullptr);
}
-void InstructionSelector::VisitAbortCSAAssert(Node* node) {
+void InstructionSelector::VisitAbortCSADcheck(Node* node) {
Loong64OperandGenerator g(this);
- Emit(kArchAbortCSAAssert, g.NoOutput(), g.UseFixed(node->InputAt(0), a0));
+ Emit(kArchAbortCSADcheck, g.NoOutput(), g.UseFixed(node->InputAt(0), a0));
}
void EmitLoad(InstructionSelector* selector, Node* node, InstructionCode opcode,
@@ -1355,37 +1355,21 @@ void InstructionSelector::VisitBitcastWord32ToWord64(Node* node) {
}
void InstructionSelector::VisitChangeInt32ToInt64(Node* node) {
+ // On LoongArch64, int32 values are always kept sign-extended in 64-bit
+ // registers, so no explicit sign-extension is needed here.
+ // However, when a host function is called through the simulator and returns
+ // an int32, the simulator does not sign-extend the result to int64, because
+ // it cannot tell whether the function returns an int32 or an int64.
#ifdef USE_SIMULATOR
Node* value = node->InputAt(0);
- if ((value->opcode() == IrOpcode::kLoad ||
- value->opcode() == IrOpcode::kLoadImmutable) &&
- CanCover(node, value)) {
- // Generate sign-extending load.
- LoadRepresentation load_rep = LoadRepresentationOf(value->op());
- InstructionCode opcode = kArchNop;
- switch (load_rep.representation()) {
- case MachineRepresentation::kBit: // Fall through.
- case MachineRepresentation::kWord8:
- opcode = load_rep.IsUnsigned() ? kLoong64Ld_bu : kLoong64Ld_b;
- break;
- case MachineRepresentation::kWord16:
- opcode = load_rep.IsUnsigned() ? kLoong64Ld_hu : kLoong64Ld_h;
- break;
- case MachineRepresentation::kWord32:
- opcode = kLoong64Ld_w;
- break;
- default:
- UNREACHABLE();
- }
- EmitLoad(this, value, opcode, node);
- } else {
+ if (value->opcode() == IrOpcode::kCall) {
Loong64OperandGenerator g(this);
- Emit(kLoong64Sll_w, g.DefineAsRegister(node),
- g.UseRegister(node->InputAt(0)), g.TempImmediate(0));
+ Emit(kLoong64Sll_w, g.DefineAsRegister(node), g.UseRegister(value),
+ g.TempImmediate(0));
+ return;
}
-#else
- EmitIdentity(node);
#endif
+ EmitIdentity(node);
}
bool InstructionSelector::ZeroExtendsWord32ToWord64NoPhis(Node* node) {
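
What the kLoong64Sll_w with a zero shift amount buys in the simulator-only path above is the architectural guarantee that a 32-bit operation writes its result sign-extended into the full 64-bit register; in every other case EmitIdentity suffices because int32 values are already kept in that canonical form. A small illustrative sketch of the same sign-extension semantics in portable C++ (an analogy, not V8 code):

    #include <cstdint>
    #include <cstdio>

    // A 32-bit shift-by-zero on LoongArch64 (sll.w rd, rs, 0) leaves the low
    // 32 bits unchanged and sign-extends them into the upper half of the
    // register. The portable equivalent is a round trip through int32_t.
    int64_t ChangeInt32ToInt64(uint64_t reg) {
      return static_cast<int64_t>(static_cast<int32_t>(reg & 0xFFFFFFFFu));
    }

    int main() {
      // A simulated host call may return an int32 without the canonical
      // extension: low 32 bits hold -1, upper 32 bits are zero.
      uint64_t raw = 0x00000000FFFFFFFFu;
      std::printf("%lld\n",
                  static_cast<long long>(ChangeInt32ToInt64(raw)));  // -1
    }
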
diff --git a/deps/v8/src/compiler/backend/mips/code-generator-mips.cc b/deps/v8/src/compiler/backend/mips/code-generator-mips.cc
index 736248c824..97c9e0978e 100644
--- a/deps/v8/src/compiler/backend/mips/code-generator-mips.cc
+++ b/deps/v8/src/compiler/backend/mips/code-generator-mips.cc
@@ -809,13 +809,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArchTableSwitch:
AssembleArchTableSwitch(instr);
break;
- case kArchAbortCSAAssert:
+ case kArchAbortCSADcheck:
DCHECK(i.InputRegister(0) == a0);
{
// We don't actually want to generate a pile of code for this, so just
// claim there is a stack frame, without generating one.
- FrameScope scope(tasm(), StackFrame::NONE);
- __ Call(isolate()->builtins()->code_handle(Builtin::kAbortCSAAssert),
+ FrameScope scope(tasm(), StackFrame::NO_FRAME_TYPE);
+ __ Call(isolate()->builtins()->code_handle(Builtin::kAbortCSADcheck),
RelocInfo::CODE_TARGET);
}
__ stop();
diff --git a/deps/v8/src/compiler/backend/mips/instruction-codes-mips.h b/deps/v8/src/compiler/backend/mips/instruction-codes-mips.h
index 40f1ef3e98..3f0d8f9d39 100644
--- a/deps/v8/src/compiler/backend/mips/instruction-codes-mips.h
+++ b/deps/v8/src/compiler/backend/mips/instruction-codes-mips.h
@@ -11,369 +11,374 @@ namespace compiler {
// MIPS-specific opcodes that specify which assembly sequence to emit.
// Most opcodes specify a single instruction.
-#define TARGET_ARCH_OPCODE_LIST(V) \
- V(MipsAdd) \
- V(MipsAddOvf) \
- V(MipsSub) \
- V(MipsSubOvf) \
- V(MipsMul) \
- V(MipsMulOvf) \
- V(MipsMulHigh) \
- V(MipsMulHighU) \
- V(MipsDiv) \
- V(MipsDivU) \
- V(MipsMod) \
- V(MipsModU) \
- V(MipsAnd) \
- V(MipsOr) \
- V(MipsNor) \
- V(MipsXor) \
- V(MipsClz) \
- V(MipsCtz) \
- V(MipsPopcnt) \
- V(MipsLsa) \
- V(MipsShl) \
- V(MipsShr) \
- V(MipsSar) \
- V(MipsShlPair) \
- V(MipsShrPair) \
- V(MipsSarPair) \
- V(MipsExt) \
- V(MipsIns) \
- V(MipsRor) \
- V(MipsMov) \
- V(MipsTst) \
- V(MipsCmp) \
- V(MipsCmpS) \
- V(MipsAddS) \
- V(MipsSubS) \
- V(MipsMulS) \
- V(MipsDivS) \
- V(MipsAbsS) \
- V(MipsSqrtS) \
- V(MipsMaxS) \
- V(MipsMinS) \
- V(MipsCmpD) \
- V(MipsAddD) \
- V(MipsSubD) \
- V(MipsMulD) \
- V(MipsDivD) \
- V(MipsModD) \
- V(MipsAbsD) \
- V(MipsSqrtD) \
- V(MipsMaxD) \
- V(MipsMinD) \
- V(MipsNegS) \
- V(MipsNegD) \
- V(MipsAddPair) \
- V(MipsSubPair) \
- V(MipsMulPair) \
- V(MipsMaddS) \
- V(MipsMaddD) \
- V(MipsMsubS) \
- V(MipsMsubD) \
- V(MipsFloat32RoundDown) \
- V(MipsFloat32RoundTruncate) \
- V(MipsFloat32RoundUp) \
- V(MipsFloat32RoundTiesEven) \
- V(MipsFloat64RoundDown) \
- V(MipsFloat64RoundTruncate) \
- V(MipsFloat64RoundUp) \
- V(MipsFloat64RoundTiesEven) \
- V(MipsCvtSD) \
- V(MipsCvtDS) \
- V(MipsTruncWD) \
- V(MipsRoundWD) \
- V(MipsFloorWD) \
- V(MipsCeilWD) \
- V(MipsTruncWS) \
- V(MipsRoundWS) \
- V(MipsFloorWS) \
- V(MipsCeilWS) \
- V(MipsTruncUwD) \
- V(MipsTruncUwS) \
- V(MipsCvtDW) \
- V(MipsCvtDUw) \
- V(MipsCvtSW) \
- V(MipsCvtSUw) \
- V(MipsLb) \
- V(MipsLbu) \
- V(MipsSb) \
- V(MipsLh) \
- V(MipsUlh) \
- V(MipsLhu) \
- V(MipsUlhu) \
- V(MipsSh) \
- V(MipsUsh) \
- V(MipsLw) \
- V(MipsUlw) \
- V(MipsSw) \
- V(MipsUsw) \
- V(MipsLwc1) \
- V(MipsUlwc1) \
- V(MipsSwc1) \
- V(MipsUswc1) \
- V(MipsLdc1) \
- V(MipsUldc1) \
- V(MipsSdc1) \
- V(MipsUsdc1) \
- V(MipsFloat64ExtractLowWord32) \
- V(MipsFloat64ExtractHighWord32) \
- V(MipsFloat64InsertLowWord32) \
- V(MipsFloat64InsertHighWord32) \
- V(MipsFloat64SilenceNaN) \
- V(MipsFloat32Max) \
- V(MipsFloat64Max) \
- V(MipsFloat32Min) \
- V(MipsFloat64Min) \
- V(MipsPush) \
- V(MipsPeek) \
- V(MipsStoreToStackSlot) \
- V(MipsByteSwap32) \
- V(MipsStackClaim) \
- V(MipsSeb) \
- V(MipsSeh) \
- V(MipsSync) \
- V(MipsS128Zero) \
- V(MipsI32x4Splat) \
- V(MipsI32x4ExtractLane) \
- V(MipsI32x4ReplaceLane) \
- V(MipsI32x4Add) \
- V(MipsI32x4Sub) \
- V(MipsF64x2Abs) \
- V(MipsF64x2Neg) \
- V(MipsF64x2Sqrt) \
- V(MipsF64x2Add) \
- V(MipsF64x2Sub) \
- V(MipsF64x2Mul) \
- V(MipsF64x2Div) \
- V(MipsF64x2Min) \
- V(MipsF64x2Max) \
- V(MipsF64x2Eq) \
- V(MipsF64x2Ne) \
- V(MipsF64x2Lt) \
- V(MipsF64x2Le) \
- V(MipsF64x2Pmin) \
- V(MipsF64x2Pmax) \
- V(MipsF64x2Ceil) \
- V(MipsF64x2Floor) \
- V(MipsF64x2Trunc) \
- V(MipsF64x2NearestInt) \
- V(MipsF64x2ConvertLowI32x4S) \
- V(MipsF64x2ConvertLowI32x4U) \
- V(MipsF64x2PromoteLowF32x4) \
- V(MipsI64x2Add) \
- V(MipsI64x2Sub) \
- V(MipsI64x2Mul) \
- V(MipsI64x2Neg) \
- V(MipsI64x2Shl) \
- V(MipsI64x2ShrS) \
- V(MipsI64x2ShrU) \
- V(MipsI64x2BitMask) \
- V(MipsI64x2Eq) \
- V(MipsI64x2Ne) \
- V(MipsI64x2GtS) \
- V(MipsI64x2GeS) \
- V(MipsI64x2Abs) \
- V(MipsI64x2SConvertI32x4Low) \
- V(MipsI64x2SConvertI32x4High) \
- V(MipsI64x2UConvertI32x4Low) \
- V(MipsI64x2UConvertI32x4High) \
- V(MipsI64x2ExtMulLowI32x4S) \
- V(MipsI64x2ExtMulHighI32x4S) \
- V(MipsI64x2ExtMulLowI32x4U) \
- V(MipsI64x2ExtMulHighI32x4U) \
- V(MipsF32x4Splat) \
- V(MipsF32x4ExtractLane) \
- V(MipsF32x4ReplaceLane) \
- V(MipsF32x4SConvertI32x4) \
- V(MipsF32x4UConvertI32x4) \
- V(MipsF32x4DemoteF64x2Zero) \
- V(MipsI32x4Mul) \
- V(MipsI32x4MaxS) \
- V(MipsI32x4MinS) \
- V(MipsI32x4Eq) \
- V(MipsI32x4Ne) \
- V(MipsI32x4Shl) \
- V(MipsI32x4ShrS) \
- V(MipsI32x4ShrU) \
- V(MipsI32x4MaxU) \
- V(MipsI32x4MinU) \
- V(MipsF64x2Splat) \
- V(MipsF64x2ExtractLane) \
- V(MipsF64x2ReplaceLane) \
- V(MipsF32x4Abs) \
- V(MipsF32x4Neg) \
- V(MipsF32x4Sqrt) \
- V(MipsF32x4RecipApprox) \
- V(MipsF32x4RecipSqrtApprox) \
- V(MipsF32x4Add) \
- V(MipsF32x4Sub) \
- V(MipsF32x4Mul) \
- V(MipsF32x4Div) \
- V(MipsF32x4Max) \
- V(MipsF32x4Min) \
- V(MipsF32x4Eq) \
- V(MipsF32x4Ne) \
- V(MipsF32x4Lt) \
- V(MipsF32x4Le) \
- V(MipsF32x4Pmin) \
- V(MipsF32x4Pmax) \
- V(MipsF32x4Ceil) \
- V(MipsF32x4Floor) \
- V(MipsF32x4Trunc) \
- V(MipsF32x4NearestInt) \
- V(MipsI32x4SConvertF32x4) \
- V(MipsI32x4UConvertF32x4) \
- V(MipsI32x4Neg) \
- V(MipsI32x4GtS) \
- V(MipsI32x4GeS) \
- V(MipsI32x4GtU) \
- V(MipsI32x4GeU) \
- V(MipsI32x4Abs) \
- V(MipsI32x4BitMask) \
- V(MipsI32x4DotI16x8S) \
- V(MipsI32x4ExtMulLowI16x8S) \
- V(MipsI32x4ExtMulHighI16x8S) \
- V(MipsI32x4ExtMulLowI16x8U) \
- V(MipsI32x4ExtMulHighI16x8U) \
- V(MipsI32x4TruncSatF64x2SZero) \
- V(MipsI32x4TruncSatF64x2UZero) \
- V(MipsI32x4ExtAddPairwiseI16x8S) \
- V(MipsI32x4ExtAddPairwiseI16x8U) \
- V(MipsI16x8Splat) \
- V(MipsI16x8ExtractLaneU) \
- V(MipsI16x8ExtractLaneS) \
- V(MipsI16x8ReplaceLane) \
- V(MipsI16x8Neg) \
- V(MipsI16x8Shl) \
- V(MipsI16x8ShrS) \
- V(MipsI16x8ShrU) \
- V(MipsI16x8Add) \
- V(MipsI16x8AddSatS) \
- V(MipsI16x8Sub) \
- V(MipsI16x8SubSatS) \
- V(MipsI16x8Mul) \
- V(MipsI16x8MaxS) \
- V(MipsI16x8MinS) \
- V(MipsI16x8Eq) \
- V(MipsI16x8Ne) \
- V(MipsI16x8GtS) \
- V(MipsI16x8GeS) \
- V(MipsI16x8AddSatU) \
- V(MipsI16x8SubSatU) \
- V(MipsI16x8MaxU) \
- V(MipsI16x8MinU) \
- V(MipsI16x8GtU) \
- V(MipsI16x8GeU) \
- V(MipsI16x8RoundingAverageU) \
- V(MipsI16x8Abs) \
- V(MipsI16x8BitMask) \
- V(MipsI16x8Q15MulRSatS) \
- V(MipsI16x8ExtMulLowI8x16S) \
- V(MipsI16x8ExtMulHighI8x16S) \
- V(MipsI16x8ExtMulLowI8x16U) \
- V(MipsI16x8ExtMulHighI8x16U) \
- V(MipsI16x8ExtAddPairwiseI8x16S) \
- V(MipsI16x8ExtAddPairwiseI8x16U) \
- V(MipsI8x16Splat) \
- V(MipsI8x16ExtractLaneU) \
- V(MipsI8x16ExtractLaneS) \
- V(MipsI8x16ReplaceLane) \
- V(MipsI8x16Neg) \
- V(MipsI8x16Shl) \
- V(MipsI8x16ShrS) \
- V(MipsI8x16Add) \
- V(MipsI8x16AddSatS) \
- V(MipsI8x16Sub) \
- V(MipsI8x16SubSatS) \
- V(MipsI8x16MaxS) \
- V(MipsI8x16MinS) \
- V(MipsI8x16Eq) \
- V(MipsI8x16Ne) \
- V(MipsI8x16GtS) \
- V(MipsI8x16GeS) \
- V(MipsI8x16ShrU) \
- V(MipsI8x16AddSatU) \
- V(MipsI8x16SubSatU) \
- V(MipsI8x16MaxU) \
- V(MipsI8x16MinU) \
- V(MipsI8x16GtU) \
- V(MipsI8x16GeU) \
- V(MipsI8x16RoundingAverageU) \
- V(MipsI8x16Abs) \
- V(MipsI8x16Popcnt) \
- V(MipsI8x16BitMask) \
- V(MipsS128And) \
- V(MipsS128Or) \
- V(MipsS128Xor) \
- V(MipsS128Not) \
- V(MipsS128Select) \
- V(MipsS128AndNot) \
- V(MipsI64x2AllTrue) \
- V(MipsI32x4AllTrue) \
- V(MipsI16x8AllTrue) \
- V(MipsI8x16AllTrue) \
- V(MipsV128AnyTrue) \
- V(MipsS32x4InterleaveRight) \
- V(MipsS32x4InterleaveLeft) \
- V(MipsS32x4PackEven) \
- V(MipsS32x4PackOdd) \
- V(MipsS32x4InterleaveEven) \
- V(MipsS32x4InterleaveOdd) \
- V(MipsS32x4Shuffle) \
- V(MipsS16x8InterleaveRight) \
- V(MipsS16x8InterleaveLeft) \
- V(MipsS16x8PackEven) \
- V(MipsS16x8PackOdd) \
- V(MipsS16x8InterleaveEven) \
- V(MipsS16x8InterleaveOdd) \
- V(MipsS16x4Reverse) \
- V(MipsS16x2Reverse) \
- V(MipsS8x16InterleaveRight) \
- V(MipsS8x16InterleaveLeft) \
- V(MipsS8x16PackEven) \
- V(MipsS8x16PackOdd) \
- V(MipsS8x16InterleaveEven) \
- V(MipsS8x16InterleaveOdd) \
- V(MipsI8x16Shuffle) \
- V(MipsI8x16Swizzle) \
- V(MipsS8x16Concat) \
- V(MipsS8x8Reverse) \
- V(MipsS8x4Reverse) \
- V(MipsS8x2Reverse) \
- V(MipsS128Load8Splat) \
- V(MipsS128Load16Splat) \
- V(MipsS128Load32Splat) \
- V(MipsS128Load64Splat) \
- V(MipsS128Load8x8S) \
- V(MipsS128Load8x8U) \
- V(MipsS128Load16x4S) \
- V(MipsS128Load16x4U) \
- V(MipsS128Load32x2S) \
- V(MipsS128Load32x2U) \
- V(MipsMsaLd) \
- V(MipsMsaSt) \
- V(MipsI32x4SConvertI16x8Low) \
- V(MipsI32x4SConvertI16x8High) \
- V(MipsI32x4UConvertI16x8Low) \
- V(MipsI32x4UConvertI16x8High) \
- V(MipsI16x8SConvertI8x16Low) \
- V(MipsI16x8SConvertI8x16High) \
- V(MipsI16x8SConvertI32x4) \
- V(MipsI16x8UConvertI32x4) \
- V(MipsI16x8UConvertI8x16Low) \
- V(MipsI16x8UConvertI8x16High) \
- V(MipsI8x16SConvertI16x8) \
- V(MipsI8x16UConvertI16x8) \
- V(MipsWord32AtomicPairLoad) \
- V(MipsWord32AtomicPairStore) \
- V(MipsWord32AtomicPairAdd) \
- V(MipsWord32AtomicPairSub) \
- V(MipsWord32AtomicPairAnd) \
- V(MipsWord32AtomicPairOr) \
- V(MipsWord32AtomicPairXor) \
- V(MipsWord32AtomicPairExchange) \
+
+// Opcodes that support a MemoryAccessMode.
+#define TARGET_ARCH_OPCODE_WITH_MEMORY_ACCESS_MODE_LIST(V) // None.
+
+#define TARGET_ARCH_OPCODE_LIST(V) \
+ TARGET_ARCH_OPCODE_WITH_MEMORY_ACCESS_MODE_LIST(V) \
+ V(MipsAdd) \
+ V(MipsAddOvf) \
+ V(MipsSub) \
+ V(MipsSubOvf) \
+ V(MipsMul) \
+ V(MipsMulOvf) \
+ V(MipsMulHigh) \
+ V(MipsMulHighU) \
+ V(MipsDiv) \
+ V(MipsDivU) \
+ V(MipsMod) \
+ V(MipsModU) \
+ V(MipsAnd) \
+ V(MipsOr) \
+ V(MipsNor) \
+ V(MipsXor) \
+ V(MipsClz) \
+ V(MipsCtz) \
+ V(MipsPopcnt) \
+ V(MipsLsa) \
+ V(MipsShl) \
+ V(MipsShr) \
+ V(MipsSar) \
+ V(MipsShlPair) \
+ V(MipsShrPair) \
+ V(MipsSarPair) \
+ V(MipsExt) \
+ V(MipsIns) \
+ V(MipsRor) \
+ V(MipsMov) \
+ V(MipsTst) \
+ V(MipsCmp) \
+ V(MipsCmpS) \
+ V(MipsAddS) \
+ V(MipsSubS) \
+ V(MipsMulS) \
+ V(MipsDivS) \
+ V(MipsAbsS) \
+ V(MipsSqrtS) \
+ V(MipsMaxS) \
+ V(MipsMinS) \
+ V(MipsCmpD) \
+ V(MipsAddD) \
+ V(MipsSubD) \
+ V(MipsMulD) \
+ V(MipsDivD) \
+ V(MipsModD) \
+ V(MipsAbsD) \
+ V(MipsSqrtD) \
+ V(MipsMaxD) \
+ V(MipsMinD) \
+ V(MipsNegS) \
+ V(MipsNegD) \
+ V(MipsAddPair) \
+ V(MipsSubPair) \
+ V(MipsMulPair) \
+ V(MipsMaddS) \
+ V(MipsMaddD) \
+ V(MipsMsubS) \
+ V(MipsMsubD) \
+ V(MipsFloat32RoundDown) \
+ V(MipsFloat32RoundTruncate) \
+ V(MipsFloat32RoundUp) \
+ V(MipsFloat32RoundTiesEven) \
+ V(MipsFloat64RoundDown) \
+ V(MipsFloat64RoundTruncate) \
+ V(MipsFloat64RoundUp) \
+ V(MipsFloat64RoundTiesEven) \
+ V(MipsCvtSD) \
+ V(MipsCvtDS) \
+ V(MipsTruncWD) \
+ V(MipsRoundWD) \
+ V(MipsFloorWD) \
+ V(MipsCeilWD) \
+ V(MipsTruncWS) \
+ V(MipsRoundWS) \
+ V(MipsFloorWS) \
+ V(MipsCeilWS) \
+ V(MipsTruncUwD) \
+ V(MipsTruncUwS) \
+ V(MipsCvtDW) \
+ V(MipsCvtDUw) \
+ V(MipsCvtSW) \
+ V(MipsCvtSUw) \
+ V(MipsLb) \
+ V(MipsLbu) \
+ V(MipsSb) \
+ V(MipsLh) \
+ V(MipsUlh) \
+ V(MipsLhu) \
+ V(MipsUlhu) \
+ V(MipsSh) \
+ V(MipsUsh) \
+ V(MipsLw) \
+ V(MipsUlw) \
+ V(MipsSw) \
+ V(MipsUsw) \
+ V(MipsLwc1) \
+ V(MipsUlwc1) \
+ V(MipsSwc1) \
+ V(MipsUswc1) \
+ V(MipsLdc1) \
+ V(MipsUldc1) \
+ V(MipsSdc1) \
+ V(MipsUsdc1) \
+ V(MipsFloat64ExtractLowWord32) \
+ V(MipsFloat64ExtractHighWord32) \
+ V(MipsFloat64InsertLowWord32) \
+ V(MipsFloat64InsertHighWord32) \
+ V(MipsFloat64SilenceNaN) \
+ V(MipsFloat32Max) \
+ V(MipsFloat64Max) \
+ V(MipsFloat32Min) \
+ V(MipsFloat64Min) \
+ V(MipsPush) \
+ V(MipsPeek) \
+ V(MipsStoreToStackSlot) \
+ V(MipsByteSwap32) \
+ V(MipsStackClaim) \
+ V(MipsSeb) \
+ V(MipsSeh) \
+ V(MipsSync) \
+ V(MipsS128Zero) \
+ V(MipsI32x4Splat) \
+ V(MipsI32x4ExtractLane) \
+ V(MipsI32x4ReplaceLane) \
+ V(MipsI32x4Add) \
+ V(MipsI32x4Sub) \
+ V(MipsF64x2Abs) \
+ V(MipsF64x2Neg) \
+ V(MipsF64x2Sqrt) \
+ V(MipsF64x2Add) \
+ V(MipsF64x2Sub) \
+ V(MipsF64x2Mul) \
+ V(MipsF64x2Div) \
+ V(MipsF64x2Min) \
+ V(MipsF64x2Max) \
+ V(MipsF64x2Eq) \
+ V(MipsF64x2Ne) \
+ V(MipsF64x2Lt) \
+ V(MipsF64x2Le) \
+ V(MipsF64x2Pmin) \
+ V(MipsF64x2Pmax) \
+ V(MipsF64x2Ceil) \
+ V(MipsF64x2Floor) \
+ V(MipsF64x2Trunc) \
+ V(MipsF64x2NearestInt) \
+ V(MipsF64x2ConvertLowI32x4S) \
+ V(MipsF64x2ConvertLowI32x4U) \
+ V(MipsF64x2PromoteLowF32x4) \
+ V(MipsI64x2Add) \
+ V(MipsI64x2Sub) \
+ V(MipsI64x2Mul) \
+ V(MipsI64x2Neg) \
+ V(MipsI64x2Shl) \
+ V(MipsI64x2ShrS) \
+ V(MipsI64x2ShrU) \
+ V(MipsI64x2BitMask) \
+ V(MipsI64x2Eq) \
+ V(MipsI64x2Ne) \
+ V(MipsI64x2GtS) \
+ V(MipsI64x2GeS) \
+ V(MipsI64x2Abs) \
+ V(MipsI64x2SConvertI32x4Low) \
+ V(MipsI64x2SConvertI32x4High) \
+ V(MipsI64x2UConvertI32x4Low) \
+ V(MipsI64x2UConvertI32x4High) \
+ V(MipsI64x2ExtMulLowI32x4S) \
+ V(MipsI64x2ExtMulHighI32x4S) \
+ V(MipsI64x2ExtMulLowI32x4U) \
+ V(MipsI64x2ExtMulHighI32x4U) \
+ V(MipsF32x4Splat) \
+ V(MipsF32x4ExtractLane) \
+ V(MipsF32x4ReplaceLane) \
+ V(MipsF32x4SConvertI32x4) \
+ V(MipsF32x4UConvertI32x4) \
+ V(MipsF32x4DemoteF64x2Zero) \
+ V(MipsI32x4Mul) \
+ V(MipsI32x4MaxS) \
+ V(MipsI32x4MinS) \
+ V(MipsI32x4Eq) \
+ V(MipsI32x4Ne) \
+ V(MipsI32x4Shl) \
+ V(MipsI32x4ShrS) \
+ V(MipsI32x4ShrU) \
+ V(MipsI32x4MaxU) \
+ V(MipsI32x4MinU) \
+ V(MipsF64x2Splat) \
+ V(MipsF64x2ExtractLane) \
+ V(MipsF64x2ReplaceLane) \
+ V(MipsF32x4Abs) \
+ V(MipsF32x4Neg) \
+ V(MipsF32x4Sqrt) \
+ V(MipsF32x4RecipApprox) \
+ V(MipsF32x4RecipSqrtApprox) \
+ V(MipsF32x4Add) \
+ V(MipsF32x4Sub) \
+ V(MipsF32x4Mul) \
+ V(MipsF32x4Div) \
+ V(MipsF32x4Max) \
+ V(MipsF32x4Min) \
+ V(MipsF32x4Eq) \
+ V(MipsF32x4Ne) \
+ V(MipsF32x4Lt) \
+ V(MipsF32x4Le) \
+ V(MipsF32x4Pmin) \
+ V(MipsF32x4Pmax) \
+ V(MipsF32x4Ceil) \
+ V(MipsF32x4Floor) \
+ V(MipsF32x4Trunc) \
+ V(MipsF32x4NearestInt) \
+ V(MipsI32x4SConvertF32x4) \
+ V(MipsI32x4UConvertF32x4) \
+ V(MipsI32x4Neg) \
+ V(MipsI32x4GtS) \
+ V(MipsI32x4GeS) \
+ V(MipsI32x4GtU) \
+ V(MipsI32x4GeU) \
+ V(MipsI32x4Abs) \
+ V(MipsI32x4BitMask) \
+ V(MipsI32x4DotI16x8S) \
+ V(MipsI32x4ExtMulLowI16x8S) \
+ V(MipsI32x4ExtMulHighI16x8S) \
+ V(MipsI32x4ExtMulLowI16x8U) \
+ V(MipsI32x4ExtMulHighI16x8U) \
+ V(MipsI32x4TruncSatF64x2SZero) \
+ V(MipsI32x4TruncSatF64x2UZero) \
+ V(MipsI32x4ExtAddPairwiseI16x8S) \
+ V(MipsI32x4ExtAddPairwiseI16x8U) \
+ V(MipsI16x8Splat) \
+ V(MipsI16x8ExtractLaneU) \
+ V(MipsI16x8ExtractLaneS) \
+ V(MipsI16x8ReplaceLane) \
+ V(MipsI16x8Neg) \
+ V(MipsI16x8Shl) \
+ V(MipsI16x8ShrS) \
+ V(MipsI16x8ShrU) \
+ V(MipsI16x8Add) \
+ V(MipsI16x8AddSatS) \
+ V(MipsI16x8Sub) \
+ V(MipsI16x8SubSatS) \
+ V(MipsI16x8Mul) \
+ V(MipsI16x8MaxS) \
+ V(MipsI16x8MinS) \
+ V(MipsI16x8Eq) \
+ V(MipsI16x8Ne) \
+ V(MipsI16x8GtS) \
+ V(MipsI16x8GeS) \
+ V(MipsI16x8AddSatU) \
+ V(MipsI16x8SubSatU) \
+ V(MipsI16x8MaxU) \
+ V(MipsI16x8MinU) \
+ V(MipsI16x8GtU) \
+ V(MipsI16x8GeU) \
+ V(MipsI16x8RoundingAverageU) \
+ V(MipsI16x8Abs) \
+ V(MipsI16x8BitMask) \
+ V(MipsI16x8Q15MulRSatS) \
+ V(MipsI16x8ExtMulLowI8x16S) \
+ V(MipsI16x8ExtMulHighI8x16S) \
+ V(MipsI16x8ExtMulLowI8x16U) \
+ V(MipsI16x8ExtMulHighI8x16U) \
+ V(MipsI16x8ExtAddPairwiseI8x16S) \
+ V(MipsI16x8ExtAddPairwiseI8x16U) \
+ V(MipsI8x16Splat) \
+ V(MipsI8x16ExtractLaneU) \
+ V(MipsI8x16ExtractLaneS) \
+ V(MipsI8x16ReplaceLane) \
+ V(MipsI8x16Neg) \
+ V(MipsI8x16Shl) \
+ V(MipsI8x16ShrS) \
+ V(MipsI8x16Add) \
+ V(MipsI8x16AddSatS) \
+ V(MipsI8x16Sub) \
+ V(MipsI8x16SubSatS) \
+ V(MipsI8x16MaxS) \
+ V(MipsI8x16MinS) \
+ V(MipsI8x16Eq) \
+ V(MipsI8x16Ne) \
+ V(MipsI8x16GtS) \
+ V(MipsI8x16GeS) \
+ V(MipsI8x16ShrU) \
+ V(MipsI8x16AddSatU) \
+ V(MipsI8x16SubSatU) \
+ V(MipsI8x16MaxU) \
+ V(MipsI8x16MinU) \
+ V(MipsI8x16GtU) \
+ V(MipsI8x16GeU) \
+ V(MipsI8x16RoundingAverageU) \
+ V(MipsI8x16Abs) \
+ V(MipsI8x16Popcnt) \
+ V(MipsI8x16BitMask) \
+ V(MipsS128And) \
+ V(MipsS128Or) \
+ V(MipsS128Xor) \
+ V(MipsS128Not) \
+ V(MipsS128Select) \
+ V(MipsS128AndNot) \
+ V(MipsI64x2AllTrue) \
+ V(MipsI32x4AllTrue) \
+ V(MipsI16x8AllTrue) \
+ V(MipsI8x16AllTrue) \
+ V(MipsV128AnyTrue) \
+ V(MipsS32x4InterleaveRight) \
+ V(MipsS32x4InterleaveLeft) \
+ V(MipsS32x4PackEven) \
+ V(MipsS32x4PackOdd) \
+ V(MipsS32x4InterleaveEven) \
+ V(MipsS32x4InterleaveOdd) \
+ V(MipsS32x4Shuffle) \
+ V(MipsS16x8InterleaveRight) \
+ V(MipsS16x8InterleaveLeft) \
+ V(MipsS16x8PackEven) \
+ V(MipsS16x8PackOdd) \
+ V(MipsS16x8InterleaveEven) \
+ V(MipsS16x8InterleaveOdd) \
+ V(MipsS16x4Reverse) \
+ V(MipsS16x2Reverse) \
+ V(MipsS8x16InterleaveRight) \
+ V(MipsS8x16InterleaveLeft) \
+ V(MipsS8x16PackEven) \
+ V(MipsS8x16PackOdd) \
+ V(MipsS8x16InterleaveEven) \
+ V(MipsS8x16InterleaveOdd) \
+ V(MipsI8x16Shuffle) \
+ V(MipsI8x16Swizzle) \
+ V(MipsS8x16Concat) \
+ V(MipsS8x8Reverse) \
+ V(MipsS8x4Reverse) \
+ V(MipsS8x2Reverse) \
+ V(MipsS128Load8Splat) \
+ V(MipsS128Load16Splat) \
+ V(MipsS128Load32Splat) \
+ V(MipsS128Load64Splat) \
+ V(MipsS128Load8x8S) \
+ V(MipsS128Load8x8U) \
+ V(MipsS128Load16x4S) \
+ V(MipsS128Load16x4U) \
+ V(MipsS128Load32x2S) \
+ V(MipsS128Load32x2U) \
+ V(MipsMsaLd) \
+ V(MipsMsaSt) \
+ V(MipsI32x4SConvertI16x8Low) \
+ V(MipsI32x4SConvertI16x8High) \
+ V(MipsI32x4UConvertI16x8Low) \
+ V(MipsI32x4UConvertI16x8High) \
+ V(MipsI16x8SConvertI8x16Low) \
+ V(MipsI16x8SConvertI8x16High) \
+ V(MipsI16x8SConvertI32x4) \
+ V(MipsI16x8UConvertI32x4) \
+ V(MipsI16x8UConvertI8x16Low) \
+ V(MipsI16x8UConvertI8x16High) \
+ V(MipsI8x16SConvertI16x8) \
+ V(MipsI8x16UConvertI16x8) \
+ V(MipsWord32AtomicPairLoad) \
+ V(MipsWord32AtomicPairStore) \
+ V(MipsWord32AtomicPairAdd) \
+ V(MipsWord32AtomicPairSub) \
+ V(MipsWord32AtomicPairAnd) \
+ V(MipsWord32AtomicPairOr) \
+ V(MipsWord32AtomicPairXor) \
+ V(MipsWord32AtomicPairExchange) \
V(MipsWord32AtomicPairCompareExchange)
// Addressing modes represent the "shape" of inputs to an instruction.
diff --git a/deps/v8/src/compiler/backend/mips/instruction-scheduler-mips.cc b/deps/v8/src/compiler/backend/mips/instruction-scheduler-mips.cc
index aeb1756227..d59392b40a 100644
--- a/deps/v8/src/compiler/backend/mips/instruction-scheduler-mips.cc
+++ b/deps/v8/src/compiler/backend/mips/instruction-scheduler-mips.cc
@@ -1427,7 +1427,7 @@ int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
2);
case kArchTableSwitch:
return AssembleArchTableSwitchLatency();
- case kArchAbortCSAAssert:
+ case kArchAbortCSADcheck:
return CallLatency() + 1;
case kArchComment:
case kArchDeoptimize:
diff --git a/deps/v8/src/compiler/backend/mips/instruction-selector-mips.cc b/deps/v8/src/compiler/backend/mips/instruction-selector-mips.cc
index 477c791ca0..39d1feef96 100644
--- a/deps/v8/src/compiler/backend/mips/instruction-selector-mips.cc
+++ b/deps/v8/src/compiler/backend/mips/instruction-selector-mips.cc
@@ -278,9 +278,9 @@ void InstructionSelector::VisitStackSlot(Node* node) {
sequence()->AddImmediate(Constant(slot)), 0, nullptr);
}
-void InstructionSelector::VisitAbortCSAAssert(Node* node) {
+void InstructionSelector::VisitAbortCSADcheck(Node* node) {
MipsOperandGenerator g(this);
- Emit(kArchAbortCSAAssert, g.NoOutput(), g.UseFixed(node->InputAt(0), a0));
+ Emit(kArchAbortCSADcheck, g.NoOutput(), g.UseFixed(node->InputAt(0), a0));
}
void InstructionSelector::VisitLoadTransform(Node* node) {
diff --git a/deps/v8/src/compiler/backend/mips64/code-generator-mips64.cc b/deps/v8/src/compiler/backend/mips64/code-generator-mips64.cc
index f6fccd43d2..5d6a745407 100644
--- a/deps/v8/src/compiler/backend/mips64/code-generator-mips64.cc
+++ b/deps/v8/src/compiler/backend/mips64/code-generator-mips64.cc
@@ -770,13 +770,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArchTableSwitch:
AssembleArchTableSwitch(instr);
break;
- case kArchAbortCSAAssert:
+ case kArchAbortCSADcheck:
DCHECK(i.InputRegister(0) == a0);
{
// We don't actually want to generate a pile of code for this, so just
// claim there is a stack frame, without generating one.
- FrameScope scope(tasm(), StackFrame::NONE);
- __ Call(isolate()->builtins()->code_handle(Builtin::kAbortCSAAssert),
+ FrameScope scope(tasm(), StackFrame::NO_FRAME_TYPE);
+ __ Call(isolate()->builtins()->code_handle(Builtin::kAbortCSADcheck),
RelocInfo::CODE_TARGET);
}
__ stop();
@@ -1032,14 +1032,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
case kMips64And32:
__ And(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
- __ sll(i.OutputRegister(), i.OutputRegister(), 0x0);
break;
case kMips64Or:
__ Or(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
break;
case kMips64Or32:
__ Or(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
- __ sll(i.OutputRegister(), i.OutputRegister(), 0x0);
break;
case kMips64Nor:
if (instr->InputAt(1)->IsRegister()) {
@@ -1052,11 +1050,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kMips64Nor32:
if (instr->InputAt(1)->IsRegister()) {
__ Nor(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
- __ sll(i.OutputRegister(), i.OutputRegister(), 0x0);
} else {
DCHECK_EQ(0, i.InputOperand(1).immediate());
__ Nor(i.OutputRegister(), i.InputRegister(0), zero_reg);
- __ sll(i.OutputRegister(), i.OutputRegister(), 0x0);
}
break;
case kMips64Xor:
@@ -1103,23 +1099,19 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
case kMips64Shr:
if (instr->InputAt(1)->IsRegister()) {
- __ sll(i.InputRegister(0), i.InputRegister(0), 0x0);
__ srlv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
} else {
int64_t imm = i.InputOperand(1).immediate();
- __ sll(i.OutputRegister(), i.InputRegister(0), 0x0);
- __ srl(i.OutputRegister(), i.OutputRegister(),
+ __ srl(i.OutputRegister(), i.InputRegister(0),
static_cast<uint16_t>(imm));
}
break;
case kMips64Sar:
if (instr->InputAt(1)->IsRegister()) {
- __ sll(i.InputRegister(0), i.InputRegister(0), 0x0);
__ srav(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
} else {
int64_t imm = i.InputOperand(1).immediate();
- __ sll(i.OutputRegister(), i.InputRegister(0), 0x0);
- __ sra(i.OutputRegister(), i.OutputRegister(),
+ __ sra(i.OutputRegister(), i.InputRegister(0),
static_cast<uint16_t>(imm));
}
break;
diff --git a/deps/v8/src/compiler/backend/mips64/instruction-codes-mips64.h b/deps/v8/src/compiler/backend/mips64/instruction-codes-mips64.h
index 30d7f5af75..003b6bd6c2 100644
--- a/deps/v8/src/compiler/backend/mips64/instruction-codes-mips64.h
+++ b/deps/v8/src/compiler/backend/mips64/instruction-codes-mips64.h
@@ -11,393 +11,398 @@ namespace compiler {
// MIPS64-specific opcodes that specify which assembly sequence to emit.
// Most opcodes specify a single instruction.
-#define TARGET_ARCH_OPCODE_LIST(V) \
- V(Mips64Add) \
- V(Mips64Dadd) \
- V(Mips64DaddOvf) \
- V(Mips64Sub) \
- V(Mips64Dsub) \
- V(Mips64DsubOvf) \
- V(Mips64Mul) \
- V(Mips64MulOvf) \
- V(Mips64MulHigh) \
- V(Mips64DMulHigh) \
- V(Mips64MulHighU) \
- V(Mips64Dmul) \
- V(Mips64Div) \
- V(Mips64Ddiv) \
- V(Mips64DivU) \
- V(Mips64DdivU) \
- V(Mips64Mod) \
- V(Mips64Dmod) \
- V(Mips64ModU) \
- V(Mips64DmodU) \
- V(Mips64And) \
- V(Mips64And32) \
- V(Mips64Or) \
- V(Mips64Or32) \
- V(Mips64Nor) \
- V(Mips64Nor32) \
- V(Mips64Xor) \
- V(Mips64Xor32) \
- V(Mips64Clz) \
- V(Mips64Lsa) \
- V(Mips64Dlsa) \
- V(Mips64Shl) \
- V(Mips64Shr) \
- V(Mips64Sar) \
- V(Mips64Ext) \
- V(Mips64Ins) \
- V(Mips64Dext) \
- V(Mips64Dins) \
- V(Mips64Dclz) \
- V(Mips64Ctz) \
- V(Mips64Dctz) \
- V(Mips64Popcnt) \
- V(Mips64Dpopcnt) \
- V(Mips64Dshl) \
- V(Mips64Dshr) \
- V(Mips64Dsar) \
- V(Mips64Ror) \
- V(Mips64Dror) \
- V(Mips64Mov) \
- V(Mips64Tst) \
- V(Mips64Cmp) \
- V(Mips64CmpS) \
- V(Mips64AddS) \
- V(Mips64SubS) \
- V(Mips64MulS) \
- V(Mips64DivS) \
- V(Mips64AbsS) \
- V(Mips64NegS) \
- V(Mips64SqrtS) \
- V(Mips64MaxS) \
- V(Mips64MinS) \
- V(Mips64CmpD) \
- V(Mips64AddD) \
- V(Mips64SubD) \
- V(Mips64MulD) \
- V(Mips64DivD) \
- V(Mips64ModD) \
- V(Mips64AbsD) \
- V(Mips64NegD) \
- V(Mips64SqrtD) \
- V(Mips64MaxD) \
- V(Mips64MinD) \
- V(Mips64Float64RoundDown) \
- V(Mips64Float64RoundTruncate) \
- V(Mips64Float64RoundUp) \
- V(Mips64Float64RoundTiesEven) \
- V(Mips64Float32RoundDown) \
- V(Mips64Float32RoundTruncate) \
- V(Mips64Float32RoundUp) \
- V(Mips64Float32RoundTiesEven) \
- V(Mips64CvtSD) \
- V(Mips64CvtDS) \
- V(Mips64TruncWD) \
- V(Mips64RoundWD) \
- V(Mips64FloorWD) \
- V(Mips64CeilWD) \
- V(Mips64TruncWS) \
- V(Mips64RoundWS) \
- V(Mips64FloorWS) \
- V(Mips64CeilWS) \
- V(Mips64TruncLS) \
- V(Mips64TruncLD) \
- V(Mips64TruncUwD) \
- V(Mips64TruncUwS) \
- V(Mips64TruncUlS) \
- V(Mips64TruncUlD) \
- V(Mips64CvtDW) \
- V(Mips64CvtSL) \
- V(Mips64CvtSW) \
- V(Mips64CvtSUw) \
- V(Mips64CvtSUl) \
- V(Mips64CvtDL) \
- V(Mips64CvtDUw) \
- V(Mips64CvtDUl) \
- V(Mips64Lb) \
- V(Mips64Lbu) \
- V(Mips64Sb) \
- V(Mips64Lh) \
- V(Mips64Ulh) \
- V(Mips64Lhu) \
- V(Mips64Ulhu) \
- V(Mips64Sh) \
- V(Mips64Ush) \
- V(Mips64Ld) \
- V(Mips64Uld) \
- V(Mips64Lw) \
- V(Mips64Ulw) \
- V(Mips64Lwu) \
- V(Mips64Ulwu) \
- V(Mips64Sw) \
- V(Mips64Usw) \
- V(Mips64Sd) \
- V(Mips64Usd) \
- V(Mips64Lwc1) \
- V(Mips64Ulwc1) \
- V(Mips64Swc1) \
- V(Mips64Uswc1) \
- V(Mips64Ldc1) \
- V(Mips64Uldc1) \
- V(Mips64Sdc1) \
- V(Mips64Usdc1) \
- V(Mips64BitcastDL) \
- V(Mips64BitcastLD) \
- V(Mips64Float64ExtractLowWord32) \
- V(Mips64Float64ExtractHighWord32) \
- V(Mips64Float64InsertLowWord32) \
- V(Mips64Float64InsertHighWord32) \
- V(Mips64Float32Max) \
- V(Mips64Float64Max) \
- V(Mips64Float32Min) \
- V(Mips64Float64Min) \
- V(Mips64Float64SilenceNaN) \
- V(Mips64Push) \
- V(Mips64Peek) \
- V(Mips64StoreToStackSlot) \
- V(Mips64ByteSwap64) \
- V(Mips64ByteSwap32) \
- V(Mips64StackClaim) \
- V(Mips64Seb) \
- V(Mips64Seh) \
- V(Mips64Sync) \
- V(Mips64AssertEqual) \
- V(Mips64S128Const) \
- V(Mips64S128Zero) \
- V(Mips64S128AllOnes) \
- V(Mips64I32x4Splat) \
- V(Mips64I32x4ExtractLane) \
- V(Mips64I32x4ReplaceLane) \
- V(Mips64I32x4Add) \
- V(Mips64I32x4Sub) \
- V(Mips64F64x2Abs) \
- V(Mips64F64x2Neg) \
- V(Mips64F32x4Splat) \
- V(Mips64F32x4ExtractLane) \
- V(Mips64F32x4ReplaceLane) \
- V(Mips64F32x4SConvertI32x4) \
- V(Mips64F32x4UConvertI32x4) \
- V(Mips64I32x4Mul) \
- V(Mips64I32x4MaxS) \
- V(Mips64I32x4MinS) \
- V(Mips64I32x4Eq) \
- V(Mips64I32x4Ne) \
- V(Mips64I32x4Shl) \
- V(Mips64I32x4ShrS) \
- V(Mips64I32x4ShrU) \
- V(Mips64I32x4MaxU) \
- V(Mips64I32x4MinU) \
- V(Mips64F64x2Sqrt) \
- V(Mips64F64x2Add) \
- V(Mips64F64x2Sub) \
- V(Mips64F64x2Mul) \
- V(Mips64F64x2Div) \
- V(Mips64F64x2Min) \
- V(Mips64F64x2Max) \
- V(Mips64F64x2Eq) \
- V(Mips64F64x2Ne) \
- V(Mips64F64x2Lt) \
- V(Mips64F64x2Le) \
- V(Mips64F64x2Splat) \
- V(Mips64F64x2ExtractLane) \
- V(Mips64F64x2ReplaceLane) \
- V(Mips64F64x2Pmin) \
- V(Mips64F64x2Pmax) \
- V(Mips64F64x2Ceil) \
- V(Mips64F64x2Floor) \
- V(Mips64F64x2Trunc) \
- V(Mips64F64x2NearestInt) \
- V(Mips64F64x2ConvertLowI32x4S) \
- V(Mips64F64x2ConvertLowI32x4U) \
- V(Mips64F64x2PromoteLowF32x4) \
- V(Mips64I64x2Splat) \
- V(Mips64I64x2ExtractLane) \
- V(Mips64I64x2ReplaceLane) \
- V(Mips64I64x2Add) \
- V(Mips64I64x2Sub) \
- V(Mips64I64x2Mul) \
- V(Mips64I64x2Neg) \
- V(Mips64I64x2Shl) \
- V(Mips64I64x2ShrS) \
- V(Mips64I64x2ShrU) \
- V(Mips64I64x2BitMask) \
- V(Mips64I64x2Eq) \
- V(Mips64I64x2Ne) \
- V(Mips64I64x2GtS) \
- V(Mips64I64x2GeS) \
- V(Mips64I64x2Abs) \
- V(Mips64I64x2SConvertI32x4Low) \
- V(Mips64I64x2SConvertI32x4High) \
- V(Mips64I64x2UConvertI32x4Low) \
- V(Mips64I64x2UConvertI32x4High) \
- V(Mips64ExtMulLow) \
- V(Mips64ExtMulHigh) \
- V(Mips64ExtAddPairwise) \
- V(Mips64F32x4Abs) \
- V(Mips64F32x4Neg) \
- V(Mips64F32x4Sqrt) \
- V(Mips64F32x4RecipApprox) \
- V(Mips64F32x4RecipSqrtApprox) \
- V(Mips64F32x4Add) \
- V(Mips64F32x4Sub) \
- V(Mips64F32x4Mul) \
- V(Mips64F32x4Div) \
- V(Mips64F32x4Max) \
- V(Mips64F32x4Min) \
- V(Mips64F32x4Eq) \
- V(Mips64F32x4Ne) \
- V(Mips64F32x4Lt) \
- V(Mips64F32x4Le) \
- V(Mips64F32x4Pmin) \
- V(Mips64F32x4Pmax) \
- V(Mips64F32x4Ceil) \
- V(Mips64F32x4Floor) \
- V(Mips64F32x4Trunc) \
- V(Mips64F32x4NearestInt) \
- V(Mips64F32x4DemoteF64x2Zero) \
- V(Mips64I32x4SConvertF32x4) \
- V(Mips64I32x4UConvertF32x4) \
- V(Mips64I32x4Neg) \
- V(Mips64I32x4GtS) \
- V(Mips64I32x4GeS) \
- V(Mips64I32x4GtU) \
- V(Mips64I32x4GeU) \
- V(Mips64I32x4Abs) \
- V(Mips64I32x4BitMask) \
- V(Mips64I32x4DotI16x8S) \
- V(Mips64I32x4TruncSatF64x2SZero) \
- V(Mips64I32x4TruncSatF64x2UZero) \
- V(Mips64I16x8Splat) \
- V(Mips64I16x8ExtractLaneU) \
- V(Mips64I16x8ExtractLaneS) \
- V(Mips64I16x8ReplaceLane) \
- V(Mips64I16x8Neg) \
- V(Mips64I16x8Shl) \
- V(Mips64I16x8ShrS) \
- V(Mips64I16x8ShrU) \
- V(Mips64I16x8Add) \
- V(Mips64I16x8AddSatS) \
- V(Mips64I16x8Sub) \
- V(Mips64I16x8SubSatS) \
- V(Mips64I16x8Mul) \
- V(Mips64I16x8MaxS) \
- V(Mips64I16x8MinS) \
- V(Mips64I16x8Eq) \
- V(Mips64I16x8Ne) \
- V(Mips64I16x8GtS) \
- V(Mips64I16x8GeS) \
- V(Mips64I16x8AddSatU) \
- V(Mips64I16x8SubSatU) \
- V(Mips64I16x8MaxU) \
- V(Mips64I16x8MinU) \
- V(Mips64I16x8GtU) \
- V(Mips64I16x8GeU) \
- V(Mips64I16x8RoundingAverageU) \
- V(Mips64I16x8Abs) \
- V(Mips64I16x8BitMask) \
- V(Mips64I16x8Q15MulRSatS) \
- V(Mips64I8x16Splat) \
- V(Mips64I8x16ExtractLaneU) \
- V(Mips64I8x16ExtractLaneS) \
- V(Mips64I8x16ReplaceLane) \
- V(Mips64I8x16Neg) \
- V(Mips64I8x16Shl) \
- V(Mips64I8x16ShrS) \
- V(Mips64I8x16Add) \
- V(Mips64I8x16AddSatS) \
- V(Mips64I8x16Sub) \
- V(Mips64I8x16SubSatS) \
- V(Mips64I8x16MaxS) \
- V(Mips64I8x16MinS) \
- V(Mips64I8x16Eq) \
- V(Mips64I8x16Ne) \
- V(Mips64I8x16GtS) \
- V(Mips64I8x16GeS) \
- V(Mips64I8x16ShrU) \
- V(Mips64I8x16AddSatU) \
- V(Mips64I8x16SubSatU) \
- V(Mips64I8x16MaxU) \
- V(Mips64I8x16MinU) \
- V(Mips64I8x16GtU) \
- V(Mips64I8x16GeU) \
- V(Mips64I8x16RoundingAverageU) \
- V(Mips64I8x16Abs) \
- V(Mips64I8x16Popcnt) \
- V(Mips64I8x16BitMask) \
- V(Mips64S128And) \
- V(Mips64S128Or) \
- V(Mips64S128Xor) \
- V(Mips64S128Not) \
- V(Mips64S128Select) \
- V(Mips64S128AndNot) \
- V(Mips64I64x2AllTrue) \
- V(Mips64I32x4AllTrue) \
- V(Mips64I16x8AllTrue) \
- V(Mips64I8x16AllTrue) \
- V(Mips64V128AnyTrue) \
- V(Mips64S32x4InterleaveRight) \
- V(Mips64S32x4InterleaveLeft) \
- V(Mips64S32x4PackEven) \
- V(Mips64S32x4PackOdd) \
- V(Mips64S32x4InterleaveEven) \
- V(Mips64S32x4InterleaveOdd) \
- V(Mips64S32x4Shuffle) \
- V(Mips64S16x8InterleaveRight) \
- V(Mips64S16x8InterleaveLeft) \
- V(Mips64S16x8PackEven) \
- V(Mips64S16x8PackOdd) \
- V(Mips64S16x8InterleaveEven) \
- V(Mips64S16x8InterleaveOdd) \
- V(Mips64S16x4Reverse) \
- V(Mips64S16x2Reverse) \
- V(Mips64S8x16InterleaveRight) \
- V(Mips64S8x16InterleaveLeft) \
- V(Mips64S8x16PackEven) \
- V(Mips64S8x16PackOdd) \
- V(Mips64S8x16InterleaveEven) \
- V(Mips64S8x16InterleaveOdd) \
- V(Mips64I8x16Shuffle) \
- V(Mips64I8x16Swizzle) \
- V(Mips64S8x16Concat) \
- V(Mips64S8x8Reverse) \
- V(Mips64S8x4Reverse) \
- V(Mips64S8x2Reverse) \
- V(Mips64S128LoadSplat) \
- V(Mips64S128Load8x8S) \
- V(Mips64S128Load8x8U) \
- V(Mips64S128Load16x4S) \
- V(Mips64S128Load16x4U) \
- V(Mips64S128Load32x2S) \
- V(Mips64S128Load32x2U) \
- V(Mips64S128Load32Zero) \
- V(Mips64S128Load64Zero) \
- V(Mips64S128LoadLane) \
- V(Mips64S128StoreLane) \
- V(Mips64MsaLd) \
- V(Mips64MsaSt) \
- V(Mips64I32x4SConvertI16x8Low) \
- V(Mips64I32x4SConvertI16x8High) \
- V(Mips64I32x4UConvertI16x8Low) \
- V(Mips64I32x4UConvertI16x8High) \
- V(Mips64I16x8SConvertI8x16Low) \
- V(Mips64I16x8SConvertI8x16High) \
- V(Mips64I16x8SConvertI32x4) \
- V(Mips64I16x8UConvertI32x4) \
- V(Mips64I16x8UConvertI8x16Low) \
- V(Mips64I16x8UConvertI8x16High) \
- V(Mips64I8x16SConvertI16x8) \
- V(Mips64I8x16UConvertI16x8) \
- V(Mips64StoreCompressTagged) \
- V(Mips64Word64AtomicLoadUint64) \
- V(Mips64Word64AtomicStoreWord64) \
- V(Mips64Word64AtomicAddUint64) \
- V(Mips64Word64AtomicSubUint64) \
- V(Mips64Word64AtomicAndUint64) \
- V(Mips64Word64AtomicOrUint64) \
- V(Mips64Word64AtomicXorUint64) \
- V(Mips64Word64AtomicExchangeUint64) \
+
+// Opcodes that support a MemoryAccessMode.
+#define TARGET_ARCH_OPCODE_WITH_MEMORY_ACCESS_MODE_LIST(V) // None.
+
+#define TARGET_ARCH_OPCODE_LIST(V) \
+ TARGET_ARCH_OPCODE_WITH_MEMORY_ACCESS_MODE_LIST(V) \
+ V(Mips64Add) \
+ V(Mips64Dadd) \
+ V(Mips64DaddOvf) \
+ V(Mips64Sub) \
+ V(Mips64Dsub) \
+ V(Mips64DsubOvf) \
+ V(Mips64Mul) \
+ V(Mips64MulOvf) \
+ V(Mips64MulHigh) \
+ V(Mips64DMulHigh) \
+ V(Mips64MulHighU) \
+ V(Mips64Dmul) \
+ V(Mips64Div) \
+ V(Mips64Ddiv) \
+ V(Mips64DivU) \
+ V(Mips64DdivU) \
+ V(Mips64Mod) \
+ V(Mips64Dmod) \
+ V(Mips64ModU) \
+ V(Mips64DmodU) \
+ V(Mips64And) \
+ V(Mips64And32) \
+ V(Mips64Or) \
+ V(Mips64Or32) \
+ V(Mips64Nor) \
+ V(Mips64Nor32) \
+ V(Mips64Xor) \
+ V(Mips64Xor32) \
+ V(Mips64Clz) \
+ V(Mips64Lsa) \
+ V(Mips64Dlsa) \
+ V(Mips64Shl) \
+ V(Mips64Shr) \
+ V(Mips64Sar) \
+ V(Mips64Ext) \
+ V(Mips64Ins) \
+ V(Mips64Dext) \
+ V(Mips64Dins) \
+ V(Mips64Dclz) \
+ V(Mips64Ctz) \
+ V(Mips64Dctz) \
+ V(Mips64Popcnt) \
+ V(Mips64Dpopcnt) \
+ V(Mips64Dshl) \
+ V(Mips64Dshr) \
+ V(Mips64Dsar) \
+ V(Mips64Ror) \
+ V(Mips64Dror) \
+ V(Mips64Mov) \
+ V(Mips64Tst) \
+ V(Mips64Cmp) \
+ V(Mips64CmpS) \
+ V(Mips64AddS) \
+ V(Mips64SubS) \
+ V(Mips64MulS) \
+ V(Mips64DivS) \
+ V(Mips64AbsS) \
+ V(Mips64NegS) \
+ V(Mips64SqrtS) \
+ V(Mips64MaxS) \
+ V(Mips64MinS) \
+ V(Mips64CmpD) \
+ V(Mips64AddD) \
+ V(Mips64SubD) \
+ V(Mips64MulD) \
+ V(Mips64DivD) \
+ V(Mips64ModD) \
+ V(Mips64AbsD) \
+ V(Mips64NegD) \
+ V(Mips64SqrtD) \
+ V(Mips64MaxD) \
+ V(Mips64MinD) \
+ V(Mips64Float64RoundDown) \
+ V(Mips64Float64RoundTruncate) \
+ V(Mips64Float64RoundUp) \
+ V(Mips64Float64RoundTiesEven) \
+ V(Mips64Float32RoundDown) \
+ V(Mips64Float32RoundTruncate) \
+ V(Mips64Float32RoundUp) \
+ V(Mips64Float32RoundTiesEven) \
+ V(Mips64CvtSD) \
+ V(Mips64CvtDS) \
+ V(Mips64TruncWD) \
+ V(Mips64RoundWD) \
+ V(Mips64FloorWD) \
+ V(Mips64CeilWD) \
+ V(Mips64TruncWS) \
+ V(Mips64RoundWS) \
+ V(Mips64FloorWS) \
+ V(Mips64CeilWS) \
+ V(Mips64TruncLS) \
+ V(Mips64TruncLD) \
+ V(Mips64TruncUwD) \
+ V(Mips64TruncUwS) \
+ V(Mips64TruncUlS) \
+ V(Mips64TruncUlD) \
+ V(Mips64CvtDW) \
+ V(Mips64CvtSL) \
+ V(Mips64CvtSW) \
+ V(Mips64CvtSUw) \
+ V(Mips64CvtSUl) \
+ V(Mips64CvtDL) \
+ V(Mips64CvtDUw) \
+ V(Mips64CvtDUl) \
+ V(Mips64Lb) \
+ V(Mips64Lbu) \
+ V(Mips64Sb) \
+ V(Mips64Lh) \
+ V(Mips64Ulh) \
+ V(Mips64Lhu) \
+ V(Mips64Ulhu) \
+ V(Mips64Sh) \
+ V(Mips64Ush) \
+ V(Mips64Ld) \
+ V(Mips64Uld) \
+ V(Mips64Lw) \
+ V(Mips64Ulw) \
+ V(Mips64Lwu) \
+ V(Mips64Ulwu) \
+ V(Mips64Sw) \
+ V(Mips64Usw) \
+ V(Mips64Sd) \
+ V(Mips64Usd) \
+ V(Mips64Lwc1) \
+ V(Mips64Ulwc1) \
+ V(Mips64Swc1) \
+ V(Mips64Uswc1) \
+ V(Mips64Ldc1) \
+ V(Mips64Uldc1) \
+ V(Mips64Sdc1) \
+ V(Mips64Usdc1) \
+ V(Mips64BitcastDL) \
+ V(Mips64BitcastLD) \
+ V(Mips64Float64ExtractLowWord32) \
+ V(Mips64Float64ExtractHighWord32) \
+ V(Mips64Float64InsertLowWord32) \
+ V(Mips64Float64InsertHighWord32) \
+ V(Mips64Float32Max) \
+ V(Mips64Float64Max) \
+ V(Mips64Float32Min) \
+ V(Mips64Float64Min) \
+ V(Mips64Float64SilenceNaN) \
+ V(Mips64Push) \
+ V(Mips64Peek) \
+ V(Mips64StoreToStackSlot) \
+ V(Mips64ByteSwap64) \
+ V(Mips64ByteSwap32) \
+ V(Mips64StackClaim) \
+ V(Mips64Seb) \
+ V(Mips64Seh) \
+ V(Mips64Sync) \
+ V(Mips64AssertEqual) \
+ V(Mips64S128Const) \
+ V(Mips64S128Zero) \
+ V(Mips64S128AllOnes) \
+ V(Mips64I32x4Splat) \
+ V(Mips64I32x4ExtractLane) \
+ V(Mips64I32x4ReplaceLane) \
+ V(Mips64I32x4Add) \
+ V(Mips64I32x4Sub) \
+ V(Mips64F64x2Abs) \
+ V(Mips64F64x2Neg) \
+ V(Mips64F32x4Splat) \
+ V(Mips64F32x4ExtractLane) \
+ V(Mips64F32x4ReplaceLane) \
+ V(Mips64F32x4SConvertI32x4) \
+ V(Mips64F32x4UConvertI32x4) \
+ V(Mips64I32x4Mul) \
+ V(Mips64I32x4MaxS) \
+ V(Mips64I32x4MinS) \
+ V(Mips64I32x4Eq) \
+ V(Mips64I32x4Ne) \
+ V(Mips64I32x4Shl) \
+ V(Mips64I32x4ShrS) \
+ V(Mips64I32x4ShrU) \
+ V(Mips64I32x4MaxU) \
+ V(Mips64I32x4MinU) \
+ V(Mips64F64x2Sqrt) \
+ V(Mips64F64x2Add) \
+ V(Mips64F64x2Sub) \
+ V(Mips64F64x2Mul) \
+ V(Mips64F64x2Div) \
+ V(Mips64F64x2Min) \
+ V(Mips64F64x2Max) \
+ V(Mips64F64x2Eq) \
+ V(Mips64F64x2Ne) \
+ V(Mips64F64x2Lt) \
+ V(Mips64F64x2Le) \
+ V(Mips64F64x2Splat) \
+ V(Mips64F64x2ExtractLane) \
+ V(Mips64F64x2ReplaceLane) \
+ V(Mips64F64x2Pmin) \
+ V(Mips64F64x2Pmax) \
+ V(Mips64F64x2Ceil) \
+ V(Mips64F64x2Floor) \
+ V(Mips64F64x2Trunc) \
+ V(Mips64F64x2NearestInt) \
+ V(Mips64F64x2ConvertLowI32x4S) \
+ V(Mips64F64x2ConvertLowI32x4U) \
+ V(Mips64F64x2PromoteLowF32x4) \
+ V(Mips64I64x2Splat) \
+ V(Mips64I64x2ExtractLane) \
+ V(Mips64I64x2ReplaceLane) \
+ V(Mips64I64x2Add) \
+ V(Mips64I64x2Sub) \
+ V(Mips64I64x2Mul) \
+ V(Mips64I64x2Neg) \
+ V(Mips64I64x2Shl) \
+ V(Mips64I64x2ShrS) \
+ V(Mips64I64x2ShrU) \
+ V(Mips64I64x2BitMask) \
+ V(Mips64I64x2Eq) \
+ V(Mips64I64x2Ne) \
+ V(Mips64I64x2GtS) \
+ V(Mips64I64x2GeS) \
+ V(Mips64I64x2Abs) \
+ V(Mips64I64x2SConvertI32x4Low) \
+ V(Mips64I64x2SConvertI32x4High) \
+ V(Mips64I64x2UConvertI32x4Low) \
+ V(Mips64I64x2UConvertI32x4High) \
+ V(Mips64ExtMulLow) \
+ V(Mips64ExtMulHigh) \
+ V(Mips64ExtAddPairwise) \
+ V(Mips64F32x4Abs) \
+ V(Mips64F32x4Neg) \
+ V(Mips64F32x4Sqrt) \
+ V(Mips64F32x4RecipApprox) \
+ V(Mips64F32x4RecipSqrtApprox) \
+ V(Mips64F32x4Add) \
+ V(Mips64F32x4Sub) \
+ V(Mips64F32x4Mul) \
+ V(Mips64F32x4Div) \
+ V(Mips64F32x4Max) \
+ V(Mips64F32x4Min) \
+ V(Mips64F32x4Eq) \
+ V(Mips64F32x4Ne) \
+ V(Mips64F32x4Lt) \
+ V(Mips64F32x4Le) \
+ V(Mips64F32x4Pmin) \
+ V(Mips64F32x4Pmax) \
+ V(Mips64F32x4Ceil) \
+ V(Mips64F32x4Floor) \
+ V(Mips64F32x4Trunc) \
+ V(Mips64F32x4NearestInt) \
+ V(Mips64F32x4DemoteF64x2Zero) \
+ V(Mips64I32x4SConvertF32x4) \
+ V(Mips64I32x4UConvertF32x4) \
+ V(Mips64I32x4Neg) \
+ V(Mips64I32x4GtS) \
+ V(Mips64I32x4GeS) \
+ V(Mips64I32x4GtU) \
+ V(Mips64I32x4GeU) \
+ V(Mips64I32x4Abs) \
+ V(Mips64I32x4BitMask) \
+ V(Mips64I32x4DotI16x8S) \
+ V(Mips64I32x4TruncSatF64x2SZero) \
+ V(Mips64I32x4TruncSatF64x2UZero) \
+ V(Mips64I16x8Splat) \
+ V(Mips64I16x8ExtractLaneU) \
+ V(Mips64I16x8ExtractLaneS) \
+ V(Mips64I16x8ReplaceLane) \
+ V(Mips64I16x8Neg) \
+ V(Mips64I16x8Shl) \
+ V(Mips64I16x8ShrS) \
+ V(Mips64I16x8ShrU) \
+ V(Mips64I16x8Add) \
+ V(Mips64I16x8AddSatS) \
+ V(Mips64I16x8Sub) \
+ V(Mips64I16x8SubSatS) \
+ V(Mips64I16x8Mul) \
+ V(Mips64I16x8MaxS) \
+ V(Mips64I16x8MinS) \
+ V(Mips64I16x8Eq) \
+ V(Mips64I16x8Ne) \
+ V(Mips64I16x8GtS) \
+ V(Mips64I16x8GeS) \
+ V(Mips64I16x8AddSatU) \
+ V(Mips64I16x8SubSatU) \
+ V(Mips64I16x8MaxU) \
+ V(Mips64I16x8MinU) \
+ V(Mips64I16x8GtU) \
+ V(Mips64I16x8GeU) \
+ V(Mips64I16x8RoundingAverageU) \
+ V(Mips64I16x8Abs) \
+ V(Mips64I16x8BitMask) \
+ V(Mips64I16x8Q15MulRSatS) \
+ V(Mips64I8x16Splat) \
+ V(Mips64I8x16ExtractLaneU) \
+ V(Mips64I8x16ExtractLaneS) \
+ V(Mips64I8x16ReplaceLane) \
+ V(Mips64I8x16Neg) \
+ V(Mips64I8x16Shl) \
+ V(Mips64I8x16ShrS) \
+ V(Mips64I8x16Add) \
+ V(Mips64I8x16AddSatS) \
+ V(Mips64I8x16Sub) \
+ V(Mips64I8x16SubSatS) \
+ V(Mips64I8x16MaxS) \
+ V(Mips64I8x16MinS) \
+ V(Mips64I8x16Eq) \
+ V(Mips64I8x16Ne) \
+ V(Mips64I8x16GtS) \
+ V(Mips64I8x16GeS) \
+ V(Mips64I8x16ShrU) \
+ V(Mips64I8x16AddSatU) \
+ V(Mips64I8x16SubSatU) \
+ V(Mips64I8x16MaxU) \
+ V(Mips64I8x16MinU) \
+ V(Mips64I8x16GtU) \
+ V(Mips64I8x16GeU) \
+ V(Mips64I8x16RoundingAverageU) \
+ V(Mips64I8x16Abs) \
+ V(Mips64I8x16Popcnt) \
+ V(Mips64I8x16BitMask) \
+ V(Mips64S128And) \
+ V(Mips64S128Or) \
+ V(Mips64S128Xor) \
+ V(Mips64S128Not) \
+ V(Mips64S128Select) \
+ V(Mips64S128AndNot) \
+ V(Mips64I64x2AllTrue) \
+ V(Mips64I32x4AllTrue) \
+ V(Mips64I16x8AllTrue) \
+ V(Mips64I8x16AllTrue) \
+ V(Mips64V128AnyTrue) \
+ V(Mips64S32x4InterleaveRight) \
+ V(Mips64S32x4InterleaveLeft) \
+ V(Mips64S32x4PackEven) \
+ V(Mips64S32x4PackOdd) \
+ V(Mips64S32x4InterleaveEven) \
+ V(Mips64S32x4InterleaveOdd) \
+ V(Mips64S32x4Shuffle) \
+ V(Mips64S16x8InterleaveRight) \
+ V(Mips64S16x8InterleaveLeft) \
+ V(Mips64S16x8PackEven) \
+ V(Mips64S16x8PackOdd) \
+ V(Mips64S16x8InterleaveEven) \
+ V(Mips64S16x8InterleaveOdd) \
+ V(Mips64S16x4Reverse) \
+ V(Mips64S16x2Reverse) \
+ V(Mips64S8x16InterleaveRight) \
+ V(Mips64S8x16InterleaveLeft) \
+ V(Mips64S8x16PackEven) \
+ V(Mips64S8x16PackOdd) \
+ V(Mips64S8x16InterleaveEven) \
+ V(Mips64S8x16InterleaveOdd) \
+ V(Mips64I8x16Shuffle) \
+ V(Mips64I8x16Swizzle) \
+ V(Mips64S8x16Concat) \
+ V(Mips64S8x8Reverse) \
+ V(Mips64S8x4Reverse) \
+ V(Mips64S8x2Reverse) \
+ V(Mips64S128LoadSplat) \
+ V(Mips64S128Load8x8S) \
+ V(Mips64S128Load8x8U) \
+ V(Mips64S128Load16x4S) \
+ V(Mips64S128Load16x4U) \
+ V(Mips64S128Load32x2S) \
+ V(Mips64S128Load32x2U) \
+ V(Mips64S128Load32Zero) \
+ V(Mips64S128Load64Zero) \
+ V(Mips64S128LoadLane) \
+ V(Mips64S128StoreLane) \
+ V(Mips64MsaLd) \
+ V(Mips64MsaSt) \
+ V(Mips64I32x4SConvertI16x8Low) \
+ V(Mips64I32x4SConvertI16x8High) \
+ V(Mips64I32x4UConvertI16x8Low) \
+ V(Mips64I32x4UConvertI16x8High) \
+ V(Mips64I16x8SConvertI8x16Low) \
+ V(Mips64I16x8SConvertI8x16High) \
+ V(Mips64I16x8SConvertI32x4) \
+ V(Mips64I16x8UConvertI32x4) \
+ V(Mips64I16x8UConvertI8x16Low) \
+ V(Mips64I16x8UConvertI8x16High) \
+ V(Mips64I8x16SConvertI16x8) \
+ V(Mips64I8x16UConvertI16x8) \
+ V(Mips64StoreCompressTagged) \
+ V(Mips64Word64AtomicLoadUint64) \
+ V(Mips64Word64AtomicStoreWord64) \
+ V(Mips64Word64AtomicAddUint64) \
+ V(Mips64Word64AtomicSubUint64) \
+ V(Mips64Word64AtomicAndUint64) \
+ V(Mips64Word64AtomicOrUint64) \
+ V(Mips64Word64AtomicXorUint64) \
+ V(Mips64Word64AtomicExchangeUint64) \
V(Mips64Word64AtomicCompareExchangeUint64)
// Addressing modes represent the "shape" of inputs to an instruction.
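
This header now defines an (empty for this port) TARGET_ARCH_OPCODE_WITH_MEMORY_ACCESS_MODE_LIST and chains it into TARGET_ARCH_OPCODE_LIST; the ppc and riscv64 headers below get the same treatment. Any opcode later added to the memory-access-mode list is therefore picked up automatically wherever the main list is expanded. A minimal standalone sketch of that X-macro chaining, using invented demo names rather than V8's actual lists:

    #include <cstdio>

    // Invented demo lists that mirror the chaining used in the header.
    #define DEMO_OPCODE_WITH_MEMORY_ACCESS_MODE_LIST(V) /* none yet */

    #define DEMO_OPCODE_LIST(V)                   \
      DEMO_OPCODE_WITH_MEMORY_ACCESS_MODE_LIST(V) \
      V(Add)                                      \
      V(Sub)

    // Each V(Name) expands to one enumerator.
    enum DemoOpcode {
    #define DECLARE_OPCODE(Name) kDemo##Name,
      DEMO_OPCODE_LIST(DECLARE_OPCODE)
    #undef DECLARE_OPCODE
      kDemoOpcodeCount
    };

    int main() {
      std::printf("demo opcodes: %d\n", static_cast<int>(kDemoOpcodeCount));
      return 0;
    }
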
diff --git a/deps/v8/src/compiler/backend/mips64/instruction-scheduler-mips64.cc b/deps/v8/src/compiler/backend/mips64/instruction-scheduler-mips64.cc
index f79e334ed6..734009ca30 100644
--- a/deps/v8/src/compiler/backend/mips64/instruction-scheduler-mips64.cc
+++ b/deps/v8/src/compiler/backend/mips64/instruction-scheduler-mips64.cc
@@ -1301,7 +1301,7 @@ int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
return AssembleArchJumpLatency();
case kArchTableSwitch:
return AssembleArchTableSwitchLatency();
- case kArchAbortCSAAssert:
+ case kArchAbortCSADcheck:
return CallLatency() + 1;
case kArchDebugBreak:
return 1;
diff --git a/deps/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc b/deps/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc
index 192f82c9db..93c123bd65 100644
--- a/deps/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc
+++ b/deps/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc
@@ -311,14 +311,7 @@ static void VisitBinop(InstructionSelector* selector, Node* node,
inputs[input_count++] = g.UseOperand(m.right().node(), opcode);
}
- if (cont->IsDeoptimize()) {
- // If we can deoptimize as a result of the binop, we need to make sure that
- // the deopt inputs are not overwritten by the binop result. One way
- // to achieve that is to declare the output register as same-as-first.
- outputs[output_count++] = g.DefineSameAsFirst(node);
- } else {
- outputs[output_count++] = g.DefineAsRegister(node);
- }
+ outputs[output_count++] = g.DefineAsRegister(node);
DCHECK_NE(0u, input_count);
DCHECK_EQ(1u, output_count);
@@ -356,9 +349,9 @@ void InstructionSelector::VisitStackSlot(Node* node) {
sequence()->AddImmediate(Constant(slot)), 0, nullptr);
}
-void InstructionSelector::VisitAbortCSAAssert(Node* node) {
+void InstructionSelector::VisitAbortCSADcheck(Node* node) {
Mips64OperandGenerator g(this);
- Emit(kArchAbortCSAAssert, g.NoOutput(), g.UseFixed(node->InputAt(0), a0));
+ Emit(kArchAbortCSADcheck, g.NoOutput(), g.UseFixed(node->InputAt(0), a0));
}
void EmitLoad(InstructionSelector* selector, Node* node, InstructionCode opcode,
@@ -498,7 +491,7 @@ void InstructionSelector::VisitLoad(Node* node) {
opcode = load_rep.IsUnsigned() ? kMips64Lhu : kMips64Lh;
break;
case MachineRepresentation::kWord32:
- opcode = load_rep.IsUnsigned() ? kMips64Lwu : kMips64Lw;
+ opcode = kMips64Lw;
break;
case MachineRepresentation::kTaggedSigned: // Fall through.
case MachineRepresentation::kTaggedPointer: // Fall through.
@@ -854,7 +847,7 @@ void InstructionSelector::VisitWord64Shl(Node* node) {
m.right().IsInRange(32, 63) && CanCover(node, m.left().node())) {
// There's no need to sign/zero-extend to 64-bit if we shift out the upper
// 32 bits anyway.
- Emit(kMips64Dshl, g.DefineSameAsFirst(node),
+ Emit(kMips64Dshl, g.DefineAsRegister(node),
g.UseRegister(m.left().node()->InputAt(0)),
g.UseImmediate(m.right().node()));
return;
@@ -1446,44 +1439,49 @@ void InstructionSelector::VisitBitcastWord32ToWord64(Node* node) {
}
void InstructionSelector::VisitChangeInt32ToInt64(Node* node) {
+  // On MIPS64, int32 values should already be sign-extended to 64 bits, so
+  // there is no need to sign-extend them here.
+  // However, when calling a host function in the simulator, if the function
+  // returns an int32 value the simulator does not sign-extend it to int64,
+  // because it cannot tell whether the function returns an int32 or an int64.
+#ifdef USE_SIMULATOR
Node* value = node->InputAt(0);
- if ((value->opcode() == IrOpcode::kLoad ||
- value->opcode() == IrOpcode::kLoadImmutable) &&
- CanCover(node, value)) {
- // Generate sign-extending load.
- LoadRepresentation load_rep = LoadRepresentationOf(value->op());
- InstructionCode opcode = kArchNop;
- switch (load_rep.representation()) {
- case MachineRepresentation::kBit: // Fall through.
- case MachineRepresentation::kWord8:
- opcode = load_rep.IsUnsigned() ? kMips64Lbu : kMips64Lb;
- break;
- case MachineRepresentation::kWord16:
- opcode = load_rep.IsUnsigned() ? kMips64Lhu : kMips64Lh;
- break;
- case MachineRepresentation::kWord32:
- opcode = kMips64Lw;
- break;
- default:
- UNREACHABLE();
- }
- EmitLoad(this, value, opcode, node);
- } else {
+ if (value->opcode() == IrOpcode::kCall) {
Mips64OperandGenerator g(this);
- Emit(kMips64Shl, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)),
+ Emit(kMips64Shl, g.DefineAsRegister(node), g.UseRegister(value),
g.TempImmediate(0));
+ return;
}
+#endif
+ EmitIdentity(node);
}
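
The rewritten VisitChangeInt32ToInt64 relies on the convention stated in the comment: on MIPS64 a 32-bit value held in a 64-bit register is kept in sign-extended form, so the conversion can be elided (EmitIdentity), with the simulator-call case as the only exception. As a plain-C++ reminder of what sign-extended form means (illustration only, not V8 code):

    #include <cassert>
    #include <cstdint>

    int main() {
      // Sign extension replicates bit 31 into bits 32..63, so the widened
      // pattern still reads back as the original 32-bit value.
      int32_t narrow = -5;
      int64_t widened = static_cast<int64_t>(narrow);
      assert(static_cast<uint64_t>(widened) == 0xFFFFFFFFFFFFFFFBull);
      assert(static_cast<int32_t>(widened) == narrow);
      return 0;
    }
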
bool InstructionSelector::ZeroExtendsWord32ToWord64NoPhis(Node* node) {
DCHECK_NE(node->opcode(), IrOpcode::kPhi);
switch (node->opcode()) {
- // 32-bit operations will write their result in a 64 bit register,
- // clearing the top 32 bits of the destination register.
- case IrOpcode::kUint32Div:
- case IrOpcode::kUint32Mod:
- case IrOpcode::kUint32MulHigh:
+ // Comparisons only emit 0/1, so the upper 32 bits must be zero.
+ case IrOpcode::kWord32Equal:
+ case IrOpcode::kInt32LessThan:
+ case IrOpcode::kInt32LessThanOrEqual:
+ case IrOpcode::kUint32LessThan:
+ case IrOpcode::kUint32LessThanOrEqual:
return true;
+ case IrOpcode::kWord32And: {
+ Int32BinopMatcher m(node);
+ if (m.right().HasResolvedValue()) {
+ uint32_t mask = m.right().ResolvedValue();
+ return is_uint31(mask);
+ }
+ return false;
+ }
+ case IrOpcode::kWord32Shr: {
+ Int32BinopMatcher m(node);
+ if (m.right().HasResolvedValue()) {
+ uint8_t sa = m.right().ResolvedValue() & 0x1f;
+ return sa > 0;
+ }
+ return false;
+ }
case IrOpcode::kLoad:
case IrOpcode::kLoadImmutable: {
LoadRepresentation load_rep = LoadRepresentationOf(node->op());
@@ -1491,7 +1489,6 @@ bool InstructionSelector::ZeroExtendsWord32ToWord64NoPhis(Node* node) {
switch (load_rep.representation()) {
case MachineRepresentation::kWord8:
case MachineRepresentation::kWord16:
- case MachineRepresentation::kWord32:
return true;
default:
return false;
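
The new ZeroExtendsWord32ToWord64NoPhis cases rest on two small arithmetic facts: an AND whose constant mask passes is_uint31 leaves bit 31 clear (so the implicit sign extension of 32-bit results on MIPS64 fills the upper half with zeros), and a logical right shift by at least one bit shifts a zero into bit 31. A standalone check of both facts (plain C++, not V8 code):

    #include <cassert>
    #include <cstdint>

    int main() {
      uint32_t any_value = 0xFFFFFFFFu;    // worst case: every bit set
      uint32_t uint31_mask = 0x7FFFFFFFu;  // is_uint31(mask) holds

      // Word32And with a uint31 mask: bit 31 of the result is always clear.
      assert(((any_value & uint31_mask) >> 31) == 0u);

      // Word32Shr with sa > 0: zeros come in from the top, so bit 31 is clear.
      assert(((any_value >> 1) >> 31) == 0u);
      return 0;
    }
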
@@ -1507,10 +1504,24 @@ bool InstructionSelector::ZeroExtendsWord32ToWord64NoPhis(Node* node) {
void InstructionSelector::VisitChangeUint32ToUint64(Node* node) {
Mips64OperandGenerator g(this);
Node* value = node->InputAt(0);
+ IrOpcode::Value opcode = value->opcode();
+
+ if (opcode == IrOpcode::kLoad || opcode == IrOpcode::kUnalignedLoad) {
+ LoadRepresentation load_rep = LoadRepresentationOf(value->op());
+ ArchOpcode arch_opcode =
+ opcode == IrOpcode::kUnalignedLoad ? kMips64Ulwu : kMips64Lwu;
+ if (load_rep.IsUnsigned() &&
+ load_rep.representation() == MachineRepresentation::kWord32) {
+ EmitLoad(this, value, arch_opcode, node);
+ return;
+ }
+ }
+
if (ZeroExtendsWord32ToWord64(value)) {
- Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(value));
+ EmitIdentity(node);
return;
}
+
Emit(kMips64Dext, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)),
g.TempImmediate(0), g.TempImmediate(32));
}
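
VisitChangeUint32ToUint64 now emits a zero-extending load (Lwu or Ulwu) when the input is an unsigned word32 load, elides the conversion entirely when the value is already known to be zero-extended, and otherwise falls back to Dext with position 0 and size 32. A software emulation of the bit-field extraction Dext is assumed to perform (the MIPS64 manual is authoritative; this is only an illustration):

    #include <cassert>
    #include <cstdint>

    // Assumed semantics: take `size` bits starting at bit `pos`, right-justify
    // them, and zero all remaining upper bits of the destination.
    uint64_t EmulateDext(uint64_t src, unsigned pos, unsigned size) {
      uint64_t mask = (size >= 64) ? ~uint64_t{0} : ((uint64_t{1} << size) - 1);
      return (src >> pos) & mask;
    }

    int main() {
      // With pos = 0 and size = 32 this is exactly a 32-bit zero extension.
      uint64_t sign_extended = 0xFFFFFFFF80000001ull;
      assert(EmulateDext(sign_extended, 0, 32) == 0x0000000080000001ull);
      return 0;
    }
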
@@ -1528,7 +1539,7 @@ void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
Int64BinopMatcher m(value);
if (m.right().IsInRange(32, 63)) {
// After smi untagging no need for truncate. Combine sequence.
- Emit(kMips64Dsar, g.DefineSameAsFirst(node),
+ Emit(kMips64Dsar, g.DefineAsRegister(node),
g.UseRegister(m.left().node()),
g.UseImmediate(m.right().node()));
return;
@@ -1540,8 +1551,8 @@ void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
break;
}
}
- Emit(kMips64Ext, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)),
- g.TempImmediate(0), g.TempImmediate(32));
+ Emit(kMips64Shl, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)),
+ g.TempImmediate(0));
}
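
The combine above relies on the fact that an arithmetic right shift of a 64-bit value by 32 to 63 bits (e.g. Smi untagging) already yields a result representable in 32 bits, held in sign-extended form, so the explicit truncation can be dropped. A scalar check of that fact (plain C++, not V8 code):

    #include <cassert>
    #include <cstdint>

    int main() {
      int64_t tagged = int64_t{123} << 32;  // a small integer in the upper half
      int64_t shifted = tagged >> 32;       // what the combined Dsar produces
      assert(shifted == 123);
      // Already a valid int32, so no separate TruncateInt64ToInt32 is needed.
      assert(shifted == static_cast<int32_t>(shifted));
      return 0;
    }
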
void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) {
@@ -1836,7 +1847,7 @@ void InstructionSelector::VisitUnalignedLoad(Node* node) {
opcode = load_rep.IsUnsigned() ? kMips64Ulhu : kMips64Ulh;
break;
case MachineRepresentation::kWord32:
- opcode = load_rep.IsUnsigned() ? kMips64Ulwu : kMips64Ulw;
+ opcode = kMips64Ulw;
break;
case MachineRepresentation::kTaggedSigned: // Fall through.
case MachineRepresentation::kTaggedPointer: // Fall through.
diff --git a/deps/v8/src/compiler/backend/ppc/code-generator-ppc.cc b/deps/v8/src/compiler/backend/ppc/code-generator-ppc.cc
index 0bf29ba686..b91f6209f2 100644
--- a/deps/v8/src/compiler/backend/ppc/code-generator-ppc.cc
+++ b/deps/v8/src/compiler/backend/ppc/code-generator-ppc.cc
@@ -564,69 +564,35 @@ Condition FlagsConditionToCondition(FlagsCondition condition, ArchOpcode op) {
__ sync(); \
} while (0)
-#define ASSEMBLE_ATOMIC_BINOP(bin_inst, load_inst, store_inst) \
+#define ASSEMBLE_ATOMIC_BINOP(bin_inst, _type) \
do { \
- MemOperand operand = MemOperand(i.InputRegister(0), i.InputRegister(1)); \
- Label binop; \
- __ lwsync(); \
- __ bind(&binop); \
- __ load_inst(i.OutputRegister(), operand); \
- __ bin_inst(kScratchReg, i.OutputRegister(), i.InputRegister(2)); \
- __ store_inst(kScratchReg, operand); \
- __ bne(&binop, cr0); \
- __ sync(); \
- } while (false)
-
-#define ASSEMBLE_ATOMIC_BINOP_SIGN_EXT(bin_inst, load_inst, store_inst, \
- ext_instr) \
- do { \
- MemOperand operand = MemOperand(i.InputRegister(0), i.InputRegister(1)); \
- Label binop; \
- __ lwsync(); \
- __ bind(&binop); \
- __ load_inst(i.OutputRegister(), operand); \
- __ ext_instr(i.OutputRegister(), i.OutputRegister()); \
- __ bin_inst(kScratchReg, i.OutputRegister(), i.InputRegister(2)); \
- __ store_inst(kScratchReg, operand); \
- __ bne(&binop, cr0); \
- __ sync(); \
- } while (false)
-
-#define ASSEMBLE_ATOMIC_COMPARE_EXCHANGE(cmp_inst, load_inst, store_inst, \
- input_ext) \
- do { \
- MemOperand operand = MemOperand(i.InputRegister(0), i.InputRegister(1)); \
- Label loop; \
- Label exit; \
- __ input_ext(r0, i.InputRegister(2)); \
- __ lwsync(); \
- __ bind(&loop); \
- __ load_inst(i.OutputRegister(), operand); \
- __ cmp_inst(i.OutputRegister(), r0, cr0); \
- __ bne(&exit, cr0); \
- __ store_inst(i.InputRegister(3), operand); \
- __ bne(&loop, cr0); \
- __ bind(&exit); \
- __ sync(); \
- } while (false)
-
-#define ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_SIGN_EXT(cmp_inst, load_inst, \
- store_inst, ext_instr) \
- do { \
- MemOperand operand = MemOperand(i.InputRegister(0), i.InputRegister(1)); \
- Label loop; \
- Label exit; \
- __ ext_instr(r0, i.InputRegister(2)); \
- __ lwsync(); \
- __ bind(&loop); \
- __ load_inst(i.OutputRegister(), operand); \
- __ ext_instr(i.OutputRegister(), i.OutputRegister()); \
- __ cmp_inst(i.OutputRegister(), r0, cr0); \
- __ bne(&exit, cr0); \
- __ store_inst(i.InputRegister(3), operand); \
- __ bne(&loop, cr0); \
- __ bind(&exit); \
- __ sync(); \
+ auto bin_op = [&](Register dst, Register lhs, Register rhs) { \
+ if (std::is_signed<_type>::value) { \
+ switch (sizeof(_type)) { \
+ case 1: \
+ __ extsb(dst, lhs); \
+ break; \
+ case 2: \
+ __ extsh(dst, lhs); \
+ break; \
+ case 4: \
+ __ extsw(dst, lhs); \
+ break; \
+ case 8: \
+ break; \
+ default: \
+ UNREACHABLE(); \
+ } \
+ __ bin_inst(dst, dst, rhs); \
+ } else { \
+ __ bin_inst(dst, lhs, rhs); \
+ } \
+ }; \
+ MemOperand dst_operand = \
+ MemOperand(i.InputRegister(0), i.InputRegister(1)); \
+ __ AtomicOps<_type>(dst_operand, i.InputRegister(2), i.OutputRegister(), \
+ kScratchReg, bin_op); \
+ break; \
} while (false)
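
The rewritten ASSEMBLE_ATOMIC_BINOP folds the former _SIGN_EXT variants into a single macro by dispatching on the C++ element type: for signed sub-word types the loaded value is sign-extended (extsb/extsh/extsw) before the binary operation is applied. A scalar model of that dispatch, assuming nothing about the AtomicOps helper beyond what the macro shows:

    #include <cassert>
    #include <cstdint>
    #include <type_traits>

    // Signed sub-word values are sign-extended before applying the operation;
    // unsigned (and 64-bit) values are used as loaded.
    template <typename T, typename BinOp>
    int64_t AtomicBinopModel(int64_t loaded, int64_t rhs, BinOp op) {
      if (std::is_signed<T>::value && sizeof(T) < 8) {
        loaded = static_cast<int64_t>(static_cast<T>(loaded));
      }
      return op(loaded, rhs);
    }

    int main() {
      auto add = [](int64_t a, int64_t b) { return a + b; };
      // 0xFFFF read as int16_t is -1, so an Int16 atomic add of 1 yields 0...
      assert(AtomicBinopModel<int16_t>(0xFFFF, 1, add) == 0);
      // ...while the Uint16 variant keeps 0xFFFF and yields 0x10000.
      assert(AtomicBinopModel<uint16_t>(0xFFFF, 1, add) == 0x10000);
      return 0;
    }
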
void CodeGenerator::AssembleDeconstructFrame() {
@@ -888,8 +854,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kArchPrepareCallCFunction: {
- int const num_parameters = MiscField::decode(instr->opcode());
- __ PrepareCallCFunction(num_parameters, kScratchReg);
+ int const num_gp_parameters = ParamField::decode(instr->opcode());
+ int const num_fp_parameters = FPParamField::decode(instr->opcode());
+ __ PrepareCallCFunction(num_gp_parameters + num_fp_parameters,
+ kScratchReg);
// Frame alignment requires using FP-relative frame addressing.
frame_access_state()->SetFrameAccessToFP();
break;
@@ -932,8 +900,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
#endif
break;
case kArchCallCFunction: {
- int misc_field = MiscField::decode(instr->opcode());
- int num_parameters = misc_field;
+ int const num_gp_parameters = ParamField::decode(instr->opcode());
+ int const fp_param_field = FPParamField::decode(instr->opcode());
+ int num_fp_parameters = fp_param_field;
bool has_function_descriptor = false;
int offset = 20 * kInstrSize;
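
kArchPrepareCallCFunction and kArchCallCFunction now decode separate general-purpose and floating-point parameter counts (ParamField / FPParamField) from the instruction code instead of a single MiscField count, with the AIX function-descriptor flag still packed into the same word. The underlying technique is ordinary shift-and-mask bit fields; the widths below are invented for illustration and are not V8's actual layout:

    #include <cassert>
    #include <cstdint>

    // Invented layout: bits 0..5 = GP param count, bits 6..11 = FP param count,
    // bit 12 = has-function-descriptor flag. V8 uses its own BitField widths.
    constexpr uint32_t EncodeParams(uint32_t gp, uint32_t fp, bool descriptor) {
      return (gp & 0x3F) | ((fp & 0x3F) << 6) | (uint32_t{descriptor} << 12);
    }
    constexpr uint32_t DecodeGP(uint32_t field) { return field & 0x3F; }
    constexpr uint32_t DecodeFP(uint32_t field) { return (field >> 6) & 0x3F; }
    constexpr bool DecodeHasDescriptor(uint32_t field) {
      return ((field >> 12) & 1) != 0;
    }

    int main() {
      uint32_t field = EncodeParams(3, 2, true);
      assert(DecodeGP(field) == 3);
      assert(DecodeFP(field) == 2);
      assert(DecodeHasDescriptor(field));
      return 0;
    }
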
@@ -954,10 +923,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
#if ABI_USES_FUNCTION_DESCRIPTORS
// AIX/PPC64BE Linux uses a function descriptor
- int kNumParametersMask = kHasFunctionDescriptorBitMask - 1;
- num_parameters = kNumParametersMask & misc_field;
+ int kNumFPParametersMask = kHasFunctionDescriptorBitMask - 1;
+ num_fp_parameters = kNumFPParametersMask & fp_param_field;
has_function_descriptor =
- (misc_field & kHasFunctionDescriptorBitMask) != 0;
+ (fp_param_field & kHasFunctionDescriptorBitMask) != 0;
// AIX may emit 2 extra Load instructions under CallCFunctionHelper
// due to having function descriptor.
if (has_function_descriptor) {
@@ -980,10 +949,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
#endif // V8_ENABLE_WEBASSEMBLY
if (instr->InputAt(0)->IsImmediate()) {
ExternalReference ref = i.InputExternalReference(0);
- __ CallCFunction(ref, num_parameters, has_function_descriptor);
+ __ CallCFunction(ref, num_gp_parameters, num_fp_parameters,
+ has_function_descriptor);
} else {
Register func = i.InputRegister(0);
- __ CallCFunction(func, num_parameters, has_function_descriptor);
+ __ CallCFunction(func, num_gp_parameters, num_fp_parameters,
+ has_function_descriptor);
}
// TODO(miladfar): In the above block, kScratchReg must be populated with
// the strictly-correct PC, which is the return address at this spot. The
@@ -1026,13 +997,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
AssembleArchTableSwitch(instr);
DCHECK_EQ(LeaveRC, i.OutputRCBit());
break;
- case kArchAbortCSAAssert:
+ case kArchAbortCSADcheck:
DCHECK(i.InputRegister(0) == r4);
{
// We don't actually want to generate a pile of code for this, so just
// claim there is a stack frame, without generating one.
- FrameScope scope(tasm(), StackFrame::NONE);
- __ Call(isolate()->builtins()->code_handle(Builtin::kAbortCSAAssert),
+ FrameScope scope(tasm(), StackFrame::NO_FRAME_TYPE);
+ __ Call(isolate()->builtins()->code_handle(Builtin::kAbortCSADcheck),
RelocInfo::CODE_TARGET);
}
__ stop();
@@ -2015,66 +1986,94 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kAtomicLoadInt16:
UNREACHABLE();
case kAtomicExchangeInt8:
- ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(lbarx, stbcx);
- __ extsb(i.OutputRegister(0), i.OutputRegister(0));
+ __ AtomicExchange<int8_t>(
+ MemOperand(i.InputRegister(0), i.InputRegister(1)),
+ i.InputRegister(2), i.OutputRegister());
break;
case kPPC_AtomicExchangeUint8:
- ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(lbarx, stbcx);
+ __ AtomicExchange<uint8_t>(
+ MemOperand(i.InputRegister(0), i.InputRegister(1)),
+ i.InputRegister(2), i.OutputRegister());
break;
case kAtomicExchangeInt16:
- ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(lharx, sthcx);
- __ extsh(i.OutputRegister(0), i.OutputRegister(0));
+ __ AtomicExchange<int16_t>(
+ MemOperand(i.InputRegister(0), i.InputRegister(1)),
+ i.InputRegister(2), i.OutputRegister());
break;
case kPPC_AtomicExchangeUint16:
- ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(lharx, sthcx);
+ __ AtomicExchange<uint16_t>(
+ MemOperand(i.InputRegister(0), i.InputRegister(1)),
+ i.InputRegister(2), i.OutputRegister());
break;
case kPPC_AtomicExchangeWord32:
- ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(lwarx, stwcx);
+ __ AtomicExchange<uint32_t>(
+ MemOperand(i.InputRegister(0), i.InputRegister(1)),
+ i.InputRegister(2), i.OutputRegister());
break;
case kPPC_AtomicExchangeWord64:
- ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(ldarx, stdcx);
+ __ AtomicExchange<uint64_t>(
+ MemOperand(i.InputRegister(0), i.InputRegister(1)),
+ i.InputRegister(2), i.OutputRegister());
break;
case kAtomicCompareExchangeInt8:
- ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_SIGN_EXT(CmpS64, lbarx, stbcx, extsb);
+ __ AtomicCompareExchange<int8_t>(
+ MemOperand(i.InputRegister(0), i.InputRegister(1)),
+ i.InputRegister(2), i.InputRegister(3), i.OutputRegister(),
+ kScratchReg);
break;
case kPPC_AtomicCompareExchangeUint8:
- ASSEMBLE_ATOMIC_COMPARE_EXCHANGE(CmpS64, lbarx, stbcx, ZeroExtByte);
+ __ AtomicCompareExchange<uint8_t>(
+ MemOperand(i.InputRegister(0), i.InputRegister(1)),
+ i.InputRegister(2), i.InputRegister(3), i.OutputRegister(),
+ kScratchReg);
break;
case kAtomicCompareExchangeInt16:
- ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_SIGN_EXT(CmpS64, lharx, sthcx, extsh);
+ __ AtomicCompareExchange<int16_t>(
+ MemOperand(i.InputRegister(0), i.InputRegister(1)),
+ i.InputRegister(2), i.InputRegister(3), i.OutputRegister(),
+ kScratchReg);
break;
case kPPC_AtomicCompareExchangeUint16:
- ASSEMBLE_ATOMIC_COMPARE_EXCHANGE(CmpS64, lharx, sthcx, ZeroExtHalfWord);
+ __ AtomicCompareExchange<uint16_t>(
+ MemOperand(i.InputRegister(0), i.InputRegister(1)),
+ i.InputRegister(2), i.InputRegister(3), i.OutputRegister(),
+ kScratchReg);
break;
case kPPC_AtomicCompareExchangeWord32:
- ASSEMBLE_ATOMIC_COMPARE_EXCHANGE(cmpw, lwarx, stwcx, ZeroExtWord32);
+ __ AtomicCompareExchange<uint32_t>(
+ MemOperand(i.InputRegister(0), i.InputRegister(1)),
+ i.InputRegister(2), i.InputRegister(3), i.OutputRegister(),
+ kScratchReg);
break;
case kPPC_AtomicCompareExchangeWord64:
- ASSEMBLE_ATOMIC_COMPARE_EXCHANGE(CmpS64, ldarx, stdcx, mr);
+ __ AtomicCompareExchange<uint64_t>(
+ MemOperand(i.InputRegister(0), i.InputRegister(1)),
+ i.InputRegister(2), i.InputRegister(3), i.OutputRegister(),
+ kScratchReg);
break;
-#define ATOMIC_BINOP_CASE(op, inst) \
- case kPPC_Atomic##op##Int8: \
- ASSEMBLE_ATOMIC_BINOP_SIGN_EXT(inst, lbarx, stbcx, extsb); \
- break; \
- case kPPC_Atomic##op##Uint8: \
- ASSEMBLE_ATOMIC_BINOP(inst, lbarx, stbcx); \
- break; \
- case kPPC_Atomic##op##Int16: \
- ASSEMBLE_ATOMIC_BINOP_SIGN_EXT(inst, lharx, sthcx, extsh); \
- break; \
- case kPPC_Atomic##op##Uint16: \
- ASSEMBLE_ATOMIC_BINOP(inst, lharx, sthcx); \
- break; \
- case kPPC_Atomic##op##Int32: \
- ASSEMBLE_ATOMIC_BINOP_SIGN_EXT(inst, lwarx, stwcx, extsw); \
- break; \
- case kPPC_Atomic##op##Uint32: \
- ASSEMBLE_ATOMIC_BINOP(inst, lwarx, stwcx); \
- break; \
- case kPPC_Atomic##op##Int64: \
- case kPPC_Atomic##op##Uint64: \
- ASSEMBLE_ATOMIC_BINOP(inst, ldarx, stdcx); \
+#define ATOMIC_BINOP_CASE(op, inst) \
+ case kPPC_Atomic##op##Int8: \
+ ASSEMBLE_ATOMIC_BINOP(inst, int8_t); \
+ break; \
+ case kPPC_Atomic##op##Uint8: \
+ ASSEMBLE_ATOMIC_BINOP(inst, uint8_t); \
+ break; \
+ case kPPC_Atomic##op##Int16: \
+ ASSEMBLE_ATOMIC_BINOP(inst, int16_t); \
+ break; \
+ case kPPC_Atomic##op##Uint16: \
+ ASSEMBLE_ATOMIC_BINOP(inst, uint16_t); \
+ break; \
+ case kPPC_Atomic##op##Int32: \
+ ASSEMBLE_ATOMIC_BINOP(inst, int32_t); \
+ break; \
+ case kPPC_Atomic##op##Uint32: \
+ ASSEMBLE_ATOMIC_BINOP(inst, uint32_t); \
+ break; \
+ case kPPC_Atomic##op##Int64: \
+ case kPPC_Atomic##op##Uint64: \
+ ASSEMBLE_ATOMIC_BINOP(inst, uint64_t); \
break;
ATOMIC_BINOP_CASE(Add, add)
ATOMIC_BINOP_CASE(Sub, sub)
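
The hand-rolled larx/stcx loops for exchange and compare-exchange are replaced by templated macro-assembler helpers (AtomicExchange<T>, AtomicCompareExchange<T>). Functionally, a compare-exchange returns the old value and stores the replacement only when the old value matched the expected one; the std::atomic snippet below is a behavioural reference for that contract, not what the PPC code emits:

    #include <atomic>
    #include <cassert>
    #include <cstdint>

    template <typename T>
    T CompareExchangeModel(std::atomic<T>& slot, T expected, T desired) {
      T observed = expected;
      slot.compare_exchange_strong(observed, desired);
      return observed;  // the previous value, mirroring OutputRegister()
    }

    int main() {
      std::atomic<uint32_t> slot{42};
      // Mismatch: nothing is stored and the old value (42) is returned.
      assert(CompareExchangeModel<uint32_t>(slot, 7u, 99u) == 42u);
      assert(slot.load() == 42u);
      // Match: the replacement is stored.
      assert(CompareExchangeModel<uint32_t>(slot, 42u, 99u) == 42u);
      assert(slot.load() == 99u);
      return 0;
    }
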
diff --git a/deps/v8/src/compiler/backend/ppc/instruction-codes-ppc.h b/deps/v8/src/compiler/backend/ppc/instruction-codes-ppc.h
index 4182e8b71b..4f9003257f 100644
--- a/deps/v8/src/compiler/backend/ppc/instruction-codes-ppc.h
+++ b/deps/v8/src/compiler/backend/ppc/instruction-codes-ppc.h
@@ -11,406 +11,411 @@ namespace compiler {
// PPC-specific opcodes that specify which assembly sequence to emit.
// Most opcodes specify a single instruction.
-#define TARGET_ARCH_OPCODE_LIST(V) \
- V(PPC_Peek) \
- V(PPC_Sync) \
- V(PPC_And) \
- V(PPC_AndComplement) \
- V(PPC_Or) \
- V(PPC_OrComplement) \
- V(PPC_Xor) \
- V(PPC_ShiftLeft32) \
- V(PPC_ShiftLeft64) \
- V(PPC_ShiftLeftPair) \
- V(PPC_ShiftRight32) \
- V(PPC_ShiftRight64) \
- V(PPC_ShiftRightPair) \
- V(PPC_ShiftRightAlg32) \
- V(PPC_ShiftRightAlg64) \
- V(PPC_ShiftRightAlgPair) \
- V(PPC_RotRight32) \
- V(PPC_RotRight64) \
- V(PPC_Not) \
- V(PPC_RotLeftAndMask32) \
- V(PPC_RotLeftAndClear64) \
- V(PPC_RotLeftAndClearLeft64) \
- V(PPC_RotLeftAndClearRight64) \
- V(PPC_Add32) \
- V(PPC_Add64) \
- V(PPC_AddWithOverflow32) \
- V(PPC_AddPair) \
- V(PPC_AddDouble) \
- V(PPC_Sub) \
- V(PPC_SubWithOverflow32) \
- V(PPC_SubPair) \
- V(PPC_SubDouble) \
- V(PPC_Mul32) \
- V(PPC_Mul32WithHigh32) \
- V(PPC_Mul64) \
- V(PPC_MulHigh32) \
- V(PPC_MulHighU32) \
- V(PPC_MulPair) \
- V(PPC_MulDouble) \
- V(PPC_Div32) \
- V(PPC_Div64) \
- V(PPC_DivU32) \
- V(PPC_DivU64) \
- V(PPC_DivDouble) \
- V(PPC_Mod32) \
- V(PPC_Mod64) \
- V(PPC_ModU32) \
- V(PPC_ModU64) \
- V(PPC_ModDouble) \
- V(PPC_Neg) \
- V(PPC_NegDouble) \
- V(PPC_SqrtDouble) \
- V(PPC_FloorDouble) \
- V(PPC_CeilDouble) \
- V(PPC_TruncateDouble) \
- V(PPC_RoundDouble) \
- V(PPC_MaxDouble) \
- V(PPC_MinDouble) \
- V(PPC_AbsDouble) \
- V(PPC_Cntlz32) \
- V(PPC_Cntlz64) \
- V(PPC_Popcnt32) \
- V(PPC_Popcnt64) \
- V(PPC_Cmp32) \
- V(PPC_Cmp64) \
- V(PPC_CmpDouble) \
- V(PPC_Tst32) \
- V(PPC_Tst64) \
- V(PPC_Push) \
- V(PPC_PushFrame) \
- V(PPC_StoreToStackSlot) \
- V(PPC_ExtendSignWord8) \
- V(PPC_ExtendSignWord16) \
- V(PPC_ExtendSignWord32) \
- V(PPC_Uint32ToUint64) \
- V(PPC_Int64ToInt32) \
- V(PPC_Int64ToFloat32) \
- V(PPC_Int64ToDouble) \
- V(PPC_Uint64ToFloat32) \
- V(PPC_Uint64ToDouble) \
- V(PPC_Int32ToFloat32) \
- V(PPC_Int32ToDouble) \
- V(PPC_Uint32ToFloat32) \
- V(PPC_Float32ToInt32) \
- V(PPC_Float32ToUint32) \
- V(PPC_Uint32ToDouble) \
- V(PPC_Float32ToDouble) \
- V(PPC_Float64SilenceNaN) \
- V(PPC_DoubleToInt32) \
- V(PPC_DoubleToUint32) \
- V(PPC_DoubleToInt64) \
- V(PPC_DoubleToUint64) \
- V(PPC_DoubleToFloat32) \
- V(PPC_DoubleExtractLowWord32) \
- V(PPC_DoubleExtractHighWord32) \
- V(PPC_DoubleInsertLowWord32) \
- V(PPC_DoubleInsertHighWord32) \
- V(PPC_DoubleConstruct) \
- V(PPC_BitcastInt32ToFloat32) \
- V(PPC_BitcastFloat32ToInt32) \
- V(PPC_BitcastInt64ToDouble) \
- V(PPC_BitcastDoubleToInt64) \
- V(PPC_LoadWordS8) \
- V(PPC_LoadWordU8) \
- V(PPC_LoadWordS16) \
- V(PPC_LoadWordU16) \
- V(PPC_LoadWordS32) \
- V(PPC_LoadWordU32) \
- V(PPC_LoadByteRev32) \
- V(PPC_LoadWord64) \
- V(PPC_LoadByteRev64) \
- V(PPC_LoadFloat32) \
- V(PPC_LoadDouble) \
- V(PPC_LoadSimd128) \
- V(PPC_LoadReverseSimd128RR) \
- V(PPC_StoreWord8) \
- V(PPC_StoreWord16) \
- V(PPC_StoreWord32) \
- V(PPC_StoreByteRev32) \
- V(PPC_StoreWord64) \
- V(PPC_StoreByteRev64) \
- V(PPC_StoreFloat32) \
- V(PPC_StoreDouble) \
- V(PPC_StoreSimd128) \
- V(PPC_ByteRev32) \
- V(PPC_ByteRev64) \
- V(PPC_AtomicExchangeUint8) \
- V(PPC_AtomicExchangeUint16) \
- V(PPC_AtomicExchangeWord32) \
- V(PPC_AtomicExchangeWord64) \
- V(PPC_AtomicCompareExchangeUint8) \
- V(PPC_AtomicCompareExchangeUint16) \
- V(PPC_AtomicCompareExchangeWord32) \
- V(PPC_AtomicCompareExchangeWord64) \
- V(PPC_AtomicAddUint8) \
- V(PPC_AtomicAddUint16) \
- V(PPC_AtomicAddUint32) \
- V(PPC_AtomicAddUint64) \
- V(PPC_AtomicAddInt8) \
- V(PPC_AtomicAddInt16) \
- V(PPC_AtomicAddInt32) \
- V(PPC_AtomicAddInt64) \
- V(PPC_AtomicSubUint8) \
- V(PPC_AtomicSubUint16) \
- V(PPC_AtomicSubUint32) \
- V(PPC_AtomicSubUint64) \
- V(PPC_AtomicSubInt8) \
- V(PPC_AtomicSubInt16) \
- V(PPC_AtomicSubInt32) \
- V(PPC_AtomicSubInt64) \
- V(PPC_AtomicAndUint8) \
- V(PPC_AtomicAndUint16) \
- V(PPC_AtomicAndUint32) \
- V(PPC_AtomicAndUint64) \
- V(PPC_AtomicAndInt8) \
- V(PPC_AtomicAndInt16) \
- V(PPC_AtomicAndInt32) \
- V(PPC_AtomicAndInt64) \
- V(PPC_AtomicOrUint8) \
- V(PPC_AtomicOrUint16) \
- V(PPC_AtomicOrUint32) \
- V(PPC_AtomicOrUint64) \
- V(PPC_AtomicOrInt8) \
- V(PPC_AtomicOrInt16) \
- V(PPC_AtomicOrInt32) \
- V(PPC_AtomicOrInt64) \
- V(PPC_AtomicXorUint8) \
- V(PPC_AtomicXorUint16) \
- V(PPC_AtomicXorUint32) \
- V(PPC_AtomicXorUint64) \
- V(PPC_AtomicXorInt8) \
- V(PPC_AtomicXorInt16) \
- V(PPC_AtomicXorInt32) \
- V(PPC_AtomicXorInt64) \
- V(PPC_F64x2Splat) \
- V(PPC_F64x2ExtractLane) \
- V(PPC_F64x2ReplaceLane) \
- V(PPC_F64x2Add) \
- V(PPC_F64x2Sub) \
- V(PPC_F64x2Mul) \
- V(PPC_F64x2Eq) \
- V(PPC_F64x2Ne) \
- V(PPC_F64x2Le) \
- V(PPC_F64x2Lt) \
- V(PPC_F64x2Abs) \
- V(PPC_F64x2Neg) \
- V(PPC_F64x2Sqrt) \
- V(PPC_F64x2Qfma) \
- V(PPC_F64x2Qfms) \
- V(PPC_F64x2Div) \
- V(PPC_F64x2Min) \
- V(PPC_F64x2Max) \
- V(PPC_F64x2Ceil) \
- V(PPC_F64x2Floor) \
- V(PPC_F64x2Trunc) \
- V(PPC_F64x2Pmin) \
- V(PPC_F64x2Pmax) \
- V(PPC_F64x2ConvertLowI32x4S) \
- V(PPC_F64x2ConvertLowI32x4U) \
- V(PPC_F64x2PromoteLowF32x4) \
- V(PPC_F32x4Splat) \
- V(PPC_F32x4ExtractLane) \
- V(PPC_F32x4ReplaceLane) \
- V(PPC_F32x4Add) \
- V(PPC_F32x4Sub) \
- V(PPC_F32x4Mul) \
- V(PPC_F32x4Eq) \
- V(PPC_F32x4Ne) \
- V(PPC_F32x4Lt) \
- V(PPC_F32x4Le) \
- V(PPC_F32x4Abs) \
- V(PPC_F32x4Neg) \
- V(PPC_F32x4RecipApprox) \
- V(PPC_F32x4RecipSqrtApprox) \
- V(PPC_F32x4Sqrt) \
- V(PPC_F32x4SConvertI32x4) \
- V(PPC_F32x4UConvertI32x4) \
- V(PPC_F32x4Div) \
- V(PPC_F32x4Min) \
- V(PPC_F32x4Max) \
- V(PPC_F32x4Ceil) \
- V(PPC_F32x4Floor) \
- V(PPC_F32x4Trunc) \
- V(PPC_F32x4Pmin) \
- V(PPC_F32x4Pmax) \
- V(PPC_F32x4Qfma) \
- V(PPC_F32x4Qfms) \
- V(PPC_F32x4DemoteF64x2Zero) \
- V(PPC_I64x2Splat) \
- V(PPC_I64x2ExtractLane) \
- V(PPC_I64x2ReplaceLane) \
- V(PPC_I64x2Add) \
- V(PPC_I64x2Sub) \
- V(PPC_I64x2Mul) \
- V(PPC_I64x2Eq) \
- V(PPC_I64x2Ne) \
- V(PPC_I64x2GtS) \
- V(PPC_I64x2GeS) \
- V(PPC_I64x2Shl) \
- V(PPC_I64x2ShrS) \
- V(PPC_I64x2ShrU) \
- V(PPC_I64x2Neg) \
- V(PPC_I64x2BitMask) \
- V(PPC_I64x2SConvertI32x4Low) \
- V(PPC_I64x2SConvertI32x4High) \
- V(PPC_I64x2UConvertI32x4Low) \
- V(PPC_I64x2UConvertI32x4High) \
- V(PPC_I64x2ExtMulLowI32x4S) \
- V(PPC_I64x2ExtMulHighI32x4S) \
- V(PPC_I64x2ExtMulLowI32x4U) \
- V(PPC_I64x2ExtMulHighI32x4U) \
- V(PPC_I64x2Abs) \
- V(PPC_I32x4Splat) \
- V(PPC_I32x4ExtractLane) \
- V(PPC_I32x4ReplaceLane) \
- V(PPC_I32x4Add) \
- V(PPC_I32x4Sub) \
- V(PPC_I32x4Mul) \
- V(PPC_I32x4MinS) \
- V(PPC_I32x4MinU) \
- V(PPC_I32x4MaxS) \
- V(PPC_I32x4MaxU) \
- V(PPC_I32x4Eq) \
- V(PPC_I32x4Ne) \
- V(PPC_I32x4GtS) \
- V(PPC_I32x4GeS) \
- V(PPC_I32x4GtU) \
- V(PPC_I32x4GeU) \
- V(PPC_I32x4Shl) \
- V(PPC_I32x4ShrS) \
- V(PPC_I32x4ShrU) \
- V(PPC_I32x4Neg) \
- V(PPC_I32x4Abs) \
- V(PPC_I32x4SConvertF32x4) \
- V(PPC_I32x4UConvertF32x4) \
- V(PPC_I32x4SConvertI16x8Low) \
- V(PPC_I32x4SConvertI16x8High) \
- V(PPC_I32x4UConvertI16x8Low) \
- V(PPC_I32x4UConvertI16x8High) \
- V(PPC_I32x4BitMask) \
- V(PPC_I32x4DotI16x8S) \
- V(PPC_I32x4ExtAddPairwiseI16x8S) \
- V(PPC_I32x4ExtAddPairwiseI16x8U) \
- V(PPC_I32x4ExtMulLowI16x8S) \
- V(PPC_I32x4ExtMulHighI16x8S) \
- V(PPC_I32x4ExtMulLowI16x8U) \
- V(PPC_I32x4ExtMulHighI16x8U) \
- V(PPC_I32x4TruncSatF64x2SZero) \
- V(PPC_I32x4TruncSatF64x2UZero) \
- V(PPC_I16x8Splat) \
- V(PPC_I16x8ExtractLaneU) \
- V(PPC_I16x8ExtractLaneS) \
- V(PPC_I16x8ReplaceLane) \
- V(PPC_I16x8Add) \
- V(PPC_I16x8Sub) \
- V(PPC_I16x8Mul) \
- V(PPC_I16x8MinS) \
- V(PPC_I16x8MinU) \
- V(PPC_I16x8MaxS) \
- V(PPC_I16x8MaxU) \
- V(PPC_I16x8Eq) \
- V(PPC_I16x8Ne) \
- V(PPC_I16x8GtS) \
- V(PPC_I16x8GeS) \
- V(PPC_I16x8GtU) \
- V(PPC_I16x8GeU) \
- V(PPC_I16x8Shl) \
- V(PPC_I16x8ShrS) \
- V(PPC_I16x8ShrU) \
- V(PPC_I16x8Neg) \
- V(PPC_I16x8Abs) \
- V(PPC_I16x8SConvertI32x4) \
- V(PPC_I16x8UConvertI32x4) \
- V(PPC_I16x8SConvertI8x16Low) \
- V(PPC_I16x8SConvertI8x16High) \
- V(PPC_I16x8UConvertI8x16Low) \
- V(PPC_I16x8UConvertI8x16High) \
- V(PPC_I16x8AddSatS) \
- V(PPC_I16x8SubSatS) \
- V(PPC_I16x8AddSatU) \
- V(PPC_I16x8SubSatU) \
- V(PPC_I16x8RoundingAverageU) \
- V(PPC_I16x8BitMask) \
- V(PPC_I16x8ExtAddPairwiseI8x16S) \
- V(PPC_I16x8ExtAddPairwiseI8x16U) \
- V(PPC_I16x8Q15MulRSatS) \
- V(PPC_I16x8ExtMulLowI8x16S) \
- V(PPC_I16x8ExtMulHighI8x16S) \
- V(PPC_I16x8ExtMulLowI8x16U) \
- V(PPC_I16x8ExtMulHighI8x16U) \
- V(PPC_I8x16Splat) \
- V(PPC_I8x16ExtractLaneU) \
- V(PPC_I8x16ExtractLaneS) \
- V(PPC_I8x16ReplaceLane) \
- V(PPC_I8x16Add) \
- V(PPC_I8x16Sub) \
- V(PPC_I8x16MinS) \
- V(PPC_I8x16MinU) \
- V(PPC_I8x16MaxS) \
- V(PPC_I8x16MaxU) \
- V(PPC_I8x16Eq) \
- V(PPC_I8x16Ne) \
- V(PPC_I8x16GtS) \
- V(PPC_I8x16GeS) \
- V(PPC_I8x16GtU) \
- V(PPC_I8x16GeU) \
- V(PPC_I8x16Shl) \
- V(PPC_I8x16ShrS) \
- V(PPC_I8x16ShrU) \
- V(PPC_I8x16Neg) \
- V(PPC_I8x16Abs) \
- V(PPC_I8x16SConvertI16x8) \
- V(PPC_I8x16UConvertI16x8) \
- V(PPC_I8x16AddSatS) \
- V(PPC_I8x16SubSatS) \
- V(PPC_I8x16AddSatU) \
- V(PPC_I8x16SubSatU) \
- V(PPC_I8x16RoundingAverageU) \
- V(PPC_I8x16Shuffle) \
- V(PPC_I8x16Swizzle) \
- V(PPC_I8x16BitMask) \
- V(PPC_I8x16Popcnt) \
- V(PPC_I64x2AllTrue) \
- V(PPC_I32x4AllTrue) \
- V(PPC_I16x8AllTrue) \
- V(PPC_I8x16AllTrue) \
- V(PPC_V128AnyTrue) \
- V(PPC_S128And) \
- V(PPC_S128Or) \
- V(PPC_S128Xor) \
- V(PPC_S128Const) \
- V(PPC_S128Zero) \
- V(PPC_S128AllOnes) \
- V(PPC_S128Not) \
- V(PPC_S128Select) \
- V(PPC_S128AndNot) \
- V(PPC_S128Load8Splat) \
- V(PPC_S128Load16Splat) \
- V(PPC_S128Load32Splat) \
- V(PPC_S128Load64Splat) \
- V(PPC_S128Load8x8S) \
- V(PPC_S128Load8x8U) \
- V(PPC_S128Load16x4S) \
- V(PPC_S128Load16x4U) \
- V(PPC_S128Load32x2S) \
- V(PPC_S128Load32x2U) \
- V(PPC_S128Load32Zero) \
- V(PPC_S128Load64Zero) \
- V(PPC_S128Load8Lane) \
- V(PPC_S128Load16Lane) \
- V(PPC_S128Load32Lane) \
- V(PPC_S128Load64Lane) \
- V(PPC_S128Store8Lane) \
- V(PPC_S128Store16Lane) \
- V(PPC_S128Store32Lane) \
- V(PPC_S128Store64Lane) \
- V(PPC_StoreCompressTagged) \
- V(PPC_LoadDecompressTaggedSigned) \
- V(PPC_LoadDecompressTaggedPointer) \
+
+// Opcodes that support a MemoryAccessMode.
+#define TARGET_ARCH_OPCODE_WITH_MEMORY_ACCESS_MODE_LIST(V) // None.
+
+#define TARGET_ARCH_OPCODE_LIST(V) \
+ TARGET_ARCH_OPCODE_WITH_MEMORY_ACCESS_MODE_LIST(V) \
+ V(PPC_Peek) \
+ V(PPC_Sync) \
+ V(PPC_And) \
+ V(PPC_AndComplement) \
+ V(PPC_Or) \
+ V(PPC_OrComplement) \
+ V(PPC_Xor) \
+ V(PPC_ShiftLeft32) \
+ V(PPC_ShiftLeft64) \
+ V(PPC_ShiftLeftPair) \
+ V(PPC_ShiftRight32) \
+ V(PPC_ShiftRight64) \
+ V(PPC_ShiftRightPair) \
+ V(PPC_ShiftRightAlg32) \
+ V(PPC_ShiftRightAlg64) \
+ V(PPC_ShiftRightAlgPair) \
+ V(PPC_RotRight32) \
+ V(PPC_RotRight64) \
+ V(PPC_Not) \
+ V(PPC_RotLeftAndMask32) \
+ V(PPC_RotLeftAndClear64) \
+ V(PPC_RotLeftAndClearLeft64) \
+ V(PPC_RotLeftAndClearRight64) \
+ V(PPC_Add32) \
+ V(PPC_Add64) \
+ V(PPC_AddWithOverflow32) \
+ V(PPC_AddPair) \
+ V(PPC_AddDouble) \
+ V(PPC_Sub) \
+ V(PPC_SubWithOverflow32) \
+ V(PPC_SubPair) \
+ V(PPC_SubDouble) \
+ V(PPC_Mul32) \
+ V(PPC_Mul32WithHigh32) \
+ V(PPC_Mul64) \
+ V(PPC_MulHigh32) \
+ V(PPC_MulHighU32) \
+ V(PPC_MulPair) \
+ V(PPC_MulDouble) \
+ V(PPC_Div32) \
+ V(PPC_Div64) \
+ V(PPC_DivU32) \
+ V(PPC_DivU64) \
+ V(PPC_DivDouble) \
+ V(PPC_Mod32) \
+ V(PPC_Mod64) \
+ V(PPC_ModU32) \
+ V(PPC_ModU64) \
+ V(PPC_ModDouble) \
+ V(PPC_Neg) \
+ V(PPC_NegDouble) \
+ V(PPC_SqrtDouble) \
+ V(PPC_FloorDouble) \
+ V(PPC_CeilDouble) \
+ V(PPC_TruncateDouble) \
+ V(PPC_RoundDouble) \
+ V(PPC_MaxDouble) \
+ V(PPC_MinDouble) \
+ V(PPC_AbsDouble) \
+ V(PPC_Cntlz32) \
+ V(PPC_Cntlz64) \
+ V(PPC_Popcnt32) \
+ V(PPC_Popcnt64) \
+ V(PPC_Cmp32) \
+ V(PPC_Cmp64) \
+ V(PPC_CmpDouble) \
+ V(PPC_Tst32) \
+ V(PPC_Tst64) \
+ V(PPC_Push) \
+ V(PPC_PushFrame) \
+ V(PPC_StoreToStackSlot) \
+ V(PPC_ExtendSignWord8) \
+ V(PPC_ExtendSignWord16) \
+ V(PPC_ExtendSignWord32) \
+ V(PPC_Uint32ToUint64) \
+ V(PPC_Int64ToInt32) \
+ V(PPC_Int64ToFloat32) \
+ V(PPC_Int64ToDouble) \
+ V(PPC_Uint64ToFloat32) \
+ V(PPC_Uint64ToDouble) \
+ V(PPC_Int32ToFloat32) \
+ V(PPC_Int32ToDouble) \
+ V(PPC_Uint32ToFloat32) \
+ V(PPC_Float32ToInt32) \
+ V(PPC_Float32ToUint32) \
+ V(PPC_Uint32ToDouble) \
+ V(PPC_Float32ToDouble) \
+ V(PPC_Float64SilenceNaN) \
+ V(PPC_DoubleToInt32) \
+ V(PPC_DoubleToUint32) \
+ V(PPC_DoubleToInt64) \
+ V(PPC_DoubleToUint64) \
+ V(PPC_DoubleToFloat32) \
+ V(PPC_DoubleExtractLowWord32) \
+ V(PPC_DoubleExtractHighWord32) \
+ V(PPC_DoubleInsertLowWord32) \
+ V(PPC_DoubleInsertHighWord32) \
+ V(PPC_DoubleConstruct) \
+ V(PPC_BitcastInt32ToFloat32) \
+ V(PPC_BitcastFloat32ToInt32) \
+ V(PPC_BitcastInt64ToDouble) \
+ V(PPC_BitcastDoubleToInt64) \
+ V(PPC_LoadWordS8) \
+ V(PPC_LoadWordU8) \
+ V(PPC_LoadWordS16) \
+ V(PPC_LoadWordU16) \
+ V(PPC_LoadWordS32) \
+ V(PPC_LoadWordU32) \
+ V(PPC_LoadByteRev32) \
+ V(PPC_LoadWord64) \
+ V(PPC_LoadByteRev64) \
+ V(PPC_LoadFloat32) \
+ V(PPC_LoadDouble) \
+ V(PPC_LoadSimd128) \
+ V(PPC_LoadReverseSimd128RR) \
+ V(PPC_StoreWord8) \
+ V(PPC_StoreWord16) \
+ V(PPC_StoreWord32) \
+ V(PPC_StoreByteRev32) \
+ V(PPC_StoreWord64) \
+ V(PPC_StoreByteRev64) \
+ V(PPC_StoreFloat32) \
+ V(PPC_StoreDouble) \
+ V(PPC_StoreSimd128) \
+ V(PPC_ByteRev32) \
+ V(PPC_ByteRev64) \
+ V(PPC_AtomicExchangeUint8) \
+ V(PPC_AtomicExchangeUint16) \
+ V(PPC_AtomicExchangeWord32) \
+ V(PPC_AtomicExchangeWord64) \
+ V(PPC_AtomicCompareExchangeUint8) \
+ V(PPC_AtomicCompareExchangeUint16) \
+ V(PPC_AtomicCompareExchangeWord32) \
+ V(PPC_AtomicCompareExchangeWord64) \
+ V(PPC_AtomicAddUint8) \
+ V(PPC_AtomicAddUint16) \
+ V(PPC_AtomicAddUint32) \
+ V(PPC_AtomicAddUint64) \
+ V(PPC_AtomicAddInt8) \
+ V(PPC_AtomicAddInt16) \
+ V(PPC_AtomicAddInt32) \
+ V(PPC_AtomicAddInt64) \
+ V(PPC_AtomicSubUint8) \
+ V(PPC_AtomicSubUint16) \
+ V(PPC_AtomicSubUint32) \
+ V(PPC_AtomicSubUint64) \
+ V(PPC_AtomicSubInt8) \
+ V(PPC_AtomicSubInt16) \
+ V(PPC_AtomicSubInt32) \
+ V(PPC_AtomicSubInt64) \
+ V(PPC_AtomicAndUint8) \
+ V(PPC_AtomicAndUint16) \
+ V(PPC_AtomicAndUint32) \
+ V(PPC_AtomicAndUint64) \
+ V(PPC_AtomicAndInt8) \
+ V(PPC_AtomicAndInt16) \
+ V(PPC_AtomicAndInt32) \
+ V(PPC_AtomicAndInt64) \
+ V(PPC_AtomicOrUint8) \
+ V(PPC_AtomicOrUint16) \
+ V(PPC_AtomicOrUint32) \
+ V(PPC_AtomicOrUint64) \
+ V(PPC_AtomicOrInt8) \
+ V(PPC_AtomicOrInt16) \
+ V(PPC_AtomicOrInt32) \
+ V(PPC_AtomicOrInt64) \
+ V(PPC_AtomicXorUint8) \
+ V(PPC_AtomicXorUint16) \
+ V(PPC_AtomicXorUint32) \
+ V(PPC_AtomicXorUint64) \
+ V(PPC_AtomicXorInt8) \
+ V(PPC_AtomicXorInt16) \
+ V(PPC_AtomicXorInt32) \
+ V(PPC_AtomicXorInt64) \
+ V(PPC_F64x2Splat) \
+ V(PPC_F64x2ExtractLane) \
+ V(PPC_F64x2ReplaceLane) \
+ V(PPC_F64x2Add) \
+ V(PPC_F64x2Sub) \
+ V(PPC_F64x2Mul) \
+ V(PPC_F64x2Eq) \
+ V(PPC_F64x2Ne) \
+ V(PPC_F64x2Le) \
+ V(PPC_F64x2Lt) \
+ V(PPC_F64x2Abs) \
+ V(PPC_F64x2Neg) \
+ V(PPC_F64x2Sqrt) \
+ V(PPC_F64x2Qfma) \
+ V(PPC_F64x2Qfms) \
+ V(PPC_F64x2Div) \
+ V(PPC_F64x2Min) \
+ V(PPC_F64x2Max) \
+ V(PPC_F64x2Ceil) \
+ V(PPC_F64x2Floor) \
+ V(PPC_F64x2Trunc) \
+ V(PPC_F64x2Pmin) \
+ V(PPC_F64x2Pmax) \
+ V(PPC_F64x2ConvertLowI32x4S) \
+ V(PPC_F64x2ConvertLowI32x4U) \
+ V(PPC_F64x2PromoteLowF32x4) \
+ V(PPC_F32x4Splat) \
+ V(PPC_F32x4ExtractLane) \
+ V(PPC_F32x4ReplaceLane) \
+ V(PPC_F32x4Add) \
+ V(PPC_F32x4Sub) \
+ V(PPC_F32x4Mul) \
+ V(PPC_F32x4Eq) \
+ V(PPC_F32x4Ne) \
+ V(PPC_F32x4Lt) \
+ V(PPC_F32x4Le) \
+ V(PPC_F32x4Abs) \
+ V(PPC_F32x4Neg) \
+ V(PPC_F32x4RecipApprox) \
+ V(PPC_F32x4RecipSqrtApprox) \
+ V(PPC_F32x4Sqrt) \
+ V(PPC_F32x4SConvertI32x4) \
+ V(PPC_F32x4UConvertI32x4) \
+ V(PPC_F32x4Div) \
+ V(PPC_F32x4Min) \
+ V(PPC_F32x4Max) \
+ V(PPC_F32x4Ceil) \
+ V(PPC_F32x4Floor) \
+ V(PPC_F32x4Trunc) \
+ V(PPC_F32x4Pmin) \
+ V(PPC_F32x4Pmax) \
+ V(PPC_F32x4Qfma) \
+ V(PPC_F32x4Qfms) \
+ V(PPC_F32x4DemoteF64x2Zero) \
+ V(PPC_I64x2Splat) \
+ V(PPC_I64x2ExtractLane) \
+ V(PPC_I64x2ReplaceLane) \
+ V(PPC_I64x2Add) \
+ V(PPC_I64x2Sub) \
+ V(PPC_I64x2Mul) \
+ V(PPC_I64x2Eq) \
+ V(PPC_I64x2Ne) \
+ V(PPC_I64x2GtS) \
+ V(PPC_I64x2GeS) \
+ V(PPC_I64x2Shl) \
+ V(PPC_I64x2ShrS) \
+ V(PPC_I64x2ShrU) \
+ V(PPC_I64x2Neg) \
+ V(PPC_I64x2BitMask) \
+ V(PPC_I64x2SConvertI32x4Low) \
+ V(PPC_I64x2SConvertI32x4High) \
+ V(PPC_I64x2UConvertI32x4Low) \
+ V(PPC_I64x2UConvertI32x4High) \
+ V(PPC_I64x2ExtMulLowI32x4S) \
+ V(PPC_I64x2ExtMulHighI32x4S) \
+ V(PPC_I64x2ExtMulLowI32x4U) \
+ V(PPC_I64x2ExtMulHighI32x4U) \
+ V(PPC_I64x2Abs) \
+ V(PPC_I32x4Splat) \
+ V(PPC_I32x4ExtractLane) \
+ V(PPC_I32x4ReplaceLane) \
+ V(PPC_I32x4Add) \
+ V(PPC_I32x4Sub) \
+ V(PPC_I32x4Mul) \
+ V(PPC_I32x4MinS) \
+ V(PPC_I32x4MinU) \
+ V(PPC_I32x4MaxS) \
+ V(PPC_I32x4MaxU) \
+ V(PPC_I32x4Eq) \
+ V(PPC_I32x4Ne) \
+ V(PPC_I32x4GtS) \
+ V(PPC_I32x4GeS) \
+ V(PPC_I32x4GtU) \
+ V(PPC_I32x4GeU) \
+ V(PPC_I32x4Shl) \
+ V(PPC_I32x4ShrS) \
+ V(PPC_I32x4ShrU) \
+ V(PPC_I32x4Neg) \
+ V(PPC_I32x4Abs) \
+ V(PPC_I32x4SConvertF32x4) \
+ V(PPC_I32x4UConvertF32x4) \
+ V(PPC_I32x4SConvertI16x8Low) \
+ V(PPC_I32x4SConvertI16x8High) \
+ V(PPC_I32x4UConvertI16x8Low) \
+ V(PPC_I32x4UConvertI16x8High) \
+ V(PPC_I32x4BitMask) \
+ V(PPC_I32x4DotI16x8S) \
+ V(PPC_I32x4ExtAddPairwiseI16x8S) \
+ V(PPC_I32x4ExtAddPairwiseI16x8U) \
+ V(PPC_I32x4ExtMulLowI16x8S) \
+ V(PPC_I32x4ExtMulHighI16x8S) \
+ V(PPC_I32x4ExtMulLowI16x8U) \
+ V(PPC_I32x4ExtMulHighI16x8U) \
+ V(PPC_I32x4TruncSatF64x2SZero) \
+ V(PPC_I32x4TruncSatF64x2UZero) \
+ V(PPC_I16x8Splat) \
+ V(PPC_I16x8ExtractLaneU) \
+ V(PPC_I16x8ExtractLaneS) \
+ V(PPC_I16x8ReplaceLane) \
+ V(PPC_I16x8Add) \
+ V(PPC_I16x8Sub) \
+ V(PPC_I16x8Mul) \
+ V(PPC_I16x8MinS) \
+ V(PPC_I16x8MinU) \
+ V(PPC_I16x8MaxS) \
+ V(PPC_I16x8MaxU) \
+ V(PPC_I16x8Eq) \
+ V(PPC_I16x8Ne) \
+ V(PPC_I16x8GtS) \
+ V(PPC_I16x8GeS) \
+ V(PPC_I16x8GtU) \
+ V(PPC_I16x8GeU) \
+ V(PPC_I16x8Shl) \
+ V(PPC_I16x8ShrS) \
+ V(PPC_I16x8ShrU) \
+ V(PPC_I16x8Neg) \
+ V(PPC_I16x8Abs) \
+ V(PPC_I16x8SConvertI32x4) \
+ V(PPC_I16x8UConvertI32x4) \
+ V(PPC_I16x8SConvertI8x16Low) \
+ V(PPC_I16x8SConvertI8x16High) \
+ V(PPC_I16x8UConvertI8x16Low) \
+ V(PPC_I16x8UConvertI8x16High) \
+ V(PPC_I16x8AddSatS) \
+ V(PPC_I16x8SubSatS) \
+ V(PPC_I16x8AddSatU) \
+ V(PPC_I16x8SubSatU) \
+ V(PPC_I16x8RoundingAverageU) \
+ V(PPC_I16x8BitMask) \
+ V(PPC_I16x8ExtAddPairwiseI8x16S) \
+ V(PPC_I16x8ExtAddPairwiseI8x16U) \
+ V(PPC_I16x8Q15MulRSatS) \
+ V(PPC_I16x8ExtMulLowI8x16S) \
+ V(PPC_I16x8ExtMulHighI8x16S) \
+ V(PPC_I16x8ExtMulLowI8x16U) \
+ V(PPC_I16x8ExtMulHighI8x16U) \
+ V(PPC_I8x16Splat) \
+ V(PPC_I8x16ExtractLaneU) \
+ V(PPC_I8x16ExtractLaneS) \
+ V(PPC_I8x16ReplaceLane) \
+ V(PPC_I8x16Add) \
+ V(PPC_I8x16Sub) \
+ V(PPC_I8x16MinS) \
+ V(PPC_I8x16MinU) \
+ V(PPC_I8x16MaxS) \
+ V(PPC_I8x16MaxU) \
+ V(PPC_I8x16Eq) \
+ V(PPC_I8x16Ne) \
+ V(PPC_I8x16GtS) \
+ V(PPC_I8x16GeS) \
+ V(PPC_I8x16GtU) \
+ V(PPC_I8x16GeU) \
+ V(PPC_I8x16Shl) \
+ V(PPC_I8x16ShrS) \
+ V(PPC_I8x16ShrU) \
+ V(PPC_I8x16Neg) \
+ V(PPC_I8x16Abs) \
+ V(PPC_I8x16SConvertI16x8) \
+ V(PPC_I8x16UConvertI16x8) \
+ V(PPC_I8x16AddSatS) \
+ V(PPC_I8x16SubSatS) \
+ V(PPC_I8x16AddSatU) \
+ V(PPC_I8x16SubSatU) \
+ V(PPC_I8x16RoundingAverageU) \
+ V(PPC_I8x16Shuffle) \
+ V(PPC_I8x16Swizzle) \
+ V(PPC_I8x16BitMask) \
+ V(PPC_I8x16Popcnt) \
+ V(PPC_I64x2AllTrue) \
+ V(PPC_I32x4AllTrue) \
+ V(PPC_I16x8AllTrue) \
+ V(PPC_I8x16AllTrue) \
+ V(PPC_V128AnyTrue) \
+ V(PPC_S128And) \
+ V(PPC_S128Or) \
+ V(PPC_S128Xor) \
+ V(PPC_S128Const) \
+ V(PPC_S128Zero) \
+ V(PPC_S128AllOnes) \
+ V(PPC_S128Not) \
+ V(PPC_S128Select) \
+ V(PPC_S128AndNot) \
+ V(PPC_S128Load8Splat) \
+ V(PPC_S128Load16Splat) \
+ V(PPC_S128Load32Splat) \
+ V(PPC_S128Load64Splat) \
+ V(PPC_S128Load8x8S) \
+ V(PPC_S128Load8x8U) \
+ V(PPC_S128Load16x4S) \
+ V(PPC_S128Load16x4U) \
+ V(PPC_S128Load32x2S) \
+ V(PPC_S128Load32x2U) \
+ V(PPC_S128Load32Zero) \
+ V(PPC_S128Load64Zero) \
+ V(PPC_S128Load8Lane) \
+ V(PPC_S128Load16Lane) \
+ V(PPC_S128Load32Lane) \
+ V(PPC_S128Load64Lane) \
+ V(PPC_S128Store8Lane) \
+ V(PPC_S128Store16Lane) \
+ V(PPC_S128Store32Lane) \
+ V(PPC_S128Store64Lane) \
+ V(PPC_StoreCompressTagged) \
+ V(PPC_LoadDecompressTaggedSigned) \
+ V(PPC_LoadDecompressTaggedPointer) \
V(PPC_LoadDecompressAnyTagged)
// Addressing modes represent the "shape" of inputs to an instruction.
diff --git a/deps/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc b/deps/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc
index bfa7c0a6e0..28f071ec68 100644
--- a/deps/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc
+++ b/deps/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc
@@ -162,9 +162,9 @@ void InstructionSelector::VisitStackSlot(Node* node) {
sequence()->AddImmediate(Constant(slot)), 0, nullptr);
}
-void InstructionSelector::VisitAbortCSAAssert(Node* node) {
+void InstructionSelector::VisitAbortCSADcheck(Node* node) {
PPCOperandGenerator g(this);
- Emit(kArchAbortCSAAssert, g.NoOutput(), g.UseFixed(node->InputAt(0), r4));
+ Emit(kArchAbortCSADcheck, g.NoOutput(), g.UseFixed(node->InputAt(0), r4));
}
static void VisitLoadCommon(InstructionSelector* selector, Node* node,
diff --git a/deps/v8/src/compiler/backend/riscv64/code-generator-riscv64.cc b/deps/v8/src/compiler/backend/riscv64/code-generator-riscv64.cc
index 559378b19b..c95299ee1d 100644
--- a/deps/v8/src/compiler/backend/riscv64/code-generator-riscv64.cc
+++ b/deps/v8/src/compiler/backend/riscv64/code-generator-riscv64.cc
@@ -441,8 +441,8 @@ FPUCondition FlagsConditionToConditionCmpFPU(bool* predicate,
__ load_linked(i.TempRegister(2), MemOperand(i.TempRegister(0), 0)); \
__ ExtractBits(i.OutputRegister(0), i.TempRegister(2), i.TempRegister(1), \
size, sign_extend); \
- __ ExtractBits(i.InputRegister(2), i.InputRegister(2), i.TempRegister(1), \
- size, sign_extend); \
+ __ ExtractBits(i.InputRegister(2), i.InputRegister(2), 0, size, \
+ sign_extend); \
__ BranchShort(&exit, ne, i.InputRegister(2), \
Operand(i.OutputRegister(0))); \
__ InsertBits(i.TempRegister(2), i.InputRegister(3), i.TempRegister(1), \
@@ -743,13 +743,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArchTableSwitch:
AssembleArchTableSwitch(instr);
break;
- case kArchAbortCSAAssert:
+ case kArchAbortCSADcheck:
DCHECK(i.InputRegister(0) == a0);
{
// We don't actually want to generate a pile of code for this, so just
// claim there is a stack frame, without generating one.
- FrameScope scope(tasm(), StackFrame::NONE);
- __ Call(isolate()->builtins()->code_handle(Builtin::kAbortCSAAssert),
+ FrameScope scope(tasm(), StackFrame::NO_FRAME_TYPE);
+ __ Call(isolate()->builtins()->code_handle(Builtin::kAbortCSADcheck),
RelocInfo::CODE_TARGET);
}
__ stop();
@@ -2049,6 +2049,18 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ vmv_vx(i.OutputSimd128Register(), i.InputRegister(0));
break;
}
+ case kRiscvF32x4Splat: {
+ (__ VU).set(kScratchReg, E32, m1);
+ __ fmv_x_w(kScratchReg, i.InputSingleRegister(0));
+ __ vmv_vx(i.OutputSimd128Register(), kScratchReg);
+ break;
+ }
+ case kRiscvF64x2Splat: {
+ (__ VU).set(kScratchReg, E64, m1);
+ __ fmv_x_d(kScratchReg, i.InputDoubleRegister(0));
+ __ vmv_vx(i.OutputSimd128Register(), kScratchReg);
+ break;
+ }
case kRiscvI32x4Abs: {
__ VU.set(kScratchReg, E32, m1);
__ vmv_vx(kSimd128RegZero, zero_reg);
@@ -2144,12 +2156,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kRiscvI32x4GtS: {
- __ WasmRvvGtU(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ __ WasmRvvGtS(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1), E32, m1);
break;
}
case kRiscvI64x2GtS: {
- __ WasmRvvGtU(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ __ WasmRvvGtS(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1), E64, m1);
break;
}
@@ -2392,6 +2404,171 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ vor_vv(dst, dst, kSimd128ScratchReg);
break;
}
+ case kRiscvF32x4Abs: {
+ __ VU.set(kScratchReg, VSew::E32, Vlmul::m1);
+ __ vfabs_vv(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ break;
+ }
+ case kRiscvF64x2Abs: {
+ __ VU.set(kScratchReg, VSew::E64, Vlmul::m1);
+ __ vfabs_vv(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ break;
+ }
+ case kRiscvF32x4Neg: {
+ __ VU.set(kScratchReg, VSew::E32, Vlmul::m1);
+ __ vfneg_vv(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ break;
+ }
+ case kRiscvF64x2Neg: {
+ __ VU.set(kScratchReg, VSew::E64, Vlmul::m1);
+ __ vfneg_vv(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ break;
+ }
+ case kRiscvF32x4DemoteF64x2Zero: {
+ __ VU.set(kScratchReg, E32, m1);
+ __ vfncvt_f_f_w(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ __ vmv_vi(v0, 12);
+ __ vmerge_vx(i.OutputSimd128Register(), zero_reg,
+ i.OutputSimd128Register());
+ break;
+ }
+ case kRiscvF32x4Add: {
+ __ VU.set(kScratchReg, E32, m1);
+ __ vfadd_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kRiscvF32x4Sub: {
+ __ VU.set(kScratchReg, E32, m1);
+ __ vfsub_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kRiscvF64x2Add: {
+ __ VU.set(kScratchReg, E64, m1);
+ __ vfadd_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kRiscvF64x2Sub: {
+ __ VU.set(kScratchReg, E64, m1);
+ __ vfsub_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kRiscvF32x4Ceil: {
+ __ Ceil_f(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ kScratchReg, kSimd128ScratchReg);
+ break;
+ }
+ case kRiscvF64x2Ceil: {
+ __ Ceil_d(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ kScratchReg, kSimd128ScratchReg);
+ break;
+ }
+ case kRiscvF32x4Floor: {
+ __ Floor_f(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ kScratchReg, kSimd128ScratchReg);
+ break;
+ }
+ case kRiscvF64x2Floor: {
+ __ Floor_d(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ kScratchReg, kSimd128ScratchReg);
+ break;
+ }
+ case kRiscvS128Select: {
+ __ VU.set(kScratchReg, E8, m1);
+ __ vand_vv(kSimd128ScratchReg, i.InputSimd128Register(1),
+ i.InputSimd128Register(0));
+ __ vnot_vv(kSimd128ScratchReg2, i.InputSimd128Register(0));
+ __ vand_vv(kSimd128ScratchReg2, i.InputSimd128Register(2),
+ kSimd128ScratchReg2);
+ __ vor_vv(i.OutputSimd128Register(), kSimd128ScratchReg,
+ kSimd128ScratchReg2);
+ break;
+ }
+ case kRiscvF32x4UConvertI32x4: {
+ __ VU.set(kScratchReg, E32, m1);
+ __ VU.set(RoundingMode::RTZ);
+ __ vfcvt_f_xu_v(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ break;
+ }
+ case kRiscvF32x4SConvertI32x4: {
+ __ VU.set(kScratchReg, E32, m1);
+ __ VU.set(RoundingMode::RTZ);
+ __ vfcvt_f_x_v(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ break;
+ }
+ case kRiscvF32x4Div: {
+ __ VU.set(kScratchReg, E32, m1);
+ __ VU.set(RoundingMode::RTZ);
+ __ vfdiv_vv(i.OutputSimd128Register(), i.InputSimd128Register(1),
+ i.InputSimd128Register(0));
+ break;
+ }
+ case kRiscvF32x4Mul: {
+ __ VU.set(kScratchReg, E32, m1);
+ __ VU.set(RoundingMode::RTZ);
+ __ vfmul_vv(i.OutputSimd128Register(), i.InputSimd128Register(1),
+ i.InputSimd128Register(0));
+ break;
+ }
+ case kRiscvF32x4Eq: {
+ __ VU.set(kScratchReg, E32, m1);
+ __ vmfeq_vv(v0, i.InputSimd128Register(1), i.InputSimd128Register(0));
+ __ vmv_vx(i.OutputSimd128Register(), zero_reg);
+ __ vmerge_vi(i.OutputSimd128Register(), -1, i.OutputSimd128Register());
+ break;
+ }
+ case kRiscvF32x4Ne: {
+ __ VU.set(kScratchReg, E32, m1);
+ __ vmfne_vv(v0, i.InputSimd128Register(1), i.InputSimd128Register(0));
+ __ vmv_vx(i.OutputSimd128Register(), zero_reg);
+ __ vmerge_vi(i.OutputSimd128Register(), -1, i.OutputSimd128Register());
+ break;
+ }
+ case kRiscvF32x4Lt: {
+ __ VU.set(kScratchReg, E32, m1);
+ __ vmflt_vv(v0, i.InputSimd128Register(1), i.InputSimd128Register(0));
+ __ vmv_vx(i.OutputSimd128Register(), zero_reg);
+ __ vmerge_vi(i.OutputSimd128Register(), -1, i.OutputSimd128Register());
+ break;
+ }
+ case kRiscvF32x4Le: {
+ __ VU.set(kScratchReg, E32, m1);
+ __ vmfle_vv(v0, i.InputSimd128Register(1), i.InputSimd128Register(0));
+ __ vmv_vx(i.OutputSimd128Register(), zero_reg);
+ __ vmerge_vi(i.OutputSimd128Register(), -1, i.OutputSimd128Register());
+ break;
+ }
+ case kRiscvF32x4Max: {
+ __ VU.set(kScratchReg, E32, m1);
+ const int32_t kNaN = 0x7FC00000;
+ __ vmfeq_vv(v0, i.InputSimd128Register(0), i.InputSimd128Register(0));
+ __ vmfeq_vv(kSimd128ScratchReg, i.InputSimd128Register(1),
+ i.InputSimd128Register(1));
+ __ vand_vv(v0, v0, kSimd128ScratchReg);
+ __ li(kScratchReg, kNaN);
+ __ vmv_vx(kSimd128ScratchReg, kScratchReg);
+ __ vfmax_vv(kSimd128ScratchReg, i.InputSimd128Register(1),
+ i.InputSimd128Register(0), Mask);
+ __ vmv_vv(i.OutputSimd128Register(), kSimd128ScratchReg);
+ break;
+ }
+ case kRiscvF32x4Min: {
+ __ VU.set(kScratchReg, E32, m1);
+ const int32_t kNaN = 0x7FC00000;
+ __ vmfeq_vv(v0, i.InputSimd128Register(0), i.InputSimd128Register(0));
+ __ vmfeq_vv(kSimd128ScratchReg, i.InputSimd128Register(1),
+ i.InputSimd128Register(1));
+ __ vand_vv(v0, v0, kSimd128ScratchReg);
+ __ li(kScratchReg, kNaN);
+ __ vmv_vx(kSimd128ScratchReg, kScratchReg);
+ __ vfmin_vv(kSimd128ScratchReg, i.InputSimd128Register(1),
+ i.InputSimd128Register(0), Mask);
+ __ vmv_vv(i.OutputSimd128Register(), kSimd128ScratchReg);
+ break;
+ }
default:
#ifdef DEBUG
switch (arch_opcode) {
@@ -3061,7 +3238,18 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
void CodeGenerator::FinishCode() { __ ForceConstantPoolEmissionWithoutJump(); }
void CodeGenerator::PrepareForDeoptimizationExits(
- ZoneDeque<DeoptimizationExit*>* exits) {}
+ ZoneDeque<DeoptimizationExit*>* exits) {
+ __ ForceConstantPoolEmissionWithoutJump();
+ int total_size = 0;
+ for (DeoptimizationExit* exit : deoptimization_exits_) {
+ total_size += (exit->kind() == DeoptimizeKind::kLazy)
+ ? Deoptimizer::kLazyDeoptExitSize
+ : Deoptimizer::kNonLazyDeoptExitSize;
+ }
+
+ __ CheckTrampolinePoolQuick(total_size);
+ DCHECK(Deoptimizer::kSupportsFixedDeoptExitSizes);
+}
void CodeGenerator::AssembleMove(InstructionOperand* source,
InstructionOperand* destination) {
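
Note on the RVV lowering added above: the kRiscvF32x4Min/Max cases first build a mask in v0 of lanes where both inputs are ordered (vmfeq of each input with itself, combined with vand_vv), splat the canonical quiet NaN 0x7FC00000 into the vector scratch register, and then run the masked vfmin/vfmax so that any lane with a NaN input keeps the canonical NaN. A minimal scalar sketch of the per-lane semantics, not part of the patch:

    #include <cmath>
    #include <cstdint>
    #include <cstring>

    static float CanonicalNaNF32() {
      const uint32_t kNaN = 0x7FC00000;  // same constant the cases above load
      float f;
      std::memcpy(&f, &kNaN, sizeof(f));
      return f;
    }

    // Reference model of one F32x4Min lane; F32x4Max is the same with the
    // comparison flipped. NaN in either input yields the canonical NaN, and
    // -0.0 is treated as smaller than +0.0, matching RISC-V fmin semantics.
    float F32x4MinLane(float a, float b) {
      if (std::isnan(a) || std::isnan(b)) return CanonicalNaNF32();
      if (a == 0.0f && b == 0.0f) return std::signbit(a) ? a : b;
      return a < b ? a : b;
    }
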
diff --git a/deps/v8/src/compiler/backend/riscv64/instruction-codes-riscv64.h b/deps/v8/src/compiler/backend/riscv64/instruction-codes-riscv64.h
index 0c8d99a8e8..f3aa0f29a8 100644
--- a/deps/v8/src/compiler/backend/riscv64/instruction-codes-riscv64.h
+++ b/deps/v8/src/compiler/backend/riscv64/instruction-codes-riscv64.h
@@ -9,396 +9,400 @@ namespace v8 {
namespace internal {
namespace compiler {
+// Opcodes that support a MemoryAccessMode.
+#define TARGET_ARCH_OPCODE_WITH_MEMORY_ACCESS_MODE_LIST(V) // None.
+
// RISC-V-specific opcodes that specify which assembly sequence to emit.
// Most opcodes specify a single instruction.
-#define TARGET_ARCH_OPCODE_LIST(V) \
- V(RiscvAdd32) \
- V(RiscvAdd64) \
- V(RiscvAddOvf64) \
- V(RiscvSub32) \
- V(RiscvSub64) \
- V(RiscvSubOvf64) \
- V(RiscvMul32) \
- V(RiscvMulOvf32) \
- V(RiscvMulHigh32) \
- V(RiscvMulHigh64) \
- V(RiscvMulHighU32) \
- V(RiscvMul64) \
- V(RiscvDiv32) \
- V(RiscvDiv64) \
- V(RiscvDivU32) \
- V(RiscvDivU64) \
- V(RiscvMod32) \
- V(RiscvMod64) \
- V(RiscvModU32) \
- V(RiscvModU64) \
- V(RiscvAnd) \
- V(RiscvAnd32) \
- V(RiscvOr) \
- V(RiscvOr32) \
- V(RiscvNor) \
- V(RiscvNor32) \
- V(RiscvXor) \
- V(RiscvXor32) \
- V(RiscvClz32) \
- V(RiscvShl32) \
- V(RiscvShr32) \
- V(RiscvSar32) \
- V(RiscvZeroExtendWord) \
- V(RiscvSignExtendWord) \
- V(RiscvClz64) \
- V(RiscvCtz32) \
- V(RiscvCtz64) \
- V(RiscvPopcnt32) \
- V(RiscvPopcnt64) \
- V(RiscvShl64) \
- V(RiscvShr64) \
- V(RiscvSar64) \
- V(RiscvRor32) \
- V(RiscvRor64) \
- V(RiscvMov) \
- V(RiscvTst) \
- V(RiscvCmp) \
- V(RiscvCmpZero) \
- V(RiscvCmpS) \
- V(RiscvAddS) \
- V(RiscvSubS) \
- V(RiscvMulS) \
- V(RiscvDivS) \
- V(RiscvModS) \
- V(RiscvAbsS) \
- V(RiscvNegS) \
- V(RiscvSqrtS) \
- V(RiscvMaxS) \
- V(RiscvMinS) \
- V(RiscvCmpD) \
- V(RiscvAddD) \
- V(RiscvSubD) \
- V(RiscvMulD) \
- V(RiscvDivD) \
- V(RiscvModD) \
- V(RiscvAbsD) \
- V(RiscvNegD) \
- V(RiscvSqrtD) \
- V(RiscvMaxD) \
- V(RiscvMinD) \
- V(RiscvFloat64RoundDown) \
- V(RiscvFloat64RoundTruncate) \
- V(RiscvFloat64RoundUp) \
- V(RiscvFloat64RoundTiesEven) \
- V(RiscvFloat32RoundDown) \
- V(RiscvFloat32RoundTruncate) \
- V(RiscvFloat32RoundUp) \
- V(RiscvFloat32RoundTiesEven) \
- V(RiscvCvtSD) \
- V(RiscvCvtDS) \
- V(RiscvTruncWD) \
- V(RiscvRoundWD) \
- V(RiscvFloorWD) \
- V(RiscvCeilWD) \
- V(RiscvTruncWS) \
- V(RiscvRoundWS) \
- V(RiscvFloorWS) \
- V(RiscvCeilWS) \
- V(RiscvTruncLS) \
- V(RiscvTruncLD) \
- V(RiscvTruncUwD) \
- V(RiscvTruncUwS) \
- V(RiscvTruncUlS) \
- V(RiscvTruncUlD) \
- V(RiscvCvtDW) \
- V(RiscvCvtSL) \
- V(RiscvCvtSW) \
- V(RiscvCvtSUw) \
- V(RiscvCvtSUl) \
- V(RiscvCvtDL) \
- V(RiscvCvtDUw) \
- V(RiscvCvtDUl) \
- V(RiscvLb) \
- V(RiscvLbu) \
- V(RiscvSb) \
- V(RiscvLh) \
- V(RiscvUlh) \
- V(RiscvLhu) \
- V(RiscvUlhu) \
- V(RiscvSh) \
- V(RiscvUsh) \
- V(RiscvLd) \
- V(RiscvUld) \
- V(RiscvLw) \
- V(RiscvUlw) \
- V(RiscvLwu) \
- V(RiscvUlwu) \
- V(RiscvSw) \
- V(RiscvUsw) \
- V(RiscvSd) \
- V(RiscvUsd) \
- V(RiscvLoadFloat) \
- V(RiscvULoadFloat) \
- V(RiscvStoreFloat) \
- V(RiscvUStoreFloat) \
- V(RiscvLoadDouble) \
- V(RiscvULoadDouble) \
- V(RiscvStoreDouble) \
- V(RiscvUStoreDouble) \
- V(RiscvBitcastDL) \
- V(RiscvBitcastLD) \
- V(RiscvBitcastInt32ToFloat32) \
- V(RiscvBitcastFloat32ToInt32) \
- V(RiscvFloat64ExtractLowWord32) \
- V(RiscvFloat64ExtractHighWord32) \
- V(RiscvFloat64InsertLowWord32) \
- V(RiscvFloat64InsertHighWord32) \
- V(RiscvFloat32Max) \
- V(RiscvFloat64Max) \
- V(RiscvFloat32Min) \
- V(RiscvFloat64Min) \
- V(RiscvFloat64SilenceNaN) \
- V(RiscvPush) \
- V(RiscvPeek) \
- V(RiscvByteSwap64) \
- V(RiscvByteSwap32) \
- V(RiscvStoreToStackSlot) \
- V(RiscvStackClaim) \
- V(RiscvSignExtendByte) \
- V(RiscvSignExtendShort) \
- V(RiscvSync) \
- V(RiscvAssertEqual) \
- V(RiscvS128Const) \
- V(RiscvS128Zero) \
- V(RiscvS128AllOnes) \
- V(RiscvI32x4Splat) \
- V(RiscvI32x4ExtractLane) \
- V(RiscvI32x4ReplaceLane) \
- V(RiscvI32x4Add) \
- V(RiscvI32x4Sub) \
- V(RiscvF64x2Abs) \
- V(RiscvF64x2Neg) \
- V(RiscvF32x4Splat) \
- V(RiscvF32x4ExtractLane) \
- V(RiscvF32x4ReplaceLane) \
- V(RiscvF32x4SConvertI32x4) \
- V(RiscvF32x4UConvertI32x4) \
- V(RiscvI64x2SConvertI32x4Low) \
- V(RiscvI64x2SConvertI32x4High) \
- V(RiscvI64x2UConvertI32x4Low) \
- V(RiscvI64x2UConvertI32x4High) \
- V(RiscvI32x4Mul) \
- V(RiscvI32x4MaxS) \
- V(RiscvI32x4MinS) \
- V(RiscvI32x4Eq) \
- V(RiscvI32x4Ne) \
- V(RiscvI32x4Shl) \
- V(RiscvI32x4ShrS) \
- V(RiscvI32x4ShrU) \
- V(RiscvI32x4MaxU) \
- V(RiscvI32x4MinU) \
- V(RiscvI64x2GtS) \
- V(RiscvI64x2GeS) \
- V(RiscvI64x2Eq) \
- V(RiscvI64x2Ne) \
- V(RiscvF64x2Sqrt) \
- V(RiscvF64x2Add) \
- V(RiscvF64x2Sub) \
- V(RiscvF64x2Mul) \
- V(RiscvF64x2Div) \
- V(RiscvF64x2Min) \
- V(RiscvF64x2Max) \
- V(RiscvF64x2ConvertLowI32x4S) \
- V(RiscvF64x2ConvertLowI32x4U) \
- V(RiscvF64x2PromoteLowF32x4) \
- V(RiscvF64x2Eq) \
- V(RiscvF64x2Ne) \
- V(RiscvF64x2Lt) \
- V(RiscvF64x2Le) \
- V(RiscvF64x2Splat) \
- V(RiscvF64x2ExtractLane) \
- V(RiscvF64x2ReplaceLane) \
- V(RiscvF64x2Pmin) \
- V(RiscvF64x2Pmax) \
- V(RiscvF64x2Ceil) \
- V(RiscvF64x2Floor) \
- V(RiscvF64x2Trunc) \
- V(RiscvF64x2NearestInt) \
- V(RiscvI64x2Splat) \
- V(RiscvI64x2ExtractLane) \
- V(RiscvI64x2ReplaceLane) \
- V(RiscvI64x2Add) \
- V(RiscvI64x2Sub) \
- V(RiscvI64x2Mul) \
- V(RiscvI64x2Abs) \
- V(RiscvI64x2Neg) \
- V(RiscvI64x2Shl) \
- V(RiscvI64x2ShrS) \
- V(RiscvI64x2ShrU) \
- V(RiscvI64x2BitMask) \
- V(RiscvF32x4Abs) \
- V(RiscvF32x4Neg) \
- V(RiscvF32x4Sqrt) \
- V(RiscvF32x4RecipApprox) \
- V(RiscvF32x4RecipSqrtApprox) \
- V(RiscvF32x4Add) \
- V(RiscvF32x4Sub) \
- V(RiscvF32x4Mul) \
- V(RiscvF32x4Div) \
- V(RiscvF32x4Max) \
- V(RiscvF32x4Min) \
- V(RiscvF32x4Eq) \
- V(RiscvF32x4Ne) \
- V(RiscvF32x4Lt) \
- V(RiscvF32x4Le) \
- V(RiscvF32x4Pmin) \
- V(RiscvF32x4Pmax) \
- V(RiscvF32x4DemoteF64x2Zero) \
- V(RiscvF32x4Ceil) \
- V(RiscvF32x4Floor) \
- V(RiscvF32x4Trunc) \
- V(RiscvF32x4NearestInt) \
- V(RiscvI32x4SConvertF32x4) \
- V(RiscvI32x4UConvertF32x4) \
- V(RiscvI32x4Neg) \
- V(RiscvI32x4GtS) \
- V(RiscvI32x4GeS) \
- V(RiscvI32x4GtU) \
- V(RiscvI32x4GeU) \
- V(RiscvI32x4Abs) \
- V(RiscvI32x4BitMask) \
- V(RiscvI32x4DotI16x8S) \
- V(RiscvI32x4TruncSatF64x2SZero) \
- V(RiscvI32x4TruncSatF64x2UZero) \
- V(RiscvI16x8Splat) \
- V(RiscvI16x8ExtractLaneU) \
- V(RiscvI16x8ExtractLaneS) \
- V(RiscvI16x8ReplaceLane) \
- V(RiscvI16x8Neg) \
- V(RiscvI16x8Shl) \
- V(RiscvI16x8ShrS) \
- V(RiscvI16x8ShrU) \
- V(RiscvI16x8Add) \
- V(RiscvI16x8AddSatS) \
- V(RiscvI16x8Sub) \
- V(RiscvI16x8SubSatS) \
- V(RiscvI16x8Mul) \
- V(RiscvI16x8MaxS) \
- V(RiscvI16x8MinS) \
- V(RiscvI16x8Eq) \
- V(RiscvI16x8Ne) \
- V(RiscvI16x8GtS) \
- V(RiscvI16x8GeS) \
- V(RiscvI16x8AddSatU) \
- V(RiscvI16x8SubSatU) \
- V(RiscvI16x8MaxU) \
- V(RiscvI16x8MinU) \
- V(RiscvI16x8GtU) \
- V(RiscvI16x8GeU) \
- V(RiscvI16x8RoundingAverageU) \
- V(RiscvI16x8Q15MulRSatS) \
- V(RiscvI16x8Abs) \
- V(RiscvI16x8BitMask) \
- V(RiscvI8x16Splat) \
- V(RiscvI8x16ExtractLaneU) \
- V(RiscvI8x16ExtractLaneS) \
- V(RiscvI8x16ReplaceLane) \
- V(RiscvI8x16Neg) \
- V(RiscvI8x16Shl) \
- V(RiscvI8x16ShrS) \
- V(RiscvI8x16Add) \
- V(RiscvI8x16AddSatS) \
- V(RiscvI8x16Sub) \
- V(RiscvI8x16SubSatS) \
- V(RiscvI8x16MaxS) \
- V(RiscvI8x16MinS) \
- V(RiscvI8x16Eq) \
- V(RiscvI8x16Ne) \
- V(RiscvI8x16GtS) \
- V(RiscvI8x16GeS) \
- V(RiscvI8x16ShrU) \
- V(RiscvI8x16AddSatU) \
- V(RiscvI8x16SubSatU) \
- V(RiscvI8x16MaxU) \
- V(RiscvI8x16MinU) \
- V(RiscvI8x16GtU) \
- V(RiscvI8x16GeU) \
- V(RiscvI8x16RoundingAverageU) \
- V(RiscvI8x16Abs) \
- V(RiscvI8x16BitMask) \
- V(RiscvI8x16Popcnt) \
- V(RiscvS128And) \
- V(RiscvS128Or) \
- V(RiscvS128Xor) \
- V(RiscvS128Not) \
- V(RiscvS128Select) \
- V(RiscvS128AndNot) \
- V(RiscvI32x4AllTrue) \
- V(RiscvI16x8AllTrue) \
- V(RiscvV128AnyTrue) \
- V(RiscvI8x16AllTrue) \
- V(RiscvI64x2AllTrue) \
- V(RiscvS32x4InterleaveRight) \
- V(RiscvS32x4InterleaveLeft) \
- V(RiscvS32x4PackEven) \
- V(RiscvS32x4PackOdd) \
- V(RiscvS32x4InterleaveEven) \
- V(RiscvS32x4InterleaveOdd) \
- V(RiscvS32x4Shuffle) \
- V(RiscvS16x8InterleaveRight) \
- V(RiscvS16x8InterleaveLeft) \
- V(RiscvS16x8PackEven) \
- V(RiscvS16x8PackOdd) \
- V(RiscvS16x8InterleaveEven) \
- V(RiscvS16x8InterleaveOdd) \
- V(RiscvS16x4Reverse) \
- V(RiscvS16x2Reverse) \
- V(RiscvS8x16InterleaveRight) \
- V(RiscvS8x16InterleaveLeft) \
- V(RiscvS8x16PackEven) \
- V(RiscvS8x16PackOdd) \
- V(RiscvS8x16InterleaveEven) \
- V(RiscvS8x16InterleaveOdd) \
- V(RiscvI8x16Shuffle) \
- V(RiscvI8x16Swizzle) \
- V(RiscvS8x16Concat) \
- V(RiscvS8x8Reverse) \
- V(RiscvS8x4Reverse) \
- V(RiscvS8x2Reverse) \
- V(RiscvS128Load8Splat) \
- V(RiscvS128Load16Splat) \
- V(RiscvS128Load32Splat) \
- V(RiscvS128Load64Splat) \
- V(RiscvS128Load8x8S) \
- V(RiscvS128Load8x8U) \
- V(RiscvS128Load16x4S) \
- V(RiscvS128Load16x4U) \
- V(RiscvS128Load32x2S) \
- V(RiscvS128Load32x2U) \
- V(RiscvS128LoadLane) \
- V(RiscvS128StoreLane) \
- V(RiscvRvvLd) \
- V(RiscvRvvSt) \
- V(RiscvI32x4SConvertI16x8Low) \
- V(RiscvI32x4SConvertI16x8High) \
- V(RiscvI32x4UConvertI16x8Low) \
- V(RiscvI32x4UConvertI16x8High) \
- V(RiscvI16x8SConvertI8x16Low) \
- V(RiscvI16x8SConvertI8x16High) \
- V(RiscvI16x8SConvertI32x4) \
- V(RiscvI16x8UConvertI32x4) \
- V(RiscvI16x8UConvertI8x16Low) \
- V(RiscvI16x8UConvertI8x16High) \
- V(RiscvI8x16SConvertI16x8) \
- V(RiscvI8x16UConvertI16x8) \
- V(RiscvWord64AtomicLoadUint64) \
- V(RiscvWord64AtomicStoreWord64) \
- V(RiscvWord64AtomicAddUint64) \
- V(RiscvWord64AtomicSubUint64) \
- V(RiscvWord64AtomicAndUint64) \
- V(RiscvWord64AtomicOrUint64) \
- V(RiscvWord64AtomicXorUint64) \
- V(RiscvWord64AtomicExchangeUint64) \
- V(RiscvWord64AtomicCompareExchangeUint64) \
- V(RiscvStoreCompressTagged) \
- V(RiscvLoadDecompressTaggedSigned) \
- V(RiscvLoadDecompressTaggedPointer) \
+#define TARGET_ARCH_OPCODE_LIST(V) \
+ TARGET_ARCH_OPCODE_WITH_MEMORY_ACCESS_MODE_LIST(V) \
+ V(RiscvAdd32) \
+ V(RiscvAdd64) \
+ V(RiscvAddOvf64) \
+ V(RiscvSub32) \
+ V(RiscvSub64) \
+ V(RiscvSubOvf64) \
+ V(RiscvMul32) \
+ V(RiscvMulOvf32) \
+ V(RiscvMulHigh32) \
+ V(RiscvMulHigh64) \
+ V(RiscvMulHighU32) \
+ V(RiscvMul64) \
+ V(RiscvDiv32) \
+ V(RiscvDiv64) \
+ V(RiscvDivU32) \
+ V(RiscvDivU64) \
+ V(RiscvMod32) \
+ V(RiscvMod64) \
+ V(RiscvModU32) \
+ V(RiscvModU64) \
+ V(RiscvAnd) \
+ V(RiscvAnd32) \
+ V(RiscvOr) \
+ V(RiscvOr32) \
+ V(RiscvNor) \
+ V(RiscvNor32) \
+ V(RiscvXor) \
+ V(RiscvXor32) \
+ V(RiscvClz32) \
+ V(RiscvShl32) \
+ V(RiscvShr32) \
+ V(RiscvSar32) \
+ V(RiscvZeroExtendWord) \
+ V(RiscvSignExtendWord) \
+ V(RiscvClz64) \
+ V(RiscvCtz32) \
+ V(RiscvCtz64) \
+ V(RiscvPopcnt32) \
+ V(RiscvPopcnt64) \
+ V(RiscvShl64) \
+ V(RiscvShr64) \
+ V(RiscvSar64) \
+ V(RiscvRor32) \
+ V(RiscvRor64) \
+ V(RiscvMov) \
+ V(RiscvTst) \
+ V(RiscvCmp) \
+ V(RiscvCmpZero) \
+ V(RiscvCmpS) \
+ V(RiscvAddS) \
+ V(RiscvSubS) \
+ V(RiscvMulS) \
+ V(RiscvDivS) \
+ V(RiscvModS) \
+ V(RiscvAbsS) \
+ V(RiscvNegS) \
+ V(RiscvSqrtS) \
+ V(RiscvMaxS) \
+ V(RiscvMinS) \
+ V(RiscvCmpD) \
+ V(RiscvAddD) \
+ V(RiscvSubD) \
+ V(RiscvMulD) \
+ V(RiscvDivD) \
+ V(RiscvModD) \
+ V(RiscvAbsD) \
+ V(RiscvNegD) \
+ V(RiscvSqrtD) \
+ V(RiscvMaxD) \
+ V(RiscvMinD) \
+ V(RiscvFloat64RoundDown) \
+ V(RiscvFloat64RoundTruncate) \
+ V(RiscvFloat64RoundUp) \
+ V(RiscvFloat64RoundTiesEven) \
+ V(RiscvFloat32RoundDown) \
+ V(RiscvFloat32RoundTruncate) \
+ V(RiscvFloat32RoundUp) \
+ V(RiscvFloat32RoundTiesEven) \
+ V(RiscvCvtSD) \
+ V(RiscvCvtDS) \
+ V(RiscvTruncWD) \
+ V(RiscvRoundWD) \
+ V(RiscvFloorWD) \
+ V(RiscvCeilWD) \
+ V(RiscvTruncWS) \
+ V(RiscvRoundWS) \
+ V(RiscvFloorWS) \
+ V(RiscvCeilWS) \
+ V(RiscvTruncLS) \
+ V(RiscvTruncLD) \
+ V(RiscvTruncUwD) \
+ V(RiscvTruncUwS) \
+ V(RiscvTruncUlS) \
+ V(RiscvTruncUlD) \
+ V(RiscvCvtDW) \
+ V(RiscvCvtSL) \
+ V(RiscvCvtSW) \
+ V(RiscvCvtSUw) \
+ V(RiscvCvtSUl) \
+ V(RiscvCvtDL) \
+ V(RiscvCvtDUw) \
+ V(RiscvCvtDUl) \
+ V(RiscvLb) \
+ V(RiscvLbu) \
+ V(RiscvSb) \
+ V(RiscvLh) \
+ V(RiscvUlh) \
+ V(RiscvLhu) \
+ V(RiscvUlhu) \
+ V(RiscvSh) \
+ V(RiscvUsh) \
+ V(RiscvLd) \
+ V(RiscvUld) \
+ V(RiscvLw) \
+ V(RiscvUlw) \
+ V(RiscvLwu) \
+ V(RiscvUlwu) \
+ V(RiscvSw) \
+ V(RiscvUsw) \
+ V(RiscvSd) \
+ V(RiscvUsd) \
+ V(RiscvLoadFloat) \
+ V(RiscvULoadFloat) \
+ V(RiscvStoreFloat) \
+ V(RiscvUStoreFloat) \
+ V(RiscvLoadDouble) \
+ V(RiscvULoadDouble) \
+ V(RiscvStoreDouble) \
+ V(RiscvUStoreDouble) \
+ V(RiscvBitcastDL) \
+ V(RiscvBitcastLD) \
+ V(RiscvBitcastInt32ToFloat32) \
+ V(RiscvBitcastFloat32ToInt32) \
+ V(RiscvFloat64ExtractLowWord32) \
+ V(RiscvFloat64ExtractHighWord32) \
+ V(RiscvFloat64InsertLowWord32) \
+ V(RiscvFloat64InsertHighWord32) \
+ V(RiscvFloat32Max) \
+ V(RiscvFloat64Max) \
+ V(RiscvFloat32Min) \
+ V(RiscvFloat64Min) \
+ V(RiscvFloat64SilenceNaN) \
+ V(RiscvPush) \
+ V(RiscvPeek) \
+ V(RiscvByteSwap64) \
+ V(RiscvByteSwap32) \
+ V(RiscvStoreToStackSlot) \
+ V(RiscvStackClaim) \
+ V(RiscvSignExtendByte) \
+ V(RiscvSignExtendShort) \
+ V(RiscvSync) \
+ V(RiscvAssertEqual) \
+ V(RiscvS128Const) \
+ V(RiscvS128Zero) \
+ V(RiscvS128AllOnes) \
+ V(RiscvI32x4Splat) \
+ V(RiscvI32x4ExtractLane) \
+ V(RiscvI32x4ReplaceLane) \
+ V(RiscvI32x4Add) \
+ V(RiscvI32x4Sub) \
+ V(RiscvF64x2Abs) \
+ V(RiscvF64x2Neg) \
+ V(RiscvF32x4Splat) \
+ V(RiscvF32x4ExtractLane) \
+ V(RiscvF32x4ReplaceLane) \
+ V(RiscvF32x4SConvertI32x4) \
+ V(RiscvF32x4UConvertI32x4) \
+ V(RiscvI64x2SConvertI32x4Low) \
+ V(RiscvI64x2SConvertI32x4High) \
+ V(RiscvI64x2UConvertI32x4Low) \
+ V(RiscvI64x2UConvertI32x4High) \
+ V(RiscvI32x4Mul) \
+ V(RiscvI32x4MaxS) \
+ V(RiscvI32x4MinS) \
+ V(RiscvI32x4Eq) \
+ V(RiscvI32x4Ne) \
+ V(RiscvI32x4Shl) \
+ V(RiscvI32x4ShrS) \
+ V(RiscvI32x4ShrU) \
+ V(RiscvI32x4MaxU) \
+ V(RiscvI32x4MinU) \
+ V(RiscvI64x2GtS) \
+ V(RiscvI64x2GeS) \
+ V(RiscvI64x2Eq) \
+ V(RiscvI64x2Ne) \
+ V(RiscvF64x2Sqrt) \
+ V(RiscvF64x2Add) \
+ V(RiscvF64x2Sub) \
+ V(RiscvF64x2Mul) \
+ V(RiscvF64x2Div) \
+ V(RiscvF64x2Min) \
+ V(RiscvF64x2Max) \
+ V(RiscvF64x2ConvertLowI32x4S) \
+ V(RiscvF64x2ConvertLowI32x4U) \
+ V(RiscvF64x2PromoteLowF32x4) \
+ V(RiscvF64x2Eq) \
+ V(RiscvF64x2Ne) \
+ V(RiscvF64x2Lt) \
+ V(RiscvF64x2Le) \
+ V(RiscvF64x2Splat) \
+ V(RiscvF64x2ExtractLane) \
+ V(RiscvF64x2ReplaceLane) \
+ V(RiscvF64x2Pmin) \
+ V(RiscvF64x2Pmax) \
+ V(RiscvF64x2Ceil) \
+ V(RiscvF64x2Floor) \
+ V(RiscvF64x2Trunc) \
+ V(RiscvF64x2NearestInt) \
+ V(RiscvI64x2Splat) \
+ V(RiscvI64x2ExtractLane) \
+ V(RiscvI64x2ReplaceLane) \
+ V(RiscvI64x2Add) \
+ V(RiscvI64x2Sub) \
+ V(RiscvI64x2Mul) \
+ V(RiscvI64x2Abs) \
+ V(RiscvI64x2Neg) \
+ V(RiscvI64x2Shl) \
+ V(RiscvI64x2ShrS) \
+ V(RiscvI64x2ShrU) \
+ V(RiscvI64x2BitMask) \
+ V(RiscvF32x4Abs) \
+ V(RiscvF32x4Neg) \
+ V(RiscvF32x4Sqrt) \
+ V(RiscvF32x4RecipApprox) \
+ V(RiscvF32x4RecipSqrtApprox) \
+ V(RiscvF32x4Add) \
+ V(RiscvF32x4Sub) \
+ V(RiscvF32x4Mul) \
+ V(RiscvF32x4Div) \
+ V(RiscvF32x4Max) \
+ V(RiscvF32x4Min) \
+ V(RiscvF32x4Eq) \
+ V(RiscvF32x4Ne) \
+ V(RiscvF32x4Lt) \
+ V(RiscvF32x4Le) \
+ V(RiscvF32x4Pmin) \
+ V(RiscvF32x4Pmax) \
+ V(RiscvF32x4DemoteF64x2Zero) \
+ V(RiscvF32x4Ceil) \
+ V(RiscvF32x4Floor) \
+ V(RiscvF32x4Trunc) \
+ V(RiscvF32x4NearestInt) \
+ V(RiscvI32x4SConvertF32x4) \
+ V(RiscvI32x4UConvertF32x4) \
+ V(RiscvI32x4Neg) \
+ V(RiscvI32x4GtS) \
+ V(RiscvI32x4GeS) \
+ V(RiscvI32x4GtU) \
+ V(RiscvI32x4GeU) \
+ V(RiscvI32x4Abs) \
+ V(RiscvI32x4BitMask) \
+ V(RiscvI32x4DotI16x8S) \
+ V(RiscvI32x4TruncSatF64x2SZero) \
+ V(RiscvI32x4TruncSatF64x2UZero) \
+ V(RiscvI16x8Splat) \
+ V(RiscvI16x8ExtractLaneU) \
+ V(RiscvI16x8ExtractLaneS) \
+ V(RiscvI16x8ReplaceLane) \
+ V(RiscvI16x8Neg) \
+ V(RiscvI16x8Shl) \
+ V(RiscvI16x8ShrS) \
+ V(RiscvI16x8ShrU) \
+ V(RiscvI16x8Add) \
+ V(RiscvI16x8AddSatS) \
+ V(RiscvI16x8Sub) \
+ V(RiscvI16x8SubSatS) \
+ V(RiscvI16x8Mul) \
+ V(RiscvI16x8MaxS) \
+ V(RiscvI16x8MinS) \
+ V(RiscvI16x8Eq) \
+ V(RiscvI16x8Ne) \
+ V(RiscvI16x8GtS) \
+ V(RiscvI16x8GeS) \
+ V(RiscvI16x8AddSatU) \
+ V(RiscvI16x8SubSatU) \
+ V(RiscvI16x8MaxU) \
+ V(RiscvI16x8MinU) \
+ V(RiscvI16x8GtU) \
+ V(RiscvI16x8GeU) \
+ V(RiscvI16x8RoundingAverageU) \
+ V(RiscvI16x8Q15MulRSatS) \
+ V(RiscvI16x8Abs) \
+ V(RiscvI16x8BitMask) \
+ V(RiscvI8x16Splat) \
+ V(RiscvI8x16ExtractLaneU) \
+ V(RiscvI8x16ExtractLaneS) \
+ V(RiscvI8x16ReplaceLane) \
+ V(RiscvI8x16Neg) \
+ V(RiscvI8x16Shl) \
+ V(RiscvI8x16ShrS) \
+ V(RiscvI8x16Add) \
+ V(RiscvI8x16AddSatS) \
+ V(RiscvI8x16Sub) \
+ V(RiscvI8x16SubSatS) \
+ V(RiscvI8x16MaxS) \
+ V(RiscvI8x16MinS) \
+ V(RiscvI8x16Eq) \
+ V(RiscvI8x16Ne) \
+ V(RiscvI8x16GtS) \
+ V(RiscvI8x16GeS) \
+ V(RiscvI8x16ShrU) \
+ V(RiscvI8x16AddSatU) \
+ V(RiscvI8x16SubSatU) \
+ V(RiscvI8x16MaxU) \
+ V(RiscvI8x16MinU) \
+ V(RiscvI8x16GtU) \
+ V(RiscvI8x16GeU) \
+ V(RiscvI8x16RoundingAverageU) \
+ V(RiscvI8x16Abs) \
+ V(RiscvI8x16BitMask) \
+ V(RiscvI8x16Popcnt) \
+ V(RiscvS128And) \
+ V(RiscvS128Or) \
+ V(RiscvS128Xor) \
+ V(RiscvS128Not) \
+ V(RiscvS128Select) \
+ V(RiscvS128AndNot) \
+ V(RiscvI32x4AllTrue) \
+ V(RiscvI16x8AllTrue) \
+ V(RiscvV128AnyTrue) \
+ V(RiscvI8x16AllTrue) \
+ V(RiscvI64x2AllTrue) \
+ V(RiscvS32x4InterleaveRight) \
+ V(RiscvS32x4InterleaveLeft) \
+ V(RiscvS32x4PackEven) \
+ V(RiscvS32x4PackOdd) \
+ V(RiscvS32x4InterleaveEven) \
+ V(RiscvS32x4InterleaveOdd) \
+ V(RiscvS32x4Shuffle) \
+ V(RiscvS16x8InterleaveRight) \
+ V(RiscvS16x8InterleaveLeft) \
+ V(RiscvS16x8PackEven) \
+ V(RiscvS16x8PackOdd) \
+ V(RiscvS16x8InterleaveEven) \
+ V(RiscvS16x8InterleaveOdd) \
+ V(RiscvS16x4Reverse) \
+ V(RiscvS16x2Reverse) \
+ V(RiscvS8x16InterleaveRight) \
+ V(RiscvS8x16InterleaveLeft) \
+ V(RiscvS8x16PackEven) \
+ V(RiscvS8x16PackOdd) \
+ V(RiscvS8x16InterleaveEven) \
+ V(RiscvS8x16InterleaveOdd) \
+ V(RiscvI8x16Shuffle) \
+ V(RiscvI8x16Swizzle) \
+ V(RiscvS8x16Concat) \
+ V(RiscvS8x8Reverse) \
+ V(RiscvS8x4Reverse) \
+ V(RiscvS8x2Reverse) \
+ V(RiscvS128Load8Splat) \
+ V(RiscvS128Load16Splat) \
+ V(RiscvS128Load32Splat) \
+ V(RiscvS128Load64Splat) \
+ V(RiscvS128Load8x8S) \
+ V(RiscvS128Load8x8U) \
+ V(RiscvS128Load16x4S) \
+ V(RiscvS128Load16x4U) \
+ V(RiscvS128Load32x2S) \
+ V(RiscvS128Load32x2U) \
+ V(RiscvS128LoadLane) \
+ V(RiscvS128StoreLane) \
+ V(RiscvRvvLd) \
+ V(RiscvRvvSt) \
+ V(RiscvI32x4SConvertI16x8Low) \
+ V(RiscvI32x4SConvertI16x8High) \
+ V(RiscvI32x4UConvertI16x8Low) \
+ V(RiscvI32x4UConvertI16x8High) \
+ V(RiscvI16x8SConvertI8x16Low) \
+ V(RiscvI16x8SConvertI8x16High) \
+ V(RiscvI16x8SConvertI32x4) \
+ V(RiscvI16x8UConvertI32x4) \
+ V(RiscvI16x8UConvertI8x16Low) \
+ V(RiscvI16x8UConvertI8x16High) \
+ V(RiscvI8x16SConvertI16x8) \
+ V(RiscvI8x16UConvertI16x8) \
+ V(RiscvWord64AtomicLoadUint64) \
+ V(RiscvWord64AtomicStoreWord64) \
+ V(RiscvWord64AtomicAddUint64) \
+ V(RiscvWord64AtomicSubUint64) \
+ V(RiscvWord64AtomicAndUint64) \
+ V(RiscvWord64AtomicOrUint64) \
+ V(RiscvWord64AtomicXorUint64) \
+ V(RiscvWord64AtomicExchangeUint64) \
+ V(RiscvWord64AtomicCompareExchangeUint64) \
+ V(RiscvStoreCompressTagged) \
+ V(RiscvLoadDecompressTaggedSigned) \
+ V(RiscvLoadDecompressTaggedPointer) \
V(RiscvLoadDecompressAnyTagged)
// Addressing modes represent the "shape" of inputs to an instruction.
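
The header change above follows the X-macro pattern shared by all backends: TARGET_ARCH_OPCODE_WITH_MEMORY_ACCESS_MODE_LIST is spliced in ahead of the main list (it is empty on riscv64, hence "// None."), so any opcodes that do support a MemoryAccessMode end up as a contiguous block at the front of the generated enum. A standalone sketch of the expansion, using hypothetical Demo names rather than V8's:

    #include <cstdio>

    #define DEMO_OPCODE_WITH_MEM_ACCESS_LIST(V) V(DemoLoad) V(DemoStore)
    #define DEMO_OPCODE_LIST(V) \
      DEMO_OPCODE_WITH_MEM_ACCESS_LIST(V) V(DemoAdd) V(DemoSub)

    enum DemoOpcode {
    #define DECLARE(Name) k##Name,
      DEMO_OPCODE_LIST(DECLARE)
    #undef DECLARE
      kDemoOpcodeCount
    };

    const char* DemoOpcodeName(DemoOpcode op) {
      static const char* const kNames[] = {
    #define NAME(Name) #Name,
          DEMO_OPCODE_LIST(NAME)
    #undef NAME
      };
      return op < kDemoOpcodeCount ? kNames[op] : "unknown";
    }

    int main() {
      // The memory-access opcodes come first: kDemoLoad == 0, kDemoStore == 1.
      std::printf("%d opcodes, first: %s\n", kDemoOpcodeCount,
                  DemoOpcodeName(kDemoLoad));
    }
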
diff --git a/deps/v8/src/compiler/backend/riscv64/instruction-scheduler-riscv64.cc b/deps/v8/src/compiler/backend/riscv64/instruction-scheduler-riscv64.cc
index 471628b1f8..54d9a98663 100644
--- a/deps/v8/src/compiler/backend/riscv64/instruction-scheduler-riscv64.cc
+++ b/deps/v8/src/compiler/backend/riscv64/instruction-scheduler-riscv64.cc
@@ -1117,7 +1117,7 @@ int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
return AssembleArchJumpLatency();
case kArchTableSwitch:
return AssembleArchTableSwitchLatency();
- case kArchAbortCSAAssert:
+ case kArchAbortCSADcheck:
return CallLatency() + 1;
case kArchDebugBreak:
return 1;
diff --git a/deps/v8/src/compiler/backend/riscv64/instruction-selector-riscv64.cc b/deps/v8/src/compiler/backend/riscv64/instruction-selector-riscv64.cc
index 85d61aa02f..6fc64256ec 100644
--- a/deps/v8/src/compiler/backend/riscv64/instruction-selector-riscv64.cc
+++ b/deps/v8/src/compiler/backend/riscv64/instruction-selector-riscv64.cc
@@ -363,9 +363,9 @@ void InstructionSelector::VisitStackSlot(Node* node) {
sequence()->AddImmediate(Constant(alignment)), 0, nullptr);
}
-void InstructionSelector::VisitAbortCSAAssert(Node* node) {
+void InstructionSelector::VisitAbortCSADcheck(Node* node) {
RiscvOperandGenerator g(this);
- Emit(kArchAbortCSAAssert, g.NoOutput(), g.UseFixed(node->InputAt(0), a0));
+ Emit(kArchAbortCSADcheck, g.NoOutput(), g.UseFixed(node->InputAt(0), a0));
}
void EmitLoad(InstructionSelector* selector, Node* node, InstructionCode opcode,
@@ -454,7 +454,7 @@ void InstructionSelector::VisitLoad(Node* node) {
opcode = load_rep.IsUnsigned() ? kRiscvLhu : kRiscvLh;
break;
case MachineRepresentation::kWord32:
- opcode = load_rep.IsUnsigned() ? kRiscvLwu : kRiscvLw;
+ opcode = kRiscvLw;
break;
#ifdef V8_COMPRESS_POINTERS
case MachineRepresentation::kTaggedSigned:
@@ -1287,7 +1287,6 @@ bool InstructionSelector::ZeroExtendsWord32ToWord64NoPhis(Node* node) {
switch (load_rep.representation()) {
case MachineRepresentation::kWord8:
case MachineRepresentation::kWord16:
- case MachineRepresentation::kWord32:
return true;
default:
return false;
@@ -1623,7 +1622,7 @@ void InstructionSelector::VisitUnalignedLoad(Node* node) {
opcode = load_rep.IsUnsigned() ? kRiscvUlhu : kRiscvUlh;
break;
case MachineRepresentation::kWord32:
- opcode = load_rep.IsUnsigned() ? kRiscvUlwu : kRiscvUlw;
+ opcode = kRiscvUlw;
break;
case MachineRepresentation::kTaggedSigned: // Fall through.
case MachineRepresentation::kTaggedPointer: // Fall through.
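
For context on the two riscv64 selector hunks above: RV64 keeps 32-bit values sign-extended in 64-bit registers, so a word32 load now always emits kRiscvLw (sign-extending lw) instead of choosing lwu for unsigned loads, and a 32-bit load can no longer be reported as already zero-extended in ZeroExtendsWord32ToWord64NoPhis. A standalone sketch of the distinction, not V8 code:

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    // Behaves like lw: the 32-bit value is sign-extended to 64 bits.
    int64_t LoadWordSignExtended(const void* p) {
      int32_t v;
      std::memcpy(&v, p, sizeof(v));
      return static_cast<int64_t>(v);
    }

    // Behaves like lwu: the 32-bit value is zero-extended to 64 bits.
    uint64_t LoadWordZeroExtended(const void* p) {
      uint32_t v;
      std::memcpy(&v, p, sizeof(v));
      return static_cast<uint64_t>(v);
    }

    int main() {
      int32_t minus_one = -1;
      std::printf("lw:  %016llx\n", static_cast<unsigned long long>(
                                        LoadWordSignExtended(&minus_one)));
      std::printf("lwu: %016llx\n", static_cast<unsigned long long>(
                                        LoadWordZeroExtended(&minus_one)));
      // lw:  ffffffffffffffff
      // lwu: 00000000ffffffff
    }
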
diff --git a/deps/v8/src/compiler/backend/s390/code-generator-s390.cc b/deps/v8/src/compiler/backend/s390/code-generator-s390.cc
index 3c2c3d6c06..e58a0ed576 100644
--- a/deps/v8/src/compiler/backend/s390/code-generator-s390.cc
+++ b/deps/v8/src/compiler/backend/s390/code-generator-s390.cc
@@ -1174,8 +1174,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kArchPrepareCallCFunction: {
- int const num_parameters = MiscField::decode(instr->opcode());
- __ PrepareCallCFunction(num_parameters, kScratchReg);
+ int const num_gp_parameters = ParamField::decode(instr->opcode());
+ int const num_fp_parameters = FPParamField::decode(instr->opcode());
+ __ PrepareCallCFunction(num_gp_parameters + num_fp_parameters,
+ kScratchReg);
// Frame alignment requires using FP-relative frame addressing.
frame_access_state()->SetFrameAccessToFP();
break;
@@ -1211,7 +1213,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
AssemblePrepareTailCall();
break;
case kArchCallCFunction: {
- int const num_parameters = MiscField::decode(instr->opcode());
+ int const num_gp_parameters = ParamField::decode(instr->opcode());
+ int const num_fp_parameters = FPParamField::decode(instr->opcode());
Label return_location;
// Put the return address in a stack slot.
#if V8_ENABLE_WEBASSEMBLY
@@ -1224,10 +1227,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
#endif // V8_ENABLE_WEBASSEMBLY
if (instr->InputAt(0)->IsImmediate()) {
ExternalReference ref = i.InputExternalReference(0);
- __ CallCFunction(ref, num_parameters);
+ __ CallCFunction(ref, num_gp_parameters, num_fp_parameters);
} else {
Register func = i.InputRegister(0);
- __ CallCFunction(func, num_parameters);
+ __ CallCFunction(func, num_gp_parameters, num_fp_parameters);
}
__ bind(&return_location);
#if V8_ENABLE_WEBASSEMBLY
@@ -1263,13 +1266,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArchTableSwitch:
AssembleArchTableSwitch(instr);
break;
- case kArchAbortCSAAssert:
+ case kArchAbortCSADcheck:
DCHECK(i.InputRegister(0) == r3);
{
// We don't actually want to generate a pile of code for this, so just
// claim there is a stack frame, without generating one.
- FrameScope scope(tasm(), StackFrame::NONE);
- __ Call(isolate()->builtins()->code_handle(Builtin::kAbortCSAAssert),
+ FrameScope scope(tasm(), StackFrame::NO_FRAME_TYPE);
+ __ Call(isolate()->builtins()->code_handle(Builtin::kAbortCSADcheck),
RelocInfo::CODE_TARGET);
}
__ stop();
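
The kArchPrepareCallCFunction and kArchCallCFunction hunks above stop packing a single parameter count into MiscField and instead decode separate general-purpose and floating-point counts (ParamField / FPParamField) from the instruction code; the s390 hunk passes both counts through to CallCFunction, while the x64 hunk further down sums them. A simplified sketch of that encode/decode shape; the field positions here are illustrative, not V8's actual layout:

    #include <cstdint>
    #include <cstdio>

    template <int kShift, int kSize>
    struct DemoBitField {
      static constexpr uint32_t kMask = ((1u << kSize) - 1u) << kShift;
      static constexpr uint32_t encode(uint32_t value) {
        return (value << kShift) & kMask;
      }
      static constexpr uint32_t decode(uint32_t word) {
        return (word & kMask) >> kShift;
      }
    };

    using DemoParamField = DemoBitField<22, 5>;    // general-purpose arg count
    using DemoFPParamField = DemoBitField<27, 5>;  // floating-point arg count

    int main() {
      uint32_t opcode = DemoParamField::encode(3) | DemoFPParamField::encode(2);
      uint32_t gp = DemoParamField::decode(opcode);
      uint32_t fp = DemoFPParamField::decode(opcode);
      std::printf("gp=%u fp=%u total=%u\n", gp, fp, gp + fp);
    }
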
diff --git a/deps/v8/src/compiler/backend/s390/instruction-codes-s390.h b/deps/v8/src/compiler/backend/s390/instruction-codes-s390.h
index 03806b57b1..7dcd7212c9 100644
--- a/deps/v8/src/compiler/backend/s390/instruction-codes-s390.h
+++ b/deps/v8/src/compiler/backend/s390/instruction-codes-s390.h
@@ -11,392 +11,397 @@ namespace compiler {
// S390-specific opcodes that specify which assembly sequence to emit.
// Most opcodes specify a single instruction.
-#define TARGET_ARCH_OPCODE_LIST(V) \
- V(S390_Peek) \
- V(S390_Abs32) \
- V(S390_Abs64) \
- V(S390_And32) \
- V(S390_And64) \
- V(S390_Or32) \
- V(S390_Or64) \
- V(S390_Xor32) \
- V(S390_Xor64) \
- V(S390_ShiftLeft32) \
- V(S390_ShiftLeft64) \
- V(S390_ShiftRight32) \
- V(S390_ShiftRight64) \
- V(S390_ShiftRightArith32) \
- V(S390_ShiftRightArith64) \
- V(S390_RotRight32) \
- V(S390_RotRight64) \
- V(S390_Not32) \
- V(S390_Not64) \
- V(S390_RotLeftAndClear64) \
- V(S390_RotLeftAndClearLeft64) \
- V(S390_RotLeftAndClearRight64) \
- V(S390_Lay) \
- V(S390_Add32) \
- V(S390_Add64) \
- V(S390_AddFloat) \
- V(S390_AddDouble) \
- V(S390_Sub32) \
- V(S390_Sub64) \
- V(S390_SubFloat) \
- V(S390_SubDouble) \
- V(S390_Mul32) \
- V(S390_Mul32WithOverflow) \
- V(S390_Mul64) \
- V(S390_MulHigh32) \
- V(S390_MulHighU32) \
- V(S390_MulFloat) \
- V(S390_MulDouble) \
- V(S390_Div32) \
- V(S390_Div64) \
- V(S390_DivU32) \
- V(S390_DivU64) \
- V(S390_DivFloat) \
- V(S390_DivDouble) \
- V(S390_Mod32) \
- V(S390_Mod64) \
- V(S390_ModU32) \
- V(S390_ModU64) \
- V(S390_ModDouble) \
- V(S390_Neg32) \
- V(S390_Neg64) \
- V(S390_NegDouble) \
- V(S390_NegFloat) \
- V(S390_SqrtFloat) \
- V(S390_FloorFloat) \
- V(S390_CeilFloat) \
- V(S390_TruncateFloat) \
- V(S390_FloatNearestInt) \
- V(S390_AbsFloat) \
- V(S390_SqrtDouble) \
- V(S390_FloorDouble) \
- V(S390_CeilDouble) \
- V(S390_TruncateDouble) \
- V(S390_RoundDouble) \
- V(S390_DoubleNearestInt) \
- V(S390_MaxFloat) \
- V(S390_MaxDouble) \
- V(S390_MinFloat) \
- V(S390_MinDouble) \
- V(S390_AbsDouble) \
- V(S390_Cntlz32) \
- V(S390_Cntlz64) \
- V(S390_Popcnt32) \
- V(S390_Popcnt64) \
- V(S390_Cmp32) \
- V(S390_Cmp64) \
- V(S390_CmpFloat) \
- V(S390_CmpDouble) \
- V(S390_Tst32) \
- V(S390_Tst64) \
- V(S390_Push) \
- V(S390_PushFrame) \
- V(S390_StoreToStackSlot) \
- V(S390_SignExtendWord8ToInt32) \
- V(S390_SignExtendWord16ToInt32) \
- V(S390_SignExtendWord8ToInt64) \
- V(S390_SignExtendWord16ToInt64) \
- V(S390_SignExtendWord32ToInt64) \
- V(S390_Uint32ToUint64) \
- V(S390_Int64ToInt32) \
- V(S390_Int64ToFloat32) \
- V(S390_Int64ToDouble) \
- V(S390_Uint64ToFloat32) \
- V(S390_Uint64ToDouble) \
- V(S390_Int32ToFloat32) \
- V(S390_Int32ToDouble) \
- V(S390_Uint32ToFloat32) \
- V(S390_Uint32ToDouble) \
- V(S390_Float32ToInt64) \
- V(S390_Float32ToUint64) \
- V(S390_Float32ToInt32) \
- V(S390_Float32ToUint32) \
- V(S390_Float32ToDouble) \
- V(S390_Float64SilenceNaN) \
- V(S390_DoubleToInt32) \
- V(S390_DoubleToUint32) \
- V(S390_DoubleToInt64) \
- V(S390_DoubleToUint64) \
- V(S390_DoubleToFloat32) \
- V(S390_DoubleExtractLowWord32) \
- V(S390_DoubleExtractHighWord32) \
- V(S390_DoubleInsertLowWord32) \
- V(S390_DoubleInsertHighWord32) \
- V(S390_DoubleConstruct) \
- V(S390_BitcastInt32ToFloat32) \
- V(S390_BitcastFloat32ToInt32) \
- V(S390_BitcastInt64ToDouble) \
- V(S390_BitcastDoubleToInt64) \
- V(S390_LoadWordS8) \
- V(S390_LoadWordU8) \
- V(S390_LoadWordS16) \
- V(S390_LoadWordU16) \
- V(S390_LoadWordS32) \
- V(S390_LoadWordU32) \
- V(S390_LoadAndTestWord32) \
- V(S390_LoadAndTestWord64) \
- V(S390_LoadAndTestFloat32) \
- V(S390_LoadAndTestFloat64) \
- V(S390_LoadReverse16RR) \
- V(S390_LoadReverse32RR) \
- V(S390_LoadReverse64RR) \
- V(S390_LoadReverseSimd128RR) \
- V(S390_LoadReverseSimd128) \
- V(S390_LoadReverse16) \
- V(S390_LoadReverse32) \
- V(S390_LoadReverse64) \
- V(S390_LoadWord64) \
- V(S390_LoadFloat32) \
- V(S390_LoadDouble) \
- V(S390_StoreWord8) \
- V(S390_StoreWord16) \
- V(S390_StoreWord32) \
- V(S390_StoreWord64) \
- V(S390_StoreReverse16) \
- V(S390_StoreReverse32) \
- V(S390_StoreReverse64) \
- V(S390_StoreReverseSimd128) \
- V(S390_StoreFloat32) \
- V(S390_StoreDouble) \
- V(S390_Word64AtomicExchangeUint64) \
- V(S390_Word64AtomicCompareExchangeUint64) \
- V(S390_Word64AtomicAddUint64) \
- V(S390_Word64AtomicSubUint64) \
- V(S390_Word64AtomicAndUint64) \
- V(S390_Word64AtomicOrUint64) \
- V(S390_Word64AtomicXorUint64) \
- V(S390_F64x2Splat) \
- V(S390_F64x2ReplaceLane) \
- V(S390_F64x2Abs) \
- V(S390_F64x2Neg) \
- V(S390_F64x2Sqrt) \
- V(S390_F64x2Add) \
- V(S390_F64x2Sub) \
- V(S390_F64x2Mul) \
- V(S390_F64x2Div) \
- V(S390_F64x2Eq) \
- V(S390_F64x2Ne) \
- V(S390_F64x2Lt) \
- V(S390_F64x2Le) \
- V(S390_F64x2Min) \
- V(S390_F64x2Max) \
- V(S390_F64x2ExtractLane) \
- V(S390_F64x2Qfma) \
- V(S390_F64x2Qfms) \
- V(S390_F64x2Pmin) \
- V(S390_F64x2Pmax) \
- V(S390_F64x2Ceil) \
- V(S390_F64x2Floor) \
- V(S390_F64x2Trunc) \
- V(S390_F64x2NearestInt) \
- V(S390_F64x2ConvertLowI32x4S) \
- V(S390_F64x2ConvertLowI32x4U) \
- V(S390_F64x2PromoteLowF32x4) \
- V(S390_F32x4Splat) \
- V(S390_F32x4ExtractLane) \
- V(S390_F32x4ReplaceLane) \
- V(S390_F32x4Add) \
- V(S390_F32x4Sub) \
- V(S390_F32x4Mul) \
- V(S390_F32x4Eq) \
- V(S390_F32x4Ne) \
- V(S390_F32x4Lt) \
- V(S390_F32x4Le) \
- V(S390_F32x4Abs) \
- V(S390_F32x4Neg) \
- V(S390_F32x4RecipApprox) \
- V(S390_F32x4RecipSqrtApprox) \
- V(S390_F32x4SConvertI32x4) \
- V(S390_F32x4UConvertI32x4) \
- V(S390_F32x4Sqrt) \
- V(S390_F32x4Div) \
- V(S390_F32x4Min) \
- V(S390_F32x4Max) \
- V(S390_F32x4Qfma) \
- V(S390_F32x4Qfms) \
- V(S390_F32x4Pmin) \
- V(S390_F32x4Pmax) \
- V(S390_F32x4Ceil) \
- V(S390_F32x4Floor) \
- V(S390_F32x4Trunc) \
- V(S390_F32x4NearestInt) \
- V(S390_F32x4DemoteF64x2Zero) \
- V(S390_I64x2Neg) \
- V(S390_I64x2Add) \
- V(S390_I64x2Sub) \
- V(S390_I64x2Shl) \
- V(S390_I64x2ShrS) \
- V(S390_I64x2ShrU) \
- V(S390_I64x2Mul) \
- V(S390_I64x2Splat) \
- V(S390_I64x2ReplaceLane) \
- V(S390_I64x2ExtractLane) \
- V(S390_I64x2Eq) \
- V(S390_I64x2BitMask) \
- V(S390_I64x2ExtMulLowI32x4S) \
- V(S390_I64x2ExtMulHighI32x4S) \
- V(S390_I64x2ExtMulLowI32x4U) \
- V(S390_I64x2ExtMulHighI32x4U) \
- V(S390_I64x2SConvertI32x4Low) \
- V(S390_I64x2SConvertI32x4High) \
- V(S390_I64x2UConvertI32x4Low) \
- V(S390_I64x2UConvertI32x4High) \
- V(S390_I64x2Ne) \
- V(S390_I64x2GtS) \
- V(S390_I64x2GeS) \
- V(S390_I64x2Abs) \
- V(S390_I32x4Splat) \
- V(S390_I32x4ExtractLane) \
- V(S390_I32x4ReplaceLane) \
- V(S390_I32x4Add) \
- V(S390_I32x4Sub) \
- V(S390_I32x4Mul) \
- V(S390_I32x4MinS) \
- V(S390_I32x4MinU) \
- V(S390_I32x4MaxS) \
- V(S390_I32x4MaxU) \
- V(S390_I32x4Eq) \
- V(S390_I32x4Ne) \
- V(S390_I32x4GtS) \
- V(S390_I32x4GeS) \
- V(S390_I32x4GtU) \
- V(S390_I32x4GeU) \
- V(S390_I32x4Neg) \
- V(S390_I32x4Shl) \
- V(S390_I32x4ShrS) \
- V(S390_I32x4ShrU) \
- V(S390_I32x4SConvertF32x4) \
- V(S390_I32x4UConvertF32x4) \
- V(S390_I32x4SConvertI16x8Low) \
- V(S390_I32x4SConvertI16x8High) \
- V(S390_I32x4UConvertI16x8Low) \
- V(S390_I32x4UConvertI16x8High) \
- V(S390_I32x4Abs) \
- V(S390_I32x4BitMask) \
- V(S390_I32x4DotI16x8S) \
- V(S390_I32x4ExtMulLowI16x8S) \
- V(S390_I32x4ExtMulHighI16x8S) \
- V(S390_I32x4ExtMulLowI16x8U) \
- V(S390_I32x4ExtMulHighI16x8U) \
- V(S390_I32x4ExtAddPairwiseI16x8S) \
- V(S390_I32x4ExtAddPairwiseI16x8U) \
- V(S390_I32x4TruncSatF64x2SZero) \
- V(S390_I32x4TruncSatF64x2UZero) \
- V(S390_I16x8Splat) \
- V(S390_I16x8ExtractLaneU) \
- V(S390_I16x8ExtractLaneS) \
- V(S390_I16x8ReplaceLane) \
- V(S390_I16x8Add) \
- V(S390_I16x8Sub) \
- V(S390_I16x8Mul) \
- V(S390_I16x8MinS) \
- V(S390_I16x8MinU) \
- V(S390_I16x8MaxS) \
- V(S390_I16x8MaxU) \
- V(S390_I16x8Eq) \
- V(S390_I16x8Ne) \
- V(S390_I16x8GtS) \
- V(S390_I16x8GeS) \
- V(S390_I16x8GtU) \
- V(S390_I16x8GeU) \
- V(S390_I16x8Shl) \
- V(S390_I16x8ShrS) \
- V(S390_I16x8ShrU) \
- V(S390_I16x8Neg) \
- V(S390_I16x8SConvertI32x4) \
- V(S390_I16x8UConvertI32x4) \
- V(S390_I16x8SConvertI8x16Low) \
- V(S390_I16x8SConvertI8x16High) \
- V(S390_I16x8UConvertI8x16Low) \
- V(S390_I16x8UConvertI8x16High) \
- V(S390_I16x8AddSatS) \
- V(S390_I16x8SubSatS) \
- V(S390_I16x8AddSatU) \
- V(S390_I16x8SubSatU) \
- V(S390_I16x8RoundingAverageU) \
- V(S390_I16x8Abs) \
- V(S390_I16x8BitMask) \
- V(S390_I16x8ExtMulLowI8x16S) \
- V(S390_I16x8ExtMulHighI8x16S) \
- V(S390_I16x8ExtMulLowI8x16U) \
- V(S390_I16x8ExtMulHighI8x16U) \
- V(S390_I16x8ExtAddPairwiseI8x16S) \
- V(S390_I16x8ExtAddPairwiseI8x16U) \
- V(S390_I16x8Q15MulRSatS) \
- V(S390_I8x16Splat) \
- V(S390_I8x16ExtractLaneU) \
- V(S390_I8x16ExtractLaneS) \
- V(S390_I8x16ReplaceLane) \
- V(S390_I8x16Add) \
- V(S390_I8x16Sub) \
- V(S390_I8x16MinS) \
- V(S390_I8x16MinU) \
- V(S390_I8x16MaxS) \
- V(S390_I8x16MaxU) \
- V(S390_I8x16Eq) \
- V(S390_I8x16Ne) \
- V(S390_I8x16GtS) \
- V(S390_I8x16GeS) \
- V(S390_I8x16GtU) \
- V(S390_I8x16GeU) \
- V(S390_I8x16Shl) \
- V(S390_I8x16ShrS) \
- V(S390_I8x16ShrU) \
- V(S390_I8x16Neg) \
- V(S390_I8x16SConvertI16x8) \
- V(S390_I8x16UConvertI16x8) \
- V(S390_I8x16AddSatS) \
- V(S390_I8x16SubSatS) \
- V(S390_I8x16AddSatU) \
- V(S390_I8x16SubSatU) \
- V(S390_I8x16RoundingAverageU) \
- V(S390_I8x16Abs) \
- V(S390_I8x16BitMask) \
- V(S390_I8x16Shuffle) \
- V(S390_I8x16Swizzle) \
- V(S390_I8x16Popcnt) \
- V(S390_I64x2AllTrue) \
- V(S390_I32x4AllTrue) \
- V(S390_I16x8AllTrue) \
- V(S390_I8x16AllTrue) \
- V(S390_V128AnyTrue) \
- V(S390_S128And) \
- V(S390_S128Or) \
- V(S390_S128Xor) \
- V(S390_S128Const) \
- V(S390_S128Zero) \
- V(S390_S128AllOnes) \
- V(S390_S128Not) \
- V(S390_S128Select) \
- V(S390_S128AndNot) \
- V(S390_S128Load8Splat) \
- V(S390_S128Load16Splat) \
- V(S390_S128Load32Splat) \
- V(S390_S128Load64Splat) \
- V(S390_S128Load8x8S) \
- V(S390_S128Load8x8U) \
- V(S390_S128Load16x4S) \
- V(S390_S128Load16x4U) \
- V(S390_S128Load32x2S) \
- V(S390_S128Load32x2U) \
- V(S390_S128Load32Zero) \
- V(S390_S128Load64Zero) \
- V(S390_S128Load8Lane) \
- V(S390_S128Load16Lane) \
- V(S390_S128Load32Lane) \
- V(S390_S128Load64Lane) \
- V(S390_S128Store8Lane) \
- V(S390_S128Store16Lane) \
- V(S390_S128Store32Lane) \
- V(S390_S128Store64Lane) \
- V(S390_StoreSimd128) \
- V(S390_LoadSimd128) \
- V(S390_StoreCompressTagged) \
- V(S390_LoadDecompressTaggedSigned) \
- V(S390_LoadDecompressTaggedPointer) \
+
+// Opcodes that support a MemoryAccessMode.
+#define TARGET_ARCH_OPCODE_WITH_MEMORY_ACCESS_MODE_LIST(V) // None.
+
+#define TARGET_ARCH_OPCODE_LIST(V) \
+ TARGET_ARCH_OPCODE_WITH_MEMORY_ACCESS_MODE_LIST(V) \
+ V(S390_Peek) \
+ V(S390_Abs32) \
+ V(S390_Abs64) \
+ V(S390_And32) \
+ V(S390_And64) \
+ V(S390_Or32) \
+ V(S390_Or64) \
+ V(S390_Xor32) \
+ V(S390_Xor64) \
+ V(S390_ShiftLeft32) \
+ V(S390_ShiftLeft64) \
+ V(S390_ShiftRight32) \
+ V(S390_ShiftRight64) \
+ V(S390_ShiftRightArith32) \
+ V(S390_ShiftRightArith64) \
+ V(S390_RotRight32) \
+ V(S390_RotRight64) \
+ V(S390_Not32) \
+ V(S390_Not64) \
+ V(S390_RotLeftAndClear64) \
+ V(S390_RotLeftAndClearLeft64) \
+ V(S390_RotLeftAndClearRight64) \
+ V(S390_Lay) \
+ V(S390_Add32) \
+ V(S390_Add64) \
+ V(S390_AddFloat) \
+ V(S390_AddDouble) \
+ V(S390_Sub32) \
+ V(S390_Sub64) \
+ V(S390_SubFloat) \
+ V(S390_SubDouble) \
+ V(S390_Mul32) \
+ V(S390_Mul32WithOverflow) \
+ V(S390_Mul64) \
+ V(S390_MulHigh32) \
+ V(S390_MulHighU32) \
+ V(S390_MulFloat) \
+ V(S390_MulDouble) \
+ V(S390_Div32) \
+ V(S390_Div64) \
+ V(S390_DivU32) \
+ V(S390_DivU64) \
+ V(S390_DivFloat) \
+ V(S390_DivDouble) \
+ V(S390_Mod32) \
+ V(S390_Mod64) \
+ V(S390_ModU32) \
+ V(S390_ModU64) \
+ V(S390_ModDouble) \
+ V(S390_Neg32) \
+ V(S390_Neg64) \
+ V(S390_NegDouble) \
+ V(S390_NegFloat) \
+ V(S390_SqrtFloat) \
+ V(S390_FloorFloat) \
+ V(S390_CeilFloat) \
+ V(S390_TruncateFloat) \
+ V(S390_FloatNearestInt) \
+ V(S390_AbsFloat) \
+ V(S390_SqrtDouble) \
+ V(S390_FloorDouble) \
+ V(S390_CeilDouble) \
+ V(S390_TruncateDouble) \
+ V(S390_RoundDouble) \
+ V(S390_DoubleNearestInt) \
+ V(S390_MaxFloat) \
+ V(S390_MaxDouble) \
+ V(S390_MinFloat) \
+ V(S390_MinDouble) \
+ V(S390_AbsDouble) \
+ V(S390_Cntlz32) \
+ V(S390_Cntlz64) \
+ V(S390_Popcnt32) \
+ V(S390_Popcnt64) \
+ V(S390_Cmp32) \
+ V(S390_Cmp64) \
+ V(S390_CmpFloat) \
+ V(S390_CmpDouble) \
+ V(S390_Tst32) \
+ V(S390_Tst64) \
+ V(S390_Push) \
+ V(S390_PushFrame) \
+ V(S390_StoreToStackSlot) \
+ V(S390_SignExtendWord8ToInt32) \
+ V(S390_SignExtendWord16ToInt32) \
+ V(S390_SignExtendWord8ToInt64) \
+ V(S390_SignExtendWord16ToInt64) \
+ V(S390_SignExtendWord32ToInt64) \
+ V(S390_Uint32ToUint64) \
+ V(S390_Int64ToInt32) \
+ V(S390_Int64ToFloat32) \
+ V(S390_Int64ToDouble) \
+ V(S390_Uint64ToFloat32) \
+ V(S390_Uint64ToDouble) \
+ V(S390_Int32ToFloat32) \
+ V(S390_Int32ToDouble) \
+ V(S390_Uint32ToFloat32) \
+ V(S390_Uint32ToDouble) \
+ V(S390_Float32ToInt64) \
+ V(S390_Float32ToUint64) \
+ V(S390_Float32ToInt32) \
+ V(S390_Float32ToUint32) \
+ V(S390_Float32ToDouble) \
+ V(S390_Float64SilenceNaN) \
+ V(S390_DoubleToInt32) \
+ V(S390_DoubleToUint32) \
+ V(S390_DoubleToInt64) \
+ V(S390_DoubleToUint64) \
+ V(S390_DoubleToFloat32) \
+ V(S390_DoubleExtractLowWord32) \
+ V(S390_DoubleExtractHighWord32) \
+ V(S390_DoubleInsertLowWord32) \
+ V(S390_DoubleInsertHighWord32) \
+ V(S390_DoubleConstruct) \
+ V(S390_BitcastInt32ToFloat32) \
+ V(S390_BitcastFloat32ToInt32) \
+ V(S390_BitcastInt64ToDouble) \
+ V(S390_BitcastDoubleToInt64) \
+ V(S390_LoadWordS8) \
+ V(S390_LoadWordU8) \
+ V(S390_LoadWordS16) \
+ V(S390_LoadWordU16) \
+ V(S390_LoadWordS32) \
+ V(S390_LoadWordU32) \
+ V(S390_LoadAndTestWord32) \
+ V(S390_LoadAndTestWord64) \
+ V(S390_LoadAndTestFloat32) \
+ V(S390_LoadAndTestFloat64) \
+ V(S390_LoadReverse16RR) \
+ V(S390_LoadReverse32RR) \
+ V(S390_LoadReverse64RR) \
+ V(S390_LoadReverseSimd128RR) \
+ V(S390_LoadReverseSimd128) \
+ V(S390_LoadReverse16) \
+ V(S390_LoadReverse32) \
+ V(S390_LoadReverse64) \
+ V(S390_LoadWord64) \
+ V(S390_LoadFloat32) \
+ V(S390_LoadDouble) \
+ V(S390_StoreWord8) \
+ V(S390_StoreWord16) \
+ V(S390_StoreWord32) \
+ V(S390_StoreWord64) \
+ V(S390_StoreReverse16) \
+ V(S390_StoreReverse32) \
+ V(S390_StoreReverse64) \
+ V(S390_StoreReverseSimd128) \
+ V(S390_StoreFloat32) \
+ V(S390_StoreDouble) \
+ V(S390_Word64AtomicExchangeUint64) \
+ V(S390_Word64AtomicCompareExchangeUint64) \
+ V(S390_Word64AtomicAddUint64) \
+ V(S390_Word64AtomicSubUint64) \
+ V(S390_Word64AtomicAndUint64) \
+ V(S390_Word64AtomicOrUint64) \
+ V(S390_Word64AtomicXorUint64) \
+ V(S390_F64x2Splat) \
+ V(S390_F64x2ReplaceLane) \
+ V(S390_F64x2Abs) \
+ V(S390_F64x2Neg) \
+ V(S390_F64x2Sqrt) \
+ V(S390_F64x2Add) \
+ V(S390_F64x2Sub) \
+ V(S390_F64x2Mul) \
+ V(S390_F64x2Div) \
+ V(S390_F64x2Eq) \
+ V(S390_F64x2Ne) \
+ V(S390_F64x2Lt) \
+ V(S390_F64x2Le) \
+ V(S390_F64x2Min) \
+ V(S390_F64x2Max) \
+ V(S390_F64x2ExtractLane) \
+ V(S390_F64x2Qfma) \
+ V(S390_F64x2Qfms) \
+ V(S390_F64x2Pmin) \
+ V(S390_F64x2Pmax) \
+ V(S390_F64x2Ceil) \
+ V(S390_F64x2Floor) \
+ V(S390_F64x2Trunc) \
+ V(S390_F64x2NearestInt) \
+ V(S390_F64x2ConvertLowI32x4S) \
+ V(S390_F64x2ConvertLowI32x4U) \
+ V(S390_F64x2PromoteLowF32x4) \
+ V(S390_F32x4Splat) \
+ V(S390_F32x4ExtractLane) \
+ V(S390_F32x4ReplaceLane) \
+ V(S390_F32x4Add) \
+ V(S390_F32x4Sub) \
+ V(S390_F32x4Mul) \
+ V(S390_F32x4Eq) \
+ V(S390_F32x4Ne) \
+ V(S390_F32x4Lt) \
+ V(S390_F32x4Le) \
+ V(S390_F32x4Abs) \
+ V(S390_F32x4Neg) \
+ V(S390_F32x4RecipApprox) \
+ V(S390_F32x4RecipSqrtApprox) \
+ V(S390_F32x4SConvertI32x4) \
+ V(S390_F32x4UConvertI32x4) \
+ V(S390_F32x4Sqrt) \
+ V(S390_F32x4Div) \
+ V(S390_F32x4Min) \
+ V(S390_F32x4Max) \
+ V(S390_F32x4Qfma) \
+ V(S390_F32x4Qfms) \
+ V(S390_F32x4Pmin) \
+ V(S390_F32x4Pmax) \
+ V(S390_F32x4Ceil) \
+ V(S390_F32x4Floor) \
+ V(S390_F32x4Trunc) \
+ V(S390_F32x4NearestInt) \
+ V(S390_F32x4DemoteF64x2Zero) \
+ V(S390_I64x2Neg) \
+ V(S390_I64x2Add) \
+ V(S390_I64x2Sub) \
+ V(S390_I64x2Shl) \
+ V(S390_I64x2ShrS) \
+ V(S390_I64x2ShrU) \
+ V(S390_I64x2Mul) \
+ V(S390_I64x2Splat) \
+ V(S390_I64x2ReplaceLane) \
+ V(S390_I64x2ExtractLane) \
+ V(S390_I64x2Eq) \
+ V(S390_I64x2BitMask) \
+ V(S390_I64x2ExtMulLowI32x4S) \
+ V(S390_I64x2ExtMulHighI32x4S) \
+ V(S390_I64x2ExtMulLowI32x4U) \
+ V(S390_I64x2ExtMulHighI32x4U) \
+ V(S390_I64x2SConvertI32x4Low) \
+ V(S390_I64x2SConvertI32x4High) \
+ V(S390_I64x2UConvertI32x4Low) \
+ V(S390_I64x2UConvertI32x4High) \
+ V(S390_I64x2Ne) \
+ V(S390_I64x2GtS) \
+ V(S390_I64x2GeS) \
+ V(S390_I64x2Abs) \
+ V(S390_I32x4Splat) \
+ V(S390_I32x4ExtractLane) \
+ V(S390_I32x4ReplaceLane) \
+ V(S390_I32x4Add) \
+ V(S390_I32x4Sub) \
+ V(S390_I32x4Mul) \
+ V(S390_I32x4MinS) \
+ V(S390_I32x4MinU) \
+ V(S390_I32x4MaxS) \
+ V(S390_I32x4MaxU) \
+ V(S390_I32x4Eq) \
+ V(S390_I32x4Ne) \
+ V(S390_I32x4GtS) \
+ V(S390_I32x4GeS) \
+ V(S390_I32x4GtU) \
+ V(S390_I32x4GeU) \
+ V(S390_I32x4Neg) \
+ V(S390_I32x4Shl) \
+ V(S390_I32x4ShrS) \
+ V(S390_I32x4ShrU) \
+ V(S390_I32x4SConvertF32x4) \
+ V(S390_I32x4UConvertF32x4) \
+ V(S390_I32x4SConvertI16x8Low) \
+ V(S390_I32x4SConvertI16x8High) \
+ V(S390_I32x4UConvertI16x8Low) \
+ V(S390_I32x4UConvertI16x8High) \
+ V(S390_I32x4Abs) \
+ V(S390_I32x4BitMask) \
+ V(S390_I32x4DotI16x8S) \
+ V(S390_I32x4ExtMulLowI16x8S) \
+ V(S390_I32x4ExtMulHighI16x8S) \
+ V(S390_I32x4ExtMulLowI16x8U) \
+ V(S390_I32x4ExtMulHighI16x8U) \
+ V(S390_I32x4ExtAddPairwiseI16x8S) \
+ V(S390_I32x4ExtAddPairwiseI16x8U) \
+ V(S390_I32x4TruncSatF64x2SZero) \
+ V(S390_I32x4TruncSatF64x2UZero) \
+ V(S390_I16x8Splat) \
+ V(S390_I16x8ExtractLaneU) \
+ V(S390_I16x8ExtractLaneS) \
+ V(S390_I16x8ReplaceLane) \
+ V(S390_I16x8Add) \
+ V(S390_I16x8Sub) \
+ V(S390_I16x8Mul) \
+ V(S390_I16x8MinS) \
+ V(S390_I16x8MinU) \
+ V(S390_I16x8MaxS) \
+ V(S390_I16x8MaxU) \
+ V(S390_I16x8Eq) \
+ V(S390_I16x8Ne) \
+ V(S390_I16x8GtS) \
+ V(S390_I16x8GeS) \
+ V(S390_I16x8GtU) \
+ V(S390_I16x8GeU) \
+ V(S390_I16x8Shl) \
+ V(S390_I16x8ShrS) \
+ V(S390_I16x8ShrU) \
+ V(S390_I16x8Neg) \
+ V(S390_I16x8SConvertI32x4) \
+ V(S390_I16x8UConvertI32x4) \
+ V(S390_I16x8SConvertI8x16Low) \
+ V(S390_I16x8SConvertI8x16High) \
+ V(S390_I16x8UConvertI8x16Low) \
+ V(S390_I16x8UConvertI8x16High) \
+ V(S390_I16x8AddSatS) \
+ V(S390_I16x8SubSatS) \
+ V(S390_I16x8AddSatU) \
+ V(S390_I16x8SubSatU) \
+ V(S390_I16x8RoundingAverageU) \
+ V(S390_I16x8Abs) \
+ V(S390_I16x8BitMask) \
+ V(S390_I16x8ExtMulLowI8x16S) \
+ V(S390_I16x8ExtMulHighI8x16S) \
+ V(S390_I16x8ExtMulLowI8x16U) \
+ V(S390_I16x8ExtMulHighI8x16U) \
+ V(S390_I16x8ExtAddPairwiseI8x16S) \
+ V(S390_I16x8ExtAddPairwiseI8x16U) \
+ V(S390_I16x8Q15MulRSatS) \
+ V(S390_I8x16Splat) \
+ V(S390_I8x16ExtractLaneU) \
+ V(S390_I8x16ExtractLaneS) \
+ V(S390_I8x16ReplaceLane) \
+ V(S390_I8x16Add) \
+ V(S390_I8x16Sub) \
+ V(S390_I8x16MinS) \
+ V(S390_I8x16MinU) \
+ V(S390_I8x16MaxS) \
+ V(S390_I8x16MaxU) \
+ V(S390_I8x16Eq) \
+ V(S390_I8x16Ne) \
+ V(S390_I8x16GtS) \
+ V(S390_I8x16GeS) \
+ V(S390_I8x16GtU) \
+ V(S390_I8x16GeU) \
+ V(S390_I8x16Shl) \
+ V(S390_I8x16ShrS) \
+ V(S390_I8x16ShrU) \
+ V(S390_I8x16Neg) \
+ V(S390_I8x16SConvertI16x8) \
+ V(S390_I8x16UConvertI16x8) \
+ V(S390_I8x16AddSatS) \
+ V(S390_I8x16SubSatS) \
+ V(S390_I8x16AddSatU) \
+ V(S390_I8x16SubSatU) \
+ V(S390_I8x16RoundingAverageU) \
+ V(S390_I8x16Abs) \
+ V(S390_I8x16BitMask) \
+ V(S390_I8x16Shuffle) \
+ V(S390_I8x16Swizzle) \
+ V(S390_I8x16Popcnt) \
+ V(S390_I64x2AllTrue) \
+ V(S390_I32x4AllTrue) \
+ V(S390_I16x8AllTrue) \
+ V(S390_I8x16AllTrue) \
+ V(S390_V128AnyTrue) \
+ V(S390_S128And) \
+ V(S390_S128Or) \
+ V(S390_S128Xor) \
+ V(S390_S128Const) \
+ V(S390_S128Zero) \
+ V(S390_S128AllOnes) \
+ V(S390_S128Not) \
+ V(S390_S128Select) \
+ V(S390_S128AndNot) \
+ V(S390_S128Load8Splat) \
+ V(S390_S128Load16Splat) \
+ V(S390_S128Load32Splat) \
+ V(S390_S128Load64Splat) \
+ V(S390_S128Load8x8S) \
+ V(S390_S128Load8x8U) \
+ V(S390_S128Load16x4S) \
+ V(S390_S128Load16x4U) \
+ V(S390_S128Load32x2S) \
+ V(S390_S128Load32x2U) \
+ V(S390_S128Load32Zero) \
+ V(S390_S128Load64Zero) \
+ V(S390_S128Load8Lane) \
+ V(S390_S128Load16Lane) \
+ V(S390_S128Load32Lane) \
+ V(S390_S128Load64Lane) \
+ V(S390_S128Store8Lane) \
+ V(S390_S128Store16Lane) \
+ V(S390_S128Store32Lane) \
+ V(S390_S128Store64Lane) \
+ V(S390_StoreSimd128) \
+ V(S390_LoadSimd128) \
+ V(S390_StoreCompressTagged) \
+ V(S390_LoadDecompressTaggedSigned) \
+ V(S390_LoadDecompressTaggedPointer) \
V(S390_LoadDecompressAnyTagged)
// Addressing modes represent the "shape" of inputs to an instruction.
diff --git a/deps/v8/src/compiler/backend/s390/instruction-selector-s390.cc b/deps/v8/src/compiler/backend/s390/instruction-selector-s390.cc
index 489065e65f..120eaf41dc 100644
--- a/deps/v8/src/compiler/backend/s390/instruction-selector-s390.cc
+++ b/deps/v8/src/compiler/backend/s390/instruction-selector-s390.cc
@@ -689,9 +689,9 @@ void InstructionSelector::VisitStackSlot(Node* node) {
sequence()->AddImmediate(Constant(slot)), 0, nullptr);
}
-void InstructionSelector::VisitAbortCSAAssert(Node* node) {
+void InstructionSelector::VisitAbortCSADcheck(Node* node) {
S390OperandGenerator g(this);
- Emit(kArchAbortCSAAssert, g.NoOutput(), g.UseFixed(node->InputAt(0), r3));
+ Emit(kArchAbortCSADcheck, g.NoOutput(), g.UseFixed(node->InputAt(0), r3));
}
void InstructionSelector::VisitLoad(Node* node, Node* value,
diff --git a/deps/v8/src/compiler/backend/x64/code-generator-x64.cc b/deps/v8/src/compiler/backend/x64/code-generator-x64.cc
index 3e2298de3e..57e0143285 100644
--- a/deps/v8/src/compiler/backend/x64/code-generator-x64.cc
+++ b/deps/v8/src/compiler/backend/x64/code-generator-x64.cc
@@ -693,7 +693,7 @@ class WasmProtectedInstructionTrap final : public WasmOutOfLineTrap {
void EmitOOLTrapIfNeeded(Zone* zone, CodeGenerator* codegen,
InstructionCode opcode, Instruction* instr,
int pc) {
- const MemoryAccessMode access_mode = AccessModeField::decode(opcode);
+ const MemoryAccessMode access_mode = instr->memory_access_mode();
if (access_mode == kMemoryAccessProtected) {
zone->New<WasmProtectedInstructionTrap>(codegen, pc, instr);
}
@@ -703,7 +703,7 @@ void EmitOOLTrapIfNeeded(Zone* zone, CodeGenerator* codegen,
void EmitOOLTrapIfNeeded(Zone* zone, CodeGenerator* codegen,
InstructionCode opcode, Instruction* instr, int pc) {
- DCHECK_NE(kMemoryAccessProtected, AccessModeField::decode(opcode));
+ DCHECK_NE(kMemoryAccessProtected, instr->memory_access_mode());
}
#endif // V8_ENABLE_WEBASSEMBLY
@@ -1305,7 +1305,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
AssemblePrepareTailCall();
break;
case kArchCallCFunction: {
- int const num_parameters = MiscField::decode(instr->opcode());
+ int const num_gp_parameters = ParamField::decode(instr->opcode());
+ int const num_fp_parameters = FPParamField::decode(instr->opcode());
Label return_location;
#if V8_ENABLE_WEBASSEMBLY
if (linkage()->GetIncomingDescriptor()->IsWasmCapiFunction()) {
@@ -1317,10 +1318,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
#endif // V8_ENABLE_WEBASSEMBLY
if (HasImmediateInput(instr, 0)) {
ExternalReference ref = i.InputExternalReference(0);
- __ CallCFunction(ref, num_parameters);
+ __ CallCFunction(ref, num_gp_parameters + num_fp_parameters);
} else {
Register func = i.InputRegister(0);
- __ CallCFunction(func, num_parameters);
+ __ CallCFunction(func, num_gp_parameters + num_fp_parameters);
}
__ bind(&return_location);
#if V8_ENABLE_WEBASSEMBLY
@@ -1360,13 +1361,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArchComment:
__ RecordComment(reinterpret_cast<const char*>(i.InputInt64(0)));
break;
- case kArchAbortCSAAssert:
+ case kArchAbortCSADcheck:
DCHECK(i.InputRegister(0) == rdx);
{
// We don't actually want to generate a pile of code for this, so just
// claim there is a stack frame, without generating one.
- FrameScope scope(tasm(), StackFrame::NONE);
- __ Call(isolate()->builtins()->code_handle(Builtin::kAbortCSAAssert),
+ FrameScope scope(tasm(), StackFrame::NO_FRAME_TYPE);
+ __ Call(isolate()->builtins()->code_handle(Builtin::kAbortCSADcheck),
RelocInfo::CODE_TARGET);
}
__ int3();
@@ -2194,21 +2195,25 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Movapd(i.OutputDoubleRegister(), i.OutputDoubleRegister());
break;
case kX64Float32Abs: {
- __ Absps(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+ __ Absps(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ kScratchRegister);
break;
}
case kX64Float32Neg: {
- __ Negps(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+ __ Negps(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ kScratchRegister);
break;
}
case kX64F64x2Abs:
case kX64Float64Abs: {
- __ Abspd(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+ __ Abspd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ kScratchRegister);
break;
}
case kX64F64x2Neg:
case kX64Float64Neg: {
- __ Negpd(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+ __ Negpd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ kScratchRegister);
break;
}
case kSSEFloat64SilenceNaN:
@@ -2702,7 +2707,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kX64F64x2PromoteLowF32x4: {
- __ Cvtps2pd(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ if (HasAddressingMode(instr)) {
+ EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
+ __ Cvtps2pd(i.OutputSimd128Register(), i.MemoryOperand());
+ } else {
+ __ Cvtps2pd(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ }
break;
}
case kX64F32x4DemoteF64x2Zero: {
@@ -2817,42 +2827,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kX64F32x4Min: {
- XMMRegister src1 = i.InputSimd128Register(1),
- dst = i.OutputSimd128Register();
- DCHECK_EQ(dst, i.InputSimd128Register(0));
- // The minps instruction doesn't propagate NaNs and +0's in its first
- // operand. Perform minps in both orders, merge the results, and adjust.
- __ Movaps(kScratchDoubleReg, src1);
- __ Minps(kScratchDoubleReg, dst);
- __ Minps(dst, src1);
- // propagate -0's and NaNs, which may be non-canonical.
- __ Orps(kScratchDoubleReg, dst);
- // Canonicalize NaNs by quieting and clearing the payload.
- __ Cmpunordps(dst, kScratchDoubleReg);
- __ Orps(kScratchDoubleReg, dst);
- __ Psrld(dst, byte{10});
- __ Andnps(dst, kScratchDoubleReg);
+ __ F32x4Min(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), kScratchDoubleReg);
break;
}
case kX64F32x4Max: {
- XMMRegister src1 = i.InputSimd128Register(1),
- dst = i.OutputSimd128Register();
- DCHECK_EQ(dst, i.InputSimd128Register(0));
- // The maxps instruction doesn't propagate NaNs and +0's in its first
- // operand. Perform maxps in both orders, merge the results, and adjust.
- __ Movaps(kScratchDoubleReg, src1);
- __ Maxps(kScratchDoubleReg, dst);
- __ Maxps(dst, src1);
- // Find discrepancies.
- __ Xorps(dst, kScratchDoubleReg);
- // Propagate NaNs, which may be non-canonical.
- __ Orps(kScratchDoubleReg, dst);
- // Propagate sign discrepancy and (subtle) quiet NaNs.
- __ Subps(kScratchDoubleReg, dst);
- // Canonicalize NaNs by clearing the payload. Sign is non-deterministic.
- __ Cmpunordps(dst, kScratchDoubleReg);
- __ Psrld(dst, byte{10});
- __ Andnps(dst, kScratchDoubleReg);
+ __ F32x4Max(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), kScratchDoubleReg);
break;
}
case kX64F32x4Eq: {
@@ -2965,28 +2946,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kX64I64x2Mul: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- XMMRegister left = i.InputSimd128Register(0);
- XMMRegister right = i.InputSimd128Register(1);
- XMMRegister tmp1 = i.TempSimd128Register(0);
- XMMRegister tmp2 = kScratchDoubleReg;
-
- __ Movdqa(tmp1, left);
- __ Movdqa(tmp2, right);
-
- // Multiply high dword of each qword of left with right.
- __ Psrlq(tmp1, byte{32});
- __ Pmuludq(tmp1, right);
-
- // Multiply high dword of each qword of right with left.
- __ Psrlq(tmp2, byte{32});
- __ Pmuludq(tmp2, left);
-
- __ Paddq(tmp2, tmp1);
- __ Psllq(tmp2, byte{32});
-
- __ Pmuludq(left, right);
- __ Paddq(left, tmp2); // left == dst
+ __ I64x2Mul(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), i.TempSimd128Register(0),
+ kScratchDoubleReg);
break;
}
case kX64I64x2Eq: {
diff --git a/deps/v8/src/compiler/backend/x64/instruction-codes-x64.h b/deps/v8/src/compiler/backend/x64/instruction-codes-x64.h
index e7fe45c5de..ad9906585c 100644
--- a/deps/v8/src/compiler/backend/x64/instruction-codes-x64.h
+++ b/deps/v8/src/compiler/backend/x64/instruction-codes-x64.h
@@ -11,389 +11,394 @@ namespace compiler {
// X64-specific opcodes that specify which assembly sequence to emit.
// Most opcodes specify a single instruction.
-#define TARGET_ARCH_OPCODE_LIST(V) \
- V(X64Add) \
- V(X64Add32) \
- V(X64And) \
- V(X64And32) \
- V(X64Cmp) \
- V(X64Cmp32) \
- V(X64Cmp16) \
- V(X64Cmp8) \
- V(X64Test) \
- V(X64Test32) \
- V(X64Test16) \
- V(X64Test8) \
- V(X64Or) \
- V(X64Or32) \
- V(X64Xor) \
- V(X64Xor32) \
- V(X64Sub) \
- V(X64Sub32) \
- V(X64Imul) \
- V(X64Imul32) \
- V(X64ImulHigh32) \
- V(X64UmulHigh32) \
- V(X64Idiv) \
- V(X64Idiv32) \
- V(X64Udiv) \
- V(X64Udiv32) \
- V(X64Not) \
- V(X64Not32) \
- V(X64Neg) \
- V(X64Neg32) \
- V(X64Shl) \
- V(X64Shl32) \
- V(X64Shr) \
- V(X64Shr32) \
- V(X64Sar) \
- V(X64Sar32) \
- V(X64Rol) \
- V(X64Rol32) \
- V(X64Ror) \
- V(X64Ror32) \
- V(X64Lzcnt) \
- V(X64Lzcnt32) \
- V(X64Tzcnt) \
- V(X64Tzcnt32) \
- V(X64Popcnt) \
- V(X64Popcnt32) \
- V(X64Bswap) \
- V(X64Bswap32) \
- V(X64MFence) \
- V(X64LFence) \
- V(SSEFloat32Cmp) \
- V(SSEFloat32Add) \
- V(SSEFloat32Sub) \
- V(SSEFloat32Mul) \
- V(SSEFloat32Div) \
- V(SSEFloat32Sqrt) \
- V(SSEFloat32ToFloat64) \
- V(SSEFloat32ToInt32) \
- V(SSEFloat32ToUint32) \
- V(SSEFloat32Round) \
- V(SSEFloat64Cmp) \
- V(SSEFloat64Add) \
- V(SSEFloat64Sub) \
- V(SSEFloat64Mul) \
- V(SSEFloat64Div) \
- V(SSEFloat64Mod) \
- V(SSEFloat64Sqrt) \
- V(SSEFloat64Round) \
- V(SSEFloat32Max) \
- V(SSEFloat64Max) \
- V(SSEFloat32Min) \
- V(SSEFloat64Min) \
- V(SSEFloat64ToFloat32) \
- V(SSEFloat64ToInt32) \
- V(SSEFloat64ToUint32) \
- V(SSEFloat32ToInt64) \
- V(SSEFloat64ToInt64) \
- V(SSEFloat32ToUint64) \
- V(SSEFloat64ToUint64) \
- V(SSEInt32ToFloat64) \
- V(SSEInt32ToFloat32) \
- V(SSEInt64ToFloat32) \
- V(SSEInt64ToFloat64) \
- V(SSEUint64ToFloat32) \
- V(SSEUint64ToFloat64) \
- V(SSEUint32ToFloat64) \
- V(SSEUint32ToFloat32) \
- V(SSEFloat64ExtractLowWord32) \
- V(SSEFloat64ExtractHighWord32) \
- V(SSEFloat64InsertLowWord32) \
- V(SSEFloat64InsertHighWord32) \
- V(SSEFloat64LoadLowWord32) \
- V(SSEFloat64SilenceNaN) \
- V(AVXFloat32Cmp) \
- V(AVXFloat32Add) \
- V(AVXFloat32Sub) \
- V(AVXFloat32Mul) \
- V(AVXFloat32Div) \
- V(AVXFloat64Cmp) \
- V(AVXFloat64Add) \
- V(AVXFloat64Sub) \
- V(AVXFloat64Mul) \
- V(AVXFloat64Div) \
- V(X64Float64Abs) \
- V(X64Float64Neg) \
- V(X64Float32Abs) \
- V(X64Float32Neg) \
- V(X64Movsxbl) \
- V(X64Movzxbl) \
- V(X64Movsxbq) \
- V(X64Movzxbq) \
- V(X64Movb) \
- V(X64Movsxwl) \
- V(X64Movzxwl) \
- V(X64Movsxwq) \
- V(X64Movzxwq) \
- V(X64Movw) \
- V(X64Movl) \
- V(X64Movsxlq) \
- V(X64MovqDecompressTaggedSigned) \
- V(X64MovqDecompressTaggedPointer) \
- V(X64MovqDecompressAnyTagged) \
- V(X64MovqCompressTagged) \
- V(X64Movq) \
- V(X64Movsd) \
- V(X64Movss) \
- V(X64Movdqu) \
- V(X64BitcastFI) \
- V(X64BitcastDL) \
- V(X64BitcastIF) \
- V(X64BitcastLD) \
- V(X64Lea32) \
- V(X64Lea) \
- V(X64Dec32) \
- V(X64Inc32) \
- V(X64Push) \
- V(X64Poke) \
- V(X64Peek) \
- V(X64F64x2Splat) \
- V(X64F64x2ExtractLane) \
- V(X64F64x2ReplaceLane) \
- V(X64F64x2Abs) \
- V(X64F64x2Neg) \
- V(X64F64x2Sqrt) \
- V(X64F64x2Add) \
- V(X64F64x2Sub) \
- V(X64F64x2Mul) \
- V(X64F64x2Div) \
- V(X64F64x2Min) \
- V(X64F64x2Max) \
- V(X64F64x2Eq) \
- V(X64F64x2Ne) \
- V(X64F64x2Lt) \
- V(X64F64x2Le) \
- V(X64F64x2Qfma) \
- V(X64F64x2Qfms) \
- V(X64F64x2Pmin) \
- V(X64F64x2Pmax) \
- V(X64F64x2Round) \
- V(X64F64x2ConvertLowI32x4S) \
- V(X64F64x2ConvertLowI32x4U) \
- V(X64F64x2PromoteLowF32x4) \
- V(X64F32x4Splat) \
- V(X64F32x4ExtractLane) \
- V(X64F32x4ReplaceLane) \
- V(X64F32x4SConvertI32x4) \
- V(X64F32x4UConvertI32x4) \
- V(X64F32x4Abs) \
- V(X64F32x4Neg) \
- V(X64F32x4Sqrt) \
- V(X64F32x4RecipApprox) \
- V(X64F32x4RecipSqrtApprox) \
- V(X64F32x4Add) \
- V(X64F32x4Sub) \
- V(X64F32x4Mul) \
- V(X64F32x4Div) \
- V(X64F32x4Min) \
- V(X64F32x4Max) \
- V(X64F32x4Eq) \
- V(X64F32x4Ne) \
- V(X64F32x4Lt) \
- V(X64F32x4Le) \
- V(X64F32x4Qfma) \
- V(X64F32x4Qfms) \
- V(X64F32x4Pmin) \
- V(X64F32x4Pmax) \
- V(X64F32x4Round) \
- V(X64F32x4DemoteF64x2Zero) \
- V(X64I64x2Splat) \
- V(X64I64x2ExtractLane) \
- V(X64I64x2Abs) \
- V(X64I64x2Neg) \
- V(X64I64x2BitMask) \
- V(X64I64x2Shl) \
- V(X64I64x2ShrS) \
- V(X64I64x2Add) \
- V(X64I64x2Sub) \
- V(X64I64x2Mul) \
- V(X64I64x2Eq) \
- V(X64I64x2GtS) \
- V(X64I64x2GeS) \
- V(X64I64x2Ne) \
- V(X64I64x2ShrU) \
- V(X64I64x2ExtMulLowI32x4S) \
- V(X64I64x2ExtMulHighI32x4S) \
- V(X64I64x2ExtMulLowI32x4U) \
- V(X64I64x2ExtMulHighI32x4U) \
- V(X64I64x2SConvertI32x4Low) \
- V(X64I64x2SConvertI32x4High) \
- V(X64I64x2UConvertI32x4Low) \
- V(X64I64x2UConvertI32x4High) \
- V(X64I32x4Splat) \
- V(X64I32x4ExtractLane) \
- V(X64I32x4SConvertF32x4) \
- V(X64I32x4SConvertI16x8Low) \
- V(X64I32x4SConvertI16x8High) \
- V(X64I32x4Neg) \
- V(X64I32x4Shl) \
- V(X64I32x4ShrS) \
- V(X64I32x4Add) \
- V(X64I32x4Sub) \
- V(X64I32x4Mul) \
- V(X64I32x4MinS) \
- V(X64I32x4MaxS) \
- V(X64I32x4Eq) \
- V(X64I32x4Ne) \
- V(X64I32x4GtS) \
- V(X64I32x4GeS) \
- V(X64I32x4UConvertF32x4) \
- V(X64I32x4UConvertI16x8Low) \
- V(X64I32x4UConvertI16x8High) \
- V(X64I32x4ShrU) \
- V(X64I32x4MinU) \
- V(X64I32x4MaxU) \
- V(X64I32x4GtU) \
- V(X64I32x4GeU) \
- V(X64I32x4Abs) \
- V(X64I32x4BitMask) \
- V(X64I32x4DotI16x8S) \
- V(X64I32x4ExtMulLowI16x8S) \
- V(X64I32x4ExtMulHighI16x8S) \
- V(X64I32x4ExtMulLowI16x8U) \
- V(X64I32x4ExtMulHighI16x8U) \
- V(X64I32x4ExtAddPairwiseI16x8S) \
- V(X64I32x4ExtAddPairwiseI16x8U) \
- V(X64I32x4TruncSatF64x2SZero) \
- V(X64I32x4TruncSatF64x2UZero) \
- V(X64I16x8Splat) \
- V(X64I16x8ExtractLaneS) \
- V(X64I16x8SConvertI8x16Low) \
- V(X64I16x8SConvertI8x16High) \
- V(X64I16x8Neg) \
- V(X64I16x8Shl) \
- V(X64I16x8ShrS) \
- V(X64I16x8SConvertI32x4) \
- V(X64I16x8Add) \
- V(X64I16x8AddSatS) \
- V(X64I16x8Sub) \
- V(X64I16x8SubSatS) \
- V(X64I16x8Mul) \
- V(X64I16x8MinS) \
- V(X64I16x8MaxS) \
- V(X64I16x8Eq) \
- V(X64I16x8Ne) \
- V(X64I16x8GtS) \
- V(X64I16x8GeS) \
- V(X64I16x8UConvertI8x16Low) \
- V(X64I16x8UConvertI8x16High) \
- V(X64I16x8ShrU) \
- V(X64I16x8UConvertI32x4) \
- V(X64I16x8AddSatU) \
- V(X64I16x8SubSatU) \
- V(X64I16x8MinU) \
- V(X64I16x8MaxU) \
- V(X64I16x8GtU) \
- V(X64I16x8GeU) \
- V(X64I16x8RoundingAverageU) \
- V(X64I16x8Abs) \
- V(X64I16x8BitMask) \
- V(X64I16x8ExtMulLowI8x16S) \
- V(X64I16x8ExtMulHighI8x16S) \
- V(X64I16x8ExtMulLowI8x16U) \
- V(X64I16x8ExtMulHighI8x16U) \
- V(X64I16x8ExtAddPairwiseI8x16S) \
- V(X64I16x8ExtAddPairwiseI8x16U) \
- V(X64I16x8Q15MulRSatS) \
- V(X64I8x16Splat) \
- V(X64I8x16ExtractLaneS) \
- V(X64Pinsrb) \
- V(X64Pinsrw) \
- V(X64Pinsrd) \
- V(X64Pinsrq) \
- V(X64Pextrb) \
- V(X64Pextrw) \
- V(X64I8x16SConvertI16x8) \
- V(X64I8x16Neg) \
- V(X64I8x16Shl) \
- V(X64I8x16ShrS) \
- V(X64I8x16Add) \
- V(X64I8x16AddSatS) \
- V(X64I8x16Sub) \
- V(X64I8x16SubSatS) \
- V(X64I8x16MinS) \
- V(X64I8x16MaxS) \
- V(X64I8x16Eq) \
- V(X64I8x16Ne) \
- V(X64I8x16GtS) \
- V(X64I8x16GeS) \
- V(X64I8x16UConvertI16x8) \
- V(X64I8x16AddSatU) \
- V(X64I8x16SubSatU) \
- V(X64I8x16ShrU) \
- V(X64I8x16MinU) \
- V(X64I8x16MaxU) \
- V(X64I8x16GtU) \
- V(X64I8x16GeU) \
- V(X64I8x16RoundingAverageU) \
- V(X64I8x16Abs) \
- V(X64I8x16BitMask) \
- V(X64S128Const) \
- V(X64S128Zero) \
- V(X64S128AllOnes) \
- V(X64S128Not) \
- V(X64S128And) \
- V(X64S128Or) \
- V(X64S128Xor) \
- V(X64S128Select) \
- V(X64S128AndNot) \
- V(X64I8x16Swizzle) \
- V(X64I8x16Shuffle) \
- V(X64I8x16Popcnt) \
- V(X64S128Load8Splat) \
- V(X64S128Load16Splat) \
- V(X64S128Load32Splat) \
- V(X64S128Load64Splat) \
- V(X64S128Load8x8S) \
- V(X64S128Load8x8U) \
- V(X64S128Load16x4S) \
- V(X64S128Load16x4U) \
- V(X64S128Load32x2S) \
- V(X64S128Load32x2U) \
- V(X64S128Store32Lane) \
- V(X64S128Store64Lane) \
- V(X64Shufps) \
- V(X64S32x4Rotate) \
- V(X64S32x4Swizzle) \
- V(X64S32x4Shuffle) \
- V(X64S16x8Blend) \
- V(X64S16x8HalfShuffle1) \
- V(X64S16x8HalfShuffle2) \
- V(X64S8x16Alignr) \
- V(X64S16x8Dup) \
- V(X64S8x16Dup) \
- V(X64S16x8UnzipHigh) \
- V(X64S16x8UnzipLow) \
- V(X64S8x16UnzipHigh) \
- V(X64S8x16UnzipLow) \
- V(X64S64x2UnpackHigh) \
- V(X64S32x4UnpackHigh) \
- V(X64S16x8UnpackHigh) \
- V(X64S8x16UnpackHigh) \
- V(X64S64x2UnpackLow) \
- V(X64S32x4UnpackLow) \
- V(X64S16x8UnpackLow) \
- V(X64S8x16UnpackLow) \
- V(X64S8x16TransposeLow) \
- V(X64S8x16TransposeHigh) \
- V(X64S8x8Reverse) \
- V(X64S8x4Reverse) \
- V(X64S8x2Reverse) \
- V(X64V128AnyTrue) \
- V(X64I64x2AllTrue) \
- V(X64I32x4AllTrue) \
- V(X64I16x8AllTrue) \
- V(X64I8x16AllTrue) \
- V(X64Word64AtomicAddUint64) \
- V(X64Word64AtomicSubUint64) \
- V(X64Word64AtomicAndUint64) \
- V(X64Word64AtomicOrUint64) \
- V(X64Word64AtomicXorUint64) \
- V(X64Word64AtomicStoreWord64) \
- V(X64Word64AtomicExchangeUint64) \
+
+// Opcodes that support a MemoryAccessMode.
+#define TARGET_ARCH_OPCODE_WITH_MEMORY_ACCESS_MODE_LIST(V) \
+ V(X64F64x2PromoteLowF32x4) \
+ V(X64Movb) \
+ V(X64Movdqu) \
+ V(X64Movl) \
+ V(X64Movq) \
+ V(X64Movsd) \
+ V(X64Movss) \
+ V(X64Movsxbl) \
+ V(X64Movsxbq) \
+ V(X64Movsxlq) \
+ V(X64Movsxwl) \
+ V(X64Movsxwq) \
+ V(X64Movw) \
+ V(X64Movzxbl) \
+ V(X64Movzxbq) \
+ V(X64Movzxwl) \
+ V(X64Movzxwq) \
+ V(X64Pextrb) \
+ V(X64Pextrw) \
+ V(X64Pinsrb) \
+ V(X64Pinsrd) \
+ V(X64Pinsrq) \
+ V(X64Pinsrw) \
+ V(X64S128Load16Splat) \
+ V(X64S128Load16x4S) \
+ V(X64S128Load16x4U) \
+ V(X64S128Load32Splat) \
+ V(X64S128Load32x2S) \
+ V(X64S128Load32x2U) \
+ V(X64S128Load64Splat) \
+ V(X64S128Load8Splat) \
+ V(X64S128Load8x8S) \
+ V(X64S128Load8x8U) \
+ V(X64S128Store32Lane) \
+ V(X64S128Store64Lane)
+
+#define TARGET_ARCH_OPCODE_LIST(V) \
+ TARGET_ARCH_OPCODE_WITH_MEMORY_ACCESS_MODE_LIST(V) \
+ V(X64Add) \
+ V(X64Add32) \
+ V(X64And) \
+ V(X64And32) \
+ V(X64Cmp) \
+ V(X64Cmp32) \
+ V(X64Cmp16) \
+ V(X64Cmp8) \
+ V(X64Test) \
+ V(X64Test32) \
+ V(X64Test16) \
+ V(X64Test8) \
+ V(X64Or) \
+ V(X64Or32) \
+ V(X64Xor) \
+ V(X64Xor32) \
+ V(X64Sub) \
+ V(X64Sub32) \
+ V(X64Imul) \
+ V(X64Imul32) \
+ V(X64ImulHigh32) \
+ V(X64UmulHigh32) \
+ V(X64Idiv) \
+ V(X64Idiv32) \
+ V(X64Udiv) \
+ V(X64Udiv32) \
+ V(X64Not) \
+ V(X64Not32) \
+ V(X64Neg) \
+ V(X64Neg32) \
+ V(X64Shl) \
+ V(X64Shl32) \
+ V(X64Shr) \
+ V(X64Shr32) \
+ V(X64Sar) \
+ V(X64Sar32) \
+ V(X64Rol) \
+ V(X64Rol32) \
+ V(X64Ror) \
+ V(X64Ror32) \
+ V(X64Lzcnt) \
+ V(X64Lzcnt32) \
+ V(X64Tzcnt) \
+ V(X64Tzcnt32) \
+ V(X64Popcnt) \
+ V(X64Popcnt32) \
+ V(X64Bswap) \
+ V(X64Bswap32) \
+ V(X64MFence) \
+ V(X64LFence) \
+ V(SSEFloat32Cmp) \
+ V(SSEFloat32Add) \
+ V(SSEFloat32Sub) \
+ V(SSEFloat32Mul) \
+ V(SSEFloat32Div) \
+ V(SSEFloat32Sqrt) \
+ V(SSEFloat32ToFloat64) \
+ V(SSEFloat32ToInt32) \
+ V(SSEFloat32ToUint32) \
+ V(SSEFloat32Round) \
+ V(SSEFloat64Cmp) \
+ V(SSEFloat64Add) \
+ V(SSEFloat64Sub) \
+ V(SSEFloat64Mul) \
+ V(SSEFloat64Div) \
+ V(SSEFloat64Mod) \
+ V(SSEFloat64Sqrt) \
+ V(SSEFloat64Round) \
+ V(SSEFloat32Max) \
+ V(SSEFloat64Max) \
+ V(SSEFloat32Min) \
+ V(SSEFloat64Min) \
+ V(SSEFloat64ToFloat32) \
+ V(SSEFloat64ToInt32) \
+ V(SSEFloat64ToUint32) \
+ V(SSEFloat32ToInt64) \
+ V(SSEFloat64ToInt64) \
+ V(SSEFloat32ToUint64) \
+ V(SSEFloat64ToUint64) \
+ V(SSEInt32ToFloat64) \
+ V(SSEInt32ToFloat32) \
+ V(SSEInt64ToFloat32) \
+ V(SSEInt64ToFloat64) \
+ V(SSEUint64ToFloat32) \
+ V(SSEUint64ToFloat64) \
+ V(SSEUint32ToFloat64) \
+ V(SSEUint32ToFloat32) \
+ V(SSEFloat64ExtractLowWord32) \
+ V(SSEFloat64ExtractHighWord32) \
+ V(SSEFloat64InsertLowWord32) \
+ V(SSEFloat64InsertHighWord32) \
+ V(SSEFloat64LoadLowWord32) \
+ V(SSEFloat64SilenceNaN) \
+ V(AVXFloat32Cmp) \
+ V(AVXFloat32Add) \
+ V(AVXFloat32Sub) \
+ V(AVXFloat32Mul) \
+ V(AVXFloat32Div) \
+ V(AVXFloat64Cmp) \
+ V(AVXFloat64Add) \
+ V(AVXFloat64Sub) \
+ V(AVXFloat64Mul) \
+ V(AVXFloat64Div) \
+ V(X64Float64Abs) \
+ V(X64Float64Neg) \
+ V(X64Float32Abs) \
+ V(X64Float32Neg) \
+ V(X64MovqDecompressTaggedSigned) \
+ V(X64MovqDecompressTaggedPointer) \
+ V(X64MovqDecompressAnyTagged) \
+ V(X64MovqCompressTagged) \
+ V(X64BitcastFI) \
+ V(X64BitcastDL) \
+ V(X64BitcastIF) \
+ V(X64BitcastLD) \
+ V(X64Lea32) \
+ V(X64Lea) \
+ V(X64Dec32) \
+ V(X64Inc32) \
+ V(X64Push) \
+ V(X64Poke) \
+ V(X64Peek) \
+ V(X64F64x2Splat) \
+ V(X64F64x2ExtractLane) \
+ V(X64F64x2ReplaceLane) \
+ V(X64F64x2Abs) \
+ V(X64F64x2Neg) \
+ V(X64F64x2Sqrt) \
+ V(X64F64x2Add) \
+ V(X64F64x2Sub) \
+ V(X64F64x2Mul) \
+ V(X64F64x2Div) \
+ V(X64F64x2Min) \
+ V(X64F64x2Max) \
+ V(X64F64x2Eq) \
+ V(X64F64x2Ne) \
+ V(X64F64x2Lt) \
+ V(X64F64x2Le) \
+ V(X64F64x2Qfma) \
+ V(X64F64x2Qfms) \
+ V(X64F64x2Pmin) \
+ V(X64F64x2Pmax) \
+ V(X64F64x2Round) \
+ V(X64F64x2ConvertLowI32x4S) \
+ V(X64F64x2ConvertLowI32x4U) \
+ V(X64F32x4Splat) \
+ V(X64F32x4ExtractLane) \
+ V(X64F32x4ReplaceLane) \
+ V(X64F32x4SConvertI32x4) \
+ V(X64F32x4UConvertI32x4) \
+ V(X64F32x4Abs) \
+ V(X64F32x4Neg) \
+ V(X64F32x4Sqrt) \
+ V(X64F32x4RecipApprox) \
+ V(X64F32x4RecipSqrtApprox) \
+ V(X64F32x4Add) \
+ V(X64F32x4Sub) \
+ V(X64F32x4Mul) \
+ V(X64F32x4Div) \
+ V(X64F32x4Min) \
+ V(X64F32x4Max) \
+ V(X64F32x4Eq) \
+ V(X64F32x4Ne) \
+ V(X64F32x4Lt) \
+ V(X64F32x4Le) \
+ V(X64F32x4Qfma) \
+ V(X64F32x4Qfms) \
+ V(X64F32x4Pmin) \
+ V(X64F32x4Pmax) \
+ V(X64F32x4Round) \
+ V(X64F32x4DemoteF64x2Zero) \
+ V(X64I64x2Splat) \
+ V(X64I64x2ExtractLane) \
+ V(X64I64x2Abs) \
+ V(X64I64x2Neg) \
+ V(X64I64x2BitMask) \
+ V(X64I64x2Shl) \
+ V(X64I64x2ShrS) \
+ V(X64I64x2Add) \
+ V(X64I64x2Sub) \
+ V(X64I64x2Mul) \
+ V(X64I64x2Eq) \
+ V(X64I64x2GtS) \
+ V(X64I64x2GeS) \
+ V(X64I64x2Ne) \
+ V(X64I64x2ShrU) \
+ V(X64I64x2ExtMulLowI32x4S) \
+ V(X64I64x2ExtMulHighI32x4S) \
+ V(X64I64x2ExtMulLowI32x4U) \
+ V(X64I64x2ExtMulHighI32x4U) \
+ V(X64I64x2SConvertI32x4Low) \
+ V(X64I64x2SConvertI32x4High) \
+ V(X64I64x2UConvertI32x4Low) \
+ V(X64I64x2UConvertI32x4High) \
+ V(X64I32x4Splat) \
+ V(X64I32x4ExtractLane) \
+ V(X64I32x4SConvertF32x4) \
+ V(X64I32x4SConvertI16x8Low) \
+ V(X64I32x4SConvertI16x8High) \
+ V(X64I32x4Neg) \
+ V(X64I32x4Shl) \
+ V(X64I32x4ShrS) \
+ V(X64I32x4Add) \
+ V(X64I32x4Sub) \
+ V(X64I32x4Mul) \
+ V(X64I32x4MinS) \
+ V(X64I32x4MaxS) \
+ V(X64I32x4Eq) \
+ V(X64I32x4Ne) \
+ V(X64I32x4GtS) \
+ V(X64I32x4GeS) \
+ V(X64I32x4UConvertF32x4) \
+ V(X64I32x4UConvertI16x8Low) \
+ V(X64I32x4UConvertI16x8High) \
+ V(X64I32x4ShrU) \
+ V(X64I32x4MinU) \
+ V(X64I32x4MaxU) \
+ V(X64I32x4GtU) \
+ V(X64I32x4GeU) \
+ V(X64I32x4Abs) \
+ V(X64I32x4BitMask) \
+ V(X64I32x4DotI16x8S) \
+ V(X64I32x4ExtMulLowI16x8S) \
+ V(X64I32x4ExtMulHighI16x8S) \
+ V(X64I32x4ExtMulLowI16x8U) \
+ V(X64I32x4ExtMulHighI16x8U) \
+ V(X64I32x4ExtAddPairwiseI16x8S) \
+ V(X64I32x4ExtAddPairwiseI16x8U) \
+ V(X64I32x4TruncSatF64x2SZero) \
+ V(X64I32x4TruncSatF64x2UZero) \
+ V(X64I16x8Splat) \
+ V(X64I16x8ExtractLaneS) \
+ V(X64I16x8SConvertI8x16Low) \
+ V(X64I16x8SConvertI8x16High) \
+ V(X64I16x8Neg) \
+ V(X64I16x8Shl) \
+ V(X64I16x8ShrS) \
+ V(X64I16x8SConvertI32x4) \
+ V(X64I16x8Add) \
+ V(X64I16x8AddSatS) \
+ V(X64I16x8Sub) \
+ V(X64I16x8SubSatS) \
+ V(X64I16x8Mul) \
+ V(X64I16x8MinS) \
+ V(X64I16x8MaxS) \
+ V(X64I16x8Eq) \
+ V(X64I16x8Ne) \
+ V(X64I16x8GtS) \
+ V(X64I16x8GeS) \
+ V(X64I16x8UConvertI8x16Low) \
+ V(X64I16x8UConvertI8x16High) \
+ V(X64I16x8ShrU) \
+ V(X64I16x8UConvertI32x4) \
+ V(X64I16x8AddSatU) \
+ V(X64I16x8SubSatU) \
+ V(X64I16x8MinU) \
+ V(X64I16x8MaxU) \
+ V(X64I16x8GtU) \
+ V(X64I16x8GeU) \
+ V(X64I16x8RoundingAverageU) \
+ V(X64I16x8Abs) \
+ V(X64I16x8BitMask) \
+ V(X64I16x8ExtMulLowI8x16S) \
+ V(X64I16x8ExtMulHighI8x16S) \
+ V(X64I16x8ExtMulLowI8x16U) \
+ V(X64I16x8ExtMulHighI8x16U) \
+ V(X64I16x8ExtAddPairwiseI8x16S) \
+ V(X64I16x8ExtAddPairwiseI8x16U) \
+ V(X64I16x8Q15MulRSatS) \
+ V(X64I8x16Splat) \
+ V(X64I8x16ExtractLaneS) \
+ V(X64I8x16SConvertI16x8) \
+ V(X64I8x16Neg) \
+ V(X64I8x16Shl) \
+ V(X64I8x16ShrS) \
+ V(X64I8x16Add) \
+ V(X64I8x16AddSatS) \
+ V(X64I8x16Sub) \
+ V(X64I8x16SubSatS) \
+ V(X64I8x16MinS) \
+ V(X64I8x16MaxS) \
+ V(X64I8x16Eq) \
+ V(X64I8x16Ne) \
+ V(X64I8x16GtS) \
+ V(X64I8x16GeS) \
+ V(X64I8x16UConvertI16x8) \
+ V(X64I8x16AddSatU) \
+ V(X64I8x16SubSatU) \
+ V(X64I8x16ShrU) \
+ V(X64I8x16MinU) \
+ V(X64I8x16MaxU) \
+ V(X64I8x16GtU) \
+ V(X64I8x16GeU) \
+ V(X64I8x16RoundingAverageU) \
+ V(X64I8x16Abs) \
+ V(X64I8x16BitMask) \
+ V(X64S128Const) \
+ V(X64S128Zero) \
+ V(X64S128AllOnes) \
+ V(X64S128Not) \
+ V(X64S128And) \
+ V(X64S128Or) \
+ V(X64S128Xor) \
+ V(X64S128Select) \
+ V(X64S128AndNot) \
+ V(X64I8x16Swizzle) \
+ V(X64I8x16Shuffle) \
+ V(X64I8x16Popcnt) \
+ V(X64Shufps) \
+ V(X64S32x4Rotate) \
+ V(X64S32x4Swizzle) \
+ V(X64S32x4Shuffle) \
+ V(X64S16x8Blend) \
+ V(X64S16x8HalfShuffle1) \
+ V(X64S16x8HalfShuffle2) \
+ V(X64S8x16Alignr) \
+ V(X64S16x8Dup) \
+ V(X64S8x16Dup) \
+ V(X64S16x8UnzipHigh) \
+ V(X64S16x8UnzipLow) \
+ V(X64S8x16UnzipHigh) \
+ V(X64S8x16UnzipLow) \
+ V(X64S64x2UnpackHigh) \
+ V(X64S32x4UnpackHigh) \
+ V(X64S16x8UnpackHigh) \
+ V(X64S8x16UnpackHigh) \
+ V(X64S64x2UnpackLow) \
+ V(X64S32x4UnpackLow) \
+ V(X64S16x8UnpackLow) \
+ V(X64S8x16UnpackLow) \
+ V(X64S8x16TransposeLow) \
+ V(X64S8x16TransposeHigh) \
+ V(X64S8x8Reverse) \
+ V(X64S8x4Reverse) \
+ V(X64S8x2Reverse) \
+ V(X64V128AnyTrue) \
+ V(X64I64x2AllTrue) \
+ V(X64I32x4AllTrue) \
+ V(X64I16x8AllTrue) \
+ V(X64I8x16AllTrue) \
+ V(X64Word64AtomicAddUint64) \
+ V(X64Word64AtomicSubUint64) \
+ V(X64Word64AtomicAndUint64) \
+ V(X64Word64AtomicOrUint64) \
+ V(X64Word64AtomicXorUint64) \
+ V(X64Word64AtomicStoreWord64) \
+ V(X64Word64AtomicExchangeUint64) \
V(X64Word64AtomicCompareExchangeUint64)
// Addressing modes represent the "shape" of inputs to an instruction.
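
The reshuffle above is the usual X-macro composition: the new TARGET_ARCH_OPCODE_WITH_MEMORY_ACCESS_MODE_LIST is spliced into TARGET_ARCH_OPCODE_LIST, so every opcode is still declared exactly once while the sub-list can also drive a "supports a MemoryAccessMode" predicate. A toy, self-contained sketch of the pattern (macro and opcode names here are invented, not the real V8 lists):

  #include <cstdio>

  // Sub-list for opcodes that take a memory access mode, folded into the full
  // list so each opcode gets exactly one enum value.
  #define OPCODES_WITH_MEMORY_ACCESS_MODE(V) \
    V(Movl)                                  \
    V(Movq)

  #define ALL_OPCODES(V)               \
    OPCODES_WITH_MEMORY_ACCESS_MODE(V) \
    V(Add)                             \
    V(Sub)

  enum Opcode {
  #define DECLARE(name) k##name,
    ALL_OPCODES(DECLARE)
  #undef DECLARE
  };

  // The sub-list can also generate a predicate, which is the point of
  // splitting it out of the full list.
  bool HasMemoryAccessMode(Opcode op) {
    switch (op) {
  #define CASE(name) case k##name:
      OPCODES_WITH_MEMORY_ACCESS_MODE(CASE)
  #undef CASE
      return true;
      default:
        return false;
    }
  }

  int main() {
    printf("%d %d\n", HasMemoryAccessMode(kMovq), HasMemoryAccessMode(kAdd));
  }
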
diff --git a/deps/v8/src/compiler/backend/x64/instruction-selector-x64.cc b/deps/v8/src/compiler/backend/x64/instruction-selector-x64.cc
index 2f44f0dee5..c477c44b07 100644
--- a/deps/v8/src/compiler/backend/x64/instruction-selector-x64.cc
+++ b/deps/v8/src/compiler/backend/x64/instruction-selector-x64.cc
@@ -16,6 +16,7 @@
#include "src/compiler/machine-operator.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties.h"
+#include "src/compiler/opcodes.h"
#include "src/roots/roots-inl.h"
#if V8_ENABLE_WEBASSEMBLY
@@ -376,9 +377,9 @@ void InstructionSelector::VisitStackSlot(Node* node) {
sequence()->AddImmediate(Constant(slot)), 0, nullptr);
}
-void InstructionSelector::VisitAbortCSAAssert(Node* node) {
+void InstructionSelector::VisitAbortCSADcheck(Node* node) {
X64OperandGenerator g(this);
- Emit(kArchAbortCSAAssert, g.NoOutput(), g.UseFixed(node->InputAt(0), rdx));
+ Emit(kArchAbortCSADcheck, g.NoOutput(), g.UseFixed(node->InputAt(0), rdx));
}
void InstructionSelector::VisitLoadLane(Node* node) {
@@ -1006,15 +1007,15 @@ void InstructionSelector::VisitWord64Shl(Node* node) {
kPositiveDisplacement);
return;
} else {
- Int64BinopMatcher m(node);
- if ((m.left().IsChangeInt32ToInt64() ||
- m.left().IsChangeUint32ToUint64()) &&
- m.right().IsInRange(32, 63)) {
+ Int64BinopMatcher bm(node);
+ if ((bm.left().IsChangeInt32ToInt64() ||
+ bm.left().IsChangeUint32ToUint64()) &&
+ bm.right().IsInRange(32, 63)) {
// There's no need to sign/zero-extend to 64-bit if we shift out the upper
// 32 bits anyway.
Emit(kX64Shl, g.DefineSameAsFirst(node),
- g.UseRegister(m.left().node()->InputAt(0)),
- g.UseImmediate(m.right().node()));
+ g.UseRegister(bm.left().node()->InputAt(0)),
+ g.UseImmediate(bm.right().node()));
return;
}
}
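
The comment in the hunk above relies on a small arithmetic fact: once the shift count is in [32, 63], the low 32 bits of the operand fully determine the result, so whether the upper half was sign- or zero-extended is irrelevant. A stand-alone check of that claim (illustrative only):

  #include <cstdint>
  #include <cstdio>

  int main() {
    uint32_t x = 0x80000001u;  // negative when viewed as int32_t
    uint64_t sext = (uint64_t)(int64_t)(int32_t)x;  // sign-extended
    uint64_t zext = (uint64_t)x;                    // zero-extended
    for (int n = 32; n < 64; n++) {
      if ((sext << n) != (zext << n)) {
        printf("mismatch at %d\n", n);
        return 1;
      }
    }
    printf("identical for all shifts in [32, 63]\n");
  }
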
@@ -2434,19 +2435,19 @@ void InstructionSelector::VisitWordCompareZero(Node* user, Node* value,
Int64BinopMatcher m(value);
if (m.right().Is(0)) {
// Try to combine the branch with a comparison.
- Node* const user = m.node();
- Node* const value = m.left().node();
- if (CanCover(user, value)) {
- switch (value->opcode()) {
+ Node* const eq_user = m.node();
+ Node* const eq_value = m.left().node();
+ if (CanCover(eq_user, eq_value)) {
+ switch (eq_value->opcode()) {
case IrOpcode::kInt64Sub:
- return VisitWordCompare(this, value, kX64Cmp, cont);
+ return VisitWordCompare(this, eq_value, kX64Cmp, cont);
case IrOpcode::kWord64And:
- return VisitWordCompare(this, value, kX64Test, cont);
+ return VisitWordCompare(this, eq_value, kX64Test, cont);
default:
break;
}
}
- return VisitCompareZero(this, user, value, kX64Cmp, cont);
+ return VisitCompareZero(this, eq_user, eq_value, kX64Cmp, cont);
}
return VisitWord64EqualImpl(this, value, cont);
}
@@ -3040,7 +3041,6 @@ VISIT_ATOMIC_BINOP(Xor)
#define SIMD_UNOP_LIST(V) \
V(F64x2Sqrt) \
V(F64x2ConvertLowI32x4S) \
- V(F64x2PromoteLowF32x4) \
V(F32x4SConvertI32x4) \
V(F32x4Abs) \
V(F32x4Neg) \
@@ -3842,6 +3842,26 @@ void InstructionSelector::VisitI64x2Abs(Node* node) {
}
}
+void InstructionSelector::VisitF64x2PromoteLowF32x4(Node* node) {
+ X64OperandGenerator g(this);
+ InstructionCode code = kX64F64x2PromoteLowF32x4;
+ Node* input = node->InputAt(0);
+ LoadTransformMatcher m(input);
+
+ if (m.Is(LoadTransformation::kS128Load64Zero) && CanCover(node, input)) {
+ if (m.ResolvedValue().kind == MemoryAccessKind::kProtected) {
+ code |= AccessModeField::encode(kMemoryAccessProtected);
+ }
+  // A LoadTransform cannot be eliminated, so it would be visited even if
+  // unused. Mark it as defined so that we don't visit it.
+ MarkAsDefined(input);
+ VisitLoad(node, input, code);
+ return;
+ }
+
+ VisitRR(this, node, code);
+}
+
void InstructionSelector::AddOutputToSelectContinuation(OperandGenerator* g,
int first_input_index,
Node* node) {
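
VisitF64x2PromoteLowF32x4 above folds a covered S128Load64Zero input into the promote and carries the protected-access bit into the instruction code via a bit field. A stand-alone sketch of that flag-merging step (the field layout and the 0x42 opcode stand-in are made up for illustration):

  #include <cstdint>
  #include <cstdio>

  enum MemoryAccessMode { kMemoryAccessDirect = 0, kMemoryAccessProtected = 1 };

  // Toy bit field: packs the access mode into the upper bits of an opcode.
  struct AccessModeField {
    static constexpr int kShift = 24;
    static uint32_t encode(MemoryAccessMode mode) {
      return static_cast<uint32_t>(mode) << kShift;
    }
    static MemoryAccessMode decode(uint32_t code) {
      return static_cast<MemoryAccessMode>((code >> kShift) & 1);
    }
  };

  int main() {
    uint32_t code = 0x42;  // stand-in for an arch opcode
    code |= AccessModeField::encode(kMemoryAccessProtected);
    printf("opcode=0x%x protected=%d\n", code & 0xFFFFFF,
           AccessModeField::decode(code) == kMemoryAccessProtected);
  }
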
diff --git a/deps/v8/src/compiler/branch-elimination.cc b/deps/v8/src/compiler/branch-elimination.cc
index 1515340503..d2fce8a276 100644
--- a/deps/v8/src/compiler/branch-elimination.cc
+++ b/deps/v8/src/compiler/branch-elimination.cc
@@ -5,6 +5,7 @@
#include "src/compiler/branch-elimination.h"
#include "src/base/small-vector.h"
+#include "src/compiler/compiler-source-position-table.h"
#include "src/compiler/js-graph.h"
#include "src/compiler/node-properties.h"
#include "src/compiler/simplified-operator.h"
@@ -14,12 +15,15 @@ namespace internal {
namespace compiler {
BranchElimination::BranchElimination(Editor* editor, JSGraph* js_graph,
- Zone* zone, Phase phase)
+ Zone* zone,
+ SourcePositionTable* source_positions,
+ Phase phase)
: AdvancedReducer(editor),
jsgraph_(js_graph),
node_conditions_(js_graph->graph()->NodeCount(), zone),
reduced_(js_graph->graph()->NodeCount(), zone),
zone_(zone),
+ source_positions_(source_positions),
dead_(js_graph->Dead()),
phase_(phase) {}
@@ -158,6 +162,72 @@ Reduction BranchElimination::ReduceBranch(Node* node) {
return TakeConditionsFromFirstControl(node);
}
+// Simplify a trap following a merge.
+// Assuming condition is in control1's path conditions, and !condition is in
+// control2's path conditions, the following transformation takes place:
+//
+// control1 control2 condition effect1
+// \ / \ / |
+// Merge X | control1
+// | / \ | /
+// effect1 effect2 | | TrapIf control2
+// \ | /| ==> | \ /
+// EffectPhi | | effect2 Merge
+// | / | | /
+// condition | / \ | /
+// \ | / EffectPhi
+// TrapIf
+// TODO(manoskouk): We require that the trap's effect input is the Merge's
+// EffectPhi, so we can ensure that the new traps' effect inputs are not
+// dominated by the Merge. Can we relax this?
+bool BranchElimination::TryPullTrapIntoMerge(Node* node) {
+ DCHECK(node->opcode() == IrOpcode::kTrapIf ||
+ node->opcode() == IrOpcode::kTrapUnless);
+ Node* merge = NodeProperties::GetControlInput(node);
+ DCHECK_EQ(merge->opcode(), IrOpcode::kMerge);
+ Node* condition = NodeProperties::GetValueInput(node, 0);
+ Node* effect_input = NodeProperties::GetEffectInput(node);
+ if (!(effect_input->opcode() == IrOpcode::kEffectPhi &&
+ NodeProperties::GetControlInput(effect_input) == merge)) {
+ return false;
+ }
+
+ bool trapping_condition = node->opcode() == IrOpcode::kTrapIf;
+ base::SmallVector<Node*, 8> new_merge_inputs;
+ for (Edge edge : merge->input_edges()) {
+ Node* input = edge.to();
+ ControlPathConditions from_input = node_conditions_.Get(input);
+ Node* previous_branch;
+ bool condition_value;
+ if (!from_input.LookupCondition(condition, &previous_branch,
+ &condition_value)) {
+ return false;
+ }
+ if (condition_value == trapping_condition) {
+ Node* inputs[] = {
+ condition, NodeProperties::GetEffectInput(effect_input, edge.index()),
+ input};
+ Node* trap_clone = graph()->NewNode(node->op(), 3, inputs);
+ if (source_positions_) {
+ source_positions_->SetSourcePosition(
+ trap_clone, source_positions_->GetSourcePosition(node));
+ }
+ new_merge_inputs.emplace_back(trap_clone);
+ } else {
+ new_merge_inputs.emplace_back(input);
+ }
+ }
+
+ for (int i = 0; i < merge->InputCount(); i++) {
+ merge->ReplaceInput(i, new_merge_inputs[i]);
+ }
+ ReplaceWithValue(node, dead(), dead(), merge);
+ node->Kill();
+ Revisit(merge);
+
+ return true;
+}
+
Reduction BranchElimination::ReduceTrapConditional(Node* node) {
DCHECK(node->opcode() == IrOpcode::kTrapIf ||
node->opcode() == IrOpcode::kTrapUnless);
@@ -167,17 +237,59 @@ Reduction BranchElimination::ReduceTrapConditional(Node* node) {
// If we do not know anything about the predecessor, do not propagate just
// yet because we will have to recompute anyway once we compute the
// predecessor.
- if (!reduced_.Get(control_input)) {
- return NoChange();
+ if (!reduced_.Get(control_input)) return NoChange();
+
+ // If the trap comes directly after a merge, pull it into the merge. This will
+ // unlock other optimizations later.
+ if (control_input->opcode() == IrOpcode::kMerge &&
+ TryPullTrapIntoMerge(node)) {
+ return Replace(dead());
}
+
ControlPathConditions from_input = node_conditions_.Get(control_input);
- Node* branch;
+ Node* previous_branch;
bool condition_value;
- if (from_input.LookupCondition(condition, &branch, &condition_value)) {
+ if (from_input.LookupCondition(condition, &previous_branch,
+ &condition_value)) {
if (condition_value == trapping_condition) {
- // This will always trap. Mark its outputs as dead and connect it to
- // graph()->end().
+ // Special case: Trap directly inside a branch without sibling nodes.
+ // Replace the branch with the trap.
+      //   condition  control              condition  control
+      //    |    \    /                          \      /
+      //    |     Branch                           TrapIf
+      //    |    /      \          ==>               |
+      //    |  IfTrue  IfFalse                  <subgraph2>
+      //    |  /          |
+      //   TrapIf     <subgraph2>                  Dead
+      //     |                                       |
+      //  <subgraph1>                          <subgraph1>
+ // (and symmetrically for TrapUnless.)
+ if ((control_input->opcode() == IrOpcode::kIfTrue ||
+ control_input->opcode() == IrOpcode::kIfFalse) &&
+ control_input->UseCount() == 1) {
+ Node* branch = NodeProperties::GetControlInput(control_input);
+ DCHECK_EQ(branch->opcode(), IrOpcode::kBranch);
+ if (condition == NodeProperties::GetValueInput(branch, 0)) {
+ Node* other_if_branch = nullptr;
+ for (Node* use : branch->uses()) {
+ if (use != control_input) other_if_branch = use;
+ }
+ DCHECK_NOT_NULL(other_if_branch);
+
+ node->ReplaceInput(NodeProperties::FirstControlIndex(node),
+ NodeProperties::GetControlInput(branch));
+ ReplaceWithValue(node, dead(), dead(), dead());
+ ReplaceWithValue(other_if_branch, node, node, node);
+ other_if_branch->Kill();
+ control_input->Kill();
+ branch->Kill();
+ return Changed(node);
+ }
+ }
+
+ // General case: This will always trap. Mark its outputs as dead and
+ // connect it to graph()->end().
ReplaceWithValue(node, dead(), dead(), dead());
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = graph()->NewNode(common()->Throw(), effect, node);
diff --git a/deps/v8/src/compiler/branch-elimination.h b/deps/v8/src/compiler/branch-elimination.h
index 93bacbff7b..7964e0a1b9 100644
--- a/deps/v8/src/compiler/branch-elimination.h
+++ b/deps/v8/src/compiler/branch-elimination.h
@@ -19,6 +19,7 @@ namespace compiler {
// Forward declarations.
class CommonOperatorBuilder;
class JSGraph;
+class SourcePositionTable;
class V8_EXPORT_PRIVATE BranchElimination final
: public NON_EXPORTED_BASE(AdvancedReducer) {
@@ -28,7 +29,7 @@ class V8_EXPORT_PRIVATE BranchElimination final
kLATE,
};
BranchElimination(Editor* editor, JSGraph* js_graph, Zone* zone,
- Phase phase = kLATE);
+                    SourcePositionTable* source_positions, Phase phase = kLATE);
~BranchElimination() final;
const char* reducer_name() const override { return "BranchElimination"; }
@@ -108,6 +109,7 @@ class V8_EXPORT_PRIVATE BranchElimination final
Reduction ReduceStart(Node* node);
Reduction ReduceOtherControl(Node* node);
void SimplifyBranchCondition(Node* branch);
+ bool TryPullTrapIntoMerge(Node* node);
Reduction TakeConditionsFromFirstControl(Node* node);
Reduction UpdateConditions(Node* node, ControlPathConditions conditions);
@@ -131,6 +133,7 @@ class V8_EXPORT_PRIVATE BranchElimination final
node_conditions_;
NodeAuxData<bool> reduced_;
Zone* zone_;
+ SourcePositionTable* source_positions_;
Node* dead_;
Phase phase_;
};
diff --git a/deps/v8/src/compiler/c-linkage.cc b/deps/v8/src/compiler/c-linkage.cc
index e62babccf1..95a84ceeab 100644
--- a/deps/v8/src/compiler/c-linkage.cc
+++ b/deps/v8/src/compiler/c-linkage.cc
@@ -66,6 +66,8 @@ namespace {
// == arm64 ====================================================================
// ===========================================================================
#define PARAM_REGISTERS x0, x1, x2, x3, x4, x5, x6, x7
+#define FP_PARAM_REGISTERS d0, d1, d2, d3, d4, d5, d6, d7
+#define FP_RETURN_REGISTER d0
#define CALLEE_SAVE_REGISTERS \
(1 << x19.code()) | (1 << x20.code()) | (1 << x21.code()) | \
(1 << x22.code()) | (1 << x23.code()) | (1 << x24.code()) | \
diff --git a/deps/v8/src/compiler/code-assembler.cc b/deps/v8/src/compiler/code-assembler.cc
index d27744072a..a723d21a10 100644
--- a/deps/v8/src/compiler/code-assembler.cc
+++ b/deps/v8/src/compiler/code-assembler.cc
@@ -245,9 +245,9 @@ void CodeAssembler::GenerateCheckMaybeObjectIsObject(TNode<MaybeObject> node,
base::EmbeddedVector<char, 1024> message;
SNPrintF(message, "no Object: %s", location);
TNode<String> message_node = StringConstant(message.begin());
- // This somewhat misuses the AbortCSAAssert runtime function. This will print
- // "abort: CSA_ASSERT failed: <message>", which is good enough.
- AbortCSAAssert(message_node);
+ // This somewhat misuses the AbortCSADcheck runtime function. This will print
+ // "abort: CSA_DCHECK failed: <message>", which is good enough.
+ AbortCSADcheck(message_node);
Unreachable();
Bind(&ok);
}
@@ -503,8 +503,8 @@ void CodeAssembler::ReturnIf(TNode<BoolT> condition, TNode<Object> value) {
Bind(&if_continue);
}
-void CodeAssembler::AbortCSAAssert(Node* message) {
- raw_assembler()->AbortCSAAssert(message);
+void CodeAssembler::AbortCSADcheck(Node* message) {
+ raw_assembler()->AbortCSADcheck(message);
}
void CodeAssembler::DebugBreak() { raw_assembler()->DebugBreak(); }
diff --git a/deps/v8/src/compiler/code-assembler.h b/deps/v8/src/compiler/code-assembler.h
index 7a22086260..fcef5bdd72 100644
--- a/deps/v8/src/compiler/code-assembler.h
+++ b/deps/v8/src/compiler/code-assembler.h
@@ -631,7 +631,7 @@ class V8_EXPORT_PRIVATE CodeAssembler {
void ReturnIf(TNode<BoolT> condition, TNode<Object> value);
- void AbortCSAAssert(Node* message);
+ void AbortCSADcheck(Node* message);
void DebugBreak();
void Unreachable();
void Comment(const char* msg) {
diff --git a/deps/v8/src/compiler/csa-load-elimination.cc b/deps/v8/src/compiler/csa-load-elimination.cc
index b5df8b542b..ece79a7156 100644
--- a/deps/v8/src/compiler/csa-load-elimination.cc
+++ b/deps/v8/src/compiler/csa-load-elimination.cc
@@ -46,7 +46,7 @@ Reduction CsaLoadElimination::Reduce(Node* node) {
case IrOpcode::kStoreToObject:
return ReduceStoreToObject(node, ObjectAccessOf(node->op()));
case IrOpcode::kDebugBreak:
- case IrOpcode::kAbortCSAAssert:
+ case IrOpcode::kAbortCSADcheck:
// Avoid changing optimizations in the presence of debug instructions.
return PropagateInputState(node);
case IrOpcode::kCall:
diff --git a/deps/v8/src/compiler/effect-control-linearizer.cc b/deps/v8/src/compiler/effect-control-linearizer.cc
index 83eb6c215c..9d000724b5 100644
--- a/deps/v8/src/compiler/effect-control-linearizer.cc
+++ b/deps/v8/src/compiler/effect-control-linearizer.cc
@@ -5165,6 +5165,8 @@ EffectControlLinearizer::AdaptOverloadedFastCallArgument(
Node* value_is_smi = ObjectIsSmi(node);
__ GotoIf(value_is_smi, if_error);
+ ExternalReference::Type ref_type = ExternalReference::FAST_C_CALL;
+
switch (arg_type.GetSequenceType()) {
case CTypeInfo::SequenceType::kIsSequence: {
CHECK_EQ(arg_type.GetType(), CTypeInfo::Type::kVoid);
@@ -5185,8 +5187,8 @@ EffectControlLinearizer::AdaptOverloadedFastCallArgument(
kNoWriteBarrier),
stack_slot, 0, node);
- Node* target_address = __ ExternalConstant(
- ExternalReference::Create(c_functions[func_index].address));
+ Node* target_address = __ ExternalConstant(ExternalReference::Create(
+ c_functions[func_index].address, ref_type));
__ Goto(&merge, target_address, stack_slot);
break;
}
@@ -5199,8 +5201,8 @@ EffectControlLinearizer::AdaptOverloadedFastCallArgument(
fast_api_call::GetTypedArrayElementsKind(
overloads_resolution_result.element_type),
&next);
- Node* target_address = __ ExternalConstant(
- ExternalReference::Create(c_functions[func_index].address));
+ Node* target_address = __ ExternalConstant(ExternalReference::Create(
+ c_functions[func_index].address, ref_type));
__ Goto(&merge, target_address, stack_slot);
break;
}
@@ -5387,6 +5389,8 @@ Node* EffectControlLinearizer::LowerFastApiCall(Node* node) {
Node** const inputs = graph()->zone()->NewArray<Node*>(
kFastTargetAddressInputCount + c_arg_count + n.FastCallExtraInputCount());
+ ExternalReference::Type ref_type = ExternalReference::FAST_C_CALL;
+
// The inputs to {Call} node for the fast call look like:
// [fast callee, receiver, ... C arguments, [optional Options], effect,
// control].
@@ -5398,7 +5402,7 @@ Node* EffectControlLinearizer::LowerFastApiCall(Node* node) {
// with a Phi node created by AdaptOverloadedFastCallArgument.
inputs[kFastTargetAddressInputIndex] =
(c_functions.size() == 1) ? __ ExternalConstant(ExternalReference::Create(
- c_functions[0].address))
+ c_functions[0].address, ref_type))
: nullptr;
for (int i = 0; i < c_arg_count; ++i) {
diff --git a/deps/v8/src/compiler/escape-analysis.cc b/deps/v8/src/compiler/escape-analysis.cc
index 7ff6ab684f..bf693c71dc 100644
--- a/deps/v8/src/compiler/escape-analysis.cc
+++ b/deps/v8/src/compiler/escape-analysis.cc
@@ -510,12 +510,15 @@ int OffsetOfFieldAccess(const Operator* op) {
return access.offset;
}
-int OffsetOfElementAt(ElementAccess const& access, int index) {
+Maybe<int> OffsetOfElementAt(ElementAccess const& access, int index) {
+ MachineRepresentation representation = access.machine_type.representation();
+ // Double elements accesses are not yet supported. See chromium:1237821.
+ if (representation == MachineRepresentation::kFloat64) return Nothing<int>();
+
DCHECK_GE(index, 0);
- DCHECK_GE(ElementSizeLog2Of(access.machine_type.representation()),
- kTaggedSizeLog2);
- return access.header_size +
- (index << ElementSizeLog2Of(access.machine_type.representation()));
+ DCHECK_GE(ElementSizeLog2Of(representation), kTaggedSizeLog2);
+ return Just(access.header_size +
+ (index << ElementSizeLog2Of(representation)));
}
Maybe<int> OffsetOfElementsAccess(const Operator* op, Node* index_node) {
@@ -527,7 +530,7 @@ Maybe<int> OffsetOfElementsAccess(const Operator* op, Node* index_node) {
double min = index_type.Min();
int index = static_cast<int>(min);
if (index < 0 || index != min || index != max) return Nothing<int>();
- return Just(OffsetOfElementAt(ElementAccessOf(op), index));
+ return OffsetOfElementAt(ElementAccessOf(op), index);
}
Node* LowerCompareMapsWithoutLoad(Node* checked_map,
diff --git a/deps/v8/src/compiler/escape-analysis.h b/deps/v8/src/compiler/escape-analysis.h
index 907c7cc087..d3f9768fe7 100644
--- a/deps/v8/src/compiler/escape-analysis.h
+++ b/deps/v8/src/compiler/escape-analysis.h
@@ -139,6 +139,11 @@ class VirtualObject : public Dependable {
}
return Just(fields_.at(offset / kTaggedSize));
}
+ Maybe<Variable> FieldAt(Maybe<int> maybe_offset) const {
+ int offset;
+ if (!maybe_offset.To(&offset)) return Nothing<Variable>();
+ return FieldAt(offset);
+ }
Id id() const { return id_; }
int size() const { return static_cast<int>(kTaggedSize * fields_.size()); }
// Escaped might mean that the object escaped to untracked memory or that it
diff --git a/deps/v8/src/compiler/globals.h b/deps/v8/src/compiler/globals.h
index 392cb23917..23f834cd6c 100644
--- a/deps/v8/src/compiler/globals.h
+++ b/deps/v8/src/compiler/globals.h
@@ -92,7 +92,8 @@ const int kMaxFastLiteralProperties = JSObject::kMaxInObjectProperties;
// to add support for IA32, because it has a totally different approach
// (using FP stack). As support is added to more platforms, please make sure
// to list them here in order to enable tests of this functionality.
-#if defined(V8_TARGET_ARCH_X64)
+#if defined(V8_TARGET_ARCH_X64) || \
+ (defined(V8_TARGET_ARCH_ARM64) && !defined(USE_SIMULATOR))
#define V8_ENABLE_FP_PARAMS_IN_C_LINKAGE
#endif
diff --git a/deps/v8/src/compiler/heap-refs.cc b/deps/v8/src/compiler/heap-refs.cc
index c246430de2..19c7bd1ef6 100644
--- a/deps/v8/src/compiler/heap-refs.cc
+++ b/deps/v8/src/compiler/heap-refs.cc
@@ -1272,7 +1272,7 @@ bool JSObjectData::SerializeAsBoilerplateRecursive(JSHeapBroker* broker,
boilerplate->map().instance_descriptors(isolate), isolate);
for (InternalIndex i : boilerplate->map().IterateOwnDescriptors()) {
PropertyDetails details = descriptors->GetDetails(i);
- if (details.location() != kField) continue;
+ if (details.location() != PropertyLocation::kField) continue;
DCHECK_EQ(kData, details.kind());
FieldIndex field_index = FieldIndex::ForDescriptor(boilerplate->map(), i);
@@ -1780,11 +1780,6 @@ MapRef MapRef::FindFieldOwner(InternalIndex descriptor_index) const {
object()->FindFieldOwner(broker()->isolate(), descriptor_index));
}
-ObjectRef MapRef::GetFieldType(InternalIndex descriptor_index) const {
- CHECK_LT(descriptor_index.as_int(), NumberOfOwnDescriptors());
- return instance_descriptors().GetFieldType(descriptor_index);
-}
-
base::Optional<ObjectRef> StringRef::GetCharAsStringOrUndefined(
uint32_t index) const {
DCHECK(data_->should_access_heap() || broker()->is_concurrent_inlining());
@@ -2605,12 +2600,6 @@ NameRef DescriptorArrayRef::GetPropertyKey(
return result;
}
-ObjectRef DescriptorArrayRef::GetFieldType(
- InternalIndex descriptor_index) const {
- return MakeRef(broker(),
- Object::cast(object()->GetFieldType(descriptor_index)));
-}
-
base::Optional<ObjectRef> DescriptorArrayRef::GetStrongValue(
InternalIndex descriptor_index) const {
HeapObject heap_object;
diff --git a/deps/v8/src/compiler/heap-refs.h b/deps/v8/src/compiler/heap-refs.h
index 4644071ea5..7f737c0c26 100644
--- a/deps/v8/src/compiler/heap-refs.h
+++ b/deps/v8/src/compiler/heap-refs.h
@@ -603,7 +603,6 @@ class DescriptorArrayRef : public HeapObjectRef {
PropertyDetails GetPropertyDetails(InternalIndex descriptor_index) const;
NameRef GetPropertyKey(InternalIndex descriptor_index) const;
- ObjectRef GetFieldType(InternalIndex descriptor_index) const;
base::Optional<ObjectRef> GetStrongValue(
InternalIndex descriptor_index) const;
};
@@ -742,7 +741,6 @@ class V8_EXPORT_PRIVATE MapRef : public HeapObjectRef {
PropertyDetails GetPropertyDetails(InternalIndex descriptor_index) const;
NameRef GetPropertyKey(InternalIndex descriptor_index) const;
FieldIndex GetFieldIndexFor(InternalIndex descriptor_index) const;
- ObjectRef GetFieldType(InternalIndex descriptor_index) const;
base::Optional<ObjectRef> GetStrongValue(
InternalIndex descriptor_number) const;
diff --git a/deps/v8/src/compiler/js-call-reducer.cc b/deps/v8/src/compiler/js-call-reducer.cc
index 91197ead1e..de8dcfacba 100644
--- a/deps/v8/src/compiler/js-call-reducer.cc
+++ b/deps/v8/src/compiler/js-call-reducer.cc
@@ -4451,8 +4451,7 @@ Reduction JSCallReducer::ReduceJSCall(Node* node) {
// Try to further reduce the JSCall {node}.
return Changed(node).FollowedBy(ReduceJSCall(node));
} else if (feedback_target.has_value() && feedback_target->IsFeedbackCell()) {
- FeedbackCellRef feedback_cell =
- MakeRef(broker(), feedback_target.value().AsFeedbackCell().object());
+ FeedbackCellRef feedback_cell = feedback_target.value().AsFeedbackCell();
// TODO(neis): This check seems unnecessary.
if (feedback_cell.feedback_vector().has_value()) {
// Check that {target} is a closure with given {feedback_cell},
@@ -5951,9 +5950,13 @@ Reduction JSCallReducer::ReduceArrayPrototypeSlice(Node* node) {
Effect effect = n.effect();
Control control = n.control();
- // Optimize for the case where we simply clone the {receiver},
- // i.e. when the {start} is zero and the {end} is undefined
- // (meaning it will be set to {receiver}s "length" property).
+ // Optimize for the case where we simply clone the {receiver}, i.e. when the
+ // {start} is zero and the {end} is undefined (meaning it will be set to
+ // {receiver}s "length" property). This logic should be in sync with
+ // ReduceArrayPrototypeSlice (to a reasonable degree). This is because
+ // CloneFastJSArray produces arrays which are potentially COW. If there's a
+ // discrepancy, TF generates code which produces a COW array and then expects
+ // it to be non-COW (or the other way around) -> immediate deopt.
if (!NumberMatcher(start).Is(0) ||
!HeapObjectMatcher(end).Is(factory()->undefined_value())) {
return NoChange();
diff --git a/deps/v8/src/compiler/js-create-lowering.cc b/deps/v8/src/compiler/js-create-lowering.cc
index 60c9017fc2..1b79b9d786 100644
--- a/deps/v8/src/compiler/js-create-lowering.cc
+++ b/deps/v8/src/compiler/js-create-lowering.cc
@@ -1711,7 +1711,7 @@ base::Optional<Node*> JSCreateLowering::TryAllocateFastLiteral(
for (InternalIndex i : InternalIndex::Range(boilerplate_nof)) {
PropertyDetails const property_details =
boilerplate_map.GetPropertyDetails(i);
- if (property_details.location() != kField) continue;
+ if (property_details.location() != PropertyLocation::kField) continue;
DCHECK_EQ(kData, property_details.kind());
if ((*max_properties)-- == 0) return {};
diff --git a/deps/v8/src/compiler/js-inlining.cc b/deps/v8/src/compiler/js-inlining.cc
index deb8345bf7..b2e012d8c4 100644
--- a/deps/v8/src/compiler/js-inlining.cc
+++ b/deps/v8/src/compiler/js-inlining.cc
@@ -472,11 +472,24 @@ Reduction JSInliner::ReduceJSCall(Node* node) {
// Determine the call target.
base::Optional<SharedFunctionInfoRef> shared_info(DetermineCallTarget(node));
if (!shared_info.has_value()) return NoChange();
- DCHECK(shared_info->IsInlineable());
SharedFunctionInfoRef outer_shared_info =
MakeRef(broker(), info_->shared_info());
+ SharedFunctionInfo::Inlineability inlineability =
+ shared_info->GetInlineability();
+ if (inlineability != SharedFunctionInfo::kIsInlineable) {
+ // The function is no longer inlineable. The only way this can happen is if
+ // the function had its optimization disabled in the meantime, e.g. because
+ // another optimization job failed too often.
+ CHECK_EQ(inlineability, SharedFunctionInfo::kHasOptimizationDisabled);
+ TRACE("Not inlining " << *shared_info << " into " << outer_shared_info
+ << " because it had its optimization disabled.");
+ return NoChange();
+ }
+  // NOTE: Even though we bail out in the kHasOptimizationDisabled case above,
+  // we won't notice if the function's optimization is disabled after this
+  // point.
+ // won't notice if the function's optimization is disabled after this point.
+
// Constructor must be constructable.
if (node->opcode() == IrOpcode::kJSConstruct &&
!IsConstructable(shared_info->kind())) {
diff --git a/deps/v8/src/compiler/js-native-context-specialization.cc b/deps/v8/src/compiler/js-native-context-specialization.cc
index cdbc4848cc..d100fd91af 100644
--- a/deps/v8/src/compiler/js-native-context-specialization.cc
+++ b/deps/v8/src/compiler/js-native-context-specialization.cc
@@ -1744,10 +1744,6 @@ Reduction JSNativeContextSpecialization::ReduceElementAccess(
}
}
- for (ElementAccessInfo const& access_info : access_infos) {
- if (!IsTypedArrayElementsKind(access_info.elements_kind())) continue;
- }
-
// Check for the monomorphic case.
PropertyAccessBuilder access_builder(jsgraph(), broker(), dependencies());
if (access_infos.size() == 1) {
diff --git a/deps/v8/src/compiler/js-type-hint-lowering.cc b/deps/v8/src/compiler/js-type-hint-lowering.cc
index 956f13d7f9..38c523596c 100644
--- a/deps/v8/src/compiler/js-type-hint-lowering.cc
+++ b/deps/v8/src/compiler/js-type-hint-lowering.cc
@@ -153,6 +153,7 @@ class JSSpeculativeBinopBuilder final {
}
const Operator* SpeculativeBigIntOp(BigIntOperationHint hint) {
+ DCHECK(jsgraph()->machine()->Is64());
switch (op_->opcode()) {
case IrOpcode::kJSAdd:
return simplified()->SpeculativeBigIntAdd(hint);
@@ -206,6 +207,7 @@ class JSSpeculativeBinopBuilder final {
}
Node* TryBuildBigIntBinop() {
+ DCHECK(jsgraph()->machine()->Is64());
BigIntOperationHint hint;
if (GetBinaryBigIntOperationHint(&hint)) {
const Operator* op = SpeculativeBigIntOp(hint);
@@ -321,10 +323,13 @@ JSTypeHintLowering::LoweringResult JSTypeHintLowering::ReduceUnaryOperation(
jsgraph()->SmiConstant(-1), effect, control, slot);
node = b.TryBuildNumberBinop();
if (!node) {
- if (GetBinaryOperationHint(slot) == BinaryOperationHint::kBigInt) {
- const Operator* op = jsgraph()->simplified()->SpeculativeBigIntNegate(
- BigIntOperationHint::kBigInt);
- node = jsgraph()->graph()->NewNode(op, operand, effect, control);
+ if (jsgraph()->machine()->Is64()) {
+ if (GetBinaryOperationHint(slot) == BinaryOperationHint::kBigInt) {
+ const Operator* op =
+ jsgraph()->simplified()->SpeculativeBigIntNegate(
+ BigIntOperationHint::kBigInt);
+ node = jsgraph()->graph()->NewNode(op, operand, effect, control);
+ }
}
}
break;
@@ -403,8 +408,10 @@ JSTypeHintLowering::LoweringResult JSTypeHintLowering::ReduceBinaryOperation(
}
if (op->opcode() == IrOpcode::kJSAdd ||
op->opcode() == IrOpcode::kJSSubtract) {
- if (Node* node = b.TryBuildBigIntBinop()) {
- return LoweringResult::SideEffectFree(node, node, control);
+ if (jsgraph()->machine()->Is64()) {
+ if (Node* node = b.TryBuildBigIntBinop()) {
+ return LoweringResult::SideEffectFree(node, node, control);
+ }
}
}
break;
diff --git a/deps/v8/src/compiler/linkage.cc b/deps/v8/src/compiler/linkage.cc
index fec0040b61..2197fe6a65 100644
--- a/deps/v8/src/compiler/linkage.cc
+++ b/deps/v8/src/compiler/linkage.cc
@@ -208,6 +208,18 @@ int CallDescriptor::CalculateFixedFrameSize(CodeKind code_kind) const {
UNREACHABLE();
}
+void CallDescriptor::ComputeParamCounts() const {
+ gp_param_count_ = 0;
+ fp_param_count_ = 0;
+ for (size_t i = 0; i < ParameterCount(); ++i) {
+ if (IsFloatingPoint(GetParameterType(i).representation())) {
+ ++fp_param_count_.value();
+ } else {
+ ++gp_param_count_.value();
+ }
+ }
+}
+
CallDescriptor* Linkage::ComputeIncoming(Zone* zone,
OptimizedCompilationInfo* info) {
#if V8_ENABLE_WEBASSEMBLY
diff --git a/deps/v8/src/compiler/linkage.h b/deps/v8/src/compiler/linkage.h
index 707c7d98ab..d157b44e03 100644
--- a/deps/v8/src/compiler/linkage.h
+++ b/deps/v8/src/compiler/linkage.h
@@ -305,9 +305,27 @@ class V8_EXPORT_PRIVATE CallDescriptor final
// The number of return values from this call.
size_t ReturnCount() const { return location_sig_->return_count(); }
- // The number of C parameters to this call.
+ // The number of C parameters to this call. The following invariant
+ // should hold true:
+ // ParameterCount() == GPParameterCount() + FPParameterCount()
size_t ParameterCount() const { return location_sig_->parameter_count(); }
+ // The number of general purpose C parameters to this call.
+ size_t GPParameterCount() const {
+ if (!gp_param_count_) {
+ ComputeParamCounts();
+ }
+ return gp_param_count_.value();
+ }
+
+ // The number of floating point C parameters to this call.
+ size_t FPParameterCount() const {
+ if (!fp_param_count_) {
+ ComputeParamCounts();
+ }
+ return fp_param_count_.value();
+ }
+
// The number of stack parameter slots to the call.
size_t ParameterSlotCount() const { return param_slot_count_; }
@@ -417,6 +435,8 @@ class V8_EXPORT_PRIVATE CallDescriptor final
}
private:
+ void ComputeParamCounts() const;
+
friend class Linkage;
const Kind kind_;
@@ -434,6 +454,9 @@ class V8_EXPORT_PRIVATE CallDescriptor final
const Flags flags_;
const StackArgumentOrder stack_order_;
const char* const debug_name_;
+
+ mutable base::Optional<size_t> gp_param_count_;
+ mutable base::Optional<size_t> fp_param_count_;
};
DEFINE_OPERATORS_FOR_FLAGS(CallDescriptor::Flags)
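
GPParameterCount and FPParameterCount above are computed lazily and cached in mutable optionals, preserving ParameterCount() == GPParameterCount() + FPParameterCount() without recomputing on every query. A stand-alone sketch of that memoization shape (class and member names are illustrative, not the real CallDescriptor):

  #include <cstdio>
  #include <optional>
  #include <vector>

  class ToyCallDescriptor {
   public:
    explicit ToyCallDescriptor(std::vector<bool> param_is_fp)
        : param_is_fp_(std::move(param_is_fp)) {}

    size_t ParameterCount() const { return param_is_fp_.size(); }

    size_t GPParameterCount() const {
      if (!gp_param_count_) ComputeParamCounts();
      return gp_param_count_.value();
    }

    size_t FPParameterCount() const {
      if (!fp_param_count_) ComputeParamCounts();
      return fp_param_count_.value();
    }

   private:
    // Both counts are derived in one pass and cached in mutable optionals.
    void ComputeParamCounts() const {
      size_t gp = 0, fp = 0;
      for (bool is_fp : param_is_fp_) {
        if (is_fp) ++fp; else ++gp;
      }
      gp_param_count_ = gp;
      fp_param_count_ = fp;
    }

    std::vector<bool> param_is_fp_;
    mutable std::optional<size_t> gp_param_count_;
    mutable std::optional<size_t> fp_param_count_;
  };

  int main() {
    ToyCallDescriptor desc({false, true, false, true, true});
    printf("%zu == %zu + %zu\n", desc.ParameterCount(),
           desc.GPParameterCount(), desc.FPParameterCount());  // 5 == 2 + 3
  }
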
diff --git a/deps/v8/src/compiler/loop-unrolling.cc b/deps/v8/src/compiler/loop-unrolling.cc
index 973bb7af19..357b17a3ec 100644
--- a/deps/v8/src/compiler/loop-unrolling.cc
+++ b/deps/v8/src/compiler/loop-unrolling.cc
@@ -35,11 +35,11 @@ void UnrollLoop(Node* loop_node, ZoneUnorderedSet<Node*>* loop, uint32_t depth,
NodeVector copies(tmp_zone);
NodeCopier copier(graph, copied_size, &copies, unrolling_count);
- {
- copier.CopyNodes(graph, tmp_zone, graph->NewNode(common->Dead()),
- base::make_iterator_range(loop->begin(), loop->end()),
- source_positions, node_origins);
- }
+ source_positions->AddDecorator();
+ copier.CopyNodes(graph, tmp_zone, graph->NewNode(common->Dead()),
+ base::make_iterator_range(loop->begin(), loop->end()),
+ source_positions, node_origins);
+ source_positions->RemoveDecorator();
#define COPY(node, n) copier.map(node, n)
#define FOREACH_COPY_INDEX(i) for (uint32_t i = 0; i < unrolling_count; i++)
diff --git a/deps/v8/src/compiler/machine-graph-verifier.cc b/deps/v8/src/compiler/machine-graph-verifier.cc
index fedb208b5f..31f0526679 100644
--- a/deps/v8/src/compiler/machine-graph-verifier.cc
+++ b/deps/v8/src/compiler/machine-graph-verifier.cc
@@ -543,7 +543,7 @@ class MachineRepresentationChecker {
case IrOpcode::kParameter:
case IrOpcode::kProjection:
break;
- case IrOpcode::kAbortCSAAssert:
+ case IrOpcode::kAbortCSADcheck:
CheckValueInputIsTagged(node, 0);
break;
case IrOpcode::kLoad:
diff --git a/deps/v8/src/compiler/machine-operator-reducer.cc b/deps/v8/src/compiler/machine-operator-reducer.cc
index 775e5ada81..db137dfeb4 100644
--- a/deps/v8/src/compiler/machine-operator-reducer.cc
+++ b/deps/v8/src/compiler/machine-operator-reducer.cc
@@ -1254,17 +1254,12 @@ Reduction MachineOperatorReducer::ReduceUint32Mod(Node* node) {
Reduction MachineOperatorReducer::ReduceStore(Node* node) {
NodeMatcher nm(node);
- MachineRepresentation rep;
- int value_input;
- if (nm.IsStore()) {
- rep = StoreRepresentationOf(node->op()).representation();
- value_input = 2;
- } else {
- DCHECK(nm.IsUnalignedStore());
- rep = UnalignedStoreRepresentationOf(node->op());
- value_input = 2;
- }
+ DCHECK(nm.IsStore() || nm.IsUnalignedStore());
+ MachineRepresentation rep =
+ nm.IsStore() ? StoreRepresentationOf(node->op()).representation()
+ : UnalignedStoreRepresentationOf(node->op());
+ const int value_input = 2;
Node* const value = node->InputAt(value_input);
switch (value->opcode()) {
diff --git a/deps/v8/src/compiler/machine-operator.cc b/deps/v8/src/compiler/machine-operator.cc
index d24030e1a7..e2d1686d5d 100644
--- a/deps/v8/src/compiler/machine-operator.cc
+++ b/deps/v8/src/compiler/machine-operator.cc
@@ -1242,12 +1242,12 @@ struct MachineOperatorGlobalCache {
};
BitcastMaybeObjectToWordOperator kBitcastMaybeObjectToWord;
- struct AbortCSAAssertOperator : public Operator {
- AbortCSAAssertOperator()
- : Operator(IrOpcode::kAbortCSAAssert, Operator::kNoThrow,
- "AbortCSAAssert", 1, 1, 1, 0, 1, 0) {}
+ struct AbortCSADcheckOperator : public Operator {
+ AbortCSADcheckOperator()
+ : Operator(IrOpcode::kAbortCSADcheck, Operator::kNoThrow,
+ "AbortCSADcheck", 1, 1, 1, 0, 1, 0) {}
};
- AbortCSAAssertOperator kAbortCSAAssert;
+ AbortCSADcheckOperator kAbortCSADcheck;
struct DebugBreakOperator : public Operator {
DebugBreakOperator()
@@ -1626,8 +1626,8 @@ const Operator* MachineOperatorBuilder::BitcastMaybeObjectToWord() {
return &cache_.kBitcastMaybeObjectToWord;
}
-const Operator* MachineOperatorBuilder::AbortCSAAssert() {
- return &cache_.kAbortCSAAssert;
+const Operator* MachineOperatorBuilder::AbortCSADcheck() {
+ return &cache_.kAbortCSADcheck;
}
const Operator* MachineOperatorBuilder::DebugBreak() {
diff --git a/deps/v8/src/compiler/machine-operator.h b/deps/v8/src/compiler/machine-operator.h
index 7bd73663ab..493ea08ac1 100644
--- a/deps/v8/src/compiler/machine-operator.h
+++ b/deps/v8/src/compiler/machine-operator.h
@@ -119,6 +119,10 @@ V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&,
V8_EXPORT_PRIVATE LoadTransformParameters const& LoadTransformParametersOf(
Operator const*) V8_WARN_UNUSED_RESULT;
+V8_EXPORT_PRIVATE bool operator==(LoadTransformParameters,
+ LoadTransformParameters);
+bool operator!=(LoadTransformParameters, LoadTransformParameters);
+
struct LoadLaneParameters {
MemoryAccessKind kind;
LoadRepresentation rep;
@@ -404,7 +408,7 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
MachineOperatorBuilder& operator=(const MachineOperatorBuilder&) = delete;
const Operator* Comment(const char* msg);
- const Operator* AbortCSAAssert();
+ const Operator* AbortCSADcheck();
const Operator* DebugBreak();
const Operator* UnsafePointerAdd();
diff --git a/deps/v8/src/compiler/memory-optimizer.cc b/deps/v8/src/compiler/memory-optimizer.cc
index ba4a5c1f67..a92dd67c62 100644
--- a/deps/v8/src/compiler/memory-optimizer.cc
+++ b/deps/v8/src/compiler/memory-optimizer.cc
@@ -22,7 +22,7 @@ namespace {
bool CanAllocate(const Node* node) {
switch (node->opcode()) {
- case IrOpcode::kAbortCSAAssert:
+ case IrOpcode::kAbortCSADcheck:
case IrOpcode::kBitcastTaggedToWord:
case IrOpcode::kBitcastWordToTagged:
case IrOpcode::kComment:
diff --git a/deps/v8/src/compiler/node-matchers.h b/deps/v8/src/compiler/node-matchers.h
index 52dc476dc4..86e4884421 100644
--- a/deps/v8/src/compiler/node-matchers.h
+++ b/deps/v8/src/compiler/node-matchers.h
@@ -16,6 +16,7 @@
#include "src/compiler/common-operator.h"
#include "src/compiler/machine-operator.h"
#include "src/compiler/node.h"
+#include "src/compiler/opcodes.h"
#include "src/compiler/operator.h"
#include "src/objects/heap-object.h"
@@ -816,6 +817,14 @@ struct V8_EXPORT_PRIVATE DiamondMatcher
Node* if_false_;
};
+struct LoadTransformMatcher
+ : ValueMatcher<LoadTransformParameters, IrOpcode::kLoadTransform> {
+ explicit LoadTransformMatcher(Node* node) : ValueMatcher(node) {}
+ bool Is(LoadTransformation t) {
+ return HasResolvedValue() && ResolvedValue().transformation == t;
+ }
+};
+
} // namespace compiler
} // namespace internal
} // namespace v8
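
LoadTransformMatcher above follows the existing ValueMatcher pattern: wrap a node, resolve its parameter only if the opcode matches, and expose a simple Is(t) predicate for the selector. A toy version of that shape (the Node and opcode layout here are invented for the sketch, not V8's):

  #include <cstdio>
  #include <optional>

  enum Opcode { kLoadTransform, kOther };
  enum LoadTransformation { kS128Load64Zero, kS128Load8Splat };

  struct Node {
    Opcode opcode;
    LoadTransformation param;  // only meaningful for kLoadTransform
  };

  class LoadTransformMatcher {
   public:
    explicit LoadTransformMatcher(const Node* node) {
      if (node->opcode == kLoadTransform) resolved_ = node->param;
    }
    bool HasResolvedValue() const { return resolved_.has_value(); }
    bool Is(LoadTransformation t) const {
      return HasResolvedValue() && *resolved_ == t;
    }

   private:
    std::optional<LoadTransformation> resolved_;
  };

  int main() {
    Node load{kLoadTransform, kS128Load64Zero};
    Node other{kOther, kS128Load8Splat};
    printf("%d %d\n", LoadTransformMatcher(&load).Is(kS128Load64Zero),
           LoadTransformMatcher(&other).Is(kS128Load64Zero));  // 1 0
  }
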
diff --git a/deps/v8/src/compiler/opcodes.h b/deps/v8/src/compiler/opcodes.h
index b956f148cc..d3739f55b3 100644
--- a/deps/v8/src/compiler/opcodes.h
+++ b/deps/v8/src/compiler/opcodes.h
@@ -681,7 +681,7 @@
MACHINE_FLOAT64_BINOP_LIST(V) \
MACHINE_FLOAT64_UNOP_LIST(V) \
MACHINE_ATOMIC_OP_LIST(V) \
- V(AbortCSAAssert) \
+ V(AbortCSADcheck) \
V(DebugBreak) \
V(Comment) \
V(Load) \
diff --git a/deps/v8/src/compiler/pipeline.cc b/deps/v8/src/compiler/pipeline.cc
index 8d3d93aa2a..d4e47f7361 100644
--- a/deps/v8/src/compiler/pipeline.cc
+++ b/deps/v8/src/compiler/pipeline.cc
@@ -1696,8 +1696,11 @@ struct WasmInliningPhase {
data->jsgraph()->Dead(), data->observe_node_manager());
DeadCodeElimination dead(&graph_reducer, data->graph(),
data->mcgraph()->common(), temp_zone);
+ // For now, hard-code inlining the function at index 0.
+ InlineByIndex heuristics({0});
WasmInliner inliner(&graph_reducer, env, data->source_positions(),
- data->node_origins(), data->mcgraph(), wire_bytes, 0);
+ data->node_origins(), data->mcgraph(), wire_bytes,
+ &heuristics);
AddReducer(data, &graph_reducer, &dead);
AddReducer(data, &graph_reducer, &inliner);
@@ -1850,9 +1853,9 @@ struct LoadEliminationPhase {
GraphReducer graph_reducer(
temp_zone, data->graph(), &data->info()->tick_counter(), data->broker(),
data->jsgraph()->Dead(), data->observe_node_manager());
- BranchElimination branch_condition_elimination(&graph_reducer,
- data->jsgraph(), temp_zone,
- BranchElimination::kEARLY);
+ BranchElimination branch_condition_elimination(
+ &graph_reducer, data->jsgraph(), temp_zone, data->source_positions(),
+ BranchElimination::kEARLY);
DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
data->common(), temp_zone);
RedundancyElimination redundancy_elimination(&graph_reducer, temp_zone);
@@ -1919,8 +1922,8 @@ struct LateOptimizationPhase {
GraphReducer graph_reducer(
temp_zone, data->graph(), &data->info()->tick_counter(), data->broker(),
data->jsgraph()->Dead(), data->observe_node_manager());
- BranchElimination branch_condition_elimination(&graph_reducer,
- data->jsgraph(), temp_zone);
+ BranchElimination branch_condition_elimination(
+ &graph_reducer, data->jsgraph(), temp_zone, data->source_positions());
DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
data->common(), temp_zone);
ValueNumberingReducer value_numbering(temp_zone, data->graph()->zone());
@@ -2048,7 +2051,7 @@ struct WasmOptimizationPhase {
data->machine(), temp_zone);
ValueNumberingReducer value_numbering(temp_zone, data->graph()->zone());
BranchElimination branch_condition_elimination(
- &graph_reducer, data->jsgraph(), temp_zone);
+ &graph_reducer, data->jsgraph(), temp_zone, data->source_positions());
AddReducer(data, &graph_reducer, &machine_reducer);
AddReducer(data, &graph_reducer, &dead_code_elimination);
AddReducer(data, &graph_reducer, &common_reducer);
@@ -2103,7 +2106,7 @@ struct CsaEarlyOptimizationPhase {
data->machine(), temp_zone);
ValueNumberingReducer value_numbering(temp_zone, data->graph()->zone());
BranchElimination branch_condition_elimination(
- &graph_reducer, data->jsgraph(), temp_zone);
+ &graph_reducer, data->jsgraph(), temp_zone, data->source_positions());
AddReducer(data, &graph_reducer, &machine_reducer);
AddReducer(data, &graph_reducer, &dead_code_elimination);
AddReducer(data, &graph_reducer, &common_reducer);
@@ -2121,8 +2124,8 @@ struct CsaOptimizationPhase {
GraphReducer graph_reducer(
temp_zone, data->graph(), &data->info()->tick_counter(), data->broker(),
data->jsgraph()->Dead(), data->observe_node_manager());
- BranchElimination branch_condition_elimination(&graph_reducer,
- data->jsgraph(), temp_zone);
+ BranchElimination branch_condition_elimination(
+ &graph_reducer, data->jsgraph(), temp_zone, data->source_positions());
DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
data->common(), temp_zone);
MachineOperatorReducer machine_reducer(&graph_reducer, data->jsgraph(),
@@ -3097,7 +3100,7 @@ std::ostream& operator<<(std::ostream& out, const BlockStartsAsJSON& s) {
// static
wasm::WasmCompilationResult Pipeline::GenerateCodeForWasmNativeStub(
CallDescriptor* call_descriptor, MachineGraph* mcgraph, CodeKind kind,
- int wasm_kind, const char* debug_name, const AssemblerOptions& options,
+ const char* debug_name, const AssemblerOptions& options,
SourcePositionTable* source_positions) {
Graph* graph = mcgraph->graph();
OptimizedCompilationInfo info(base::CStrVector(debug_name), graph->zone(),
@@ -3160,6 +3163,9 @@ wasm::WasmCompilationResult Pipeline::GenerateCodeForWasmNativeStub(
result.frame_slot_count = code_generator->frame()->GetTotalFrameSlotCount();
result.tagged_parameter_slots = call_descriptor->GetTaggedParameterSlots();
result.result_tier = wasm::ExecutionTier::kTurbofan;
+ if (kind == CodeKind::WASM_TO_JS_FUNCTION) {
+ result.kind = wasm::WasmCompilationResult::kWasmToJsWrapper;
+ }
DCHECK(result.succeeded());
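The separate wasm_kind argument is gone; the result kind is now derived from the CodeKind inside the pipeline. Call sites shrink to the shape used later in this patch; an illustrative call, mirroring CompileWasmImportCallWrapper below:

  // result.kind becomes kWasmToJsWrapper automatically when the CodeKind is
  // WASM_TO_JS_FUNCTION (see the hunk above).
  wasm::WasmCompilationResult result = Pipeline::GenerateCodeForWasmNativeStub(
      call_descriptor, mcgraph, CodeKind::WASM_TO_JS_FUNCTION, debug_name,
      WasmStubAssemblerOptions(), source_positions);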
diff --git a/deps/v8/src/compiler/pipeline.h b/deps/v8/src/compiler/pipeline.h
index 19fd715885..2a166b2073 100644
--- a/deps/v8/src/compiler/pipeline.h
+++ b/deps/v8/src/compiler/pipeline.h
@@ -66,8 +66,7 @@ class Pipeline : public AllStatic {
// Run the pipeline on a machine graph and generate code.
static wasm::WasmCompilationResult GenerateCodeForWasmNativeStub(
CallDescriptor* call_descriptor, MachineGraph* mcgraph, CodeKind kind,
- int wasm_kind, const char* debug_name,
- const AssemblerOptions& assembler_options,
+ const char* debug_name, const AssemblerOptions& assembler_options,
SourcePositionTable* source_positions = nullptr);
// Returns a new compilation job for a wasm heap stub.
diff --git a/deps/v8/src/compiler/raw-machine-assembler.cc b/deps/v8/src/compiler/raw-machine-assembler.cc
index 383d63dd69..2a2eb07fe1 100644
--- a/deps/v8/src/compiler/raw-machine-assembler.cc
+++ b/deps/v8/src/compiler/raw-machine-assembler.cc
@@ -190,12 +190,12 @@ void RawMachineAssembler::OptimizeControlFlow(Schedule* schedule, Graph* graph,
false_block->ClearPredecessors();
size_t arity = block->PredecessorCount();
- for (size_t i = 0; i < arity; ++i) {
- BasicBlock* predecessor = block->PredecessorAt(i);
+ for (size_t j = 0; j < arity; ++j) {
+ BasicBlock* predecessor = block->PredecessorAt(j);
predecessor->ClearSuccessors();
if (block->deferred()) predecessor->set_deferred(true);
Node* branch_clone = graph->CloneNode(branch);
- int phi_input = static_cast<int>(i);
+ int phi_input = static_cast<int>(j);
NodeProperties::ReplaceValueInput(
branch_clone, NodeProperties::GetValueInput(phi, phi_input), 0);
BasicBlock* new_true_block = schedule->NewBasicBlock();
@@ -571,14 +571,14 @@ void RawMachineAssembler::Switch(Node* index, RawMachineLabel* default_label,
size_t succ_count = case_count + 1;
Node* switch_node = MakeNode(common()->Switch(succ_count), 1, &index);
BasicBlock** succ_blocks = zone()->NewArray<BasicBlock*>(succ_count);
- for (size_t index = 0; index < case_count; ++index) {
- int32_t case_value = case_values[index];
+ for (size_t i = 0; i < case_count; ++i) {
+ int32_t case_value = case_values[i];
BasicBlock* case_block = schedule()->NewBasicBlock();
Node* case_node =
graph()->NewNode(common()->IfValue(case_value), switch_node);
schedule()->AddNode(case_block, case_node);
- schedule()->AddGoto(case_block, Use(case_labels[index]));
- succ_blocks[index] = case_block;
+ schedule()->AddGoto(case_block, Use(case_labels[i]));
+ succ_blocks[i] = case_block;
}
BasicBlock* default_block = schedule()->NewBasicBlock();
Node* default_node = graph()->NewNode(common()->IfDefault(), switch_node);
@@ -673,8 +673,8 @@ void RawMachineAssembler::PopAndReturn(Node* pop, Node* v1, Node* v2, Node* v3,
current_block_ = nullptr;
}
-void RawMachineAssembler::AbortCSAAssert(Node* message) {
- AddNode(machine()->AbortCSAAssert(), message);
+void RawMachineAssembler::AbortCSADcheck(Node* message) {
+ AddNode(machine()->AbortCSADcheck(), message);
}
void RawMachineAssembler::DebugBreak() { AddNode(machine()->DebugBreak()); }
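This is a mechanical rename, apparently tracking the CSA assert-to-dcheck terminology change; callers swap the method name one for one. Hypothetical call site, illustrative only and not part of this patch:

  // Before: raw_assembler->AbortCSAAssert(message);
  raw_assembler->AbortCSADcheck(message);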
diff --git a/deps/v8/src/compiler/raw-machine-assembler.h b/deps/v8/src/compiler/raw-machine-assembler.h
index f0bb6e0425..23051dfbba 100644
--- a/deps/v8/src/compiler/raw-machine-assembler.h
+++ b/deps/v8/src/compiler/raw-machine-assembler.h
@@ -1033,7 +1033,7 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
void PopAndReturn(Node* pop, Node* v1, Node* v2, Node* v3, Node* v4);
void Bind(RawMachineLabel* label);
void Deoptimize(Node* state);
- void AbortCSAAssert(Node* message);
+ void AbortCSADcheck(Node* message);
void DebugBreak();
void Unreachable();
void Comment(const std::string& msg);
diff --git a/deps/v8/src/compiler/scheduler.cc b/deps/v8/src/compiler/scheduler.cc
index 07a716bfa7..a54caf2abe 100644
--- a/deps/v8/src/compiler/scheduler.cc
+++ b/deps/v8/src/compiler/scheduler.cc
@@ -967,8 +967,9 @@ class SpecialRPONumberer : public ZoneObject {
if (HasLoopNumber(current)) {
++loop_depth;
current_loop = &loops_[GetLoopNumber(current)];
- BasicBlock* end = current_loop->end;
- current->set_loop_end(end == nullptr ? BeyondEndSentinel() : end);
+ BasicBlock* loop_end = current_loop->end;
+ current->set_loop_end(loop_end == nullptr ? BeyondEndSentinel()
+ : loop_end);
current_header = current_loop->header;
TRACE("id:%d is a loop header, increment loop depth to %d\n",
current->id().ToInt(), loop_depth);
@@ -1025,8 +1026,8 @@ class SpecialRPONumberer : public ZoneObject {
// loop header H are members of the loop too. O(|blocks between M and H|).
while (queue_length > 0) {
BasicBlock* block = (*queue)[--queue_length].block;
- for (size_t i = 0; i < block->PredecessorCount(); i++) {
- BasicBlock* pred = block->PredecessorAt(i);
+ for (size_t j = 0; j < block->PredecessorCount(); j++) {
+ BasicBlock* pred = block->PredecessorAt(j);
if (pred != header) {
if (!loops_[loop_num].members->Contains(pred->id().ToInt())) {
loops_[loop_num].members->Add(pred->id().ToInt());
@@ -1124,7 +1125,7 @@ class SpecialRPONumberer : public ZoneObject {
// Check the contiguousness of loops.
int count = 0;
for (int j = 0; j < static_cast<int>(order->size()); j++) {
- BasicBlock* block = order->at(j);
+ block = order->at(j);
DCHECK_EQ(block->rpo_number(), j);
if (j < header->rpo_number() || j >= end->rpo_number()) {
DCHECK(!header->LoopContains(block));
@@ -1440,9 +1441,9 @@ class ScheduleLateNodeVisitor {
queue->push(node);
do {
scheduler_->tick_counter_->TickAndMaybeEnterSafepoint();
- Node* const node = queue->front();
+ Node* const n = queue->front();
queue->pop();
- VisitNode(node);
+ VisitNode(n);
} while (!queue->empty());
}
}
@@ -1821,8 +1822,8 @@ void Scheduler::FuseFloatingControl(BasicBlock* block, Node* node) {
// temporary solution and should be merged into the rest of the scheduler as
// soon as the approach settled for all floating loops.
NodeVector propagation_roots(control_flow_builder_->control_);
- for (Node* node : control_flow_builder_->control_) {
- for (Node* use : node->uses()) {
+ for (Node* control : control_flow_builder_->control_) {
+ for (Node* use : control->uses()) {
if (NodeProperties::IsPhi(use) && IsLive(use)) {
propagation_roots.push_back(use);
}
@@ -1830,8 +1831,8 @@ void Scheduler::FuseFloatingControl(BasicBlock* block, Node* node) {
}
if (FLAG_trace_turbo_scheduler) {
TRACE("propagation roots: ");
- for (Node* node : propagation_roots) {
- TRACE("#%d:%s ", node->id(), node->op()->mnemonic());
+ for (Node* r : propagation_roots) {
+ TRACE("#%d:%s ", r->id(), r->op()->mnemonic());
}
TRACE("\n");
}
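The scheduler.cc edits above only rename inner variables (or reuse an existing outer declaration) so that nested scopes no longer shadow an enclosing name, the pattern that -Wshadow-style warnings flag; behavior is unchanged. A minimal standalone illustration of the pattern being removed (not V8 code):

  #include <vector>

  void Example(const std::vector<std::vector<int>>& blocks) {
    for (std::size_t i = 0; i < blocks.size(); ++i) {
      // Naming this index `i` as well would shadow the outer `i`.
      for (std::size_t j = 0; j < blocks[i].size(); ++j) {
        (void)blocks[i][j];
      }
    }
  }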
diff --git a/deps/v8/src/compiler/types.cc b/deps/v8/src/compiler/types.cc
index a1f9b93dce..15c9f195e0 100644
--- a/deps/v8/src/compiler/types.cc
+++ b/deps/v8/src/compiler/types.cc
@@ -275,6 +275,7 @@ Type::bitset BitsetType::Lub(const MapRefLike& map) {
DCHECK(!map.is_undetectable());
return kBoundFunction;
case JS_FUNCTION_TYPE:
+ case JS_CLASS_CONSTRUCTOR_TYPE:
case JS_PROMISE_CONSTRUCTOR_TYPE:
case JS_REG_EXP_CONSTRUCTOR_TYPE:
case JS_ARRAY_CONSTRUCTOR_TYPE:
diff --git a/deps/v8/src/compiler/types.h b/deps/v8/src/compiler/types.h
index a28a28c59e..cdd8c0b0f0 100644
--- a/deps/v8/src/compiler/types.h
+++ b/deps/v8/src/compiler/types.h
@@ -72,10 +72,6 @@ namespace compiler {
// existing assumptions or tests.
// Consequently, do not normally use Equals for type tests, always use Is!
//
-// The NowIs operator implements state-sensitive subtying, as described above.
-// Any compilation decision based on such temporary properties requires runtime
-// guarding!
-//
//
// PROPERTIES
//
diff --git a/deps/v8/src/compiler/verifier.cc b/deps/v8/src/compiler/verifier.cc
index a0f2aa569d..a8bbd06b5f 100644
--- a/deps/v8/src/compiler/verifier.cc
+++ b/deps/v8/src/compiler/verifier.cc
@@ -919,7 +919,7 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
break;
case IrOpcode::kComment:
- case IrOpcode::kAbortCSAAssert:
+ case IrOpcode::kAbortCSADcheck:
case IrOpcode::kDebugBreak:
case IrOpcode::kRetain:
case IrOpcode::kUnsafePointerAdd:
diff --git a/deps/v8/src/compiler/wasm-compiler.cc b/deps/v8/src/compiler/wasm-compiler.cc
index b3d6e7bb74..8446640bfc 100644
--- a/deps/v8/src/compiler/wasm-compiler.cc
+++ b/deps/v8/src/compiler/wasm-compiler.cc
@@ -654,8 +654,9 @@ void WasmGraphBuilder::StackCheck(wasm::WasmCodePosition position) {
mcgraph()->machine()->StackPointerGreaterThan(StackCheckKind::kWasm),
limit, effect()));
- Diamond stack_check(graph(), mcgraph()->common(), check, BranchHint::kTrue);
- stack_check.Chain(control());
+ Node* if_true;
+ Node* if_false;
+ gasm_->Branch(check, &if_true, &if_false, BranchHint::kTrue);
if (stack_check_call_operator_ == nullptr) {
// Build and cache the stack check call operator and the constant
@@ -676,15 +677,18 @@ void WasmGraphBuilder::StackCheck(wasm::WasmCodePosition position) {
stack_check_call_operator_ = mcgraph()->common()->Call(call_descriptor);
}
- Node* call = graph()->NewNode(stack_check_call_operator_.get(),
- stack_check_code_node_.get(), effect(),
- stack_check.if_false);
-
+ Node* call =
+ graph()->NewNode(stack_check_call_operator_.get(),
+ stack_check_code_node_.get(), effect(), if_false);
SetSourcePosition(call, position);
- Node* ephi = stack_check.EffectPhi(effect(), call);
+ DCHECK_GT(call->op()->ControlOutputCount(), 0);
+ Node* merge = graph()->NewNode(mcgraph()->common()->Merge(2), if_true, call);
+ DCHECK_GT(call->op()->EffectOutputCount(), 0);
+ Node* ephi = graph()->NewNode(mcgraph()->common()->EffectPhi(2), effect(),
+ call, merge);
- SetEffectControl(ephi, stack_check.merge);
+ SetEffectControl(ephi, merge);
}
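The Diamond helper is replaced by an explicitly built diamond. Because the stack-guard call has control and effect outputs (asserted by the DCHECK_GTs above), the call node itself serves as the slow-path input of both the Merge and the EffectPhi, so no extra IfSuccess projection is needed. The resulting shape, restated as a sketch with the same identifiers as the hunk:

  Node* if_true;
  Node* if_false;
  gasm_->Branch(check, &if_true, &if_false, BranchHint::kTrue);
  // call: the stack-guard call, with control input if_false (slow path only).
  Node* merge = graph()->NewNode(mcgraph()->common()->Merge(2), if_true, call);
  Node* ephi = graph()->NewNode(mcgraph()->common()->EffectPhi(2), effect(),
                                call, merge);
  SetEffectControl(ephi, merge);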
void WasmGraphBuilder::PatchInStackCheckIfNeeded() {
@@ -2905,8 +2909,8 @@ Node* WasmGraphBuilder::BuildWasmCall(const wasm::FunctionSig* sig,
const Operator* op = mcgraph()->common()->Call(call_descriptor);
Node* call =
BuildCallNode(sig, args, position, instance_node, op, frame_state);
- // TODO(manoskouk): Don't always set control if we ever add properties to wasm
- // calls.
+ // TODO(manoskouk): If we have kNoThrow calls, do not set them as control.
+ DCHECK_GT(call->op()->ControlOutputCount(), 0);
SetControl(call);
size_t ret_count = sig->return_count();
@@ -2935,8 +2939,8 @@ Node* WasmGraphBuilder::BuildWasmReturnCall(const wasm::FunctionSig* sig,
const Operator* op = mcgraph()->common()->TailCall(call_descriptor);
Node* call = BuildCallNode(sig, args, position, instance_node, op);
- // TODO(manoskouk): {call} will not always be a control node if we ever add
- // properties to wasm calls.
+ // TODO(manoskouk): If we have kNoThrow calls, do not merge them to end.
+ DCHECK_GT(call->op()->ControlOutputCount(), 0);
gasm_->MergeControlToEnd(call);
return call;
@@ -3155,7 +3159,7 @@ Node* WasmGraphBuilder::BuildLoadCallTargetFromExportedFunctionData(
}
// TODO(9495): Support CAPI function refs.
-Node* WasmGraphBuilder::BuildCallRef(uint32_t sig_index,
+Node* WasmGraphBuilder::BuildCallRef(const wasm::FunctionSig* sig,
base::Vector<Node*> args,
base::Vector<Node*> rets,
CheckForNull null_check,
@@ -3166,8 +3170,6 @@ Node* WasmGraphBuilder::BuildCallRef(uint32_t sig_index,
position);
}
- const wasm::FunctionSig* sig = env_->module->signature(sig_index);
-
Node* function_data = gasm_->LoadFunctionDataFromJSFunction(args[0]);
auto load_target = gasm_->MakeLabel();
@@ -3227,20 +3229,37 @@ Node* WasmGraphBuilder::BuildCallRef(uint32_t sig_index,
return call;
}
-Node* WasmGraphBuilder::CallRef(uint32_t sig_index, base::Vector<Node*> args,
+void WasmGraphBuilder::CompareToExternalFunctionAtIndex(
+ Node* func_ref, uint32_t function_index, Node** success_control,
+ Node** failure_control) {
+ // Since we are comparing to a function reference, it is guaranteed that
+ // instance->wasm_external_functions() has been initialized.
+ Node* external_functions = gasm_->LoadFromObject(
+ MachineType::TaggedPointer(), GetInstance(),
+ wasm::ObjectAccess::ToTagged(
+ WasmInstanceObject::kWasmExternalFunctionsOffset));
+ Node* function_ref = gasm_->LoadFixedArrayElement(
+ external_functions, gasm_->IntPtrConstant(function_index),
+ MachineType::AnyTagged());
+ gasm_->Branch(gasm_->WordEqual(function_ref, func_ref), success_control,
+ failure_control, BranchHint::kTrue);
+}
+
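CompareToExternalFunctionAtIndex loads instance->wasm_external_functions()[function_index] and branches on reference equality with the given func_ref. A plausible use is guarding a speculative direct call on the identity of a funcref; the sketch below is illustrative only, and the surrounding decoder wiring is not part of this patch:

  Node* success_control;
  Node* failure_control;
  builder->CompareToExternalFunctionAtIndex(func_ref, expected_index,
                                            &success_control, &failure_control);
  // success_control: func_ref is the expected function, a direct call is safe.
  // failure_control: fall back to the generic CallRef(sig, ...) path.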
+Node* WasmGraphBuilder::CallRef(const wasm::FunctionSig* sig,
+ base::Vector<Node*> args,
base::Vector<Node*> rets,
WasmGraphBuilder::CheckForNull null_check,
wasm::WasmCodePosition position) {
- return BuildCallRef(sig_index, args, rets, null_check,
- IsReturnCall::kCallContinues, position);
+ return BuildCallRef(sig, args, rets, null_check, IsReturnCall::kCallContinues,
+ position);
}
-Node* WasmGraphBuilder::ReturnCallRef(uint32_t sig_index,
+Node* WasmGraphBuilder::ReturnCallRef(const wasm::FunctionSig* sig,
base::Vector<Node*> args,
WasmGraphBuilder::CheckForNull null_check,
wasm::WasmCodePosition position) {
- return BuildCallRef(sig_index, args, {}, null_check,
- IsReturnCall::kReturnCall, position);
+ return BuildCallRef(sig, args, {}, null_check, IsReturnCall::kReturnCall,
+ position);
}
Node* WasmGraphBuilder::ReturnCall(uint32_t index, base::Vector<Node*> args,
@@ -5563,6 +5582,7 @@ Node* WasmGraphBuilder::ArrayNewWithRtt(uint32_t array_index,
length, gasm_->Uint32Constant(WasmArray::MaxLength(type))),
position);
wasm::ValueType element_type = type->element_type();
+ // TODO(7748): Consider using gasm_->Allocate().
Builtin stub = ChooseArrayAllocationBuiltin(element_type, initial_value);
// Do NOT mark this as Operator::kEliminatable, because that would cause the
// Call node to have no control inputs, which means it could get scheduled
@@ -5597,6 +5617,25 @@ Node* WasmGraphBuilder::ArrayNewWithRtt(uint32_t array_index,
return a;
}
+Node* WasmGraphBuilder::ArrayInit(uint32_t array_index,
+ const wasm::ArrayType* type, Node* rtt,
+ base::Vector<Node*> elements) {
+ wasm::ValueType element_type = type->element_type();
+ // TODO(7748): Consider using gasm_->Allocate().
+ Node* array =
+ gasm_->CallBuiltin(Builtin::kWasmAllocateArray_Uninitialized,
+ Operator::kNoDeopt | Operator::kNoThrow, rtt,
+ Int32Constant(static_cast<int32_t>(elements.size())),
+ Int32Constant(element_type.element_size_bytes()));
+ for (int i = 0; i < static_cast<int>(elements.size()); i++) {
+ Node* offset =
+ gasm_->WasmArrayElementOffset(Int32Constant(i), element_type);
+ gasm_->StoreToObject(ObjectAccessForGCStores(element_type), array, offset,
+ elements[i]);
+ }
+ return array;
+}
+
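ArrayInit allocates an uninitialized array of elements.size() elements through the WasmAllocateArray_Uninitialized builtin and then stores each supplied element at its computed offset. Assumed call shape from the graph-building decoder; the element nodes and wiring are illustrative:

  Node* elements[] = {e0, e1, e2};
  Node* array = builder->ArrayInit(array_index, array_type, rtt,
                                   base::VectorOf(elements));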
Node* WasmGraphBuilder::RttCanon(uint32_t type_index) {
Node* maps_list =
LOAD_INSTANCE_FIELD(ManagedObjectMaps, MachineType::TaggedPointer());
@@ -5974,6 +6013,11 @@ void WasmGraphBuilder::ArrayCopy(Node* dst_array, Node* dst_index,
BoundsCheckArrayCopy(dst_array, dst_index, length, position);
BoundsCheckArrayCopy(src_array, src_index, length, position);
+ auto skip = gasm_->MakeLabel();
+
+ gasm_->GotoIf(gasm_->WordEqual(length, Int32Constant(0)), &skip,
+ BranchHint::kFalse);
+
Node* function =
gasm_->ExternalConstant(ExternalReference::wasm_array_copy());
MachineType arg_types[]{
@@ -5983,6 +6027,8 @@ void WasmGraphBuilder::ArrayCopy(Node* dst_array, Node* dst_index,
MachineSignature sig(0, 6, arg_types);
BuildCCall(&sig, function, GetInstance(), dst_array, dst_index, src_array,
src_index, length);
+ gasm_->Goto(&skip);
+ gasm_->Bind(&skip);
}
// 1 bit V8 Smi tag, 31 bits V8 Smi shift, 1 bit i31ref high-bit truncation.
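The zero-length guard added to ArrayCopy uses the graph assembler's label idiom: allocate a label, conditionally jump over the guarded code, then fall through and bind. The general pattern, extracted from the hunk above:

  auto skip = gasm_->MakeLabel();
  gasm_->GotoIf(condition, &skip, BranchHint::kFalse);
  // ... code that only executes when the condition is false ...
  gasm_->Goto(&skip);
  gasm_->Bind(&skip);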
@@ -7501,7 +7547,7 @@ wasm::WasmCompilationResult CompileWasmMathIntrinsic(
wasm::CompilationEnv env(
nullptr, wasm::kNoBoundsChecks,
wasm::RuntimeExceptionSupport::kNoRuntimeExceptionSupport,
- wasm::WasmFeatures::All());
+ wasm::WasmFeatures::All(), wasm::DynamicTiering::kDisabled);
WasmGraphBuilder builder(&env, mcgraph->zone(), mcgraph, sig,
source_positions);
@@ -7532,11 +7578,12 @@ wasm::WasmCompilationResult CompileWasmMathIntrinsic(
call_descriptor = GetI32WasmCallDescriptor(&zone, call_descriptor);
}
- wasm::WasmCompilationResult result = Pipeline::GenerateCodeForWasmNativeStub(
- call_descriptor, mcgraph, CodeKind::WASM_FUNCTION,
- wasm::WasmCode::kFunction, debug_name, WasmStubAssemblerOptions(),
- source_positions);
- return result;
+ // The code does not call to JS, but conceptually it is an import wrapper,
+ // hence use {WASM_TO_JS_FUNCTION} here.
+ // TODO(wasm): Rename this to {WASM_IMPORT_CALL}?
+ return Pipeline::GenerateCodeForWasmNativeStub(
+ call_descriptor, mcgraph, CodeKind::WASM_TO_JS_FUNCTION, debug_name,
+ WasmStubAssemblerOptions(), source_positions);
}
} // namespace
@@ -7590,12 +7637,9 @@ wasm::WasmCompilationResult CompileWasmImportCallWrapper(
if (machine->Is32()) {
incoming = GetI32WasmCallDescriptor(&zone, incoming);
}
- wasm::WasmCompilationResult result = Pipeline::GenerateCodeForWasmNativeStub(
- incoming, mcgraph, CodeKind::WASM_TO_JS_FUNCTION,
- wasm::WasmCode::kWasmToJsWrapper, func_name, WasmStubAssemblerOptions(),
- source_position_table);
- result.kind = wasm::WasmCompilationResult::kWasmToJsWrapper;
- return result;
+ return Pipeline::GenerateCodeForWasmNativeStub(
+ incoming, mcgraph, CodeKind::WASM_TO_JS_FUNCTION, func_name,
+ WasmStubAssemblerOptions(), source_position_table);
}
wasm::WasmCode* CompileWasmCapiCallWrapper(wasm::NativeModule* native_module,
@@ -7634,8 +7678,7 @@ wasm::WasmCode* CompileWasmCapiCallWrapper(wasm::NativeModule* native_module,
const char* debug_name = "WasmCapiCall";
wasm::WasmCompilationResult result = Pipeline::GenerateCodeForWasmNativeStub(
- call_descriptor, mcgraph, CodeKind::WASM_TO_CAPI_FUNCTION,
- wasm::WasmCode::kWasmToCapiWrapper, debug_name,
+ call_descriptor, mcgraph, CodeKind::WASM_TO_CAPI_FUNCTION, debug_name,
WasmStubAssemblerOptions(), source_positions);
wasm::WasmCode* published_code;
{
@@ -7816,10 +7859,9 @@ bool BuildGraphForWasmFunction(wasm::CompilationEnv* env,
WasmGraphBuilder builder(env, mcgraph->zone(), mcgraph, func_body.sig,
source_positions);
auto* allocator = wasm::GetWasmEngine()->allocator();
- wasm::VoidResult graph_construction_result =
- wasm::BuildTFGraph(allocator, env->enabled_features, env->module,
- &builder, detected, func_body, loop_infos,
- node_origins, func_index, wasm::kInstrumentEndpoints);
+ wasm::VoidResult graph_construction_result = wasm::BuildTFGraph(
+ allocator, env->enabled_features, env->module, &builder, detected,
+ func_body, loop_infos, node_origins, func_index, wasm::kRegularFunction);
if (graph_construction_result.failed()) {
if (FLAG_trace_wasm_compiler) {
StdoutStream{} << "Compilation failed: "
@@ -7903,7 +7945,8 @@ wasm::WasmCompilationResult ExecuteTurbofanWasmCompilation(
}
if (ContainsSimd(func_body.sig) && !CpuFeatures::SupportsWasmSimd128()) {
- call_descriptor = GetI32WasmCallDescriptorForSimd(&zone, call_descriptor);
+ // Fail compilation if hardware does not support SIMD.
+ return wasm::WasmCompilationResult{};
}
Pipeline::GenerateCodeForWasmFunction(&info, env, wire_bytes_storage, mcgraph,
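Instead of switching to the lowered-SIMD call descriptor, compilation now fails outright when the function uses SIMD and the host CPU lacks Wasm SIMD support: a default-constructed wasm::WasmCompilationResult reports !succeeded(), the same accessor DCHECKed earlier in this patch. Caller-side sketch, assuming the usual pattern:

  wasm::WasmCompilationResult result = /* ExecuteTurbofanWasmCompilation(...) */;
  if (!result.succeeded()) {
    // Turbofan compilation failed, e.g. the function uses SIMD but the host
    // has no Wasm SIMD support; the caller falls back or reports the failure.
  }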
diff --git a/deps/v8/src/compiler/wasm-compiler.h b/deps/v8/src/compiler/wasm-compiler.h
index 328152b363..ad33c7e1c6 100644
--- a/deps/v8/src/compiler/wasm-compiler.h
+++ b/deps/v8/src/compiler/wasm-compiler.h
@@ -325,16 +325,19 @@ class WasmGraphBuilder {
Node* CallIndirect(uint32_t table_index, uint32_t sig_index,
base::Vector<Node*> args, base::Vector<Node*> rets,
wasm::WasmCodePosition position);
- Node* CallRef(uint32_t sig_index, base::Vector<Node*> args,
+ Node* CallRef(const wasm::FunctionSig* sig, base::Vector<Node*> args,
base::Vector<Node*> rets, CheckForNull null_check,
wasm::WasmCodePosition position);
+ void CompareToExternalFunctionAtIndex(Node* func_ref, uint32_t function_index,
+ Node** success_control,
+ Node** failure_control);
Node* ReturnCall(uint32_t index, base::Vector<Node*> args,
wasm::WasmCodePosition position);
Node* ReturnCallIndirect(uint32_t table_index, uint32_t sig_index,
base::Vector<Node*> args,
wasm::WasmCodePosition position);
- Node* ReturnCallRef(uint32_t sig_index, base::Vector<Node*> args,
+ Node* ReturnCallRef(const wasm::FunctionSig* sig, base::Vector<Node*> args,
CheckForNull null_check, wasm::WasmCodePosition position);
void BrOnNull(Node* ref_object, Node** non_null_node, Node** null_node);
@@ -474,6 +477,8 @@ class WasmGraphBuilder {
void ArrayCopy(Node* dst_array, Node* dst_index, CheckForNull dst_null_check,
Node* src_array, Node* src_index, CheckForNull src_null_check,
Node* length, wasm::WasmCodePosition position);
+ Node* ArrayInit(uint32_t array_index, const wasm::ArrayType* type, Node* rtt,
+ base::Vector<Node*> elements);
Node* I31New(Node* input);
Node* I31GetS(Node* input);
Node* I31GetU(Node* input);
@@ -586,7 +591,7 @@ class WasmGraphBuilder {
base::Vector<Node*> rets,
wasm::WasmCodePosition position, Node* func_index,
IsReturnCall continuation);
- Node* BuildCallRef(uint32_t sig_index, base::Vector<Node*> args,
+ Node* BuildCallRef(const wasm::FunctionSig* sig, base::Vector<Node*> args,
base::Vector<Node*> rets, CheckForNull null_check,
IsReturnCall continuation,
wasm::WasmCodePosition position);
diff --git a/deps/v8/src/compiler/wasm-inlining.cc b/deps/v8/src/compiler/wasm-inlining.cc
index 6753769953..965b467d67 100644
--- a/deps/v8/src/compiler/wasm-inlining.cc
+++ b/deps/v8/src/compiler/wasm-inlining.cc
@@ -4,6 +4,8 @@
#include "src/compiler/wasm-inlining.h"
+#include "src/compiler/all-nodes.h"
+#include "src/compiler/compiler-source-position-table.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/wasm-compiler.h"
#include "src/wasm/function-body-decoder.h"
@@ -16,34 +18,47 @@ namespace internal {
namespace compiler {
Reduction WasmInliner::Reduce(Node* node) {
- if (node->opcode() == IrOpcode::kCall) {
- return ReduceCall(node);
- } else {
- return NoChange();
+ switch (node->opcode()) {
+ case IrOpcode::kCall:
+ case IrOpcode::kTailCall:
+ return ReduceCall(node);
+ default:
+ return NoChange();
}
}
-// TODO(12166): Abstract over a heuristics provider.
+// TODO(12166): Save inlined frames for trap/--trace-wasm purposes. Consider
+// tail calls.
+// TODO(12166): Inline indirect calls/call_ref.
Reduction WasmInliner::ReduceCall(Node* call) {
+ DCHECK(call->opcode() == IrOpcode::kCall ||
+ call->opcode() == IrOpcode::kTailCall);
Node* callee = NodeProperties::GetValueInput(call, 0);
IrOpcode::Value reloc_opcode = mcgraph_->machine()->Is32()
? IrOpcode::kRelocatableInt32Constant
: IrOpcode::kRelocatableInt64Constant;
if (callee->opcode() != reloc_opcode) return NoChange();
auto info = OpParameter<RelocatablePtrConstantInfo>(callee->op());
- if (static_cast<uint32_t>(info.value()) != inlinee_index_) return NoChange();
+ uint32_t inlinee_index = static_cast<uint32_t>(info.value());
+ if (!heuristics_->DoInline(source_positions_->GetSourcePosition(call),
+ inlinee_index)) {
+ return NoChange();
+ }
+
+ CHECK_LT(inlinee_index, module()->functions.size());
+ const wasm::WasmFunction* inlinee = &module()->functions[inlinee_index];
- CHECK_LT(inlinee_index_, module()->functions.size());
- const wasm::WasmFunction* function = &module()->functions[inlinee_index_];
- base::Vector<const byte> function_bytes =
- wire_bytes_->GetCode(function->code);
- const wasm::FunctionBody inlinee_body(function->sig, function->code.offset(),
+ base::Vector<const byte> function_bytes = wire_bytes_->GetCode(inlinee->code);
+
+ const wasm::FunctionBody inlinee_body(inlinee->sig, inlinee->code.offset(),
function_bytes.begin(),
function_bytes.end());
wasm::WasmFeatures detected;
- WasmGraphBuilder builder(env_, zone(), mcgraph_, inlinee_body.sig, spt_);
+ WasmGraphBuilder builder(env_, zone(), mcgraph_, inlinee_body.sig,
+ source_positions_);
std::vector<WasmLoopInfo> infos;
+ size_t subgraph_min_node_id = graph()->NodeCount();
wasm::DecodeResult result;
Node* inlinee_start;
Node* inlinee_end;
@@ -51,25 +66,24 @@ Reduction WasmInliner::ReduceCall(Node* call) {
Graph::SubgraphScope scope(graph());
result = wasm::BuildTFGraph(zone()->allocator(), env_->enabled_features,
module(), &builder, &detected, inlinee_body,
- &infos, node_origins_, inlinee_index_,
- wasm::kDoNotInstrumentEndpoints);
+ &infos, node_origins_, inlinee_index,
+ wasm::kInlinedFunction);
inlinee_start = graph()->start();
inlinee_end = graph()->end();
}
if (result.failed()) return NoChange();
- return InlineCall(call, inlinee_start, inlinee_end);
+ return call->opcode() == IrOpcode::kCall
+ ? InlineCall(call, inlinee_start, inlinee_end, inlinee->sig,
+ subgraph_min_node_id)
+ : InlineTailCall(call, inlinee_start, inlinee_end);
}
-// TODO(12166): Handle exceptions and tail calls.
-Reduction WasmInliner::InlineCall(Node* call, Node* callee_start,
- Node* callee_end) {
- DCHECK_EQ(call->opcode(), IrOpcode::kCall);
-
- /* 1) Rewire callee formal parameters to the call-site real parameters. Rewire
- * effect and control dependencies of callee's start node with the respective
- * inputs of the call node.
- */
+/* Rewire callee formal parameters to the call-site real parameters. Rewire
+ * effect and control dependencies of callee's start node with the respective
+ * inputs of the call node.
+ */
+void WasmInliner::RewireFunctionEntry(Node* call, Node* callee_start) {
Node* control = NodeProperties::GetControlInput(call);
Node* effect = NodeProperties::GetEffectInput(call);
@@ -93,16 +107,55 @@ Reduction WasmInliner::InlineCall(Node* call, Node* callee_start,
break;
}
}
+}
- /* 2) Rewire uses of the call node to the return values of the callee. Since
- * there might be multiple return nodes in the callee, we have to create Merge
- * and Phi nodes for them.
- */
+Reduction WasmInliner::InlineTailCall(Node* call, Node* callee_start,
+ Node* callee_end) {
+ DCHECK(call->opcode() == IrOpcode::kTailCall);
+ // 1) Rewire function entry.
+ RewireFunctionEntry(call, callee_start);
+ // 2) For tail calls, all we have to do is rewire all terminators of the
+ // inlined graph to the end of the caller graph.
+ for (Node* const input : callee_end->inputs()) {
+ DCHECK(IrOpcode::IsGraphTerminator(input->opcode()));
+ NodeProperties::MergeControlToEnd(graph(), common(), input);
+ Revisit(graph()->end());
+ }
+ callee_end->Kill();
+ return Replace(mcgraph()->Dead());
+}
+
+Reduction WasmInliner::InlineCall(Node* call, Node* callee_start,
+ Node* callee_end,
+ const wasm::FunctionSig* inlinee_sig,
+ size_t subgraph_min_node_id) {
+ DCHECK(call->opcode() == IrOpcode::kCall);
+
+ // 0) Before doing anything, if {call} has an exception handler, collect all
+ // unhandled calls in the subgraph.
+ Node* handler = nullptr;
+ std::vector<Node*> unhandled_subcalls;
+ if (NodeProperties::IsExceptionalCall(call, &handler)) {
+ AllNodes subgraph_nodes(zone(), callee_end, graph());
+ for (Node* node : subgraph_nodes.reachable) {
+ if (node->id() >= subgraph_min_node_id &&
+ !node->op()->HasProperty(Operator::kNoThrow) &&
+ !NodeProperties::IsExceptionalCall(node)) {
+ unhandled_subcalls.push_back(node);
+ }
+ }
+ }
+
+ // 1) Rewire function entry.
+ RewireFunctionEntry(call, callee_start);
+
+ // 2) Handle all graph terminators for the callee.
NodeVector return_nodes(zone());
for (Node* const input : callee_end->inputs()) {
DCHECK(IrOpcode::IsGraphTerminator(input->opcode()));
switch (input->opcode()) {
case IrOpcode::kReturn:
+ // Returns are collected to be rewired into the caller graph later.
return_nodes.push_back(input);
break;
case IrOpcode::kDeoptimize:
@@ -111,16 +164,79 @@ Reduction WasmInliner::InlineCall(Node* call, Node* callee_start,
NodeProperties::MergeControlToEnd(graph(), common(), input);
Revisit(graph()->end());
break;
- case IrOpcode::kTailCall:
- // TODO(12166): A tail call in the inlined function has to be
- // transformed into a regular call in the caller function.
- UNIMPLEMENTED();
+ case IrOpcode::kTailCall: {
+ // A tail call in the callee inlined in a regular call in the caller has
+ // to be transformed into a regular call, and then returned from the
+ // inlinee. It will then be handled like any other return.
+ auto descriptor = CallDescriptorOf(input->op());
+ NodeProperties::ChangeOp(input, common()->Call(descriptor));
+ int return_arity = static_cast<int>(inlinee_sig->return_count());
+ NodeVector return_inputs(zone());
+ // The first input of a return node is always the 0 constant.
+ return_inputs.push_back(graph()->NewNode(common()->Int32Constant(0)));
+ if (return_arity == 1) {
+ return_inputs.push_back(input);
+ } else if (return_arity > 1) {
+ for (int i = 0; i < return_arity; i++) {
+ return_inputs.push_back(
+ graph()->NewNode(common()->Projection(i), input, input));
+ }
+ }
+
+ // Add effect and control inputs.
+ return_inputs.push_back(input->op()->EffectOutputCount() > 0
+ ? input
+ : NodeProperties::GetEffectInput(input));
+ return_inputs.push_back(input->op()->ControlOutputCount() > 0
+ ? input
+ : NodeProperties::GetControlInput(input));
+
+ Node* ret = graph()->NewNode(common()->Return(return_arity),
+ static_cast<int>(return_inputs.size()),
+ return_inputs.data());
+ return_nodes.push_back(ret);
+ break;
+ }
default:
UNREACHABLE();
}
}
+ callee_end->Kill();
+
+ // 3) Rewire unhandled calls to the handler.
+ std::vector<Node*> on_exception_nodes;
+ for (Node* subcall : unhandled_subcalls) {
+ Node* on_success = graph()->NewNode(common()->IfSuccess(), subcall);
+ NodeProperties::ReplaceUses(subcall, subcall, subcall, on_success);
+ NodeProperties::ReplaceControlInput(on_success, subcall);
+ Node* on_exception =
+ graph()->NewNode(common()->IfException(), subcall, subcall);
+ on_exception_nodes.push_back(on_exception);
+ }
+
+ int subcall_count = static_cast<int>(on_exception_nodes.size());
+
+ if (subcall_count > 0) {
+ Node* control_output =
+ graph()->NewNode(common()->Merge(subcall_count), subcall_count,
+ on_exception_nodes.data());
+ on_exception_nodes.push_back(control_output);
+ Node* value_output = graph()->NewNode(
+ common()->Phi(MachineRepresentation::kTagged, subcall_count),
+ subcall_count + 1, on_exception_nodes.data());
+ Node* effect_output =
+ graph()->NewNode(common()->EffectPhi(subcall_count), subcall_count + 1,
+ on_exception_nodes.data());
+ ReplaceWithValue(handler, value_output, effect_output, control_output);
+ } else if (handler != nullptr) {
+ // Nothing in the inlined function can throw. Remove the handler.
+ ReplaceWithValue(handler, mcgraph()->Dead(), mcgraph()->Dead(),
+ mcgraph()->Dead());
+ }
if (return_nodes.size() > 0) {
+ /* 4) Collect all return site value, effect, and control inputs into phis
+ * and merges. */
int const return_count = static_cast<int>(return_nodes.size());
NodeVector controls(zone());
NodeVector effects(zone());
@@ -150,14 +266,14 @@ Reduction WasmInliner::InlineCall(Node* call, Node* callee_start,
ith_values.push_back(control_output);
// Find the correct machine representation for the return values from the
// inlinee signature.
- const wasm::WasmFunction* function = &module()->functions[inlinee_index_];
MachineRepresentation repr =
- function->sig->GetReturn(i).machine_representation();
+ inlinee_sig->GetReturn(i).machine_representation();
Node* ith_value_output = graph()->NewNode(
common()->Phi(repr, return_count),
static_cast<int>(ith_values.size()), &ith_values.front());
values.push_back(ith_value_output);
}
+ for (Node* return_node : return_nodes) return_node->Kill();
if (return_arity == 0) {
// Void function, no value uses.
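When the call being inlined has an exception handler, every potentially throwing node of the inlinee that has no handler of its own is split into IfSuccess/IfException projections, and all IfException paths are merged (Merge plus value Phi and EffectPhi) into the caller's original handler. The per-node rewiring, restated from the hunk above:

  Node* on_success = graph()->NewNode(common()->IfSuccess(), subcall);
  NodeProperties::ReplaceUses(subcall, subcall, subcall, on_success);
  NodeProperties::ReplaceControlInput(on_success, subcall);
  Node* on_exception =
      graph()->NewNode(common()->IfException(), subcall, subcall);
  // All on_exception nodes are later merged and wired to the caller's handler.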
diff --git a/deps/v8/src/compiler/wasm-inlining.h b/deps/v8/src/compiler/wasm-inlining.h
index 8b31b6b291..b63e232198 100644
--- a/deps/v8/src/compiler/wasm-inlining.h
+++ b/deps/v8/src/compiler/wasm-inlining.h
@@ -18,6 +18,7 @@ namespace internal {
namespace wasm {
struct CompilationEnv;
struct WasmModule;
+struct WasmFunction;
class WireBytesStorage;
} // namespace wasm
@@ -29,24 +30,49 @@ namespace compiler {
class NodeOriginTable;
class SourcePositionTable;
+// Parent class for classes that provide heuristics on how to inline in wasm.
+class WasmInliningHeuristics {
+ public:
+ virtual bool DoInline(SourcePosition position,
+ uint32_t function_index) const = 0;
+};
+
+// A simple inlining heuristic that inlines all function calls to a set of given
+// function indices.
+class InlineByIndex : public WasmInliningHeuristics {
+ public:
+ explicit InlineByIndex(uint32_t function_index)
+      : WasmInliningHeuristics(), function_indices_{function_index} {}
+ InlineByIndex(std::initializer_list<uint32_t> function_indices)
+ : WasmInliningHeuristics(), function_indices_(function_indices) {}
+
+ bool DoInline(SourcePosition position,
+ uint32_t function_index) const override {
+ return function_indices_.count(function_index) > 0;
+ }
+
+ private:
+ std::unordered_set<uint32_t> function_indices_;
+};
+
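Putting the pieces together: a pipeline phase (or test) constructs a heuristics object and hands it to the WasmInliner, whose constructor is shown below; the reducer registration mirrors the AddReducer call at the top of this patch. Sketch only, with the surrounding phase setup elided:

  // Inline all calls whose callee index is 42 or 43 (indices are illustrative).
  InlineByIndex heuristics({42, 43});
  WasmInliner inliner(&graph_reducer, env, source_positions, node_origins,
                      mcgraph, wire_bytes, &heuristics);
  AddReducer(data, &graph_reducer, &inliner);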
// The WasmInliner provides the core graph inlining machinery for Webassembly
// graphs. Note that this class only deals with the mechanics of how to inline
-// one graph into another, heuristics that decide what and how much to inline
-// are beyond its scope. As a current placeholder, only a function at specific
-// given index {inlinee_index} is inlined.
+// one graph into another; heuristics that decide what and how much to inline
+// are provided by {WasmInliningHeuristics}.
class WasmInliner final : public AdvancedReducer {
public:
WasmInliner(Editor* editor, wasm::CompilationEnv* env,
- SourcePositionTable* spt, NodeOriginTable* node_origins,
- MachineGraph* mcgraph, const wasm::WireBytesStorage* wire_bytes,
- uint32_t inlinee_index)
+ SourcePositionTable* source_positions,
+ NodeOriginTable* node_origins, MachineGraph* mcgraph,
+ const wasm::WireBytesStorage* wire_bytes,
+ const WasmInliningHeuristics* heuristics)
: AdvancedReducer(editor),
env_(env),
- spt_(spt),
+ source_positions_(source_positions),
node_origins_(node_origins),
mcgraph_(mcgraph),
wire_bytes_(wire_bytes),
- inlinee_index_(inlinee_index) {}
+ heuristics_(heuristics) {}
const char* reducer_name() const override { return "WasmInliner"; }
@@ -58,16 +84,21 @@ class WasmInliner final : public AdvancedReducer {
Graph* graph() const { return mcgraph_->graph(); }
MachineGraph* mcgraph() const { return mcgraph_; }
const wasm::WasmModule* module() const;
+ const wasm::WasmFunction* inlinee() const;
Reduction ReduceCall(Node* call);
- Reduction InlineCall(Node* call, Node* callee_start, Node* callee_end);
+ Reduction InlineCall(Node* call, Node* callee_start, Node* callee_end,
+ const wasm::FunctionSig* inlinee_sig,
+ size_t subgraph_min_node_id);
+ Reduction InlineTailCall(Node* call, Node* callee_start, Node* callee_end);
+ void RewireFunctionEntry(Node* call, Node* callee_start);
wasm::CompilationEnv* const env_;
- SourcePositionTable* const spt_;
+ SourcePositionTable* const source_positions_;
NodeOriginTable* const node_origins_;
MachineGraph* const mcgraph_;
const wasm::WireBytesStorage* const wire_bytes_;
- const uint32_t inlinee_index_;
+ const WasmInliningHeuristics* const heuristics_;
};
} // namespace compiler