Diffstat (limited to 'deps/v8/src/compiler')
-rw-r--r--  deps/v8/src/compiler/OWNERS | 5
-rw-r--r--  deps/v8/src/compiler/access-builder.cc | 8
-rw-r--r--  deps/v8/src/compiler/access-builder.h | 3
-rw-r--r--  deps/v8/src/compiler/arm/code-generator-arm.cc | 325
-rw-r--r--  deps/v8/src/compiler/arm/instruction-codes-arm.h | 1
-rw-r--r--  deps/v8/src/compiler/arm/instruction-scheduler-arm.cc | 1
-rw-r--r--  deps/v8/src/compiler/arm/instruction-selector-arm.cc | 181
-rw-r--r--  deps/v8/src/compiler/arm64/code-generator-arm64.cc | 425
-rw-r--r--  deps/v8/src/compiler/arm64/instruction-codes-arm64.h | 9
-rw-r--r--  deps/v8/src/compiler/arm64/instruction-scheduler-arm64.cc | 28
-rw-r--r--  deps/v8/src/compiler/arm64/instruction-selector-arm64.cc | 263
-rw-r--r--  deps/v8/src/compiler/branch-elimination.cc | 5
-rw-r--r--  deps/v8/src/compiler/bytecode-graph-builder.cc | 47
-rw-r--r--  deps/v8/src/compiler/bytecode-graph-builder.h | 8
-rw-r--r--  deps/v8/src/compiler/c-linkage.cc | 2
-rw-r--r--  deps/v8/src/compiler/code-assembler.cc | 17
-rw-r--r--  deps/v8/src/compiler/code-assembler.h | 14
-rw-r--r--  deps/v8/src/compiler/code-generator.cc | 23
-rw-r--r--  deps/v8/src/compiler/common-operator-reducer.cc | 12
-rw-r--r--  deps/v8/src/compiler/common-operator.cc | 97
-rw-r--r--  deps/v8/src/compiler/common-operator.h | 22
-rw-r--r--  deps/v8/src/compiler/dead-code-elimination.cc | 49
-rw-r--r--  deps/v8/src/compiler/dead-code-elimination.h | 30
-rw-r--r--  deps/v8/src/compiler/effect-control-linearizer.cc | 510
-rw-r--r--  deps/v8/src/compiler/effect-control-linearizer.h | 12
-rw-r--r--  deps/v8/src/compiler/escape-analysis-reducer.cc | 45
-rw-r--r--  deps/v8/src/compiler/escape-analysis-reducer.h | 2
-rw-r--r--  deps/v8/src/compiler/escape-analysis.cc | 19
-rw-r--r--  deps/v8/src/compiler/frame.cc | 11
-rw-r--r--  deps/v8/src/compiler/frame.h | 49
-rw-r--r--  deps/v8/src/compiler/gap-resolver.cc | 14
-rw-r--r--  deps/v8/src/compiler/graph-assembler.cc | 36
-rw-r--r--  deps/v8/src/compiler/graph-assembler.h | 16
-rw-r--r--  deps/v8/src/compiler/graph-trimmer.h | 1
-rw-r--r--  deps/v8/src/compiler/ia32/code-generator-ia32.cc | 761
-rw-r--r--  deps/v8/src/compiler/ia32/instruction-codes-ia32.h | 43
-rw-r--r--  deps/v8/src/compiler/ia32/instruction-scheduler-ia32.cc | 53
-rw-r--r--  deps/v8/src/compiler/ia32/instruction-selector-ia32.cc | 316
-rw-r--r--  deps/v8/src/compiler/instruction-codes.h | 14
-rw-r--r--  deps/v8/src/compiler/instruction-scheduler.cc | 30
-rw-r--r--  deps/v8/src/compiler/instruction-selector-impl.h | 29
-rw-r--r--  deps/v8/src/compiler/instruction-selector.cc | 348
-rw-r--r--  deps/v8/src/compiler/instruction-selector.h | 31
-rw-r--r--  deps/v8/src/compiler/instruction.cc | 4
-rw-r--r--  deps/v8/src/compiler/instruction.h | 14
-rw-r--r--  deps/v8/src/compiler/int64-lowering.cc | 15
-rw-r--r--  deps/v8/src/compiler/js-builtin-reducer.cc | 742
-rw-r--r--  deps/v8/src/compiler/js-builtin-reducer.h | 11
-rw-r--r--  deps/v8/src/compiler/js-call-reducer.cc | 2109
-rw-r--r--  deps/v8/src/compiler/js-call-reducer.h | 29
-rw-r--r--  deps/v8/src/compiler/js-create-lowering.cc | 16
-rw-r--r--  deps/v8/src/compiler/js-generic-lowering.cc | 29
-rw-r--r--  deps/v8/src/compiler/js-graph.cc | 4
-rw-r--r--  deps/v8/src/compiler/js-graph.h | 4
-rw-r--r--  deps/v8/src/compiler/js-inlining-heuristic.cc | 3
-rw-r--r--  deps/v8/src/compiler/js-inlining-heuristic.h | 2
-rw-r--r--  deps/v8/src/compiler/js-intrinsic-lowering.cc | 3
-rw-r--r--  deps/v8/src/compiler/js-native-context-specialization.cc | 103
-rw-r--r--  deps/v8/src/compiler/js-operator.cc | 43
-rw-r--r--  deps/v8/src/compiler/js-operator.h | 48
-rw-r--r--  deps/v8/src/compiler/js-type-hint-lowering.cc | 5
-rw-r--r--  deps/v8/src/compiler/js-typed-lowering.cc | 121
-rw-r--r--  deps/v8/src/compiler/js-typed-lowering.h | 5
-rw-r--r--  deps/v8/src/compiler/linkage.cc | 41
-rw-r--r--  deps/v8/src/compiler/linkage.h | 27
-rw-r--r--  deps/v8/src/compiler/load-elimination.cc | 4
-rw-r--r--  deps/v8/src/compiler/loop-analysis.cc | 2
-rw-r--r--  deps/v8/src/compiler/loop-peeling.cc | 95
-rw-r--r--  deps/v8/src/compiler/loop-peeling.h | 31
-rw-r--r--  deps/v8/src/compiler/loop-variable-optimizer.cc | 34
-rw-r--r--  deps/v8/src/compiler/loop-variable-optimizer.h | 7
-rw-r--r--  deps/v8/src/compiler/machine-graph-verifier.cc | 43
-rw-r--r--  deps/v8/src/compiler/machine-operator-reducer.cc | 24
-rw-r--r--  deps/v8/src/compiler/machine-operator.cc | 84
-rw-r--r--  deps/v8/src/compiler/machine-operator.h | 22
-rw-r--r--  deps/v8/src/compiler/memory-optimizer.cc | 2
-rw-r--r--  deps/v8/src/compiler/mips/code-generator-mips.cc | 228
-rw-r--r--  deps/v8/src/compiler/mips/instruction-codes-mips.h | 1
-rw-r--r--  deps/v8/src/compiler/mips/instruction-selector-mips.cc | 169
-rw-r--r--  deps/v8/src/compiler/mips64/code-generator-mips64.cc | 245
-rw-r--r--  deps/v8/src/compiler/mips64/instruction-codes-mips64.h | 1
-rw-r--r--  deps/v8/src/compiler/mips64/instruction-selector-mips64.cc | 190
-rw-r--r--  deps/v8/src/compiler/node-properties.cc | 38
-rw-r--r--  deps/v8/src/compiler/node-properties.h | 3
-rw-r--r--  deps/v8/src/compiler/opcodes.h | 46
-rw-r--r--  deps/v8/src/compiler/operation-typer.cc | 10
-rw-r--r--  deps/v8/src/compiler/operation-typer.h | 2
-rw-r--r--  deps/v8/src/compiler/pipeline.cc | 96
-rw-r--r--  deps/v8/src/compiler/pipeline.h | 2
-rw-r--r--  deps/v8/src/compiler/ppc/code-generator-ppc.cc | 302
-rw-r--r--  deps/v8/src/compiler/ppc/instruction-selector-ppc.cc | 307
-rw-r--r--  deps/v8/src/compiler/property-access-builder.cc | 15
-rw-r--r--  deps/v8/src/compiler/raw-machine-assembler.cc | 32
-rw-r--r--  deps/v8/src/compiler/raw-machine-assembler.h | 7
-rw-r--r--  deps/v8/src/compiler/redundancy-elimination.cc | 55
-rw-r--r--  deps/v8/src/compiler/representation-change.cc | 67
-rw-r--r--  deps/v8/src/compiler/representation-change.h | 23
-rw-r--r--  deps/v8/src/compiler/s390/code-generator-s390.cc | 212
-rw-r--r--  deps/v8/src/compiler/s390/instruction-selector-s390.cc | 321
-rw-r--r--  deps/v8/src/compiler/simd-scalar-lowering.cc | 8
-rw-r--r--  deps/v8/src/compiler/simplified-lowering.cc | 135
-rw-r--r--  deps/v8/src/compiler/simplified-lowering.h | 2
-rw-r--r--  deps/v8/src/compiler/simplified-operator.cc | 315
-rw-r--r--  deps/v8/src/compiler/simplified-operator.h | 178
-rw-r--r--  deps/v8/src/compiler/state-values-utils.cc | 2
-rw-r--r--  deps/v8/src/compiler/store-store-elimination.cc | 4
-rw-r--r--  deps/v8/src/compiler/type-cache.h | 2
-rw-r--r--  deps/v8/src/compiler/typer.cc | 140
-rw-r--r--  deps/v8/src/compiler/types.cc | 2
-rw-r--r--  deps/v8/src/compiler/types.h | 3
-rw-r--r--  deps/v8/src/compiler/verifier.cc | 59
-rw-r--r--  deps/v8/src/compiler/wasm-compiler.cc | 693
-rw-r--r--  deps/v8/src/compiler/wasm-compiler.h | 121
-rw-r--r--  deps/v8/src/compiler/wasm-linkage.cc | 123
-rw-r--r--  deps/v8/src/compiler/x64/code-generator-x64.cc | 479
-rw-r--r--  deps/v8/src/compiler/x64/instruction-codes-x64.h | 16
-rw-r--r--  deps/v8/src/compiler/x64/instruction-scheduler-x64.cc | 32
-rw-r--r--  deps/v8/src/compiler/x64/instruction-selector-x64.cc | 217
118 files changed, 6599 insertions(+), 5945 deletions(-)
diff --git a/deps/v8/src/compiler/OWNERS b/deps/v8/src/compiler/OWNERS
index b63f5431e2..2e9052e0c3 100644
--- a/deps/v8/src/compiler/OWNERS
+++ b/deps/v8/src/compiler/OWNERS
@@ -3,13 +3,16 @@ set noparent
bmeurer@chromium.org
jarin@chromium.org
mstarzinger@chromium.org
-mtrofin@chromium.org
titzer@chromium.org
danno@chromium.org
tebbi@chromium.org
neis@chromium.org
mvstanton@chromium.org
+# For backend
+bbudge@chromium.org
+mtrofin@chromium.org
+
per-file wasm-*=ahaas@chromium.org
per-file wasm-*=bbudge@chromium.org
per-file wasm-*=bradnelson@chromium.org
diff --git a/deps/v8/src/compiler/access-builder.cc b/deps/v8/src/compiler/access-builder.cc
index ac4fc4363b..13d6801c32 100644
--- a/deps/v8/src/compiler/access-builder.cc
+++ b/deps/v8/src/compiler/access-builder.cc
@@ -52,6 +52,14 @@ FieldAccess AccessBuilder::ForHeapNumberValue() {
return access;
}
+// static
+FieldAccess AccessBuilder::ForBigIntBitfield() {
+ FieldAccess access = {
+ kTaggedBase, BigInt::kBitfieldOffset, MaybeHandle<Name>(),
+ MaybeHandle<Map>(), TypeCache::Get().kInt32, MachineType::IntPtr(),
+ kNoWriteBarrier};
+ return access;
+}
// static
FieldAccess AccessBuilder::ForJSObjectPropertiesOrHash() {
diff --git a/deps/v8/src/compiler/access-builder.h b/deps/v8/src/compiler/access-builder.h
index e348c0f71b..a2ce1f800b 100644
--- a/deps/v8/src/compiler/access-builder.h
+++ b/deps/v8/src/compiler/access-builder.h
@@ -38,6 +38,9 @@ class V8_EXPORT_PRIVATE AccessBuilder final
// Provides access to HeapNumber::value() field.
static FieldAccess ForHeapNumberValue();
+ // Provides access to BigInt's bit field.
+ static FieldAccess ForBigIntBitfield();
+
// Provides access to JSObject::properties() field.
static FieldAccess ForJSObjectPropertiesOrHash();
diff --git a/deps/v8/src/compiler/arm/code-generator-arm.cc b/deps/v8/src/compiler/arm/code-generator-arm.cc
index 1a66e5b7d4..a238cf29d4 100644
--- a/deps/v8/src/compiler/arm/code-generator-arm.cc
+++ b/deps/v8/src/compiler/arm/code-generator-arm.cc
@@ -152,49 +152,6 @@ class ArmOperandConverter final : public InstructionOperandConverter {
namespace {
-class OutOfLineLoadFloat final : public OutOfLineCode {
- public:
- OutOfLineLoadFloat(CodeGenerator* gen, SwVfpRegister result)
- : OutOfLineCode(gen), result_(result) {}
-
- void Generate() final {
- // Compute sqrtf(-1.0f), which results in a quiet single-precision NaN.
- __ vmov(result_, Float32(-1.0f));
- __ vsqrt(result_, result_);
- }
-
- private:
- SwVfpRegister const result_;
-};
-
-class OutOfLineLoadDouble final : public OutOfLineCode {
- public:
- OutOfLineLoadDouble(CodeGenerator* gen, DwVfpRegister result)
- : OutOfLineCode(gen), result_(result) {}
-
- void Generate() final {
- // Compute sqrt(-1.0), which results in a quiet double-precision NaN.
- __ vmov(result_, Double(-1.0));
- __ vsqrt(result_, result_);
- }
-
- private:
- DwVfpRegister const result_;
-};
-
-
-class OutOfLineLoadInteger final : public OutOfLineCode {
- public:
- OutOfLineLoadInteger(CodeGenerator* gen, Register result)
- : OutOfLineCode(gen), result_(result) {}
-
- void Generate() final { __ mov(result_, Operand::Zero()); }
-
- private:
- Register const result_;
-};
-
-
class OutOfLineRecordWrite final : public OutOfLineCode {
public:
OutOfLineRecordWrite(CodeGenerator* gen, Register object, Register index,
@@ -359,64 +316,6 @@ Condition FlagsConditionToCondition(FlagsCondition condition) {
} // namespace
-#define ASSEMBLE_CHECKED_LOAD_FP(Type) \
- do { \
- auto result = i.Output##Type##Register(); \
- auto offset = i.InputRegister(0); \
- if (instr->InputAt(1)->IsRegister()) { \
- __ cmp(offset, i.InputRegister(1)); \
- } else { \
- __ cmp(offset, i.InputImmediate(1)); \
- } \
- auto ool = new (zone()) OutOfLineLoad##Type(this, result); \
- __ b(hs, ool->entry()); \
- __ vldr(result, i.InputOffset(2)); \
- __ bind(ool->exit()); \
- DCHECK_EQ(LeaveCC, i.OutputSBit()); \
- } while (0)
-
-#define ASSEMBLE_CHECKED_LOAD_INTEGER(asm_instr) \
- do { \
- auto result = i.OutputRegister(); \
- auto offset = i.InputRegister(0); \
- if (instr->InputAt(1)->IsRegister()) { \
- __ cmp(offset, i.InputRegister(1)); \
- } else { \
- __ cmp(offset, i.InputImmediate(1)); \
- } \
- auto ool = new (zone()) OutOfLineLoadInteger(this, result); \
- __ b(hs, ool->entry()); \
- __ asm_instr(result, i.InputOffset(2)); \
- __ bind(ool->exit()); \
- DCHECK_EQ(LeaveCC, i.OutputSBit()); \
- } while (0)
-
-#define ASSEMBLE_CHECKED_STORE_FP(Type) \
- do { \
- auto offset = i.InputRegister(0); \
- if (instr->InputAt(1)->IsRegister()) { \
- __ cmp(offset, i.InputRegister(1)); \
- } else { \
- __ cmp(offset, i.InputImmediate(1)); \
- } \
- auto value = i.Input##Type##Register(2); \
- __ vstr(value, i.InputOffset(3), lo); \
- DCHECK_EQ(LeaveCC, i.OutputSBit()); \
- } while (0)
-
-#define ASSEMBLE_CHECKED_STORE_INTEGER(asm_instr) \
- do { \
- auto offset = i.InputRegister(0); \
- if (instr->InputAt(1)->IsRegister()) { \
- __ cmp(offset, i.InputRegister(1)); \
- } else { \
- __ cmp(offset, i.InputImmediate(1)); \
- } \
- auto value = i.InputRegister(2); \
- __ asm_instr(value, i.InputOffset(3), lo); \
- DCHECK_EQ(LeaveCC, i.OutputSBit()); \
- } while (0)
-
#define ASSEMBLE_ATOMIC_LOAD_INTEGER(asm_instr) \
do { \
__ asm_instr(i.OutputRegister(), \
@@ -432,51 +331,51 @@ Condition FlagsConditionToCondition(FlagsCondition condition) {
__ dmb(ISH); \
} while (0)
-#define ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(load_instr, store_instr) \
- do { \
- Label exchange; \
- __ add(i.InputRegister(0), i.InputRegister(0), i.InputRegister(1)); \
- __ dmb(ISH); \
- __ bind(&exchange); \
- __ load_instr(i.OutputRegister(0), i.InputRegister(0)); \
- __ store_instr(i.TempRegister(0), i.InputRegister(2), i.InputRegister(0)); \
- __ teq(i.TempRegister(0), Operand(0)); \
- __ b(ne, &exchange); \
- __ dmb(ISH); \
- } while (0)
-
-#define ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(load_instr, store_instr) \
- do { \
- Label compareExchange; \
- Label exit; \
- __ add(i.InputRegister(0), i.InputRegister(0), i.InputRegister(1)); \
- __ dmb(ISH); \
- __ bind(&compareExchange); \
- __ load_instr(i.OutputRegister(0), i.InputRegister(0)); \
- __ teq(i.InputRegister(2), Operand(i.OutputRegister(0))); \
- __ b(ne, &exit); \
- __ store_instr(i.TempRegister(0), i.InputRegister(3), i.InputRegister(0)); \
- __ teq(i.TempRegister(0), Operand(0)); \
- __ b(ne, &compareExchange); \
- __ bind(&exit); \
- __ dmb(ISH); \
+#define ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(load_instr, store_instr) \
+ do { \
+ Label exchange; \
+ __ add(i.TempRegister(1), i.InputRegister(0), i.InputRegister(1)); \
+ __ dmb(ISH); \
+ __ bind(&exchange); \
+ __ load_instr(i.OutputRegister(0), i.TempRegister(1)); \
+ __ store_instr(i.TempRegister(0), i.InputRegister(2), i.TempRegister(1)); \
+ __ teq(i.TempRegister(0), Operand(0)); \
+ __ b(ne, &exchange); \
+ __ dmb(ISH); \
} while (0)
-#define ASSEMBLE_ATOMIC_BINOP(load_instr, store_instr, bin_instr) \
+#define ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(load_instr, store_instr, \
+ cmp_reg) \
do { \
- Label binop; \
- __ add(i.InputRegister(0), i.InputRegister(0), i.InputRegister(1)); \
+ Label compareExchange; \
+ Label exit; \
__ dmb(ISH); \
- __ bind(&binop); \
- __ load_instr(i.OutputRegister(0), i.InputRegister(0)); \
- __ bin_instr(i.TempRegister(0), i.OutputRegister(0), \
- Operand(i.InputRegister(2))); \
- __ store_instr(i.TempRegister(1), i.TempRegister(0), i.InputRegister(0)); \
- __ teq(i.TempRegister(1), Operand(0)); \
- __ b(ne, &binop); \
+ __ bind(&compareExchange); \
+ __ load_instr(i.OutputRegister(0), i.TempRegister(1)); \
+ __ teq(cmp_reg, Operand(i.OutputRegister(0))); \
+ __ b(ne, &exit); \
+ __ store_instr(i.TempRegister(0), i.InputRegister(3), i.TempRegister(1)); \
+ __ teq(i.TempRegister(0), Operand(0)); \
+ __ b(ne, &compareExchange); \
+ __ bind(&exit); \
__ dmb(ISH); \
} while (0)
+#define ASSEMBLE_ATOMIC_BINOP(load_instr, store_instr, bin_instr) \
+ do { \
+ Label binop; \
+ __ add(i.TempRegister(1), i.InputRegister(0), i.InputRegister(1)); \
+ __ dmb(ISH); \
+ __ bind(&binop); \
+ __ load_instr(i.OutputRegister(0), i.TempRegister(1)); \
+ __ bin_instr(i.TempRegister(0), i.OutputRegister(0), \
+ Operand(i.InputRegister(2))); \
+ __ store_instr(i.TempRegister(2), i.TempRegister(0), i.TempRegister(1)); \
+ __ teq(i.TempRegister(2), Operand(0)); \
+ __ b(ne, &binop); \
+ __ dmb(ISH); \
+ } while (0)
+
#define ASSEMBLE_IEEE754_BINOP(name) \
do { \
/* TODO(bmeurer): We should really get rid of this special instruction, */ \
@@ -675,17 +574,18 @@ void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
// Check if the code object is marked for deoptimization. If it is, then it
// jumps to the CompileLazyDeoptimizedCode builtin. In order to do this we need
// to:
-// 1. load the address of the current instruction;
+// 1. compute the offset of the {CodeDataContainer} from our current location
+// and load it.
// 2. read from memory the word that contains that bit, which can be found in
// the flags in the referenced {CodeDataContainer} object;
// 3. test kMarkedForDeoptimizationBit in those flags; and
// 4. if it is not zero then it jumps to the builtin.
void CodeGenerator::BailoutIfDeoptimized() {
int pc_offset = __ pc_offset();
- int offset =
- Code::kCodeDataContainerOffset - (Code::kHeaderSize + pc_offset + 8);
+ int offset = Code::kCodeDataContainerOffset -
+ (Code::kHeaderSize + pc_offset + TurboAssembler::kPcLoadDelta);
// We can use the register pc - 8 for the address of the current instruction.
- __ ldr(ip, MemOperand(pc, offset));
+ __ ldr_pcrel(ip, offset);
__ ldr(ip, FieldMemOperand(ip, CodeDataContainer::kKindSpecificFlagsOffset));
__ tst(ip, Operand(1 << Code::kMarkedForDeoptimizationBit));
Handle<Code> code = isolate()->builtins()->builtin_handle(
@@ -804,7 +704,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
// Check the function's context matches the context argument.
__ ldr(kScratchReg, FieldMemOperand(func, JSFunction::kContextOffset));
__ cmp(cp, kScratchReg);
- __ Assert(eq, kWrongFunctionContext);
+ __ Assert(eq, AbortReason::kWrongFunctionContext);
}
__ ldr(ip, FieldMemOperand(func, JSFunction::kCodeOffset));
__ add(ip, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
@@ -1681,13 +1581,23 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArmPush:
if (instr->InputAt(0)->IsFPRegister()) {
LocationOperand* op = LocationOperand::cast(instr->InputAt(0));
- if (op->representation() == MachineRepresentation::kFloat64) {
- __ vpush(i.InputDoubleRegister(0));
- frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
- } else {
- DCHECK_EQ(MachineRepresentation::kFloat32, op->representation());
- __ vpush(i.InputFloatRegister(0));
- frame_access_state()->IncreaseSPDelta(1);
+ switch (op->representation()) {
+ case MachineRepresentation::kFloat32:
+ __ vpush(i.InputFloatRegister(0));
+ frame_access_state()->IncreaseSPDelta(1);
+ break;
+ case MachineRepresentation::kFloat64:
+ __ vpush(i.InputDoubleRegister(0));
+ frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
+ break;
+ case MachineRepresentation::kSimd128: {
+ __ vpush(i.InputSimd128Register(0));
+ frame_access_state()->IncreaseSPDelta(kSimd128Size / kPointerSize);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ break;
}
} else {
__ push(i.InputRegister(0));
@@ -1701,6 +1611,24 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
}
+ case kArmPeek: {
+ // The incoming value is 0-based, but we need a 1-based value.
+ int reverse_slot = i.InputInt32(0) + 1;
+ int offset =
+ FrameSlotToFPOffset(frame()->GetTotalFrameSlotCount() - reverse_slot);
+ if (instr->OutputAt(0)->IsFPRegister()) {
+ LocationOperand* op = LocationOperand::cast(instr->OutputAt(0));
+ if (op->representation() == MachineRepresentation::kFloat64) {
+ __ vldr(i.OutputDoubleRegister(), MemOperand(fp, offset));
+ } else {
+ DCHECK_EQ(MachineRepresentation::kFloat32, op->representation());
+ __ vldr(i.OutputFloatRegister(), MemOperand(fp, offset));
+ }
+ } else {
+ __ ldr(i.OutputRegister(), MemOperand(fp, offset));
+ }
+ break;
+ }
case kArmF32x4Splat: {
int src_code = i.InputFloatRegister(0).code();
__ vdup(Neon32, i.OutputSimd128Register(),
@@ -2558,47 +2486,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ ExtractLane(i.OutputRegister(), kScratchDoubleReg, NeonS8, 0);
break;
}
- case kCheckedLoadInt8:
- ASSEMBLE_CHECKED_LOAD_INTEGER(ldrsb);
- break;
- case kCheckedLoadUint8:
- ASSEMBLE_CHECKED_LOAD_INTEGER(ldrb);
- break;
- case kCheckedLoadInt16:
- ASSEMBLE_CHECKED_LOAD_INTEGER(ldrsh);
- break;
- case kCheckedLoadUint16:
- ASSEMBLE_CHECKED_LOAD_INTEGER(ldrh);
- break;
- case kCheckedLoadWord32:
- ASSEMBLE_CHECKED_LOAD_INTEGER(ldr);
- break;
- case kCheckedLoadFloat32:
- ASSEMBLE_CHECKED_LOAD_FP(Float);
- break;
- case kCheckedLoadFloat64:
- ASSEMBLE_CHECKED_LOAD_FP(Double);
- break;
- case kCheckedStoreWord8:
- ASSEMBLE_CHECKED_STORE_INTEGER(strb);
- break;
- case kCheckedStoreWord16:
- ASSEMBLE_CHECKED_STORE_INTEGER(strh);
- break;
- case kCheckedStoreWord32:
- ASSEMBLE_CHECKED_STORE_INTEGER(str);
- break;
- case kCheckedStoreFloat32:
- ASSEMBLE_CHECKED_STORE_FP(Float);
- break;
- case kCheckedStoreFloat64:
- ASSEMBLE_CHECKED_STORE_FP(Double);
- break;
- case kCheckedLoadWord64:
- case kCheckedStoreWord64:
- UNREACHABLE(); // currently unsupported checked int64 load/store.
- break;
-
case kAtomicLoadInt8:
ASSEMBLE_ATOMIC_LOAD_INTEGER(ldrsb);
break;
@@ -2642,25 +2529,35 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(ldrex, strex);
break;
case kAtomicCompareExchangeInt8:
- __ uxtb(i.InputRegister(2), i.InputRegister(2));
- ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldrexb, strexb);
+ __ add(i.TempRegister(1), i.InputRegister(0), i.InputRegister(1));
+ __ uxtb(i.TempRegister(2), i.InputRegister(2));
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldrexb, strexb,
+ i.TempRegister(2));
__ sxtb(i.OutputRegister(0), i.OutputRegister(0));
break;
case kAtomicCompareExchangeUint8:
- __ uxtb(i.InputRegister(2), i.InputRegister(2));
- ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldrexb, strexb);
+ __ add(i.TempRegister(1), i.InputRegister(0), i.InputRegister(1));
+ __ uxtb(i.TempRegister(2), i.InputRegister(2));
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldrexb, strexb,
+ i.TempRegister(2));
break;
case kAtomicCompareExchangeInt16:
- __ uxth(i.InputRegister(2), i.InputRegister(2));
- ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldrexh, strexh);
+ __ add(i.TempRegister(1), i.InputRegister(0), i.InputRegister(1));
+ __ uxth(i.TempRegister(2), i.InputRegister(2));
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldrexh, strexh,
+ i.TempRegister(2));
__ sxth(i.OutputRegister(0), i.OutputRegister(0));
break;
case kAtomicCompareExchangeUint16:
- __ uxth(i.InputRegister(2), i.InputRegister(2));
- ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldrexh, strexh);
+ __ add(i.TempRegister(1), i.InputRegister(0), i.InputRegister(1));
+ __ uxth(i.TempRegister(2), i.InputRegister(2));
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldrexh, strexh,
+ i.TempRegister(2));
break;
case kAtomicCompareExchangeWord32:
- ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldrex, strex);
+ __ add(i.TempRegister(1), i.InputRegister(0), i.InputRegister(1));
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldrex, strex,
+ i.InputRegister(2));
break;
#define ATOMIC_BINOP_CASE(op, inst) \
case kAtomic##op##Int8: \
@@ -2686,10 +2583,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ATOMIC_BINOP_CASE(Or, orr)
ATOMIC_BINOP_CASE(Xor, eor)
#undef ATOMIC_BINOP_CASE
-#undef ASSEMBLE_CHECKED_LOAD_FP
-#undef ASSEMBLE_CHECKED_LOAD_INTEGER
-#undef ASSEMBLE_CHECKED_STORE_FP
-#undef ASSEMBLE_CHECKED_STORE_INTEGER
#undef ASSEMBLE_ATOMIC_LOAD_INTEGER
#undef ASSEMBLE_ATOMIC_STORE_INTEGER
#undef ASSEMBLE_ATOMIC_EXCHANGE_INTEGER
@@ -2774,7 +2667,7 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
gen_->RecordSafepoint(reference_map, Safepoint::kSimple, 0,
Safepoint::kNoLazyDeopt);
if (FLAG_debug_code) {
- __ stop(GetBailoutReason(kUnexpectedReturnFromWasmTrap));
+ __ stop(GetAbortReason(AbortReason::kUnexpectedReturnFromWasmTrap));
}
}
}
@@ -2878,7 +2771,7 @@ void CodeGenerator::AssembleConstructFrame() {
if (info()->is_osr()) {
// TurboFan OSR-compiled functions cannot be entered directly.
- __ Abort(kShouldNotDirectlyEnterOsrFunction);
+ __ Abort(AbortReason::kShouldNotDirectlyEnterOsrFunction);
// Unoptimized code jumps directly to this entrypoint while the unoptimized
// frame is still on the stack. Optimized code uses OSR values directly from
@@ -2929,15 +2822,16 @@ void CodeGenerator::AssembleConstructFrame() {
RecordSafepoint(reference_map, Safepoint::kSimple, 0,
Safepoint::kNoLazyDeopt);
if (FLAG_debug_code) {
- __ stop(GetBailoutReason(kUnexpectedReturnFromThrow));
+ __ stop(GetAbortReason(AbortReason::kUnexpectedReturnFromThrow));
}
__ bind(&done);
}
}
- // Skip callee-saved slots, which are pushed below.
+ // Skip callee-saved and return slots, which are pushed below.
shrink_slots -= base::bits::CountPopulation(saves);
+ shrink_slots -= frame()->GetReturnSlotCount();
shrink_slots -= 2 * base::bits::CountPopulation(saves_fp);
if (shrink_slots > 0) {
__ sub(sp, sp, Operand(shrink_slots * kPointerSize));
@@ -2953,16 +2847,29 @@ void CodeGenerator::AssembleConstructFrame() {
__ vstm(db_w, sp, DwVfpRegister::from_code(first),
DwVfpRegister::from_code(last));
}
+
if (saves != 0) {
// Save callee-saved registers.
__ stm(db_w, sp, saves);
}
+
+ const int returns = frame()->GetReturnSlotCount();
+ if (returns != 0) {
+ // Create space for returns.
+ __ sub(sp, sp, Operand(returns * kPointerSize));
+ }
}
void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
int pop_count = static_cast<int>(descriptor->StackParameterCount());
+ const int returns = frame()->GetReturnSlotCount();
+ if (returns != 0) {
+ // Free space of returns.
+ __ add(sp, sp, Operand(returns * kPointerSize));
+ }
+
// Restore registers.
const RegList saves = descriptor->CalleeSavedRegisters();
if (saves != 0) {
diff --git a/deps/v8/src/compiler/arm/instruction-codes-arm.h b/deps/v8/src/compiler/arm/instruction-codes-arm.h
index c839d25cab..a7cf80450a 100644
--- a/deps/v8/src/compiler/arm/instruction-codes-arm.h
+++ b/deps/v8/src/compiler/arm/instruction-codes-arm.h
@@ -124,6 +124,7 @@ namespace compiler {
V(ArmStr) \
V(ArmPush) \
V(ArmPoke) \
+ V(ArmPeek) \
V(ArmF32x4Splat) \
V(ArmF32x4ExtractLane) \
V(ArmF32x4ReplaceLane) \
diff --git a/deps/v8/src/compiler/arm/instruction-scheduler-arm.cc b/deps/v8/src/compiler/arm/instruction-scheduler-arm.cc
index 0092a9dbe5..a592515179 100644
--- a/deps/v8/src/compiler/arm/instruction-scheduler-arm.cc
+++ b/deps/v8/src/compiler/arm/instruction-scheduler-arm.cc
@@ -262,6 +262,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArmLdrh:
case kArmLdrsh:
case kArmLdr:
+ case kArmPeek:
return kIsLoadOperation;
case kArmVstrF32:
diff --git a/deps/v8/src/compiler/arm/instruction-selector-arm.cc b/deps/v8/src/compiler/arm/instruction-selector-arm.cc
index 4ded82fa5b..f94d114d07 100644
--- a/deps/v8/src/compiler/arm/instruction-selector-arm.cc
+++ b/deps/v8/src/compiler/arm/instruction-selector-arm.cc
@@ -300,7 +300,8 @@ void VisitBinop(InstructionSelector* selector, Node* node,
opcode = cont->Encode(opcode);
if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
- cont->kind(), cont->reason(), cont->frame_state());
+ cont->kind(), cont->reason(), cont->feedback(),
+ cont->frame_state());
} else if (cont->IsTrap()) {
inputs[input_count++] = g.UseImmediate(cont->trap_id());
selector->Emit(opcode, output_count, outputs, input_count, inputs);
@@ -721,93 +722,6 @@ void InstructionSelector::VisitUnalignedStore(Node* node) {
}
}
-void InstructionSelector::VisitCheckedLoad(Node* node) {
- CheckedLoadRepresentation load_rep = CheckedLoadRepresentationOf(node->op());
- ArmOperandGenerator g(this);
- Node* const buffer = node->InputAt(0);
- Node* const offset = node->InputAt(1);
- Node* const length = node->InputAt(2);
- ArchOpcode opcode = kArchNop;
- switch (load_rep.representation()) {
- case MachineRepresentation::kWord8:
- opcode = load_rep.IsSigned() ? kCheckedLoadInt8 : kCheckedLoadUint8;
- break;
- case MachineRepresentation::kWord16:
- opcode = load_rep.IsSigned() ? kCheckedLoadInt16 : kCheckedLoadUint16;
- break;
- case MachineRepresentation::kWord32:
- opcode = kCheckedLoadWord32;
- break;
- case MachineRepresentation::kFloat32:
- opcode = kCheckedLoadFloat32;
- break;
- case MachineRepresentation::kFloat64:
- opcode = kCheckedLoadFloat64;
- break;
- case MachineRepresentation::kBit: // Fall through.
- case MachineRepresentation::kTaggedSigned: // Fall through.
- case MachineRepresentation::kTaggedPointer: // Fall through.
- case MachineRepresentation::kTagged: // Fall through.
- case MachineRepresentation::kWord64: // Fall through.
- case MachineRepresentation::kSimd128: // Fall through.
- case MachineRepresentation::kNone:
- UNREACHABLE();
- return;
- }
- InstructionOperand offset_operand = g.UseRegister(offset);
- InstructionOperand length_operand = g.CanBeImmediate(length, kArmCmp)
- ? g.UseImmediate(length)
- : g.UseRegister(length);
- Emit(opcode | AddressingModeField::encode(kMode_Offset_RR),
- g.DefineAsRegister(node), offset_operand, length_operand,
- g.UseRegister(buffer), offset_operand);
-}
-
-
-void InstructionSelector::VisitCheckedStore(Node* node) {
- MachineRepresentation rep = CheckedStoreRepresentationOf(node->op());
- ArmOperandGenerator g(this);
- Node* const buffer = node->InputAt(0);
- Node* const offset = node->InputAt(1);
- Node* const length = node->InputAt(2);
- Node* const value = node->InputAt(3);
- ArchOpcode opcode = kArchNop;
- switch (rep) {
- case MachineRepresentation::kWord8:
- opcode = kCheckedStoreWord8;
- break;
- case MachineRepresentation::kWord16:
- opcode = kCheckedStoreWord16;
- break;
- case MachineRepresentation::kWord32:
- opcode = kCheckedStoreWord32;
- break;
- case MachineRepresentation::kFloat32:
- opcode = kCheckedStoreFloat32;
- break;
- case MachineRepresentation::kFloat64:
- opcode = kCheckedStoreFloat64;
- break;
- case MachineRepresentation::kBit: // Fall through.
- case MachineRepresentation::kTaggedSigned: // Fall through.
- case MachineRepresentation::kTaggedPointer: // Fall through.
- case MachineRepresentation::kTagged: // Fall through.
- case MachineRepresentation::kWord64: // Fall through.
- case MachineRepresentation::kSimd128: // Fall through.
- case MachineRepresentation::kNone:
- UNREACHABLE();
- return;
- }
- InstructionOperand offset_operand = g.UseRegister(offset);
- InstructionOperand length_operand = g.CanBeImmediate(length, kArmCmp)
- ? g.UseImmediate(length)
- : g.UseRegister(length);
- Emit(opcode | AddressingModeField::encode(kMode_Offset_RR), g.NoOutput(),
- offset_operand, length_operand, g.UseRegister(value),
- g.UseRegister(buffer), offset_operand);
-}
-
-
namespace {
void EmitBic(InstructionSelector* selector, Node* node, Node* left,
@@ -868,14 +782,14 @@ void InstructionSelector::VisitWord32And(Node* node) {
uint32_t const shift = mshr.right().Value();
if (((shift == 8) || (shift == 16) || (shift == 24)) &&
- (value == 0xff)) {
+ (value == 0xFF)) {
// Merge SHR into AND by emitting a UXTB instruction with a
// bytewise rotation.
Emit(kArmUxtb, g.DefineAsRegister(m.node()),
g.UseRegister(mshr.left().node()),
g.TempImmediate(mshr.right().Value()));
return;
- } else if (((shift == 8) || (shift == 16)) && (value == 0xffff)) {
+ } else if (((shift == 8) || (shift == 16)) && (value == 0xFFFF)) {
// Merge SHR into AND by emitting a UXTH instruction with a
// bytewise rotation.
Emit(kArmUxth, g.DefineAsRegister(m.node()),
@@ -897,9 +811,9 @@ void InstructionSelector::VisitWord32And(Node* node) {
}
}
}
- } else if (value == 0xffff) {
+ } else if (value == 0xFFFF) {
// Emit UXTH for this AND. We don't bother testing for UXTB, as it's no
- // better than AND 0xff for this operation.
+ // better than AND 0xFF for this operation.
Emit(kArmUxth, g.DefineAsRegister(m.node()),
g.UseRegister(m.left().node()), g.TempImmediate(0));
return;
@@ -995,7 +909,8 @@ void VisitShift(InstructionSelector* selector, Node* node,
opcode = cont->Encode(opcode);
if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
- cont->kind(), cont->reason(), cont->frame_state());
+ cont->kind(), cont->reason(), cont->feedback(),
+ cont->frame_state());
} else if (cont->IsTrap()) {
inputs[input_count++] = g.UseImmediate(cont->trap_id());
selector->Emit(opcode, output_count, outputs, input_count, inputs);
@@ -1206,6 +1121,7 @@ void InstructionSelector::VisitWord32ReverseBytes(Node* node) { UNREACHABLE(); }
void InstructionSelector::VisitWord32Popcnt(Node* node) { UNREACHABLE(); }
+void InstructionSelector::VisitSpeculationFence(Node* node) { UNREACHABLE(); }
void InstructionSelector::VisitInt32Add(Node* node) {
ArmOperandGenerator g(this);
@@ -1230,12 +1146,12 @@ void InstructionSelector::VisitInt32Add(Node* node) {
}
case IrOpcode::kWord32And: {
Int32BinopMatcher mleft(m.left().node());
- if (mleft.right().Is(0xff)) {
+ if (mleft.right().Is(0xFF)) {
Emit(kArmUxtab, g.DefineAsRegister(node),
g.UseRegister(m.right().node()),
g.UseRegister(mleft.left().node()), g.TempImmediate(0));
return;
- } else if (mleft.right().Is(0xffff)) {
+ } else if (mleft.right().Is(0xFFFF)) {
Emit(kArmUxtah, g.DefineAsRegister(node),
g.UseRegister(m.right().node()),
g.UseRegister(mleft.left().node()), g.TempImmediate(0));
@@ -1284,12 +1200,12 @@ void InstructionSelector::VisitInt32Add(Node* node) {
}
case IrOpcode::kWord32And: {
Int32BinopMatcher mright(m.right().node());
- if (mright.right().Is(0xff)) {
+ if (mright.right().Is(0xFF)) {
Emit(kArmUxtab, g.DefineAsRegister(node),
g.UseRegister(m.left().node()),
g.UseRegister(mright.left().node()), g.TempImmediate(0));
return;
- } else if (mright.right().Is(0xffff)) {
+ } else if (mright.right().Is(0xFFFF)) {
Emit(kArmUxtah, g.DefineAsRegister(node),
g.UseRegister(m.left().node()),
g.UseRegister(mright.left().node()), g.TempImmediate(0));
@@ -1358,7 +1274,8 @@ void EmitInt32MulWithOverflow(InstructionSelector* selector, Node* node,
} else if (cont->IsDeoptimize()) {
InstructionOperand in[] = {temp_operand, result_operand, shift_31};
selector->EmitDeoptimize(opcode, 0, nullptr, 3, in, cont->kind(),
- cont->reason(), cont->frame_state());
+ cont->reason(), cont->feedback(),
+ cont->frame_state());
} else if (cont->IsSet()) {
selector->Emit(opcode, g.DefineAsRegister(cont->result()), temp_operand,
result_operand, shift_31);
@@ -1596,22 +1513,44 @@ void InstructionSelector::EmitPrepareArguments(
// Poke any stack arguments.
for (size_t n = 0; n < arguments->size(); ++n) {
PushParameter input = (*arguments)[n];
- if (input.node()) {
+ if (input.node) {
int slot = static_cast<int>(n);
Emit(kArmPoke | MiscField::encode(slot), g.NoOutput(),
- g.UseRegister(input.node()));
+ g.UseRegister(input.node));
}
}
} else {
// Push any stack arguments.
for (PushParameter input : base::Reversed(*arguments)) {
// Skip any alignment holes in pushed nodes.
- if (input.node() == nullptr) continue;
- Emit(kArmPush, g.NoOutput(), g.UseRegister(input.node()));
+ if (input.node == nullptr) continue;
+ Emit(kArmPush, g.NoOutput(), g.UseRegister(input.node));
}
}
}
+void InstructionSelector::EmitPrepareResults(ZoneVector<PushParameter>* results,
+ const CallDescriptor* descriptor,
+ Node* node) {
+ ArmOperandGenerator g(this);
+
+ int reverse_slot = 0;
+ for (PushParameter output : *results) {
+ if (!output.location.IsCallerFrameSlot()) continue;
+ // Skip any alignment holes in nodes.
+ if (output.node != nullptr) {
+ DCHECK(!descriptor->IsCFunctionCall());
+ if (output.location.GetType() == MachineType::Float32()) {
+ MarkAsFloat32(output.node);
+ } else if (output.location.GetType() == MachineType::Float64()) {
+ MarkAsFloat64(output.node);
+ }
+ Emit(kArmPeek, g.DefineAsRegister(output.node),
+ g.UseImmediate(reverse_slot));
+ }
+ reverse_slot += output.location.GetSizeInPointers();
+ }
+}
bool InstructionSelector::IsTailCallAddressImmediate() { return false; }
@@ -1630,7 +1569,8 @@ void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
g.Label(cont->true_block()), g.Label(cont->false_block()));
} else if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, g.NoOutput(), left, right, cont->kind(),
- cont->reason(), cont->frame_state());
+ cont->reason(), cont->feedback(),
+ cont->frame_state());
} else if (cont->IsSet()) {
selector->Emit(opcode, g.DefineAsRegister(cont->result()), left, right);
} else {
@@ -1825,7 +1765,8 @@ void VisitWordCompare(InstructionSelector* selector, Node* node,
opcode = cont->Encode(opcode);
if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
- cont->kind(), cont->reason(), cont->frame_state());
+ cont->kind(), cont->reason(), cont->feedback(),
+ cont->frame_state());
} else if (cont->IsTrap()) {
inputs[input_count++] = g.UseImmediate(cont->trap_id());
selector->Emit(opcode, output_count, outputs, input_count, inputs);
@@ -1984,7 +1925,8 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
g.Label(cont->true_block()), g.Label(cont->false_block()));
} else if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, g.NoOutput(), value_operand, value_operand,
- cont->kind(), cont->reason(), cont->frame_state());
+ cont->kind(), cont->reason(), cont->feedback(),
+ cont->frame_state());
} else if (cont->IsSet()) {
selector->Emit(opcode, g.DefineAsRegister(cont->result()), value_operand,
value_operand);
@@ -2006,14 +1948,14 @@ void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
void InstructionSelector::VisitDeoptimizeIf(Node* node) {
DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
- kNotEqual, p.kind(), p.reason(), node->InputAt(1));
+ kNotEqual, p.kind(), p.reason(), p.feedback(), node->InputAt(1));
VisitWordCompareZero(this, node, node->InputAt(0), &cont);
}
void InstructionSelector::VisitDeoptimizeUnless(Node* node) {
DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
- kEqual, p.kind(), p.reason(), node->InputAt(1));
+ kEqual, p.kind(), p.reason(), p.feedback(), node->InputAt(1));
VisitWordCompareZero(this, node, node->InputAt(0), &cont);
}
@@ -2276,15 +2218,14 @@ void InstructionSelector::VisitAtomicExchange(Node* node) {
AddressingMode addressing_mode = kMode_Offset_RR;
InstructionOperand inputs[3];
size_t input_count = 0;
- inputs[input_count++] = g.UseUniqueRegister(base);
+ inputs[input_count++] = g.UseRegister(base);
inputs[input_count++] = g.UseRegister(index);
inputs[input_count++] = g.UseUniqueRegister(value);
InstructionOperand outputs[1];
outputs[0] = g.DefineAsRegister(node);
- InstructionOperand temp[1];
- temp[0] = g.TempRegister();
+ InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
- Emit(code, 1, outputs, input_count, inputs, 1, temp);
+ Emit(code, 1, outputs, input_count, inputs, arraysize(temps), temps);
}
void InstructionSelector::VisitAtomicCompareExchange(Node* node) {
@@ -2313,16 +2254,16 @@ void InstructionSelector::VisitAtomicCompareExchange(Node* node) {
AddressingMode addressing_mode = kMode_Offset_RR;
InstructionOperand inputs[4];
size_t input_count = 0;
- inputs[input_count++] = g.UseUniqueRegister(base);
+ inputs[input_count++] = g.UseRegister(base);
inputs[input_count++] = g.UseRegister(index);
inputs[input_count++] = g.UseUniqueRegister(old_value);
inputs[input_count++] = g.UseUniqueRegister(new_value);
InstructionOperand outputs[1];
outputs[0] = g.DefineAsRegister(node);
- InstructionOperand temp[1];
- temp[0] = g.TempRegister();
+ InstructionOperand temps[] = {g.TempRegister(), g.TempRegister(),
+ g.TempRegister()};
InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
- Emit(code, 1, outputs, input_count, inputs, 1, temp);
+ Emit(code, 1, outputs, input_count, inputs, arraysize(temps), temps);
}
void InstructionSelector::VisitAtomicBinaryOperation(
@@ -2352,17 +2293,15 @@ void InstructionSelector::VisitAtomicBinaryOperation(
AddressingMode addressing_mode = kMode_Offset_RR;
InstructionOperand inputs[3];
size_t input_count = 0;
- inputs[input_count++] = g.UseUniqueRegister(base);
+ inputs[input_count++] = g.UseRegister(base);
inputs[input_count++] = g.UseRegister(index);
inputs[input_count++] = g.UseUniqueRegister(value);
InstructionOperand outputs[1];
outputs[0] = g.DefineAsRegister(node);
- InstructionOperand temps[2];
- size_t temp_count = 0;
- temps[temp_count++] = g.TempRegister();
- temps[temp_count++] = g.TempRegister();
+ InstructionOperand temps[] = {g.TempRegister(), g.TempRegister(),
+ g.TempRegister()};
InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
- Emit(code, 1, outputs, input_count, inputs, temp_count, temps);
+ Emit(code, 1, outputs, input_count, inputs, arraysize(temps), temps);
}
#define VISIT_ATOMIC_BINOP(op) \
diff --git a/deps/v8/src/compiler/arm64/code-generator-arm64.cc b/deps/v8/src/compiler/arm64/code-generator-arm64.cc
index 3673ee2426..147d85a171 100644
--- a/deps/v8/src/compiler/arm64/code-generator-arm64.cc
+++ b/deps/v8/src/compiler/arm64/code-generator-arm64.cc
@@ -264,46 +264,6 @@ class Arm64OperandConverter final : public InstructionOperandConverter {
namespace {
-class OutOfLineLoadNaN32 final : public OutOfLineCode {
- public:
- OutOfLineLoadNaN32(CodeGenerator* gen, DoubleRegister result)
- : OutOfLineCode(gen), result_(result) {}
-
- void Generate() final {
- __ Fmov(result_, std::numeric_limits<float>::quiet_NaN());
- }
-
- private:
- DoubleRegister const result_;
-};
-
-
-class OutOfLineLoadNaN64 final : public OutOfLineCode {
- public:
- OutOfLineLoadNaN64(CodeGenerator* gen, DoubleRegister result)
- : OutOfLineCode(gen), result_(result) {}
-
- void Generate() final {
- __ Fmov(result_, std::numeric_limits<double>::quiet_NaN());
- }
-
- private:
- DoubleRegister const result_;
-};
-
-
-class OutOfLineLoadZero final : public OutOfLineCode {
- public:
- OutOfLineLoadZero(CodeGenerator* gen, Register result)
- : OutOfLineCode(gen), result_(result) {}
-
- void Generate() final { __ Mov(result_, 0); }
-
- private:
- Register const result_;
-};
-
-
class OutOfLineRecordWrite final : public OutOfLineCode {
public:
OutOfLineRecordWrite(CodeGenerator* gen, Register object, Operand index,
@@ -336,14 +296,14 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
if (must_save_lr_) {
// We need to save and restore lr if the frame was elided.
- __ Push(lr);
+ __ Push(lr, padreg);
unwinding_info_writer_->MarkLinkRegisterOnTopOfStack(__ pc_offset(),
__ StackPointer());
}
__ CallRecordWriteStub(object_, scratch1_, remembered_set_action,
save_fp_mode);
if (must_save_lr_) {
- __ Pop(lr);
+ __ Pop(padreg, lr);
unwinding_info_writer_->MarkPopLinkRegisterFromTopOfStack(__ pc_offset());
}
}
@@ -416,90 +376,6 @@ Condition FlagsConditionToCondition(FlagsCondition condition) {
} // namespace
-#define ASSEMBLE_BOUNDS_CHECK(offset, length, out_of_bounds) \
- do { \
- if (length.IsImmediate() && \
- base::bits::IsPowerOfTwo(length.ImmediateValue())) { \
- __ Tst(offset, ~(length.ImmediateValue() - 1)); \
- __ B(ne, out_of_bounds); \
- } else { \
- __ Cmp(offset, length); \
- __ B(hs, out_of_bounds); \
- } \
- } while (0)
-
-#define ASSEMBLE_CHECKED_LOAD_FLOAT(width) \
- do { \
- auto result = i.OutputFloat##width##Register(); \
- auto buffer = i.InputRegister(0); \
- auto offset = i.InputRegister32(1); \
- auto length = i.InputOperand32(2); \
- auto ool = new (zone()) OutOfLineLoadNaN##width(this, result); \
- ASSEMBLE_BOUNDS_CHECK(offset, length, ool->entry()); \
- __ Ldr(result, MemOperand(buffer, offset, UXTW)); \
- __ Bind(ool->exit()); \
- } while (0)
-
-#define ASSEMBLE_CHECKED_LOAD_INTEGER(asm_instr) \
- do { \
- auto result = i.OutputRegister32(); \
- auto buffer = i.InputRegister(0); \
- auto offset = i.InputRegister32(1); \
- auto length = i.InputOperand32(2); \
- auto ool = new (zone()) OutOfLineLoadZero(this, result); \
- ASSEMBLE_BOUNDS_CHECK(offset, length, ool->entry()); \
- __ asm_instr(result, MemOperand(buffer, offset, UXTW)); \
- __ Bind(ool->exit()); \
- } while (0)
-
-#define ASSEMBLE_CHECKED_LOAD_INTEGER_64(asm_instr) \
- do { \
- auto result = i.OutputRegister(); \
- auto buffer = i.InputRegister(0); \
- auto offset = i.InputRegister32(1); \
- auto length = i.InputOperand32(2); \
- auto ool = new (zone()) OutOfLineLoadZero(this, result); \
- ASSEMBLE_BOUNDS_CHECK(offset, length, ool->entry()); \
- __ asm_instr(result, MemOperand(buffer, offset, UXTW)); \
- __ Bind(ool->exit()); \
- } while (0)
-
-#define ASSEMBLE_CHECKED_STORE_FLOAT(width) \
- do { \
- auto buffer = i.InputRegister(0); \
- auto offset = i.InputRegister32(1); \
- auto length = i.InputOperand32(2); \
- auto value = i.InputFloat##width##OrZeroRegister(3); \
- Label done; \
- ASSEMBLE_BOUNDS_CHECK(offset, length, &done); \
- __ Str(value, MemOperand(buffer, offset, UXTW)); \
- __ Bind(&done); \
- } while (0)
-
-#define ASSEMBLE_CHECKED_STORE_INTEGER(asm_instr) \
- do { \
- auto buffer = i.InputRegister(0); \
- auto offset = i.InputRegister32(1); \
- auto length = i.InputOperand32(2); \
- auto value = i.InputOrZeroRegister32(3); \
- Label done; \
- ASSEMBLE_BOUNDS_CHECK(offset, length, &done); \
- __ asm_instr(value, MemOperand(buffer, offset, UXTW)); \
- __ Bind(&done); \
- } while (0)
-
-#define ASSEMBLE_CHECKED_STORE_INTEGER_64(asm_instr) \
- do { \
- auto buffer = i.InputRegister(0); \
- auto offset = i.InputRegister32(1); \
- auto length = i.InputOperand32(2); \
- auto value = i.InputOrZeroRegister64(3); \
- Label done; \
- ASSEMBLE_BOUNDS_CHECK(offset, length, &done); \
- __ asm_instr(value, MemOperand(buffer, offset, UXTW)); \
- __ Bind(&done); \
- } while (0)
-
#define ASSEMBLE_SHIFT(asm_instr, width) \
do { \
if (instr->InputAt(1)->IsRegister()) { \
@@ -579,12 +455,7 @@ Condition FlagsConditionToCondition(FlagsCondition condition) {
} while (0)
void CodeGenerator::AssembleDeconstructFrame() {
- const CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
- if (descriptor->IsCFunctionCall() || descriptor->UseNativeStack()) {
- __ Mov(csp, fp);
- } else {
- __ Mov(jssp, fp);
- }
+ __ Mov(csp, fp);
__ Pop(fp, lr);
unwinding_info_writer_.MarkFrameDeconstructed(__ pc_offset());
@@ -633,6 +504,7 @@ void AdjustStackPointerForTailCall(TurboAssembler* tasm,
int current_sp_offset = state->GetSPToFPSlotCount() +
StandardFrameConstants::kFixedSlotCountAboveFp;
int stack_slot_delta = new_slot_above_sp - current_sp_offset;
+ DCHECK_EQ(stack_slot_delta % 2, 0);
if (stack_slot_delta > 0) {
tasm->Claim(stack_slot_delta);
state->IncreaseSPDelta(stack_slot_delta);
@@ -652,31 +524,48 @@ void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr,
void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
int first_unused_stack_slot) {
+ DCHECK_EQ(first_unused_stack_slot % 2, 0);
AdjustStackPointerForTailCall(tasm(), frame_access_state(),
first_unused_stack_slot);
+ DCHECK(instr->IsTailCall());
+ InstructionOperandConverter g(this, instr);
+ int optional_padding_slot = g.InputInt32(instr->InputCount() - 2);
+ if (optional_padding_slot % 2) {
+ __ Poke(padreg, optional_padding_slot * kPointerSize);
+ }
}
// Check if the code object is marked for deoptimization. If it is, then it
// jumps to the CompileLazyDeoptimizedCode builtin. In order to do this we need
// to:
-// 1. load the address of the current instruction;
+// 1. compute the offset of the {CodeDataContainer} from our current location
+// and load it.
// 2. read from memory the word that contains that bit, which can be found in
// the flags in the referenced {CodeDataContainer} object;
// 3. test kMarkedForDeoptimizationBit in those flags; and
// 4. if it is not zero then it jumps to the builtin.
void CodeGenerator::BailoutIfDeoptimized() {
- Label current;
- // The Adr instruction gets the address of the current instruction.
- __ Adr(x2, &current);
- __ Bind(&current);
- int pc = __ pc_offset();
- int offset = Code::kCodeDataContainerOffset - (Code::kHeaderSize + pc);
- __ Ldr(x2, MemOperand(x2, offset));
- __ Ldr(x2, FieldMemOperand(x2, CodeDataContainer::kKindSpecificFlagsOffset));
- __ Tst(x2, Immediate(1 << Code::kMarkedForDeoptimizationBit));
+ UseScratchRegisterScope temps(tasm());
+ Register scratch = temps.AcquireX();
+ {
+ // Since we always emit a bailout check at the very beginning we can be
+ // certain that the distance between here and the {CodeDataContainer} is
+ // fixed and always in range of a load.
+ int data_container_offset =
+ (Code::kCodeDataContainerOffset - Code::kHeaderSize) - __ pc_offset();
+ DCHECK_GE(0, data_container_offset);
+ DCHECK_EQ(0, data_container_offset % 4);
+ InstructionAccurateScope scope(tasm());
+ __ ldr_pcrel(scratch, data_container_offset >> 2);
+ }
+ __ Ldr(scratch,
+ FieldMemOperand(scratch, CodeDataContainer::kKindSpecificFlagsOffset));
+ Label not_deoptimized;
+ __ Tbz(scratch, Code::kMarkedForDeoptimizationBit, &not_deoptimized);
Handle<Code> code = isolate()->builtins()->builtin_handle(
Builtins::kCompileLazyDeoptimizedCode);
- __ Jump(code, RelocInfo::CODE_TARGET, ne);
+ __ Jump(code, RelocInfo::CODE_TARGET);
+ __ Bind(&not_deoptimized);
}
// Assembles an instruction after register allocation, producing machine code.
@@ -700,18 +589,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Call(target);
}
RecordCallPosition(instr);
- // TODO(titzer): this is ugly. JSSP should be a caller-save register
- // in this case, but it is not possible to express in the register
- // allocator.
- CallDescriptor::Flags flags(MiscField::decode(opcode));
- if (flags & CallDescriptor::kRestoreJSSP) {
- __ Ldr(jssp, MemOperand(csp));
- __ Mov(csp, jssp);
- }
- if (flags & CallDescriptor::kRestoreCSP) {
- __ Mov(csp, jssp);
- __ AssertCspAligned();
- }
frame_access_state()->ClearSPDelta();
break;
}
@@ -734,18 +611,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Call(target);
}
RecordCallPosition(instr);
- // TODO(titzer): this is ugly. JSSP should be a caller-save register
- // in this case, but it is not possible to express in the register
- // allocator.
- CallDescriptor::Flags flags(MiscField::decode(opcode));
- if (flags & CallDescriptor::kRestoreJSSP) {
- __ Ldr(jssp, MemOperand(csp));
- __ Mov(csp, jssp);
- }
- if (flags & CallDescriptor::kRestoreCSP) {
- __ Mov(csp, jssp);
- __ AssertCspAligned();
- }
frame_access_state()->ClearSPDelta();
break;
}
@@ -813,24 +678,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Register temp = scope.AcquireX();
__ Ldr(temp, FieldMemOperand(func, JSFunction::kContextOffset));
__ cmp(cp, temp);
- __ Assert(eq, kWrongFunctionContext);
+ __ Assert(eq, AbortReason::kWrongFunctionContext);
}
__ Ldr(x10, FieldMemOperand(func, JSFunction::kCodeOffset));
__ Add(x10, x10, Operand(Code::kHeaderSize - kHeapObjectTag));
__ Call(x10);
RecordCallPosition(instr);
- // TODO(titzer): this is ugly. JSSP should be a caller-save register
- // in this case, but it is not possible to express in the register
- // allocator.
- CallDescriptor::Flags flags(MiscField::decode(opcode));
- if (flags & CallDescriptor::kRestoreJSSP) {
- __ Ldr(jssp, MemOperand(csp));
- __ Mov(csp, jssp);
- }
- if (flags & CallDescriptor::kRestoreCSP) {
- __ Mov(csp, jssp);
- __ AssertCspAligned();
- }
frame_access_state()->ClearSPDelta();
break;
}
@@ -1339,75 +1192,25 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArm64CompareAndBranch:
// Pseudo instruction turned into cbz/cbnz in AssembleArchBranch.
break;
- case kArm64ClaimCSP: {
- int count = RoundUp(i.InputInt32(0), 2);
- Register prev = __ StackPointer();
- if (prev.Is(jssp)) {
- // TODO(titzer): make this a macro-assembler method.
- // Align the CSP and store the previous JSSP on the stack. We do not
- // need to modify the SP delta here, as we will continue to access the
- // frame via JSSP.
- UseScratchRegisterScope scope(tasm());
- Register tmp = scope.AcquireX();
-
- // TODO(arm64): Storing JSSP on the stack is redundant when calling a C
- // function, as JSSP is callee-saved (we still need to do this when
- // calling a code object that uses the CSP as the stack pointer). See
- // the code generation for kArchCallCodeObject vs. kArchCallCFunction
- // (the latter does not restore CSP/JSSP).
- // TurboAssembler::CallCFunction() (safely) drops this extra slot
- // anyway.
- int sp_alignment = __ ActivationFrameAlignment();
- __ Sub(tmp, jssp, kPointerSize);
- __ Bic(csp, tmp, sp_alignment - 1);
- __ Str(jssp, MemOperand(csp));
- if (count > 0) {
- __ SetStackPointer(csp);
- __ Claim(count);
- __ SetStackPointer(prev);
- }
- } else {
- __ AssertCspAligned();
- if (count > 0) {
- __ Claim(count);
- frame_access_state()->IncreaseSPDelta(count);
- }
- }
- break;
- }
- case kArm64ClaimJSSP: {
+ case kArm64Claim: {
int count = i.InputInt32(0);
- if (csp.Is(__ StackPointer())) {
- // No JSSP is set up. Compute it from the CSP.
- __ AssertCspAligned();
- if (count > 0) {
- int even = RoundUp(count, 2);
- __ Sub(jssp, csp, count * kPointerSize);
- // We must also update CSP to maintain stack consistency:
- __ Sub(csp, csp, even * kPointerSize); // Must always be aligned.
- __ AssertStackConsistency();
- frame_access_state()->IncreaseSPDelta(even);
- } else {
- __ Mov(jssp, csp);
- }
- } else {
- // JSSP is the current stack pointer, just use regular Claim().
+ DCHECK_EQ(count % 2, 0);
+ __ AssertCspAligned();
+ if (count > 0) {
__ Claim(count);
frame_access_state()->IncreaseSPDelta(count);
}
break;
}
- case kArm64PokeCSP: // fall through
- case kArm64PokeJSSP: {
- Register prev = __ StackPointer();
- __ SetStackPointer(arch_opcode == kArm64PokeCSP ? csp : jssp);
+ case kArm64Poke: {
Operand operand(i.InputInt32(1) * kPointerSize);
- if (instr->InputAt(0)->IsFPRegister()) {
+ if (instr->InputAt(0)->IsSimd128Register()) {
+ __ Poke(i.InputSimd128Register(0), operand);
+ } else if (instr->InputAt(0)->IsFPRegister()) {
__ Poke(i.InputFloat64Register(0), operand);
} else {
- __ Poke(i.InputRegister(0), operand);
+ __ Poke(i.InputOrZeroRegister64(0), operand);
}
- __ SetStackPointer(prev);
break;
}
case kArm64PokePair: {
@@ -1421,6 +1224,23 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
break;
}
+ case kArm64Peek: {
+ int reverse_slot = i.InputInt32(0);
+ int offset =
+ FrameSlotToFPOffset(frame()->GetTotalFrameSlotCount() - reverse_slot);
+ if (instr->OutputAt(0)->IsFPRegister()) {
+ LocationOperand* op = LocationOperand::cast(instr->OutputAt(0));
+ if (op->representation() == MachineRepresentation::kFloat64) {
+ __ Ldr(i.OutputDoubleRegister(), MemOperand(fp, offset));
+ } else {
+ DCHECK_EQ(MachineRepresentation::kFloat32, op->representation());
+ __ Ldr(i.OutputFloatRegister(), MemOperand(fp, offset));
+ }
+ } else {
+ __ Ldr(i.OutputRegister(), MemOperand(fp, offset));
+ }
+ break;
+ }
case kArm64Clz:
__ Clz(i.OutputRegister64(), i.InputRegister64(0));
break;
@@ -1652,28 +1472,16 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Fmov(i.OutputRegister32(), i.InputFloat32Register(0));
break;
case kArm64Float64ExtractHighWord32:
- // TODO(arm64): This should use MOV (to general) when NEON is supported.
- __ Fmov(i.OutputRegister(), i.InputFloat64Register(0));
- __ Lsr(i.OutputRegister(), i.OutputRegister(), 32);
+ __ Umov(i.OutputRegister32(), i.InputFloat64Register(0).V2S(), 1);
break;
- case kArm64Float64InsertLowWord32: {
- // TODO(arm64): This should use MOV (from general) when NEON is supported.
- UseScratchRegisterScope scope(tasm());
- Register tmp = scope.AcquireX();
- __ Fmov(tmp, i.InputFloat64Register(0));
- __ Bfi(tmp, i.InputRegister(1), 0, 32);
- __ Fmov(i.OutputFloat64Register(), tmp);
+ case kArm64Float64InsertLowWord32:
+ DCHECK(i.OutputFloat64Register().Is(i.InputFloat64Register(0)));
+ __ Ins(i.OutputFloat64Register().V2S(), 0, i.InputRegister32(1));
break;
- }
- case kArm64Float64InsertHighWord32: {
- // TODO(arm64): This should use MOV (from general) when NEON is supported.
- UseScratchRegisterScope scope(tasm());
- Register tmp = scope.AcquireX();
- __ Fmov(tmp.W(), i.InputFloat32Register(0));
- __ Bfi(tmp, i.InputRegister(1), 32, 32);
- __ Fmov(i.OutputFloat64Register(), tmp);
+ case kArm64Float64InsertHighWord32:
+ DCHECK(i.OutputFloat64Register().Is(i.InputFloat64Register(0)));
+ __ Ins(i.OutputFloat64Register().V2S(), 1, i.InputRegister32(1));
break;
- }
case kArm64Float64MoveU64:
__ Fmov(i.OutputFloat64Register(), i.InputRegister(0));
break;
@@ -1734,48 +1542,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArm64StrQ:
__ Str(i.InputSimd128Register(0), i.MemoryOperand(1));
break;
- case kCheckedLoadInt8:
- ASSEMBLE_CHECKED_LOAD_INTEGER(Ldrsb);
- break;
- case kCheckedLoadUint8:
- ASSEMBLE_CHECKED_LOAD_INTEGER(Ldrb);
- break;
- case kCheckedLoadInt16:
- ASSEMBLE_CHECKED_LOAD_INTEGER(Ldrsh);
- break;
- case kCheckedLoadUint16:
- ASSEMBLE_CHECKED_LOAD_INTEGER(Ldrh);
- break;
- case kCheckedLoadWord32:
- ASSEMBLE_CHECKED_LOAD_INTEGER(Ldr);
- break;
- case kCheckedLoadWord64:
- ASSEMBLE_CHECKED_LOAD_INTEGER_64(Ldr);
- break;
- case kCheckedLoadFloat32:
- ASSEMBLE_CHECKED_LOAD_FLOAT(32);
- break;
- case kCheckedLoadFloat64:
- ASSEMBLE_CHECKED_LOAD_FLOAT(64);
- break;
- case kCheckedStoreWord8:
- ASSEMBLE_CHECKED_STORE_INTEGER(Strb);
- break;
- case kCheckedStoreWord16:
- ASSEMBLE_CHECKED_STORE_INTEGER(Strh);
- break;
- case kCheckedStoreWord32:
- ASSEMBLE_CHECKED_STORE_INTEGER(Str);
- break;
- case kCheckedStoreWord64:
- ASSEMBLE_CHECKED_STORE_INTEGER_64(Str);
- break;
- case kCheckedStoreFloat32:
- ASSEMBLE_CHECKED_STORE_FLOAT(32);
- break;
- case kCheckedStoreFloat64:
- ASSEMBLE_CHECKED_STORE_FLOAT(64);
- break;
case kAtomicLoadInt8:
ASSEMBLE_ATOMIC_LOAD_INTEGER(Ldarb);
__ Sxtb(i.OutputRegister(0), i.OutputRegister(0));
@@ -1860,13 +1626,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ATOMIC_BINOP_CASE(Or, Orr)
ATOMIC_BINOP_CASE(Xor, Eor)
#undef ATOMIC_BINOP_CASE
-#undef ASSEMBLE_BOUNDS_CHECK
-#undef ASSEMBLE_CHECKED_LOAD_FLOAT
-#undef ASSEMBLE_CHECKED_LOAD_INTEGER
-#undef ASSEMBLE_CHECKED_LOAD_INTEGER_64
-#undef ASSEMBLE_CHECKED_STORE_FLOAT
-#undef ASSEMBLE_CHECKED_STORE_INTEGER
-#undef ASSEMBLE_CHECKED_STORE_INTEGER_64
#undef ASSEMBLE_SHIFT
#undef ASSEMBLE_ATOMIC_LOAD_INTEGER
#undef ASSEMBLE_ATOMIC_STORE_INTEGER
@@ -2437,8 +2196,6 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
__ Ret();
} else {
DCHECK(csp.Is(__ StackPointer()));
- // Initialize the jssp because it is required for the runtime call.
- __ Mov(jssp, csp);
gen_->AssembleSourcePosition(instr_);
__ Call(__ isolate()->builtins()->builtin_handle(trap_id),
RelocInfo::CODE_TARGET);
@@ -2512,12 +2269,6 @@ void CodeGenerator::FinishFrame(Frame* frame) {
frame->AlignFrame(16);
CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
- if (descriptor->UseNativeStack() || descriptor->IsCFunctionCall()) {
- __ SetStackPointer(csp);
- } else {
- __ SetStackPointer(jssp);
- }
-
// Save FP registers.
CPURegList saves_fp = CPURegList(CPURegister::kVRegister, kDRegSizeInBits,
descriptor->CalleeSavedFPRegisters());
@@ -2540,10 +2291,10 @@ void CodeGenerator::FinishFrame(Frame* frame) {
void CodeGenerator::AssembleConstructFrame() {
CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
- if (descriptor->UseNativeStack()) {
- __ AssertCspAligned();
- }
+ __ AssertCspAligned();
+ // The frame has been previously padded in CodeGenerator::FinishFrame().
+ DCHECK_EQ(frame()->GetTotalFrameSlotCount() % 2, 0);
int shrink_slots =
frame()->GetTotalFrameSlotCount() - descriptor->CalculateFixedFrameSize();
@@ -2551,11 +2302,13 @@ void CodeGenerator::AssembleConstructFrame() {
descriptor->CalleeSavedRegisters());
CPURegList saves_fp = CPURegList(CPURegister::kVRegister, kDRegSizeInBits,
descriptor->CalleeSavedFPRegisters());
+ // The number of slots for returns has to be even to ensure the correct stack
+ // alignment.
+ const int returns = RoundUp(frame()->GetReturnSlotCount(), 2);
if (frame_access_state()->has_frame()) {
// Link the frame
if (descriptor->IsJSFunctionCall()) {
- DCHECK(!descriptor->UseNativeStack());
__ Prologue();
} else {
__ Push(lr, fp);
@@ -2566,7 +2319,7 @@ void CodeGenerator::AssembleConstructFrame() {
// Create OSR entry if applicable
if (info()->is_osr()) {
// TurboFan OSR-compiled functions cannot be entered directly.
- __ Abort(kShouldNotDirectlyEnterOsrFunction);
+ __ Abort(AbortReason::kShouldNotDirectlyEnterOsrFunction);
// Unoptimized code jumps directly to this entrypoint while the
// unoptimized frame is still on the stack. Optimized code uses OSR values
@@ -2604,10 +2357,7 @@ void CodeGenerator::AssembleConstructFrame() {
__ EnterFrame(StackFrame::WASM_COMPILED);
}
DCHECK(__ StackPointer().Is(csp));
- __ SetStackPointer(jssp);
__ AssertStackConsistency();
- // Initialize the jssp because it is required for the runtime call.
- __ Mov(jssp, csp);
__ Mov(cp, Smi::kZero);
__ CallRuntimeDelayed(zone(), Runtime::kThrowWasmStackOverflow);
// We come from WebAssembly, there are no references for the GC.
@@ -2617,7 +2367,6 @@ void CodeGenerator::AssembleConstructFrame() {
if (FLAG_debug_code) {
__ Brk(0);
}
- __ SetStackPointer(csp);
__ AssertStackConsistency();
__ Bind(&done);
}
@@ -2625,6 +2374,7 @@ void CodeGenerator::AssembleConstructFrame() {
// Skip callee-saved slots, which are pushed below.
shrink_slots -= saves.Count();
shrink_slots -= saves_fp.Count();
+ shrink_slots -= returns;
// Build remainder of frame, including accounting for and filling-in
// frame-specific header information, i.e. claiming the extra slot that
@@ -2667,11 +2417,21 @@ void CodeGenerator::AssembleConstructFrame() {
// CPURegList::GetCalleeSaved(): x30 is missing.
// DCHECK(saves.list() == CPURegList::GetCalleeSaved().list());
__ PushCPURegList(saves);
+
+ if (returns != 0) {
+ __ Claim(returns);
+ }
}
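Editor's note on the rounding above: with the dual stack pointer (jssp) removed, everything goes through csp, which must stay 16-byte aligned while frame slots are 8 bytes, so return-slot counts are rounded up to an even number before they are claimed or dropped. A tiny sketch of that invariant (assumed helper, not part of the patch):

// RoundUp(n, 2) for non-negative slot counts: 8-byte slots with a 16-byte
// aligned csp means every claimed or dropped slot count must be even.
constexpr int RoundUpToEven(int slots) { return (slots + 1) & ~1; }

static_assert(RoundUpToEven(0) == 0, "no padding needed");
static_assert(RoundUpToEven(3) == 4, "odd counts get one padding slot");
static_assert(RoundUpToEven(4) == 4, "even counts are unchanged");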
void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
+ const int returns = RoundUp(frame()->GetReturnSlotCount(), 2);
+
+ if (returns != 0) {
+ __ Drop(returns);
+ }
+
// Restore registers.
CPURegList saves = CPURegList(CPURegister::kRegister, kXRegSizeInBits,
descriptor->CalleeSavedRegisters());
@@ -2698,33 +2458,22 @@ void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
} else {
__ Bind(&return_label_);
AssembleDeconstructFrame();
- if (descriptor->UseNativeStack()) {
- pop_count += (pop_count & 1); // align
- }
}
} else {
AssembleDeconstructFrame();
- if (descriptor->UseNativeStack()) {
- pop_count += (pop_count & 1); // align
- }
}
- } else if (descriptor->UseNativeStack()) {
- pop_count += (pop_count & 1); // align
}
if (pop->IsImmediate()) {
- DCHECK_EQ(Constant::kInt32, g.ToConstant(pop).type());
pop_count += g.ToConstant(pop).ToInt32();
- __ Drop(pop_count);
+ __ DropArguments(pop_count);
} else {
Register pop_reg = g.ToRegister(pop);
__ Add(pop_reg, pop_reg, pop_count);
- __ Drop(pop_reg);
+ __ DropArguments(pop_reg);
}
- if (descriptor->UseNativeStack()) {
- __ AssertCspAligned();
- }
+ __ AssertCspAligned();
__ Ret();
}
diff --git a/deps/v8/src/compiler/arm64/instruction-codes-arm64.h b/deps/v8/src/compiler/arm64/instruction-codes-arm64.h
index 6354dfc4db..820b55a99d 100644
--- a/deps/v8/src/compiler/arm64/instruction-codes-arm64.h
+++ b/deps/v8/src/compiler/arm64/instruction-codes-arm64.h
@@ -79,11 +79,10 @@ namespace compiler {
V(Arm64TestAndBranch) \
V(Arm64CompareAndBranch32) \
V(Arm64CompareAndBranch) \
- V(Arm64ClaimCSP) \
- V(Arm64ClaimJSSP) \
- V(Arm64PokeCSP) \
- V(Arm64PokeJSSP) \
+ V(Arm64Claim) \
+ V(Arm64Poke) \
V(Arm64PokePair) \
+ V(Arm64Peek) \
V(Arm64Float32Cmp) \
V(Arm64Float32Add) \
V(Arm64Float32Sub) \
@@ -326,8 +325,6 @@ namespace compiler {
V(Operand2_R_SXTH) /* %r0 SXTH (signed extend halfword) */ \
V(Operand2_R_SXTW) /* %r0 SXTW (signed extend word) */
-enum ResetJSSPAfterCall { kNoResetJSSP, kResetJSSP };
-
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compiler/arm64/instruction-scheduler-arm64.cc b/deps/v8/src/compiler/arm64/instruction-scheduler-arm64.cc
index 0294c828da..c2b0a4e386 100644
--- a/deps/v8/src/compiler/arm64/instruction-scheduler-arm64.cc
+++ b/deps/v8/src/compiler/arm64/instruction-scheduler-arm64.cc
@@ -128,6 +128,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArm64Float64ExtractHighWord32:
case kArm64Float64InsertLowWord32:
case kArm64Float64InsertHighWord32:
+ case kArm64Float64Mod:
case kArm64Float64MoveU64:
case kArm64U64MoveFloat64:
case kArm64Float64SilenceNaN:
@@ -292,14 +293,11 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArm64Ldrsw:
case kArm64LdrW:
case kArm64Ldr:
+ case kArm64Peek:
return kIsLoadOperation;
- case kArm64Float64Mod: // This opcode will call a C Function which can
- // alter CSP. TODO(arm64): Remove once JSSP is gone.
- case kArm64ClaimCSP:
- case kArm64ClaimJSSP:
- case kArm64PokeCSP:
- case kArm64PokeJSSP:
+ case kArm64Claim:
+ case kArm64Poke:
case kArm64PokePair:
case kArm64StrS:
case kArm64StrD:
@@ -387,16 +385,6 @@ int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
case kArm64Ldrsw:
return 11;
- case kCheckedLoadInt8:
- case kCheckedLoadUint8:
- case kCheckedLoadInt16:
- case kCheckedLoadUint16:
- case kCheckedLoadWord32:
- case kCheckedLoadWord64:
- case kCheckedLoadFloat32:
- case kCheckedLoadFloat64:
- return 5;
-
case kArm64Str:
case kArm64StrD:
case kArm64StrS:
@@ -405,14 +393,6 @@ int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
case kArm64Strh:
return 1;
- case kCheckedStoreWord8:
- case kCheckedStoreWord16:
- case kCheckedStoreWord32:
- case kCheckedStoreWord64:
- case kCheckedStoreFloat32:
- case kCheckedStoreFloat64:
- return 1;
-
case kArm64Madd32:
case kArm64Mneg32:
case kArm64Msub32:
diff --git a/deps/v8/src/compiler/arm64/instruction-selector-arm64.cc b/deps/v8/src/compiler/arm64/instruction-selector-arm64.cc
index 201c0613c4..d6082c9f0a 100644
--- a/deps/v8/src/compiler/arm64/instruction-selector-arm64.cc
+++ b/deps/v8/src/compiler/arm64/instruction-selector-arm64.cc
@@ -43,7 +43,7 @@ class Arm64OperandGenerator final : public OperandGenerator {
InstructionOperand UseRegisterOrImmediateZero(Node* node) {
if ((IsIntegerConstant(node) && (GetIntegerConstantValue(node) == 0)) ||
(IsFloatConstant(node) &&
- (bit_cast<int64_t>(GetFloatConstantValue(node)) == V8_INT64_C(0)))) {
+ (bit_cast<int64_t>(GetFloatConstantValue(node)) == 0))) {
return UseImmediate(node);
}
return UseRegister(node);
@@ -295,12 +295,12 @@ bool TryMatchAnyExtend(Arm64OperandGenerator* g, InstructionSelector* selector,
if (nm.IsWord32And()) {
Int32BinopMatcher mright(right_node);
- if (mright.right().Is(0xff) || mright.right().Is(0xffff)) {
+ if (mright.right().Is(0xFF) || mright.right().Is(0xFFFF)) {
int32_t mask = mright.right().Value();
*left_op = g->UseRegister(left_node);
*right_op = g->UseRegister(mright.left().node());
*opcode |= AddressingModeField::encode(
- (mask == 0xff) ? kMode_Operand2_R_UXTB : kMode_Operand2_R_UXTH);
+ (mask == 0xFF) ? kMode_Operand2_R_UXTB : kMode_Operand2_R_UXTH);
return true;
}
} else if (nm.IsWord32Sar()) {
@@ -488,7 +488,8 @@ void VisitBinop(InstructionSelector* selector, Node* node,
opcode = cont->Encode(opcode);
if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
- cont->kind(), cont->reason(), cont->frame_state());
+ cont->kind(), cont->reason(), cont->feedback(),
+ cont->frame_state());
} else if (cont->IsTrap()) {
inputs[input_count++] = g.UseImmediate(cont->trap_id());
selector->Emit(opcode, output_count, outputs, input_count, inputs);
@@ -760,110 +761,6 @@ void InstructionSelector::VisitUnalignedLoad(Node* node) { UNREACHABLE(); }
// Architecture supports unaligned access, therefore VisitStore is used instead
void InstructionSelector::VisitUnalignedStore(Node* node) { UNREACHABLE(); }
-void InstructionSelector::VisitCheckedLoad(Node* node) {
- CheckedLoadRepresentation load_rep = CheckedLoadRepresentationOf(node->op());
- Arm64OperandGenerator g(this);
- Node* const buffer = node->InputAt(0);
- Node* const offset = node->InputAt(1);
- Node* const length = node->InputAt(2);
- ArchOpcode opcode = kArchNop;
- switch (load_rep.representation()) {
- case MachineRepresentation::kWord8:
- opcode = load_rep.IsSigned() ? kCheckedLoadInt8 : kCheckedLoadUint8;
- break;
- case MachineRepresentation::kWord16:
- opcode = load_rep.IsSigned() ? kCheckedLoadInt16 : kCheckedLoadUint16;
- break;
- case MachineRepresentation::kWord32:
- opcode = kCheckedLoadWord32;
- break;
- case MachineRepresentation::kWord64:
- opcode = kCheckedLoadWord64;
- break;
- case MachineRepresentation::kFloat32:
- opcode = kCheckedLoadFloat32;
- break;
- case MachineRepresentation::kFloat64:
- opcode = kCheckedLoadFloat64;
- break;
- case MachineRepresentation::kBit: // Fall through.
- case MachineRepresentation::kTaggedSigned: // Fall through.
- case MachineRepresentation::kTaggedPointer: // Fall through.
- case MachineRepresentation::kTagged: // Fall through.
- case MachineRepresentation::kSimd128: // Fall through.
- case MachineRepresentation::kNone:
- UNREACHABLE();
- return;
- }
- // If the length is a constant power of two, allow the code generator to
- // pick a more efficient bounds check sequence by passing the length as an
- // immediate.
- if (length->opcode() == IrOpcode::kInt32Constant) {
- Int32Matcher m(length);
- if (m.IsPowerOf2()) {
- Emit(opcode, g.DefineAsRegister(node), g.UseRegister(buffer),
- g.UseRegister(offset), g.UseImmediate(length));
- return;
- }
- }
- Emit(opcode, g.DefineAsRegister(node), g.UseRegister(buffer),
- g.UseRegister(offset), g.UseOperand(length, kArithmeticImm));
-}
-
-
-void InstructionSelector::VisitCheckedStore(Node* node) {
- MachineRepresentation rep = CheckedStoreRepresentationOf(node->op());
- Arm64OperandGenerator g(this);
- Node* const buffer = node->InputAt(0);
- Node* const offset = node->InputAt(1);
- Node* const length = node->InputAt(2);
- Node* const value = node->InputAt(3);
- ArchOpcode opcode = kArchNop;
- switch (rep) {
- case MachineRepresentation::kWord8:
- opcode = kCheckedStoreWord8;
- break;
- case MachineRepresentation::kWord16:
- opcode = kCheckedStoreWord16;
- break;
- case MachineRepresentation::kWord32:
- opcode = kCheckedStoreWord32;
- break;
- case MachineRepresentation::kWord64:
- opcode = kCheckedStoreWord64;
- break;
- case MachineRepresentation::kFloat32:
- opcode = kCheckedStoreFloat32;
- break;
- case MachineRepresentation::kFloat64:
- opcode = kCheckedStoreFloat64;
- break;
- case MachineRepresentation::kBit: // Fall through.
- case MachineRepresentation::kTaggedSigned: // Fall through.
- case MachineRepresentation::kTaggedPointer: // Fall through.
- case MachineRepresentation::kTagged: // Fall through.
- case MachineRepresentation::kSimd128: // Fall through.
- case MachineRepresentation::kNone:
- UNREACHABLE();
- return;
- }
- // If the length is a constant power of two, allow the code generator to
- // pick a more efficient bounds check sequence by passing the length as an
- // immediate.
- if (length->opcode() == IrOpcode::kInt32Constant) {
- Int32Matcher m(length);
- if (m.IsPowerOf2()) {
- Emit(opcode, g.NoOutput(), g.UseRegister(buffer), g.UseRegister(offset),
- g.UseImmediate(length), g.UseRegisterOrImmediateZero(value));
- return;
- }
- }
- Emit(opcode, g.NoOutput(), g.UseRegister(buffer), g.UseRegister(offset),
- g.UseOperand(length, kArithmeticImm),
- g.UseRegisterOrImmediateZero(value));
-}
-
-
template <typename Matcher>
static void VisitLogical(InstructionSelector* selector, Node* node, Matcher* m,
ArchOpcode opcode, bool left_can_cover,
@@ -950,7 +847,7 @@ void InstructionSelector::VisitWord32And(Node* node) {
Int32BinopMatcher mleft(m.left().node());
if (mleft.right().HasValue()) {
// Any shift value can match; int32 shifts use `value % 32`.
- uint32_t lsb = mleft.right().Value() & 0x1f;
+ uint32_t lsb = mleft.right().Value() & 0x1F;
// Ubfx cannot extract bits past the register size, however since
// shifting the original value would have introduced some zeros we can
@@ -991,7 +888,7 @@ void InstructionSelector::VisitWord64And(Node* node) {
Int64BinopMatcher mleft(m.left().node());
if (mleft.right().HasValue()) {
// Any shift value can match; int64 shifts use `value % 64`.
- uint32_t lsb = static_cast<uint32_t>(mleft.right().Value() & 0x3f);
+ uint32_t lsb = static_cast<uint32_t>(mleft.right().Value() & 0x3F);
// Ubfx cannot extract bits past the register size, however since
// shifting the original value would have introduced some zeros we can
@@ -1105,16 +1002,16 @@ bool TryEmitBitfieldExtract32(InstructionSelector* selector, Node* node) {
Arm64OperandGenerator g(selector);
Int32BinopMatcher m(node);
if (selector->CanCover(node, m.left().node()) && m.left().IsWord32Shl()) {
- // Select Ubfx or Sbfx for (x << (K & 0x1f)) OP (K & 0x1f), where
- // OP is >>> or >> and (K & 0x1f) != 0.
+ // Select Ubfx or Sbfx for (x << (K & 0x1F)) OP (K & 0x1F), where
+ // OP is >>> or >> and (K & 0x1F) != 0.
Int32BinopMatcher mleft(m.left().node());
if (mleft.right().HasValue() && m.right().HasValue() &&
- (mleft.right().Value() & 0x1f) != 0 &&
- (mleft.right().Value() & 0x1f) == (m.right().Value() & 0x1f)) {
+ (mleft.right().Value() & 0x1F) != 0 &&
+ (mleft.right().Value() & 0x1F) == (m.right().Value() & 0x1F)) {
DCHECK(m.IsWord32Shr() || m.IsWord32Sar());
ArchOpcode opcode = m.IsWord32Sar() ? kArm64Sbfx32 : kArm64Ubfx32;
- int right_val = m.right().Value() & 0x1f;
+ int right_val = m.right().Value() & 0x1F;
DCHECK_NE(right_val, 0);
selector->Emit(opcode, g.DefineAsRegister(node),
@@ -1132,7 +1029,7 @@ bool TryEmitBitfieldExtract32(InstructionSelector* selector, Node* node) {
void InstructionSelector::VisitWord32Shr(Node* node) {
Int32BinopMatcher m(node);
if (m.left().IsWord32And() && m.right().HasValue()) {
- uint32_t lsb = m.right().Value() & 0x1f;
+ uint32_t lsb = m.right().Value() & 0x1F;
Int32BinopMatcher mleft(m.left().node());
if (mleft.right().HasValue() && mleft.right().Value() != 0) {
// Select Ubfx for Shr(And(x, mask), imm) where the result of the mask is
@@ -1160,7 +1057,7 @@ void InstructionSelector::VisitWord32Shr(Node* node) {
// by Uint32MulHigh.
Arm64OperandGenerator g(this);
Node* left = m.left().node();
- int shift = m.right().Value() & 0x1f;
+ int shift = m.right().Value() & 0x1F;
InstructionOperand const smull_operand = g.TempRegister();
Emit(kArm64Umull, smull_operand, g.UseRegister(left->InputAt(0)),
g.UseRegister(left->InputAt(1)));
@@ -1176,7 +1073,7 @@ void InstructionSelector::VisitWord32Shr(Node* node) {
void InstructionSelector::VisitWord64Shr(Node* node) {
Int64BinopMatcher m(node);
if (m.left().IsWord64And() && m.right().HasValue()) {
- uint32_t lsb = m.right().Value() & 0x3f;
+ uint32_t lsb = m.right().Value() & 0x3F;
Int64BinopMatcher mleft(m.left().node());
if (mleft.right().HasValue() && mleft.right().Value() != 0) {
// Select Ubfx for Shr(And(x, mask), imm) where the result of the mask is
@@ -1211,7 +1108,7 @@ void InstructionSelector::VisitWord32Sar(Node* node) {
// by Int32MulHigh.
Arm64OperandGenerator g(this);
Node* left = m.left().node();
- int shift = m.right().Value() & 0x1f;
+ int shift = m.right().Value() & 0x1F;
InstructionOperand const smull_operand = g.TempRegister();
Emit(kArm64Smull, smull_operand, g.UseRegister(left->InputAt(0)),
g.UseRegister(left->InputAt(1)));
@@ -1361,6 +1258,8 @@ void InstructionSelector::VisitWord32Popcnt(Node* node) { UNREACHABLE(); }
void InstructionSelector::VisitWord64Popcnt(Node* node) { UNREACHABLE(); }
+void InstructionSelector::VisitSpeculationFence(Node* node) { UNREACHABLE(); }
+
void InstructionSelector::VisitInt32Add(Node* node) {
Arm64OperandGenerator g(this);
Int32BinopMatcher m(node);
@@ -1483,7 +1382,8 @@ void EmitInt32MulWithOverflow(InstructionSelector* selector, Node* node,
} else if (cont->IsDeoptimize()) {
InstructionOperand in[] = {result, result};
selector->EmitDeoptimize(opcode, 0, nullptr, 2, in, cont->kind(),
- cont->reason(), cont->frame_state());
+ cont->reason(), cont->feedback(),
+ cont->frame_state());
} else if (cont->IsSet()) {
selector->Emit(opcode, g.DefineAsRegister(cont->result()), result, result);
} else {
@@ -1784,29 +1684,33 @@ void InstructionSelector::EmitPrepareArguments(
Node* node) {
Arm64OperandGenerator g(this);
- bool from_native_stack = linkage()->GetIncomingDescriptor()->UseNativeStack();
- bool to_native_stack = descriptor->UseNativeStack();
-
- bool always_claim = to_native_stack != from_native_stack;
-
+ // `arguments` includes alignment "holes". This means that slots bigger than
+ // kPointerSize, e.g. Simd128, will span across multiple arguments.
int claim_count = static_cast<int>(arguments->size());
int slot = claim_count - 1;
+ claim_count = RoundUp(claim_count, 2);
// Bump the stack pointer(s).
- if (claim_count > 0 || always_claim) {
+ if (claim_count > 0) {
// TODO(titzer): claim and poke probably take small immediates.
// TODO(titzer): it would be better to bump the csp here only
- // and emit paired stores with increment for non c frames.
- ArchOpcode claim = to_native_stack ? kArm64ClaimCSP : kArm64ClaimJSSP;
- // ClaimJSSP(0) or ClaimCSP(0) isn't a nop if there is a mismatch between
- // CSP and JSSP.
- Emit(claim, g.NoOutput(), g.TempImmediate(claim_count));
+ // and emit paired stores with increment for non c frames.
+ Emit(kArm64Claim, g.NoOutput(), g.TempImmediate(claim_count));
+ }
+
+ if (claim_count > 0) {
+ // Store padding, which might be overwritten.
+ Emit(kArm64Poke, g.NoOutput(), g.UseImmediate(0),
+ g.TempImmediate(claim_count - 1));
}
// Poke the arguments into the stack.
- ArchOpcode poke = to_native_stack ? kArm64PokeCSP : kArm64PokeJSSP;
while (slot >= 0) {
- Emit(poke, g.NoOutput(), g.UseRegister((*arguments)[slot].node()),
- g.TempImmediate(slot));
+ Node* input_node = (*arguments)[slot].node;
+ // Skip any alignment holes in pushed nodes.
+ if (input_node != nullptr) {
+ Emit(kArm64Poke, g.NoOutput(), g.UseRegister(input_node),
+ g.TempImmediate(slot));
+ }
slot--;
// TODO(ahaas): Poke arguments in pairs if two subsequent arguments have the
// same type.
@@ -1816,6 +1720,29 @@ void InstructionSelector::EmitPrepareArguments(
}
}
+void InstructionSelector::EmitPrepareResults(ZoneVector<PushParameter>* results,
+ const CallDescriptor* descriptor,
+ Node* node) {
+ Arm64OperandGenerator g(this);
+
+ int reverse_slot = 0;
+ for (PushParameter output : *results) {
+ if (!output.location.IsCallerFrameSlot()) continue;
+ reverse_slot += output.location.GetSizeInPointers();
+ // Skip any alignment holes in nodes.
+ if (output.node == nullptr) continue;
+ DCHECK(!descriptor->IsCFunctionCall());
+
+ if (output.location.GetType() == MachineType::Float32()) {
+ MarkAsFloat32(output.node);
+ } else if (output.location.GetType() == MachineType::Float64()) {
+ MarkAsFloat64(output.node);
+ }
+
+ Emit(kArm64Peek, g.DefineAsRegister(output.node),
+ g.UseImmediate(reverse_slot));
+ }
+}
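Editor's note: both EmitPrepareArguments and the new EmitPrepareResults walk slot lists that may contain alignment holes (null nodes) left for values wider than one pointer; holes still consume stack space but nothing is poked or peeked for them. A hedged sketch of that slot walk, with names invented for illustration:

#include <vector>

// Hypothetical stand-in for PushParameter: node == nullptr marks an alignment
// hole created for a value wider than one pointer (e.g. a Simd128 value).
struct Slot { const void* node; int size_in_pointers; };

// Returns the reverse-slot index of the last real (non-hole) result, mirroring
// how EmitPrepareResults pairs each kArm64Peek with a reverse-slot offset.
int LastResultReverseSlot(const std::vector<Slot>& results) {
  int reverse_slot = 0, last = 0;
  for (const Slot& s : results) {
    reverse_slot += s.size_in_pointers;  // holes still advance the offset
    if (s.node == nullptr) continue;     // skip padding, nothing to load
    last = reverse_slot;
  }
  return last;
}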
bool InstructionSelector::IsTailCallAddressImmediate() { return false; }
@@ -1834,7 +1761,8 @@ void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
g.Label(cont->true_block()), g.Label(cont->false_block()));
} else if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, g.NoOutput(), left, right, cont->kind(),
- cont->reason(), cont->frame_state());
+ cont->reason(), cont->feedback(),
+ cont->frame_state());
} else if (cont->IsSet()) {
selector->Emit(opcode, g.DefineAsRegister(cont->result()), left, right);
} else {
@@ -2002,24 +1930,23 @@ void EmitBranchOrDeoptimize(InstructionSelector* selector,
} else {
DCHECK(cont->IsDeoptimize());
selector->EmitDeoptimize(cont->Encode(opcode), g.NoOutput(), value,
- cont->kind(), cont->reason(), cont->frame_state());
+ cont->kind(), cont->reason(), cont->feedback(),
+ cont->frame_state());
}
}
// Try to emit TBZ, TBNZ, CBZ or CBNZ for certain comparisons of {node}
-// against zero, depending on the condition.
-bool TryEmitCbzOrTbz(InstructionSelector* selector, Node* node, Node* user,
- FlagsCondition cond, FlagsContinuation* cont) {
- Int32BinopMatcher m_user(user);
- USE(m_user);
- DCHECK(m_user.right().Is(0) || m_user.left().Is(0));
-
+// against {value}, depending on the condition.
+bool TryEmitCbzOrTbz(InstructionSelector* selector, Node* node, uint32_t value,
+ Node* user, FlagsCondition cond, FlagsContinuation* cont) {
// Only handle branches and deoptimisations.
if (!cont->IsBranch() && !cont->IsDeoptimize()) return false;
switch (cond) {
case kSignedLessThan:
case kSignedGreaterThanOrEqual: {
+      // Here we handle sign tests, i.e. comparisons with zero.
+ if (value != 0) return false;
// We don't generate TBZ/TBNZ for deoptimisations, as they have a
// shorter range than conditional branches and generating them for
// deoptimisations results in more veneers.
@@ -2045,9 +1972,29 @@ bool TryEmitCbzOrTbz(InstructionSelector* selector, Node* node, Node* user,
return true;
}
case kEqual:
- case kNotEqual:
+ case kNotEqual: {
+ if (node->opcode() == IrOpcode::kWord32And) {
+ // Emit a tbz/tbnz if we are comparing with a single-bit mask:
+ // Branch(Word32Equal(Word32And(x, 1 << N), 1 << N), true, false)
+ Int32BinopMatcher m_and(node);
+ if (cont->IsBranch() && base::bits::IsPowerOfTwo(value) &&
+ m_and.right().Is(value) && selector->CanCover(user, node)) {
+ Arm64OperandGenerator g(selector);
+ // In the code generator, Equal refers to a bit being cleared. We want
+          // the opposite here, so negate the condition.
+ cont->Negate();
+ selector->Emit(cont->Encode(kArm64TestAndBranch32), g.NoOutput(),
+ g.UseRegister(m_and.left().node()),
+ g.TempImmediate(base::bits::CountTrailingZeros(value)),
+ g.Label(cont->true_block()),
+ g.Label(cont->false_block()));
+ return true;
+ }
+ }
+ } // Fall through.
case kUnsignedLessThanOrEqual:
case kUnsignedGreaterThan: {
+ if (value != 0) return false;
Arm64OperandGenerator g(selector);
cont->Overwrite(MapForCbz(cond));
EmitBranchOrDeoptimize(selector, kArm64CompareAndBranch32,
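Editor's note: the new single-bit-mask path above matches Branch(Word32Equal(Word32And(x, 1 << N), 1 << N), ...) and lowers it to a test-and-branch on bit N. A small sketch of the equivalence being exploited (illustrative helpers only):

#include <cstdint>

// A mask with exactly one bit set is a power of two.
constexpr bool IsSingleBit(uint32_t mask) {
  return mask != 0 && (mask & (mask - 1)) == 0;
}

// For a single-bit mask, (x & mask) == mask is exactly "bit N of x is set",
// so the equality compare can be replaced by tbnz/tbz on that bit.
constexpr bool BitIsSet(uint32_t x, uint32_t mask) { return (x & mask) == mask; }

static_assert(IsSingleBit(1u << 7), "one bit set");
static_assert(BitIsSet(0x80u, 1u << 7) && !BitIsSet(0x40u, 1u << 7), "bit test");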
@@ -2062,15 +2009,20 @@ bool TryEmitCbzOrTbz(InstructionSelector* selector, Node* node, Node* user,
void VisitWord32Compare(InstructionSelector* selector, Node* node,
FlagsContinuation* cont) {
Int32BinopMatcher m(node);
- ArchOpcode opcode = kArm64Cmp32;
FlagsCondition cond = cont->condition();
- if (m.right().Is(0)) {
- if (TryEmitCbzOrTbz(selector, m.left().node(), node, cond, cont)) return;
- } else if (m.left().Is(0)) {
+ if (m.right().HasValue()) {
+ if (TryEmitCbzOrTbz(selector, m.left().node(), m.right().Value(), node,
+ cond, cont)) {
+ return;
+ }
+ } else if (m.left().HasValue()) {
FlagsCondition commuted_cond = CommuteFlagsCondition(cond);
- if (TryEmitCbzOrTbz(selector, m.right().node(), node, commuted_cond, cont))
+ if (TryEmitCbzOrTbz(selector, m.right().node(), m.left().Value(), node,
+ commuted_cond, cont)) {
return;
+ }
}
+ ArchOpcode opcode = kArm64Cmp32;
ImmediateMode immediate_mode = kArithmeticImm;
if (m.right().Is(0) && (m.left().IsInt32Add() || m.left().IsWord32And())) {
// Emit flag setting add/and instructions for comparisons against zero.
@@ -2141,7 +2093,7 @@ bool TryEmitTestAndBranch(InstructionSelector* selector, Node* node,
Arm64OperandGenerator g(selector);
Matcher m(node);
if (cont->IsBranch() && m.right().HasValue() &&
- (base::bits::CountPopulation(m.right().Value()) == 1)) {
+ base::bits::IsPowerOfTwo(m.right().Value())) {
// If the mask has only one bit set, we can use tbz/tbnz.
DCHECK((cont->condition() == kEqual) || (cont->condition() == kNotEqual));
selector->Emit(
@@ -2356,7 +2308,8 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
} else if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(cont->Encode(kArm64Tst32), g.NoOutput(),
g.UseRegister(value), g.UseRegister(value),
- cont->kind(), cont->reason(), cont->frame_state());
+ cont->kind(), cont->reason(), cont->feedback(),
+ cont->frame_state());
} else {
DCHECK(cont->IsTrap());
selector->Emit(cont->Encode(kArm64Tst32), g.NoOutput(),
@@ -2376,14 +2329,14 @@ void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
void InstructionSelector::VisitDeoptimizeIf(Node* node) {
DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
- kNotEqual, p.kind(), p.reason(), node->InputAt(1));
+ kNotEqual, p.kind(), p.reason(), p.feedback(), node->InputAt(1));
VisitWordCompareZero(this, node, node->InputAt(0), &cont);
}
void InstructionSelector::VisitDeoptimizeUnless(Node* node) {
DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
- kEqual, p.kind(), p.reason(), node->InputAt(1));
+ kEqual, p.kind(), p.reason(), p.feedback(), node->InputAt(1));
VisitWordCompareZero(this, node, node->InputAt(0), &cont);
}
@@ -2648,7 +2601,7 @@ void InstructionSelector::VisitFloat64InsertLowWord32(Node* node) {
Emit(kArm64Float64MoveU64, g.DefineAsRegister(node), g.UseRegister(right));
return;
}
- Emit(kArm64Float64InsertLowWord32, g.DefineAsRegister(node),
+ Emit(kArm64Float64InsertLowWord32, g.DefineSameAsFirst(node),
g.UseRegister(left), g.UseRegister(right));
}
@@ -2665,7 +2618,7 @@ void InstructionSelector::VisitFloat64InsertHighWord32(Node* node) {
Emit(kArm64Float64MoveU64, g.DefineAsRegister(node), g.UseRegister(left));
return;
}
- Emit(kArm64Float64InsertHighWord32, g.DefineAsRegister(node),
+ Emit(kArm64Float64InsertHighWord32, g.DefineSameAsFirst(node),
g.UseRegister(left), g.UseRegister(right));
}
diff --git a/deps/v8/src/compiler/branch-elimination.cc b/deps/v8/src/compiler/branch-elimination.cc
index 5406ec5766..53c3435b55 100644
--- a/deps/v8/src/compiler/branch-elimination.cc
+++ b/deps/v8/src/compiler/branch-elimination.cc
@@ -103,8 +103,9 @@ Reduction BranchElimination::ReduceDeoptimizeConditional(Node* node) {
// with the {control} node that already contains the right information.
ReplaceWithValue(node, dead(), effect, control);
} else {
- control = graph()->NewNode(common()->Deoptimize(p.kind(), p.reason()),
- frame_state, effect, control);
+ control = graph()->NewNode(
+ common()->Deoptimize(p.kind(), p.reason(), VectorSlotPair()),
+ frame_state, effect, control);
// TODO(bmeurer): This should be on the AdvancedReducer somehow.
NodeProperties::MergeControlToEnd(graph(), common(), control);
Revisit(graph()->end());
diff --git a/deps/v8/src/compiler/bytecode-graph-builder.cc b/deps/v8/src/compiler/bytecode-graph-builder.cc
index 7e1fbfddb3..54a924fce4 100644
--- a/deps/v8/src/compiler/bytecode-graph-builder.cc
+++ b/deps/v8/src/compiler/bytecode-graph-builder.cc
@@ -14,6 +14,7 @@
#include "src/interpreter/bytecodes.h"
#include "src/objects-inl.h"
#include "src/objects/literal-objects.h"
+#include "src/vector-slot-pair.h"
namespace v8 {
namespace internal {
@@ -949,7 +950,7 @@ void BytecodeGraphBuilder::VisitLdaGlobalInsideTypeof() {
environment()->BindAccumulator(node, Environment::kAttachFrameState);
}
-void BytecodeGraphBuilder::BuildStoreGlobal(LanguageMode language_mode) {
+void BytecodeGraphBuilder::VisitStaGlobal() {
PrepareEagerCheckpoint();
Handle<Name> name =
Handle<Name>::cast(bytecode_iterator().GetConstantForIndexOperand(0));
@@ -957,19 +958,13 @@ void BytecodeGraphBuilder::BuildStoreGlobal(LanguageMode language_mode) {
CreateVectorSlotPair(bytecode_iterator().GetIndexOperand(1));
Node* value = environment()->LookupAccumulator();
+ LanguageMode language_mode =
+ feedback.vector()->GetLanguageMode(feedback.slot());
const Operator* op = javascript()->StoreGlobal(language_mode, name, feedback);
Node* node = NewNode(op, value);
environment()->RecordAfterState(node, Environment::kAttachFrameState);
}
-void BytecodeGraphBuilder::VisitStaGlobalSloppy() {
- BuildStoreGlobal(LanguageMode::kSloppy);
-}
-
-void BytecodeGraphBuilder::VisitStaGlobalStrict() {
- BuildStoreGlobal(LanguageMode::kStrict);
-}
-
void BytecodeGraphBuilder::VisitStaDataPropertyInLiteral() {
PrepareEagerCheckpoint();
@@ -1609,7 +1604,8 @@ void BytecodeGraphBuilder::BuildCall(ConvertReceiverMode receiver_mode,
CallFrequency frequency = ComputeCallFrequency(slot_id);
const Operator* op =
- javascript()->Call(arg_count, frequency, feedback, receiver_mode);
+ javascript()->Call(arg_count, frequency, feedback, receiver_mode,
+ GetSpeculationMode(slot_id));
JSTypeHintLowering::LoweringResult lowering = TryBuildSimplifiedCall(
op, args, static_cast<int>(arg_count), feedback.slot());
if (lowering.IsExit()) return;
@@ -1947,8 +1943,8 @@ void BytecodeGraphBuilder::VisitThrow() {
void BytecodeGraphBuilder::VisitAbort() {
BuildLoopExitsForFunctionExit(bytecode_analysis()->GetInLivenessFor(
bytecode_iterator().current_offset()));
- BailoutReason reason =
- static_cast<BailoutReason>(bytecode_iterator().GetIndexOperand(0));
+ AbortReason reason =
+ static_cast<AbortReason>(bytecode_iterator().GetIndexOperand(0));
NewNode(simplified()->RuntimeAbort(reason));
Node* control = NewNode(common()->Throw());
MergeControlToLeaveFunction(control);
@@ -2104,6 +2100,11 @@ CallFrequency BytecodeGraphBuilder::ComputeCallFrequency(int slot_id) const {
invocation_frequency_.value());
}
+SpeculationMode BytecodeGraphBuilder::GetSpeculationMode(int slot_id) const {
+ CallICNexus nexus(feedback_vector(), feedback_vector()->ToSlot(slot_id));
+ return nexus.GetSpeculationMode();
+}
+
void BytecodeGraphBuilder::VisitBitwiseNot() {
BuildUnaryOp(javascript()->BitwiseNot());
}
@@ -2574,7 +2575,7 @@ void BytecodeGraphBuilder::VisitSwitchOnSmiNoFeedback() {
PrepareEagerCheckpoint();
Node* acc = environment()->LookupAccumulator();
- Node* acc_smi = NewNode(simplified()->CheckSmi(), acc);
+ Node* acc_smi = NewNode(simplified()->CheckSmi(VectorSlotPair()), acc);
BuildSwitchOnSmi(acc_smi);
}
@@ -2670,7 +2671,9 @@ void BytecodeGraphBuilder::VisitForInNext() {
// We need to rename the {index} here, as in case of OSR we lose the
// information that the {index} is always a valid unsigned Smi value.
index = graph()->NewNode(common()->TypeGuard(Type::UnsignedSmall()), index,
+ environment()->GetEffectDependency(),
environment()->GetControlDependency());
+ environment()->UpdateEffectDependency(index);
FeedbackSlot slot =
feedback_vector()->ToSlot(bytecode_iterator().GetIndexOperand(3));
@@ -2736,14 +2739,16 @@ void BytecodeGraphBuilder::VisitRestoreGeneratorState() {
environment()->BindAccumulator(state, Environment::kAttachFrameState);
}
-void BytecodeGraphBuilder::VisitRestoreGeneratorRegisters() {
+void BytecodeGraphBuilder::VisitResumeGenerator() {
Node* generator =
environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
- interpreter::Register first_reg = bytecode_iterator().GetRegisterOperand(1);
+ interpreter::Register generator_state_reg =
+ bytecode_iterator().GetRegisterOperand(1);
+ interpreter::Register first_reg = bytecode_iterator().GetRegisterOperand(2);
// We assume we are restoring registers starting from index 0.
CHECK_EQ(0, first_reg.index());
int register_count =
- static_cast<int>(bytecode_iterator().GetRegisterCountOperand(2));
+ static_cast<int>(bytecode_iterator().GetRegisterCountOperand(3));
// Bijection between registers and array indices must match that used in
// InterpreterAssembler::ExportRegisterFile.
@@ -2751,6 +2756,16 @@ void BytecodeGraphBuilder::VisitRestoreGeneratorRegisters() {
Node* value = NewNode(javascript()->GeneratorRestoreRegister(i), generator);
environment()->BindRegister(interpreter::Register(i), value);
}
+
+ // We're no longer resuming, so update the state register.
+ environment()->BindRegister(
+ generator_state_reg,
+ jsgraph()->SmiConstant(JSGeneratorObject::kGeneratorExecuting));
+
+ // Update the accumulator with the generator's input_or_debug_pos.
+ Node* input_or_debug_pos =
+ NewNode(javascript()->GeneratorRestoreInputOrDebugPos(), generator);
+ environment()->BindAccumulator(input_or_debug_pos);
}
void BytecodeGraphBuilder::VisitWide() {
diff --git a/deps/v8/src/compiler/bytecode-graph-builder.h b/deps/v8/src/compiler/bytecode-graph-builder.h
index 562c3ddaea..91b857298c 100644
--- a/deps/v8/src/compiler/bytecode-graph-builder.h
+++ b/deps/v8/src/compiler/bytecode-graph-builder.h
@@ -16,6 +16,9 @@
namespace v8 {
namespace internal {
+
+class VectorSlotPair;
+
namespace compiler {
class Reduction;
@@ -152,7 +155,6 @@ class BytecodeGraphBuilder {
void BuildCreateArguments(CreateArgumentsType type);
Node* BuildLoadGlobal(Handle<Name> name, uint32_t feedback_slot_index,
TypeofMode typeof_mode);
- void BuildStoreGlobal(LanguageMode language_mode);
enum class StoreMode {
// Check the prototype chain before storing.
@@ -232,6 +234,10 @@ class BytecodeGraphBuilder {
// feedback.
CallFrequency ComputeCallFrequency(int slot_id) const;
+ // Helper function to extract the speculation mode from the recorded type
+ // feedback.
+ SpeculationMode GetSpeculationMode(int slot_id) const;
+
// Control flow plumbing.
void BuildJump();
void BuildJumpIf(Node* condition);
diff --git a/deps/v8/src/compiler/c-linkage.cc b/deps/v8/src/compiler/c-linkage.cc
index dd4197d466..330b19fac3 100644
--- a/deps/v8/src/compiler/c-linkage.cc
+++ b/deps/v8/src/compiler/c-linkage.cc
@@ -224,7 +224,7 @@ CallDescriptor* Linkage::GetSimplifiedCDescriptor(
// The target for C calls is always an address (i.e. machine pointer).
MachineType target_type = MachineType::Pointer();
LinkageLocation target_loc = LinkageLocation::ForAnyRegister(target_type);
- CallDescriptor::Flags flags = CallDescriptor::kUseNativeStack;
+ CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
if (set_initialize_root_flag) {
flags |= CallDescriptor::kInitializeRootRegister;
}
diff --git a/deps/v8/src/compiler/code-assembler.cc b/deps/v8/src/compiler/code-assembler.cc
index f24cec64a7..071f8952db 100644
--- a/deps/v8/src/compiler/code-assembler.cc
+++ b/deps/v8/src/compiler/code-assembler.cc
@@ -245,7 +245,12 @@ TNode<IntPtrT> CodeAssembler::IntPtrConstant(intptr_t value) {
}
TNode<Number> CodeAssembler::NumberConstant(double value) {
- return UncheckedCast<Number>(raw_assembler()->NumberConstant(value));
+ int smi_value;
+ if (DoubleToSmiInteger(value, &smi_value)) {
+ return UncheckedCast<Number>(SmiConstant(smi_value));
+ } else {
+ return UncheckedCast<Number>(raw_assembler()->NumberConstant(value));
+ }
}
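Editor's note: NumberConstant now prefers a Smi constant whenever the double is representable as one, avoiding a heap number allocation. A rough sketch of the kind of check DoubleToSmiInteger performs (illustrative only; the real Smi payload width is platform dependent, 31 bits is assumed here):

#include <cmath>
#include <cstdint>

// Illustrative check only: a double fits in a Smi if it is an integral value
// in Smi range and not negative zero (which must keep its sign as a heap number).
bool FitsInSmi(double value, int32_t* out) {
  constexpr int32_t kSmiMax = (1 << 30) - 1;
  constexpr int32_t kSmiMin = -(1 << 30);
  if (std::trunc(value) != value) return false;           // not an integer
  if (value == 0.0 && std::signbit(value)) return false;  // -0.0 is not a Smi
  if (value < kSmiMin || value > kSmiMax) return false;   // out of Smi range
  *out = static_cast<int32_t>(value);
  return true;
}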
TNode<Smi> CodeAssembler::SmiConstant(Smi* value) {
@@ -1357,13 +1362,13 @@ Node* CodeAssemblerVariable::value() const {
str << "#Use of unbound variable:"
<< "#\n Variable: " << *this << "#\n Current Block: ";
state_->PrintCurrentBlock(str);
- FATAL(str.str().c_str());
+ FATAL("%s", str.str().c_str());
}
if (!state_->InsideBlock()) {
std::stringstream str;
str << "#Accessing variable value outside a block:"
<< "#\n Variable: " << *this;
- FATAL(str.str().c_str());
+ FATAL("%s", str.str().c_str());
}
#endif // DEBUG
return impl_->value_;
@@ -1456,7 +1461,7 @@ void CodeAssemblerLabel::MergeVariables() {
}
str << "\n# Current Block: ";
state_->PrintCurrentBlock(str);
- FATAL(str.str().c_str());
+ FATAL("%s", str.str().c_str());
}
#endif // DEBUG
}
@@ -1472,7 +1477,7 @@ void CodeAssemblerLabel::Bind(AssemblerDebugInfo debug_info) {
str << "Cannot bind the same label twice:"
<< "\n# current: " << debug_info
<< "\n# previous: " << *label_->block();
- FATAL(str.str().c_str());
+ FATAL("%s", str.str().c_str());
}
state_->raw_assembler_->Bind(label_, debug_info);
UpdateVariablesAfterBind();
@@ -1524,7 +1529,7 @@ void CodeAssemblerLabel::UpdateVariablesAfterBind() {
<< " vs. found=" << (not_found ? 0 : i->second.size())
<< "\n# Variable: " << *var_impl
<< "\n# Current Block: " << *label_->block();
- FATAL(str.str().c_str());
+ FATAL("%s", str.str().c_str());
}
#endif // DEBUG
Node* phi = state_->raw_assembler_->Phi(
diff --git a/deps/v8/src/compiler/code-assembler.h b/deps/v8/src/compiler/code-assembler.h
index 90a9d02fce..9f0d463dc1 100644
--- a/deps/v8/src/compiler/code-assembler.h
+++ b/deps/v8/src/compiler/code-assembler.h
@@ -17,6 +17,7 @@
#include "src/globals.h"
#include "src/heap/heap.h"
#include "src/machine-type.h"
+#include "src/objects/data-handler.h"
#include "src/runtime/runtime.h"
#include "src/zone/zone-containers.h"
@@ -26,6 +27,10 @@ namespace internal {
class Callable;
class CallInterfaceDescriptor;
class Isolate;
+class JSCollection;
+class JSWeakCollection;
+class JSWeakMap;
+class JSWeakSet;
class Factory;
class Zone;
@@ -252,7 +257,7 @@ class Node;
class RawMachineAssembler;
class RawMachineLabel;
-typedef ZoneList<CodeAssemblerVariable*> CodeAssemblerVariableList;
+typedef ZoneVector<CodeAssemblerVariable*> CodeAssemblerVariableList;
typedef std::function<void()> CodeAssemblerCallback;
@@ -1062,6 +1067,11 @@ class V8_EXPORT_PRIVATE CodeAssembler {
bool Word32ShiftIsSafe() const;
private:
+ // These two don't have definitions and are here only for catching use cases
+ // where the cast is not necessary.
+ TNode<Int32T> Signed(TNode<Int32T> x);
+ TNode<Uint32T> Unsigned(TNode<Uint32T> x);
+
RawMachineAssembler* raw_assembler() const;
// Calls respective callback registered in the state.
@@ -1157,7 +1167,7 @@ class CodeAssemblerLabel {
CodeAssembler* assembler,
const CodeAssemblerVariableList& merged_variables,
CodeAssemblerLabel::Type type = CodeAssemblerLabel::kNonDeferred)
- : CodeAssemblerLabel(assembler, merged_variables.length(),
+ : CodeAssemblerLabel(assembler, merged_variables.size(),
&(merged_variables[0]), type) {}
CodeAssemblerLabel(
CodeAssembler* assembler, size_t count,
diff --git a/deps/v8/src/compiler/code-generator.cc b/deps/v8/src/compiler/code-generator.cc
index 3d43ab4765..0fb38e5933 100644
--- a/deps/v8/src/compiler/code-generator.cc
+++ b/deps/v8/src/compiler/code-generator.cc
@@ -310,7 +310,10 @@ MaybeHandle<HandlerTable> CodeGenerator::GetHandlerTable() const {
}
Handle<Code> CodeGenerator::FinalizeCode() {
- if (result_ != kSuccess) return Handle<Code>();
+ if (result_ != kSuccess) {
+ tasm()->AbortedCodeGeneration();
+ return Handle<Code>();
+ }
// Allocate exception handler table.
Handle<HandlerTable> table = HandlerTable::Empty(isolate());
@@ -915,9 +918,17 @@ int CodeGenerator::BuildTranslation(Instruction* instr, int pc_offset,
FrameStateDescriptor* const descriptor = entry.descriptor();
frame_state_offset++;
- Translation translation(
- &translations_, static_cast<int>(descriptor->GetFrameCount()),
- static_cast<int>(descriptor->GetJSFrameCount()), zone());
+ int update_feedback_count = entry.feedback().IsValid() ? 1 : 0;
+ Translation translation(&translations_,
+ static_cast<int>(descriptor->GetFrameCount()),
+ static_cast<int>(descriptor->GetJSFrameCount()),
+ update_feedback_count, zone());
+ if (entry.feedback().IsValid()) {
+ DeoptimizationLiteral literal =
+ DeoptimizationLiteral(entry.feedback().vector());
+ int literal_id = DefineDeoptimizationLiteral(literal);
+ translation.AddUpdateFeedback(literal_id, entry.feedback().slot().ToInt());
+ }
InstructionOperandIterator iter(instr, frame_state_offset);
BuildTranslationForFrameStateDescriptor(descriptor, &iter, &translation,
state_combine);
@@ -1000,8 +1011,6 @@ void CodeGenerator::AddTranslationForOperand(Translation* translation,
literal = DeoptimizationLiteral(isolate()->factory()->true_value());
}
} else {
- // TODO(jarin,bmeurer): We currently pass in raw pointers to the
- // JSFunction::entry here. We should really consider fixing this.
DCHECK(type == MachineType::Int32() ||
type == MachineType::Uint32() ||
type.representation() == MachineRepresentation::kWord32 ||
@@ -1019,8 +1028,6 @@ void CodeGenerator::AddTranslationForOperand(Translation* translation,
case Constant::kInt64:
// When pointers are 8 bytes, we can use int64 constants to represent
// Smis.
- // TODO(jarin,bmeurer): We currently pass in raw pointers to the
- // JSFunction::entry here. We should really consider fixing this.
DCHECK(type.representation() == MachineRepresentation::kWord64 ||
type.representation() == MachineRepresentation::kTagged);
DCHECK_EQ(8, kPointerSize);
diff --git a/deps/v8/src/compiler/common-operator-reducer.cc b/deps/v8/src/compiler/common-operator-reducer.cc
index 70fdf71578..d9bc5c8173 100644
--- a/deps/v8/src/compiler/common-operator-reducer.cc
+++ b/deps/v8/src/compiler/common-operator-reducer.cc
@@ -138,9 +138,10 @@ Reduction CommonOperatorReducer::ReduceDeoptimizeConditional(Node* node) {
if (condition->opcode() == IrOpcode::kBooleanNot) {
NodeProperties::ReplaceValueInput(node, condition->InputAt(0), 0);
NodeProperties::ChangeOp(
- node, condition_is_true
- ? common()->DeoptimizeIf(p.kind(), p.reason())
- : common()->DeoptimizeUnless(p.kind(), p.reason()));
+ node, condition_is_true ? common()->DeoptimizeIf(p.kind(), p.reason(),
+ VectorSlotPair())
+ : common()->DeoptimizeUnless(
+ p.kind(), p.reason(), VectorSlotPair()));
return Changed(node);
}
Decision const decision = DecideCondition(condition);
@@ -148,8 +149,9 @@ Reduction CommonOperatorReducer::ReduceDeoptimizeConditional(Node* node) {
if (condition_is_true == (decision == Decision::kTrue)) {
ReplaceWithValue(node, dead(), effect, control);
} else {
- control = graph()->NewNode(common()->Deoptimize(p.kind(), p.reason()),
- frame_state, effect, control);
+ control = graph()->NewNode(
+ common()->Deoptimize(p.kind(), p.reason(), VectorSlotPair()),
+ frame_state, effect, control);
// TODO(bmeurer): This should be on the AdvancedReducer somehow.
NodeProperties::MergeControlToEnd(graph(), common(), control);
Revisit(graph()->end());
diff --git a/deps/v8/src/compiler/common-operator.cc b/deps/v8/src/compiler/common-operator.cc
index f43ff7e515..54af052d56 100644
--- a/deps/v8/src/compiler/common-operator.cc
+++ b/deps/v8/src/compiler/common-operator.cc
@@ -43,7 +43,8 @@ int ValueInputCountOfReturn(Operator const* const op) {
}
bool operator==(DeoptimizeParameters lhs, DeoptimizeParameters rhs) {
- return lhs.kind() == rhs.kind() && lhs.reason() == rhs.reason();
+ return lhs.kind() == rhs.kind() && lhs.reason() == rhs.reason() &&
+ lhs.feedback() == rhs.feedback();
}
bool operator!=(DeoptimizeParameters lhs, DeoptimizeParameters rhs) {
@@ -51,11 +52,15 @@ bool operator!=(DeoptimizeParameters lhs, DeoptimizeParameters rhs) {
}
size_t hash_value(DeoptimizeParameters p) {
- return base::hash_combine(p.kind(), p.reason());
+ return base::hash_combine(p.kind(), p.reason(), p.feedback());
}
std::ostream& operator<<(std::ostream& os, DeoptimizeParameters p) {
- return os << p.kind() << ":" << p.reason();
+ os << p.kind() << ":" << p.reason();
+ if (p.feedback().IsValid()) {
+ os << "; " << p.feedback();
+ }
+ return os;
}
DeoptimizeParameters const& DeoptimizeParametersOf(Operator const* const op) {
@@ -343,8 +348,7 @@ ZoneVector<MachineType> const* MachineTypesOf(Operator const* op) {
#define COMMON_CACHED_OP_LIST(V) \
V(Dead, Operator::kFoldable, 0, 0, 0, 1, 1, 1) \
- V(DeadValue, Operator::kFoldable, 0, 0, 0, 1, 0, 0) \
- V(Unreachable, Operator::kFoldable, 0, 1, 1, 0, 1, 0) \
+ V(Unreachable, Operator::kFoldable, 0, 1, 1, 1, 1, 0) \
V(IfTrue, Operator::kKontrol, 0, 0, 1, 0, 0, 1) \
V(IfFalse, Operator::kKontrol, 0, 0, 1, 0, 0, 1) \
V(IfSuccess, Operator::kKontrol, 0, 0, 1, 0, 0, 1) \
@@ -409,7 +413,6 @@ ZoneVector<MachineType> const* MachineTypesOf(Operator const* op) {
#define CACHED_DEOPTIMIZE_LIST(V) \
V(Eager, MinusZero) \
- V(Eager, NoReason) \
V(Eager, WrongMap) \
V(Soft, InsufficientTypeFeedbackForGenericKeyedAccess) \
V(Soft, InsufficientTypeFeedbackForGenericNamedAccess)
@@ -424,7 +427,6 @@ ZoneVector<MachineType> const* MachineTypesOf(Operator const* op) {
#define CACHED_DEOPTIMIZE_UNLESS_LIST(V) \
V(Eager, LostPrecision) \
V(Eager, LostPrecisionOrNaN) \
- V(Eager, NoReason) \
V(Eager, NotAHeapNumber) \
V(Eager, NotANumberOrOddball) \
V(Eager, NotASmi) \
@@ -606,7 +608,7 @@ struct CommonOperatorGlobalCache final {
Operator::kFoldable | Operator::kNoThrow, // properties
"Deoptimize", // name
1, 1, 1, 0, 0, 1, // counts
- DeoptimizeParameters(kKind, kReason)) {} // parameter
+ DeoptimizeParameters(kKind, kReason, VectorSlotPair())) {}
};
#define CACHED_DEOPTIMIZE(Kind, Reason) \
DeoptimizeOperator<DeoptimizeKind::k##Kind, DeoptimizeReason::k##Reason> \
@@ -622,7 +624,7 @@ struct CommonOperatorGlobalCache final {
Operator::kFoldable | Operator::kNoThrow, // properties
"DeoptimizeIf", // name
2, 1, 1, 0, 1, 1, // counts
- DeoptimizeParameters(kKind, kReason)) {} // parameter
+ DeoptimizeParameters(kKind, kReason, VectorSlotPair())) {}
};
#define CACHED_DEOPTIMIZE_IF(Kind, Reason) \
DeoptimizeIfOperator<DeoptimizeKind::k##Kind, DeoptimizeReason::k##Reason> \
@@ -639,7 +641,7 @@ struct CommonOperatorGlobalCache final {
Operator::kFoldable | Operator::kNoThrow, // properties
"DeoptimizeUnless", // name
2, 1, 1, 0, 1, 1, // counts
- DeoptimizeParameters(kKind, kReason)) {} // parameter
+ DeoptimizeParameters(kKind, kReason, VectorSlotPair())) {}
};
#define CACHED_DEOPTIMIZE_UNLESS(Kind, Reason) \
DeoptimizeUnlessOperator<DeoptimizeKind::k##Kind, \
@@ -817,17 +819,18 @@ const Operator* CommonOperatorBuilder::Branch(BranchHint hint) {
UNREACHABLE();
}
-const Operator* CommonOperatorBuilder::Deoptimize(DeoptimizeKind kind,
- DeoptimizeReason reason) {
-#define CACHED_DEOPTIMIZE(Kind, Reason) \
- if (kind == DeoptimizeKind::k##Kind && \
- reason == DeoptimizeReason::k##Reason) { \
- return &cache_.kDeoptimize##Kind##Reason##Operator; \
+const Operator* CommonOperatorBuilder::Deoptimize(
+ DeoptimizeKind kind, DeoptimizeReason reason,
+ VectorSlotPair const& feedback) {
+#define CACHED_DEOPTIMIZE(Kind, Reason) \
+ if (kind == DeoptimizeKind::k##Kind && \
+ reason == DeoptimizeReason::k##Reason && !feedback.IsValid()) { \
+ return &cache_.kDeoptimize##Kind##Reason##Operator; \
}
CACHED_DEOPTIMIZE_LIST(CACHED_DEOPTIMIZE)
#undef CACHED_DEOPTIMIZE
// Uncached
- DeoptimizeParameters parameter(kind, reason);
+ DeoptimizeParameters parameter(kind, reason, feedback);
return new (zone()) Operator1<DeoptimizeParameters>( // --
IrOpcode::kDeoptimize, // opcode
Operator::kFoldable | Operator::kNoThrow, // properties
@@ -836,17 +839,18 @@ const Operator* CommonOperatorBuilder::Deoptimize(DeoptimizeKind kind,
parameter); // parameter
}
-const Operator* CommonOperatorBuilder::DeoptimizeIf(DeoptimizeKind kind,
- DeoptimizeReason reason) {
-#define CACHED_DEOPTIMIZE_IF(Kind, Reason) \
- if (kind == DeoptimizeKind::k##Kind && \
- reason == DeoptimizeReason::k##Reason) { \
- return &cache_.kDeoptimizeIf##Kind##Reason##Operator; \
+const Operator* CommonOperatorBuilder::DeoptimizeIf(
+ DeoptimizeKind kind, DeoptimizeReason reason,
+ VectorSlotPair const& feedback) {
+#define CACHED_DEOPTIMIZE_IF(Kind, Reason) \
+ if (kind == DeoptimizeKind::k##Kind && \
+ reason == DeoptimizeReason::k##Reason && !feedback.IsValid()) { \
+ return &cache_.kDeoptimizeIf##Kind##Reason##Operator; \
}
CACHED_DEOPTIMIZE_IF_LIST(CACHED_DEOPTIMIZE_IF)
#undef CACHED_DEOPTIMIZE_IF
// Uncached
- DeoptimizeParameters parameter(kind, reason);
+ DeoptimizeParameters parameter(kind, reason, feedback);
return new (zone()) Operator1<DeoptimizeParameters>( // --
IrOpcode::kDeoptimizeIf, // opcode
Operator::kFoldable | Operator::kNoThrow, // properties
@@ -856,16 +860,17 @@ const Operator* CommonOperatorBuilder::DeoptimizeIf(DeoptimizeKind kind,
}
const Operator* CommonOperatorBuilder::DeoptimizeUnless(
- DeoptimizeKind kind, DeoptimizeReason reason) {
-#define CACHED_DEOPTIMIZE_UNLESS(Kind, Reason) \
- if (kind == DeoptimizeKind::k##Kind && \
- reason == DeoptimizeReason::k##Reason) { \
- return &cache_.kDeoptimizeUnless##Kind##Reason##Operator; \
+ DeoptimizeKind kind, DeoptimizeReason reason,
+ VectorSlotPair const& feedback) {
+#define CACHED_DEOPTIMIZE_UNLESS(Kind, Reason) \
+ if (kind == DeoptimizeKind::k##Kind && \
+ reason == DeoptimizeReason::k##Reason && !feedback.IsValid()) { \
+ return &cache_.kDeoptimizeUnless##Kind##Reason##Operator; \
}
CACHED_DEOPTIMIZE_UNLESS_LIST(CACHED_DEOPTIMIZE_UNLESS)
#undef CACHED_DEOPTIMIZE_UNLESS
// Uncached
- DeoptimizeParameters parameter(kind, reason);
+ DeoptimizeParameters parameter(kind, reason, feedback);
return new (zone()) Operator1<DeoptimizeParameters>( // --
IrOpcode::kDeoptimizeUnless, // opcode
Operator::kFoldable | Operator::kNoThrow, // properties
@@ -1131,7 +1136,7 @@ const Operator* CommonOperatorBuilder::TypeGuard(Type* type) {
return new (zone()) Operator1<Type*>( // --
IrOpcode::kTypeGuard, Operator::kPure, // opcode
"TypeGuard", // name
- 1, 0, 1, 1, 0, 0, // counts
+ 1, 1, 1, 1, 1, 0, // counts
type); // parameter
}
@@ -1278,6 +1283,11 @@ uint32_t ObjectIdOf(Operator const* op) {
}
}
+MachineRepresentation DeadValueRepresentationOf(Operator const* op) {
+ DCHECK_EQ(IrOpcode::kDeadValue, op->opcode());
+ return OpParameter<MachineRepresentation>(op);
+}
+
const Operator* CommonOperatorBuilder::FrameState(
BailoutId bailout_id, OutputFrameStateCombine state_combine,
const FrameStateFunctionInfo* function_info) {
@@ -1393,6 +1403,31 @@ CommonOperatorBuilder::CreateFrameStateFunctionInfo(
FrameStateFunctionInfo(type, parameter_count, local_count, shared_info);
}
+const Operator* CommonOperatorBuilder::DeadValue(MachineRepresentation rep) {
+ return new (zone()) Operator1<MachineRepresentation>( // --
+ IrOpcode::kDeadValue, Operator::kPure, // opcode
+ "DeadValue", // name
+ 1, 0, 0, 1, 0, 0, // counts
+ rep); // parameter
+}
+
+#undef COMMON_CACHED_OP_LIST
+#undef CACHED_RETURN_LIST
+#undef CACHED_END_LIST
+#undef CACHED_EFFECT_PHI_LIST
+#undef CACHED_INDUCTION_VARIABLE_PHI_LIST
+#undef CACHED_LOOP_LIST
+#undef CACHED_MERGE_LIST
+#undef CACHED_DEOPTIMIZE_LIST
+#undef CACHED_DEOPTIMIZE_IF_LIST
+#undef CACHED_DEOPTIMIZE_UNLESS_LIST
+#undef CACHED_TRAP_IF_LIST
+#undef CACHED_TRAP_UNLESS_LIST
+#undef CACHED_PARAMETER_LIST
+#undef CACHED_PHI_LIST
+#undef CACHED_PROJECTION_LIST
+#undef CACHED_STATE_VALUES_LIST
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compiler/common-operator.h b/deps/v8/src/compiler/common-operator.h
index 06541d9a38..0e0614dced 100644
--- a/deps/v8/src/compiler/common-operator.h
+++ b/deps/v8/src/compiler/common-operator.h
@@ -11,6 +11,7 @@
#include "src/deoptimize-reason.h"
#include "src/globals.h"
#include "src/machine-type.h"
+#include "src/vector-slot-pair.h"
#include "src/zone/zone-containers.h"
#include "src/zone/zone-handle-set.h"
@@ -52,15 +53,18 @@ int ValueInputCountOfReturn(Operator const* const op);
// Parameters for the {Deoptimize} operator.
class DeoptimizeParameters final {
public:
- DeoptimizeParameters(DeoptimizeKind kind, DeoptimizeReason reason)
- : kind_(kind), reason_(reason) {}
+ DeoptimizeParameters(DeoptimizeKind kind, DeoptimizeReason reason,
+ VectorSlotPair const& feedback)
+ : kind_(kind), reason_(reason), feedback_(feedback) {}
DeoptimizeKind kind() const { return kind_; }
DeoptimizeReason reason() const { return reason_; }
+ const VectorSlotPair& feedback() const { return feedback_; }
private:
DeoptimizeKind const kind_;
DeoptimizeReason const reason_;
+ VectorSlotPair const feedback_;
};
bool operator==(DeoptimizeParameters, DeoptimizeParameters);
@@ -338,6 +342,8 @@ ArgumentsStateType ArgumentsStateTypeOf(Operator const*) WARN_UNUSED_RESULT;
uint32_t ObjectIdOf(Operator const*);
+MachineRepresentation DeadValueRepresentationOf(Operator const*);
+
// Interface for building common operators that can be used at any level of IR,
// including JavaScript, mid-level, and low-level.
class V8_EXPORT_PRIVATE CommonOperatorBuilder final
@@ -346,7 +352,7 @@ class V8_EXPORT_PRIVATE CommonOperatorBuilder final
explicit CommonOperatorBuilder(Zone* zone);
const Operator* Dead();
- const Operator* DeadValue();
+ const Operator* DeadValue(MachineRepresentation rep);
const Operator* Unreachable();
const Operator* End(size_t control_input_count);
const Operator* Branch(BranchHint = BranchHint::kNone);
@@ -358,10 +364,12 @@ class V8_EXPORT_PRIVATE CommonOperatorBuilder final
const Operator* IfValue(int32_t value);
const Operator* IfDefault();
const Operator* Throw();
- const Operator* Deoptimize(DeoptimizeKind kind, DeoptimizeReason reason);
- const Operator* DeoptimizeIf(DeoptimizeKind kind, DeoptimizeReason reason);
- const Operator* DeoptimizeUnless(DeoptimizeKind kind,
- DeoptimizeReason reason);
+ const Operator* Deoptimize(DeoptimizeKind kind, DeoptimizeReason reason,
+ VectorSlotPair const& feedback);
+ const Operator* DeoptimizeIf(DeoptimizeKind kind, DeoptimizeReason reason,
+ VectorSlotPair const& feedback);
+ const Operator* DeoptimizeUnless(DeoptimizeKind kind, DeoptimizeReason reason,
+ VectorSlotPair const& feedback);
const Operator* TrapIf(int32_t trap_id);
const Operator* TrapUnless(int32_t trap_id);
const Operator* Return(int value_input_count = 1);
diff --git a/deps/v8/src/compiler/dead-code-elimination.cc b/deps/v8/src/compiler/dead-code-elimination.cc
index d40bc37b6d..523d37fe29 100644
--- a/deps/v8/src/compiler/dead-code-elimination.cc
+++ b/deps/v8/src/compiler/dead-code-elimination.cc
@@ -21,10 +21,8 @@ DeadCodeElimination::DeadCodeElimination(Editor* editor, Graph* graph,
graph_(graph),
common_(common),
dead_(graph->NewNode(common->Dead())),
- dead_value_(graph->NewNode(common->DeadValue())),
zone_(temp_zone) {
NodeProperties::SetType(dead_, Type::None());
- NodeProperties::SetType(dead_value_, Type::None());
}
namespace {
@@ -38,11 +36,11 @@ bool NoReturn(Node* node) {
NodeProperties::GetTypeOrAny(node)->IsNone();
}
-bool HasDeadInput(Node* node) {
+Node* FindDeadInput(Node* node) {
for (Node* input : node->inputs()) {
- if (NoReturn(input)) return true;
+ if (NoReturn(input)) return input;
}
- return false;
+ return nullptr;
}
} // namespace
@@ -209,17 +207,27 @@ Reduction DeadCodeElimination::ReducePhi(Node* node) {
DCHECK_EQ(IrOpcode::kPhi, node->opcode());
Reduction reduction = PropagateDeadControl(node);
if (reduction.Changed()) return reduction;
- if (PhiRepresentationOf(node->op()) == MachineRepresentation::kNone ||
+ MachineRepresentation rep = PhiRepresentationOf(node->op());
+ if (rep == MachineRepresentation::kNone ||
NodeProperties::GetTypeOrAny(node)->IsNone()) {
- return Replace(dead_value());
+ return Replace(DeadValue(node, rep));
+ }
+ int input_count = node->op()->ValueInputCount();
+ for (int i = 0; i < input_count; ++i) {
+ Node* input = NodeProperties::GetValueInput(node, i);
+ if (input->opcode() == IrOpcode::kDeadValue &&
+ DeadValueRepresentationOf(input->op()) != rep) {
+ NodeProperties::ReplaceValueInput(node, DeadValue(input, rep), i);
+ }
}
return NoChange();
}
Reduction DeadCodeElimination::ReducePureNode(Node* node) {
DCHECK_EQ(0, node->op()->EffectInputCount());
- if (HasDeadInput(node)) {
- return Replace(dead_value());
+ if (node->opcode() == IrOpcode::kDeadValue) return NoChange();
+ if (Node* input = FindDeadInput(node)) {
+ return Replace(DeadValue(input));
}
return NoChange();
}
@@ -234,8 +242,7 @@ Reduction DeadCodeElimination::ReduceUnreachableOrIfException(Node* node) {
return Replace(effect);
}
if (effect->opcode() == IrOpcode::kUnreachable) {
- RelaxEffectsAndControls(node);
- return Replace(dead_value());
+ return Replace(effect);
}
return NoChange();
}
@@ -246,10 +253,10 @@ Reduction DeadCodeElimination::ReduceEffectNode(Node* node) {
if (effect->opcode() == IrOpcode::kDead) {
return Replace(effect);
}
- if (HasDeadInput(node)) {
+ if (Node* input = FindDeadInput(node)) {
if (effect->opcode() == IrOpcode::kUnreachable) {
RelaxEffectsAndControls(node);
- return Replace(dead_value());
+ return Replace(DeadValue(input));
}
Node* control = node->op()->ControlInputCount() == 1
@@ -257,7 +264,8 @@ Reduction DeadCodeElimination::ReduceEffectNode(Node* node) {
: graph()->start();
Node* unreachable =
graph()->NewNode(common()->Unreachable(), effect, control);
- ReplaceWithValue(node, dead_value(), node, control);
+ NodeProperties::SetType(unreachable, Type::None());
+ ReplaceWithValue(node, DeadValue(input), node, control);
return Replace(unreachable);
}
@@ -270,11 +278,12 @@ Reduction DeadCodeElimination::ReduceDeoptimizeOrReturnOrTerminate(Node* node) {
node->opcode() == IrOpcode::kTerminate);
Reduction reduction = PropagateDeadControl(node);
if (reduction.Changed()) return reduction;
- if (HasDeadInput(node)) {
+ if (FindDeadInput(node) != nullptr) {
Node* effect = NodeProperties::GetEffectInput(node, 0);
Node* control = NodeProperties::GetControlInput(node, 0);
if (effect->opcode() != IrOpcode::kUnreachable) {
effect = graph()->NewNode(common()->Unreachable(), effect, control);
+ NodeProperties::SetType(effect, Type::None());
}
node->TrimInputCount(2);
node->ReplaceInput(0, effect);
@@ -322,6 +331,16 @@ void DeadCodeElimination::TrimMergeOrPhi(Node* node, int size) {
NodeProperties::ChangeOp(node, op);
}
+Node* DeadCodeElimination::DeadValue(Node* node, MachineRepresentation rep) {
+ if (node->opcode() == IrOpcode::kDeadValue) {
+ if (rep == DeadValueRepresentationOf(node->op())) return node;
+ node = NodeProperties::GetValueInput(node, 0);
+ }
+ Node* dead_value = graph()->NewNode(common()->DeadValue(rep), node);
+ NodeProperties::SetType(dead_value, Type::None());
+ return dead_value;
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
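The dead_value_ field is gone because a single shared node can no longer stand in for every dead value: each {DeadValue} now carries a MachineRepresentation and keeps the causing {Type::None()} node as its input. A rough standalone model of the new DeadValue helper, using toy types rather than the real Node and Operator classes:

#include <memory>
#include <vector>

// Toy stand-ins, only to show the unwrap-and-rewrap shape of the helper.
enum class MachineRepresentation { kNone, kWord32, kTagged };

struct ToyNode {
  bool is_dead_value = false;
  MachineRepresentation rep = MachineRepresentation::kNone;
  ToyNode* cause = nullptr;  // the Type::None() node that made this dead
};

// Mirrors DeadCodeElimination::DeadValue: reuse an existing DeadValue when the
// representation already matches, otherwise rewrap its original cause.
ToyNode* DeadValue(std::vector<std::unique_ptr<ToyNode>>& graph, ToyNode* node,
                   MachineRepresentation rep) {
  if (node->is_dead_value) {
    if (node->rep == rep) return node;
    node = node->cause;
  }
  graph.push_back(std::make_unique<ToyNode>());
  ToyNode* dead = graph.back().get();
  dead->is_dead_value = true;
  dead->rep = rep;
  dead->cause = node;
  return dead;
}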
diff --git a/deps/v8/src/compiler/dead-code-elimination.h b/deps/v8/src/compiler/dead-code-elimination.h
index b1e403ca86..217d58ef31 100644
--- a/deps/v8/src/compiler/dead-code-elimination.h
+++ b/deps/v8/src/compiler/dead-code-elimination.h
@@ -8,6 +8,7 @@
#include "src/base/compiler-specific.h"
#include "src/compiler/graph-reducer.h"
#include "src/globals.h"
+#include "src/machine-type.h"
namespace v8 {
namespace internal {
@@ -17,13 +18,23 @@ namespace compiler {
class CommonOperatorBuilder;
// Propagates {Dead} control and {DeadValue} values through the graph and
-// thereby removes dead code. When {DeadValue} hits the effect chain, a crashing
-// {Unreachable} node is inserted and the rest of the effect chain is collapsed.
-// We wait for the {EffectControlLinearizer} to connect {Unreachable} nodes to
-// the graph end, since this is much easier if there is no floating control.
-// We detect dead values based on types, pruning uses of DeadValue except for
-// uses by phi. These remaining uses are eliminated in the
-// {EffectControlLinearizer}, where they are replaced with dummy values.
+// thereby removes dead code.
+// We detect dead values based on types, replacing uses of nodes with
+// {Type::None()} with {DeadValue}. A pure node (other than a phi) using
+// {DeadValue} is replaced by {DeadValue}. When {DeadValue} hits the effect
+// chain, a crashing {Unreachable} node is inserted and the rest of the effect
+// chain is collapsed. We wait for the {EffectControlLinearizer} to connect
+// {Unreachable} nodes to the graph end, since this is much easier if there is
+// no floating control.
+// {DeadValue} has an input, which has to have {Type::None()}. This input is
+// important to maintain the dependency on the cause of the unreachable code.
+// {Unreachable} has a value output and {Type::None()} so it can be used by
+// {DeadValue}.
+// {DeadValue} nodes track a {MachineRepresentation} so they can be lowered to a
+// value-producing node. {DeadValue} has the runtime semantics of crashing and
+// behaves like a constant of its representation so it can be used in gap moves.
+// Since phi nodes are the only remaining use of {DeadValue}, this
+// representation is only adjusted for uses by phi nodes.
// In contrast to {DeadValue}, {Dead} can never remain in the graph.
class V8_EXPORT_PRIVATE DeadCodeElimination final
: public NON_EXPORTED_BASE(AdvancedReducer) {
@@ -53,15 +64,16 @@ class V8_EXPORT_PRIVATE DeadCodeElimination final
void TrimMergeOrPhi(Node* node, int size);
+ Node* DeadValue(Node* none_node,
+ MachineRepresentation rep = MachineRepresentation::kNone);
+
Graph* graph() const { return graph_; }
CommonOperatorBuilder* common() const { return common_; }
Node* dead() const { return dead_; }
- Node* dead_value() const { return dead_value_; }
Graph* const graph_;
CommonOperatorBuilder* const common_;
Node* const dead_;
- Node* const dead_value_;
Zone* zone_;
DISALLOW_COPY_AND_ASSIGN(DeadCodeElimination);
diff --git a/deps/v8/src/compiler/effect-control-linearizer.cc b/deps/v8/src/compiler/effect-control-linearizer.cc
index 2372a0fe40..a47941e28d 100644
--- a/deps/v8/src/compiler/effect-control-linearizer.cc
+++ b/deps/v8/src/compiler/effect-control-linearizer.cc
@@ -145,9 +145,10 @@ bool HasIncomingBackEdges(BasicBlock* block) {
return false;
}
-void RemoveRegionNode(Node* node) {
+void RemoveRenameNode(Node* node) {
DCHECK(IrOpcode::kFinishRegion == node->opcode() ||
- IrOpcode::kBeginRegion == node->opcode());
+ IrOpcode::kBeginRegion == node->opcode() ||
+ IrOpcode::kTypeGuard == node->opcode());
// Update the value/context uses to the value input of the finish node and
// the effect uses to the effect input.
for (Edge edge : node->use_edges()) {
@@ -318,28 +319,6 @@ void TryCloneBranch(Node* node, BasicBlock* block, Zone* temp_zone,
merge->Kill();
}
-Node* DummyValue(JSGraph* jsgraph, MachineRepresentation rep) {
- switch (rep) {
- case MachineRepresentation::kTagged:
- case MachineRepresentation::kTaggedSigned:
- return jsgraph->SmiConstant(0xdead);
- case MachineRepresentation::kTaggedPointer:
- return jsgraph->TheHoleConstant();
- case MachineRepresentation::kWord64:
- return jsgraph->Int64Constant(0xdead);
- case MachineRepresentation::kWord32:
- return jsgraph->Int32Constant(0xdead);
- case MachineRepresentation::kFloat64:
- return jsgraph->Float64Constant(0xdead);
- case MachineRepresentation::kFloat32:
- return jsgraph->Float32Constant(0xdead);
- case MachineRepresentation::kBit:
- return jsgraph->Int32Constant(0);
- default:
- UNREACHABLE();
- }
-}
-
} // namespace
void EffectControlLinearizer::Run() {
@@ -369,7 +348,6 @@ void EffectControlLinearizer::Run() {
// Iterate over the phis and update the effect phis.
Node* effect_phi = nullptr;
Node* terminate = nullptr;
- int predecessor_count = static_cast<int>(block->PredecessorCount());
for (; instr < block->NodeCount(); instr++) {
Node* node = block->NodeAt(instr);
// Only go through the phis and effect phis.
@@ -380,19 +358,7 @@ void EffectControlLinearizer::Run() {
DCHECK_NE(IrOpcode::kIfException, control->opcode());
effect_phi = node;
} else if (node->opcode() == IrOpcode::kPhi) {
- DCHECK_EQ(predecessor_count, node->op()->ValueInputCount());
- for (int i = 0; i < predecessor_count; ++i) {
- if (NodeProperties::GetValueInput(node, i)->opcode() ==
- IrOpcode::kDeadValue) {
- // Phi uses of {DeadValue} must originate from unreachable code. Due
- // to schedule freedom between the effect and the control chain,
- // they might still appear in reachable code. So we replace them
- // with a dummy value.
- NodeProperties::ReplaceValueInput(
- node, DummyValue(jsgraph(), PhiRepresentationOf(node->op())),
- i);
- }
- }
+ // Just skip phis.
} else if (node->opcode() == IrOpcode::kTerminate) {
DCHECK_NULL(terminate);
terminate = node;
@@ -573,7 +539,7 @@ void EffectControlLinearizer::ProcessNode(Node* node, Node** frame_state,
region_observability_ = RegionObservability::kObservable;
// Update the value uses to the value input of the finish node and
// the effect uses to the effect input.
- return RemoveRegionNode(node);
+ return RemoveRenameNode(node);
}
if (node->opcode() == IrOpcode::kBeginRegion) {
// Determine the observability for this region and use that for all
@@ -583,7 +549,10 @@ void EffectControlLinearizer::ProcessNode(Node* node, Node** frame_state,
region_observability_ = RegionObservabilityOf(node->op());
// Update the value uses to the value input of the finish node and
// the effect uses to the effect input.
- return RemoveRegionNode(node);
+ return RemoveRenameNode(node);
+ }
+ if (node->opcode() == IrOpcode::kTypeGuard) {
+ return RemoveRenameNode(node);
}
// Special treatment for checkpoint nodes.
@@ -781,6 +750,9 @@ bool EffectControlLinearizer::TryWireInStateEffect(Node* node,
case IrOpcode::kCheckedTruncateTaggedToWord32:
result = LowerCheckedTruncateTaggedToWord32(node, frame_state);
break;
+ case IrOpcode::kNumberToString:
+ result = LowerNumberToString(node);
+ break;
case IrOpcode::kObjectIsArrayBufferView:
result = LowerObjectIsArrayBufferView(node);
break;
@@ -847,12 +819,17 @@ bool EffectControlLinearizer::TryWireInStateEffect(Node* node,
case IrOpcode::kNewArgumentsElements:
result = LowerNewArgumentsElements(node);
break;
+ case IrOpcode::kNewConsString:
+ result = LowerNewConsString(node);
+ break;
case IrOpcode::kArrayBufferWasNeutered:
result = LowerArrayBufferWasNeutered(node);
break;
case IrOpcode::kSameValue:
result = LowerSameValue(node);
break;
+ case IrOpcode::kDeadValue:
+ result = LowerDeadValue(node);
case IrOpcode::kStringFromCharCode:
result = LowerStringFromCharCode(node);
break;
@@ -862,6 +839,9 @@ bool EffectControlLinearizer::TryWireInStateEffect(Node* node,
case IrOpcode::kStringIndexOf:
result = LowerStringIndexOf(node);
break;
+ case IrOpcode::kStringLength:
+ result = LowerStringLength(node);
+ break;
case IrOpcode::kStringToNumber:
result = LowerStringToNumber(node);
break;
@@ -874,6 +854,12 @@ bool EffectControlLinearizer::TryWireInStateEffect(Node* node,
case IrOpcode::kSeqStringCharCodeAt:
result = LowerSeqStringCharCodeAt(node);
break;
+ case IrOpcode::kStringCodePointAt:
+ result = LowerStringCodePointAt(node);
+ break;
+ case IrOpcode::kSeqStringCodePointAt:
+ result = LowerSeqStringCharCodeAt(node);
+ break;
case IrOpcode::kStringToLowerCaseIntl:
result = LowerStringToLowerCaseIntl(node);
break;
@@ -889,6 +875,9 @@ bool EffectControlLinearizer::TryWireInStateEffect(Node* node,
case IrOpcode::kStringLessThanOrEqual:
result = LowerStringLessThanOrEqual(node);
break;
+ case IrOpcode::kNumberIsFloat64Hole:
+ result = LowerNumberIsFloat64Hole(node);
+ break;
case IrOpcode::kCheckFloat64Hole:
result = LowerCheckFloat64Hole(node, frame_state);
break;
@@ -1136,6 +1125,7 @@ void EffectControlLinearizer::TruncateTaggedPointerToBit(
Node* value = node->InputAt(0);
auto if_heapnumber = __ MakeDeferredLabel();
+ auto if_bigint = __ MakeDeferredLabel();
Node* zero = __ Int32Constant(0);
Node* fzero = __ Float64Constant(0.0);
@@ -1154,15 +1144,22 @@ void EffectControlLinearizer::TruncateTaggedPointerToBit(
Node* value_map_bitfield =
__ LoadField(AccessBuilder::ForMapBitField(), value_map);
__ GotoIfNot(
- __ Word32Equal(__ Word32And(value_map_bitfield,
- __ Int32Constant(1 << Map::kIsUndetectable)),
- zero),
+ __ Word32Equal(
+ __ Word32And(value_map_bitfield,
+ __ Int32Constant(Map::IsUndetectableBit::kMask)),
+ zero),
done, zero);
// Check if {value} is a HeapNumber.
__ GotoIf(__ WordEqual(value_map, __ HeapNumberMapConstant()),
&if_heapnumber);
+ // Check if {value} is a BigInt.
+ Node* value_instance_type =
+ __ LoadField(AccessBuilder::ForMapInstanceType(), value_map);
+ __ GotoIf(__ Word32Equal(value_instance_type, __ Int32Constant(BIGINT_TYPE)),
+ &if_bigint);
+
// All other values that reach here are true.
__ Goto(done, __ Int32Constant(1));
@@ -1174,6 +1171,15 @@ void EffectControlLinearizer::TruncateTaggedPointerToBit(
__ LoadField(AccessBuilder::ForHeapNumberValue(), value);
__ Goto(done, __ Float64LessThan(fzero, __ Float64Abs(value_value)));
}
+
+ __ Bind(&if_bigint);
+ {
+ Node* bitfield = __ LoadField(AccessBuilder::ForBigIntBitfield(), value);
+ Node* length_is_zero = __ WordEqual(
+ __ WordAnd(bitfield, __ IntPtrConstant(BigInt::LengthBits::kMask)),
+ __ IntPtrConstant(0));
+ __ Goto(done, __ Word32Equal(length_is_zero, zero));
+ }
}
Node* EffectControlLinearizer::LowerTruncateTaggedToBit(Node* node) {
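The heap-number branch above maps a double to a bit with 0 < |value|, which is false for +0, -0 and NaN (NaN comparisons always evaluate to false), and the new BigInt branch maps a BigInt to false exactly when its length bits are zero. A minimal C++ model of the numeric rule:

#include <cassert>
#include <cmath>

// Plain C++ model of the HeapNumber branch: 0.0, -0.0 and NaN are falsy,
// everything else is truthy, because comparisons against NaN are false.
bool HeapNumberToBoolean(double value) { return 0.0 < std::fabs(value); }

int main() {
  assert(!HeapNumberToBoolean(0.0));
  assert(!HeapNumberToBoolean(-0.0));
  assert(!HeapNumberToBoolean(std::nan("")));
  assert(HeapNumberToBoolean(-3.5));
  // The new BigInt branch follows the same idea: a BigInt is truthy exactly
  // when the length field in its bitfield is nonzero (i.e. it is not 0n).
  return 0;
}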
@@ -1294,9 +1300,11 @@ Node* EffectControlLinearizer::LowerTruncateTaggedToFloat64(Node* node) {
Node* EffectControlLinearizer::LowerCheckBounds(Node* node, Node* frame_state) {
Node* index = node->InputAt(0);
Node* limit = node->InputAt(1);
+ const CheckParameters& params = CheckParametersOf(node->op());
Node* check = __ Uint32LessThan(index, limit);
- __ DeoptimizeIfNot(DeoptimizeReason::kOutOfBounds, check, frame_state);
+ __ DeoptimizeIfNot(DeoptimizeReason::kOutOfBounds, params.feedback(), check,
+ frame_state);
return index;
}
@@ -1305,9 +1313,12 @@ Node* EffectControlLinearizer::LowerMaskIndexWithBound(Node* node) {
if (mask_array_index_ == kMaskArrayIndex) {
Node* limit = node->InputAt(1);
- Node* mask = __ Word32Sar(__ Word32Or(__ Int32Sub(limit, index), index),
- __ Int32Constant(31));
- mask = __ Word32Xor(mask, __ Int32Constant(-1));
+ // mask = ((index - limit) & ~index) >> 31
+ // index = index & mask
+ Node* neg_index = __ Word32Xor(index, __ Int32Constant(-1));
+ Node* mask =
+ __ Word32Sar(__ Word32And(__ Int32Sub(index, limit), neg_index),
+ __ Int32Constant(31));
index = __ Word32And(index, mask);
}
return index;
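The rewritten mask is the usual bounds-clamping trick: when 0 <= index < limit, both (index - limit) and ~index are negative, the arithmetic shift turns the sign bit into an all-ones mask, and the index passes through; otherwise the mask is zero and the index collapses to 0. A standalone check of the arithmetic, assuming 32-bit two's complement with an arithmetic right shift, which is what the Word32Sar node provides:

#include <cassert>
#include <cstdint>

// mask = ((index - limit) & ~index) >> 31;  index &= mask
uint32_t MaskIndexWithBound(uint32_t index, uint32_t limit) {
  int32_t neg_index = static_cast<int32_t>(index ^ 0xFFFFFFFFu);  // ~index
  int32_t diff = static_cast<int32_t>(index - limit);
  uint32_t mask = static_cast<uint32_t>((diff & neg_index) >> 31);  // sign-extends
  return index & mask;
}

int main() {
  assert(MaskIndexWithBound(3, 10) == 3);      // in bounds: mask is all ones
  assert(MaskIndexWithBound(10, 10) == 0);     // out of bounds: clamped to 0
  assert(MaskIndexWithBound(12345, 10) == 0);  // far out of bounds: also 0
  return 0;
}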
@@ -1346,10 +1357,11 @@ void EffectControlLinearizer::LowerCheckMaps(Node* node, Node* frame_state) {
Node* bitfield3 =
__ LoadField(AccessBuilder::ForMapBitField3(), value_map);
Node* if_not_deprecated = __ WordEqual(
- __ Word32And(bitfield3, __ Int32Constant(Map::Deprecated::kMask)),
+ __ Word32And(bitfield3,
+ __ Int32Constant(Map::IsDeprecatedBit::kMask)),
__ Int32Constant(0));
- __ DeoptimizeIf(DeoptimizeReason::kWrongMap, if_not_deprecated,
- frame_state);
+ __ DeoptimizeIf(DeoptimizeReason::kWrongMap, p.feedback(),
+ if_not_deprecated, frame_state);
Operator::Properties properties = Operator::kNoDeopt | Operator::kNoThrow;
Runtime::FunctionId id = Runtime::kTryMigrateInstance;
@@ -1360,8 +1372,8 @@ void EffectControlLinearizer::LowerCheckMaps(Node* node, Node* frame_state) {
__ ExternalConstant(ExternalReference(id, isolate())),
__ Int32Constant(1), __ NoContextConstant());
Node* check = ObjectIsSmi(result);
- __ DeoptimizeIf(DeoptimizeReason::kInstanceMigrationFailed, check,
- frame_state);
+ __ DeoptimizeIf(DeoptimizeReason::kInstanceMigrationFailed, p.feedback(),
+ check, frame_state);
}
// Reload the current map of the {value}.
@@ -1372,7 +1384,8 @@ void EffectControlLinearizer::LowerCheckMaps(Node* node, Node* frame_state) {
Node* map = __ HeapConstant(maps[i]);
Node* check = __ WordEqual(value_map, map);
if (i == map_count - 1) {
- __ DeoptimizeIfNot(DeoptimizeReason::kWrongMap, check, frame_state);
+ __ DeoptimizeIfNot(DeoptimizeReason::kWrongMap, p.feedback(), check,
+ frame_state);
} else {
__ GotoIf(check, &done);
}
@@ -1390,7 +1403,8 @@ void EffectControlLinearizer::LowerCheckMaps(Node* node, Node* frame_state) {
Node* map = __ HeapConstant(maps[i]);
Node* check = __ WordEqual(value_map, map);
if (i == map_count - 1) {
- __ DeoptimizeIfNot(DeoptimizeReason::kWrongMap, check, frame_state);
+ __ DeoptimizeIfNot(DeoptimizeReason::kWrongMap, p.feedback(), check,
+ frame_state);
} else {
__ GotoIf(check, &done);
}
@@ -1423,6 +1437,7 @@ Node* EffectControlLinearizer::LowerCompareMaps(Node* node) {
Node* EffectControlLinearizer::LowerCheckNumber(Node* node, Node* frame_state) {
Node* value = node->InputAt(0);
+ const CheckParameters& params = CheckParametersOf(node->op());
auto if_not_smi = __ MakeDeferredLabel();
auto done = __ MakeLabel();
@@ -1434,7 +1449,8 @@ Node* EffectControlLinearizer::LowerCheckNumber(Node* node, Node* frame_state) {
__ Bind(&if_not_smi);
Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
Node* check1 = __ WordEqual(value_map, __ HeapNumberMapConstant());
- __ DeoptimizeIfNot(DeoptimizeReason::kNotAHeapNumber, check1, frame_state);
+ __ DeoptimizeIfNot(DeoptimizeReason::kNotAHeapNumber, params.feedback(),
+ check1, frame_state);
__ Goto(&done);
__ Bind(&done);
@@ -1452,8 +1468,8 @@ Node* EffectControlLinearizer::LowerCheckReceiver(Node* node,
STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
Node* check = __ Uint32LessThanOrEqual(
__ Uint32Constant(FIRST_JS_RECEIVER_TYPE), value_instance_type);
- __ DeoptimizeIfNot(DeoptimizeReason::kNotAJavaScriptObject, check,
- frame_state);
+ __ DeoptimizeIfNot(DeoptimizeReason::kNotAJavaScriptObject, VectorSlotPair(),
+ check, frame_state);
return value;
}
@@ -1464,12 +1480,14 @@ Node* EffectControlLinearizer::LowerCheckSymbol(Node* node, Node* frame_state) {
Node* check =
__ WordEqual(value_map, __ HeapConstant(factory()->symbol_map()));
- __ DeoptimizeIfNot(DeoptimizeReason::kNotASymbol, check, frame_state);
+ __ DeoptimizeIfNot(DeoptimizeReason::kNotASymbol, VectorSlotPair(), check,
+ frame_state);
return value;
}
Node* EffectControlLinearizer::LowerCheckString(Node* node, Node* frame_state) {
Node* value = node->InputAt(0);
+ const CheckParameters& params = CheckParametersOf(node->op());
Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
Node* value_instance_type =
@@ -1477,7 +1495,8 @@ Node* EffectControlLinearizer::LowerCheckString(Node* node, Node* frame_state) {
Node* check = __ Uint32LessThan(value_instance_type,
__ Uint32Constant(FIRST_NONSTRING_TYPE));
- __ DeoptimizeIfNot(DeoptimizeReason::kWrongInstanceType, check, frame_state);
+ __ DeoptimizeIfNot(DeoptimizeReason::kWrongInstanceType, params.feedback(),
+ check, frame_state);
return value;
}
@@ -1494,7 +1513,8 @@ Node* EffectControlLinearizer::LowerCheckSeqString(Node* node,
value_instance_type,
__ Int32Constant(kStringRepresentationMask | kIsNotStringMask)),
__ Int32Constant(kSeqStringTag | kStringTag));
- __ DeoptimizeIfNot(DeoptimizeReason::kWrongInstanceType, check, frame_state);
+ __ DeoptimizeIfNot(DeoptimizeReason::kWrongInstanceType, VectorSlotPair(),
+ check, frame_state);
return value;
}
@@ -1510,7 +1530,8 @@ Node* EffectControlLinearizer::LowerCheckInternalizedString(Node* node,
__ Word32And(value_instance_type,
__ Int32Constant(kIsNotStringMask | kIsNotInternalizedMask)),
__ Int32Constant(kInternalizedTag));
- __ DeoptimizeIfNot(DeoptimizeReason::kWrongInstanceType, check, frame_state);
+ __ DeoptimizeIfNot(DeoptimizeReason::kWrongInstanceType, VectorSlotPair(),
+ check, frame_state);
return value;
}
@@ -1518,7 +1539,7 @@ Node* EffectControlLinearizer::LowerCheckInternalizedString(Node* node,
void EffectControlLinearizer::LowerCheckIf(Node* node, Node* frame_state) {
Node* value = node->InputAt(0);
__ DeoptimizeIfNot(DeoptimizeKind::kEager, DeoptimizeReasonOf(node->op()),
- value, frame_state);
+ VectorSlotPair(), value, frame_state);
}
Node* EffectControlLinearizer::LowerCheckedInt32Add(Node* node,
@@ -1528,7 +1549,8 @@ Node* EffectControlLinearizer::LowerCheckedInt32Add(Node* node,
Node* value = __ Int32AddWithOverflow(lhs, rhs);
Node* check = __ Projection(1, value);
- __ DeoptimizeIf(DeoptimizeReason::kOverflow, check, frame_state);
+ __ DeoptimizeIf(DeoptimizeReason::kOverflow, VectorSlotPair(), check,
+ frame_state);
return __ Projection(0, value);
}
@@ -1539,7 +1561,8 @@ Node* EffectControlLinearizer::LowerCheckedInt32Sub(Node* node,
Node* value = __ Int32SubWithOverflow(lhs, rhs);
Node* check = __ Projection(1, value);
- __ DeoptimizeIf(DeoptimizeReason::kOverflow, check, frame_state);
+ __ DeoptimizeIf(DeoptimizeReason::kOverflow, VectorSlotPair(), check,
+ frame_state);
return __ Projection(0, value);
}
@@ -1567,11 +1590,13 @@ Node* EffectControlLinearizer::LowerCheckedInt32Div(Node* node,
// Check if {rhs} is zero.
Node* check = __ Word32Equal(rhs, zero);
- __ DeoptimizeIf(DeoptimizeReason::kDivisionByZero, check, frame_state);
+ __ DeoptimizeIf(DeoptimizeReason::kDivisionByZero, VectorSlotPair(), check,
+ frame_state);
// Check if {lhs} is zero, as that would produce minus zero.
check = __ Word32Equal(lhs, zero);
- __ DeoptimizeIf(DeoptimizeReason::kMinusZero, check, frame_state);
+ __ DeoptimizeIf(DeoptimizeReason::kMinusZero, VectorSlotPair(), check,
+ frame_state);
// Check if {lhs} is kMinInt and {rhs} is -1, in which case we'd have
// to return -kMinInt, which is not representable.
@@ -1584,7 +1609,8 @@ Node* EffectControlLinearizer::LowerCheckedInt32Div(Node* node,
// Check if {rhs} is -1.
Node* minusone = __ Int32Constant(-1);
Node* is_minus_one = __ Word32Equal(rhs, minusone);
- __ DeoptimizeIf(DeoptimizeReason::kOverflow, is_minus_one, frame_state);
+ __ DeoptimizeIf(DeoptimizeReason::kOverflow, VectorSlotPair(), is_minus_one,
+ frame_state);
__ Goto(&minint_check_done);
__ Bind(&minint_check_done);
@@ -1597,7 +1623,8 @@ Node* EffectControlLinearizer::LowerCheckedInt32Div(Node* node,
// Check if the remainder is non-zero.
Node* check = __ Word32Equal(lhs, __ Int32Mul(rhs, value));
- __ DeoptimizeIfNot(DeoptimizeReason::kLostPrecision, check, frame_state);
+ __ DeoptimizeIfNot(DeoptimizeReason::kLostPrecision, VectorSlotPair(), check,
+ frame_state);
return value;
}
@@ -1645,7 +1672,8 @@ Node* EffectControlLinearizer::LowerCheckedInt32Mod(Node* node,
// Ensure that {rhs} is not zero, otherwise we'd have to return NaN.
Node* check = __ Word32Equal(vtrue0, zero);
- __ DeoptimizeIf(DeoptimizeReason::kDivisionByZero, check, frame_state);
+ __ DeoptimizeIf(DeoptimizeReason::kDivisionByZero, VectorSlotPair(), check,
+ frame_state);
__ Goto(&rhs_checked, vtrue0);
}
@@ -1679,7 +1707,8 @@ Node* EffectControlLinearizer::LowerCheckedInt32Mod(Node* node,
// Check if we would have to return -0.
Node* check = __ Word32Equal(vtrue1, zero);
- __ DeoptimizeIf(DeoptimizeReason::kMinusZero, check, frame_state);
+ __ DeoptimizeIf(DeoptimizeReason::kMinusZero, VectorSlotPair(), check,
+ frame_state);
__ Goto(&done, vtrue1);
}
@@ -1696,14 +1725,16 @@ Node* EffectControlLinearizer::LowerCheckedUint32Div(Node* node,
// Ensure that {rhs} is not zero, otherwise we'd have to return NaN.
Node* check = __ Word32Equal(rhs, zero);
- __ DeoptimizeIf(DeoptimizeReason::kDivisionByZero, check, frame_state);
+ __ DeoptimizeIf(DeoptimizeReason::kDivisionByZero, VectorSlotPair(), check,
+ frame_state);
// Perform the actual unsigned integer division.
Node* value = __ Uint32Div(lhs, rhs);
// Check if the remainder is non-zero.
check = __ Word32Equal(lhs, __ Int32Mul(rhs, value));
- __ DeoptimizeIfNot(DeoptimizeReason::kLostPrecision, check, frame_state);
+ __ DeoptimizeIfNot(DeoptimizeReason::kLostPrecision, VectorSlotPair(), check,
+ frame_state);
return value;
}
@@ -1716,7 +1747,8 @@ Node* EffectControlLinearizer::LowerCheckedUint32Mod(Node* node,
// Ensure that {rhs} is not zero, otherwise we'd have to return NaN.
Node* check = __ Word32Equal(rhs, zero);
- __ DeoptimizeIf(DeoptimizeReason::kDivisionByZero, check, frame_state);
+ __ DeoptimizeIf(DeoptimizeReason::kDivisionByZero, VectorSlotPair(), check,
+ frame_state);
// Perform the actual unsigned integer modulus.
return __ Uint32Mod(lhs, rhs);
@@ -1730,7 +1762,8 @@ Node* EffectControlLinearizer::LowerCheckedInt32Mul(Node* node,
Node* projection = __ Int32MulWithOverflow(lhs, rhs);
Node* check = __ Projection(1, projection);
- __ DeoptimizeIf(DeoptimizeReason::kOverflow, check, frame_state);
+ __ DeoptimizeIf(DeoptimizeReason::kOverflow, VectorSlotPair(), check,
+ frame_state);
Node* value = __ Projection(0, projection);
@@ -1745,7 +1778,8 @@ Node* EffectControlLinearizer::LowerCheckedInt32Mul(Node* node,
__ Bind(&if_zero);
// We may need to return negative zero.
Node* check_or = __ Int32LessThan(__ Word32Or(lhs, rhs), zero);
- __ DeoptimizeIf(DeoptimizeReason::kMinusZero, check_or, frame_state);
+ __ DeoptimizeIf(DeoptimizeReason::kMinusZero, VectorSlotPair(), check_or,
+ frame_state);
__ Goto(&check_done);
__ Bind(&check_done);
@@ -1758,35 +1792,42 @@ Node* EffectControlLinearizer::LowerCheckedInt32ToTaggedSigned(
Node* node, Node* frame_state) {
DCHECK(SmiValuesAre31Bits());
Node* value = node->InputAt(0);
+ const CheckParameters& params = CheckParametersOf(node->op());
Node* add = __ Int32AddWithOverflow(value, value);
Node* check = __ Projection(1, add);
- __ DeoptimizeIf(DeoptimizeReason::kOverflow, check, frame_state);
+ __ DeoptimizeIf(DeoptimizeReason::kOverflow, params.feedback(), check,
+ frame_state);
return __ Projection(0, add);
}
Node* EffectControlLinearizer::LowerCheckedUint32ToInt32(Node* node,
Node* frame_state) {
Node* value = node->InputAt(0);
+ const CheckParameters& params = CheckParametersOf(node->op());
Node* unsafe = __ Int32LessThan(value, __ Int32Constant(0));
- __ DeoptimizeIf(DeoptimizeReason::kLostPrecision, unsafe, frame_state);
+ __ DeoptimizeIf(DeoptimizeReason::kLostPrecision, params.feedback(), unsafe,
+ frame_state);
return value;
}
Node* EffectControlLinearizer::LowerCheckedUint32ToTaggedSigned(
Node* node, Node* frame_state) {
Node* value = node->InputAt(0);
+ const CheckParameters& params = CheckParametersOf(node->op());
Node* check = __ Uint32LessThanOrEqual(value, SmiMaxValueConstant());
- __ DeoptimizeIfNot(DeoptimizeReason::kLostPrecision, check, frame_state);
+ __ DeoptimizeIfNot(DeoptimizeReason::kLostPrecision, params.feedback(), check,
+ frame_state);
return ChangeUint32ToSmi(value);
}
Node* EffectControlLinearizer::BuildCheckedFloat64ToInt32(
- CheckForMinusZeroMode mode, Node* value, Node* frame_state) {
+ CheckForMinusZeroMode mode, const VectorSlotPair& feedback, Node* value,
+ Node* frame_state) {
Node* value32 = __ RoundFloat64ToInt32(value);
Node* check_same = __ Float64Equal(value, __ ChangeInt32ToFloat64(value32));
- __ DeoptimizeIfNot(DeoptimizeReason::kLostPrecisionOrNaN, check_same,
- frame_state);
+ __ DeoptimizeIfNot(DeoptimizeReason::kLostPrecisionOrNaN, feedback,
+ check_same, frame_state);
if (mode == CheckForMinusZeroMode::kCheckForMinusZero) {
// Check if {value} is -0.
@@ -1801,7 +1842,8 @@ Node* EffectControlLinearizer::BuildCheckedFloat64ToInt32(
// In case of 0, we need to check the high bits for the IEEE -0 pattern.
Node* check_negative = __ Int32LessThan(__ Float64ExtractHighWord32(value),
__ Int32Constant(0));
- __ DeoptimizeIf(DeoptimizeReason::kMinusZero, check_negative, frame_state);
+ __ DeoptimizeIf(DeoptimizeReason::kMinusZero, feedback, check_negative,
+ frame_state);
__ Goto(&check_done);
__ Bind(&check_done);
@@ -1811,22 +1853,27 @@ Node* EffectControlLinearizer::BuildCheckedFloat64ToInt32(
Node* EffectControlLinearizer::LowerCheckedFloat64ToInt32(Node* node,
Node* frame_state) {
- CheckForMinusZeroMode mode = CheckMinusZeroModeOf(node->op());
+ const CheckMinusZeroParameters& params =
+ CheckMinusZeroParametersOf(node->op());
Node* value = node->InputAt(0);
- return BuildCheckedFloat64ToInt32(mode, value, frame_state);
+ return BuildCheckedFloat64ToInt32(params.mode(), params.feedback(), value,
+ frame_state);
}
Node* EffectControlLinearizer::LowerCheckedTaggedSignedToInt32(
Node* node, Node* frame_state) {
Node* value = node->InputAt(0);
+ const CheckParameters& params = CheckParametersOf(node->op());
Node* check = ObjectIsSmi(value);
- __ DeoptimizeIfNot(DeoptimizeReason::kNotASmi, check, frame_state);
+ __ DeoptimizeIfNot(DeoptimizeReason::kNotASmi, params.feedback(), check,
+ frame_state);
return ChangeSmiToInt32(value);
}
Node* EffectControlLinearizer::LowerCheckedTaggedToInt32(Node* node,
Node* frame_state) {
- CheckForMinusZeroMode mode = CheckMinusZeroModeOf(node->op());
+ const CheckMinusZeroParameters& params =
+ CheckMinusZeroParametersOf(node->op());
Node* value = node->InputAt(0);
auto if_not_smi = __ MakeDeferredLabel();
@@ -1842,9 +1889,11 @@ Node* EffectControlLinearizer::LowerCheckedTaggedToInt32(Node* node,
__ Bind(&if_not_smi);
Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
Node* check_map = __ WordEqual(value_map, __ HeapNumberMapConstant());
- __ DeoptimizeIfNot(DeoptimizeReason::kNotAHeapNumber, check_map, frame_state);
+ __ DeoptimizeIfNot(DeoptimizeReason::kNotAHeapNumber, params.feedback(),
+ check_map, frame_state);
Node* vfalse = __ LoadField(AccessBuilder::ForHeapNumberValue(), value);
- vfalse = BuildCheckedFloat64ToInt32(mode, vfalse, frame_state);
+ vfalse = BuildCheckedFloat64ToInt32(params.mode(), params.feedback(), vfalse,
+ frame_state);
__ Goto(&done, vfalse);
__ Bind(&done);
@@ -1852,13 +1901,14 @@ Node* EffectControlLinearizer::LowerCheckedTaggedToInt32(Node* node,
}
Node* EffectControlLinearizer::BuildCheckedHeapNumberOrOddballToFloat64(
- CheckTaggedInputMode mode, Node* value, Node* frame_state) {
+ CheckTaggedInputMode mode, const VectorSlotPair& feedback, Node* value,
+ Node* frame_state) {
Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
Node* check_number = __ WordEqual(value_map, __ HeapNumberMapConstant());
switch (mode) {
case CheckTaggedInputMode::kNumber: {
- __ DeoptimizeIfNot(DeoptimizeReason::kNotAHeapNumber, check_number,
- frame_state);
+ __ DeoptimizeIfNot(DeoptimizeReason::kNotAHeapNumber, feedback,
+ check_number, frame_state);
break;
}
case CheckTaggedInputMode::kNumberOrOddball: {
@@ -1871,8 +1921,8 @@ Node* EffectControlLinearizer::BuildCheckedHeapNumberOrOddballToFloat64(
__ LoadField(AccessBuilder::ForMapInstanceType(), value_map);
Node* check_oddball =
__ Word32Equal(instance_type, __ Int32Constant(ODDBALL_TYPE));
- __ DeoptimizeIfNot(DeoptimizeReason::kNotANumberOrOddball, check_oddball,
- frame_state);
+ __ DeoptimizeIfNot(DeoptimizeReason::kNotANumberOrOddball, feedback,
+ check_oddball, frame_state);
STATIC_ASSERT(HeapNumber::kValueOffset == Oddball::kToNumberRawOffset);
__ Goto(&check_done);
@@ -1896,8 +1946,8 @@ Node* EffectControlLinearizer::LowerCheckedTaggedToFloat64(Node* node,
// In the Smi case, just convert to int32 and then float64.
// Otherwise, check heap numberness and load the number.
- Node* number =
- BuildCheckedHeapNumberOrOddballToFloat64(mode, value, frame_state);
+ Node* number = BuildCheckedHeapNumberOrOddballToFloat64(
+ mode, VectorSlotPair(), value, frame_state);
__ Goto(&done, number);
__ Bind(&if_smi);
@@ -1912,9 +1962,11 @@ Node* EffectControlLinearizer::LowerCheckedTaggedToFloat64(Node* node,
Node* EffectControlLinearizer::LowerCheckedTaggedToTaggedSigned(
Node* node, Node* frame_state) {
Node* value = node->InputAt(0);
+ const CheckParameters& params = CheckParametersOf(node->op());
Node* check = ObjectIsSmi(value);
- __ DeoptimizeIfNot(DeoptimizeReason::kNotASmi, check, frame_state);
+ __ DeoptimizeIfNot(DeoptimizeReason::kNotASmi, params.feedback(), check,
+ frame_state);
return value;
}
@@ -1922,9 +1974,11 @@ Node* EffectControlLinearizer::LowerCheckedTaggedToTaggedSigned(
Node* EffectControlLinearizer::LowerCheckedTaggedToTaggedPointer(
Node* node, Node* frame_state) {
Node* value = node->InputAt(0);
+ const CheckParameters& params = CheckParametersOf(node->op());
Node* check = ObjectIsSmi(value);
- __ DeoptimizeIf(DeoptimizeReason::kSmi, check, frame_state);
+ __ DeoptimizeIf(DeoptimizeReason::kSmi, params.feedback(), check,
+ frame_state);
return value;
}
@@ -1950,7 +2004,8 @@ Node* EffectControlLinearizer::LowerTruncateTaggedToWord32(Node* node) {
Node* EffectControlLinearizer::LowerCheckedTruncateTaggedToWord32(
Node* node, Node* frame_state) {
- CheckTaggedInputMode mode = CheckTaggedInputModeOf(node->op());
+ const CheckTaggedInputParameters& params =
+ CheckTaggedInputParametersOf(node->op());
Node* value = node->InputAt(0);
auto if_not_smi = __ MakeLabel();
@@ -1964,8 +2019,8 @@ Node* EffectControlLinearizer::LowerCheckedTruncateTaggedToWord32(
// Otherwise, check that it's a heap number or oddball and truncate the value
// to int32.
__ Bind(&if_not_smi);
- Node* number =
- BuildCheckedHeapNumberOrOddballToFloat64(mode, value, frame_state);
+ Node* number = BuildCheckedHeapNumberOrOddballToFloat64(
+ params.mode(), params.feedback(), value, frame_state);
number = __ TruncateFloat64ToWord32(number);
__ Goto(&done, number);
@@ -1980,6 +2035,19 @@ Node* EffectControlLinearizer::LowerAllocate(Node* node) {
return new_node;
}
+Node* EffectControlLinearizer::LowerNumberToString(Node* node) {
+ Node* argument = node->InputAt(0);
+
+ Callable const callable =
+ Builtins::CallableFor(isolate(), Builtins::kNumberToString);
+ Operator::Properties properties = Operator::kEliminatable;
+ CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
+ CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+ isolate(), graph()->zone(), callable.descriptor(), 0, flags, properties);
+ return __ Call(desc, __ HeapConstant(callable.code()), argument,
+ __ NoContextConstant());
+}
+
Node* EffectControlLinearizer::LowerObjectIsArrayBufferView(Node* node) {
Node* value = node->InputAt(0);
@@ -2039,9 +2107,10 @@ Node* EffectControlLinearizer::LowerObjectIsCallable(Node* node) {
Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
Node* value_bit_field =
__ LoadField(AccessBuilder::ForMapBitField(), value_map);
- Node* vfalse = __ Word32Equal(
- __ Int32Constant(1 << Map::kIsCallable),
- __ Word32And(value_bit_field, __ Int32Constant(1 << Map::kIsCallable)));
+ Node* vfalse =
+ __ Word32Equal(__ Int32Constant(Map::IsCallableBit::kMask),
+ __ Word32And(value_bit_field,
+ __ Int32Constant(Map::IsCallableBit::kMask)));
__ Goto(&done, vfalse);
__ Bind(&if_smi);
@@ -2063,10 +2132,10 @@ Node* EffectControlLinearizer::LowerObjectIsConstructor(Node* node) {
Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
Node* value_bit_field =
__ LoadField(AccessBuilder::ForMapBitField(), value_map);
- Node* vfalse =
- __ Word32Equal(__ Int32Constant(1 << Map::kIsConstructor),
- __ Word32And(value_bit_field,
- __ Int32Constant(1 << Map::kIsConstructor)));
+ Node* vfalse = __ Word32Equal(
+ __ Int32Constant(Map::IsConstructorBit::kMask),
+ __ Word32And(value_bit_field,
+ __ Int32Constant(Map::IsConstructorBit::kMask)));
__ Goto(&done, vfalse);
__ Bind(&if_smi);
@@ -2089,10 +2158,10 @@ Node* EffectControlLinearizer::LowerObjectIsDetectableCallable(Node* node) {
Node* value_bit_field =
__ LoadField(AccessBuilder::ForMapBitField(), value_map);
Node* vfalse = __ Word32Equal(
- __ Int32Constant(1 << Map::kIsCallable),
+ __ Int32Constant(Map::IsCallableBit::kMask),
__ Word32And(value_bit_field,
- __ Int32Constant((1 << Map::kIsCallable) |
- (1 << Map::kIsUndetectable))));
+ __ Int32Constant((Map::IsCallableBit::kMask) |
+ (Map::IsUndetectableBit::kMask))));
__ Goto(&done, vfalse);
__ Bind(&if_smi);
@@ -2102,6 +2171,13 @@ Node* EffectControlLinearizer::LowerObjectIsDetectableCallable(Node* node) {
return done.PhiAt(0);
}
+Node* EffectControlLinearizer::LowerNumberIsFloat64Hole(Node* node) {
+ Node* value = node->InputAt(0);
+ Node* check = __ Word32Equal(__ Float64ExtractHighWord32(value),
+ __ Int32Constant(kHoleNanUpper32));
+ return check;
+}
+
Node* EffectControlLinearizer::LowerObjectIsMinusZero(Node* node) {
Node* value = node->InputAt(0);
Node* zero = __ Int32Constant(0);
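NumberIsFloat64Hole only compares the upper 32 bits of the double against the hole NaN pattern, since the hole is an ordinary NaN payload distinguished purely by its bit pattern. A standalone sketch; the exact values of kHoleNanUpper32 and of the lower word used to build a hole below are assumptions here:

#include <cassert>
#include <cstdint>
#include <cstring>

// Assumed constant, following the usual V8 layout of the hole NaN.
constexpr uint32_t kHoleNanUpper32 = 0xFFF7FFFF;

bool NumberIsFloat64Hole(double value) {
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof(bits));
  return static_cast<uint32_t>(bits >> 32) == kHoleNanUpper32;
}

int main() {
  assert(!NumberIsFloat64Hole(0.0));  // only the exact hole bits match
  uint64_t hole_bits =
      (static_cast<uint64_t>(kHoleNanUpper32) << 32) | 0xFFF7FFFFull;
  double hole;
  std::memcpy(&hole, &hole_bits, sizeof(hole));
  assert(NumberIsFloat64Hole(hole));
  return 0;
}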
@@ -2169,9 +2245,10 @@ Node* EffectControlLinearizer::LowerObjectIsNonCallable(Node* node) {
Node* value_bit_field =
__ LoadField(AccessBuilder::ForMapBitField(), value_map);
- Node* check2 = __ Word32Equal(
- __ Int32Constant(0),
- __ Word32And(value_bit_field, __ Int32Constant(1 << Map::kIsCallable)));
+ Node* check2 =
+ __ Word32Equal(__ Int32Constant(0),
+ __ Word32And(value_bit_field,
+ __ Int32Constant(Map::IsCallableBit::kMask)));
__ Goto(&done, check2);
__ Bind(&if_primitive);
@@ -2283,9 +2360,10 @@ Node* EffectControlLinearizer::LowerObjectIsUndetectable(Node* node) {
Node* value_bit_field =
__ LoadField(AccessBuilder::ForMapBitField(), value_map);
Node* vfalse = __ Word32Equal(
- __ Word32Equal(__ Int32Constant(0),
- __ Word32And(value_bit_field,
- __ Int32Constant(1 << Map::kIsUndetectable))),
+ __ Word32Equal(
+ __ Int32Constant(0),
+ __ Word32And(value_bit_field,
+ __ Int32Constant(Map::IsUndetectableBit::kMask))),
__ Int32Constant(0));
__ Goto(&done, vfalse);
@@ -2511,6 +2589,52 @@ Node* EffectControlLinearizer::LowerNewArgumentsElements(Node* node) {
__ SmiConstant(mapped_count), __ NoContextConstant());
}
+Node* EffectControlLinearizer::LowerNewConsString(Node* node) {
+ Node* length = node->InputAt(0);
+ Node* first = node->InputAt(1);
+ Node* second = node->InputAt(2);
+
+ // Determine the instance types of {first} and {second}.
+ Node* first_map = __ LoadField(AccessBuilder::ForMap(), first);
+ Node* first_instance_type =
+ __ LoadField(AccessBuilder::ForMapInstanceType(), first_map);
+ Node* second_map = __ LoadField(AccessBuilder::ForMap(), second);
+ Node* second_instance_type =
+ __ LoadField(AccessBuilder::ForMapInstanceType(), second_map);
+
+ // Determine the proper map for the resulting ConsString.
+ // If both {first} and {second} are one-byte strings, we
+ // create a new ConsOneByteString, otherwise we create a
+ // new ConsString instead.
+ auto if_onebyte = __ MakeLabel();
+ auto if_twobyte = __ MakeLabel();
+ auto done = __ MakeLabel(MachineRepresentation::kTaggedPointer);
+ STATIC_ASSERT(kOneByteStringTag != 0);
+ STATIC_ASSERT(kTwoByteStringTag == 0);
+ Node* instance_type = __ Word32And(first_instance_type, second_instance_type);
+ Node* encoding =
+ __ Word32And(instance_type, __ Int32Constant(kStringEncodingMask));
+ __ Branch(__ Word32Equal(encoding, __ Int32Constant(kTwoByteStringTag)),
+ &if_twobyte, &if_onebyte);
+ __ Bind(&if_onebyte);
+ __ Goto(&done,
+ jsgraph()->HeapConstant(factory()->cons_one_byte_string_map()));
+ __ Bind(&if_twobyte);
+ __ Goto(&done, jsgraph()->HeapConstant(factory()->cons_string_map()));
+ __ Bind(&done);
+ Node* result_map = done.PhiAt(0);
+
+ // Allocate the resulting ConsString.
+ Node* result = __ Allocate(NOT_TENURED, __ Int32Constant(ConsString::kSize));
+ __ StoreField(AccessBuilder::ForMap(), result, result_map);
+ __ StoreField(AccessBuilder::ForNameHashField(), result,
+ jsgraph()->Int32Constant(Name::kEmptyHashField));
+ __ StoreField(AccessBuilder::ForStringLength(), result, length);
+ __ StoreField(AccessBuilder::ForConsStringFirst(), result, first);
+ __ StoreField(AccessBuilder::ForConsStringSecond(), result, second);
+ return result;
+}
+
Node* EffectControlLinearizer::LowerArrayBufferWasNeutered(Node* node) {
Node* value = node->InputAt(0);
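The cons-string map selection leans on the string encoding bit being set for one-byte strings and clear for two-byte strings, so AND-ing the two instance types leaves the bit set only if both inputs are one-byte. A small standalone check of that trick; the constant values below follow the usual V8 convention but are written out here as an assumption:

#include <cassert>
#include <cstdint>

// Assumed layout: the encoding bit is set for one-byte strings.
constexpr uint32_t kStringEncodingMask = 1u << 3;
constexpr uint32_t kOneByteStringTag = 1u << 3;
constexpr uint32_t kTwoByteStringTag = 0;

// Both operands one-byte -> the AND keeps the bit -> use the one-byte cons map.
// Any operand two-byte   -> the AND clears the bit -> use the two-byte cons map.
bool UseOneByteConsMap(uint32_t first_instance_type,
                       uint32_t second_instance_type) {
  uint32_t combined = first_instance_type & second_instance_type;
  return (combined & kStringEncodingMask) == kOneByteStringTag;
}

int main() {
  assert(UseOneByteConsMap(kOneByteStringTag, kOneByteStringTag));
  assert(!UseOneByteConsMap(kOneByteStringTag, kTwoByteStringTag));
  assert(!UseOneByteConsMap(kTwoByteStringTag, kTwoByteStringTag));
  return 0;
}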
@@ -2538,6 +2662,15 @@ Node* EffectControlLinearizer::LowerSameValue(Node* node) {
__ NoContextConstant());
}
+Node* EffectControlLinearizer::LowerDeadValue(Node* node) {
+ Node* input = NodeProperties::GetValueInput(node, 0);
+ if (input->opcode() != IrOpcode::kUnreachable) {
+ Node* unreachable = __ Unreachable();
+ NodeProperties::ReplaceValueInput(node, unreachable, 0);
+ }
+ return node;
+}
+
Node* EffectControlLinearizer::LowerStringToNumber(Node* node) {
Node* string = node->InputAt(0);
@@ -2580,19 +2713,25 @@ Node* EffectControlLinearizer::LowerStringCharCodeAt(Node* node) {
__ NoContextConstant());
}
-Node* EffectControlLinearizer::LowerSeqStringCharCodeAt(Node* node) {
+Node* EffectControlLinearizer::LowerStringCodePointAt(Node* node) {
Node* receiver = node->InputAt(0);
Node* position = node->InputAt(1);
+ Callable const callable =
+ Builtins::CallableFor(isolate(), Builtins::kStringCodePointAt);
+ Operator::Properties properties = Operator::kNoThrow | Operator::kNoWrite;
+ CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
+ CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+ isolate(), graph()->zone(), callable.descriptor(), 0, flags, properties,
+ MachineType::TaggedSigned());
+ return __ Call(desc, __ HeapConstant(callable.code()), receiver, position,
+ __ NoContextConstant());
+}
+
+Node* EffectControlLinearizer::LoadFromString(Node* receiver, Node* position,
+ Node* is_one_byte) {
auto one_byte_load = __ MakeLabel();
auto done = __ MakeLabel(MachineRepresentation::kWord32);
-
- Node* map = __ LoadField(AccessBuilder::ForMap(), receiver);
- Node* instance_type = __ LoadField(AccessBuilder::ForMapInstanceType(), map);
- Node* is_one_byte = __ Word32Equal(
- __ Word32And(instance_type, __ Int32Constant(kStringEncodingMask)),
- __ Int32Constant(kOneByteStringTag));
-
__ GotoIf(is_one_byte, &one_byte_load);
Node* two_byte_result = __ LoadElement(
AccessBuilder::ForSeqTwoByteStringCharacter(), receiver, position);
@@ -2607,6 +2746,85 @@ Node* EffectControlLinearizer::LowerSeqStringCharCodeAt(Node* node) {
return done.PhiAt(0);
}
+Node* EffectControlLinearizer::LowerSeqStringCharCodeAt(Node* node) {
+ Node* receiver = node->InputAt(0);
+ Node* position = node->InputAt(1);
+
+ Node* map = __ LoadField(AccessBuilder::ForMap(), receiver);
+ Node* instance_type = __ LoadField(AccessBuilder::ForMapInstanceType(), map);
+ Node* is_one_byte = __ Word32Equal(
+ __ Word32And(instance_type, __ Int32Constant(kStringEncodingMask)),
+ __ Int32Constant(kOneByteStringTag));
+
+ return LoadFromString(receiver, position, is_one_byte);
+}
+
+Node* EffectControlLinearizer::LowerSeqStringCodePointAt(
+ Node* node, UnicodeEncoding encoding) {
+ Node* receiver = node->InputAt(0);
+ Node* position = node->InputAt(1);
+
+ Node* map = __ LoadField(AccessBuilder::ForMap(), receiver);
+ Node* instance_type = __ LoadField(AccessBuilder::ForMapInstanceType(), map);
+ Node* is_one_byte = __ Word32Equal(
+ __ Word32And(instance_type, __ Int32Constant(kStringEncodingMask)),
+ __ Int32Constant(kOneByteStringTag));
+
+ Node* first_char_code = LoadFromString(receiver, position, is_one_byte);
+
+ auto return_result = __ MakeLabel(MachineRepresentation::kWord32);
+
+ // Check if first character code is outside of interval [0xD800, 0xDBFF].
+ Node* first_out =
+ __ Word32Equal(__ Word32And(first_char_code, __ Int32Constant(0xFC00)),
+ __ Int32Constant(0xD800));
+ // Return first character code.
+ __ GotoIf(first_out, &return_result, first_char_code);
+ // Check if position + 1 is still in range.
+ Node* length = __ LoadField(AccessBuilder::ForStringLength(), receiver);
+ Node* next_position = __ Int32Add(position, __ Int32Constant(1));
+ Node* next_position_in_range = __ Int32LessThan(next_position, length);
+ __ GotoIf(next_position_in_range, &return_result, first_char_code);
+
+ // Load second character code.
+ Node* second_char_code = LoadFromString(receiver, next_position, is_one_byte);
+  // Check if second character code is inside of interval [0xDC00, 0xDFFF].
+ Node* second_out =
+ __ Word32Equal(__ Word32And(second_char_code, __ Int32Constant(0xFC00)),
+ __ Int32Constant(0xDC00));
+ __ GotoIfNot(second_out, &return_result, first_char_code);
+
+ Node* result;
+ switch (encoding) {
+ case UnicodeEncoding::UTF16:
+ result = __ Word32Or(
+// Need to swap the order for big-endian platforms
+#if V8_TARGET_BIG_ENDIAN
+ __ Word32Shl(first_char_code, __ Int32Constant(16)),
+ second_char_code);
+#else
+ __ Word32Shl(second_char_code, __ Int32Constant(16)),
+ first_char_code);
+#endif
+ break;
+ case UnicodeEncoding::UTF32: {
+ // Convert UTF16 surrogate pair into |word32| code point, encoded as
+ // UTF32.
+ Node* surrogate_offset =
+ __ Int32Constant(0x10000 - (0xD800 << 10) - 0xDC00);
+
+ // (lead << 10) + trail + SURROGATE_OFFSET
+ result = __ Int32Add(__ Word32Shl(first_char_code, __ Int32Constant(10)),
+ __ Int32Add(second_char_code, surrogate_offset));
+ break;
+ }
+ }
+ __ Goto(&return_result, result);
+
+ __ Bind(&return_result);
+ return return_result.PhiAt(0);
+}
+
Node* EffectControlLinearizer::LowerStringFromCharCode(Node* node) {
Node* value = node->InputAt(0);
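In the UTF32 case the code point is rebuilt from the surrogate pair as (lead << 10) + trail + SURROGATE_OFFSET, where the offset folds the 0x10000 bias and the subtraction of the 0xD800/0xDC00 tags into one constant. A standalone check with U+1F600, whose UTF-16 encoding is the pair 0xD83D, 0xDE00:

#include <cassert>
#include <cstdint>

// 0x10000 - (0xD800 << 10) - 0xDC00, exactly as in the lowering above.
constexpr int32_t kSurrogateOffset = 0x10000 - (0xD800 << 10) - 0xDC00;

// (lead << 10) + trail + SURROGATE_OFFSET
uint32_t CombineSurrogatePair(uint32_t lead, uint32_t trail) {
  return static_cast<uint32_t>((static_cast<int32_t>(lead) << 10) +
                               static_cast<int32_t>(trail) + kSurrogateOffset);
}

int main() {
  assert(CombineSurrogatePair(0xD83D, 0xDE00) == 0x1F600);
  return 0;
}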
@@ -2836,6 +3054,12 @@ Node* EffectControlLinearizer::LowerStringIndexOf(Node* node) {
position, __ NoContextConstant());
}
+Node* EffectControlLinearizer::LowerStringLength(Node* node) {
+ Node* subject = node->InputAt(0);
+
+ return __ LoadField(AccessBuilder::ForStringLength(), subject);
+}
+
Node* EffectControlLinearizer::LowerStringComparison(Callable const& callable,
Node* node) {
Node* lhs = node->InputAt(0);
@@ -2872,7 +3096,8 @@ Node* EffectControlLinearizer::LowerCheckFloat64Hole(Node* node,
Node* value = node->InputAt(0);
Node* check = __ Word32Equal(__ Float64ExtractHighWord32(value),
__ Int32Constant(kHoleNanUpper32));
- __ DeoptimizeIf(DeoptimizeReason::kHole, check, frame_state);
+ __ DeoptimizeIf(DeoptimizeReason::kHole, VectorSlotPair(), check,
+ frame_state);
return value;
}
@@ -2881,7 +3106,8 @@ Node* EffectControlLinearizer::LowerCheckNotTaggedHole(Node* node,
Node* frame_state) {
Node* value = node->InputAt(0);
Node* check = __ WordEqual(value, __ TheHoleConstant());
- __ DeoptimizeIf(DeoptimizeReason::kHole, check, frame_state);
+ __ DeoptimizeIf(DeoptimizeReason::kHole, VectorSlotPair(), check,
+ frame_state);
return value;
}
@@ -2918,8 +3144,8 @@ void EffectControlLinearizer::LowerCheckEqualsInternalizedString(
__ Bind(&if_notsame);
{
// Now {val} could still be a non-internalized String that matches {exp}.
- __ DeoptimizeIf(DeoptimizeReason::kWrongName, ObjectIsSmi(val),
- frame_state);
+ __ DeoptimizeIf(DeoptimizeReason::kWrongName, VectorSlotPair(),
+ ObjectIsSmi(val), frame_state);
Node* val_map = __ LoadField(AccessBuilder::ForMap(), val);
Node* val_instance_type =
__ LoadField(AccessBuilder::ForMapInstanceType(), val_map);
@@ -2937,7 +3163,7 @@ void EffectControlLinearizer::LowerCheckEqualsInternalizedString(
// Check that the {val} is a non-internalized String, if it's anything
// else it cannot match the recorded feedback {exp} anyways.
__ DeoptimizeIfNot(
- DeoptimizeReason::kWrongName,
+ DeoptimizeReason::kWrongName, VectorSlotPair(),
__ Word32Equal(__ Word32And(val_instance_type,
__ Int32Constant(kIsNotStringMask |
kIsNotInternalizedMask)),
@@ -2956,7 +3182,7 @@ void EffectControlLinearizer::LowerCheckEqualsInternalizedString(
__ Call(common()->Call(desc), try_internalize_string_function, val);
// Now see if the results match.
- __ DeoptimizeIfNot(DeoptimizeReason::kWrongName,
+ __ DeoptimizeIfNot(DeoptimizeReason::kWrongName, VectorSlotPair(),
__ WordEqual(exp, val_internalized), frame_state);
__ Goto(&if_same);
}
@@ -2966,7 +3192,7 @@ void EffectControlLinearizer::LowerCheckEqualsInternalizedString(
// The {val} is a ThinString, let's check the actual value.
Node* val_actual =
__ LoadField(AccessBuilder::ForThinStringActual(), val);
- __ DeoptimizeIfNot(DeoptimizeReason::kWrongName,
+ __ DeoptimizeIfNot(DeoptimizeReason::kWrongName, VectorSlotPair(),
__ WordEqual(exp, val_actual), frame_state);
__ Goto(&if_same);
}
@@ -2980,7 +3206,8 @@ void EffectControlLinearizer::LowerCheckEqualsSymbol(Node* node,
Node* exp = node->InputAt(0);
Node* val = node->InputAt(1);
Node* check = __ WordEqual(exp, val);
- __ DeoptimizeIfNot(DeoptimizeReason::kWrongName, check, frame_state);
+ __ DeoptimizeIfNot(DeoptimizeReason::kWrongName, VectorSlotPair(), check,
+ frame_state);
}
Node* EffectControlLinearizer::AllocateHeapNumberWithValue(Node* value) {
@@ -3135,7 +3362,7 @@ Node* EffectControlLinearizer::LowerEnsureWritableFastElements(Node* node) {
Node* EffectControlLinearizer::LowerMaybeGrowFastElements(Node* node,
Node* frame_state) {
- GrowFastElementsMode mode = GrowFastElementsModeOf(node->op());
+ GrowFastElementsParameters params = GrowFastElementsParametersOf(node->op());
Node* object = node->InputAt(0);
Node* elements = node->InputAt(1);
Node* index = node->InputAt(2);
@@ -3154,7 +3381,7 @@ Node* EffectControlLinearizer::LowerMaybeGrowFastElements(Node* node,
// We need to grow the {elements} for {object}.
Operator::Properties properties = Operator::kEliminatable;
Callable callable =
- (mode == GrowFastElementsMode::kDoubleElements)
+ (params.mode() == GrowFastElementsMode::kDoubleElements)
? Builtins::CallableFor(isolate(), Builtins::kGrowFastDoubleElements)
: Builtins::CallableFor(isolate(),
Builtins::kGrowFastSmiOrObjectElements);
@@ -3166,10 +3393,8 @@ Node* EffectControlLinearizer::LowerMaybeGrowFastElements(Node* node,
ChangeInt32ToSmi(index), __ NoContextConstant());
// Ensure that we were able to grow the {elements}.
- // TODO(turbofan): We use kSmi as reason here similar to Crankshaft,
- // but maybe we should just introduce a reason that makes sense.
- __ DeoptimizeIf(DeoptimizeReason::kSmi, ObjectIsSmi(new_elements),
- frame_state);
+ __ DeoptimizeIf(DeoptimizeReason::kCouldNotGrowElements, params.feedback(),
+ ObjectIsSmi(new_elements), frame_state);
__ Goto(&done, new_elements);
__ Bind(&done);
@@ -3723,12 +3948,13 @@ void EffectControlLinearizer::LowerStoreSignedSmallElement(Node* node) {
}
void EffectControlLinearizer::LowerRuntimeAbort(Node* node) {
- BailoutReason reason = BailoutReasonOf(node->op());
+ AbortReason reason = AbortReasonOf(node->op());
Operator::Properties properties = Operator::kNoDeopt | Operator::kNoThrow;
Runtime::FunctionId id = Runtime::kAbort;
CallDescriptor const* desc = Linkage::GetRuntimeCallDescriptor(
graph()->zone(), id, 1, properties, CallDescriptor::kNoFlags);
- __ Call(desc, __ CEntryStubConstant(1), jsgraph()->SmiConstant(reason),
+ __ Call(desc, __ CEntryStubConstant(1),
+ jsgraph()->SmiConstant(static_cast<int>(reason)),
__ ExternalConstant(ExternalReference(id, isolate())),
__ Int32Constant(1), __ NoContextConstant());
}
@@ -4165,14 +4391,14 @@ Node* EffectControlLinearizer::LowerFindOrderedHashMapEntry(Node* node) {
Node* EffectControlLinearizer::ComputeIntegerHash(Node* value) {
// See v8::internal::ComputeIntegerHash()
- value = __ Int32Add(__ Word32Xor(value, __ Int32Constant(0xffffffff)),
+ value = __ Int32Add(__ Word32Xor(value, __ Int32Constant(0xFFFFFFFF)),
__ Word32Shl(value, __ Int32Constant(15)));
value = __ Word32Xor(value, __ Word32Shr(value, __ Int32Constant(12)));
value = __ Int32Add(value, __ Word32Shl(value, __ Int32Constant(2)));
value = __ Word32Xor(value, __ Word32Shr(value, __ Int32Constant(4)));
value = __ Int32Mul(value, __ Int32Constant(2057));
value = __ Word32Xor(value, __ Word32Shr(value, __ Int32Constant(16)));
- value = __ Word32And(value, __ Int32Constant(0x3fffffff));
+ value = __ Word32And(value, __ Int32Constant(0x3FFFFFFF));
return value;
}
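The node sequence implements the classic xor/shift integer hash referenced in the comment (v8::internal::ComputeIntegerHash), masked to 30 bits at the end so the result stays within Smi range. The same computation written as plain C++, for reference:

#include <cstdint>

// Same steps as the node sequence above, expressed as ordinary C++.
uint32_t ComputeIntegerHash(uint32_t value) {
  value = (value ^ 0xFFFFFFFFu) + (value << 15);  // ~value + (value << 15)
  value = value ^ (value >> 12);
  value = value + (value << 2);
  value = value ^ (value >> 4);
  value = value * 2057u;
  value = value ^ (value >> 16);
  return value & 0x3FFFFFFFu;  // keep 30 bits so the hash fits in a Smi
}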
diff --git a/deps/v8/src/compiler/effect-control-linearizer.h b/deps/v8/src/compiler/effect-control-linearizer.h
index 7cf6910386..47b1586d6d 100644
--- a/deps/v8/src/compiler/effect-control-linearizer.h
+++ b/deps/v8/src/compiler/effect-control-linearizer.h
@@ -90,6 +90,7 @@ class V8_EXPORT_PRIVATE EffectControlLinearizer {
Node* LowerTruncateTaggedToWord32(Node* node);
Node* LowerCheckedTruncateTaggedToWord32(Node* node, Node* frame_state);
Node* LowerAllocate(Node* node);
+ Node* LowerNumberToString(Node* node);
Node* LowerObjectIsArrayBufferView(Node* node);
Node* LowerObjectIsBigInt(Node* node);
Node* LowerObjectIsCallable(Node* node);
@@ -104,22 +105,28 @@ class V8_EXPORT_PRIVATE EffectControlLinearizer {
Node* LowerObjectIsString(Node* node);
Node* LowerObjectIsSymbol(Node* node);
Node* LowerObjectIsUndetectable(Node* node);
+ Node* LowerNumberIsFloat64Hole(Node* node);
Node* LowerArgumentsFrame(Node* node);
Node* LowerArgumentsLength(Node* node);
Node* LowerNewDoubleElements(Node* node);
Node* LowerNewSmiOrObjectElements(Node* node);
Node* LowerNewArgumentsElements(Node* node);
+ Node* LowerNewConsString(Node* node);
Node* LowerArrayBufferWasNeutered(Node* node);
Node* LowerSameValue(Node* node);
+ Node* LowerDeadValue(Node* node);
Node* LowerStringToNumber(Node* node);
Node* LowerStringCharAt(Node* node);
Node* LowerStringCharCodeAt(Node* node);
Node* LowerSeqStringCharCodeAt(Node* node);
+ Node* LowerStringCodePointAt(Node* node);
+ Node* LowerSeqStringCodePointAt(Node* node, UnicodeEncoding encoding);
Node* LowerStringToLowerCaseIntl(Node* node);
Node* LowerStringToUpperCaseIntl(Node* node);
Node* LowerStringFromCharCode(Node* node);
Node* LowerStringFromCodePoint(Node* node);
Node* LowerStringIndexOf(Node* node);
+ Node* LowerStringLength(Node* node);
Node* LowerStringEqual(Node* node);
Node* LowerStringLessThan(Node* node);
Node* LowerStringLessThanOrEqual(Node* node);
@@ -156,9 +163,11 @@ class V8_EXPORT_PRIVATE EffectControlLinearizer {
Maybe<Node*> LowerFloat64RoundTruncate(Node* node);
Node* AllocateHeapNumberWithValue(Node* node);
- Node* BuildCheckedFloat64ToInt32(CheckForMinusZeroMode mode, Node* value,
+ Node* BuildCheckedFloat64ToInt32(CheckForMinusZeroMode mode,
+ const VectorSlotPair& feedback, Node* value,
Node* frame_state);
Node* BuildCheckedHeapNumberOrOddballToFloat64(CheckTaggedInputMode mode,
+ const VectorSlotPair& feedback,
Node* value,
Node* frame_state);
Node* BuildFloat64RoundDown(Node* value);
@@ -173,6 +182,7 @@ class V8_EXPORT_PRIVATE EffectControlLinearizer {
Node* ChangeSmiToIntPtr(Node* value);
Node* ChangeSmiToInt32(Node* value);
Node* ObjectIsSmi(Node* value);
+ Node* LoadFromString(Node* receiver, Node* position, Node* is_one_byte);
Node* SmiMaxValueConstant();
Node* SmiShiftBitsConstant();
diff --git a/deps/v8/src/compiler/escape-analysis-reducer.cc b/deps/v8/src/compiler/escape-analysis-reducer.cc
index aa2a1b2f3a..16a9d78faf 100644
--- a/deps/v8/src/compiler/escape-analysis-reducer.cc
+++ b/deps/v8/src/compiler/escape-analysis-reducer.cc
@@ -33,18 +33,39 @@ EscapeAnalysisReducer::EscapeAnalysisReducer(
arguments_elements_(zone),
zone_(zone) {}
-Node* EscapeAnalysisReducer::MaybeGuard(Node* original, Node* replacement) {
- // We might need to guard the replacement if the type of the {replacement}
- // node is not in a sub-type relation to the type of the the {original} node.
+Reduction EscapeAnalysisReducer::ReplaceNode(Node* original,
+ Node* replacement) {
+ const VirtualObject* vobject =
+ analysis_result().GetVirtualObject(replacement);
+ if (replacement->opcode() == IrOpcode::kDead ||
+ (vobject && !vobject->HasEscaped())) {
+ RelaxEffectsAndControls(original);
+ return Replace(replacement);
+ }
Type* const replacement_type = NodeProperties::GetType(replacement);
Type* const original_type = NodeProperties::GetType(original);
- if (!replacement_type->Is(original_type)) {
- Node* const control = NodeProperties::GetControlInput(original);
- replacement = jsgraph()->graph()->NewNode(
- jsgraph()->common()->TypeGuard(original_type), replacement, control);
- NodeProperties::SetType(replacement, original_type);
+ if (replacement_type->Is(original_type)) {
+ RelaxEffectsAndControls(original);
+ return Replace(replacement);
}
- return replacement;
+
+ // We need to guard the replacement if we would widen the type otherwise.
+ DCHECK_EQ(1, original->op()->EffectOutputCount());
+ DCHECK_EQ(1, original->op()->EffectInputCount());
+ DCHECK_EQ(1, original->op()->ControlInputCount());
+ Node* effect = NodeProperties::GetEffectInput(original);
+ Node* control = NodeProperties::GetControlInput(original);
+ original->TrimInputCount(0);
+ original->AppendInput(jsgraph()->zone(), replacement);
+ original->AppendInput(jsgraph()->zone(), effect);
+ original->AppendInput(jsgraph()->zone(), control);
+ NodeProperties::SetType(
+ original,
+ Type::Intersect(original_type, replacement_type, jsgraph()->zone()));
+ NodeProperties::ChangeOp(original,
+ jsgraph()->common()->TypeGuard(original_type));
+ ReplaceWithValue(original, original, original, control);
+ return NoChange();
}
namespace {
@@ -74,11 +95,7 @@ Reduction EscapeAnalysisReducer::Reduce(Node* node) {
DCHECK(node->opcode() != IrOpcode::kAllocate &&
node->opcode() != IrOpcode::kFinishRegion);
DCHECK_NE(replacement, node);
- if (replacement != jsgraph()->Dead()) {
- replacement = MaybeGuard(node, replacement);
- }
- RelaxEffectsAndControls(node);
- return Replace(replacement);
+ return ReplaceNode(node, replacement);
}
switch (node->opcode()) {
diff --git a/deps/v8/src/compiler/escape-analysis-reducer.h b/deps/v8/src/compiler/escape-analysis-reducer.h
index b89d4d03e8..29290d3a0a 100644
--- a/deps/v8/src/compiler/escape-analysis-reducer.h
+++ b/deps/v8/src/compiler/escape-analysis-reducer.h
@@ -97,7 +97,7 @@ class V8_EXPORT_PRIVATE EscapeAnalysisReducer final
void ReduceFrameStateInputs(Node* node);
Node* ReduceDeoptState(Node* node, Node* effect, Deduplicator* deduplicator);
Node* ObjectIdNode(const VirtualObject* vobject);
- Node* MaybeGuard(Node* original, Node* replacement);
+ Reduction ReplaceNode(Node* original, Node* replacement);
JSGraph* jsgraph() const { return jsgraph_; }
EscapeAnalysisResult analysis_result() const { return analysis_result_; }
diff --git a/deps/v8/src/compiler/escape-analysis.cc b/deps/v8/src/compiler/escape-analysis.cc
index b3b1abb6df..4b773136a9 100644
--- a/deps/v8/src/compiler/escape-analysis.cc
+++ b/deps/v8/src/compiler/escape-analysis.cc
@@ -223,8 +223,12 @@ class EscapeAnalysisTracker : public ZoneObject {
replacement_ = replacement;
vobject_ =
replacement ? tracker_->virtual_objects_.Get(replacement) : nullptr;
- TRACE("Set %s#%d as replacement.\n", replacement->op()->mnemonic(),
- replacement->id());
+ if (replacement) {
+ TRACE("Set %s#%d as replacement.\n", replacement->op()->mnemonic(),
+ replacement->id());
+ } else {
+ TRACE("Set nullptr as replacement.\n");
+ }
}
void MarkForDeletion() { SetReplacement(tracker_->jsgraph_->Dead()); }
@@ -248,10 +252,6 @@ class EscapeAnalysisTracker : public ZoneObject {
Node* GetReplacementOf(Node* node) { return replacements_[node]; }
Node* ResolveReplacement(Node* node) {
if (Node* replacement = GetReplacementOf(node)) {
- // Replacements cannot have replacements. This is important to ensure
- // re-visitation: If a replacement is replaced, then all nodes accessing
- // the replacement have to be updated.
- DCHECK_NULL(GetReplacementOf(replacement));
return replacement;
}
return node;
@@ -768,7 +768,12 @@ EscapeAnalysis::EscapeAnalysis(JSGraph* jsgraph, Zone* zone)
jsgraph_(jsgraph) {}
Node* EscapeAnalysisResult::GetReplacementOf(Node* node) {
- return tracker_->GetReplacementOf(node);
+ Node* replacement = tracker_->GetReplacementOf(node);
+ // Replacements cannot have replacements. This is important to ensure
+ // re-visitation: If a replacement is replaced, then all nodes accessing
+ // the replacement have to be updated.
+ if (replacement) DCHECK_NULL(tracker_->GetReplacementOf(replacement));
+ return replacement;
}
Node* EscapeAnalysisResult::GetVirtualObjectField(const VirtualObject* vobject,
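
The DCHECK moves from the tracker's ResolveReplacement into EscapeAnalysisResult::GetReplacementOf, so the one-level-only invariant is checked where the reducer consumes the analysis result rather than inside the analysis itself. A toy sketch of that invariant, with a plain map standing in for the per-node replacement table (names here are stand-ins, not V8 API):

#include <cassert>
#include <cstdio>
#include <unordered_map>

// Toy replacement table: node ids map to the id of their replacement.
using Replacements = std::unordered_map<int, int>;

// A replacement must not itself have a replacement; otherwise every user of
// the first replacement would need to be re-visited after the second one.
int GetReplacementOf(const Replacements& table, int node) {
  auto it = table.find(node);
  if (it == table.end()) return node;
  assert(table.find(it->second) == table.end() && "replacement has a replacement");
  return it->second;
}

int main() {
  Replacements table = {{7, 3}};
  printf("%d %d\n", GetReplacementOf(table, 7), GetReplacementOf(table, 5));  // 3 5
}
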
diff --git a/deps/v8/src/compiler/frame.cc b/deps/v8/src/compiler/frame.cc
index e0284c8ab4..0b6d7ac193 100644
--- a/deps/v8/src/compiler/frame.cc
+++ b/deps/v8/src/compiler/frame.cc
@@ -13,13 +13,22 @@ namespace internal {
namespace compiler {
Frame::Frame(int fixed_frame_size_in_slots)
- : frame_slot_count_(fixed_frame_size_in_slots),
+ : fixed_slot_count_(fixed_frame_size_in_slots),
+ frame_slot_count_(fixed_frame_size_in_slots),
spill_slot_count_(0),
+ return_slot_count_(0),
allocated_registers_(nullptr),
allocated_double_registers_(nullptr) {}
int Frame::AlignFrame(int alignment) {
int alignment_slots = alignment / kPointerSize;
+ // We have to align return slots separately, because they are claimed
+ // separately on the stack.
+ int return_delta =
+ alignment_slots - (return_slot_count_ & (alignment_slots - 1));
+ if (return_delta != alignment_slots) {
+ frame_slot_count_ += return_delta;
+ }
int delta = alignment_slots - (frame_slot_count_ & (alignment_slots - 1));
if (delta != alignment_slots) {
frame_slot_count_ += delta;
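
AlignFrame now pads the return-slot region and the total slot count independently, because return slots are claimed separately on the stack. The padding arithmetic adds a delta only when the count is not already a multiple of alignment_slots (for an aligned count the masked remainder is zero and the delta equals alignment_slots, so it is skipped). A small self-contained check of that identity, assuming a power-of-two alignment as the original does:

#include <cassert>
#include <cstdio>

// Pads `slots` up to a multiple of `alignment_slots` (a power of two), using
// the same delta computation as Frame::AlignFrame.
int AlignSlots(int slots, int alignment_slots) {
  int delta = alignment_slots - (slots & (alignment_slots - 1));
  if (delta != alignment_slots) slots += delta;
  return slots;
}

int main() {
  assert(AlignSlots(6, 2) == 6);  // already aligned: unchanged
  assert(AlignSlots(7, 2) == 8);  // padded by one slot
  assert(AlignSlots(5, 4) == 8);  // padded by three slots
  printf("ok\n");
}
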
diff --git a/deps/v8/src/compiler/frame.h b/deps/v8/src/compiler/frame.h
index fe8008913d..f5c36dba17 100644
--- a/deps/v8/src/compiler/frame.h
+++ b/deps/v8/src/compiler/frame.h
@@ -22,7 +22,7 @@ class CallDescriptor;
// into them. Mutable state associated with the frame is stored separately in
// FrameAccessState.
//
-// Frames are divided up into three regions.
+// Frames are divided up into four regions.
// - The first is the fixed header, which always has a constant size and can be
// predicted before code generation begins depending on the type of code being
// generated.
@@ -33,11 +33,15 @@ class CallDescriptor;
// reserved after register allocation, since its size can only be precisely
// determined after register allocation once the number of used callee-saved
// register is certain.
+// - The fourth region is a scratch area for return values from other functions
+// called, if multiple returns cannot all be passed in registers. This region
+// must be last in a stack frame, so that it is positioned immediately below
+// the stack frame of a callee, which stores into it.
//
// The frame region immediately below the fixed header contains spill slots
// starting at slot 4 for JSFunctions. The callee-saved frame region below that
-// starts at 4+spill_slot_count_. Callee stack slots corresponding to
-// parameters are accessible through negative slot ids.
+// starts at 4+spill_slot_count_. Callee stack slots correspond to
+// parameters that are accessible through negative slot ids.
//
// Every slot of a caller or callee frame is accessible by the register
// allocator and gap resolver with a SpillSlotOperand containing its
@@ -73,7 +77,13 @@ class CallDescriptor;
// |- - - - - - - - -| | |
// | ... | Callee-saved |
// |- - - - - - - - -| | |
-// m+r+3 | callee-saved r | v v
+// m+r+3 | callee-saved r | v |
+// +-----------------+---- |
+// m+r+4 | return 0 | ^ |
+// |- - - - - - - - -| | |
+// | ... | Return |
+// |- - - - - - - - -| | |
+// | return q-1 | v v
// -----+-----------------+----- <-- stack ptr -------------
//
class Frame : public ZoneObject {
@@ -81,8 +91,9 @@ class Frame : public ZoneObject {
explicit Frame(int fixed_frame_size_in_slots);
inline int GetTotalFrameSlotCount() const { return frame_slot_count_; }
-
+ inline int GetFixedSlotCount() const { return fixed_slot_count_; }
inline int GetSpillSlotCount() const { return spill_slot_count_; }
+ inline int GetReturnSlotCount() const { return return_slot_count_; }
void SetAllocatedRegisters(BitVector* regs) {
DCHECK_NULL(allocated_registers_);
@@ -112,19 +123,25 @@ class Frame : public ZoneObject {
}
int AllocateSpillSlot(int width, int alignment = 0) {
+ DCHECK_EQ(frame_slot_count_,
+ fixed_slot_count_ + spill_slot_count_ + return_slot_count_);
int frame_slot_count_before = frame_slot_count_;
- if (alignment <= kPointerSize) {
- AllocateAlignedFrameSlots(width);
- } else {
- // We need to allocate more place for spill slot
- // in case we need an aligned spill slot to be
- // able to properly align start of spill slot
- // and still have enough place to hold all the
- // data
- AllocateAlignedFrameSlots(width + alignment - kPointerSize);
+ if (alignment > kPointerSize) {
+ // Slots are pointer sized, so alignment greater than a pointer size
+ // requires allocating additional slots.
+ width += alignment - kPointerSize;
}
+ AllocateAlignedFrameSlots(width);
spill_slot_count_ += frame_slot_count_ - frame_slot_count_before;
- return frame_slot_count_ - 1;
+ return frame_slot_count_ - return_slot_count_ - 1;
+ }
+
+ void EnsureReturnSlots(int count) {
+ if (count > return_slot_count_) {
+ count -= return_slot_count_;
+ frame_slot_count_ += count;
+ return_slot_count_ += count;
+ }
}
int AlignFrame(int alignment = kDoubleSize);
@@ -152,8 +169,10 @@ class Frame : public ZoneObject {
}
private:
+ int fixed_slot_count_;
int frame_slot_count_;
int spill_slot_count_;
+ int return_slot_count_;
BitVector* allocated_registers_;
BitVector* allocated_double_registers_;
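
The frame now keeps fixed, spill, and return slots as separate counters whose sum is the total slot count. Spill slots sit below the fixed header but above the return area, which is why AllocateSpillSlot subtracts return_slot_count_ from the index it hands out, and why EnsureReturnSlots only ever grows the return region. A simplified, slots-only sketch (ignoring byte widths and alignment, which the real code handles):

#include <cassert>
#include <cstdio>

// Hypothetical, simplified frame model: one slot per allocation request.
class ToyFrame {
 public:
  explicit ToyFrame(int fixed_slots) : fixed_(fixed_slots), total_(fixed_slots) {}

  // Returns the slot index of the new spill slot, counted from the frame top.
  int AllocateSpillSlot() {
    ++total_;
    ++spill_;
    // Return slots live below the spill area, so skip them in the index.
    return total_ - returns_ - 1;
  }

  // Grows the return-value scratch area to at least `count` slots.
  void EnsureReturnSlots(int count) {
    if (count > returns_) {
      total_ += count - returns_;
      returns_ = count;
    }
  }

  int total() const { return total_; }

 private:
  int fixed_;
  int total_;
  int spill_ = 0;
  int returns_ = 0;
};

int main() {
  ToyFrame frame(4);                   // fixed header of 4 slots
  frame.EnsureReturnSlots(2);          // reserve 2 return slots
  int s0 = frame.AllocateSpillSlot();  // lands above the return area
  assert(s0 == 4 && frame.total() == 7);
  printf("spill slot %d, total %d\n", s0, frame.total());
}
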
diff --git a/deps/v8/src/compiler/gap-resolver.cc b/deps/v8/src/compiler/gap-resolver.cc
index 3dc1ee27c9..4542a73685 100644
--- a/deps/v8/src/compiler/gap-resolver.cc
+++ b/deps/v8/src/compiler/gap-resolver.cc
@@ -5,7 +5,6 @@
#include "src/compiler/gap-resolver.h"
#include <algorithm>
-#include <functional>
#include <set>
namespace v8 {
@@ -19,10 +18,6 @@ namespace {
const int kFloat32Bit = REP_BIT(MachineRepresentation::kFloat32);
const int kFloat64Bit = REP_BIT(MachineRepresentation::kFloat64);
-inline bool Blocks(MoveOperands* move, InstructionOperand destination) {
- return !move->IsEliminated() && move->source().InterferesWith(destination);
-}
-
// Splits a FP move between two location operands into the equivalent series of
// moves between smaller sub-operands, e.g. a double move to two single moves.
// This helps reduce the number of cycles that would normally occur under FP
@@ -53,7 +48,7 @@ MoveOperands* Split(MoveOperands* move, MachineRepresentation smaller_rep,
src_index = src_loc.register_code() * aliases;
} else {
src_index = src_loc.index();
- // For operands that occuply multiple slots, the index refers to the last
+ // For operands that occupy multiple slots, the index refers to the last
// slot. On little-endian architectures, we start at the high slot and use a
// negative step so that register-to-slot moves are in the correct order.
src_step = -slot_size;
@@ -197,8 +192,11 @@ void GapResolver::PerformMove(ParallelMove* moves, MoveOperands* move) {
// The move may be blocked on a (at most one) pending move, in which case we
// have a cycle. Search for such a blocking move and perform a swap to
// resolve it.
- auto blocker = std::find_if(moves->begin(), moves->end(),
- std::bind2nd(std::ptr_fun(&Blocks), destination));
+ auto blocker =
+ std::find_if(moves->begin(), moves->end(), [&](MoveOperands* move) {
+ return !move->IsEliminated() &&
+ move->source().InterferesWith(destination);
+ });
if (blocker == moves->end()) {
// The easy case: This move is not blocked.
assembler_->AssembleMove(&source, &destination);
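
The gap-resolver change replaces std::bind2nd and std::ptr_fun, which were deprecated in C++11 and removed in C++17, with an equivalent capturing lambda in std::find_if. A self-contained illustration of the same pattern over a toy move list (the Move and Operand types here are stand-ins, not V8's MoveOperands/InstructionOperand):

#include <algorithm>
#include <cstdio>
#include <vector>

// Toy stand-ins for V8's InstructionOperand / MoveOperands.
struct Operand { int slot; };
struct Move {
  Operand source;
  bool eliminated;
  bool InterferesWith(const Operand& dst) const { return source.slot == dst.slot; }
};

int main() {
  std::vector<Move> moves = {{{0}, false}, {{2}, true}, {{2}, false}};
  Operand destination{2};
  // Same shape as the new GapResolver code: the lambda captures `destination`
  // and replaces the old bind2nd(ptr_fun(&Blocks), destination) adaptor.
  auto blocker = std::find_if(moves.begin(), moves.end(), [&](const Move& m) {
    return !m.eliminated && m.InterferesWith(destination);
  });
  printf("blocker at index %d\n", static_cast<int>(blocker - moves.begin()));  // 2
}
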
diff --git a/deps/v8/src/compiler/graph-assembler.cc b/deps/v8/src/compiler/graph-assembler.cc
index 50001976a9..a0b2e0ff0a 100644
--- a/deps/v8/src/compiler/graph-assembler.cc
+++ b/deps/v8/src/compiler/graph-assembler.cc
@@ -134,6 +134,11 @@ Node* GraphAssembler::DebugBreak() {
current_effect_, current_control_);
}
+Node* GraphAssembler::Unreachable() {
+ return current_effect_ = graph()->NewNode(common()->Unreachable(),
+ current_effect_, current_control_);
+}
+
Node* GraphAssembler::Store(StoreRepresentation rep, Node* object, Node* offset,
Node* value) {
return current_effect_ =
@@ -164,24 +169,33 @@ Node* GraphAssembler::ToNumber(Node* value) {
value, NoContextConstant(), current_effect_);
}
-Node* GraphAssembler::DeoptimizeIf(DeoptimizeReason reason, Node* condition,
- Node* frame_state) {
+Node* GraphAssembler::BitcastWordToTagged(Node* value) {
+ return current_effect_ =
+ graph()->NewNode(machine()->BitcastWordToTagged(), value,
+ current_effect_, current_control_);
+}
+
+Node* GraphAssembler::DeoptimizeIf(DeoptimizeReason reason,
+ VectorSlotPair const& feedback,
+ Node* condition, Node* frame_state) {
return current_control_ = current_effect_ = graph()->NewNode(
- common()->DeoptimizeIf(DeoptimizeKind::kEager, reason), condition,
- frame_state, current_effect_, current_control_);
+ common()->DeoptimizeIf(DeoptimizeKind::kEager, reason, feedback),
+ condition, frame_state, current_effect_, current_control_);
}
Node* GraphAssembler::DeoptimizeIfNot(DeoptimizeKind kind,
- DeoptimizeReason reason, Node* condition,
- Node* frame_state) {
+ DeoptimizeReason reason,
+ VectorSlotPair const& feedback,
+ Node* condition, Node* frame_state) {
return current_control_ = current_effect_ = graph()->NewNode(
- common()->DeoptimizeUnless(kind, reason), condition, frame_state,
- current_effect_, current_control_);
+ common()->DeoptimizeUnless(kind, reason, feedback), condition,
+ frame_state, current_effect_, current_control_);
}
-Node* GraphAssembler::DeoptimizeIfNot(DeoptimizeReason reason, Node* condition,
- Node* frame_state) {
- return DeoptimizeIfNot(DeoptimizeKind::kEager, reason, condition,
+Node* GraphAssembler::DeoptimizeIfNot(DeoptimizeReason reason,
+ VectorSlotPair const& feedback,
+ Node* condition, Node* frame_state) {
+ return DeoptimizeIfNot(DeoptimizeKind::kEager, reason, feedback, condition,
frame_state);
}
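
The graph-assembler change threads a VectorSlotPair feedback argument through every deoptimization helper, with the short overload still forwarding to the general one and defaulting the kind to eager. A minimal sketch of that forwarding pattern with placeholder types (VectorSlotPair here is a hypothetical stub, not V8's class):

#include <cstdio>
#include <string>

enum class DeoptimizeKind { kEager, kLazy };

// Hypothetical stand-in for V8's VectorSlotPair (feedback vector + slot).
struct VectorSlotPair { int slot; };

// General form: all parameters explicit.
void DeoptimizeIfNot(DeoptimizeKind kind, const std::string& reason,
                     const VectorSlotPair& feedback) {
  printf("deopt-if-not kind=%d reason=%s feedback=%d\n",
         static_cast<int>(kind), reason.c_str(), feedback.slot);
}

// Convenience form: forwards with an eager kind, mirroring the new overload.
void DeoptimizeIfNot(const std::string& reason, const VectorSlotPair& feedback) {
  DeoptimizeIfNot(DeoptimizeKind::kEager, reason, feedback);
}

int main() {
  DeoptimizeIfNot("NotASmi", VectorSlotPair{3});
}
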
diff --git a/deps/v8/src/compiler/graph-assembler.h b/deps/v8/src/compiler/graph-assembler.h
index 3d3c2ed103..9ae74d0df5 100644
--- a/deps/v8/src/compiler/graph-assembler.h
+++ b/deps/v8/src/compiler/graph-assembler.h
@@ -8,6 +8,7 @@
#include "src/compiler/js-graph.h"
#include "src/compiler/node.h"
#include "src/compiler/simplified-operator.h"
+#include "src/vector-slot-pair.h"
namespace v8 {
namespace internal {
@@ -28,8 +29,7 @@ namespace compiler {
V(RoundFloat64ToInt32) \
V(TruncateFloat64ToWord32) \
V(Float64ExtractHighWord32) \
- V(Float64Abs) \
- V(BitcastWordToTagged)
+ V(Float64Abs)
#define PURE_ASSEMBLER_MACH_BINOP_LIST(V) \
V(WordShl) \
@@ -193,9 +193,12 @@ class GraphAssembler {
// Debugging
Node* DebugBreak();
+ Node* Unreachable();
+
Node* Float64RoundDown(Node* value);
Node* ToNumber(Node* value);
+ Node* BitcastWordToTagged(Node* value);
Node* Allocate(PretenureFlag pretenure, Node* size);
Node* LoadField(FieldAccess const&, Node* object);
Node* LoadElement(ElementAccess const&, Node* object, Node* index);
@@ -209,12 +212,13 @@ class GraphAssembler {
Node* Retain(Node* buffer);
Node* UnsafePointerAdd(Node* base, Node* external);
- Node* DeoptimizeIf(DeoptimizeReason reason, Node* condition,
- Node* frame_state);
+ Node* DeoptimizeIf(DeoptimizeReason reason, VectorSlotPair const& feedback,
+ Node* condition, Node* frame_state);
Node* DeoptimizeIfNot(DeoptimizeKind kind, DeoptimizeReason reason,
- Node* condition, Node* frame_state);
- Node* DeoptimizeIfNot(DeoptimizeReason reason, Node* condition,
+ VectorSlotPair const& feedback, Node* condition,
Node* frame_state);
+ Node* DeoptimizeIfNot(DeoptimizeReason reason, VectorSlotPair const& feedback,
+ Node* condition, Node* frame_state);
template <typename... Args>
Node* Call(const CallDescriptor* desc, Args... args);
template <typename... Args>
diff --git a/deps/v8/src/compiler/graph-trimmer.h b/deps/v8/src/compiler/graph-trimmer.h
index e57dc18b5e..edabae0b8a 100644
--- a/deps/v8/src/compiler/graph-trimmer.h
+++ b/deps/v8/src/compiler/graph-trimmer.h
@@ -15,7 +15,6 @@ namespace compiler {
// Forward declarations.
class Graph;
-
// Trims dead nodes from the node graph.
class V8_EXPORT_PRIVATE GraphTrimmer final {
public:
diff --git a/deps/v8/src/compiler/ia32/code-generator-ia32.cc b/deps/v8/src/compiler/ia32/code-generator-ia32.cc
index 8e9505bae1..47ded6a30c 100644
--- a/deps/v8/src/compiler/ia32/code-generator-ia32.cc
+++ b/deps/v8/src/compiler/ia32/code-generator-ia32.cc
@@ -175,17 +175,6 @@ bool HasImmediateInput(Instruction* instr, size_t index) {
return instr->InputAt(index)->IsImmediate();
}
-class OutOfLineLoadZero final : public OutOfLineCode {
- public:
- OutOfLineLoadZero(CodeGenerator* gen, Register result)
- : OutOfLineCode(gen), result_(result) {}
-
- void Generate() final { __ xor_(result_, result_); }
-
- private:
- Register const result_;
-};
-
class OutOfLineLoadFloat32NaN final : public OutOfLineCode {
public:
OutOfLineLoadFloat32NaN(CodeGenerator* gen, XMMRegister result)
@@ -298,425 +287,6 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
} // namespace
-#define ASSEMBLE_CHECKED_LOAD_FLOAT(asm_instr, OutOfLineLoadNaN, \
- SingleOrDouble) \
- do { \
- auto result = i.OutputDoubleRegister(); \
- if (instr->InputAt(0)->IsRegister()) { \
- auto offset = i.InputRegister(0); \
- if (instr->InputAt(1)->IsRegister()) { \
- __ cmp(offset, i.InputRegister(1)); \
- } else { \
- __ cmp(offset, i.InputImmediate(1)); \
- } \
- OutOfLineCode* ool = new (zone()) OutOfLineLoadNaN(this, result); \
- __ j(above_equal, ool->entry()); \
- __ asm_instr(result, i.MemoryOperand(2)); \
- __ bind(ool->exit()); \
- } else { \
- auto index2 = i.InputInt32(0); \
- auto length = i.InputInt32(1); \
- auto index1 = i.InputRegister(2); \
- RelocInfo::Mode rmode_length = i.ToConstant(instr->InputAt(1)).rmode(); \
- RelocInfo::Mode rmode_buffer = i.ToConstant(instr->InputAt(3)).rmode(); \
- DCHECK_LE(index2, length); \
- __ cmp(index1, Immediate(reinterpret_cast<Address>(length - index2), \
- rmode_length)); \
- class OutOfLineLoadFloat final : public OutOfLineCode { \
- public: \
- OutOfLineLoadFloat(CodeGenerator* gen, XMMRegister result, \
- Register buffer, Register index1, int32_t index2, \
- int32_t length, RelocInfo::Mode rmode_length, \
- RelocInfo::Mode rmode_buffer) \
- : OutOfLineCode(gen), \
- result_(result), \
- buffer_reg_(buffer), \
- buffer_int_(0), \
- index1_(index1), \
- index2_(index2), \
- length_(length), \
- rmode_length_(rmode_length), \
- rmode_buffer_(rmode_buffer) {} \
- \
- OutOfLineLoadFloat(CodeGenerator* gen, XMMRegister result, \
- int32_t buffer, Register index1, int32_t index2, \
- int32_t length, RelocInfo::Mode rmode_length, \
- RelocInfo::Mode rmode_buffer) \
- : OutOfLineCode(gen), \
- result_(result), \
- buffer_reg_(no_reg), \
- buffer_int_(buffer), \
- index1_(index1), \
- index2_(index2), \
- length_(length), \
- rmode_length_(rmode_length), \
- rmode_buffer_(rmode_buffer) {} \
- \
- void Generate() final { \
- Label oob; \
- __ push(index1_); \
- __ lea(index1_, Operand(index1_, index2_)); \
- __ cmp(index1_, Immediate(reinterpret_cast<Address>(length_), \
- rmode_length_)); \
- __ j(above_equal, &oob, Label::kNear); \
- if (buffer_reg_.is_valid()) { \
- __ asm_instr(result_, Operand(buffer_reg_, index1_, times_1, 0)); \
- } else { \
- __ asm_instr(result_, \
- Operand(index1_, buffer_int_, rmode_buffer_)); \
- } \
- __ pop(index1_); \
- __ jmp(exit()); \
- __ bind(&oob); \
- __ pop(index1_); \
- __ xorp##SingleOrDouble(result_, result_); \
- __ divs##SingleOrDouble(result_, result_); \
- } \
- \
- private: \
- XMMRegister const result_; \
- Register const buffer_reg_; \
- int32_t const buffer_int_; \
- Register const index1_; \
- int32_t const index2_; \
- int32_t const length_; \
- RelocInfo::Mode rmode_length_; \
- RelocInfo::Mode rmode_buffer_; \
- }; \
- if (instr->InputAt(3)->IsRegister()) { \
- auto buffer = i.InputRegister(3); \
- OutOfLineCode* ool = new (zone()) \
- OutOfLineLoadFloat(this, result, buffer, index1, index2, length, \
- rmode_length, rmode_buffer); \
- __ j(above_equal, ool->entry()); \
- __ asm_instr(result, Operand(buffer, index1, times_1, index2)); \
- __ bind(ool->exit()); \
- } else { \
- auto buffer = i.InputInt32(3); \
- OutOfLineCode* ool = new (zone()) \
- OutOfLineLoadFloat(this, result, buffer, index1, index2, length, \
- rmode_length, rmode_buffer); \
- __ j(above_equal, ool->entry()); \
- __ asm_instr(result, Operand(index1, buffer + index2, rmode_buffer)); \
- __ bind(ool->exit()); \
- } \
- } \
- } while (false)
-
-#define ASSEMBLE_CHECKED_LOAD_INTEGER(asm_instr) \
- do { \
- auto result = i.OutputRegister(); \
- if (instr->InputAt(0)->IsRegister()) { \
- auto offset = i.InputRegister(0); \
- if (instr->InputAt(1)->IsRegister()) { \
- __ cmp(offset, i.InputRegister(1)); \
- } else { \
- __ cmp(offset, i.InputImmediate(1)); \
- } \
- OutOfLineCode* ool = new (zone()) OutOfLineLoadZero(this, result); \
- __ j(above_equal, ool->entry()); \
- __ asm_instr(result, i.MemoryOperand(2)); \
- __ bind(ool->exit()); \
- } else { \
- auto index2 = i.InputInt32(0); \
- auto length = i.InputInt32(1); \
- auto index1 = i.InputRegister(2); \
- RelocInfo::Mode rmode_length = i.ToConstant(instr->InputAt(1)).rmode(); \
- RelocInfo::Mode rmode_buffer = i.ToConstant(instr->InputAt(3)).rmode(); \
- DCHECK_LE(index2, length); \
- __ cmp(index1, Immediate(reinterpret_cast<Address>(length - index2), \
- rmode_length)); \
- class OutOfLineLoadInteger final : public OutOfLineCode { \
- public: \
- OutOfLineLoadInteger(CodeGenerator* gen, Register result, \
- Register buffer, Register index1, int32_t index2, \
- int32_t length, RelocInfo::Mode rmode_length, \
- RelocInfo::Mode rmode_buffer) \
- : OutOfLineCode(gen), \
- result_(result), \
- buffer_reg_(buffer), \
- buffer_int_(0), \
- index1_(index1), \
- index2_(index2), \
- length_(length), \
- rmode_length_(rmode_length), \
- rmode_buffer_(rmode_buffer) {} \
- \
- OutOfLineLoadInteger(CodeGenerator* gen, Register result, \
- int32_t buffer, Register index1, int32_t index2, \
- int32_t length, RelocInfo::Mode rmode_length, \
- RelocInfo::Mode rmode_buffer) \
- : OutOfLineCode(gen), \
- result_(result), \
- buffer_reg_(no_reg), \
- buffer_int_(buffer), \
- index1_(index1), \
- index2_(index2), \
- length_(length), \
- rmode_length_(rmode_length), \
- rmode_buffer_(rmode_buffer) {} \
- \
- void Generate() final { \
- Label oob; \
- bool need_cache = result_ != index1_; \
- if (need_cache) __ push(index1_); \
- __ lea(index1_, Operand(index1_, index2_)); \
- __ cmp(index1_, Immediate(reinterpret_cast<Address>(length_), \
- rmode_length_)); \
- __ j(above_equal, &oob, Label::kNear); \
- if (buffer_reg_.is_valid()) { \
- __ asm_instr(result_, Operand(buffer_reg_, index1_, times_1, 0)); \
- } else { \
- __ asm_instr(result_, \
- Operand(index1_, buffer_int_, rmode_buffer_)); \
- } \
- if (need_cache) __ pop(index1_); \
- __ jmp(exit()); \
- __ bind(&oob); \
- if (need_cache) __ pop(index1_); \
- __ xor_(result_, result_); \
- } \
- \
- private: \
- Register const result_; \
- Register const buffer_reg_; \
- int32_t const buffer_int_; \
- Register const index1_; \
- int32_t const index2_; \
- int32_t const length_; \
- RelocInfo::Mode rmode_length_; \
- RelocInfo::Mode rmode_buffer_; \
- }; \
- if (instr->InputAt(3)->IsRegister()) { \
- auto buffer = i.InputRegister(3); \
- OutOfLineCode* ool = new (zone()) \
- OutOfLineLoadInteger(this, result, buffer, index1, index2, length, \
- rmode_length, rmode_buffer); \
- __ j(above_equal, ool->entry()); \
- __ asm_instr(result, Operand(buffer, index1, times_1, index2)); \
- __ bind(ool->exit()); \
- } else { \
- auto buffer = i.InputInt32(3); \
- OutOfLineCode* ool = new (zone()) \
- OutOfLineLoadInteger(this, result, buffer, index1, index2, length, \
- rmode_length, rmode_buffer); \
- __ j(above_equal, ool->entry()); \
- __ asm_instr(result, Operand(index1, buffer + index2, rmode_buffer)); \
- __ bind(ool->exit()); \
- } \
- } \
- } while (false)
-
-#define ASSEMBLE_CHECKED_STORE_FLOAT(asm_instr) \
- do { \
- auto value = i.InputDoubleRegister(2); \
- if (instr->InputAt(0)->IsRegister()) { \
- auto offset = i.InputRegister(0); \
- if (instr->InputAt(1)->IsRegister()) { \
- __ cmp(offset, i.InputRegister(1)); \
- } else { \
- __ cmp(offset, i.InputImmediate(1)); \
- } \
- Label done; \
- __ j(above_equal, &done, Label::kNear); \
- __ asm_instr(i.MemoryOperand(3), value); \
- __ bind(&done); \
- } else { \
- auto index2 = i.InputInt32(0); \
- auto length = i.InputInt32(1); \
- auto index1 = i.InputRegister(3); \
- RelocInfo::Mode rmode_length = i.ToConstant(instr->InputAt(1)).rmode(); \
- RelocInfo::Mode rmode_buffer = i.ToConstant(instr->InputAt(4)).rmode(); \
- DCHECK_LE(index2, length); \
- __ cmp(index1, Immediate(reinterpret_cast<Address>(length - index2), \
- rmode_length)); \
- class OutOfLineStoreFloat final : public OutOfLineCode { \
- public: \
- OutOfLineStoreFloat(CodeGenerator* gen, Register buffer, \
- Register index1, int32_t index2, int32_t length, \
- XMMRegister value, RelocInfo::Mode rmode_length, \
- RelocInfo::Mode rmode_buffer) \
- : OutOfLineCode(gen), \
- buffer_reg_(buffer), \
- buffer_int_(0), \
- index1_(index1), \
- index2_(index2), \
- length_(length), \
- value_(value), \
- rmode_length_(rmode_length), \
- rmode_buffer_(rmode_buffer) {} \
- \
- OutOfLineStoreFloat(CodeGenerator* gen, int32_t buffer, \
- Register index1, int32_t index2, int32_t length, \
- XMMRegister value, RelocInfo::Mode rmode_length, \
- RelocInfo::Mode rmode_buffer) \
- : OutOfLineCode(gen), \
- buffer_reg_(no_reg), \
- buffer_int_(buffer), \
- index1_(index1), \
- index2_(index2), \
- length_(length), \
- value_(value), \
- rmode_length_(rmode_length), \
- rmode_buffer_(rmode_buffer) {} \
- \
- void Generate() final { \
- Label oob; \
- __ push(index1_); \
- __ lea(index1_, Operand(index1_, index2_)); \
- __ cmp(index1_, Immediate(reinterpret_cast<Address>(length_), \
- rmode_length_)); \
- __ j(above_equal, &oob, Label::kNear); \
- if (buffer_reg_.is_valid()) { \
- __ asm_instr(Operand(buffer_reg_, index1_, times_1, 0), value_); \
- } else { \
- __ asm_instr(Operand(index1_, buffer_int_, rmode_buffer_), \
- value_); \
- } \
- __ bind(&oob); \
- __ pop(index1_); \
- } \
- \
- private: \
- Register const buffer_reg_; \
- int32_t const buffer_int_; \
- Register const index1_; \
- int32_t const index2_; \
- int32_t const length_; \
- XMMRegister const value_; \
- RelocInfo::Mode rmode_length_; \
- RelocInfo::Mode rmode_buffer_; \
- }; \
- if (instr->InputAt(4)->IsRegister()) { \
- auto buffer = i.InputRegister(4); \
- OutOfLineCode* ool = new (zone()) \
- OutOfLineStoreFloat(this, buffer, index1, index2, length, value, \
- rmode_length, rmode_buffer); \
- __ j(above_equal, ool->entry()); \
- __ asm_instr(Operand(buffer, index1, times_1, index2), value); \
- __ bind(ool->exit()); \
- } else { \
- auto buffer = i.InputInt32(4); \
- OutOfLineCode* ool = new (zone()) \
- OutOfLineStoreFloat(this, buffer, index1, index2, length, value, \
- rmode_length, rmode_buffer); \
- __ j(above_equal, ool->entry()); \
- __ asm_instr(Operand(index1, buffer + index2, rmode_buffer), value); \
- __ bind(ool->exit()); \
- } \
- } \
- } while (false)
-
-#define ASSEMBLE_CHECKED_STORE_INTEGER_IMPL(asm_instr, Value) \
- do { \
- if (instr->InputAt(0)->IsRegister()) { \
- auto offset = i.InputRegister(0); \
- if (instr->InputAt(1)->IsRegister()) { \
- __ cmp(offset, i.InputRegister(1)); \
- } else { \
- __ cmp(offset, i.InputImmediate(1)); \
- } \
- Label done; \
- __ j(above_equal, &done, Label::kNear); \
- __ asm_instr(i.MemoryOperand(3), value); \
- __ bind(&done); \
- } else { \
- auto index2 = i.InputInt32(0); \
- auto length = i.InputInt32(1); \
- auto index1 = i.InputRegister(3); \
- RelocInfo::Mode rmode_length = i.ToConstant(instr->InputAt(1)).rmode(); \
- RelocInfo::Mode rmode_buffer = i.ToConstant(instr->InputAt(4)).rmode(); \
- DCHECK_LE(index2, length); \
- __ cmp(index1, Immediate(reinterpret_cast<Address>(length - index2), \
- rmode_length)); \
- class OutOfLineStoreInteger final : public OutOfLineCode { \
- public: \
- OutOfLineStoreInteger(CodeGenerator* gen, Register buffer, \
- Register index1, int32_t index2, int32_t length, \
- Value value, RelocInfo::Mode rmode_length, \
- RelocInfo::Mode rmode_buffer) \
- : OutOfLineCode(gen), \
- buffer_reg_(buffer), \
- buffer_int_(0), \
- index1_(index1), \
- index2_(index2), \
- length_(length), \
- value_(value), \
- rmode_length_(rmode_length), \
- rmode_buffer_(rmode_buffer) {} \
- \
- OutOfLineStoreInteger(CodeGenerator* gen, int32_t buffer, \
- Register index1, int32_t index2, int32_t length, \
- Value value, RelocInfo::Mode rmode_length, \
- RelocInfo::Mode rmode_buffer) \
- : OutOfLineCode(gen), \
- buffer_reg_(no_reg), \
- buffer_int_(buffer), \
- index1_(index1), \
- index2_(index2), \
- length_(length), \
- value_(value), \
- rmode_length_(rmode_length), \
- rmode_buffer_(rmode_buffer) {} \
- \
- void Generate() final { \
- Label oob; \
- __ push(index1_); \
- __ lea(index1_, Operand(index1_, index2_)); \
- __ cmp(index1_, Immediate(reinterpret_cast<Address>(length_), \
- rmode_length_)); \
- __ j(above_equal, &oob, Label::kNear); \
- if (buffer_reg_.is_valid()) { \
- __ asm_instr(Operand(buffer_reg_, index1_, times_1, 0), value_); \
- } else { \
- __ asm_instr(Operand(index1_, buffer_int_, rmode_buffer_), \
- value_); \
- } \
- __ bind(&oob); \
- __ pop(index1_); \
- } \
- \
- private: \
- Register const buffer_reg_; \
- int32_t const buffer_int_; \
- Register const index1_; \
- int32_t const index2_; \
- int32_t const length_; \
- Value const value_; \
- RelocInfo::Mode rmode_length_; \
- RelocInfo::Mode rmode_buffer_; \
- }; \
- if (instr->InputAt(4)->IsRegister()) { \
- auto buffer = i.InputRegister(4); \
- OutOfLineCode* ool = new (zone()) \
- OutOfLineStoreInteger(this, buffer, index1, index2, length, value, \
- rmode_length, rmode_buffer); \
- __ j(above_equal, ool->entry()); \
- __ asm_instr(Operand(buffer, index1, times_1, index2), value); \
- __ bind(ool->exit()); \
- } else { \
- auto buffer = i.InputInt32(4); \
- OutOfLineCode* ool = new (zone()) \
- OutOfLineStoreInteger(this, buffer, index1, index2, length, value, \
- rmode_length, rmode_buffer); \
- __ j(above_equal, ool->entry()); \
- __ asm_instr(Operand(index1, buffer + index2, rmode_buffer), value); \
- __ bind(ool->exit()); \
- } \
- } \
- } while (false)
-
-#define ASSEMBLE_CHECKED_STORE_INTEGER(asm_instr) \
- do { \
- if (instr->InputAt(2)->IsRegister()) { \
- Register value = i.InputRegister(2); \
- ASSEMBLE_CHECKED_STORE_INTEGER_IMPL(asm_instr, Register); \
- } else { \
- Immediate value = i.InputImmediate(2); \
- ASSEMBLE_CHECKED_STORE_INTEGER_IMPL(asm_instr, Immediate); \
- } \
- } while (false)
-
#define ASSEMBLE_COMPARE(asm_instr) \
do { \
if (AddressingModeField::decode(instr->opcode()) != kMode_None) { \
@@ -1025,7 +595,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (FLAG_debug_code) {
// Check the function's context matches the context argument.
__ cmp(esi, FieldOperand(func, JSFunction::kContextOffset));
- __ Assert(equal, kWrongFunctionContext);
+ __ Assert(equal, AbortReason::kWrongFunctionContext);
}
__ mov(ecx, FieldOperand(func, JSFunction::kCodeOffset));
__ add(ecx, Immediate(Code::kHeaderSize - kHeapObjectTag));
@@ -1449,6 +1019,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kIA32Popcnt:
__ Popcnt(i.OutputRegister(), i.InputOperand(0));
break;
+ case kLFence:
+ __ lfence();
+ break;
case kSSEFloat32Cmp:
__ ucomiss(i.InputDoubleRegister(0), i.InputOperand(1));
break;
@@ -1892,6 +1465,15 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ movss(operand, i.InputDoubleRegister(index));
}
break;
+ case kIA32Movdqu:
+ if (instr->HasOutput()) {
+ __ Movdqu(i.OutputSimd128Register(), i.MemoryOperand());
+ } else {
+ size_t index = 0;
+ Operand operand = i.MemoryOperand(&index);
+ __ Movdqu(operand, i.InputSimd128Register(index));
+ }
+ break;
case kIA32BitcastFI:
if (instr->InputAt(0)->IsFPStackSlot()) {
__ mov(i.OutputRegister(), i.InputOperand(0));
@@ -1978,6 +1560,17 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
}
break;
+ case kIA32PushSimd128:
+ if (instr->InputAt(0)->IsFPRegister()) {
+ __ sub(esp, Immediate(kSimd128Size));
+ __ movups(Operand(esp, 0), i.InputSimd128Register(0));
+ } else {
+ __ movups(kScratchDoubleReg, i.InputOperand(0));
+ __ sub(esp, Immediate(kSimd128Size));
+ __ movups(Operand(esp, 0), kScratchDoubleReg);
+ }
+ frame_access_state()->IncreaseSPDelta(kSimd128Size / kPointerSize);
+ break;
case kIA32Push:
if (AddressingModeField::decode(instr->opcode()) != kMode_None) {
size_t index = 0;
@@ -1997,7 +1590,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
break;
case kIA32Poke: {
- int const slot = MiscField::decode(instr->opcode());
+ int slot = MiscField::decode(instr->opcode());
if (HasImmediateInput(instr, 0)) {
__ mov(Operand(esp, slot * kPointerSize), i.InputImmediate(0));
} else {
@@ -2005,6 +1598,214 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
break;
}
+ case kIA32Peek: {
+ int reverse_slot = i.InputInt32(0) + 1;
+ int offset =
+ FrameSlotToFPOffset(frame()->GetTotalFrameSlotCount() - reverse_slot);
+ if (instr->OutputAt(0)->IsFPRegister()) {
+ LocationOperand* op = LocationOperand::cast(instr->OutputAt(0));
+ if (op->representation() == MachineRepresentation::kFloat64) {
+ __ movsd(i.OutputDoubleRegister(), Operand(ebp, offset));
+ } else {
+ DCHECK_EQ(MachineRepresentation::kFloat32, op->representation());
+ __ movss(i.OutputFloatRegister(), Operand(ebp, offset));
+ }
+ } else {
+ __ mov(i.OutputRegister(), Operand(ebp, offset));
+ }
+ break;
+ }
+ case kSSEF32x4Splat: {
+ DCHECK_EQ(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+ XMMRegister dst = i.OutputSimd128Register();
+ __ shufps(dst, dst, 0x0);
+ break;
+ }
+ case kAVXF32x4Splat: {
+ CpuFeatureScope avx_scope(tasm(), AVX);
+ XMMRegister src = i.InputFloatRegister(0);
+ __ vshufps(i.OutputSimd128Register(), src, src, 0x0);
+ break;
+ }
+ case kSSEF32x4ExtractLane: {
+ DCHECK_EQ(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+ XMMRegister dst = i.OutputFloatRegister();
+ int8_t lane = i.InputInt8(1);
+ if (lane != 0) {
+ DCHECK_LT(lane, 4);
+ __ shufps(dst, dst, lane);
+ }
+ break;
+ }
+ case kAVXF32x4ExtractLane: {
+ CpuFeatureScope avx_scope(tasm(), AVX);
+ XMMRegister dst = i.OutputFloatRegister();
+ XMMRegister src = i.InputSimd128Register(0);
+ int8_t lane = i.InputInt8(1);
+ if (lane == 0) {
+ if (dst != src) __ vmovaps(dst, src);
+ } else {
+ DCHECK_LT(lane, 4);
+ __ vshufps(dst, src, src, lane);
+ }
+ break;
+ }
+ case kSSEF32x4ReplaceLane: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ CpuFeatureScope sse_scope(tasm(), SSE4_1);
+ __ insertps(i.OutputSimd128Register(), i.InputOperand(2),
+ i.InputInt8(1) << 4);
+ break;
+ }
+ case kAVXF32x4ReplaceLane: {
+ CpuFeatureScope avx_scope(tasm(), AVX);
+ __ vinsertps(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(2), i.InputInt8(1) << 4);
+ break;
+ }
+ case kSSEF32x4Abs: {
+ XMMRegister dst = i.OutputSimd128Register();
+ Operand src = i.InputOperand(0);
+ if (src.is_reg(dst)) {
+ __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
+ __ psrld(kScratchDoubleReg, 1);
+ __ andps(dst, kScratchDoubleReg);
+ } else {
+ __ pcmpeqd(dst, dst);
+ __ psrld(dst, 1);
+ __ andps(dst, src);
+ }
+ break;
+ }
+ case kAVXF32x4Abs: {
+ CpuFeatureScope avx_scope(tasm(), AVX);
+ __ vpcmpeqd(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg);
+ __ vpsrld(kScratchDoubleReg, kScratchDoubleReg, 1);
+ __ vandps(i.OutputSimd128Register(), kScratchDoubleReg,
+ i.InputOperand(0));
+ break;
+ }
+ case kSSEF32x4Neg: {
+ XMMRegister dst = i.OutputSimd128Register();
+ Operand src = i.InputOperand(0);
+ if (src.is_reg(dst)) {
+ __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
+ __ pslld(kScratchDoubleReg, 31);
+ __ xorps(dst, kScratchDoubleReg);
+ } else {
+ __ pcmpeqd(dst, dst);
+ __ pslld(dst, 31);
+ __ xorps(dst, src);
+ }
+ break;
+ }
+ case kAVXF32x4Neg: {
+ CpuFeatureScope avx_scope(tasm(), AVX);
+ __ vpcmpeqd(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg);
+ __ vpslld(kScratchDoubleReg, kScratchDoubleReg, 31);
+ __ vxorps(i.OutputSimd128Register(), kScratchDoubleReg,
+ i.InputOperand(0));
+ break;
+ }
+ case kSSEF32x4Add: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ __ addps(i.OutputSimd128Register(), i.InputOperand(1));
+ break;
+ }
+ case kAVXF32x4Add: {
+ CpuFeatureScope avx_scope(tasm(), AVX);
+ __ vaddps(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
+ break;
+ }
+ case kSSEF32x4Sub: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ __ subps(i.OutputSimd128Register(), i.InputOperand(1));
+ break;
+ }
+ case kAVXF32x4Sub: {
+ CpuFeatureScope avx_scope(tasm(), AVX);
+ __ vsubps(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
+ break;
+ }
+ case kSSEF32x4Mul: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ __ mulps(i.OutputSimd128Register(), i.InputOperand(1));
+ break;
+ }
+ case kAVXF32x4Mul: {
+ CpuFeatureScope avx_scope(tasm(), AVX);
+ __ vmulps(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
+ break;
+ }
+ case kSSEF32x4Min: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ __ minps(i.OutputSimd128Register(), i.InputOperand(1));
+ break;
+ }
+ case kAVXF32x4Min: {
+ CpuFeatureScope avx_scope(tasm(), AVX);
+ __ vminps(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
+ break;
+ }
+ case kSSEF32x4Max: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ __ maxps(i.OutputSimd128Register(), i.InputOperand(1));
+ break;
+ }
+ case kAVXF32x4Max: {
+ CpuFeatureScope avx_scope(tasm(), AVX);
+ __ vmaxps(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
+ break;
+ }
+ case kSSEF32x4Eq: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ __ cmpeqps(i.OutputSimd128Register(), i.InputOperand(1));
+ break;
+ }
+ case kAVXF32x4Eq: {
+ CpuFeatureScope avx_scope(tasm(), AVX);
+ __ vcmpeqps(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
+ break;
+ }
+ case kSSEF32x4Ne: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ __ cmpneqps(i.OutputSimd128Register(), i.InputOperand(1));
+ break;
+ }
+ case kAVXF32x4Ne: {
+ CpuFeatureScope avx_scope(tasm(), AVX);
+ __ vcmpneqps(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
+ break;
+ }
+ case kSSEF32x4Lt: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ __ cmpltps(i.OutputSimd128Register(), i.InputOperand(1));
+ break;
+ }
+ case kAVXF32x4Lt: {
+ CpuFeatureScope avx_scope(tasm(), AVX);
+ __ vcmpltps(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
+ break;
+ }
+ case kSSEF32x4Le: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ __ cmpleps(i.OutputSimd128Register(), i.InputOperand(1));
+ break;
+ }
+ case kAVXF32x4Le: {
+ CpuFeatureScope avx_scope(tasm(), AVX);
+ __ vcmpleps(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
+ break;
+ }
case kIA32I32x4Splat: {
XMMRegister dst = i.OutputSimd128Register();
__ Movd(dst, i.InputOperand(0));
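
Each new F32x4 case above comes in an SSE form (destructive two-operand encoding, hence the DCHECK that the output register equals the first input) and an AVX form (non-destructive three-operand VEX encoding). The lane-wise semantics match the familiar packed-single intrinsics; the short sketch below only illustrates what the generated instructions compute, not how V8 emits them:

#include <immintrin.h>
#include <cstdio>

int main() {
  __m128 a = _mm_set_ps(4.0f, 3.0f, 2.0f, 1.0f);  // lanes 0..3: 1,2,3,4
  __m128 b = _mm_set_ps(1.0f, 5.0f, 2.0f, 8.0f);  // lanes 0..3: 8,2,5,1

  __m128 add = _mm_add_ps(a, b);    // addps:   lane-wise sum
  __m128 min = _mm_min_ps(a, b);    // minps:   lane-wise minimum
  __m128 eq  = _mm_cmpeq_ps(a, b);  // cmpeqps: all-ones mask where lanes are equal

  float out[4];
  _mm_storeu_ps(out, add);
  printf("add: %g %g %g %g\n", out[0], out[1], out[2], out[3]);  // 9 4 8 5
  _mm_storeu_ps(out, min);
  printf("min: %g %g %g %g\n", out[0], out[1], out[2], out[3]);  // 1 2 3 1
  _mm_storeu_ps(out, eq);
  // Comparison lanes are bit masks (all ones or all zeros), usually
  // reinterpreted as integers before use.
  printf("eq lane1 equal: %d\n", out[1] != 0.0f);  // 1
}
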
@@ -2774,52 +2575,68 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ vpcmpeqb(i.OutputSimd128Register(), kScratchDoubleReg, src2);
break;
}
- case kCheckedLoadInt8:
- ASSEMBLE_CHECKED_LOAD_INTEGER(movsx_b);
- break;
- case kCheckedLoadUint8:
- ASSEMBLE_CHECKED_LOAD_INTEGER(movzx_b);
- break;
- case kCheckedLoadInt16:
- ASSEMBLE_CHECKED_LOAD_INTEGER(movsx_w);
- break;
- case kCheckedLoadUint16:
- ASSEMBLE_CHECKED_LOAD_INTEGER(movzx_w);
+ case kIA32S128Zero: {
+ XMMRegister dst = i.OutputSimd128Register();
+ __ Pxor(dst, dst);
break;
- case kCheckedLoadWord32:
- ASSEMBLE_CHECKED_LOAD_INTEGER(mov);
+ }
+ case kSSES128Not: {
+ XMMRegister dst = i.OutputSimd128Register();
+ Operand src = i.InputOperand(0);
+ if (src.is_reg(dst)) {
+ __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
+ __ pxor(dst, kScratchDoubleReg);
+ } else {
+ __ pcmpeqd(dst, dst);
+ __ pxor(dst, src);
+ }
break;
- case kCheckedLoadFloat32:
- ASSEMBLE_CHECKED_LOAD_FLOAT(movss, OutOfLineLoadFloat32NaN, s);
+ }
+ case kAVXS128Not: {
+ CpuFeatureScope avx_scope(tasm(), AVX);
+ __ vpcmpeqd(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg);
+ __ vpxor(i.OutputSimd128Register(), kScratchDoubleReg, i.InputOperand(0));
break;
- case kCheckedLoadFloat64:
- ASSEMBLE_CHECKED_LOAD_FLOAT(movsd, OutOfLineLoadFloat64NaN, d);
+ }
+ case kSSES128And: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ __ pand(i.OutputSimd128Register(), i.InputOperand(1));
break;
- case kCheckedStoreWord8:
- ASSEMBLE_CHECKED_STORE_INTEGER(mov_b);
+ }
+ case kAVXS128And: {
+ CpuFeatureScope avx_scope(tasm(), AVX);
+ __ vpand(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
break;
- case kCheckedStoreWord16:
- ASSEMBLE_CHECKED_STORE_INTEGER(mov_w);
+ }
+ case kSSES128Or: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ __ por(i.OutputSimd128Register(), i.InputOperand(1));
break;
- case kCheckedStoreWord32:
- ASSEMBLE_CHECKED_STORE_INTEGER(mov);
+ }
+ case kAVXS128Or: {
+ CpuFeatureScope avx_scope(tasm(), AVX);
+ __ vpor(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
break;
- case kCheckedStoreFloat32:
- ASSEMBLE_CHECKED_STORE_FLOAT(movss);
+ }
+ case kSSES128Xor: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ __ pxor(i.OutputSimd128Register(), i.InputOperand(1));
break;
- case kCheckedStoreFloat64:
- ASSEMBLE_CHECKED_STORE_FLOAT(movsd);
+ }
+ case kAVXS128Xor: {
+ CpuFeatureScope avx_scope(tasm(), AVX);
+ __ vpxor(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
break;
+ }
case kIA32StackCheck: {
ExternalReference const stack_limit =
ExternalReference::address_of_stack_limit(__ isolate());
__ cmp(esp, Operand::StaticVariable(stack_limit));
break;
}
- case kCheckedLoadWord64:
- case kCheckedStoreWord64:
- UNREACHABLE(); // currently unsupported checked int64 load/store.
- break;
case kAtomicExchangeInt8: {
__ xchg_b(i.InputRegister(0), i.MemoryOperand(1));
__ movsx_b(i.InputRegister(0), i.InputRegister(0));
@@ -3038,7 +2855,7 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
new (gen_->zone()) ReferenceMap(gen_->zone());
gen_->RecordSafepoint(reference_map, Safepoint::kSimple, 0,
Safepoint::kNoLazyDeopt);
- __ AssertUnreachable(kUnexpectedReturnFromWasmTrap);
+ __ AssertUnreachable(AbortReason::kUnexpectedReturnFromWasmTrap);
}
}
@@ -3287,7 +3104,7 @@ void CodeGenerator::AssembleConstructFrame() {
if (info()->is_osr()) {
// TurboFan OSR-compiled functions cannot be entered directly.
- __ Abort(kShouldNotDirectlyEnterOsrFunction);
+ __ Abort(AbortReason::kShouldNotDirectlyEnterOsrFunction);
// Unoptimized code jumps directly to this entrypoint while the unoptimized
// frame is still on the stack. Optimized code uses OSR values directly from
@@ -3331,12 +3148,13 @@ void CodeGenerator::AssembleConstructFrame() {
ReferenceMap* reference_map = new (zone()) ReferenceMap(zone());
RecordSafepoint(reference_map, Safepoint::kSimple, 0,
Safepoint::kNoLazyDeopt);
- __ AssertUnreachable(kUnexpectedReturnFromWasmTrap);
+ __ AssertUnreachable(AbortReason::kUnexpectedReturnFromWasmTrap);
__ bind(&done);
}
- // Skip callee-saved slots, which are pushed below.
+ // Skip callee-saved and return slots, which are created below.
shrink_slots -= base::bits::CountPopulation(saves);
+ shrink_slots -= frame()->GetReturnSlotCount();
if (shrink_slots > 0) {
__ sub(esp, Immediate(shrink_slots * kPointerSize));
}
@@ -3348,6 +3166,11 @@ void CodeGenerator::AssembleConstructFrame() {
if (((1 << i) & saves)) __ push(Register::from_code(i));
}
}
+
+ // Allocate return slots (located after callee-saved).
+ if (frame()->GetReturnSlotCount() > 0) {
+ __ sub(esp, Immediate(frame()->GetReturnSlotCount() * kPointerSize));
+ }
}
void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
@@ -3356,6 +3179,10 @@ void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
const RegList saves = descriptor->CalleeSavedRegisters();
// Restore registers.
if (saves != 0) {
+ const int returns = frame()->GetReturnSlotCount();
+ if (returns != 0) {
+ __ add(esp, Immediate(returns * kPointerSize));
+ }
for (int i = 0; i < Register::kNumRegisters; i++) {
if (!((1 << i) & saves)) continue;
__ pop(Register::from_code(i));
diff --git a/deps/v8/src/compiler/ia32/instruction-codes-ia32.h b/deps/v8/src/compiler/ia32/instruction-codes-ia32.h
index b9bf261022..a17d9f06ce 100644
--- a/deps/v8/src/compiler/ia32/instruction-codes-ia32.h
+++ b/deps/v8/src/compiler/ia32/instruction-codes-ia32.h
@@ -43,6 +43,7 @@ namespace compiler {
V(IA32Lzcnt) \
V(IA32Tzcnt) \
V(IA32Popcnt) \
+ V(LFence) \
V(SSEFloat32Cmp) \
V(SSEFloat32Add) \
V(SSEFloat32Sub) \
@@ -103,14 +104,45 @@ namespace compiler {
V(IA32Movl) \
V(IA32Movss) \
V(IA32Movsd) \
+ V(IA32Movdqu) \
V(IA32BitcastFI) \
V(IA32BitcastIF) \
V(IA32Lea) \
V(IA32Push) \
V(IA32PushFloat32) \
V(IA32PushFloat64) \
+ V(IA32PushSimd128) \
V(IA32Poke) \
+ V(IA32Peek) \
V(IA32StackCheck) \
+ V(SSEF32x4Splat) \
+ V(AVXF32x4Splat) \
+ V(SSEF32x4ExtractLane) \
+ V(AVXF32x4ExtractLane) \
+ V(SSEF32x4ReplaceLane) \
+ V(AVXF32x4ReplaceLane) \
+ V(SSEF32x4Abs) \
+ V(AVXF32x4Abs) \
+ V(SSEF32x4Neg) \
+ V(AVXF32x4Neg) \
+ V(SSEF32x4Add) \
+ V(AVXF32x4Add) \
+ V(SSEF32x4Sub) \
+ V(AVXF32x4Sub) \
+ V(SSEF32x4Mul) \
+ V(AVXF32x4Mul) \
+ V(SSEF32x4Min) \
+ V(AVXF32x4Min) \
+ V(SSEF32x4Max) \
+ V(AVXF32x4Max) \
+ V(SSEF32x4Eq) \
+ V(AVXF32x4Eq) \
+ V(SSEF32x4Ne) \
+ V(AVXF32x4Ne) \
+ V(SSEF32x4Lt) \
+ V(AVXF32x4Lt) \
+ V(SSEF32x4Le) \
+ V(AVXF32x4Le) \
V(IA32I32x4Splat) \
V(IA32I32x4ExtractLane) \
V(SSEI32x4ReplaceLane) \
@@ -229,7 +261,16 @@ namespace compiler {
V(SSEI8x16GtU) \
V(AVXI8x16GtU) \
V(SSEI8x16GeU) \
- V(AVXI8x16GeU)
+ V(AVXI8x16GeU) \
+ V(IA32S128Zero) \
+ V(SSES128Not) \
+ V(AVXS128Not) \
+ V(SSES128And) \
+ V(AVXS128And) \
+ V(SSES128Or) \
+ V(AVXS128Or) \
+ V(SSES128Xor) \
+ V(AVXS128Xor)
// Addressing modes represent the "shape" of inputs to an instruction.
// Many instructions support multiple addressing modes. Addressing modes
diff --git a/deps/v8/src/compiler/ia32/instruction-scheduler-ia32.cc b/deps/v8/src/compiler/ia32/instruction-scheduler-ia32.cc
index 83c60e4455..db43c1ed1c 100644
--- a/deps/v8/src/compiler/ia32/instruction-scheduler-ia32.cc
+++ b/deps/v8/src/compiler/ia32/instruction-scheduler-ia32.cc
@@ -97,6 +97,34 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kAVXFloat32Neg:
case kIA32BitcastFI:
case kIA32BitcastIF:
+ case kSSEF32x4Splat:
+ case kAVXF32x4Splat:
+ case kSSEF32x4ExtractLane:
+ case kAVXF32x4ExtractLane:
+ case kSSEF32x4ReplaceLane:
+ case kAVXF32x4ReplaceLane:
+ case kSSEF32x4Abs:
+ case kAVXF32x4Abs:
+ case kSSEF32x4Neg:
+ case kAVXF32x4Neg:
+ case kSSEF32x4Add:
+ case kAVXF32x4Add:
+ case kSSEF32x4Sub:
+ case kAVXF32x4Sub:
+ case kSSEF32x4Mul:
+ case kAVXF32x4Mul:
+ case kSSEF32x4Min:
+ case kAVXF32x4Min:
+ case kSSEF32x4Max:
+ case kAVXF32x4Max:
+ case kSSEF32x4Eq:
+ case kAVXF32x4Eq:
+ case kSSEF32x4Ne:
+ case kAVXF32x4Ne:
+ case kSSEF32x4Lt:
+ case kAVXF32x4Lt:
+ case kSSEF32x4Le:
+ case kAVXF32x4Le:
case kIA32I32x4Splat:
case kIA32I32x4ExtractLane:
case kSSEI32x4ReplaceLane:
@@ -216,6 +244,15 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kAVXI8x16GtU:
case kSSEI8x16GeU:
case kAVXI8x16GeU:
+ case kIA32S128Zero:
+ case kSSES128Not:
+ case kAVXS128Not:
+ case kSSES128And:
+ case kAVXS128And:
+ case kSSES128Or:
+ case kAVXS128Or:
+ case kSSES128Xor:
+ case kAVXS128Xor:
return (instr->addressing_mode() == kMode_None)
? kNoOpcodeFlags
: kIsLoadOperation | kHasSideEffect;
@@ -235,16 +272,20 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kIA32Movl:
case kIA32Movss:
case kIA32Movsd:
+ case kIA32Movdqu:
// Moves are used for memory load/store operations.
return instr->HasOutput() ? kIsLoadOperation : kHasSideEffect;
case kIA32StackCheck:
+ case kIA32Peek:
return kIsLoadOperation;
case kIA32Push:
case kIA32PushFloat32:
case kIA32PushFloat64:
+ case kIA32PushSimd128:
case kIA32Poke:
+ case kLFence:
return kHasSideEffect;
#define CASE(Name) case k##Name:
@@ -262,18 +303,6 @@ int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
// Basic latency modeling for ia32 instructions. They have been determined
// in an empirical way.
switch (instr->arch_opcode()) {
- case kCheckedLoadInt8:
- case kCheckedLoadUint8:
- case kCheckedLoadInt16:
- case kCheckedLoadUint16:
- case kCheckedLoadWord32:
- case kCheckedLoadFloat32:
- case kCheckedLoadFloat64:
- case kCheckedStoreWord8:
- case kCheckedStoreWord16:
- case kCheckedStoreWord32:
- case kCheckedStoreFloat32:
- case kCheckedStoreFloat64:
case kSSEFloat64Mul:
return 5;
case kIA32Imul:
diff --git a/deps/v8/src/compiler/ia32/instruction-selector-ia32.cc b/deps/v8/src/compiler/ia32/instruction-selector-ia32.cc
index bae563d7b6..d8bf250ec6 100644
--- a/deps/v8/src/compiler/ia32/instruction-selector-ia32.cc
+++ b/deps/v8/src/compiler/ia32/instruction-selector-ia32.cc
@@ -225,6 +225,11 @@ void InstructionSelector::VisitDebugAbort(Node* node) {
Emit(kArchDebugAbort, g.NoOutput(), g.UseFixed(node->InputAt(0), edx));
}
+void InstructionSelector::VisitSpeculationFence(Node* node) {
+ IA32OperandGenerator g(this);
+ Emit(kLFence, g.NoOutput());
+}
+
void InstructionSelector::VisitLoad(Node* node) {
LoadRepresentation load_rep = LoadRepresentationOf(node->op());
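
VisitSpeculationFence lowers the machine-level SpeculationFence operator to an lfence instruction, which on x86 is commonly used as a barrier against speculatively executed loads past a bounds check. Outside the compiler the same instruction is reachable through the SSE2 intrinsic shown below; this is only an illustration of the instruction's effect, not of where V8 places the fence:

#include <immintrin.h>
#include <cstdio>

// Reads table[index] only if the index passed the bounds check, with an
// lfence between the check and the dependent load so the load is not
// executed speculatively with an out-of-bounds index.
int checked_read(const int* table, int size, int index) {
  if (index < 0 || index >= size) return -1;
  _mm_lfence();  // speculation barrier: later instructions wait for the branch
  return table[index];
}

int main() {
  int table[4] = {10, 20, 30, 40};
  printf("%d %d\n", checked_read(table, 4, 2), checked_read(table, 4, 9));  // 30 -1
}
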
@@ -249,8 +254,10 @@ void InstructionSelector::VisitLoad(Node* node) {
case MachineRepresentation::kWord32:
opcode = kIA32Movl;
break;
+ case MachineRepresentation::kSimd128:
+ opcode = kIA32Movdqu;
+ break;
case MachineRepresentation::kWord64: // Fall through.
- case MachineRepresentation::kSimd128: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
return;
@@ -339,8 +346,10 @@ void InstructionSelector::VisitStore(Node* node) {
case MachineRepresentation::kWord32:
opcode = kIA32Movl;
break;
+ case MachineRepresentation::kSimd128:
+ opcode = kIA32Movdqu;
+ break;
case MachineRepresentation::kWord64: // Fall through.
- case MachineRepresentation::kSimd128: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
return;
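
With kSimd128 now handled, 128-bit loads and stores are lowered to movdqu, the unaligned 128-bit SIMD move, so Simd128 values need no 16-byte alignment guarantee in memory. The intrinsic equivalents of that instruction pair are shown in this small sketch (illustrative only; V8 emits the instruction directly through its assembler):

#include <immintrin.h>
#include <cstdio>

int main() {
  unsigned char backing[36];
  for (int i = 0; i < 36; i++) backing[i] = static_cast<unsigned char>(i);

  // movdqu load from an address that is not 16-byte aligned.
  __m128i v = _mm_loadu_si128(reinterpret_cast<const __m128i*>(backing + 1));
  // movdqu store back to another unaligned address.
  _mm_storeu_si128(reinterpret_cast<__m128i*>(backing + 17), v);

  printf("%d %d\n", backing[17], backing[32]);  // prints 1 16
}
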
@@ -379,156 +388,6 @@ void InstructionSelector::VisitUnalignedLoad(Node* node) { UNREACHABLE(); }
// Architecture supports unaligned access, therefore VisitStore is used instead
void InstructionSelector::VisitUnalignedStore(Node* node) { UNREACHABLE(); }
-void InstructionSelector::VisitCheckedLoad(Node* node) {
- CheckedLoadRepresentation load_rep = CheckedLoadRepresentationOf(node->op());
- IA32OperandGenerator g(this);
- Node* const buffer = node->InputAt(0);
- Node* const offset = node->InputAt(1);
- Node* const length = node->InputAt(2);
- ArchOpcode opcode = kArchNop;
- switch (load_rep.representation()) {
- case MachineRepresentation::kWord8:
- opcode = load_rep.IsSigned() ? kCheckedLoadInt8 : kCheckedLoadUint8;
- break;
- case MachineRepresentation::kWord16:
- opcode = load_rep.IsSigned() ? kCheckedLoadInt16 : kCheckedLoadUint16;
- break;
- case MachineRepresentation::kWord32:
- opcode = kCheckedLoadWord32;
- break;
- case MachineRepresentation::kFloat32:
- opcode = kCheckedLoadFloat32;
- break;
- case MachineRepresentation::kFloat64:
- opcode = kCheckedLoadFloat64;
- break;
- case MachineRepresentation::kBit: // Fall through.
- case MachineRepresentation::kTaggedSigned: // Fall through.
- case MachineRepresentation::kTaggedPointer: // Fall through.
- case MachineRepresentation::kTagged: // Fall through.
- case MachineRepresentation::kWord64: // Fall through.
- case MachineRepresentation::kSimd128: // Fall through.
- case MachineRepresentation::kNone:
- UNREACHABLE();
- return;
- }
- if (offset->opcode() == IrOpcode::kInt32Add && CanCover(node, offset)) {
- Int32BinopMatcher moffset(offset);
- InstructionOperand buffer_operand = g.CanBeImmediate(buffer)
- ? g.UseImmediate(buffer)
- : g.UseRegister(buffer);
- Int32Matcher mlength(length);
- if (mlength.HasValue() && moffset.right().HasValue() &&
- moffset.right().Value() >= 0 &&
- mlength.Value() >= moffset.right().Value()) {
- Emit(opcode, g.DefineAsRegister(node),
- g.UseImmediate(moffset.right().node()), g.UseImmediate(length),
- g.UseRegister(moffset.left().node()), buffer_operand);
- return;
- }
- IntMatcher<int32_t, IrOpcode::kRelocatableInt32Constant> mmlength(length);
- if (mmlength.HasValue() && moffset.right().HasValue() &&
- moffset.right().Value() >= 0 &&
- mmlength.Value() >= moffset.right().Value()) {
- Emit(opcode, g.DefineAsRegister(node),
- g.UseImmediate(moffset.right().node()), g.UseImmediate(length),
- g.UseRegister(moffset.left().node()), buffer_operand);
- return;
- }
- }
- InstructionOperand offset_operand = g.UseRegister(offset);
- InstructionOperand length_operand =
- g.CanBeImmediate(length) ? g.UseImmediate(length) : g.UseRegister(length);
- if (g.CanBeImmediate(buffer)) {
- Emit(opcode | AddressingModeField::encode(kMode_MRI),
- g.DefineAsRegister(node), offset_operand, length_operand,
- offset_operand, g.UseImmediate(buffer));
- } else {
- Emit(opcode | AddressingModeField::encode(kMode_MR1),
- g.DefineAsRegister(node), offset_operand, length_operand,
- g.UseRegister(buffer), offset_operand);
- }
-}
-
-
-void InstructionSelector::VisitCheckedStore(Node* node) {
- MachineRepresentation rep = CheckedStoreRepresentationOf(node->op());
- IA32OperandGenerator g(this);
- Node* const buffer = node->InputAt(0);
- Node* const offset = node->InputAt(1);
- Node* const length = node->InputAt(2);
- Node* const value = node->InputAt(3);
- ArchOpcode opcode = kArchNop;
- switch (rep) {
- case MachineRepresentation::kWord8:
- opcode = kCheckedStoreWord8;
- break;
- case MachineRepresentation::kWord16:
- opcode = kCheckedStoreWord16;
- break;
- case MachineRepresentation::kWord32:
- opcode = kCheckedStoreWord32;
- break;
- case MachineRepresentation::kFloat32:
- opcode = kCheckedStoreFloat32;
- break;
- case MachineRepresentation::kFloat64:
- opcode = kCheckedStoreFloat64;
- break;
- case MachineRepresentation::kBit: // Fall through.
- case MachineRepresentation::kTaggedSigned: // Fall through.
- case MachineRepresentation::kTaggedPointer: // Fall through.
- case MachineRepresentation::kTagged: // Fall through.
- case MachineRepresentation::kWord64: // Fall through.
- case MachineRepresentation::kSimd128: // Fall through.
- case MachineRepresentation::kNone:
- UNREACHABLE();
- return;
- }
- InstructionOperand value_operand =
- g.CanBeImmediate(value) ? g.UseImmediate(value)
- : ((rep == MachineRepresentation::kWord8 ||
- rep == MachineRepresentation::kBit)
- ? g.UseByteRegister(value)
- : g.UseRegister(value));
- if (offset->opcode() == IrOpcode::kInt32Add && CanCover(node, offset)) {
- Int32BinopMatcher moffset(offset);
- InstructionOperand buffer_operand = g.CanBeImmediate(buffer)
- ? g.UseImmediate(buffer)
- : g.UseRegister(buffer);
- Int32Matcher mlength(length);
- if (mlength.HasValue() && moffset.right().HasValue() &&
- moffset.right().Value() >= 0 &&
- mlength.Value() >= moffset.right().Value()) {
- Emit(opcode, g.NoOutput(), g.UseImmediate(moffset.right().node()),
- g.UseImmediate(length), value_operand,
- g.UseRegister(moffset.left().node()), buffer_operand);
- return;
- }
- IntMatcher<int32_t, IrOpcode::kRelocatableInt32Constant> mmlength(length);
- if (mmlength.HasValue() && moffset.right().HasValue() &&
- moffset.right().Value() >= 0 &&
- mmlength.Value() >= moffset.right().Value()) {
- Emit(opcode, g.NoOutput(), g.UseImmediate(moffset.right().node()),
- g.UseImmediate(length), value_operand,
- g.UseRegister(moffset.left().node()), buffer_operand);
- return;
- }
- }
- InstructionOperand offset_operand = g.UseRegister(offset);
- InstructionOperand length_operand =
- g.CanBeImmediate(length) ? g.UseImmediate(length) : g.UseRegister(length);
- if (g.CanBeImmediate(buffer)) {
- Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
- offset_operand, length_operand, value_operand, offset_operand,
- g.UseImmediate(buffer));
- } else {
- Emit(opcode | AddressingModeField::encode(kMode_MR1), g.NoOutput(),
- offset_operand, length_operand, value_operand, g.UseRegister(buffer),
- offset_operand);
- }
-}
-
namespace {
// Shared routine for multiple binary operations.
@@ -599,7 +458,8 @@ void VisitBinop(InstructionSelector* selector, Node* node,
opcode = cont->Encode(opcode);
if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
- cont->kind(), cont->reason(), cont->frame_state());
+ cont->kind(), cont->reason(), cont->feedback(),
+ cont->frame_state());
} else {
selector->Emit(opcode, output_count, outputs, input_count, inputs);
}
@@ -1110,11 +970,11 @@ void InstructionSelector::EmitPrepareArguments(
// Poke any stack arguments.
for (size_t n = 0; n < arguments->size(); ++n) {
PushParameter input = (*arguments)[n];
- if (input.node()) {
+ if (input.node) {
int const slot = static_cast<int>(n);
InstructionOperand value = g.CanBeImmediate(node)
- ? g.UseImmediate(input.node())
- : g.UseRegister(input.node());
+ ? g.UseImmediate(input.node)
+ : g.UseRegister(input.node);
Emit(kIA32Poke | MiscField::encode(slot), g.NoOutput(), value);
}
}
@@ -1123,29 +983,30 @@ void InstructionSelector::EmitPrepareArguments(
int effect_level = GetEffectLevel(node);
for (PushParameter input : base::Reversed(*arguments)) {
// Skip any alignment holes in pushed nodes.
- Node* input_node = input.node();
- if (input.node() == nullptr) continue;
- if (g.CanBeMemoryOperand(kIA32Push, node, input_node, effect_level)) {
+ if (input.node == nullptr) continue;
+ if (g.CanBeMemoryOperand(kIA32Push, node, input.node, effect_level)) {
InstructionOperand outputs[1];
InstructionOperand inputs[4];
size_t input_count = 0;
InstructionCode opcode = kIA32Push;
AddressingMode mode = g.GetEffectiveAddressMemoryOperand(
- input_node, inputs, &input_count);
+ input.node, inputs, &input_count);
opcode |= AddressingModeField::encode(mode);
Emit(opcode, 0, outputs, input_count, inputs);
} else {
InstructionOperand value =
- g.CanBeImmediate(input.node())
- ? g.UseImmediate(input.node())
+ g.CanBeImmediate(input.node)
+ ? g.UseImmediate(input.node)
: IsSupported(ATOM) ||
- sequence()->IsFP(GetVirtualRegister(input.node()))
- ? g.UseRegister(input.node())
- : g.Use(input.node());
- if (input.type() == MachineType::Float32()) {
+ sequence()->IsFP(GetVirtualRegister(input.node))
+ ? g.UseRegister(input.node)
+ : g.Use(input.node);
+ if (input.location.GetType() == MachineType::Float32()) {
Emit(kIA32PushFloat32, g.NoOutput(), value);
- } else if (input.type() == MachineType::Float64()) {
+ } else if (input.location.GetType() == MachineType::Float64()) {
Emit(kIA32PushFloat64, g.NoOutput(), value);
+ } else if (input.location.GetType() == MachineType::Simd128()) {
+ Emit(kIA32PushSimd128, g.NoOutput(), value);
} else {
Emit(kIA32Push, g.NoOutput(), value);
}
@@ -1154,6 +1015,29 @@ void InstructionSelector::EmitPrepareArguments(
}
}
+void InstructionSelector::EmitPrepareResults(ZoneVector<PushParameter>* results,
+ const CallDescriptor* descriptor,
+ Node* node) {
+ IA32OperandGenerator g(this);
+
+ int reverse_slot = 0;
+ for (PushParameter output : *results) {
+ if (!output.location.IsCallerFrameSlot()) continue;
+ // Skip any alignment holes in nodes.
+ if (output.node != nullptr) {
+ DCHECK(!descriptor->IsCFunctionCall());
+ if (output.location.GetType() == MachineType::Float32()) {
+ MarkAsFloat32(output.node);
+ } else if (output.location.GetType() == MachineType::Float64()) {
+ MarkAsFloat64(output.node);
+ }
+ Emit(kIA32Peek, g.DefineAsRegister(output.node),
+ g.UseImmediate(reverse_slot));
+ }
+ reverse_slot += output.location.GetSizeInPointers();
+ }
+}
+
bool InstructionSelector::IsTailCallAddressImmediate() { return true; }
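The EmitPrepareResults added above only concerns results returned in caller frame slots: register results are skipped outright, each live stack result is read back with a peek, and the peek offset is a running total of pointer-sized slots. A minimal standalone sketch of that bookkeeping, using placeholder types rather than V8's PushParameter and LinkageLocation:

#include <cstdio>
#include <vector>

// Illustrative stand-ins, not V8 types: one call result and where it lives.
struct FakeResult {
  const char* name;
  bool is_caller_frame_slot;  // false means it is returned in a register
  bool is_live;               // false models an unused projection
  int size_in_pointers;       // e.g. 2 for a 128-bit value on a 32-bit target
};

int main() {
  std::vector<FakeResult> results = {
      {"r0", false, true, 1},  // register result: nothing to do
      {"r1", true, true, 1},   // stack result, read back from slot 0
      {"r2", true, false, 2},  // dead stack result: still occupies two slots
      {"r3", true, true, 1},   // read back from slot 3, after the 2-slot hole
  };
  int reverse_slot = 0;
  for (const FakeResult& r : results) {
    if (!r.is_caller_frame_slot) continue;
    if (r.is_live) {
      // The real code emits a peek instruction with this immediate.
      std::printf("%s <- peek(reverse_slot=%d)\n", r.name, reverse_slot);
    }
    reverse_slot += r.size_in_pointers;  // advances even for dead results
  }
  return 0;
}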
@@ -1181,7 +1065,8 @@ void VisitCompareWithMemoryOperand(InstructionSelector* selector,
selector->Emit(opcode, 0, nullptr, input_count, inputs);
} else if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, 0, nullptr, input_count, inputs,
- cont->kind(), cont->reason(), cont->frame_state());
+ cont->kind(), cont->reason(), cont->feedback(),
+ cont->frame_state());
} else if (cont->IsSet()) {
InstructionOperand output = g.DefineAsRegister(cont->result());
selector->Emit(opcode, 1, &output, input_count, inputs);
@@ -1203,7 +1088,8 @@ void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
g.Label(cont->true_block()), g.Label(cont->false_block()));
} else if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, g.NoOutput(), left, right, cont->kind(),
- cont->reason(), cont->frame_state());
+ cont->reason(), cont->feedback(),
+ cont->frame_state());
} else if (cont->IsSet()) {
selector->Emit(opcode, g.DefineAsByteRegister(cont->result()), left, right);
} else {
@@ -1389,7 +1275,8 @@ void VisitWordCompare(InstructionSelector* selector, Node* node,
g.Label(cont->false_block()));
} else if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, 0, nullptr, 0, nullptr, cont->kind(),
- cont->reason(), cont->frame_state());
+ cont->reason(), cont->feedback(),
+ cont->frame_state());
} else {
DCHECK(cont->IsSet());
selector->Emit(opcode, g.DefineAsRegister(cont->result()));
@@ -1503,14 +1390,14 @@ void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
void InstructionSelector::VisitDeoptimizeIf(Node* node) {
DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
- kNotEqual, p.kind(), p.reason(), node->InputAt(1));
+ kNotEqual, p.kind(), p.reason(), p.feedback(), node->InputAt(1));
VisitWordCompareZero(this, node, node->InputAt(0), &cont);
}
void InstructionSelector::VisitDeoptimizeUnless(Node* node) {
DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
- kEqual, p.kind(), p.reason(), node->InputAt(1));
+ kEqual, p.kind(), p.reason(), p.feedback(), node->InputAt(1));
VisitWordCompareZero(this, node, node->InputAt(0), &cont);
}
@@ -1897,12 +1784,21 @@ VISIT_ATOMIC_BINOP(Or)
VISIT_ATOMIC_BINOP(Xor)
#undef VISIT_ATOMIC_BINOP
-#define SIMD_TYPES(V) \
- V(I32x4) \
- V(I16x8) \
+#define SIMD_INT_TYPES(V) \
+ V(I32x4) \
+ V(I16x8) \
V(I8x16)
#define SIMD_BINOP_LIST(V) \
+ V(F32x4Add) \
+ V(F32x4Sub) \
+ V(F32x4Mul) \
+ V(F32x4Min) \
+ V(F32x4Max) \
+ V(F32x4Eq) \
+ V(F32x4Ne) \
+ V(F32x4Lt) \
+ V(F32x4Le) \
V(I32x4Add) \
V(I32x4Sub) \
V(I32x4Mul) \
@@ -1948,13 +1844,21 @@ VISIT_ATOMIC_BINOP(Xor)
V(I8x16MinU) \
V(I8x16MaxU) \
V(I8x16GtU) \
- V(I8x16GeU)
-
-#define SIMD_UNOP_LIST(V) \
- V(I32x4Neg) \
- V(I16x8Neg) \
+ V(I8x16GeU) \
+ V(S128And) \
+ V(S128Or) \
+ V(S128Xor)
+
+#define SIMD_INT_UNOP_LIST(V) \
+ V(I32x4Neg) \
+ V(I16x8Neg) \
V(I8x16Neg)
+#define SIMD_OTHER_UNOP_LIST(V) \
+ V(F32x4Abs) \
+ V(F32x4Neg) \
+ V(S128Not)
+
#define SIMD_SHIFT_OPCODES(V) \
V(I32x4Shl) \
V(I32x4ShrS) \
@@ -1963,11 +1867,38 @@ VISIT_ATOMIC_BINOP(Xor)
V(I16x8ShrS) \
V(I16x8ShrU)
+void InstructionSelector::VisitF32x4Splat(Node* node) {
+ IA32OperandGenerator g(this);
+ InstructionOperand operand0 = g.UseRegister(node->InputAt(0));
+ if (IsSupported(AVX)) {
+ Emit(kAVXF32x4Splat, g.DefineAsRegister(node), operand0);
+ } else {
+ Emit(kSSEF32x4Splat, g.DefineSameAsFirst(node), operand0);
+ }
+}
+
+void InstructionSelector::VisitF32x4ExtractLane(Node* node) {
+ IA32OperandGenerator g(this);
+ InstructionOperand operand0 = g.UseRegister(node->InputAt(0));
+ InstructionOperand operand1 = g.UseImmediate(OpParameter<int32_t>(node));
+ if (IsSupported(AVX)) {
+ Emit(kAVXF32x4ExtractLane, g.DefineAsRegister(node), operand0, operand1);
+ } else {
+ Emit(kSSEF32x4ExtractLane, g.DefineSameAsFirst(node), operand0, operand1);
+ }
+}
+
+void InstructionSelector::VisitS128Zero(Node* node) {
+ IA32OperandGenerator g(this);
+ Emit(kIA32S128Zero, g.DefineAsRegister(node));
+}
+
+
#define VISIT_SIMD_SPLAT(Type) \
void InstructionSelector::Visit##Type##Splat(Node* node) { \
VisitRO(this, node, kIA32##Type##Splat); \
}
-SIMD_TYPES(VISIT_SIMD_SPLAT)
+SIMD_INT_TYPES(VISIT_SIMD_SPLAT)
#undef VISIT_SIMD_SPLAT
#define VISIT_SIMD_EXTRACT_LANE(Type) \
@@ -1977,7 +1908,7 @@ SIMD_TYPES(VISIT_SIMD_SPLAT)
Emit(kIA32##Type##ExtractLane, g.DefineAsRegister(node), \
g.UseRegister(node->InputAt(0)), g.UseImmediate(lane)); \
}
-SIMD_TYPES(VISIT_SIMD_EXTRACT_LANE)
+SIMD_INT_TYPES(VISIT_SIMD_EXTRACT_LANE)
#undef VISIT_SIMD_EXTRACT_LANE
#define VISIT_SIMD_REPLACE_LANE(Type) \
@@ -1994,7 +1925,8 @@ SIMD_TYPES(VISIT_SIMD_EXTRACT_LANE)
operand1, operand2); \
} \
}
-SIMD_TYPES(VISIT_SIMD_REPLACE_LANE)
+SIMD_INT_TYPES(VISIT_SIMD_REPLACE_LANE)
+VISIT_SIMD_REPLACE_LANE(F32x4)
#undef VISIT_SIMD_REPLACE_LANE
#define VISIT_SIMD_SHIFT(Opcode) \
@@ -2011,13 +1943,22 @@ SIMD_TYPES(VISIT_SIMD_REPLACE_LANE)
SIMD_SHIFT_OPCODES(VISIT_SIMD_SHIFT)
#undef VISIT_SIMD_SHIFT
-#define VISIT_SIMD_UNOP(Opcode) \
+#define VISIT_SIMD_INT_UNOP(Opcode) \
void InstructionSelector::Visit##Opcode(Node* node) { \
IA32OperandGenerator g(this); \
Emit(kIA32##Opcode, g.DefineAsRegister(node), g.Use(node->InputAt(0))); \
}
-SIMD_UNOP_LIST(VISIT_SIMD_UNOP)
-#undef VISIT_SIMD_UNOP
+SIMD_INT_UNOP_LIST(VISIT_SIMD_INT_UNOP)
+#undef VISIT_SIMD_INT_UNOP
+
+#define VISIT_SIMD_OTHER_UNOP(Opcode) \
+ void InstructionSelector::Visit##Opcode(Node* node) { \
+ IA32OperandGenerator g(this); \
+ InstructionCode opcode = IsSupported(AVX) ? kAVX##Opcode : kSSE##Opcode; \
+ Emit(opcode, g.DefineAsRegister(node), g.Use(node->InputAt(0))); \
+ }
+SIMD_OTHER_UNOP_LIST(VISIT_SIMD_OTHER_UNOP)
+#undef VISIT_SIMD_OTHER_UNOP
#define VISIT_SIMD_BINOP(Opcode) \
void InstructionSelector::Visit##Opcode(Node* node) { \
@@ -2039,7 +1980,8 @@ MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
MachineOperatorBuilder::Flags flags =
MachineOperatorBuilder::kWord32ShiftIsSafe |
- MachineOperatorBuilder::kWord32Ctz;
+ MachineOperatorBuilder::kWord32Ctz |
+ MachineOperatorBuilder::kSpeculationFence;
if (CpuFeatures::IsSupported(POPCNT)) {
flags |= MachineOperatorBuilder::kWord32Popcnt;
}
diff --git a/deps/v8/src/compiler/instruction-codes.h b/deps/v8/src/compiler/instruction-codes.h
index f5457ee562..df3078d739 100644
--- a/deps/v8/src/compiler/instruction-codes.h
+++ b/deps/v8/src/compiler/instruction-codes.h
@@ -68,20 +68,6 @@ enum class RecordWriteMode { kValueIsMap, kValueIsPointer, kValueIsAny };
V(ArchParentFramePointer) \
V(ArchTruncateDoubleToI) \
V(ArchStoreWithWriteBarrier) \
- V(CheckedLoadInt8) \
- V(CheckedLoadUint8) \
- V(CheckedLoadInt16) \
- V(CheckedLoadUint16) \
- V(CheckedLoadWord32) \
- V(CheckedLoadWord64) \
- V(CheckedLoadFloat32) \
- V(CheckedLoadFloat64) \
- V(CheckedStoreWord8) \
- V(CheckedStoreWord16) \
- V(CheckedStoreWord32) \
- V(CheckedStoreWord64) \
- V(CheckedStoreFloat32) \
- V(CheckedStoreFloat64) \
V(ArchStackSlot) \
V(AtomicLoadInt8) \
V(AtomicLoadUint8) \
diff --git a/deps/v8/src/compiler/instruction-scheduler.cc b/deps/v8/src/compiler/instruction-scheduler.cc
index b1164767f2..f7afaab697 100644
--- a/deps/v8/src/compiler/instruction-scheduler.cc
+++ b/deps/v8/src/compiler/instruction-scheduler.cc
@@ -268,21 +268,7 @@ int InstructionScheduler::GetInstructionFlags(const Instruction* instr) const {
case kIeee754Float64Sinh:
case kIeee754Float64Tan:
case kIeee754Float64Tanh:
-#ifdef V8_TARGET_ARCH_ARM64
- // This is an unfortunate effect of arm64 dual stack pointers:
- // * TruncateDoubleToI may call a stub, and the stub will push and pop
- // values onto the stack. Push updates both CSP and JSSP but pop only
- // restores JSSP.
- // * kIeee754XXX opcodes call a C Function and the call macro may update
- // CSP to meet alignment requirements but it will not bring back CSP to
- // its original value.
- // Those opcodes cannot be reordered with instructions with side effects
- // such as Arm64ClaimCSP.
- // TODO(arm64): remove when JSSP is gone.
- return kHasSideEffect;
-#else
return kNoOpcodeFlags;
-#endif
case kArchStackPointer:
// ArchStackPointer instruction loads the current stack pointer value and
@@ -315,22 +301,6 @@ int InstructionScheduler::GetInstructionFlags(const Instruction* instr) const {
case kArchThrowTerminator:
return kIsBlockTerminator;
- case kCheckedLoadInt8:
- case kCheckedLoadUint8:
- case kCheckedLoadInt16:
- case kCheckedLoadUint16:
- case kCheckedLoadWord32:
- case kCheckedLoadWord64:
- case kCheckedLoadFloat32:
- case kCheckedLoadFloat64:
- return kIsLoadOperation;
-
- case kCheckedStoreWord8:
- case kCheckedStoreWord16:
- case kCheckedStoreWord32:
- case kCheckedStoreWord64:
- case kCheckedStoreFloat32:
- case kCheckedStoreFloat64:
case kArchStoreWithWriteBarrier:
return kHasSideEffect;
diff --git a/deps/v8/src/compiler/instruction-selector-impl.h b/deps/v8/src/compiler/instruction-selector-impl.h
index 8334d1751a..7c7a2708c5 100644
--- a/deps/v8/src/compiler/instruction-selector-impl.h
+++ b/deps/v8/src/compiler/instruction-selector-impl.h
@@ -251,6 +251,23 @@ class OperandGenerator {
return Constant(OpParameter<ExternalReference>(node));
case IrOpcode::kHeapConstant:
return Constant(OpParameter<Handle<HeapObject>>(node));
+ case IrOpcode::kDeadValue: {
+ switch (DeadValueRepresentationOf(node->op())) {
+ case MachineRepresentation::kBit:
+ case MachineRepresentation::kWord32:
+ case MachineRepresentation::kTagged:
+ case MachineRepresentation::kTaggedSigned:
+ case MachineRepresentation::kTaggedPointer:
+ return Constant(static_cast<int32_t>(0));
+ case MachineRepresentation::kFloat64:
+ return Constant(static_cast<double>(0));
+ case MachineRepresentation::kFloat32:
+ return Constant(static_cast<float>(0));
+ default:
+ UNREACHABLE();
+ }
+ break;
+ }
default:
break;
}
@@ -350,8 +367,9 @@ class FlagsContinuation final {
static FlagsContinuation ForDeoptimize(FlagsCondition condition,
DeoptimizeKind kind,
DeoptimizeReason reason,
+ VectorSlotPair const& feedback,
Node* frame_state) {
- return FlagsContinuation(condition, kind, reason, frame_state);
+ return FlagsContinuation(condition, kind, reason, feedback, frame_state);
}
// Creates a new flags continuation for a boolean value.
@@ -382,6 +400,10 @@ class FlagsContinuation final {
DCHECK(IsDeoptimize());
return reason_;
}
+ VectorSlotPair const& feedback() const {
+ DCHECK(IsDeoptimize());
+ return feedback_;
+ }
Node* frame_state() const {
DCHECK(IsDeoptimize());
return frame_state_or_result_;
@@ -452,11 +474,13 @@ class FlagsContinuation final {
private:
FlagsContinuation(FlagsCondition condition, DeoptimizeKind kind,
- DeoptimizeReason reason, Node* frame_state)
+ DeoptimizeReason reason, VectorSlotPair const& feedback,
+ Node* frame_state)
: mode_(kFlags_deoptimize),
condition_(condition),
kind_(kind),
reason_(reason),
+ feedback_(feedback),
frame_state_or_result_(frame_state) {
DCHECK_NOT_NULL(frame_state);
}
@@ -480,6 +504,7 @@ class FlagsContinuation final {
FlagsCondition condition_;
DeoptimizeKind kind_; // Only valid if mode_ == kFlags_deoptimize
DeoptimizeReason reason_; // Only valid if mode_ == kFlags_deoptimize
+ VectorSlotPair feedback_; // Only valid if mode_ == kFlags_deoptimize
Node* frame_state_or_result_; // Only valid if mode_ == kFlags_deoptimize
// or mode_ == kFlags_set.
BasicBlock* true_block_; // Only valid if mode_ == kFlags_branch.
diff --git a/deps/v8/src/compiler/instruction-selector.cc b/deps/v8/src/compiler/instruction-selector.cc
index d19692e3dd..c94b42b458 100644
--- a/deps/v8/src/compiler/instruction-selector.cc
+++ b/deps/v8/src/compiler/instruction-selector.cc
@@ -668,7 +668,7 @@ struct CallBuffer {
const CallDescriptor* descriptor;
FrameStateDescriptor* frame_state_descriptor;
- NodeVector output_nodes;
+ ZoneVector<PushParameter> output_nodes;
InstructionOperandVector outputs;
InstructionOperandVector instruction_args;
ZoneVector<PushParameter> pushed_nodes;
@@ -693,26 +693,38 @@ void InstructionSelector::InitializeCallBuffer(Node* call, CallBuffer* buffer,
bool is_tail_call,
int stack_param_delta) {
OperandGenerator g(this);
- DCHECK_LE(call->op()->ValueOutputCount(),
- static_cast<int>(buffer->descriptor->ReturnCount()));
+ size_t ret_count = buffer->descriptor->ReturnCount();
+ DCHECK_LE(call->op()->ValueOutputCount(), ret_count);
DCHECK_EQ(
call->op()->ValueInputCount(),
static_cast<int>(buffer->input_count() + buffer->frame_state_count()));
- if (buffer->descriptor->ReturnCount() > 0) {
+ if (ret_count > 0) {
// Collect the projections that represent multiple outputs from this call.
- if (buffer->descriptor->ReturnCount() == 1) {
- buffer->output_nodes.push_back(call);
+ if (ret_count == 1) {
+ PushParameter result = {call, buffer->descriptor->GetReturnLocation(0)};
+ buffer->output_nodes.push_back(result);
} else {
- buffer->output_nodes.resize(buffer->descriptor->ReturnCount(), nullptr);
+ buffer->output_nodes.resize(ret_count);
+ int stack_count = 0;
+ for (size_t i = 0; i < ret_count; ++i) {
+ LinkageLocation location = buffer->descriptor->GetReturnLocation(i);
+ buffer->output_nodes[i] = PushParameter(nullptr, location);
+ if (location.IsCallerFrameSlot()) {
+ stack_count += location.GetSizeInPointers();
+ }
+ }
for (Edge const edge : call->use_edges()) {
if (!NodeProperties::IsValueEdge(edge)) continue;
- DCHECK_EQ(IrOpcode::kProjection, edge.from()->opcode());
- size_t const index = ProjectionIndexOf(edge.from()->op());
+ Node* node = edge.from();
+ DCHECK_EQ(IrOpcode::kProjection, node->opcode());
+ size_t const index = ProjectionIndexOf(node->op());
+
DCHECK_LT(index, buffer->output_nodes.size());
- DCHECK(!buffer->output_nodes[index]);
- buffer->output_nodes[index] = edge.from();
+ DCHECK(!buffer->output_nodes[index].node);
+ buffer->output_nodes[index].node = node;
}
+ frame_->EnsureReturnSlots(stack_count);
}
// Filter out the outputs that aren't live because no projection uses them.
@@ -722,22 +734,22 @@ void InstructionSelector::InitializeCallBuffer(Node* call, CallBuffer* buffer,
: buffer->frame_state_descriptor->state_combine()
.ConsumedOutputCount();
for (size_t i = 0; i < buffer->output_nodes.size(); i++) {
- bool output_is_live = buffer->output_nodes[i] != nullptr ||
+ bool output_is_live = buffer->output_nodes[i].node != nullptr ||
i < outputs_needed_by_framestate;
if (output_is_live) {
- MachineRepresentation rep =
- buffer->descriptor->GetReturnType(static_cast<int>(i))
- .representation();
- LinkageLocation location =
- buffer->descriptor->GetReturnLocation(static_cast<int>(i));
+ LinkageLocation location = buffer->output_nodes[i].location;
+ MachineRepresentation rep = location.GetType().representation();
- Node* output = buffer->output_nodes[i];
+ Node* output = buffer->output_nodes[i].node;
InstructionOperand op = output == nullptr
? g.TempLocation(location)
: g.DefineAsLocation(output, location);
MarkAsRepresentation(rep, op);
- buffer->outputs.push_back(op);
+ if (!UnallocatedOperand::cast(op).HasFixedSlotPolicy()) {
+ buffer->outputs.push_back(op);
+ buffer->output_nodes[i].node = nullptr;
+ }
}
}
}
@@ -803,7 +815,7 @@ void InstructionSelector::InitializeCallBuffer(Node* call, CallBuffer* buffer,
int const state_id = sequence()->AddDeoptimizationEntry(
buffer->frame_state_descriptor, DeoptimizeKind::kLazy,
- DeoptimizeReason::kNoReason);
+ DeoptimizeReason::kUnknown, VectorSlotPair());
buffer->instruction_args.push_back(g.TempImmediate(state_id));
StateObjectDeduplicator deduplicator(instruction_zone());
@@ -842,8 +854,8 @@ void InstructionSelector::InitializeCallBuffer(Node* call, CallBuffer* buffer,
if (static_cast<size_t>(stack_index) >= buffer->pushed_nodes.size()) {
buffer->pushed_nodes.resize(stack_index + 1);
}
- PushParameter parameter(*iter, buffer->descriptor->GetInputType(index));
- buffer->pushed_nodes[stack_index] = parameter;
+ PushParameter param = {*iter, location};
+ buffer->pushed_nodes[stack_index] = param;
pushed_count++;
} else {
buffer->instruction_args.push_back(op);
@@ -890,7 +902,6 @@ void InstructionSelector::VisitBlock(BasicBlock* block) {
SetEffectLevel(node, effect_level);
if (node->opcode() == IrOpcode::kStore ||
node->opcode() == IrOpcode::kUnalignedStore ||
- node->opcode() == IrOpcode::kCheckedStore ||
node->opcode() == IrOpcode::kCall ||
node->opcode() == IrOpcode::kCallWithCallerSavedRegisters ||
node->opcode() == IrOpcode::kProtectedLoad ||
@@ -960,7 +971,7 @@ void InstructionSelector::VisitControl(BasicBlock* block) {
<< "only one predecessor." << std::endl
<< "# Current Block: " << *successor << std::endl
<< "# Node: " << *node;
- FATAL(str.str().c_str());
+ FATAL("%s", str.str().c_str());
}
}
}
@@ -1026,7 +1037,7 @@ void InstructionSelector::VisitControl(BasicBlock* block) {
case BasicBlock::kDeoptimize: {
DeoptimizeParameters p = DeoptimizeParametersOf(input->op());
Node* value = input->InputAt(0);
- return VisitDeoptimize(p.kind(), p.reason(), value);
+ return VisitDeoptimize(p.kind(), p.reason(), p.feedback(), value);
}
case BasicBlock::kThrow:
DCHECK_EQ(IrOpcode::kThrow, input->opcode());
@@ -1136,6 +1147,9 @@ void InstructionSelector::VisitNode(Node* node) {
case IrOpcode::kUnreachable:
VisitUnreachable(node);
return;
+ case IrOpcode::kDeadValue:
+ VisitDeadValue(node);
+ return;
case IrOpcode::kComment:
VisitComment(node);
return;
@@ -1472,14 +1486,6 @@ void InstructionSelector::VisitNode(Node* node) {
}
case IrOpcode::kUnalignedStore:
return VisitUnalignedStore(node);
- case IrOpcode::kCheckedLoad: {
- MachineRepresentation rep =
- CheckedLoadRepresentationOf(node->op()).representation();
- MarkAsRepresentation(rep, node);
- return VisitCheckedLoad(node);
- }
- case IrOpcode::kCheckedStore:
- return VisitCheckedStore(node);
case IrOpcode::kInt32PairAdd:
MarkAsWord32(node);
MarkPairProjectionsAsWord32(node);
@@ -1525,6 +1531,8 @@ void InstructionSelector::VisitNode(Node* node) {
ATOMIC_CASE(Or)
ATOMIC_CASE(Xor)
#undef ATOMIC_CASE
+ case IrOpcode::kSpeculationFence:
+ return VisitSpeculationFence(node);
case IrOpcode::kProtectedLoad: {
LoadRepresentation type = LoadRepresentationOf(node->op());
MarkAsRepresentation(type.representation(), node);
@@ -2089,12 +2097,6 @@ void InstructionSelector::VisitWord32PairSar(Node* node) { UNIMPLEMENTED(); }
#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS && \
!V8_TARGET_ARCH_MIPS64
-void InstructionSelector::VisitF32x4Splat(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitF32x4ExtractLane(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitF32x4ReplaceLane(Node* node) { UNIMPLEMENTED(); }
-
void InstructionSelector::VisitF32x4SConvertI32x4(Node* node) {
UNIMPLEMENTED();
}
@@ -2102,73 +2104,36 @@ void InstructionSelector::VisitF32x4SConvertI32x4(Node* node) {
void InstructionSelector::VisitF32x4UConvertI32x4(Node* node) {
UNIMPLEMENTED();
}
+#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS
+ // && !V8_TARGET_ARCH_MIPS64
+#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS && \
+ !V8_TARGET_ARCH_MIPS64 && !V8_TARGET_ARCH_IA32
void InstructionSelector::VisitF32x4Abs(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitF32x4Neg(Node* node) { UNIMPLEMENTED(); }
+#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS
+ // && !V8_TARGET_ARCH_MIPS64 && !V8_TARGET_ARCH_IA32
+#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS && \
+ !V8_TARGET_ARCH_MIPS64 && !V8_TARGET_ARCH_X64
void InstructionSelector::VisitF32x4RecipSqrtApprox(Node* node) {
UNIMPLEMENTED();
}
+#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS
+ // && !V8_TARGET_ARCH_MIPS64 && !V8_TARGET_ARCH_X64
-void InstructionSelector::VisitF32x4Add(Node* node) { UNIMPLEMENTED(); }
-
+#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS && \
+ !V8_TARGET_ARCH_MIPS64
void InstructionSelector::VisitF32x4AddHoriz(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitF32x4Sub(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitF32x4Mul(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitF32x4Max(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitF32x4Min(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitF32x4RecipApprox(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitF32x4Eq(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitF32x4Ne(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitF32x4Lt(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitF32x4Le(Node* node) { UNIMPLEMENTED(); }
#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS
// && !V8_TARGET_ARCH_MIPS64
-#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_X64 && \
- !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
-void InstructionSelector::VisitI32x4Splat(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI32x4ExtractLane(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI32x4ReplaceLane(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI32x4Add(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI32x4Sub(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI32x4Shl(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI32x4ShrS(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI32x4Mul(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI32x4MaxS(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI32x4MinS(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI32x4Eq(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI32x4Ne(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI32x4MinU(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI32x4MaxU(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI32x4ShrU(Node* node) { UNIMPLEMENTED(); }
-#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_X64
- // && !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_MIPS
- // && !V8_TARGET_ARCH_MIPS64
+#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS && \
+ !V8_TARGET_ARCH_MIPS64 && !V8_TARGET_ARCH_X64
+void InstructionSelector::VisitF32x4RecipApprox(Node* node) { UNIMPLEMENTED(); }
+#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS
+ // && !V8_TARGET_ARCH_MIPS64 && !V8_TARGET_ARCH_X64
#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_X64 && \
!V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
@@ -2221,79 +2186,11 @@ void InstructionSelector::VisitI16x8SConvertI32x4(Node* node) {
// && !V8_TARGET_ARCH_MIPS64
#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_X64 && \
- !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
-void InstructionSelector::VisitI32x4Neg(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI32x4GtS(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI32x4GeS(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI32x4GtU(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI32x4GeU(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI16x8Splat(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI16x8ExtractLane(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI16x8ReplaceLane(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI16x8Shl(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI16x8ShrS(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI16x8ShrU(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI16x8Add(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI16x8AddSaturateS(Node* node) {
- UNIMPLEMENTED();
-}
-
-void InstructionSelector::VisitI16x8Sub(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI16x8SubSaturateS(Node* node) {
- UNIMPLEMENTED();
-}
-#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_X64
- // && !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_MIPS &&
- // !V8_TARGET_ARCH_MIPS64
-
-#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_X64 && \
!V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
void InstructionSelector::VisitI16x8AddHoriz(Node* node) { UNIMPLEMENTED(); }
#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_X64
// && !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
-#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_X64 && \
- !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
-void InstructionSelector::VisitI16x8Mul(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI16x8MinS(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI16x8MaxS(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI16x8Eq(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI16x8Ne(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI16x8AddSaturateU(Node* node) {
- UNIMPLEMENTED();
-}
-
-void InstructionSelector::VisitI16x8SubSaturateU(Node* node) {
- UNIMPLEMENTED();
-}
-
-void InstructionSelector::VisitI16x8MinU(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI16x8MaxU(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI16x8Neg(Node* node) { UNIMPLEMENTED(); }
-#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_X64
- // && !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_MIPS &&
- // !V8_TARGET_ARCH_MIPS64
-
#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS && \
!V8_TARGET_ARCH_MIPS64
void InstructionSelector::VisitI16x8UConvertI32x4(Node* node) {
@@ -2310,21 +2207,6 @@ void InstructionSelector::VisitI16x8UConvertI8x16High(Node* node) {
#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS
// && !V8_TARGET_ARCH_MIPS64
-#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_X64 && \
- !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
-void InstructionSelector::VisitI16x8GtS(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI16x8GeS(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI16x8GtU(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI16x8GeU(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI8x16Neg(Node* node) { UNIMPLEMENTED(); }
-#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_X64
- // && !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_MIPS &&
- // !V8_TARGET_ARCH_MIPS64
-
#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS && \
!V8_TARGET_ARCH_MIPS64
void InstructionSelector::VisitI8x16Shl(Node* node) { UNIMPLEMENTED(); }
@@ -2333,17 +2215,6 @@ void InstructionSelector::VisitI8x16ShrS(Node* node) { UNIMPLEMENTED(); }
#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS
// && !V8_TARGET_ARCH_MIPS64
-#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_X64 && \
- !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
-void InstructionSelector::VisitI8x16Splat(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI8x16ExtractLane(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI8x16ReplaceLane(Node* node) { UNIMPLEMENTED(); }
-#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_X64
- // && !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_MIPS &&
- // !V8_TARGET_ARCH_MIPS64
-
#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS && \
!V8_TARGET_ARCH_MIPS64
void InstructionSelector::VisitI8x16SConvertI16x8(Node* node) {
@@ -2352,35 +2223,6 @@ void InstructionSelector::VisitI8x16SConvertI16x8(Node* node) {
#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS
// && !V8_TARGET_ARCH_MIPS64
-#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_X64 && \
- !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
-void InstructionSelector::VisitI8x16Add(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI8x16AddSaturateS(Node* node) {
- UNIMPLEMENTED();
-}
-
-void InstructionSelector::VisitI8x16Sub(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI8x16SubSaturateS(Node* node) {
- UNIMPLEMENTED();
-}
-
-void InstructionSelector::VisitI8x16MinS(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI8x16MaxS(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI8x16Eq(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI8x16Ne(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI8x16GtS(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI8x16GeS(Node* node) { UNIMPLEMENTED(); }
-#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_X64
- // && !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_MIPS &&
- // !V8_TARGET_ARCH_MIPS64
-
#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS && \
!V8_TARGET_ARCH_MIPS64
void InstructionSelector::VisitI8x16Mul(Node* node) { UNIMPLEMENTED(); }
@@ -2398,38 +2240,7 @@ void InstructionSelector::VisitI8x16UConvertI16x8(Node* node) {
// && !V8_TARGET_ARCH_MIPS64
#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_X64 && \
- !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
-void InstructionSelector::VisitI8x16AddSaturateU(Node* node) {
- UNIMPLEMENTED();
-}
-
-void InstructionSelector::VisitI8x16SubSaturateU(Node* node) {
- UNIMPLEMENTED();
-}
-
-void InstructionSelector::VisitI8x16MinU(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI8x16MaxU(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI8x16GtU(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI8x16GeU(Node* node) { UNIMPLEMENTED(); }
-#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_X64
- // && !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_MIPS &&
- // !V8_TARGET_ARCH_MIPS64
-
-#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_X64 && \
!V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
-void InstructionSelector::VisitS128And(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitS128Or(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitS128Xor(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitS128Not(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitS128Zero(Node* node) { UNIMPLEMENTED(); }
-
void InstructionSelector::VisitS128Select(Node* node) { UNIMPLEMENTED(); }
#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_X64
// && !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
@@ -2582,15 +2393,6 @@ void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) {
buffer.instruction_args.push_back(g.Label(handler));
}
- bool from_native_stack = linkage()->GetIncomingDescriptor()->UseNativeStack();
- bool to_native_stack = descriptor->UseNativeStack();
- if (from_native_stack != to_native_stack) {
- // (arm64 only) Mismatch in the use of stack pointers. One or the other
- // has to be restored manually by the code generator.
- flags |= to_native_stack ? CallDescriptor::kRestoreJSSP
- : CallDescriptor::kRestoreCSP;
- }
-
// Select the appropriate opcode based on the call type.
InstructionCode opcode = kArchNop;
switch (descriptor->kind()) {
@@ -2618,6 +2420,8 @@ void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) {
&buffer.instruction_args.front());
if (instruction_selection_failed()) return;
call_instr->MarkAsCall();
+
+ EmitPrepareResults(&(buffer.output_nodes), descriptor, node);
}
void InstructionSelector::VisitCallWithCallerSavedRegisters(
@@ -2685,6 +2489,14 @@ void InstructionSelector::VisitTailCall(Node* node) {
Emit(kArchPrepareTailCall, g.NoOutput());
+ // Add an immediate operand that represents the first slot that is unused
+ // with respect to the stack pointer that has been updated for the tail call
+ // instruction. This is used by backends that need to pad arguments for stack
+ // alignment, in order to store an optional slot of padding above the
+ // arguments.
+ int optional_padding_slot = callee->GetFirstUnusedStackSlot();
+ buffer.instruction_args.push_back(g.TempImmediate(optional_padding_slot));
+
int first_unused_stack_slot =
(V8_TARGET_ARCH_STORES_RETURN_ADDRESS_ON_STACK ? 1 : 0) +
stack_param_delta;
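As a hedged illustration of why that extra padding-slot immediate is useful: assume a backend that must keep the stack 16-byte aligned with 8-byte slots (both values are assumptions here, not taken from the patch). If the tail call's arguments occupy an odd number of slots, the backend can store one slot of padding at exactly the index named by the immediate.

#include <cstdio>

int main() {
  const int kPointerSize = 8;      // assumed slot size in bytes
  const int kStackAlignment = 16;  // assumed required stack alignment
  int argument_slots = 3;          // slots occupied by the tail call arguments
  int first_unused_slot = 3;       // stand-in for the emitted immediate

  // If the argument area is not a multiple of the alignment, one more slot
  // of padding above the arguments restores it.
  if ((argument_slots * kPointerSize) % kStackAlignment != 0) {
    std::printf("store padding at slot %d\n", first_unused_slot);
  } else {
    std::printf("already aligned, slot %d stays unused\n", first_unused_slot);
  }
  return 0;
}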
@@ -2724,29 +2536,31 @@ void InstructionSelector::VisitReturn(Node* ret) {
Instruction* InstructionSelector::EmitDeoptimize(
InstructionCode opcode, InstructionOperand output, InstructionOperand a,
- DeoptimizeKind kind, DeoptimizeReason reason, Node* frame_state) {
+ DeoptimizeKind kind, DeoptimizeReason reason,
+ VectorSlotPair const& feedback, Node* frame_state) {
size_t output_count = output.IsInvalid() ? 0 : 1;
InstructionOperand inputs[] = {a};
size_t input_count = arraysize(inputs);
return EmitDeoptimize(opcode, output_count, &output, input_count, inputs,
- kind, reason, frame_state);
+ kind, reason, feedback, frame_state);
}
Instruction* InstructionSelector::EmitDeoptimize(
InstructionCode opcode, InstructionOperand output, InstructionOperand a,
InstructionOperand b, DeoptimizeKind kind, DeoptimizeReason reason,
- Node* frame_state) {
+ VectorSlotPair const& feedback, Node* frame_state) {
size_t output_count = output.IsInvalid() ? 0 : 1;
InstructionOperand inputs[] = {a, b};
size_t input_count = arraysize(inputs);
return EmitDeoptimize(opcode, output_count, &output, input_count, inputs,
- kind, reason, frame_state);
+ kind, reason, feedback, frame_state);
}
Instruction* InstructionSelector::EmitDeoptimize(
InstructionCode opcode, size_t output_count, InstructionOperand* outputs,
size_t input_count, InstructionOperand* inputs, DeoptimizeKind kind,
- DeoptimizeReason reason, Node* frame_state) {
+ DeoptimizeReason reason, VectorSlotPair const& feedback,
+ Node* frame_state) {
OperandGenerator g(this);
FrameStateDescriptor* const descriptor = GetFrameStateDescriptor(frame_state);
InstructionOperandVector args(instruction_zone());
@@ -2757,7 +2571,7 @@ Instruction* InstructionSelector::EmitDeoptimize(
opcode |= MiscField::encode(static_cast<int>(input_count));
DCHECK_NE(DeoptimizeKind::kLazy, kind);
int const state_id =
- sequence()->AddDeoptimizationEntry(descriptor, kind, reason);
+ sequence()->AddDeoptimizationEntry(descriptor, kind, reason, feedback);
args.push_back(g.TempImmediate(state_id));
StateObjectDeduplicator deduplicator(instruction_zone());
AddInputsToFrameStateDescriptor(descriptor, frame_state, &g, &deduplicator,
@@ -2775,8 +2589,10 @@ void InstructionSelector::EmitIdentity(Node* node) {
void InstructionSelector::VisitDeoptimize(DeoptimizeKind kind,
DeoptimizeReason reason,
+ VectorSlotPair const& feedback,
Node* value) {
- EmitDeoptimize(kArchDeoptimize, 0, nullptr, 0, nullptr, kind, reason, value);
+ EmitDeoptimize(kArchDeoptimize, 0, nullptr, 0, nullptr, kind, reason,
+ feedback, value);
}
void InstructionSelector::VisitThrow(Node* node) {
@@ -2794,6 +2610,12 @@ void InstructionSelector::VisitUnreachable(Node* node) {
Emit(kArchDebugBreak, g.NoOutput());
}
+void InstructionSelector::VisitDeadValue(Node* node) {
+ OperandGenerator g(this);
+ MarkAsRepresentation(DeadValueRepresentationOf(node->op()), node);
+ Emit(kArchDebugBreak, g.DefineAsConstant(node));
+}
+
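VisitDeadValue, together with the new kDeadValue case in the operand generator, gives an unreachable value a well-typed placeholder: the node is marked with its representation and defined as a zero constant of the matching width on a debug-break instruction. A small sketch of that representation-to-constant idea, with a placeholder enum standing in for MachineRepresentation:

#include <cstdint>
#include <variant>

// Placeholder enum, not V8's MachineRepresentation.
enum class Rep { kBit, kWord32, kTagged, kFloat32, kFloat64 };

// A dead value never executes, but later phases still want a constant of the
// right width for it; zero of the matching type is sufficient.
std::variant<int32_t, float, double> ZeroConstantFor(Rep rep) {
  switch (rep) {
    case Rep::kFloat64:
      return 0.0;
    case Rep::kFloat32:
      return 0.0f;
    default:  // bit, word32 and tagged flavours all collapse to an int32 zero
      return int32_t{0};
  }
}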
void InstructionSelector::VisitComment(Node* node) {
OperandGenerator g(this);
InstructionOperand operand(g.UseImmediate(node));
diff --git a/deps/v8/src/compiler/instruction-selector.h b/deps/v8/src/compiler/instruction-selector.h
index 2bd85d7dab..75c41c165f 100644
--- a/deps/v8/src/compiler/instruction-selector.h
+++ b/deps/v8/src/compiler/instruction-selector.h
@@ -10,6 +10,7 @@
#include "src/compiler/common-operator.h"
#include "src/compiler/instruction-scheduler.h"
#include "src/compiler/instruction.h"
+#include "src/compiler/linkage.h"
#include "src/compiler/machine-operator.h"
#include "src/compiler/node.h"
#include "src/globals.h"
@@ -30,17 +31,13 @@ class StateObjectDeduplicator;
// This struct connects nodes of parameters which are going to be pushed on the
// call stack with their parameter index in the call descriptor of the callee.
-class PushParameter {
- public:
- PushParameter() : node_(nullptr), type_(MachineType::None()) {}
- PushParameter(Node* node, MachineType type) : node_(node), type_(type) {}
-
- Node* node() const { return node_; }
- MachineType type() const { return type_; }
+struct PushParameter {
+ PushParameter(Node* n = nullptr,
+ LinkageLocation l = LinkageLocation::ForAnyRegister())
+ : node(n), location(l) {}
- private:
- Node* node_;
- MachineType type_;
+ Node* node;
+ LinkageLocation location;
};
enum class FrameStateInputKind { kAny, kStackSlot };
@@ -115,15 +112,20 @@ class V8_EXPORT_PRIVATE InstructionSelector final {
Instruction* EmitDeoptimize(InstructionCode opcode, InstructionOperand output,
InstructionOperand a, DeoptimizeKind kind,
- DeoptimizeReason reason, Node* frame_state);
+ DeoptimizeReason reason,
+ VectorSlotPair const& feedback,
+ Node* frame_state);
Instruction* EmitDeoptimize(InstructionCode opcode, InstructionOperand output,
InstructionOperand a, InstructionOperand b,
DeoptimizeKind kind, DeoptimizeReason reason,
+ VectorSlotPair const& feedback,
Node* frame_state);
Instruction* EmitDeoptimize(InstructionCode opcode, size_t output_count,
InstructionOperand* outputs, size_t input_count,
InstructionOperand* inputs, DeoptimizeKind kind,
- DeoptimizeReason reason, Node* frame_state);
+ DeoptimizeReason reason,
+ VectorSlotPair const& feedback,
+ Node* frame_state);
// ===========================================================================
// ============== Architecture-independent CPU feature methods. ==============
@@ -345,14 +347,17 @@ class V8_EXPORT_PRIVATE InstructionSelector final {
void VisitBranch(Node* input, BasicBlock* tbranch, BasicBlock* fbranch);
void VisitSwitch(Node* node, const SwitchInfo& sw);
void VisitDeoptimize(DeoptimizeKind kind, DeoptimizeReason reason,
- Node* value);
+ VectorSlotPair const& feedback, Node* value);
void VisitReturn(Node* ret);
void VisitThrow(Node* node);
void VisitRetain(Node* node);
void VisitUnreachable(Node* node);
+ void VisitDeadValue(Node* node);
void EmitPrepareArguments(ZoneVector<compiler::PushParameter>* arguments,
const CallDescriptor* descriptor, Node* node);
+ void EmitPrepareResults(ZoneVector<compiler::PushParameter>* results,
+ const CallDescriptor* descriptor, Node* node);
void EmitIdentity(Node* node);
bool CanProduceSignalingNaN(Node* node);
diff --git a/deps/v8/src/compiler/instruction.cc b/deps/v8/src/compiler/instruction.cc
index b1b322e1ee..f335177b95 100644
--- a/deps/v8/src/compiler/instruction.cc
+++ b/deps/v8/src/compiler/instruction.cc
@@ -927,10 +927,10 @@ void InstructionSequence::MarkAsRepresentation(MachineRepresentation rep,
int InstructionSequence::AddDeoptimizationEntry(
FrameStateDescriptor* descriptor, DeoptimizeKind kind,
- DeoptimizeReason reason) {
+ DeoptimizeReason reason, VectorSlotPair const& feedback) {
int deoptimization_id = static_cast<int>(deoptimization_entries_.size());
deoptimization_entries_.push_back(
- DeoptimizationEntry(descriptor, kind, reason));
+ DeoptimizationEntry(descriptor, kind, reason, feedback));
return deoptimization_id;
}
diff --git a/deps/v8/src/compiler/instruction.h b/deps/v8/src/compiler/instruction.h
index b0f6661274..7772f18ad9 100644
--- a/deps/v8/src/compiler/instruction.h
+++ b/deps/v8/src/compiler/instruction.h
@@ -1317,17 +1317,22 @@ class DeoptimizationEntry final {
public:
DeoptimizationEntry() {}
DeoptimizationEntry(FrameStateDescriptor* descriptor, DeoptimizeKind kind,
- DeoptimizeReason reason)
- : descriptor_(descriptor), kind_(kind), reason_(reason) {}
+ DeoptimizeReason reason, VectorSlotPair const& feedback)
+ : descriptor_(descriptor),
+ kind_(kind),
+ reason_(reason),
+ feedback_(feedback) {}
FrameStateDescriptor* descriptor() const { return descriptor_; }
DeoptimizeKind kind() const { return kind_; }
DeoptimizeReason reason() const { return reason_; }
+ VectorSlotPair const& feedback() const { return feedback_; }
private:
FrameStateDescriptor* descriptor_ = nullptr;
DeoptimizeKind kind_ = DeoptimizeKind::kEager;
- DeoptimizeReason reason_ = DeoptimizeReason::kNoReason;
+ DeoptimizeReason reason_ = DeoptimizeReason::kUnknown;
+ VectorSlotPair feedback_ = VectorSlotPair();
};
typedef ZoneVector<DeoptimizationEntry> DeoptimizationVector;
@@ -1586,7 +1591,8 @@ class V8_EXPORT_PRIVATE InstructionSequence final
}
int AddDeoptimizationEntry(FrameStateDescriptor* descriptor,
- DeoptimizeKind kind, DeoptimizeReason reason);
+ DeoptimizeKind kind, DeoptimizeReason reason,
+ VectorSlotPair const& feedback);
DeoptimizationEntry const& GetDeoptimizationEntry(int deoptimization_id);
int GetDeoptimizationEntryCount() const {
return static_cast<int>(deoptimization_entries_.size());
diff --git a/deps/v8/src/compiler/int64-lowering.cc b/deps/v8/src/compiler/int64-lowering.cc
index 042d9e0ef7..940f0904b3 100644
--- a/deps/v8/src/compiler/int64-lowering.cc
+++ b/deps/v8/src/compiler/int64-lowering.cc
@@ -316,9 +316,10 @@ void Int64Lowering::LowerNode(Node* node) {
case IrOpcode::kTailCall: {
CallDescriptor* descriptor =
const_cast<CallDescriptor*>(CallDescriptorOf(node->op()));
- if (DefaultLowering(node) ||
- (descriptor->ReturnCount() == 1 &&
- descriptor->GetReturnType(0) == MachineType::Int64())) {
+ bool returns_require_lowering =
+ GetReturnCountAfterLowering(descriptor) !=
+ static_cast<int>(descriptor->ReturnCount());
+ if (DefaultLowering(node) || returns_require_lowering) {
// Tail calls do not have return values, so adjusting the call
// descriptor is enough.
auto new_descriptor = GetI32WasmCallDescriptor(zone(), descriptor);
@@ -688,7 +689,7 @@ void Int64Lowering::LowerNode(Node* node) {
Int32Matcher m(shift);
if (m.HasValue()) {
// Precondition: 0 <= shift < 64.
- int32_t shift_value = m.Value() & 0x3f;
+ int32_t shift_value = m.Value() & 0x3F;
if (shift_value == 0) {
ReplaceNode(node, GetReplacementLow(input),
GetReplacementHigh(input));
@@ -705,7 +706,7 @@ void Int64Lowering::LowerNode(Node* node) {
low_input = GetReplacementHigh(input);
high_input = GetReplacementLow(input);
}
- int32_t masked_shift_value = shift_value & 0x1f;
+ int32_t masked_shift_value = shift_value & 0x1F;
Node* masked_shift =
graph()->NewNode(common()->Int32Constant(masked_shift_value));
Node* inv_shift = graph()->NewNode(
@@ -726,7 +727,7 @@ void Int64Lowering::LowerNode(Node* node) {
if (!machine()->Word32ShiftIsSafe()) {
safe_shift =
graph()->NewNode(machine()->Word32And(), shift,
- graph()->NewNode(common()->Int32Constant(0x1f)));
+ graph()->NewNode(common()->Int32Constant(0x1F)));
}
// By creating this bit-mask with SAR and SHL we do not have to deal
@@ -750,7 +751,7 @@ void Int64Lowering::LowerNode(Node* node) {
if (machine()->Word32ShiftIsSafe()) {
masked_shift6 =
graph()->NewNode(machine()->Word32And(), shift,
- graph()->NewNode(common()->Int32Constant(0x3f)));
+ graph()->NewNode(common()->Int32Constant(0x3F)));
}
Diamond lt32(
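The hunks above belong to the lowering of a 64-bit shift-style operation into operations on 32-bit node pairs (the exact opcode lies outside this excerpt): the constant-shift path masks the amount to six bits, forwards both halves unchanged for a zero shift, and swaps the halves for amounts of 32 and above. As a hedged illustration of the same decomposition, here is a plain 64-bit left shift computed from 32-bit halves; it is a simplified stand-in, not the lowering code itself.

#include <cstdint>
#include <cstdio>

// Split a 64-bit left shift (amount already masked to 0..63) into
// operations on 32-bit halves.
void ShlPair(uint32_t low, uint32_t high, int shift,
             uint32_t* out_low, uint32_t* out_high) {
  shift &= 0x3F;
  if (shift == 0) {
    *out_low = low;
    *out_high = high;
  } else if (shift < 32) {
    *out_low = low << shift;
    *out_high = (high << shift) | (low >> (32 - shift));
  } else {  // 32 <= shift < 64: the low half moves entirely into the high half
    *out_low = 0;
    *out_high = low << (shift - 32);
  }
}

int main() {
  uint32_t lo, hi;
  ShlPair(0x89ABCDEFu, 0x01234567u, 4, &lo, &hi);
  std::printf("0x%08X%08X\n", hi, lo);  // prints 0x123456789ABCDEF0
  return 0;
}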
diff --git a/deps/v8/src/compiler/js-builtin-reducer.cc b/deps/v8/src/compiler/js-builtin-reducer.cc
index df6fdba3f0..7ff2bf6d5e 100644
--- a/deps/v8/src/compiler/js-builtin-reducer.cc
+++ b/deps/v8/src/compiler/js-builtin-reducer.cc
@@ -109,49 +109,22 @@ JSBuiltinReducer::JSBuiltinReducer(Editor* editor, JSGraph* jsgraph,
namespace {
-MaybeHandle<Map> GetMapWitness(Node* node) {
+Maybe<InstanceType> GetInstanceTypeWitness(Node* node) {
ZoneHandleSet<Map> maps;
Node* receiver = NodeProperties::GetValueInput(node, 1);
Node* effect = NodeProperties::GetEffectInput(node);
NodeProperties::InferReceiverMapsResult result =
NodeProperties::InferReceiverMaps(receiver, effect, &maps);
- if (result == NodeProperties::kReliableReceiverMaps && maps.size() == 1) {
- return maps[0];
- }
- return MaybeHandle<Map>();
-}
-// TODO(turbofan): This was copied from Crankshaft, might be too restrictive.
-bool IsReadOnlyLengthDescriptor(Handle<Map> jsarray_map) {
- DCHECK(!jsarray_map->is_dictionary_map());
- Isolate* isolate = jsarray_map->GetIsolate();
- Handle<Name> length_string = isolate->factory()->length_string();
- DescriptorArray* descriptors = jsarray_map->instance_descriptors();
- int number =
- descriptors->SearchWithCache(isolate, *length_string, *jsarray_map);
- DCHECK_NE(DescriptorArray::kNotFound, number);
- return descriptors->GetDetails(number).IsReadOnly();
-}
+ if (result == NodeProperties::kNoReceiverMaps || maps.size() == 0) {
+ return Nothing<InstanceType>();
+ }
-// TODO(turbofan): This was copied from Crankshaft, might be too restrictive.
-bool CanInlineArrayResizeOperation(Handle<Map> receiver_map) {
- Isolate* const isolate = receiver_map->GetIsolate();
- if (!receiver_map->prototype()->IsJSArray()) return false;
- Handle<JSArray> receiver_prototype(JSArray::cast(receiver_map->prototype()),
- isolate);
- // Ensure that all prototypes of the {receiver} are stable.
- for (PrototypeIterator it(isolate, receiver_prototype, kStartAtReceiver);
- !it.IsAtEnd(); it.Advance()) {
- Handle<JSReceiver> current = PrototypeIterator::GetCurrent<JSReceiver>(it);
- if (!current->map()->is_stable()) return false;
+ InstanceType first_type = maps[0]->instance_type();
+ for (const Handle<Map>& map : maps) {
+ if (map->instance_type() != first_type) return Nothing<InstanceType>();
}
- return receiver_map->instance_type() == JS_ARRAY_TYPE &&
- IsFastElementsKind(receiver_map->elements_kind()) &&
- !receiver_map->is_dictionary_map() && receiver_map->is_extensible() &&
- (!receiver_map->is_prototype_map() || receiver_map->is_stable()) &&
- isolate->IsNoElementsProtectorIntact() &&
- isolate->IsAnyInitialArrayPrototype(receiver_prototype) &&
- !IsReadOnlyLengthDescriptor(receiver_map);
+ return Just(first_type);
}
bool CanInlineJSArrayIteration(Handle<Map> receiver_map) {
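GetInstanceTypeWitness replaces the single-map witness above: it succeeds only when map inference returns at least one map and every inferred receiver map reports the same instance type. A standalone sketch of that agreement check, with placeholder types instead of V8's Map handles and Maybe:

#include <optional>
#include <vector>

// Placeholder, not V8's InstanceType.
enum class FakeInstanceType { kFastArrayValueIterator, kTypedArrayKeyIterator };

// Illustrative stand-in for a map that only exposes its instance type.
struct FakeMap {
  FakeInstanceType instance_type;
};

// Returns the common instance type of all maps, or nullopt if the set is
// empty or the maps disagree, mirroring the Maybe<InstanceType> result.
std::optional<FakeInstanceType> InstanceTypeWitness(
    const std::vector<FakeMap>& maps) {
  if (maps.empty()) return std::nullopt;
  FakeInstanceType first = maps[0].instance_type;
  for (const FakeMap& map : maps) {
    if (map.instance_type != first) return std::nullopt;
  }
  return first;
}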
@@ -189,7 +162,7 @@ bool CanInlineJSArrayIteration(Handle<Map> receiver_map) {
Reduction JSBuiltinReducer::ReduceArrayIterator(Node* node,
IterationKind kind) {
Handle<Map> receiver_map;
- if (GetMapWitness(node).ToHandle(&receiver_map)) {
+ if (NodeProperties::GetMapWitness(node).ToHandle(&receiver_map)) {
return ReduceArrayIterator(receiver_map, node, kind,
ArrayIteratorKind::kArray);
}
@@ -199,7 +172,7 @@ Reduction JSBuiltinReducer::ReduceArrayIterator(Node* node,
Reduction JSBuiltinReducer::ReduceTypedArrayIterator(Node* node,
IterationKind kind) {
Handle<Map> receiver_map;
- if (GetMapWitness(node).ToHandle(&receiver_map) &&
+ if (NodeProperties::GetMapWitness(node).ToHandle(&receiver_map) &&
receiver_map->instance_type() == JS_TYPED_ARRAY_TYPE) {
return ReduceArrayIterator(receiver_map, node, kind,
ArrayIteratorKind::kTypedArray);
@@ -313,8 +286,9 @@ Reduction JSBuiltinReducer::ReduceArrayIterator(Handle<Map> receiver_map,
return Replace(value);
}
-Reduction JSBuiltinReducer::ReduceFastArrayIteratorNext(
- Handle<Map> iterator_map, Node* node, IterationKind kind) {
+Reduction JSBuiltinReducer::ReduceFastArrayIteratorNext(InstanceType type,
+ Node* node,
+ IterationKind kind) {
Node* iterator = NodeProperties::GetValueInput(node, 1);
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
@@ -327,8 +301,8 @@ Reduction JSBuiltinReducer::ReduceFastArrayIteratorNext(
return NoChange();
}
- ElementsKind elements_kind = JSArrayIterator::ElementsKindForInstanceType(
- iterator_map->instance_type());
+ ElementsKind elements_kind =
+ JSArrayIterator::ElementsKindForInstanceType(type);
if (IsHoleyElementsKind(elements_kind)) {
if (!isolate()->IsNoElementsProtectorIntact()) {
@@ -484,15 +458,16 @@ Reduction JSBuiltinReducer::ReduceFastArrayIteratorNext(
return Replace(value);
}
-Reduction JSBuiltinReducer::ReduceTypedArrayIteratorNext(
- Handle<Map> iterator_map, Node* node, IterationKind kind) {
+Reduction JSBuiltinReducer::ReduceTypedArrayIteratorNext(InstanceType type,
+ Node* node,
+ IterationKind kind) {
Node* iterator = NodeProperties::GetValueInput(node, 1);
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
Node* context = NodeProperties::GetContextInput(node);
- ElementsKind elements_kind = JSArrayIterator::ElementsKindForInstanceType(
- iterator_map->instance_type());
+ ElementsKind elements_kind =
+ JSArrayIterator::ElementsKindForInstanceType(type);
Node* array = effect = graph()->NewNode(
simplified()->LoadField(AccessBuilder::ForJSArrayIteratorObject()),
@@ -725,65 +700,58 @@ Reduction JSBuiltinReducer::ReduceTypedArrayToStringTag(Node* node) {
}
Reduction JSBuiltinReducer::ReduceArrayIteratorNext(Node* node) {
- Handle<Map> receiver_map;
- if (GetMapWitness(node).ToHandle(&receiver_map)) {
- switch (receiver_map->instance_type()) {
- case JS_TYPED_ARRAY_KEY_ITERATOR_TYPE:
- return ReduceTypedArrayIteratorNext(receiver_map, node,
- IterationKind::kKeys);
-
- case JS_FAST_ARRAY_KEY_ITERATOR_TYPE:
- return ReduceFastArrayIteratorNext(receiver_map, node,
- IterationKind::kKeys);
-
- case JS_INT8_ARRAY_KEY_VALUE_ITERATOR_TYPE:
- case JS_UINT8_ARRAY_KEY_VALUE_ITERATOR_TYPE:
- case JS_INT16_ARRAY_KEY_VALUE_ITERATOR_TYPE:
- case JS_UINT16_ARRAY_KEY_VALUE_ITERATOR_TYPE:
- case JS_INT32_ARRAY_KEY_VALUE_ITERATOR_TYPE:
- case JS_UINT32_ARRAY_KEY_VALUE_ITERATOR_TYPE:
- case JS_FLOAT32_ARRAY_KEY_VALUE_ITERATOR_TYPE:
- case JS_FLOAT64_ARRAY_KEY_VALUE_ITERATOR_TYPE:
- case JS_UINT8_CLAMPED_ARRAY_KEY_VALUE_ITERATOR_TYPE:
- return ReduceTypedArrayIteratorNext(receiver_map, node,
- IterationKind::kEntries);
-
- case JS_FAST_SMI_ARRAY_KEY_VALUE_ITERATOR_TYPE:
- case JS_FAST_HOLEY_SMI_ARRAY_KEY_VALUE_ITERATOR_TYPE:
- case JS_FAST_ARRAY_KEY_VALUE_ITERATOR_TYPE:
- case JS_FAST_HOLEY_ARRAY_KEY_VALUE_ITERATOR_TYPE:
- case JS_FAST_DOUBLE_ARRAY_KEY_VALUE_ITERATOR_TYPE:
- case JS_FAST_HOLEY_DOUBLE_ARRAY_KEY_VALUE_ITERATOR_TYPE:
- return ReduceFastArrayIteratorNext(receiver_map, node,
- IterationKind::kEntries);
-
- case JS_INT8_ARRAY_VALUE_ITERATOR_TYPE:
- case JS_UINT8_ARRAY_VALUE_ITERATOR_TYPE:
- case JS_INT16_ARRAY_VALUE_ITERATOR_TYPE:
- case JS_UINT16_ARRAY_VALUE_ITERATOR_TYPE:
- case JS_INT32_ARRAY_VALUE_ITERATOR_TYPE:
- case JS_UINT32_ARRAY_VALUE_ITERATOR_TYPE:
- case JS_FLOAT32_ARRAY_VALUE_ITERATOR_TYPE:
- case JS_FLOAT64_ARRAY_VALUE_ITERATOR_TYPE:
- case JS_UINT8_CLAMPED_ARRAY_VALUE_ITERATOR_TYPE:
- return ReduceTypedArrayIteratorNext(receiver_map, node,
- IterationKind::kValues);
-
- case JS_FAST_SMI_ARRAY_VALUE_ITERATOR_TYPE:
- case JS_FAST_HOLEY_SMI_ARRAY_VALUE_ITERATOR_TYPE:
- case JS_FAST_ARRAY_VALUE_ITERATOR_TYPE:
- case JS_FAST_HOLEY_ARRAY_VALUE_ITERATOR_TYPE:
- case JS_FAST_DOUBLE_ARRAY_VALUE_ITERATOR_TYPE:
- case JS_FAST_HOLEY_DOUBLE_ARRAY_VALUE_ITERATOR_TYPE:
- return ReduceFastArrayIteratorNext(receiver_map, node,
- IterationKind::kValues);
-
- default:
- // Slow array iterators are not reduced
- return NoChange();
- }
+ Maybe<InstanceType> maybe_type = GetInstanceTypeWitness(node);
+ if (!maybe_type.IsJust()) return NoChange();
+ InstanceType type = maybe_type.FromJust();
+ switch (type) {
+ case JS_TYPED_ARRAY_KEY_ITERATOR_TYPE:
+ return ReduceTypedArrayIteratorNext(type, node, IterationKind::kKeys);
+
+ case JS_FAST_ARRAY_KEY_ITERATOR_TYPE:
+ return ReduceFastArrayIteratorNext(type, node, IterationKind::kKeys);
+
+ case JS_INT8_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_UINT8_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_INT16_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_UINT16_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_INT32_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_UINT32_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_FLOAT32_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_FLOAT64_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_UINT8_CLAMPED_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ return ReduceTypedArrayIteratorNext(type, node, IterationKind::kEntries);
+
+ case JS_FAST_SMI_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_FAST_HOLEY_SMI_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_FAST_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_FAST_HOLEY_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_FAST_DOUBLE_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_FAST_HOLEY_DOUBLE_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ return ReduceFastArrayIteratorNext(type, node, IterationKind::kEntries);
+
+ case JS_INT8_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_UINT8_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_INT16_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_UINT16_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_INT32_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_UINT32_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_FLOAT32_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_FLOAT64_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_UINT8_CLAMPED_ARRAY_VALUE_ITERATOR_TYPE:
+ return ReduceTypedArrayIteratorNext(type, node, IterationKind::kValues);
+
+ case JS_FAST_SMI_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_FAST_HOLEY_SMI_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_FAST_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_FAST_HOLEY_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_FAST_DOUBLE_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_FAST_HOLEY_DOUBLE_ARRAY_VALUE_ITERATOR_TYPE:
+ return ReduceFastArrayIteratorNext(type, node, IterationKind::kValues);
+
+ default:
+ // Slow array iterators are not reduced
+ return NoChange();
}
- return NoChange();
}
// ES6 section 22.1.2.2 Array.isArray ( arg )
@@ -896,398 +864,6 @@ Reduction JSBuiltinReducer::ReduceArrayIsArray(Node* node) {
return Replace(value);
}
-// ES6 section 22.1.3.17 Array.prototype.pop ( )
-Reduction JSBuiltinReducer::ReduceArrayPop(Node* node) {
- Handle<Map> receiver_map;
- Node* receiver = NodeProperties::GetValueInput(node, 1);
- Node* effect = NodeProperties::GetEffectInput(node);
- Node* control = NodeProperties::GetControlInput(node);
- // TODO(turbofan): Extend this to also handle fast holey double elements
- // once we got the hole NaN mess sorted out in TurboFan/V8.
- if (GetMapWitness(node).ToHandle(&receiver_map) &&
- CanInlineArrayResizeOperation(receiver_map) &&
- receiver_map->elements_kind() != HOLEY_DOUBLE_ELEMENTS) {
- // Install code dependencies on the {receiver} prototype maps and the
- // global array protector cell.
- dependencies()->AssumePropertyCell(factory()->no_elements_protector());
- dependencies()->AssumePrototypeMapsStable(receiver_map);
-
- // Load the "length" property of the {receiver}.
- Node* length = effect = graph()->NewNode(
- simplified()->LoadField(
- AccessBuilder::ForJSArrayLength(receiver_map->elements_kind())),
- receiver, effect, control);
-
- // Check if the {receiver} has any elements.
- Node* check = graph()->NewNode(simplified()->NumberEqual(), length,
- jsgraph()->ZeroConstant());
- Node* branch =
- graph()->NewNode(common()->Branch(BranchHint::kFalse), check, control);
-
- Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
- Node* etrue = effect;
- Node* vtrue = jsgraph()->UndefinedConstant();
-
- Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
- Node* efalse = effect;
- Node* vfalse;
- {
- // TODO(tebbi): We should trim the backing store if the capacity is too
- // big, as implemented in elements.cc:ElementsAccessorBase::SetLengthImpl.
-
- // Load the elements backing store from the {receiver}.
- Node* elements = efalse = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForJSObjectElements()),
- receiver, efalse, if_false);
-
- // Ensure that we aren't popping from a copy-on-write backing store.
- if (IsSmiOrObjectElementsKind(receiver_map->elements_kind())) {
- elements = efalse =
- graph()->NewNode(simplified()->EnsureWritableFastElements(),
- receiver, elements, efalse, if_false);
- }
-
- // Compute the new {length}.
- length = graph()->NewNode(simplified()->NumberSubtract(), length,
- jsgraph()->OneConstant());
-
- // Store the new {length} to the {receiver}.
- efalse = graph()->NewNode(
- simplified()->StoreField(
- AccessBuilder::ForJSArrayLength(receiver_map->elements_kind())),
- receiver, length, efalse, if_false);
-
- // Load the last entry from the {elements}.
- vfalse = efalse = graph()->NewNode(
- simplified()->LoadElement(AccessBuilder::ForFixedArrayElement(
- receiver_map->elements_kind())),
- elements, length, efalse, if_false);
-
- // Store a hole to the element we just removed from the {receiver}.
- efalse = graph()->NewNode(
- simplified()->StoreElement(AccessBuilder::ForFixedArrayElement(
- GetHoleyElementsKind(receiver_map->elements_kind()))),
- elements, length, jsgraph()->TheHoleConstant(), efalse, if_false);
- }
-
- control = graph()->NewNode(common()->Merge(2), if_true, if_false);
- effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
- Node* value =
- graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
- vtrue, vfalse, control);
-
- // Convert the hole to undefined. Do this last, so that we can optimize
- // conversion operator via some smart strength reduction in many cases.
- if (IsHoleyElementsKind(receiver_map->elements_kind())) {
- value =
- graph()->NewNode(simplified()->ConvertTaggedHoleToUndefined(), value);
- }
-
- ReplaceWithValue(node, value, effect, control);
- return Replace(value);
- }
- return NoChange();
-}
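
For orientation, the fast path that this removed ReduceArrayPop inlined corresponds, at the JavaScript-observable level, to the minimal C++ sketch below (std::vector and double stand in for the elements backing store and tagged values; the hole write-back and the hole-to-undefined conversion for holey kinds are not modelled):

    #include <optional>
    #include <vector>

    // Sketch only: an empty receiver yields undefined (here std::nullopt);
    // otherwise the length is decremented and the last element returned. The
    // real reducer also stores the hole into the vacated slot.
    std::optional<double> FastArrayPop(std::vector<double>& elements) {
      if (elements.empty()) return std::nullopt;
      double value = elements.back();
      elements.pop_back();
      return value;
    }
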
-
-// ES6 section 22.1.3.18 Array.prototype.push ( )
-Reduction JSBuiltinReducer::ReduceArrayPush(Node* node) {
- DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
- int const num_values = node->op()->ValueInputCount() - 2;
- Node* receiver = NodeProperties::GetValueInput(node, 1);
- Node* effect = NodeProperties::GetEffectInput(node);
- Node* control = NodeProperties::GetControlInput(node);
- ZoneHandleSet<Map> receiver_maps;
- NodeProperties::InferReceiverMapsResult result =
- NodeProperties::InferReceiverMaps(receiver, effect, &receiver_maps);
- if (receiver_maps.size() != 1) return NoChange();
- DCHECK_NE(NodeProperties::kNoReceiverMaps, result);
-
- // TODO(turbofan): Relax this to deal with multiple {receiver} maps.
- Handle<Map> receiver_map = receiver_maps[0];
- if (CanInlineArrayResizeOperation(receiver_map)) {
- // Collect the value inputs to push.
- std::vector<Node*> values(num_values);
- for (int i = 0; i < num_values; ++i) {
- values[i] = NodeProperties::GetValueInput(node, 2 + i);
- }
-
- // Install code dependencies on the {receiver} prototype maps and the
- // global array protector cell.
- dependencies()->AssumePropertyCell(factory()->no_elements_protector());
- dependencies()->AssumePrototypeMapsStable(receiver_map);
-
- // If the {receiver_maps} information is not reliable, we need
- // to check that the {receiver} still has one of these maps.
- if (result == NodeProperties::kUnreliableReceiverMaps) {
- if (receiver_map->is_stable()) {
- dependencies()->AssumeMapStable(receiver_map);
- } else {
- // TODO(turbofan): This is a potential - yet unlikely - deoptimization
- // loop, since we might not learn from this deoptimization in baseline
-        // code. We need a way to learn from deoptimizations in optimized code
-        // to address these problems.
- effect = graph()->NewNode(
- simplified()->CheckMaps(CheckMapsFlag::kNone, receiver_maps),
- receiver, effect, control);
- }
- }
-
- // TODO(turbofan): Perform type checks on the {values}. We are not
- // guaranteed to learn from these checks in case they fail, as the witness
- // (i.e. the map check from the LoadIC for a.push) might not be executed in
- // baseline code (after we stored the value in the builtin and thereby
-    // changed the elements kind of a) before we decide to optimize this
- // function again. We currently don't have a proper way to deal with this;
- // the proper solution here is to learn on deopt, i.e. disable
- // Array.prototype.push inlining for this function.
- for (auto& value : values) {
- if (IsSmiElementsKind(receiver_map->elements_kind())) {
- value = effect =
- graph()->NewNode(simplified()->CheckSmi(), value, effect, control);
- } else if (IsDoubleElementsKind(receiver_map->elements_kind())) {
- value = effect = graph()->NewNode(simplified()->CheckNumber(), value,
- effect, control);
- // Make sure we do not store signaling NaNs into double arrays.
- value = graph()->NewNode(simplified()->NumberSilenceNaN(), value);
- }
- }
-
- // Load the "length" property of the {receiver}.
- Node* length = effect = graph()->NewNode(
- simplified()->LoadField(
- AccessBuilder::ForJSArrayLength(receiver_map->elements_kind())),
- receiver, effect, control);
- Node* value = length;
-
- // Check if we have any {values} to push.
- if (num_values > 0) {
- // Compute the resulting "length" of the {receiver}.
- Node* new_length = value = graph()->NewNode(
- simplified()->NumberAdd(), length, jsgraph()->Constant(num_values));
-
- // Load the elements backing store of the {receiver}.
- Node* elements = effect = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForJSObjectElements()),
- receiver, effect, control);
- Node* elements_length = effect = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForFixedArrayLength()),
- elements, effect, control);
-
- // TODO(turbofan): Check if we need to grow the {elements} backing store.
- // This will deopt if we cannot grow the array further, and we currently
- // don't necessarily learn from it. See the comment on the value type
- // check above.
- GrowFastElementsMode mode =
- IsDoubleElementsKind(receiver_map->elements_kind())
- ? GrowFastElementsMode::kDoubleElements
- : GrowFastElementsMode::kSmiOrObjectElements;
- elements = effect = graph()->NewNode(
- simplified()->MaybeGrowFastElements(mode), receiver, elements,
- graph()->NewNode(simplified()->NumberAdd(), length,
- jsgraph()->Constant(num_values - 1)),
- elements_length, effect, control);
-
- // Update the JSArray::length field. Since this is observable,
- // there must be no other check after this.
- effect = graph()->NewNode(
- simplified()->StoreField(
- AccessBuilder::ForJSArrayLength(receiver_map->elements_kind())),
- receiver, new_length, effect, control);
-
- // Append the {values} to the {elements}.
- for (int i = 0; i < num_values; ++i) {
- Node* value = values[i];
- Node* index = graph()->NewNode(simplified()->NumberAdd(), length,
- jsgraph()->Constant(i));
- effect = graph()->NewNode(
- simplified()->StoreElement(AccessBuilder::ForFixedArrayElement(
- receiver_map->elements_kind())),
- elements, index, value, effect, control);
- }
- }
-
- ReplaceWithValue(node, value, effect, control);
- return Replace(value);
- }
- return NoChange();
-}
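
The removed ReduceArrayPush fast path amounts to appending the argument values at the old length and returning the new length. A hedged stand-alone C++ sketch (std::vector models the backing store, so the MaybeGrowFastElements step is implicit in push_back, and the Smi/Number checks on the pushed values are omitted):

    #include <cstddef>
    #include <initializer_list>
    #include <vector>

    // Sketch only: each value is stored at index old_length + i, then the new
    // length is returned, matching the inlined StoreElement/StoreField sequence.
    std::size_t FastArrayPush(std::vector<double>& elements,
                              std::initializer_list<double> values) {
      for (double value : values) {
        elements.push_back(value);
      }
      return elements.size();
    }
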
-
-// ES6 section 22.1.3.22 Array.prototype.shift ( )
-Reduction JSBuiltinReducer::ReduceArrayShift(Node* node) {
- Node* target = NodeProperties::GetValueInput(node, 0);
- Node* receiver = NodeProperties::GetValueInput(node, 1);
- Node* context = NodeProperties::GetContextInput(node);
- Node* frame_state = NodeProperties::GetFrameStateInput(node);
- Node* effect = NodeProperties::GetEffectInput(node);
- Node* control = NodeProperties::GetControlInput(node);
-
- // TODO(turbofan): Extend this to also handle fast holey double elements
- // once we got the hole NaN mess sorted out in TurboFan/V8.
- Handle<Map> receiver_map;
- if (GetMapWitness(node).ToHandle(&receiver_map) &&
- CanInlineArrayResizeOperation(receiver_map) &&
- receiver_map->elements_kind() != HOLEY_DOUBLE_ELEMENTS) {
- // Install code dependencies on the {receiver} prototype maps and the
- // global array protector cell.
- dependencies()->AssumePropertyCell(factory()->no_elements_protector());
- dependencies()->AssumePrototypeMapsStable(receiver_map);
-
- // Load length of the {receiver}.
- Node* length = effect = graph()->NewNode(
- simplified()->LoadField(
- AccessBuilder::ForJSArrayLength(receiver_map->elements_kind())),
- receiver, effect, control);
-
- // Return undefined if {receiver} has no elements.
- Node* check0 = graph()->NewNode(simplified()->NumberEqual(), length,
- jsgraph()->ZeroConstant());
- Node* branch0 =
- graph()->NewNode(common()->Branch(BranchHint::kFalse), check0, control);
-
- Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
- Node* etrue0 = effect;
- Node* vtrue0 = jsgraph()->UndefinedConstant();
-
- Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
- Node* efalse0 = effect;
- Node* vfalse0;
- {
- // Check if we should take the fast-path.
- Node* check1 =
- graph()->NewNode(simplified()->NumberLessThanOrEqual(), length,
- jsgraph()->Constant(JSArray::kMaxCopyElements));
- Node* branch1 = graph()->NewNode(common()->Branch(BranchHint::kTrue),
- check1, if_false0);
-
- Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
- Node* etrue1 = efalse0;
- Node* vtrue1;
- {
- Node* elements = etrue1 = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForJSObjectElements()),
- receiver, etrue1, if_true1);
-
- // Load the first element here, which we return below.
- vtrue1 = etrue1 = graph()->NewNode(
- simplified()->LoadElement(AccessBuilder::ForFixedArrayElement(
- receiver_map->elements_kind())),
- elements, jsgraph()->ZeroConstant(), etrue1, if_true1);
-
- // Ensure that we aren't shifting a copy-on-write backing store.
- if (IsSmiOrObjectElementsKind(receiver_map->elements_kind())) {
- elements = etrue1 =
- graph()->NewNode(simplified()->EnsureWritableFastElements(),
- receiver, elements, etrue1, if_true1);
- }
-
- // Shift the remaining {elements} by one towards the start.
- Node* loop = graph()->NewNode(common()->Loop(2), if_true1, if_true1);
- Node* eloop =
- graph()->NewNode(common()->EffectPhi(2), etrue1, etrue1, loop);
- Node* terminate = graph()->NewNode(common()->Terminate(), eloop, loop);
- NodeProperties::MergeControlToEnd(graph(), common(), terminate);
- Node* index = graph()->NewNode(
- common()->Phi(MachineRepresentation::kTagged, 2),
- jsgraph()->OneConstant(),
- jsgraph()->Constant(JSArray::kMaxCopyElements - 1), loop);
-
- {
- Node* check2 =
- graph()->NewNode(simplified()->NumberLessThan(), index, length);
- Node* branch2 = graph()->NewNode(common()->Branch(), check2, loop);
-
- if_true1 = graph()->NewNode(common()->IfFalse(), branch2);
- etrue1 = eloop;
-
- Node* control = graph()->NewNode(common()->IfTrue(), branch2);
- Node* effect = etrue1;
-
- ElementAccess const access = AccessBuilder::ForFixedArrayElement(
- receiver_map->elements_kind());
- Node* value = effect =
- graph()->NewNode(simplified()->LoadElement(access), elements,
- index, effect, control);
- effect = graph()->NewNode(
- simplified()->StoreElement(access), elements,
- graph()->NewNode(simplified()->NumberSubtract(), index,
- jsgraph()->OneConstant()),
- value, effect, control);
-
- loop->ReplaceInput(1, control);
- eloop->ReplaceInput(1, effect);
- index->ReplaceInput(1,
- graph()->NewNode(simplified()->NumberAdd(), index,
- jsgraph()->OneConstant()));
- }
-
- // Compute the new {length}.
- length = graph()->NewNode(simplified()->NumberSubtract(), length,
- jsgraph()->OneConstant());
-
- // Store the new {length} to the {receiver}.
- etrue1 = graph()->NewNode(
- simplified()->StoreField(
- AccessBuilder::ForJSArrayLength(receiver_map->elements_kind())),
- receiver, length, etrue1, if_true1);
-
- // Store a hole to the element we just removed from the {receiver}.
- etrue1 = graph()->NewNode(
- simplified()->StoreElement(AccessBuilder::ForFixedArrayElement(
- GetHoleyElementsKind(receiver_map->elements_kind()))),
- elements, length, jsgraph()->TheHoleConstant(), etrue1, if_true1);
- }
-
- Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
- Node* efalse1 = efalse0;
- Node* vfalse1;
- {
- // Call the generic C++ implementation.
- const int builtin_index = Builtins::kArrayShift;
- CallDescriptor const* const desc = Linkage::GetCEntryStubCallDescriptor(
- graph()->zone(), 1, BuiltinArguments::kNumExtraArgsWithReceiver,
- Builtins::name(builtin_index), node->op()->properties(),
- CallDescriptor::kNeedsFrameState);
- Node* stub_code = jsgraph()->CEntryStubConstant(1, kDontSaveFPRegs,
- kArgvOnStack, true);
- Address builtin_entry = Builtins::CppEntryOf(builtin_index);
- Node* entry = jsgraph()->ExternalConstant(
- ExternalReference(builtin_entry, isolate()));
- Node* argc =
- jsgraph()->Constant(BuiltinArguments::kNumExtraArgsWithReceiver);
- if_false1 = efalse1 = vfalse1 =
- graph()->NewNode(common()->Call(desc), stub_code, receiver,
- jsgraph()->PaddingConstant(), argc, target,
- jsgraph()->UndefinedConstant(), entry, argc,
- context, frame_state, efalse1, if_false1);
- }
-
- if_false0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
- efalse0 =
- graph()->NewNode(common()->EffectPhi(2), etrue1, efalse1, if_false0);
- vfalse0 =
- graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
- vtrue1, vfalse1, if_false0);
- }
-
- control = graph()->NewNode(common()->Merge(2), if_true0, if_false0);
- effect = graph()->NewNode(common()->EffectPhi(2), etrue0, efalse0, control);
- Node* value =
- graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
- vtrue0, vfalse0, control);
-
- // Convert the hole to undefined. Do this last, so that we can optimize
- // conversion operator via some smart strength reduction in many cases.
- if (IsHoleyElementsKind(receiver_map->elements_kind())) {
- value =
- graph()->NewNode(simplified()->ConvertTaggedHoleToUndefined(), value);
- }
-
- ReplaceWithValue(node, value, effect, control);
- return Replace(value);
- }
- return NoChange();
-}
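
The removed ReduceArrayShift split into a fast path that copies elements down by one (bounded by JSArray::kMaxCopyElements) and a fallback that calls the generic Builtins::kArrayShift C++ builtin. A minimal sketch of that control flow, with a placeholder constant and std::vector standing in for the backing store:

    #include <cstddef>
    #include <optional>
    #include <vector>

    // Sketch only: kMaxCopyElementsSketch is a placeholder, not V8's actual
    // JSArray::kMaxCopyElements value.
    constexpr std::size_t kMaxCopyElementsSketch = 100;

    std::optional<double> FastArrayShift(std::vector<double>& elements) {
      if (elements.empty()) return std::nullopt;  // length == 0 -> undefined
      double first = elements.front();
      if (elements.size() <= kMaxCopyElementsSketch) {
        // Shift the remaining elements by one towards the start, then shrink.
        for (std::size_t i = 1; i < elements.size(); ++i) {
          elements[i - 1] = elements[i];
        }
        elements.pop_back();
      } else {
        // The removed code tail-called the generic C++ builtin here instead.
        elements.erase(elements.begin());
      }
      return first;
    }
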
-
namespace {
bool HasInstanceTypeWitness(Node* receiver, Node* effect,
@@ -1451,6 +1027,7 @@ Reduction JSBuiltinReducer::ReduceCollectionIteratorNext(
index = effect = graph()->NewNode(
common()->Call(desc), jsgraph()->HeapConstant(callable.code()), table,
index, jsgraph()->NoContextConstant(), effect);
+ NodeProperties::SetType(index, type_cache_.kFixedArrayLengthType);
// Update the {index} and {table} on the {receiver}.
effect = graph()->NewNode(
@@ -1562,8 +1139,9 @@ Reduction JSBuiltinReducer::ReduceCollectionIteratorNext(
// Abort loop with resulting value.
Node* control = graph()->NewNode(common()->IfFalse(), branch1);
Node* effect = etrue0;
- Node* value = graph()->NewNode(
- common()->TypeGuard(Type::NonInternal()), entry_key, control);
+ Node* value = effect =
+ graph()->NewNode(common()->TypeGuard(Type::NonInternal()),
+ entry_key, effect, control);
Node* done = jsgraph()->FalseConstant();
// Advance the index on the {receiver}.
@@ -2369,122 +1947,6 @@ Node* GetStringWitness(Node* node) {
} // namespace
-// ES6 section 21.1.3.1 String.prototype.charAt ( pos )
-Reduction JSBuiltinReducer::ReduceStringCharAt(Node* node) {
- // We need at least target, receiver and index parameters.
- if (node->op()->ValueInputCount() >= 3) {
- Node* index = NodeProperties::GetValueInput(node, 2);
- Type* index_type = NodeProperties::GetType(index);
- Node* effect = NodeProperties::GetEffectInput(node);
- Node* control = NodeProperties::GetControlInput(node);
-
- if (index_type->Is(Type::Integral32OrMinusZeroOrNaN())) {
- if (Node* receiver = GetStringWitness(node)) {
- if (!index_type->Is(Type::Unsigned32())) {
- // Map -0 and NaN to 0 (as per ToInteger), and the values in
- // the [-2^31,-1] range to the [2^31,2^32-1] range, which will
- // be considered out-of-bounds as well, because of the maximal
- // String length limit in V8.
- STATIC_ASSERT(String::kMaxLength <= kMaxInt);
- index = graph()->NewNode(simplified()->NumberToUint32(), index);
- }
-
- // Determine the {receiver} length.
- Node* receiver_length = effect = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForStringLength()), receiver,
- effect, control);
-
- // Check if {index} is less than {receiver} length.
- Node* check = graph()->NewNode(simplified()->NumberLessThan(), index,
- receiver_length);
- Node* branch = graph()->NewNode(common()->Branch(BranchHint::kTrue),
- check, control);
-
- // Return the character from the {receiver} as single character string.
- Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
-
- Node* masked_index = graph()->NewNode(
- simplified()->MaskIndexWithBound(), index, receiver_length);
-
- Node* vtrue = graph()->NewNode(simplified()->StringCharAt(), receiver,
- masked_index, if_true);
-
- // Return the empty string otherwise.
- Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
- Node* vfalse = jsgraph()->EmptyStringConstant();
-
- control = graph()->NewNode(common()->Merge(2), if_true, if_false);
- Node* value =
- graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
- vtrue, vfalse, control);
-
- ReplaceWithValue(node, value, effect, control);
- return Replace(value);
- }
- }
- }
-
- return NoChange();
-}
-
-// ES6 section 21.1.3.2 String.prototype.charCodeAt ( pos )
-Reduction JSBuiltinReducer::ReduceStringCharCodeAt(Node* node) {
- // We need at least target, receiver and index parameters.
- if (node->op()->ValueInputCount() >= 3) {
- Node* index = NodeProperties::GetValueInput(node, 2);
- Type* index_type = NodeProperties::GetType(index);
- Node* effect = NodeProperties::GetEffectInput(node);
- Node* control = NodeProperties::GetControlInput(node);
-
- if (index_type->Is(Type::Integral32OrMinusZeroOrNaN())) {
- if (Node* receiver = GetStringWitness(node)) {
- if (!index_type->Is(Type::Unsigned32())) {
- // Map -0 and NaN to 0 (as per ToInteger), and the values in
- // the [-2^31,-1] range to the [2^31,2^32-1] range, which will
- // be considered out-of-bounds as well, because of the maximal
- // String length limit in V8.
- STATIC_ASSERT(String::kMaxLength <= kMaxInt);
- index = graph()->NewNode(simplified()->NumberToUint32(), index);
- }
-
- // Determine the {receiver} length.
- Node* receiver_length = effect = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForStringLength()), receiver,
- effect, control);
-
- // Check if {index} is less than {receiver} length.
- Node* check = graph()->NewNode(simplified()->NumberLessThan(), index,
- receiver_length);
- Node* branch = graph()->NewNode(common()->Branch(BranchHint::kTrue),
- check, control);
-
- // Load the character from the {receiver}.
- Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
-
- Node* masked_index = graph()->NewNode(
- simplified()->MaskIndexWithBound(), index, receiver_length);
-
- Node* vtrue = graph()->NewNode(simplified()->StringCharCodeAt(),
- receiver, masked_index, if_true);
-
- // Return NaN otherwise.
- Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
- Node* vfalse = jsgraph()->NaNConstant();
-
- control = graph()->NewNode(common()->Merge(2), if_true, if_false);
- Node* value =
- graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
- vtrue, vfalse, control);
-
- ReplaceWithValue(node, value, effect, control);
- return Replace(value);
- }
- }
- }
-
- return NoChange();
-}
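
Both removed reducers (ReduceStringCharAt and ReduceStringCharCodeAt) share the same index normalization: indices that are not already Unsigned32 go through NumberToUint32, so -0 and NaN become 0 and negative integers land in [2^31, 2^32), which is always out of bounds given V8's string length limit. A hedged C++ sketch of that behaviour for charCodeAt (charAt differs only in returning a one-character string or ""):

    #include <cmath>
    #include <cstdint>
    #include <optional>
    #include <string>

    // Sketch only: models NumberToUint32 for indices within int64_t range;
    // -0 and NaN map to 0 and negative values wrap modulo 2^32.
    uint32_t ToUint32Sketch(double index) {
      if (std::isnan(index)) return 0;
      return static_cast<uint32_t>(static_cast<int64_t>(index));
    }

    // In bounds: return the UTF-16 code unit. Out of bounds: no value (NaN in JS).
    std::optional<uint16_t> CharCodeAtSketch(const std::u16string& s,
                                             double index) {
      uint32_t i = ToUint32Sketch(index);
      if (i >= s.size()) return std::nullopt;
      return s[i];
    }
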
-
// ES6 String.prototype.concat(...args)
// #sec-string.prototype.concat
Reduction JSBuiltinReducer::ReduceStringConcat(Node* node) {
@@ -2516,34 +1978,6 @@ Reduction JSBuiltinReducer::ReduceStringConcat(Node* node) {
return NoChange();
}
-// ES6 String.prototype.indexOf(searchString [, position])
-// #sec-string.prototype.indexof
-Reduction JSBuiltinReducer::ReduceStringIndexOf(Node* node) {
- // We need at least target, receiver and search_string parameters.
- if (node->op()->ValueInputCount() >= 3) {
- Node* search_string = NodeProperties::GetValueInput(node, 2);
- Type* search_string_type = NodeProperties::GetType(search_string);
- Node* position = (node->op()->ValueInputCount() >= 4)
- ? NodeProperties::GetValueInput(node, 3)
- : jsgraph()->ZeroConstant();
- Type* position_type = NodeProperties::GetType(position);
-
- if (search_string_type->Is(Type::String()) &&
- position_type->Is(Type::SignedSmall())) {
- if (Node* receiver = GetStringWitness(node)) {
- RelaxEffectsAndControls(node);
- node->ReplaceInput(0, receiver);
- node->ReplaceInput(1, search_string);
- node->ReplaceInput(2, position);
- node->TrimInputCount(3);
- NodeProperties::ChangeOp(node, simplified()->StringIndexOf());
- return Changed(node);
- }
- }
- }
- return NoChange();
-}
-
Reduction JSBuiltinReducer::ReduceStringIterator(Node* node) {
if (Node* receiver = GetStringWitness(node)) {
Node* effect = NodeProperties::GetEffectInput(node);
@@ -2584,9 +2018,7 @@ Reduction JSBuiltinReducer::ReduceStringIteratorNext(Node* node) {
Node* index = effect = graph()->NewNode(
simplified()->LoadField(AccessBuilder::ForJSStringIteratorIndex()),
receiver, effect, control);
- Node* length = effect = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForStringLength()), string,
- effect, control);
+ Node* length = graph()->NewNode(simplified()->StringLength(), string);
// branch0: if (index < length)
Node* check0 =
@@ -2677,9 +2109,8 @@ Reduction JSBuiltinReducer::ReduceStringIteratorNext(Node* node) {
simplified()->StringFromCodePoint(UnicodeEncoding::UTF16), vtrue0);
// Update iterator.[[NextIndex]]
- Node* char_length = etrue0 = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForStringLength()), vtrue0,
- etrue0, if_true0);
+ Node* char_length =
+ graph()->NewNode(simplified()->StringLength(), vtrue0);
index = graph()->NewNode(simplified()->NumberAdd(), index, char_length);
etrue0 = graph()->NewNode(
simplified()->StoreField(AccessBuilder::ForJSStringIteratorIndex()),
@@ -2728,9 +2159,8 @@ Reduction JSBuiltinReducer::ReduceStringSlice(Node* node) {
if (start_type->Is(type_cache_.kSingletonMinusOne) &&
end_type->Is(Type::Undefined())) {
- Node* receiver_length = effect = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForStringLength()), receiver,
- effect, control);
+ Node* receiver_length =
+ graph()->NewNode(simplified()->StringLength(), receiver);
Node* check =
graph()->NewNode(simplified()->NumberEqual(), receiver_length,
@@ -2855,12 +2285,6 @@ Reduction JSBuiltinReducer::Reduce(Node* node) {
return ReduceArrayIteratorNext(node);
case kArrayIsArray:
return ReduceArrayIsArray(node);
- case kArrayPop:
- return ReduceArrayPop(node);
- case kArrayPush:
- return ReduceArrayPush(node);
- case kArrayShift:
- return ReduceArrayShift(node);
case kDateNow:
return ReduceDateNow(node);
case kDateGetTime:
@@ -3024,14 +2448,8 @@ Reduction JSBuiltinReducer::Reduce(Node* node) {
case kStringFromCharCode:
reduction = ReduceStringFromCharCode(node);
break;
- case kStringCharAt:
- return ReduceStringCharAt(node);
- case kStringCharCodeAt:
- return ReduceStringCharCodeAt(node);
case kStringConcat:
return ReduceStringConcat(node);
- case kStringIndexOf:
- return ReduceStringIndexOf(node);
case kStringIterator:
return ReduceStringIterator(node);
case kStringIteratorNext:
diff --git a/deps/v8/src/compiler/js-builtin-reducer.h b/deps/v8/src/compiler/js-builtin-reducer.h
index 2b22b0ce7c..b3c44c7a0f 100644
--- a/deps/v8/src/compiler/js-builtin-reducer.h
+++ b/deps/v8/src/compiler/js-builtin-reducer.h
@@ -47,15 +47,13 @@ class V8_EXPORT_PRIVATE JSBuiltinReducer final
IterationKind kind,
ArrayIteratorKind iter_kind);
Reduction ReduceArrayIteratorNext(Node* node);
- Reduction ReduceFastArrayIteratorNext(Handle<Map> iterator_map, Node* node,
+ Reduction ReduceFastArrayIteratorNext(InstanceType type, Node* node,
IterationKind kind);
- Reduction ReduceTypedArrayIteratorNext(Handle<Map> iterator_map, Node* node,
+ Reduction ReduceTypedArrayIteratorNext(InstanceType type, Node* node,
IterationKind kind);
Reduction ReduceTypedArrayToStringTag(Node* node);
Reduction ReduceArrayIsArray(Node* node);
- Reduction ReduceArrayPop(Node* node);
- Reduction ReduceArrayPush(Node* node);
- Reduction ReduceArrayShift(Node* node);
+
Reduction ReduceCollectionIterator(Node* node,
InstanceType collection_instance_type,
int collection_iterator_map_index);
@@ -110,11 +108,8 @@ class V8_EXPORT_PRIVATE JSBuiltinReducer final
Reduction ReduceNumberIsSafeInteger(Node* node);
Reduction ReduceNumberParseInt(Node* node);
Reduction ReduceObjectCreate(Node* node);
- Reduction ReduceStringCharAt(Node* node);
- Reduction ReduceStringCharCodeAt(Node* node);
Reduction ReduceStringConcat(Node* node);
Reduction ReduceStringFromCharCode(Node* node);
- Reduction ReduceStringIndexOf(Node* node);
Reduction ReduceStringIterator(Node* node);
Reduction ReduceStringIteratorNext(Node* node);
Reduction ReduceStringSlice(Node* node);
diff --git a/deps/v8/src/compiler/js-call-reducer.cc b/deps/v8/src/compiler/js-call-reducer.cc
index c595b360d5..1f8e7a2cef 100644
--- a/deps/v8/src/compiler/js-call-reducer.cc
+++ b/deps/v8/src/compiler/js-call-reducer.cc
@@ -5,6 +5,7 @@
#include "src/compiler/js-call-reducer.h"
#include "src/api.h"
+#include "src/builtins/builtins-utils.h"
#include "src/code-factory.h"
#include "src/code-stubs.h"
#include "src/compilation-dependencies.h"
@@ -17,6 +18,7 @@
#include "src/feedback-vector-inl.h"
#include "src/ic/call-optimization.h"
#include "src/objects-inl.h"
+#include "src/vector-slot-pair.h"
namespace v8 {
namespace internal {
@@ -90,20 +92,6 @@ Reduction JSCallReducer::ReduceBooleanConstructor(Node* node) {
return Replace(value);
}
-// ES6 section 20.1.1 The Number Constructor
-Reduction JSCallReducer::ReduceNumberConstructor(Node* node) {
- DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
- CallParameters const& p = CallParametersOf(node->op());
-
- // Turn the {node} into a {JSToNumber} call.
- DCHECK_LE(2u, p.arity());
- Node* value = (p.arity() == 2) ? jsgraph()->ZeroConstant()
- : NodeProperties::GetValueInput(node, 2);
- NodeProperties::ReplaceValueInputs(node, value);
- NodeProperties::ChangeOp(node, javascript()->ToNumber());
- return Changed(node);
-}
-
// ES section #sec-object-constructor
Reduction JSCallReducer::ReduceObjectConstructor(Node* node) {
DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
@@ -549,7 +537,7 @@ Reduction JSCallReducer::ReduceObjectPrototypeHasOwnProperty(Node* node) {
Node* check = graph()->NewNode(simplified()->ReferenceEqual(),
receiver_map, cache_type);
effect = graph()->NewNode(
- simplified()->CheckIf(DeoptimizeReason::kNoReason), check, effect,
+ simplified()->CheckIf(DeoptimizeReason::kWrongMap), check, effect,
control);
}
Node* value = jsgraph()->TrueConstant();
@@ -804,15 +792,37 @@ bool CanInlineArrayIteratingBuiltin(Handle<Map> receiver_map) {
isolate->IsAnyInitialArrayPrototype(receiver_prototype);
}
+Node* JSCallReducer::WireInLoopStart(Node* k, Node** control, Node** effect) {
+ Node* loop = *control =
+ graph()->NewNode(common()->Loop(2), *control, *control);
+ Node* eloop = *effect =
+ graph()->NewNode(common()->EffectPhi(2), *effect, *effect, loop);
+ Node* terminate = graph()->NewNode(common()->Terminate(), eloop, loop);
+ NodeProperties::MergeControlToEnd(graph(), common(), terminate);
+ return graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2), k,
+ k, loop);
+}
+
+void JSCallReducer::WireInLoopEnd(Node* loop, Node* eloop, Node* vloop, Node* k,
+ Node* control, Node* effect) {
+ loop->ReplaceInput(1, control);
+ vloop->ReplaceInput(1, k);
+ eloop->ReplaceInput(1, effect);
+}
+
Reduction JSCallReducer::ReduceArrayForEach(Handle<JSFunction> function,
Node* node) {
if (!FLAG_turbo_inline_array_builtins) return NoChange();
DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
+ CallParameters const& p = CallParametersOf(node->op());
+ if (p.speculation_mode() == SpeculationMode::kDisallowSpeculation) {
+ return NoChange();
+ }
+
Node* outer_frame_state = NodeProperties::GetFrameStateInput(node);
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
Node* context = NodeProperties::GetContextInput(node);
- CallParameters const& p = CallParametersOf(node->op());
// Try to determine the {receiver} map.
Node* receiver = NodeProperties::GetValueInput(node, 1);
@@ -825,10 +835,193 @@ Reduction JSCallReducer::ReduceArrayForEach(Handle<JSFunction> function,
ZoneHandleSet<Map> receiver_maps;
NodeProperties::InferReceiverMapsResult result =
NodeProperties::InferReceiverMaps(receiver, effect, &receiver_maps);
- if (result != NodeProperties::kReliableReceiverMaps) {
+ if (result == NodeProperties::kNoReceiverMaps) return NoChange();
+
+ // By ensuring that {kind} is object or double, we can be polymorphic
+ // on different elements kinds.
+ ElementsKind kind = receiver_maps[0]->elements_kind();
+ if (IsSmiElementsKind(kind)) {
+ kind = FastSmiToObjectElementsKind(kind);
+ }
+ for (Handle<Map> receiver_map : receiver_maps) {
+ ElementsKind next_kind = receiver_map->elements_kind();
+ if (!CanInlineArrayIteratingBuiltin(receiver_map)) {
+ return NoChange();
+ }
+ if (!IsFastElementsKind(next_kind)) {
+ return NoChange();
+ }
+ if (IsDoubleElementsKind(kind) != IsDoubleElementsKind(next_kind)) {
+ return NoChange();
+ }
+ if (IsHoleyElementsKind(next_kind)) {
+ kind = GetHoleyElementsKind(kind);
+ }
+ }
+
+ // Install code dependencies on the {receiver} prototype maps and the
+ // global array protector cell.
+ dependencies()->AssumePropertyCell(factory()->no_elements_protector());
+
+ // If we have unreliable maps, we need a map check.
+ if (result == NodeProperties::kUnreliableReceiverMaps) {
+ effect =
+ graph()->NewNode(simplified()->CheckMaps(CheckMapsFlag::kNone,
+ receiver_maps, p.feedback()),
+ receiver, effect, control);
+ }
+
+ Node* k = jsgraph()->ZeroConstant();
+
+ Node* original_length = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSArrayLength(kind)), receiver,
+ effect, control);
+
+ std::vector<Node*> checkpoint_params(
+ {receiver, fncallback, this_arg, k, original_length});
+ const int stack_parameters = static_cast<int>(checkpoint_params.size());
+
+ // Check whether the given callback function is callable. Note that this has
+ // to happen outside the loop to make sure we also throw on empty arrays.
+ Node* check_frame_state = CreateJavaScriptBuiltinContinuationFrameState(
+ jsgraph(), function, Builtins::kArrayForEachLoopLazyDeoptContinuation,
+ node->InputAt(0), context, &checkpoint_params[0], stack_parameters,
+ outer_frame_state, ContinuationFrameStateMode::LAZY);
+ Node* check_fail = nullptr;
+ Node* check_throw = nullptr;
+ WireInCallbackIsCallableCheck(fncallback, context, check_frame_state, effect,
+ &control, &check_fail, &check_throw);
+
+ // Start the loop.
+ Node* vloop = k = WireInLoopStart(k, &control, &effect);
+ Node *loop = control, *eloop = effect;
+ checkpoint_params[3] = k;
+
+ Node* continue_test =
+ graph()->NewNode(simplified()->NumberLessThan(), k, original_length);
+ Node* continue_branch = graph()->NewNode(common()->Branch(BranchHint::kTrue),
+ continue_test, control);
+
+ Node* if_true = graph()->NewNode(common()->IfTrue(), continue_branch);
+ Node* if_false = graph()->NewNode(common()->IfFalse(), continue_branch);
+ control = if_true;
+
+ Node* frame_state = CreateJavaScriptBuiltinContinuationFrameState(
+ jsgraph(), function, Builtins::kArrayForEachLoopEagerDeoptContinuation,
+ node->InputAt(0), context, &checkpoint_params[0], stack_parameters,
+ outer_frame_state, ContinuationFrameStateMode::EAGER);
+
+ effect =
+ graph()->NewNode(common()->Checkpoint(), frame_state, effect, control);
+
+ // Make sure the map hasn't changed during the iteration
+ effect =
+ graph()->NewNode(simplified()->CheckMaps(CheckMapsFlag::kNone,
+ receiver_maps, p.feedback()),
+ receiver, effect, control);
+
+ Node* element =
+ SafeLoadElement(kind, receiver, control, &effect, &k, p.feedback());
+
+ Node* next_k =
+ graph()->NewNode(simplified()->NumberAdd(), k, jsgraph()->OneConstant());
+ checkpoint_params[3] = next_k;
+
+ Node* hole_true = nullptr;
+ Node* hole_false = nullptr;
+ Node* effect_true = effect;
+
+ if (IsHoleyElementsKind(kind)) {
+    // Holey elements kinds require a hole check and skipping of the element in
+ // the case of a hole.
+ Node* check;
+ if (IsDoubleElementsKind(kind)) {
+ check = graph()->NewNode(simplified()->NumberIsFloat64Hole(), element);
+ } else {
+ check = graph()->NewNode(simplified()->ReferenceEqual(), element,
+ jsgraph()->TheHoleConstant());
+ }
+ Node* branch =
+ graph()->NewNode(common()->Branch(BranchHint::kFalse), check, control);
+ hole_true = graph()->NewNode(common()->IfTrue(), branch);
+ hole_false = graph()->NewNode(common()->IfFalse(), branch);
+ control = hole_false;
+
+ // The contract is that we don't leak "the hole" into "user JavaScript",
+ // so we must rename the {element} here to explicitly exclude "the hole"
+ // from the type of {element}.
+ element = effect = graph()->NewNode(
+ common()->TypeGuard(Type::NonInternal()), element, effect, control);
+ }
+
+ frame_state = CreateJavaScriptBuiltinContinuationFrameState(
+ jsgraph(), function, Builtins::kArrayForEachLoopLazyDeoptContinuation,
+ node->InputAt(0), context, &checkpoint_params[0], stack_parameters,
+ outer_frame_state, ContinuationFrameStateMode::LAZY);
+
+ control = effect = graph()->NewNode(
+ javascript()->Call(5, p.frequency()), fncallback, this_arg, element, k,
+ receiver, context, frame_state, effect, control);
+
+ // Rewire potential exception edges.
+ Node* on_exception = nullptr;
+ if (NodeProperties::IsExceptionalCall(node, &on_exception)) {
+ RewirePostCallbackExceptionEdges(check_throw, on_exception, effect,
+ &check_fail, &control);
+ }
+
+ if (IsHoleyElementsKind(kind)) {
+ Node* after_call_control = control;
+ Node* after_call_effect = effect;
+ control = hole_true;
+ effect = effect_true;
+
+ control = graph()->NewNode(common()->Merge(2), control, after_call_control);
+ effect = graph()->NewNode(common()->EffectPhi(2), effect, after_call_effect,
+ control);
+ }
+
+ WireInLoopEnd(loop, eloop, vloop, next_k, control, effect);
+
+ control = if_false;
+ effect = eloop;
+
+ // Wire up the branch for the case when IsCallable fails for the callback.
+ // Since {check_throw} is an unconditional throw, it's impossible to
+ // return a successful completion. Therefore, we simply connect the successful
+ // completion to the graph end.
+ Node* throw_node =
+ graph()->NewNode(common()->Throw(), check_throw, check_fail);
+ NodeProperties::MergeControlToEnd(graph(), common(), throw_node);
+
+ ReplaceWithValue(node, jsgraph()->UndefinedConstant(), effect, control);
+ return Replace(jsgraph()->UndefinedConstant());
+}
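
At the JavaScript-observable level, the loop that ReduceArrayForEach wires into the graph behaves like the sketch below (length read once up front, holes skipped for holey kinds, callback invoked per element); the per-iteration map checks, checkpoints and deopt continuations have no C++ analogue and are omitted:

    #include <cstddef>
    #include <functional>
    #include <vector>

    // Sketch only: 'elements' stands in for the receiver's backing store and the
    // callback receives (element, index); the real call also passes this_arg and
    // the receiver.
    void InlinedForEachSketch(
        const std::vector<double>& elements,
        const std::function<void(double, std::size_t)>& callback) {
      const std::size_t original_length = elements.size();  // loaded once
      for (std::size_t k = 0; k < original_length; ++k) {
        double element = elements[k];
        // Holey kinds would skip holes here (the hole check shown above).
        callback(element, k);
      }
    }
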
+
+Reduction JSCallReducer::ReduceArrayReduce(Handle<JSFunction> function,
+ Node* node) {
+ if (!FLAG_turbo_inline_array_builtins) return NoChange();
+ DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
+
+ Node* outer_frame_state = NodeProperties::GetFrameStateInput(node);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+ Node* context = NodeProperties::GetContextInput(node);
+ CallParameters const& p = CallParametersOf(node->op());
+ if (p.speculation_mode() == SpeculationMode::kDisallowSpeculation) {
return NoChange();
}
- if (receiver_maps.size() == 0) return NoChange();
+
+ // Try to determine the {receiver} map.
+ Node* receiver = NodeProperties::GetValueInput(node, 1);
+ Node* fncallback = node->op()->ValueInputCount() > 2
+ ? NodeProperties::GetValueInput(node, 2)
+ : jsgraph()->UndefinedConstant();
+
+ ZoneHandleSet<Map> receiver_maps;
+ NodeProperties::InferReceiverMapsResult result =
+ NodeProperties::InferReceiverMaps(receiver, effect, &receiver_maps);
+ if (result == NodeProperties::kNoReceiverMaps) return NoChange();
ElementsKind kind = IsDoubleElementsKind(receiver_maps[0]->elements_kind())
? PACKED_DOUBLE_ELEMENTS
@@ -838,8 +1031,7 @@ Reduction JSCallReducer::ReduceArrayForEach(Handle<JSFunction> function,
if (!CanInlineArrayIteratingBuiltin(receiver_map)) {
return NoChange();
}
- if (!IsFastElementsKind(next_kind) ||
- (IsDoubleElementsKind(next_kind) && IsHoleyElementsKind(next_kind))) {
+ if (!IsFastElementsKind(next_kind) || IsHoleyElementsKind(next_kind)) {
return NoChange();
}
if (IsDoubleElementsKind(kind) != IsDoubleElementsKind(next_kind)) {
@@ -854,36 +1046,73 @@ Reduction JSCallReducer::ReduceArrayForEach(Handle<JSFunction> function,
// global array protector cell.
dependencies()->AssumePropertyCell(factory()->no_elements_protector());
- Node* k = jsgraph()->ZeroConstant();
+ // If we have unreliable maps, we need a map check.
+ if (result == NodeProperties::kUnreliableReceiverMaps) {
+ effect =
+ graph()->NewNode(simplified()->CheckMaps(CheckMapsFlag::kNone,
+ receiver_maps, p.feedback()),
+ receiver, effect, control);
+ }
Node* original_length = effect = graph()->NewNode(
simplified()->LoadField(AccessBuilder::ForJSArrayLength(PACKED_ELEMENTS)),
receiver, effect, control);
- std::vector<Node*> checkpoint_params(
- {receiver, fncallback, this_arg, k, original_length});
+ Node* k = jsgraph()->ZeroConstant();
+
+ std::vector<Node*> checkpoint_params({receiver, fncallback, k,
+ original_length,
+ jsgraph()->UndefinedConstant()});
const int stack_parameters = static_cast<int>(checkpoint_params.size());
// Check whether the given callback function is callable. Note that this has
// to happen outside the loop to make sure we also throw on empty arrays.
Node* check_frame_state = CreateJavaScriptBuiltinContinuationFrameState(
- jsgraph(), function, Builtins::kArrayForEachLoopLazyDeoptContinuation,
- node->InputAt(0), context, &checkpoint_params[0], stack_parameters,
+ jsgraph(), function, Builtins::kArrayReduceLoopLazyDeoptContinuation,
+ node->InputAt(0), context, &checkpoint_params[0], stack_parameters - 1,
outer_frame_state, ContinuationFrameStateMode::LAZY);
Node* check_fail = nullptr;
Node* check_throw = nullptr;
WireInCallbackIsCallableCheck(fncallback, context, check_frame_state, effect,
&control, &check_fail, &check_throw);
+ // Set initial accumulator value
+ Node* cur = jsgraph()->TheHoleConstant();
+
+ Node* initial_element_check_fail = nullptr;
+ Node* initial_element_check_throw = nullptr;
+ if (node->op()->ValueInputCount() > 3) {
+ cur = NodeProperties::GetValueInput(node, 3);
+ } else {
+ Node* check =
+ graph()->NewNode(simplified()->NumberEqual(), original_length, k);
+ Node* check_branch =
+ graph()->NewNode(common()->Branch(BranchHint::kFalse), check, control);
+ initial_element_check_fail =
+ graph()->NewNode(common()->IfTrue(), check_branch);
+ initial_element_check_throw = graph()->NewNode(
+ javascript()->CallRuntime(Runtime::kThrowTypeError, 2),
+ jsgraph()->Constant(MessageTemplate::kReduceNoInitial), fncallback,
+ context, check_frame_state, effect, initial_element_check_fail);
+ control = graph()->NewNode(common()->IfFalse(), check_branch);
+
+ cur = SafeLoadElement(kind, receiver, control, &effect, &k, p.feedback());
+ k = graph()->NewNode(simplified()->NumberAdd(), k,
+ jsgraph()->OneConstant());
+ }
+
// Start the loop.
Node* loop = control = graph()->NewNode(common()->Loop(2), control, control);
Node* eloop = effect =
graph()->NewNode(common()->EffectPhi(2), effect, effect, loop);
Node* terminate = graph()->NewNode(common()->Terminate(), eloop, loop);
NodeProperties::MergeControlToEnd(graph(), common(), terminate);
- Node* vloop = k = graph()->NewNode(
+ Node* kloop = k = graph()->NewNode(
common()->Phi(MachineRepresentation::kTagged, 2), k, k, loop);
- checkpoint_params[3] = k;
+ Node* curloop = cur = graph()->NewNode(
+ common()->Phi(MachineRepresentation::kTagged, 2), cur, cur, loop);
+ checkpoint_params[2] = k;
+ checkpoint_params[4] = curloop;
control = loop;
effect = eloop;
@@ -898,7 +1127,7 @@ Reduction JSCallReducer::ReduceArrayForEach(Handle<JSFunction> function,
control = if_true;
Node* frame_state = CreateJavaScriptBuiltinContinuationFrameState(
- jsgraph(), function, Builtins::kArrayForEachLoopEagerDeoptContinuation,
+ jsgraph(), function, Builtins::kArrayReduceLoopEagerDeoptContinuation,
node->InputAt(0), context, &checkpoint_params[0], stack_parameters,
outer_frame_state, ContinuationFrameStateMode::EAGER);
@@ -910,11 +1139,12 @@ Reduction JSCallReducer::ReduceArrayForEach(Handle<JSFunction> function,
simplified()->CheckMaps(CheckMapsFlag::kNone, receiver_maps), receiver,
effect, control);
- Node* element = SafeLoadElement(kind, receiver, control, &effect, &k);
+ Node* element =
+ SafeLoadElement(kind, receiver, control, &effect, &k, p.feedback());
Node* next_k =
- graph()->NewNode(simplified()->NumberAdd(), k, jsgraph()->Constant(1));
- checkpoint_params[3] = next_k;
+ graph()->NewNode(simplified()->NumberAdd(), k, jsgraph()->OneConstant());
+ checkpoint_params[2] = next_k;
Node* hole_true = nullptr;
Node* hole_false = nullptr;
@@ -934,18 +1164,19 @@ Reduction JSCallReducer::ReduceArrayForEach(Handle<JSFunction> function,
// The contract is that we don't leak "the hole" into "user JavaScript",
// so we must rename the {element} here to explicitly exclude "the hole"
// from the type of {element}.
- element = graph()->NewNode(common()->TypeGuard(Type::NonInternal()),
- element, control);
+ element = effect = graph()->NewNode(
+ common()->TypeGuard(Type::NonInternal()), element, effect, control);
}
frame_state = CreateJavaScriptBuiltinContinuationFrameState(
- jsgraph(), function, Builtins::kArrayForEachLoopLazyDeoptContinuation,
- node->InputAt(0), context, &checkpoint_params[0], stack_parameters,
+ jsgraph(), function, Builtins::kArrayReduceLoopLazyDeoptContinuation,
+ node->InputAt(0), context, &checkpoint_params[0], stack_parameters - 1,
outer_frame_state, ContinuationFrameStateMode::LAZY);
- control = effect = graph()->NewNode(
- javascript()->Call(5, p.frequency()), fncallback, this_arg, element, k,
- receiver, context, frame_state, effect, control);
+ Node* next_cur = control = effect =
+ graph()->NewNode(javascript()->Call(6, p.frequency()), fncallback,
+ jsgraph()->UndefinedConstant(), cur, element, k,
+ receiver, context, frame_state, effect, control);
// Rewire potential exception edges.
Node* on_exception = nullptr;
@@ -963,12 +1194,17 @@ Reduction JSCallReducer::ReduceArrayForEach(Handle<JSFunction> function,
control = graph()->NewNode(common()->Merge(2), control, after_call_control);
effect = graph()->NewNode(common()->EffectPhi(2), effect, after_call_effect,
control);
+ next_cur =
+ graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2), cur,
+ next_cur, control);
}
k = next_k;
+ cur = next_cur;
loop->ReplaceInput(1, control);
- vloop->ReplaceInput(1, k);
+ kloop->ReplaceInput(1, k);
+ curloop->ReplaceInput(1, cur);
eloop->ReplaceInput(1, effect);
control = if_false;
@@ -982,19 +1218,271 @@ Reduction JSCallReducer::ReduceArrayForEach(Handle<JSFunction> function,
graph()->NewNode(common()->Throw(), check_throw, check_fail);
NodeProperties::MergeControlToEnd(graph(), common(), throw_node);
- ReplaceWithValue(node, jsgraph()->UndefinedConstant(), effect, control);
- return Replace(jsgraph()->UndefinedConstant());
-}
+ if (node->op()->ValueInputCount() <= 3) {
+ // Wire up the branch for the case when an array is empty.
+ // Since {check_throw} is an unconditional throw, it's impossible to
+ // return a successful completion. Therefore, we simply connect the
+ // successful completion to the graph end.
+ Node* throw_node =
+ graph()->NewNode(common()->Throw(), initial_element_check_throw,
+ initial_element_check_fail);
+ NodeProperties::MergeControlToEnd(graph(), common(), throw_node);
+ }
+
+ ReplaceWithValue(node, curloop, effect, control);
+ return Replace(curloop);
+}
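
ReduceArrayReduce bails out for holey elements kinds and mirrors the specified behaviour for packed arrays: with no initial value an empty array throws a TypeError (MessageTemplate::kReduceNoInitial), otherwise element 0 seeds the accumulator and iteration starts at index 1. A minimal hedged sketch:

    #include <cstddef>
    #include <stdexcept>
    #include <vector>

    // Sketch only: covers the no-initial-value path; std::runtime_error stands in
    // for the TypeError raised via Runtime::kThrowTypeError.
    template <typename F>
    double InlinedReduceSketch(const std::vector<double>& elements, F callback) {
      if (elements.empty()) {
        throw std::runtime_error("Reduce of empty array with no initial value");
      }
      double accumulator = elements[0];
      for (std::size_t k = 1; k < elements.size(); ++k) {
        accumulator = callback(accumulator, elements[k], k);
      }
      return accumulator;
    }
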
+
+Reduction JSCallReducer::ReduceArrayReduceRight(Handle<JSFunction> function,
+ Node* node) {
+ if (!FLAG_turbo_inline_array_builtins) return NoChange();
+ DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
+
+ Node* outer_frame_state = NodeProperties::GetFrameStateInput(node);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+ Node* context = NodeProperties::GetContextInput(node);
+ CallParameters const& p = CallParametersOf(node->op());
+ if (p.speculation_mode() == SpeculationMode::kDisallowSpeculation) {
+ return NoChange();
+ }
+
+ // Try to determine the {receiver} map.
+ Node* receiver = NodeProperties::GetValueInput(node, 1);
+ Node* fncallback = node->op()->ValueInputCount() > 2
+ ? NodeProperties::GetValueInput(node, 2)
+ : jsgraph()->UndefinedConstant();
+
+ ZoneHandleSet<Map> receiver_maps;
+ NodeProperties::InferReceiverMapsResult result =
+ NodeProperties::InferReceiverMaps(receiver, effect, &receiver_maps);
+ if (result == NodeProperties::kNoReceiverMaps) return NoChange();
+
+ ElementsKind kind = IsDoubleElementsKind(receiver_maps[0]->elements_kind())
+ ? PACKED_DOUBLE_ELEMENTS
+ : PACKED_ELEMENTS;
+ for (Handle<Map> receiver_map : receiver_maps) {
+ ElementsKind next_kind = receiver_map->elements_kind();
+ if (!CanInlineArrayIteratingBuiltin(receiver_map)) {
+ return NoChange();
+ }
+ if (!IsFastElementsKind(next_kind) || IsHoleyElementsKind(next_kind)) {
+ return NoChange();
+ }
+ if (IsDoubleElementsKind(kind) != IsDoubleElementsKind(next_kind)) {
+ return NoChange();
+ }
+ if (IsHoleyElementsKind(next_kind)) {
+ kind = HOLEY_ELEMENTS;
+ }
+ }
+
+ // Install code dependencies on the {receiver} prototype maps and the
+ // global array protector cell.
+ dependencies()->AssumePropertyCell(factory()->no_elements_protector());
+
+ // If we have unreliable maps, we need a map check.
+ if (result == NodeProperties::kUnreliableReceiverMaps) {
+ effect =
+ graph()->NewNode(simplified()->CheckMaps(CheckMapsFlag::kNone,
+ receiver_maps, p.feedback()),
+ receiver, effect, control);
+ }
+
+ Node* original_length = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSArrayLength(PACKED_ELEMENTS)),
+ receiver, effect, control);
+
+ Node* k = graph()->NewNode(simplified()->NumberSubtract(), original_length,
+ jsgraph()->OneConstant());
+
+ std::vector<Node*> checkpoint_params({receiver, fncallback, k,
+ original_length,
+ jsgraph()->UndefinedConstant()});
+ const int stack_parameters = static_cast<int>(checkpoint_params.size());
+
+ // Check whether the given callback function is callable. Note that this has
+ // to happen outside the loop to make sure we also throw on empty arrays.
+ Node* check_frame_state = CreateJavaScriptBuiltinContinuationFrameState(
+ jsgraph(), function, Builtins::kArrayReduceRightLoopLazyDeoptContinuation,
+ node->InputAt(0), context, &checkpoint_params[0], stack_parameters - 1,
+ outer_frame_state, ContinuationFrameStateMode::LAZY);
+ Node* check_fail = nullptr;
+ Node* check_throw = nullptr;
+ WireInCallbackIsCallableCheck(fncallback, context, check_frame_state, effect,
+ &control, &check_fail, &check_throw);
+
+ // Set initial accumulator value
+ Node* cur = nullptr;
+
+ Node* initial_element_check_fail = nullptr;
+ Node* initial_element_check_throw = nullptr;
+ if (node->op()->ValueInputCount() > 3) {
+ cur = NodeProperties::GetValueInput(node, 3);
+ } else {
+ Node* check = graph()->NewNode(simplified()->NumberEqual(), original_length,
+ jsgraph()->SmiConstant(0));
+ Node* check_branch =
+ graph()->NewNode(common()->Branch(BranchHint::kFalse), check, control);
+ initial_element_check_fail =
+ graph()->NewNode(common()->IfTrue(), check_branch);
+ initial_element_check_throw = graph()->NewNode(
+ javascript()->CallRuntime(Runtime::kThrowTypeError, 2),
+ jsgraph()->Constant(MessageTemplate::kReduceNoInitial), fncallback,
+ context, check_frame_state, effect, initial_element_check_fail);
+ control = graph()->NewNode(common()->IfFalse(), check_branch);
+
+ cur = SafeLoadElement(kind, receiver, control, &effect, &k, p.feedback());
+ k = graph()->NewNode(simplified()->NumberSubtract(), k,
+ jsgraph()->OneConstant());
+ }
+
+ // Start the loop.
+ Node* loop = control = graph()->NewNode(common()->Loop(2), control, control);
+ Node* eloop = effect =
+ graph()->NewNode(common()->EffectPhi(2), effect, effect, loop);
+ Node* terminate = graph()->NewNode(common()->Terminate(), eloop, loop);
+ NodeProperties::MergeControlToEnd(graph(), common(), terminate);
+ Node* kloop = k = graph()->NewNode(
+ common()->Phi(MachineRepresentation::kTagged, 2), k, k, loop);
+ Node* curloop = cur = graph()->NewNode(
+ common()->Phi(MachineRepresentation::kTagged, 2), cur, cur, loop);
+ checkpoint_params[2] = k;
+ checkpoint_params[4] = curloop;
+
+ control = loop;
+ effect = eloop;
+
+ Node* continue_test = graph()->NewNode(simplified()->NumberLessThanOrEqual(),
+ jsgraph()->ZeroConstant(), k);
+ Node* continue_branch = graph()->NewNode(common()->Branch(BranchHint::kTrue),
+ continue_test, control);
+
+ Node* if_true = graph()->NewNode(common()->IfTrue(), continue_branch);
+ Node* if_false = graph()->NewNode(common()->IfFalse(), continue_branch);
+ control = if_true;
+
+ Node* frame_state = CreateJavaScriptBuiltinContinuationFrameState(
+ jsgraph(), function,
+ Builtins::kArrayReduceRightLoopEagerDeoptContinuation, node->InputAt(0),
+ context, &checkpoint_params[0], stack_parameters, outer_frame_state,
+ ContinuationFrameStateMode::EAGER);
+
+ effect =
+ graph()->NewNode(common()->Checkpoint(), frame_state, effect, control);
+
+ // Make sure the map hasn't changed during the iteration
+ effect = graph()->NewNode(
+ simplified()->CheckMaps(CheckMapsFlag::kNone, receiver_maps), receiver,
+ effect, control);
+
+ Node* element =
+ SafeLoadElement(kind, receiver, control, &effect, &k, p.feedback());
+
+ Node* next_k = graph()->NewNode(simplified()->NumberSubtract(), k,
+ jsgraph()->OneConstant());
+ checkpoint_params[2] = next_k;
+
+ Node* hole_true = nullptr;
+ Node* hole_false = nullptr;
+ Node* effect_true = effect;
+
+ if (IsHoleyElementsKind(kind)) {
+    // Holey elements kinds require a hole check and skipping of the element in
+ // the case of a hole.
+ Node* check = graph()->NewNode(simplified()->ReferenceEqual(), element,
+ jsgraph()->TheHoleConstant());
+ Node* branch =
+ graph()->NewNode(common()->Branch(BranchHint::kFalse), check, control);
+ hole_true = graph()->NewNode(common()->IfTrue(), branch);
+ hole_false = graph()->NewNode(common()->IfFalse(), branch);
+ control = hole_false;
+
+ // The contract is that we don't leak "the hole" into "user JavaScript",
+ // so we must rename the {element} here to explicitly exclude "the hole"
+ // from the type of {element}.
+ element = effect = graph()->NewNode(
+ common()->TypeGuard(Type::NonInternal()), element, effect, control);
+ }
+
+ frame_state = CreateJavaScriptBuiltinContinuationFrameState(
+ jsgraph(), function, Builtins::kArrayReduceRightLoopLazyDeoptContinuation,
+ node->InputAt(0), context, &checkpoint_params[0], stack_parameters - 1,
+ outer_frame_state, ContinuationFrameStateMode::LAZY);
+
+ Node* next_cur = control = effect =
+ graph()->NewNode(javascript()->Call(6, p.frequency()), fncallback,
+ jsgraph()->UndefinedConstant(), cur, element, k,
+ receiver, context, frame_state, effect, control);
+
+ // Rewire potential exception edges.
+ Node* on_exception = nullptr;
+ if (NodeProperties::IsExceptionalCall(node, &on_exception)) {
+ RewirePostCallbackExceptionEdges(check_throw, on_exception, effect,
+ &check_fail, &control);
+ }
+
+ if (IsHoleyElementsKind(kind)) {
+ Node* after_call_control = control;
+ Node* after_call_effect = effect;
+ control = hole_true;
+ effect = effect_true;
+
+ control = graph()->NewNode(common()->Merge(2), control, after_call_control);
+ effect = graph()->NewNode(common()->EffectPhi(2), effect, after_call_effect,
+ control);
+ next_cur =
+ graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2), cur,
+ next_cur, control);
+ }
+
+ k = next_k;
+ cur = next_cur;
+
+ loop->ReplaceInput(1, control);
+ kloop->ReplaceInput(1, k);
+ curloop->ReplaceInput(1, cur);
+ eloop->ReplaceInput(1, effect);
+
+ control = if_false;
+ effect = eloop;
+
+ // Wire up the branch for the case when IsCallable fails for the callback.
+ // Since {check_throw} is an unconditional throw, it's impossible to
+ // return a successful completion. Therefore, we simply connect the successful
+ // completion to the graph end.
+ Node* throw_node =
+ graph()->NewNode(common()->Throw(), check_throw, check_fail);
+ NodeProperties::MergeControlToEnd(graph(), common(), throw_node);
+
+ if (node->op()->ValueInputCount() <= 3) {
+ // Wire up the branch for the case when an array is empty.
+ // Since {check_throw} is an unconditional throw, it's impossible to
+ // return a successful completion. Therefore, we simply connect the
+ // successful completion to the graph end.
+ Node* throw_node =
+ graph()->NewNode(common()->Throw(), initial_element_check_throw,
+ initial_element_check_fail);
+ NodeProperties::MergeControlToEnd(graph(), common(), throw_node);
+ }
+
+ ReplaceWithValue(node, curloop, effect, control);
+ return Replace(curloop);
+}
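
ReduceArrayReduceRight is the mirror image: the index phi starts at length - 1, the continue test is 0 <= k, and the accumulator is seeded from the last element when no initial value is supplied. Sketch under the same assumptions as the reduce sketch above:

    #include <cstddef>
    #include <stdexcept>
    #include <vector>

    // Sketch only: no-initial-value path; std::runtime_error models the TypeError.
    template <typename F>
    double InlinedReduceRightSketch(const std::vector<double>& elements,
                                    F callback) {
      if (elements.empty()) {
        throw std::runtime_error("Reduce of empty array with no initial value");
      }
      double accumulator = elements.back();
      for (std::size_t k = elements.size() - 1; k-- > 0;) {
        accumulator = callback(accumulator, elements[k], k);  // k = length-2 .. 0
      }
      return accumulator;
    }
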
Reduction JSCallReducer::ReduceArrayMap(Handle<JSFunction> function,
Node* node) {
if (!FLAG_turbo_inline_array_builtins) return NoChange();
DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
+ CallParameters const& p = CallParametersOf(node->op());
+ if (p.speculation_mode() == SpeculationMode::kDisallowSpeculation) {
+ return NoChange();
+ }
+
Node* outer_frame_state = NodeProperties::GetFrameStateInput(node);
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
Node* context = NodeProperties::GetContextInput(node);
- CallParameters const& p = CallParametersOf(node->op());
// Try to determine the {receiver} map.
Node* receiver = NodeProperties::GetValueInput(node, 1);
@@ -1007,31 +1495,18 @@ Reduction JSCallReducer::ReduceArrayMap(Handle<JSFunction> function,
ZoneHandleSet<Map> receiver_maps;
NodeProperties::InferReceiverMapsResult result =
NodeProperties::InferReceiverMaps(receiver, effect, &receiver_maps);
- if (result != NodeProperties::kReliableReceiverMaps) {
- return NoChange();
- }
+ if (result == NodeProperties::kNoReceiverMaps) return NoChange();
// Ensure that any changes to the Array species constructor cause deopt.
if (!isolate()->IsArraySpeciesLookupChainIntact()) return NoChange();
- if (receiver_maps.size() == 0) return NoChange();
-
const ElementsKind kind = receiver_maps[0]->elements_kind();
- // TODO(danno): Handle holey elements kinds.
- if (!IsFastPackedElementsKind(kind)) {
- return NoChange();
- }
-
for (Handle<Map> receiver_map : receiver_maps) {
- if (!CanInlineArrayIteratingBuiltin(receiver_map)) {
- return NoChange();
- }
+ if (!CanInlineArrayIteratingBuiltin(receiver_map)) return NoChange();
// We can handle different maps, as long as their elements kind are the
// same.
- if (receiver_map->elements_kind() != kind) {
- return NoChange();
- }
+ if (receiver_map->elements_kind() != kind) return NoChange();
}
dependencies()->AssumePropertyCell(factory()->species_protector());
@@ -1045,10 +1520,13 @@ Reduction JSCallReducer::ReduceArrayMap(Handle<JSFunction> function,
Node* k = jsgraph()->ZeroConstant();
- // Make sure the map hasn't changed before we construct the output array.
- effect = graph()->NewNode(
- simplified()->CheckMaps(CheckMapsFlag::kNone, receiver_maps), receiver,
- effect, control);
+ // If we have unreliable maps, we need a map check.
+ if (result == NodeProperties::kUnreliableReceiverMaps) {
+ effect =
+ graph()->NewNode(simplified()->CheckMaps(CheckMapsFlag::kNone,
+ receiver_maps, p.feedback()),
+ receiver, effect, control);
+ }
Node* original_length = effect = graph()->NewNode(
simplified()->LoadField(AccessBuilder::ForJSArrayLength(kind)), receiver,
@@ -1078,18 +1556,10 @@ Reduction JSCallReducer::ReduceArrayMap(Handle<JSFunction> function,
&control, &check_fail, &check_throw);
// Start the loop.
- Node* loop = control = graph()->NewNode(common()->Loop(2), control, control);
- Node* eloop = effect =
- graph()->NewNode(common()->EffectPhi(2), effect, effect, loop);
- Node* terminate = graph()->NewNode(common()->Terminate(), eloop, loop);
- NodeProperties::MergeControlToEnd(graph(), common(), terminate);
- Node* vloop = k = graph()->NewNode(
- common()->Phi(MachineRepresentation::kTagged, 2), k, k, loop);
+ Node* vloop = k = WireInLoopStart(k, &control, &effect);
+ Node *loop = control, *eloop = effect;
checkpoint_params[4] = k;
- control = loop;
- effect = eloop;
-
Node* continue_test =
graph()->NewNode(simplified()->NumberLessThan(), k, original_length);
Node* continue_branch = graph()->NewNode(common()->Branch(BranchHint::kTrue),
@@ -1108,15 +1578,44 @@ Reduction JSCallReducer::ReduceArrayMap(Handle<JSFunction> function,
graph()->NewNode(common()->Checkpoint(), frame_state, effect, control);
// Make sure the map hasn't changed during the iteration
- effect = graph()->NewNode(
- simplified()->CheckMaps(CheckMapsFlag::kNone, receiver_maps), receiver,
- effect, control);
+ effect =
+ graph()->NewNode(simplified()->CheckMaps(CheckMapsFlag::kNone,
+ receiver_maps, p.feedback()),
+ receiver, effect, control);
- Node* element = SafeLoadElement(kind, receiver, control, &effect, &k);
+ Node* element =
+ SafeLoadElement(kind, receiver, control, &effect, &k, p.feedback());
Node* next_k =
graph()->NewNode(simplified()->NumberAdd(), k, jsgraph()->OneConstant());
+ Node* hole_true = nullptr;
+ Node* hole_false = nullptr;
+ Node* effect_true = effect;
+
+ if (IsHoleyElementsKind(kind)) {
+    // Holey elements kinds require a hole check and skipping of the element in
+ // the case of a hole.
+ Node* check;
+ if (IsDoubleElementsKind(kind)) {
+ check = graph()->NewNode(simplified()->NumberIsFloat64Hole(), element);
+ } else {
+ check = graph()->NewNode(simplified()->ReferenceEqual(), element,
+ jsgraph()->TheHoleConstant());
+ }
+ Node* branch =
+ graph()->NewNode(common()->Branch(BranchHint::kFalse), check, control);
+ hole_true = graph()->NewNode(common()->IfTrue(), branch);
+ hole_false = graph()->NewNode(common()->IfFalse(), branch);
+ control = hole_false;
+
+ // The contract is that we don't leak "the hole" into "user JavaScript",
+ // so we must rename the {element} here to explicitly exclude "the hole"
+ // from the type of {element}.
+ element = effect = graph()->NewNode(
+ common()->TypeGuard(Type::NonInternal()), element, effect, control);
+ }
+
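+  // For intuition: for holey elements kinds the loop built here roughly
+  // corresponds to
+  //
+  //   for (let k = 0; k < original_length; k++) {
+  //     if (!(k in receiver)) continue;  // hole: skip callback and store
+  //     a[k] = fncallback.call(this_arg, receiver[k], k, receiver);
+  //   }
+  //
+  // with map checks and deopt checkpoints interleaved; the sketch is
+  // illustrative only.
+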
// This frame state is dealt with by hand in
// ArrayMapLoopLazyDeoptContinuation.
frame_state = CreateJavaScriptBuiltinContinuationFrameState(
@@ -1143,11 +1642,19 @@ Reduction JSCallReducer::ReduceArrayMap(Handle<JSFunction> function,
simplified()->TransitionAndStoreElement(double_map, fast_map), a, k,
callback_value, effect, control);
- k = next_k;
+ if (IsHoleyElementsKind(kind)) {
+ Node* after_call_and_store_control = control;
+ Node* after_call_and_store_effect = effect;
+ control = hole_true;
+ effect = effect_true;
- loop->ReplaceInput(1, control);
- vloop->ReplaceInput(1, k);
- eloop->ReplaceInput(1, effect);
+ control = graph()->NewNode(common()->Merge(2), control,
+ after_call_and_store_control);
+ effect = graph()->NewNode(common()->EffectPhi(2), effect,
+ after_call_and_store_effect, control);
+ }
+
+ WireInLoopEnd(loop, eloop, vloop, next_k, control, effect);
control = if_false;
effect = eloop;
@@ -1168,11 +1675,15 @@ Reduction JSCallReducer::ReduceArrayFilter(Handle<JSFunction> function,
Node* node) {
if (!FLAG_turbo_inline_array_builtins) return NoChange();
DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
+ CallParameters const& p = CallParametersOf(node->op());
+ if (p.speculation_mode() == SpeculationMode::kDisallowSpeculation) {
+ return NoChange();
+ }
+
Node* outer_frame_state = NodeProperties::GetFrameStateInput(node);
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
Node* context = NodeProperties::GetContextInput(node);
- CallParameters const& p = CallParametersOf(node->op());
// Try to determine the {receiver} map.
Node* receiver = NodeProperties::GetValueInput(node, 1);
Node* fncallback = node->op()->ValueInputCount() > 2
@@ -1184,21 +1695,14 @@ Reduction JSCallReducer::ReduceArrayFilter(Handle<JSFunction> function,
ZoneHandleSet<Map> receiver_maps;
NodeProperties::InferReceiverMapsResult result =
NodeProperties::InferReceiverMaps(receiver, effect, &receiver_maps);
- if (result != NodeProperties::kReliableReceiverMaps) {
- return NoChange();
- }
+ if (result == NodeProperties::kNoReceiverMaps) return NoChange();
// And ensure that any changes to the Array species constructor cause deopt.
if (!isolate()->IsArraySpeciesLookupChainIntact()) return NoChange();
- if (receiver_maps.size() == 0) return NoChange();
-
const ElementsKind kind = receiver_maps[0]->elements_kind();
-
- // TODO(danno): Handle holey elements kinds.
- if (!IsFastPackedElementsKind(kind)) {
- return NoChange();
- }
+ // The output array is packed (filter doesn't visit holes).
+ const ElementsKind packed_kind = GetPackedElementsKind(kind);
for (Handle<Map> receiver_map : receiver_maps) {
if (!CanInlineArrayIteratingBuiltin(receiver_map)) {
@@ -1206,23 +1710,24 @@ Reduction JSCallReducer::ReduceArrayFilter(Handle<JSFunction> function,
}
     // We can handle different maps, as long as their elements kinds are the
     // same.
- if (receiver_map->elements_kind() != kind) {
- return NoChange();
- }
+ if (receiver_map->elements_kind() != kind) return NoChange();
}
dependencies()->AssumePropertyCell(factory()->species_protector());
Handle<Map> initial_map(
- Map::cast(native_context()->GetInitialJSArrayMap(kind)));
+ Map::cast(native_context()->GetInitialJSArrayMap(packed_kind)));
Node* k = jsgraph()->ZeroConstant();
Node* to = jsgraph()->ZeroConstant();
- // Make sure the map hasn't changed before we construct the output array.
- effect = graph()->NewNode(
- simplified()->CheckMaps(CheckMapsFlag::kNone, receiver_maps), receiver,
- effect, control);
+ // If we have unreliable maps, we need a map check.
+ if (result == NodeProperties::kUnreliableReceiverMaps) {
+ effect =
+ graph()->NewNode(simplified()->CheckMaps(CheckMapsFlag::kNone,
+ receiver_maps, p.feedback()),
+ receiver, effect, control);
+ }
Node* a; // Construct the output array.
{
@@ -1232,7 +1737,8 @@ Reduction JSCallReducer::ReduceArrayFilter(Handle<JSFunction> function,
Node* empty_fixed_array = jsgraph()->EmptyFixedArrayConstant();
ab.Store(AccessBuilder::ForJSObjectPropertiesOrHash(), empty_fixed_array);
ab.Store(AccessBuilder::ForJSObjectElements(), empty_fixed_array);
- ab.Store(AccessBuilder::ForJSArrayLength(kind), jsgraph()->ZeroConstant());
+ ab.Store(AccessBuilder::ForJSArrayLength(packed_kind),
+ jsgraph()->ZeroConstant());
for (int i = 0; i < initial_map->GetInObjectProperties(); ++i) {
ab.Store(AccessBuilder::ForJSObjectInObjectProperty(initial_map, i),
jsgraph()->UndefinedConstant());
@@ -1268,19 +1774,11 @@ Reduction JSCallReducer::ReduceArrayFilter(Handle<JSFunction> function,
}
// Start the loop.
- Node* loop = control = graph()->NewNode(common()->Loop(2), control, control);
- Node* eloop = effect =
- graph()->NewNode(common()->EffectPhi(2), effect, effect, loop);
- Node* terminate = graph()->NewNode(common()->Terminate(), eloop, loop);
- NodeProperties::MergeControlToEnd(graph(), common(), terminate);
- Node* vloop = k = graph()->NewNode(
- common()->Phi(MachineRepresentation::kTagged, 2), k, k, loop);
+ Node* vloop = k = WireInLoopStart(k, &control, &effect);
+ Node *loop = control, *eloop = effect;
Node* v_to_loop = to = graph()->NewNode(
common()->Phi(MachineRepresentation::kTaggedSigned, 2), to, to, loop);
- control = loop;
- effect = eloop;
-
Node* continue_test =
graph()->NewNode(simplified()->NumberLessThan(), k, original_length);
Node* continue_branch = graph()->NewNode(common()->Branch(BranchHint::kTrue),
@@ -1305,15 +1803,45 @@ Reduction JSCallReducer::ReduceArrayFilter(Handle<JSFunction> function,
}
// Make sure the map hasn't changed during the iteration.
- effect = graph()->NewNode(
- simplified()->CheckMaps(CheckMapsFlag::kNone, receiver_maps), receiver,
- effect, control);
+ effect =
+ graph()->NewNode(simplified()->CheckMaps(CheckMapsFlag::kNone,
+ receiver_maps, p.feedback()),
+ receiver, effect, control);
- Node* element = SafeLoadElement(kind, receiver, control, &effect, &k);
+ Node* element =
+ SafeLoadElement(kind, receiver, control, &effect, &k, p.feedback());
Node* next_k =
graph()->NewNode(simplified()->NumberAdd(), k, jsgraph()->OneConstant());
+ Node* hole_true = nullptr;
+ Node* hole_false = nullptr;
+ Node* effect_true = effect;
+ Node* hole_true_vto = to;
+
+ if (IsHoleyElementsKind(kind)) {
+    // Holey elements kinds require a hole check and skipping of the element in
+    // the case of a hole.
+ Node* check;
+ if (IsDoubleElementsKind(kind)) {
+ check = graph()->NewNode(simplified()->NumberIsFloat64Hole(), element);
+ } else {
+ check = graph()->NewNode(simplified()->ReferenceEqual(), element,
+ jsgraph()->TheHoleConstant());
+ }
+ Node* branch =
+ graph()->NewNode(common()->Branch(BranchHint::kFalse), check, control);
+ hole_true = graph()->NewNode(common()->IfTrue(), branch);
+ hole_false = graph()->NewNode(common()->IfFalse(), branch);
+ control = hole_false;
+
+ // The contract is that we don't leak "the hole" into "user JavaScript",
+ // so we must rename the {element} here to explicitly exclude "the hole"
+ // from the type of {element}.
+ element = effect = graph()->NewNode(
+ common()->TypeGuard(Type::NonInternal()), element, effect, control);
+ }
+
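+  // For intuition: with the hole check above, the filter loop roughly
+  // corresponds to
+  //
+  //   for (let k = 0; k < original_length; k++) {
+  //     if (!(k in receiver)) continue;  // hole: skip callback, keep {to}
+  //     const element = receiver[k];
+  //     if (fncallback.call(this_arg, element, k, receiver)) a[to++] = element;
+  //   }
+  //
+  // where {to} only advances on the non-hole path (see the Phi further down);
+  // illustrative only.
+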
Node* callback_value = nullptr;
{
// This frame state is dealt with by hand in
@@ -1363,14 +1891,25 @@ Reduction JSCallReducer::ReduceArrayFilter(Handle<JSFunction> function,
// We have to coerce callback_value to boolean, and only store the element in
// a if it's true. The checkpoint above protects against the case that
// growing {a} fails.
- to = DoFilterPostCallbackWork(kind, &control, &effect, a, to, element,
+ to = DoFilterPostCallbackWork(packed_kind, &control, &effect, a, to, element,
callback_value);
- k = next_k;
- loop->ReplaceInput(1, control);
- vloop->ReplaceInput(1, k);
+ if (IsHoleyElementsKind(kind)) {
+ Node* after_call_control = control;
+ Node* after_call_effect = effect;
+ control = hole_true;
+ effect = effect_true;
+
+ control = graph()->NewNode(common()->Merge(2), control, after_call_control);
+ effect = graph()->NewNode(common()->EffectPhi(2), effect, after_call_effect,
+ control);
+ to =
+ graph()->NewNode(common()->Phi(MachineRepresentation::kTaggedSigned, 2),
+ hole_true_vto, to, control);
+ }
+
+ WireInLoopEnd(loop, eloop, vloop, next_k, control, effect);
v_to_loop->ReplaceInput(1, to);
- eloop->ReplaceInput(1, effect);
control = if_false;
effect = eloop;
@@ -1387,6 +1926,216 @@ Reduction JSCallReducer::ReduceArrayFilter(Handle<JSFunction> function,
return Replace(a);
}
+Reduction JSCallReducer::ReduceArrayFind(ArrayFindVariant variant,
+ Handle<JSFunction> function,
+ Node* node) {
+ if (!FLAG_turbo_inline_array_builtins) return NoChange();
+ DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
+ CallParameters const& p = CallParametersOf(node->op());
+ if (p.speculation_mode() == SpeculationMode::kDisallowSpeculation) {
+ return NoChange();
+ }
+
+ Builtins::Name eager_continuation_builtin;
+ Builtins::Name lazy_continuation_builtin;
+ Builtins::Name after_callback_lazy_continuation_builtin;
+ if (variant == ArrayFindVariant::kFind) {
+ eager_continuation_builtin = Builtins::kArrayFindLoopEagerDeoptContinuation;
+ lazy_continuation_builtin = Builtins::kArrayFindLoopLazyDeoptContinuation;
+ after_callback_lazy_continuation_builtin =
+ Builtins::kArrayFindLoopAfterCallbackLazyDeoptContinuation;
+ } else {
+ DCHECK_EQ(ArrayFindVariant::kFindIndex, variant);
+ eager_continuation_builtin =
+ Builtins::kArrayFindIndexLoopEagerDeoptContinuation;
+ lazy_continuation_builtin =
+ Builtins::kArrayFindIndexLoopLazyDeoptContinuation;
+ after_callback_lazy_continuation_builtin =
+ Builtins::kArrayFindIndexLoopAfterCallbackLazyDeoptContinuation;
+ }
+
+ Node* outer_frame_state = NodeProperties::GetFrameStateInput(node);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+ Node* context = NodeProperties::GetContextInput(node);
+
+ // Try to determine the {receiver} map.
+ Node* receiver = NodeProperties::GetValueInput(node, 1);
+ Node* fncallback = node->op()->ValueInputCount() > 2
+ ? NodeProperties::GetValueInput(node, 2)
+ : jsgraph()->UndefinedConstant();
+ Node* this_arg = node->op()->ValueInputCount() > 3
+ ? NodeProperties::GetValueInput(node, 3)
+ : jsgraph()->UndefinedConstant();
+ ZoneHandleSet<Map> receiver_maps;
+ NodeProperties::InferReceiverMapsResult result =
+ NodeProperties::InferReceiverMaps(receiver, effect, &receiver_maps);
+ if (result == NodeProperties::kNoReceiverMaps) return NoChange();
+
+ const ElementsKind kind = receiver_maps[0]->elements_kind();
+
+ // TODO(pwong): Handle holey double elements kinds.
+ if (IsDoubleElementsKind(kind) && IsHoleyElementsKind(kind)) {
+ return NoChange();
+ }
+
+ for (Handle<Map> receiver_map : receiver_maps) {
+ if (!CanInlineArrayIteratingBuiltin(receiver_map)) return NoChange();
+    // We can handle different maps, as long as their elements kinds are the
+    // same.
+ if (receiver_map->elements_kind() != kind) return NoChange();
+ }
+
+ // Install code dependencies on the {receiver} prototype maps and the
+ // global array protector cell.
+ dependencies()->AssumePropertyCell(factory()->no_elements_protector());
+
+ // If we have unreliable maps, we need a map check.
+ if (result == NodeProperties::kUnreliableReceiverMaps) {
+ effect =
+ graph()->NewNode(simplified()->CheckMaps(CheckMapsFlag::kNone,
+ receiver_maps, p.feedback()),
+ receiver, effect, control);
+ }
+
+ Node* k = jsgraph()->ZeroConstant();
+
+ Node* original_length = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSArrayLength(kind)), receiver,
+ effect, control);
+
+ std::vector<Node*> checkpoint_params(
+ {receiver, fncallback, this_arg, k, original_length});
+ const int stack_parameters = static_cast<int>(checkpoint_params.size());
+
+ // Check whether the given callback function is callable. Note that this has
+ // to happen outside the loop to make sure we also throw on empty arrays.
+ Node* check_fail = nullptr;
+ Node* check_throw = nullptr;
+ {
+ Node* frame_state = CreateJavaScriptBuiltinContinuationFrameState(
+ jsgraph(), function, lazy_continuation_builtin, node->InputAt(0),
+ context, &checkpoint_params[0], stack_parameters, outer_frame_state,
+ ContinuationFrameStateMode::LAZY);
+ WireInCallbackIsCallableCheck(fncallback, context, frame_state, effect,
+ &control, &check_fail, &check_throw);
+ }
+
+ // Start the loop.
+ Node* vloop = k = WireInLoopStart(k, &control, &effect);
+ Node *loop = control, *eloop = effect;
+ checkpoint_params[3] = k;
+
+ // Check if we've iterated past the last element of the array.
+ Node* if_false = nullptr;
+ {
+ Node* continue_test =
+ graph()->NewNode(simplified()->NumberLessThan(), k, original_length);
+ Node* continue_branch = graph()->NewNode(
+ common()->Branch(BranchHint::kTrue), continue_test, control);
+ control = graph()->NewNode(common()->IfTrue(), continue_branch);
+ if_false = graph()->NewNode(common()->IfFalse(), continue_branch);
+ }
+
+ // Check the map hasn't changed during the iteration.
+ {
+ Node* frame_state = CreateJavaScriptBuiltinContinuationFrameState(
+ jsgraph(), function, eager_continuation_builtin, node->InputAt(0),
+ context, &checkpoint_params[0], stack_parameters, outer_frame_state,
+ ContinuationFrameStateMode::EAGER);
+
+ effect =
+ graph()->NewNode(common()->Checkpoint(), frame_state, effect, control);
+
+ effect =
+ graph()->NewNode(simplified()->CheckMaps(CheckMapsFlag::kNone,
+ receiver_maps, p.feedback()),
+ receiver, effect, control);
+ }
+
+ // Load k-th element from receiver.
+ Node* element =
+ SafeLoadElement(kind, receiver, control, &effect, &k, p.feedback());
+
+ // Increment k for the next iteration.
+ Node* next_k = checkpoint_params[3] =
+ graph()->NewNode(simplified()->NumberAdd(), k, jsgraph()->OneConstant());
+
+ // Replace holes with undefined.
+ if (IsHoleyElementsKind(kind)) {
+ element = graph()->NewNode(
+ common()->Select(MachineRepresentation::kTagged, BranchHint::kFalse),
+ graph()->NewNode(simplified()->ReferenceEqual(), element,
+ jsgraph()->TheHoleConstant()),
+ jsgraph()->UndefinedConstant(), element);
+ }
+
+ Node* if_found_return_value =
+ (variant == ArrayFindVariant::kFind) ? element : k;
+
+ // Call the callback.
+ Node* callback_value = nullptr;
+ {
+ std::vector<Node*> call_checkpoint_params({receiver, fncallback, this_arg,
+ next_k, original_length,
+ if_found_return_value});
+ const int call_stack_parameters =
+ static_cast<int>(call_checkpoint_params.size());
+
+ Node* frame_state = CreateJavaScriptBuiltinContinuationFrameState(
+ jsgraph(), function, after_callback_lazy_continuation_builtin,
+ node->InputAt(0), context, &call_checkpoint_params[0],
+ call_stack_parameters, outer_frame_state,
+ ContinuationFrameStateMode::LAZY);
+
+ callback_value = control = effect = graph()->NewNode(
+ javascript()->Call(5, p.frequency()), fncallback, this_arg, element, k,
+ receiver, context, frame_state, effect, control);
+ }
+
+ // Rewire potential exception edges.
+ Node* on_exception = nullptr;
+ if (NodeProperties::IsExceptionalCall(node, &on_exception)) {
+ RewirePostCallbackExceptionEdges(check_throw, on_exception, effect,
+ &check_fail, &control);
+ }
+
+ // Check whether the given callback function returned a truthy value.
+ Node* boolean_result =
+ graph()->NewNode(simplified()->ToBoolean(), callback_value);
+ Node* efound_branch = effect;
+ Node* found_branch = graph()->NewNode(common()->Branch(BranchHint::kFalse),
+ boolean_result, control);
+ Node* if_found = graph()->NewNode(common()->IfTrue(), found_branch);
+ Node* if_notfound = graph()->NewNode(common()->IfFalse(), found_branch);
+ control = if_notfound;
+
+ // Close the loop.
+ WireInLoopEnd(loop, eloop, vloop, next_k, control, effect);
+
+ control = graph()->NewNode(common()->Merge(2), if_found, if_false);
+ effect =
+ graph()->NewNode(common()->EffectPhi(2), efound_branch, eloop, control);
+
+ Node* if_not_found_value = (variant == ArrayFindVariant::kFind)
+ ? jsgraph()->UndefinedConstant()
+ : jsgraph()->MinusOneConstant();
+ Node* return_value =
+ graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ if_found_return_value, if_not_found_value, control);
+
+ // Wire up the branch for the case when IsCallable fails for the callback.
+  // Since {check_throw} is an unconditional throw, that path can never
+  // produce a successful completion, so we simply connect it to the graph end.
+ Node* throw_node =
+ graph()->NewNode(common()->Throw(), check_throw, check_fail);
+ NodeProperties::MergeControlToEnd(graph(), common(), throw_node);
+
+ ReplaceWithValue(node, return_value, effect, control);
+ return Replace(return_value);
+}
+
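+// For intuition: ReduceArrayFind builds a loop that roughly corresponds to
+//
+//   for (let k = 0; k < original_length; k++) {
+//     const element = (k in receiver) ? receiver[k] : undefined;
+//     if (fncallback.call(this_arg, element, k, receiver)) {
+//       return variant == kFind ? element : k;
+//     }
+//   }
+//   return variant == kFind ? undefined : -1;
+//
+// Unlike map/filter, holes are not skipped; they are read as undefined before
+// the callback is invoked. Illustrative sketch only.
+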
Node* JSCallReducer::DoFilterPostCallbackWork(ElementsKind kind, Node** control,
Node** effect, Node* a, Node* to,
Node* element,
@@ -1411,8 +2160,8 @@ Node* JSCallReducer::DoFilterPostCallbackWork(ElementsKind kind, Node** control,
// We know that {to} is in Unsigned31 range here, being smaller than
// {original_length} at all times.
- Node* checked_to =
- graph()->NewNode(common()->TypeGuard(Type::Unsigned31()), to, if_true);
+ Node* checked_to = etrue = graph()->NewNode(
+ common()->TypeGuard(Type::Unsigned31()), to, etrue, if_true);
Node* elements_length = etrue = graph()->NewNode(
simplified()->LoadField(AccessBuilder::ForFixedArrayLength()), elements,
etrue, if_true);
@@ -1420,9 +2169,9 @@ Node* JSCallReducer::DoFilterPostCallbackWork(ElementsKind kind, Node** control,
GrowFastElementsMode mode =
IsDoubleElementsKind(kind) ? GrowFastElementsMode::kDoubleElements
: GrowFastElementsMode::kSmiOrObjectElements;
- elements = etrue =
- graph()->NewNode(simplified()->MaybeGrowFastElements(mode), a, elements,
- checked_to, elements_length, etrue, if_true);
+ elements = etrue = graph()->NewNode(
+ simplified()->MaybeGrowFastElements(mode, VectorSlotPair()), a,
+ elements, checked_to, elements_length, etrue, if_true);
// Update the length of {a}.
Node* new_length_a = graph()->NewNode(simplified()->NumberAdd(), checked_to,
@@ -1489,14 +2238,15 @@ void JSCallReducer::RewirePostCallbackExceptionEdges(Node* check_throw,
}
Node* JSCallReducer::SafeLoadElement(ElementsKind kind, Node* receiver,
- Node* control, Node** effect, Node** k) {
+ Node* control, Node** effect, Node** k,
+ const VectorSlotPair& feedback) {
// Make sure that the access is still in bounds, since the callback could have
// changed the array's size.
Node* length = *effect = graph()->NewNode(
simplified()->LoadField(AccessBuilder::ForJSArrayLength(kind)), receiver,
*effect, control);
- *k = *effect = graph()->NewNode(simplified()->CheckBounds(), *k, length,
- *effect, control);
+ *k = *effect = graph()->NewNode(simplified()->CheckBounds(feedback), *k,
+ length, *effect, control);
// Reload the elements pointer before calling the callback, since the previous
// callback might have resized the array causing the elements buffer to be
@@ -1514,6 +2264,455 @@ Node* JSCallReducer::SafeLoadElement(ElementsKind kind, Node* receiver,
return element;
}
+Reduction JSCallReducer::ReduceArrayEvery(Handle<JSFunction> function,
+ Node* node) {
+ if (!FLAG_turbo_inline_array_builtins) return NoChange();
+ DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
+ CallParameters const& p = CallParametersOf(node->op());
+ if (p.speculation_mode() == SpeculationMode::kDisallowSpeculation) {
+ return NoChange();
+ }
+
+ Node* outer_frame_state = NodeProperties::GetFrameStateInput(node);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+ Node* context = NodeProperties::GetContextInput(node);
+ // Try to determine the {receiver} map.
+ Node* receiver = NodeProperties::GetValueInput(node, 1);
+ Node* fncallback = node->op()->ValueInputCount() > 2
+ ? NodeProperties::GetValueInput(node, 2)
+ : jsgraph()->UndefinedConstant();
+ Node* this_arg = node->op()->ValueInputCount() > 3
+ ? NodeProperties::GetValueInput(node, 3)
+ : jsgraph()->UndefinedConstant();
+ ZoneHandleSet<Map> receiver_maps;
+ NodeProperties::InferReceiverMapsResult result =
+ NodeProperties::InferReceiverMaps(receiver, effect, &receiver_maps);
+ if (result == NodeProperties::kNoReceiverMaps) return NoChange();
+
+ // And ensure that any changes to the Array species constructor cause deopt.
+ if (!isolate()->IsArraySpeciesLookupChainIntact()) return NoChange();
+
+ const ElementsKind kind = receiver_maps[0]->elements_kind();
+
+ for (Handle<Map> receiver_map : receiver_maps) {
+ if (!CanInlineArrayIteratingBuiltin(receiver_map)) return NoChange();
+    // We can handle different maps, as long as their elements kinds are the
+    // same.
+ if (receiver_map->elements_kind() != kind) return NoChange();
+ }
+
+ dependencies()->AssumePropertyCell(factory()->species_protector());
+
+ // If we have unreliable maps, we need a map check.
+ if (result == NodeProperties::kUnreliableReceiverMaps) {
+ effect =
+ graph()->NewNode(simplified()->CheckMaps(CheckMapsFlag::kNone,
+ receiver_maps, p.feedback()),
+ receiver, effect, control);
+ }
+
+ Node* k = jsgraph()->ZeroConstant();
+
+  // Make sure the map hasn't changed before we load the length.
+ effect = graph()->NewNode(
+ simplified()->CheckMaps(CheckMapsFlag::kNone, receiver_maps), receiver,
+ effect, control);
+
+ Node* original_length = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSArrayLength(kind)), receiver,
+ effect, control);
+
+ // Check whether the given callback function is callable. Note that this has
+ // to happen outside the loop to make sure we also throw on empty arrays.
+ Node* check_fail = nullptr;
+ Node* check_throw = nullptr;
+ {
+    // This frame state doesn't ever call the deopt continuation; it's only
+    // necessary to specify a continuation in order to handle the exceptional
+    // case.
+ std::vector<Node*> checkpoint_params(
+ {receiver, fncallback, this_arg, k, original_length});
+ const int stack_parameters = static_cast<int>(checkpoint_params.size());
+
+ Node* check_frame_state = CreateJavaScriptBuiltinContinuationFrameState(
+ jsgraph(), function, Builtins::kArrayEveryLoopLazyDeoptContinuation,
+ node->InputAt(0), context, &checkpoint_params[0], stack_parameters,
+ outer_frame_state, ContinuationFrameStateMode::LAZY);
+ WireInCallbackIsCallableCheck(fncallback, context, check_frame_state,
+ effect, &control, &check_fail, &check_throw);
+ }
+
+ // Start the loop.
+ Node* vloop = k = WireInLoopStart(k, &control, &effect);
+ Node *loop = control, *eloop = effect;
+
+ Node* continue_test =
+ graph()->NewNode(simplified()->NumberLessThan(), k, original_length);
+ Node* continue_branch = graph()->NewNode(common()->Branch(BranchHint::kTrue),
+ continue_test, control);
+
+ Node* if_true = graph()->NewNode(common()->IfTrue(), continue_branch);
+ Node* if_false = graph()->NewNode(common()->IfFalse(), continue_branch);
+ control = if_true;
+
+ {
+ std::vector<Node*> checkpoint_params(
+ {receiver, fncallback, this_arg, k, original_length});
+ const int stack_parameters = static_cast<int>(checkpoint_params.size());
+
+ Node* frame_state = CreateJavaScriptBuiltinContinuationFrameState(
+ jsgraph(), function, Builtins::kArrayEveryLoopEagerDeoptContinuation,
+ node->InputAt(0), context, &checkpoint_params[0], stack_parameters,
+ outer_frame_state, ContinuationFrameStateMode::EAGER);
+
+ effect =
+ graph()->NewNode(common()->Checkpoint(), frame_state, effect, control);
+ }
+
+ // Make sure the map hasn't changed during the iteration.
+ effect =
+ graph()->NewNode(simplified()->CheckMaps(CheckMapsFlag::kNone,
+ receiver_maps, p.feedback()),
+ receiver, effect, control);
+
+ Node* element =
+ SafeLoadElement(kind, receiver, control, &effect, &k, p.feedback());
+
+ Node* next_k =
+ graph()->NewNode(simplified()->NumberAdd(), k, jsgraph()->OneConstant());
+
+ Node* hole_true = nullptr;
+ Node* hole_false = nullptr;
+ Node* effect_true = effect;
+
+ if (IsHoleyElementsKind(kind)) {
+    // Holey elements kinds require a hole check and skipping of the element in
+    // the case of a hole.
+ Node* check;
+ if (IsDoubleElementsKind(kind)) {
+ check = graph()->NewNode(simplified()->NumberIsFloat64Hole(), element);
+ } else {
+ check = graph()->NewNode(simplified()->ReferenceEqual(), element,
+ jsgraph()->TheHoleConstant());
+ }
+ Node* branch =
+ graph()->NewNode(common()->Branch(BranchHint::kFalse), check, control);
+ hole_true = graph()->NewNode(common()->IfTrue(), branch);
+ hole_false = graph()->NewNode(common()->IfFalse(), branch);
+ control = hole_false;
+
+ // The contract is that we don't leak "the hole" into "user JavaScript",
+ // so we must rename the {element} here to explicitly exclude "the hole"
+ // from the type of {element}.
+ element = effect = graph()->NewNode(
+ common()->TypeGuard(Type::NonInternal()), element, effect, control);
+ }
+
+ Node* callback_value = nullptr;
+ {
+ // This frame state is dealt with by hand in
+ // Builtins::kArrayEveryLoopLazyDeoptContinuation.
+ std::vector<Node*> checkpoint_params(
+ {receiver, fncallback, this_arg, k, original_length});
+ const int stack_parameters = static_cast<int>(checkpoint_params.size());
+
+ Node* frame_state = CreateJavaScriptBuiltinContinuationFrameState(
+ jsgraph(), function, Builtins::kArrayEveryLoopLazyDeoptContinuation,
+ node->InputAt(0), context, &checkpoint_params[0], stack_parameters,
+ outer_frame_state, ContinuationFrameStateMode::LAZY);
+
+ callback_value = control = effect = graph()->NewNode(
+ javascript()->Call(5, p.frequency()), fncallback, this_arg, element, k,
+ receiver, context, frame_state, effect, control);
+ }
+
+ // Rewire potential exception edges.
+ Node* on_exception = nullptr;
+ if (NodeProperties::IsExceptionalCall(node, &on_exception)) {
+ RewirePostCallbackExceptionEdges(check_throw, on_exception, effect,
+ &check_fail, &control);
+ }
+
+ // We have to coerce callback_value to boolean.
+ Node* if_false_callback;
+ Node* efalse_callback;
+ {
+ Node* boolean_result =
+ graph()->NewNode(simplified()->ToBoolean(), callback_value);
+ Node* check_boolean_result =
+ graph()->NewNode(simplified()->ReferenceEqual(), boolean_result,
+ jsgraph()->TrueConstant());
+ Node* boolean_branch = graph()->NewNode(common()->Branch(BranchHint::kTrue),
+ check_boolean_result, control);
+ if_false_callback = graph()->NewNode(common()->IfFalse(), boolean_branch);
+ efalse_callback = effect;
+
+ // Nothing to do in the true case.
+ control = graph()->NewNode(common()->IfTrue(), boolean_branch);
+ }
+
+ if (IsHoleyElementsKind(kind)) {
+ Node* after_call_control = control;
+ Node* after_call_effect = effect;
+ control = hole_true;
+ effect = effect_true;
+
+ control = graph()->NewNode(common()->Merge(2), control, after_call_control);
+ effect = graph()->NewNode(common()->EffectPhi(2), effect, after_call_effect,
+ control);
+ }
+
+ WireInLoopEnd(loop, eloop, vloop, next_k, control, effect);
+
+ control = graph()->NewNode(common()->Merge(2), if_false, if_false_callback);
+ effect =
+ graph()->NewNode(common()->EffectPhi(2), eloop, efalse_callback, control);
+ Node* return_value = graph()->NewNode(
+ common()->Phi(MachineRepresentation::kTagged, 2),
+ jsgraph()->TrueConstant(), jsgraph()->FalseConstant(), control);
+
+ // Wire up the branch for the case when IsCallable fails for the callback.
+  // Since {check_throw} is an unconditional throw, that path can never
+  // produce a successful completion, so we simply connect it to the graph end.
+ Node* throw_node =
+ graph()->NewNode(common()->Throw(), check_throw, check_fail);
+ NodeProperties::MergeControlToEnd(graph(), common(), throw_node);
+
+ ReplaceWithValue(node, return_value, effect, control);
+ return Replace(return_value);
+}
+
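+// For intuition: ReduceArrayEvery builds a loop that roughly corresponds to
+//
+//   for (let k = 0; k < original_length; k++) {
+//     if (!(k in receiver)) continue;  // holes are skipped
+//     if (!fncallback.call(this_arg, receiver[k], k, receiver)) return false;
+//   }
+//   return true;
+//
+// Illustrative sketch only; the real graph also carries map checks and deopt
+// checkpoints.
+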
+Reduction JSCallReducer::ReduceArraySome(Handle<JSFunction> function,
+ Node* node) {
+ if (!FLAG_turbo_inline_array_builtins) return NoChange();
+ DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
+ CallParameters const& p = CallParametersOf(node->op());
+ if (p.speculation_mode() == SpeculationMode::kDisallowSpeculation) {
+ return NoChange();
+ }
+
+ Node* outer_frame_state = NodeProperties::GetFrameStateInput(node);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+ Node* context = NodeProperties::GetContextInput(node);
+ // Try to determine the {receiver} map.
+ Node* receiver = NodeProperties::GetValueInput(node, 1);
+ Node* fncallback = node->op()->ValueInputCount() > 2
+ ? NodeProperties::GetValueInput(node, 2)
+ : jsgraph()->UndefinedConstant();
+ Node* this_arg = node->op()->ValueInputCount() > 3
+ ? NodeProperties::GetValueInput(node, 3)
+ : jsgraph()->UndefinedConstant();
+ ZoneHandleSet<Map> receiver_maps;
+ NodeProperties::InferReceiverMapsResult result =
+ NodeProperties::InferReceiverMaps(receiver, effect, &receiver_maps);
+ if (result == NodeProperties::kNoReceiverMaps) return NoChange();
+
+ // And ensure that any changes to the Array species constructor cause deopt.
+ if (!isolate()->IsArraySpeciesLookupChainIntact()) return NoChange();
+
+ if (receiver_maps.size() == 0) return NoChange();
+
+ const ElementsKind kind = receiver_maps[0]->elements_kind();
+
+ // TODO(pwong): Handle holey double elements kinds.
+ if (IsDoubleElementsKind(kind) && IsHoleyElementsKind(kind)) {
+ return NoChange();
+ }
+
+ for (Handle<Map> receiver_map : receiver_maps) {
+ if (!CanInlineArrayIteratingBuiltin(receiver_map)) return NoChange();
+    // We can handle different maps, as long as their elements kinds are the
+    // same.
+ if (receiver_map->elements_kind() != kind) return NoChange();
+ }
+
+ dependencies()->AssumePropertyCell(factory()->species_protector());
+
+ Node* k = jsgraph()->ZeroConstant();
+
+ // If we have unreliable maps, we need a map check.
+ if (result == NodeProperties::kUnreliableReceiverMaps) {
+ effect =
+ graph()->NewNode(simplified()->CheckMaps(CheckMapsFlag::kNone,
+ receiver_maps, p.feedback()),
+ receiver, effect, control);
+ }
+
+  // Make sure the map hasn't changed before we load the length.
+ effect = graph()->NewNode(
+ simplified()->CheckMaps(CheckMapsFlag::kNone, receiver_maps), receiver,
+ effect, control);
+
+ Node* original_length = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSArrayLength(kind)), receiver,
+ effect, control);
+
+ // Check whether the given callback function is callable. Note that this has
+ // to happen outside the loop to make sure we also throw on empty arrays.
+ Node* check_fail = nullptr;
+ Node* check_throw = nullptr;
+ {
+    // This frame state doesn't ever call the deopt continuation; it's only
+    // necessary to specify a continuation in order to handle the exceptional
+    // case.
+ std::vector<Node*> checkpoint_params(
+ {receiver, fncallback, this_arg, k, original_length});
+ const int stack_parameters = static_cast<int>(checkpoint_params.size());
+
+ Node* check_frame_state = CreateJavaScriptBuiltinContinuationFrameState(
+ jsgraph(), function, Builtins::kArraySomeLoopLazyDeoptContinuation,
+ node->InputAt(0), context, &checkpoint_params[0], stack_parameters,
+ outer_frame_state, ContinuationFrameStateMode::LAZY);
+ WireInCallbackIsCallableCheck(fncallback, context, check_frame_state,
+ effect, &control, &check_fail, &check_throw);
+ }
+
+ // Start the loop.
+ Node* loop = control = graph()->NewNode(common()->Loop(2), control, control);
+ Node* eloop = effect =
+ graph()->NewNode(common()->EffectPhi(2), effect, effect, loop);
+ Node* terminate = graph()->NewNode(common()->Terminate(), eloop, loop);
+ NodeProperties::MergeControlToEnd(graph(), common(), terminate);
+ Node* vloop = k = graph()->NewNode(
+ common()->Phi(MachineRepresentation::kTagged, 2), k, k, loop);
+
+ Node* continue_test =
+ graph()->NewNode(simplified()->NumberLessThan(), k, original_length);
+ Node* continue_branch = graph()->NewNode(common()->Branch(BranchHint::kTrue),
+ continue_test, control);
+
+ Node* if_true = graph()->NewNode(common()->IfTrue(), continue_branch);
+ Node* if_false = graph()->NewNode(common()->IfFalse(), continue_branch);
+ control = if_true;
+
+ {
+ std::vector<Node*> checkpoint_params(
+ {receiver, fncallback, this_arg, k, original_length});
+ const int stack_parameters = static_cast<int>(checkpoint_params.size());
+
+ Node* frame_state = CreateJavaScriptBuiltinContinuationFrameState(
+ jsgraph(), function, Builtins::kArraySomeLoopEagerDeoptContinuation,
+ node->InputAt(0), context, &checkpoint_params[0], stack_parameters,
+ outer_frame_state, ContinuationFrameStateMode::EAGER);
+
+ effect =
+ graph()->NewNode(common()->Checkpoint(), frame_state, effect, control);
+ }
+
+ // Make sure the map hasn't changed during the iteration.
+ effect =
+ graph()->NewNode(simplified()->CheckMaps(CheckMapsFlag::kNone,
+ receiver_maps, p.feedback()),
+ receiver, effect, control);
+
+ Node* element =
+ SafeLoadElement(kind, receiver, control, &effect, &k, p.feedback());
+
+ Node* next_k =
+ graph()->NewNode(simplified()->NumberAdd(), k, jsgraph()->OneConstant());
+
+ Node* hole_true = nullptr;
+ Node* hole_false = nullptr;
+ Node* effect_true = effect;
+
+ if (IsHoleyElementsKind(kind)) {
+    // Holey elements kinds require a hole check and skipping of the element in
+    // the case of a hole.
+ Node* check = graph()->NewNode(simplified()->ReferenceEqual(), element,
+ jsgraph()->TheHoleConstant());
+ Node* branch =
+ graph()->NewNode(common()->Branch(BranchHint::kFalse), check, control);
+ hole_true = graph()->NewNode(common()->IfTrue(), branch);
+ hole_false = graph()->NewNode(common()->IfFalse(), branch);
+ control = hole_false;
+
+ // The contract is that we don't leak "the hole" into "user JavaScript",
+ // so we must rename the {element} here to explicitly exclude "the hole"
+ // from the type of {element}.
+ element = effect = graph()->NewNode(
+ common()->TypeGuard(Type::NonInternal()), element, effect, control);
+ }
+
+ Node* callback_value = nullptr;
+ {
+ // This frame state is dealt with by hand in
+    // Builtins::kArraySomeLoopLazyDeoptContinuation.
+ std::vector<Node*> checkpoint_params(
+ {receiver, fncallback, this_arg, k, original_length});
+ const int stack_parameters = static_cast<int>(checkpoint_params.size());
+
+ Node* frame_state = CreateJavaScriptBuiltinContinuationFrameState(
+ jsgraph(), function, Builtins::kArraySomeLoopLazyDeoptContinuation,
+ node->InputAt(0), context, &checkpoint_params[0], stack_parameters,
+ outer_frame_state, ContinuationFrameStateMode::LAZY);
+
+ callback_value = control = effect = graph()->NewNode(
+ javascript()->Call(5, p.frequency()), fncallback, this_arg, element, k,
+ receiver, context, frame_state, effect, control);
+ }
+
+ // Rewire potential exception edges.
+ Node* on_exception = nullptr;
+ if (NodeProperties::IsExceptionalCall(node, &on_exception)) {
+ RewirePostCallbackExceptionEdges(check_throw, on_exception, effect,
+ &check_fail, &control);
+ }
+
+ // We have to coerce callback_value to boolean.
+ Node* if_true_callback;
+ Node* etrue_callback;
+ {
+ Node* boolean_result =
+ graph()->NewNode(simplified()->ToBoolean(), callback_value);
+ Node* check_boolean_result =
+ graph()->NewNode(simplified()->ReferenceEqual(), boolean_result,
+ jsgraph()->TrueConstant());
+ Node* boolean_branch = graph()->NewNode(
+ common()->Branch(BranchHint::kFalse), check_boolean_result, control);
+ if_true_callback = graph()->NewNode(common()->IfTrue(), boolean_branch);
+ etrue_callback = effect;
+
+ // Nothing to do in the false case.
+ control = graph()->NewNode(common()->IfFalse(), boolean_branch);
+ }
+
+ if (IsHoleyElementsKind(kind)) {
+ Node* after_call_control = control;
+ Node* after_call_effect = effect;
+ control = hole_true;
+ effect = effect_true;
+
+ control = graph()->NewNode(common()->Merge(2), control, after_call_control);
+ effect = graph()->NewNode(common()->EffectPhi(2), effect, after_call_effect,
+ control);
+ }
+
+ loop->ReplaceInput(1, control);
+ vloop->ReplaceInput(1, next_k);
+ eloop->ReplaceInput(1, effect);
+
+ control = graph()->NewNode(common()->Merge(2), if_false, if_true_callback);
+ effect =
+ graph()->NewNode(common()->EffectPhi(2), eloop, etrue_callback, control);
+ Node* return_value = graph()->NewNode(
+ common()->Phi(MachineRepresentation::kTagged, 2),
+ jsgraph()->FalseConstant(), jsgraph()->TrueConstant(), control);
+
+ // Wire up the branch for the case when IsCallable fails for the callback.
+  // Since {check_throw} is an unconditional throw, that path can never
+  // produce a successful completion, so we simply connect it to the graph end.
+ Node* throw_node =
+ graph()->NewNode(common()->Throw(), check_throw, check_fail);
+ NodeProperties::MergeControlToEnd(graph(), common(), throw_node);
+
+ ReplaceWithValue(node, return_value, effect, control);
+ return Replace(return_value);
+}
+
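+// For intuition: ReduceArraySome mirrors ReduceArrayEvery with the branch
+// polarity flipped; it roughly corresponds to
+//
+//   for (let k = 0; k < original_length; k++) {
+//     if (!(k in receiver)) continue;  // holes are skipped
+//     if (fncallback.call(this_arg, receiver[k], k, receiver)) return true;
+//   }
+//   return false;
+//
+// Illustrative sketch only.
+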
Reduction JSCallReducer::ReduceCallApiFunction(Node* node,
Handle<JSFunction> function) {
DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
@@ -1911,8 +3110,6 @@ Reduction JSCallReducer::ReduceJSCall(Node* node) {
return ReduceFunctionPrototypeCall(node);
case Builtins::kFunctionPrototypeHasInstance:
return ReduceFunctionPrototypeHasInstance(node);
- case Builtins::kNumberConstructor:
- return ReduceNumberConstructor(node);
case Builtins::kObjectConstructor:
return ReduceObjectConstructor(node);
case Builtins::kObjectGetPrototypeOf:
@@ -1941,8 +3138,30 @@ Reduction JSCallReducer::ReduceJSCall(Node* node) {
return ReduceArrayMap(function, node);
case Builtins::kArrayFilter:
return ReduceArrayFilter(function, node);
+ case Builtins::kArrayReduce:
+ return ReduceArrayReduce(function, node);
+ case Builtins::kArrayReduceRight:
+ return ReduceArrayReduceRight(function, node);
+ case Builtins::kArrayPrototypeFind:
+ return ReduceArrayFind(ArrayFindVariant::kFind, function, node);
+ case Builtins::kArrayPrototypeFindIndex:
+ return ReduceArrayFind(ArrayFindVariant::kFindIndex, function, node);
+ case Builtins::kArrayEvery:
+ return ReduceArrayEvery(function, node);
+ case Builtins::kArrayPrototypePush:
+ return ReduceArrayPrototypePush(node);
+ case Builtins::kArrayPrototypePop:
+ return ReduceArrayPrototypePop(node);
+ case Builtins::kArrayPrototypeShift:
+ return ReduceArrayPrototypeShift(node);
case Builtins::kReturnReceiver:
return ReduceReturnReceiver(node);
+ case Builtins::kStringPrototypeIndexOf:
+ return ReduceStringPrototypeIndexOf(function, node);
+ case Builtins::kStringPrototypeCharAt:
+ return ReduceStringPrototypeCharAt(node);
+ case Builtins::kStringPrototypeCharCodeAt:
+ return ReduceStringPrototypeCharCodeAt(node);
default:
break;
}
@@ -2046,9 +3265,9 @@ Reduction JSCallReducer::ReduceJSCall(Node* node) {
// Check that the {target} is still the {target_function}.
Node* check = graph()->NewNode(simplified()->ReferenceEqual(), target,
target_function);
- effect =
- graph()->NewNode(simplified()->CheckIf(DeoptimizeReason::kNoReason),
- check, effect, control);
+ effect = graph()->NewNode(
+ simplified()->CheckIf(DeoptimizeReason::kWrongCallTarget), check,
+ effect, control);
// Specialize the JSCall node to the {target_function}.
NodeProperties::ReplaceValueInput(node, target_function, 0);
@@ -2119,9 +3338,9 @@ Reduction JSCallReducer::ReduceJSConstruct(Node* node) {
// Check that the {target} is still the {array_function}.
Node* check = graph()->NewNode(simplified()->ReferenceEqual(), target,
array_function);
- effect =
- graph()->NewNode(simplified()->CheckIf(DeoptimizeReason::kNoReason),
- check, effect, control);
+ effect = graph()->NewNode(
+ simplified()->CheckIf(DeoptimizeReason::kWrongCallTarget), check,
+ effect, control);
// Turn the {node} into a {JSCreateArray} call.
NodeProperties::ReplaceEffectInput(node, effect);
@@ -2142,9 +3361,9 @@ Reduction JSCallReducer::ReduceJSConstruct(Node* node) {
// Check that the {new_target} is still the {new_target_feedback}.
Node* check = graph()->NewNode(simplified()->ReferenceEqual(),
new_target, new_target_feedback);
- effect =
- graph()->NewNode(simplified()->CheckIf(DeoptimizeReason::kNoReason),
- check, effect, control);
+ effect = graph()->NewNode(
+ simplified()->CheckIf(DeoptimizeReason::kWrongCallTarget), check,
+ effect, control);
// Specialize the JSConstruct node to the {new_target_feedback}.
NodeProperties::ReplaceValueInput(node, new_target_feedback, arity + 1);
@@ -2297,6 +3516,47 @@ Reduction JSCallReducer::ReduceJSConstruct(Node* node) {
return NoChange();
}
+// ES6 String.prototype.indexOf(searchString [, position])
+// #sec-string.prototype.indexof
+Reduction JSCallReducer::ReduceStringPrototypeIndexOf(
+ Handle<JSFunction> function, Node* node) {
+ DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
+ CallParameters const& p = CallParametersOf(node->op());
+ if (p.speculation_mode() == SpeculationMode::kDisallowSpeculation) {
+ return NoChange();
+ }
+
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+ if (node->op()->ValueInputCount() >= 3) {
+ Node* receiver = NodeProperties::GetValueInput(node, 1);
+ Node* new_receiver = effect = graph()->NewNode(
+ simplified()->CheckString(p.feedback()), receiver, effect, control);
+
+ Node* search_string = NodeProperties::GetValueInput(node, 2);
+ Node* new_search_string = effect =
+ graph()->NewNode(simplified()->CheckString(p.feedback()), search_string,
+ effect, control);
+
+ Node* new_position = jsgraph()->ZeroConstant();
+ if (node->op()->ValueInputCount() >= 4) {
+ Node* position = NodeProperties::GetValueInput(node, 3);
+ new_position = effect = graph()->NewNode(
+ simplified()->CheckSmi(p.feedback()), position, effect, control);
+ }
+
+ NodeProperties::ReplaceEffectInput(node, effect);
+ RelaxEffectsAndControls(node);
+ node->ReplaceInput(0, new_receiver);
+ node->ReplaceInput(1, new_search_string);
+ node->ReplaceInput(2, new_position);
+ node->TrimInputCount(3);
+ NodeProperties::ChangeOp(node, simplified()->StringIndexOf());
+ return Changed(node);
+ }
+ return NoChange();
+}
+
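+// For intuition: when a search string is provided, the reduction above turns
+//
+//   receiver.indexOf(searchString, position)
+//
+// into a simplified StringIndexOf(CheckString(receiver),
+// CheckString(searchString), CheckSmi(position)) node, with {position}
+// defaulting to zero when absent. Illustrative description only.
+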
Reduction JSCallReducer::ReduceJSConstructWithArrayLike(Node* node) {
DCHECK_EQ(IrOpcode::kJSConstructWithArrayLike, node->opcode());
CallFrequency frequency = CallFrequencyOf(node->op());
@@ -2328,9 +3588,9 @@ Reduction JSCallReducer::ReduceSoftDeoptimize(Node* node,
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
Node* frame_state = NodeProperties::FindFrameStateBefore(node);
- Node* deoptimize =
- graph()->NewNode(common()->Deoptimize(DeoptimizeKind::kSoft, reason),
- frame_state, effect, control);
+ Node* deoptimize = graph()->NewNode(
+ common()->Deoptimize(DeoptimizeKind::kSoft, reason, VectorSlotPair()),
+ frame_state, effect, control);
// TODO(bmeurer): This should be on the AdvancedReducer somehow.
NodeProperties::MergeControlToEnd(graph(), common(), deoptimize);
Revisit(graph()->end());
@@ -2339,6 +3599,571 @@ Reduction JSCallReducer::ReduceSoftDeoptimize(Node* node,
return Changed(node);
}
+namespace {
+
+// TODO(turbofan): This was copied from Crankshaft; it might be too restrictive.
+bool IsReadOnlyLengthDescriptor(Handle<Map> jsarray_map) {
+ DCHECK(!jsarray_map->is_dictionary_map());
+ Isolate* isolate = jsarray_map->GetIsolate();
+ Handle<Name> length_string = isolate->factory()->length_string();
+ DescriptorArray* descriptors = jsarray_map->instance_descriptors();
+ int number =
+ descriptors->SearchWithCache(isolate, *length_string, *jsarray_map);
+ DCHECK_NE(DescriptorArray::kNotFound, number);
+ return descriptors->GetDetails(number).IsReadOnly();
+}
+
+// TODO(turbofan): This was copied from Crankshaft; it might be too restrictive.
+bool CanInlineArrayResizeOperation(Handle<Map> receiver_map) {
+ Isolate* const isolate = receiver_map->GetIsolate();
+ if (!receiver_map->prototype()->IsJSArray()) return false;
+ Handle<JSArray> receiver_prototype(JSArray::cast(receiver_map->prototype()),
+ isolate);
+ return receiver_map->instance_type() == JS_ARRAY_TYPE &&
+ IsFastElementsKind(receiver_map->elements_kind()) &&
+ !receiver_map->is_dictionary_map() && receiver_map->is_extensible() &&
+ isolate->IsAnyInitialArrayPrototype(receiver_prototype) &&
+ !IsReadOnlyLengthDescriptor(receiver_map);
+}
+
+} // namespace
+
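+// Note: CanInlineArrayResizeOperation above restricts the push/pop/shift
+// reductions below to fast, extensible, non-dictionary JSArrays whose
+// prototype is an initial Array.prototype and whose "length" property is
+// writable.
+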
+// ES6 section 22.1.3.18 Array.prototype.push ( )
+Reduction JSCallReducer::ReduceArrayPrototypePush(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
+ CallParameters const& p = CallParametersOf(node->op());
+ if (p.speculation_mode() == SpeculationMode::kDisallowSpeculation) {
+ return NoChange();
+ }
+
+ if (!isolate()->IsNoElementsProtectorIntact()) return NoChange();
+
+ int const num_values = node->op()->ValueInputCount() - 2;
+ Node* receiver = NodeProperties::GetValueInput(node, 1);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+
+ // Try to determine the {receiver} map(s).
+ ZoneHandleSet<Map> receiver_maps;
+ NodeProperties::InferReceiverMapsResult result =
+ NodeProperties::InferReceiverMaps(receiver, effect, &receiver_maps);
+ if (result == NodeProperties::kNoReceiverMaps) return NoChange();
+ DCHECK_NE(0, receiver_maps.size());
+
+ ElementsKind kind = receiver_maps[0]->elements_kind();
+
+ for (Handle<Map> receiver_map : receiver_maps) {
+ if (!CanInlineArrayResizeOperation(receiver_map)) return NoChange();
+ if (!UnionElementsKindUptoPackedness(&kind, receiver_map->elements_kind()))
+ return NoChange();
+ }
+
+ // Install code dependencies on the {receiver} global array protector cell.
+ dependencies()->AssumePropertyCell(factory()->no_elements_protector());
+
+ // If the {receiver_maps} information is not reliable, we need
+ // to check that the {receiver} still has one of these maps.
+ if (result == NodeProperties::kUnreliableReceiverMaps) {
+ effect =
+ graph()->NewNode(simplified()->CheckMaps(CheckMapsFlag::kNone,
+ receiver_maps, p.feedback()),
+ receiver, effect, control);
+ }
+
+ // Collect the value inputs to push.
+ std::vector<Node*> values(num_values);
+ for (int i = 0; i < num_values; ++i) {
+ values[i] = NodeProperties::GetValueInput(node, 2 + i);
+ }
+
+ for (auto& value : values) {
+ if (IsSmiElementsKind(kind)) {
+ value = effect = graph()->NewNode(simplified()->CheckSmi(p.feedback()),
+ value, effect, control);
+ } else if (IsDoubleElementsKind(kind)) {
+ value = effect = graph()->NewNode(simplified()->CheckNumber(p.feedback()),
+ value, effect, control);
+ // Make sure we do not store signaling NaNs into double arrays.
+ value = graph()->NewNode(simplified()->NumberSilenceNaN(), value);
+ }
+ }
+
+ // Load the "length" property of the {receiver}.
+ Node* length = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSArrayLength(kind)), receiver,
+ effect, control);
+ Node* value = length;
+
+ // Check if we have any {values} to push.
+ if (num_values > 0) {
+ // Compute the resulting "length" of the {receiver}.
+ Node* new_length = value = graph()->NewNode(
+ simplified()->NumberAdd(), length, jsgraph()->Constant(num_values));
+
+ // Load the elements backing store of the {receiver}.
+ Node* elements = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSObjectElements()), receiver,
+ effect, control);
+ Node* elements_length = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForFixedArrayLength()), elements,
+ effect, control);
+
+ GrowFastElementsMode mode =
+ IsDoubleElementsKind(kind) ? GrowFastElementsMode::kDoubleElements
+ : GrowFastElementsMode::kSmiOrObjectElements;
+ elements = effect = graph()->NewNode(
+ simplified()->MaybeGrowFastElements(mode, p.feedback()), receiver,
+ elements,
+ graph()->NewNode(simplified()->NumberAdd(), length,
+ jsgraph()->Constant(num_values - 1)),
+ elements_length, effect, control);
+
+ // Update the JSArray::length field. Since this is observable,
+ // there must be no other check after this.
+ effect = graph()->NewNode(
+ simplified()->StoreField(AccessBuilder::ForJSArrayLength(kind)),
+ receiver, new_length, effect, control);
+
+ // Append the {values} to the {elements}.
+ for (int i = 0; i < num_values; ++i) {
+ Node* value = values[i];
+ Node* index = graph()->NewNode(simplified()->NumberAdd(), length,
+ jsgraph()->Constant(i));
+ effect = graph()->NewNode(
+ simplified()->StoreElement(AccessBuilder::ForFixedArrayElement(kind)),
+ elements, index, value, effect, control);
+ }
+ }
+
+ ReplaceWithValue(node, value, effect, control);
+ return Replace(value);
+}
+
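+// For intuition: for fast arrays the push reduction above roughly corresponds
+// to
+//
+//   const length = receiver.length;
+//   receiver.length = length + num_values;  // after growing the backing store
+//   for (let i = 0; i < num_values; i++) elements[length + i] = values[i];
+//   return length + num_values;
+//
+// with Smi checks (or Number checks plus NaN silencing) applied to the values
+// for Smi and double elements kinds. Illustrative sketch only.
+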
+// ES6 section 22.1.3.17 Array.prototype.pop ( )
+Reduction JSCallReducer::ReduceArrayPrototypePop(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
+ CallParameters const& p = CallParametersOf(node->op());
+ if (p.speculation_mode() == SpeculationMode::kDisallowSpeculation) {
+ return NoChange();
+ }
+
+ if (!isolate()->IsNoElementsProtectorIntact()) return NoChange();
+
+ Node* receiver = NodeProperties::GetValueInput(node, 1);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+
+ ZoneHandleSet<Map> receiver_maps;
+ NodeProperties::InferReceiverMapsResult result =
+ NodeProperties::InferReceiverMaps(receiver, effect, &receiver_maps);
+ if (result == NodeProperties::kNoReceiverMaps) return NoChange();
+ DCHECK_NE(0, receiver_maps.size());
+
+ ElementsKind kind = receiver_maps[0]->elements_kind();
+ for (Handle<Map> receiver_map : receiver_maps) {
+ if (!CanInlineArrayResizeOperation(receiver_map)) return NoChange();
+ // TODO(turbofan): Extend this to also handle fast holey double elements
+    // once we've got the hole NaN mess sorted out in TurboFan/V8.
+ if (receiver_map->elements_kind() == HOLEY_DOUBLE_ELEMENTS)
+ return NoChange();
+ if (!UnionElementsKindUptoPackedness(&kind, receiver_map->elements_kind()))
+ return NoChange();
+ }
+
+ // Install code dependencies on the {receiver} global array protector cell.
+ dependencies()->AssumePropertyCell(factory()->no_elements_protector());
+
+ // If the {receiver_maps} information is not reliable, we need
+ // to check that the {receiver} still has one of these maps.
+ if (result == NodeProperties::kUnreliableReceiverMaps) {
+ effect =
+ graph()->NewNode(simplified()->CheckMaps(CheckMapsFlag::kNone,
+ receiver_maps, p.feedback()),
+ receiver, effect, control);
+ }
+
+ // Load the "length" property of the {receiver}.
+ Node* length = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSArrayLength(kind)), receiver,
+ effect, control);
+
+ // Check if the {receiver} has any elements.
+ Node* check = graph()->NewNode(simplified()->NumberEqual(), length,
+ jsgraph()->ZeroConstant());
+ Node* branch =
+ graph()->NewNode(common()->Branch(BranchHint::kFalse), check, control);
+
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* etrue = effect;
+ Node* vtrue = jsgraph()->UndefinedConstant();
+
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Node* efalse = effect;
+ Node* vfalse;
+ {
+ // TODO(tebbi): We should trim the backing store if the capacity is too
+ // big, as implemented in elements.cc:ElementsAccessorBase::SetLengthImpl.
+
+ // Load the elements backing store from the {receiver}.
+ Node* elements = efalse = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSObjectElements()), receiver,
+ efalse, if_false);
+
+ // Ensure that we aren't popping from a copy-on-write backing store.
+ if (IsSmiOrObjectElementsKind(kind)) {
+ elements = efalse =
+ graph()->NewNode(simplified()->EnsureWritableFastElements(), receiver,
+ elements, efalse, if_false);
+ }
+
+ // Compute the new {length}.
+ length = graph()->NewNode(simplified()->NumberSubtract(), length,
+ jsgraph()->OneConstant());
+
+ // Store the new {length} to the {receiver}.
+ efalse = graph()->NewNode(
+ simplified()->StoreField(AccessBuilder::ForJSArrayLength(kind)),
+ receiver, length, efalse, if_false);
+
+ // Load the last entry from the {elements}.
+ vfalse = efalse = graph()->NewNode(
+ simplified()->LoadElement(AccessBuilder::ForFixedArrayElement(kind)),
+ elements, length, efalse, if_false);
+
+ // Store a hole to the element we just removed from the {receiver}.
+ efalse = graph()->NewNode(
+ simplified()->StoreElement(
+ AccessBuilder::ForFixedArrayElement(GetHoleyElementsKind(kind))),
+ elements, length, jsgraph()->TheHoleConstant(), efalse, if_false);
+ }
+
+ control = graph()->NewNode(common()->Merge(2), if_true, if_false);
+ effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
+ Node* value = graph()->NewNode(
+ common()->Phi(MachineRepresentation::kTagged, 2), vtrue, vfalse, control);
+
+  // Convert the hole to undefined. Do this last, so that the conversion
+  // operator can often be optimized away via strength reduction.
+ if (IsHoleyElementsKind(kind)) {
+ value =
+ graph()->NewNode(simplified()->ConvertTaggedHoleToUndefined(), value);
+ }
+
+ ReplaceWithValue(node, value, effect, control);
+ return Replace(value);
+}
+
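+// For intuition: the pop reduction above roughly corresponds to
+//
+//   if (receiver.length === 0) return undefined;
+//   const new_length = receiver.length - 1;
+//   receiver.length = new_length;
+//   const value = elements[new_length];
+//   elements[new_length] = hole;
+//   return value;  // the hole is converted to undefined for holey kinds
+//
+// Illustrative sketch only.
+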
+// ES6 section 22.1.3.22 Array.prototype.shift ( )
+Reduction JSCallReducer::ReduceArrayPrototypeShift(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
+ CallParameters const& p = CallParametersOf(node->op());
+ if (p.speculation_mode() == SpeculationMode::kDisallowSpeculation) {
+ return NoChange();
+ }
+
+ if (!isolate()->IsNoElementsProtectorIntact()) return NoChange();
+ Node* target = NodeProperties::GetValueInput(node, 0);
+ Node* receiver = NodeProperties::GetValueInput(node, 1);
+ Node* context = NodeProperties::GetContextInput(node);
+ Node* frame_state = NodeProperties::GetFrameStateInput(node);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+
+ ZoneHandleSet<Map> receiver_maps;
+ NodeProperties::InferReceiverMapsResult result =
+ NodeProperties::InferReceiverMaps(receiver, effect, &receiver_maps);
+ if (result == NodeProperties::kNoReceiverMaps) return NoChange();
+ DCHECK_NE(0, receiver_maps.size());
+
+ ElementsKind kind = receiver_maps[0]->elements_kind();
+ for (Handle<Map> receiver_map : receiver_maps) {
+ if (!CanInlineArrayResizeOperation(receiver_map)) return NoChange();
+ // TODO(turbofan): Extend this to also handle fast holey double elements
+    // once we've got the hole NaN mess sorted out in TurboFan/V8.
+ if (receiver_map->elements_kind() == HOLEY_DOUBLE_ELEMENTS)
+ return NoChange();
+ if (!UnionElementsKindUptoPackedness(&kind, receiver_map->elements_kind()))
+ return NoChange();
+ }
+
+ // Install code dependencies on the {receiver} global array protector cell.
+ dependencies()->AssumePropertyCell(factory()->no_elements_protector());
+
+ // If the {receiver_maps} information is not reliable, we need
+ // to check that the {receiver} still has one of these maps.
+ if (result == NodeProperties::kUnreliableReceiverMaps) {
+ effect =
+ graph()->NewNode(simplified()->CheckMaps(CheckMapsFlag::kNone,
+ receiver_maps, p.feedback()),
+ receiver, effect, control);
+ }
+
+ // Load length of the {receiver}.
+ Node* length = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSArrayLength(kind)), receiver,
+ effect, control);
+
+ // Return undefined if {receiver} has no elements.
+ Node* check0 = graph()->NewNode(simplified()->NumberEqual(), length,
+ jsgraph()->ZeroConstant());
+ Node* branch0 =
+ graph()->NewNode(common()->Branch(BranchHint::kFalse), check0, control);
+
+ Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
+ Node* etrue0 = effect;
+ Node* vtrue0 = jsgraph()->UndefinedConstant();
+
+ Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
+ Node* efalse0 = effect;
+ Node* vfalse0;
+ {
+ // Check if we should take the fast-path.
+ Node* check1 =
+ graph()->NewNode(simplified()->NumberLessThanOrEqual(), length,
+ jsgraph()->Constant(JSArray::kMaxCopyElements));
+ Node* branch1 = graph()->NewNode(common()->Branch(BranchHint::kTrue),
+ check1, if_false0);
+
+ Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
+ Node* etrue1 = efalse0;
+ Node* vtrue1;
+ {
+ Node* elements = etrue1 = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSObjectElements()),
+ receiver, etrue1, if_true1);
+
+ // Load the first element here, which we return below.
+ vtrue1 = etrue1 = graph()->NewNode(
+ simplified()->LoadElement(AccessBuilder::ForFixedArrayElement(kind)),
+ elements, jsgraph()->ZeroConstant(), etrue1, if_true1);
+
+ // Ensure that we aren't shifting a copy-on-write backing store.
+ if (IsSmiOrObjectElementsKind(kind)) {
+ elements = etrue1 =
+ graph()->NewNode(simplified()->EnsureWritableFastElements(),
+ receiver, elements, etrue1, if_true1);
+ }
+
+ // Shift the remaining {elements} by one towards the start.
+ Node* loop = graph()->NewNode(common()->Loop(2), if_true1, if_true1);
+ Node* eloop =
+ graph()->NewNode(common()->EffectPhi(2), etrue1, etrue1, loop);
+ Node* terminate = graph()->NewNode(common()->Terminate(), eloop, loop);
+ NodeProperties::MergeControlToEnd(graph(), common(), terminate);
+ Node* index = graph()->NewNode(
+ common()->Phi(MachineRepresentation::kTagged, 2),
+ jsgraph()->OneConstant(),
+ jsgraph()->Constant(JSArray::kMaxCopyElements - 1), loop);
+
+ {
+ Node* check2 =
+ graph()->NewNode(simplified()->NumberLessThan(), index, length);
+ Node* branch2 = graph()->NewNode(common()->Branch(), check2, loop);
+
+ if_true1 = graph()->NewNode(common()->IfFalse(), branch2);
+ etrue1 = eloop;
+
+ Node* control = graph()->NewNode(common()->IfTrue(), branch2);
+ Node* effect = etrue1;
+
+ ElementAccess const access = AccessBuilder::ForFixedArrayElement(kind);
+ Node* value = effect =
+ graph()->NewNode(simplified()->LoadElement(access), elements, index,
+ effect, control);
+ effect =
+ graph()->NewNode(simplified()->StoreElement(access), elements,
+ graph()->NewNode(simplified()->NumberSubtract(),
+ index, jsgraph()->OneConstant()),
+ value, effect, control);
+
+ loop->ReplaceInput(1, control);
+ eloop->ReplaceInput(1, effect);
+ index->ReplaceInput(1,
+ graph()->NewNode(simplified()->NumberAdd(), index,
+ jsgraph()->OneConstant()));
+ }
+
+ // Compute the new {length}.
+ length = graph()->NewNode(simplified()->NumberSubtract(), length,
+ jsgraph()->OneConstant());
+
+ // Store the new {length} to the {receiver}.
+ etrue1 = graph()->NewNode(
+ simplified()->StoreField(AccessBuilder::ForJSArrayLength(kind)),
+ receiver, length, etrue1, if_true1);
+
+ // Store a hole to the element we just removed from the {receiver}.
+ etrue1 = graph()->NewNode(
+ simplified()->StoreElement(
+ AccessBuilder::ForFixedArrayElement(GetHoleyElementsKind(kind))),
+ elements, length, jsgraph()->TheHoleConstant(), etrue1, if_true1);
+ }
+
+ Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
+ Node* efalse1 = efalse0;
+ Node* vfalse1;
+ {
+ // Call the generic C++ implementation.
+ const int builtin_index = Builtins::kArrayShift;
+ CallDescriptor const* const desc = Linkage::GetCEntryStubCallDescriptor(
+ graph()->zone(), 1, BuiltinArguments::kNumExtraArgsWithReceiver,
+ Builtins::name(builtin_index), node->op()->properties(),
+ CallDescriptor::kNeedsFrameState);
+ Node* stub_code =
+ jsgraph()->CEntryStubConstant(1, kDontSaveFPRegs, kArgvOnStack, true);
+ Address builtin_entry = Builtins::CppEntryOf(builtin_index);
+ Node* entry = jsgraph()->ExternalConstant(
+ ExternalReference(builtin_entry, isolate()));
+ Node* argc =
+ jsgraph()->Constant(BuiltinArguments::kNumExtraArgsWithReceiver);
+ if_false1 = efalse1 = vfalse1 =
+ graph()->NewNode(common()->Call(desc), stub_code, receiver,
+ jsgraph()->PaddingConstant(), argc, target,
+ jsgraph()->UndefinedConstant(), entry, argc, context,
+ frame_state, efalse1, if_false1);
+ }
+
+ if_false0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
+ efalse0 =
+ graph()->NewNode(common()->EffectPhi(2), etrue1, efalse1, if_false0);
+ vfalse0 = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ vtrue1, vfalse1, if_false0);
+ }
+
+ control = graph()->NewNode(common()->Merge(2), if_true0, if_false0);
+ effect = graph()->NewNode(common()->EffectPhi(2), etrue0, efalse0, control);
+ Node* value =
+ graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ vtrue0, vfalse0, control);
+
+ // Convert the hole to undefined. Do this last, so that the conversion
+ // operator can often be optimized away via strength reduction.
+ if (IsHoleyElementsKind(kind)) {
+ value =
+ graph()->NewNode(simplified()->ConvertTaggedHoleToUndefined(), value);
+ }
+
+ ReplaceWithValue(node, value, effect, control);
+ return Replace(value);
+}
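
Aside from graph plumbing, the fast path above encodes the usual shift loop: read element 0, slide the remaining elements one slot towards the start, shrink the length, and store a hole into the vacated slot. The following is a plain C++ analogue of that behaviour (illustrative only, not V8 code; std::optional stands in for the undefined result on an empty array, and the kMaxCopyElements cutoff plus the C++ builtin fallback are omitted):

#include <optional>
#include <vector>

template <typename T>
std::optional<T> ArrayShiftSketch(std::vector<T>& elements) {
  if (elements.empty()) return std::nullopt;  // length == 0 -> undefined
  T first = elements.front();                 // element that shift() returns
  for (size_t i = 1; i < elements.size(); ++i) {
    elements[i - 1] = elements[i];            // shift remaining elements left
  }
  elements.pop_back();                        // new length = old length - 1
  return first;
}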
+
+// ES6 section 21.1.3.1 String.prototype.charAt ( pos )
+Reduction JSCallReducer::ReduceStringPrototypeCharAt(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
+ CallParameters const& p = CallParametersOf(node->op());
+ if (p.speculation_mode() == SpeculationMode::kDisallowSpeculation) {
+ return NoChange();
+ }
+
+ Node* receiver = NodeProperties::GetValueInput(node, 1);
+ Node* index = jsgraph()->ZeroConstant();
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+
+ receiver = effect = graph()->NewNode(simplified()->CheckString(p.feedback()),
+ receiver, effect, control);
+ if (node->op()->ValueInputCount() >= 3) {
+ index = effect = graph()->NewNode(simplified()->CheckSmi(p.feedback()),
+ NodeProperties::GetValueInput(node, 2),
+ effect, control);
+ // Map -0 and NaN to 0 (as per ToInteger), and the values in
+ // the [-2^31,-1] range to the [2^31,2^32-1] range, which will
+ // be considered out-of-bounds as well, because of the maximal
+ // String length limit in V8.
+ STATIC_ASSERT(String::kMaxLength <= kMaxInt);
+ index = graph()->NewNode(simplified()->NumberToUint32(), index);
+ }
+
+ // Determine the {receiver} length.
+ Node* receiver_length =
+ graph()->NewNode(simplified()->StringLength(), receiver);
+
+ // Check if {index} is less than {receiver} length.
+ Node* check =
+ graph()->NewNode(simplified()->NumberLessThan(), index, receiver_length);
+ Node* branch =
+ graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
+
+ // Return the character from the {receiver} as single character string.
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+
+ Node* masked_index = graph()->NewNode(simplified()->MaskIndexWithBound(),
+ index, receiver_length);
+
+ Node* vtrue = graph()->NewNode(simplified()->StringCharAt(), receiver,
+ masked_index, if_true);
+
+ // Return the empty string otherwise.
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Node* vfalse = jsgraph()->EmptyStringConstant();
+
+ control = graph()->NewNode(common()->Merge(2), if_true, if_false);
+ Node* value = graph()->NewNode(
+ common()->Phi(MachineRepresentation::kTagged, 2), vtrue, vfalse, control);
+
+ ReplaceWithValue(node, value, effect, control);
+ return Replace(value);
+}
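
A minimal sketch of the scalar logic this charAt reduction encodes, assuming a one-byte string for simplicity (illustrative only, not V8 code). The index is coerced to uint32 first, so negative positions become large values that fail the length check; MaskIndexWithBound is a bounds-masking hardening step that leaves an in-bounds index unchanged, so it is elided here:

#include <cstdint>
#include <string>

std::string CharAtSketch(const std::string& receiver, int32_t pos) {
  uint32_t index = static_cast<uint32_t>(pos);  // NumberToUint32: -1 -> 0xFFFFFFFF
  if (index < receiver.size()) {                // NumberLessThan(index, length)
    return std::string(1, receiver[index]);     // StringCharAt on the true branch
  }
  return std::string();                         // empty string otherwise
}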
+
+// ES6 section 21.1.3.2 String.prototype.charCodeAt ( pos )
+Reduction JSCallReducer::ReduceStringPrototypeCharCodeAt(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
+ CallParameters const& p = CallParametersOf(node->op());
+ if (p.speculation_mode() == SpeculationMode::kDisallowSpeculation) {
+ return NoChange();
+ }
+
+ Node* receiver = NodeProperties::GetValueInput(node, 1);
+ Node* index = jsgraph()->ZeroConstant();
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+
+ receiver = effect = graph()->NewNode(simplified()->CheckString(p.feedback()),
+ receiver, effect, control);
+ if (node->op()->ValueInputCount() >= 3) {
+ index = effect = graph()->NewNode(simplified()->CheckSmi(p.feedback()),
+ NodeProperties::GetValueInput(node, 2),
+ effect, control);
+
+ // Map -0 and NaN to 0 (as per ToInteger), and the values in
+ // the [-2^31,-1] range to the [2^31,2^32-1] range, which will
+ // be considered out-of-bounds as well, because of the maximal
+ // String length limit in V8.
+ STATIC_ASSERT(String::kMaxLength <= kMaxInt);
+ index = graph()->NewNode(simplified()->NumberToUint32(), index);
+ }
+
+ // Determine the {receiver} length.
+ Node* receiver_length =
+ graph()->NewNode(simplified()->StringLength(), receiver);
+
+ // Check if {index} is less than {receiver} length.
+ Node* check =
+ graph()->NewNode(simplified()->NumberLessThan(), index, receiver_length);
+ Node* branch =
+ graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
+
+ // Load the character from the {receiver}.
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+
+ Node* masked_index = graph()->NewNode(simplified()->MaskIndexWithBound(),
+ index, receiver_length);
+
+ Node* vtrue = graph()->NewNode(simplified()->StringCharCodeAt(), receiver,
+ masked_index, if_true);
+
+ // Return NaN otherwise.
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Node* vfalse = jsgraph()->NaNConstant();
+
+ control = graph()->NewNode(common()->Merge(2), if_true, if_false);
+ Node* value = graph()->NewNode(
+ common()->Phi(MachineRepresentation::kTagged, 2), vtrue, vfalse, control);
+
+ ReplaceWithValue(node, value, effect, control);
+ return Replace(value);
+}
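
The charCodeAt reduction mirrors the one above; the only observable difference is the out-of-bounds result, which is NaN rather than the empty string, matching the NaNConstant() on the false branch. A correspondingly small sketch (again one-byte strings, illustrative only):

#include <cmath>
#include <cstdint>
#include <string>

double CharCodeAtSketch(const std::string& receiver, int32_t pos) {
  uint32_t index = static_cast<uint32_t>(pos);           // NumberToUint32
  if (index < receiver.size()) {
    return static_cast<unsigned char>(receiver[index]);  // StringCharCodeAt
  }
  return std::nan("");                                   // NaN otherwise
}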
+
Graph* JSCallReducer::graph() const { return jsgraph()->graph(); }
Isolate* JSCallReducer::isolate() const { return jsgraph()->isolate(); }
diff --git a/deps/v8/src/compiler/js-call-reducer.h b/deps/v8/src/compiler/js-call-reducer.h
index 6e2353c4c1..b2656b6be8 100644
--- a/deps/v8/src/compiler/js-call-reducer.h
+++ b/deps/v8/src/compiler/js-call-reducer.h
@@ -15,6 +15,7 @@ namespace internal {
// Forward declarations.
class CompilationDependencies;
class Factory;
+class VectorSlotPair;
namespace compiler {
@@ -24,7 +25,6 @@ class CommonOperatorBuilder;
class JSGraph;
class JSOperatorBuilder;
class SimplifiedOperatorBuilder;
-class VectorSlotPair;
// Performs strength reduction on {JSConstruct} and {JSCall} nodes,
// which might allow inlining or other optimizations to be performed afterwards.
@@ -55,7 +55,6 @@ class JSCallReducer final : public AdvancedReducer {
Reduction ReduceArrayConstructor(Node* node);
Reduction ReduceBooleanConstructor(Node* node);
Reduction ReduceCallApiFunction(Node* node, Handle<JSFunction> function);
- Reduction ReduceNumberConstructor(Node* node);
Reduction ReduceFunctionPrototypeApply(Node* node);
Reduction ReduceFunctionPrototypeBind(Node* node);
Reduction ReduceFunctionPrototypeCall(Node* node);
@@ -73,8 +72,18 @@ class JSCallReducer final : public AdvancedReducer {
Reduction ReduceReflectGetPrototypeOf(Node* node);
Reduction ReduceReflectHas(Node* node);
Reduction ReduceArrayForEach(Handle<JSFunction> function, Node* node);
+ Reduction ReduceArrayReduce(Handle<JSFunction> function, Node* node);
+ Reduction ReduceArrayReduceRight(Handle<JSFunction> function, Node* node);
Reduction ReduceArrayMap(Handle<JSFunction> function, Node* node);
Reduction ReduceArrayFilter(Handle<JSFunction> function, Node* node);
+ enum class ArrayFindVariant : uint8_t { kFind, kFindIndex };
+ Reduction ReduceArrayFind(ArrayFindVariant variant,
+ Handle<JSFunction> function, Node* node);
+ Reduction ReduceArrayEvery(Handle<JSFunction> function, Node* node);
+ Reduction ReduceArraySome(Handle<JSFunction> function, Node* node);
+ Reduction ReduceArrayPrototypePush(Node* node);
+ Reduction ReduceArrayPrototypePop(Node* node);
+ Reduction ReduceArrayPrototypeShift(Node* node);
Reduction ReduceCallOrConstructWithArrayLikeOrSpread(
Node* node, int arity, CallFrequency const& frequency,
VectorSlotPair const& feedback);
@@ -85,6 +94,10 @@ class JSCallReducer final : public AdvancedReducer {
Reduction ReduceJSCallWithArrayLike(Node* node);
Reduction ReduceJSCallWithSpread(Node* node);
Reduction ReduceReturnReceiver(Node* node);
+ Reduction ReduceStringPrototypeIndexOf(Handle<JSFunction> function,
+ Node* node);
+ Reduction ReduceStringPrototypeCharAt(Node* node);
+ Reduction ReduceStringPrototypeCharCodeAt(Node* node);
Reduction ReduceSoftDeoptimize(Node* node, DeoptimizeReason reason);
@@ -107,10 +120,20 @@ class JSCallReducer final : public AdvancedReducer {
Node* effect, Node** check_fail,
Node** control);
+ // Begin the central loop of a higher-order array builtin. A Loop is wired
+ // into {control}, an EffectPhi into {effect}, and the array index {k} is
+ // threaded into a Phi, which is returned. After the call returns, it is
+ // helpful to save the value of {control} (now the Loop node) and of
+ // {effect} (the corresponding EffectPhi).
+ Node* WireInLoopStart(Node* k, Node** control, Node** effect);
+ void WireInLoopEnd(Node* loop, Node* eloop, Node* vloop, Node* k,
+ Node* control, Node* effect);
+
// Load receiver[k], first bounding k by receiver array length.
// k is thusly changed, and the effect is changed as well.
Node* SafeLoadElement(ElementsKind kind, Node* receiver, Node* control,
- Node** effect, Node** k);
+ Node** effect, Node** k,
+ const VectorSlotPair& feedback);
Graph* graph() const;
JSGraph* jsgraph() const { return jsgraph_; }
diff --git a/deps/v8/src/compiler/js-create-lowering.cc b/deps/v8/src/compiler/js-create-lowering.cc
index 9b0601f8f1..d3b9ee4e70 100644
--- a/deps/v8/src/compiler/js-create-lowering.cc
+++ b/deps/v8/src/compiler/js-create-lowering.cc
@@ -525,7 +525,7 @@ Reduction JSCreateLowering::ReduceNewArray(Node* node, Node* length,
// This has to be kept in sync with src/runtime/runtime-array.cc,
// where this limit is protected.
length = effect = graph()->NewNode(
- simplified()->CheckBounds(), length,
+ simplified()->CheckBounds(VectorSlotPair()), length,
jsgraph()->Constant(JSArray::kInitialMaxFastElementArray), effect,
control);
@@ -617,15 +617,16 @@ Reduction JSCreateLowering::ReduceNewArray(Node* node,
if (IsSmiElementsKind(elements_kind)) {
for (auto& value : values) {
if (!NodeProperties::GetType(value)->Is(Type::SignedSmall())) {
- value = effect =
- graph()->NewNode(simplified()->CheckSmi(), value, effect, control);
+ value = effect = graph()->NewNode(
+ simplified()->CheckSmi(VectorSlotPair()), value, effect, control);
}
}
} else if (IsDoubleElementsKind(elements_kind)) {
for (auto& value : values) {
if (!NodeProperties::GetType(value)->Is(Type::Number())) {
- value = effect = graph()->NewNode(simplified()->CheckNumber(), value,
- effect, control);
+ value = effect =
+ graph()->NewNode(simplified()->CheckNumber(VectorSlotPair()), value,
+ effect, control);
}
// Make sure we do not store signaling NaNs into double arrays.
value = graph()->NewNode(simplified()->NumberSilenceNaN(), value);
@@ -913,6 +914,7 @@ Reduction JSCreateLowering::ReduceJSCreateClosure(Node* node) {
DCHECK(!function_map->is_dictionary_map());
// Emit code to allocate the JSFunction instance.
+ STATIC_ASSERT(JSFunction::kSizeWithoutPrototype == 7 * kPointerSize);
AllocationBuilder a(jsgraph(), effect, control);
a.Allocate(function_map->instance_size());
a.Store(AccessBuilder::ForMap(), function_map);
@@ -980,9 +982,9 @@ Reduction JSCreateLowering::ReduceJSCreateKeyValueArray(Node* node) {
AllocationBuilder aa(jsgraph(), effect, graph()->start());
aa.AllocateArray(2, factory()->fixed_array_map());
aa.Store(AccessBuilder::ForFixedArrayElement(PACKED_ELEMENTS),
- jsgraph()->Constant(0), key);
+ jsgraph()->ZeroConstant(), key);
aa.Store(AccessBuilder::ForFixedArrayElement(PACKED_ELEMENTS),
- jsgraph()->Constant(1), value);
+ jsgraph()->OneConstant(), value);
Node* elements = aa.Finish();
AllocationBuilder a(jsgraph(), elements, graph()->start());
diff --git a/deps/v8/src/compiler/js-generic-lowering.cc b/deps/v8/src/compiler/js-generic-lowering.cc
index d06717717d..c09dcbc1b3 100644
--- a/deps/v8/src/compiler/js-generic-lowering.cc
+++ b/deps/v8/src/compiler/js-generic-lowering.cc
@@ -251,32 +251,17 @@ void JSGenericLowering::LowerJSStoreGlobal(Node* node) {
const StoreGlobalParameters& p = StoreGlobalParametersOf(node->op());
Node* frame_state = NodeProperties::GetFrameStateInput(node);
Node* outer_state = frame_state->InputAt(kFrameStateOuterStateInput);
- Node* context = NodeProperties::GetContextInput(node);
- Node* effect = NodeProperties::GetEffectInput(node);
- Node* control = NodeProperties::GetControlInput(node);
- // Load global object from the context.
- Node* native_context = effect =
- graph()->NewNode(machine()->Load(MachineType::AnyTagged()), context,
- jsgraph()->IntPtrConstant(
- Context::SlotOffset(Context::NATIVE_CONTEXT_INDEX)),
- effect, control);
- Node* global = effect = graph()->NewNode(
- machine()->Load(MachineType::AnyTagged()), native_context,
- jsgraph()->IntPtrConstant(Context::SlotOffset(Context::EXTENSION_INDEX)),
- effect, control);
- NodeProperties::ReplaceEffectInput(node, effect);
- node->InsertInput(zone(), 0, global);
- node->InsertInput(zone(), 1, jsgraph()->HeapConstant(p.name()));
- node->InsertInput(zone(), 3, jsgraph()->SmiConstant(p.feedback().index()));
+ node->InsertInput(zone(), 0, jsgraph()->HeapConstant(p.name()));
+ node->InsertInput(zone(), 2, jsgraph()->SmiConstant(p.feedback().index()));
if (outer_state->opcode() != IrOpcode::kFrameState) {
Callable callable =
- CodeFactory::StoreGlobalIC(isolate(), p.language_mode());
+ Builtins::CallableFor(isolate(), Builtins::kStoreGlobalICTrampoline);
ReplaceWithStubCall(node, callable, flags);
} else {
Callable callable =
- CodeFactory::StoreGlobalICInOptimizedCode(isolate(), p.language_mode());
+ Builtins::CallableFor(isolate(), Builtins::kStoreGlobalIC);
Node* vector = jsgraph()->HeapConstant(p.feedback().vector());
- node->InsertInput(zone(), 4, vector);
+ node->InsertInput(zone(), 3, vector);
ReplaceWithStubCall(node, callable, flags);
}
}
@@ -708,6 +693,10 @@ void JSGenericLowering::LowerJSGeneratorRestoreContinuation(Node* node) {
UNREACHABLE(); // Eliminated in typed lowering.
}
+void JSGenericLowering::LowerJSGeneratorRestoreInputOrDebugPos(Node* node) {
+ UNREACHABLE(); // Eliminated in typed lowering.
+}
+
void JSGenericLowering::LowerJSGeneratorRestoreRegister(Node* node) {
UNREACHABLE(); // Eliminated in typed lowering.
}
diff --git a/deps/v8/src/compiler/js-graph.cc b/deps/v8/src/compiler/js-graph.cc
index 12c610da56..cb3c620117 100644
--- a/deps/v8/src/compiler/js-graph.cc
+++ b/deps/v8/src/compiler/js-graph.cc
@@ -305,10 +305,6 @@ Node* JSGraph::Dead() {
return CACHED(kDead, graph()->NewNode(common()->Dead()));
}
-Node* JSGraph::DeadValue() {
- return CACHED(kDeadValue, graph()->NewNode(common()->DeadValue()));
-}
-
void JSGraph::GetCachedNodes(NodeVector* nodes) {
cache_.GetCachedNodes(nodes);
for (size_t i = 0; i < arraysize(cached_nodes_); i++) {
diff --git a/deps/v8/src/compiler/js-graph.h b/deps/v8/src/compiler/js-graph.h
index a685fd69a8..f5b4bdc181 100644
--- a/deps/v8/src/compiler/js-graph.h
+++ b/deps/v8/src/compiler/js-graph.h
@@ -155,9 +155,6 @@ class V8_EXPORT_PRIVATE JSGraph : public NON_EXPORTED_BASE(ZoneObject) {
// Create a control node that serves as dependency for dead nodes.
Node* Dead();
- // Sentinel for a value resulting from unreachable computations.
- Node* DeadValue();
-
CommonOperatorBuilder* common() const { return common_; }
JSOperatorBuilder* javascript() const { return javascript_; }
SimplifiedOperatorBuilder* simplified() const { return simplified_; }
@@ -199,7 +196,6 @@ class V8_EXPORT_PRIVATE JSGraph : public NON_EXPORTED_BASE(ZoneObject) {
kEmptyStateValues,
kSingleDeadTypedStateValues,
kDead,
- kDeadValue,
kNumCachedNodes // Must remain last.
};
diff --git a/deps/v8/src/compiler/js-inlining-heuristic.cc b/deps/v8/src/compiler/js-inlining-heuristic.cc
index 9cff51985a..c9909dcb75 100644
--- a/deps/v8/src/compiler/js-inlining-heuristic.cc
+++ b/deps/v8/src/compiler/js-inlining-heuristic.cc
@@ -6,6 +6,7 @@
#include "src/compilation-info.h"
#include "src/compiler/common-operator.h"
+#include "src/compiler/compiler-source-position-table.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/simplified-operator.h"
#include "src/objects-inl.h"
@@ -556,6 +557,8 @@ void JSInliningHeuristic::CreateOrReuseDispatch(Node* node, Node* callee,
Node** if_successes,
Node** calls, Node** inputs,
int input_count) {
+ SourcePositionTable::Scope position(
+ source_positions_, source_positions_->GetSourcePosition(node));
if (TryReuseDispatch(node, callee, candidate, if_successes, calls, inputs,
input_count)) {
return;
diff --git a/deps/v8/src/compiler/js-inlining-heuristic.h b/deps/v8/src/compiler/js-inlining-heuristic.h
index dffa5cfd6a..f4f24f41b4 100644
--- a/deps/v8/src/compiler/js-inlining-heuristic.h
+++ b/deps/v8/src/compiler/js-inlining-heuristic.h
@@ -22,6 +22,7 @@ class JSInliningHeuristic final : public AdvancedReducer {
inliner_(editor, local_zone, info, jsgraph, source_positions),
candidates_(local_zone),
seen_(local_zone),
+ source_positions_(source_positions),
jsgraph_(jsgraph) {}
const char* reducer_name() const override { return "JSInliningHeuristic"; }
@@ -85,6 +86,7 @@ class JSInliningHeuristic final : public AdvancedReducer {
JSInliner inliner_;
Candidates candidates_;
ZoneSet<NodeId> seen_;
+ SourcePositionTable* source_positions_;
JSGraph* const jsgraph_;
int cumulative_count_ = 0;
};
diff --git a/deps/v8/src/compiler/js-intrinsic-lowering.cc b/deps/v8/src/compiler/js-intrinsic-lowering.cc
index 2322b8ac3a..dc1ec521f2 100644
--- a/deps/v8/src/compiler/js-intrinsic-lowering.cc
+++ b/deps/v8/src/compiler/js-intrinsic-lowering.cc
@@ -135,7 +135,8 @@ Reduction JSIntrinsicLowering::ReduceDeoptimizeNow(Node* node) {
// TODO(bmeurer): Move MergeControlToEnd() to the AdvancedReducer.
Node* deoptimize = graph()->NewNode(
- common()->Deoptimize(DeoptimizeKind::kEager, DeoptimizeReason::kNoReason),
+ common()->Deoptimize(DeoptimizeKind::kEager,
+ DeoptimizeReason::kDeoptimizeNow, VectorSlotPair()),
frame_state, effect, control);
NodeProperties::MergeControlToEnd(graph(), common(), deoptimize);
Revisit(graph()->end());
diff --git a/deps/v8/src/compiler/js-native-context-specialization.cc b/deps/v8/src/compiler/js-native-context-specialization.cc
index a6786da157..b2f8c567e2 100644
--- a/deps/v8/src/compiler/js-native-context-specialization.cc
+++ b/deps/v8/src/compiler/js-native-context-specialization.cc
@@ -20,6 +20,7 @@
#include "src/feedback-vector.h"
#include "src/field-index-inl.h"
#include "src/isolate-inl.h"
+#include "src/vector-slot-pair.h"
namespace v8 {
namespace internal {
@@ -596,8 +597,8 @@ Reduction JSNativeContextSpecialization::ReduceGlobalAccess(
representation = MachineRepresentation::kTaggedPointer;
} else {
// Check that the {value} is a Smi.
- value = effect = graph()->NewNode(simplified()->CheckSmi(), value,
- effect, control);
+ value = effect = graph()->NewNode(
+ simplified()->CheckSmi(VectorSlotPair()), value, effect, control);
property_cell_value_type = Type::SignedSmall();
representation = MachineRepresentation::kTaggedSigned;
}
@@ -1061,13 +1062,11 @@ Reduction JSNativeContextSpecialization::ReduceElementAccess(
if (access_mode == AccessMode::kStore) return NoChange();
// Ensure that the {receiver} is actually a String.
- receiver = effect = graph()->NewNode(simplified()->CheckString(), receiver,
- effect, control);
+ receiver = effect = graph()->NewNode(
+ simplified()->CheckString(VectorSlotPair()), receiver, effect, control);
// Determine the {receiver} length.
- Node* length = effect = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForStringLength()), receiver,
- effect, control);
+ Node* length = graph()->NewNode(simplified()->StringLength(), receiver);
// Load the single character string from {receiver} or yield undefined
// if the {index} is out of bounds (depending on the {load_mode}).
@@ -1425,9 +1424,9 @@ Reduction JSNativeContextSpecialization::ReduceSoftDeoptimize(
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
Node* frame_state = NodeProperties::FindFrameStateBefore(node);
- Node* deoptimize =
- graph()->NewNode(common()->Deoptimize(DeoptimizeKind::kSoft, reason),
- frame_state, effect, control);
+ Node* deoptimize = graph()->NewNode(
+ common()->Deoptimize(DeoptimizeKind::kSoft, reason, VectorSlotPair()),
+ frame_state, effect, control);
// TODO(bmeurer): This should be on the AdvancedReducer somehow.
NodeProperties::MergeControlToEnd(graph(), common(), deoptimize);
Revisit(graph()->end());
@@ -1504,7 +1503,7 @@ Reduction JSNativeContextSpecialization::ReduceJSLoadProperty(Node* node) {
Node* check = graph()->NewNode(simplified()->ReferenceEqual(),
receiver_map, enumerator);
effect = graph()->NewNode(
- simplified()->CheckIf(DeoptimizeReason::kNoReason), check, effect,
+ simplified()->CheckIf(DeoptimizeReason::kWrongMap), check, effect,
control);
}
@@ -1525,9 +1524,9 @@ Reduction JSNativeContextSpecialization::ReduceJSLoadProperty(Node* node) {
simplified()->BooleanNot(),
graph()->NewNode(simplified()->ReferenceEqual(), enum_indices,
jsgraph()->EmptyFixedArrayConstant()));
- effect =
- graph()->NewNode(simplified()->CheckIf(DeoptimizeReason::kNoReason),
- check, effect, control);
+ effect = graph()->NewNode(
+ simplified()->CheckIf(DeoptimizeReason::kWrongEnumIndices), check,
+ effect, control);
// Determine the index from the {enum_indices}.
index = effect = graph()->NewNode(
@@ -1775,7 +1774,7 @@ JSNativeContextSpecialization::BuildPropertyStore(
Node* check =
graph()->NewNode(simplified()->ReferenceEqual(), value, constant_value);
effect =
- graph()->NewNode(simplified()->CheckIf(DeoptimizeReason::kNoReason),
+ graph()->NewNode(simplified()->CheckIf(DeoptimizeReason::kWrongValue),
check, effect, control);
value = constant_value;
} else if (access_info.IsAccessorConstant()) {
@@ -1809,8 +1808,9 @@ JSNativeContextSpecialization::BuildPropertyStore(
access_mode == AccessMode::kStoreInLiteral);
switch (field_representation) {
case MachineRepresentation::kFloat64: {
- value = effect = graph()->NewNode(simplified()->CheckNumber(), value,
- effect, control);
+ value = effect =
+ graph()->NewNode(simplified()->CheckNumber(VectorSlotPair()), value,
+ effect, control);
if (!field_index.is_inobject() || field_index.is_hidden_field() ||
!FLAG_unbox_double_fields) {
if (access_info.HasTransitionMap()) {
@@ -1852,8 +1852,8 @@ JSNativeContextSpecialization::BuildPropertyStore(
Node* check = graph()->NewNode(simplified()->NumberEqual(),
current_value, value);
effect = graph()->NewNode(
- simplified()->CheckIf(DeoptimizeReason::kNoReason), check, effect,
- control);
+ simplified()->CheckIf(DeoptimizeReason::kWrongValue), check,
+ effect, control);
return ValueEffectControl(value, effect, control);
}
break;
@@ -1871,14 +1871,14 @@ JSNativeContextSpecialization::BuildPropertyStore(
Node* check = graph()->NewNode(simplified()->ReferenceEqual(),
current_value, value);
effect = graph()->NewNode(
- simplified()->CheckIf(DeoptimizeReason::kNoReason), check, effect,
- control);
+ simplified()->CheckIf(DeoptimizeReason::kWrongValue), check,
+ effect, control);
return ValueEffectControl(value, effect, control);
}
if (field_representation == MachineRepresentation::kTaggedSigned) {
- value = effect = graph()->NewNode(simplified()->CheckSmi(), value,
- effect, control);
+ value = effect = graph()->NewNode(
+ simplified()->CheckSmi(VectorSlotPair()), value, effect, control);
field_access.write_barrier_kind = kNoWriteBarrier;
} else if (field_representation ==
@@ -2007,7 +2007,7 @@ Reduction JSNativeContextSpecialization::ReduceJSStoreDataPropertyInLiteral(
Node* name = NodeProperties::GetValueInput(node, 1);
Node* check = graph()->NewNode(simplified()->ReferenceEqual(), name,
jsgraph()->HeapConstant(cached_name));
- effect = graph()->NewNode(simplified()->CheckIf(DeoptimizeReason::kNoReason),
+ effect = graph()->NewNode(simplified()->CheckIf(DeoptimizeReason::kWrongName),
check, effect, control);
Node* value = NodeProperties::GetValueInput(node, 2);
@@ -2127,13 +2127,14 @@ JSNativeContextSpecialization::BuildElementAccess(
// Check that the {index} is a valid array index, we do the actual
// bounds check below and just skip the store below if it's out of
// bounds for the {receiver}.
- index = effect = graph()->NewNode(simplified()->CheckBounds(), index,
- jsgraph()->Constant(Smi::kMaxValue),
- effect, control);
+ index = effect = graph()->NewNode(
+ simplified()->CheckBounds(VectorSlotPair()), index,
+ jsgraph()->Constant(Smi::kMaxValue), effect, control);
} else {
// Check that the {index} is in the valid range for the {receiver}.
- index = effect = graph()->NewNode(simplified()->CheckBounds(), index,
- length, effect, control);
+ index = effect =
+ graph()->NewNode(simplified()->CheckBounds(VectorSlotPair()), index,
+ length, effect, control);
}
// Access the actual element.
@@ -2279,13 +2280,14 @@ JSNativeContextSpecialization::BuildElementAccess(
// Check that the {index} is a valid array index, we do the actual
// bounds check below and just skip the store below if it's out of
// bounds for the {receiver}.
- index = effect = graph()->NewNode(simplified()->CheckBounds(), index,
- jsgraph()->Constant(Smi::kMaxValue),
- effect, control);
+ index = effect = graph()->NewNode(
+ simplified()->CheckBounds(VectorSlotPair()), index,
+ jsgraph()->Constant(Smi::kMaxValue), effect, control);
} else {
// Check that the {index} is in the valid range for the {receiver}.
- index = effect = graph()->NewNode(simplified()->CheckBounds(), index,
- length, effect, control);
+ index = effect =
+ graph()->NewNode(simplified()->CheckBounds(VectorSlotPair()), index,
+ length, effect, control);
}
// Compute the element access.
@@ -2406,11 +2408,12 @@ JSNativeContextSpecialization::BuildElementAccess(
} else {
DCHECK_EQ(AccessMode::kStore, access_mode);
if (IsSmiElementsKind(elements_kind)) {
- value = effect =
- graph()->NewNode(simplified()->CheckSmi(), value, effect, control);
+ value = effect = graph()->NewNode(
+ simplified()->CheckSmi(VectorSlotPair()), value, effect, control);
} else if (IsDoubleElementsKind(elements_kind)) {
- value = effect = graph()->NewNode(simplified()->CheckNumber(), value,
- effect, control);
+ value = effect =
+ graph()->NewNode(simplified()->CheckNumber(VectorSlotPair()), value,
+ effect, control);
// Make sure we do not store signalling NaNs into double arrays.
value = graph()->NewNode(simplified()->NumberSilenceNaN(), value);
}
@@ -2443,8 +2446,9 @@ JSNativeContextSpecialization::BuildElementAccess(
jsgraph()->Constant(JSObject::kMaxGap))
: graph()->NewNode(simplified()->NumberAdd(), length,
jsgraph()->OneConstant());
- index = effect = graph()->NewNode(simplified()->CheckBounds(), index,
- limit, effect, control);
+ index = effect =
+ graph()->NewNode(simplified()->CheckBounds(VectorSlotPair()), index,
+ limit, effect, control);
// Grow {elements} backing store if necessary.
GrowFastElementsMode mode =
@@ -2452,8 +2456,8 @@ JSNativeContextSpecialization::BuildElementAccess(
? GrowFastElementsMode::kDoubleElements
: GrowFastElementsMode::kSmiOrObjectElements;
elements = effect = graph()->NewNode(
- simplified()->MaybeGrowFastElements(mode), receiver, elements,
- index, elements_length, effect, control);
+ simplified()->MaybeGrowFastElements(mode, VectorSlotPair()),
+ receiver, elements, index, elements_length, effect, control);
// Also update the "length" property if {receiver} is a JSArray.
if (receiver_is_jsarray) {
@@ -2505,9 +2509,9 @@ Node* JSNativeContextSpecialization::BuildIndexedStringLoad(
dependencies()->AssumePropertyCell(factory()->no_elements_protector());
// Ensure that the {index} is a valid String length.
- index = *effect = graph()->NewNode(simplified()->CheckBounds(), index,
- jsgraph()->Constant(String::kMaxLength),
- *effect, *control);
+ index = *effect = graph()->NewNode(
+ simplified()->CheckBounds(VectorSlotPair()), index,
+ jsgraph()->Constant(String::kMaxLength), *effect, *control);
// Load the single character string from {receiver} or yield
// undefined if the {index} is not within the valid bounds.
@@ -2531,8 +2535,9 @@ Node* JSNativeContextSpecialization::BuildIndexedStringLoad(
vtrue, vfalse, *control);
} else {
// Ensure that {index} is less than {receiver} length.
- index = *effect = graph()->NewNode(simplified()->CheckBounds(), index,
- length, *effect, *control);
+ index = *effect =
+ graph()->NewNode(simplified()->CheckBounds(VectorSlotPair()), index,
+ length, *effect, *control);
Node* masked_index =
graph()->NewNode(simplified()->MaskIndexWithBound(), index, length);
@@ -2579,8 +2584,8 @@ Node* JSNativeContextSpecialization::BuildExtendPropertiesBackingStore(
common()->Select(MachineRepresentation::kTaggedSigned),
graph()->NewNode(simplified()->ObjectIsSmi(), properties), properties,
jsgraph()->SmiConstant(PropertyArray::kNoHashSentinel));
- hash = graph()->NewNode(common()->TypeGuard(Type::SignedSmall()), hash,
- control);
+ hash = effect = graph()->NewNode(common()->TypeGuard(Type::SignedSmall()),
+ hash, effect, control);
hash =
graph()->NewNode(simplified()->NumberShiftLeft(), hash,
jsgraph()->Constant(PropertyArray::HashField::kShift));
diff --git a/deps/v8/src/compiler/js-operator.cc b/deps/v8/src/compiler/js-operator.cc
index 5b5e6589d2..0ddf859cff 100644
--- a/deps/v8/src/compiler/js-operator.cc
+++ b/deps/v8/src/compiler/js-operator.cc
@@ -9,9 +9,9 @@
#include "src/base/lazy-instance.h"
#include "src/compiler/opcodes.h"
#include "src/compiler/operator.h"
-#include "src/feedback-vector.h"
#include "src/handles-inl.h"
#include "src/objects-inl.h"
+#include "src/vector-slot-pair.h"
namespace v8 {
namespace internal {
@@ -28,29 +28,6 @@ CallFrequency CallFrequencyOf(Operator const* op) {
return OpParameter<CallFrequency>(op);
}
-VectorSlotPair::VectorSlotPair() {}
-
-
-int VectorSlotPair::index() const {
- return vector_.is_null() ? -1 : FeedbackVector::GetIndex(slot_);
-}
-
-
-bool operator==(VectorSlotPair const& lhs, VectorSlotPair const& rhs) {
- return lhs.slot() == rhs.slot() &&
- lhs.vector().location() == rhs.vector().location();
-}
-
-
-bool operator!=(VectorSlotPair const& lhs, VectorSlotPair const& rhs) {
- return !(lhs == rhs);
-}
-
-
-size_t hash_value(VectorSlotPair const& p) {
- return base::hash_combine(p.slot(), p.vector().location());
-}
-
std::ostream& operator<<(std::ostream& os,
ConstructForwardVarargsParameters const& p) {
@@ -599,6 +576,7 @@ CompareOperationHint CompareOperationHintOf(const Operator* op) {
V(LoadMessage, Operator::kNoThrow | Operator::kNoWrite, 0, 1) \
V(StoreMessage, Operator::kNoRead | Operator::kNoThrow, 1, 0) \
V(GeneratorRestoreContinuation, Operator::kNoThrow, 1, 1) \
+ V(GeneratorRestoreInputOrDebugPos, Operator::kNoThrow, 1, 1) \
V(StackCheck, Operator::kNoWrite, 0, 0) \
V(Debugger, Operator::kNoProperties, 0, 0) \
V(GetSuperConstructor, Operator::kNoWrite, 1, 1)
@@ -645,6 +623,7 @@ struct JSOperatorGlobalCache final {
Name##Operator<BinaryOperationHint::kNumberOrOddball> \
k##Name##NumberOrOddballOperator; \
Name##Operator<BinaryOperationHint::kString> k##Name##StringOperator; \
+ Name##Operator<BinaryOperationHint::kBigInt> k##Name##BigIntOperator; \
Name##Operator<BinaryOperationHint::kAny> k##Name##AnyOperator;
BINARY_OP_LIST(BINARY_OP)
#undef BINARY_OP
@@ -667,6 +646,7 @@ struct JSOperatorGlobalCache final {
k##Name##InternalizedStringOperator; \
Name##Operator<CompareOperationHint::kString> k##Name##StringOperator; \
Name##Operator<CompareOperationHint::kSymbol> k##Name##SymbolOperator; \
+ Name##Operator<CompareOperationHint::kBigInt> k##Name##BigIntOperator; \
Name##Operator<CompareOperationHint::kReceiver> k##Name##ReceiverOperator; \
Name##Operator<CompareOperationHint::kAny> k##Name##AnyOperator;
COMPARE_OP_LIST(COMPARE_OP)
@@ -703,6 +683,8 @@ CACHED_OP_LIST(CACHED_OP)
return &cache_.k##Name##NumberOrOddballOperator; \
case BinaryOperationHint::kString: \
return &cache_.k##Name##StringOperator; \
+ case BinaryOperationHint::kBigInt: \
+ return &cache_.k##Name##BigIntOperator; \
case BinaryOperationHint::kAny: \
return &cache_.k##Name##AnyOperator; \
} \
@@ -729,6 +711,8 @@ BINARY_OP_LIST(BINARY_OP)
return &cache_.k##Name##StringOperator; \
case CompareOperationHint::kSymbol: \
return &cache_.k##Name##SymbolOperator; \
+ case CompareOperationHint::kBigInt: \
+ return &cache_.k##Name##BigIntOperator; \
case CompareOperationHint::kReceiver: \
return &cache_.k##Name##ReceiverOperator; \
case CompareOperationHint::kAny: \
@@ -763,8 +747,10 @@ const Operator* JSOperatorBuilder::CallForwardVarargs(size_t arity,
const Operator* JSOperatorBuilder::Call(size_t arity, CallFrequency frequency,
VectorSlotPair const& feedback,
- ConvertReceiverMode convert_mode) {
- CallParameters parameters(arity, frequency, feedback, convert_mode);
+ ConvertReceiverMode convert_mode,
+ SpeculationMode speculation_mode) {
+ CallParameters parameters(arity, frequency, feedback, convert_mode,
+ speculation_mode);
return new (zone()) Operator1<CallParameters>( // --
IrOpcode::kJSCall, Operator::kNoProperties, // opcode
"JSCall", // name
@@ -781,9 +767,10 @@ const Operator* JSOperatorBuilder::CallWithArrayLike(CallFrequency frequency) {
}
const Operator* JSOperatorBuilder::CallWithSpread(
- uint32_t arity, CallFrequency frequency, VectorSlotPair const& feedback) {
+ uint32_t arity, CallFrequency frequency, VectorSlotPair const& feedback,
+ SpeculationMode speculation_mode) {
CallParameters parameters(arity, frequency, feedback,
- ConvertReceiverMode::kAny);
+ ConvertReceiverMode::kAny, speculation_mode);
return new (zone()) Operator1<CallParameters>( // --
IrOpcode::kJSCallWithSpread, Operator::kNoProperties, // opcode
"JSCallWithSpread", // name
diff --git a/deps/v8/src/compiler/js-operator.h b/deps/v8/src/compiler/js-operator.h
index 94a9b1fdb6..3875234d5a 100644
--- a/deps/v8/src/compiler/js-operator.h
+++ b/deps/v8/src/compiler/js-operator.h
@@ -10,6 +10,7 @@
#include "src/handles.h"
#include "src/runtime/runtime.h"
#include "src/type-hints.h"
+#include "src/vector-slot-pair.h"
namespace v8 {
namespace internal {
@@ -18,7 +19,6 @@ class AllocationSite;
class BoilerplateDescription;
class ConstantElementsPair;
class SharedFunctionInfo;
-class FeedbackVector;
namespace compiler {
@@ -59,32 +59,6 @@ std::ostream& operator<<(std::ostream&, CallFrequency);
CallFrequency CallFrequencyOf(Operator const* op) WARN_UNUSED_RESULT;
-// Defines a pair of {FeedbackVector} and {FeedbackSlot}, which
-// is used to access the type feedback for a certain {Node}.
-class V8_EXPORT_PRIVATE VectorSlotPair {
- public:
- VectorSlotPair();
- VectorSlotPair(Handle<FeedbackVector> vector, FeedbackSlot slot)
- : vector_(vector), slot_(slot) {}
-
- bool IsValid() const { return !vector_.is_null() && !slot_.IsInvalid(); }
-
- Handle<FeedbackVector> vector() const { return vector_; }
- FeedbackSlot slot() const { return slot_; }
-
- int index() const;
-
- private:
- const Handle<FeedbackVector> vector_;
- const FeedbackSlot slot_;
-};
-
-bool operator==(VectorSlotPair const&, VectorSlotPair const&);
-bool operator!=(VectorSlotPair const&, VectorSlotPair const&);
-
-size_t hash_value(VectorSlotPair const&);
-
-
// Defines the flags for a JavaScript call forwarding parameters. This
// is used as parameter by JSConstructForwardVarargs operators.
class ConstructForwardVarargsParameters final {
@@ -187,8 +161,10 @@ class CallParameters final {
public:
CallParameters(size_t arity, CallFrequency frequency,
VectorSlotPair const& feedback,
- ConvertReceiverMode convert_mode)
+ ConvertReceiverMode convert_mode,
+ SpeculationMode speculation_mode)
: bit_field_(ArityField::encode(arity) |
+ SpeculationModeField::encode(speculation_mode) |
ConvertReceiverModeField::encode(convert_mode)),
frequency_(frequency),
feedback_(feedback) {}
@@ -200,6 +176,10 @@ class CallParameters final {
}
VectorSlotPair const& feedback() const { return feedback_; }
+ SpeculationMode speculation_mode() const {
+ return SpeculationModeField::decode(bit_field_);
+ }
+
bool operator==(CallParameters const& that) const {
return this->bit_field_ == that.bit_field_ &&
this->frequency_ == that.frequency_ &&
@@ -212,7 +192,8 @@ class CallParameters final {
return base::hash_combine(p.bit_field_, p.frequency_, p.feedback_);
}
- typedef BitField<size_t, 0, 29> ArityField;
+ typedef BitField<size_t, 0, 28> ArityField;
+ typedef BitField<SpeculationMode, 28, 1> SpeculationModeField;
typedef BitField<ConvertReceiverMode, 29, 2> ConvertReceiverModeField;
uint32_t const bit_field_;
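 
 The layout change above shrinks ArityField from 29 to 28 bits to make room for the one-bit SpeculationModeField at bit 28, with ConvertReceiverModeField staying at bits 29-30. A stand-alone sketch of the same packing using plain shifts and masks (illustrative only, not the BitField template V8 actually uses):
 
 #include <cstdint>
 
 constexpr uint32_t kArityBits = 28;
 constexpr uint32_t kArityMask = (1u << kArityBits) - 1;
 
 constexpr uint32_t Encode(uint32_t arity, uint32_t speculation, uint32_t convert) {
   return (arity & kArityMask) | (speculation << 28) | (convert << 29);
 }
 constexpr uint32_t Arity(uint32_t bits) { return bits & kArityMask; }
 constexpr uint32_t Speculation(uint32_t bits) { return (bits >> 28) & 1u; }
 constexpr uint32_t Convert(uint32_t bits) { return (bits >> 29) & 3u; }
 
 static_assert(Arity(Encode(5, 1, 2)) == 5, "arity round-trips");
 static_assert(Speculation(Encode(5, 1, 2)) == 1, "speculation mode round-trips");
 static_assert(Convert(Encode(5, 1, 2)) == 2, "convert mode round-trips");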
@@ -693,11 +674,13 @@ class V8_EXPORT_PRIVATE JSOperatorBuilder final
const Operator* Call(
size_t arity, CallFrequency frequency = CallFrequency(),
VectorSlotPair const& feedback = VectorSlotPair(),
- ConvertReceiverMode convert_mode = ConvertReceiverMode::kAny);
+ ConvertReceiverMode convert_mode = ConvertReceiverMode::kAny,
+ SpeculationMode speculation_mode = SpeculationMode::kAllowSpeculation);
const Operator* CallWithArrayLike(CallFrequency frequency);
const Operator* CallWithSpread(
uint32_t arity, CallFrequency frequency = CallFrequency(),
- VectorSlotPair const& feedback = VectorSlotPair());
+ VectorSlotPair const& feedback = VectorSlotPair(),
+ SpeculationMode speculation_mode = SpeculationMode::kAllowSpeculation);
const Operator* CallRuntime(Runtime::FunctionId id);
const Operator* CallRuntime(Runtime::FunctionId id, size_t arity);
const Operator* CallRuntime(const Runtime::Function* function, size_t arity);
@@ -761,8 +744,9 @@ class V8_EXPORT_PRIVATE JSOperatorBuilder final
// Used to implement Ignition's RestoreGeneratorState bytecode.
const Operator* GeneratorRestoreContinuation();
- // Used to implement Ignition's RestoreGeneratorRegisters bytecode.
+ // Used to implement Ignition's ResumeGenerator bytecode.
const Operator* GeneratorRestoreRegister(int index);
+ const Operator* GeneratorRestoreInputOrDebugPos();
const Operator* StackCheck();
const Operator* Debugger();
diff --git a/deps/v8/src/compiler/js-type-hint-lowering.cc b/deps/v8/src/compiler/js-type-hint-lowering.cc
index a7ce12cdb4..0ec63600a2 100644
--- a/deps/v8/src/compiler/js-type-hint-lowering.cc
+++ b/deps/v8/src/compiler/js-type-hint-lowering.cc
@@ -38,6 +38,7 @@ bool BinaryOperationHintToNumberOperationHint(
case BinaryOperationHint::kAny:
case BinaryOperationHint::kNone:
case BinaryOperationHint::kString:
+ case BinaryOperationHint::kBigInt:
break;
}
return false;
@@ -90,6 +91,7 @@ class JSSpeculativeBinopBuilder final {
case CompareOperationHint::kNone:
case CompareOperationHint::kString:
case CompareOperationHint::kSymbol:
+ case CompareOperationHint::kBigInt:
case CompareOperationHint::kReceiver:
case CompareOperationHint::kInternalizedString:
break;
@@ -493,7 +495,8 @@ Node* JSTypeHintLowering::TryBuildSoftDeopt(FeedbackNexus& nexus, Node* effect,
DeoptimizeReason reason) const {
if ((flags() & kBailoutOnUninitialized) && nexus.IsUninitialized()) {
Node* deoptimize = jsgraph()->graph()->NewNode(
- jsgraph()->common()->Deoptimize(DeoptimizeKind::kSoft, reason),
+ jsgraph()->common()->Deoptimize(DeoptimizeKind::kSoft, reason,
+ VectorSlotPair()),
jsgraph()->Dead(), effect, control);
Node* frame_state = NodeProperties::FindFrameStateBefore(deoptimize);
deoptimize->ReplaceInput(0, frame_state);
diff --git a/deps/v8/src/compiler/js-typed-lowering.cc b/deps/v8/src/compiler/js-typed-lowering.cc
index 2380c7c0f4..c265caf9f0 100644
--- a/deps/v8/src/compiler/js-typed-lowering.cc
+++ b/deps/v8/src/compiler/js-typed-lowering.cc
@@ -47,6 +47,7 @@ class JSBinopReduction final {
case CompareOperationHint::kNone:
case CompareOperationHint::kString:
case CompareOperationHint::kSymbol:
+ case CompareOperationHint::kBigInt:
case CompareOperationHint::kReceiver:
case CompareOperationHint::kInternalizedString:
break;
@@ -156,14 +157,16 @@ class JSBinopReduction final {
// CheckString node.
void CheckInputsToString() {
if (!left_type()->Is(Type::String())) {
- Node* left_input = graph()->NewNode(simplified()->CheckString(), left(),
- effect(), control());
+ Node* left_input =
+ graph()->NewNode(simplified()->CheckString(VectorSlotPair()), left(),
+ effect(), control());
node_->ReplaceInput(0, left_input);
update_effect(left_input);
}
if (!right_type()->Is(Type::String())) {
- Node* right_input = graph()->NewNode(simplified()->CheckString(), right(),
- effect(), control());
+ Node* right_input =
+ graph()->NewNode(simplified()->CheckString(VectorSlotPair()), right(),
+ effect(), control());
node_->ReplaceInput(1, right_input);
update_effect(right_input);
}
@@ -308,7 +311,8 @@ class JSBinopReduction final {
case IrOpcode::kSpeculativeNumberLessThanOrEqual:
return simplified()->NumberLessThanOrEqual();
case IrOpcode::kSpeculativeNumberAdd:
- return simplified()->NumberAdd();
+ // Handled by ReduceSpeculativeNumberAdd.
+ UNREACHABLE();
case IrOpcode::kSpeculativeNumberSubtract:
return simplified()->NumberSubtract();
case IrOpcode::kSpeculativeNumberMultiply:
@@ -539,13 +543,15 @@ Reduction JSTypedLowering::ReduceJSAdd(Node* node) {
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
if (r.LeftInputIs(empty_string_type_)) {
- Node* value = effect = graph()->NewNode(simplified()->CheckString(),
- r.right(), effect, control);
+ Node* value = effect =
+ graph()->NewNode(simplified()->CheckString(VectorSlotPair()),
+ r.right(), effect, control);
ReplaceWithValue(node, value, effect, control);
return Replace(value);
} else if (r.RightInputIs(empty_string_type_)) {
- Node* value = effect = graph()->NewNode(simplified()->CheckString(),
- r.left(), effect, control);
+ Node* value = effect =
+ graph()->NewNode(simplified()->CheckString(VectorSlotPair()),
+ r.left(), effect, control);
ReplaceWithValue(node, value, effect, control);
return Replace(value);
}
@@ -594,6 +600,9 @@ Reduction JSTypedLowering::ReduceSpeculativeNumberBinop(Node* node) {
if ((hint == NumberOperationHint::kNumber ||
hint == NumberOperationHint::kNumberOrOddball) &&
r.BothInputsAre(Type::NumberOrUndefinedOrNullOrBoolean())) {
+ // We intentionally do this only in the Number and NumberOrOddball hint case
+ // because simplified lowering of these speculative ops may do some clever
+ // reductions in the other cases.
r.ConvertInputsToNumber();
return r.ChangeToPureOperator(r.NumberOpFromSpeculativeNumberOp(),
Type::Number());
@@ -634,22 +643,22 @@ Reduction JSTypedLowering::ReduceCreateConsString(Node* node) {
// Make sure {first} is actually a String.
Type* first_type = NodeProperties::GetType(first);
if (!first_type->Is(Type::String())) {
- first = effect =
- graph()->NewNode(simplified()->CheckString(), first, effect, control);
+ first = effect = graph()->NewNode(
+ simplified()->CheckString(VectorSlotPair()), first, effect, control);
first_type = NodeProperties::GetType(first);
}
// Make sure {second} is actually a String.
Type* second_type = NodeProperties::GetType(second);
if (!second_type->Is(Type::String())) {
- second = effect =
- graph()->NewNode(simplified()->CheckString(), second, effect, control);
+ second = effect = graph()->NewNode(
+ simplified()->CheckString(VectorSlotPair()), second, effect, control);
second_type = NodeProperties::GetType(second);
}
// Determine the {first} length.
- Node* first_length = BuildGetStringLength(first, &effect, control);
- Node* second_length = BuildGetStringLength(second, &effect, control);
+ Node* first_length = BuildGetStringLength(first);
+ Node* second_length = BuildGetStringLength(second);
// Compute the resulting length.
Node* length =
@@ -661,9 +670,9 @@ Reduction JSTypedLowering::ReduceCreateConsString(Node* node) {
// has the additional benefit of not holding on to the lazy {frame_state}
// and thus potentially reduces the number of live ranges and allows for
// more truncations.
- length = effect = graph()->NewNode(simplified()->CheckBounds(), length,
- jsgraph()->Constant(String::kMaxLength),
- effect, control);
+ length = effect = graph()->NewNode(
+ simplified()->CheckBounds(VectorSlotPair()), length,
+ jsgraph()->Constant(String::kMaxLength), effect, control);
} else {
// Check if we would overflow the allowed maximum string length.
Node* check =
@@ -698,40 +707,25 @@ Reduction JSTypedLowering::ReduceCreateConsString(Node* node) {
Revisit(graph()->end());
}
control = graph()->NewNode(common()->IfTrue(), branch);
+ length = effect =
+ graph()->NewNode(common()->TypeGuard(type_cache_.kStringLengthType),
+ length, effect, control);
}
- // Figure out the map for the resulting ConsString.
- // TODO(turbofan): We currently just use the cons_string_map here for
- // the sake of simplicity; we could also try to be smarter here and
- // use the one_byte_cons_string_map instead when the resulting ConsString
- // contains only one byte characters.
- Node* value_map = jsgraph()->HeapConstant(factory()->cons_string_map());
-
- // Allocate the resulting ConsString.
- AllocationBuilder a(jsgraph(), effect, control);
- a.Allocate(ConsString::kSize, NOT_TENURED, Type::OtherString());
- a.Store(AccessBuilder::ForMap(), value_map);
- a.Store(AccessBuilder::ForNameHashField(),
- jsgraph()->Constant(Name::kEmptyHashField));
- a.Store(AccessBuilder::ForStringLength(), length);
- a.Store(AccessBuilder::ForConsStringFirst(), first);
- a.Store(AccessBuilder::ForConsStringSecond(), second);
-
- // Morph the {node} into a {FinishRegion}.
- ReplaceWithValue(node, node, node, control);
- a.FinishAndChange(node);
- return Changed(node);
+ Node* value =
+ graph()->NewNode(simplified()->NewConsString(), length, first, second);
+ ReplaceWithValue(node, value, effect, control);
+ return Replace(value);
}
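 
 For context on the change above: instead of allocating and initializing the ConsString inline in the reducer, the graph now carries a single NewConsString operation taking the precomputed length plus the two string parts. Conceptually the result is just a (length, first, second) triple whose flattening is deferred; a toy C++ analogue (illustrative only, not V8's representation):
 
 #include <memory>
 #include <string>
 
 // A toy cons-string node: concatenation is O(1), flattening is deferred.
 struct ConsStringSketch {
   size_t length;
   std::shared_ptr<const std::string> first;
   std::shared_ptr<const std::string> second;
 };
 
 ConsStringSketch NewConsStringSketch(std::shared_ptr<const std::string> first,
                                      std::shared_ptr<const std::string> second) {
   size_t length = first->size() + second->size();
   return {length, std::move(first), std::move(second)};
 }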
-Node* JSTypedLowering::BuildGetStringLength(Node* value, Node** effect,
- Node* control) {
+Node* JSTypedLowering::BuildGetStringLength(Node* value) {
+ // TODO(bmeurer): Get rid of this hack and instead have a way to
+ // express the string length in the types.
HeapObjectMatcher m(value);
Node* length =
(m.HasValue() && m.Value()->IsString())
? jsgraph()->Constant(Handle<String>::cast(m.Value())->length())
- : (*effect) = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForStringLength()),
- value, *effect, control);
+ : graph()->NewNode(simplified()->StringLength(), value);
return length;
}
@@ -866,9 +860,9 @@ Reduction JSTypedLowering::ReduceJSStrictEqual(Node* node) {
ReplaceWithValue(node, replacement);
return Replace(replacement);
}
- if (r.OneInputCannotBe(Type::NumberOrString())) {
- // For values with canonical representation (i.e. neither String, nor
- // Number) an empty type intersection means the values cannot be strictly
+ if (r.OneInputCannotBe(Type::NumericOrString())) {
+ // For values with canonical representation (i.e. neither String nor
+ // Numeric) an empty type intersection means the values cannot be strictly
// equal.
if (!r.left_type()->Maybe(r.right_type())) {
Node* replacement = jsgraph()->FalseConstant();
@@ -1015,6 +1009,7 @@ Reduction JSTypedLowering::ReduceJSToNumberOrNumeric(Node* node) {
NodeProperties::ChangeOp(node, simplified()->PlainPrimitiveToNumber());
return Changed(node);
}
+ // TODO(neis): Reduce ToNumeric to ToNumber if input can't be BigInt?
return NoChange();
}
@@ -1051,7 +1046,9 @@ Reduction JSTypedLowering::ReduceJSToStringInput(Node* input) {
return Replace(jsgraph()->HeapConstant(
factory()->NumberToString(factory()->NewNumber(input_type->Min()))));
}
- // TODO(turbofan): js-typed-lowering of ToString(x:number)
+ if (input_type->Is(Type::Number())) {
+ return Replace(graph()->NewNode(simplified()->NumberToString(), input));
+ }
return NoChange();
}
@@ -1133,16 +1130,12 @@ Reduction JSTypedLowering::ReduceJSLoadNamed(Node* node) {
DCHECK_EQ(IrOpcode::kJSLoadNamed, node->opcode());
Node* receiver = NodeProperties::GetValueInput(node, 0);
Type* receiver_type = NodeProperties::GetType(receiver);
- Node* effect = NodeProperties::GetEffectInput(node);
- Node* control = NodeProperties::GetControlInput(node);
Handle<Name> name = NamedAccessOf(node->op()).name();
// Optimize "length" property of strings.
if (name.is_identical_to(factory()->length_string()) &&
receiver_type->Is(Type::String())) {
- Node* value = effect = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForStringLength()), receiver,
- effect, control);
- ReplaceWithValue(node, value, effect);
+ Node* value = graph()->NewNode(simplified()->StringLength(), receiver);
+ ReplaceWithValue(node, value);
return Replace(value);
}
return NoChange();
@@ -1783,7 +1776,7 @@ Reduction JSTypedLowering::ReduceJSForInNext(Node* node) {
Node* check = graph()->NewNode(simplified()->ReferenceEqual(),
receiver_map, cache_type);
effect =
- graph()->NewNode(simplified()->CheckIf(DeoptimizeReason::kNoReason),
+ graph()->NewNode(simplified()->CheckIf(DeoptimizeReason::kWrongMap),
check, effect, control);
// Since the change to LoadElement() below is effectful, we connect
@@ -2098,6 +2091,22 @@ Reduction JSTypedLowering::ReduceJSGeneratorRestoreRegister(Node* node) {
return Changed(element);
}
+Reduction JSTypedLowering::ReduceJSGeneratorRestoreInputOrDebugPos(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSGeneratorRestoreInputOrDebugPos, node->opcode());
+
+ FieldAccess input_or_debug_pos_field =
+ AccessBuilder::ForJSGeneratorObjectInputOrDebugPos();
+ const Operator* new_op = simplified()->LoadField(input_or_debug_pos_field);
+
+ // Mutate the node in-place.
+ DCHECK(OperatorProperties::HasContextInput(node->op()));
+ DCHECK(!OperatorProperties::HasContextInput(new_op));
+ node->RemoveInput(NodeProperties::FirstContextIndex(node));
+
+ NodeProperties::ChangeOp(node, new_op);
+ return Changed(node);
+}
+
Reduction JSTypedLowering::Reduce(Node* node) {
switch (node->opcode()) {
case IrOpcode::kJSEqual:
@@ -2183,6 +2192,8 @@ Reduction JSTypedLowering::Reduce(Node* node) {
return ReduceJSGeneratorRestoreContinuation(node);
case IrOpcode::kJSGeneratorRestoreRegister:
return ReduceJSGeneratorRestoreRegister(node);
+ case IrOpcode::kJSGeneratorRestoreInputOrDebugPos:
+ return ReduceJSGeneratorRestoreInputOrDebugPos(node);
// TODO(mstarzinger): Simplified operations hiding in JS-level reducer not
// fooling anyone. Consider moving this into a separate reducer.
case IrOpcode::kSpeculativeNumberAdd:
diff --git a/deps/v8/src/compiler/js-typed-lowering.h b/deps/v8/src/compiler/js-typed-lowering.h
index 8b00c1d32c..d72303f495 100644
--- a/deps/v8/src/compiler/js-typed-lowering.h
+++ b/deps/v8/src/compiler/js-typed-lowering.h
@@ -73,6 +73,7 @@ class V8_EXPORT_PRIVATE JSTypedLowering final
Reduction ReduceJSGeneratorStore(Node* node);
Reduction ReduceJSGeneratorRestoreContinuation(Node* node);
Reduction ReduceJSGeneratorRestoreRegister(Node* node);
+ Reduction ReduceJSGeneratorRestoreInputOrDebugPos(Node* node);
Reduction ReduceNumberBinop(Node* node);
Reduction ReduceInt32Binop(Node* node);
Reduction ReduceUI32Shift(Node* node, Signedness signedness);
@@ -85,8 +86,8 @@ class V8_EXPORT_PRIVATE JSTypedLowering final
// Helper for ReduceJSLoadModule and ReduceJSStoreModule.
Node* BuildGetModuleCell(Node* node);
- // Helpers for ReduceJSCreateConsString and ReduceJSStringConcat.
- Node* BuildGetStringLength(Node* value, Node** effect, Node* control);
+ // Helpers for ReduceJSCreateConsString.
+ Node* BuildGetStringLength(Node* value);
Factory* factory() const;
Graph* graph() const;
diff --git a/deps/v8/src/compiler/linkage.cc b/deps/v8/src/compiler/linkage.cc
index 03b8074f0f..5df50e64f5 100644
--- a/deps/v8/src/compiler/linkage.cc
+++ b/deps/v8/src/compiler/linkage.cc
@@ -75,33 +75,42 @@ bool CallDescriptor::HasSameReturnLocationsAs(
return true;
}
-int CallDescriptor::GetStackParameterDelta(
- CallDescriptor const* tail_caller) const {
- int callee_slots_above_sp = 0;
+int CallDescriptor::GetFirstUnusedStackSlot() const {
+ int slots_above_sp = 0;
for (size_t i = 0; i < InputCount(); ++i) {
LinkageLocation operand = GetInputLocation(i);
if (!operand.IsRegister()) {
int new_candidate =
-operand.GetLocation() + operand.GetSizeInPointers() - 1;
- if (new_candidate > callee_slots_above_sp) {
- callee_slots_above_sp = new_candidate;
+ if (new_candidate > slots_above_sp) {
+ slots_above_sp = new_candidate;
}
}
}
- int tail_caller_slots_above_sp = 0;
- if (tail_caller != nullptr) {
- for (size_t i = 0; i < tail_caller->InputCount(); ++i) {
- LinkageLocation operand = tail_caller->GetInputLocation(i);
- if (!operand.IsRegister()) {
- int new_candidate =
- -operand.GetLocation() + operand.GetSizeInPointers() - 1;
- if (new_candidate > tail_caller_slots_above_sp) {
- tail_caller_slots_above_sp = new_candidate;
- }
+ return slots_above_sp;
+}
+
+int CallDescriptor::GetStackParameterDelta(
+ CallDescriptor const* tail_caller) const {
+ int callee_slots_above_sp = GetFirstUnusedStackSlot();
+ int tail_caller_slots_above_sp = tail_caller->GetFirstUnusedStackSlot();
+ int stack_param_delta = callee_slots_above_sp - tail_caller_slots_above_sp;
+ if (kPadArguments) {
+ // Adjust stack delta when it is odd.
+ if (stack_param_delta % 2 != 0) {
+ if (callee_slots_above_sp % 2 != 0) {
+ // The delta is odd due to the callee - we will need to add one slot
+ // of padding.
+ ++stack_param_delta;
+ } else {
+ // The delta is odd because of the caller. We already have one slot of
+ // padding that we can reuse for arguments, so we will need one fewer
+ // slot.
+ --stack_param_delta;
}
}
}
- return callee_slots_above_sp - tail_caller_slots_above_sp;
+ return stack_param_delta;
}
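 
 The parity adjustment above only matters on targets where kPadArguments requires the argument area to stay an even number of slots. A free-standing restatement of that logic plus a worked example (illustrative only; pad_arguments stands in for kPadArguments):
 
 int StackParameterDeltaSketch(int callee_slots, int tail_caller_slots,
                               bool pad_arguments) {
   int delta = callee_slots - tail_caller_slots;
   if (pad_arguments && delta % 2 != 0) {
     if (callee_slots % 2 != 0) {
       ++delta;  // the callee side is odd: it needs one extra padding slot
     } else {
       --delta;  // the caller side is odd: its padding slot can be reused
     }
   }
   return delta;
 }
 // Example: callee_slots = 3, tail_caller_slots = 2 gives a raw delta of 1;
 // the callee side is odd, so one padding slot is added and the delta becomes 2.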
bool CallDescriptor::CanTailCall(const Node* node) const {
diff --git a/deps/v8/src/compiler/linkage.h b/deps/v8/src/compiler/linkage.h
index 9e79a9af00..ade1d6902f 100644
--- a/deps/v8/src/compiler/linkage.h
+++ b/deps/v8/src/compiler/linkage.h
@@ -177,17 +177,14 @@ class V8_EXPORT_PRIVATE CallDescriptor final
kNeedsFrameState = 1u << 0,
kHasExceptionHandler = 1u << 1,
kCanUseRoots = 1u << 2,
- // (arm64 only) native stack should be used for arguments.
- kUseNativeStack = 1u << 3,
- // (arm64 only) call instruction has to restore JSSP or CSP.
- kRestoreJSSP = 1u << 4,
- kRestoreCSP = 1u << 5,
// Causes the code generator to initialize the root register.
- kInitializeRootRegister = 1u << 6,
+ kInitializeRootRegister = 1u << 3,
// Does not ever try to allocate space on our heap.
- kNoAllocate = 1u << 7,
+ kNoAllocate = 1u << 4,
// Push argument count as part of function prologue.
- kPushArgumentCount = 1u << 8
+ kPushArgumentCount = 1u << 5,
+ // Use retpoline for this call if indirect.
+ kRetpoline = 1u << 6
};
typedef base::Flags<Flag> Flags;
@@ -197,12 +194,14 @@ class V8_EXPORT_PRIVATE CallDescriptor final
RegList callee_saved_registers,
RegList callee_saved_fp_registers, Flags flags,
const char* debug_name = "",
- const RegList allocatable_registers = 0)
+ const RegList allocatable_registers = 0,
+ size_t stack_return_count = 0)
: kind_(kind),
target_type_(target_type),
target_loc_(target_loc),
location_sig_(location_sig),
stack_param_count_(stack_param_count),
+ stack_return_count_(stack_return_count),
properties_(properties),
callee_saved_registers_(callee_saved_registers),
callee_saved_fp_registers_(callee_saved_fp_registers),
@@ -232,6 +231,9 @@ class V8_EXPORT_PRIVATE CallDescriptor final
// The number of stack parameters to the call.
size_t StackParameterCount() const { return stack_param_count_; }
+ // The number of stack return values from the call.
+ size_t StackReturnCount() const { return stack_return_count_; }
+
// The number of parameters to the JS function call.
size_t JSParameterCount() const {
DCHECK(IsJSFunctionCall());
@@ -248,7 +250,6 @@ class V8_EXPORT_PRIVATE CallDescriptor final
Flags flags() const { return flags_; }
bool NeedsFrameState() const { return flags() & kNeedsFrameState; }
- bool UseNativeStack() const { return flags() & kUseNativeStack; }
bool PushArgumentCount() const { return flags() & kPushArgumentCount; }
bool InitializeRootRegister() const {
return flags() & kInitializeRootRegister;
@@ -293,7 +294,10 @@ class V8_EXPORT_PRIVATE CallDescriptor final
bool HasSameReturnLocationsAs(const CallDescriptor* other) const;
- int GetStackParameterDelta(const CallDescriptor* tail_caller = nullptr) const;
+ // Returns the first stack slot that is not used by the stack parameters.
+ int GetFirstUnusedStackSlot() const;
+
+ int GetStackParameterDelta(const CallDescriptor* tail_caller) const;
bool CanTailCall(const Node* call) const;
@@ -318,6 +322,7 @@ class V8_EXPORT_PRIVATE CallDescriptor final
const LinkageLocation target_loc_;
const LocationSignature* const location_sig_;
const size_t stack_param_count_;
+ const size_t stack_return_count_;
const Operator::Properties properties_;
const RegList callee_saved_registers_;
const RegList callee_saved_fp_registers_;
diff --git a/deps/v8/src/compiler/load-elimination.cc b/deps/v8/src/compiler/load-elimination.cc
index 0313e57909..7888f5a21e 100644
--- a/deps/v8/src/compiler/load-elimination.cc
+++ b/deps/v8/src/compiler/load-elimination.cc
@@ -811,12 +811,12 @@ Reduction LoadElimination::ReduceEnsureWritableFastElements(Node* node) {
}
Reduction LoadElimination::ReduceMaybeGrowFastElements(Node* node) {
- GrowFastElementsMode mode = GrowFastElementsModeOf(node->op());
+ GrowFastElementsParameters params = GrowFastElementsParametersOf(node->op());
Node* const object = NodeProperties::GetValueInput(node, 0);
Node* const effect = NodeProperties::GetEffectInput(node);
AbstractState const* state = node_states_.Get(effect);
if (state == nullptr) return NoChange();
- if (mode == GrowFastElementsMode::kDoubleElements) {
+ if (params.mode() == GrowFastElementsMode::kDoubleElements) {
// We know that the resulting elements have the fixed double array map.
state = state->SetMaps(
node, ZoneHandleSet<Map>(factory()->fixed_double_array_map()), zone());
diff --git a/deps/v8/src/compiler/loop-analysis.cc b/deps/v8/src/compiler/loop-analysis.cc
index a9cd46d975..d6b88b13f5 100644
--- a/deps/v8/src/compiler/loop-analysis.cc
+++ b/deps/v8/src/compiler/loop-analysis.cc
@@ -14,7 +14,7 @@ namespace v8 {
namespace internal {
namespace compiler {
-#define OFFSET(x) ((x)&0x1f)
+#define OFFSET(x) ((x)&0x1F)
#define BIT(x) (1u << OFFSET(x))
#define INDEX(x) ((x) >> 5)
diff --git a/deps/v8/src/compiler/loop-peeling.cc b/deps/v8/src/compiler/loop-peeling.cc
index 5f8857c5df..ae5b0dfbac 100644
--- a/deps/v8/src/compiler/loop-peeling.cc
+++ b/deps/v8/src/compiler/loop-peeling.cc
@@ -4,6 +4,7 @@
#include "src/compiler/loop-peeling.h"
#include "src/compiler/common-operator.h"
+#include "src/compiler/compiler-source-position-table.h"
#include "src/compiler/graph.h"
#include "src/compiler/node-marker.h"
#include "src/compiler/node-properties.h"
@@ -107,7 +108,7 @@ struct Peeling {
// The vector which contains the mapped nodes.
NodeVector* pairs;
- Peeling(Graph* graph, Zone* tmp_zone, size_t max, NodeVector* p)
+ Peeling(Graph* graph, size_t max, NodeVector* p)
: node_map(graph, static_cast<uint32_t>(max)), pairs(p) {}
Node* map(Node* node) {
@@ -121,10 +122,13 @@ struct Peeling {
pairs->push_back(copy);
}
- void CopyNodes(Graph* graph, Zone* tmp_zone, Node* dead, NodeRange nodes) {
- NodeVector inputs(tmp_zone);
+ void CopyNodes(Graph* graph, Zone* tmp_zone_, Node* dead, NodeRange nodes,
+ SourcePositionTable* source_positions) {
+ NodeVector inputs(tmp_zone_);
// Copy all the nodes first.
for (Node* node : nodes) {
+ SourcePositionTable::Scope position(
+ source_positions, source_positions->GetSourcePosition(node));
inputs.clear();
for (Node* input : node->inputs()) {
inputs.push_back(map(input));
@@ -166,13 +170,13 @@ Node* PeeledIteration::map(Node* node) {
return node;
}
-bool LoopPeeler::CanPeel(LoopTree* loop_tree, LoopTree::Loop* loop) {
+bool LoopPeeler::CanPeel(LoopTree::Loop* loop) {
// Look for returns and if projections that are outside the loop but whose
// control input is inside the loop.
- Node* loop_node = loop_tree->GetLoopControl(loop);
- for (Node* node : loop_tree->LoopNodes(loop)) {
+ Node* loop_node = loop_tree_->GetLoopControl(loop);
+ for (Node* node : loop_tree_->LoopNodes(loop)) {
for (Node* use : node->uses()) {
- if (!loop_tree->Contains(loop, use)) {
+ if (!loop_tree_->Contains(loop, use)) {
bool unmarked_exit;
switch (node->opcode()) {
case IrOpcode::kLoopExit:
@@ -187,7 +191,7 @@ bool LoopPeeler::CanPeel(LoopTree* loop_tree, LoopTree::Loop* loop) {
}
if (unmarked_exit) {
if (FLAG_trace_turbo_loop) {
- Node* loop_node = loop_tree->GetLoopControl(loop);
+ Node* loop_node = loop_tree_->GetLoopControl(loop);
PrintF(
"Cannot peel loop %i. Loop exit without explicit mark: Node %i "
"(%s) is inside "
@@ -203,47 +207,45 @@ bool LoopPeeler::CanPeel(LoopTree* loop_tree, LoopTree::Loop* loop) {
return true;
}
-
-PeeledIteration* LoopPeeler::Peel(Graph* graph, CommonOperatorBuilder* common,
- LoopTree* loop_tree, LoopTree::Loop* loop,
- Zone* tmp_zone) {
- if (!CanPeel(loop_tree, loop)) return nullptr;
+PeeledIteration* LoopPeeler::Peel(LoopTree::Loop* loop) {
+ if (!CanPeel(loop)) return nullptr;
//============================================================================
// Construct the peeled iteration.
//============================================================================
- PeeledIterationImpl* iter = new (tmp_zone) PeeledIterationImpl(tmp_zone);
+ PeeledIterationImpl* iter = new (tmp_zone_) PeeledIterationImpl(tmp_zone_);
size_t estimated_peeled_size = 5 + (loop->TotalSize()) * 2;
- Peeling peeling(graph, tmp_zone, estimated_peeled_size, &iter->node_pairs_);
+ Peeling peeling(graph_, estimated_peeled_size, &iter->node_pairs_);
- Node* dead = graph->NewNode(common->Dead());
+ Node* dead = graph_->NewNode(common_->Dead());
// Map the loop header nodes to their entry values.
- for (Node* node : loop_tree->HeaderNodes(loop)) {
+ for (Node* node : loop_tree_->HeaderNodes(loop)) {
peeling.Insert(node, node->InputAt(kAssumedLoopEntryIndex));
}
// Copy all the nodes of loop body for the peeled iteration.
- peeling.CopyNodes(graph, tmp_zone, dead, loop_tree->BodyNodes(loop));
+ peeling.CopyNodes(graph_, tmp_zone_, dead, loop_tree_->BodyNodes(loop),
+ source_positions_);
//============================================================================
// Replace the entry to the loop with the output of the peeled iteration.
//============================================================================
- Node* loop_node = loop_tree->GetLoopControl(loop);
+ Node* loop_node = loop_tree_->GetLoopControl(loop);
Node* new_entry;
int backedges = loop_node->InputCount() - 1;
if (backedges > 1) {
// Multiple backedges from original loop, therefore multiple output edges
// from the peeled iteration.
- NodeVector inputs(tmp_zone);
+ NodeVector inputs(tmp_zone_);
for (int i = 1; i < loop_node->InputCount(); i++) {
inputs.push_back(peeling.map(loop_node->InputAt(i)));
}
Node* merge =
- graph->NewNode(common->Merge(backedges), backedges, &inputs[0]);
+ graph_->NewNode(common_->Merge(backedges), backedges, &inputs[0]);
// Merge values from the multiple output edges of the peeled iteration.
- for (Node* node : loop_tree->HeaderNodes(loop)) {
+ for (Node* node : loop_tree_->HeaderNodes(loop)) {
if (node->opcode() == IrOpcode::kLoop) continue; // already done.
inputs.clear();
for (int i = 0; i < backedges; i++) {
@@ -252,8 +254,8 @@ PeeledIteration* LoopPeeler::Peel(Graph* graph, CommonOperatorBuilder* common,
for (Node* input : inputs) {
if (input != inputs[0]) { // Non-redundant phi.
inputs.push_back(merge);
- const Operator* op = common->ResizeMergeOrPhi(node->op(), backedges);
- Node* phi = graph->NewNode(op, backedges + 1, &inputs[0]);
+ const Operator* op = common_->ResizeMergeOrPhi(node->op(), backedges);
+ Node* phi = graph_->NewNode(op, backedges + 1, &inputs[0]);
node->ReplaceInput(0, phi);
break;
}
@@ -263,7 +265,7 @@ PeeledIteration* LoopPeeler::Peel(Graph* graph, CommonOperatorBuilder* common,
} else {
// Only one backedge, simply replace the input to loop with output of
// peeling.
- for (Node* node : loop_tree->HeaderNodes(loop)) {
+ for (Node* node : loop_tree_->HeaderNodes(loop)) {
node->ReplaceInput(0, peeling.map(node->InputAt(1)));
}
new_entry = peeling.map(loop_node->InputAt(1));
@@ -273,23 +275,23 @@ PeeledIteration* LoopPeeler::Peel(Graph* graph, CommonOperatorBuilder* common,
//============================================================================
// Change the exit and exit markers to merge/phi/effect-phi.
//============================================================================
- for (Node* exit : loop_tree->ExitNodes(loop)) {
+ for (Node* exit : loop_tree_->ExitNodes(loop)) {
switch (exit->opcode()) {
case IrOpcode::kLoopExit:
// Change the loop exit node to a merge node.
exit->ReplaceInput(1, peeling.map(exit->InputAt(0)));
- NodeProperties::ChangeOp(exit, common->Merge(2));
+ NodeProperties::ChangeOp(exit, common_->Merge(2));
break;
case IrOpcode::kLoopExitValue:
// Change exit marker to phi.
- exit->InsertInput(graph->zone(), 1, peeling.map(exit->InputAt(0)));
+ exit->InsertInput(graph_->zone(), 1, peeling.map(exit->InputAt(0)));
NodeProperties::ChangeOp(
- exit, common->Phi(MachineRepresentation::kTagged, 2));
+ exit, common_->Phi(MachineRepresentation::kTagged, 2));
break;
case IrOpcode::kLoopExitEffect:
// Change effect exit marker to effect phi.
- exit->InsertInput(graph->zone(), 1, peeling.map(exit->InputAt(0)));
- NodeProperties::ChangeOp(exit, common->EffectPhi(2));
+ exit->InsertInput(graph_->zone(), 1, peeling.map(exit->InputAt(0)));
+ NodeProperties::ChangeOp(exit, common_->EffectPhi(2));
break;
default:
break;
@@ -298,15 +300,11 @@ PeeledIteration* LoopPeeler::Peel(Graph* graph, CommonOperatorBuilder* common,
return iter;
}
-namespace {
-
-void PeelInnerLoops(Graph* graph, CommonOperatorBuilder* common,
- LoopTree* loop_tree, LoopTree::Loop* loop,
- Zone* temp_zone) {
+void LoopPeeler::PeelInnerLoops(LoopTree::Loop* loop) {
// If the loop has nested loops, peel inside those.
if (!loop->children().empty()) {
for (LoopTree::Loop* inner_loop : loop->children()) {
- PeelInnerLoops(graph, common, loop_tree, inner_loop, temp_zone);
+ PeelInnerLoops(inner_loop);
}
return;
}
@@ -314,15 +312,17 @@ void PeelInnerLoops(Graph* graph, CommonOperatorBuilder* common,
if (loop->TotalSize() > LoopPeeler::kMaxPeeledNodes) return;
if (FLAG_trace_turbo_loop) {
PrintF("Peeling loop with header: ");
- for (Node* node : loop_tree->HeaderNodes(loop)) {
+ for (Node* node : loop_tree_->HeaderNodes(loop)) {
PrintF("%i ", node->id());
}
PrintF("\n");
}
- LoopPeeler::Peel(graph, common, loop_tree, loop, temp_zone);
+ Peel(loop);
}
+namespace {
+
void EliminateLoopExit(Node* node) {
DCHECK_EQ(IrOpcode::kLoopExit, node->opcode());
// The exit markers take the loop exit as input. We iterate over uses
@@ -347,21 +347,18 @@ void EliminateLoopExit(Node* node) {
} // namespace
-// static
-void LoopPeeler::PeelInnerLoopsOfTree(Graph* graph,
- CommonOperatorBuilder* common,
- LoopTree* loop_tree, Zone* temp_zone) {
- for (LoopTree::Loop* loop : loop_tree->outer_loops()) {
- PeelInnerLoops(graph, common, loop_tree, loop, temp_zone);
+void LoopPeeler::PeelInnerLoopsOfTree() {
+ for (LoopTree::Loop* loop : loop_tree_->outer_loops()) {
+ PeelInnerLoops(loop);
}
- EliminateLoopExits(graph, temp_zone);
+ EliminateLoopExits(graph_, tmp_zone_);
}
// static
-void LoopPeeler::EliminateLoopExits(Graph* graph, Zone* temp_zone) {
- ZoneQueue<Node*> queue(temp_zone);
- ZoneVector<bool> visited(graph->NodeCount(), false, temp_zone);
+void LoopPeeler::EliminateLoopExits(Graph* graph, Zone* tmp_zone) {
+ ZoneQueue<Node*> queue(tmp_zone);
+ ZoneVector<bool> visited(graph->NodeCount(), false, tmp_zone);
queue.push(graph->end());
while (!queue.empty()) {
Node* node = queue.front();
diff --git a/deps/v8/src/compiler/loop-peeling.h b/deps/v8/src/compiler/loop-peeling.h
index 301e4b8b6c..cd08900dcd 100644
--- a/deps/v8/src/compiler/loop-peeling.h
+++ b/deps/v8/src/compiler/loop-peeling.h
@@ -13,6 +13,8 @@ namespace v8 {
namespace internal {
namespace compiler {
+class SourcePositionTable;
+
// Represents the output of peeling a loop, which is basically the mapping
// from the body of the loop to the corresponding nodes in the peeled
// iteration.
@@ -31,15 +33,28 @@ class CommonOperatorBuilder;
// Implements loop peeling.
class V8_EXPORT_PRIVATE LoopPeeler {
public:
- static bool CanPeel(LoopTree* loop_tree, LoopTree::Loop* loop);
- static PeeledIteration* Peel(Graph* graph, CommonOperatorBuilder* common,
- LoopTree* loop_tree, LoopTree::Loop* loop,
- Zone* tmp_zone);
- static void PeelInnerLoopsOfTree(Graph* graph, CommonOperatorBuilder* common,
- LoopTree* loop_tree, Zone* tmp_zone);
-
- static void EliminateLoopExits(Graph* graph, Zone* temp_zone);
+ LoopPeeler(Graph* graph, CommonOperatorBuilder* common, LoopTree* loop_tree,
+ Zone* tmp_zone, SourcePositionTable* source_positions)
+ : graph_(graph),
+ common_(common),
+ loop_tree_(loop_tree),
+ tmp_zone_(tmp_zone),
+ source_positions_(source_positions) {}
+ bool CanPeel(LoopTree::Loop* loop);
+ PeeledIteration* Peel(LoopTree::Loop* loop);
+ void PeelInnerLoopsOfTree();
+
+ static void EliminateLoopExits(Graph* graph, Zone* tmp_zone);
static const size_t kMaxPeeledNodes = 1000;
+
+ private:
+ Graph* const graph_;
+ CommonOperatorBuilder* const common_;
+ LoopTree* const loop_tree_;
+ Zone* const tmp_zone_;
+ SourcePositionTable* const source_positions_;
+
+ void PeelInnerLoops(LoopTree::Loop* loop);
};
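The loop-peeling change above is a pure refactoring: the free functions that threaded Graph*, CommonOperatorBuilder*, LoopTree* and Zone* through every call now live on a LoopPeeler instance that owns those pointers (plus the new SourcePositionTable*), so call sites only name the loop. A self-contained sketch of that pattern with stand-in stub types (not the real compiler classes):

// The context that used to be passed into every static call is captured
// once in the helper object; Graph/Zone/LoopTree are stubs here.
struct Graph {};
struct Zone {};
struct LoopTree { struct Loop {}; };

class PeelerSketch {
 public:
  PeelerSketch(Graph* graph, LoopTree* loop_tree, Zone* tmp_zone)
      : graph_(graph), loop_tree_(loop_tree), tmp_zone_(tmp_zone) {}
  // Before: static bool CanPeel(LoopTree* tree, LoopTree::Loop* loop);
  // After: the tree is a member, so callers pass only the loop.
  bool CanPeel(LoopTree::Loop* loop) const { return loop != nullptr; }

 private:
  Graph* const graph_;     // carried like graph_ in the real class
  LoopTree* const loop_tree_;
  Zone* const tmp_zone_;   // unused in this sketch
};

int main() {
  Graph graph;
  LoopTree tree;
  Zone zone;
  LoopTree::Loop loop;
  PeelerSketch peeler(&graph, &tree, &zone);
  return peeler.CanPeel(&loop) ? 0 : 1;
}
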
diff --git a/deps/v8/src/compiler/loop-variable-optimizer.cc b/deps/v8/src/compiler/loop-variable-optimizer.cc
index 069c86414c..1e93de5124 100644
--- a/deps/v8/src/compiler/loop-variable-optimizer.cc
+++ b/deps/v8/src/compiler/loop-variable-optimizer.cc
@@ -301,7 +301,8 @@ const InductionVariable* LoopVariableOptimizer::FindInductionVariable(
InductionVariable* LoopVariableOptimizer::TryGetInductionVariable(Node* phi) {
DCHECK_EQ(2, phi->op()->ValueInputCount());
- DCHECK_EQ(IrOpcode::kLoop, NodeProperties::GetControlInput(phi)->opcode());
+ Node* loop = NodeProperties::GetControlInput(phi);
+ DCHECK_EQ(IrOpcode::kLoop, loop->opcode());
Node* initial = phi->InputAt(0);
Node* arith = phi->InputAt(1);
InductionVariable::ArithmeticType arithmeticType;
@@ -318,17 +319,20 @@ InductionVariable* LoopVariableOptimizer::TryGetInductionVariable(Node* phi) {
}
// TODO(jarin) Support both sides.
- // XXX
- if (arith->InputAt(0) != phi) {
- if ((arith->InputAt(0)->opcode() != IrOpcode::kJSToNumber &&
- arith->InputAt(0)->opcode() != IrOpcode::kSpeculativeToNumber) ||
- arith->InputAt(0)->InputAt(0) != phi) {
- return nullptr;
+ if (arith->InputAt(0) != phi) return nullptr;
+
+ Node* effect_phi = nullptr;
+ for (Node* use : loop->uses()) {
+ if (use->opcode() == IrOpcode::kEffectPhi) {
+ DCHECK_NULL(effect_phi);
+ effect_phi = use;
}
}
+ if (!effect_phi) return nullptr;
+
Node* incr = arith->InputAt(1);
- return new (zone())
- InductionVariable(phi, arith, incr, initial, zone(), arithmeticType);
+ return new (zone()) InductionVariable(phi, effect_phi, arith, incr, initial,
+ zone(), arithmeticType);
}
void LoopVariableOptimizer::DetectInductionVariables(Node* loop) {
@@ -398,10 +402,14 @@ void LoopVariableOptimizer::ChangeToPhisAndInsertGuards() {
Type* backedge_type = NodeProperties::GetType(backedge_value);
Type* phi_type = NodeProperties::GetType(induction_var->phi());
if (!backedge_type->Is(phi_type)) {
- Node* backedge_control =
- NodeProperties::GetControlInput(induction_var->phi())->InputAt(1);
- Node* rename = graph()->NewNode(common()->TypeGuard(phi_type),
- backedge_value, backedge_control);
+ Node* loop = NodeProperties::GetControlInput(induction_var->phi());
+ Node* backedge_control = loop->InputAt(1);
+ Node* backedge_effect =
+ NodeProperties::GetEffectInput(induction_var->effect_phi(), 1);
+ Node* rename =
+ graph()->NewNode(common()->TypeGuard(phi_type), backedge_value,
+ backedge_effect, backedge_control);
+ induction_var->effect_phi()->ReplaceInput(1, rename);
induction_var->phi()->ReplaceInput(1, rename);
}
}
diff --git a/deps/v8/src/compiler/loop-variable-optimizer.h b/deps/v8/src/compiler/loop-variable-optimizer.h
index 8054ec16c8..9eec614070 100644
--- a/deps/v8/src/compiler/loop-variable-optimizer.h
+++ b/deps/v8/src/compiler/loop-variable-optimizer.h
@@ -18,6 +18,7 @@ class Node;
class InductionVariable : public ZoneObject {
public:
Node* phi() const { return phi_; }
+ Node* effect_phi() const { return effect_phi_; }
Node* arith() const { return arith_; }
Node* increment() const { return increment_; }
Node* init_value() const { return init_value_; }
@@ -39,9 +40,10 @@ class InductionVariable : public ZoneObject {
private:
friend class LoopVariableOptimizer;
- InductionVariable(Node* phi, Node* arith, Node* increment, Node* init_value,
- Zone* zone, ArithmeticType arithmeticType)
+ InductionVariable(Node* phi, Node* effect_phi, Node* arith, Node* increment,
+ Node* init_value, Zone* zone, ArithmeticType arithmeticType)
: phi_(phi),
+ effect_phi_(effect_phi),
arith_(arith),
increment_(increment),
init_value_(init_value),
@@ -53,6 +55,7 @@ class InductionVariable : public ZoneObject {
void AddLowerBound(Node* bound, ConstraintKind kind);
Node* phi_;
+ Node* effect_phi_;
Node* arith_;
Node* increment_;
Node* init_value_;
diff --git a/deps/v8/src/compiler/machine-graph-verifier.cc b/deps/v8/src/compiler/machine-graph-verifier.cc
index 8393a749bb..43f1518461 100644
--- a/deps/v8/src/compiler/machine-graph-verifier.cc
+++ b/deps/v8/src/compiler/machine-graph-verifier.cc
@@ -116,10 +116,6 @@ class MachineRepresentationInferrer {
representation_vector_[node->id()] = PromoteRepresentation(
LoadRepresentationOf(node->op()).representation());
break;
- case IrOpcode::kCheckedLoad:
- representation_vector_[node->id()] = PromoteRepresentation(
- CheckedLoadRepresentationOf(node->op()).representation());
- break;
case IrOpcode::kLoadStackPointer:
case IrOpcode::kLoadFramePointer:
case IrOpcode::kLoadParentFramePointer:
@@ -165,10 +161,6 @@ class MachineRepresentationInferrer {
representation_vector_[node->id()] = PromoteRepresentation(
StoreRepresentationOf(node->op()).representation());
break;
- case IrOpcode::kCheckedStore:
- representation_vector_[node->id()] =
- PromoteRepresentation(CheckedStoreRepresentationOf(node->op()));
- break;
case IrOpcode::kUnalignedStore:
representation_vector_[node->id()] = PromoteRepresentation(
UnalignedStoreRepresentationOf(node->op()));
@@ -273,6 +265,11 @@ class MachineRepresentationInferrer {
MachineRepresentation::kFloat64;
}
break;
+ case IrOpcode::kI32x4ReplaceLane:
+ case IrOpcode::kI32x4Splat:
+ representation_vector_[node->id()] =
+ MachineRepresentation::kSimd128;
+ break;
#undef LABEL
default:
break;
@@ -377,6 +374,14 @@ class MachineRepresentationChecker {
CheckValueInputRepresentationIs(node, 0,
MachineRepresentation::kSimd128);
break;
+ case IrOpcode::kI32x4ReplaceLane:
+ CheckValueInputRepresentationIs(node, 0,
+ MachineRepresentation::kSimd128);
+ CheckValueInputForInt32Op(node, 1);
+ break;
+ case IrOpcode::kI32x4Splat:
+ CheckValueInputForInt32Op(node, 0);
+ break;
#define LABEL(opcode) case IrOpcode::k##opcode:
case IrOpcode::kChangeInt32ToTagged:
case IrOpcode::kChangeUint32ToTagged:
@@ -562,7 +567,7 @@ class MachineRepresentationChecker {
str << "Node #" << node->id() << ":" << *node->op()
<< " in the machine graph is not being checked.";
PrintDebugHelp(str, node);
- FATAL(str.str().c_str());
+ FATAL("%s", str.str().c_str());
}
break;
}
@@ -592,7 +597,7 @@ class MachineRepresentationChecker {
<< input_representation << " which doesn't have a " << representation
<< " representation.";
PrintDebugHelp(str, node);
- FATAL(str.str().c_str());
+ FATAL("%s", str.str().c_str());
}
}
@@ -611,7 +616,7 @@ class MachineRepresentationChecker {
<< " uses node #" << input->id() << ":" << *input->op()
<< " which doesn't have a tagged representation.";
PrintDebugHelp(str, node);
- FATAL(str.str().c_str());
+ FATAL("%s", str.str().c_str());
}
void CheckValueInputIsTaggedOrPointer(Node const* node, int index) {
@@ -644,7 +649,7 @@ class MachineRepresentationChecker {
<< " uses node #" << input->id() << ":" << *input->op()
<< " which doesn't have a tagged or pointer representation.";
PrintDebugHelp(str, node);
- FATAL(str.str().c_str());
+ FATAL("%s", str.str().c_str());
}
}
@@ -661,7 +666,7 @@ class MachineRepresentationChecker {
str << "TypeError: node #" << input->id() << ":" << *input->op()
<< " is untyped.";
PrintDebugHelp(str, node);
- FATAL(str.str().c_str());
+ FATAL("%s", str.str().c_str());
break;
}
default:
@@ -672,7 +677,7 @@ class MachineRepresentationChecker {
<< " uses node #" << input->id() << ":" << *input->op()
<< " which doesn't have an int32-compatible representation.";
PrintDebugHelp(str, node);
- FATAL(str.str().c_str());
+ FATAL("%s", str.str().c_str());
}
void CheckValueInputForInt64Op(Node const* node, int index) {
@@ -687,7 +692,7 @@ class MachineRepresentationChecker {
str << "TypeError: node #" << input->id() << ":" << *input->op()
<< " is untyped.";
PrintDebugHelp(str, node);
- FATAL(str.str().c_str());
+ FATAL("%s", str.str().c_str());
break;
}
@@ -700,7 +705,7 @@ class MachineRepresentationChecker {
<< input_representation
<< " which doesn't have a kWord64 representation.";
PrintDebugHelp(str, node);
- FATAL(str.str().c_str());
+ FATAL("%s", str.str().c_str());
}
void CheckValueInputForFloat32Op(Node const* node, int index) {
@@ -714,7 +719,7 @@ class MachineRepresentationChecker {
<< " uses node #" << input->id() << ":" << *input->op()
<< " which doesn't have a kFloat32 representation.";
PrintDebugHelp(str, node);
- FATAL(str.str().c_str());
+ FATAL("%s", str.str().c_str());
}
void CheckValueInputForFloat64Op(Node const* node, int index) {
@@ -728,7 +733,7 @@ class MachineRepresentationChecker {
<< " uses node #" << input->id() << ":" << *input->op()
<< " which doesn't have a kFloat64 representation.";
PrintDebugHelp(str, node);
- FATAL(str.str().c_str());
+ FATAL("%s", str.str().c_str());
}
void CheckCallInputs(Node const* node) {
@@ -755,7 +760,7 @@ class MachineRepresentationChecker {
}
if (should_log_error) {
PrintDebugHelp(str, node);
- FATAL(str.str().c_str());
+ FATAL("%s", str.str().c_str());
}
}
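Every FATAL call in the verifier above now routes the assembled message through an explicit "%s" instead of using it as the format string itself; since the message embeds operator names and node output verbatim, a stray '%' in it would otherwise be parsed as a printf conversion. A tiny standalone illustration of the difference, using only the C library:

#include <cstdio>
#include <string>

int main() {
  // A diagnostic string built from derived content may contain '%'.
  std::string msg = "Node #7:Int32Add uses 100% of something";
  // Unsafe pattern: the message itself becomes the format string, so the
  // "% o" sequence is treated as a conversion and reads a bogus argument.
  //   std::printf(msg.c_str());          // undefined behavior
  // Safe pattern, mirroring FATAL("%s", str.str().c_str()) above:
  std::printf("%s\n", msg.c_str());
  return 0;
}
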
diff --git a/deps/v8/src/compiler/machine-operator-reducer.cc b/deps/v8/src/compiler/machine-operator-reducer.cc
index e589f0cbd8..97c83b1b82 100644
--- a/deps/v8/src/compiler/machine-operator-reducer.cc
+++ b/deps/v8/src/compiler/machine-operator-reducer.cc
@@ -293,7 +293,7 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
// (x >> K) < C => x < (C << K)
// when C < (M >> K)
const uint32_t c = m.right().Value();
- const uint32_t k = mleft.right().Value() & 0x1f;
+ const uint32_t k = mleft.right().Value() & 0x1F;
if (c < static_cast<uint32_t>(kMaxInt >> k)) {
node->ReplaceInput(0, mleft.left().node());
node->ReplaceInput(1, Uint32Constant(c << k));
@@ -684,7 +684,6 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
return ReduceFloat64InsertHighWord32(node);
case IrOpcode::kStore:
case IrOpcode::kUnalignedStore:
- case IrOpcode::kCheckedStore:
return ReduceStore(node);
case IrOpcode::kFloat64Equal:
case IrOpcode::kFloat64LessThan:
@@ -923,10 +922,7 @@ Reduction MachineOperatorReducer::ReduceStore(Node* node) {
NodeMatcher nm(node);
MachineRepresentation rep;
int value_input;
- if (nm.IsCheckedStore()) {
- rep = CheckedStoreRepresentationOf(node->op());
- value_input = 3;
- } else if (nm.IsStore()) {
+ if (nm.IsStore()) {
rep = StoreRepresentationOf(node->op()).representation();
value_input = 2;
} else {
@@ -941,9 +937,9 @@ Reduction MachineOperatorReducer::ReduceStore(Node* node) {
case IrOpcode::kWord32And: {
Uint32BinopMatcher m(value);
if (m.right().HasValue() && ((rep == MachineRepresentation::kWord8 &&
- (m.right().Value() & 0xff) == 0xff) ||
+ (m.right().Value() & 0xFF) == 0xFF) ||
(rep == MachineRepresentation::kWord16 &&
- (m.right().Value() & 0xffff) == 0xffff))) {
+ (m.right().Value() & 0xFFFF) == 0xFFFF))) {
node->ReplaceInput(value_input, m.left().node());
return Changed(node);
}
@@ -1029,12 +1025,12 @@ Reduction MachineOperatorReducer::ReduceWord32Shifts(Node* node) {
(node->opcode() == IrOpcode::kWord32Shr) ||
(node->opcode() == IrOpcode::kWord32Sar));
if (machine()->Word32ShiftIsSafe()) {
- // Remove the explicit 'and' with 0x1f if the shift provided by the machine
+ // Remove the explicit 'and' with 0x1F if the shift provided by the machine
// instruction matches that required by JavaScript.
Int32BinopMatcher m(node);
if (m.right().IsWord32And()) {
Int32BinopMatcher mright(m.right().node());
- if (mright.right().Is(0x1f)) {
+ if (mright.right().Is(0x1F)) {
node->ReplaceInput(1, mright.left().node());
return Changed(node);
}
@@ -1088,7 +1084,7 @@ Reduction MachineOperatorReducer::ReduceWord32Shr(Node* node) {
if (m.left().IsWord32And() && m.right().HasValue()) {
Uint32BinopMatcher mleft(m.left().node());
if (mleft.right().HasValue()) {
- uint32_t shift = m.right().Value() & 0x1f;
+ uint32_t shift = m.right().Value() & 0x1F;
uint32_t mask = mleft.right().Value();
if ((mask >> shift) == 0) {
// (m >>> s) == 0 implies ((x & m) >>> s) == 0
@@ -1180,7 +1176,7 @@ Reduction MachineOperatorReducer::ReduceWord32And(Node* node) {
if (m.left().IsWord32Shl()) {
Uint32BinopMatcher mleft(m.left().node());
if (mleft.right().HasValue() &&
- (mleft.right().Value() & 0x1f) >=
+ (mleft.right().Value() & 0x1F) >=
base::bits::CountTrailingZeros(mask)) {
// (x << L) & (-1 << K) => x << L iff L >= K
return Replace(mleft.node());
@@ -1344,7 +1340,7 @@ Reduction MachineOperatorReducer::ReduceFloat64InsertLowWord32(Node* node) {
Uint32Matcher mrhs(node->InputAt(1));
if (mlhs.HasValue() && mrhs.HasValue()) {
return ReplaceFloat64(bit_cast<double>(
- (bit_cast<uint64_t>(mlhs.Value()) & V8_UINT64_C(0xFFFFFFFF00000000)) |
+ (bit_cast<uint64_t>(mlhs.Value()) & uint64_t{0xFFFFFFFF00000000}) |
mrhs.Value()));
}
return NoChange();
@@ -1357,7 +1353,7 @@ Reduction MachineOperatorReducer::ReduceFloat64InsertHighWord32(Node* node) {
Uint32Matcher mrhs(node->InputAt(1));
if (mlhs.HasValue() && mrhs.HasValue()) {
return ReplaceFloat64(bit_cast<double>(
- (bit_cast<uint64_t>(mlhs.Value()) & V8_UINT64_C(0xFFFFFFFF)) |
+ (bit_cast<uint64_t>(mlhs.Value()) & uint64_t{0xFFFFFFFF}) |
(static_cast<uint64_t>(mrhs.Value()) << 32)));
}
return NoChange();
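Two of the reducer hunks above lean on small shift identities: the Word32Shr-of-Word32And case uses that (mask >> shift) == 0 implies ((x & mask) >> shift) == 0 for every x, and the (x >> K) < C comparison case uses that, for unsigned values, (x >> k) < c is equivalent to x < (c << k) as long as c << k cannot overflow (guarded by c < (kMaxInt >> k)). A short standalone check of both identities on a sampled range:

#include <cassert>
#include <cstdint>
#include <cstdio>

int main() {
  // (mask >> shift) == 0  =>  ((x & mask) >> shift) == 0 for all x,
  // because x & mask has no bits at or above position `shift`.
  const uint32_t mask = 0xFFu;
  const uint32_t shift = 8;
  assert((mask >> shift) == 0);
  for (uint32_t x = 0; x <= 0xFFFF; ++x) {
    assert(((x & mask) >> shift) == 0);
  }

  // For unsigned x: (x >> k) < c  <=>  x < (c << k), when c << k fits.
  const uint32_t k = 3, c = 100;
  for (uint32_t x = 0; x <= 4096; ++x) {
    assert(((x >> k) < c) == (x < (c << k)));
  }
  std::printf("identities hold on the sampled ranges\n");
  return 0;
}
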
diff --git a/deps/v8/src/compiler/machine-operator.cc b/deps/v8/src/compiler/machine-operator.cc
index 2603b1d18e..66178308be 100644
--- a/deps/v8/src/compiler/machine-operator.cc
+++ b/deps/v8/src/compiler/machine-operator.cc
@@ -59,17 +59,6 @@ UnalignedStoreRepresentation const& UnalignedStoreRepresentationOf(
return OpParameter<UnalignedStoreRepresentation>(op);
}
-CheckedLoadRepresentation CheckedLoadRepresentationOf(Operator const* op) {
- DCHECK_EQ(IrOpcode::kCheckedLoad, op->opcode());
- return OpParameter<CheckedLoadRepresentation>(op);
-}
-
-
-CheckedStoreRepresentation CheckedStoreRepresentationOf(Operator const* op) {
- DCHECK_EQ(IrOpcode::kCheckedStore, op->opcode());
- return OpParameter<CheckedStoreRepresentation>(op);
-}
-
bool operator==(StackSlotRepresentation lhs, StackSlotRepresentation rhs) {
return lhs.size() == rhs.size() && lhs.alignment() == rhs.alignment();
}
@@ -149,7 +138,6 @@ MachineType AtomicOpRepresentationOf(Operator const* op) {
PURE_BINARY_OP_LIST_64(V) \
V(Word32Clz, Operator::kNoProperties, 1, 0, 1) \
V(Word64Clz, Operator::kNoProperties, 1, 0, 1) \
- V(BitcastTaggedToWord, Operator::kNoProperties, 1, 0, 1) \
V(BitcastWordToTaggedSigned, Operator::kNoProperties, 1, 0, 1) \
V(TruncateFloat64ToWord32, Operator::kNoProperties, 1, 0, 1) \
V(ChangeFloat32ToFloat64, Operator::kNoProperties, 1, 0, 1) \
@@ -474,14 +462,6 @@ struct MachineOperatorGlobalCache {
Operator::kNoDeopt | Operator::kNoThrow | Operator::kNoWrite, \
"UnalignedLoad", 2, 1, 1, 1, 1, 0, MachineType::Type()) {} \
}; \
- struct CheckedLoad##Type##Operator final \
- : public Operator1<CheckedLoadRepresentation> { \
- CheckedLoad##Type##Operator() \
- : Operator1<CheckedLoadRepresentation>( \
- IrOpcode::kCheckedLoad, \
- Operator::kNoDeopt | Operator::kNoThrow | Operator::kNoWrite, \
- "CheckedLoad", 3, 1, 1, 1, 1, 0, MachineType::Type()) {} \
- }; \
struct ProtectedLoad##Type##Operator final \
: public Operator1<LoadRepresentation> { \
ProtectedLoad##Type##Operator() \
@@ -492,7 +472,6 @@ struct MachineOperatorGlobalCache {
}; \
Load##Type##Operator kLoad##Type; \
UnalignedLoad##Type##Operator kUnalignedLoad##Type; \
- CheckedLoad##Type##Operator kCheckedLoad##Type; \
ProtectedLoad##Type##Operator kProtectedLoad##Type;
MACHINE_TYPE_LIST(LOAD)
#undef LOAD
@@ -547,15 +526,6 @@ struct MachineOperatorGlobalCache {
"UnalignedStore", 3, 1, 1, 0, 1, 0, \
MachineRepresentation::Type) {} \
}; \
- struct CheckedStore##Type##Operator final \
- : public Operator1<CheckedStoreRepresentation> { \
- CheckedStore##Type##Operator() \
- : Operator1<CheckedStoreRepresentation>( \
- IrOpcode::kCheckedStore, \
- Operator::kNoDeopt | Operator::kNoRead | Operator::kNoThrow, \
- "CheckedStore", 4, 1, 1, 0, 1, 0, MachineRepresentation::Type) { \
- } \
- }; \
struct ProtectedStore##Type##Operator \
: public Operator1<StoreRepresentation> { \
explicit ProtectedStore##Type##Operator() \
@@ -572,7 +542,6 @@ struct MachineOperatorGlobalCache {
kStore##Type##PointerWriteBarrier; \
Store##Type##FullWriteBarrier##Operator kStore##Type##FullWriteBarrier; \
UnalignedStore##Type##Operator kUnalignedStore##Type; \
- CheckedStore##Type##Operator kCheckedStore##Type; \
ProtectedStore##Type##Operator kProtectedStore##Type;
MACHINE_REPRESENTATION_LIST(STORE)
#undef STORE
@@ -644,10 +613,25 @@ struct MachineOperatorGlobalCache {
BitcastWordToTaggedOperator()
: Operator(IrOpcode::kBitcastWordToTagged,
Operator::kEliminatable | Operator::kNoWrite,
- "BitcastWordToTagged", 1, 0, 0, 1, 0, 0) {}
+ "BitcastWordToTagged", 1, 1, 1, 1, 1, 0) {}
};
BitcastWordToTaggedOperator kBitcastWordToTagged;
+ struct BitcastTaggedToWordOperator : public Operator {
+ BitcastTaggedToWordOperator()
+ : Operator(IrOpcode::kBitcastTaggedToWord,
+ Operator::kEliminatable | Operator::kNoWrite,
+ "BitcastTaggedToWord", 1, 1, 1, 1, 1, 0) {}
+ };
+ BitcastTaggedToWordOperator kBitcastTaggedToWord;
+
+ struct SpeculationFenceOperator : public Operator {
+ SpeculationFenceOperator()
+ : Operator(IrOpcode::kSpeculationFence, Operator::kNoThrow,
+ "SpeculationFence", 0, 1, 1, 0, 1, 0) {}
+ };
+ SpeculationFenceOperator kSpeculationFence;
+
struct DebugAbortOperator : public Operator {
DebugAbortOperator()
: Operator(IrOpcode::kDebugAbort, Operator::kNoThrow, "DebugAbort", 1,
@@ -823,6 +807,10 @@ const Operator* MachineOperatorBuilder::BitcastWordToTagged() {
return &cache_.kBitcastWordToTagged;
}
+const Operator* MachineOperatorBuilder::BitcastTaggedToWord() {
+ return &cache_.kBitcastTaggedToWord;
+}
+
const Operator* MachineOperatorBuilder::DebugAbort() {
return &cache_.kDebugAbort;
}
@@ -835,33 +823,6 @@ const Operator* MachineOperatorBuilder::Comment(const char* msg) {
return new (zone_) CommentOperator(msg);
}
-const Operator* MachineOperatorBuilder::CheckedLoad(
- CheckedLoadRepresentation rep) {
-#define LOAD(Type) \
- if (rep == MachineType::Type()) { \
- return &cache_.kCheckedLoad##Type; \
- }
- MACHINE_TYPE_LIST(LOAD)
-#undef LOAD
- UNREACHABLE();
-}
-
-
-const Operator* MachineOperatorBuilder::CheckedStore(
- CheckedStoreRepresentation rep) {
- switch (rep) {
-#define STORE(kRep) \
- case MachineRepresentation::kRep: \
- return &cache_.kCheckedStore##kRep;
- MACHINE_REPRESENTATION_LIST(STORE)
-#undef STORE
- case MachineRepresentation::kBit:
- case MachineRepresentation::kNone:
- break;
- }
- UNREACHABLE();
-}
-
const Operator* MachineOperatorBuilder::AtomicLoad(LoadRepresentation rep) {
#define LOAD(Type) \
if (rep == MachineType::Type()) { \
@@ -952,6 +913,11 @@ const Operator* MachineOperatorBuilder::AtomicXor(MachineType rep) {
UNREACHABLE();
}
+const OptionalOperator MachineOperatorBuilder::SpeculationFence() {
+ return OptionalOperator(flags_ & kSpeculationFence,
+ &cache_.kSpeculationFence);
+}
+
#define SIMD_LANE_OPS(Type, lane_count) \
const Operator* MachineOperatorBuilder::Type##ExtractLane( \
int32_t lane_index) { \
diff --git a/deps/v8/src/compiler/machine-operator.h b/deps/v8/src/compiler/machine-operator.h
index 3b6634c8bc..10b4b15701 100644
--- a/deps/v8/src/compiler/machine-operator.h
+++ b/deps/v8/src/compiler/machine-operator.h
@@ -83,17 +83,6 @@ typedef MachineRepresentation UnalignedStoreRepresentation;
UnalignedStoreRepresentation const& UnalignedStoreRepresentationOf(
Operator const*);
-// A CheckedLoad needs a MachineType.
-typedef MachineType CheckedLoadRepresentation;
-
-CheckedLoadRepresentation CheckedLoadRepresentationOf(Operator const*);
-
-
-// A CheckedStore needs a MachineType.
-typedef MachineRepresentation CheckedStoreRepresentation;
-
-CheckedStoreRepresentation CheckedStoreRepresentationOf(Operator const*);
-
class StackSlotRepresentation final {
public:
StackSlotRepresentation(int size, int alignment)
@@ -154,13 +143,15 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
kWord64ReverseBytes = 1u << 19,
kInt32AbsWithOverflow = 1u << 20,
kInt64AbsWithOverflow = 1u << 21,
+ kSpeculationFence = 1u << 22,
kAllOptionalOps =
kFloat32RoundDown | kFloat64RoundDown | kFloat32RoundUp |
kFloat64RoundUp | kFloat32RoundTruncate | kFloat64RoundTruncate |
kFloat64RoundTiesAway | kFloat32RoundTiesEven | kFloat64RoundTiesEven |
kWord32Ctz | kWord64Ctz | kWord32Popcnt | kWord64Popcnt |
kWord32ReverseBits | kWord64ReverseBits | kWord32ReverseBytes |
- kWord64ReverseBytes | kInt32AbsWithOverflow | kInt64AbsWithOverflow
+ kWord64ReverseBytes | kInt32AbsWithOverflow | kInt64AbsWithOverflow |
+ kSpeculationFence
};
typedef base::Flags<Flag, unsigned> Flags;
@@ -606,11 +597,6 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
const Operator* LoadFramePointer();
const Operator* LoadParentFramePointer();
- // checked-load heap, index, length
- const Operator* CheckedLoad(CheckedLoadRepresentation);
- // checked-store heap, index, length, value
- const Operator* CheckedStore(CheckedStoreRepresentation);
-
// atomic-load [base + index]
const Operator* AtomicLoad(LoadRepresentation rep);
// atomic-store [base + index], value
@@ -630,6 +616,8 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
// atomic-xor [base + index], value
const Operator* AtomicXor(MachineType rep);
+ const OptionalOperator SpeculationFence();
+
// Target machine word-size assumed by this builder.
bool Is32() const { return word() == MachineRepresentation::kWord32; }
bool Is64() const { return word() == MachineRepresentation::kWord64; }
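SpeculationFence is added above as an optional machine operator: the builder only hands it out together with the new kSpeculationFence capability bit, so (as with the other OptionalOperator entries) a caller is expected to check for support before emitting it. A simplified standalone sketch of that flag-gated pattern; the names below are illustrative, not the real V8 API:

#include <cassert>
#include <cstdint>

struct Op { const char* mnemonic; };

// An operator pointer paired with the capability bit that says whether the
// target actually provides it.
class OptionalOp {
 public:
  OptionalOp(bool supported, const Op* op) : supported_(supported), op_(op) {}
  bool IsSupported() const { return supported_; }
  const Op* op() const { assert(supported_); return op_; }

 private:
  bool supported_;
  const Op* op_;
};

enum Flag : uint32_t { kSpeculationFence = 1u << 22 };

int main() {
  static const Op fence{"speculation-fence"};
  uint32_t target_flags = kSpeculationFence;  // pretend the target has it
  OptionalOp maybe_fence((target_flags & kSpeculationFence) != 0, &fence);
  // Callers only emit the fence when the capability bit is present.
  return maybe_fence.IsSupported() ? 0 : 1;
}
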
diff --git a/deps/v8/src/compiler/memory-optimizer.cc b/deps/v8/src/compiler/memory-optimizer.cc
index 767ada506a..596204e214 100644
--- a/deps/v8/src/compiler/memory-optimizer.cc
+++ b/deps/v8/src/compiler/memory-optimizer.cc
@@ -92,8 +92,6 @@ void MemoryOptimizer::VisitNode(Node* node, AllocationState const* state) {
return VisitStoreElement(node, state);
case IrOpcode::kStoreField:
return VisitStoreField(node, state);
- case IrOpcode::kCheckedLoad:
- case IrOpcode::kCheckedStore:
case IrOpcode::kDeoptimizeIf:
case IrOpcode::kDeoptimizeUnless:
case IrOpcode::kIfException:
diff --git a/deps/v8/src/compiler/mips/code-generator-mips.cc b/deps/v8/src/compiler/mips/code-generator-mips.cc
index b7301749cf..3b57081c9e 100644
--- a/deps/v8/src/compiler/mips/code-generator-mips.cc
+++ b/deps/v8/src/compiler/mips/code-generator-mips.cc
@@ -143,46 +143,6 @@ static inline bool HasRegisterInput(Instruction* instr, size_t index) {
namespace {
-class OutOfLineLoadSingle final : public OutOfLineCode {
- public:
- OutOfLineLoadSingle(CodeGenerator* gen, FloatRegister result)
- : OutOfLineCode(gen), result_(result) {}
-
- void Generate() final {
- __ Move(result_, std::numeric_limits<float>::quiet_NaN());
- }
-
- private:
- FloatRegister const result_;
-};
-
-
-class OutOfLineLoadDouble final : public OutOfLineCode {
- public:
- OutOfLineLoadDouble(CodeGenerator* gen, DoubleRegister result)
- : OutOfLineCode(gen), result_(result) {}
-
- void Generate() final {
- __ Move(result_, std::numeric_limits<double>::quiet_NaN());
- }
-
- private:
- DoubleRegister const result_;
-};
-
-
-class OutOfLineLoadInteger final : public OutOfLineCode {
- public:
- OutOfLineLoadInteger(CodeGenerator* gen, Register result)
- : OutOfLineCode(gen), result_(result) {}
-
- void Generate() final { __ mov(result_, zero_reg); }
-
- private:
- Register const result_;
-};
-
-
class OutOfLineRound : public OutOfLineCode {
public:
OutOfLineRound(CodeGenerator* gen, DoubleRegister result)
@@ -391,82 +351,6 @@ FPUCondition FlagsConditionToConditionCmpFPU(bool& predicate,
} // namespace
-#define ASSEMBLE_CHECKED_LOAD_FLOAT(width, asm_instr) \
- do { \
- auto result = i.Output##width##Register(); \
- auto ool = new (zone()) OutOfLineLoad##width(this, result); \
- if (instr->InputAt(0)->IsRegister()) { \
- auto offset = i.InputRegister(0); \
- __ Branch(USE_DELAY_SLOT, ool->entry(), hs, offset, i.InputOperand(1)); \
- __ Addu(kScratchReg, i.InputRegister(2), offset); \
- __ asm_instr(result, MemOperand(kScratchReg, 0)); \
- } else { \
- auto offset = i.InputOperand(0).immediate(); \
- __ Branch(ool->entry(), ls, i.InputRegister(1), Operand(offset)); \
- __ asm_instr(result, MemOperand(i.InputRegister(2), offset)); \
- } \
- __ bind(ool->exit()); \
- } while (0)
-
-#define ASSEMBLE_CHECKED_LOAD_INTEGER(asm_instr) \
- do { \
- auto result = i.OutputRegister(); \
- auto ool = new (zone()) OutOfLineLoadInteger(this, result); \
- if (instr->InputAt(0)->IsRegister()) { \
- auto offset = i.InputRegister(0); \
- __ Branch(USE_DELAY_SLOT, ool->entry(), hs, offset, i.InputOperand(1)); \
- __ Addu(kScratchReg, i.InputRegister(2), offset); \
- __ asm_instr(result, MemOperand(kScratchReg, 0)); \
- } else { \
- auto offset = i.InputOperand(0).immediate(); \
- __ Branch(ool->entry(), ls, i.InputRegister(1), Operand(offset)); \
- __ asm_instr(result, MemOperand(i.InputRegister(2), offset)); \
- } \
- __ bind(ool->exit()); \
- } while (0)
-
-#define ASSEMBLE_CHECKED_STORE_FLOAT(width, asm_instr) \
- do { \
- Label done; \
- if (instr->InputAt(0)->IsRegister()) { \
- auto offset = i.InputRegister(0); \
- auto value = i.InputOrZero##width##Register(2); \
- if (value == kDoubleRegZero && !__ IsDoubleZeroRegSet()) { \
- __ Move(kDoubleRegZero, 0.0); \
- } \
- __ Branch(USE_DELAY_SLOT, &done, hs, offset, i.InputOperand(1)); \
- __ Addu(kScratchReg, i.InputRegister(3), offset); \
- __ asm_instr(value, MemOperand(kScratchReg, 0)); \
- } else { \
- auto offset = i.InputOperand(0).immediate(); \
- auto value = i.InputOrZero##width##Register(2); \
- if (value == kDoubleRegZero && !__ IsDoubleZeroRegSet()) { \
- __ Move(kDoubleRegZero, 0.0); \
- } \
- __ Branch(&done, ls, i.InputRegister(1), Operand(offset)); \
- __ asm_instr(value, MemOperand(i.InputRegister(3), offset)); \
- } \
- __ bind(&done); \
- } while (0)
-
-#define ASSEMBLE_CHECKED_STORE_INTEGER(asm_instr) \
- do { \
- Label done; \
- if (instr->InputAt(0)->IsRegister()) { \
- auto offset = i.InputRegister(0); \
- auto value = i.InputOrZeroRegister(2); \
- __ Branch(USE_DELAY_SLOT, &done, hs, offset, i.InputOperand(1)); \
- __ Addu(kScratchReg, i.InputRegister(3), offset); \
- __ asm_instr(value, MemOperand(kScratchReg, 0)); \
- } else { \
- auto offset = i.InputOperand(0).immediate(); \
- auto value = i.InputOrZeroRegister(2); \
- __ Branch(&done, ls, i.InputRegister(1), Operand(offset)); \
- __ asm_instr(value, MemOperand(i.InputRegister(3), offset)); \
- } \
- __ bind(&done); \
- } while (0)
-
#define ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(mode) \
if (IsMipsArchVariant(kMips32r6)) { \
__ cfc1(kScratchReg, FCSR); \
@@ -787,7 +671,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Call(wasm_code, info()->IsWasm() ? RelocInfo::WASM_CALL
: RelocInfo::JS_TO_WASM_CALL);
} else {
- __ Call(at, i.InputRegister(0), 0);
+ __ Call(i.InputRegister(0));
}
RecordCallPosition(instr);
frame_access_state()->ClearSPDelta();
@@ -816,7 +700,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Jump(wasm_code, info()->IsWasm() ? RelocInfo::WASM_CALL
: RelocInfo::JS_TO_WASM_CALL);
} else {
- __ Jump(at, i.InputRegister(0), 0);
+ __ Jump(i.InputRegister(0));
}
frame_access_state()->ClearSPDelta();
frame_access_state()->SetFrameAccessToDefault();
@@ -834,7 +718,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (FLAG_debug_code) {
// Check the function's context matches the context argument.
__ lw(kScratchReg, FieldMemOperand(func, JSFunction::kContextOffset));
- __ Assert(eq, kWrongFunctionContext, cp, Operand(kScratchReg));
+ __ Assert(eq, AbortReason::kWrongFunctionContext, cp,
+ Operand(kScratchReg));
}
__ lw(at, FieldMemOperand(func, JSFunction::kCodeOffset));
__ Call(at, Code::kHeaderSize - kHeapObjectTag);
@@ -998,7 +883,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (FLAG_debug_code && alignment > 0) {
// Verify that the output_register is properly aligned
__ And(kScratchReg, i.OutputRegister(), Operand(kPointerSize - 1));
- __ Assert(eq, kAllocationIsNotDoubleAligned, kScratchReg,
+ __ Assert(eq, AbortReason::kAllocationIsNotDoubleAligned, kScratchReg,
Operand(zero_reg));
}
@@ -1203,7 +1088,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Register dst = i.OutputRegister();
uint32_t B0 = 0x55555555; // (T)~(T)0/3
uint32_t B1 = 0x33333333; // (T)~(T)0/15*3
- uint32_t B2 = 0x0f0f0f0f; // (T)~(T)0/255*15
+ uint32_t B2 = 0x0F0F0F0F; // (T)~(T)0/255*15
uint32_t value = 0x01010101; // (T)~(T)0/255
uint32_t shift = 24; // (sizeof(T) - 1) * BITS_PER_BYTE
__ srl(kScratchReg, src, 1);
@@ -1742,14 +1627,45 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kMipsPush:
if (instr->InputAt(0)->IsFPRegister()) {
- __ Sdc1(i.InputDoubleRegister(0), MemOperand(sp, -kDoubleSize));
- __ Subu(sp, sp, Operand(kDoubleSize));
- frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
+ LocationOperand* op = LocationOperand::cast(instr->InputAt(0));
+ switch (op->representation()) {
+ case MachineRepresentation::kFloat32:
+ __ swc1(i.InputFloatRegister(0), MemOperand(sp, -kFloatSize));
+ __ Subu(sp, sp, Operand(kFloatSize));
+ frame_access_state()->IncreaseSPDelta(kFloatSize / kPointerSize);
+ break;
+ case MachineRepresentation::kFloat64:
+ __ Sdc1(i.InputDoubleRegister(0), MemOperand(sp, -kDoubleSize));
+ __ Subu(sp, sp, Operand(kDoubleSize));
+ frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
+ break;
+ default: {
+ UNREACHABLE();
+ break;
+ }
+ }
} else {
__ Push(i.InputRegister(0));
frame_access_state()->IncreaseSPDelta(1);
}
break;
+ case kMipsPeek: {
+ int reverse_slot = MiscField::decode(instr->opcode());
+ int offset =
+ FrameSlotToFPOffset(frame()->GetTotalFrameSlotCount() - reverse_slot);
+ if (instr->OutputAt(0)->IsFPRegister()) {
+ LocationOperand* op = LocationOperand::cast(instr->OutputAt(0));
+ if (op->representation() == MachineRepresentation::kFloat64) {
+ __ Ldc1(i.OutputDoubleRegister(), MemOperand(fp, offset));
+ } else {
+ DCHECK_EQ(op->representation(), MachineRepresentation::kFloat32);
+ __ lwc1(i.OutputSingleRegister(0), MemOperand(fp, offset));
+ }
+ } else {
+ __ lw(i.OutputRegister(0), MemOperand(fp, offset));
+ }
+ break;
+ }
case kMipsStackClaim: {
__ Subu(sp, sp, Operand(i.InputInt32(0)));
frame_access_state()->IncreaseSPDelta(i.InputInt32(0) / kPointerSize);
@@ -1773,46 +1689,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ ByteSwapSigned(i.OutputRegister(0), i.InputRegister(0), 4);
break;
}
- case kCheckedLoadInt8:
- ASSEMBLE_CHECKED_LOAD_INTEGER(lb);
- break;
- case kCheckedLoadUint8:
- ASSEMBLE_CHECKED_LOAD_INTEGER(lbu);
- break;
- case kCheckedLoadInt16:
- ASSEMBLE_CHECKED_LOAD_INTEGER(lh);
- break;
- case kCheckedLoadUint16:
- ASSEMBLE_CHECKED_LOAD_INTEGER(lhu);
- break;
- case kCheckedLoadWord32:
- ASSEMBLE_CHECKED_LOAD_INTEGER(lw);
- break;
- case kCheckedLoadFloat32:
- ASSEMBLE_CHECKED_LOAD_FLOAT(Single, lwc1);
- break;
- case kCheckedLoadFloat64:
- ASSEMBLE_CHECKED_LOAD_FLOAT(Double, Ldc1);
- break;
- case kCheckedStoreWord8:
- ASSEMBLE_CHECKED_STORE_INTEGER(sb);
- break;
- case kCheckedStoreWord16:
- ASSEMBLE_CHECKED_STORE_INTEGER(sh);
- break;
- case kCheckedStoreWord32:
- ASSEMBLE_CHECKED_STORE_INTEGER(sw);
- break;
- case kCheckedStoreFloat32:
- ASSEMBLE_CHECKED_STORE_FLOAT(Single, swc1);
- break;
- case kCheckedStoreFloat64:
- ASSEMBLE_CHECKED_STORE_FLOAT(Double, Sdc1);
- break;
- case kCheckedLoadWord64:
- case kCheckedStoreWord64:
- UNREACHABLE(); // currently unsupported checked int64 load/store.
- break;
case kAtomicLoadInt8:
ASSEMBLE_ATOMIC_LOAD_INTEGER(lb);
break;
@@ -2593,7 +2469,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (src0 == src1) {
// Unary S32x4 shuffles are handled with shf.w instruction
- unsigned lane = shuffle & 0xff;
+ unsigned lane = shuffle & 0xFF;
if (FLAG_debug_code) {
// range of all four lanes, for unary instruction,
// should belong to the same range, which can be one of these:
@@ -2601,7 +2477,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (lane >= 4) {
int32_t shuffle_helper = shuffle;
for (int i = 0; i < 4; ++i) {
- lane = shuffle_helper & 0xff;
+ lane = shuffle_helper & 0xFF;
CHECK_GE(lane, 4);
shuffle_helper >>= 8;
}
@@ -2609,7 +2485,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
uint32_t i8 = 0;
for (int i = 0; i < 4; i++) {
- lane = shuffle & 0xff;
+ lane = shuffle & 0xFF;
if (lane >= 4) {
lane -= 4;
}
@@ -3163,7 +3039,7 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
gen_->RecordSafepoint(reference_map, Safepoint::kSimple, 0,
Safepoint::kNoLazyDeopt);
if (FLAG_debug_code) {
- __ stop(GetBailoutReason(kUnexpectedReturnFromWasmTrap));
+ __ stop(GetAbortReason(AbortReason::kUnexpectedReturnFromWasmTrap));
}
}
}
@@ -3438,7 +3314,7 @@ void CodeGenerator::AssembleConstructFrame() {
if (info()->is_osr()) {
// TurboFan OSR-compiled functions cannot be entered directly.
- __ Abort(kShouldNotDirectlyEnterOsrFunction);
+ __ Abort(AbortReason::kShouldNotDirectlyEnterOsrFunction);
// Unoptimized code jumps directly to this entrypoint while the unoptimized
// frame is still on the stack. Optimized code uses OSR values directly from
@@ -3451,10 +3327,12 @@ void CodeGenerator::AssembleConstructFrame() {
const RegList saves = descriptor->CalleeSavedRegisters();
const RegList saves_fpu = descriptor->CalleeSavedFPRegisters();
+ const int returns = frame()->GetReturnSlotCount();
- // Skip callee-saved slots, which are pushed below.
+ // Skip callee-saved and return slots, which are pushed below.
shrink_slots -= base::bits::CountPopulation(saves);
shrink_slots -= 2 * base::bits::CountPopulation(saves_fpu);
+ shrink_slots -= returns;
if (shrink_slots > 0) {
__ Subu(sp, sp, Operand(shrink_slots * kPointerSize));
}
@@ -3469,12 +3347,22 @@ void CodeGenerator::AssembleConstructFrame() {
__ MultiPush(saves);
DCHECK_EQ(kNumCalleeSaved, base::bits::CountPopulation(saves) + 1);
}
+
+ if (returns != 0) {
+ // Create space for returns.
+ __ Subu(sp, sp, Operand(returns * kPointerSize));
+ }
}
void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
int pop_count = static_cast<int>(descriptor->StackParameterCount());
+ const int returns = frame()->GetReturnSlotCount();
+ if (returns != 0) {
+ __ Addu(sp, sp, Operand(returns * kPointerSize));
+ }
+
// Restore GP registers.
const RegList saves = descriptor->CalleeSavedRegisters();
if (saves != 0) {
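In the prologue and epilogue hunks above the frame now also carries GetReturnSlotCount() slots: they are excluded from the generic shrink_slots adjustment, reserved with their own Subu right after the callee-saved pushes, and released with a matching Addu before the saved registers are restored. A rough standalone model of that slot arithmetic, with made-up register counts (bitset::count stands in for the popcount helper):

#include <bitset>
#include <cstdint>
#include <cstdio>

int main() {
  const int kPointerSize = 4;    // 32-bit MIPS
  uint32_t saves = 0xF0u;        // 4 callee-saved GP registers
  uint32_t saves_fpu = 0x3u;     // 2 callee-saved FP registers, 2 slots each
  int returns = 2;               // stack return slots
  int shrink_slots = 12;         // slots requested by the frame

  // Skip callee-saved and return slots, which are pushed/reserved below.
  shrink_slots -= static_cast<int>(std::bitset<32>(saves).count());
  shrink_slots -= 2 * static_cast<int>(std::bitset<32>(saves_fpu).count());
  shrink_slots -= returns;
  std::printf("generic adjustment: %d bytes\n", shrink_slots * kPointerSize);
  // Prologue: __ Subu(sp, sp, returns * kPointerSize) reserves the returns.
  // Epilogue: __ Addu(sp, sp, returns * kPointerSize) releases them again.
  std::printf("return area: %d bytes\n", returns * kPointerSize);
  return 0;
}
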
diff --git a/deps/v8/src/compiler/mips/instruction-codes-mips.h b/deps/v8/src/compiler/mips/instruction-codes-mips.h
index 3a2a873e48..dd789d0196 100644
--- a/deps/v8/src/compiler/mips/instruction-codes-mips.h
+++ b/deps/v8/src/compiler/mips/instruction-codes-mips.h
@@ -128,6 +128,7 @@ namespace compiler {
V(MipsFloat32Min) \
V(MipsFloat64Min) \
V(MipsPush) \
+ V(MipsPeek) \
V(MipsStoreToStackSlot) \
V(MipsByteSwap32) \
V(MipsStackClaim) \
diff --git a/deps/v8/src/compiler/mips/instruction-selector-mips.cc b/deps/v8/src/compiler/mips/instruction-selector-mips.cc
index 1053763f0d..35b8a2396d 100644
--- a/deps/v8/src/compiler/mips/instruction-selector-mips.cc
+++ b/deps/v8/src/compiler/mips/instruction-selector-mips.cc
@@ -36,7 +36,7 @@ class MipsOperandGenerator final : public OperandGenerator {
InstructionOperand UseRegisterOrImmediateZero(Node* node) {
if ((IsIntegerConstant(node) && (GetIntegerConstantValue(node) == 0)) ||
(IsFloatConstant(node) &&
- (bit_cast<int64_t>(GetFloatConstantValue(node)) == V8_INT64_C(0)))) {
+ (bit_cast<int64_t>(GetFloatConstantValue(node)) == 0))) {
return UseImmediate(node);
}
return UseRegister(node);
@@ -92,18 +92,6 @@ class MipsOperandGenerator final : public OperandGenerator {
case kMipsSwc1:
case kMipsLdc1:
case kMipsSdc1:
- case kCheckedLoadInt8:
- case kCheckedLoadUint8:
- case kCheckedLoadInt16:
- case kCheckedLoadUint16:
- case kCheckedLoadWord32:
- case kCheckedStoreWord8:
- case kCheckedStoreWord16:
- case kCheckedStoreWord32:
- case kCheckedLoadFloat32:
- case kCheckedLoadFloat64:
- case kCheckedStoreFloat32:
- case kCheckedStoreFloat64:
// true even for 32b values, offsets > 16b
// are handled in assembler-mips.cc
return is_int32(value);
@@ -233,7 +221,8 @@ static void VisitBinop(InstructionSelector* selector, Node* node,
opcode = cont->Encode(opcode);
if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
- cont->kind(), cont->reason(), cont->frame_state());
+ cont->kind(), cont->reason(), cont->feedback(),
+ cont->frame_state());
} else {
selector->Emit(opcode, output_count, outputs, input_count, inputs);
}
@@ -432,7 +421,7 @@ void InstructionSelector::VisitWord32And(Node* node) {
Int32BinopMatcher mleft(m.left().node());
if (mleft.right().HasValue()) {
// Any shift value can match; int32 shifts use `value % 32`.
- uint32_t lsb = mleft.right().Value() & 0x1f;
+ uint32_t lsb = mleft.right().Value() & 0x1F;
// Ext cannot extract bits past the register size, however since
// shifting the original value would have introduced some zeros we can
@@ -531,7 +520,7 @@ void InstructionSelector::VisitWord32Shl(Node* node) {
void InstructionSelector::VisitWord32Shr(Node* node) {
Int32BinopMatcher m(node);
if (m.left().IsWord32And() && m.right().HasValue()) {
- uint32_t lsb = m.right().Value() & 0x1f;
+ uint32_t lsb = m.right().Value() & 0x1F;
Int32BinopMatcher mleft(m.left().node());
if (mleft.right().HasValue() && mleft.right().Value() != 0) {
// Select Ext for Shr(And(x, mask), imm) where the result of the mask is
@@ -1181,8 +1170,8 @@ void InstructionSelector::EmitPrepareArguments(
// Poke any stack arguments.
int slot = kCArgSlotCount;
for (PushParameter input : (*arguments)) {
- if (input.node()) {
- Emit(kMipsStoreToStackSlot, g.NoOutput(), g.UseRegister(input.node()),
+ if (input.node) {
+ Emit(kMipsStoreToStackSlot, g.NoOutput(), g.UseRegister(input.node),
g.TempImmediate(slot << kPointerSizeLog2));
++slot;
}
@@ -1191,19 +1180,53 @@ void InstructionSelector::EmitPrepareArguments(
// Possibly align stack here for functions.
int push_count = static_cast<int>(descriptor->StackParameterCount());
if (push_count > 0) {
+ // Calculate needed space
+ int stack_size = 0;
+ for (size_t n = 0; n < arguments->size(); ++n) {
+ PushParameter input = (*arguments)[n];
+ if (input.node) {
+ stack_size += input.location.GetSizeInPointers();
+ }
+ }
Emit(kMipsStackClaim, g.NoOutput(),
- g.TempImmediate(push_count << kPointerSizeLog2));
+ g.TempImmediate(stack_size << kPointerSizeLog2));
}
for (size_t n = 0; n < arguments->size(); ++n) {
PushParameter input = (*arguments)[n];
- if (input.node()) {
- Emit(kMipsStoreToStackSlot, g.NoOutput(), g.UseRegister(input.node()),
+ if (input.node) {
+ Emit(kMipsStoreToStackSlot, g.NoOutput(), g.UseRegister(input.node),
g.TempImmediate(n << kPointerSizeLog2));
}
}
}
}
+void InstructionSelector::EmitPrepareResults(ZoneVector<PushParameter>* results,
+ const CallDescriptor* descriptor,
+ Node* node) {
+ MipsOperandGenerator g(this);
+
+ int reverse_slot = 0;
+ for (PushParameter output : *results) {
+ if (!output.location.IsCallerFrameSlot()) continue;
+ ++reverse_slot;
+ // Skip any alignment holes in nodes.
+ if (output.node != nullptr) {
+ DCHECK(!descriptor->IsCFunctionCall());
+ if (output.location.GetType() == MachineType::Float32()) {
+ MarkAsFloat32(output.node);
+ } else if (output.location.GetType() == MachineType::Float64()) {
+ MarkAsFloat64(output.node);
+ }
+ InstructionOperand result = g.DefineAsRegister(output.node);
+ Emit(kMipsPeek | MiscField::encode(reverse_slot), result);
+ }
+ if (output.location.GetType() == MachineType::Float64()) {
+      // Float64 requires an implicit second slot.
+ ++reverse_slot;
+ }
+ }
+}
bool InstructionSelector::IsTailCallAddressImmediate() { return false; }
@@ -1312,99 +1335,6 @@ void InstructionSelector::VisitUnalignedStore(Node* node) {
}
}
-void InstructionSelector::VisitCheckedLoad(Node* node) {
- CheckedLoadRepresentation load_rep = CheckedLoadRepresentationOf(node->op());
- MipsOperandGenerator g(this);
- Node* const buffer = node->InputAt(0);
- Node* const offset = node->InputAt(1);
- Node* const length = node->InputAt(2);
- ArchOpcode opcode = kArchNop;
- switch (load_rep.representation()) {
- case MachineRepresentation::kWord8:
- opcode = load_rep.IsSigned() ? kCheckedLoadInt8 : kCheckedLoadUint8;
- break;
- case MachineRepresentation::kWord16:
- opcode = load_rep.IsSigned() ? kCheckedLoadInt16 : kCheckedLoadUint16;
- break;
- case MachineRepresentation::kWord32:
- opcode = kCheckedLoadWord32;
- break;
- case MachineRepresentation::kFloat32:
- opcode = kCheckedLoadFloat32;
- break;
- case MachineRepresentation::kFloat64:
- opcode = kCheckedLoadFloat64;
- break;
- case MachineRepresentation::kBit: // Fall through.
- case MachineRepresentation::kTaggedSigned: // Fall through.
- case MachineRepresentation::kTaggedPointer: // Fall through.
- case MachineRepresentation::kTagged: // Fall through.
- case MachineRepresentation::kWord64: // Fall through.
- case MachineRepresentation::kSimd128: // Fall through.
- case MachineRepresentation::kNone:
- UNREACHABLE();
- return;
- }
- InstructionOperand offset_operand = g.CanBeImmediate(offset, opcode)
- ? g.UseImmediate(offset)
- : g.UseRegister(offset);
-
- InstructionOperand length_operand = (!g.CanBeImmediate(offset, opcode))
- ? g.CanBeImmediate(length, opcode)
- ? g.UseImmediate(length)
- : g.UseRegister(length)
- : g.UseRegister(length);
-
- Emit(opcode | AddressingModeField::encode(kMode_MRI),
- g.DefineAsRegister(node), offset_operand, length_operand,
- g.UseRegister(buffer));
-}
-
-
-void InstructionSelector::VisitCheckedStore(Node* node) {
- MachineRepresentation rep = CheckedStoreRepresentationOf(node->op());
- MipsOperandGenerator g(this);
- Node* const buffer = node->InputAt(0);
- Node* const offset = node->InputAt(1);
- Node* const length = node->InputAt(2);
- Node* const value = node->InputAt(3);
- ArchOpcode opcode = kArchNop;
- switch (rep) {
- case MachineRepresentation::kWord8:
- opcode = kCheckedStoreWord8;
- break;
- case MachineRepresentation::kWord16:
- opcode = kCheckedStoreWord16;
- break;
- case MachineRepresentation::kWord32:
- opcode = kCheckedStoreWord32;
- break;
- case MachineRepresentation::kFloat32:
- opcode = kCheckedStoreFloat32;
- break;
- case MachineRepresentation::kFloat64:
- opcode = kCheckedStoreFloat64;
- break;
- default:
- UNREACHABLE();
- return;
- }
- InstructionOperand offset_operand = g.CanBeImmediate(offset, opcode)
- ? g.UseImmediate(offset)
- : g.UseRegister(offset);
-
- InstructionOperand length_operand = (!g.CanBeImmediate(offset, opcode))
- ? g.CanBeImmediate(length, opcode)
- ? g.UseImmediate(length)
- : g.UseRegister(length)
- : g.UseRegister(length);
-
- Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
- offset_operand, length_operand, g.UseRegisterOrImmediateZero(value),
- g.UseRegister(buffer));
-}
-
-
namespace {
// Shared routine for multiple compare operations.
static void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
@@ -1417,7 +1347,8 @@ static void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
g.Label(cont->true_block()), g.Label(cont->false_block()));
} else if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, g.NoOutput(), left, right, cont->kind(),
- cont->reason(), cont->frame_state());
+ cont->reason(), cont->feedback(),
+ cont->frame_state());
} else if (cont->IsSet()) {
selector->Emit(opcode, g.DefineAsRegister(cont->result()), left, right);
} else {
@@ -1630,7 +1561,7 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
} else if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, g.NoOutput(), value_operand,
g.TempImmediate(0), cont->kind(), cont->reason(),
- cont->frame_state());
+ cont->feedback(), cont->frame_state());
} else if (cont->IsSet()) {
selector->Emit(opcode, g.DefineAsRegister(cont->result()), value_operand,
g.TempImmediate(0));
@@ -1652,14 +1583,14 @@ void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
void InstructionSelector::VisitDeoptimizeIf(Node* node) {
DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
- kNotEqual, p.kind(), p.reason(), node->InputAt(1));
+ kNotEqual, p.kind(), p.reason(), p.feedback(), node->InputAt(1));
VisitWordCompareZero(this, node, node->InputAt(0), &cont);
}
void InstructionSelector::VisitDeoptimizeUnless(Node* node) {
DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
- kEqual, p.kind(), p.reason(), node->InputAt(1));
+ kEqual, p.kind(), p.reason(), p.feedback(), node->InputAt(1));
VisitWordCompareZero(this, node, node->InputAt(0), &cont);
}
@@ -2057,6 +1988,8 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
UNREACHABLE();
}
+void InstructionSelector::VisitSpeculationFence(Node* node) { UNREACHABLE(); }
+
#define SIMD_TYPE_LIST(V) \
V(F32x4) \
V(I32x4) \
diff --git a/deps/v8/src/compiler/mips64/code-generator-mips64.cc b/deps/v8/src/compiler/mips64/code-generator-mips64.cc
index 6d43750b1c..d4463008c8 100644
--- a/deps/v8/src/compiler/mips64/code-generator-mips64.cc
+++ b/deps/v8/src/compiler/mips64/code-generator-mips64.cc
@@ -143,46 +143,6 @@ static inline bool HasRegisterInput(Instruction* instr, size_t index) {
namespace {
-class OutOfLineLoadSingle final : public OutOfLineCode {
- public:
- OutOfLineLoadSingle(CodeGenerator* gen, FloatRegister result)
- : OutOfLineCode(gen), result_(result) {}
-
- void Generate() final {
- __ Move(result_, std::numeric_limits<float>::quiet_NaN());
- }
-
- private:
- FloatRegister const result_;
-};
-
-
-class OutOfLineLoadDouble final : public OutOfLineCode {
- public:
- OutOfLineLoadDouble(CodeGenerator* gen, DoubleRegister result)
- : OutOfLineCode(gen), result_(result) {}
-
- void Generate() final {
- __ Move(result_, std::numeric_limits<double>::quiet_NaN());
- }
-
- private:
- DoubleRegister const result_;
-};
-
-
-class OutOfLineLoadInteger final : public OutOfLineCode {
- public:
- OutOfLineLoadInteger(CodeGenerator* gen, Register result)
- : OutOfLineCode(gen), result_(result) {}
-
- void Generate() final { __ mov(result_, zero_reg); }
-
- private:
- Register const result_;
-};
-
-
class OutOfLineRound : public OutOfLineCode {
public:
OutOfLineRound(CodeGenerator* gen, DoubleRegister result)
@@ -403,109 +363,6 @@ FPUCondition FlagsConditionToConditionCmpFPU(bool& predicate,
}
} // namespace
-#define ASSEMBLE_BOUNDS_CHECK_REGISTER(offset, length, out_of_bounds) \
- do { \
- if (!length.is_reg() && base::bits::IsPowerOfTwo(length.immediate())) { \
- __ And(kScratchReg, offset, Operand(~(length.immediate() - 1))); \
- __ Branch(USE_DELAY_SLOT, out_of_bounds, ne, kScratchReg, \
- Operand(zero_reg)); \
- } else { \
- __ Branch(USE_DELAY_SLOT, out_of_bounds, hs, offset, length); \
- } \
- } while (0)
-
-#define ASSEMBLE_BOUNDS_CHECK_IMMEDIATE(offset, length, out_of_bounds) \
- do { \
- if (!length.is_reg() && base::bits::IsPowerOfTwo(length.immediate())) { \
- __ Or(kScratchReg, zero_reg, Operand(offset)); \
- __ And(kScratchReg, kScratchReg, Operand(~(length.immediate() - 1))); \
- __ Branch(out_of_bounds, ne, kScratchReg, Operand(zero_reg)); \
- } else { \
- __ Branch(out_of_bounds, ls, length.rm(), Operand(offset)); \
- } \
- } while (0)
-
-#define ASSEMBLE_CHECKED_LOAD_FLOAT(width, asm_instr) \
- do { \
- auto result = i.Output##width##Register(); \
- auto ool = new (zone()) OutOfLineLoad##width(this, result); \
- if (instr->InputAt(0)->IsRegister()) { \
- auto offset = i.InputRegister(0); \
- ASSEMBLE_BOUNDS_CHECK_REGISTER(offset, i.InputOperand(1), ool->entry()); \
- __ And(kScratchReg, offset, Operand(0xffffffff)); \
- __ Daddu(kScratchReg, i.InputRegister(2), kScratchReg); \
- __ asm_instr(result, MemOperand(kScratchReg, 0)); \
- } else { \
- int offset = static_cast<int>(i.InputOperand(0).immediate()); \
- ASSEMBLE_BOUNDS_CHECK_IMMEDIATE(offset, i.InputOperand(1), \
- ool->entry()); \
- __ asm_instr(result, MemOperand(i.InputRegister(2), offset)); \
- } \
- __ bind(ool->exit()); \
- } while (0)
-
-#define ASSEMBLE_CHECKED_LOAD_INTEGER(asm_instr) \
- do { \
- auto result = i.OutputRegister(); \
- auto ool = new (zone()) OutOfLineLoadInteger(this, result); \
- if (instr->InputAt(0)->IsRegister()) { \
- auto offset = i.InputRegister(0); \
- ASSEMBLE_BOUNDS_CHECK_REGISTER(offset, i.InputOperand(1), ool->entry()); \
- __ And(kScratchReg, offset, Operand(0xffffffff)); \
- __ Daddu(kScratchReg, i.InputRegister(2), kScratchReg); \
- __ asm_instr(result, MemOperand(kScratchReg, 0)); \
- } else { \
- int offset = static_cast<int>(i.InputOperand(0).immediate()); \
- ASSEMBLE_BOUNDS_CHECK_IMMEDIATE(offset, i.InputOperand(1), \
- ool->entry()); \
- __ asm_instr(result, MemOperand(i.InputRegister(2), offset)); \
- } \
- __ bind(ool->exit()); \
- } while (0)
-
-#define ASSEMBLE_CHECKED_STORE_FLOAT(width, asm_instr) \
- do { \
- Label done; \
- if (instr->InputAt(0)->IsRegister()) { \
- auto offset = i.InputRegister(0); \
- auto value = i.InputOrZero##width##Register(2); \
- if (value == kDoubleRegZero && !__ IsDoubleZeroRegSet()) { \
- __ Move(kDoubleRegZero, 0.0); \
- } \
- ASSEMBLE_BOUNDS_CHECK_REGISTER(offset, i.InputOperand(1), &done); \
- __ And(kScratchReg, offset, Operand(0xffffffff)); \
- __ Daddu(kScratchReg, i.InputRegister(3), kScratchReg); \
- __ asm_instr(value, MemOperand(kScratchReg, 0)); \
- } else { \
- int offset = static_cast<int>(i.InputOperand(0).immediate()); \
- auto value = i.InputOrZero##width##Register(2); \
- if (value == kDoubleRegZero && !__ IsDoubleZeroRegSet()) { \
- __ Move(kDoubleRegZero, 0.0); \
- } \
- ASSEMBLE_BOUNDS_CHECK_IMMEDIATE(offset, i.InputOperand(1), &done); \
- __ asm_instr(value, MemOperand(i.InputRegister(3), offset)); \
- } \
- __ bind(&done); \
- } while (0)
-
-#define ASSEMBLE_CHECKED_STORE_INTEGER(asm_instr) \
- do { \
- Label done; \
- if (instr->InputAt(0)->IsRegister()) { \
- auto offset = i.InputRegister(0); \
- auto value = i.InputOrZeroRegister(2); \
- ASSEMBLE_BOUNDS_CHECK_REGISTER(offset, i.InputOperand(1), &done); \
- __ And(kScratchReg, offset, Operand(0xffffffff)); \
- __ Daddu(kScratchReg, i.InputRegister(3), kScratchReg); \
- __ asm_instr(value, MemOperand(kScratchReg, 0)); \
- } else { \
- int offset = static_cast<int>(i.InputOperand(0).immediate()); \
- auto value = i.InputOrZeroRegister(2); \
- ASSEMBLE_BOUNDS_CHECK_IMMEDIATE(offset, i.InputOperand(1), &done); \
- __ asm_instr(value, MemOperand(i.InputRegister(3), offset)); \
- } \
- __ bind(&done); \
- } while (0)
#define ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(mode) \
if (kArchVariant == kMips64r6) { \
@@ -833,14 +690,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (instr->InputAt(0)->IsImmediate()) {
Address wasm_code = reinterpret_cast<Address>(
i.ToConstant(instr->InputAt(0)).ToInt64());
- __ Jump(wasm_code, info()->IsWasm() ? RelocInfo::WASM_CALL
+ __ Call(wasm_code, info()->IsWasm() ? RelocInfo::WASM_CALL
: RelocInfo::JS_TO_WASM_CALL);
} else {
__ daddiu(at, i.InputRegister(0), 0);
- __ Jump(at);
+ __ Call(at);
}
+ RecordCallPosition(instr);
frame_access_state()->ClearSPDelta();
- frame_access_state()->SetFrameAccessToDefault();
break;
}
case kArchTailCallCodeObjectFromJSFunction:
@@ -886,7 +743,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (FLAG_debug_code) {
// Check the function's context matches the context argument.
__ Ld(kScratchReg, FieldMemOperand(func, JSFunction::kContextOffset));
- __ Assert(eq, kWrongFunctionContext, cp, Operand(kScratchReg));
+ __ Assert(eq, AbortReason::kWrongFunctionContext, cp,
+ Operand(kScratchReg));
}
__ Ld(at, FieldMemOperand(func, JSFunction::kCodeOffset));
__ Daddu(at, at, Operand(Code::kHeaderSize - kHeapObjectTag));
@@ -1050,7 +908,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (FLAG_debug_code && alignment > 0) {
// Verify that the output_register is properly aligned
__ And(kScratchReg, i.OutputRegister(), Operand(kPointerSize - 1));
- __ Assert(eq, kAllocationIsNotDoubleAligned, kScratchReg,
+ __ Assert(eq, AbortReason::kAllocationIsNotDoubleAligned, kScratchReg,
Operand(zero_reg));
}
if (alignment == 2 * kPointerSize) {
@@ -1369,7 +1227,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Register dst = i.OutputRegister();
uint32_t B0 = 0x55555555; // (T)~(T)0/3
uint32_t B1 = 0x33333333; // (T)~(T)0/15*3
- uint32_t B2 = 0x0f0f0f0f; // (T)~(T)0/255*15
+ uint32_t B2 = 0x0F0F0F0F; // (T)~(T)0/255*15
uint32_t value = 0x01010101; // (T)~(T)0/255
uint32_t shift = 24; // (sizeof(T) - 1) * BITS_PER_BYTE
__ srl(kScratchReg, src, 1);
@@ -1394,7 +1252,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Register dst = i.OutputRegister();
uint64_t B0 = 0x5555555555555555l; // (T)~(T)0/3
uint64_t B1 = 0x3333333333333333l; // (T)~(T)0/15*3
- uint64_t B2 = 0x0f0f0f0f0f0f0f0fl; // (T)~(T)0/255*15
+ uint64_t B2 = 0x0F0F0F0F0F0F0F0Fl; // (T)~(T)0/255*15
uint64_t value = 0x0101010101010101l; // (T)~(T)0/255
uint64_t shift = 24; // (sizeof(T) - 1) * BITS_PER_BYTE
__ dsrl(kScratchReg, src, 1);
@@ -2041,6 +1899,24 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
frame_access_state()->IncreaseSPDelta(1);
}
break;
+ case kMips64Peek: {
+ // The incoming value is 0-based, but we need a 1-based value.
+ int reverse_slot = MiscField::decode(instr->opcode()) + 1;
+ int offset =
+ FrameSlotToFPOffset(frame()->GetTotalFrameSlotCount() - reverse_slot);
+ if (instr->OutputAt(0)->IsFPRegister()) {
+ LocationOperand* op = LocationOperand::cast(instr->OutputAt(0));
+ if (op->representation() == MachineRepresentation::kFloat64) {
+ __ Ldc1(i.OutputDoubleRegister(), MemOperand(fp, offset));
+ } else {
+ DCHECK_EQ(op->representation(), MachineRepresentation::kFloat32);
+ __ lwc1(i.OutputSingleRegister(0), MemOperand(fp, offset));
+ }
+ } else {
+ __ Ld(i.OutputRegister(0), MemOperand(fp, offset));
+ }
+ break;
+ }
case kMips64StackClaim: {
__ Dsubu(sp, sp, Operand(i.InputInt32(0)));
frame_access_state()->IncreaseSPDelta(i.InputInt32(0) / kPointerSize);
@@ -2063,48 +1939,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ dsrl32(i.OutputRegister(0), i.OutputRegister(0), 0);
break;
}
- case kCheckedLoadInt8:
- ASSEMBLE_CHECKED_LOAD_INTEGER(Lb);
- break;
- case kCheckedLoadUint8:
- ASSEMBLE_CHECKED_LOAD_INTEGER(Lbu);
- break;
- case kCheckedLoadInt16:
- ASSEMBLE_CHECKED_LOAD_INTEGER(Lh);
- break;
- case kCheckedLoadUint16:
- ASSEMBLE_CHECKED_LOAD_INTEGER(Lhu);
- break;
- case kCheckedLoadWord32:
- ASSEMBLE_CHECKED_LOAD_INTEGER(Lw);
- break;
- case kCheckedLoadWord64:
- ASSEMBLE_CHECKED_LOAD_INTEGER(Ld);
- break;
- case kCheckedLoadFloat32:
- ASSEMBLE_CHECKED_LOAD_FLOAT(Single, Lwc1);
- break;
- case kCheckedLoadFloat64:
- ASSEMBLE_CHECKED_LOAD_FLOAT(Double, Ldc1);
- break;
- case kCheckedStoreWord8:
- ASSEMBLE_CHECKED_STORE_INTEGER(Sb);
- break;
- case kCheckedStoreWord16:
- ASSEMBLE_CHECKED_STORE_INTEGER(Sh);
- break;
- case kCheckedStoreWord32:
- ASSEMBLE_CHECKED_STORE_INTEGER(Sw);
- break;
- case kCheckedStoreWord64:
- ASSEMBLE_CHECKED_STORE_INTEGER(Sd);
- break;
- case kCheckedStoreFloat32:
- ASSEMBLE_CHECKED_STORE_FLOAT(Single, Swc1);
- break;
- case kCheckedStoreFloat64:
- ASSEMBLE_CHECKED_STORE_FLOAT(Double, Sdc1);
- break;
case kAtomicLoadInt8:
ASSEMBLE_ATOMIC_LOAD_INTEGER(Lb);
break;
@@ -2183,7 +2017,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ATOMIC_BINOP_CASE(Xor, Xor)
#undef ATOMIC_BINOP_CASE
case kMips64AssertEqual:
- __ Assert(eq, static_cast<BailoutReason>(i.InputOperand(2).immediate()),
+ __ Assert(eq, static_cast<AbortReason>(i.InputOperand(2).immediate()),
i.InputRegister(0), Operand(i.InputRegister(1)));
break;
case kMips64S128Zero: {
@@ -2889,7 +2723,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (src0 == src1) {
// Unary S32x4 shuffles are handled with shf.w instruction
- unsigned lane = shuffle & 0xff;
+ unsigned lane = shuffle & 0xFF;
if (FLAG_debug_code) {
// range of all four lanes, for unary instruction,
// should belong to the same range, which can be one of these:
@@ -2897,7 +2731,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (lane >= 4) {
int32_t shuffle_helper = shuffle;
for (int i = 0; i < 4; ++i) {
- lane = shuffle_helper & 0xff;
+ lane = shuffle_helper & 0xFF;
CHECK_GE(lane, 4);
shuffle_helper >>= 8;
}
@@ -2905,7 +2739,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
uint32_t i8 = 0;
for (int i = 0; i < 4; i++) {
- lane = shuffle & 0xff;
+ lane = shuffle & 0xFF;
if (lane >= 4) {
lane -= 4;
}
@@ -3465,7 +3299,7 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
gen_->RecordSafepoint(reference_map, Safepoint::kSimple, 0,
Safepoint::kNoLazyDeopt);
if (FLAG_debug_code) {
- __ stop(GetBailoutReason(kUnexpectedReturnFromWasmTrap));
+ __ stop(GetAbortReason(AbortReason::kUnexpectedReturnFromWasmTrap));
}
}
}
@@ -3747,7 +3581,7 @@ void CodeGenerator::AssembleConstructFrame() {
if (info()->is_osr()) {
// TurboFan OSR-compiled functions cannot be entered directly.
- __ Abort(kShouldNotDirectlyEnterOsrFunction);
+ __ Abort(AbortReason::kShouldNotDirectlyEnterOsrFunction);
// Unoptimized code jumps directly to this entrypoint while the unoptimized
// frame is still on the stack. Optimized code uses OSR values directly from
@@ -3760,10 +3594,12 @@ void CodeGenerator::AssembleConstructFrame() {
const RegList saves = descriptor->CalleeSavedRegisters();
const RegList saves_fpu = descriptor->CalleeSavedFPRegisters();
+ const int returns = frame()->GetReturnSlotCount();
- // Skip callee-saved slots, which are pushed below.
+ // Skip callee-saved and return slots, which are pushed below.
shrink_slots -= base::bits::CountPopulation(saves);
shrink_slots -= base::bits::CountPopulation(saves_fpu);
+ shrink_slots -= returns;
if (shrink_slots > 0) {
__ Dsubu(sp, sp, Operand(shrink_slots * kPointerSize));
}
@@ -3779,11 +3615,21 @@ void CodeGenerator::AssembleConstructFrame() {
__ MultiPush(saves);
DCHECK_EQ(kNumCalleeSaved, base::bits::CountPopulation(saves) + 1);
}
+
+ if (returns != 0) {
+ // Create space for returns.
+ __ Dsubu(sp, sp, Operand(returns * kPointerSize));
+ }
}
void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
+ const int returns = frame()->GetReturnSlotCount();
+ if (returns != 0) {
+ __ Daddu(sp, sp, Operand(returns * kPointerSize));
+ }
+
// Restore GP registers.
const RegList saves = descriptor->CalleeSavedRegisters();
if (saves != 0) {
@@ -3816,7 +3662,6 @@ void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
}
int pop_count = static_cast<int>(descriptor->StackParameterCount());
if (pop->IsImmediate()) {
- DCHECK_EQ(Constant::kInt32, g.ToConstant(pop).type());
pop_count += g.ToConstant(pop).ToInt32();
} else {
Register pop_reg = g.ToRegister(pop);
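
The prologue and epilogue hunks above reserve the slots reported by frame()->GetReturnSlotCount() below the callee-saved registers and release them again in AssembleReturn. The following is a rough sketch of the slot arithmetic with made-up register masks and slot counts; std::bitset::count stands in for base::bits::CountPopulation:

#include <bitset>
#include <cstdint>
#include <cstdio>

int main() {
  const int kPointerSize = 8;             // mips64
  const uint32_t saves = 0x00FF0000;      // hypothetical callee-saved GP mask
  const uint32_t saves_fpu = 0x0000FF00;  // hypothetical callee-saved FP mask
  const int returns = 2;                  // frame()->GetReturnSlotCount()
  int shrink_slots = 20;                  // hypothetical spill-slot count

  // Skip callee-saved and return slots, which are pushed separately.
  shrink_slots -= static_cast<int>(std::bitset<32>(saves).count());
  shrink_slots -= static_cast<int>(std::bitset<32>(saves_fpu).count());
  shrink_slots -= returns;

  std::printf("Dsubu sp, sp, %d   ; spill slots\n", shrink_slots * kPointerSize);
  std::printf("MultiPushFPU / MultiPush ...\n");
  std::printf("Dsubu sp, sp, %d   ; return slots\n", returns * kPointerSize);
  std::printf("Daddu sp, sp, %d   ; released again in AssembleReturn\n",
              returns * kPointerSize);
}
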
diff --git a/deps/v8/src/compiler/mips64/instruction-codes-mips64.h b/deps/v8/src/compiler/mips64/instruction-codes-mips64.h
index 1b420d3819..3058812bec 100644
--- a/deps/v8/src/compiler/mips64/instruction-codes-mips64.h
+++ b/deps/v8/src/compiler/mips64/instruction-codes-mips64.h
@@ -156,6 +156,7 @@ namespace compiler {
V(Mips64Float64Min) \
V(Mips64Float64SilenceNaN) \
V(Mips64Push) \
+ V(Mips64Peek) \
V(Mips64StoreToStackSlot) \
V(Mips64ByteSwap64) \
V(Mips64ByteSwap32) \
diff --git a/deps/v8/src/compiler/mips64/instruction-selector-mips64.cc b/deps/v8/src/compiler/mips64/instruction-selector-mips64.cc
index 0b490c7d77..38f077c4e6 100644
--- a/deps/v8/src/compiler/mips64/instruction-selector-mips64.cc
+++ b/deps/v8/src/compiler/mips64/instruction-selector-mips64.cc
@@ -36,7 +36,7 @@ class Mips64OperandGenerator final : public OperandGenerator {
InstructionOperand UseRegisterOrImmediateZero(Node* node) {
if ((IsIntegerConstant(node) && (GetIntegerConstantValue(node) == 0)) ||
(IsFloatConstant(node) &&
- (bit_cast<int64_t>(GetFloatConstantValue(node)) == V8_INT64_C(0)))) {
+ (bit_cast<int64_t>(GetFloatConstantValue(node)) == 0))) {
return UseImmediate(node);
}
return UseRegister(node);
@@ -106,20 +106,6 @@ class Mips64OperandGenerator final : public OperandGenerator {
case kMips64Swc1:
case kMips64Ldc1:
case kMips64Sdc1:
- case kCheckedLoadInt8:
- case kCheckedLoadUint8:
- case kCheckedLoadInt16:
- case kCheckedLoadUint16:
- case kCheckedLoadWord32:
- case kCheckedLoadWord64:
- case kCheckedStoreWord8:
- case kCheckedStoreWord16:
- case kCheckedStoreWord32:
- case kCheckedStoreWord64:
- case kCheckedLoadFloat32:
- case kCheckedLoadFloat64:
- case kCheckedStoreFloat32:
- case kCheckedStoreFloat64:
return is_int32(value);
default:
return is_int16(value);
@@ -329,7 +315,8 @@ static void VisitBinop(InstructionSelector* selector, Node* node,
opcode = cont->Encode(opcode);
if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
- cont->kind(), cont->reason(), cont->frame_state());
+ cont->kind(), cont->reason(), cont->feedback(),
+ cont->frame_state());
} else {
selector->Emit(opcode, output_count, outputs, input_count, inputs);
}
@@ -540,7 +527,7 @@ void InstructionSelector::VisitWord32And(Node* node) {
Int32BinopMatcher mleft(m.left().node());
if (mleft.right().HasValue()) {
// Any shift value can match; int32 shifts use `value % 32`.
- uint32_t lsb = mleft.right().Value() & 0x1f;
+ uint32_t lsb = mleft.right().Value() & 0x1F;
// Ext cannot extract bits past the register size, however since
// shifting the original value would have introduced some zeros we can
@@ -590,7 +577,7 @@ void InstructionSelector::VisitWord64And(Node* node) {
Int64BinopMatcher mleft(m.left().node());
if (mleft.right().HasValue()) {
// Any shift value can match; int64 shifts use `value % 64`.
- uint32_t lsb = static_cast<uint32_t>(mleft.right().Value() & 0x3f);
+ uint32_t lsb = static_cast<uint32_t>(mleft.right().Value() & 0x3F);
// Dext cannot extract bits past the register size, however since
// shifting the original value would have introduced some zeros we can
@@ -720,7 +707,7 @@ void InstructionSelector::VisitWord32Shl(Node* node) {
void InstructionSelector::VisitWord32Shr(Node* node) {
Int32BinopMatcher m(node);
if (m.left().IsWord32And() && m.right().HasValue()) {
- uint32_t lsb = m.right().Value() & 0x1f;
+ uint32_t lsb = m.right().Value() & 0x1F;
Int32BinopMatcher mleft(m.left().node());
if (mleft.right().HasValue() && mleft.right().Value() != 0) {
// Select Ext for Shr(And(x, mask), imm) where the result of the mask is
@@ -813,7 +800,7 @@ void InstructionSelector::VisitWord64Shl(Node* node) {
void InstructionSelector::VisitWord64Shr(Node* node) {
Int64BinopMatcher m(node);
if (m.left().IsWord64And() && m.right().HasValue()) {
- uint32_t lsb = m.right().Value() & 0x3f;
+ uint32_t lsb = m.right().Value() & 0x3F;
Int64BinopMatcher mleft(m.left().node());
if (mleft.right().HasValue() && mleft.right().Value() != 0) {
// Select Dext for Shr(And(x, mask), imm) where the result of the mask is
@@ -1676,7 +1663,7 @@ void InstructionSelector::EmitPrepareArguments(
// Poke any stack arguments.
int slot = kCArgSlotCount;
for (PushParameter input : (*arguments)) {
- Emit(kMips64StoreToStackSlot, g.NoOutput(), g.UseRegister(input.node()),
+ Emit(kMips64StoreToStackSlot, g.NoOutput(), g.UseRegister(input.node),
g.TempImmediate(slot << kPointerSizeLog2));
++slot;
}
@@ -1688,14 +1675,36 @@ void InstructionSelector::EmitPrepareArguments(
}
for (size_t n = 0; n < arguments->size(); ++n) {
PushParameter input = (*arguments)[n];
- if (input.node()) {
- Emit(kMips64StoreToStackSlot, g.NoOutput(), g.UseRegister(input.node()),
+ if (input.node) {
+ Emit(kMips64StoreToStackSlot, g.NoOutput(), g.UseRegister(input.node),
g.TempImmediate(static_cast<int>(n << kPointerSizeLog2)));
}
}
}
}
+void InstructionSelector::EmitPrepareResults(ZoneVector<PushParameter>* results,
+ const CallDescriptor* descriptor,
+ Node* node) {
+ Mips64OperandGenerator g(this);
+
+ int reverse_slot = 0;
+ for (PushParameter output : *results) {
+ if (!output.location.IsCallerFrameSlot()) continue;
+ // Skip any alignment holes in nodes.
+ if (output.node != nullptr) {
+ DCHECK(!descriptor->IsCFunctionCall());
+ if (output.location.GetType() == MachineType::Float32()) {
+ MarkAsFloat32(output.node);
+ } else if (output.location.GetType() == MachineType::Float64()) {
+ MarkAsFloat64(output.node);
+ }
+ InstructionOperand result = g.DefineAsRegister(output.node);
+ Emit(kMips64Peek | MiscField::encode(reverse_slot), result);
+ }
+ reverse_slot += output.location.GetSizeInPointers();
+ }
+}
bool InstructionSelector::IsTailCallAddressImmediate() { return false; }
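
On MIPS64 the selector encodes a 0-based reverse slot and advances it by location.GetSizeInPointers(), while the kMips64Peek case in the code generator above adds one to obtain the 1-based value it needs for the frame-slot lookup. A tiny sketch of that round trip follows (plain C++ with assumed helper names, ignoring alignment holes):

#include <cassert>
#include <vector>

// 0-based reverse slots as encoded by EmitPrepareResults above: the slot of
// each value is the number of pointer-sized slots consumed before it.
std::vector<int> EncodeReverseSlots(const std::vector<int>& sizes_in_pointers) {
  std::vector<int> encoded;
  int reverse_slot = 0;
  for (int size : sizes_in_pointers) {
    encoded.push_back(reverse_slot);  // MiscField payload
    reverse_slot += size;             // location.GetSizeInPointers()
  }
  return encoded;
}

// The code generator turns the payload back into the 1-based value used for
// the frame-slot computation in the kMips64Peek case.
int DecodeReverseSlot(int misc_field) { return misc_field + 1; }

int main() {
  std::vector<int> encoded = EncodeReverseSlots({1, 1, 2});
  assert(encoded[0] == 0 && encoded[1] == 1 && encoded[2] == 2);
  assert(DecodeReverseSlot(encoded[2]) == 3);
  return 0;
}
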
@@ -1806,127 +1815,6 @@ void InstructionSelector::VisitUnalignedStore(Node* node) {
}
}
-void InstructionSelector::VisitCheckedLoad(Node* node) {
- CheckedLoadRepresentation load_rep = CheckedLoadRepresentationOf(node->op());
- Mips64OperandGenerator g(this);
- Node* const buffer = node->InputAt(0);
- Node* const offset = node->InputAt(1);
- Node* const length = node->InputAt(2);
- ArchOpcode opcode = kArchNop;
- switch (load_rep.representation()) {
- case MachineRepresentation::kWord8:
- opcode = load_rep.IsSigned() ? kCheckedLoadInt8 : kCheckedLoadUint8;
- break;
- case MachineRepresentation::kWord16:
- opcode = load_rep.IsSigned() ? kCheckedLoadInt16 : kCheckedLoadUint16;
- break;
- case MachineRepresentation::kWord32:
- opcode = kCheckedLoadWord32;
- break;
- case MachineRepresentation::kWord64:
- opcode = kCheckedLoadWord64;
- break;
- case MachineRepresentation::kFloat32:
- opcode = kCheckedLoadFloat32;
- break;
- case MachineRepresentation::kFloat64:
- opcode = kCheckedLoadFloat64;
- break;
- case MachineRepresentation::kBit:
- case MachineRepresentation::kTaggedSigned: // Fall through.
- case MachineRepresentation::kTaggedPointer: // Fall through.
- case MachineRepresentation::kTagged:
- case MachineRepresentation::kSimd128:
- case MachineRepresentation::kNone:
- UNREACHABLE();
- return;
- }
- InstructionOperand offset_operand = g.CanBeImmediate(offset, opcode)
- ? g.UseImmediate(offset)
- : g.UseRegister(offset);
-
- InstructionOperand length_operand = (!g.CanBeImmediate(offset, opcode))
- ? g.CanBeImmediate(length, opcode)
- ? g.UseImmediate(length)
- : g.UseRegister(length)
- : g.UseRegister(length);
-
- if (length->opcode() == IrOpcode::kInt32Constant) {
- Int32Matcher m(length);
- if (m.IsPowerOf2()) {
- Emit(opcode, g.DefineAsRegister(node), offset_operand,
- g.UseImmediate(length), g.UseRegister(buffer));
- return;
- }
- }
-
- Emit(opcode | AddressingModeField::encode(kMode_MRI),
- g.DefineAsRegister(node), offset_operand, length_operand,
- g.UseRegister(buffer));
-}
-
-
-void InstructionSelector::VisitCheckedStore(Node* node) {
- MachineRepresentation rep = CheckedStoreRepresentationOf(node->op());
- Mips64OperandGenerator g(this);
- Node* const buffer = node->InputAt(0);
- Node* const offset = node->InputAt(1);
- Node* const length = node->InputAt(2);
- Node* const value = node->InputAt(3);
- ArchOpcode opcode = kArchNop;
- switch (rep) {
- case MachineRepresentation::kWord8:
- opcode = kCheckedStoreWord8;
- break;
- case MachineRepresentation::kWord16:
- opcode = kCheckedStoreWord16;
- break;
- case MachineRepresentation::kWord32:
- opcode = kCheckedStoreWord32;
- break;
- case MachineRepresentation::kWord64:
- opcode = kCheckedStoreWord64;
- break;
- case MachineRepresentation::kFloat32:
- opcode = kCheckedStoreFloat32;
- break;
- case MachineRepresentation::kFloat64:
- opcode = kCheckedStoreFloat64;
- break;
- case MachineRepresentation::kBit:
- case MachineRepresentation::kTaggedSigned: // Fall through.
- case MachineRepresentation::kTaggedPointer: // Fall through.
- case MachineRepresentation::kTagged:
- case MachineRepresentation::kSimd128:
- case MachineRepresentation::kNone:
- UNREACHABLE();
- return;
- }
- InstructionOperand offset_operand = g.CanBeImmediate(offset, opcode)
- ? g.UseImmediate(offset)
- : g.UseRegister(offset);
-
- InstructionOperand length_operand = (!g.CanBeImmediate(offset, opcode))
- ? g.CanBeImmediate(length, opcode)
- ? g.UseImmediate(length)
- : g.UseRegister(length)
- : g.UseRegister(length);
-
- if (length->opcode() == IrOpcode::kInt32Constant) {
- Int32Matcher m(length);
- if (m.IsPowerOf2()) {
- Emit(opcode, g.NoOutput(), offset_operand, g.UseImmediate(length),
- g.UseRegisterOrImmediateZero(value), g.UseRegister(buffer));
- return;
- }
- }
-
- Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
- offset_operand, length_operand, g.UseRegisterOrImmediateZero(value),
- g.UseRegister(buffer));
-}
-
-
namespace {
// Shared routine for multiple compare operations.
@@ -1940,7 +1828,8 @@ static void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
g.Label(cont->true_block()), g.Label(cont->false_block()));
} else if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, g.NoOutput(), left, right, cont->kind(),
- cont->reason(), cont->frame_state());
+ cont->reason(), cont->feedback(),
+ cont->frame_state());
} else if (cont->IsSet()) {
selector->Emit(opcode, g.DefineAsRegister(cont->result()), left, right);
} else {
@@ -2111,7 +2000,8 @@ void VisitOptimizedWord32Compare(InstructionSelector* selector, Node* node,
selector->Emit(
kMips64AssertEqual, g.NoOutput(), optimizedResult, fullResult,
- g.TempImmediate(BailoutReason::kUnsupportedNonPrimitiveCompare));
+ g.TempImmediate(
+ static_cast<int>(AbortReason::kUnsupportedNonPrimitiveCompare)));
}
VisitWordCompare(selector, node, opcode, cont, false);
@@ -2157,7 +2047,7 @@ void EmitWordCompareZero(InstructionSelector* selector, Node* value,
} else if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, g.NoOutput(), value_operand,
g.TempImmediate(0), cont->kind(), cont->reason(),
- cont->frame_state());
+ cont->feedback(), cont->frame_state());
} else if (cont->IsTrap()) {
selector->Emit(opcode, g.NoOutput(), value_operand, g.TempImmediate(0),
g.TempImmediate(cont->trap_id()));
@@ -2297,14 +2187,14 @@ void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
void InstructionSelector::VisitDeoptimizeIf(Node* node) {
DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
- kNotEqual, p.kind(), p.reason(), node->InputAt(1));
+ kNotEqual, p.kind(), p.reason(), p.feedback(), node->InputAt(1));
VisitWordCompareZero(this, node, node->InputAt(0), &cont);
}
void InstructionSelector::VisitDeoptimizeUnless(Node* node) {
DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
- kEqual, p.kind(), p.reason(), node->InputAt(1));
+ kEqual, p.kind(), p.reason(), p.feedback(), node->InputAt(1));
VisitWordCompareZero(this, node, node->InputAt(0), &cont);
}
@@ -2750,6 +2640,8 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
UNREACHABLE();
}
+void InstructionSelector::VisitSpeculationFence(Node* node) { UNREACHABLE(); }
+
#define SIMD_TYPE_LIST(V) \
V(F32x4) \
V(I32x4) \
diff --git a/deps/v8/src/compiler/node-properties.cc b/deps/v8/src/compiler/node-properties.cc
index e312dc4354..22004337eb 100644
--- a/deps/v8/src/compiler/node-properties.cc
+++ b/deps/v8/src/compiler/node-properties.cc
@@ -12,6 +12,7 @@
#include "src/compiler/simplified-operator.h"
#include "src/compiler/verifier.h"
#include "src/handles-inl.h"
+#include "src/zone/zone-handle-set.h"
namespace v8 {
namespace internal {
@@ -462,6 +463,20 @@ NodeProperties::InferReceiverMapsResult NodeProperties::InferReceiverMaps(
if (IsSame(receiver, effect)) receiver = GetValueInput(effect, 0);
break;
}
+ case IrOpcode::kEffectPhi: {
+ Node* control = GetControlInput(effect);
+ if (control->opcode() != IrOpcode::kLoop) {
+ DCHECK(control->opcode() == IrOpcode::kDead ||
+ control->opcode() == IrOpcode::kMerge);
+ return kNoReceiverMaps;
+ }
+
+ // Continue search for receiver map outside the loop. Since operations
+ // inside the loop may change the map, the result is unreliable.
+ effect = GetEffectInput(effect, 0);
+ result = kUnreliableReceiverMaps;
+ continue;
+ }
default: {
DCHECK_EQ(1, effect->op()->EffectOutputCount());
if (effect->op()->EffectInputCount() != 1) {
@@ -488,6 +503,19 @@ NodeProperties::InferReceiverMapsResult NodeProperties::InferReceiverMaps(
}
// static
+MaybeHandle<Map> NodeProperties::GetMapWitness(Node* node) {
+ ZoneHandleSet<Map> maps;
+ Node* receiver = NodeProperties::GetValueInput(node, 1);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ NodeProperties::InferReceiverMapsResult result =
+ NodeProperties::InferReceiverMaps(receiver, effect, &maps);
+ if (result == NodeProperties::kReliableReceiverMaps && maps.size() == 1) {
+ return maps[0];
+ }
+ return MaybeHandle<Map>();
+}
+
+// static
bool NodeProperties::NoObservableSideEffectBetween(Node* effect,
Node* dominator) {
while (effect != dominator) {
@@ -538,19 +566,19 @@ bool NodeProperties::CanBePrimitive(Node* receiver, Node* effect) {
bool NodeProperties::CanBeNullOrUndefined(Node* receiver, Node* effect) {
if (CanBePrimitive(receiver, effect)) {
switch (receiver->opcode()) {
- case IrOpcode::kCheckSmi:
+ case IrOpcode::kCheckInternalizedString:
case IrOpcode::kCheckNumber:
- case IrOpcode::kCheckSymbol:
- case IrOpcode::kCheckString:
case IrOpcode::kCheckSeqString:
- case IrOpcode::kCheckInternalizedString:
- case IrOpcode::kToBoolean:
+ case IrOpcode::kCheckSmi:
+ case IrOpcode::kCheckString:
+ case IrOpcode::kCheckSymbol:
case IrOpcode::kJSToInteger:
case IrOpcode::kJSToLength:
case IrOpcode::kJSToName:
case IrOpcode::kJSToNumber:
case IrOpcode::kJSToNumeric:
case IrOpcode::kJSToString:
+ case IrOpcode::kToBoolean:
return false;
case IrOpcode::kHeapConstant: {
Handle<HeapObject> value = HeapObjectMatcher(receiver).Value();
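
The new kEffectPhi case above lets InferReceiverMaps keep walking the effect chain above a loop header while downgrading the answer to kUnreliableReceiverMaps, and the GetMapWitness helper only accepts a single reliable map. The following is a toy model of that walk, with stand-in node types rather than V8's graph classes:

#include <cstdio>
#include <string>

enum class Reliability { kNoMaps, kUnreliable, kReliable };

// Illustrative stand-in for a TurboFan node on the effect chain.
struct ToyNode {
  std::string op;         // "CheckMaps", "EffectPhi(loop)", ...
  ToyNode* effect_input;  // single effect predecessor, or nullptr
  std::string map;        // map recorded by a CheckMaps node
};

// Walk the effect chain towards the start of the graph, as the kEffectPhi
// case above does, downgrading to unreliable when crossing a loop header.
Reliability InferMap(ToyNode* effect, std::string* map_out) {
  Reliability result = Reliability::kReliable;
  while (effect != nullptr) {
    if (effect->op == "CheckMaps") {
      *map_out = effect->map;
      return result;
    }
    if (effect->op == "EffectPhi(loop)") {
      // Operations inside the loop may change the map, so the result becomes
      // unreliable, but the search continues outside the loop.
      result = Reliability::kUnreliable;
    }
    effect = effect->effect_input;
  }
  return Reliability::kNoMaps;
}

int main() {
  ToyNode check{"CheckMaps", nullptr, "MapA"};
  ToyNode loop_phi{"EffectPhi(loop)", &check, ""};
  std::string map;
  Reliability r = InferMap(&loop_phi, &map);
  std::printf("map=%s reliable=%d\n", map.c_str(), r == Reliability::kReliable);
}
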
diff --git a/deps/v8/src/compiler/node-properties.h b/deps/v8/src/compiler/node-properties.h
index 5ccc15c1ab..abc6622c83 100644
--- a/deps/v8/src/compiler/node-properties.h
+++ b/deps/v8/src/compiler/node-properties.h
@@ -8,6 +8,7 @@
#include "src/compiler/node.h"
#include "src/compiler/types.h"
#include "src/globals.h"
+#include "src/objects/map.h"
#include "src/zone/zone-handle-set.h"
namespace v8 {
@@ -153,6 +154,8 @@ class V8_EXPORT_PRIVATE NodeProperties final {
static InferReceiverMapsResult InferReceiverMaps(
Node* receiver, Node* effect, ZoneHandleSet<Map>* maps_return);
+ static MaybeHandle<Map> GetMapWitness(Node* node);
+
// Walks up the {effect} chain to check that there's no observable side-effect
// between the {effect} and it's {dominator}. Aborts the walk if there's join
// in the effect chain.
diff --git a/deps/v8/src/compiler/opcodes.h b/deps/v8/src/compiler/opcodes.h
index 3c3650b8f4..ec6c720af2 100644
--- a/deps/v8/src/compiler/opcodes.h
+++ b/deps/v8/src/compiler/opcodes.h
@@ -175,24 +175,25 @@
V(JSConstructWithArrayLike) \
V(JSConstructWithSpread)
-#define JS_OTHER_OP_LIST(V) \
- JS_CONSTRUCT_OP_LIST(V) \
- V(JSCallForwardVarargs) \
- V(JSCall) \
- V(JSCallWithArrayLike) \
- V(JSCallWithSpread) \
- V(JSCallRuntime) \
- V(JSForInEnumerate) \
- V(JSForInNext) \
- V(JSForInPrepare) \
- V(JSLoadMessage) \
- V(JSStoreMessage) \
- V(JSLoadModule) \
- V(JSStoreModule) \
- V(JSGeneratorStore) \
- V(JSGeneratorRestoreContinuation) \
- V(JSGeneratorRestoreRegister) \
- V(JSStackCheck) \
+#define JS_OTHER_OP_LIST(V) \
+ JS_CONSTRUCT_OP_LIST(V) \
+ V(JSCallForwardVarargs) \
+ V(JSCall) \
+ V(JSCallWithArrayLike) \
+ V(JSCallWithSpread) \
+ V(JSCallRuntime) \
+ V(JSForInEnumerate) \
+ V(JSForInNext) \
+ V(JSForInPrepare) \
+ V(JSLoadMessage) \
+ V(JSStoreMessage) \
+ V(JSLoadModule) \
+ V(JSStoreModule) \
+ V(JSGeneratorStore) \
+ V(JSGeneratorRestoreContinuation) \
+ V(JSGeneratorRestoreRegister) \
+ V(JSGeneratorRestoreInputOrDebugPos) \
+ V(JSStackCheck) \
V(JSDebugger)
#define JS_OP_LIST(V) \
@@ -317,6 +318,7 @@
V(NumberTrunc) \
V(NumberToBoolean) \
V(NumberToInt32) \
+ V(NumberToString) \
V(NumberToUint32) \
V(NumberToUint8Clamped) \
V(NumberSilenceNaN)
@@ -332,9 +334,12 @@
V(StringCharAt) \
V(StringCharCodeAt) \
V(SeqStringCharCodeAt) \
+ V(StringCodePointAt) \
+ V(SeqStringCodePointAt) \
V(StringFromCharCode) \
V(StringFromCodePoint) \
V(StringIndexOf) \
+ V(StringLength) \
V(StringToLowerCaseIntl) \
V(StringToUpperCaseIntl) \
V(CheckBounds) \
@@ -371,6 +376,7 @@
V(TransitionAndStoreNumberElement) \
V(TransitionAndStoreNonNumberElement) \
V(ToBoolean) \
+ V(NumberIsFloat64Hole) \
V(ObjectIsArrayBufferView) \
V(ObjectIsBigInt) \
V(ObjectIsCallable) \
@@ -390,6 +396,7 @@
V(NewDoubleElements) \
V(NewSmiOrObjectElements) \
V(NewArgumentsElements) \
+ V(NewConsString) \
V(ArrayBufferWasNeutered) \
V(EnsureWritableFastElements) \
V(MaybeGrowFastElements) \
@@ -593,8 +600,6 @@
V(LoadStackPointer) \
V(LoadFramePointer) \
V(LoadParentFramePointer) \
- V(CheckedLoad) \
- V(CheckedStore) \
V(UnalignedLoad) \
V(UnalignedStore) \
V(Int32PairAdd) \
@@ -614,6 +619,7 @@
V(AtomicAnd) \
V(AtomicOr) \
V(AtomicXor) \
+ V(SpeculationFence) \
V(UnsafePointerAdd)
#define MACHINE_SIMD_OP_LIST(V) \
diff --git a/deps/v8/src/compiler/operation-typer.cc b/deps/v8/src/compiler/operation-typer.cc
index 46d6557b21..5819655633 100644
--- a/deps/v8/src/compiler/operation-typer.cc
+++ b/deps/v8/src/compiler/operation-typer.cc
@@ -24,6 +24,8 @@ OperationTyper::OperationTyper(Isolate* isolate, Zone* zone)
Type* truncating_to_zero = Type::MinusZeroOrNaN();
DCHECK(!truncating_to_zero->Maybe(Type::Integral32()));
+ singleton_NaN_string_ = Type::HeapConstant(factory->NaN_string(), zone);
+ singleton_zero_string_ = Type::HeapConstant(factory->zero_string(), zone);
singleton_false_ = Type::HeapConstant(factory->false_value(), zone);
singleton_true_ = Type::HeapConstant(factory->true_value(), zone);
singleton_the_hole_ = Type::HeapConstant(factory->the_hole_value(), zone);
@@ -503,6 +505,14 @@ Type* OperationTyper::NumberToInt32(Type* type) {
return Type::Signed32();
}
+Type* OperationTyper::NumberToString(Type* type) {
+ DCHECK(type->Is(Type::Number()));
+
+ if (type->Is(Type::NaN())) return singleton_NaN_string_;
+ if (type->Is(cache_.kZeroOrMinusZero)) return singleton_zero_string_;
+ return Type::SeqString();
+}
+
Type* OperationTyper::NumberToUint32(Type* type) {
DCHECK(type->Is(Type::Number()));
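
The NumberToString typing rule added above narrows the result to a singleton string type for NaN and for zero or minus zero, and to Type::SeqString otherwise. Below is a value-level approximation of the same rule; V8's type lattice is reduced to concrete doubles and string labels here:

#include <cmath>
#include <cstdio>

// Value-level stand-in for OperationTyper::NumberToString: a concrete double
// instead of a Type*, and a string label instead of a Type constant.
const char* TypeOfNumberToString(double value) {
  if (std::isnan(value)) return "HeapConstant(NaN_string)";
  if (value == 0.0) return "HeapConstant(zero_string)";  // matches +0 and -0
  return "SeqString";
}

int main() {
  std::printf("%s\n", TypeOfNumberToString(std::nan("")));
  std::printf("%s\n", TypeOfNumberToString(-0.0));
  std::printf("%s\n", TypeOfNumberToString(42.0));
}
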
diff --git a/deps/v8/src/compiler/operation-typer.h b/deps/v8/src/compiler/operation-typer.h
index 4a9c4ffb08..282cb0c750 100644
--- a/deps/v8/src/compiler/operation-typer.h
+++ b/deps/v8/src/compiler/operation-typer.h
@@ -94,6 +94,8 @@ class V8_EXPORT_PRIVATE OperationTyper {
Type* infinity_;
Type* minus_infinity_;
+ Type* singleton_NaN_string_;
+ Type* singleton_zero_string_;
Type* singleton_false_;
Type* singleton_true_;
Type* singleton_the_hole_;
diff --git a/deps/v8/src/compiler/pipeline.cc b/deps/v8/src/compiler/pipeline.cc
index 045d695ecf..b4567ab04f 100644
--- a/deps/v8/src/compiler/pipeline.cc
+++ b/deps/v8/src/compiler/pipeline.cc
@@ -253,11 +253,6 @@ class PipelineData {
source_position_output_ = source_position_output;
}
- std::vector<trap_handler::ProtectedInstructionData>* protected_instructions()
- const {
- return protected_instructions_;
- }
-
JumpOptimizationInfo* jump_optimization_info() const {
return jump_optimization_info_;
}
@@ -435,21 +430,26 @@ class PipelineImpl final {
template <typename Phase, typename Arg0, typename Arg1>
void Run(Arg0 arg_0, Arg1 arg_1);
- // Run the graph creation and initial optimization passes.
+ // Step A. Run the graph creation and initial optimization passes.
bool CreateGraph();
- // Run the concurrent optimization passes.
+ // B. Run the concurrent optimization passes.
bool OptimizeGraph(Linkage* linkage);
- // Run the code assembly pass.
+ // Substep B.1. Produce a scheduled graph.
+ void ComputeScheduledGraph();
+
+ // Substep B.2. Select instructions from a scheduled graph.
+ bool SelectInstructions(Linkage* linkage);
+
+ // Step C. Run the code assembly pass.
void AssembleCode(Linkage* linkage);
- // Run the code finalization pass.
+ // Step D. Run the code finalization pass.
Handle<Code> FinalizeCode();
- bool ScheduleAndSelectInstructions(Linkage* linkage, bool trim_graph);
void RunPrintAndVerify(const char* phase, bool untyped = false);
- Handle<Code> ScheduleAndGenerateCode(CallDescriptor* call_descriptor);
+ Handle<Code> GenerateCode(CallDescriptor* call_descriptor);
void AllocateRegisters(const RegisterConfiguration* config,
CallDescriptor* descriptor, bool run_verifier);
@@ -803,7 +803,7 @@ PipelineCompilationJob::Status PipelineCompilationJob::PrepareJobImpl(
if (!pipeline_.CreateGraph()) {
if (isolate->has_pending_exception()) return FAILED; // Stack overflowed.
- return AbortOptimization(kGraphBuildingFailed);
+ return AbortOptimization(BailoutReason::kGraphBuildingFailed);
}
if (compilation_info()->is_osr()) data_.InitializeOsrHelper();
@@ -826,8 +826,8 @@ PipelineCompilationJob::Status PipelineCompilationJob::FinalizeJobImpl(
Isolate* isolate) {
Handle<Code> code = pipeline_.FinalizeCode();
if (code.is_null()) {
- if (compilation_info()->bailout_reason() == kNoReason) {
- return AbortOptimization(kCodeGenerationFailed);
+ if (compilation_info()->bailout_reason() == BailoutReason::kNoReason) {
+ return AbortOptimization(BailoutReason::kCodeGenerationFailed);
}
return FAILED;
}
@@ -964,7 +964,8 @@ PipelineWasmCompilationJob::ExecuteJobImpl() {
pipeline_.RunPrintAndVerify("Optimized Machine", true);
}
- if (!pipeline_.ScheduleAndSelectInstructions(&linkage_, true)) return FAILED;
+ pipeline_.ComputeScheduledGraph();
+ if (!pipeline_.SelectInstructions(&linkage_)) return FAILED;
pipeline_.AssembleCode(&linkage_);
return SUCCEEDED;
}
@@ -995,9 +996,7 @@ PipelineWasmCompilationJob::Status PipelineWasmCompilationJob::FinalizeJobImpl(
}
void PipelineWasmCompilationJob::ValidateImmovableEmbeddedObjects() const {
-#if !DEBUG
- return;
-#endif
+#if DEBUG
// We expect the only embedded objects to be those originating from
// a snapshot, which are immovable.
DisallowHeapAllocation no_gc;
@@ -1038,6 +1037,7 @@ void PipelineWasmCompilationJob::ValidateImmovableEmbeddedObjects() const {
}
CHECK(is_immovable || is_wasm || is_allowed_stub);
}
+#endif
}
template <typename Phase>
@@ -1269,8 +1269,9 @@ struct LoopPeelingPhase {
LoopTree* loop_tree =
LoopFinder::BuildLoopTree(data->jsgraph()->graph(), temp_zone);
- LoopPeeler::PeelInnerLoopsOfTree(data->graph(), data->common(), loop_tree,
- temp_zone);
+ LoopPeeler(data->graph(), data->common(), loop_tree, temp_zone,
+ data->source_positions())
+ .PeelInnerLoopsOfTree();
}
};
@@ -1880,7 +1881,8 @@ bool PipelineImpl::OptimizeGraph(Linkage* linkage) {
if (FLAG_turbo_escape) {
Run<EscapeAnalysisPhase>();
if (data->compilation_failed()) {
- info()->AbortOptimization(kCyclicObjectStateDetectedInEscapeAnalysis);
+ info()->AbortOptimization(
+ BailoutReason::kCyclicObjectStateDetectedInEscapeAnalysis);
data->EndPhaseKind();
return false;
}
@@ -1941,7 +1943,9 @@ bool PipelineImpl::OptimizeGraph(Linkage* linkage) {
data->source_positions()->RemoveDecorator();
- return ScheduleAndSelectInstructions(linkage, true);
+ ComputeScheduledGraph();
+
+ return SelectInstructions(linkage);
}
Handle<Code> Pipeline::GenerateCodeForCodeStub(
@@ -1982,7 +1986,7 @@ Handle<Code> Pipeline::GenerateCodeForCodeStub(
}
pipeline.Run<VerifyGraphPhase>(false, true);
- return pipeline.ScheduleAndGenerateCode(call_descriptor);
+ return pipeline.GenerateCode(call_descriptor);
}
// static
@@ -2043,7 +2047,12 @@ Handle<Code> Pipeline::GenerateCodeForTesting(
// TODO(rossberg): Should this really be untyped?
pipeline.RunPrintAndVerify("Machine", true);
- return pipeline.ScheduleAndGenerateCode(call_descriptor);
+ // Ensure we have a schedule.
+ if (data.schedule() == nullptr) {
+ pipeline.ComputeScheduledGraph();
+ }
+
+ return pipeline.GenerateCode(call_descriptor);
}
// static
@@ -2082,19 +2091,26 @@ bool Pipeline::AllocateRegistersForTesting(const RegisterConfiguration* config,
return !data.compilation_failed();
}
-bool PipelineImpl::ScheduleAndSelectInstructions(Linkage* linkage,
- bool trim_graph) {
- CallDescriptor* call_descriptor = linkage->GetIncomingDescriptor();
+void PipelineImpl::ComputeScheduledGraph() {
PipelineData* data = this->data_;
- DCHECK_NOT_NULL(data->graph());
+ // We should only schedule the graph if it is not scheduled yet.
+ DCHECK_NULL(data->schedule());
- if (trim_graph) {
- Run<LateGraphTrimmingPhase>();
- RunPrintAndVerify("Late trimmed", true);
- }
- if (data->schedule() == nullptr) Run<ComputeSchedulePhase>();
+ Run<LateGraphTrimmingPhase>();
+ RunPrintAndVerify("Late trimmed", true);
+
+ Run<ComputeSchedulePhase>();
TraceSchedule(data->info(), data->isolate(), data->schedule());
+}
+
+bool PipelineImpl::SelectInstructions(Linkage* linkage) {
+ CallDescriptor* call_descriptor = linkage->GetIncomingDescriptor();
+ PipelineData* data = this->data_;
+
+ // We should have a scheduled graph.
+ DCHECK_NOT_NULL(data->graph());
+ DCHECK_NOT_NULL(data->schedule());
if (FLAG_turbo_profiling) {
data->set_profiler_data(BasicBlockInstrumentor::Instrument(
@@ -2138,7 +2154,7 @@ bool PipelineImpl::ScheduleAndSelectInstructions(Linkage* linkage,
// Select and schedule instructions covering the scheduled graph.
Run<InstructionSelectionPhase>(linkage);
if (data->compilation_failed()) {
- info()->AbortOptimization(kCodeGenerationFailed);
+ info()->AbortOptimization(BailoutReason::kCodeGenerationFailed);
data->EndPhaseKind();
return false;
}
@@ -2177,7 +2193,8 @@ bool PipelineImpl::ScheduleAndSelectInstructions(Linkage* linkage,
Run<FrameElisionPhase>();
if (data->compilation_failed()) {
- info()->AbortOptimization(kNotEnoughVirtualRegistersRegalloc);
+ info()->AbortOptimization(
+ BailoutReason::kNotEnoughVirtualRegistersRegalloc);
data->EndPhaseKind();
return false;
}
@@ -2208,6 +2225,8 @@ Handle<Code> PipelineImpl::FinalizeCode() {
Run<FinalizeCodePhase>();
Handle<Code> code = data->code();
+ if (code.is_null()) return code;
+
if (data->profiler_data()) {
#if ENABLE_DISASSEMBLER
std::ostringstream os;
@@ -2245,12 +2264,11 @@ Handle<Code> PipelineImpl::FinalizeCode() {
return code;
}
-Handle<Code> PipelineImpl::ScheduleAndGenerateCode(
- CallDescriptor* call_descriptor) {
+Handle<Code> PipelineImpl::GenerateCode(CallDescriptor* call_descriptor) {
Linkage linkage(call_descriptor);
- // Schedule the graph, perform instruction selection and register allocation.
- if (!ScheduleAndSelectInstructions(&linkage, false)) return Handle<Code>();
+ // Perform instruction selection and register allocation.
+ if (!SelectInstructions(&linkage)) return Handle<Code>();
// Generate the final machine code.
AssembleCode(&linkage);
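
The pipeline refactoring above splits the old ScheduleAndSelectInstructions into an explicit scheduling step (B.1) and an instruction-selection step (B.2), with code assembly and finalization as separate steps C and D. A stand-in sketch of the resulting call order follows; the Pipeline type below is illustrative, not V8's PipelineImpl:

#include <cstdio>

// Illustrative stand-in for PipelineImpl, showing only the phase ordering.
struct Pipeline {
  // Step A. Graph creation and initial optimization.
  bool CreateGraph() { std::puts("A: create graph"); return true; }

  // Substep B.1. Produce a scheduled graph.
  void ComputeScheduledGraph() { std::puts("B.1: schedule graph"); }

  // Substep B.2. Select instructions from the scheduled graph.
  bool SelectInstructions() { std::puts("B.2: select instructions"); return true; }

  // Step B. Concurrent optimization, ending in B.1 and B.2.
  bool OptimizeGraph() {
    std::puts("B: optimize graph");
    ComputeScheduledGraph();
    return SelectInstructions();
  }

  // Step C. Code assembly.
  void AssembleCode() { std::puts("C: assemble code"); }

  // Step D. Code finalization.
  bool FinalizeCode() { std::puts("D: finalize code"); return true; }
};

int main() {
  Pipeline pipeline;
  if (!pipeline.CreateGraph()) return 1;
  if (!pipeline.OptimizeGraph()) return 1;
  pipeline.AssembleCode();
  return pipeline.FinalizeCode() ? 0 : 1;
}
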
diff --git a/deps/v8/src/compiler/pipeline.h b/deps/v8/src/compiler/pipeline.h
index 2dca7794eb..b5b6b5f142 100644
--- a/deps/v8/src/compiler/pipeline.h
+++ b/deps/v8/src/compiler/pipeline.h
@@ -76,7 +76,7 @@ class Pipeline : public AllStatic {
// Run the pipeline on a machine graph and generate code. If {schedule} is
// {nullptr}, then compute a new schedule for code generation.
- static Handle<Code> GenerateCodeForTesting(
+ V8_EXPORT_PRIVATE static Handle<Code> GenerateCodeForTesting(
CompilationInfo* info, Isolate* isolate, CallDescriptor* call_descriptor,
Graph* graph, Schedule* schedule = nullptr,
SourcePositionTable* source_positions = nullptr);
diff --git a/deps/v8/src/compiler/ppc/code-generator-ppc.cc b/deps/v8/src/compiler/ppc/code-generator-ppc.cc
index 11fde27fc9..7fc537784c 100644
--- a/deps/v8/src/compiler/ppc/code-generator-ppc.cc
+++ b/deps/v8/src/compiler/ppc/code-generator-ppc.cc
@@ -118,48 +118,6 @@ static inline bool HasRegisterInput(Instruction* instr, size_t index) {
namespace {
-class OutOfLineLoadNAN32 final : public OutOfLineCode {
- public:
- OutOfLineLoadNAN32(CodeGenerator* gen, DoubleRegister result)
- : OutOfLineCode(gen), result_(result) {}
-
- void Generate() final {
- __ LoadDoubleLiteral(
- result_, Double(std::numeric_limits<double>::quiet_NaN()), kScratchReg);
- }
-
- private:
- DoubleRegister const result_;
-};
-
-
-class OutOfLineLoadNAN64 final : public OutOfLineCode {
- public:
- OutOfLineLoadNAN64(CodeGenerator* gen, DoubleRegister result)
- : OutOfLineCode(gen), result_(result) {}
-
- void Generate() final {
- __ LoadDoubleLiteral(
- result_, Double(std::numeric_limits<double>::quiet_NaN()), kScratchReg);
- }
-
- private:
- DoubleRegister const result_;
-};
-
-
-class OutOfLineLoadZero final : public OutOfLineCode {
- public:
- OutOfLineLoadZero(CodeGenerator* gen, Register result)
- : OutOfLineCode(gen), result_(result) {}
-
- void Generate() final { __ li(result_, Operand::Zero()); }
-
- private:
- Register const result_;
-};
-
-
class OutOfLineRecordWrite final : public OutOfLineCode {
public:
OutOfLineRecordWrite(CodeGenerator* gen, Register object, Register offset,
@@ -653,134 +611,6 @@ Condition FlagsConditionToCondition(FlagsCondition condition, ArchOpcode op) {
#define CleanUInt32(x)
#endif
-#define ASSEMBLE_CHECKED_LOAD_FLOAT(asm_instr, asm_instrx, width) \
- do { \
- DoubleRegister result = i.OutputDoubleRegister(); \
- size_t index = 0; \
- AddressingMode mode = kMode_None; \
- MemOperand operand = i.MemoryOperand(&mode, index); \
- DCHECK_EQ(kMode_MRR, mode); \
- Register offset = operand.rb(); \
- if (HasRegisterInput(instr, 2)) { \
- __ cmplw(offset, i.InputRegister(2)); \
- } else { \
- __ cmplwi(offset, i.InputImmediate(2)); \
- } \
- auto ool = new (zone()) OutOfLineLoadNAN##width(this, result); \
- __ bge(ool->entry()); \
- if (mode == kMode_MRI) { \
- __ asm_instr(result, operand); \
- } else { \
- CleanUInt32(offset); \
- __ asm_instrx(result, operand); \
- } \
- __ bind(ool->exit()); \
- DCHECK_EQ(LeaveRC, i.OutputRCBit()); \
- } while (0)
-
-#define ASSEMBLE_CHECKED_LOAD_INTEGER(asm_instr, asm_instrx) \
- do { \
- Register result = i.OutputRegister(); \
- size_t index = 0; \
- AddressingMode mode = kMode_None; \
- MemOperand operand = i.MemoryOperand(&mode, index); \
- DCHECK_EQ(kMode_MRR, mode); \
- Register offset = operand.rb(); \
- if (HasRegisterInput(instr, 2)) { \
- __ cmplw(offset, i.InputRegister(2)); \
- } else { \
- __ cmplwi(offset, i.InputImmediate(2)); \
- } \
- auto ool = new (zone()) OutOfLineLoadZero(this, result); \
- __ bge(ool->entry()); \
- if (mode == kMode_MRI) { \
- __ asm_instr(result, operand); \
- } else { \
- CleanUInt32(offset); \
- __ asm_instrx(result, operand); \
- } \
- __ bind(ool->exit()); \
- DCHECK_EQ(LeaveRC, i.OutputRCBit()); \
- } while (0)
-
-#define ASSEMBLE_CHECKED_STORE_FLOAT32() \
- do { \
- Label done; \
- size_t index = 0; \
- AddressingMode mode = kMode_None; \
- MemOperand operand = i.MemoryOperand(&mode, index); \
- DCHECK_EQ(kMode_MRR, mode); \
- Register offset = operand.rb(); \
- if (HasRegisterInput(instr, 2)) { \
- __ cmplw(offset, i.InputRegister(2)); \
- } else { \
- __ cmplwi(offset, i.InputImmediate(2)); \
- } \
- __ bge(&done); \
- DoubleRegister value = i.InputDoubleRegister(3); \
- __ frsp(kScratchDoubleReg, value); \
- /* removed frsp as instruction-selector checked */ \
- /* value to be kFloat32 */ \
- if (mode == kMode_MRI) { \
- __ stfs(value, operand); \
- } else { \
- CleanUInt32(offset); \
- __ stfsx(value, operand); \
- } \
- __ bind(&done); \
- DCHECK_EQ(LeaveRC, i.OutputRCBit()); \
- } while (0)
-
-#define ASSEMBLE_CHECKED_STORE_DOUBLE() \
- do { \
- Label done; \
- size_t index = 0; \
- AddressingMode mode = kMode_None; \
- MemOperand operand = i.MemoryOperand(&mode, index); \
- DCHECK_EQ(kMode_MRR, mode); \
- Register offset = operand.rb(); \
- if (HasRegisterInput(instr, 2)) { \
- __ cmplw(offset, i.InputRegister(2)); \
- } else { \
- __ cmplwi(offset, i.InputImmediate(2)); \
- } \
- __ bge(&done); \
- DoubleRegister value = i.InputDoubleRegister(3); \
- if (mode == kMode_MRI) { \
- __ stfd(value, operand); \
- } else { \
- CleanUInt32(offset); \
- __ stfdx(value, operand); \
- } \
- __ bind(&done); \
- DCHECK_EQ(LeaveRC, i.OutputRCBit()); \
- } while (0)
-
-#define ASSEMBLE_CHECKED_STORE_INTEGER(asm_instr, asm_instrx) \
- do { \
- Label done; \
- size_t index = 0; \
- AddressingMode mode = kMode_None; \
- MemOperand operand = i.MemoryOperand(&mode, index); \
- DCHECK_EQ(kMode_MRR, mode); \
- Register offset = operand.rb(); \
- if (HasRegisterInput(instr, 2)) { \
- __ cmplw(offset, i.InputRegister(2)); \
- } else { \
- __ cmplwi(offset, i.InputImmediate(2)); \
- } \
- __ bge(&done); \
- Register value = i.InputRegister(3); \
- if (mode == kMode_MRI) { \
- __ asm_instr(value, operand); \
- } else { \
- CleanUInt32(offset); \
- __ asm_instrx(value, operand); \
- } \
- __ bind(&done); \
- DCHECK_EQ(LeaveRC, i.OutputRCBit()); \
- } while (0)
-
#define ASSEMBLE_ATOMIC_LOAD_INTEGER(asm_instr, asm_instrx) \
do { \
Label done; \
@@ -1003,8 +833,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
if (instr->InputAt(0)->IsImmediate()) {
+#ifdef V8_TARGET_ARCH_PPC64
+ Address wasm_code = reinterpret_cast<Address>(
+ i.ToConstant(instr->InputAt(0)).ToInt64());
+#else
Address wasm_code = reinterpret_cast<Address>(
i.ToConstant(instr->InputAt(0)).ToInt32());
+#endif
__ Call(wasm_code, rmode);
} else {
__ Call(i.InputRegister(0));
@@ -1072,7 +907,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ LoadP(kScratchReg,
FieldMemOperand(func, JSFunction::kContextOffset));
__ cmp(cp, kScratchReg);
- __ Assert(eq, kWrongFunctionContext);
+ __ Assert(eq, AbortReason::kWrongFunctionContext);
}
__ LoadP(ip, FieldMemOperand(func, JSFunction::kCodeOffset));
__ addi(ip, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
@@ -2021,58 +1856,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kPPC_StoreDouble:
ASSEMBLE_STORE_DOUBLE();
break;
- case kCheckedLoadInt8:
- ASSEMBLE_CHECKED_LOAD_INTEGER(lbz, lbzx);
- __ extsb(i.OutputRegister(), i.OutputRegister());
- break;
- case kCheckedLoadUint8:
- ASSEMBLE_CHECKED_LOAD_INTEGER(lbz, lbzx);
- break;
- case kCheckedLoadInt16:
- ASSEMBLE_CHECKED_LOAD_INTEGER(lha, lhax);
- break;
- case kCheckedLoadUint16:
- ASSEMBLE_CHECKED_LOAD_INTEGER(lhz, lhzx);
- break;
- case kCheckedLoadWord32:
- ASSEMBLE_CHECKED_LOAD_INTEGER(lwz, lwzx);
- break;
- case kCheckedLoadWord64:
-#if V8_TARGET_ARCH_PPC64
- ASSEMBLE_CHECKED_LOAD_INTEGER(ld, ldx);
-#else
- UNREACHABLE();
-#endif
- break;
- case kCheckedLoadFloat32:
- ASSEMBLE_CHECKED_LOAD_FLOAT(lfs, lfsx, 32);
- break;
- case kCheckedLoadFloat64:
- ASSEMBLE_CHECKED_LOAD_FLOAT(lfd, lfdx, 64);
- break;
- case kCheckedStoreWord8:
- ASSEMBLE_CHECKED_STORE_INTEGER(stb, stbx);
- break;
- case kCheckedStoreWord16:
- ASSEMBLE_CHECKED_STORE_INTEGER(sth, sthx);
- break;
- case kCheckedStoreWord32:
- ASSEMBLE_CHECKED_STORE_INTEGER(stw, stwx);
- break;
- case kCheckedStoreWord64:
-#if V8_TARGET_ARCH_PPC64
- ASSEMBLE_CHECKED_STORE_INTEGER(std, stdx);
-#else
- UNREACHABLE();
-#endif
- break;
- case kCheckedStoreFloat32:
- ASSEMBLE_CHECKED_STORE_FLOAT32();
- break;
- case kCheckedStoreFloat64:
- ASSEMBLE_CHECKED_STORE_DOUBLE();
- break;
-
case kAtomicLoadInt8:
ASSEMBLE_ATOMIC_LOAD_INTEGER(lbz, lbzx);
__ extsb(i.OutputRegister(), i.OutputRegister());
@@ -2208,7 +1991,7 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
gen_->RecordSafepoint(reference_map, Safepoint::kSimple, 0,
Safepoint::kNoLazyDeopt);
if (FLAG_debug_code) {
- __ stop(GetBailoutReason(kUnexpectedReturnFromWasmTrap));
+ __ stop(GetAbortReason(AbortReason::kUnexpectedReturnFromWasmTrap));
}
}
}
@@ -2382,7 +2165,7 @@ void CodeGenerator::AssembleConstructFrame() {
frame()->GetTotalFrameSlotCount() - descriptor->CalculateFixedFrameSize();
if (info()->is_osr()) {
// TurboFan OSR-compiled functions cannot be entered directly.
- __ Abort(kShouldNotDirectlyEnterOsrFunction);
+ __ Abort(AbortReason::kShouldNotDirectlyEnterOsrFunction);
// Unoptimized code jumps directly to this entrypoint while the unoptimized
// frame is still on the stack. Optimized code uses OSR values directly from
@@ -2555,10 +2338,10 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
// converts it to qnan on ia32/x64
if (src.type() == Constant::kFloat32) {
uint32_t val = src.ToFloat32AsInt();
- if ((val & 0x7f800000) == 0x7f800000) {
+ if ((val & 0x7F800000) == 0x7F800000) {
uint64_t dval = static_cast<uint64_t>(val);
- dval = ((dval & 0xc0000000) << 32) | ((dval & 0x40000000) << 31) |
- ((dval & 0x40000000) << 30) | ((dval & 0x7fffffff) << 29);
+ dval = ((dval & 0xC0000000) << 32) | ((dval & 0x40000000) << 31) |
+ ((dval & 0x40000000) << 30) | ((dval & 0x7FFFFFFF) << 29);
value = Double(dval);
} else {
value = Double(static_cast<double>(src.ToFloat32()));
@@ -2672,69 +2455,6 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
}
return;
- // Dispatch on the source and destination operand kinds. Not all
- // combinations are possible.
- if (source->IsRegister()) {
- // Register-register.
- Register temp = kScratchReg;
- Register src = g.ToRegister(source);
- if (destination->IsRegister()) {
- Register dst = g.ToRegister(destination);
- __ mr(temp, src);
- __ mr(src, dst);
- __ mr(dst, temp);
- } else {
- DCHECK(destination->IsStackSlot());
- MemOperand dst = g.ToMemOperand(destination);
- __ mr(temp, src);
- __ LoadP(src, dst);
- __ StoreP(temp, dst);
- }
-#if V8_TARGET_ARCH_PPC64
- } else if (source->IsStackSlot() || source->IsFPStackSlot()) {
-#else
- } else if (source->IsStackSlot()) {
- DCHECK(destination->IsStackSlot());
-#endif
- Register temp_0 = kScratchReg;
- Register temp_1 = r0;
- MemOperand src = g.ToMemOperand(source);
- MemOperand dst = g.ToMemOperand(destination);
- __ LoadP(temp_0, src);
- __ LoadP(temp_1, dst);
- __ StoreP(temp_0, dst);
- __ StoreP(temp_1, src);
- } else if (source->IsFPRegister()) {
- DoubleRegister temp = kScratchDoubleReg;
- DoubleRegister src = g.ToDoubleRegister(source);
- if (destination->IsFPRegister()) {
- DoubleRegister dst = g.ToDoubleRegister(destination);
- __ fmr(temp, src);
- __ fmr(src, dst);
- __ fmr(dst, temp);
- } else {
- DCHECK(destination->IsFPStackSlot());
- MemOperand dst = g.ToMemOperand(destination);
- __ fmr(temp, src);
- __ lfd(src, dst);
- __ stfd(temp, dst);
- }
-#if !V8_TARGET_ARCH_PPC64
- } else if (source->IsFPStackSlot()) {
- DCHECK(destination->IsFPStackSlot());
- DoubleRegister temp_0 = kScratchDoubleReg;
- DoubleRegister temp_1 = d0;
- MemOperand src = g.ToMemOperand(source);
- MemOperand dst = g.ToMemOperand(destination);
- __ lfd(temp_0, src);
- __ lfd(temp_1, dst);
- __ stfd(temp_0, dst);
- __ stfd(temp_1, src);
-#endif
- } else {
- // No other combinations are possible.
- UNREACHABLE();
- }
}
diff --git a/deps/v8/src/compiler/ppc/instruction-selector-ppc.cc b/deps/v8/src/compiler/ppc/instruction-selector-ppc.cc
index 8454590ee2..fced5565df 100644
--- a/deps/v8/src/compiler/ppc/instruction-selector-ppc.cc
+++ b/deps/v8/src/compiler/ppc/instruction-selector-ppc.cc
@@ -154,7 +154,8 @@ void VisitBinop(InstructionSelector* selector, Node* node,
opcode = cont->Encode(opcode);
if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
- cont->kind(), cont->reason(), cont->frame_state());
+ cont->kind(), cont->reason(), cont->feedback(),
+ cont->frame_state());
} else if (cont->IsTrap()) {
inputs[input_count++] = g.UseImmediate(cont->trap_id());
selector->Emit(opcode, output_count, outputs, input_count, inputs);
@@ -366,101 +367,6 @@ void InstructionSelector::VisitUnalignedLoad(Node* node) { UNREACHABLE(); }
// Architecture supports unaligned access, therefore VisitStore is used instead
void InstructionSelector::VisitUnalignedStore(Node* node) { UNREACHABLE(); }
-void InstructionSelector::VisitCheckedLoad(Node* node) {
- CheckedLoadRepresentation load_rep = CheckedLoadRepresentationOf(node->op());
- PPCOperandGenerator g(this);
- Node* const base = node->InputAt(0);
- Node* const offset = node->InputAt(1);
- Node* const length = node->InputAt(2);
- ArchOpcode opcode = kArchNop;
- switch (load_rep.representation()) {
- case MachineRepresentation::kWord8:
- opcode = load_rep.IsSigned() ? kCheckedLoadInt8 : kCheckedLoadUint8;
- break;
- case MachineRepresentation::kWord16:
- opcode = load_rep.IsSigned() ? kCheckedLoadInt16 : kCheckedLoadUint16;
- break;
- case MachineRepresentation::kWord32:
- opcode = kCheckedLoadWord32;
- break;
-#if V8_TARGET_ARCH_PPC64
- case MachineRepresentation::kWord64:
- opcode = kCheckedLoadWord64;
- break;
-#endif
- case MachineRepresentation::kFloat32:
- opcode = kCheckedLoadFloat32;
- break;
- case MachineRepresentation::kFloat64:
- opcode = kCheckedLoadFloat64;
- break;
- case MachineRepresentation::kBit: // Fall through.
- case MachineRepresentation::kTaggedSigned: // Fall through.
- case MachineRepresentation::kTaggedPointer: // Fall through.
- case MachineRepresentation::kTagged: // Fall through.
-#if !V8_TARGET_ARCH_PPC64
- case MachineRepresentation::kWord64: // Fall through.
-#endif
- case MachineRepresentation::kSimd128: // Fall through.
- case MachineRepresentation::kNone:
- UNREACHABLE();
- return;
- }
- AddressingMode addressingMode = kMode_MRR;
- Emit(opcode | AddressingModeField::encode(addressingMode),
- g.DefineAsRegister(node), g.UseRegister(base), g.UseRegister(offset),
- g.UseOperand(length, kInt16Imm_Unsigned));
-}
-
-
-void InstructionSelector::VisitCheckedStore(Node* node) {
- MachineRepresentation rep = CheckedStoreRepresentationOf(node->op());
- PPCOperandGenerator g(this);
- Node* const base = node->InputAt(0);
- Node* const offset = node->InputAt(1);
- Node* const length = node->InputAt(2);
- Node* const value = node->InputAt(3);
- ArchOpcode opcode = kArchNop;
- switch (rep) {
- case MachineRepresentation::kWord8:
- opcode = kCheckedStoreWord8;
- break;
- case MachineRepresentation::kWord16:
- opcode = kCheckedStoreWord16;
- break;
- case MachineRepresentation::kWord32:
- opcode = kCheckedStoreWord32;
- break;
-#if V8_TARGET_ARCH_PPC64
- case MachineRepresentation::kWord64:
- opcode = kCheckedStoreWord64;
- break;
-#endif
- case MachineRepresentation::kFloat32:
- opcode = kCheckedStoreFloat32;
- break;
- case MachineRepresentation::kFloat64:
- opcode = kCheckedStoreFloat64;
- break;
- case MachineRepresentation::kBit: // Fall through.
- case MachineRepresentation::kTaggedSigned: // Fall through.
- case MachineRepresentation::kTaggedPointer: // Fall through.
- case MachineRepresentation::kTagged: // Fall through.
-#if !V8_TARGET_ARCH_PPC64
- case MachineRepresentation::kWord64: // Fall through.
-#endif
- case MachineRepresentation::kSimd128: // Fall through.
- case MachineRepresentation::kNone:
- UNREACHABLE();
- return;
- }
- AddressingMode addressingMode = kMode_MRR;
- Emit(opcode | AddressingModeField::encode(addressingMode), g.NoOutput(),
- g.UseRegister(base), g.UseRegister(offset),
- g.UseOperand(length, kInt16Imm_Unsigned), g.UseRegister(value));
-}
-
-
template <typename Matcher>
static void VisitLogical(InstructionSelector* selector, Node* node, Matcher* m,
ArchOpcode opcode, bool left_can_cover,
@@ -553,7 +459,7 @@ void InstructionSelector::VisitWord32And(Node* node) {
if (m.left().IsWord32Shr()) {
// Adjust the mask such that it doesn't include any rotated bits.
if (mb > 31 - sh) mb = 31 - sh;
- sh = (32 - sh) & 0x1f;
+ sh = (32 - sh) & 0x1F;
} else {
// Adjust the mask such that it doesn't include any rotated bits.
if (me < sh) me = sh;
@@ -592,7 +498,7 @@ void InstructionSelector::VisitWord64And(Node* node) {
if (m.left().IsWord64Shr()) {
// Adjust the mask such that it doesn't include any rotated bits.
if (mb > 63 - sh) mb = 63 - sh;
- sh = (64 - sh) & 0x3f;
+ sh = (64 - sh) & 0x3F;
} else {
// Adjust the mask such that it doesn't include any rotated bits.
if (me < sh) me = sh;
@@ -756,7 +662,7 @@ void InstructionSelector::VisitWord32Shr(Node* node) {
IsContiguousMask32((uint32_t)(mleft.right().Value()) >> sh, &mb, &me)) {
// Adjust the mask such that it doesn't include any rotated bits.
if (mb > 31 - sh) mb = 31 - sh;
- sh = (32 - sh) & 0x1f;
+ sh = (32 - sh) & 0x1F;
if (mb >= me) {
Emit(kPPC_RotLeftAndMask32, g.DefineAsRegister(node),
g.UseRegister(mleft.left().node()), g.TempImmediate(sh),
@@ -782,7 +688,7 @@ void InstructionSelector::VisitWord64Shr(Node* node) {
IsContiguousMask64((uint64_t)(mleft.right().Value()) >> sh, &mb, &me)) {
// Adjust the mask such that it doesn't include any rotated bits.
if (mb > 63 - sh) mb = 63 - sh;
- sh = (64 - sh) & 0x3f;
+ sh = (64 - sh) & 0x3F;
if (mb >= me) {
bool match = false;
ArchOpcode opcode;
@@ -1033,6 +939,8 @@ void InstructionSelector::VisitWord64ReverseBytes(Node* node) { UNREACHABLE(); }
void InstructionSelector::VisitWord32ReverseBytes(Node* node) { UNREACHABLE(); }
+void InstructionSelector::VisitSpeculationFence(Node* node) { UNREACHABLE(); }
+
void InstructionSelector::VisitInt32Add(Node* node) {
VisitBinop<Int32BinopMatcher>(this, node, kPPC_Add32, kInt16Imm);
}
@@ -1553,7 +1461,8 @@ void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
g.Label(cont->true_block()), g.Label(cont->false_block()));
} else if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, g.NoOutput(), left, right, cont->kind(),
- cont->reason(), cont->frame_state());
+ cont->reason(), cont->feedback(),
+ cont->frame_state());
} else if (cont->IsSet()) {
selector->Emit(opcode, g.DefineAsRegister(cont->result()), left, right);
} else {
@@ -1800,14 +1709,14 @@ void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
void InstructionSelector::VisitDeoptimizeIf(Node* node) {
DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
- kNotEqual, p.kind(), p.reason(), node->InputAt(1));
+ kNotEqual, p.kind(), p.reason(), p.feedback(), node->InputAt(1));
VisitWord32CompareZero(this, node, node->InputAt(0), &cont);
}
void InstructionSelector::VisitDeoptimizeUnless(Node* node) {
DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
- kEqual, p.kind(), p.reason(), node->InputAt(1));
+ kEqual, p.kind(), p.reason(), p.feedback(), node->InputAt(1));
VisitWord32CompareZero(this, node, node->InputAt(0), &cont);
}
@@ -1989,7 +1898,7 @@ void InstructionSelector::EmitPrepareArguments(
// Poke any stack arguments.
int slot = kStackFrameExtraParamSlot;
for (PushParameter input : (*arguments)) {
- Emit(kPPC_StoreToStackSlot, g.NoOutput(), g.UseRegister(input.node()),
+ Emit(kPPC_StoreToStackSlot, g.NoOutput(), g.UseRegister(input.node),
g.TempImmediate(slot));
++slot;
}
@@ -1997,8 +1906,8 @@ void InstructionSelector::EmitPrepareArguments(
// Push any stack arguments.
for (PushParameter input : base::Reversed(*arguments)) {
// Skip any alignment holes in pushed nodes.
- if (input.node() == nullptr) continue;
- Emit(kPPC_Push, g.NoOutput(), g.UseRegister(input.node()));
+ if (input.node == nullptr) continue;
+ Emit(kPPC_Push, g.NoOutput(), g.UseRegister(input.node));
}
}
}
@@ -2164,6 +2073,190 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
UNREACHABLE();
}
+void InstructionSelector::VisitI32x4Splat(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI32x4ExtractLane(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI32x4ReplaceLane(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI32x4Add(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI32x4Sub(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI32x4Shl(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI32x4ShrS(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI32x4Mul(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI32x4MaxS(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI32x4MinS(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI32x4Eq(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI32x4Ne(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI32x4MinU(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI32x4MaxU(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI32x4ShrU(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI32x4Neg(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI32x4GtS(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI32x4GeS(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI32x4GtU(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI32x4GeU(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI16x8Splat(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI16x8ExtractLane(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI16x8ReplaceLane(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI16x8Shl(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI16x8ShrS(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI16x8ShrU(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI16x8Add(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI16x8AddSaturateS(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitI16x8Sub(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI16x8SubSaturateS(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitI16x8Mul(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI16x8MinS(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI16x8MaxS(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI16x8Eq(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI16x8Ne(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI16x8AddSaturateU(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitI16x8SubSaturateU(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitI16x8MinU(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI16x8MaxU(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI16x8Neg(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI16x8GtS(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI16x8GeS(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI16x8GtU(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI16x8GeU(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI8x16Neg(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI8x16Splat(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI8x16ExtractLane(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI8x16ReplaceLane(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI8x16Add(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI8x16AddSaturateS(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitI8x16Sub(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI8x16SubSaturateS(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitI8x16MinS(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI8x16MaxS(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI8x16Eq(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI8x16Ne(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI8x16GtS(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI8x16GeS(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI8x16AddSaturateU(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitI8x16SubSaturateU(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitI8x16MinU(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI8x16MaxU(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI8x16GtU(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI8x16GeU(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitS128And(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitS128Or(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitS128Xor(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitS128Not(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitS128Zero(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitF32x4Eq(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitF32x4Ne(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitF32x4Lt(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitF32x4Le(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitF32x4Splat(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitF32x4ExtractLane(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitF32x4ReplaceLane(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::EmitPrepareResults(ZoneVector<PushParameter>* results,
+ const CallDescriptor* descriptor,
+ Node* node) {
+ // TODO(John): Port.
+}
+
+void InstructionSelector::VisitF32x4Add(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitF32x4Sub(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitF32x4Mul(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitF32x4Min(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitF32x4Max(Node* node) { UNIMPLEMENTED(); }
+
// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
@@ -2176,7 +2269,7 @@ InstructionSelector::SupportedMachineOperatorFlags() {
MachineOperatorBuilder::kFloat64RoundTiesAway |
MachineOperatorBuilder::kWord32Popcnt |
MachineOperatorBuilder::kWord64Popcnt;
- // We omit kWord32ShiftIsSafe as s[rl]w use 0x3f as a mask rather than 0x1f.
+ // We omit kWord32ShiftIsSafe as s[rl]w use 0x3F as a mask rather than 0x1F.
}
// static
diff --git a/deps/v8/src/compiler/property-access-builder.cc b/deps/v8/src/compiler/property-access-builder.cc
index 5e79cbdfec..bead0618f6 100644
--- a/deps/v8/src/compiler/property-access-builder.cc
+++ b/deps/v8/src/compiler/property-access-builder.cc
@@ -69,8 +69,9 @@ bool PropertyAccessBuilder::TryBuildStringCheck(MapHandles const& maps,
} else {
// Monomorphic string access (ignoring the fact that there are multiple
// String maps).
- *receiver = *effect = graph()->NewNode(simplified()->CheckString(),
- *receiver, *effect, control);
+ *receiver = *effect =
+ graph()->NewNode(simplified()->CheckString(VectorSlotPair()),
+ *receiver, *effect, control);
}
return true;
}
@@ -82,8 +83,9 @@ bool PropertyAccessBuilder::TryBuildNumberCheck(MapHandles const& maps,
Node* control) {
if (HasOnlyNumberMaps(maps)) {
// Monomorphic number access (we also deal with Smis here).
- *receiver = *effect = graph()->NewNode(simplified()->CheckNumber(),
- *receiver, *effect, control);
+ *receiver = *effect =
+ graph()->NewNode(simplified()->CheckNumber(VectorSlotPair()), *receiver,
+ *effect, control);
return true;
}
return false;
@@ -175,8 +177,9 @@ Node* PropertyAccessBuilder::BuildCheckValue(Node* receiver, Node** effect,
Node* expected = jsgraph()->HeapConstant(value);
Node* check =
graph()->NewNode(simplified()->ReferenceEqual(), receiver, expected);
- *effect = graph()->NewNode(simplified()->CheckIf(DeoptimizeReason::kNoReason),
- check, *effect, control);
+ *effect =
+ graph()->NewNode(simplified()->CheckIf(DeoptimizeReason::kWrongValue),
+ check, *effect, control);
return expected;
}
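
A minimal usage sketch of the call shape introduced above (illustrative only; receiver, effect and control are the same locals as in the hunk, and the empty pair mirrors the VectorSlotPair() literal used by the patch): CheckString and CheckNumber now take a VectorSlotPair so a failing check can name the feedback slot on which further speculation should be disallowed; a default-constructed pair attaches no feedback.

    // Hypothetical caller; a non-empty pair would normally come from an IC slot.
    VectorSlotPair no_feedback;
    *receiver = *effect =
        graph()->NewNode(simplified()->CheckString(no_feedback),
                         *receiver, *effect, control);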
diff --git a/deps/v8/src/compiler/raw-machine-assembler.cc b/deps/v8/src/compiler/raw-machine-assembler.cc
index bed2f628d9..ed67c06cc7 100644
--- a/deps/v8/src/compiler/raw-machine-assembler.cc
+++ b/deps/v8/src/compiler/raw-machine-assembler.cc
@@ -134,7 +134,6 @@ void RawMachineAssembler::Return(Node* value) {
current_block_ = nullptr;
}
-
void RawMachineAssembler::Return(Node* v1, Node* v2) {
Node* values[] = {Int32Constant(0), v1, v2};
Node* ret = MakeNode(common()->Return(2), 3, values);
@@ -142,7 +141,6 @@ void RawMachineAssembler::Return(Node* v1, Node* v2) {
current_block_ = nullptr;
}
-
void RawMachineAssembler::Return(Node* v1, Node* v2, Node* v3) {
Node* values[] = {Int32Constant(0), v1, v2, v3};
Node* ret = MakeNode(common()->Return(3), 4, values);
@@ -150,6 +148,24 @@ void RawMachineAssembler::Return(Node* v1, Node* v2, Node* v3) {
current_block_ = nullptr;
}
+void RawMachineAssembler::Return(Node* v1, Node* v2, Node* v3, Node* v4) {
+ Node* values[] = {Int32Constant(0), v1, v2, v3, v4};
+ Node* ret = MakeNode(common()->Return(4), 5, values);
+ schedule()->AddReturn(CurrentBlock(), ret);
+ current_block_ = nullptr;
+}
+
+void RawMachineAssembler::Return(int count, Node* vs[]) {
+ typedef Node* Node_ptr;
+ Node** values = new Node_ptr[count + 1];
+ values[0] = Int32Constant(0);
+ for (int i = 0; i < count; ++i) values[i + 1] = vs[i];
+ Node* ret = MakeNode(common()->Return(count), count + 1, values);
+ schedule()->AddReturn(CurrentBlock(), ret);
+ current_block_ = nullptr;
+ delete[] values;
+}
+
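
A minimal usage sketch for the new variadic overload (illustrative; m is assumed to be a RawMachineAssembler instance and a, b, c previously built nodes):

    Node* vals[] = {a, b, c};
    m.Return(3, vals);  // equivalent to m.Return(a, b, c)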
void RawMachineAssembler::PopAndReturn(Node* pop, Node* value) {
Node* values[] = {pop, value};
Node* ret = MakeNode(common()->Return(1), 2, values);
@@ -172,6 +188,14 @@ void RawMachineAssembler::PopAndReturn(Node* pop, Node* v1, Node* v2,
current_block_ = nullptr;
}
+void RawMachineAssembler::PopAndReturn(Node* pop, Node* v1, Node* v2, Node* v3,
+ Node* v4) {
+ Node* values[] = {pop, v1, v2, v3, v4};
+ Node* ret = MakeNode(common()->Return(4), 5, values);
+ schedule()->AddReturn(CurrentBlock(), ret);
+ current_block_ = nullptr;
+}
+
void RawMachineAssembler::DebugAbort(Node* message) {
AddNode(machine()->DebugAbort(), message);
}
@@ -430,7 +454,7 @@ void RawMachineAssembler::Bind(RawMachineLabel* label,
str << "Binding label without closing previous block:"
<< "\n# label: " << info
<< "\n# previous block: " << *current_block_;
- FATAL(str.str().c_str());
+ FATAL("%s", str.str().c_str());
}
Bind(label);
current_block_->set_debug_info(info);
@@ -495,7 +519,7 @@ RawMachineLabel::~RawMachineLabel() {
} else {
str << "A label has been used but it's not bound.";
}
- FATAL(str.str().c_str());
+ FATAL("%s", str.str().c_str());
#endif // DEBUG
}
diff --git a/deps/v8/src/compiler/raw-machine-assembler.h b/deps/v8/src/compiler/raw-machine-assembler.h
index 9fc3590875..1cc56b3379 100644
--- a/deps/v8/src/compiler/raw-machine-assembler.h
+++ b/deps/v8/src/compiler/raw-machine-assembler.h
@@ -186,6 +186,10 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
old_value, new_value);
}
+ Node* SpeculationFence() {
+ return AddNode(machine()->SpeculationFence().op());
+ }
+
// Arithmetic Operations.
Node* WordAnd(Node* a, Node* b) {
return AddNode(machine()->WordAnd(), a, b);
@@ -828,9 +832,12 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
void Return(Node* value);
void Return(Node* v1, Node* v2);
void Return(Node* v1, Node* v2, Node* v3);
+ void Return(Node* v1, Node* v2, Node* v3, Node* v4);
+ void Return(int count, Node* v[]);
void PopAndReturn(Node* pop, Node* value);
void PopAndReturn(Node* pop, Node* v1, Node* v2);
void PopAndReturn(Node* pop, Node* v1, Node* v2, Node* v3);
+ void PopAndReturn(Node* pop, Node* v1, Node* v2, Node* v3, Node* v4);
void Bind(RawMachineLabel* label);
void Deoptimize(Node* state);
void DebugAbort(Node* message);
diff --git a/deps/v8/src/compiler/redundancy-elimination.cc b/deps/v8/src/compiler/redundancy-elimination.cc
index 3a40e8d5bf..eedf946fb6 100644
--- a/deps/v8/src/compiler/redundancy-elimination.cc
+++ b/deps/v8/src/compiler/redundancy-elimination.cc
@@ -5,6 +5,7 @@
#include "src/compiler/redundancy-elimination.h"
#include "src/compiler/node-properties.h"
+#include "src/compiler/simplified-operator.h"
namespace v8 {
namespace internal {
@@ -19,26 +20,36 @@ Reduction RedundancyElimination::Reduce(Node* node) {
if (node_checks_.Get(node)) return NoChange();
switch (node->opcode()) {
case IrOpcode::kCheckBounds:
+ case IrOpcode::kCheckEqualsInternalizedString:
+ case IrOpcode::kCheckEqualsSymbol:
case IrOpcode::kCheckFloat64Hole:
case IrOpcode::kCheckHeapObject:
case IrOpcode::kCheckIf:
case IrOpcode::kCheckInternalizedString:
+ case IrOpcode::kCheckNotTaggedHole:
case IrOpcode::kCheckNumber:
case IrOpcode::kCheckReceiver:
+ case IrOpcode::kCheckSeqString:
case IrOpcode::kCheckSmi:
case IrOpcode::kCheckString:
- case IrOpcode::kCheckSeqString:
- case IrOpcode::kCheckNotTaggedHole:
+ case IrOpcode::kCheckSymbol:
case IrOpcode::kCheckedFloat64ToInt32:
case IrOpcode::kCheckedInt32Add:
- case IrOpcode::kCheckedInt32Sub:
case IrOpcode::kCheckedInt32Div:
case IrOpcode::kCheckedInt32Mod:
case IrOpcode::kCheckedInt32Mul:
- case IrOpcode::kCheckedTaggedToFloat64:
+ case IrOpcode::kCheckedInt32Sub:
+ case IrOpcode::kCheckedInt32ToTaggedSigned:
case IrOpcode::kCheckedTaggedSignedToInt32:
+ case IrOpcode::kCheckedTaggedToFloat64:
case IrOpcode::kCheckedTaggedToInt32:
+ case IrOpcode::kCheckedTaggedToTaggedPointer:
+ case IrOpcode::kCheckedTaggedToTaggedSigned:
+ case IrOpcode::kCheckedTruncateTaggedToWord32:
+ case IrOpcode::kCheckedUint32Div:
+ case IrOpcode::kCheckedUint32Mod:
case IrOpcode::kCheckedUint32ToInt32:
+ case IrOpcode::kCheckedUint32ToTaggedSigned:
return ReduceCheckNode(node);
case IrOpcode::kSpeculativeNumberAdd:
case IrOpcode::kSpeculativeNumberSubtract:
@@ -124,13 +135,43 @@ RedundancyElimination::EffectPathChecks::AddCheck(Zone* zone,
namespace {
-bool IsCompatibleCheck(Node const* a, Node const* b) {
+// Does check {a} subsume check {b}?
+bool CheckSubsumes(Node const* a, Node const* b) {
if (a->op() != b->op()) {
if (a->opcode() == IrOpcode::kCheckInternalizedString &&
b->opcode() == IrOpcode::kCheckString) {
// CheckInternalizedString(node) implies CheckString(node)
- } else {
+ } else if (a->opcode() != b->opcode()) {
return false;
+ } else {
+ switch (a->opcode()) {
+ case IrOpcode::kCheckBounds:
+ case IrOpcode::kCheckSmi:
+ case IrOpcode::kCheckString:
+ case IrOpcode::kCheckNumber:
+ break;
+ case IrOpcode::kCheckedInt32ToTaggedSigned:
+ case IrOpcode::kCheckedTaggedSignedToInt32:
+ case IrOpcode::kCheckedTaggedToTaggedPointer:
+ case IrOpcode::kCheckedTaggedToTaggedSigned:
+ case IrOpcode::kCheckedUint32ToInt32:
+ case IrOpcode::kCheckedUint32ToTaggedSigned:
+ break;
+ case IrOpcode::kCheckedFloat64ToInt32:
+ case IrOpcode::kCheckedTaggedToInt32: {
+ const CheckMinusZeroParameters& ap =
+ CheckMinusZeroParametersOf(a->op());
+ const CheckMinusZeroParameters& bp =
+ CheckMinusZeroParametersOf(b->op());
+ if (ap.mode() != bp.mode()) {
+ return false;
+ }
+ break;
+ }
+ default:
+ DCHECK(!IsCheckedWithFeedback(a->op()));
+ return false;
+ }
}
}
for (int i = a->op()->ValueInputCount(); --i >= 0;) {
@@ -143,7 +184,7 @@ bool IsCompatibleCheck(Node const* a, Node const* b) {
Node* RedundancyElimination::EffectPathChecks::LookupCheck(Node* node) const {
for (Check const* check = head_; check != nullptr; check = check->next) {
- if (IsCompatibleCheck(check->node, node)) {
+ if (CheckSubsumes(check->node, node)) {
DCHECK(!check->node->IsDead());
return check->node;
}
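
A concrete reading of the new rule (illustrative, following the CheckMinusZeroParameters comparison above): checks with the same opcode but different minus-zero modes no longer unify.

    // Illustrative pair that does not unify, because the modes differ:
    //   a: CheckedTaggedToInt32[kCheckForMinusZero](x, effect, control)
    //   b: CheckedTaggedToInt32[kDontCheckForMinusZero](x, effect, control)
    // CheckSubsumes(a, b) is false, so LookupCheck keeps b as a separate check.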
diff --git a/deps/v8/src/compiler/representation-change.cc b/deps/v8/src/compiler/representation-change.cc
index b0a345a57f..f8a5a9c504 100644
--- a/deps/v8/src/compiler/representation-change.cc
+++ b/deps/v8/src/compiler/representation-change.cc
@@ -216,7 +216,9 @@ Node* RepresentationChanger::GetTaggedSignedRepresentationFor(
const Operator* op;
if (output_type->Is(Type::None())) {
// This is an impossible value; it should not be used at runtime.
- return jsgraph()->DeadValue();
+ return jsgraph()->graph()->NewNode(
+ jsgraph()->common()->DeadValue(MachineRepresentation::kTaggedSigned),
+ node);
} else if (IsWord(output_rep)) {
if (output_type->Is(Type::Signed31())) {
op = simplified()->ChangeInt31ToTaggedSigned();
@@ -224,14 +226,14 @@ Node* RepresentationChanger::GetTaggedSignedRepresentationFor(
if (SmiValuesAre32Bits()) {
op = simplified()->ChangeInt32ToTagged();
} else if (use_info.type_check() == TypeCheckKind::kSignedSmall) {
- op = simplified()->CheckedInt32ToTaggedSigned();
+ op = simplified()->CheckedInt32ToTaggedSigned(use_info.feedback());
} else {
return TypeError(node, output_rep, output_type,
MachineRepresentation::kTaggedSigned);
}
} else if (output_type->Is(Type::Unsigned32()) &&
use_info.type_check() == TypeCheckKind::kSignedSmall) {
- op = simplified()->CheckedUint32ToTaggedSigned();
+ op = simplified()->CheckedUint32ToTaggedSigned(use_info.feedback());
} else {
return TypeError(node, output_rep, output_type,
MachineRepresentation::kTaggedSigned);
@@ -247,7 +249,7 @@ Node* RepresentationChanger::GetTaggedSignedRepresentationFor(
if (SmiValuesAre32Bits()) {
op = simplified()->ChangeInt32ToTagged();
} else if (use_info.type_check() == TypeCheckKind::kSignedSmall) {
- op = simplified()->CheckedInt32ToTaggedSigned();
+ op = simplified()->CheckedInt32ToTaggedSigned(use_info.feedback());
} else {
return TypeError(node, output_rep, output_type,
MachineRepresentation::kTaggedSigned);
@@ -256,17 +258,18 @@ Node* RepresentationChanger::GetTaggedSignedRepresentationFor(
use_info.type_check() == TypeCheckKind::kSignedSmall) {
// float64 -> uint32 -> tagged signed
node = InsertChangeFloat64ToUint32(node);
- op = simplified()->CheckedUint32ToTaggedSigned();
+ op = simplified()->CheckedUint32ToTaggedSigned(use_info.feedback());
} else if (use_info.type_check() == TypeCheckKind::kSignedSmall) {
op = simplified()->CheckedFloat64ToInt32(
output_type->Maybe(Type::MinusZero())
? CheckForMinusZeroMode::kCheckForMinusZero
- : CheckForMinusZeroMode::kDontCheckForMinusZero);
+ : CheckForMinusZeroMode::kDontCheckForMinusZero,
+ use_info.feedback());
node = InsertConversion(node, op, use_node);
if (SmiValuesAre32Bits()) {
op = simplified()->ChangeInt32ToTagged();
} else {
- op = simplified()->CheckedInt32ToTaggedSigned();
+ op = simplified()->CheckedInt32ToTaggedSigned(use_info.feedback());
}
} else {
return TypeError(node, output_rep, output_type,
@@ -279,12 +282,13 @@ Node* RepresentationChanger::GetTaggedSignedRepresentationFor(
op = simplified()->CheckedFloat64ToInt32(
output_type->Maybe(Type::MinusZero())
? CheckForMinusZeroMode::kCheckForMinusZero
- : CheckForMinusZeroMode::kDontCheckForMinusZero);
+ : CheckForMinusZeroMode::kDontCheckForMinusZero,
+ use_info.feedback());
node = InsertConversion(node, op, use_node);
if (SmiValuesAre32Bits()) {
op = simplified()->ChangeInt32ToTagged();
} else {
- op = simplified()->CheckedInt32ToTaggedSigned();
+ op = simplified()->CheckedInt32ToTaggedSigned(use_info.feedback());
}
} else {
return TypeError(node, output_rep, output_type,
@@ -292,7 +296,7 @@ Node* RepresentationChanger::GetTaggedSignedRepresentationFor(
}
} else if (CanBeTaggedPointer(output_rep)) {
if (use_info.type_check() == TypeCheckKind::kSignedSmall) {
- op = simplified()->CheckedTaggedToTaggedSigned();
+ op = simplified()->CheckedTaggedToTaggedSigned(use_info.feedback());
} else if (output_type->Is(Type::SignedSmall())) {
op = simplified()->ChangeTaggedToTaggedSigned();
} else {
@@ -304,7 +308,7 @@ Node* RepresentationChanger::GetTaggedSignedRepresentationFor(
// TODO(turbofan): Consider adding a Bailout operator that just deopts.
// Also use that for MachineRepresentation::kPointer case above.
node = InsertChangeBitToTagged(node);
- op = simplified()->CheckedTaggedToTaggedSigned();
+ op = simplified()->CheckedTaggedToTaggedSigned(use_info.feedback());
} else {
return TypeError(node, output_rep, output_type,
MachineRepresentation::kTaggedSigned);
@@ -334,7 +338,9 @@ Node* RepresentationChanger::GetTaggedPointerRepresentationFor(
Operator const* op;
if (output_type->Is(Type::None())) {
// This is an impossible value; it should not be used at runtime.
- return jsgraph()->DeadValue();
+ return jsgraph()->graph()->NewNode(
+ jsgraph()->common()->DeadValue(MachineRepresentation::kTaggedPointer),
+ node);
} else if (output_rep == MachineRepresentation::kBit) {
if (output_type->Is(Type::Boolean())) {
op = simplified()->ChangeBitToTagged();
@@ -378,7 +384,7 @@ Node* RepresentationChanger::GetTaggedPointerRepresentationFor(
}
// TODO(turbofan): Consider adding a Bailout operator that just deopts
// for TaggedSigned output representation.
- op = simplified()->CheckedTaggedToTaggedPointer();
+ op = simplified()->CheckedTaggedToTaggedPointer(use_info.feedback());
} else {
return TypeError(node, output_rep, output_type,
MachineRepresentation::kTaggedPointer);
@@ -411,7 +417,8 @@ Node* RepresentationChanger::GetTaggedRepresentationFor(
const Operator* op;
if (output_type->Is(Type::None())) {
// This is an impossible value; it should not be used at runtime.
- return jsgraph()->DeadValue();
+ return jsgraph()->graph()->NewNode(
+ jsgraph()->common()->DeadValue(MachineRepresentation::kTagged), node);
} else if (output_rep == MachineRepresentation::kBit) {
if (output_type->Is(Type::Boolean())) {
op = simplified()->ChangeBitToTagged();
@@ -489,7 +496,8 @@ Node* RepresentationChanger::GetFloat32RepresentationFor(
const Operator* op = nullptr;
if (output_type->Is(Type::None())) {
// This is an impossible value; it should not be used at runtime.
- return jsgraph()->DeadValue();
+ return jsgraph()->graph()->NewNode(
+ jsgraph()->common()->DeadValue(MachineRepresentation::kFloat32), node);
} else if (IsWord(output_rep)) {
if (output_type->Is(Type::Signed32())) {
// int32 -> float64 -> float32
@@ -549,7 +557,8 @@ Node* RepresentationChanger::GetFloat64RepresentationFor(
const Operator* op = nullptr;
if (output_type->Is(Type::None())) {
// This is an impossible value; it should not be used at runtime.
- return jsgraph()->DeadValue();
+ return jsgraph()->graph()->NewNode(
+ jsgraph()->common()->DeadValue(MachineRepresentation::kFloat64), node);
} else if (IsWord(output_rep)) {
if (output_type->Is(Type::Signed32())) {
op = machine()->ChangeInt32ToFloat64();
@@ -626,7 +635,8 @@ Node* RepresentationChanger::GetWord32RepresentationFor(
const Operator* op = nullptr;
if (output_type->Is(Type::None())) {
// This is an impossible value; it should not be used at runtime.
- return jsgraph()->DeadValue();
+ return jsgraph()->graph()->NewNode(
+ jsgraph()->common()->DeadValue(MachineRepresentation::kWord32), node);
} else if (output_rep == MachineRepresentation::kBit) {
return node; // Sloppy comparison -> word32
} else if (output_rep == MachineRepresentation::kFloat64) {
@@ -637,7 +647,8 @@ Node* RepresentationChanger::GetWord32RepresentationFor(
op = simplified()->CheckedFloat64ToInt32(
output_type->Maybe(Type::MinusZero())
? use_info.minus_zero_check()
- : CheckForMinusZeroMode::kDontCheckForMinusZero);
+ : CheckForMinusZeroMode::kDontCheckForMinusZero,
+ use_info.feedback());
} else if (output_type->Is(Type::Unsigned32())) {
op = machine()->ChangeFloat64ToUint32();
} else if (use_info.truncation().IsUsedAsWord32()) {
@@ -655,7 +666,8 @@ Node* RepresentationChanger::GetWord32RepresentationFor(
op = simplified()->CheckedFloat64ToInt32(
output_type->Maybe(Type::MinusZero())
? use_info.minus_zero_check()
- : CheckForMinusZeroMode::kDontCheckForMinusZero);
+ : CheckForMinusZeroMode::kDontCheckForMinusZero,
+ use_info.feedback());
} else if (output_type->Is(Type::Unsigned32())) {
op = machine()->ChangeFloat64ToUint32();
} else if (use_info.truncation().IsUsedAsWord32()) {
@@ -671,12 +683,13 @@ Node* RepresentationChanger::GetWord32RepresentationFor(
} else if (output_type->Is(Type::Signed32())) {
op = simplified()->ChangeTaggedToInt32();
} else if (use_info.type_check() == TypeCheckKind::kSignedSmall) {
- op = simplified()->CheckedTaggedSignedToInt32();
+ op = simplified()->CheckedTaggedSignedToInt32(use_info.feedback());
} else if (use_info.type_check() == TypeCheckKind::kSigned32) {
op = simplified()->CheckedTaggedToInt32(
output_type->Maybe(Type::MinusZero())
? use_info.minus_zero_check()
- : CheckForMinusZeroMode::kDontCheckForMinusZero);
+ : CheckForMinusZeroMode::kDontCheckForMinusZero,
+ use_info.feedback());
} else if (output_type->Is(Type::Unsigned32())) {
op = simplified()->ChangeTaggedToUint32();
} else if (use_info.truncation().IsUsedAsWord32()) {
@@ -684,10 +697,10 @@ Node* RepresentationChanger::GetWord32RepresentationFor(
op = simplified()->TruncateTaggedToWord32();
} else if (use_info.type_check() == TypeCheckKind::kNumber) {
op = simplified()->CheckedTruncateTaggedToWord32(
- CheckTaggedInputMode::kNumber);
+ CheckTaggedInputMode::kNumber, use_info.feedback());
} else if (use_info.type_check() == TypeCheckKind::kNumberOrOddball) {
op = simplified()->CheckedTruncateTaggedToWord32(
- CheckTaggedInputMode::kNumberOrOddball);
+ CheckTaggedInputMode::kNumberOrOddball, use_info.feedback());
} else {
return TypeError(node, output_rep, output_type,
MachineRepresentation::kWord32);
@@ -704,7 +717,7 @@ Node* RepresentationChanger::GetWord32RepresentationFor(
if (output_type->Is(Type::Signed32())) {
return node;
} else if (output_type->Is(Type::Unsigned32())) {
- op = simplified()->CheckedUint32ToInt32();
+ op = simplified()->CheckedUint32ToInt32(use_info.feedback());
} else {
return TypeError(node, output_rep, output_type,
MachineRepresentation::kWord32);
@@ -762,7 +775,8 @@ Node* RepresentationChanger::GetBitRepresentationFor(
const Operator* op;
if (output_type->Is(Type::None())) {
// This is an impossible value; it should not be used at runtime.
- return jsgraph()->DeadValue();
+ return jsgraph()->graph()->NewNode(
+ jsgraph()->common()->DeadValue(MachineRepresentation::kBit), node);
} else if (output_rep == MachineRepresentation::kTagged ||
output_rep == MachineRepresentation::kTaggedPointer) {
if (output_type->Is(Type::BooleanOrNullOrUndefined())) {
@@ -807,7 +821,8 @@ Node* RepresentationChanger::GetWord64RepresentationFor(
Node* node, MachineRepresentation output_rep, Type* output_type) {
if (output_type->Is(Type::None())) {
// This is an impossible value; it should not be used at runtime.
- return jsgraph()->DeadValue();
+ return jsgraph()->graph()->NewNode(
+ jsgraph()->common()->DeadValue(MachineRepresentation::kWord32), node);
} else if (output_rep == MachineRepresentation::kBit) {
return node; // Sloppy comparison -> word64
}
diff --git a/deps/v8/src/compiler/representation-change.h b/deps/v8/src/compiler/representation-change.h
index 52a3e75c8a..b23a3dac5b 100644
--- a/deps/v8/src/compiler/representation-change.h
+++ b/deps/v8/src/compiler/representation-change.h
@@ -147,13 +147,18 @@ inline std::ostream& operator<<(std::ostream& os, TypeCheckKind type_check) {
// to the preferred representation. The preferred representation might be
// insufficient to do the conversion (e.g. word32->float64 conv), so we also
// need the signedness information to produce the correct value.
+// Additionally, use info may contain {CheckParameters} which contains
+// information for the deoptimizer such as a CallIC on which speculation
+// should be disallowed if the check fails.
class UseInfo {
public:
UseInfo(MachineRepresentation representation, Truncation truncation,
- TypeCheckKind type_check = TypeCheckKind::kNone)
+ TypeCheckKind type_check = TypeCheckKind::kNone,
+ const VectorSlotPair& feedback = VectorSlotPair())
: representation_(representation),
truncation_(truncation),
- type_check_(type_check) {}
+ type_check_(type_check),
+ feedback_(feedback) {}
static UseInfo TruncatingWord32() {
return UseInfo(MachineRepresentation::kWord32, Truncation::Word32());
}
@@ -187,14 +192,16 @@ class UseInfo {
return UseInfo(MachineRepresentation::kTaggedPointer, Truncation::Any(),
TypeCheckKind::kHeapObject);
}
- static UseInfo CheckedSignedSmallAsTaggedSigned() {
+ static UseInfo CheckedSignedSmallAsTaggedSigned(
+ const VectorSlotPair& feedback) {
return UseInfo(MachineRepresentation::kTaggedSigned, Truncation::Any(),
- TypeCheckKind::kSignedSmall);
+ TypeCheckKind::kSignedSmall, feedback);
}
- static UseInfo CheckedSignedSmallAsWord32(IdentifyZeros identify_zeros) {
+ static UseInfo CheckedSignedSmallAsWord32(IdentifyZeros identify_zeros,
+ const VectorSlotPair& feedback) {
return UseInfo(MachineRepresentation::kWord32,
- Truncation::Any(identify_zeros),
- TypeCheckKind::kSignedSmall);
+ Truncation::Any(identify_zeros), TypeCheckKind::kSignedSmall,
+ feedback);
}
static UseInfo CheckedSigned32AsWord32(IdentifyZeros identify_zeros) {
return UseInfo(MachineRepresentation::kWord32,
@@ -238,11 +245,13 @@ class UseInfo {
? CheckForMinusZeroMode::kDontCheckForMinusZero
: CheckForMinusZeroMode::kCheckForMinusZero;
}
+ const VectorSlotPair& feedback() const { return feedback_; }
private:
MachineRepresentation representation_;
Truncation truncation_;
TypeCheckKind type_check_;
+ VectorSlotPair feedback_;
};
// Contains logic related to changing the representation of values for constants
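
A minimal sketch of how the new feedback parameter flows (illustrative; the names are those declared above): a checked use carries the VectorSlotPair, and the representation changer forwards it into the checked operator it selects.

    VectorSlotPair feedback;  // default-constructed: no feedback slot attached
    UseInfo use = UseInfo::CheckedSignedSmallAsTaggedSigned(feedback);
    // later, in RepresentationChanger::GetTaggedSignedRepresentationFor:
    //   op = simplified()->CheckedTaggedToTaggedSigned(use.feedback());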
diff --git a/deps/v8/src/compiler/s390/code-generator-s390.cc b/deps/v8/src/compiler/s390/code-generator-s390.cc
index f49a8e540c..c0d3146be1 100644
--- a/deps/v8/src/compiler/s390/code-generator-s390.cc
+++ b/deps/v8/src/compiler/s390/code-generator-s390.cc
@@ -156,45 +156,6 @@ static inline bool HasStackSlotInput(Instruction* instr, size_t index) {
namespace {
-class OutOfLineLoadNAN32 final : public OutOfLineCode {
- public:
- OutOfLineLoadNAN32(CodeGenerator* gen, DoubleRegister result)
- : OutOfLineCode(gen), result_(result) {}
-
- void Generate() final {
- __ LoadDoubleLiteral(result_, std::numeric_limits<float>::quiet_NaN(),
- kScratchReg);
- }
-
- private:
- DoubleRegister const result_;
-};
-
-class OutOfLineLoadNAN64 final : public OutOfLineCode {
- public:
- OutOfLineLoadNAN64(CodeGenerator* gen, DoubleRegister result)
- : OutOfLineCode(gen), result_(result) {}
-
- void Generate() final {
- __ LoadDoubleLiteral(result_, std::numeric_limits<double>::quiet_NaN(),
- kScratchReg);
- }
-
- private:
- DoubleRegister const result_;
-};
-
-class OutOfLineLoadZero final : public OutOfLineCode {
- public:
- OutOfLineLoadZero(CodeGenerator* gen, Register result)
- : OutOfLineCode(gen), result_(result) {}
-
- void Generate() final { __ LoadImmP(result_, Operand::Zero()); }
-
- private:
- Register const result_;
-};
-
class OutOfLineRecordWrite final : public OutOfLineCode {
public:
OutOfLineRecordWrite(CodeGenerator* gen, Register object, Register offset,
@@ -938,102 +899,6 @@ static inline int AssembleUnaryOp(Instruction* instr, _R _r, _M _m, _I _i) {
__ asm_instr(value, operand); \
} while (0)
-#define ASSEMBLE_CHECKED_LOAD_FLOAT(asm_instr, width) \
- do { \
- DoubleRegister result = i.OutputDoubleRegister(); \
- size_t index = 0; \
- AddressingMode mode = kMode_None; \
- MemOperand operand = i.MemoryOperand(&mode, index); \
- Register offset = operand.rb(); \
- if (HasRegisterInput(instr, 2)) { \
- __ CmpLogical32(offset, i.InputRegister(2)); \
- } else { \
- __ CmpLogical32(offset, i.InputImmediate(2)); \
- } \
- auto ool = new (zone()) OutOfLineLoadNAN##width(this, result); \
- __ bge(ool->entry()); \
- __ CleanUInt32(offset); \
- __ asm_instr(result, operand); \
- __ bind(ool->exit()); \
- } while (0)
-
-#define ASSEMBLE_CHECKED_LOAD_INTEGER(asm_instr) \
- do { \
- Register result = i.OutputRegister(); \
- size_t index = 0; \
- AddressingMode mode = kMode_None; \
- MemOperand operand = i.MemoryOperand(&mode, index); \
- Register offset = operand.rb(); \
- if (HasRegisterInput(instr, 2)) { \
- __ CmpLogical32(offset, i.InputRegister(2)); \
- } else { \
- __ CmpLogical32(offset, i.InputImmediate(2)); \
- } \
- auto ool = new (zone()) OutOfLineLoadZero(this, result); \
- __ bge(ool->entry()); \
- __ CleanUInt32(offset); \
- __ asm_instr(result, operand); \
- __ bind(ool->exit()); \
- } while (0)
-
-#define ASSEMBLE_CHECKED_STORE_FLOAT32() \
- do { \
- Label done; \
- size_t index = 0; \
- AddressingMode mode = kMode_None; \
- MemOperand operand = i.MemoryOperand(&mode, index); \
- Register offset = operand.rb(); \
- if (HasRegisterInput(instr, 2)) { \
- __ CmpLogical32(offset, i.InputRegister(2)); \
- } else { \
- __ CmpLogical32(offset, i.InputImmediate(2)); \
- } \
- __ bge(&done); \
- DoubleRegister value = i.InputDoubleRegister(3); \
- __ CleanUInt32(offset); \
- __ StoreFloat32(value, operand); \
- __ bind(&done); \
- } while (0)
-
-#define ASSEMBLE_CHECKED_STORE_DOUBLE() \
- do { \
- Label done; \
- size_t index = 0; \
- AddressingMode mode = kMode_None; \
- MemOperand operand = i.MemoryOperand(&mode, index); \
- DCHECK_EQ(kMode_MRR, mode); \
- Register offset = operand.rb(); \
- if (HasRegisterInput(instr, 2)) { \
- __ CmpLogical32(offset, i.InputRegister(2)); \
- } else { \
- __ CmpLogical32(offset, i.InputImmediate(2)); \
- } \
- __ bge(&done); \
- DoubleRegister value = i.InputDoubleRegister(3); \
- __ CleanUInt32(offset); \
- __ StoreDouble(value, operand); \
- __ bind(&done); \
- } while (0)
-
-#define ASSEMBLE_CHECKED_STORE_INTEGER(asm_instr) \
- do { \
- Label done; \
- size_t index = 0; \
- AddressingMode mode = kMode_None; \
- MemOperand operand = i.MemoryOperand(&mode, index); \
- Register offset = operand.rb(); \
- if (HasRegisterInput(instr, 2)) { \
- __ CmpLogical32(offset, i.InputRegister(2)); \
- } else { \
- __ CmpLogical32(offset, i.InputImmediate(2)); \
- } \
- __ bge(&done); \
- Register value = i.InputRegister(3); \
- __ CleanUInt32(offset); \
- __ asm_instr(value, operand); \
- __ bind(&done); \
- } while (0)
-
void CodeGenerator::AssembleDeconstructFrame() {
__ LeaveFrame(StackFrame::MANUAL);
}
@@ -1219,8 +1084,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
if (instr->InputAt(0)->IsImmediate()) {
+#ifdef V8_TARGET_ARCH_S390X
+ Address wasm_code = reinterpret_cast<Address>(
+ i.ToConstant(instr->InputAt(0)).ToInt64());
+#else
Address wasm_code = reinterpret_cast<Address>(
i.ToConstant(instr->InputAt(0)).ToInt32());
+#endif
__ Call(wasm_code, rmode);
} else {
__ Call(i.InputRegister(0));
@@ -1283,7 +1153,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ LoadP(kScratchReg,
FieldMemOperand(func, JSFunction::kContextOffset));
__ CmpP(cp, kScratchReg);
- __ Assert(eq, kWrongFunctionContext);
+ __ Assert(eq, AbortReason::kWrongFunctionContext);
}
__ LoadP(ip, FieldMemOperand(func, JSFunction::kCodeOffset));
__ AddP(ip, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
@@ -2107,7 +1977,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Label done;
__ ConvertDoubleToInt32(i.OutputRegister(0), i.InputDoubleRegister(0),
kRoundToNearest);
- __ b(Condition(0xe), &done, Label::kNear); // normal case
+ __ b(Condition(0xE), &done, Label::kNear); // normal case
__ lghi(i.OutputRegister(0), Operand::Zero());
__ bind(&done);
break;
@@ -2116,7 +1986,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Label done;
__ ConvertDoubleToUnsignedInt32(i.OutputRegister(0),
i.InputDoubleRegister(0));
- __ b(Condition(0xe), &done, Label::kNear); // normal case
+ __ b(Condition(0xE), &done, Label::kNear); // normal case
__ lghi(i.OutputRegister(0), Operand::Zero());
__ bind(&done);
break;
@@ -2127,7 +1997,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ lghi(i.OutputRegister(1), Operand(1));
}
__ ConvertDoubleToInt64(i.OutputRegister(0), i.InputDoubleRegister(0));
- __ b(Condition(0xe), &done, Label::kNear); // normal case
+ __ b(Condition(0xE), &done, Label::kNear); // normal case
if (i.OutputCount() > 1) {
__ lghi(i.OutputRegister(1), Operand::Zero());
} else {
@@ -2143,7 +2013,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
__ ConvertDoubleToUnsignedInt64(i.OutputRegister(0),
i.InputDoubleRegister(0));
- __ b(Condition(0xe), &done, Label::kNear); // normal case
+ __ b(Condition(0xE), &done, Label::kNear); // normal case
if (i.OutputCount() > 1) {
__ lghi(i.OutputRegister(1), Operand::Zero());
} else {
@@ -2156,7 +2026,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Label done;
__ ConvertFloat32ToInt32(i.OutputRegister(0), i.InputDoubleRegister(0),
kRoundToZero);
- __ b(Condition(0xe), &done, Label::kNear); // normal case
+ __ b(Condition(0xE), &done, Label::kNear); // normal case
__ lghi(i.OutputRegister(0), Operand::Zero());
__ bind(&done);
break;
@@ -2165,7 +2035,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Label done;
__ ConvertFloat32ToUnsignedInt32(i.OutputRegister(0),
i.InputDoubleRegister(0));
- __ b(Condition(0xe), &done, Label::kNear); // normal case
+ __ b(Condition(0xE), &done, Label::kNear); // normal case
__ lghi(i.OutputRegister(0), Operand::Zero());
__ bind(&done);
break;
@@ -2177,7 +2047,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
__ ConvertFloat32ToUnsignedInt64(i.OutputRegister(0),
i.InputDoubleRegister(0));
- __ b(Condition(0xe), &done, Label::kNear); // normal case
+ __ b(Condition(0xE), &done, Label::kNear); // normal case
if (i.OutputCount() > 1) {
__ lghi(i.OutputRegister(1), Operand::Zero());
} else {
@@ -2192,7 +2062,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ lghi(i.OutputRegister(1), Operand(1));
}
__ ConvertFloat32ToInt64(i.OutputRegister(0), i.InputDoubleRegister(0));
- __ b(Condition(0xe), &done, Label::kNear); // normal case
+ __ b(Condition(0xE), &done, Label::kNear); // normal case
if (i.OutputCount() > 1) {
__ lghi(i.OutputRegister(1), Operand::Zero());
} else {
@@ -2334,56 +2204,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kS390_Lay:
__ lay(i.OutputRegister(), i.MemoryOperand());
break;
- case kCheckedLoadInt8:
- ASSEMBLE_CHECKED_LOAD_INTEGER(LoadB);
- break;
- case kCheckedLoadUint8:
- ASSEMBLE_CHECKED_LOAD_INTEGER(LoadlB);
- break;
- case kCheckedLoadInt16:
- ASSEMBLE_CHECKED_LOAD_INTEGER(LoadHalfWordP);
- break;
- case kCheckedLoadUint16:
- ASSEMBLE_CHECKED_LOAD_INTEGER(LoadLogicalHalfWordP);
- break;
- case kCheckedLoadWord32:
- ASSEMBLE_CHECKED_LOAD_INTEGER(LoadlW);
- break;
- case kCheckedLoadWord64:
-#if V8_TARGET_ARCH_S390X
- ASSEMBLE_CHECKED_LOAD_INTEGER(LoadP);
-#else
- UNREACHABLE();
-#endif
- break;
- case kCheckedLoadFloat32:
- ASSEMBLE_CHECKED_LOAD_FLOAT(LoadFloat32, 32);
- break;
- case kCheckedLoadFloat64:
- ASSEMBLE_CHECKED_LOAD_FLOAT(LoadDouble, 64);
- break;
- case kCheckedStoreWord8:
- ASSEMBLE_CHECKED_STORE_INTEGER(StoreByte);
- break;
- case kCheckedStoreWord16:
- ASSEMBLE_CHECKED_STORE_INTEGER(StoreHalfWord);
- break;
- case kCheckedStoreWord32:
- ASSEMBLE_CHECKED_STORE_INTEGER(StoreW);
- break;
- case kCheckedStoreWord64:
-#if V8_TARGET_ARCH_S390X
- ASSEMBLE_CHECKED_STORE_INTEGER(StoreP);
-#else
- UNREACHABLE();
-#endif
- break;
- case kCheckedStoreFloat32:
- ASSEMBLE_CHECKED_STORE_FLOAT32();
- break;
- case kCheckedStoreFloat64:
- ASSEMBLE_CHECKED_STORE_DOUBLE();
- break;
case kAtomicLoadInt8:
__ LoadB(i.OutputRegister(), i.MemoryOperand());
break;
@@ -2629,7 +2449,7 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
gen_->RecordSafepoint(reference_map, Safepoint::kSimple, 0,
Safepoint::kNoLazyDeopt);
if (FLAG_debug_code) {
- __ stop(GetBailoutReason(kUnexpectedReturnFromWasmTrap));
+ __ stop(GetAbortReason(AbortReason::kUnexpectedReturnFromWasmTrap));
}
}
}
@@ -2762,7 +2582,7 @@ void CodeGenerator::AssembleConstructFrame() {
frame()->GetTotalFrameSlotCount() - descriptor->CalculateFixedFrameSize();
if (info()->is_osr()) {
// TurboFan OSR-compiled functions cannot be entered directly.
- __ Abort(kShouldNotDirectlyEnterOsrFunction);
+ __ Abort(AbortReason::kShouldNotDirectlyEnterOsrFunction);
// Unoptimized code jumps directly to this entrypoint while the unoptimized
// frame is still on the stack. Optimized code uses OSR values directly from
diff --git a/deps/v8/src/compiler/s390/instruction-selector-s390.cc b/deps/v8/src/compiler/s390/instruction-selector-s390.cc
index 54f5a0c68b..457c5a1d82 100644
--- a/deps/v8/src/compiler/s390/instruction-selector-s390.cc
+++ b/deps/v8/src/compiler/s390/instruction-selector-s390.cc
@@ -613,7 +613,8 @@ void VisitUnaryOp(InstructionSelector* selector, Node* node,
if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
- cont->kind(), cont->reason(), cont->frame_state());
+ cont->kind(), cont->reason(), cont->feedback(),
+ cont->frame_state());
} else if (cont->IsTrap()) {
inputs[input_count++] = g.UseImmediate(cont->trap_id());
selector->Emit(opcode, output_count, outputs, input_count, inputs);
@@ -688,7 +689,8 @@ void VisitBinOp(InstructionSelector* selector, Node* node,
if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
- cont->kind(), cont->reason(), cont->frame_state());
+ cont->kind(), cont->reason(), cont->feedback(),
+ cont->frame_state());
} else if (cont->IsTrap()) {
inputs[input_count++] = g.UseImmediate(cont->trap_id());
selector->Emit(opcode, output_count, outputs, input_count, inputs);
@@ -850,99 +852,6 @@ void InstructionSelector::VisitUnalignedLoad(Node* node) { UNREACHABLE(); }
// Architecture supports unaligned access, therefore VisitStore is used instead
void InstructionSelector::VisitUnalignedStore(Node* node) { UNREACHABLE(); }
-void InstructionSelector::VisitCheckedLoad(Node* node) {
- CheckedLoadRepresentation load_rep = CheckedLoadRepresentationOf(node->op());
- S390OperandGenerator g(this);
- Node* const base = node->InputAt(0);
- Node* const offset = node->InputAt(1);
- Node* const length = node->InputAt(2);
- ArchOpcode opcode = kArchNop;
- switch (load_rep.representation()) {
- case MachineRepresentation::kWord8:
- opcode = load_rep.IsSigned() ? kCheckedLoadInt8 : kCheckedLoadUint8;
- break;
- case MachineRepresentation::kWord16:
- opcode = load_rep.IsSigned() ? kCheckedLoadInt16 : kCheckedLoadUint16;
- break;
- case MachineRepresentation::kWord32:
- opcode = kCheckedLoadWord32;
- break;
-#if V8_TARGET_ARCH_S390X
- case MachineRepresentation::kWord64:
- opcode = kCheckedLoadWord64;
- break;
-#endif
- case MachineRepresentation::kFloat32:
- opcode = kCheckedLoadFloat32;
- break;
- case MachineRepresentation::kFloat64:
- opcode = kCheckedLoadFloat64;
- break;
- case MachineRepresentation::kBit: // Fall through.
- case MachineRepresentation::kTaggedSigned: // Fall through.
- case MachineRepresentation::kTaggedPointer: // Fall through.
- case MachineRepresentation::kTagged: // Fall through.
-#if !V8_TARGET_ARCH_S390X
- case MachineRepresentation::kWord64: // Fall through.
-#endif
- case MachineRepresentation::kSimd128: // Fall through.
- case MachineRepresentation::kNone:
- UNREACHABLE();
- return;
- }
- AddressingMode addressingMode = kMode_MRR;
- Emit(opcode | AddressingModeField::encode(addressingMode),
- g.DefineAsRegister(node), g.UseRegister(base), g.UseRegister(offset),
- g.UseOperand(length, OperandMode::kUint32Imm));
-}
-
-void InstructionSelector::VisitCheckedStore(Node* node) {
- MachineRepresentation rep = CheckedStoreRepresentationOf(node->op());
- S390OperandGenerator g(this);
- Node* const base = node->InputAt(0);
- Node* const offset = node->InputAt(1);
- Node* const length = node->InputAt(2);
- Node* const value = node->InputAt(3);
- ArchOpcode opcode = kArchNop;
- switch (rep) {
- case MachineRepresentation::kWord8:
- opcode = kCheckedStoreWord8;
- break;
- case MachineRepresentation::kWord16:
- opcode = kCheckedStoreWord16;
- break;
- case MachineRepresentation::kWord32:
- opcode = kCheckedStoreWord32;
- break;
-#if V8_TARGET_ARCH_S390X
- case MachineRepresentation::kWord64:
- opcode = kCheckedStoreWord64;
- break;
-#endif
- case MachineRepresentation::kFloat32:
- opcode = kCheckedStoreFloat32;
- break;
- case MachineRepresentation::kFloat64:
- opcode = kCheckedStoreFloat64;
- break;
- case MachineRepresentation::kBit: // Fall through.
- case MachineRepresentation::kTaggedSigned: // Fall through.
- case MachineRepresentation::kTaggedPointer: // Fall through.
- case MachineRepresentation::kTagged: // Fall through.
-#if !V8_TARGET_ARCH_S390X
- case MachineRepresentation::kWord64: // Fall through.
-#endif
- case MachineRepresentation::kSimd128: // Fall through.
- case MachineRepresentation::kNone:
- UNREACHABLE();
- return;
- }
- AddressingMode addressingMode = kMode_MRR;
- Emit(opcode | AddressingModeField::encode(addressingMode), g.NoOutput(),
- g.UseRegister(base), g.UseRegister(offset),
- g.UseOperand(length, OperandMode::kUint32Imm), g.UseRegister(value));
-}
-
#if 0
static inline bool IsContiguousMask32(uint32_t value, int* mb, int* me) {
int mask_width = base::bits::CountPopulation(value);
@@ -987,7 +896,7 @@ void InstructionSelector::VisitWord64And(Node* node) {
if (m.left().IsWord64Shr()) {
// Adjust the mask such that it doesn't include any rotated bits.
if (mb > 63 - sh) mb = 63 - sh;
- sh = (64 - sh) & 0x3f;
+ sh = (64 - sh) & 0x3F;
} else {
// Adjust the mask such that it doesn't include any rotated bits.
if (me < sh) me = sh;
@@ -1075,7 +984,7 @@ void InstructionSelector::VisitWord64Shr(Node* node) {
IsContiguousMask64((uint64_t)(mleft.right().Value()) >> sh, &mb, &me)) {
// Adjust the mask such that it doesn't include any rotated bits.
if (mb > 63 - sh) mb = 63 - sh;
- sh = (64 - sh) & 0x3f;
+ sh = (64 - sh) & 0x3F;
if (mb >= me) {
bool match = false;
ArchOpcode opcode;
@@ -1249,6 +1158,8 @@ void InstructionSelector::VisitWord32ReverseBits(Node* node) { UNREACHABLE(); }
void InstructionSelector::VisitWord64ReverseBits(Node* node) { UNREACHABLE(); }
#endif
+void InstructionSelector::VisitSpeculationFence(Node* node) { UNREACHABLE(); }
+
void InstructionSelector::VisitInt32AbsWithOverflow(Node* node) {
VisitWord32UnaryOp(this, node, kS390_Abs32, OperandMode::kNone);
}
@@ -1728,7 +1639,8 @@ void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
g.Label(cont->true_block()), g.Label(cont->false_block()));
} else if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, g.NoOutput(), left, right, cont->kind(),
- cont->reason(), cont->frame_state());
+ cont->reason(), cont->feedback(),
+ cont->frame_state());
} else if (cont->IsSet()) {
selector->Emit(opcode, g.DefineAsRegister(cont->result()), left, right);
} else {
@@ -1816,7 +1728,8 @@ void VisitWordCompare(InstructionSelector* selector, Node* node,
DCHECK(input_count <= 8 && output_count <= 1);
if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, 0, nullptr, input_count, inputs,
- cont->kind(), cont->reason(), cont->frame_state());
+ cont->kind(), cont->reason(), cont->feedback(),
+ cont->frame_state());
} else {
selector->Emit(opcode, output_count, outputs, input_count, inputs);
}
@@ -1921,7 +1834,8 @@ void VisitLoadAndTest(InstructionSelector* selector, InstructionCode opcode,
opcode = cont->Encode(opcode);
if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
- cont->kind(), cont->reason(), cont->frame_state());
+ cont->kind(), cont->reason(), cont->feedback(),
+ cont->frame_state());
} else {
selector->Emit(opcode, output_count, outputs, input_count, inputs);
}
@@ -2183,14 +2097,14 @@ void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
void InstructionSelector::VisitDeoptimizeIf(Node* node) {
DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
- kNotEqual, p.kind(), p.reason(), node->InputAt(1));
+ kNotEqual, p.kind(), p.reason(), p.feedback(), node->InputAt(1));
VisitWord32CompareZero(this, node, node->InputAt(0), &cont);
}
void InstructionSelector::VisitDeoptimizeUnless(Node* node) {
DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
- kEqual, p.kind(), p.reason(), node->InputAt(1));
+ kEqual, p.kind(), p.reason(), p.feedback(), node->InputAt(1));
VisitWord32CompareZero(this, node, node->InputAt(0), &cont);
}
@@ -2351,7 +2265,7 @@ void InstructionSelector::EmitPrepareArguments(
// Poke any stack arguments.
int slot = kStackFrameExtraParamSlot;
for (PushParameter input : (*arguments)) {
- Emit(kS390_StoreToStackSlot, g.NoOutput(), g.UseRegister(input.node()),
+ Emit(kS390_StoreToStackSlot, g.NoOutput(), g.UseRegister(input.node),
g.TempImmediate(slot));
++slot;
}
@@ -2361,19 +2275,20 @@ void InstructionSelector::EmitPrepareArguments(
int slot = 0;
for (PushParameter input : *arguments) {
- if (input.node() == nullptr) continue;
- num_slots +=
- input.type().representation() == MachineRepresentation::kFloat64
- ? kDoubleSize / kPointerSize
- : 1;
+ if (input.node == nullptr) continue;
+ num_slots += input.location.GetType().representation() ==
+ MachineRepresentation::kFloat64
+ ? kDoubleSize / kPointerSize
+ : 1;
}
Emit(kS390_StackClaim, g.NoOutput(), g.TempImmediate(num_slots));
for (PushParameter input : *arguments) {
// Skip any alignment holes in pushed nodes.
- if (input.node()) {
- Emit(kS390_StoreToStackSlot, g.NoOutput(), g.UseRegister(input.node()),
+ if (input.node) {
+ Emit(kS390_StoreToStackSlot, g.NoOutput(), g.UseRegister(input.node),
g.TempImmediate(slot));
- slot += input.type().representation() == MachineRepresentation::kFloat64
+ slot += input.location.GetType().representation() ==
+ MachineRepresentation::kFloat64
? (kDoubleSize / kPointerSize)
: 1;
}
@@ -2489,6 +2404,190 @@ void InstructionSelector::VisitAtomicOr(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitAtomicXor(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitI32x4Splat(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI32x4ExtractLane(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI32x4ReplaceLane(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI32x4Add(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI32x4Sub(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI32x4Shl(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI32x4ShrS(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI32x4Mul(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI32x4MaxS(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI32x4MinS(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI32x4Eq(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI32x4Ne(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI32x4MinU(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI32x4MaxU(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI32x4ShrU(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI32x4Neg(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI32x4GtS(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI32x4GeS(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI32x4GtU(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI32x4GeU(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI16x8Splat(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI16x8ExtractLane(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI16x8ReplaceLane(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI16x8Shl(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI16x8ShrS(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI16x8ShrU(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI16x8Add(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI16x8AddSaturateS(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitI16x8Sub(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI16x8SubSaturateS(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitI16x8Mul(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI16x8MinS(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI16x8MaxS(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI16x8Eq(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI16x8Ne(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI16x8AddSaturateU(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitI16x8SubSaturateU(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitI16x8MinU(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI16x8MaxU(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI16x8Neg(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI16x8GtS(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI16x8GeS(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI16x8GtU(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI16x8GeU(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI8x16Neg(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI8x16Splat(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI8x16ExtractLane(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI8x16ReplaceLane(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI8x16Add(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI8x16AddSaturateS(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitI8x16Sub(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI8x16SubSaturateS(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitI8x16MinS(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI8x16MaxS(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI8x16Eq(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI8x16Ne(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI8x16GtS(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI8x16GeS(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI8x16AddSaturateU(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitI8x16SubSaturateU(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitI8x16MinU(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI8x16MaxU(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI8x16GtU(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI8x16GeU(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitS128And(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitS128Or(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitS128Xor(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitS128Not(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitS128Zero(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitF32x4Eq(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitF32x4Ne(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitF32x4Lt(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitF32x4Le(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitF32x4Splat(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitF32x4ExtractLane(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitF32x4ReplaceLane(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::EmitPrepareResults(ZoneVector<PushParameter>* results,
+ const CallDescriptor* descriptor,
+ Node* node) {
+ // TODO(John): Port.
+}
+
+void InstructionSelector::VisitF32x4Add(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitF32x4Sub(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitF32x4Mul(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitF32x4Min(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitF32x4Max(Node* node) { UNIMPLEMENTED(); }
+
// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
diff --git a/deps/v8/src/compiler/simd-scalar-lowering.cc b/deps/v8/src/compiler/simd-scalar-lowering.cc
index 582fbd6424..423d757a4f 100644
--- a/deps/v8/src/compiler/simd-scalar-lowering.cc
+++ b/deps/v8/src/compiler/simd-scalar-lowering.cc
@@ -19,8 +19,8 @@ namespace {
static const int kNumLanes32 = 4;
static const int kNumLanes16 = 8;
static const int kNumLanes8 = 16;
-static const int32_t kMask16 = 0xffff;
-static const int32_t kMask8 = 0xff;
+static const int32_t kMask16 = 0xFFFF;
+static const int32_t kMask8 = 0xFF;
static const int32_t kShift16 = 16;
static const int32_t kShift8 = 24;
} // anonymous
@@ -595,7 +595,7 @@ void SimdScalarLowering::LowerConvertFromFloat(Node* node, bool is_signed) {
Node* min = graph()->NewNode(
common()->Float64Constant(static_cast<double>(is_signed ? kMinInt : 0)));
Node* max = graph()->NewNode(common()->Float64Constant(
- static_cast<double>(is_signed ? kMaxInt : 0xffffffffu)));
+ static_cast<double>(is_signed ? kMaxInt : 0xFFFFFFFFu)));
for (int i = 0; i < kNumLanes32; ++i) {
Node* double_rep =
graph()->NewNode(machine()->ChangeFloat32ToFloat64(), rep[i]);
@@ -913,7 +913,7 @@ void SimdScalarLowering::LowerNode(Node* node) {
DCHECK_EQ(1, node->InputCount());
Node** rep = GetReplacementsWithType(node->InputAt(0), rep_type);
Node* rep_node[kNumLanes32];
- Node* mask = graph()->NewNode(common()->Int32Constant(0xffffffff));
+ Node* mask = graph()->NewNode(common()->Int32Constant(0xFFFFFFFF));
for (int i = 0; i < kNumLanes32; ++i) {
rep_node[i] = graph()->NewNode(machine()->Word32Xor(), rep[i], mask);
}
diff --git a/deps/v8/src/compiler/simplified-lowering.cc b/deps/v8/src/compiler/simplified-lowering.cc
index 9bdb7cfbaf..6e6c011fc1 100644
--- a/deps/v8/src/compiler/simplified-lowering.cc
+++ b/deps/v8/src/compiler/simplified-lowering.cc
@@ -92,7 +92,8 @@ UseInfo CheckedUseInfoAsWord32FromHint(
switch (hint) {
case NumberOperationHint::kSignedSmall:
case NumberOperationHint::kSignedSmallInputs:
- return UseInfo::CheckedSignedSmallAsWord32(identify_zeros);
+ return UseInfo::CheckedSignedSmallAsWord32(identify_zeros,
+ VectorSlotPair());
case NumberOperationHint::kSigned32:
return UseInfo::CheckedSigned32AsWord32(identify_zeros);
case NumberOperationHint::kNumber:
@@ -1345,17 +1346,6 @@ class RepresentationSelector {
void VisitSpeculativeAdditiveOp(Node* node, Truncation truncation,
SimplifiedLowering* lowering) {
- // ToNumber(x) can throw if x is either a Receiver or a Symbol, so we can
- // only eliminate an unused speculative number operation if we know that
- // the inputs are PlainPrimitive, which excludes everything that's might
- // have side effects or throws during a ToNumber conversion. We are only
- // allowed to perform a number addition if neither input is a String, even
- // if the value is never used, so we further limit to NumberOrOddball in
- // order to explicitly exclude String inputs.
- if (BothInputsAre(node, Type::NumberOrOddball())) {
- if (truncation.IsUnused()) return VisitUnused(node);
- }
-
if (BothInputsAre(node, type_cache_.kAdditiveSafeIntegerOrMinusZero) &&
(GetUpperBound(node)->Is(Type::Signed32()) ||
GetUpperBound(node)->Is(Type::Unsigned32()) ||
@@ -1377,13 +1367,6 @@ class RepresentationSelector {
void VisitSpeculativeNumberModulus(Node* node, Truncation truncation,
SimplifiedLowering* lowering) {
- // ToNumber(x) can throw if x is either a Receiver or a Symbol, so we
- // can only eliminate an unused speculative number operation if we know
- // that the inputs are PlainPrimitive, which excludes everything that's
- // might have side effects or throws during a ToNumber conversion.
- if (BothInputsAre(node, Type::PlainPrimitive())) {
- if (truncation.IsUnused()) return VisitUnused(node);
- }
if (BothInputsAre(node, Type::Unsigned32OrMinusZeroOrNaN()) &&
(truncation.IsUsedAsWord32() ||
NodeProperties::GetType(node)->Is(Type::Unsigned32()))) {
@@ -1514,8 +1497,20 @@ class RepresentationSelector {
return VisitLeaf(node, MachineRepresentation::kWord64);
case IrOpcode::kExternalConstant:
return VisitLeaf(node, MachineType::PointerRepresentation());
- case IrOpcode::kNumberConstant:
- return VisitLeaf(node, MachineRepresentation::kTagged);
+ case IrOpcode::kNumberConstant: {
+ double const value = OpParameter<double>(node);
+ int value_as_int;
+ if (DoubleToSmiInteger(value, &value_as_int)) {
+ VisitLeaf(node, MachineRepresentation::kTaggedSigned);
+ if (lower()) {
+ intptr_t smi = bit_cast<intptr_t>(Smi::FromInt(value_as_int));
+ DeferReplacement(node, lowering->jsgraph()->IntPtrConstant(smi));
+ }
+ return;
+ }
+ VisitLeaf(node, MachineRepresentation::kTagged);
+ return;
+ }
case IrOpcode::kHeapConstant:
return VisitLeaf(node, MachineRepresentation::kTaggedPointer);
case IrOpcode::kPointerConstant: {
@@ -1668,13 +1663,6 @@ class RepresentationSelector {
case IrOpcode::kSpeculativeNumberLessThan:
case IrOpcode::kSpeculativeNumberLessThanOrEqual:
case IrOpcode::kSpeculativeNumberEqual: {
- // ToNumber(x) can throw if x is either a Receiver or a Symbol, so we
- // can only eliminate an unused speculative number operation if we know
- // that the inputs are PlainPrimitive, which excludes everything that's
- // might have side effects or throws during a ToNumber conversion.
- if (BothInputsAre(node, Type::PlainPrimitive())) {
- if (truncation.IsUnused()) return VisitUnused(node);
- }
// Number comparisons reduce to integer comparisons for integer inputs.
if (TypeOf(node->InputAt(0))->Is(Type::Unsigned32()) &&
TypeOf(node->InputAt(1))->Is(Type::Unsigned32())) {
@@ -1707,8 +1695,10 @@ class RepresentationSelector {
Node* rhs = node->InputAt(1);
if (IsNodeRepresentationTagged(lhs) &&
IsNodeRepresentationTagged(rhs)) {
- VisitBinop(node, UseInfo::CheckedSignedSmallAsTaggedSigned(),
- MachineRepresentation::kBit);
+ VisitBinop(
+ node,
+ UseInfo::CheckedSignedSmallAsTaggedSigned(VectorSlotPair()),
+ MachineRepresentation::kBit);
ChangeToPureOp(
node, changer_->TaggedSignedOperatorFor(node->opcode()));
@@ -1755,13 +1745,6 @@ class RepresentationSelector {
return;
}
case IrOpcode::kSpeculativeNumberMultiply: {
- // ToNumber(x) can throw if x is either a Receiver or a Symbol, so we
- // can only eliminate an unused speculative number operation if we know
- // that the inputs are PlainPrimitive, which excludes everything that's
- // might have side effects or throws during a ToNumber conversion.
- if (BothInputsAre(node, Type::PlainPrimitive())) {
- if (truncation.IsUnused()) return VisitUnused(node);
- }
if (BothInputsAre(node, Type::Integral32()) &&
(NodeProperties::GetType(node)->Is(Type::Signed32()) ||
NodeProperties::GetType(node)->Is(Type::Unsigned32()) ||
@@ -1785,7 +1768,7 @@ class RepresentationSelector {
// Handle the case when no int32 checks on inputs are necessary
// (but an overflow check is needed on the output).
if (BothInputsAre(node, Type::Signed32())) {
- // If both the inputs the feedback are int32, use the overflow op.
+ // If both inputs and feedback are int32, use the overflow op.
if (hint == NumberOperationHint::kSignedSmall ||
hint == NumberOperationHint::kSigned32) {
VisitBinop(node, UseInfo::TruncatingWord32(),
@@ -1836,13 +1819,6 @@ class RepresentationSelector {
return;
}
case IrOpcode::kSpeculativeNumberDivide: {
- // ToNumber(x) can throw if x is either a Receiver or a Symbol, so we
- // can only eliminate an unused speculative number operation if we know
- // that the inputs are PlainPrimitive, which excludes everything that's
- // might have side effects or throws during a ToNumber conversion.
- if (BothInputsAre(node, Type::PlainPrimitive())) {
- if (truncation.IsUnused()) return VisitUnused(node);
- }
if (BothInputsAreUnsigned32(node) && truncation.IsUsedAsWord32()) {
// => unsigned Uint32Div
VisitWord32TruncatingBinop(node);
@@ -2014,13 +1990,6 @@ class RepresentationSelector {
return;
}
case IrOpcode::kSpeculativeNumberShiftLeft: {
- // ToNumber(x) can throw if x is either a Receiver or a Symbol, so we
- // can only eliminate an unused speculative number operation if we know
- // that the inputs are PlainPrimitive, which excludes everything that's
- // might have side effects or throws during a ToNumber conversion.
- if (BothInputsAre(node, Type::PlainPrimitive())) {
- if (truncation.IsUnused()) return VisitUnused(node);
- }
if (BothInputsAre(node, Type::NumberOrOddball())) {
Type* rhs_type = GetUpperBound(node->InputAt(1));
VisitBinop(node, UseInfo::TruncatingWord32(),
@@ -2050,13 +2019,6 @@ class RepresentationSelector {
return;
}
case IrOpcode::kSpeculativeNumberShiftRight: {
- // ToNumber(x) can throw if x is either a Receiver or a Symbol, so we
- // can only eliminate an unused speculative number operation if we know
- // that the inputs are PlainPrimitive, which excludes everything that's
- // might have side effects or throws during a ToNumber conversion.
- if (BothInputsAre(node, Type::PlainPrimitive())) {
- if (truncation.IsUnused()) return VisitUnused(node);
- }
if (BothInputsAre(node, Type::NumberOrOddball())) {
Type* rhs_type = GetUpperBound(node->InputAt(1));
VisitBinop(node, UseInfo::TruncatingWord32(),
@@ -2086,13 +2048,6 @@ class RepresentationSelector {
return;
}
case IrOpcode::kSpeculativeNumberShiftRightLogical: {
- // ToNumber(x) can throw if x is either a Receiver or a Symbol, so we
- // can only eliminate an unused speculative number operation if we know
- // that the inputs are PlainPrimitive, which excludes everything that
- // might have side effects or throw during a ToNumber conversion.
- if (BothInputsAre(node, Type::PlainPrimitive())) {
- if (truncation.IsUnused()) return VisitUnused(node);
- }
NumberOperationHint hint = NumberOperationHintOf(node->op());
Type* rhs_type = GetUpperBound(node->InputAt(1));
if (rhs_type->Is(type_cache_.kZeroish) &&
@@ -2107,8 +2062,8 @@ class RepresentationSelector {
MachineRepresentation::kWord32, Type::Unsigned31());
if (lower()) {
node->RemoveInput(1);
- NodeProperties::ChangeOp(node,
- simplified()->CheckedUint32ToInt32());
+ NodeProperties::ChangeOp(
+ node, simplified()->CheckedUint32ToInt32(VectorSlotPair()));
}
return;
}
@@ -2315,6 +2270,11 @@ class RepresentationSelector {
if (lower()) DeferReplacement(node, node->InputAt(0));
return;
}
+ case IrOpcode::kNumberToString: {
+ VisitUnop(node, UseInfo::AnyTagged(),
+ MachineRepresentation::kTaggedPointer);
+ return;
+ }
case IrOpcode::kNumberToUint32: {
// Just change representation if necessary.
VisitUnop(node, UseInfo::TruncatingWord32(),
@@ -2365,6 +2325,13 @@ class RepresentationSelector {
return VisitUnop(node, UseInfo::AnyTagged(),
MachineRepresentation::kTaggedPointer);
}
+ case IrOpcode::kNewConsString: {
+ ProcessInput(node, 0, UseInfo::TaggedSigned()); // length
+ ProcessInput(node, 1, UseInfo::AnyTagged()); // first
+ ProcessInput(node, 2, UseInfo::AnyTagged()); // second
+ SetOutput(node, MachineRepresentation::kTaggedPointer);
+ return;
+ }
case IrOpcode::kStringEqual:
case IrOpcode::kStringLessThan:
case IrOpcode::kStringLessThanOrEqual: {
@@ -2391,6 +2358,12 @@ class RepresentationSelector {
}
return;
}
+ case IrOpcode::kStringCodePointAt: {
+ // TODO(turbofan): Allow builtins to return untagged values.
+ VisitBinop(node, UseInfo::AnyTagged(), UseInfo::TruncatingWord32(),
+ MachineRepresentation::kTaggedSigned);
+ return;
+ }
case IrOpcode::kStringFromCharCode: {
VisitUnop(node, UseInfo::TruncatingWord32(),
MachineRepresentation::kTaggedPointer);
@@ -2408,6 +2381,14 @@ class RepresentationSelector {
SetOutput(node, MachineRepresentation::kTaggedSigned);
return;
}
+ case IrOpcode::kStringLength: {
+ // TODO(bmeurer): The input representation should be TaggedPointer.
+ // Fix this once we have a dedicated StringConcat/JSStringAdd
+ // operator, which marks its output as TaggedPointer properly.
+ VisitUnop(node, UseInfo::AnyTagged(),
+ MachineRepresentation::kTaggedSigned);
+ return;
+ }
case IrOpcode::kStringToLowerCaseIntl:
case IrOpcode::kStringToUpperCaseIntl: {
VisitUnop(node, UseInfo::AnyTagged(),
@@ -2479,13 +2460,17 @@ class RepresentationSelector {
return;
}
case IrOpcode::kCheckSmi: {
+ const CheckParameters& params = CheckParametersOf(node->op());
if (SmiValuesAre32Bits() && truncation.IsUsedAsWord32()) {
VisitUnop(node,
- UseInfo::CheckedSignedSmallAsWord32(kDistinguishZeros),
+ UseInfo::CheckedSignedSmallAsWord32(kDistinguishZeros,
+ params.feedback()),
MachineRepresentation::kWord32);
} else {
- VisitUnop(node, UseInfo::CheckedSignedSmallAsTaggedSigned(),
- MachineRepresentation::kTaggedSigned);
+ VisitUnop(
+ node,
+ UseInfo::CheckedSignedSmallAsTaggedSigned(params.feedback()),
+ MachineRepresentation::kTaggedSigned);
}
if (lower()) DeferReplacement(node, node->InputAt(0));
return;
@@ -2589,6 +2574,11 @@ class RepresentationSelector {
}
return;
}
+ case IrOpcode::kNumberIsFloat64Hole: {
+ VisitUnop(node, UseInfo::TruncatingFloat64(),
+ MachineRepresentation::kBit);
+ return;
+ }
case IrOpcode::kTransitionAndStoreElement: {
Type* value_type = TypeOf(node->InputAt(2));
@@ -2977,7 +2967,6 @@ class RepresentationSelector {
}
ProcessRemainingInputs(node, 1);
SetOutput(node, representation);
- if (lower()) DeferReplacement(node, node->InputAt(0));
return;
}
@@ -3702,7 +3691,7 @@ void SimplifiedLowering::DoShift(Node* node, Operator const* op,
if (!rhs_type->Is(type_cache_.kZeroToThirtyOne)) {
Node* const rhs = NodeProperties::GetValueInput(node, 1);
node->ReplaceInput(1, graph()->NewNode(machine()->Word32And(), rhs,
- jsgraph()->Int32Constant(0x1f)));
+ jsgraph()->Int32Constant(0x1F)));
}
ChangeToPureOp(node, op);
}
diff --git a/deps/v8/src/compiler/simplified-lowering.h b/deps/v8/src/compiler/simplified-lowering.h
index eaa148ee04..a78d885e6e 100644
--- a/deps/v8/src/compiler/simplified-lowering.h
+++ b/deps/v8/src/compiler/simplified-lowering.h
@@ -20,7 +20,7 @@ class RepresentationSelector;
class SourcePositionTable;
class TypeCache;
-class SimplifiedLowering final {
+class V8_EXPORT_PRIVATE SimplifiedLowering final {
public:
SimplifiedLowering(JSGraph* jsgraph, Zone* zone,
SourcePositionTable* source_positions);
diff --git a/deps/v8/src/compiler/simplified-operator.cc b/deps/v8/src/compiler/simplified-operator.cc
index 04bbc7bba8..9978bae122 100644
--- a/deps/v8/src/compiler/simplified-operator.cc
+++ b/deps/v8/src/compiler/simplified-operator.cc
@@ -149,9 +149,7 @@ CheckFloat64HoleMode CheckFloat64HoleModeOf(const Operator* op) {
CheckForMinusZeroMode CheckMinusZeroModeOf(const Operator* op) {
DCHECK(op->opcode() == IrOpcode::kChangeFloat64ToTagged ||
- op->opcode() == IrOpcode::kCheckedInt32Mul ||
- op->opcode() == IrOpcode::kCheckedFloat64ToInt32 ||
- op->opcode() == IrOpcode::kCheckedTaggedToInt32);
+ op->opcode() == IrOpcode::kCheckedInt32Mul);
return OpParameter<CheckForMinusZeroMode>(op);
}
@@ -215,15 +213,20 @@ size_t hash_value(MapsParameterInfo const& p) { return hash_value(p.maps()); }
bool operator==(CheckMapsParameters const& lhs,
CheckMapsParameters const& rhs) {
- return lhs.flags() == rhs.flags() && lhs.maps() == rhs.maps();
+ return lhs.flags() == rhs.flags() && lhs.maps() == rhs.maps() &&
+ lhs.feedback() == rhs.feedback();
}
size_t hash_value(CheckMapsParameters const& p) {
- return base::hash_combine(p.flags(), p.maps());
+ return base::hash_combine(p.flags(), p.maps(), p.feedback());
}
std::ostream& operator<<(std::ostream& os, CheckMapsParameters const& p) {
- return os << p.flags() << p.maps_info();
+ os << p.flags() << p.maps_info();
+ if (p.feedback().IsValid()) {
+ os << "; " << p.feedback();
+ }
+ return os;
}
CheckMapsParameters const& CheckMapsParametersOf(Operator const* op) {
@@ -256,8 +259,7 @@ std::ostream& operator<<(std::ostream& os, CheckTaggedInputMode mode) {
}
CheckTaggedInputMode CheckTaggedInputModeOf(const Operator* op) {
- DCHECK(op->opcode() == IrOpcode::kCheckedTaggedToFloat64 ||
- op->opcode() == IrOpcode::kCheckedTruncateTaggedToWord32);
+ DCHECK(op->opcode() == IrOpcode::kCheckedTaggedToFloat64);
return OpParameter<CheckTaggedInputMode>(op);
}
@@ -271,9 +273,28 @@ std::ostream& operator<<(std::ostream& os, GrowFastElementsMode mode) {
UNREACHABLE();
}
-GrowFastElementsMode GrowFastElementsModeOf(const Operator* op) {
+bool operator==(const GrowFastElementsParameters& lhs,
+ const GrowFastElementsParameters& rhs) {
+ return lhs.mode() == rhs.mode() && lhs.feedback() == rhs.feedback();
+}
+
+inline size_t hash_value(const GrowFastElementsParameters& params) {
+ return base::hash_combine(params.mode(), params.feedback());
+}
+
+std::ostream& operator<<(std::ostream& os,
+ const GrowFastElementsParameters& params) {
+ os << params.mode();
+ if (params.feedback().IsValid()) {
+ os << params.feedback();
+ }
+ return os;
+}
+
+const GrowFastElementsParameters& GrowFastElementsParametersOf(
+ const Operator* op) {
DCHECK_EQ(IrOpcode::kMaybeGrowFastElements, op->opcode());
- return OpParameter<GrowFastElementsMode>(op);
+ return OpParameter<GrowFastElementsParameters>(op);
}
bool operator==(ElementsTransition const& lhs, ElementsTransition const& rhs) {
@@ -520,9 +541,9 @@ UnicodeEncoding UnicodeEncodingOf(const Operator* op) {
return OpParameter<UnicodeEncoding>(op);
}
-BailoutReason BailoutReasonOf(const Operator* op) {
+AbortReason AbortReasonOf(const Operator* op) {
DCHECK_EQ(IrOpcode::kRuntimeAbort, op->opcode());
- return OpParameter<BailoutReason>(op);
+ return static_cast<AbortReason>(OpParameter<int>(op));
}
DeoptimizeReason DeoptimizeReasonOf(const Operator* op) {
@@ -530,6 +551,54 @@ DeoptimizeReason DeoptimizeReasonOf(const Operator* op) {
return OpParameter<DeoptimizeReason>(op);
}
+const CheckTaggedInputParameters& CheckTaggedInputParametersOf(
+ const Operator* op) {
+ DCHECK(op->opcode() == IrOpcode::kCheckedTruncateTaggedToWord32);
+ return OpParameter<CheckTaggedInputParameters>(op);
+}
+
+std::ostream& operator<<(std::ostream& os,
+ const CheckTaggedInputParameters& params) {
+ os << params.mode();
+ if (params.feedback().IsValid()) {
+ os << "; " << params.feedback();
+ }
+ return os;
+}
+
+size_t hash_value(const CheckTaggedInputParameters& params) {
+ return base::hash_combine(params.mode(), params.feedback());
+}
+
+bool operator==(CheckTaggedInputParameters const& lhs,
+ CheckTaggedInputParameters const& rhs) {
+ return lhs.mode() == rhs.mode() && lhs.feedback() == rhs.feedback();
+}
+
+const CheckMinusZeroParameters& CheckMinusZeroParametersOf(const Operator* op) {
+ DCHECK(IrOpcode::kCheckedTaggedToInt32 == op->opcode() ||
+ IrOpcode::kCheckedFloat64ToInt32 == op->opcode());
+ return OpParameter<CheckMinusZeroParameters>(op);
+}
+
+std::ostream& operator<<(std::ostream& os,
+ const CheckMinusZeroParameters& params) {
+ os << params.mode();
+ if (params.feedback().IsValid()) {
+ os << "; " << params.feedback();
+ }
+ return os;
+}
+
+size_t hash_value(const CheckMinusZeroParameters& params) {
+ return base::hash_combine(params.mode(), params.feedback());
+}
+
+bool operator==(CheckMinusZeroParameters const& lhs,
+ CheckMinusZeroParameters const& rhs) {
+ return lhs.mode() == rhs.mode() && lhs.feedback() == rhs.feedback();
+}
+
#define PURE_OP_LIST(V) \
V(BooleanNot, Operator::kNoProperties, 1, 0) \
V(NumberEqual, Operator::kCommutative, 2, 0) \
@@ -581,6 +650,7 @@ DeoptimizeReason DeoptimizeReasonOf(const Operator* op) {
V(NumberTrunc, Operator::kNoProperties, 1, 0) \
V(NumberToBoolean, Operator::kNoProperties, 1, 0) \
V(NumberToInt32, Operator::kNoProperties, 1, 0) \
+ V(NumberToString, Operator::kNoProperties, 1, 0) \
V(NumberToUint32, Operator::kNoProperties, 1, 0) \
V(NumberToUint8Clamped, Operator::kNoProperties, 1, 0) \
V(NumberSilenceNaN, Operator::kNoProperties, 1, 0) \
@@ -588,8 +658,11 @@ DeoptimizeReason DeoptimizeReasonOf(const Operator* op) {
V(StringCharAt, Operator::kNoProperties, 2, 1) \
V(StringCharCodeAt, Operator::kNoProperties, 2, 1) \
V(SeqStringCharCodeAt, Operator::kNoProperties, 2, 1) \
+ V(StringCodePointAt, Operator::kNoProperties, 2, 1) \
+ V(SeqStringCodePointAt, Operator::kNoProperties, 2, 1) \
V(StringFromCharCode, Operator::kNoProperties, 1, 0) \
V(StringIndexOf, Operator::kNoProperties, 3, 0) \
+ V(StringLength, Operator::kNoProperties, 1, 0) \
V(StringToLowerCaseIntl, Operator::kNoProperties, 1, 0) \
V(StringToUpperCaseIntl, Operator::kNoProperties, 1, 0) \
V(TypeOf, Operator::kNoProperties, 1, 1) \
@@ -626,6 +699,7 @@ DeoptimizeReason DeoptimizeReasonOf(const Operator* op) {
V(ObjectIsString, Operator::kNoProperties, 1, 0) \
V(ObjectIsSymbol, Operator::kNoProperties, 1, 0) \
V(ObjectIsUndetectable, Operator::kNoProperties, 1, 0) \
+ V(NumberIsFloat64Hole, Operator::kNoProperties, 1, 0) \
V(ConvertTaggedHoleToUndefined, Operator::kNoProperties, 1, 0) \
V(SameValue, Operator::kCommutative, 2, 0) \
V(ReferenceEqual, Operator::kCommutative, 2, 0) \
@@ -633,6 +707,7 @@ DeoptimizeReason DeoptimizeReasonOf(const Operator* op) {
V(StringLessThan, Operator::kNoProperties, 2, 0) \
V(StringLessThanOrEqual, Operator::kNoProperties, 2, 0) \
V(ToBoolean, Operator::kNoProperties, 1, 0) \
+ V(NewConsString, Operator::kNoProperties, 3, 0) \
V(MaskIndexWithBound, Operator::kNoProperties, 2, 0)
#define SPECULATIVE_NUMBER_BINOP_LIST(V) \
@@ -642,30 +717,32 @@ DeoptimizeReason DeoptimizeReasonOf(const Operator* op) {
V(SpeculativeNumberLessThanOrEqual)
#define CHECKED_OP_LIST(V) \
- V(CheckBounds, 2, 1) \
+ V(CheckEqualsInternalizedString, 2, 0) \
+ V(CheckEqualsSymbol, 2, 0) \
V(CheckHeapObject, 1, 1) \
V(CheckInternalizedString, 1, 1) \
- V(CheckNumber, 1, 1) \
+ V(CheckNotTaggedHole, 1, 1) \
V(CheckReceiver, 1, 1) \
- V(CheckSmi, 1, 1) \
- V(CheckString, 1, 1) \
V(CheckSeqString, 1, 1) \
V(CheckSymbol, 1, 1) \
- V(CheckNotTaggedHole, 1, 1) \
- V(CheckEqualsInternalizedString, 2, 0) \
- V(CheckEqualsSymbol, 2, 0) \
V(CheckedInt32Add, 2, 1) \
- V(CheckedInt32Sub, 2, 1) \
V(CheckedInt32Div, 2, 1) \
V(CheckedInt32Mod, 2, 1) \
+ V(CheckedInt32Sub, 2, 1) \
V(CheckedUint32Div, 2, 1) \
- V(CheckedUint32Mod, 2, 1) \
- V(CheckedUint32ToInt32, 1, 1) \
- V(CheckedUint32ToTaggedSigned, 1, 1) \
+ V(CheckedUint32Mod, 2, 1)
+
+#define CHECKED_WITH_FEEDBACK_OP_LIST(V) \
+ V(CheckBounds, 2, 1) \
+ V(CheckNumber, 1, 1) \
+ V(CheckSmi, 1, 1) \
+ V(CheckString, 1, 1) \
V(CheckedInt32ToTaggedSigned, 1, 1) \
V(CheckedTaggedSignedToInt32, 1, 1) \
+ V(CheckedTaggedToTaggedPointer, 1, 1) \
V(CheckedTaggedToTaggedSigned, 1, 1) \
- V(CheckedTaggedToTaggedPointer, 1, 1)
+ V(CheckedUint32ToInt32, 1, 1) \
+ V(CheckedUint32ToTaggedSigned, 1, 1)
struct SimplifiedOperatorGlobalCache final {
#define PURE(Name, properties, value_input_count, control_input_count) \
@@ -689,6 +766,18 @@ struct SimplifiedOperatorGlobalCache final {
CHECKED_OP_LIST(CHECKED)
#undef CHECKED
+#define CHECKED_WITH_FEEDBACK(Name, value_input_count, value_output_count) \
+ struct Name##Operator final : public Operator1<CheckParameters> { \
+ Name##Operator() \
+ : Operator1<CheckParameters>( \
+ IrOpcode::k##Name, Operator::kFoldable | Operator::kNoThrow, \
+ #Name, value_input_count, 1, 1, value_output_count, 1, 0, \
+ CheckParameters(VectorSlotPair())) {} \
+ }; \
+ Name##Operator k##Name;
+ CHECKED_WITH_FEEDBACK_OP_LIST(CHECKED_WITH_FEEDBACK)
+#undef CHECKED_WITH_FEEDBACK
+
template <DeoptimizeReason kDeoptimizeReason>
struct CheckIfOperator final : public Operator1<DeoptimizeReason> {
CheckIfOperator()
@@ -772,12 +861,13 @@ struct SimplifiedOperatorGlobalCache final {
template <CheckForMinusZeroMode kMode>
struct CheckedFloat64ToInt32Operator final
- : public Operator1<CheckForMinusZeroMode> {
+ : public Operator1<CheckMinusZeroParameters> {
CheckedFloat64ToInt32Operator()
- : Operator1<CheckForMinusZeroMode>(
+ : Operator1<CheckMinusZeroParameters>(
IrOpcode::kCheckedFloat64ToInt32,
Operator::kFoldable | Operator::kNoThrow, "CheckedFloat64ToInt32",
- 1, 1, 1, 1, 1, 0, kMode) {}
+ 1, 1, 1, 1, 1, 0,
+ CheckMinusZeroParameters(kMode, VectorSlotPair())) {}
};
CheckedFloat64ToInt32Operator<CheckForMinusZeroMode::kCheckForMinusZero>
kCheckedFloat64ToInt32CheckForMinusZeroOperator;
@@ -786,12 +876,13 @@ struct SimplifiedOperatorGlobalCache final {
template <CheckForMinusZeroMode kMode>
struct CheckedTaggedToInt32Operator final
- : public Operator1<CheckForMinusZeroMode> {
+ : public Operator1<CheckMinusZeroParameters> {
CheckedTaggedToInt32Operator()
- : Operator1<CheckForMinusZeroMode>(
+ : Operator1<CheckMinusZeroParameters>(
IrOpcode::kCheckedTaggedToInt32,
Operator::kFoldable | Operator::kNoThrow, "CheckedTaggedToInt32",
- 1, 1, 1, 1, 1, 0, kMode) {}
+ 1, 1, 1, 1, 1, 0,
+ CheckMinusZeroParameters(kMode, VectorSlotPair())) {}
};
CheckedTaggedToInt32Operator<CheckForMinusZeroMode::kCheckForMinusZero>
kCheckedTaggedToInt32CheckForMinusZeroOperator;
@@ -814,12 +905,13 @@ struct SimplifiedOperatorGlobalCache final {
template <CheckTaggedInputMode kMode>
struct CheckedTruncateTaggedToWord32Operator final
- : public Operator1<CheckTaggedInputMode> {
+ : public Operator1<CheckTaggedInputParameters> {
CheckedTruncateTaggedToWord32Operator()
- : Operator1<CheckTaggedInputMode>(
+ : Operator1<CheckTaggedInputParameters>(
IrOpcode::kCheckedTruncateTaggedToWord32,
Operator::kFoldable | Operator::kNoThrow,
- "CheckedTruncateTaggedToWord32", 1, 1, 1, 1, 1, 0, kMode) {}
+ "CheckedTruncateTaggedToWord32", 1, 1, 1, 1, 1, 0,
+ CheckTaggedInputParameters(kMode, VectorSlotPair())) {}
};
CheckedTruncateTaggedToWord32Operator<CheckTaggedInputMode::kNumber>
kCheckedTruncateTaggedToWord32NumberOperator;
@@ -867,6 +959,20 @@ struct SimplifiedOperatorGlobalCache final {
};
EnsureWritableFastElementsOperator kEnsureWritableFastElements;
+ template <GrowFastElementsMode kMode>
+ struct GrowFastElementsOperator final
+ : public Operator1<GrowFastElementsParameters> {
+ GrowFastElementsOperator()
+ : Operator1(IrOpcode::kMaybeGrowFastElements, Operator::kNoThrow,
+ "MaybeGrowFastElements", 4, 1, 1, 1, 1, 0,
+ GrowFastElementsParameters(kMode, VectorSlotPair())) {}
+ };
+
+ GrowFastElementsOperator<GrowFastElementsMode::kDoubleElements>
+ kGrowFastElementsOperatorDoubleElements;
+ GrowFastElementsOperator<GrowFastElementsMode::kSmiOrObjectElements>
+ kGrowFastElementsOperatorSmiOrObjectElements;
+
struct LoadFieldByIndexOperator final : public Operator {
LoadFieldByIndexOperator()
: Operator( // --
@@ -934,13 +1040,38 @@ GET_FROM_CACHE(FindOrderedHashMapEntryForInt32Key)
GET_FROM_CACHE(LoadFieldByIndex)
#undef GET_FROM_CACHE
-const Operator* SimplifiedOperatorBuilder::RuntimeAbort(BailoutReason reason) {
- return new (zone()) Operator1<BailoutReason>( // --
- IrOpcode::kRuntimeAbort, // opcode
- Operator::kNoThrow | Operator::kNoDeopt, // flags
- "RuntimeAbort", // name
- 0, 1, 1, 0, 1, 0, // counts
- reason); // parameter
+#define GET_FROM_CACHE_WITH_FEEDBACK(Name, value_input_count, \
+ value_output_count) \
+ const Operator* SimplifiedOperatorBuilder::Name( \
+ const VectorSlotPair& feedback) { \
+ if (!feedback.IsValid()) { \
+ return &cache_.k##Name; \
+ } \
+ return new (zone()) Operator1<CheckParameters>( \
+ IrOpcode::k##Name, Operator::kFoldable | Operator::kNoThrow, #Name, \
+ value_input_count, 1, 1, value_output_count, 1, 0, \
+ CheckParameters(feedback)); \
+ }
+CHECKED_WITH_FEEDBACK_OP_LIST(GET_FROM_CACHE_WITH_FEEDBACK)
+#undef GET_FROM_CACHE_WITH_FEEDBACK
+
+bool IsCheckedWithFeedback(const Operator* op) {
+#define CASE(Name, ...) case IrOpcode::k##Name:
+ switch (op->opcode()) {
+ CHECKED_WITH_FEEDBACK_OP_LIST(CASE) return true;
+ default:
+ return false;
+ }
+#undef CASE
+}
+
+const Operator* SimplifiedOperatorBuilder::RuntimeAbort(AbortReason reason) {
+ return new (zone()) Operator1<int>( // --
+ IrOpcode::kRuntimeAbort, // opcode
+ Operator::kNoThrow | Operator::kNoDeopt, // flags
+ "RuntimeAbort", // name
+ 0, 1, 1, 0, 1, 0, // counts
+ static_cast<int>(reason)); // parameter
}
const Operator* SimplifiedOperatorBuilder::CheckIf(DeoptimizeReason reason) {
@@ -977,25 +1108,35 @@ const Operator* SimplifiedOperatorBuilder::CheckedInt32Mul(
}
const Operator* SimplifiedOperatorBuilder::CheckedFloat64ToInt32(
- CheckForMinusZeroMode mode) {
- switch (mode) {
- case CheckForMinusZeroMode::kCheckForMinusZero:
- return &cache_.kCheckedFloat64ToInt32CheckForMinusZeroOperator;
- case CheckForMinusZeroMode::kDontCheckForMinusZero:
- return &cache_.kCheckedFloat64ToInt32DontCheckForMinusZeroOperator;
+ CheckForMinusZeroMode mode, const VectorSlotPair& feedback) {
+ if (!feedback.IsValid()) {
+ switch (mode) {
+ case CheckForMinusZeroMode::kCheckForMinusZero:
+ return &cache_.kCheckedFloat64ToInt32CheckForMinusZeroOperator;
+ case CheckForMinusZeroMode::kDontCheckForMinusZero:
+ return &cache_.kCheckedFloat64ToInt32DontCheckForMinusZeroOperator;
+ }
}
- UNREACHABLE();
+ return new (zone()) Operator1<CheckMinusZeroParameters>(
+ IrOpcode::kCheckedFloat64ToInt32,
+ Operator::kFoldable | Operator::kNoThrow, "CheckedFloat64ToInt32", 1, 1,
+ 1, 1, 1, 0, CheckMinusZeroParameters(mode, feedback));
}
const Operator* SimplifiedOperatorBuilder::CheckedTaggedToInt32(
- CheckForMinusZeroMode mode) {
- switch (mode) {
- case CheckForMinusZeroMode::kCheckForMinusZero:
- return &cache_.kCheckedTaggedToInt32CheckForMinusZeroOperator;
- case CheckForMinusZeroMode::kDontCheckForMinusZero:
- return &cache_.kCheckedTaggedToInt32DontCheckForMinusZeroOperator;
+ CheckForMinusZeroMode mode, const VectorSlotPair& feedback) {
+ if (!feedback.IsValid()) {
+ switch (mode) {
+ case CheckForMinusZeroMode::kCheckForMinusZero:
+ return &cache_.kCheckedTaggedToInt32CheckForMinusZeroOperator;
+ case CheckForMinusZeroMode::kDontCheckForMinusZero:
+ return &cache_.kCheckedTaggedToInt32DontCheckForMinusZeroOperator;
+ }
}
- UNREACHABLE();
+ return new (zone()) Operator1<CheckMinusZeroParameters>(
+ IrOpcode::kCheckedTaggedToInt32, Operator::kFoldable | Operator::kNoThrow,
+ "CheckedTaggedToInt32", 1, 1, 1, 1, 1, 0,
+ CheckMinusZeroParameters(mode, feedback));
}
const Operator* SimplifiedOperatorBuilder::CheckedTaggedToFloat64(
@@ -1010,19 +1151,25 @@ const Operator* SimplifiedOperatorBuilder::CheckedTaggedToFloat64(
}
const Operator* SimplifiedOperatorBuilder::CheckedTruncateTaggedToWord32(
- CheckTaggedInputMode mode) {
- switch (mode) {
- case CheckTaggedInputMode::kNumber:
- return &cache_.kCheckedTruncateTaggedToWord32NumberOperator;
- case CheckTaggedInputMode::kNumberOrOddball:
- return &cache_.kCheckedTruncateTaggedToWord32NumberOrOddballOperator;
+ CheckTaggedInputMode mode, const VectorSlotPair& feedback) {
+ if (!feedback.IsValid()) {
+ switch (mode) {
+ case CheckTaggedInputMode::kNumber:
+ return &cache_.kCheckedTruncateTaggedToWord32NumberOperator;
+ case CheckTaggedInputMode::kNumberOrOddball:
+ return &cache_.kCheckedTruncateTaggedToWord32NumberOrOddballOperator;
+ }
}
- UNREACHABLE();
+ return new (zone()) Operator1<CheckTaggedInputParameters>(
+ IrOpcode::kCheckedTruncateTaggedToWord32,
+ Operator::kFoldable | Operator::kNoThrow, "CheckedTruncateTaggedToWord32",
+ 1, 1, 1, 1, 1, 0, CheckTaggedInputParameters(mode, feedback));
}
-const Operator* SimplifiedOperatorBuilder::CheckMaps(CheckMapsFlags flags,
- ZoneHandleSet<Map> maps) {
- CheckMapsParameters const parameters(flags, maps);
+const Operator* SimplifiedOperatorBuilder::CheckMaps(
+ CheckMapsFlags flags, ZoneHandleSet<Map> maps,
+ const VectorSlotPair& feedback) {
+ CheckMapsParameters const parameters(flags, maps, feedback);
return new (zone()) Operator1<CheckMapsParameters>( // --
IrOpcode::kCheckMaps, // opcode
Operator::kNoThrow | Operator::kNoWrite, // flags
@@ -1096,13 +1243,21 @@ const Operator* SimplifiedOperatorBuilder::EnsureWritableFastElements() {
}
const Operator* SimplifiedOperatorBuilder::MaybeGrowFastElements(
- GrowFastElementsMode mode) {
- return new (zone()) Operator1<GrowFastElementsMode>( // --
- IrOpcode::kMaybeGrowFastElements, // opcode
- Operator::kNoThrow, // flags
- "MaybeGrowFastElements", // name
- 4, 1, 1, 1, 1, 0, // counts
- mode); // parameter
+ GrowFastElementsMode mode, const VectorSlotPair& feedback) {
+ if (!feedback.IsValid()) {
+ switch (mode) {
+ case GrowFastElementsMode::kDoubleElements:
+ return &cache_.kGrowFastElementsOperatorDoubleElements;
+ case GrowFastElementsMode::kSmiOrObjectElements:
+ return &cache_.kGrowFastElementsOperatorSmiOrObjectElements;
+ }
+ }
+ return new (zone()) Operator1<GrowFastElementsParameters>( // --
+ IrOpcode::kMaybeGrowFastElements, // opcode
+ Operator::kNoThrow, // flags
+ "MaybeGrowFastElements", // name
+ 4, 1, 1, 1, 1, 0, // counts
+ GrowFastElementsParameters(mode, feedback)); // parameter
}
const Operator* SimplifiedOperatorBuilder::TransitionElementsKind(
@@ -1160,6 +1315,23 @@ bool IsRestLengthOf(const Operator* op) {
return OpParameter<ArgumentsLengthParameters>(op).is_rest_length;
}
+bool operator==(CheckParameters const& lhs, CheckParameters const& rhs) {
+ return lhs.feedback() == rhs.feedback();
+}
+
+size_t hash_value(CheckParameters const& p) { return hash_value(p.feedback()); }
+
+std::ostream& operator<<(std::ostream& os, CheckParameters const& p) {
+ return os << p.feedback();
+}
+
+CheckParameters const& CheckParametersOf(Operator const* op) {
+#define MAKE_OR(name, arg2, arg3) op->opcode() == IrOpcode::k##name ||
+ CHECK((CHECKED_WITH_FEEDBACK_OP_LIST(MAKE_OR) false));
+#undef MAKE_OR
+ return OpParameter<CheckParameters>(op);
+}
+
const Operator* SimplifiedOperatorBuilder::NewDoubleElements(
PretenureFlag pretenure) {
return new (zone()) Operator1<PretenureFlag>( // --
@@ -1292,6 +1464,7 @@ const Operator* SimplifiedOperatorBuilder::TransitionAndStoreNonNumberElement(
#undef PURE_OP_LIST
#undef SPECULATIVE_NUMBER_BINOP_LIST
+#undef CHECKED_WITH_FEEDBACK_OP_LIST
#undef CHECKED_OP_LIST
#undef ACCESS_OP_LIST
diff --git a/deps/v8/src/compiler/simplified-operator.h b/deps/v8/src/compiler/simplified-operator.h
index 0ed46b0e7a..10961cf452 100644
--- a/deps/v8/src/compiler/simplified-operator.h
+++ b/deps/v8/src/compiler/simplified-operator.h
@@ -16,6 +16,7 @@
#include "src/machine-type.h"
#include "src/objects.h"
#include "src/type-hints.h"
+#include "src/vector-slot-pair.h"
#include "src/zone/zone-handle-set.h"
namespace v8 {
@@ -91,6 +92,28 @@ ExternalArrayType ExternalArrayTypeOf(const Operator* op) WARN_UNUSED_RESULT;
// The ConvertReceiverMode is used as parameter by ConvertReceiver operators.
ConvertReceiverMode ConvertReceiverModeOf(Operator const* op);
+// The parameters for several Check nodes. The {feedback} parameter is
+// optional. If {feedback} references a valid CallIC slot and this MapCheck
+// fails, then speculation on that CallIC slot will be disabled.
+class CheckParameters final {
+ public:
+ explicit CheckParameters(const VectorSlotPair& feedback)
+ : feedback_(feedback) {}
+
+ VectorSlotPair const& feedback() const { return feedback_; }
+
+ private:
+ VectorSlotPair feedback_;
+};
+
+bool operator==(CheckParameters const&, CheckParameters const&);
+
+size_t hash_value(CheckParameters const&);
+
+std::ostream& operator<<(std::ostream&, CheckParameters const&);
+
+CheckParameters const& CheckParametersOf(Operator const*) WARN_UNUSED_RESULT;
+
enum class CheckFloat64HoleMode : uint8_t {
kNeverReturnHole, // Never return the hole (deoptimize instead).
kAllowReturnHole // Allow to return the hole (signaling NaN).
@@ -111,7 +134,32 @@ size_t hash_value(CheckTaggedInputMode);
std::ostream& operator<<(std::ostream&, CheckTaggedInputMode);
-CheckTaggedInputMode CheckTaggedInputModeOf(const Operator*) WARN_UNUSED_RESULT;
+CheckTaggedInputMode CheckTaggedInputModeOf(const Operator*);
+
+class CheckTaggedInputParameters {
+ public:
+ CheckTaggedInputParameters(CheckTaggedInputMode mode,
+ const VectorSlotPair& feedback)
+ : mode_(mode), feedback_(feedback) {}
+
+ CheckTaggedInputMode mode() const { return mode_; }
+ const VectorSlotPair& feedback() const { return feedback_; }
+
+ private:
+ CheckTaggedInputMode mode_;
+ VectorSlotPair feedback_;
+};
+
+const CheckTaggedInputParameters& CheckTaggedInputParametersOf(const Operator*)
+ WARN_UNUSED_RESULT;
+
+std::ostream& operator<<(std::ostream&,
+ const CheckTaggedInputParameters& params);
+
+size_t hash_value(const CheckTaggedInputParameters& params);
+
+bool operator==(CheckTaggedInputParameters const&,
+ CheckTaggedInputParameters const&);
enum class CheckForMinusZeroMode : uint8_t {
kCheckForMinusZero,
@@ -125,6 +173,30 @@ V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&,
CheckForMinusZeroMode CheckMinusZeroModeOf(const Operator*) WARN_UNUSED_RESULT;
+class CheckMinusZeroParameters {
+ public:
+ CheckMinusZeroParameters(CheckForMinusZeroMode mode,
+ const VectorSlotPair& feedback)
+ : mode_(mode), feedback_(feedback) {}
+
+ CheckForMinusZeroMode mode() const { return mode_; }
+ const VectorSlotPair& feedback() const { return feedback_; }
+
+ private:
+ CheckForMinusZeroMode mode_;
+ VectorSlotPair feedback_;
+};
+
+const CheckMinusZeroParameters& CheckMinusZeroParametersOf(const Operator* op)
+ WARN_UNUSED_RESULT;
+
+std::ostream& operator<<(std::ostream&, const CheckMinusZeroParameters& params);
+
+size_t hash_value(const CheckMinusZeroParameters& params);
+
+bool operator==(CheckMinusZeroParameters const&,
+ CheckMinusZeroParameters const&);
+
// Flags for map checks.
enum class CheckMapsFlag : uint8_t {
kNone = 0u,
@@ -155,19 +227,24 @@ bool operator!=(MapsParameterInfo const&, MapsParameterInfo const&);
size_t hash_value(MapsParameterInfo const&);
-// A descriptor for map checks.
+// A descriptor for map checks. The {feedback} parameter is optional.
+// If {feedback} references a valid CallIC slot and this MapCheck fails,
+// then speculation on that CallIC slot will be disabled.
class CheckMapsParameters final {
public:
- CheckMapsParameters(CheckMapsFlags flags, ZoneHandleSet<Map> const& maps)
- : flags_(flags), maps_info_(maps) {}
+ CheckMapsParameters(CheckMapsFlags flags, ZoneHandleSet<Map> const& maps,
+ const VectorSlotPair& feedback)
+ : flags_(flags), maps_info_(maps), feedback_(feedback) {}
CheckMapsFlags flags() const { return flags_; }
ZoneHandleSet<Map> const& maps() const { return maps_info_.maps(); }
MapsParameterInfo const& maps_info() const { return maps_info_; }
+ VectorSlotPair const& feedback() const { return feedback_; }
private:
CheckMapsFlags const flags_;
MapsParameterInfo const maps_info_;
+ VectorSlotPair const feedback_;
};
bool operator==(CheckMapsParameters const&, CheckMapsParameters const&);
@@ -197,7 +274,29 @@ inline size_t hash_value(GrowFastElementsMode mode) {
std::ostream& operator<<(std::ostream&, GrowFastElementsMode);
-GrowFastElementsMode GrowFastElementsModeOf(const Operator*) WARN_UNUSED_RESULT;
+class GrowFastElementsParameters {
+ public:
+ GrowFastElementsParameters(GrowFastElementsMode mode,
+ const VectorSlotPair& feedback)
+ : mode_(mode), feedback_(feedback) {}
+
+ GrowFastElementsMode mode() const { return mode_; }
+ const VectorSlotPair& feedback() const { return feedback_; }
+
+ private:
+ GrowFastElementsMode mode_;
+ VectorSlotPair feedback_;
+};
+
+bool operator==(const GrowFastElementsParameters&,
+ const GrowFastElementsParameters&);
+
+inline size_t hash_value(const GrowFastElementsParameters&);
+
+std::ostream& operator<<(std::ostream&, const GrowFastElementsParameters&);
+
+const GrowFastElementsParameters& GrowFastElementsParametersOf(const Operator*)
+ WARN_UNUSED_RESULT;
// A descriptor for elements kind transitions.
class ElementsTransition final {
@@ -270,6 +369,8 @@ class AllocateParameters {
PretenureFlag pretenure_;
};
+bool IsCheckedWithFeedback(const Operator* op);
+
size_t hash_value(AllocateParameters);
V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&, AllocateParameters);
@@ -282,7 +383,7 @@ Type* AllocateTypeOf(const Operator* op) WARN_UNUSED_RESULT;
UnicodeEncoding UnicodeEncodingOf(const Operator*) WARN_UNUSED_RESULT;
-BailoutReason BailoutReasonOf(const Operator* op) WARN_UNUSED_RESULT;
+AbortReason AbortReasonOf(const Operator* op) WARN_UNUSED_RESULT;
DeoptimizeReason DeoptimizeReasonOf(const Operator* op) WARN_UNUSED_RESULT;
@@ -364,6 +465,7 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorBuilder final
const Operator* NumberTrunc();
const Operator* NumberToBoolean();
const Operator* NumberToInt32();
+ const Operator* NumberToString();
const Operator* NumberToUint32();
const Operator* NumberToUint8Clamped();
@@ -402,9 +504,12 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorBuilder final
const Operator* StringCharAt();
const Operator* StringCharCodeAt();
const Operator* SeqStringCharCodeAt();
+ const Operator* StringCodePointAt();
+ const Operator* SeqStringCodePointAt();
const Operator* StringFromCharCode();
const Operator* StringFromCodePoint(UnicodeEncoding encoding);
const Operator* StringIndexOf();
+ const Operator* StringLength();
const Operator* StringToLowerCaseIntl();
const Operator* StringToUpperCaseIntl();
@@ -435,49 +540,52 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorBuilder final
const Operator* TruncateTaggedToBit();
const Operator* TruncateTaggedPointerToBit();
- const Operator* CheckIf(DeoptimizeReason deoptimize_reason);
- const Operator* CheckBounds();
- const Operator* CheckMaps(CheckMapsFlags, ZoneHandleSet<Map>);
const Operator* MaskIndexWithBound();
const Operator* CompareMaps(ZoneHandleSet<Map>);
const Operator* MapGuard(ZoneHandleSet<Map> maps);
+ const Operator* CheckBounds(const VectorSlotPair& feedback);
+ const Operator* CheckEqualsInternalizedString();
+ const Operator* CheckEqualsSymbol();
+ const Operator* CheckFloat64Hole(CheckFloat64HoleMode);
const Operator* CheckHeapObject();
+ const Operator* CheckIf(DeoptimizeReason deoptimize_reason);
const Operator* CheckInternalizedString();
- const Operator* CheckNumber();
- const Operator* CheckSmi();
- const Operator* CheckString();
+ const Operator* CheckMaps(CheckMapsFlags, ZoneHandleSet<Map>,
+ const VectorSlotPair& = VectorSlotPair());
+ const Operator* CheckNotTaggedHole();
+ const Operator* CheckNumber(const VectorSlotPair& feedback);
+ const Operator* CheckReceiver();
const Operator* CheckSeqString();
+ const Operator* CheckSmi(const VectorSlotPair& feedback);
+ const Operator* CheckString(const VectorSlotPair& feedback);
const Operator* CheckSymbol();
- const Operator* CheckReceiver();
+ const Operator* CheckedFloat64ToInt32(CheckForMinusZeroMode,
+ const VectorSlotPair& feedback);
const Operator* CheckedInt32Add();
- const Operator* CheckedInt32Sub();
const Operator* CheckedInt32Div();
const Operator* CheckedInt32Mod();
- const Operator* CheckedUint32Div();
- const Operator* CheckedUint32Mod();
const Operator* CheckedInt32Mul(CheckForMinusZeroMode);
- const Operator* CheckedInt32ToTaggedSigned();
- const Operator* CheckedUint32ToInt32();
- const Operator* CheckedUint32ToTaggedSigned();
- const Operator* CheckedFloat64ToInt32(CheckForMinusZeroMode);
- const Operator* CheckedTaggedSignedToInt32();
- const Operator* CheckedTaggedToInt32(CheckForMinusZeroMode);
+ const Operator* CheckedInt32Sub();
+ const Operator* CheckedInt32ToTaggedSigned(const VectorSlotPair& feedback);
+ const Operator* CheckedTaggedSignedToInt32(const VectorSlotPair& feedback);
const Operator* CheckedTaggedToFloat64(CheckTaggedInputMode);
- const Operator* CheckedTaggedToTaggedSigned();
- const Operator* CheckedTaggedToTaggedPointer();
- const Operator* CheckedTruncateTaggedToWord32(CheckTaggedInputMode);
+ const Operator* CheckedTaggedToInt32(CheckForMinusZeroMode,
+ const VectorSlotPair& feedback);
+ const Operator* CheckedTaggedToTaggedPointer(const VectorSlotPair& feedback);
+ const Operator* CheckedTaggedToTaggedSigned(const VectorSlotPair& feedback);
+ const Operator* CheckedTruncateTaggedToWord32(CheckTaggedInputMode,
+ const VectorSlotPair& feedback);
+ const Operator* CheckedUint32Div();
+ const Operator* CheckedUint32Mod();
+ const Operator* CheckedUint32ToInt32(const VectorSlotPair& feedback);
+ const Operator* CheckedUint32ToTaggedSigned(const VectorSlotPair& feedback);
const Operator* ConvertReceiver(ConvertReceiverMode);
- const Operator* CheckFloat64Hole(CheckFloat64HoleMode);
- const Operator* CheckNotTaggedHole();
const Operator* ConvertTaggedHoleToUndefined();
- const Operator* CheckEqualsInternalizedString();
- const Operator* CheckEqualsSymbol();
-
const Operator* ObjectIsArrayBufferView();
const Operator* ObjectIsBigInt();
const Operator* ObjectIsCallable();
@@ -493,6 +601,8 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorBuilder final
const Operator* ObjectIsSymbol();
const Operator* ObjectIsUndetectable();
+ const Operator* NumberIsFloat64Hole();
+
const Operator* ArgumentsFrame();
const Operator* ArgumentsLength(int formal_parameter_count,
bool is_rest_length);
@@ -503,6 +613,9 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorBuilder final
// new-arguments-elements arguments-frame, arguments-length
const Operator* NewArgumentsElements(int mapped_count);
+ // new-cons-string length, first, second
+ const Operator* NewConsString();
+
// array-buffer-was-neutered buffer
const Operator* ArrayBufferWasNeutered();
@@ -510,7 +623,8 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorBuilder final
const Operator* EnsureWritableFastElements();
// maybe-grow-fast-elements object, elements, index, length
- const Operator* MaybeGrowFastElements(GrowFastElementsMode mode);
+ const Operator* MaybeGrowFastElements(GrowFastElementsMode mode,
+ const VectorSlotPair& feedback);
// transition-elements-kind object, from-map, to-map
const Operator* TransitionElementsKind(ElementsTransition transition);
@@ -549,7 +663,7 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorBuilder final
const Operator* StoreTypedElement(ExternalArrayType const&);
// Abort (for terminating execution on internal error).
- const Operator* RuntimeAbort(BailoutReason reason);
+ const Operator* RuntimeAbort(AbortReason reason);
private:
Zone* zone() const { return zone_; }
diff --git a/deps/v8/src/compiler/state-values-utils.cc b/deps/v8/src/compiler/state-values-utils.cc
index 30586f307c..26c47e0cb5 100644
--- a/deps/v8/src/compiler/state-values-utils.cc
+++ b/deps/v8/src/compiler/state-values-utils.cc
@@ -109,7 +109,7 @@ int StateValuesHashKey(Node** nodes, size_t count) {
for (size_t i = 0; i < count; i++) {
hash = hash * 23 + (nodes[i] == nullptr ? 0 : nodes[i]->id());
}
- return static_cast<int>(hash & 0x7fffffff);
+ return static_cast<int>(hash & 0x7FFFFFFF);
}
} // namespace
diff --git a/deps/v8/src/compiler/store-store-elimination.cc b/deps/v8/src/compiler/store-store-elimination.cc
index 1ed12d245b..672acb203d 100644
--- a/deps/v8/src/compiler/store-store-elimination.cc
+++ b/deps/v8/src/compiler/store-store-elimination.cc
@@ -326,13 +326,11 @@ UnobservablesSet RedundantStoreFinder::RecomputeSet(Node* node,
}
bool RedundantStoreFinder::CannotObserveStoreField(Node* node) {
- return node->opcode() == IrOpcode::kCheckedLoad ||
- node->opcode() == IrOpcode::kLoadElement ||
+ return node->opcode() == IrOpcode::kLoadElement ||
node->opcode() == IrOpcode::kLoad ||
node->opcode() == IrOpcode::kStore ||
node->opcode() == IrOpcode::kEffectPhi ||
node->opcode() == IrOpcode::kStoreElement ||
- node->opcode() == IrOpcode::kCheckedStore ||
node->opcode() == IrOpcode::kUnsafePointerAdd ||
node->opcode() == IrOpcode::kRetain;
}
diff --git a/deps/v8/src/compiler/type-cache.h b/deps/v8/src/compiler/type-cache.h
index 346aa47bfc..428688abde 100644
--- a/deps/v8/src/compiler/type-cache.h
+++ b/deps/v8/src/compiler/type-cache.h
@@ -44,6 +44,8 @@ class TypeCache final {
Type* const kSingletonOne = CreateRange(1.0, 1.0);
Type* const kSingletonTen = CreateRange(10.0, 10.0);
Type* const kSingletonMinusOne = CreateRange(-1.0, -1.0);
+ Type* const kZeroOrMinusZero =
+ Type::Union(kSingletonZero, Type::MinusZero(), zone());
Type* const kZeroOrUndefined =
Type::Union(kSingletonZero, Type::Undefined(), zone());
Type* const kTenOrUndefined =
diff --git a/deps/v8/src/compiler/typer.cc b/deps/v8/src/compiler/typer.cc
index 605a96c944..12c9a194b8 100644
--- a/deps/v8/src/compiler/typer.cc
+++ b/deps/v8/src/compiler/typer.cc
@@ -254,6 +254,9 @@ class Typer::Visitor : public Reducer {
Type* TypeUnaryOp(Node* node, UnaryTyperFun);
Type* TypeBinaryOp(Node* node, BinaryTyperFun);
+ static Type* BinaryNumberOpTyper(Type* lhs, Type* rhs, Typer* t,
+ BinaryTyperFun f);
+
enum ComparisonOutcomeFlags {
kComparisonTrue = 1,
kComparisonFalse = 2,
@@ -399,7 +402,6 @@ Type* Typer::Visitor::TypeUnaryOp(Node* node, UnaryTyperFun f) {
return input->IsNone() ? Type::None() : f(input, typer_);
}
-
Type* Typer::Visitor::TypeBinaryOp(Node* node, BinaryTyperFun f) {
Type* left = Operand(node, 0);
Type* right = Operand(node, 1);
@@ -407,6 +409,23 @@ Type* Typer::Visitor::TypeBinaryOp(Node* node, BinaryTyperFun f) {
: f(left, right, typer_);
}
+Type* Typer::Visitor::BinaryNumberOpTyper(Type* lhs, Type* rhs, Typer* t,
+ BinaryTyperFun f) {
+ lhs = ToNumeric(lhs, t);
+ rhs = ToNumeric(rhs, t);
+ bool lhs_is_number = lhs->Is(Type::Number());
+ bool rhs_is_number = rhs->Is(Type::Number());
+ if (lhs_is_number && rhs_is_number) {
+ return f(lhs, rhs, t);
+ }
+ if (lhs_is_number || rhs_is_number) {
+ return Type::Number();
+ }
+ if (lhs->Is(Type::BigInt()) || rhs->Is(Type::BigInt())) {
+ return Type::BigInt();
+ }
+ return Type::Numeric();
+}
Typer::Visitor::ComparisonOutcome Typer::Visitor::Invert(
ComparisonOutcome outcome, Typer* t) {
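Note on the new BinaryNumberOpTyper helper above: the case analysis it applies after ToNumeric can be summarized with a small stand-alone sketch. This is illustrative only and not part of the patch; the enum below is an assumption used purely to show the lattice, not a V8 type.

// Sketch of BinaryNumberOpTyper's result lattice (illustrative, not V8 code).
enum class NumericKind { kNumber, kBigInt, kUnknownNumeric };  // assumed mini-lattice

NumericKind BinaryNumberOpResult(NumericKind lhs, NumericKind rhs) {
  // Both Number: the specific typer f(lhs, rhs) refines the result further.
  // Exactly one Number: mixing Number and BigInt throws, so a successful op
  // can only produce a Number.
  if (lhs == NumericKind::kNumber || rhs == NumericKind::kNumber)
    return NumericKind::kNumber;
  if (lhs == NumericKind::kBigInt || rhs == NumericKind::kBigInt)
    return NumericKind::kBigInt;
  return NumericKind::kUnknownNumeric;  // nothing known beyond "some numeric value"
}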
@@ -417,7 +436,6 @@ Typer::Visitor::ComparisonOutcome Typer::Visitor::Invert(
return result;
}
-
Type* Typer::Visitor::FalsifyUndefined(ComparisonOutcome outcome, Typer* t) {
if ((outcome & kComparisonFalse) != 0 ||
(outcome & kComparisonUndefined) != 0) {
@@ -947,7 +965,7 @@ Type* Typer::Visitor::TypeDead(Node* node) { return Type::None(); }
Type* Typer::Visitor::TypeDeadValue(Node* node) { return Type::None(); }
-Type* Typer::Visitor::TypeUnreachable(Node* node) { UNREACHABLE(); }
+Type* Typer::Visitor::TypeUnreachable(Node* node) { return Type::None(); }
// JS comparison operators.
@@ -1052,53 +1070,23 @@ Type* Typer::Visitor::JSGreaterThanOrEqualTyper(
Type* Typer::Visitor::JSBitwiseOrTyper(Type* lhs, Type* rhs, Typer* t) {
- lhs = ToNumeric(lhs, t);
- rhs = ToNumeric(rhs, t);
- if (lhs->Is(Type::Number()) && rhs->Is(Type::Number())) {
- return NumberBitwiseOr(lhs, rhs, t);
- }
- return Type::Numeric();
+ return BinaryNumberOpTyper(lhs, rhs, t, NumberBitwiseOr);
}
-
Type* Typer::Visitor::JSBitwiseAndTyper(Type* lhs, Type* rhs, Typer* t) {
- lhs = ToNumeric(lhs, t);
- rhs = ToNumeric(rhs, t);
- if (lhs->Is(Type::Number()) && rhs->Is(Type::Number())) {
- return NumberBitwiseAnd(lhs, rhs, t);
- }
- return Type::Numeric();
+ return BinaryNumberOpTyper(lhs, rhs, t, NumberBitwiseAnd);
}
-
Type* Typer::Visitor::JSBitwiseXorTyper(Type* lhs, Type* rhs, Typer* t) {
- lhs = ToNumeric(lhs, t);
- rhs = ToNumeric(rhs, t);
- if (lhs->Is(Type::Number()) && rhs->Is(Type::Number())) {
- return NumberBitwiseXor(lhs, rhs, t);
- }
- return Type::Numeric();
+ return BinaryNumberOpTyper(lhs, rhs, t, NumberBitwiseXor);
}
-
Type* Typer::Visitor::JSShiftLeftTyper(Type* lhs, Type* rhs, Typer* t) {
- return NumberShiftLeft(ToNumber(lhs, t), ToNumber(rhs, t), t);
- lhs = ToNumeric(lhs, t);
- rhs = ToNumeric(rhs, t);
- if (lhs->Is(Type::Number()) && rhs->Is(Type::Number())) {
- return NumberShiftLeft(lhs, rhs, t);
- }
- return Type::Numeric();
+ return BinaryNumberOpTyper(lhs, rhs, t, NumberShiftLeft);
}
-
Type* Typer::Visitor::JSShiftRightTyper(Type* lhs, Type* rhs, Typer* t) {
- lhs = ToNumeric(lhs, t);
- rhs = ToNumeric(rhs, t);
- if (lhs->Is(Type::Number()) && rhs->Is(Type::Number())) {
- return NumberShiftRight(lhs, rhs, t);
- }
- return Type::Numeric();
+ return BinaryNumberOpTyper(lhs, rhs, t, NumberShiftRight);
}
@@ -1120,51 +1108,27 @@ Type* Typer::Visitor::JSAddTyper(Type* lhs, Type* rhs, Typer* t) {
}
}
// The addition must be numeric.
- lhs = ToNumeric(lhs, t);
- rhs = ToNumeric(rhs, t);
- if (lhs->Is(Type::Number()) && rhs->Is(Type::Number())) {
- return NumberAdd(lhs, rhs, t);
- }
- return Type::Numeric();
+ return BinaryNumberOpTyper(lhs, rhs, t, NumberAdd);
}
Type* Typer::Visitor::JSSubtractTyper(Type* lhs, Type* rhs, Typer* t) {
- lhs = ToNumeric(lhs, t);
- rhs = ToNumeric(rhs, t);
- if (lhs->Is(Type::Number()) && rhs->Is(Type::Number())) {
- return NumberSubtract(lhs, rhs, t);
- }
- return Type::Numeric();
+ return BinaryNumberOpTyper(lhs, rhs, t, NumberSubtract);
}
Type* Typer::Visitor::JSMultiplyTyper(Type* lhs, Type* rhs, Typer* t) {
- lhs = ToNumeric(lhs, t);
- rhs = ToNumeric(rhs, t);
- if (lhs->Is(Type::Number()) && rhs->Is(Type::Number())) {
- return NumberMultiply(lhs, rhs, t);
- }
- return Type::Numeric();
+ return BinaryNumberOpTyper(lhs, rhs, t, NumberMultiply);
}
Type* Typer::Visitor::JSDivideTyper(Type* lhs, Type* rhs, Typer* t) {
- lhs = ToNumeric(lhs, t);
- rhs = ToNumeric(rhs, t);
- if (lhs->Is(Type::Number()) && rhs->Is(Type::Number())) {
- return NumberDivide(lhs, rhs, t);
- }
- return Type::Numeric();
+ return BinaryNumberOpTyper(lhs, rhs, t, NumberDivide);
}
Type* Typer::Visitor::JSModulusTyper(Type* lhs, Type* rhs, Typer* t) {
- lhs = ToNumeric(lhs, t);
- rhs = ToNumeric(rhs, t);
- if (lhs->Is(Type::Number()) && rhs->Is(Type::Number())) {
- return NumberModulus(lhs, rhs, t);
- }
- return Type::Numeric();
+ return BinaryNumberOpTyper(lhs, rhs, t, NumberModulus);
}
Type* Typer::Visitor::JSExponentiateTyper(Type* lhs, Type* rhs, Typer* t) {
+ // TODO(neis): Refine using BinaryNumberOpTyper?
return Type::Numeric();
}
@@ -1556,7 +1520,17 @@ Type* Typer::Visitor::JSCallTyper(Type* fun, Typer* t) {
case kDateGetTime:
return t->cache_.kJSDateValueType;
+ // Symbol functions.
+ case kSymbolConstructor:
+ return Type::Symbol();
+
+ // BigInt functions.
+ case kBigIntConstructor:
+ return Type::BigInt();
+
// Number functions.
+ case kNumberConstructor:
+ return Type::Number();
case kNumberIsFinite:
case kNumberIsInteger:
case kNumberIsNaN:
@@ -1570,6 +1544,8 @@ Type* Typer::Visitor::JSCallTyper(Type* fun, Typer* t) {
return Type::String();
// String functions.
+ case kStringConstructor:
+ return Type::String();
case kStringCharCodeAt:
return Type::Union(Type::Range(0, kMaxUInt16, t->zone()), Type::NaN(),
t->zone());
@@ -1850,6 +1826,10 @@ Type* Typer::Visitor::TypeJSGeneratorRestoreRegister(Node* node) {
return Type::Any();
}
+Type* Typer::Visitor::TypeJSGeneratorRestoreInputOrDebugPos(Node* node) {
+ return Type::Any();
+}
+
Type* Typer::Visitor::TypeJSStackCheck(Node* node) { return Type::Any(); }
Type* Typer::Visitor::TypeJSDebugger(Node* node) { return Type::Any(); }
@@ -1968,6 +1948,14 @@ Type* Typer::Visitor::TypeSeqStringCharCodeAt(Node* node) {
return typer_->cache_.kUint16;
}
+Type* Typer::Visitor::TypeStringCodePointAt(Node* node) {
+ return Type::Range(0.0, String::kMaxCodePoint, zone());
+}
+
+Type* Typer::Visitor::TypeSeqStringCodePointAt(Node* node) {
+ return Type::Range(0.0, String::kMaxCodePoint, zone());
+}
+
Type* Typer::Visitor::TypeStringFromCharCode(Node* node) {
return TypeUnaryOp(node, StringFromCharCodeTyper);
}
@@ -1976,7 +1964,13 @@ Type* Typer::Visitor::TypeStringFromCodePoint(Node* node) {
return TypeUnaryOp(node, StringFromCodePointTyper);
}
-Type* Typer::Visitor::TypeStringIndexOf(Node* node) { UNREACHABLE(); }
+Type* Typer::Visitor::TypeStringIndexOf(Node* node) {
+ return Type::Range(-1.0, String::kMaxLength, zone());
+}
+
+Type* Typer::Visitor::TypeStringLength(Node* node) {
+ return typer_->cache_.kStringLengthType;
+}
Type* Typer::Visitor::TypeMaskIndexWithBound(Node* node) {
return Type::Union(Operand(node, 0), typer_->cache_.kSingletonZero, zone());
@@ -2151,6 +2145,10 @@ Type* Typer::Visitor::TypeObjectIsMinusZero(Node* node) {
return TypeUnaryOp(node, ObjectIsMinusZero);
}
+Type* Typer::Visitor::TypeNumberIsFloat64Hole(Node* node) {
+ return Type::Boolean();
+}
+
Type* Typer::Visitor::TypeObjectIsNaN(Node* node) {
return TypeUnaryOp(node, ObjectIsNaN);
}
@@ -2205,6 +2203,10 @@ Type* Typer::Visitor::TypeNewArgumentsElements(Node* node) {
return Type::OtherInternal();
}
+Type* Typer::Visitor::TypeNewConsString(Node* node) {
+ return Type::OtherNonSeqString();
+}
+
Type* Typer::Visitor::TypeArrayBufferWasNeutered(Node* node) {
return Type::Boolean();
}
diff --git a/deps/v8/src/compiler/types.cc b/deps/v8/src/compiler/types.cc
index 1b6ca6b53f..a3e90d579a 100644
--- a/deps/v8/src/compiler/types.cc
+++ b/deps/v8/src/compiler/types.cc
@@ -309,6 +309,8 @@ Type::bitset BitsetType::Lub(i::Map* map) {
case PROTOTYPE_INFO_TYPE:
case TUPLE2_TYPE:
case TUPLE3_TYPE:
+ case LOAD_HANDLER_TYPE:
+ case STORE_HANDLER_TYPE:
case CONTEXT_EXTENSION_TYPE:
case ASYNC_GENERATOR_REQUEST_TYPE:
case CODE_DATA_CONTAINER_TYPE:
diff --git a/deps/v8/src/compiler/types.h b/deps/v8/src/compiler/types.h
index d791ec25c5..c4c371dab3 100644
--- a/deps/v8/src/compiler/types.h
+++ b/deps/v8/src/compiler/types.h
@@ -170,12 +170,11 @@ namespace compiler {
V(NumberOrHole, kNumber | kHole) \
V(NumberOrOddball, kNumber | kNullOrUndefined | kBoolean | \
kHole) \
- V(NumberOrString, kNumber | kString) \
V(NumericOrString, kNumeric | kString) \
V(NumberOrUndefined, kNumber | kUndefined) \
V(NumberOrUndefinedOrNullOrBoolean, \
kNumber | kNullOrUndefined | kBoolean) \
- V(PlainPrimitive, kNumberOrString | kBoolean | \
+ V(PlainPrimitive, kNumber | kString | kBoolean | \
kNullOrUndefined) \
V(Primitive, kSymbol | kBigInt | kPlainPrimitive) \
V(OtherUndetectableOrUndefined, kOtherUndetectable | kUndefined) \
diff --git a/deps/v8/src/compiler/verifier.cc b/deps/v8/src/compiler/verifier.cc
index e0c40df63b..a66a73f5d3 100644
--- a/deps/v8/src/compiler/verifier.cc
+++ b/deps/v8/src/compiler/verifier.cc
@@ -51,7 +51,7 @@ class Verifier::Visitor {
std::ostringstream str;
str << "TypeError: node #" << node->id() << ":" << *node->op()
<< " should never have a type";
- FATAL(str.str().c_str());
+ FATAL("%s", str.str().c_str());
}
}
void CheckTypeIs(Node* node, Type* type) {
@@ -62,7 +62,7 @@ class Verifier::Visitor {
NodeProperties::GetType(node)->PrintTo(str);
str << " is not ";
type->PrintTo(str);
- FATAL(str.str().c_str());
+ FATAL("%s", str.str().c_str());
}
}
void CheckTypeMaybe(Node* node, Type* type) {
@@ -73,7 +73,7 @@ class Verifier::Visitor {
NodeProperties::GetType(node)->PrintTo(str);
str << " must intersect ";
type->PrintTo(str);
- FATAL(str.str().c_str());
+ FATAL("%s", str.str().c_str());
}
}
void CheckValueInputIs(Node* node, int i, Type* type) {
@@ -86,7 +86,7 @@ class Verifier::Visitor {
NodeProperties::GetType(input)->PrintTo(str);
str << " is not ";
type->PrintTo(str);
- FATAL(str.str().c_str());
+ FATAL("%s", str.str().c_str());
}
}
void CheckOutput(Node* node, Node* use, int count, const char* kind) {
@@ -95,7 +95,7 @@ class Verifier::Visitor {
str << "GraphError: node #" << node->id() << ":" << *node->op()
<< " does not produce " << kind << " output used by node #"
<< use->id() << ":" << *use->op();
- FATAL(str.str().c_str());
+ FATAL("%s", str.str().c_str());
}
}
};
@@ -236,10 +236,19 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
// Dead is never connected to the graph.
UNREACHABLE();
case IrOpcode::kDeadValue:
+ CheckValueInputIs(node, 0, Type::None());
CheckTypeIs(node, Type::None());
break;
case IrOpcode::kUnreachable:
- CheckNotTyped(node);
+ CheckTypeIs(node, Type::None());
+ for (Edge edge : node->use_edges()) {
+ Node* use = edge.from();
+ if (NodeProperties::IsValueEdge(edge) && all.IsLive(use)) {
+ // {Unreachable} nodes can only be used by {DeadValue}, because they
+ // don't actually produce a value.
+ CHECK_EQ(IrOpcode::kDeadValue, use->opcode());
+ }
+ }
break;
case IrOpcode::kBranch: {
// Branch uses are IfTrue and IfFalse.
@@ -826,6 +835,10 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
CheckTypeIs(node, Type::Any());
break;
+ case IrOpcode::kJSGeneratorRestoreInputOrDebugPos:
+ CheckTypeIs(node, Type::Any());
+ break;
+
case IrOpcode::kJSStackCheck:
case IrOpcode::kJSDebugger:
// Type is empty.
@@ -984,6 +997,11 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
CheckValueInputIs(node, 0, Type::Number());
CheckTypeIs(node, Type::Signed32());
break;
+ case IrOpcode::kNumberToString:
+ // Number -> String
+ CheckValueInputIs(node, 0, Type::Number());
+ CheckTypeIs(node, Type::String());
+ break;
case IrOpcode::kNumberToUint32:
case IrOpcode::kNumberToUint8Clamped:
// Number -> Unsigned32
@@ -1041,6 +1059,18 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
CheckValueInputIs(node, 1, Type::Unsigned32());
CheckTypeIs(node, Type::UnsignedSmall());
break;
+ case IrOpcode::kStringCodePointAt:
+ // (String, Unsigned32) -> UnsignedSmall
+ CheckValueInputIs(node, 0, Type::String());
+ CheckValueInputIs(node, 1, Type::Unsigned32());
+ CheckTypeIs(node, Type::UnsignedSmall());
+ break;
+ case IrOpcode::kSeqStringCodePointAt:
+ // (String, Unsigned32) -> UnsignedSmall
+ CheckValueInputIs(node, 0, Type::String());
+ CheckValueInputIs(node, 1, Type::Unsigned32());
+ CheckTypeIs(node, Type::UnsignedSmall());
+ break;
case IrOpcode::kStringFromCharCode:
// Number -> String
CheckValueInputIs(node, 0, Type::Number());
@@ -1058,6 +1088,10 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
CheckValueInputIs(node, 2, Type::SignedSmall());
CheckTypeIs(node, Type::SignedSmall());
break;
+ case IrOpcode::kStringLength:
+ CheckValueInputIs(node, 0, Type::String());
+ CheckTypeIs(node, TypeCache::Get().kStringLengthType);
+ break;
case IrOpcode::kStringToLowerCaseIntl:
case IrOpcode::kStringToUpperCaseIntl:
CheckValueInputIs(node, 0, Type::String());
@@ -1094,6 +1128,10 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
CheckValueInputIs(node, 0, Type::Any());
CheckTypeIs(node, Type::Boolean());
break;
+ case IrOpcode::kNumberIsFloat64Hole:
+ CheckValueInputIs(node, 0, Type::NumberOrHole());
+ CheckTypeIs(node, Type::Boolean());
+ break;
case IrOpcode::kFindOrderedHashMapEntry:
CheckValueInputIs(node, 0, Type::Any());
CheckTypeIs(node, Type::SignedSmall());
@@ -1122,6 +1160,12 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
Code::kMaxArguments, zone));
CheckTypeIs(node, Type::OtherInternal());
break;
+ case IrOpcode::kNewConsString:
+ CheckValueInputIs(node, 0, TypeCache::Get().kStringLengthType);
+ CheckValueInputIs(node, 1, Type::String());
+ CheckValueInputIs(node, 2, Type::String());
+ CheckTypeIs(node, Type::OtherString());
+ break;
case IrOpcode::kAllocate:
CheckValueInputIs(node, 0, Type::PlainNumber());
break;
@@ -1591,8 +1635,6 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
case IrOpcode::kLoadParentFramePointer:
case IrOpcode::kUnalignedLoad:
case IrOpcode::kUnalignedStore:
- case IrOpcode::kCheckedLoad:
- case IrOpcode::kCheckedStore:
case IrOpcode::kAtomicLoad:
case IrOpcode::kAtomicStore:
case IrOpcode::kAtomicExchange:
@@ -1602,6 +1644,7 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
case IrOpcode::kAtomicAnd:
case IrOpcode::kAtomicOr:
case IrOpcode::kAtomicXor:
+ case IrOpcode::kSpeculationFence:
#define SIMD_MACHINE_OP_CASE(Name) case IrOpcode::k##Name:
MACHINE_SIMD_OP_LIST(SIMD_MACHINE_OP_CASE)
diff --git a/deps/v8/src/compiler/wasm-compiler.cc b/deps/v8/src/compiler/wasm-compiler.cc
index a04c7b3e5d..9bbf5f3a3f 100644
--- a/deps/v8/src/compiler/wasm-compiler.cc
+++ b/deps/v8/src/compiler/wasm-compiler.cc
@@ -34,6 +34,8 @@
#include "src/log-inl.h"
#include "src/trap-handler/trap-handler.h"
#include "src/wasm/function-body-decoder.h"
+#include "src/wasm/memory-tracing.h"
+#include "src/wasm/wasm-code-manager.h"
#include "src/wasm/wasm-limits.h"
#include "src/wasm/wasm-module.h"
#include "src/wasm/wasm-objects-inl.h"
@@ -68,6 +70,13 @@ void MergeControlToEnd(JSGraph* jsgraph, Node* node) {
}
}
+bool ContainsSimd(wasm::FunctionSig* sig) {
+ for (wasm::ValueType t : sig->all()) {
+ if (t == wasm::kWasmS128) return true;
+ }
+ return false;
+}
+
} // namespace
WasmGraphBuilder::WasmGraphBuilder(
@@ -79,21 +88,15 @@ WasmGraphBuilder::WasmGraphBuilder(
jsgraph_(jsgraph),
centry_stub_node_(jsgraph_->HeapConstant(centry_stub)),
env_(env),
- signature_tables_(zone),
function_tables_(zone),
- function_table_sizes_(zone),
cur_buffer_(def_buffer_),
cur_bufsize_(kDefaultBufferSize),
+ has_simd_(ContainsSimd(sig)),
untrusted_code_mitigations_(FLAG_untrusted_code_mitigations),
runtime_exception_support_(exception_support),
sig_(sig),
source_position_table_(source_position_table) {
- for (size_t i = sig->parameter_count(); i > 0 && !has_simd_; --i) {
- if (sig->GetParam(i - 1) == wasm::kWasmS128) has_simd_ = true;
- }
- for (size_t i = sig->return_count(); i > 0 && !has_simd_; --i) {
- if (sig->GetReturn(i - 1) == wasm::kWasmS128) has_simd_ = true;
- }
+ DCHECK_IMPLIES(use_trap_handler(), trap_handler::IsTrapHandlerEnabled());
DCHECK_NOT_NULL(jsgraph_);
}
@@ -561,9 +564,15 @@ Node* WasmGraphBuilder::Unop(wasm::WasmOpcode opcode, Node* input,
op = m->Float64Sqrt();
break;
case wasm::kExprI32SConvertF64:
- return BuildI32SConvertF64(input, position);
+ return BuildI32SConvertF64(input, position, NumericImplementation::kTrap);
+ case wasm::kExprI32SConvertSatF64:
+ return BuildI32SConvertF64(input, position,
+ NumericImplementation::kSaturate);
case wasm::kExprI32UConvertF64:
- return BuildI32UConvertF64(input, position);
+ return BuildI32UConvertF64(input, position, NumericImplementation::kTrap);
+ case wasm::kExprI32UConvertSatF64:
+ return BuildI32UConvertF64(input, position,
+ NumericImplementation::kSaturate);
case wasm::kExprI32AsmjsSConvertF64:
return BuildI32AsmjsSConvertF64(input);
case wasm::kExprI32AsmjsUConvertF64:
@@ -584,9 +593,15 @@ Node* WasmGraphBuilder::Unop(wasm::WasmOpcode opcode, Node* input,
op = m->RoundUint32ToFloat32();
break;
case wasm::kExprI32SConvertF32:
- return BuildI32SConvertF32(input, position);
+ return BuildI32SConvertF32(input, position, NumericImplementation::kTrap);
+ case wasm::kExprI32SConvertSatF32:
+ return BuildI32SConvertF32(input, position,
+ NumericImplementation::kSaturate);
case wasm::kExprI32UConvertF32:
- return BuildI32UConvertF32(input, position);
+ return BuildI32UConvertF32(input, position, NumericImplementation::kTrap);
+ case wasm::kExprI32UConvertSatF32:
+ return BuildI32UConvertF32(input, position,
+ NumericImplementation::kSaturate);
case wasm::kExprI32AsmjsSConvertF32:
return BuildI32AsmjsSConvertF32(input);
case wasm::kExprI32AsmjsUConvertF32:
@@ -964,7 +979,7 @@ Node* WasmGraphBuilder::Unreachable(wasm::WasmCodePosition position) {
}
Node* WasmGraphBuilder::MaskShiftCount32(Node* node) {
- static const int32_t kMask32 = 0x1f;
+ static const int32_t kMask32 = 0x1F;
if (!jsgraph()->machine()->Word32ShiftIsSafe()) {
// Shifts by constants are so common we pattern-match them here.
Int32Matcher match(node);
@@ -980,7 +995,7 @@ Node* WasmGraphBuilder::MaskShiftCount32(Node* node) {
}
Node* WasmGraphBuilder::MaskShiftCount64(Node* node) {
- static const int64_t kMask64 = 0x3f;
+ static const int64_t kMask64 = 0x3F;
if (!jsgraph()->machine()->Word32ShiftIsSafe()) {
// Shifts by constants are so common we pattern-match them here.
Int64Matcher match(node);
@@ -1009,9 +1024,8 @@ static bool ReverseBytesSupported(MachineOperatorBuilder* m,
return false;
}
-Node* WasmGraphBuilder::BuildChangeEndiannessStore(Node* node,
- MachineType memtype,
- wasm::ValueType wasmtype) {
+Node* WasmGraphBuilder::BuildChangeEndiannessStore(
+ Node* node, MachineRepresentation mem_rep, wasm::ValueType wasmtype) {
Node* result;
Node* value = node;
MachineOperatorBuilder* m = jsgraph()->machine();
@@ -1040,23 +1054,22 @@ Node* WasmGraphBuilder::BuildChangeEndiannessStore(Node* node,
break;
}
- if (memtype.representation() == MachineRepresentation::kWord8) {
+ if (mem_rep == MachineRepresentation::kWord8) {
// No need to change endianness for byte size, return original node
return node;
}
- if (wasmtype == wasm::kWasmI64 &&
- memtype.representation() < MachineRepresentation::kWord64) {
+ if (wasmtype == wasm::kWasmI64 && mem_rep < MachineRepresentation::kWord64) {
// In case we store lower part of WasmI64 expression, we can truncate
// upper 32bits
value = graph()->NewNode(m->TruncateInt64ToInt32(), value);
valueSizeInBytes = 1 << ElementSizeLog2Of(wasm::kWasmI32);
valueSizeInBits = 8 * valueSizeInBytes;
- if (memtype.representation() == MachineRepresentation::kWord16) {
+ if (mem_rep == MachineRepresentation::kWord16) {
value =
graph()->NewNode(m->Word32Shl(), value, jsgraph()->Int32Constant(16));
}
} else if (wasmtype == wasm::kWasmI32 &&
- memtype.representation() == MachineRepresentation::kWord16) {
+ mem_rep == MachineRepresentation::kWord16) {
value =
graph()->NewNode(m->Word32Shl(), value, jsgraph()->Int32Constant(16));
}
@@ -1325,7 +1338,7 @@ Node* WasmGraphBuilder::BuildF32CopySign(Node* left, Node* right) {
wasm::kExprF32ReinterpretI32,
Binop(wasm::kExprI32Ior,
Binop(wasm::kExprI32And, Unop(wasm::kExprI32ReinterpretF32, left),
- jsgraph()->Int32Constant(0x7fffffff)),
+ jsgraph()->Int32Constant(0x7FFFFFFF)),
Binop(wasm::kExprI32And, Unop(wasm::kExprI32ReinterpretF32, right),
jsgraph()->Int32Constant(0x80000000))));
@@ -1338,7 +1351,7 @@ Node* WasmGraphBuilder::BuildF64CopySign(Node* left, Node* right) {
wasm::kExprF64ReinterpretI64,
Binop(wasm::kExprI64Ior,
Binop(wasm::kExprI64And, Unop(wasm::kExprI64ReinterpretF64, left),
- jsgraph()->Int64Constant(0x7fffffffffffffff)),
+ jsgraph()->Int64Constant(0x7FFFFFFFFFFFFFFF)),
Binop(wasm::kExprI64And, Unop(wasm::kExprI64ReinterpretF64, right),
jsgraph()->Int64Constant(0x8000000000000000))));
@@ -1350,78 +1363,177 @@ Node* WasmGraphBuilder::BuildF64CopySign(Node* left, Node* right) {
Node* high_word_right =
graph()->NewNode(m->Float64ExtractHighWord32(), right);
- Node* new_high_word =
- Binop(wasm::kExprI32Ior, Binop(wasm::kExprI32And, high_word_left,
- jsgraph()->Int32Constant(0x7fffffff)),
- Binop(wasm::kExprI32And, high_word_right,
- jsgraph()->Int32Constant(0x80000000)));
+ Node* new_high_word = Binop(wasm::kExprI32Ior,
+ Binop(wasm::kExprI32And, high_word_left,
+ jsgraph()->Int32Constant(0x7FFFFFFF)),
+ Binop(wasm::kExprI32And, high_word_right,
+ jsgraph()->Int32Constant(0x80000000)));
return graph()->NewNode(m->Float64InsertHighWord32(), left, new_high_word);
#endif
}
-Node* WasmGraphBuilder::BuildI32SConvertF32(Node* input,
- wasm::WasmCodePosition position) {
- MachineOperatorBuilder* m = jsgraph()->machine();
+// Helper classes for float to int conversions.
+struct WasmGraphBuilder::IntConvertOps {
+ MachineRepresentation word_rep() const {
+ return MachineRepresentation::kWord32;
+ }
+ Node* zero() const { return builder_->Int32Constant(0); }
+ virtual Node* min() const = 0;
+ virtual Node* max() const = 0;
+ virtual ~IntConvertOps() = default;
+
+ protected:
+ explicit IntConvertOps(WasmGraphBuilder* builder) : builder_(builder) {}
+ WasmGraphBuilder* builder_;
+ DISALLOW_IMPLICIT_CONSTRUCTORS(IntConvertOps);
+};
+
+struct I32SConvertOps final : public WasmGraphBuilder::IntConvertOps {
+ explicit I32SConvertOps(WasmGraphBuilder* builder)
+ : WasmGraphBuilder::IntConvertOps(builder) {}
+ ~I32SConvertOps() = default;
+ Node* min() const {
+ return builder_->Int32Constant(std::numeric_limits<int32_t>::min());
+ }
+ Node* max() const {
+ return builder_->Int32Constant(std::numeric_limits<int32_t>::max());
+ }
+ DISALLOW_IMPLICIT_CONSTRUCTORS(I32SConvertOps);
+};
+
+struct I32UConvertOps final : public WasmGraphBuilder::IntConvertOps {
+ explicit I32UConvertOps(WasmGraphBuilder* builder)
+ : WasmGraphBuilder::IntConvertOps(builder) {}
+ ~I32UConvertOps() = default;
+ Node* min() const {
+ return builder_->Int32Constant(std::numeric_limits<uint32_t>::min());
+ }
+ Node* max() const {
+ return builder_->Int32Constant(std::numeric_limits<uint32_t>::max());
+ }
+ DISALLOW_IMPLICIT_CONSTRUCTORS(I32UConvertOps);
+};
+
+struct WasmGraphBuilder::FloatConvertOps {
+ virtual Node* zero() const = 0;
+ virtual wasm::WasmOpcode trunc_op() const = 0;
+ virtual wasm::WasmOpcode ne_op() const = 0;
+ virtual wasm::WasmOpcode lt_op() const = 0;
+ virtual ~FloatConvertOps() = default;
+
+ protected:
+ explicit FloatConvertOps(WasmGraphBuilder* builder) : builder_(builder) {}
+ WasmGraphBuilder* builder_;
+ DISALLOW_IMPLICIT_CONSTRUCTORS(FloatConvertOps);
+};
+
+struct F32ConvertOps final : public WasmGraphBuilder::FloatConvertOps {
+ explicit F32ConvertOps(WasmGraphBuilder* builder)
+ : WasmGraphBuilder::FloatConvertOps(builder) {}
+ ~F32ConvertOps() = default;
+ Node* zero() const { return builder_->Float32Constant(0.0); }
+ wasm::WasmOpcode trunc_op() const { return wasm::kExprF32Trunc; }
+ wasm::WasmOpcode ne_op() const { return wasm::kExprF32Ne; }
+ wasm::WasmOpcode lt_op() const { return wasm::kExprF32Lt; }
+ DISALLOW_IMPLICIT_CONSTRUCTORS(F32ConvertOps);
+};
+
+struct F64ConvertOps final : public WasmGraphBuilder::FloatConvertOps {
+ explicit F64ConvertOps(WasmGraphBuilder* builder)
+ : WasmGraphBuilder::FloatConvertOps(builder) {}
+ ~F64ConvertOps() = default;
+ Node* zero() const { return builder_->Float64Constant(0.0); }
+ wasm::WasmOpcode trunc_op() const { return wasm::kExprF64Trunc; }
+ wasm::WasmOpcode ne_op() const { return wasm::kExprF64Ne; }
+ wasm::WasmOpcode lt_op() const { return wasm::kExprF64Lt; }
+ DISALLOW_IMPLICIT_CONSTRUCTORS(F64ConvertOps);
+};
+
+Node* WasmGraphBuilder::BuildConvertCheck(Node* test, Node* result, Node* input,
+ wasm::WasmCodePosition position,
+ NumericImplementation impl,
+ const IntConvertOps* int_ops,
+ const FloatConvertOps* float_ops) {
+ switch (impl) {
+ case NumericImplementation::kTrap:
+ TrapIfTrue(wasm::kTrapFloatUnrepresentable, test, position);
+ return result;
+ case NumericImplementation::kSaturate: {
+ Diamond tl_d(graph(), jsgraph()->common(), test, BranchHint::kFalse);
+ tl_d.Chain(*control_);
+ Diamond nan_d(graph(), jsgraph()->common(),
+ Binop(float_ops->ne_op(), input, input), // Checks if NaN.
+ BranchHint::kFalse);
+ nan_d.Nest(tl_d, true);
+ Diamond sat_d(graph(), jsgraph()->common(),
+ Binop(float_ops->lt_op(), input, float_ops->zero()),
+ BranchHint::kNone);
+ sat_d.Nest(nan_d, false);
+ Node* sat_val =
+ sat_d.Phi(int_ops->word_rep(), int_ops->min(), int_ops->max());
+ Node* nan_val = nan_d.Phi(int_ops->word_rep(), int_ops->zero(), sat_val);
+ return tl_d.Phi(int_ops->word_rep(), nan_val, result);
+ }
+ }
+ UNREACHABLE();
+}
+
+Node* WasmGraphBuilder::BuildI32ConvertOp(
+ Node* input, wasm::WasmCodePosition position, NumericImplementation impl,
+ const Operator* op, wasm::WasmOpcode check_op, const IntConvertOps* int_ops,
+ const FloatConvertOps* float_ops) {
// Truncation of the input value is needed for the overflow check later.
- Node* trunc = Unop(wasm::kExprF32Trunc, input);
- Node* result = graph()->NewNode(m->TruncateFloat32ToInt32(), trunc);
+ Node* trunc = Unop(float_ops->trunc_op(), input);
+ Node* result = graph()->NewNode(op, trunc);
// Convert the result back to f64. If we end up at a different value than the
- // truncated input value, then there has been an overflow and we trap.
- Node* check = Unop(wasm::kExprF32SConvertI32, result);
- Node* overflow = Binop(wasm::kExprF32Ne, trunc, check);
- TrapIfTrue(wasm::kTrapFloatUnrepresentable, overflow, position);
+ // truncated input value, then there has been an overflow and we
+ // trap/saturate.
+ Node* check = Unop(check_op, result);
+ Node* overflow = Binop(float_ops->ne_op(), trunc, check);
+ return BuildConvertCheck(overflow, result, input, position, impl, int_ops,
+ float_ops);
+}
- return result;
+Node* WasmGraphBuilder::BuildI32SConvertF32(Node* input,
+ wasm::WasmCodePosition position,
+ NumericImplementation impl) {
+ I32SConvertOps int_ops(this);
+ F32ConvertOps float_ops(this);
+ return BuildI32ConvertOp(input, position, impl,
+ jsgraph()->machine()->TruncateFloat32ToInt32(),
+ wasm::kExprF32SConvertI32, &int_ops, &float_ops);
}
Node* WasmGraphBuilder::BuildI32SConvertF64(Node* input,
- wasm::WasmCodePosition position) {
- MachineOperatorBuilder* m = jsgraph()->machine();
- // Truncation of the input value is needed for the overflow check later.
- Node* trunc = Unop(wasm::kExprF64Trunc, input);
- Node* result = graph()->NewNode(m->ChangeFloat64ToInt32(), trunc);
-
- // Convert the result back to f64. If we end up at a different value than the
- // truncated input value, then there has been an overflow and we trap.
- Node* check = Unop(wasm::kExprF64SConvertI32, result);
- Node* overflow = Binop(wasm::kExprF64Ne, trunc, check);
- TrapIfTrue(wasm::kTrapFloatUnrepresentable, overflow, position);
-
- return result;
+ wasm::WasmCodePosition position,
+ NumericImplementation impl) {
+ I32SConvertOps int_ops(this);
+ F64ConvertOps float_ops(this);
+ return BuildI32ConvertOp(input, position, impl,
+ jsgraph()->machine()->ChangeFloat64ToInt32(),
+ wasm::kExprF64SConvertI32, &int_ops, &float_ops);
}
Node* WasmGraphBuilder::BuildI32UConvertF32(Node* input,
- wasm::WasmCodePosition position) {
- MachineOperatorBuilder* m = jsgraph()->machine();
- // Truncation of the input value is needed for the overflow check later.
- Node* trunc = Unop(wasm::kExprF32Trunc, input);
- Node* result = graph()->NewNode(m->TruncateFloat32ToUint32(), trunc);
-
- // Convert the result back to f32. If we end up at a different value than the
- // truncated input value, then there has been an overflow and we trap.
- Node* check = Unop(wasm::kExprF32UConvertI32, result);
- Node* overflow = Binop(wasm::kExprF32Ne, trunc, check);
- TrapIfTrue(wasm::kTrapFloatUnrepresentable, overflow, position);
-
- return result;
+ wasm::WasmCodePosition position,
+ NumericImplementation impl) {
+ I32UConvertOps int_ops(this);
+ F32ConvertOps float_ops(this);
+ return BuildI32ConvertOp(input, position, impl,
+ jsgraph()->machine()->TruncateFloat32ToUint32(),
+ wasm::kExprF32UConvertI32, &int_ops, &float_ops);
}
Node* WasmGraphBuilder::BuildI32UConvertF64(Node* input,
- wasm::WasmCodePosition position) {
- MachineOperatorBuilder* m = jsgraph()->machine();
- // Truncation of the input value is needed for the overflow check later.
- Node* trunc = Unop(wasm::kExprF64Trunc, input);
- Node* result = graph()->NewNode(m->TruncateFloat64ToUint32(), trunc);
-
- // Convert the result back to f64. If we end up at a different value than the
- // truncated input value, then there has been an overflow and we trap.
- Node* check = Unop(wasm::kExprF64UConvertI32, result);
- Node* overflow = Binop(wasm::kExprF64Ne, trunc, check);
- TrapIfTrue(wasm::kTrapFloatUnrepresentable, overflow, position);
-
- return result;
+ wasm::WasmCodePosition position,
+ NumericImplementation impl) {
+ I32UConvertOps int_ops(this);
+ F64ConvertOps float_ops(this);
+ return BuildI32ConvertOp(input, position, impl,
+ jsgraph()->machine()->TruncateFloat64ToUint32(),
+ wasm::kExprF64UConvertI32, &int_ops, &float_ops);
}
Node* WasmGraphBuilder::BuildI32AsmjsSConvertF32(Node* input) {
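For readers unfamiliar with the two NumericImplementation modes used above: in kSaturate mode the diamonds in BuildConvertCheck encode the usual trunc_sat behaviour instead of trapping. A minimal stand-alone C++ sketch of the intended semantics (an illustration under that assumption, not the generated graph code):

#include <cmath>
#include <cstdint>
#include <limits>

// Saturating f64 -> i32: NaN maps to 0, out-of-range values clamp to
// INT32_MIN / INT32_MAX, in-range values are truncated toward zero.
int32_t SaturatingF64ToI32(double input) {
  if (std::isnan(input)) return 0;
  if (input < static_cast<double>(std::numeric_limits<int32_t>::min()))
    return std::numeric_limits<int32_t>::min();
  if (input > static_cast<double>(std::numeric_limits<int32_t>::max()))
    return std::numeric_limits<int32_t>::max();
  return static_cast<int32_t>(std::trunc(input));
}
// In kTrap mode the same NaN and out-of-range cases instead raise
// kTrapFloatUnrepresentable via TrapIfTrue.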
@@ -1861,8 +1973,7 @@ Node* WasmGraphBuilder::Throw(uint32_t tag,
break;
}
default:
- CHECK(false);
- break;
+ UNREACHABLE();
}
}
DCHECK_EQ(encoded_size, index);
@@ -1961,8 +2072,7 @@ Node** WasmGraphBuilder::GetExceptionValues(
break;
}
default:
- CHECK(false);
- break;
+ UNREACHABLE();
}
values[i] = value;
}
@@ -2330,7 +2440,7 @@ Node* WasmGraphBuilder::BuildWasmCall(wasm::FunctionSig* sig, Node** args,
// Make room for the wasm_context parameter at index 1, just after code.
memmove(&args[2], &args[1], params * sizeof(Node*));
- args[1] = wasm_context_;
+ args[1] = wasm_context_.get();
// Add effect and control inputs.
args[params + 2] = *effect_;
@@ -2364,7 +2474,7 @@ Node* WasmGraphBuilder::CallDirect(uint32_t index, Node** args, Node*** rets,
DCHECK_NULL(args[0]);
wasm::FunctionSig* sig = env_->module->functions[index].sig;
if (FLAG_wasm_jit_to_native) {
- // Simply encode the index of the target.
+ // Just encode the function index. This will be patched at instantiation.
Address code = reinterpret_cast<Address>(index);
args[0] = jsgraph()->RelocatableIntPtrConstant(
reinterpret_cast<intptr_t>(code), RelocInfo::WASM_CALL);
@@ -2396,45 +2506,39 @@ Node* WasmGraphBuilder::CallIndirect(uint32_t sig_index, Node** args,
Node* key = args[0];
// Bounds check against the table size.
- Node* size = function_table_sizes_[table_index];
+ Node* size = function_tables_[table_index].size;
Node* in_bounds = graph()->NewNode(machine->Uint32LessThan(), key, size);
TrapIfFalse(wasm::kTrapFuncInvalid, in_bounds, position);
- Node* table_address = function_tables_[table_index];
+ Node* table_address = function_tables_[table_index].table_addr;
Node* table = graph()->NewNode(
jsgraph()->machine()->Load(MachineType::AnyTagged()), table_address,
jsgraph()->IntPtrConstant(0), *effect_, *control_);
- Node* signatures_address = signature_tables_[table_index];
- Node* signatures = graph()->NewNode(
- jsgraph()->machine()->Load(MachineType::AnyTagged()), signatures_address,
- jsgraph()->IntPtrConstant(0), *effect_, *control_);
// Load signature from the table and check.
// The table is a FixedArray; signatures are encoded as SMIs.
- // [sig1, sig2, sig3, ...., code1, code2, code3 ...]
+ // [sig1, code1, sig2, code2, sig3, code3, ...]
+ static_assert(compiler::kFunctionTableEntrySize == 2, "consistency");
+ static_assert(compiler::kFunctionTableSignatureOffset == 0, "consistency");
+ static_assert(compiler::kFunctionTableCodeOffset == 1, "consistency");
ElementAccess access = AccessBuilder::ForFixedArrayElement();
const int fixed_offset = access.header_size - access.tag();
- {
- Node* load_sig = graph()->NewNode(
- machine->Load(MachineType::AnyTagged()), signatures,
- graph()->NewNode(machine->Int32Add(),
- graph()->NewNode(machine->Word32Shl(), key,
- Int32Constant(kPointerSizeLog2)),
- Int32Constant(fixed_offset)),
- *effect_, *control_);
- int32_t canonical_sig_num = env_->module->signature_ids[sig_index];
- CHECK_GE(sig_index, 0);
- Node* sig_match =
- graph()->NewNode(machine->WordEqual(), load_sig,
- jsgraph()->SmiConstant(canonical_sig_num));
- TrapIfFalse(wasm::kTrapFuncSigMismatch, sig_match, position);
- }
+ Node* key_offset = graph()->NewNode(machine->Word32Shl(), key,
+ Int32Constant(kPointerSizeLog2 + 1));
+ Node* load_sig =
+ graph()->NewNode(machine->Load(MachineType::AnyTagged()), table,
+ graph()->NewNode(machine->Int32Add(), key_offset,
+ Int32Constant(fixed_offset)),
+ *effect_, *control_);
+ int32_t canonical_sig_num = env_->module->signature_ids[sig_index];
+ CHECK_GE(sig_index, 0);
+ Node* sig_match = graph()->NewNode(machine->WordEqual(), load_sig,
+ jsgraph()->SmiConstant(canonical_sig_num));
+ TrapIfFalse(wasm::kTrapFuncSigMismatch, sig_match, position);
// Load code object from the table. It is held by a Foreign.
Node* entry = graph()->NewNode(
machine->Load(MachineType::AnyTagged()), table,
- graph()->NewNode(machine->Int32Add(),
- graph()->NewNode(machine->Word32Shl(), key,
- Int32Constant(kPointerSizeLog2)),
- Uint32Constant(fixed_offset)),
+ graph()->NewNode(machine->Int32Add(), key_offset,
+ Uint32Constant(fixed_offset + kPointerSize)),
*effect_, *control_);
if (FLAG_wasm_jit_to_native) {
Node* address = graph()->NewNode(
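A quick worked example of the interleaved-table addressing above (numbers assumed for illustration, not taken from the diff): on a 64-bit target kPointerSizeLog2 is 3, so key_offset = key << 4 accounts for the two pointer-sized slots per entry; for key = 3 that is 48 bytes, the signature SMI is loaded at fixed_offset + 48, and the code entry follows it at fixed_offset + 48 + kPointerSize = fixed_offset + 56.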
@@ -2715,12 +2819,8 @@ Node* WasmGraphBuilder::BuildChangeSmiToInt32(Node* value) {
}
Node* WasmGraphBuilder::BuildChangeUint32ToSmi(Node* value) {
- if (jsgraph()->machine()->Is64()) {
- value =
- graph()->NewNode(jsgraph()->machine()->ChangeUint32ToUint64(), value);
- }
- return graph()->NewNode(jsgraph()->machine()->WordShl(), value,
- BuildSmiShiftBitsConstant());
+ return graph()->NewNode(jsgraph()->machine()->WordShl(),
+ Uint32ToUintptr(value), BuildSmiShiftBitsConstant());
}
Node* WasmGraphBuilder::BuildChangeSmiToFloat64(Node* value) {
@@ -2826,7 +2926,7 @@ void WasmGraphBuilder::BuildJSToWasmWrapper(WasmCodeWrapper wasm_code,
// the wasm function could not be re-imported into another wasm module.
int pos = 0;
args[pos++] = wasm_code_node;
- args[pos++] = wasm_context_;
+ args[pos++] = wasm_context_.get();
args[pos++] = *effect_;
args[pos++] = *control_;
@@ -2841,7 +2941,7 @@ void WasmGraphBuilder::BuildJSToWasmWrapper(WasmCodeWrapper wasm_code,
int pos = 0;
args[pos++] = wasm_code_node;
- args[pos++] = wasm_context_;
+ args[pos++] = wasm_context_.get();
// Convert JS parameters to wasm numbers.
for (int i = 0; i < wasm_count; ++i) {
@@ -3177,7 +3277,7 @@ void WasmGraphBuilder::BuildCWasmEntry(Address wasm_context_address) {
int pos = 0;
args[pos++] = code_obj;
- args[pos++] = wasm_context_;
+ args[pos++] = wasm_context_.get();
int offset = 0;
for (wasm::ValueType type : sig_->parameters()) {
@@ -3232,7 +3332,7 @@ void WasmGraphBuilder::InitContextCache(WasmContextCacheNodes* context_cache) {
// Load the memory start.
Node* mem_start = graph()->NewNode(
- jsgraph()->machine()->Load(MachineType::UintPtr()), wasm_context_,
+ jsgraph()->machine()->Load(MachineType::UintPtr()), wasm_context_.get(),
jsgraph()->Int32Constant(
static_cast<int32_t>(offsetof(WasmContext, mem_start))),
*effect_, *control_);
@@ -3241,7 +3341,7 @@ void WasmGraphBuilder::InitContextCache(WasmContextCacheNodes* context_cache) {
// Load the memory size.
Node* mem_size = graph()->NewNode(
- jsgraph()->machine()->Load(MachineType::Uint32()), wasm_context_,
+ jsgraph()->machine()->Load(MachineType::Uint32()), wasm_context_.get(),
jsgraph()->Int32Constant(
static_cast<int32_t>(offsetof(WasmContext, mem_size))),
*effect_, *control_);
@@ -3251,7 +3351,7 @@ void WasmGraphBuilder::InitContextCache(WasmContextCacheNodes* context_cache) {
if (untrusted_code_mitigations_) {
// Load the memory mask.
Node* mem_mask = graph()->NewNode(
- jsgraph()->machine()->Load(MachineType::Uint32()), wasm_context_,
+ jsgraph()->machine()->Load(MachineType::Uint32()), wasm_context_.get(),
jsgraph()->Int32Constant(
static_cast<int32_t>(offsetof(WasmContext, mem_mask))),
*effect_, *control_);
@@ -3353,12 +3453,12 @@ void WasmGraphBuilder::GetGlobalBaseAndOffset(MachineType mem_type,
// possible to express in the graph, and would essentially constitute a
// "mem2reg" optimization in TurboFan.
globals_start_ = graph()->NewNode(
- jsgraph()->machine()->Load(MachineType::UintPtr()), wasm_context_,
+ jsgraph()->machine()->Load(MachineType::UintPtr()), wasm_context_.get(),
jsgraph()->Int32Constant(
static_cast<int32_t>(offsetof(WasmContext, globals_start))),
graph()->start(), graph()->start());
}
- *base_node = globals_start_;
+ *base_node = globals_start_.get();
*offset_node = jsgraph()->Int32Constant(offset);
if (mem_type == MachineType::Simd128() && offset != 0) {
@@ -3392,7 +3492,7 @@ Node* WasmGraphBuilder::CurrentMemoryPages() {
}
return graph()->NewNode(
jsgraph()->machine()->Word32Shr(), mem_size,
- jsgraph()->Int32Constant(WhichPowerOf2(wasm::WasmModule::kPageSize)));
+ jsgraph()->Int32Constant(WhichPowerOf2(wasm::kWasmPageSize)));
}
void WasmGraphBuilder::EnsureFunctionTableNodes() {
@@ -3401,25 +3501,21 @@ void WasmGraphBuilder::EnsureFunctionTableNodes() {
for (size_t i = 0; i < tables_size; ++i) {
wasm::GlobalHandleAddress function_handle_address =
env_->function_tables[i];
- wasm::GlobalHandleAddress signature_handle_address =
- env_->signature_tables[i];
- function_tables_.push_back(jsgraph()->RelocatableIntPtrConstant(
+ Node* table_addr = jsgraph()->RelocatableIntPtrConstant(
reinterpret_cast<intptr_t>(function_handle_address),
- RelocInfo::WASM_GLOBAL_HANDLE));
- signature_tables_.push_back(jsgraph()->RelocatableIntPtrConstant(
- reinterpret_cast<intptr_t>(signature_handle_address),
- RelocInfo::WASM_GLOBAL_HANDLE));
+ RelocInfo::WASM_GLOBAL_HANDLE);
uint32_t table_size = env_->module->function_tables[i].initial_size;
- function_table_sizes_.push_back(jsgraph()->RelocatableInt32Constant(
+ Node* size = jsgraph()->RelocatableInt32Constant(
static_cast<uint32_t>(table_size),
- RelocInfo::WASM_FUNCTION_TABLE_SIZE_REFERENCE));
+ RelocInfo::WASM_FUNCTION_TABLE_SIZE_REFERENCE);
+ function_tables_.push_back({table_addr, size});
}
}
Node* WasmGraphBuilder::BuildModifyThreadInWasmFlag(bool new_value) {
// TODO(eholk): generate code to modify the thread-local storage directly,
// rather than calling the runtime.
- if (!trap_handler::UseTrapHandler()) {
+ if (!use_trap_handler()) {
return *control_;
}
@@ -3507,46 +3603,55 @@ Node* WasmGraphBuilder::SetGlobal(uint32_t index, Node* val) {
return node;
}
-Node* WasmGraphBuilder::BoundsCheckMem(MachineType memtype, Node* index,
+Node* WasmGraphBuilder::BoundsCheckMem(uint8_t access_size, Node* index,
uint32_t offset,
wasm::WasmCodePosition position,
EnforceBoundsCheck enforce_check) {
- if (FLAG_wasm_no_bounds_checks) return index;
+ if (FLAG_wasm_no_bounds_checks) return Uint32ToUintptr(index);
DCHECK_NOT_NULL(context_cache_);
Node* mem_size = context_cache_->mem_size;
DCHECK_NOT_NULL(mem_size);
auto m = jsgraph()->machine();
- if (trap_handler::UseTrapHandler() && enforce_check == kCanOmitBoundsCheck) {
+ if (use_trap_handler() && enforce_check == kCanOmitBoundsCheck) {
// Simply zero out the 32-bits on 64-bit targets and let the trap handler
// do its job.
- return m->Is64() ? graph()->NewNode(m->ChangeUint32ToUint64(), index)
- : index;
+ return Uint32ToUintptr(index);
}
- uint32_t min_size = env_->module->initial_pages * wasm::WasmModule::kPageSize;
+ uint32_t min_size = env_->module->initial_pages * wasm::kWasmPageSize;
uint32_t max_size =
(env_->module->has_maximum_pages ? env_->module->maximum_pages
: wasm::kV8MaxWasmMemoryPages) *
- wasm::WasmModule::kPageSize;
-
- byte access_size = wasm::WasmOpcodes::MemSize(memtype);
+ wasm::kWasmPageSize;
if (access_size > max_size || offset > max_size - access_size) {
// The access will be out of bounds, even for the largest memory.
- TrapIfEq32(wasm::kTrapMemOutOfBounds, jsgraph()->Int32Constant(0), 0,
- position);
+ TrapIfEq32(wasm::kTrapMemOutOfBounds, Int32Constant(0), 0, position);
return jsgraph()->IntPtrConstant(0);
}
- uint32_t end_offset = offset + access_size;
-
- if (end_offset > min_size) {
+ DCHECK_LE(1, access_size);
+ // This computation cannot overflow, since
+ // {offset <= max_size - access_size <= kMaxUint32 - access_size}.
+ // It also cannot underflow, since {access_size >= 1}.
+ uint32_t end_offset = offset + access_size - 1;
+ Node* end_offset_node = Int32Constant(end_offset);
+
+ // The accessed memory is [index + offset, index + end_offset].
+ // Check that the last read byte (at {index + end_offset}) is in bounds.
+ // 1) Check that {end_offset < mem_size}. This also ensures that we can safely
+ //    compute {effective_size} as {mem_size - end_offset}.
+ // {effective_size} is >= 1 if condition 1) holds.
+ // 2) Check that {index + end_offset < mem_size} by
+ // - computing {effective_size} as {mem_size - end_offset} and
+ // - checking that {index < effective_size}.
+
+ if (end_offset >= min_size) {
// The end offset is larger than the smallest memory.
// Dynamically check the end offset against the actual memory size, which
// is not known at compile time.
- Node* cond =
- graph()->NewNode(jsgraph()->machine()->Uint32LessThanOrEqual(),
- jsgraph()->Int32Constant(end_offset), mem_size);
+ Node* cond = graph()->NewNode(jsgraph()->machine()->Uint32LessThan(),
+ end_offset_node, mem_size);
TrapIfFalse(wasm::kTrapMemOutOfBounds, cond, position);
} else {
// The end offset is within the bounds of the smallest memory, so only
@@ -3554,22 +3659,17 @@ Node* WasmGraphBuilder::BoundsCheckMem(MachineType memtype, Node* index,
Uint32Matcher match(index);
if (match.HasValue()) {
uint32_t index_val = match.Value();
- if (index_val <= min_size - end_offset) {
+ if (index_val < min_size - end_offset) {
// The input index is a constant and everything is statically within
// bounds of the smallest possible memory.
- return m->Is64() ? graph()->NewNode(m->ChangeUint32ToUint64(), index)
- : index;
+ return Uint32ToUintptr(index);
}
}
}
- // Compute the effective size of the memory, which is the size of the memory
- // minus the statically known offset, minus the byte size of the access minus
- // one.
- // This produces a positive number since {end_offset <= min_size <= mem_size}.
- Node* effective_size =
- graph()->NewNode(jsgraph()->machine()->Int32Sub(), mem_size,
- jsgraph()->Int32Constant(end_offset - 1));
+ // This produces a positive number, since {end_offset < min_size <= mem_size}.
+ Node* effective_size = graph()->NewNode(jsgraph()->machine()->Int32Sub(),
+ mem_size, end_offset_node);
// Introduce the actual bounds check.
Node* cond = graph()->NewNode(m->Uint32LessThan(), index, effective_size);
@@ -3581,7 +3681,7 @@ Node* WasmGraphBuilder::BoundsCheckMem(MachineType memtype, Node* index,
DCHECK_NOT_NULL(mem_mask);
index = graph()->NewNode(m->Word32And(), index, mem_mask);
}
- return m->Is64() ? graph()->NewNode(m->ChangeUint32ToUint64(), index) : index;
+ return Uint32ToUintptr(index);
}
const Operator* WasmGraphBuilder::GetSafeLoadOperator(int offset,
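To make the bounds-check comment above concrete, here is a minimal stand-alone sketch of the arithmetic (illustrative only; the real code emits graph nodes and additionally special-cases constant indices, trap-handler mode and untrusted-code masking):

#include <cstdint>

// Mirrors the new check: the access touches bytes [index + offset, index + end_offset],
// where end_offset = offset + access_size - 1, and the last byte must be below mem_size.
bool AccessInBounds(uint32_t index, uint32_t offset, uint32_t access_size,
                    uint32_t mem_size) {
  uint32_t end_offset = offset + access_size - 1;   // cannot overflow per the static check
  if (end_offset >= mem_size) return false;         // too large even for index == 0
  uint32_t effective_size = mem_size - end_offset;  // >= 1 at this point
  return index < effective_size;                    // i.e. index + end_offset < mem_size
}
// Example: offset = 4, access_size = 8 -> end_offset = 11; with mem_size = 65536,
// effective_size = 65525, so any index < 65525 keeps the last byte index + 11 <= 65535.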
@@ -3609,21 +3709,28 @@ Node* WasmGraphBuilder::TraceMemoryOperation(bool is_store,
MachineRepresentation rep,
Node* index, uint32_t offset,
wasm::WasmCodePosition position) {
+ int kAlign = 4; // Ensure that the LSB is 0, such that this looks like a Smi.
+ Node* info = graph()->NewNode(
+ jsgraph()->machine()->StackSlot(sizeof(wasm::MemoryTracingInfo), kAlign));
+
Node* address = graph()->NewNode(jsgraph()->machine()->Int32Add(),
Int32Constant(offset), index);
- Node* addr_low = BuildChangeInt32ToSmi(graph()->NewNode(
- jsgraph()->machine()->Word32And(), address, Int32Constant(0xffff)));
- Node* addr_high = BuildChangeInt32ToSmi(graph()->NewNode(
- jsgraph()->machine()->Word32Shr(), address, Int32Constant(16)));
- int32_t rep_i = static_cast<int32_t>(rep);
- Node* params[] = {
- jsgraph()->SmiConstant(is_store), // is_store
- jsgraph()->SmiConstant(rep_i), // mem rep
- addr_low, // address lower half word
- addr_high // address higher half word
+ auto store = [&](int offset, MachineRepresentation rep, Node* data) {
+ *effect_ = graph()->NewNode(
+ jsgraph()->machine()->Store(StoreRepresentation(rep, kNoWriteBarrier)),
+ info, jsgraph()->Int32Constant(offset), data, *effect_, *control_);
};
- Node* call =
- BuildCallToRuntime(Runtime::kWasmTraceMemory, params, arraysize(params));
+ // Store address, is_store, and mem_rep.
+ store(offsetof(wasm::MemoryTracingInfo, address),
+ MachineRepresentation::kWord32, address);
+ store(offsetof(wasm::MemoryTracingInfo, is_store),
+ MachineRepresentation::kWord8,
+ jsgraph()->Int32Constant(is_store ? 1 : 0));
+ store(offsetof(wasm::MemoryTracingInfo, mem_rep),
+ MachineRepresentation::kWord8,
+ jsgraph()->Int32Constant(static_cast<int>(rep)));
+
+ Node* call = BuildCallToRuntime(Runtime::kWasmTraceMemory, &info, 1);
SetSourcePosition(call, position);
return call;
}
@@ -3636,11 +3743,12 @@ Node* WasmGraphBuilder::LoadMem(wasm::ValueType type, MachineType memtype,
// Wasm semantics throw on OOB. Introduce explicit bounds check and
// conditioning when not using the trap handler.
- index = BoundsCheckMem(memtype, index, offset, position, kCanOmitBoundsCheck);
+ index = BoundsCheckMem(wasm::WasmOpcodes::MemSize(memtype), index, offset,
+ position, kCanOmitBoundsCheck);
if (memtype.representation() == MachineRepresentation::kWord8 ||
jsgraph()->machine()->UnalignedLoadSupported(memtype.representation())) {
- if (trap_handler::UseTrapHandler()) {
+ if (use_trap_handler()) {
load = graph()->NewNode(jsgraph()->machine()->ProtectedLoad(memtype),
MemBuffer(offset), index, *effect_, *control_);
SetSourcePosition(load, position);
@@ -3650,7 +3758,7 @@ Node* WasmGraphBuilder::LoadMem(wasm::ValueType type, MachineType memtype,
}
} else {
// TODO(eholk): Support unaligned loads with trap handlers.
- DCHECK(!trap_handler::UseTrapHandler());
+ DCHECK(!use_trap_handler());
load = graph()->NewNode(jsgraph()->machine()->UnalignedLoad(memtype),
MemBuffer(offset), index, *effect_, *control_);
}
@@ -3682,35 +3790,36 @@ Node* WasmGraphBuilder::LoadMem(wasm::ValueType type, MachineType memtype,
return load;
}
-Node* WasmGraphBuilder::StoreMem(MachineType memtype, Node* index,
+Node* WasmGraphBuilder::StoreMem(MachineRepresentation mem_rep, Node* index,
uint32_t offset, uint32_t alignment, Node* val,
wasm::WasmCodePosition position,
wasm::ValueType type) {
Node* store;
- index = BoundsCheckMem(memtype, index, offset, position, kCanOmitBoundsCheck);
+ index = BoundsCheckMem(wasm::WasmOpcodes::MemSize(mem_rep), index, offset,
+ position, kCanOmitBoundsCheck);
#if defined(V8_TARGET_BIG_ENDIAN)
- val = BuildChangeEndiannessStore(val, memtype, type);
+ val = BuildChangeEndiannessStore(val, mem_rep, type);
#endif
- if (memtype.representation() == MachineRepresentation::kWord8 ||
- jsgraph()->machine()->UnalignedStoreSupported(memtype.representation())) {
- if (trap_handler::UseTrapHandler()) {
- store = graph()->NewNode(
- jsgraph()->machine()->ProtectedStore(memtype.representation()),
- MemBuffer(offset), index, val, *effect_, *control_);
+ if (mem_rep == MachineRepresentation::kWord8 ||
+ jsgraph()->machine()->UnalignedStoreSupported(mem_rep)) {
+ if (use_trap_handler()) {
+ store =
+ graph()->NewNode(jsgraph()->machine()->ProtectedStore(mem_rep),
+ MemBuffer(offset), index, val, *effect_, *control_);
SetSourcePosition(store, position);
} else {
- StoreRepresentation rep(memtype.representation(), kNoWriteBarrier);
+ StoreRepresentation rep(mem_rep, kNoWriteBarrier);
store =
graph()->NewNode(jsgraph()->machine()->Store(rep), MemBuffer(offset),
index, val, *effect_, *control_);
}
} else {
// TODO(eholk): Support unaligned stores with trap handlers.
- DCHECK(!trap_handler::UseTrapHandler());
- UnalignedStoreRepresentation rep(memtype.representation());
+ DCHECK(!use_trap_handler());
+ UnalignedStoreRepresentation rep(mem_rep);
store =
graph()->NewNode(jsgraph()->machine()->UnalignedStore(rep),
MemBuffer(offset), index, val, *effect_, *control_);
@@ -3719,8 +3828,7 @@ Node* WasmGraphBuilder::StoreMem(MachineType memtype, Node* index,
*effect_ = store;
if (FLAG_wasm_trace_memory) {
- TraceMemoryOperation(true, memtype.representation(), index, offset,
- position);
+ TraceMemoryOperation(true, mem_rep, index, offset, position);
}
return store;
@@ -3772,10 +3880,7 @@ Node* WasmGraphBuilder::BuildAsmjsLoadMem(MachineType type, Node* index) {
graph()->NewNode(jsgraph()->machine()->Word32And(), index, mem_mask);
}
- if (jsgraph()->machine()->Is64()) {
- index =
- graph()->NewNode(jsgraph()->machine()->ChangeUint32ToUint64(), index);
- }
+ index = Uint32ToUintptr(index);
Node* load = graph()->NewNode(jsgraph()->machine()->Load(type), mem_start,
index, *effect_, bounds_check.if_true);
Node* value_phi =
@@ -3788,6 +3893,11 @@ Node* WasmGraphBuilder::BuildAsmjsLoadMem(MachineType type, Node* index) {
return value_phi;
}
+Node* WasmGraphBuilder::Uint32ToUintptr(Node* node) {
+ if (jsgraph()->machine()->Is32()) return node;
+ return graph()->NewNode(jsgraph()->machine()->ChangeUint32ToUint64(), node);
+}
+
Node* WasmGraphBuilder::BuildAsmjsStoreMem(MachineType type, Node* index,
Node* val) {
DCHECK_NOT_NULL(context_cache_);
@@ -3814,10 +3924,7 @@ Node* WasmGraphBuilder::BuildAsmjsStoreMem(MachineType type, Node* index,
graph()->NewNode(jsgraph()->machine()->Word32And(), index, mem_mask);
}
- if (jsgraph()->machine()->Is64()) {
- index =
- graph()->NewNode(jsgraph()->machine()->ChangeUint32ToUint64(), index);
- }
+ index = Uint32ToUintptr(index);
const Operator* store_op = jsgraph()->machine()->Store(StoreRepresentation(
type.representation(), WriteBarrierKind::kNoWriteBarrier));
Node* store = graph()->NewNode(store_op, mem_start, index, val, *effect_,
@@ -4302,22 +4409,24 @@ Node* WasmGraphBuilder::AtomicOp(wasm::WasmOpcode opcode, Node* const* inputs,
// TODO(gdeepti): Add alignment validation, traps on misalignment
Node* node;
switch (opcode) {
-#define BUILD_ATOMIC_BINOP(Name, Operation, Type) \
- case wasm::kExpr##Name: { \
- Node* index = BoundsCheckMem(MachineType::Type(), inputs[0], offset, \
- position, kNeedsBoundsCheck); \
- node = graph()->NewNode( \
- jsgraph()->machine()->Atomic##Operation(MachineType::Type()), \
- MemBuffer(offset), index, inputs[1], *effect_, *control_); \
- break; \
+#define BUILD_ATOMIC_BINOP(Name, Operation, Type) \
+ case wasm::kExpr##Name: { \
+ Node* index = \
+ BoundsCheckMem(wasm::WasmOpcodes::MemSize(MachineType::Type()), \
+ inputs[0], offset, position, kNeedsBoundsCheck); \
+ node = graph()->NewNode( \
+ jsgraph()->machine()->Atomic##Operation(MachineType::Type()), \
+ MemBuffer(offset), index, inputs[1], *effect_, *control_); \
+ break; \
}
ATOMIC_BINOP_LIST(BUILD_ATOMIC_BINOP)
#undef BUILD_ATOMIC_BINOP
#define BUILD_ATOMIC_TERNARY_OP(Name, Operation, Type) \
case wasm::kExpr##Name: { \
- Node* index = BoundsCheckMem(MachineType::Type(), inputs[0], offset, \
- position, kNeedsBoundsCheck); \
+ Node* index = \
+ BoundsCheckMem(wasm::WasmOpcodes::MemSize(MachineType::Type()), \
+ inputs[0], offset, position, kNeedsBoundsCheck); \
node = graph()->NewNode( \
jsgraph()->machine()->Atomic##Operation(MachineType::Type()), \
MemBuffer(offset), index, inputs[1], inputs[2], *effect_, *control_); \
@@ -4326,26 +4435,28 @@ Node* WasmGraphBuilder::AtomicOp(wasm::WasmOpcode opcode, Node* const* inputs,
ATOMIC_TERNARY_LIST(BUILD_ATOMIC_TERNARY_OP)
#undef BUILD_ATOMIC_TERNARY_OP
-#define BUILD_ATOMIC_LOAD_OP(Name, Type) \
- case wasm::kExpr##Name: { \
- Node* index = BoundsCheckMem(MachineType::Type(), inputs[0], offset, \
- position, kNeedsBoundsCheck); \
- node = graph()->NewNode( \
- jsgraph()->machine()->AtomicLoad(MachineType::Type()), \
- MemBuffer(offset), index, *effect_, *control_); \
- break; \
+#define BUILD_ATOMIC_LOAD_OP(Name, Type) \
+ case wasm::kExpr##Name: { \
+ Node* index = \
+ BoundsCheckMem(wasm::WasmOpcodes::MemSize(MachineType::Type()), \
+ inputs[0], offset, position, kNeedsBoundsCheck); \
+ node = graph()->NewNode( \
+ jsgraph()->machine()->AtomicLoad(MachineType::Type()), \
+ MemBuffer(offset), index, *effect_, *control_); \
+ break; \
}
ATOMIC_LOAD_LIST(BUILD_ATOMIC_LOAD_OP)
#undef BUILD_ATOMIC_LOAD_OP
-#define BUILD_ATOMIC_STORE_OP(Name, Type, Rep) \
- case wasm::kExpr##Name: { \
- Node* index = BoundsCheckMem(MachineType::Type(), inputs[0], offset, \
- position, kNeedsBoundsCheck); \
- node = graph()->NewNode( \
- jsgraph()->machine()->AtomicStore(MachineRepresentation::Rep), \
- MemBuffer(offset), index, inputs[1], *effect_, *control_); \
- break; \
+#define BUILD_ATOMIC_STORE_OP(Name, Type, Rep) \
+ case wasm::kExpr##Name: { \
+ Node* index = \
+ BoundsCheckMem(wasm::WasmOpcodes::MemSize(MachineType::Type()), \
+ inputs[0], offset, position, kNeedsBoundsCheck); \
+ node = graph()->NewNode( \
+ jsgraph()->machine()->AtomicStore(MachineRepresentation::Rep), \
+ MemBuffer(offset), index, inputs[1], *effect_, *control_); \
+ break; \
}
ATOMIC_STORE_LIST(BUILD_ATOMIC_STORE_OP)
#undef BUILD_ATOMIC_STORE_OP
@@ -4391,7 +4502,8 @@ void RecordFunctionCompilation(CodeEventListener::LogEventsAndTags tag,
Handle<Code> CompileJSToWasmWrapper(Isolate* isolate, wasm::WasmModule* module,
WasmCodeWrapper wasm_code, uint32_t index,
- Address wasm_context_address) {
+ Address wasm_context_address,
+ bool use_trap_handler) {
const wasm::WasmFunction* func = &module->functions[index];
//----------------------------------------------------------------------------
@@ -4410,15 +4522,11 @@ Handle<Code> CompileJSToWasmWrapper(Isolate* isolate, wasm::WasmModule* module,
Node* effect = nullptr;
// TODO(titzer): compile JS to WASM wrappers without a {ModuleEnv}.
- ModuleEnv env = {
- module,
- std::vector<Address>(), // function_tables
- std::vector<Address>(), // signature_tables
- // TODO(mtrofin): remove these 2 lines when we don't need
- // FLAG_wasm_jit_to_native
- std::vector<Handle<Code>>(), // function_code
- BUILTIN_CODE(isolate, Illegal) // default_function_code
- };
+ ModuleEnv env(module,
+ // TODO(mtrofin): remove the Illegal builtin when we don't need
+ // FLAG_wasm_jit_to_native
+ BUILTIN_CODE(isolate, Illegal), // default_function_code
+ use_trap_handler);
WasmGraphBuilder builder(&env, &zone, &jsgraph,
CEntryStub(isolate, 1).GetCode(), func->sig);
@@ -4470,9 +4578,7 @@ Handle<Code> CompileJSToWasmWrapper(Isolate* isolate, wasm::WasmModule* module,
namespace {
void ValidateImportWrapperReferencesImmovables(Handle<Code> wrapper) {
-#if !DEBUG
- return;
-#endif
+#ifdef DEBUG
// We expect the only embedded objects to be those originating from
// a snapshot, which are immovable.
DisallowHeapAllocation no_gc;
@@ -4493,7 +4599,7 @@ void ValidateImportWrapperReferencesImmovables(Handle<Code> wrapper) {
default:
UNREACHABLE();
}
- CHECK_NOT_NULL(target);
+ DCHECK_NOT_NULL(target);
bool is_immovable =
target->IsSmi() || Heap::IsImmovable(HeapObject::cast(target));
bool is_allowed_stub = false;
@@ -4503,15 +4609,16 @@ void ValidateImportWrapperReferencesImmovables(Handle<Code> wrapper) {
code->kind() == Code::STUB &&
CodeStub::MajorKeyFromKey(code->stub_key()) == CodeStub::DoubleToI;
}
- CHECK(is_immovable || is_allowed_stub);
+ DCHECK(is_immovable || is_allowed_stub);
}
+#endif
}
} // namespace
Handle<Code> CompileWasmToJSWrapper(
Isolate* isolate, Handle<JSReceiver> target, wasm::FunctionSig* sig,
- uint32_t index, wasm::ModuleOrigin origin,
+ uint32_t index, wasm::ModuleOrigin origin, bool use_trap_handler,
Handle<FixedArray> global_js_imports_table) {
//----------------------------------------------------------------------------
// Create the Graph
@@ -4532,7 +4639,8 @@ Handle<Code> CompileWasmToJSWrapper(
origin == wasm::kAsmJsOrigin ? new (&zone) SourcePositionTable(&graph)
: nullptr;
- WasmGraphBuilder builder(nullptr, &zone, &jsgraph,
+ ModuleEnv env(nullptr, Handle<Code>::null(), use_trap_handler);
+ WasmGraphBuilder builder(&env, &zone, &jsgraph,
CEntryStub(isolate, 1).GetCode(), sig,
source_position_table);
builder.set_control_ptr(&control);
@@ -4618,7 +4726,10 @@ Handle<Code> CompileWasmToWasmWrapper(Isolate* isolate, WasmCodeWrapper target,
Node* control = nullptr;
Node* effect = nullptr;
- WasmGraphBuilder builder(nullptr, &zone, &jsgraph, Handle<Code>(), sig);
+ ModuleEnv env(
+ nullptr, Handle<Code>::null(),
+ !target.IsCodeObject() && target.GetWasmCode()->HasTrapHandlerIndex());
+ WasmGraphBuilder builder(&env, &zone, &jsgraph, Handle<Code>(), sig);
builder.set_control_ptr(&control);
builder.set_effect_ptr(&effect);
builder.BuildWasmToWasmWrapper(target, new_wasm_context_address);
@@ -4804,13 +4915,6 @@ Handle<Code> CompileCWasmEntry(Isolate* isolate, wasm::FunctionSig* sig,
SourcePositionTable* WasmCompilationUnit::BuildGraphForWasmFunction(
double* decode_ms) {
-#if DEBUG
- if (env_) {
- size_t tables_size = env_->module->function_tables.size();
- DCHECK_EQ(tables_size, env_->function_tables.size());
- DCHECK_EQ(tables_size, env_->signature_tables.size());
- }
-#endif
base::ElapsedTimer decode_timer;
if (FLAG_trace_wasm_decode_time) {
@@ -4825,7 +4929,6 @@ SourcePositionTable* WasmCompilationUnit::BuildGraphForWasmFunction(
runtime_exception_support_);
tf_.graph_construction_result_ =
wasm::BuildTFGraph(isolate_->allocator(), &builder, func_body_);
-
if (tf_.graph_construction_result_.failed()) {
if (FLAG_trace_wasm_compiler) {
OFStream os(stdout);
@@ -4844,7 +4947,8 @@ SourcePositionTable* WasmCompilationUnit::BuildGraphForWasmFunction(
if (func_index_ >= FLAG_trace_wasm_ast_start &&
func_index_ < FLAG_trace_wasm_ast_end) {
- PrintRawWasmCode(isolate_->allocator(), func_body_, env_->module);
+ PrintRawWasmCode(isolate_->allocator(), func_body_, env_->module,
+ wasm::kPrintLocals);
}
if (FLAG_trace_wasm_decode_time) {
*decode_ms = decode_timer.Elapsed().InMillisecondsF();
@@ -4857,9 +4961,7 @@ Vector<const char> GetDebugName(Zone* zone, wasm::WasmName name, int index) {
if (!name.is_empty()) {
return name;
}
-#ifndef DEBUG
- return {};
-#endif
+#ifdef DEBUG
constexpr int kBufferLength = 15;
EmbeddedVector<char, kBufferLength> name_vector;
@@ -4869,6 +4971,9 @@ Vector<const char> GetDebugName(Zone* zone, wasm::WasmName name, int index) {
char* index_name = zone->NewArray<char>(name_len);
memcpy(index_name, name_vector.start(), name_len);
return Vector<const char>(index_name, name_len);
+#else
+ return {};
+#endif
}
} // namespace
@@ -5090,7 +5195,7 @@ WasmCodeWrapper WasmCompilationUnit::FinishTurbofanCompilation(
desc, tf_.job_->compilation_info()->wasm_code_desc()->frame_slot_count,
func_index_,
tf_.job_->compilation_info()->wasm_code_desc()->safepoint_table_offset,
- protected_instructions_);
+ std::move(protected_instructions_));
if (!code) {
return WasmCodeWrapper(code);
}
@@ -5107,13 +5212,24 @@ WasmCodeWrapper WasmCompilationUnit::FinishTurbofanCompilation(
MaybeHandle<HandlerTable> handler_table =
tf_.job_->compilation_info()->wasm_code_desc()->handler_table;
- int function_index_as_int = static_cast<int>(func_index_);
native_module_->compiled_module()->source_positions()->set(
- function_index_as_int, *source_positions);
+ func_index_, *source_positions);
if (!handler_table.is_null()) {
native_module_->compiled_module()->handler_table()->set(
- function_index_as_int, *handler_table.ToHandleChecked());
+ func_index_, *handler_table.ToHandleChecked());
}
+
+#ifdef ENABLE_DISASSEMBLER
+  // Note: only do this after setting source positions, as they will be
+  // accessed and printed here.
+ if (FLAG_print_code || FLAG_print_wasm_code) {
+ // TODO(wasm): Use proper log files, here and elsewhere.
+ PrintF("--- Native Wasm code ---\n");
+ code->Print(isolate_);
+ PrintF("--- End code ---\n");
+ }
+#endif
+
// TODO(mtrofin): this should probably move up in the common caller,
// once liftoff has source positions. Until then, we'd need to handle
// undefined values, which is complicating the code.
@@ -5147,21 +5263,21 @@ WasmCodeWrapper WasmCompilationUnit::FinishLiftoffCompilation(
wasm::ErrorThrower* thrower) {
CodeDesc desc;
liftoff_.asm_.GetCode(isolate_, &desc);
+
+ Handle<ByteArray> source_positions =
+ liftoff_.source_position_table_builder_.ToSourcePositionTable(isolate_);
+
WasmCodeWrapper ret;
if (!FLAG_wasm_jit_to_native) {
Handle<Code> code;
- code = isolate_->factory()->NewCode(desc, Code::WASM_FUNCTION, code);
-#ifdef ENABLE_DISASSEMBLER
- if (FLAG_print_code || FLAG_print_wasm_code) {
- // TODO(wasm): Use proper log files, here and elsewhere.
- OFStream os(stdout);
- os << "--- Wasm liftoff code ---\n";
- EmbeddedVector<char, 32> func_name;
- func_name.Truncate(SNPrintF(func_name, "wasm#%d-liftoff", func_index_));
- code->Disassemble(func_name.start(), os);
- os << "--- End code ---\n";
- }
-#endif
+ code = isolate_->factory()->NewCode(
+ desc, Code::WASM_FUNCTION, code, Builtins::kNoBuiltinId,
+ MaybeHandle<HandlerTable>(), source_positions,
+ MaybeHandle<DeoptimizationData>(), kMovable,
+ 0, // stub_key
+ false, // is_turbofanned
+ liftoff_.asm_.GetTotalFrameSlotCount(), // stack_slots
+ liftoff_.safepoint_table_offset_);
if (isolate_->logger()->is_logging_code_events() ||
isolate_->is_profiling()) {
RecordFunctionCompilation(CodeEventListener::FUNCTION_TAG, isolate_, code,
@@ -5169,15 +5285,34 @@ WasmCodeWrapper WasmCompilationUnit::FinishLiftoffCompilation(
}
PackProtectedInstructions(code);
- return WasmCodeWrapper(code);
+ ret = WasmCodeWrapper(code);
} else {
- // TODO(mtrofin): figure a way to raise events; also, disassembly.
- // Consider lifting them both to FinishCompilation.
- return WasmCodeWrapper(native_module_->AddCode(
- desc, liftoff_.asm_.GetTotalFrameSlotCount(), func_index_,
- liftoff_.asm_.GetSafepointTableOffset(), protected_instructions_,
- true));
+ // TODO(mtrofin): figure a way to raise events.
+ // Consider lifting it to FinishCompilation.
+ native_module_->compiled_module()->source_positions()->set(
+ func_index_, *source_positions);
+ ret = WasmCodeWrapper(
+ native_module_->AddCode(desc, liftoff_.asm_.GetTotalFrameSlotCount(),
+ func_index_, liftoff_.safepoint_table_offset_,
+ std::move(protected_instructions_), true));
}
+#ifdef ENABLE_DISASSEMBLER
+ if (FLAG_print_code || FLAG_print_wasm_code) {
+ // TODO(wasm): Use proper log files, here and elsewhere.
+ OFStream os(stdout);
+ os << "--- Wasm liftoff code ---\n";
+ EmbeddedVector<char, 64> func_name;
+ if (func_name_.start() != nullptr) {
+ SNPrintF(func_name, "#%d:%.*s", func_index(), func_name_.length(),
+ func_name_.start());
+ } else {
+ SNPrintF(func_name, "wasm#%d", func_index());
+ }
+ ret.Disassemble(func_name.start(), isolate_, os);
+ os << "--- End code ---\n";
+ }
+#endif
+ return ret;
}
// static
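
Illustrative sketch, not part of the diff: how the use_trap_handler flag threaded through the wrapper compilers above reaches the graph builder. The wrappers now construct a ModuleEnv carrying the flag, and WasmGraphBuilder consults it through the env_->use_trap_handler accessor added in the header changes below. The stand-in types and the EmitMemoryAccess helper here are assumptions for illustration only.

struct ModuleEnvSketch {
  bool use_trap_handler;  // true: rely on the trap handler, false: emit bounds checks
};

class BuilderSketch {
 public:
  explicit BuilderSketch(const ModuleEnvSketch* env) : env_(env) {}
  bool use_trap_handler() const { return env_ && env_->use_trap_handler; }
  void EmitMemoryAccess() {
    if (use_trap_handler()) {
      // No explicit bounds check; an out-of-bounds access faults and the
      // signal handler converts the fault into a wasm trap.
    } else {
      // Emit an explicit bounds check before the access.
    }
  }
 private:
  const ModuleEnvSketch* env_;
};
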
diff --git a/deps/v8/src/compiler/wasm-compiler.h b/deps/v8/src/compiler/wasm-compiler.h
index 146f3044ca..22a2e1071e 100644
--- a/deps/v8/src/compiler/wasm-compiler.h
+++ b/deps/v8/src/compiler/wasm-compiler.h
@@ -43,25 +43,34 @@ class WasmCode;
namespace compiler {
+// Indirect function tables contain a <smi(sig), code> pair for each entry.
+enum FunctionTableEntries : int {
+ kFunctionTableSignatureOffset = 0,
+ kFunctionTableCodeOffset = 1,
+ kFunctionTableEntrySize = 2
+};
+constexpr inline int FunctionTableSigOffset(int i) {
+ return kFunctionTableEntrySize * i + kFunctionTableSignatureOffset;
+}
+constexpr inline int FunctionTableCodeOffset(int i) {
+ return kFunctionTableEntrySize * i + kFunctionTableCodeOffset;
+}
+
// The {ModuleEnv} encapsulates the module data that is used by the
// {WasmGraphBuilder} during graph building. It represents the parameters to
// which the compiled code should be specialized, including which code to call
// for direct calls {function_code}, which tables to use for indirect calls
// {function_tables}, memory start address and size {mem_start, mem_size},
-// as well as signature maps {signature_maps} and the module itself {module}.
+// as well as the module itself {module}.
// ModuleEnvs are shareable across multiple compilations.
struct ModuleEnv {
// A pointer to the decoded module's static representation.
const wasm::WasmModule* module;
- // The function tables are FixedArrays of code used to dispatch indirect
- // calls. (the same length as module.function_tables). We use the address
- // to a global handle to the FixedArray.
+  // The function tables are FixedArrays of <smi, code> pairs used to check
+  // signatures and dispatch indirect calls. They have the same length as
+  // module.function_tables. We use the address of a global handle to the
+  // FixedArray.
const std::vector<Address> function_tables;
- // The signatures tables are FixedArrays of SMIs used to check signatures
- // match at runtime.
- // (the same length as module.function_tables)
- // We use the address to a global handle to the FixedArray.
- const std::vector<Address> signature_tables;
// TODO(mtrofin): remove these 2 once we don't need FLAG_wasm_jit_to_native
// Contains the code objects to call for each direct call.
@@ -69,6 +78,25 @@ struct ModuleEnv {
const std::vector<Handle<Code>> function_code;
// If the default code is not a null handle, always use it for direct calls.
const Handle<Code> default_function_code;
+ // True if trap handling should be used in compiled code, rather than
+ // compiling in bounds checks for each memory access.
+ const bool use_trap_handler;
+
+ ModuleEnv(const wasm::WasmModule* module, Handle<Code> default_function_code,
+ bool use_trap_handler)
+ : module(module),
+ default_function_code(default_function_code),
+ use_trap_handler(use_trap_handler) {}
+
+ ModuleEnv(const wasm::WasmModule* module,
+ std::vector<Address> function_tables,
+ std::vector<Handle<Code>> function_code,
+ Handle<Code> default_function_code, bool use_trap_handler)
+ : module(module),
+ function_tables(std::move(function_tables)),
+ function_code(std::move(function_code)),
+ default_function_code(default_function_code),
+ use_trap_handler(use_trap_handler) {}
};
enum RuntimeExceptionSupport : bool {
@@ -114,6 +142,11 @@ class WasmCompilationUnit final {
struct LiftoffData {
wasm::LiftoffAssembler asm_;
+ int safepoint_table_offset_;
+ SourcePositionTableBuilder source_position_table_builder_;
+ // The {codegen_zone_} needs to survive until FinishCompilation. It's only
+ // rarely used (e.g. for runtime calls), so it's only allocated when needed.
+ std::unique_ptr<Zone> codegen_zone_;
explicit LiftoffData(Isolate* isolate) : asm_(isolate) {}
};
struct TurbofanData {
@@ -151,7 +184,7 @@ class WasmCompilationUnit final {
size_t memory_cost_ = 0;
wasm::NativeModule* native_module_;
bool lower_simd_;
- std::shared_ptr<std::vector<trap_handler::ProtectedInstructionData>>
+ std::unique_ptr<std::vector<trap_handler::ProtectedInstructionData>>
protected_instructions_;
CompilationMode mode_;
// {liftoff_} is valid if mode_ == kLiftoff, tf_ if mode_ == kTurbofan.
@@ -172,12 +205,13 @@ class WasmCompilationUnit final {
Handle<Code> CompileWasmToJSWrapper(Isolate* isolate, Handle<JSReceiver> target,
wasm::FunctionSig* sig, uint32_t index,
wasm::ModuleOrigin origin,
+ bool use_trap_handler,
Handle<FixedArray> global_js_imports_table);
// Wraps a given wasm code object, producing a code object.
V8_EXPORT_PRIVATE Handle<Code> CompileJSToWasmWrapper(
Isolate* isolate, wasm::WasmModule* module, WasmCodeWrapper wasm_code,
- uint32_t index, Address wasm_context_address);
+ uint32_t index, Address wasm_context_address, bool use_trap_handler);
// Wraps a wasm function, producing a code object that can be called from other
// wasm instances (the WasmContext address must be changed).
@@ -221,6 +255,8 @@ typedef ZoneVector<Node*> NodeVector;
class WasmGraphBuilder {
public:
enum EnforceBoundsCheck : bool { kNeedsBoundsCheck, kCanOmitBoundsCheck };
+ struct IntConvertOps;
+ struct FloatConvertOps;
WasmGraphBuilder(ModuleEnv* env, Zone* zone, JSGraph* graph,
Handle<Code> centry_stub, wasm::FunctionSig* sig,
@@ -351,7 +387,7 @@ class WasmGraphBuilder {
Node* LoadMem(wasm::ValueType type, MachineType memtype, Node* index,
uint32_t offset, uint32_t alignment,
wasm::WasmCodePosition position);
- Node* StoreMem(MachineType memtype, Node* index, uint32_t offset,
+ Node* StoreMem(MachineRepresentation mem_rep, Node* index, uint32_t offset,
uint32_t alignment, Node* val, wasm::WasmCodePosition position,
wasm::ValueType type);
static void PrintDebugName(Node* node);
@@ -413,36 +449,43 @@ class WasmGraphBuilder {
const wasm::WasmModule* module() { return env_ ? env_->module : nullptr; }
+ bool use_trap_handler() const { return env_ && env_->use_trap_handler; }
+
private:
+ enum class NumericImplementation : uint8_t { kTrap, kSaturate };
static const int kDefaultBufferSize = 16;
- Zone* zone_;
- JSGraph* jsgraph_;
- Node* centry_stub_node_;
- ModuleEnv* env_ = nullptr;
- Node* wasm_context_ = nullptr;
- NodeVector signature_tables_;
- NodeVector function_tables_;
- NodeVector function_table_sizes_;
+ Zone* const zone_;
+ JSGraph* const jsgraph_;
+ Node* const centry_stub_node_;
+ // env_ == nullptr means we're not compiling Wasm functions, such as for
+ // wrappers or interpreter stubs.
+ ModuleEnv* const env_ = nullptr;
+ SetOncePointer<Node> wasm_context_;
+ struct FunctionTableNodes {
+ Node* table_addr;
+ Node* size;
+ };
+ ZoneVector<FunctionTableNodes> function_tables_;
Node** control_ = nullptr;
Node** effect_ = nullptr;
WasmContextCacheNodes* context_cache_ = nullptr;
- Node* globals_start_ = nullptr;
+ SetOncePointer<Node> globals_start_;
Node** cur_buffer_;
size_t cur_bufsize_;
Node* def_buffer_[kDefaultBufferSize];
bool has_simd_ = false;
bool needs_stack_check_ = false;
- bool untrusted_code_mitigations_ = true;
+ const bool untrusted_code_mitigations_ = true;
// If the runtime doesn't support exception propagation,
// we won't generate stack checks, and trap handling will also
// be generated differently.
- RuntimeExceptionSupport runtime_exception_support_;
+ const RuntimeExceptionSupport runtime_exception_support_;
- wasm::FunctionSig* sig_;
+ wasm::FunctionSig* const sig_;
SetOncePointer<const Operator> allocate_heap_number_operator_;
- compiler::SourcePositionTable* source_position_table_ = nullptr;
+ compiler::SourcePositionTable* const source_position_table_ = nullptr;
// Internal helper methods.
JSGraph* jsgraph() { return jsgraph_; }
@@ -451,11 +494,12 @@ class WasmGraphBuilder {
Node* String(const char* string);
Node* MemBuffer(uint32_t offset);
// BoundsCheckMem receives a uint32 {index} node and returns a ptrsize index.
- Node* BoundsCheckMem(MachineType memtype, Node* index, uint32_t offset,
+ Node* BoundsCheckMem(uint8_t access_size, Node* index, uint32_t offset,
wasm::WasmCodePosition, EnforceBoundsCheck);
+ Node* Uint32ToUintptr(Node*);
const Operator* GetSafeLoadOperator(int offset, wasm::ValueType type);
const Operator* GetSafeStoreOperator(int offset, wasm::ValueType type);
- Node* BuildChangeEndiannessStore(Node* node, MachineType type,
+ Node* BuildChangeEndiannessStore(Node* node, MachineRepresentation rep,
wasm::ValueType wasmtype = wasm::kWasmStmt);
Node* BuildChangeEndiannessLoad(Node* node, MachineType type,
wasm::ValueType wasmtype = wasm::kWasmStmt);
@@ -470,10 +514,25 @@ class WasmGraphBuilder {
Node* BuildF32CopySign(Node* left, Node* right);
Node* BuildF64CopySign(Node* left, Node* right);
- Node* BuildI32SConvertF32(Node* input, wasm::WasmCodePosition position);
- Node* BuildI32SConvertF64(Node* input, wasm::WasmCodePosition position);
- Node* BuildI32UConvertF32(Node* input, wasm::WasmCodePosition position);
- Node* BuildI32UConvertF64(Node* input, wasm::WasmCodePosition position);
+
+ Node* BuildI32ConvertOp(Node* input, wasm::WasmCodePosition position,
+ NumericImplementation impl, const Operator* op,
+ wasm::WasmOpcode check_op,
+ const IntConvertOps* int_ops,
+ const FloatConvertOps* float_ops);
+ Node* BuildConvertCheck(Node* test, Node* result, Node* input,
+ wasm::WasmCodePosition position,
+ NumericImplementation impl,
+ const IntConvertOps* int_ops,
+ const FloatConvertOps* float_ops);
+ Node* BuildI32SConvertF32(Node* input, wasm::WasmCodePosition position,
+ NumericImplementation impl);
+ Node* BuildI32SConvertF64(Node* input, wasm::WasmCodePosition position,
+ NumericImplementation impl);
+ Node* BuildI32UConvertF32(Node* input, wasm::WasmCodePosition position,
+ NumericImplementation impl);
+ Node* BuildI32UConvertF64(Node* input, wasm::WasmCodePosition position,
+ NumericImplementation impl);
Node* BuildI32Ctz(Node* input);
Node* BuildI32Popcnt(Node* input);
Node* BuildI64Ctz(Node* input);
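
Illustrative sketch, not part of the diff: the indexing implied by the FunctionTableEntries layout above. Each indirect-call table entry is a <smi(sig), code> pair, so entry i occupies slots 2*i and 2*i+1. The static_assert values are just a worked example.

constexpr int kEntrySize = 2;
constexpr int SigSlot(int i) { return kEntrySize * i + 0; }   // signature-check slot
constexpr int CodeSlot(int i) { return kEntrySize * i + 1; }  // dispatch-target slot
static_assert(SigSlot(3) == 6, "signature of entry 3 lives in slot 6");
static_assert(CodeSlot(3) == 7, "code of entry 3 lives in slot 7");
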
diff --git a/deps/v8/src/compiler/wasm-linkage.cc b/deps/v8/src/compiler/wasm-linkage.cc
index e231d15f10..e7bb3c164a 100644
--- a/deps/v8/src/compiler/wasm-linkage.cc
+++ b/deps/v8/src/compiler/wasm-linkage.cc
@@ -47,7 +47,7 @@ LinkageLocation stackloc(int i, MachineType type) {
// == ia32 ===================================================================
// ===========================================================================
#define GP_PARAM_REGISTERS esi, eax, edx, ecx, ebx
-#define GP_RETURN_REGISTERS eax, edx, ecx
+#define GP_RETURN_REGISTERS eax, edx
#define FP_PARAM_REGISTERS xmm1, xmm2, xmm3, xmm4, xmm5, xmm6
#define FP_RETURN_REGISTERS xmm1, xmm2
@@ -56,7 +56,7 @@ LinkageLocation stackloc(int i, MachineType type) {
// == x64 ====================================================================
// ===========================================================================
#define GP_PARAM_REGISTERS rsi, rax, rdx, rcx, rbx, rdi
-#define GP_RETURN_REGISTERS rax, rdx, rcx
+#define GP_RETURN_REGISTERS rax, rdx
#define FP_PARAM_REGISTERS xmm1, xmm2, xmm3, xmm4, xmm5, xmm6
#define FP_RETURN_REGISTERS xmm1, xmm2
@@ -65,7 +65,7 @@ LinkageLocation stackloc(int i, MachineType type) {
// == arm ====================================================================
// ===========================================================================
#define GP_PARAM_REGISTERS r3, r0, r1, r2
-#define GP_RETURN_REGISTERS r0, r1, r3
+#define GP_RETURN_REGISTERS r0, r1
#define FP_PARAM_REGISTERS d0, d1, d2, d3, d4, d5, d6, d7
#define FP_RETURN_REGISTERS d0, d1
@@ -74,7 +74,7 @@ LinkageLocation stackloc(int i, MachineType type) {
// == arm64 ====================================================================
// ===========================================================================
#define GP_PARAM_REGISTERS x7, x0, x1, x2, x3, x4, x5, x6
-#define GP_RETURN_REGISTERS x0, x1, x2
+#define GP_RETURN_REGISTERS x0, x1
#define FP_PARAM_REGISTERS d0, d1, d2, d3, d4, d5, d6, d7
#define FP_RETURN_REGISTERS d0, d1
@@ -83,7 +83,7 @@ LinkageLocation stackloc(int i, MachineType type) {
// == mips ===================================================================
// ===========================================================================
#define GP_PARAM_REGISTERS a0, a1, a2, a3
-#define GP_RETURN_REGISTERS v0, v1, t7
+#define GP_RETURN_REGISTERS v0, v1
#define FP_PARAM_REGISTERS f2, f4, f6, f8, f10, f12, f14
#define FP_RETURN_REGISTERS f2, f4
@@ -92,7 +92,7 @@ LinkageLocation stackloc(int i, MachineType type) {
// == mips64 =================================================================
// ===========================================================================
#define GP_PARAM_REGISTERS a0, a1, a2, a3, a4, a5, a6, a7
-#define GP_RETURN_REGISTERS v0, v1, t3
+#define GP_RETURN_REGISTERS v0, v1
#define FP_PARAM_REGISTERS f2, f4, f6, f8, f10, f12, f14
#define FP_RETURN_REGISTERS f2, f4
@@ -101,7 +101,7 @@ LinkageLocation stackloc(int i, MachineType type) {
// == ppc & ppc64 ============================================================
// ===========================================================================
#define GP_PARAM_REGISTERS r10, r3, r4, r5, r6, r7, r8, r9
-#define GP_RETURN_REGISTERS r3, r4, r5
+#define GP_RETURN_REGISTERS r3, r4
#define FP_PARAM_REGISTERS d1, d2, d3, d4, d5, d6, d7, d8
#define FP_RETURN_REGISTERS d1, d2
@@ -110,7 +110,7 @@ LinkageLocation stackloc(int i, MachineType type) {
// == s390x ==================================================================
// ===========================================================================
#define GP_PARAM_REGISTERS r6, r2, r3, r4, r5
-#define GP_RETURN_REGISTERS r2, r3, r4
+#define GP_RETURN_REGISTERS r2, r3
#define FP_PARAM_REGISTERS d0, d2, d4, d6
#define FP_RETURN_REGISTERS d0, d2, d4, d6
@@ -119,7 +119,7 @@ LinkageLocation stackloc(int i, MachineType type) {
// == s390 ===================================================================
// ===========================================================================
#define GP_PARAM_REGISTERS r6, r2, r3, r4, r5
-#define GP_RETURN_REGISTERS r2, r3, r4
+#define GP_RETURN_REGISTERS r2, r3
#define FP_PARAM_REGISTERS d0, d2
#define FP_RETURN_REGISTERS d0, d2
@@ -158,6 +158,8 @@ struct Allocator {
int stack_offset;
+ void AdjustStackOffset(int offset) { stack_offset += offset; }
+
LinkageLocation Next(ValueType type) {
if (IsFloatingPoint(type)) {
// Allocate a floating point register/stack location.
@@ -226,25 +228,28 @@ CallDescriptor* GetWasmCallDescriptor(Zone* zone, wasm::FunctionSig* fsig) {
LocationSignature::Builder locations(zone, fsig->return_count(),
fsig->parameter_count() + 1);
- Allocator rets = return_registers;
-
- // Add return location(s).
- const int return_count = static_cast<int>(locations.return_count_);
- for (int i = 0; i < return_count; i++) {
- ValueType ret = fsig->GetReturn(i);
- locations.AddReturn(rets.Next(ret));
- }
-
+ // Add register and/or stack parameter(s).
Allocator params = parameter_registers;
- // Add parameter for the wasm_context.
+ // The wasm_context.
locations.AddParam(params.Next(MachineType::PointerRepresentation()));
- // Add register and/or stack parameter(s).
const int parameter_count = static_cast<int>(fsig->parameter_count());
for (int i = 0; i < parameter_count; i++) {
ValueType param = fsig->GetParam(i);
- locations.AddParam(params.Next(param));
+ auto l = params.Next(param);
+ locations.AddParam(l);
+ }
+
+ // Add return location(s).
+ Allocator rets = return_registers;
+ rets.AdjustStackOffset(params.stack_offset);
+
+ const int return_count = static_cast<int>(locations.return_count_);
+ for (int i = 0; i < return_count; i++) {
+ ValueType ret = fsig->GetReturn(i);
+ auto l = rets.Next(ret);
+ locations.AddReturn(l);
}
const RegList kCalleeSaveRegisters = 0;
@@ -255,22 +260,23 @@ CallDescriptor* GetWasmCallDescriptor(Zone* zone, wasm::FunctionSig* fsig) {
: MachineType::AnyTagged();
LinkageLocation target_loc = LinkageLocation::ForAnyRegister(target_type);
- CallDescriptor::Flags flags = CallDescriptor::kUseNativeStack;
CallDescriptor::Kind kind = FLAG_wasm_jit_to_native
? CallDescriptor::kCallWasmFunction
: CallDescriptor::kCallCodeObject;
- return new (zone) CallDescriptor( // --
- kind, // kind
- target_type, // target MachineType
- target_loc, // target location
- locations.Build(), // location_sig
- params.stack_offset, // stack_parameter_count
- compiler::Operator::kNoProperties, // properties
- kCalleeSaveRegisters, // callee-saved registers
- kCalleeSaveFPRegisters, // callee-saved fp regs
- flags, // flags
- "wasm-call");
+ return new (zone) CallDescriptor( // --
+ kind, // kind
+ target_type, // target MachineType
+ target_loc, // target location
+ locations.Build(), // location_sig
+ params.stack_offset, // stack_parameter_count
+ compiler::Operator::kNoProperties, // properties
+ kCalleeSaveRegisters, // callee-saved registers
+ kCalleeSaveFPRegisters, // callee-saved fp regs
+ CallDescriptor::kNoFlags, // flags
+ "wasm-call", // debug name
+ 0, // allocatable registers
+ rets.stack_offset - params.stack_offset); // stack_return_count
}
CallDescriptor* ReplaceTypeInCallDescriptorWith(
@@ -295,21 +301,7 @@ CallDescriptor* ReplaceTypeInCallDescriptorWith(
LocationSignature::Builder locations(zone, return_count, parameter_count);
- Allocator rets = return_registers;
-
- for (size_t i = 0; i < descriptor->ReturnCount(); i++) {
- if (descriptor->GetReturnType(i) == input_type) {
- for (size_t j = 0; j < num_replacements; j++) {
- locations.AddReturn(rets.Next(output_type));
- }
- } else {
- locations.AddReturn(
- rets.Next(descriptor->GetReturnType(i).representation()));
- }
- }
-
Allocator params = parameter_registers;
-
for (size_t i = 0; i < descriptor->ParameterCount(); i++) {
if (descriptor->GetParameterType(i) == input_type) {
for (size_t j = 0; j < num_replacements; j++) {
@@ -321,17 +313,32 @@ CallDescriptor* ReplaceTypeInCallDescriptorWith(
}
}
- return new (zone) CallDescriptor( // --
- descriptor->kind(), // kind
- descriptor->GetInputType(0), // target MachineType
- descriptor->GetInputLocation(0), // target location
- locations.Build(), // location_sig
- params.stack_offset, // stack_parameter_count
- descriptor->properties(), // properties
- descriptor->CalleeSavedRegisters(), // callee-saved registers
- descriptor->CalleeSavedFPRegisters(), // callee-saved fp regs
- descriptor->flags(), // flags
- descriptor->debug_name());
+ Allocator rets = return_registers;
+ rets.AdjustStackOffset(params.stack_offset);
+ for (size_t i = 0; i < descriptor->ReturnCount(); i++) {
+ if (descriptor->GetReturnType(i) == input_type) {
+ for (size_t j = 0; j < num_replacements; j++) {
+ locations.AddReturn(rets.Next(output_type));
+ }
+ } else {
+ locations.AddReturn(
+ rets.Next(descriptor->GetReturnType(i).representation()));
+ }
+ }
+
+ return new (zone) CallDescriptor( // --
+ descriptor->kind(), // kind
+ descriptor->GetInputType(0), // target MachineType
+ descriptor->GetInputLocation(0), // target location
+ locations.Build(), // location_sig
+ params.stack_offset, // stack_parameter_count
+ descriptor->properties(), // properties
+ descriptor->CalleeSavedRegisters(), // callee-saved registers
+ descriptor->CalleeSavedFPRegisters(), // callee-saved fp regs
+ descriptor->flags(), // flags
+ descriptor->debug_name(), // debug name
+ descriptor->AllocatableRegisters(), // allocatable registers
+ rets.stack_offset - params.stack_offset); // stack_return_count
}
CallDescriptor* GetI32WasmCallDescriptor(Zone* zone,
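
Illustrative sketch, not part of the diff: why the return allocator above is now seeded with rets.AdjustStackOffset(params.stack_offset). Stack-allocated return values are placed after the stack parameters, and the descriptor records only the number of return slots, rets.stack_offset - params.stack_offset. The register count and slot numbers below are made-up example values.

struct AllocatorSketch {
  int gp_regs_left;      // e.g. 2 GP return registers after this change
  int stack_offset = 0;  // next free stack slot
  void AdjustStackOffset(int offset) { stack_offset += offset; }
  // Returns -1 for a register location, otherwise the stack slot index.
  int Next() { return gp_regs_left-- > 0 ? -1 : stack_offset++; }
};

// Example: 4 stack slots already used by parameters, 3 integer returns.
//   AllocatorSketch rets{2};
//   rets.AdjustStackOffset(4);
//   rets.Next();                                    // -1: first return in a register
//   rets.Next();                                    // -1: second return in a register
//   rets.Next();                                    //  4: third return spills to slot 4
//   int stack_return_count = rets.stack_offset - 4; // == 1
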
diff --git a/deps/v8/src/compiler/x64/code-generator-x64.cc b/deps/v8/src/compiler/x64/code-generator-x64.cc
index ea417533f2..bc92f9707c 100644
--- a/deps/v8/src/compiler/x64/code-generator-x64.cc
+++ b/deps/v8/src/compiler/x64/code-generator-x64.cc
@@ -156,18 +156,6 @@ bool HasImmediateInput(Instruction* instr, size_t index) {
return instr->InputAt(index)->IsImmediate();
}
-
-class OutOfLineLoadZero final : public OutOfLineCode {
- public:
- OutOfLineLoadZero(CodeGenerator* gen, Register result)
- : OutOfLineCode(gen), result_(result) {}
-
- void Generate() final { __ xorl(result_, result_); }
-
- private:
- Register const result_;
-};
-
class OutOfLineLoadFloat32NaN final : public OutOfLineCode {
public:
OutOfLineLoadFloat32NaN(CodeGenerator* gen, XMMRegister result)
@@ -295,7 +283,7 @@ class WasmOutOfLineTrap final : public OutOfLineCode {
ReferenceMap* reference_map = new (gen_->zone()) ReferenceMap(gen_->zone());
gen_->RecordSafepoint(reference_map, Safepoint::kSimple, 0,
Safepoint::kNoLazyDeopt);
- __ AssertUnreachable(kUnexpectedReturnFromWasmTrap);
+ __ AssertUnreachable(AbortReason::kUnexpectedReturnFromWasmTrap);
}
private:
@@ -456,241 +444,6 @@ void EmitOOLTrapIfNeeded(Zone* zone, CodeGenerator* codegen,
} \
} while (0)
-#define ASSEMBLE_CHECKED_LOAD_FLOAT(asm_instr, OutOfLineLoadNaN) \
- do { \
- auto result = i.OutputDoubleRegister(); \
- auto buffer = i.InputRegister(0); \
- auto index1 = i.InputRegister(1); \
- auto index2 = i.InputUint32(2); \
- OutOfLineCode* ool; \
- if (instr->InputAt(3)->IsRegister()) { \
- auto length = i.InputRegister(3); \
- DCHECK_EQ(0u, index2); \
- __ cmpl(index1, length); \
- ool = new (zone()) OutOfLineLoadNaN(this, result); \
- } else { \
- auto length = i.InputUint32(3); \
- RelocInfo::Mode rmode = i.ToConstant(instr->InputAt(3)).rmode(); \
- DCHECK_LE(index2, length); \
- __ cmpl(index1, Immediate(length - index2, rmode)); \
- class OutOfLineLoadFloat final : public OutOfLineCode { \
- public: \
- OutOfLineLoadFloat(CodeGenerator* gen, XMMRegister result, \
- Register buffer, Register index1, int32_t index2, \
- int32_t length, RelocInfo::Mode rmode) \
- : OutOfLineCode(gen), \
- result_(result), \
- buffer_(buffer), \
- index1_(index1), \
- index2_(index2), \
- length_(length), \
- rmode_(rmode) {} \
- \
- void Generate() final { \
- __ leal(kScratchRegister, Operand(index1_, index2_)); \
- __ Pcmpeqd(result_, result_); \
- __ cmpl(kScratchRegister, Immediate(length_, rmode_)); \
- __ j(above_equal, exit()); \
- __ asm_instr(result_, \
- Operand(buffer_, kScratchRegister, times_1, 0)); \
- } \
- \
- private: \
- XMMRegister const result_; \
- Register const buffer_; \
- Register const index1_; \
- int32_t const index2_; \
- int32_t const length_; \
- RelocInfo::Mode rmode_; \
- }; \
- ool = new (zone()) OutOfLineLoadFloat(this, result, buffer, index1, \
- index2, length, rmode); \
- } \
- __ j(above_equal, ool->entry()); \
- __ asm_instr(result, Operand(buffer, index1, times_1, index2)); \
- __ bind(ool->exit()); \
- } while (false)
-
-#define ASSEMBLE_CHECKED_LOAD_INTEGER(asm_instr) \
- do { \
- auto result = i.OutputRegister(); \
- auto buffer = i.InputRegister(0); \
- auto index1 = i.InputRegister(1); \
- auto index2 = i.InputUint32(2); \
- OutOfLineCode* ool; \
- if (instr->InputAt(3)->IsRegister()) { \
- auto length = i.InputRegister(3); \
- DCHECK_EQ(0u, index2); \
- __ cmpl(index1, length); \
- ool = new (zone()) OutOfLineLoadZero(this, result); \
- } else { \
- auto length = i.InputUint32(3); \
- RelocInfo::Mode rmode = i.ToConstant(instr->InputAt(3)).rmode(); \
- DCHECK_LE(index2, length); \
- __ cmpl(index1, Immediate(length - index2, rmode)); \
- class OutOfLineLoadInteger final : public OutOfLineCode { \
- public: \
- OutOfLineLoadInteger(CodeGenerator* gen, Register result, \
- Register buffer, Register index1, int32_t index2, \
- int32_t length, RelocInfo::Mode rmode) \
- : OutOfLineCode(gen), \
- result_(result), \
- buffer_(buffer), \
- index1_(index1), \
- index2_(index2), \
- length_(length), \
- rmode_(rmode) {} \
- \
- void Generate() final { \
- Label oob; \
- __ leal(kScratchRegister, Operand(index1_, index2_)); \
- __ cmpl(kScratchRegister, Immediate(length_, rmode_)); \
- __ j(above_equal, &oob, Label::kNear); \
- __ asm_instr(result_, \
- Operand(buffer_, kScratchRegister, times_1, 0)); \
- __ jmp(exit()); \
- __ bind(&oob); \
- __ xorl(result_, result_); \
- } \
- \
- private: \
- Register const result_; \
- Register const buffer_; \
- Register const index1_; \
- int32_t const index2_; \
- int32_t const length_; \
- RelocInfo::Mode const rmode_; \
- }; \
- ool = new (zone()) OutOfLineLoadInteger(this, result, buffer, index1, \
- index2, length, rmode); \
- } \
- __ j(above_equal, ool->entry()); \
- __ asm_instr(result, Operand(buffer, index1, times_1, index2)); \
- __ bind(ool->exit()); \
- } while (false)
-
-#define ASSEMBLE_CHECKED_STORE_FLOAT(asm_instr) \
- do { \
- auto buffer = i.InputRegister(0); \
- auto index1 = i.InputRegister(1); \
- auto index2 = i.InputUint32(2); \
- auto value = i.InputDoubleRegister(4); \
- if (instr->InputAt(3)->IsRegister()) { \
- auto length = i.InputRegister(3); \
- DCHECK_EQ(0u, index2); \
- Label done; \
- __ cmpl(index1, length); \
- __ j(above_equal, &done, Label::kNear); \
- __ asm_instr(Operand(buffer, index1, times_1, index2), value); \
- __ bind(&done); \
- } else { \
- auto length = i.InputUint32(3); \
- RelocInfo::Mode rmode = i.ToConstant(instr->InputAt(3)).rmode(); \
- DCHECK_LE(index2, length); \
- __ cmpl(index1, Immediate(length - index2, rmode)); \
- class OutOfLineStoreFloat final : public OutOfLineCode { \
- public: \
- OutOfLineStoreFloat(CodeGenerator* gen, Register buffer, \
- Register index1, int32_t index2, int32_t length, \
- XMMRegister value, RelocInfo::Mode rmode) \
- : OutOfLineCode(gen), \
- buffer_(buffer), \
- index1_(index1), \
- index2_(index2), \
- length_(length), \
- value_(value), \
- rmode_(rmode) {} \
- \
- void Generate() final { \
- __ leal(kScratchRegister, Operand(index1_, index2_)); \
- __ cmpl(kScratchRegister, Immediate(length_, rmode_)); \
- __ j(above_equal, exit()); \
- __ asm_instr(Operand(buffer_, kScratchRegister, times_1, 0), \
- value_); \
- } \
- \
- private: \
- Register const buffer_; \
- Register const index1_; \
- int32_t const index2_; \
- int32_t const length_; \
- XMMRegister const value_; \
- RelocInfo::Mode rmode_; \
- }; \
- auto ool = new (zone()) OutOfLineStoreFloat( \
- this, buffer, index1, index2, length, value, rmode); \
- __ j(above_equal, ool->entry()); \
- __ asm_instr(Operand(buffer, index1, times_1, index2), value); \
- __ bind(ool->exit()); \
- } \
- } while (false)
-
-#define ASSEMBLE_CHECKED_STORE_INTEGER_IMPL(asm_instr, Value) \
- do { \
- auto buffer = i.InputRegister(0); \
- auto index1 = i.InputRegister(1); \
- auto index2 = i.InputUint32(2); \
- if (instr->InputAt(3)->IsRegister()) { \
- auto length = i.InputRegister(3); \
- DCHECK_EQ(0u, index2); \
- Label done; \
- __ cmpl(index1, length); \
- __ j(above_equal, &done, Label::kNear); \
- __ asm_instr(Operand(buffer, index1, times_1, index2), value); \
- __ bind(&done); \
- } else { \
- auto length = i.InputUint32(3); \
- RelocInfo::Mode rmode = i.ToConstant(instr->InputAt(3)).rmode(); \
- DCHECK_LE(index2, length); \
- __ cmpl(index1, Immediate(length - index2, rmode)); \
- class OutOfLineStoreInteger final : public OutOfLineCode { \
- public: \
- OutOfLineStoreInteger(CodeGenerator* gen, Register buffer, \
- Register index1, int32_t index2, int32_t length, \
- Value value, RelocInfo::Mode rmode) \
- : OutOfLineCode(gen), \
- buffer_(buffer), \
- index1_(index1), \
- index2_(index2), \
- length_(length), \
- value_(value), \
- rmode_(rmode) {} \
- \
- void Generate() final { \
- __ leal(kScratchRegister, Operand(index1_, index2_)); \
- __ cmpl(kScratchRegister, Immediate(length_, rmode_)); \
- __ j(above_equal, exit()); \
- __ asm_instr(Operand(buffer_, kScratchRegister, times_1, 0), \
- value_); \
- } \
- \
- private: \
- Register const buffer_; \
- Register const index1_; \
- int32_t const index2_; \
- int32_t const length_; \
- Value const value_; \
- RelocInfo::Mode rmode_; \
- }; \
- auto ool = new (zone()) OutOfLineStoreInteger( \
- this, buffer, index1, index2, length, value, rmode); \
- __ j(above_equal, ool->entry()); \
- __ asm_instr(Operand(buffer, index1, times_1, index2), value); \
- __ bind(ool->exit()); \
- } \
- } while (false)
-
-#define ASSEMBLE_CHECKED_STORE_INTEGER(asm_instr) \
- do { \
- if (instr->InputAt(4)->IsRegister()) { \
- Register value = i.InputRegister(4); \
- ASSEMBLE_CHECKED_STORE_INTEGER_IMPL(asm_instr, Register); \
- } else { \
- Immediate value = i.InputImmediate(4); \
- ASSEMBLE_CHECKED_STORE_INTEGER_IMPL(asm_instr, Immediate); \
- } \
- } while (false)
-
#define ASSEMBLE_IEEE754_BINOP(name) \
do { \
__ PrepareCallCFunction(2); \
@@ -840,6 +593,11 @@ void CodeGenerator::BailoutIfDeoptimized() {
__ j(not_zero, code, RelocInfo::CODE_TARGET);
}
+inline bool HasCallDescriptorFlag(Instruction* instr,
+ CallDescriptor::Flag flag) {
+ return MiscField::decode(instr->opcode()) & flag;
+}
+
// Assembles an instruction after register allocation, producing machine code.
CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Instruction* instr) {
@@ -854,7 +612,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
} else {
Register reg = i.InputRegister(0);
__ addp(reg, Immediate(Code::kHeaderSize - kHeapObjectTag));
- __ call(reg);
+ if (HasCallDescriptorFlag(instr, CallDescriptor::kRetpoline)) {
+ __ RetpolineCall(reg);
+ } else {
+ __ call(reg);
+ }
}
RecordCallPosition(instr);
frame_access_state()->ClearSPDelta();
@@ -867,11 +629,19 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (info()->IsWasm()) {
__ near_call(wasm_code, RelocInfo::WASM_CALL);
} else {
- __ Call(wasm_code, RelocInfo::JS_TO_WASM_CALL);
+ if (HasCallDescriptorFlag(instr, CallDescriptor::kRetpoline)) {
+ __ RetpolineCall(wasm_code, RelocInfo::JS_TO_WASM_CALL);
+ } else {
+ __ Call(wasm_code, RelocInfo::JS_TO_WASM_CALL);
+ }
}
} else {
Register reg = i.InputRegister(0);
- __ call(reg);
+ if (HasCallDescriptorFlag(instr, CallDescriptor::kRetpoline)) {
+ __ RetpolineCall(reg);
+ } else {
+ __ call(reg);
+ }
}
RecordCallPosition(instr);
frame_access_state()->ClearSPDelta();
@@ -890,7 +660,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
} else {
Register reg = i.InputRegister(0);
__ addp(reg, Immediate(Code::kHeaderSize - kHeapObjectTag));
- __ jmp(reg);
+ if (HasCallDescriptorFlag(instr, CallDescriptor::kRetpoline)) {
+ __ RetpolineJump(reg);
+ } else {
+ __ jmp(reg);
+ }
}
unwinding_info_writer_.MarkBlockWillExit();
frame_access_state()->ClearSPDelta();
@@ -909,7 +683,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
} else {
Register reg = i.InputRegister(0);
- __ jmp(reg);
+ if (HasCallDescriptorFlag(instr, CallDescriptor::kRetpoline)) {
+ __ RetpolineJump(reg);
+ } else {
+ __ jmp(reg);
+ }
}
unwinding_info_writer_.MarkBlockWillExit();
frame_access_state()->ClearSPDelta();
@@ -919,7 +697,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArchTailCallAddress: {
CHECK(!HasImmediateInput(instr, 0));
Register reg = i.InputRegister(0);
- __ jmp(reg);
+ if (HasCallDescriptorFlag(instr, CallDescriptor::kRetpoline)) {
+ __ RetpolineJump(reg);
+ } else {
+ __ jmp(reg);
+ }
unwinding_info_writer_.MarkBlockWillExit();
frame_access_state()->ClearSPDelta();
frame_access_state()->SetFrameAccessToDefault();
@@ -930,7 +712,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (FLAG_debug_code) {
// Check the function's context matches the context argument.
__ cmpp(rsi, FieldOperand(func, JSFunction::kContextOffset));
- __ Assert(equal, kWrongFunctionContext);
+ __ Assert(equal, AbortReason::kWrongFunctionContext);
}
__ movp(rcx, FieldOperand(func, JSFunction::kCodeOffset));
__ addp(rcx, Immediate(Code::kHeaderSize - kHeapObjectTag));
@@ -1093,6 +875,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ bind(ool->exit());
break;
}
+ case kLFence:
+ __ lfence();
+ break;
case kArchStackSlot: {
FrameOffset offset =
frame_access_state()->GetFrameOffset(i.InputInt32(0));
@@ -2216,22 +2001,41 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
frame_access_state()->IncreaseSPDelta(1);
unwinding_info_writer_.MaybeIncreaseBaseOffsetAt(__ pc_offset(),
kPointerSize);
- } else if (instr->InputAt(0)->IsFPRegister()) {
+ } else if (instr->InputAt(0)->IsFloatRegister() ||
+ instr->InputAt(0)->IsDoubleRegister()) {
// TODO(titzer): use another machine instruction?
__ subq(rsp, Immediate(kDoubleSize));
frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
unwinding_info_writer_.MaybeIncreaseBaseOffsetAt(__ pc_offset(),
kDoubleSize);
__ Movsd(Operand(rsp, 0), i.InputDoubleRegister(0));
- } else {
+ } else if (instr->InputAt(0)->IsSimd128Register()) {
+ // TODO(titzer): use another machine instruction?
+ __ subq(rsp, Immediate(kSimd128Size));
+ frame_access_state()->IncreaseSPDelta(kSimd128Size / kPointerSize);
+ unwinding_info_writer_.MaybeIncreaseBaseOffsetAt(__ pc_offset(),
+ kSimd128Size);
+ __ Movups(Operand(rsp, 0), i.InputSimd128Register(0));
+ } else if (instr->InputAt(0)->IsStackSlot() ||
+ instr->InputAt(0)->IsFloatStackSlot() ||
+ instr->InputAt(0)->IsDoubleStackSlot()) {
__ pushq(i.InputOperand(0));
frame_access_state()->IncreaseSPDelta(1);
unwinding_info_writer_.MaybeIncreaseBaseOffsetAt(__ pc_offset(),
kPointerSize);
+ } else {
+ DCHECK(instr->InputAt(0)->IsSimd128StackSlot());
+ __ Movups(kScratchDoubleReg, i.InputOperand(0));
+ // TODO(titzer): use another machine instruction?
+ __ subq(rsp, Immediate(kSimd128Size));
+ frame_access_state()->IncreaseSPDelta(kSimd128Size / kPointerSize);
+ unwinding_info_writer_.MaybeIncreaseBaseOffsetAt(__ pc_offset(),
+ kSimd128Size);
+ __ Movups(Operand(rsp, 0), kScratchDoubleReg);
}
break;
case kX64Poke: {
- int const slot = MiscField::decode(instr->opcode());
+ int slot = MiscField::decode(instr->opcode());
if (HasImmediateInput(instr, 0)) {
__ movq(Operand(rsp, slot * kPointerSize), i.InputImmediate(0));
} else {
@@ -2239,6 +2043,101 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
break;
}
+ case kX64Peek: {
+ int reverse_slot = i.InputInt32(0);
+ int offset =
+ FrameSlotToFPOffset(frame()->GetTotalFrameSlotCount() - reverse_slot);
+ if (instr->OutputAt(0)->IsFPRegister()) {
+ LocationOperand* op = LocationOperand::cast(instr->OutputAt(0));
+ if (op->representation() == MachineRepresentation::kFloat64) {
+ __ Movsd(i.OutputDoubleRegister(), Operand(rbp, offset));
+ } else {
+ DCHECK_EQ(MachineRepresentation::kFloat32, op->representation());
+ __ Movss(i.OutputFloatRegister(), Operand(rbp, offset));
+ }
+ } else {
+ __ movq(i.OutputRegister(), Operand(rbp, offset));
+ }
+ break;
+ }
+ // TODO(gdeepti): Get rid of redundant moves for F32x4Splat/Extract below
+ case kX64F32x4Splat: {
+ XMMRegister dst = i.OutputSimd128Register();
+ if (instr->InputAt(0)->IsFPRegister()) {
+ __ Movss(dst, i.InputDoubleRegister(0));
+ } else {
+ __ Movss(dst, i.InputOperand(0));
+ }
+ __ shufps(dst, dst, 0x0);
+ break;
+ }
+ case kX64F32x4ExtractLane: {
+ CpuFeatureScope sse_scope(tasm(), SSE4_1);
+ __ extractps(kScratchRegister, i.InputSimd128Register(0), i.InputInt8(1));
+ __ movd(i.OutputDoubleRegister(), kScratchRegister);
+ break;
+ }
+ case kX64F32x4ReplaceLane: {
+ CpuFeatureScope sse_scope(tasm(), SSE4_1);
+ // The insertps instruction uses imm8[5:4] to indicate the lane
+ // that needs to be replaced.
+ byte select = i.InputInt8(1) << 4 & 0x30;
+ __ insertps(i.OutputSimd128Register(), i.InputDoubleRegister(2), select);
+ break;
+ }
+ case kX64F32x4RecipApprox: {
+ __ rcpps(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ break;
+ }
+ case kX64F32x4RecipSqrtApprox: {
+ __ rsqrtps(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ break;
+ }
+ case kX64F32x4Add: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ __ addps(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ break;
+ }
+ case kX64F32x4Sub: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ __ subps(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ break;
+ }
+ case kX64F32x4Mul: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ __ mulps(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ break;
+ }
+ case kX64F32x4Min: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ __ minps(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ break;
+ }
+ case kX64F32x4Max: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ __ maxps(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ break;
+ }
+ case kX64F32x4Eq: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ __ cmpps(i.OutputSimd128Register(), i.InputSimd128Register(1), 0x0);
+ break;
+ }
+ case kX64F32x4Ne: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ __ cmpps(i.OutputSimd128Register(), i.InputSimd128Register(1), 0x4);
+ break;
+ }
+ case kX64F32x4Lt: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ __ cmpltps(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ break;
+ }
+ case kX64F32x4Le: {
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ __ cmpleps(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ break;
+ }
case kX64I32x4Splat: {
XMMRegister dst = i.OutputSimd128Register();
__ movd(dst, i.InputRegister(0));
@@ -2669,48 +2568,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ xorps(dst, i.InputSimd128Register(2));
break;
}
- case kCheckedLoadInt8:
- ASSEMBLE_CHECKED_LOAD_INTEGER(movsxbl);
- break;
- case kCheckedLoadUint8:
- ASSEMBLE_CHECKED_LOAD_INTEGER(movzxbl);
- break;
- case kCheckedLoadInt16:
- ASSEMBLE_CHECKED_LOAD_INTEGER(movsxwl);
- break;
- case kCheckedLoadUint16:
- ASSEMBLE_CHECKED_LOAD_INTEGER(movzxwl);
- break;
- case kCheckedLoadWord32:
- ASSEMBLE_CHECKED_LOAD_INTEGER(movl);
- break;
- case kCheckedLoadWord64:
- ASSEMBLE_CHECKED_LOAD_INTEGER(movq);
- break;
- case kCheckedLoadFloat32:
- ASSEMBLE_CHECKED_LOAD_FLOAT(Movss, OutOfLineLoadFloat32NaN);
- break;
- case kCheckedLoadFloat64:
- ASSEMBLE_CHECKED_LOAD_FLOAT(Movsd, OutOfLineLoadFloat64NaN);
- break;
- case kCheckedStoreWord8:
- ASSEMBLE_CHECKED_STORE_INTEGER(movb);
- break;
- case kCheckedStoreWord16:
- ASSEMBLE_CHECKED_STORE_INTEGER(movw);
- break;
- case kCheckedStoreWord32:
- ASSEMBLE_CHECKED_STORE_INTEGER(movl);
- break;
- case kCheckedStoreWord64:
- ASSEMBLE_CHECKED_STORE_INTEGER(movq);
- break;
- case kCheckedStoreFloat32:
- ASSEMBLE_CHECKED_STORE_FLOAT(Movss);
- break;
- case kCheckedStoreFloat64:
- ASSEMBLE_CHECKED_STORE_FLOAT(Movsd);
- break;
case kX64StackCheck:
__ CompareRoot(rsp, Heap::kStackLimitRootIndex);
break;
@@ -2954,7 +2811,7 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
new (gen_->zone()) ReferenceMap(gen_->zone());
gen_->RecordSafepoint(reference_map, Safepoint::kSimple, 0,
Safepoint::kNoLazyDeopt);
- __ AssertUnreachable(kUnexpectedReturnFromWasmTrap);
+ __ AssertUnreachable(AbortReason::kUnexpectedReturnFromWasmTrap);
}
}
@@ -3082,7 +2939,7 @@ void CodeGenerator::AssembleConstructFrame() {
if (info()->is_osr()) {
// TurboFan OSR-compiled functions cannot be entered directly.
- __ Abort(kShouldNotDirectlyEnterOsrFunction);
+ __ Abort(AbortReason::kShouldNotDirectlyEnterOsrFunction);
// Unoptimized code jumps directly to this entrypoint while the unoptimized
// frame is still on the stack. Optimized code uses OSR values directly from
@@ -3124,13 +2981,15 @@ void CodeGenerator::AssembleConstructFrame() {
ReferenceMap* reference_map = new (zone()) ReferenceMap(zone());
RecordSafepoint(reference_map, Safepoint::kSimple, 0,
Safepoint::kNoLazyDeopt);
- __ AssertUnreachable(kUnexpectedReturnFromWasmTrap);
+ __ AssertUnreachable(AbortReason::kUnexpectedReturnFromWasmTrap);
__ bind(&done);
}
- // Skip callee-saved slots, which are pushed below.
+ // Skip callee-saved and return slots, which are created below.
shrink_slots -= base::bits::CountPopulation(saves);
- shrink_slots -= base::bits::CountPopulation(saves_fp);
+ shrink_slots -=
+ base::bits::CountPopulation(saves_fp) * (kQuadWordSize / kPointerSize);
+ shrink_slots -= frame()->GetReturnSlotCount();
if (shrink_slots > 0) {
__ subq(rsp, Immediate(shrink_slots * kPointerSize));
}
@@ -3157,6 +3016,11 @@ void CodeGenerator::AssembleConstructFrame() {
__ pushq(Register::from_code(i));
}
}
+
+ // Allocate return slots (located after callee-saved).
+ if (frame()->GetReturnSlotCount() > 0) {
+ __ subq(rsp, Immediate(frame()->GetReturnSlotCount() * kPointerSize));
+ }
}
void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
@@ -3165,6 +3029,10 @@ void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
// Restore registers.
const RegList saves = descriptor->CalleeSavedRegisters();
if (saves != 0) {
+ const int returns = frame()->GetReturnSlotCount();
+ if (returns != 0) {
+ __ addq(rsp, Immediate(returns * kPointerSize));
+ }
for (int i = 0; i < Register::kNumRegisters; i++) {
if (!((1 << i) & saves)) continue;
__ popq(Register::from_code(i));
@@ -3212,7 +3080,6 @@ void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
}
if (pop->IsImmediate()) {
- DCHECK_EQ(Constant::kInt32, g.ToConstant(pop).type());
pop_size += g.ToConstant(pop).ToInt32() * kPointerSize;
CHECK_LT(pop_size, static_cast<size_t>(std::numeric_limits<int>::max()));
__ Ret(static_cast<int>(pop_size), rcx);
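
Illustrative sketch, not part of the diff: the symmetric stack adjustment added to the x64 frame code above. The prologue reserves GetReturnSlotCount() * kPointerSize bytes after pushing the callee-saved registers, and the epilogue releases the same amount before popping them. kPointerSize = 8 is assumed for x64; the constant name is a stand-in.

constexpr int kPointerSizeSketch = 8;
constexpr int ReturnSlotBytes(int return_slot_count) {
  return return_slot_count * kPointerSizeSketch;
}
// Prologue:  subq rsp, Immediate(ReturnSlotBytes(n))
// Epilogue:  addq rsp, Immediate(ReturnSlotBytes(n))  // before popping callee-saved regs
static_assert(ReturnSlotBytes(2) == 16, "two return slots occupy 16 bytes");
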
diff --git a/deps/v8/src/compiler/x64/instruction-codes-x64.h b/deps/v8/src/compiler/x64/instruction-codes-x64.h
index 9c268ededf..6d9bc6f820 100644
--- a/deps/v8/src/compiler/x64/instruction-codes-x64.h
+++ b/deps/v8/src/compiler/x64/instruction-codes-x64.h
@@ -56,6 +56,7 @@ namespace compiler {
V(X64Tzcnt32) \
V(X64Popcnt) \
V(X64Popcnt32) \
+ V(LFence) \
V(SSEFloat32Cmp) \
V(SSEFloat32Add) \
V(SSEFloat32Sub) \
@@ -143,7 +144,22 @@ namespace compiler {
V(X64Inc32) \
V(X64Push) \
V(X64Poke) \
+ V(X64Peek) \
V(X64StackCheck) \
+ V(X64F32x4Splat) \
+ V(X64F32x4ExtractLane) \
+ V(X64F32x4ReplaceLane) \
+ V(X64F32x4RecipApprox) \
+ V(X64F32x4RecipSqrtApprox) \
+ V(X64F32x4Add) \
+ V(X64F32x4Sub) \
+ V(X64F32x4Mul) \
+ V(X64F32x4Min) \
+ V(X64F32x4Max) \
+ V(X64F32x4Eq) \
+ V(X64F32x4Ne) \
+ V(X64F32x4Lt) \
+ V(X64F32x4Le) \
V(X64I32x4Splat) \
V(X64I32x4ExtractLane) \
V(X64I32x4ReplaceLane) \
diff --git a/deps/v8/src/compiler/x64/instruction-scheduler-x64.cc b/deps/v8/src/compiler/x64/instruction-scheduler-x64.cc
index ba775e72af..c16fee5861 100644
--- a/deps/v8/src/compiler/x64/instruction-scheduler-x64.cc
+++ b/deps/v8/src/compiler/x64/instruction-scheduler-x64.cc
@@ -123,6 +123,20 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kX64Lea:
case kX64Dec32:
case kX64Inc32:
+ case kX64F32x4Splat:
+ case kX64F32x4ExtractLane:
+ case kX64F32x4ReplaceLane:
+ case kX64F32x4RecipApprox:
+ case kX64F32x4RecipSqrtApprox:
+ case kX64F32x4Add:
+ case kX64F32x4Sub:
+ case kX64F32x4Mul:
+ case kX64F32x4Min:
+ case kX64F32x4Max:
+ case kX64F32x4Eq:
+ case kX64F32x4Ne:
+ case kX64F32x4Lt:
+ case kX64F32x4Le:
case kX64I32x4Splat:
case kX64I32x4ExtractLane:
case kX64I32x4ReplaceLane:
@@ -240,12 +254,16 @@ int InstructionScheduler::GetTargetInstructionFlags(
return instr->HasOutput() ? kIsLoadOperation : kHasSideEffect;
case kX64StackCheck:
+ case kX64Peek:
return kIsLoadOperation;
case kX64Push:
case kX64Poke:
return kHasSideEffect;
+ case kLFence:
+ return kHasSideEffect;
+
#define CASE(Name) case k##Name:
COMMON_ARCH_OPCODE_LIST(CASE)
#undef CASE
@@ -261,20 +279,6 @@ int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
// Basic latency modeling for x64 instructions. They have been determined
// in an empirical way.
switch (instr->arch_opcode()) {
- case kCheckedLoadInt8:
- case kCheckedLoadUint8:
- case kCheckedLoadInt16:
- case kCheckedLoadUint16:
- case kCheckedLoadWord32:
- case kCheckedLoadWord64:
- case kCheckedLoadFloat32:
- case kCheckedLoadFloat64:
- case kCheckedStoreWord8:
- case kCheckedStoreWord16:
- case kCheckedStoreWord32:
- case kCheckedStoreWord64:
- case kCheckedStoreFloat32:
- case kCheckedStoreFloat64:
case kSSEFloat64Mul:
return 5;
case kX64Imul:
diff --git a/deps/v8/src/compiler/x64/instruction-selector-x64.cc b/deps/v8/src/compiler/x64/instruction-selector-x64.cc
index 04fec146de..a0f14c687c 100644
--- a/deps/v8/src/compiler/x64/instruction-selector-x64.cc
+++ b/deps/v8/src/compiler/x64/instruction-selector-x64.cc
@@ -109,7 +109,7 @@ class X64OperandGenerator final : public OperandGenerator {
DCHECK(scale_exponent >= 0 && scale_exponent <= 3);
inputs[(*input_count)++] = UseRegister(index);
if (displacement != nullptr) {
- inputs[(*input_count)++] = displacement_mode
+ inputs[(*input_count)++] = displacement_mode == kNegativeDisplacement
? UseNegatedImmediate(displacement)
: UseImmediate(displacement);
static const AddressingMode kMRnI_modes[] = {kMode_MR1I, kMode_MR2I,
@@ -289,6 +289,11 @@ void InstructionSelector::VisitDebugAbort(Node* node) {
Emit(kArchDebugAbort, g.NoOutput(), g.UseFixed(node->InputAt(0), rdx));
}
+void InstructionSelector::VisitSpeculationFence(Node* node) {
+ X64OperandGenerator g(this);
+ Emit(kLFence, g.NoOutput());
+}
+
void InstructionSelector::VisitLoad(Node* node) {
LoadRepresentation load_rep = LoadRepresentationOf(node->op());
X64OperandGenerator g(this);
@@ -399,118 +404,6 @@ void InstructionSelector::VisitUnalignedLoad(Node* node) { UNREACHABLE(); }
// Architecture supports unaligned access, therefore VisitStore is used instead
void InstructionSelector::VisitUnalignedStore(Node* node) { UNREACHABLE(); }
-void InstructionSelector::VisitCheckedLoad(Node* node) {
- CheckedLoadRepresentation load_rep = CheckedLoadRepresentationOf(node->op());
- X64OperandGenerator g(this);
- Node* const buffer = node->InputAt(0);
- Node* const offset = node->InputAt(1);
- Node* const length = node->InputAt(2);
- ArchOpcode opcode = kArchNop;
- switch (load_rep.representation()) {
- case MachineRepresentation::kWord8:
- opcode = load_rep.IsSigned() ? kCheckedLoadInt8 : kCheckedLoadUint8;
- break;
- case MachineRepresentation::kWord16:
- opcode = load_rep.IsSigned() ? kCheckedLoadInt16 : kCheckedLoadUint16;
- break;
- case MachineRepresentation::kWord32:
- opcode = kCheckedLoadWord32;
- break;
- case MachineRepresentation::kWord64:
- opcode = kCheckedLoadWord64;
- break;
- case MachineRepresentation::kFloat32:
- opcode = kCheckedLoadFloat32;
- break;
- case MachineRepresentation::kFloat64:
- opcode = kCheckedLoadFloat64;
- break;
- case MachineRepresentation::kBit: // Fall through.
- case MachineRepresentation::kSimd128: // Fall through.
- case MachineRepresentation::kTaggedSigned: // Fall through.
- case MachineRepresentation::kTaggedPointer: // Fall through.
- case MachineRepresentation::kTagged: // Fall through.
- case MachineRepresentation::kNone:
- UNREACHABLE();
- return;
- }
- if (offset->opcode() == IrOpcode::kInt32Add && CanCover(node, offset)) {
- Int32Matcher mlength(length);
- Int32BinopMatcher moffset(offset);
- if (mlength.HasValue() && moffset.right().HasValue() &&
- moffset.right().Value() >= 0 &&
- mlength.Value() >= moffset.right().Value()) {
- Emit(opcode, g.DefineAsRegister(node), g.UseRegister(buffer),
- g.UseRegister(moffset.left().node()),
- g.UseImmediate(moffset.right().node()), g.UseImmediate(length));
- return;
- }
- }
- InstructionOperand length_operand =
- g.CanBeImmediate(length) ? g.UseImmediate(length) : g.UseRegister(length);
- Emit(opcode, g.DefineAsRegister(node), g.UseRegister(buffer),
- g.UseRegister(offset), g.TempImmediate(0), length_operand);
-}
-
-
-void InstructionSelector::VisitCheckedStore(Node* node) {
- MachineRepresentation rep = CheckedStoreRepresentationOf(node->op());
- X64OperandGenerator g(this);
- Node* const buffer = node->InputAt(0);
- Node* const offset = node->InputAt(1);
- Node* const length = node->InputAt(2);
- Node* const value = node->InputAt(3);
- ArchOpcode opcode = kArchNop;
- switch (rep) {
- case MachineRepresentation::kWord8:
- opcode = kCheckedStoreWord8;
- break;
- case MachineRepresentation::kWord16:
- opcode = kCheckedStoreWord16;
- break;
- case MachineRepresentation::kWord32:
- opcode = kCheckedStoreWord32;
- break;
- case MachineRepresentation::kWord64:
- opcode = kCheckedStoreWord64;
- break;
- case MachineRepresentation::kFloat32:
- opcode = kCheckedStoreFloat32;
- break;
- case MachineRepresentation::kFloat64:
- opcode = kCheckedStoreFloat64;
- break;
- case MachineRepresentation::kBit: // Fall through.
- case MachineRepresentation::kSimd128: // Fall through.
- case MachineRepresentation::kTaggedSigned: // Fall through.
- case MachineRepresentation::kTaggedPointer: // Fall through.
- case MachineRepresentation::kTagged: // Fall through.
- case MachineRepresentation::kNone:
- UNREACHABLE();
- return;
- }
- InstructionOperand value_operand =
- g.CanBeImmediate(value) ? g.UseImmediate(value) : g.UseRegister(value);
- if (offset->opcode() == IrOpcode::kInt32Add && CanCover(node, offset)) {
- Int32Matcher mlength(length);
- Int32BinopMatcher moffset(offset);
- if (mlength.HasValue() && moffset.right().HasValue() &&
- moffset.right().Value() >= 0 &&
- mlength.Value() >= moffset.right().Value()) {
- Emit(opcode, g.NoOutput(), g.UseRegister(buffer),
- g.UseRegister(moffset.left().node()),
- g.UseImmediate(moffset.right().node()), g.UseImmediate(length),
- value_operand);
- return;
- }
- }
- InstructionOperand length_operand =
- g.CanBeImmediate(length) ? g.UseImmediate(length) : g.UseRegister(length);
- Emit(opcode, g.NoOutput(), g.UseRegister(buffer), g.UseRegister(offset),
- g.TempImmediate(0), length_operand, value_operand);
-}
-
-
// Shared routine for multiple binary operations.
static void VisitBinop(InstructionSelector* selector, Node* node,
InstructionCode opcode, FlagsContinuation* cont) {
@@ -579,7 +472,8 @@ static void VisitBinop(InstructionSelector* selector, Node* node,
opcode = cont->Encode(opcode);
if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
- cont->kind(), cont->reason(), cont->frame_state());
+ cont->kind(), cont->reason(), cont->feedback(),
+ cont->frame_state());
} else {
selector->Emit(opcode, output_count, outputs, input_count, inputs);
}
@@ -597,9 +491,9 @@ static void VisitBinop(InstructionSelector* selector, Node* node,
void InstructionSelector::VisitWord32And(Node* node) {
X64OperandGenerator g(this);
Uint32BinopMatcher m(node);
- if (m.right().Is(0xff)) {
+ if (m.right().Is(0xFF)) {
Emit(kX64Movzxbl, g.DefineAsRegister(node), g.Use(m.left().node()));
- } else if (m.right().Is(0xffff)) {
+ } else if (m.right().Is(0xFFFF)) {
Emit(kX64Movzxwl, g.DefineAsRegister(node), g.Use(m.left().node()));
} else {
VisitBinop(this, node, kX64And32);
@@ -823,6 +717,10 @@ bool TryMatchLoadWord64AndShiftRight(InstructionSelector* selector, Node* node,
}
inputs[input_count++] = ImmediateOperand(ImmediateOperand::INLINE, 4);
} else {
+ // In the case that the base address was zero, the displacement will be
+ // in a register and replacing it with an immediate is not allowed. This
+ // usually only happens in dead code anyway.
+ if (!inputs[input_count - 1].IsImmediate()) return false;
int32_t displacement = g.GetImmediateIntegerValue(mleft.displacement());
inputs[input_count - 1] =
ImmediateOperand(ImmediateOperand::INLINE, displacement + 4);
@@ -1369,6 +1267,7 @@ void VisitFloatUnop(InstructionSelector* selector, Node* node, Node* input,
}
RO_OP_LIST(RO_VISITOR)
#undef RO_VISITOR
+#undef RO_OP_LIST
#define RR_VISITOR(Name, opcode) \
void InstructionSelector::Visit##Name(Node* node) { \
@@ -1376,6 +1275,7 @@ RO_OP_LIST(RO_VISITOR)
}
RR_OP_LIST(RR_VISITOR)
#undef RR_VISITOR
+#undef RR_OP_LIST
void InstructionSelector::VisitTruncateFloat64ToWord32(Node* node) {
VisitRR(this, node, kArchTruncateDoubleToI);
@@ -1538,11 +1438,11 @@ void InstructionSelector::EmitPrepareArguments(
// Poke any stack arguments.
for (size_t n = 0; n < arguments->size(); ++n) {
PushParameter input = (*arguments)[n];
- if (input.node()) {
+ if (input.node) {
int slot = static_cast<int>(n);
- InstructionOperand value = g.CanBeImmediate(input.node())
- ? g.UseImmediate(input.node())
- : g.UseRegister(input.node());
+ InstructionOperand value = g.CanBeImmediate(input.node)
+ ? g.UseImmediate(input.node)
+ : g.UseRegister(input.node);
Emit(kX64Poke | MiscField::encode(slot), g.NoOutput(), value);
}
}
@@ -1550,31 +1450,55 @@ void InstructionSelector::EmitPrepareArguments(
// Push any stack arguments.
int effect_level = GetEffectLevel(node);
for (PushParameter input : base::Reversed(*arguments)) {
- Node* input_node = input.node();
- if (g.CanBeImmediate(input_node)) {
- Emit(kX64Push, g.NoOutput(), g.UseImmediate(input_node));
+ // Skip any alignment holes in pushed nodes. We may have one in case of a
+ // Simd128 stack argument.
+ if (input.node == nullptr) continue;
+ if (g.CanBeImmediate(input.node)) {
+ Emit(kX64Push, g.NoOutput(), g.UseImmediate(input.node));
} else if (IsSupported(ATOM) ||
- sequence()->IsFP(GetVirtualRegister(input_node))) {
+ sequence()->IsFP(GetVirtualRegister(input.node))) {
// TODO(titzer): X64Push cannot handle stack->stack double moves
// because there is no way to encode fixed double slots.
- Emit(kX64Push, g.NoOutput(), g.UseRegister(input_node));
- } else if (g.CanBeMemoryOperand(kX64Push, node, input_node,
+ Emit(kX64Push, g.NoOutput(), g.UseRegister(input.node));
+ } else if (g.CanBeMemoryOperand(kX64Push, node, input.node,
effect_level)) {
InstructionOperand outputs[1];
InstructionOperand inputs[4];
size_t input_count = 0;
InstructionCode opcode = kX64Push;
AddressingMode mode = g.GetEffectiveAddressMemoryOperand(
- input_node, inputs, &input_count);
+ input.node, inputs, &input_count);
opcode |= AddressingModeField::encode(mode);
Emit(opcode, 0, outputs, input_count, inputs);
} else {
- Emit(kX64Push, g.NoOutput(), g.Use(input_node));
+ Emit(kX64Push, g.NoOutput(), g.Use(input.node));
}
}
}
}
+void InstructionSelector::EmitPrepareResults(ZoneVector<PushParameter>* results,
+ const CallDescriptor* descriptor,
+ Node* node) {
+ X64OperandGenerator g(this);
+
+ int reverse_slot = 0;
+ for (PushParameter output : *results) {
+ if (!output.location.IsCallerFrameSlot()) continue;
+ reverse_slot += output.location.GetSizeInPointers();
+ // Skip any alignment holes in nodes.
+ if (output.node == nullptr) continue;
+ DCHECK(!descriptor->IsCFunctionCall());
+ if (output.location.GetType() == MachineType::Float32()) {
+ MarkAsFloat32(output.node);
+ } else if (output.location.GetType() == MachineType::Float64()) {
+ MarkAsFloat64(output.node);
+ }
+ InstructionOperand result = g.DefineAsRegister(output.node);
+ InstructionOperand slot = g.UseImmediate(reverse_slot);
+ Emit(kX64Peek, 1, &result, 1, &slot);
+ }
+}
bool InstructionSelector::IsTailCallAddressImmediate() { return true; }
@@ -1602,7 +1526,8 @@ void VisitCompareWithMemoryOperand(InstructionSelector* selector,
selector->Emit(opcode, 0, nullptr, input_count, inputs);
} else if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, 0, nullptr, input_count, inputs,
- cont->kind(), cont->reason(), cont->frame_state());
+ cont->kind(), cont->reason(), cont->feedback(),
+ cont->frame_state());
} else if (cont->IsSet()) {
InstructionOperand output = g.DefineAsRegister(cont->result());
selector->Emit(opcode, 1, &output, input_count, inputs);
@@ -1624,7 +1549,8 @@ void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
g.Label(cont->true_block()), g.Label(cont->false_block()));
} else if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, g.NoOutput(), left, right, cont->kind(),
- cont->reason(), cont->frame_state());
+ cont->reason(), cont->feedback(),
+ cont->frame_state());
} else if (cont->IsSet()) {
selector->Emit(opcode, g.DefineAsRegister(cont->result()), left, right);
} else {
@@ -1812,7 +1738,8 @@ void VisitWord64Compare(InstructionSelector* selector, Node* node,
g.Label(cont->false_block()));
} else if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, 0, nullptr, 0, nullptr, cont->kind(),
- cont->reason(), cont->frame_state());
+ cont->reason(), cont->feedback(),
+ cont->frame_state());
} else if (cont->IsSet()) {
selector->Emit(opcode, g.DefineAsRegister(cont->result()));
} else {
@@ -2012,14 +1939,14 @@ void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
void InstructionSelector::VisitDeoptimizeIf(Node* node) {
DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
- kNotEqual, p.kind(), p.reason(), node->InputAt(1));
+ kNotEqual, p.kind(), p.reason(), p.feedback(), node->InputAt(1));
VisitWordCompareZero(this, node, node->InputAt(0), &cont);
}
void InstructionSelector::VisitDeoptimizeUnless(Node* node) {
DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
- kEqual, p.kind(), p.reason(), node->InputAt(1));
+ kEqual, p.kind(), p.reason(), p.feedback(), node->InputAt(1));
VisitWordCompareZero(this, node, node->InputAt(0), &cont);
}
@@ -2442,16 +2369,21 @@ VISIT_ATOMIC_BINOP(Xor)
#undef VISIT_ATOMIC_BINOP
#define SIMD_TYPES(V) \
+ V(F32x4) \
V(I32x4) \
V(I16x8) \
V(I8x16)
-#define SIMD_FORMAT_LIST(V) \
- V(32x4) \
- V(16x8) \
- V(8x16)
-
#define SIMD_BINOP_LIST(V) \
+ V(F32x4Add) \
+ V(F32x4Sub) \
+ V(F32x4Mul) \
+ V(F32x4Min) \
+ V(F32x4Max) \
+ V(F32x4Eq) \
+ V(F32x4Ne) \
+ V(F32x4Lt) \
+ V(F32x4Le) \
V(I32x4Add) \
V(I32x4AddHoriz) \
V(I32x4Sub) \
@@ -2505,6 +2437,8 @@ VISIT_ATOMIC_BINOP(Xor)
V(S128Xor)
#define SIMD_UNOP_LIST(V) \
+ V(F32x4RecipApprox) \
+ V(F32x4RecipSqrtApprox) \
V(I32x4Neg) \
V(I16x8Neg) \
V(I8x16Neg) \
@@ -2580,6 +2514,10 @@ SIMD_UNOP_LIST(VISIT_SIMD_UNOP)
}
SIMD_BINOP_LIST(VISIT_SIMD_BINOP)
#undef VISIT_SIMD_BINOP
+#undef SIMD_TYPES
+#undef SIMD_BINOP_LIST
+#undef SIMD_UNOP_LIST
+#undef SIMD_SHIFT_OPCODES
void InstructionSelector::VisitS128Select(Node* node) {
X64OperandGenerator g(this);
@@ -2601,7 +2539,8 @@ MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
MachineOperatorBuilder::Flags flags =
MachineOperatorBuilder::kWord32ShiftIsSafe |
- MachineOperatorBuilder::kWord32Ctz | MachineOperatorBuilder::kWord64Ctz;
+ MachineOperatorBuilder::kWord32Ctz | MachineOperatorBuilder::kWord64Ctz |
+ MachineOperatorBuilder::kSpeculationFence;
if (CpuFeatures::IsSupported(POPCNT)) {
flags |= MachineOperatorBuilder::kWord32Popcnt |
MachineOperatorBuilder::kWord64Popcnt;